Diffstat (limited to 'drivers/crypto/intel')
-rw-r--r--  drivers/crypto/intel/Kconfig | 5
-rw-r--r--  drivers/crypto/intel/Makefile | 5
-rw-r--r--  drivers/crypto/intel/ixp4xx/Kconfig | 14
-rw-r--r--  drivers/crypto/intel/ixp4xx/Makefile | 2
-rw-r--r--  drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c | 1604
-rw-r--r--  drivers/crypto/intel/keembay/Kconfig | 90
-rw-r--r--  drivers/crypto/intel/keembay/Makefile | 10
-rw-r--r--  drivers/crypto/intel/keembay/keembay-ocs-aes-core.c | 1691
-rw-r--r--  drivers/crypto/intel/keembay/keembay-ocs-ecc.c | 1009
-rw-r--r--  drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c | 1261
-rw-r--r--  drivers/crypto/intel/keembay/ocs-aes.c | 1489
-rw-r--r--  drivers/crypto/intel/keembay/ocs-aes.h | 129
-rw-r--r--  drivers/crypto/intel/keembay/ocs-hcu.c | 840
-rw-r--r--  drivers/crypto/intel/keembay/ocs-hcu.h | 106
-rw-r--r--  drivers/crypto/intel/qat/Kconfig | 97
-rw-r--r--  drivers/crypto/intel/qat/Makefile | 9
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/Makefile | 4
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 594
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h | 86
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/adf_drv.c | 469
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxx/Makefile | 4
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c | 167
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h | 35
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c | 254
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxxvf/Makefile | 4
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c | 102
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h | 18
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c | 228
-rw-r--r--  drivers/crypto/intel/qat/qat_c62x/Makefile | 4
-rw-r--r--  drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c | 169
-rw-r--r--  drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h | 35
-rw-r--r--  drivers/crypto/intel/qat/qat_c62x/adf_drv.c | 254
-rw-r--r--  drivers/crypto/intel/qat/qat_c62xvf/Makefile | 4
-rw-r--r--  drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c | 102
-rw-r--r--  drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.h | 18
-rw-r--r--  drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c | 228
-rw-r--r--  drivers/crypto/intel/qat/qat_common/Makefile | 41
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_accel_devices.h | 334
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_accel_engine.c | 212
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_admin.c | 453
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_aer.c | 186
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cfg.c | 353
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cfg.h | 45
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cfg_common.h | 76
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cfg_services.h | 34
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h | 53
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cfg_user.h | 38
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_clock.c | 131
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_clock.h | 14
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_common_drv.h | 249
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c | 475
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_dbgfs.c | 81
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_dbgfs.h | 29
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c | 452
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_fw_counters.c | 264
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_fw_counters.h | 11
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen2_config.c | 213
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen2_config.h | 10
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c | 70
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h | 10
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c | 268
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h | 168
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c | 399
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h | 29
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c | 83
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h | 10
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c | 194
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h | 145
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c | 147
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h | 17
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c | 147
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h | 45
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c | 70
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h | 21
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_heartbeat.c | 336
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_heartbeat.h | 79
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c | 194
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.h | 12
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c | 105
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_init.c | 516
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_isr.c | 388
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h | 259
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c | 52
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h | 18
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c | 348
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.h | 13
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.c | 65
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h | 31
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c | 167
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h | 23
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c | 368
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.h | 17
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_sriov.c | 215
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_sysfs.c | 232
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_transport.c | 577
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_transport.h | 20
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_transport_access_macros.h | 58
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_transport_debug.c | 209
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_transport_internal.h | 73
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_vf_isr.c | 313
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_fw.h | 298
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h | 404
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h | 111
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_fw_la.h | 367
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h | 68
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_fw_pke.h | 68
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_hal.h | 143
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_hw.h | 378
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h | 164
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp_defs.h | 300
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h | 585
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_algs.c | 1423
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_algs_send.c | 91
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_algs_send.h | 25
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_asym_algs.c | 1307
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_bl.c | 410
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_bl.h | 69
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_comp_algs.c | 489
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_comp_req.h | 123
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_compression.c | 296
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_compression.h | 37
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_crypto.c | 287
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_crypto.h | 68
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_hal.c | 1594
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_uclo.c | 2130
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xcc/Makefile | 4
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 265
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h | 44
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c | 254
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xccvf/Makefile | 4
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c | 102
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h | 18
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c | 228
133 files changed, 33559 insertions, 0 deletions
diff --git a/drivers/crypto/intel/Kconfig b/drivers/crypto/intel/Kconfig
new file mode 100644
index 0000000000..3d90c87d40
--- /dev/null
+++ b/drivers/crypto/intel/Kconfig
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+source "drivers/crypto/intel/keembay/Kconfig"
+source "drivers/crypto/intel/ixp4xx/Kconfig"
+source "drivers/crypto/intel/qat/Kconfig"
diff --git a/drivers/crypto/intel/Makefile b/drivers/crypto/intel/Makefile
new file mode 100644
index 0000000000..b3d0352ae1
--- /dev/null
+++ b/drivers/crypto/intel/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-y += keembay/
+obj-y += ixp4xx/
+obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
diff --git a/drivers/crypto/intel/ixp4xx/Kconfig b/drivers/crypto/intel/ixp4xx/Kconfig
new file mode 100644
index 0000000000..af3cc56883
--- /dev/null
+++ b/drivers/crypto/intel/ixp4xx/Kconfig
@@ -0,0 +1,14 @@
+config CRYPTO_DEV_IXP4XX
+ tristate "Driver for IXP4xx crypto hardware acceleration"
+ depends on (ARCH_IXP4XX || COMPILE_TEST) && IXP4XX_QMGR && IXP4XX_NPE
+ select CRYPTO_AES
+ select CRYPTO_DES
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ select CRYPTO_CTR
+ select CRYPTO_LIB_DES
+ select CRYPTO_AEAD
+ select CRYPTO_AUTHENC
+ select CRYPTO_SKCIPHER
+ help
+ Driver for the IXP4xx NPE crypto engine.
diff --git a/drivers/crypto/intel/ixp4xx/Makefile b/drivers/crypto/intel/ixp4xx/Makefile
new file mode 100644
index 0000000000..74ebefd930
--- /dev/null
+++ b/drivers/crypto/intel/ixp4xx/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
diff --git a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
new file mode 100644
index 0000000000..4a18095ae5
--- /dev/null
+++ b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
@@ -0,0 +1,1604 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel IXP4xx NPE-C crypto driver
+ *
+ * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/rtnetlink.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include <crypto/ctr.h>
+#include <crypto/internal/des.h>
+#include <crypto/aes.h>
+#include <crypto/hmac.h>
+#include <crypto/sha1.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+
+#include <linux/soc/ixp4xx/npe.h>
+#include <linux/soc/ixp4xx/qmgr.h>
+
+/* Intermittent includes, delete this after v5.14-rc1 */
+#include <linux/soc/ixp4xx/cpu.h>
+
+#define MAX_KEYLEN 32
+
+/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
+#define NPE_CTX_LEN 80
+#define AES_BLOCK128 16
+
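+/* Operation flags and opcodes written into crypt_ctl->mode for the NPE. */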
+#define NPE_OP_HASH_VERIFY 0x01
+#define NPE_OP_CCM_ENABLE 0x04
+#define NPE_OP_CRYPT_ENABLE 0x08
+#define NPE_OP_HASH_ENABLE 0x10
+#define NPE_OP_NOT_IN_PLACE 0x20
+#define NPE_OP_HMAC_DISABLE 0x40
+#define NPE_OP_CRYPT_ENCRYPT 0x80
+
+#define NPE_OP_CCM_GEN_MIC 0xcc
+#define NPE_OP_HASH_GEN_ICV 0x50
+#define NPE_OP_ENC_GEN_KEY 0xc9
+
+#define MOD_ECB 0x0000
+#define MOD_CTR 0x1000
+#define MOD_CBC_ENC 0x2000
+#define MOD_CBC_DEC 0x3000
+#define MOD_CCM_ENC 0x4000
+#define MOD_CCM_DEC 0x5000
+
+#define KEYLEN_128 4
+#define KEYLEN_192 6
+#define KEYLEN_256 8
+
+#define CIPH_DECR 0x0000
+#define CIPH_ENCR 0x0400
+
+#define MOD_DES 0x0000
+#define MOD_TDEA2 0x0100
+#define MOD_3DES 0x0200
+#define MOD_AES 0x0800
+#define MOD_AES128 (0x0800 | KEYLEN_128)
+#define MOD_AES192 (0x0900 | KEYLEN_192)
+#define MOD_AES256 (0x0a00 | KEYLEN_256)
+
+#define MAX_IVLEN 16
+#define NPE_QLEN 16
+/* Space for registering when the first
+ * NPE_QLEN crypt_ctl are busy */
+#define NPE_QLEN_TOTAL 64
+
+#define CTL_FLAG_UNUSED 0x0000
+#define CTL_FLAG_USED 0x1000
+#define CTL_FLAG_PERFORM_ABLK 0x0001
+#define CTL_FLAG_GEN_ICV 0x0002
+#define CTL_FLAG_GEN_REVAES 0x0004
+#define CTL_FLAG_PERFORM_AEAD 0x0008
+#define CTL_FLAG_MASK 0x000f
+
+#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE
+
+#define MD5_DIGEST_SIZE 16
+
+struct buffer_desc {
+ u32 phys_next;
+#ifdef __ARMEB__
+ u16 buf_len;
+ u16 pkt_len;
+#else
+ u16 pkt_len;
+ u16 buf_len;
+#endif
+ dma_addr_t phys_addr;
+ u32 __reserved[4];
+ struct buffer_desc *next;
+ enum dma_data_direction dir;
+};
+
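+/*
+ * Descriptor exchanged with the NPE through the queue manager. Everything up
+ * to and including crypto_ctx is read by the NPE firmware; the fields below
+ * the "Used by Host" marker are host-only bookkeeping. setup_crypt_desc()
+ * checks that the structure is exactly 64 bytes on native builds.
+ */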
+struct crypt_ctl {
+#ifdef __ARMEB__
+ u8 mode; /* NPE_OP_* operation mode */
+ u8 init_len;
+ u16 reserved;
+#else
+ u16 reserved;
+ u8 init_len;
+ u8 mode; /* NPE_OP_* operation mode */
+#endif
+ u8 iv[MAX_IVLEN]; /* IV for CBC mode or CTR IV for CTR mode */
+ u32 icv_rev_aes; /* icv or rev aes */
+ u32 src_buf;
+ u32 dst_buf;
+#ifdef __ARMEB__
+ u16 auth_offs; /* Authentication start offset */
+ u16 auth_len; /* Authentication data length */
+ u16 crypt_offs; /* Cryption start offset */
+ u16 crypt_len; /* Cryption data length */
+#else
+ u16 auth_len; /* Authentication data length */
+ u16 auth_offs; /* Authentication start offset */
+ u16 crypt_len; /* Cryption data length */
+ u16 crypt_offs; /* Cryption start offset */
+#endif
+ u32 aadAddr; /* Additional Auth Data Addr for CCM mode */
+ u32 crypto_ctx; /* NPE Crypto Param structure address */
+
+ /* Used by Host: 4*4 bytes*/
+ unsigned int ctl_flags;
+ union {
+ struct skcipher_request *ablk_req;
+ struct aead_request *aead_req;
+ struct crypto_tfm *tfm;
+ } data;
+ struct buffer_desc *regist_buf;
+ u8 *regist_ptr;
+};
+
+struct ablk_ctx {
+ struct buffer_desc *src;
+ struct buffer_desc *dst;
+ u8 iv[MAX_IVLEN];
+ bool encrypt;
+ struct skcipher_request fallback_req; // keep at the end
+};
+
+struct aead_ctx {
+ struct buffer_desc *src;
+ struct buffer_desc *dst;
+ struct scatterlist ivlist;
+ /* used when the hmac is not on one sg entry */
+ u8 *hmac_virt;
+ int encrypt;
+};
+
+struct ix_hash_algo {
+ u32 cfgword;
+ unsigned char *icv;
+};
+
+struct ix_sa_dir {
+ unsigned char *npe_ctx;
+ dma_addr_t npe_ctx_phys;
+ int npe_ctx_idx;
+ u8 npe_mode;
+};
+
+struct ixp_ctx {
+ struct ix_sa_dir encrypt;
+ struct ix_sa_dir decrypt;
+ int authkey_len;
+ u8 authkey[MAX_KEYLEN];
+ int enckey_len;
+ u8 enckey[MAX_KEYLEN];
+ u8 salt[MAX_IVLEN];
+ u8 nonce[CTR_RFC3686_NONCE_SIZE];
+ unsigned int salted;
+ atomic_t configuring;
+ struct completion completion;
+ struct crypto_skcipher *fallback_tfm;
+};
+
+struct ixp_alg {
+ struct skcipher_alg crypto;
+ const struct ix_hash_algo *hash;
+ u32 cfg_enc;
+ u32 cfg_dec;
+
+ int registered;
+};
+
+struct ixp_aead_alg {
+ struct aead_alg crypto;
+ const struct ix_hash_algo *hash;
+ u32 cfg_enc;
+ u32 cfg_dec;
+
+ int registered;
+};
+
+static const struct ix_hash_algo hash_alg_md5 = {
+ .cfgword = 0xAA010004,
+ .icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+ "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
+};
+
+static const struct ix_hash_algo hash_alg_sha1 = {
+ .cfgword = 0x00000005,
+ .icv = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
+ "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
+};
+
+static struct npe *npe_c;
+
+static unsigned int send_qid;
+static unsigned int recv_qid;
+static struct dma_pool *buffer_pool;
+static struct dma_pool *ctx_pool;
+
+static struct crypt_ctl *crypt_virt;
+static dma_addr_t crypt_phys;
+
+static int support_aes = 1;
+
+static struct platform_device *pdev;
+
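+/*
+ * All crypt_ctl descriptors live in a single coherent DMA block (crypt_virt /
+ * crypt_phys); these helpers translate between a descriptor's kernel virtual
+ * address and the bus address passed through the queue manager.
+ */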
+static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
+{
+ return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
+}
+
+static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
+{
+ return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
+}
+
+static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
+{
+ return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_enc;
+}
+
+static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
+{
+ return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_dec;
+}
+
+static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
+{
+ return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->hash;
+}
+
+static int setup_crypt_desc(void)
+{
+ struct device *dev = &pdev->dev;
+
+ BUILD_BUG_ON(!(IS_ENABLED(CONFIG_COMPILE_TEST) &&
+ IS_ENABLED(CONFIG_64BIT)) &&
+ sizeof(struct crypt_ctl) != 64);
+ crypt_virt = dma_alloc_coherent(dev,
+ NPE_QLEN * sizeof(struct crypt_ctl),
+ &crypt_phys, GFP_ATOMIC);
+ if (!crypt_virt)
+ return -ENOMEM;
+ return 0;
+}
+
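+/*
+ * Descriptor allocation: the coherent block is set up lazily on first use and
+ * entries are handed out round-robin from the first NPE_QLEN descriptors.
+ */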
+static DEFINE_SPINLOCK(desc_lock);
+static struct crypt_ctl *get_crypt_desc(void)
+{
+ int i;
+ static int idx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc_lock, flags);
+
+ if (unlikely(!crypt_virt))
+ setup_crypt_desc();
+ if (unlikely(!crypt_virt)) {
+ spin_unlock_irqrestore(&desc_lock, flags);
+ return NULL;
+ }
+ i = idx;
+ if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
+ if (++idx >= NPE_QLEN)
+ idx = 0;
+ crypt_virt[i].ctl_flags = CTL_FLAG_USED;
+ spin_unlock_irqrestore(&desc_lock, flags);
+ return crypt_virt + i;
+ } else {
+ spin_unlock_irqrestore(&desc_lock, flags);
+ return NULL;
+ }
+}
+
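+/*
+ * Emergency variant used for context setup (HMAC pad hashing and reverse AES
+ * key generation): when the regular range is exhausted, descriptors between
+ * NPE_QLEN and NPE_QLEN_TOTAL are used instead.
+ */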
+static DEFINE_SPINLOCK(emerg_lock);
+static struct crypt_ctl *get_crypt_desc_emerg(void)
+{
+ int i;
+ static int idx = NPE_QLEN;
+ struct crypt_ctl *desc;
+ unsigned long flags;
+
+ desc = get_crypt_desc();
+ if (desc)
+ return desc;
+ if (unlikely(!crypt_virt))
+ return NULL;
+
+ spin_lock_irqsave(&emerg_lock, flags);
+ i = idx;
+ if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
+ if (++idx >= NPE_QLEN_TOTAL)
+ idx = NPE_QLEN;
+ crypt_virt[i].ctl_flags = CTL_FLAG_USED;
+ spin_unlock_irqrestore(&emerg_lock, flags);
+ return crypt_virt + i;
+ } else {
+ spin_unlock_irqrestore(&emerg_lock, flags);
+ return NULL;
+ }
+}
+
+static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
+ dma_addr_t phys)
+{
+ while (buf) {
+ struct buffer_desc *buf1;
+ u32 phys1;
+
+ buf1 = buf->next;
+ phys1 = buf->phys_next;
+ dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
+ dma_pool_free(buffer_pool, buf, phys);
+ buf = buf1;
+ phys = phys1;
+ }
+}
+
+static struct tasklet_struct crypto_done_tasklet;
+
+static void finish_scattered_hmac(struct crypt_ctl *crypt)
+{
+ struct aead_request *req = crypt->data.aead_req;
+ struct aead_ctx *req_ctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ int authsize = crypto_aead_authsize(tfm);
+ int decryptlen = req->assoclen + req->cryptlen - authsize;
+
+ if (req_ctx->encrypt) {
+ scatterwalk_map_and_copy(req_ctx->hmac_virt, req->dst,
+ decryptlen, authsize, 1);
+ }
+ dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
+}
+
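+/*
+ * Completion handler, invoked from the tasklet for each entry the NPE returns
+ * on recv_qid. Bit 0 of the returned handle marks a failed operation
+ * (reported as -EBADMSG); the low bits are masked off before converting the
+ * handle back to a descriptor pointer.
+ */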
+static void one_packet(dma_addr_t phys)
+{
+ struct device *dev = &pdev->dev;
+ struct crypt_ctl *crypt;
+ struct ixp_ctx *ctx;
+ int failed;
+
+ failed = phys & 0x1 ? -EBADMSG : 0;
+ phys &= ~0x3;
+ crypt = crypt_phys2virt(phys);
+
+ switch (crypt->ctl_flags & CTL_FLAG_MASK) {
+ case CTL_FLAG_PERFORM_AEAD: {
+ struct aead_request *req = crypt->data.aead_req;
+ struct aead_ctx *req_ctx = aead_request_ctx(req);
+
+ free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+ free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+ if (req_ctx->hmac_virt)
+ finish_scattered_hmac(crypt);
+
+ aead_request_complete(req, failed);
+ break;
+ }
+ case CTL_FLAG_PERFORM_ABLK: {
+ struct skcipher_request *req = crypt->data.ablk_req;
+ struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+ unsigned int offset;
+
+ if (ivsize > 0) {
+ offset = req->cryptlen - ivsize;
+ if (req_ctx->encrypt) {
+ scatterwalk_map_and_copy(req->iv, req->dst,
+ offset, ivsize, 0);
+ } else {
+ memcpy(req->iv, req_ctx->iv, ivsize);
+ memzero_explicit(req_ctx->iv, ivsize);
+ }
+ }
+
+ if (req_ctx->dst)
+ free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+
+ free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+ skcipher_request_complete(req, failed);
+ break;
+ }
+ case CTL_FLAG_GEN_ICV:
+ ctx = crypto_tfm_ctx(crypt->data.tfm);
+ dma_pool_free(ctx_pool, crypt->regist_ptr,
+ crypt->regist_buf->phys_addr);
+ dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
+ if (atomic_dec_and_test(&ctx->configuring))
+ complete(&ctx->completion);
+ break;
+ case CTL_FLAG_GEN_REVAES:
+ ctx = crypto_tfm_ctx(crypt->data.tfm);
+ *(__be32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
+ if (atomic_dec_and_test(&ctx->configuring))
+ complete(&ctx->completion);
+ break;
+ default:
+ BUG();
+ }
+ crypt->ctl_flags = CTL_FLAG_UNUSED;
+}
+
+static void irqhandler(void *_unused)
+{
+ tasklet_schedule(&crypto_done_tasklet);
+}
+
+static void crypto_done_action(unsigned long arg)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ dma_addr_t phys = qmgr_get_entry(recv_qid);
+ if (!phys)
+ return;
+ one_packet(phys);
+ }
+ tasklet_schedule(&crypto_done_tasklet);
+}
+
+static int init_ixp_crypto(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ u32 msg[2] = { 0, 0 };
+ int ret = -ENODEV;
+ u32 npe_id;
+
+ dev_info(dev, "probing...\n");
+
+ /* Locate the NPE and queue manager to use from device tree */
+ if (IS_ENABLED(CONFIG_OF) && np) {
+ struct of_phandle_args queue_spec;
+ struct of_phandle_args npe_spec;
+
+ ret = of_parse_phandle_with_fixed_args(np, "intel,npe-handle",
+ 1, 0, &npe_spec);
+ if (ret) {
+ dev_err(dev, "no NPE engine specified\n");
+ return -ENODEV;
+ }
+ npe_id = npe_spec.args[0];
+
+ ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
+ &queue_spec);
+ if (ret) {
+ dev_err(dev, "no rx queue phandle\n");
+ return -ENODEV;
+ }
+ recv_qid = queue_spec.args[0];
+
+ ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
+ &queue_spec);
+ if (ret) {
+ dev_err(dev, "no txready queue phandle\n");
+ return -ENODEV;
+ }
+ send_qid = queue_spec.args[0];
+ } else {
+ /*
+ * Hardcoded engine when using platform data, this goes away
+ * when we switch to using DT only.
+ */
+ npe_id = 2;
+ send_qid = 29;
+ recv_qid = 30;
+ }
+
+ npe_c = npe_request(npe_id);
+ if (!npe_c)
+ return ret;
+
+ if (!npe_running(npe_c)) {
+ ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
+ if (ret)
+ goto npe_release;
+ if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
+ goto npe_error;
+ } else {
+ if (npe_send_message(npe_c, msg, "STATUS_MSG"))
+ goto npe_error;
+
+ if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
+ goto npe_error;
+ }
+
+ switch ((msg[1] >> 16) & 0xff) {
+ case 3:
+ dev_warn(dev, "Firmware of %s lacks AES support\n", npe_name(npe_c));
+ support_aes = 0;
+ break;
+ case 4:
+ case 5:
+ support_aes = 1;
+ break;
+ default:
+ dev_err(dev, "Firmware of %s lacks crypto support\n", npe_name(npe_c));
+ ret = -ENODEV;
+ goto npe_release;
+ }
+ /* buffer_pool will also be used to sometimes store the hmac,
+ * so assure it is large enough
+ */
+ BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
+ buffer_pool = dma_pool_create("buffer", dev, sizeof(struct buffer_desc),
+ 32, 0);
+ ret = -ENOMEM;
+ if (!buffer_pool)
+ goto err;
+
+ ctx_pool = dma_pool_create("context", dev, NPE_CTX_LEN, 16, 0);
+ if (!ctx_pool)
+ goto err;
+
+ ret = qmgr_request_queue(send_qid, NPE_QLEN_TOTAL, 0, 0,
+ "ixp_crypto:out", NULL);
+ if (ret)
+ goto err;
+ ret = qmgr_request_queue(recv_qid, NPE_QLEN, 0, 0,
+ "ixp_crypto:in", NULL);
+ if (ret) {
+ qmgr_release_queue(send_qid);
+ goto err;
+ }
+ qmgr_set_irq(recv_qid, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
+ tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);
+
+ qmgr_enable_irq(recv_qid);
+ return 0;
+
+npe_error:
+ dev_err(dev, "%s not responding\n", npe_name(npe_c));
+ ret = -EIO;
+err:
+ dma_pool_destroy(ctx_pool);
+ dma_pool_destroy(buffer_pool);
+npe_release:
+ npe_release(npe_c);
+ return ret;
+}
+
+static void release_ixp_crypto(struct device *dev)
+{
+ qmgr_disable_irq(recv_qid);
+ tasklet_kill(&crypto_done_tasklet);
+
+ qmgr_release_queue(send_qid);
+ qmgr_release_queue(recv_qid);
+
+ dma_pool_destroy(ctx_pool);
+ dma_pool_destroy(buffer_pool);
+
+ npe_release(npe_c);
+
+ if (crypt_virt)
+ dma_free_coherent(dev, NPE_QLEN * sizeof(struct crypt_ctl),
+ crypt_virt, crypt_phys);
+}
+
+static void reset_sa_dir(struct ix_sa_dir *dir)
+{
+ memset(dir->npe_ctx, 0, NPE_CTX_LEN);
+ dir->npe_ctx_idx = 0;
+ dir->npe_mode = 0;
+}
+
+static int init_sa_dir(struct ix_sa_dir *dir)
+{
+ dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
+ if (!dir->npe_ctx)
+ return -ENOMEM;
+
+ reset_sa_dir(dir);
+ return 0;
+}
+
+static void free_sa_dir(struct ix_sa_dir *dir)
+{
+ memset(dir->npe_ctx, 0, NPE_CTX_LEN);
+ dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
+}
+
+static int init_tfm(struct crypto_tfm *tfm)
+{
+ struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ atomic_set(&ctx->configuring, 0);
+ ret = init_sa_dir(&ctx->encrypt);
+ if (ret)
+ return ret;
+ ret = init_sa_dir(&ctx->decrypt);
+ if (ret)
+ free_sa_dir(&ctx->encrypt);
+
+ return ret;
+}
+
+static int init_tfm_ablk(struct crypto_skcipher *tfm)
+{
+ struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
+ struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
+ const char *name = crypto_tfm_alg_name(ctfm);
+
+ ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback_tfm)) {
+ pr_err("ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(ctx->fallback_tfm));
+ return PTR_ERR(ctx->fallback_tfm);
+ }
+
+ pr_info("Fallback for %s is %s\n",
+ crypto_tfm_alg_driver_name(&tfm->base),
+ crypto_tfm_alg_driver_name(crypto_skcipher_tfm(ctx->fallback_tfm))
+ );
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx) + crypto_skcipher_reqsize(ctx->fallback_tfm));
+ return init_tfm(crypto_skcipher_tfm(tfm));
+}
+
+static int init_tfm_aead(struct crypto_aead *tfm)
+{
+ crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
+ return init_tfm(crypto_aead_tfm(tfm));
+}
+
+static void exit_tfm(struct crypto_tfm *tfm)
+{
+ struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ free_sa_dir(&ctx->encrypt);
+ free_sa_dir(&ctx->decrypt);
+}
+
+static void exit_tfm_ablk(struct crypto_skcipher *tfm)
+{
+ struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
+ struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
+
+ crypto_free_skcipher(ctx->fallback_tfm);
+ exit_tfm(crypto_skcipher_tfm(tfm));
+}
+
+static void exit_tfm_aead(struct crypto_aead *tfm)
+{
+ exit_tfm(crypto_aead_tfm(tfm));
+}
+
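+/*
+ * Precompute one HMAC chaining variable: XOR the key with the ipad/opad byte
+ * (xpad), let the NPE hash the padded block and store the resulting digest at
+ * 'target' inside the per-direction crypto context. Completion is signalled
+ * via CTL_FLAG_GEN_ICV in one_packet().
+ */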
+static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
+ int init_len, u32 ctx_addr, const u8 *key,
+ int key_len)
+{
+ struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypt_ctl *crypt;
+ struct buffer_desc *buf;
+ int i;
+ u8 *pad;
+ dma_addr_t pad_phys, buf_phys;
+
+ BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
+ pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
+ if (!pad)
+ return -ENOMEM;
+ buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
+ if (!buf) {
+ dma_pool_free(ctx_pool, pad, pad_phys);
+ return -ENOMEM;
+ }
+ crypt = get_crypt_desc_emerg();
+ if (!crypt) {
+ dma_pool_free(ctx_pool, pad, pad_phys);
+ dma_pool_free(buffer_pool, buf, buf_phys);
+ return -EAGAIN;
+ }
+
+ memcpy(pad, key, key_len);
+ memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
+ for (i = 0; i < HMAC_PAD_BLOCKLEN; i++)
+ pad[i] ^= xpad;
+
+ crypt->data.tfm = tfm;
+ crypt->regist_ptr = pad;
+ crypt->regist_buf = buf;
+
+ crypt->auth_offs = 0;
+ crypt->auth_len = HMAC_PAD_BLOCKLEN;
+ crypt->crypto_ctx = ctx_addr;
+ crypt->src_buf = buf_phys;
+ crypt->icv_rev_aes = target;
+ crypt->mode = NPE_OP_HASH_GEN_ICV;
+ crypt->init_len = init_len;
+ crypt->ctl_flags |= CTL_FLAG_GEN_ICV;
+
+ buf->next = NULL;
+ buf->buf_len = HMAC_PAD_BLOCKLEN;
+ buf->pkt_len = 0;
+ buf->phys_addr = pad_phys;
+
+ atomic_inc(&ctx->configuring);
+ qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
+ BUG_ON(qmgr_stat_overflow(send_qid));
+ return 0;
+}
+
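+/*
+ * Program the hash part of the per-direction NPE context: configuration word,
+ * initial chaining values and the two HMAC pad digests generated through
+ * register_chain_var().
+ */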
+static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned int authsize,
+ const u8 *key, int key_len, unsigned int digest_len)
+{
+ u32 itarget, otarget, npe_ctx_addr;
+ unsigned char *cinfo;
+ int init_len, ret = 0;
+ u32 cfgword;
+ struct ix_sa_dir *dir;
+ struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
+ const struct ix_hash_algo *algo;
+
+ dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
+ cinfo = dir->npe_ctx + dir->npe_ctx_idx;
+ algo = ix_hash(tfm);
+
+ /* write cfg word to cryptinfo */
+ cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
+#ifndef __ARMEB__
+ cfgword ^= 0xAA000000; /* change the "byte swap" flags */
+#endif
+ *(__be32 *)cinfo = cpu_to_be32(cfgword);
+ cinfo += sizeof(cfgword);
+
+ /* write ICV to cryptinfo */
+ memcpy(cinfo, algo->icv, digest_len);
+ cinfo += digest_len;
+
+ itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
+ + sizeof(algo->cfgword);
+ otarget = itarget + digest_len;
+ init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
+ npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;
+
+ dir->npe_ctx_idx += init_len;
+ dir->npe_mode |= NPE_OP_HASH_ENABLE;
+
+ if (!encrypt)
+ dir->npe_mode |= NPE_OP_HASH_VERIFY;
+
+ ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
+ init_len, npe_ctx_addr, key, key_len);
+ if (ret)
+ return ret;
+ return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
+ init_len, npe_ctx_addr, key, key_len);
+}
+
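+/*
+ * AES decryption on the NPE needs a reverse key schedule: temporarily set
+ * CIPH_ENCR, ask the NPE for a key-generation pass (NPE_OP_ENC_GEN_KEY) that
+ * writes the reverse key into the decrypt context, and let the
+ * CTL_FLAG_GEN_REVAES completion clear CIPH_ENCR again.
+ */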
+static int gen_rev_aes_key(struct crypto_tfm *tfm)
+{
+ struct crypt_ctl *crypt;
+ struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ix_sa_dir *dir = &ctx->decrypt;
+
+ crypt = get_crypt_desc_emerg();
+ if (!crypt)
+ return -EAGAIN;
+
+ *(__be32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
+
+ crypt->data.tfm = tfm;
+ crypt->crypt_offs = 0;
+ crypt->crypt_len = AES_BLOCK128;
+ crypt->src_buf = 0;
+ crypt->crypto_ctx = dir->npe_ctx_phys;
+ crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
+ crypt->mode = NPE_OP_ENC_GEN_KEY;
+ crypt->init_len = dir->npe_ctx_idx;
+ crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;
+
+ atomic_inc(&ctx->configuring);
+ qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
+ BUG_ON(qmgr_stat_overflow(send_qid));
+ return 0;
+}
+
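+/*
+ * Write the cipher configuration word and key into the per-direction NPE
+ * context. DES/3DES keys are zero-padded to DES3_EDE_KEY_SIZE, and AES
+ * decryption additionally triggers reverse key generation.
+ */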
+static int setup_cipher(struct crypto_tfm *tfm, int encrypt, const u8 *key,
+ int key_len)
+{
+ u8 *cinfo;
+ u32 cipher_cfg;
+ u32 keylen_cfg = 0;
+ struct ix_sa_dir *dir;
+ struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
+ int err;
+
+ dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
+ cinfo = dir->npe_ctx;
+
+ if (encrypt) {
+ cipher_cfg = cipher_cfg_enc(tfm);
+ dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
+ } else {
+ cipher_cfg = cipher_cfg_dec(tfm);
+ }
+ if (cipher_cfg & MOD_AES) {
+ switch (key_len) {
+ case 16:
+ keylen_cfg = MOD_AES128;
+ break;
+ case 24:
+ keylen_cfg = MOD_AES192;
+ break;
+ case 32:
+ keylen_cfg = MOD_AES256;
+ break;
+ default:
+ return -EINVAL;
+ }
+ cipher_cfg |= keylen_cfg;
+ } else {
+ err = crypto_des_verify_key(tfm, key);
+ if (err)
+ return err;
+ }
+ /* write cfg word to cryptinfo */
+ *(__be32 *)cinfo = cpu_to_be32(cipher_cfg);
+ cinfo += sizeof(cipher_cfg);
+
+ /* write cipher key to cryptinfo */
+ memcpy(cinfo, key, key_len);
+ /* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
+ if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
+ memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
+ key_len = DES3_EDE_KEY_SIZE;
+ }
+ dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
+ dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
+ if ((cipher_cfg & MOD_AES) && !encrypt)
+ return gen_rev_aes_key(tfm);
+
+ return 0;
+}
+
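+/*
+ * Walk a scatterlist and build the NPE buffer chain: DMA-map each segment and
+ * link a buffer_desc allocated from buffer_pool behind 'buf'. Callers unwind
+ * partially built chains with free_buf_chain().
+ */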
+static struct buffer_desc *chainup_buffers(struct device *dev,
+ struct scatterlist *sg, unsigned int nbytes,
+ struct buffer_desc *buf, gfp_t flags,
+ enum dma_data_direction dir)
+{
+ for (; nbytes > 0; sg = sg_next(sg)) {
+ unsigned int len = min(nbytes, sg->length);
+ struct buffer_desc *next_buf;
+ dma_addr_t next_buf_phys;
+ void *ptr;
+
+ nbytes -= len;
+ ptr = sg_virt(sg);
+ next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
+ if (!next_buf) {
+ buf = NULL;
+ break;
+ }
+ sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
+ buf->next = next_buf;
+ buf->phys_next = next_buf_phys;
+ buf = next_buf;
+
+ buf->phys_addr = sg_dma_address(sg);
+ buf->buf_len = len;
+ buf->dir = dir;
+ }
+ buf->next = NULL;
+ buf->phys_next = 0;
+ return buf;
+}
+
+static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
+
+ init_completion(&ctx->completion);
+ atomic_inc(&ctx->configuring);
+
+ reset_sa_dir(&ctx->encrypt);
+ reset_sa_dir(&ctx->decrypt);
+
+ ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
+ ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;
+
+ ret = setup_cipher(&tfm->base, 0, key, key_len);
+ if (ret)
+ goto out;
+ ret = setup_cipher(&tfm->base, 1, key, key_len);
+out:
+ if (!atomic_dec_and_test(&ctx->configuring))
+ wait_for_completion(&ctx->completion);
+ if (ret)
+ return ret;
+ crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ctx->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ return crypto_skcipher_setkey(ctx->fallback_tfm, key, key_len);
+}
+
+static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ return verify_skcipher_des3_key(tfm, key) ?:
+ ablk_setkey(tfm, key, key_len);
+}
+
+static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ /* the nonce is stored in bytes at end of key */
+ if (key_len < CTR_RFC3686_NONCE_SIZE)
+ return -EINVAL;
+
+ memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
+ CTR_RFC3686_NONCE_SIZE);
+
+ key_len -= CTR_RFC3686_NONCE_SIZE;
+ return ablk_setkey(tfm, key, key_len);
+}
+
+static int ixp4xx_cipher_fallback(struct skcipher_request *areq, int encrypt)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct ixp_ctx *op = crypto_skcipher_ctx(tfm);
+ struct ablk_ctx *rctx = skcipher_request_ctx(areq);
+ int err;
+
+ skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
+ skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
+ areq->base.complete, areq->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ if (encrypt)
+ err = crypto_skcipher_encrypt(&rctx->fallback_req);
+ else
+ err = crypto_skcipher_decrypt(&rctx->fallback_req);
+ return err;
+}
+
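+/*
+ * Queue one skcipher request to the NPE. Requests whose source or destination
+ * scatterlist has more than one entry are handed to the software fallback;
+ * -EAGAIN is returned while the send queue is full or a key change is still
+ * in flight.
+ */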
+static int ablk_perform(struct skcipher_request *req, int encrypt)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+ struct ix_sa_dir *dir;
+ struct crypt_ctl *crypt;
+ unsigned int nbytes = req->cryptlen;
+ enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
+ struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
+ struct buffer_desc src_hook;
+ struct device *dev = &pdev->dev;
+ unsigned int offset;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+ GFP_KERNEL : GFP_ATOMIC;
+
+ if (sg_nents(req->src) > 1 || sg_nents(req->dst) > 1)
+ return ixp4xx_cipher_fallback(req, encrypt);
+
+ if (qmgr_stat_full(send_qid))
+ return -EAGAIN;
+ if (atomic_read(&ctx->configuring))
+ return -EAGAIN;
+
+ dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
+ req_ctx->encrypt = encrypt;
+
+ crypt = get_crypt_desc();
+ if (!crypt)
+ return -ENOMEM;
+
+ crypt->data.ablk_req = req;
+ crypt->crypto_ctx = dir->npe_ctx_phys;
+ crypt->mode = dir->npe_mode;
+ crypt->init_len = dir->npe_ctx_idx;
+
+ crypt->crypt_offs = 0;
+ crypt->crypt_len = nbytes;
+
+ BUG_ON(ivsize && !req->iv);
+ memcpy(crypt->iv, req->iv, ivsize);
+ if (ivsize > 0 && !encrypt) {
+ offset = req->cryptlen - ivsize;
+ scatterwalk_map_and_copy(req_ctx->iv, req->src, offset, ivsize, 0);
+ }
+ if (req->src != req->dst) {
+ struct buffer_desc dst_hook;
+
+ crypt->mode |= NPE_OP_NOT_IN_PLACE;
+ /* This was never tested by Intel
+ * for more than one dst buffer, I think. */
+ req_ctx->dst = NULL;
+ if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
+ flags, DMA_FROM_DEVICE))
+ goto free_buf_dest;
+ src_direction = DMA_TO_DEVICE;
+ req_ctx->dst = dst_hook.next;
+ crypt->dst_buf = dst_hook.phys_next;
+ } else {
+ req_ctx->dst = NULL;
+ }
+ req_ctx->src = NULL;
+ if (!chainup_buffers(dev, req->src, nbytes, &src_hook, flags,
+ src_direction))
+ goto free_buf_src;
+
+ req_ctx->src = src_hook.next;
+ crypt->src_buf = src_hook.phys_next;
+ crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
+ qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
+ BUG_ON(qmgr_stat_overflow(send_qid));
+ return -EINPROGRESS;
+
+free_buf_src:
+ free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+free_buf_dest:
+ if (req->src != req->dst)
+ free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+
+ crypt->ctl_flags = CTL_FLAG_UNUSED;
+ return -ENOMEM;
+}
+
+static int ablk_encrypt(struct skcipher_request *req)
+{
+ return ablk_perform(req, 1);
+}
+
+static int ablk_decrypt(struct skcipher_request *req)
+{
+ return ablk_perform(req, 0);
+}
+
+static int ablk_rfc3686_crypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ u8 iv[CTR_RFC3686_BLOCK_SIZE];
+ u8 *info = req->iv;
+ int ret;
+
+ /* set up counter block */
+ memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
+ memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
+
+ /* initialize counter portion of counter block */
+ *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
+ cpu_to_be32(1);
+
+ req->iv = iv;
+ ret = ablk_perform(req, 1);
+ req->iv = info;
+ return ret;
+}
+
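+/*
+ * Queue one AEAD request. The NPE expects the ICV at a single physical
+ * address, so when the tag would straddle scatterlist entries it is bounced
+ * through a buffer_pool allocation: on decrypt the expected tag is copied in
+ * beforehand, on encrypt the computed tag is copied out to the destination in
+ * finish_scattered_hmac().
+ */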
+static int aead_perform(struct aead_request *req, int encrypt,
+ int cryptoffset, int eff_cryptlen, u8 *iv)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
+ unsigned int ivsize = crypto_aead_ivsize(tfm);
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ struct ix_sa_dir *dir;
+ struct crypt_ctl *crypt;
+ unsigned int cryptlen;
+ struct buffer_desc *buf, src_hook;
+ struct aead_ctx *req_ctx = aead_request_ctx(req);
+ struct device *dev = &pdev->dev;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+ GFP_KERNEL : GFP_ATOMIC;
+ enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
+ unsigned int lastlen;
+
+ if (qmgr_stat_full(send_qid))
+ return -EAGAIN;
+ if (atomic_read(&ctx->configuring))
+ return -EAGAIN;
+
+ if (encrypt) {
+ dir = &ctx->encrypt;
+ cryptlen = req->cryptlen;
+ } else {
+ dir = &ctx->decrypt;
+ /* req->cryptlen includes the authsize when decrypting */
+ cryptlen = req->cryptlen - authsize;
+ eff_cryptlen -= authsize;
+ }
+ crypt = get_crypt_desc();
+ if (!crypt)
+ return -ENOMEM;
+
+ crypt->data.aead_req = req;
+ crypt->crypto_ctx = dir->npe_ctx_phys;
+ crypt->mode = dir->npe_mode;
+ crypt->init_len = dir->npe_ctx_idx;
+
+ crypt->crypt_offs = cryptoffset;
+ crypt->crypt_len = eff_cryptlen;
+
+ crypt->auth_offs = 0;
+ crypt->auth_len = req->assoclen + cryptlen;
+ BUG_ON(ivsize && !req->iv);
+ memcpy(crypt->iv, req->iv, ivsize);
+
+ buf = chainup_buffers(dev, req->src, crypt->auth_len,
+ &src_hook, flags, src_direction);
+ req_ctx->src = src_hook.next;
+ crypt->src_buf = src_hook.phys_next;
+ if (!buf)
+ goto free_buf_src;
+
+ lastlen = buf->buf_len;
+ if (lastlen >= authsize)
+ crypt->icv_rev_aes = buf->phys_addr +
+ buf->buf_len - authsize;
+
+ req_ctx->dst = NULL;
+
+ if (req->src != req->dst) {
+ struct buffer_desc dst_hook;
+
+ crypt->mode |= NPE_OP_NOT_IN_PLACE;
+ src_direction = DMA_TO_DEVICE;
+
+ buf = chainup_buffers(dev, req->dst, crypt->auth_len,
+ &dst_hook, flags, DMA_FROM_DEVICE);
+ req_ctx->dst = dst_hook.next;
+ crypt->dst_buf = dst_hook.phys_next;
+
+ if (!buf)
+ goto free_buf_dst;
+
+ if (encrypt) {
+ lastlen = buf->buf_len;
+ if (lastlen >= authsize)
+ crypt->icv_rev_aes = buf->phys_addr +
+ buf->buf_len - authsize;
+ }
+ }
+
+ if (unlikely(lastlen < authsize)) {
+ dma_addr_t dma;
+ /* The 12 hmac bytes are scattered,
+ * we need to copy them into a safe buffer */
+ req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags, &dma);
+ if (unlikely(!req_ctx->hmac_virt))
+ goto free_buf_dst;
+ crypt->icv_rev_aes = dma;
+ if (!encrypt) {
+ scatterwalk_map_and_copy(req_ctx->hmac_virt,
+ req->src, cryptlen, authsize, 0);
+ }
+ req_ctx->encrypt = encrypt;
+ } else {
+ req_ctx->hmac_virt = NULL;
+ }
+
+ crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
+ qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
+ BUG_ON(qmgr_stat_overflow(send_qid));
+ return -EINPROGRESS;
+
+free_buf_dst:
+ free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+free_buf_src:
+ free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+ crypt->ctl_flags = CTL_FLAG_UNUSED;
+ return -ENOMEM;
+}
+
+static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
+{
+ struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
+ unsigned int digest_len = crypto_aead_maxauthsize(tfm);
+ int ret;
+
+ if (!ctx->enckey_len && !ctx->authkey_len)
+ return 0;
+ init_completion(&ctx->completion);
+ atomic_inc(&ctx->configuring);
+
+ reset_sa_dir(&ctx->encrypt);
+ reset_sa_dir(&ctx->decrypt);
+
+ ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
+ if (ret)
+ goto out;
+ ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
+ if (ret)
+ goto out;
+ ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
+ ctx->authkey_len, digest_len);
+ if (ret)
+ goto out;
+ ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
+ ctx->authkey_len, digest_len);
+out:
+ if (!atomic_dec_and_test(&ctx->configuring))
+ wait_for_completion(&ctx->completion);
+ return ret;
+}
+
+static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ int max = crypto_aead_maxauthsize(tfm) >> 2;
+
+ if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
+ return -EINVAL;
+ return aead_setup(tfm, authsize);
+}
+
+static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
+ struct crypto_authenc_keys keys;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ goto badkey;
+
+ if (keys.authkeylen > sizeof(ctx->authkey))
+ goto badkey;
+
+ if (keys.enckeylen > sizeof(ctx->enckey))
+ goto badkey;
+
+ memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
+ memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
+ ctx->authkey_len = keys.authkeylen;
+ ctx->enckey_len = keys.enckeylen;
+
+ memzero_explicit(&keys, sizeof(keys));
+ return aead_setup(tfm, crypto_aead_authsize(tfm));
+badkey:
+ memzero_explicit(&keys, sizeof(keys));
+ return -EINVAL;
+}
+
+static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
+ struct crypto_authenc_keys keys;
+ int err;
+
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (unlikely(err))
+ goto badkey;
+
+ err = -EINVAL;
+ if (keys.authkeylen > sizeof(ctx->authkey))
+ goto badkey;
+
+ err = verify_aead_des3_key(tfm, keys.enckey, keys.enckeylen);
+ if (err)
+ goto badkey;
+
+ memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
+ memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
+ ctx->authkey_len = keys.authkeylen;
+ ctx->enckey_len = keys.enckeylen;
+
+ memzero_explicit(&keys, sizeof(keys));
+ return aead_setup(tfm, crypto_aead_authsize(tfm));
+badkey:
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+}
+
+static int aead_encrypt(struct aead_request *req)
+{
+ return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
+}
+
+static int aead_decrypt(struct aead_request *req)
+{
+ return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
+}
+
+static struct ixp_alg ixp4xx_algos[] = {
+{
+ .crypto = {
+ .base.cra_name = "cbc(des)",
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ },
+ .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
+ .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
+
+}, {
+ .crypto = {
+ .base.cra_name = "ecb(des)",
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ },
+ .cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
+ .cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
+}, {
+ .crypto = {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = ablk_des3_setkey,
+ },
+ .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
+ .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
+}, {
+ .crypto = {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = ablk_des3_setkey,
+ },
+ .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
+ .cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
+}, {
+ .crypto = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
+ .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
+}, {
+ .crypto = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ },
+ .cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
+ .cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
+}, {
+ .crypto = {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_blocksize = 1,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
+ .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
+}, {
+ .crypto = {
+ .base.cra_name = "rfc3686(ctr(aes))",
+ .base.cra_blocksize = 1,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_rfc3686_setkey,
+ .encrypt = ablk_rfc3686_crypt,
+ .decrypt = ablk_rfc3686_crypt,
+ },
+ .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
+ .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
+} };
+
+static struct ixp_aead_alg ixp4xx_aeads[] = {
+{
+ .crypto = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des))",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .hash = &hash_alg_md5,
+ .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
+ .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
+}, {
+ .crypto = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ .setkey = des3_aead_setkey,
+ },
+ .hash = &hash_alg_md5,
+ .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
+ .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
+}, {
+ .crypto = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(des))",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .hash = &hash_alg_sha1,
+ .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
+ .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
+}, {
+ .crypto = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = des3_aead_setkey,
+ },
+ .hash = &hash_alg_sha1,
+ .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
+ .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
+}, {
+ .crypto = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .hash = &hash_alg_md5,
+ .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
+ .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
+}, {
+ .crypto = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .hash = &hash_alg_sha1,
+ .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
+ .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
+} };
+
+#define IXP_POSTFIX "-ixp4xx"
+
+static int ixp_crypto_probe(struct platform_device *_pdev)
+{
+ struct device *dev = &_pdev->dev;
+ int num = ARRAY_SIZE(ixp4xx_algos);
+ int i, err;
+
+ pdev = _pdev;
+
+ err = init_ixp_crypto(dev);
+ if (err)
+ return err;
+
+ for (i = 0; i < num; i++) {
+ struct skcipher_alg *cra = &ixp4xx_algos[i].crypto;
+
+ if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "%s"IXP_POSTFIX, cra->base.cra_name) >=
+ CRYPTO_MAX_ALG_NAME)
+ continue;
+ if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
+ continue;
+
+ /* block ciphers */
+ cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK;
+ if (!cra->setkey)
+ cra->setkey = ablk_setkey;
+ if (!cra->encrypt)
+ cra->encrypt = ablk_encrypt;
+ if (!cra->decrypt)
+ cra->decrypt = ablk_decrypt;
+ cra->init = init_tfm_ablk;
+ cra->exit = exit_tfm_ablk;
+
+ cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
+ cra->base.cra_module = THIS_MODULE;
+ cra->base.cra_alignmask = 3;
+ cra->base.cra_priority = 300;
+ if (crypto_register_skcipher(cra))
+ dev_err(&pdev->dev, "Failed to register '%s'\n",
+ cra->base.cra_name);
+ else
+ ixp4xx_algos[i].registered = 1;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
+ struct aead_alg *cra = &ixp4xx_aeads[i].crypto;
+
+ if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "%s"IXP_POSTFIX, cra->base.cra_name) >=
+ CRYPTO_MAX_ALG_NAME)
+ continue;
+ if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
+ continue;
+
+ /* authenc */
+ cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY;
+ cra->setkey = cra->setkey ?: aead_setkey;
+ cra->setauthsize = aead_setauthsize;
+ cra->encrypt = aead_encrypt;
+ cra->decrypt = aead_decrypt;
+ cra->init = init_tfm_aead;
+ cra->exit = exit_tfm_aead;
+
+ cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
+ cra->base.cra_module = THIS_MODULE;
+ cra->base.cra_alignmask = 3;
+ cra->base.cra_priority = 300;
+
+ if (crypto_register_aead(cra))
+ dev_err(&pdev->dev, "Failed to register '%s'\n",
+ cra->base.cra_driver_name);
+ else
+ ixp4xx_aeads[i].registered = 1;
+ }
+ return 0;
+}
+
+static int ixp_crypto_remove(struct platform_device *pdev)
+{
+ int num = ARRAY_SIZE(ixp4xx_algos);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
+ if (ixp4xx_aeads[i].registered)
+ crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
+ }
+
+ for (i = 0; i < num; i++) {
+ if (ixp4xx_algos[i].registered)
+ crypto_unregister_skcipher(&ixp4xx_algos[i].crypto);
+ }
+ release_ixp_crypto(&pdev->dev);
+
+ return 0;
+}
+static const struct of_device_id ixp4xx_crypto_of_match[] = {
+ {
+ .compatible = "intel,ixp4xx-crypto",
+ },
+ {},
+};
+
+static struct platform_driver ixp_crypto_driver = {
+ .probe = ixp_crypto_probe,
+ .remove = ixp_crypto_remove,
+ .driver = {
+ .name = "ixp4xx_crypto",
+ .of_match_table = ixp4xx_crypto_of_match,
+ },
+};
+module_platform_driver(ixp_crypto_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
+MODULE_DESCRIPTION("IXP4xx hardware crypto");
+
diff --git a/drivers/crypto/intel/keembay/Kconfig b/drivers/crypto/intel/keembay/Kconfig
new file mode 100644
index 0000000000..1cd62f9c3e
--- /dev/null
+++ b/drivers/crypto/intel/keembay/Kconfig
@@ -0,0 +1,90 @@
+config CRYPTO_DEV_KEEMBAY_OCS_AES_SM4
+ tristate "Support for Intel Keem Bay OCS AES/SM4 HW acceleration"
+ depends on HAS_IOMEM
+ depends on ARCH_KEEMBAY || COMPILE_TEST
+ select CRYPTO_SKCIPHER
+ select CRYPTO_AEAD
+ select CRYPTO_ENGINE
+ help
+ Support for Intel Keem Bay Offload and Crypto Subsystem (OCS) AES and
+ SM4 cipher hardware acceleration for use with Crypto API.
+
+ Provides HW acceleration for the following transformations:
+ cbc(aes), ctr(aes), ccm(aes), gcm(aes), cbc(sm4), ctr(sm4), ccm(sm4)
+ and gcm(sm4).
+
+ Optionally, support for the following transformations can also be
+ enabled: ecb(aes), cts(cbc(aes)), ecb(sm4) and cts(cbc(sm4)).
+
+config CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
+ bool "Support for Intel Keem Bay OCS AES/SM4 ECB HW acceleration"
+ depends on CRYPTO_DEV_KEEMBAY_OCS_AES_SM4
+ help
+ Support for Intel Keem Bay Offload and Crypto Subsystem (OCS)
+ AES/SM4 ECB mode hardware acceleration for use with Crypto API.
+
+ Provides OCS version of ecb(aes) and ecb(sm4)
+
+ Intel does not recommend use of ECB mode with AES/SM4.
+
+config CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
+ bool "Support for Intel Keem Bay OCS AES/SM4 CTS HW acceleration"
+ depends on CRYPTO_DEV_KEEMBAY_OCS_AES_SM4
+ help
+ Support for Intel Keem Bay Offload and Crypto Subsystem (OCS)
+ AES/SM4 CBC with CTS mode hardware acceleration for use with
+ Crypto API.
+
+ Provides OCS version of cts(cbc(aes)) and cts(cbc(sm4)).
+
+ Intel does not recommend use of CTS mode with AES/SM4.
+
+config CRYPTO_DEV_KEEMBAY_OCS_ECC
+ tristate "Support for Intel Keem Bay OCS ECC HW acceleration"
+ depends on ARCH_KEEMBAY || COMPILE_TEST
+ depends on OF
+ depends on HAS_IOMEM
+ select CRYPTO_ECDH
+ select CRYPTO_ENGINE
+ help
+ Support for Intel Keem Bay Offload and Crypto Subsystem (OCS)
+ Elliptic Curve Cryptography (ECC) hardware acceleration for use with
+ Crypto API.
+
+ Provides OCS acceleration for ECDH-256 and ECDH-384.
+
+ Say Y or M if you are compiling for the Intel Keem Bay SoC. The
+ module will be called keembay-ocs-ecc.
+
+ If unsure, say N.
+
+config CRYPTO_DEV_KEEMBAY_OCS_HCU
+ tristate "Support for Intel Keem Bay OCS HCU HW acceleration"
+ select CRYPTO_HASH
+ select CRYPTO_ENGINE
+ depends on HAS_IOMEM
+ depends on ARCH_KEEMBAY || COMPILE_TEST
+ depends on OF
+ help
+ Support for Intel Keem Bay Offload and Crypto Subsystem (OCS) Hash
+ Control Unit (HCU) hardware acceleration for use with Crypto API.
+
+ Provides OCS HCU hardware acceleration of sha256, sha384, sha512, and
+ sm3, as well as the HMAC variant of these algorithms.
+
+ Say Y or M if you're building for the Intel Keem Bay SoC. If compiled
+ as a module, the module will be called keembay-ocs-hcu.
+
+ If unsure, say N.
+
+config CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
+ bool "Enable sha224 and hmac(sha224) support in Intel Keem Bay OCS HCU"
+ depends on CRYPTO_DEV_KEEMBAY_OCS_HCU
+ help
+ Enables support for sha224 and hmac(sha224) algorithms in the Intel
+ Keem Bay OCS HCU driver. Intel recommends not to use these
+ algorithms.
+
+ Provides OCS HCU hardware acceleration of sha224 and hmac(sha224).
+
+ If unsure, say N.
diff --git a/drivers/crypto/intel/keembay/Makefile b/drivers/crypto/intel/keembay/Makefile
new file mode 100644
index 0000000000..7c12c3c138
--- /dev/null
+++ b/drivers/crypto/intel/keembay/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for Intel Keem Bay OCS Crypto API Linux drivers
+#
+obj-$(CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4) += keembay-ocs-aes.o
+keembay-ocs-aes-objs := keembay-ocs-aes-core.o ocs-aes.o
+
+obj-$(CONFIG_CRYPTO_DEV_KEEMBAY_OCS_ECC) += keembay-ocs-ecc.o
+
+obj-$(CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU) += keembay-ocs-hcu.o
+keembay-ocs-hcu-objs := keembay-ocs-hcu-core.o ocs-hcu.o
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c b/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
new file mode 100644
index 0000000000..1e2fd9a754
--- /dev/null
+++ b/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
@@ -0,0 +1,1691 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Keem Bay OCS AES Crypto Driver.
+ *
+ * Copyright (C) 2018-2020 Intel Corporation
+ */
+
+#include <crypto/aes.h>
+#include <crypto/engine.h>
+#include <crypto/gcm.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+
+#include "ocs-aes.h"
+
+#define KMB_OCS_PRIORITY 350
+#define DRV_NAME "keembay-ocs-aes"
+
+#define OCS_AES_MIN_KEY_SIZE 16
+#define OCS_AES_MAX_KEY_SIZE 32
+#define OCS_AES_KEYSIZE_128 16
+#define OCS_AES_KEYSIZE_192 24
+#define OCS_AES_KEYSIZE_256 32
+#define OCS_SM4_KEY_SIZE 16
+
+/**
+ * struct ocs_aes_tctx - OCS AES Transform context
+ * @aes_dev: The OCS AES device.
+ * @key: AES/SM4 key.
+ * @key_len: The length (in bytes) of @key.
+ * @cipher: OCS cipher to use (either AES or SM4).
+ * @sw_cipher: The cipher to use as fallback.
+ * @use_fallback: Whether or not fallback cipher should be used.
+ */
+struct ocs_aes_tctx {
+ struct ocs_aes_dev *aes_dev;
+ u8 key[OCS_AES_KEYSIZE_256];
+ unsigned int key_len;
+ enum ocs_cipher cipher;
+ union {
+ struct crypto_sync_skcipher *sk;
+ struct crypto_aead *aead;
+ } sw_cipher;
+ bool use_fallback;
+};
+
+/**
+ * struct ocs_aes_rctx - OCS AES Request context.
+ * @instruction: Instruction to be executed (encrypt / decrypt).
+ * @mode: Mode to use (ECB, CBC, CTR, CCM, GCM, CTS).
+ * @src_nents: Number of source SG entries.
+ * @dst_nents: Number of destination SG entries.
+ * @src_dma_count: The number of DMA-mapped entries of the source SG.
+ * @dst_dma_count: The number of DMA-mapped entries of the destination SG.
+ * @in_place: Whether or not this is an in place request, i.e.,
+ * src_sg == dst_sg.
+ * @src_dll: OCS DMA linked list for input data.
+ * @dst_dll: OCS DMA linked list for output data.
+ * @last_ct_blk: Buffer to hold last cipher text block (only used in CBC
+ * mode).
+ * @cts_swap: Whether or not CTS swap must be performed.
+ * @aad_src_dll: OCS DMA linked list for input AAD data.
+ * @aad_dst_dll: OCS DMA linked list for output AAD data.
+ * @in_tag: Buffer to hold input encrypted tag (only used for
+ * CCM/GCM decrypt).
+ * @out_tag: Buffer to hold output encrypted / decrypted tag (only
+ * used for GCM encrypt / decrypt).
+ */
+struct ocs_aes_rctx {
+ /* Fields common across all modes. */
+ enum ocs_instruction instruction;
+ enum ocs_mode mode;
+ int src_nents;
+ int dst_nents;
+ int src_dma_count;
+ int dst_dma_count;
+ bool in_place;
+ struct ocs_dll_desc src_dll;
+ struct ocs_dll_desc dst_dll;
+
+ /* CBC specific */
+ u8 last_ct_blk[AES_BLOCK_SIZE];
+
+ /* CTS specific */
+ int cts_swap;
+
+ /* CCM/GCM specific */
+ struct ocs_dll_desc aad_src_dll;
+ struct ocs_dll_desc aad_dst_dll;
+ u8 in_tag[AES_BLOCK_SIZE];
+
+ /* GCM specific */
+ u8 out_tag[AES_BLOCK_SIZE];
+};
+
+/* Driver data. */
+struct ocs_aes_drv {
+ struct list_head dev_list;
+ spinlock_t lock; /* Protects dev_list. */
+};
+
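+/* Global variable holding the list of OCS AES devices (only one expected). */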
+static struct ocs_aes_drv ocs_aes = {
+ .dev_list = LIST_HEAD_INIT(ocs_aes.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(ocs_aes.lock),
+};
+
+static struct ocs_aes_dev *kmb_ocs_aes_find_dev(struct ocs_aes_tctx *tctx)
+{
+ struct ocs_aes_dev *aes_dev;
+
+ spin_lock(&ocs_aes.lock);
+
+ if (tctx->aes_dev) {
+ aes_dev = tctx->aes_dev;
+ goto exit;
+ }
+
+ /* Only a single OCS device available */
+ aes_dev = list_first_entry(&ocs_aes.dev_list, struct ocs_aes_dev, list);
+ tctx->aes_dev = aes_dev;
+
+exit:
+ spin_unlock(&ocs_aes.lock);
+
+ return aes_dev;
+}
+
+/*
+ * Ensure the key is 128-bit or 256-bit for AES, or 128-bit for SM4, and that
+ * an actual key is being passed in.
+ *
+ * Return: 0 if key is valid, -EINVAL otherwise.
+ */
+static int check_key(const u8 *in_key, size_t key_len, enum ocs_cipher cipher)
+{
+ if (!in_key)
+ return -EINVAL;
+
+ /* For AES, only 128-bit or 256-bit keys are supported. */
+ if (cipher == OCS_AES && (key_len == OCS_AES_KEYSIZE_128 ||
+ key_len == OCS_AES_KEYSIZE_256))
+ return 0;
+
+ /* For SM4, only 128-bit keys are supported. */
+ if (cipher == OCS_SM4 && key_len == OCS_AES_KEYSIZE_128)
+ return 0;
+
+ /* Everything else is unsupported. */
+ return -EINVAL;
+}
+
+/* Save key into transformation context. */
+static int save_key(struct ocs_aes_tctx *tctx, const u8 *in_key, size_t key_len,
+ enum ocs_cipher cipher)
+{
+ int ret;
+
+ ret = check_key(in_key, key_len, cipher);
+ if (ret)
+ return ret;
+
+ memcpy(tctx->key, in_key, key_len);
+ tctx->key_len = key_len;
+ tctx->cipher = cipher;
+
+ return 0;
+}
+
+/* Set key for symmetric cipher. */
+static int kmb_ocs_sk_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
+ size_t key_len, enum ocs_cipher cipher)
+{
+ struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
+
+ /* Fallback is used for AES with 192-bit key. */
+ tctx->use_fallback = (cipher == OCS_AES &&
+ key_len == OCS_AES_KEYSIZE_192);
+
+ if (!tctx->use_fallback)
+ return save_key(tctx, in_key, key_len, cipher);
+
+ crypto_sync_skcipher_clear_flags(tctx->sw_cipher.sk,
+ CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(tctx->sw_cipher.sk,
+ tfm->base.crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_skcipher_setkey(tctx->sw_cipher.sk, in_key, key_len);
+}
+
+/* Set key for AEAD cipher. */
+static int kmb_ocs_aead_set_key(struct crypto_aead *tfm, const u8 *in_key,
+ size_t key_len, enum ocs_cipher cipher)
+{
+ struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);
+
+ /* Fallback is used for AES with 192-bit key. */
+ tctx->use_fallback = (cipher == OCS_AES &&
+ key_len == OCS_AES_KEYSIZE_192);
+
+ if (!tctx->use_fallback)
+ return save_key(tctx, in_key, key_len, cipher);
+
+ crypto_aead_clear_flags(tctx->sw_cipher.aead, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(tctx->sw_cipher.aead,
+ crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK);
+
+ return crypto_aead_setkey(tctx->sw_cipher.aead, in_key, key_len);
+}
+
+/* Swap two AES blocks in SG lists. */
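+/* Used in CTS mode to convert between CBC-CS2 and CBC-CS3 block ordering. */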
+static void sg_swap_blocks(struct scatterlist *sgl, unsigned int nents,
+ off_t blk1_offset, off_t blk2_offset)
+{
+ u8 tmp_buf1[AES_BLOCK_SIZE], tmp_buf2[AES_BLOCK_SIZE];
+
+ /*
+ * No easy way to copy within sg list, so copy both blocks to temporary
+ * buffers first.
+ */
+ sg_pcopy_to_buffer(sgl, nents, tmp_buf1, AES_BLOCK_SIZE, blk1_offset);
+ sg_pcopy_to_buffer(sgl, nents, tmp_buf2, AES_BLOCK_SIZE, blk2_offset);
+ sg_pcopy_from_buffer(sgl, nents, tmp_buf1, AES_BLOCK_SIZE, blk2_offset);
+ sg_pcopy_from_buffer(sgl, nents, tmp_buf2, AES_BLOCK_SIZE, blk1_offset);
+}
+
+/* Initialize request context to default values. */
+static void ocs_aes_init_rctx(struct ocs_aes_rctx *rctx)
+{
+ /* Zero everything. */
+ memset(rctx, 0, sizeof(*rctx));
+
+ /* Set initial value for DMA addresses. */
+ rctx->src_dll.dma_addr = DMA_MAPPING_ERROR;
+ rctx->dst_dll.dma_addr = DMA_MAPPING_ERROR;
+ rctx->aad_src_dll.dma_addr = DMA_MAPPING_ERROR;
+ rctx->aad_dst_dll.dma_addr = DMA_MAPPING_ERROR;
+}
+
+static int kmb_ocs_sk_validate_input(struct skcipher_request *req,
+ enum ocs_mode mode)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ int iv_size = crypto_skcipher_ivsize(tfm);
+
+ switch (mode) {
+ case OCS_MODE_ECB:
+ /* Ensure input length is multiple of block size */
+ if (req->cryptlen % AES_BLOCK_SIZE != 0)
+ return -EINVAL;
+
+ return 0;
+
+ case OCS_MODE_CBC:
+ /* Ensure input length is multiple of block size */
+ if (req->cryptlen % AES_BLOCK_SIZE != 0)
+ return -EINVAL;
+
+ /* Ensure IV is present and block size in length */
+ if (!req->iv || iv_size != AES_BLOCK_SIZE)
+ return -EINVAL;
+ /*
+ * NOTE: Since req->cryptlen == 0 case was already handled in
+ * kmb_ocs_sk_common(), the above two conditions also guarantee
+ * that: cryptlen >= iv_size
+ */
+ return 0;
+
+ case OCS_MODE_CTR:
+ /* Ensure IV is present and block size in length */
+ if (!req->iv || iv_size != AES_BLOCK_SIZE)
+ return -EINVAL;
+ return 0;
+
+ case OCS_MODE_CTS:
+ /* Ensure input length >= block size */
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ /* Ensure IV is present and block size in length */
+ if (!req->iv || iv_size != AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Called by encrypt() / decrypt() skcipher functions.
+ *
+ * Use fallback if needed, otherwise initialize context and enqueue request
+ * into engine.
+ */
+static int kmb_ocs_sk_common(struct skcipher_request *req,
+ enum ocs_cipher cipher,
+ enum ocs_instruction instruction,
+ enum ocs_mode mode)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
+ struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
+ struct ocs_aes_dev *aes_dev;
+ int rc;
+
+ if (tctx->use_fallback) {
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, tctx->sw_cipher.sk);
+
+ skcipher_request_set_sync_tfm(subreq, tctx->sw_cipher.sk);
+ skcipher_request_set_callback(subreq, req->base.flags, NULL,
+ NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+
+ if (instruction == OCS_ENCRYPT)
+ rc = crypto_skcipher_encrypt(subreq);
+ else
+ rc = crypto_skcipher_decrypt(subreq);
+
+ skcipher_request_zero(subreq);
+
+ return rc;
+ }
+
+ /*
+ * If cryptlen == 0, no processing needed for ECB, CBC and CTR.
+ *
+ * For CTS continue: kmb_ocs_sk_validate_input() will return -EINVAL.
+ */
+ if (!req->cryptlen && mode != OCS_MODE_CTS)
+ return 0;
+
+ rc = kmb_ocs_sk_validate_input(req, mode);
+ if (rc)
+ return rc;
+
+ aes_dev = kmb_ocs_aes_find_dev(tctx);
+ if (!aes_dev)
+ return -ENODEV;
+
+ if (cipher != tctx->cipher)
+ return -EINVAL;
+
+ ocs_aes_init_rctx(rctx);
+ rctx->instruction = instruction;
+ rctx->mode = mode;
+
+ return crypto_transfer_skcipher_request_to_engine(aes_dev->engine, req);
+}
+
+static void cleanup_ocs_dma_linked_list(struct device *dev,
+ struct ocs_dll_desc *dll)
+{
+ if (dll->vaddr)
+ dma_free_coherent(dev, dll->size, dll->vaddr, dll->dma_addr);
+ dll->vaddr = NULL;
+ dll->size = 0;
+ dll->dma_addr = DMA_MAPPING_ERROR;
+}
+
+static void kmb_ocs_sk_dma_cleanup(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
+ struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
+ struct device *dev = tctx->aes_dev->dev;
+
+ if (rctx->src_dma_count) {
+ dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
+ rctx->src_dma_count = 0;
+ }
+
+ if (rctx->dst_dma_count) {
+ dma_unmap_sg(dev, req->dst, rctx->dst_nents, rctx->in_place ?
+ DMA_BIDIRECTIONAL :
+ DMA_FROM_DEVICE);
+ rctx->dst_dma_count = 0;
+ }
+
+ /* Clean up OCS DMA linked lists */
+ cleanup_ocs_dma_linked_list(dev, &rctx->src_dll);
+ cleanup_ocs_dma_linked_list(dev, &rctx->dst_dll);
+}
+
+static int kmb_ocs_sk_prepare_inplace(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
+ struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
+ int iv_size = crypto_skcipher_ivsize(tfm);
+ int rc;
+
+ /*
+ * For CBC decrypt, save last block (iv) to last_ct_blk buffer.
+ *
+ * Note: if we are here, we already checked that cryptlen >= iv_size
+ * and iv_size == AES_BLOCK_SIZE (i.e., the size of last_ct_blk); see
+ * kmb_ocs_sk_validate_input().
+ */
+ if (rctx->mode == OCS_MODE_CBC && rctx->instruction == OCS_DECRYPT)
+ scatterwalk_map_and_copy(rctx->last_ct_blk, req->src,
+ req->cryptlen - iv_size, iv_size, 0);
+
+ /* For CTS decrypt, swap last two blocks, if needed. */
+ if (rctx->cts_swap && rctx->instruction == OCS_DECRYPT)
+ sg_swap_blocks(req->dst, rctx->dst_nents,
+ req->cryptlen - AES_BLOCK_SIZE,
+ req->cryptlen - (2 * AES_BLOCK_SIZE));
+
+ /* src and dst buffers are the same, use bidirectional DMA mapping. */
+ rctx->dst_dma_count = dma_map_sg(tctx->aes_dev->dev, req->dst,
+ rctx->dst_nents, DMA_BIDIRECTIONAL);
+ if (rctx->dst_dma_count == 0) {
+ dev_err(tctx->aes_dev->dev, "Failed to map destination sg\n");
+ return -ENOMEM;
+ }
+
+ /* Create DST linked list */
+ rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
+ rctx->dst_dma_count, &rctx->dst_dll,
+ req->cryptlen, 0);
+ if (rc)
+ return rc;
+ /*
+ * If descriptor creation was successful, set the src_dll.dma_addr to
+ * the value of dst_dll.dma_addr, as we do in-place AES operation on
+ * the src.
+ */
+ rctx->src_dll.dma_addr = rctx->dst_dll.dma_addr;
+
+ return 0;
+}
+
+static int kmb_ocs_sk_prepare_notinplace(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
+ struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
+ int rc;
+
+ rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
+ if (rctx->src_nents < 0)
+ return -EBADMSG;
+
+ /* Map SRC SG. */
+ rctx->src_dma_count = dma_map_sg(tctx->aes_dev->dev, req->src,
+ rctx->src_nents, DMA_TO_DEVICE);
+ if (rctx->src_dma_count == 0) {
+ dev_err(tctx->aes_dev->dev, "Failed to map source sg\n");
+ return -ENOMEM;
+ }
+
+ /* Create SRC linked list */
+ rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->src,
+ rctx->src_dma_count, &rctx->src_dll,
+ req->cryptlen, 0);
+ if (rc)
+ return rc;
+
+ /* Map DST SG. */
+ rctx->dst_dma_count = dma_map_sg(tctx->aes_dev->dev, req->dst,
+ rctx->dst_nents, DMA_FROM_DEVICE);
+ if (rctx->dst_dma_count == 0) {
+ dev_err(tctx->aes_dev->dev, "Failed to map destination sg\n");
+ return -ENOMEM;
+ }
+
+ /* Create DST linked list */
+ rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
+ rctx->dst_dma_count, &rctx->dst_dll,
+ req->cryptlen, 0);
+ if (rc)
+ return rc;
+
+ /* If this is not a CTS decrypt operation with swapping, we are done. */
+ if (!(rctx->cts_swap && rctx->instruction == OCS_DECRYPT))
+ return 0;
+
+ /*
+ * Otherwise, we have to copy src to dst (as we cannot modify src).
+ * Use OCS AES bypass mode to copy src to dst via DMA.
+ *
+ * NOTE: for anything other than small data sizes this is rather
+ * inefficient.
+ */
+ rc = ocs_aes_bypass_op(tctx->aes_dev, rctx->dst_dll.dma_addr,
+ rctx->src_dll.dma_addr, req->cryptlen);
+ if (rc)
+ return rc;
+
+ /*
+ * Now dst == src, so clean up what we did so far and use in_place
+ * logic.
+ */
+ kmb_ocs_sk_dma_cleanup(req);
+ rctx->in_place = true;
+
+ return kmb_ocs_sk_prepare_inplace(req);
+}
+
+static int kmb_ocs_sk_run(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
+ struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
+ struct ocs_aes_dev *aes_dev = tctx->aes_dev;
+ int iv_size = crypto_skcipher_ivsize(tfm);
+ int rc;
+
+ rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
+ if (rctx->dst_nents < 0)
+ return -EBADMSG;
+
+ /*
+ * If the input is two blocks or more and a multiple of the block size,
+ * swap the last two blocks to be compatible with other crypto API CTS
+ * implementations: OCS mode uses CBC-CS2, whereas other crypto API
+ * implementations use CBC-CS3.
+ * CBC-CS2 and CBC-CS3 are defined by:
+ * https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38a-add.pdf
+ */
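+ /*
+ * For example, for a block-aligned three-block message, CBC-CS2
+ * ciphertext is C1|C2|C3 (no swap, since the final block is full),
+ * while CBC-CS3 ciphertext is C1|C3|C2; hence the last two blocks are
+ * swapped (after encryption / before decryption) whenever the length
+ * is a multiple of the block size.
+ */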
+ rctx->cts_swap = (rctx->mode == OCS_MODE_CTS &&
+ req->cryptlen > AES_BLOCK_SIZE &&
+ req->cryptlen % AES_BLOCK_SIZE == 0);
+
+ rctx->in_place = (req->src == req->dst);
+
+ if (rctx->in_place)
+ rc = kmb_ocs_sk_prepare_inplace(req);
+ else
+ rc = kmb_ocs_sk_prepare_notinplace(req);
+
+ if (rc)
+ goto error;
+
+ rc = ocs_aes_op(aes_dev, rctx->mode, tctx->cipher, rctx->instruction,
+ rctx->dst_dll.dma_addr, rctx->src_dll.dma_addr,
+ req->cryptlen, req->iv, iv_size);
+ if (rc)
+ goto error;
+
+ /* Clean up DMA before further processing of the output. */
+ kmb_ocs_sk_dma_cleanup(req);
+
+ /* For CTS Encrypt, swap last 2 blocks, if needed. */
+ if (rctx->cts_swap && rctx->instruction == OCS_ENCRYPT) {
+ sg_swap_blocks(req->dst, rctx->dst_nents,
+ req->cryptlen - AES_BLOCK_SIZE,
+ req->cryptlen - (2 * AES_BLOCK_SIZE));
+ return 0;
+ }
+
+ /* For CBC, copy the IV to req->iv. */
+ if (rctx->mode == OCS_MODE_CBC) {
+ /* CBC encrypt case. */
+ if (rctx->instruction == OCS_ENCRYPT) {
+ scatterwalk_map_and_copy(req->iv, req->dst,
+ req->cryptlen - iv_size,
+ iv_size, 0);
+ return 0;
+ }
+ /* CBC decrypt case. */
+ if (rctx->in_place)
+ memcpy(req->iv, rctx->last_ct_blk, iv_size);
+ else
+ scatterwalk_map_and_copy(req->iv, req->src,
+ req->cryptlen - iv_size,
+ iv_size, 0);
+ return 0;
+ }
+ /* For all other modes there's nothing to do. */
+
+ return 0;
+
+error:
+ kmb_ocs_sk_dma_cleanup(req);
+
+ return rc;
+}
+
+static int kmb_ocs_aead_validate_input(struct aead_request *req,
+ enum ocs_instruction instruction,
+ enum ocs_mode mode)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ int tag_size = crypto_aead_authsize(tfm);
+ int iv_size = crypto_aead_ivsize(tfm);
+
+ /* For decrypt, cryptlen == len(PT) + len(tag). */
+ if (instruction == OCS_DECRYPT && req->cryptlen < tag_size)
+ return -EINVAL;
+
+ /* IV is mandatory. */
+ if (!req->iv)
+ return -EINVAL;
+
+ switch (mode) {
+ case OCS_MODE_GCM:
+ if (iv_size != GCM_AES_IV_SIZE)
+ return -EINVAL;
+
+ return 0;
+
+ case OCS_MODE_CCM:
+ /* Ensure IV is present and block size in length */
+ if (iv_size != AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Called by encrypt() / decrypt() aead functions.
+ *
+ * Use fallback if needed, otherwise initialize context and enqueue request
+ * into engine.
+ */
+static int kmb_ocs_aead_common(struct aead_request *req,
+ enum ocs_cipher cipher,
+ enum ocs_instruction instruction,
+ enum ocs_mode mode)
+{
+ struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct ocs_aes_rctx *rctx = aead_request_ctx(req);
+ struct ocs_aes_dev *dd;
+ int rc;
+
+ if (tctx->use_fallback) {
+ struct aead_request *subreq = aead_request_ctx(req);
+
+ aead_request_set_tfm(subreq, tctx->sw_cipher.aead);
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+ rc = crypto_aead_setauthsize(tctx->sw_cipher.aead,
+ crypto_aead_authsize(crypto_aead_reqtfm(req)));
+ if (rc)
+ return rc;
+
+ return (instruction == OCS_ENCRYPT) ?
+ crypto_aead_encrypt(subreq) :
+ crypto_aead_decrypt(subreq);
+ }
+
+ rc = kmb_ocs_aead_validate_input(req, instruction, mode);
+ if (rc)
+ return rc;
+
+ dd = kmb_ocs_aes_find_dev(tctx);
+ if (!dd)
+ return -ENODEV;
+
+ if (cipher != tctx->cipher)
+ return -EINVAL;
+
+ ocs_aes_init_rctx(rctx);
+ rctx->instruction = instruction;
+ rctx->mode = mode;
+
+ return crypto_transfer_aead_request_to_engine(dd->engine, req);
+}
+
+static void kmb_ocs_aead_dma_cleanup(struct aead_request *req)
+{
+ struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct ocs_aes_rctx *rctx = aead_request_ctx(req);
+ struct device *dev = tctx->aes_dev->dev;
+
+ if (rctx->src_dma_count) {
+ dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
+ rctx->src_dma_count = 0;
+ }
+
+ if (rctx->dst_dma_count) {
+ dma_unmap_sg(dev, req->dst, rctx->dst_nents, rctx->in_place ?
+ DMA_BIDIRECTIONAL :
+ DMA_FROM_DEVICE);
+ rctx->dst_dma_count = 0;
+ }
+ /* Clean up OCS DMA linked lists */
+ cleanup_ocs_dma_linked_list(dev, &rctx->src_dll);
+ cleanup_ocs_dma_linked_list(dev, &rctx->dst_dll);
+ cleanup_ocs_dma_linked_list(dev, &rctx->aad_src_dll);
+ cleanup_ocs_dma_linked_list(dev, &rctx->aad_dst_dll);
+}
+
+/**
+ * kmb_ocs_aead_dma_prepare() - Do DMA mapping for AEAD processing.
+ * @req: The AEAD request being processed.
+ * @src_dll_size: Where to store the length of the data mapped into the
+ * src_dll OCS DMA list.
+ *
+ * Do the following:
+ * - DMA map req->src and req->dst
+ * - Initialize the following OCS DMA linked lists: rctx->src_dll,
+ * rctx->dst_dll, rctx->aad_src_dll and rctx->aad_dst_dll.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int kmb_ocs_aead_dma_prepare(struct aead_request *req, u32 *src_dll_size)
+{
+ struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ const int tag_size = crypto_aead_authsize(crypto_aead_reqtfm(req));
+ struct ocs_aes_rctx *rctx = aead_request_ctx(req);
+ u32 in_size; /* The length of the data to be mapped by src_dll. */
+ u32 out_size; /* The length of the data to be mapped by dst_dll. */
+ u32 dst_size; /* The length of the data in dst_sg. */
+ int rc;
+
+ /* Get number of entries in input data SG list. */
+ rctx->src_nents = sg_nents_for_len(req->src,
+ req->assoclen + req->cryptlen);
+ if (rctx->src_nents < 0)
+ return -EBADMSG;
+
+ if (rctx->instruction == OCS_DECRYPT) {
+ /*
+ * For decrypt:
+ * - src sg list is: AAD|CT|tag
+ * - dst sg list expects: AAD|PT
+ *
+ * in_size == len(CT); out_size == len(PT)
+ */
+
+ /* req->cryptlen includes both CT and tag. */
+ in_size = req->cryptlen - tag_size;
+
+ /* out_size = PT size == CT size */
+ out_size = in_size;
+
+ /* len(dst_sg) == len(AAD) + len(PT) */
+ dst_size = req->assoclen + out_size;
+
+ /*
+ * Copy tag from source SG list to 'in_tag' buffer.
+ *
+ * Note: this needs to be done here, before DMA mapping src_sg.
+ */
+ sg_pcopy_to_buffer(req->src, rctx->src_nents, rctx->in_tag,
+ tag_size, req->assoclen + in_size);
+
+ } else { /* OCS_ENCRYPT */
+ /*
+ * For encrypt:
+ * src sg list is: AAD|PT
+ * dst sg list expects: AAD|CT|tag
+ */
+ /* in_size == len(PT) */
+ in_size = req->cryptlen;
+
+ /*
+ * In CCM mode the OCS engine appends the tag to the ciphertext,
+ * but in GCM mode the tag must be read from the tag registers
+ * and appended manually below
+ */
+ out_size = (rctx->mode == OCS_MODE_CCM) ? in_size + tag_size :
+ in_size;
+ /* len(dst_sg) == len(AAD) + len(CT) + len(tag) */
+ dst_size = req->assoclen + in_size + tag_size;
+ }
+ *src_dll_size = in_size;
+
+ /* Get number of entries in output data SG list. */
+ rctx->dst_nents = sg_nents_for_len(req->dst, dst_size);
+ if (rctx->dst_nents < 0)
+ return -EBADMSG;
+
+ rctx->in_place = (req->src == req->dst) ? 1 : 0;
+
+ /* Map destination; use bidirectional mapping for in-place case. */
+ rctx->dst_dma_count = dma_map_sg(tctx->aes_dev->dev, req->dst,
+ rctx->dst_nents,
+ rctx->in_place ? DMA_BIDIRECTIONAL :
+ DMA_FROM_DEVICE);
+ if (rctx->dst_dma_count == 0 && rctx->dst_nents != 0) {
+ dev_err(tctx->aes_dev->dev, "Failed to map destination sg\n");
+ return -ENOMEM;
+ }
+
+ /* Create AAD DST list: maps dst[0:AAD_SIZE-1]. */
+ rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
+ rctx->dst_dma_count,
+ &rctx->aad_dst_dll, req->assoclen,
+ 0);
+ if (rc)
+ return rc;
+
+ /* Create DST list: maps dst[AAD_SIZE:out_size] */
+ rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
+ rctx->dst_dma_count, &rctx->dst_dll,
+ out_size, req->assoclen);
+ if (rc)
+ return rc;
+
+ if (rctx->in_place) {
+ /* If this is not CCM encrypt, we are done. */
+ if (!(rctx->mode == OCS_MODE_CCM &&
+ rctx->instruction == OCS_ENCRYPT)) {
+ /*
+ * SRC and DST are the same, so re-use the same DMA
+ * addresses (to avoid allocating new DMA lists
+ * identical to the dst ones).
+ */
+ rctx->src_dll.dma_addr = rctx->dst_dll.dma_addr;
+ rctx->aad_src_dll.dma_addr = rctx->aad_dst_dll.dma_addr;
+
+ return 0;
+ }
+ /*
+ * For CCM encrypt the input and output linked lists contain
+ * different amounts of data, so, we need to create different
+ * SRC and AAD SRC lists, even for the in-place case.
+ */
+ rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
+ rctx->dst_dma_count,
+ &rctx->aad_src_dll,
+ req->assoclen, 0);
+ if (rc)
+ return rc;
+ rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
+ rctx->dst_dma_count,
+ &rctx->src_dll, in_size,
+ req->assoclen);
+ if (rc)
+ return rc;
+
+ return 0;
+ }
+ /* Not in-place case. */
+
+ /* Map source SG. */
+ rctx->src_dma_count = dma_map_sg(tctx->aes_dev->dev, req->src,
+ rctx->src_nents, DMA_TO_DEVICE);
+ if (rctx->src_dma_count == 0 && rctx->src_nents != 0) {
+ dev_err(tctx->aes_dev->dev, "Failed to map source sg\n");
+ return -ENOMEM;
+ }
+
+ /* Create AAD SRC list. */
+ rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->src,
+ rctx->src_dma_count,
+ &rctx->aad_src_dll,
+ req->assoclen, 0);
+ if (rc)
+ return rc;
+
+ /* Create SRC list. */
+ rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->src,
+ rctx->src_dma_count,
+ &rctx->src_dll, in_size,
+ req->assoclen);
+ if (rc)
+ return rc;
+
+ if (req->assoclen == 0)
+ return 0;
+
+ /* Copy AAD from src sg to dst sg using OCS DMA. */
+ rc = ocs_aes_bypass_op(tctx->aes_dev, rctx->aad_dst_dll.dma_addr,
+ rctx->aad_src_dll.dma_addr, req->assoclen);
+ if (rc)
+ dev_err(tctx->aes_dev->dev,
+ "Failed to copy source AAD to destination AAD\n");
+
+ return rc;
+}
+
+static int kmb_ocs_aead_run(struct aead_request *req)
+{
+ struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ const int tag_size = crypto_aead_authsize(crypto_aead_reqtfm(req));
+ struct ocs_aes_rctx *rctx = aead_request_ctx(req);
+ u32 in_size; /* The length of the data mapped by src_dll. */
+ int rc;
+
+ rc = kmb_ocs_aead_dma_prepare(req, &in_size);
+ if (rc)
+ goto exit;
+
+ /* For CCM, we just call the OCS processing and we are done. */
+ if (rctx->mode == OCS_MODE_CCM) {
+ rc = ocs_aes_ccm_op(tctx->aes_dev, tctx->cipher,
+ rctx->instruction, rctx->dst_dll.dma_addr,
+ rctx->src_dll.dma_addr, in_size,
+ req->iv,
+ rctx->aad_src_dll.dma_addr, req->assoclen,
+ rctx->in_tag, tag_size);
+ goto exit;
+ }
+ /* GCM case; invoke OCS processing. */
+ rc = ocs_aes_gcm_op(tctx->aes_dev, tctx->cipher,
+ rctx->instruction,
+ rctx->dst_dll.dma_addr,
+ rctx->src_dll.dma_addr, in_size,
+ req->iv,
+ rctx->aad_src_dll.dma_addr, req->assoclen,
+ rctx->out_tag, tag_size);
+ if (rc)
+ goto exit;
+
+ /* For GCM decrypt, we have to compare in_tag with out_tag. */
+ if (rctx->instruction == OCS_DECRYPT) {
+ rc = memcmp(rctx->in_tag, rctx->out_tag, tag_size) ?
+ -EBADMSG : 0;
+ goto exit;
+ }
+
+ /* For GCM encrypt, we must manually copy out_tag to DST sg. */
+
+ /* Clean-up must be called before the sg_pcopy_from_buffer() below. */
+ kmb_ocs_aead_dma_cleanup(req);
+
+ /* Copy tag to destination sg after AAD and CT. */
+ sg_pcopy_from_buffer(req->dst, rctx->dst_nents, rctx->out_tag,
+ tag_size, req->assoclen + req->cryptlen);
+
+ /* Return directly as DMA cleanup already done. */
+ return 0;
+
+exit:
+ kmb_ocs_aead_dma_cleanup(req);
+
+ return rc;
+}
+
+static int kmb_ocs_aes_sk_do_one_request(struct crypto_engine *engine,
+ void *areq)
+{
+ struct skcipher_request *req =
+ container_of(areq, struct skcipher_request, base);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
+ int err;
+
+ if (!tctx->aes_dev) {
+ err = -ENODEV;
+ goto exit;
+ }
+
+ err = ocs_aes_set_key(tctx->aes_dev, tctx->key_len, tctx->key,
+ tctx->cipher);
+ if (err)
+ goto exit;
+
+ err = kmb_ocs_sk_run(req);
+
+exit:
+ crypto_finalize_skcipher_request(engine, req, err);
+
+ return 0;
+}
+
+static int kmb_ocs_aes_aead_do_one_request(struct crypto_engine *engine,
+ void *areq)
+{
+ struct aead_request *req = container_of(areq,
+ struct aead_request, base);
+ struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ int err;
+
+ if (!tctx->aes_dev)
+ return -ENODEV;
+
+ err = ocs_aes_set_key(tctx->aes_dev, tctx->key_len, tctx->key,
+ tctx->cipher);
+ if (err)
+ goto exit;
+
+ err = kmb_ocs_aead_run(req);
+
+exit:
+ crypto_finalize_aead_request(tctx->aes_dev->engine, req, err);
+
+ return 0;
+}
+
+static int kmb_ocs_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ return kmb_ocs_sk_set_key(tfm, in_key, key_len, OCS_AES);
+}
+
+static int kmb_ocs_aes_aead_set_key(struct crypto_aead *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ return kmb_ocs_aead_set_key(tfm, in_key, key_len, OCS_AES);
+}
+
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
+static int kmb_ocs_aes_ecb_encrypt(struct skcipher_request *req)
+{
+ return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_ECB);
+}
+
+static int kmb_ocs_aes_ecb_decrypt(struct skcipher_request *req)
+{
+ return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_ECB);
+}
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
+
+static int kmb_ocs_aes_cbc_encrypt(struct skcipher_request *req)
+{
+ return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CBC);
+}
+
+static int kmb_ocs_aes_cbc_decrypt(struct skcipher_request *req)
+{
+ return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CBC);
+}
+
+static int kmb_ocs_aes_ctr_encrypt(struct skcipher_request *req)
+{
+ return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CTR);
+}
+
+static int kmb_ocs_aes_ctr_decrypt(struct skcipher_request *req)
+{
+ return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CTR);
+}
+
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
+static int kmb_ocs_aes_cts_encrypt(struct skcipher_request *req)
+{
+ return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CTS);
+}
+
+static int kmb_ocs_aes_cts_decrypt(struct skcipher_request *req)
+{
+ return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CTS);
+}
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
+
+static int kmb_ocs_aes_gcm_encrypt(struct aead_request *req)
+{
+ return kmb_ocs_aead_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_GCM);
+}
+
+static int kmb_ocs_aes_gcm_decrypt(struct aead_request *req)
+{
+ return kmb_ocs_aead_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_GCM);
+}
+
+static int kmb_ocs_aes_ccm_encrypt(struct aead_request *req)
+{
+ return kmb_ocs_aead_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CCM);
+}
+
+static int kmb_ocs_aes_ccm_decrypt(struct aead_request *req)
+{
+ return kmb_ocs_aead_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CCM);
+}
+
+static int kmb_ocs_sm4_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ return kmb_ocs_sk_set_key(tfm, in_key, key_len, OCS_SM4);
+}
+
+static int kmb_ocs_sm4_aead_set_key(struct crypto_aead *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ return kmb_ocs_aead_set_key(tfm, in_key, key_len, OCS_SM4);
+}
+
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
+static int kmb_ocs_sm4_ecb_encrypt(struct skcipher_request *req)
+{
+ return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_ECB);
+}
+
+static int kmb_ocs_sm4_ecb_decrypt(struct skcipher_request *req)
+{
+ return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_ECB);
+}
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
+
+static int kmb_ocs_sm4_cbc_encrypt(struct skcipher_request *req)
+{
+ return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CBC);
+}
+
+static int kmb_ocs_sm4_cbc_decrypt(struct skcipher_request *req)
+{
+ return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CBC);
+}
+
+static int kmb_ocs_sm4_ctr_encrypt(struct skcipher_request *req)
+{
+ return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CTR);
+}
+
+static int kmb_ocs_sm4_ctr_decrypt(struct skcipher_request *req)
+{
+ return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CTR);
+}
+
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
+static int kmb_ocs_sm4_cts_encrypt(struct skcipher_request *req)
+{
+ return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CTS);
+}
+
+static int kmb_ocs_sm4_cts_decrypt(struct skcipher_request *req)
+{
+ return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CTS);
+}
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
+
+static int kmb_ocs_sm4_gcm_encrypt(struct aead_request *req)
+{
+ return kmb_ocs_aead_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_GCM);
+}
+
+static int kmb_ocs_sm4_gcm_decrypt(struct aead_request *req)
+{
+ return kmb_ocs_aead_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_GCM);
+}
+
+static int kmb_ocs_sm4_ccm_encrypt(struct aead_request *req)
+{
+ return kmb_ocs_aead_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CCM);
+}
+
+static int kmb_ocs_sm4_ccm_decrypt(struct aead_request *req)
+{
+ return kmb_ocs_aead_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CCM);
+}
+
+static int ocs_aes_init_tfm(struct crypto_skcipher *tfm)
+{
+ const char *alg_name = crypto_tfm_alg_name(&tfm->base);
+ struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
+ struct crypto_sync_skcipher *blk;
+
+ /* set fallback cipher in case it will be needed */
+ blk = crypto_alloc_sync_skcipher(alg_name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(blk))
+ return PTR_ERR(blk);
+
+ tctx->sw_cipher.sk = blk;
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
+
+ return 0;
+}
+
+static int ocs_sm4_init_tfm(struct crypto_skcipher *tfm)
+{
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
+
+ return 0;
+}
+
+static inline void clear_key(struct ocs_aes_tctx *tctx)
+{
+ memzero_explicit(tctx->key, OCS_AES_KEYSIZE_256);
+
+ /* Zero key registers if set */
+ if (tctx->aes_dev)
+ ocs_aes_set_key(tctx->aes_dev, OCS_AES_KEYSIZE_256,
+ tctx->key, OCS_AES);
+}
+
+static void ocs_exit_tfm(struct crypto_skcipher *tfm)
+{
+ struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
+
+ clear_key(tctx);
+
+ if (tctx->sw_cipher.sk) {
+ crypto_free_sync_skcipher(tctx->sw_cipher.sk);
+ tctx->sw_cipher.sk = NULL;
+ }
+}
+
+static int ocs_aes_aead_cra_init(struct crypto_aead *tfm)
+{
+ const char *alg_name = crypto_tfm_alg_name(&tfm->base);
+ struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);
+ struct crypto_aead *blk;
+
+ /* Set fallback cipher in case it will be needed */
+ blk = crypto_alloc_aead(alg_name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(blk))
+ return PTR_ERR(blk);
+
+ tctx->sw_cipher.aead = blk;
+
+ crypto_aead_set_reqsize(tfm,
+ max(sizeof(struct ocs_aes_rctx),
+ (sizeof(struct aead_request) +
+ crypto_aead_reqsize(tctx->sw_cipher.aead))));
+
+ return 0;
+}
+
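+/* CCM only allows even tag lengths between 4 and 16 bytes (NIST SP 800-38C). */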
+static int kmb_ocs_aead_ccm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ switch (authsize) {
+ case 4:
+ case 6:
+ case 8:
+ case 10:
+ case 12:
+ case 14:
+ case 16:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int kmb_ocs_aead_gcm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ return crypto_gcm_check_authsize(authsize);
+}
+
+static int ocs_sm4_aead_cra_init(struct crypto_aead *tfm)
+{
+ crypto_aead_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
+
+ return 0;
+}
+
+static void ocs_aead_cra_exit(struct crypto_aead *tfm)
+{
+ struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);
+
+ clear_key(tctx);
+
+ if (tctx->sw_cipher.aead) {
+ crypto_free_aead(tctx->sw_cipher.aead);
+ tctx->sw_cipher.aead = NULL;
+ }
+}
+
+static struct skcipher_engine_alg algs[] = {
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
+ {
+ .base.base.cra_name = "ecb(aes)",
+ .base.base.cra_driver_name = "ecb-aes-keembay-ocs",
+ .base.base.cra_priority = KMB_OCS_PRIORITY,
+ .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .base.base.cra_module = THIS_MODULE,
+ .base.base.cra_alignmask = 0,
+
+ .base.min_keysize = OCS_AES_MIN_KEY_SIZE,
+ .base.max_keysize = OCS_AES_MAX_KEY_SIZE,
+ .base.setkey = kmb_ocs_aes_set_key,
+ .base.encrypt = kmb_ocs_aes_ecb_encrypt,
+ .base.decrypt = kmb_ocs_aes_ecb_decrypt,
+ .base.init = ocs_aes_init_tfm,
+ .base.exit = ocs_exit_tfm,
+ .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
+ },
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
+ {
+ .base.base.cra_name = "cbc(aes)",
+ .base.base.cra_driver_name = "cbc-aes-keembay-ocs",
+ .base.base.cra_priority = KMB_OCS_PRIORITY,
+ .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .base.base.cra_module = THIS_MODULE,
+ .base.base.cra_alignmask = 0,
+
+ .base.min_keysize = OCS_AES_MIN_KEY_SIZE,
+ .base.max_keysize = OCS_AES_MAX_KEY_SIZE,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.setkey = kmb_ocs_aes_set_key,
+ .base.encrypt = kmb_ocs_aes_cbc_encrypt,
+ .base.decrypt = kmb_ocs_aes_cbc_decrypt,
+ .base.init = ocs_aes_init_tfm,
+ .base.exit = ocs_exit_tfm,
+ .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
+ },
+ {
+ .base.base.cra_name = "ctr(aes)",
+ .base.base.cra_driver_name = "ctr-aes-keembay-ocs",
+ .base.base.cra_priority = KMB_OCS_PRIORITY,
+ .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.base.cra_blocksize = 1,
+ .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .base.base.cra_module = THIS_MODULE,
+ .base.base.cra_alignmask = 0,
+
+ .base.min_keysize = OCS_AES_MIN_KEY_SIZE,
+ .base.max_keysize = OCS_AES_MAX_KEY_SIZE,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.setkey = kmb_ocs_aes_set_key,
+ .base.encrypt = kmb_ocs_aes_ctr_encrypt,
+ .base.decrypt = kmb_ocs_aes_ctr_decrypt,
+ .base.init = ocs_aes_init_tfm,
+ .base.exit = ocs_exit_tfm,
+ .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
+ },
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
+ {
+ .base.base.cra_name = "cts(cbc(aes))",
+ .base.base.cra_driver_name = "cts-aes-keembay-ocs",
+ .base.base.cra_priority = KMB_OCS_PRIORITY,
+ .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .base.base.cra_module = THIS_MODULE,
+ .base.base.cra_alignmask = 0,
+
+ .base.min_keysize = OCS_AES_MIN_KEY_SIZE,
+ .base.max_keysize = OCS_AES_MAX_KEY_SIZE,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.setkey = kmb_ocs_aes_set_key,
+ .base.encrypt = kmb_ocs_aes_cts_encrypt,
+ .base.decrypt = kmb_ocs_aes_cts_decrypt,
+ .base.init = ocs_aes_init_tfm,
+ .base.exit = ocs_exit_tfm,
+ .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
+ },
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
+ {
+ .base.base.cra_name = "ecb(sm4)",
+ .base.base.cra_driver_name = "ecb-sm4-keembay-ocs",
+ .base.base.cra_priority = KMB_OCS_PRIORITY,
+ .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .base.base.cra_module = THIS_MODULE,
+ .base.base.cra_alignmask = 0,
+
+ .base.min_keysize = OCS_SM4_KEY_SIZE,
+ .base.max_keysize = OCS_SM4_KEY_SIZE,
+ .base.setkey = kmb_ocs_sm4_set_key,
+ .base.encrypt = kmb_ocs_sm4_ecb_encrypt,
+ .base.decrypt = kmb_ocs_sm4_ecb_decrypt,
+ .base.init = ocs_sm4_init_tfm,
+ .base.exit = ocs_exit_tfm,
+ .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
+ },
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
+ {
+ .base.base.cra_name = "cbc(sm4)",
+ .base.base.cra_driver_name = "cbc-sm4-keembay-ocs",
+ .base.base.cra_priority = KMB_OCS_PRIORITY,
+ .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .base.base.cra_module = THIS_MODULE,
+ .base.base.cra_alignmask = 0,
+
+ .base.min_keysize = OCS_SM4_KEY_SIZE,
+ .base.max_keysize = OCS_SM4_KEY_SIZE,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.setkey = kmb_ocs_sm4_set_key,
+ .base.encrypt = kmb_ocs_sm4_cbc_encrypt,
+ .base.decrypt = kmb_ocs_sm4_cbc_decrypt,
+ .base.init = ocs_sm4_init_tfm,
+ .base.exit = ocs_exit_tfm,
+ .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
+ },
+ {
+ .base.base.cra_name = "ctr(sm4)",
+ .base.base.cra_driver_name = "ctr-sm4-keembay-ocs",
+ .base.base.cra_priority = KMB_OCS_PRIORITY,
+ .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.base.cra_blocksize = 1,
+ .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .base.base.cra_module = THIS_MODULE,
+ .base.base.cra_alignmask = 0,
+
+ .base.min_keysize = OCS_SM4_KEY_SIZE,
+ .base.max_keysize = OCS_SM4_KEY_SIZE,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.setkey = kmb_ocs_sm4_set_key,
+ .base.encrypt = kmb_ocs_sm4_ctr_encrypt,
+ .base.decrypt = kmb_ocs_sm4_ctr_decrypt,
+ .base.init = ocs_sm4_init_tfm,
+ .base.exit = ocs_exit_tfm,
+ .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
+ },
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
+ {
+ .base.base.cra_name = "cts(cbc(sm4))",
+ .base.base.cra_driver_name = "cts-sm4-keembay-ocs",
+ .base.base.cra_priority = KMB_OCS_PRIORITY,
+ .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .base.base.cra_module = THIS_MODULE,
+ .base.base.cra_alignmask = 0,
+
+ .base.min_keysize = OCS_SM4_KEY_SIZE,
+ .base.max_keysize = OCS_SM4_KEY_SIZE,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.setkey = kmb_ocs_sm4_set_key,
+ .base.encrypt = kmb_ocs_sm4_cts_encrypt,
+ .base.decrypt = kmb_ocs_sm4_cts_decrypt,
+ .base.init = ocs_sm4_init_tfm,
+ .base.exit = ocs_exit_tfm,
+ .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
+ }
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
+};
+
+static struct aead_engine_alg algs_aead[] = {
+ {
+ .base.base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-keembay-ocs",
+ .cra_priority = KMB_OCS_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .base.init = ocs_aes_aead_cra_init,
+ .base.exit = ocs_aead_cra_exit,
+ .base.ivsize = GCM_AES_IV_SIZE,
+ .base.maxauthsize = AES_BLOCK_SIZE,
+ .base.setauthsize = kmb_ocs_aead_gcm_setauthsize,
+ .base.setkey = kmb_ocs_aes_aead_set_key,
+ .base.encrypt = kmb_ocs_aes_gcm_encrypt,
+ .base.decrypt = kmb_ocs_aes_gcm_decrypt,
+ .op.do_one_request = kmb_ocs_aes_aead_do_one_request,
+ },
+ {
+ .base.base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "ccm-aes-keembay-ocs",
+ .cra_priority = KMB_OCS_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .base.init = ocs_aes_aead_cra_init,
+ .base.exit = ocs_aead_cra_exit,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.maxauthsize = AES_BLOCK_SIZE,
+ .base.setauthsize = kmb_ocs_aead_ccm_setauthsize,
+ .base.setkey = kmb_ocs_aes_aead_set_key,
+ .base.encrypt = kmb_ocs_aes_ccm_encrypt,
+ .base.decrypt = kmb_ocs_aes_ccm_decrypt,
+ .op.do_one_request = kmb_ocs_aes_aead_do_one_request,
+ },
+ {
+ .base.base = {
+ .cra_name = "gcm(sm4)",
+ .cra_driver_name = "gcm-sm4-keembay-ocs",
+ .cra_priority = KMB_OCS_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .base.init = ocs_sm4_aead_cra_init,
+ .base.exit = ocs_aead_cra_exit,
+ .base.ivsize = GCM_AES_IV_SIZE,
+ .base.maxauthsize = AES_BLOCK_SIZE,
+ .base.setauthsize = kmb_ocs_aead_gcm_setauthsize,
+ .base.setkey = kmb_ocs_sm4_aead_set_key,
+ .base.encrypt = kmb_ocs_sm4_gcm_encrypt,
+ .base.decrypt = kmb_ocs_sm4_gcm_decrypt,
+ .op.do_one_request = kmb_ocs_aes_aead_do_one_request,
+ },
+ {
+ .base.base = {
+ .cra_name = "ccm(sm4)",
+ .cra_driver_name = "ccm-sm4-keembay-ocs",
+ .cra_priority = KMB_OCS_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .base.init = ocs_sm4_aead_cra_init,
+ .base.exit = ocs_aead_cra_exit,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.maxauthsize = AES_BLOCK_SIZE,
+ .base.setauthsize = kmb_ocs_aead_ccm_setauthsize,
+ .base.setkey = kmb_ocs_sm4_aead_set_key,
+ .base.encrypt = kmb_ocs_sm4_ccm_encrypt,
+ .base.decrypt = kmb_ocs_sm4_ccm_decrypt,
+ .op.do_one_request = kmb_ocs_aes_aead_do_one_request,
+ }
+};
+
+static void unregister_aes_algs(struct ocs_aes_dev *aes_dev)
+{
+ crypto_engine_unregister_aeads(algs_aead, ARRAY_SIZE(algs_aead));
+ crypto_engine_unregister_skciphers(algs, ARRAY_SIZE(algs));
+}
+
+static int register_aes_algs(struct ocs_aes_dev *aes_dev)
+{
+ int ret;
+
+ /*
+ * If any algorithm fails to register, all preceding algorithms that
+ * were successfully registered will be automatically unregistered.
+ */
+ ret = crypto_engine_register_aeads(algs_aead, ARRAY_SIZE(algs_aead));
+ if (ret)
+ return ret;
+
+ ret = crypto_engine_register_skciphers(algs, ARRAY_SIZE(algs));
+ if (ret)
+ crypto_engine_unregister_aeads(algs_aead, ARRAY_SIZE(algs_aead));
+
+ return ret;
+}
+
+/* Device tree driver match. */
+static const struct of_device_id kmb_ocs_aes_of_match[] = {
+ {
+ .compatible = "intel,keembay-ocs-aes",
+ },
+ {}
+};
+
+static int kmb_ocs_aes_remove(struct platform_device *pdev)
+{
+ struct ocs_aes_dev *aes_dev;
+
+ aes_dev = platform_get_drvdata(pdev);
+
+ unregister_aes_algs(aes_dev);
+
+ spin_lock(&ocs_aes.lock);
+ list_del(&aes_dev->list);
+ spin_unlock(&ocs_aes.lock);
+
+ crypto_engine_exit(aes_dev->engine);
+
+ return 0;
+}
+
+static int kmb_ocs_aes_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ocs_aes_dev *aes_dev;
+ int rc;
+
+ aes_dev = devm_kzalloc(dev, sizeof(*aes_dev), GFP_KERNEL);
+ if (!aes_dev)
+ return -ENOMEM;
+
+ aes_dev->dev = dev;
+
+ platform_set_drvdata(pdev, aes_dev);
+
+ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_err(dev, "Failed to set 32 bit dma mask %d\n", rc);
+ return rc;
+ }
+
+ /* Get base register address. */
+ aes_dev->base_reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(aes_dev->base_reg))
+ return PTR_ERR(aes_dev->base_reg);
+
+ /* Get and request IRQ */
+ aes_dev->irq = platform_get_irq(pdev, 0);
+ if (aes_dev->irq < 0)
+ return aes_dev->irq;
+
+ rc = devm_request_threaded_irq(dev, aes_dev->irq, ocs_aes_irq_handler,
+ NULL, 0, "keembay-ocs-aes", aes_dev);
+ if (rc < 0) {
+ dev_err(dev, "Could not request IRQ\n");
+ return rc;
+ }
+
+ INIT_LIST_HEAD(&aes_dev->list);
+ spin_lock(&ocs_aes.lock);
+ list_add_tail(&aes_dev->list, &ocs_aes.dev_list);
+ spin_unlock(&ocs_aes.lock);
+
+ init_completion(&aes_dev->irq_completion);
+
+ /* Initialize crypto engine */
+ aes_dev->engine = crypto_engine_alloc_init(dev, true);
+ if (!aes_dev->engine) {
+ rc = -ENOMEM;
+ goto list_del;
+ }
+
+ rc = crypto_engine_start(aes_dev->engine);
+ if (rc) {
+ dev_err(dev, "Could not start crypto engine\n");
+ goto cleanup;
+ }
+
+ rc = register_aes_algs(aes_dev);
+ if (rc) {
+ dev_err(dev,
+ "Could not register OCS algorithms with Crypto API\n");
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ crypto_engine_exit(aes_dev->engine);
+list_del:
+ spin_lock(&ocs_aes.lock);
+ list_del(&aes_dev->list);
+ spin_unlock(&ocs_aes.lock);
+
+ return rc;
+}
+
+/* The OCS driver is a platform device. */
+static struct platform_driver kmb_ocs_aes_driver = {
+ .probe = kmb_ocs_aes_probe,
+ .remove = kmb_ocs_aes_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = kmb_ocs_aes_of_match,
+ },
+};
+
+module_platform_driver(kmb_ocs_aes_driver);
+
+MODULE_DESCRIPTION("Intel Keem Bay Offload and Crypto Subsystem (OCS) AES/SM4 Driver");
+MODULE_LICENSE("GPL");
+
+MODULE_ALIAS_CRYPTO("cbc-aes-keembay-ocs");
+MODULE_ALIAS_CRYPTO("ctr-aes-keembay-ocs");
+MODULE_ALIAS_CRYPTO("gcm-aes-keembay-ocs");
+MODULE_ALIAS_CRYPTO("ccm-aes-keembay-ocs");
+
+MODULE_ALIAS_CRYPTO("cbc-sm4-keembay-ocs");
+MODULE_ALIAS_CRYPTO("ctr-sm4-keembay-ocs");
+MODULE_ALIAS_CRYPTO("gcm-sm4-keembay-ocs");
+MODULE_ALIAS_CRYPTO("ccm-sm4-keembay-ocs");
+
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
+MODULE_ALIAS_CRYPTO("ecb-aes-keembay-ocs");
+MODULE_ALIAS_CRYPTO("ecb-sm4-keembay-ocs");
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
+
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
+MODULE_ALIAS_CRYPTO("cts-aes-keembay-ocs");
+MODULE_ALIAS_CRYPTO("cts-sm4-keembay-ocs");
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-ecc.c b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
new file mode 100644
index 0000000000..fb95deed90
--- /dev/null
+++ b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
@@ -0,0 +1,1009 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Keem Bay OCS ECC Crypto Driver.
+ *
+ * Copyright (C) 2019-2021 Intel Corporation
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <crypto/ecc_curve.h>
+#include <crypto/ecdh.h>
+#include <crypto/engine.h>
+#include <crypto/internal/ecc.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/kpp.h>
+#include <crypto/rng.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/fips.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/string.h>
+
+#define DRV_NAME "keembay-ocs-ecc"
+
+#define KMB_OCS_ECC_PRIORITY 350
+
+#define HW_OFFS_OCS_ECC_COMMAND 0x00000000
+#define HW_OFFS_OCS_ECC_STATUS 0x00000004
+#define HW_OFFS_OCS_ECC_DATA_IN 0x00000080
+#define HW_OFFS_OCS_ECC_CX_DATA_OUT 0x00000100
+#define HW_OFFS_OCS_ECC_CY_DATA_OUT 0x00000180
+#define HW_OFFS_OCS_ECC_ISR 0x00000400
+#define HW_OFFS_OCS_ECC_IER 0x00000404
+
+#define HW_OCS_ECC_ISR_INT_STATUS_DONE BIT(0)
+#define HW_OCS_ECC_COMMAND_INS_BP BIT(0)
+
+#define HW_OCS_ECC_COMMAND_START_VAL BIT(0)
+
+#define OCS_ECC_OP_SIZE_384 BIT(8)
+#define OCS_ECC_OP_SIZE_256 0
+
+/* ECC Instruction : for ECC_COMMAND */
+#define OCS_ECC_INST_WRITE_AX (0x1 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_WRITE_AY (0x2 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_WRITE_BX_D (0x3 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_WRITE_BY_L (0x4 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_WRITE_P (0x5 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_WRITE_A (0x6 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_CALC_D_IDX_A (0x8 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_CALC_A_POW_B_MODP (0xB << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_CALC_A_MUL_B_MODP (0xC << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_CALC_A_ADD_B_MODP (0xD << HW_OCS_ECC_COMMAND_INS_BP)
+
+#define ECC_ENABLE_INTR 1
+
+#define POLL_USEC 100
+#define TIMEOUT_USEC 10000
+
+#define KMB_ECC_VLI_MAX_DIGITS ECC_CURVE_NIST_P384_DIGITS
+#define KMB_ECC_VLI_MAX_BYTES (KMB_ECC_VLI_MAX_DIGITS \
+ << ECC_DIGITS_TO_BYTES_SHIFT)
+
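+/* Exponent used to compute x^3 when validating a public key. */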
+#define POW_CUBE 3
+
+/**
+ * struct ocs_ecc_dev - ECC device context
+ * @list: List of device contexts
+ * @dev: OCS ECC device
+ * @base_reg: IO base address of OCS ECC
+ * @engine: Crypto engine for the device
+ * @irq_done: IRQ done completion.
+ * @irq: IRQ number
+ */
+struct ocs_ecc_dev {
+ struct list_head list;
+ struct device *dev;
+ void __iomem *base_reg;
+ struct crypto_engine *engine;
+ struct completion irq_done;
+ int irq;
+};
+
+/**
+ * struct ocs_ecc_ctx - Transformation context.
+ * @ecc_dev: The ECC driver associated with this context.
+ * @curve: The elliptic curve used by this transformation.
+ * @private_key: The private key.
+ */
+struct ocs_ecc_ctx {
+ struct ocs_ecc_dev *ecc_dev;
+ const struct ecc_curve *curve;
+ u64 private_key[KMB_ECC_VLI_MAX_DIGITS];
+};
+
+/* Driver data. */
+struct ocs_ecc_drv {
+ struct list_head dev_list;
+ spinlock_t lock; /* Protects dev_list. */
+};
+
+/* Global variable holding the list of OCS ECC devices (only one expected). */
+static struct ocs_ecc_drv ocs_ecc = {
+ .dev_list = LIST_HEAD_INIT(ocs_ecc.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(ocs_ecc.lock),
+};
+
+/* Get OCS ECC tfm context from kpp_request. */
+static inline struct ocs_ecc_ctx *kmb_ocs_ecc_tctx(struct kpp_request *req)
+{
+ return kpp_tfm_ctx(crypto_kpp_reqtfm(req));
+}
+
+/* Converts number of digits to number of bytes. */
+static inline unsigned int digits_to_bytes(unsigned int n)
+{
+ return n << ECC_DIGITS_TO_BYTES_SHIFT;
+}
+
+/*
+ * Wait for ECC idle, i.e., when an operation (other than a write
+ * operation) is done.
+ */
+static inline int ocs_ecc_wait_idle(struct ocs_ecc_dev *dev)
+{
+ u32 value;
+
+ return readl_poll_timeout((dev->base_reg + HW_OFFS_OCS_ECC_STATUS),
+ value,
+ !(value & HW_OCS_ECC_ISR_INT_STATUS_DONE),
+ POLL_USEC, TIMEOUT_USEC);
+}
+
+static void ocs_ecc_cmd_start(struct ocs_ecc_dev *ecc_dev, u32 op_size)
+{
+ iowrite32(op_size | HW_OCS_ECC_COMMAND_START_VAL,
+ ecc_dev->base_reg + HW_OFFS_OCS_ECC_COMMAND);
+}
+
+/* Direct write of u32 buffer to ECC engine with associated instruction. */
+static void ocs_ecc_write_cmd_and_data(struct ocs_ecc_dev *dev,
+ u32 op_size,
+ u32 inst,
+ const void *data_in,
+ size_t data_size)
+{
+ iowrite32(op_size | inst, dev->base_reg + HW_OFFS_OCS_ECC_COMMAND);
+
+ /* MMIO Write src uint32 to dst. */
+ memcpy_toio(dev->base_reg + HW_OFFS_OCS_ECC_DATA_IN, data_in,
+ data_size);
+}
+
+/* Start OCS ECC operation and wait for its completion. */
+static int ocs_ecc_trigger_op(struct ocs_ecc_dev *ecc_dev, u32 op_size,
+ u32 inst)
+{
+ reinit_completion(&ecc_dev->irq_done);
+
+ iowrite32(ECC_ENABLE_INTR, ecc_dev->base_reg + HW_OFFS_OCS_ECC_IER);
+ iowrite32(op_size | inst, ecc_dev->base_reg + HW_OFFS_OCS_ECC_COMMAND);
+
+ return wait_for_completion_interruptible(&ecc_dev->irq_done);
+}
+
+/**
+ * ocs_ecc_read_cx_out() - Read the CX data output buffer.
+ * @dev: The OCS ECC device to read from.
+ * @cx_out: The buffer where to store the CX value. Must be at least
+ * @byte_count byte long.
+ * @byte_count: The amount of data to read.
+ */
+static inline void ocs_ecc_read_cx_out(struct ocs_ecc_dev *dev, void *cx_out,
+ size_t byte_count)
+{
+ memcpy_fromio(cx_out, dev->base_reg + HW_OFFS_OCS_ECC_CX_DATA_OUT,
+ byte_count);
+}
+
+/**
+ * ocs_ecc_read_cy_out() - Read the CY data output buffer.
+ * @dev: The OCS ECC device to read from.
+ * @cy_out: The buffer where to store the CY value. Must be at least
+ * @byte_count byte long.
+ * @byte_count: The amount of data to read.
+ */
+static inline void ocs_ecc_read_cy_out(struct ocs_ecc_dev *dev, void *cy_out,
+ size_t byte_count)
+{
+ memcpy_fromio(cy_out, dev->base_reg + HW_OFFS_OCS_ECC_CY_DATA_OUT,
+ byte_count);
+}
+
+static struct ocs_ecc_dev *kmb_ocs_ecc_find_dev(struct ocs_ecc_ctx *tctx)
+{
+ if (tctx->ecc_dev)
+ return tctx->ecc_dev;
+
+ spin_lock(&ocs_ecc.lock);
+
+ /* Only a single OCS device available. */
+ tctx->ecc_dev = list_first_entry(&ocs_ecc.dev_list, struct ocs_ecc_dev,
+ list);
+
+ spin_unlock(&ocs_ecc.lock);
+
+ return tctx->ecc_dev;
+}
+
+/* Do point multiplication using OCS ECC HW. */
+static int kmb_ecc_point_mult(struct ocs_ecc_dev *ecc_dev,
+ struct ecc_point *result,
+ const struct ecc_point *point,
+ u64 *scalar,
+ const struct ecc_curve *curve)
+{
+ u8 sca[KMB_ECC_VLI_MAX_BYTES]; /* Use the maximum data size. */
+ u32 op_size = (curve->g.ndigits > ECC_CURVE_NIST_P256_DIGITS) ?
+ OCS_ECC_OP_SIZE_384 : OCS_ECC_OP_SIZE_256;
+ size_t nbytes = digits_to_bytes(curve->g.ndigits);
+ int rc = 0;
+
+ /* Generate random nbytes for Simple and Differential SCA protection. */
+ rc = crypto_get_default_rng();
+ if (rc)
+ return rc;
+
+ rc = crypto_rng_get_bytes(crypto_default_rng, sca, nbytes);
+ crypto_put_default_rng();
+ if (rc)
+ return rc;
+
+ /* Wait for the engine to be idle before starting a new operation. */
+ rc = ocs_ecc_wait_idle(ecc_dev);
+ if (rc)
+ return rc;
+
+ /* Send ecc_start pulse as well as indicating operation size. */
+ ocs_ecc_cmd_start(ecc_dev, op_size);
+
+ /* Write ax param; Base point (Gx). */
+ ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AX,
+ point->x, nbytes);
+
+ /* Write ay param; Base point (Gy). */
+ ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AY,
+ point->y, nbytes);
+
+ /*
+ * Write the private key into DATA_IN reg.
+ *
+ * Since the DATA_IN register is used to write different values during
+ * the computation, the private key value is subsequently overwritten
+ * with the side-channel-resistance value.
+ */
+ ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_BX_D,
+ scalar, nbytes);
+
+ /* Write operand by/l. */
+ ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_BY_L,
+ sca, nbytes);
+ memzero_explicit(sca, sizeof(sca));
+
+ /* Write p = curve prime (GF modulus). */
+ ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_P,
+ curve->p, nbytes);
+
+ /* Write a = curve coefficient. */
+ ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_A,
+ curve->a, nbytes);
+
+ /* Make hardware perform the multiplication. */
+ rc = ocs_ecc_trigger_op(ecc_dev, op_size, OCS_ECC_INST_CALC_D_IDX_A);
+ if (rc)
+ return rc;
+
+ /* Read result. */
+ ocs_ecc_read_cx_out(ecc_dev, result->x, nbytes);
+ ocs_ecc_read_cy_out(ecc_dev, result->y, nbytes);
+
+ return 0;
+}
+
+/**
+ * kmb_ecc_do_scalar_op() - Perform Scalar operation using OCS ECC HW.
+ * @ecc_dev: The OCS ECC device to use.
+ * @scalar_out: Where to store the output scalar.
+ * @scalar_a: Input scalar operand 'a'.
+ * @scalar_b: Input scalar operand 'b'
+ * @curve: The curve on which the operation is performed.
+ * @ndigits: The size of the operands (in digits).
+ * @inst: The operation to perform (as an OCS ECC instruction).
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int kmb_ecc_do_scalar_op(struct ocs_ecc_dev *ecc_dev, u64 *scalar_out,
+ const u64 *scalar_a, const u64 *scalar_b,
+ const struct ecc_curve *curve,
+ unsigned int ndigits, const u32 inst)
+{
+ u32 op_size = (ndigits > ECC_CURVE_NIST_P256_DIGITS) ?
+ OCS_ECC_OP_SIZE_384 : OCS_ECC_OP_SIZE_256;
+ size_t nbytes = digits_to_bytes(ndigits);
+ int rc;
+
+	/* Wait for the engine to be idle before starting a new operation. */
+ rc = ocs_ecc_wait_idle(ecc_dev);
+ if (rc)
+ return rc;
+
+ /* Send ecc_start pulse as well as indicating operation size. */
+ ocs_ecc_cmd_start(ecc_dev, op_size);
+
+	/* Write ax param (scalar operand a). */
+ ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AX,
+ scalar_a, nbytes);
+
+	/* Write ay param (scalar operand b). */
+ ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AY,
+ scalar_b, nbytes);
+
+	/* Write p = curve prime (GF modulus). */
+ ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_P,
+ curve->p, nbytes);
+
+	/* Issue the instruction (A*B or A+B mod p) to the ECC engine. */
+ rc = ocs_ecc_trigger_op(ecc_dev, op_size, inst);
+ if (rc)
+ return rc;
+
+ ocs_ecc_read_cx_out(ecc_dev, scalar_out, nbytes);
+
+ if (vli_is_zero(scalar_out, ndigits))
+ return -EINVAL;
+
+ return 0;
+}
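+
+/*
+ * Usage sketch (illustrative only): the public-key validation below computes
+ * y^2 mod p as
+ *
+ *	kmb_ecc_do_scalar_op(ecc_dev, yy, pk->y, pk->y, curve, pk->ndigits,
+ *			     OCS_ECC_INST_CALC_A_MUL_B_MODP);
+ *
+ * i.e., scalar_out = (scalar_a * scalar_b) mod curve->p.
+ */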
+
+/* SP800-56A section 5.6.2.3.4 partial verification: ephemeral keys only */
+static int kmb_ocs_ecc_is_pubkey_valid_partial(struct ocs_ecc_dev *ecc_dev,
+ const struct ecc_curve *curve,
+ struct ecc_point *pk)
+{
+ u64 xxx[KMB_ECC_VLI_MAX_DIGITS] = { 0 };
+ u64 yy[KMB_ECC_VLI_MAX_DIGITS] = { 0 };
+ u64 w[KMB_ECC_VLI_MAX_DIGITS] = { 0 };
+ int rc;
+
+ if (WARN_ON(pk->ndigits != curve->g.ndigits))
+ return -EINVAL;
+
+ /* Check 1: Verify key is not the zero point. */
+ if (ecc_point_is_zero(pk))
+ return -EINVAL;
+
+ /* Check 2: Verify key is in the range [0, p-1]. */
+ if (vli_cmp(curve->p, pk->x, pk->ndigits) != 1)
+ return -EINVAL;
+
+ if (vli_cmp(curve->p, pk->y, pk->ndigits) != 1)
+ return -EINVAL;
+
+ /* Check 3: Verify that y^2 == (x^3 + a·x + b) mod p */
+
+	/* Compute y^2 -> store in yy. */
+ rc = kmb_ecc_do_scalar_op(ecc_dev, yy, pk->y, pk->y, curve, pk->ndigits,
+ OCS_ECC_INST_CALC_A_MUL_B_MODP);
+ if (rc)
+ goto exit;
+
+	/* Compute x^3 -> store in xxx (w = 3, i.e., POW_CUBE, is the exponent). */
+	w[0] = POW_CUBE;
+ rc = kmb_ecc_do_scalar_op(ecc_dev, xxx, pk->x, w, curve, pk->ndigits,
+ OCS_ECC_INST_CALC_A_POW_B_MODP);
+ if (rc)
+ goto exit;
+
+ /* Do a*x -> store in w. */
+ rc = kmb_ecc_do_scalar_op(ecc_dev, w, curve->a, pk->x, curve,
+ pk->ndigits,
+ OCS_ECC_INST_CALC_A_MUL_B_MODP);
+ if (rc)
+ goto exit;
+
+ /* Do ax + b == w + b; store in w. */
+ rc = kmb_ecc_do_scalar_op(ecc_dev, w, w, curve->b, curve,
+ pk->ndigits,
+ OCS_ECC_INST_CALC_A_ADD_B_MODP);
+ if (rc)
+ goto exit;
+
+ /* x^3 + ax + b == x^3 + w -> store in w. */
+ rc = kmb_ecc_do_scalar_op(ecc_dev, w, xxx, w, curve, pk->ndigits,
+ OCS_ECC_INST_CALC_A_ADD_B_MODP);
+ if (rc)
+ goto exit;
+
+ /* Compare y^2 == x^3 + a·x + b. */
+ rc = vli_cmp(yy, w, pk->ndigits);
+ if (rc)
+ rc = -EINVAL;
+
+exit:
+ memzero_explicit(xxx, sizeof(xxx));
+ memzero_explicit(yy, sizeof(yy));
+ memzero_explicit(w, sizeof(w));
+
+ return rc;
+}
+
+/* SP800-56A section 5.6.2.3.3 full verification */
+static int kmb_ocs_ecc_is_pubkey_valid_full(struct ocs_ecc_dev *ecc_dev,
+ const struct ecc_curve *curve,
+ struct ecc_point *pk)
+{
+ struct ecc_point *nQ;
+ int rc;
+
+ /* Checks 1 through 3 */
+ rc = kmb_ocs_ecc_is_pubkey_valid_partial(ecc_dev, curve, pk);
+ if (rc)
+ return rc;
+
+ /* Check 4: Verify that nQ is the zero point. */
+ nQ = ecc_alloc_point(pk->ndigits);
+ if (!nQ)
+ return -ENOMEM;
+
+ rc = kmb_ecc_point_mult(ecc_dev, nQ, pk, curve->n, curve);
+ if (rc)
+ goto exit;
+
+ if (!ecc_point_is_zero(nQ))
+ rc = -EINVAL;
+
+exit:
+ ecc_free_point(nQ);
+
+ return rc;
+}
+
+static int kmb_ecc_is_key_valid(const struct ecc_curve *curve,
+ const u64 *private_key, size_t private_key_len)
+{
+ size_t ndigits = curve->g.ndigits;
+ u64 one[KMB_ECC_VLI_MAX_DIGITS] = {1};
+ u64 res[KMB_ECC_VLI_MAX_DIGITS];
+
+ if (private_key_len != digits_to_bytes(ndigits))
+ return -EINVAL;
+
+ if (!private_key)
+ return -EINVAL;
+
+ /* Make sure the private key is in the range [2, n-3]. */
+ if (vli_cmp(one, private_key, ndigits) != -1)
+ return -EINVAL;
+
+ vli_sub(res, curve->n, one, ndigits);
+ vli_sub(res, res, one, ndigits);
+ if (vli_cmp(res, private_key, ndigits) != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * ECC private keys are generated using the method of extra random bits,
+ * equivalent to that described in FIPS 186-4, Appendix B.4.1.
+ *
+ * d = (c mod (n - 1)) + 1, where c is a string of random bits, 64 bits
+ * longer than requested.
+ * 0 <= c mod (n - 1) <= n - 2, which implies that
+ * 1 <= d <= n - 1
+ *
+ * This method generates a private key uniformly distributed in the range
+ * [1, n-1].
+ */
+static int kmb_ecc_gen_privkey(const struct ecc_curve *curve, u64 *privkey)
+{
+ size_t nbytes = digits_to_bytes(curve->g.ndigits);
+ u64 priv[KMB_ECC_VLI_MAX_DIGITS];
+ size_t nbits;
+ int rc;
+
+ nbits = vli_num_bits(curve->n, curve->g.ndigits);
+
+ /* Check that N is included in Table 1 of FIPS 186-4, section 6.1.1 */
+ if (nbits < 160 || curve->g.ndigits > ARRAY_SIZE(priv))
+ return -EINVAL;
+
+ /*
+ * FIPS 186-4 recommends that the private key should be obtained from a
+ * RBG with a security strength equal to or greater than the security
+ * strength associated with N.
+ *
+ * The maximum security strength identified by NIST SP800-57pt1r4 for
+ * ECC is 256 (N >= 512).
+ *
+ * This condition is met by the default RNG because it selects a favored
+ * DRBG with a security strength of 256.
+ */
+ if (crypto_get_default_rng())
+ return -EFAULT;
+
+ rc = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes);
+ crypto_put_default_rng();
+ if (rc)
+ goto cleanup;
+
+ rc = kmb_ecc_is_key_valid(curve, priv, nbytes);
+ if (rc)
+ goto cleanup;
+
+ ecc_swap_digits(priv, privkey, curve->g.ndigits);
+
+cleanup:
+ memzero_explicit(&priv, sizeof(priv));
+
+ return rc;
+}
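+
+/*
+ * Worked example of the range argument above (illustrative numbers only):
+ * with n = 11 and c = 25, c mod (n - 1) = 25 mod 10 = 5, so d = 5 + 1 = 6.
+ * In general 0 <= c mod (n - 1) <= n - 2, hence d = (c mod (n - 1)) + 1
+ * always falls in [1, n - 1], i.e., it is a non-zero scalar modulo n.
+ */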
+
+static int kmb_ocs_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
+ unsigned int len)
+{
+ struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
+ struct ecdh params;
+ int rc = 0;
+
+ rc = crypto_ecdh_decode_key(buf, len, &params);
+ if (rc)
+ goto cleanup;
+
+	/* Ensure key size is not bigger than expected. */
+ if (params.key_size > digits_to_bytes(tctx->curve->g.ndigits)) {
+ rc = -EINVAL;
+ goto cleanup;
+ }
+
+	/* Auto-generate the private key if none was provided. */
+ if (!params.key || !params.key_size) {
+ rc = kmb_ecc_gen_privkey(tctx->curve, tctx->private_key);
+ goto cleanup;
+ }
+
+ rc = kmb_ecc_is_key_valid(tctx->curve, (const u64 *)params.key,
+ params.key_size);
+ if (rc)
+ goto cleanup;
+
+ ecc_swap_digits((const u64 *)params.key, tctx->private_key,
+ tctx->curve->g.ndigits);
+cleanup:
+ memzero_explicit(&params, sizeof(params));
+
+ if (rc)
+ tctx->curve = NULL;
+
+ return rc;
+}
+
+/* Compute shared secret. */
+static int kmb_ecc_do_shared_secret(struct ocs_ecc_ctx *tctx,
+ struct kpp_request *req)
+{
+ struct ocs_ecc_dev *ecc_dev = tctx->ecc_dev;
+ const struct ecc_curve *curve = tctx->curve;
+ u64 shared_secret[KMB_ECC_VLI_MAX_DIGITS];
+ u64 pubk_buf[KMB_ECC_VLI_MAX_DIGITS * 2];
+ size_t copied, nbytes, pubk_len;
+ struct ecc_point *pk, *result;
+ int rc;
+
+ nbytes = digits_to_bytes(curve->g.ndigits);
+
+ /* Public key is a point, thus it has two coordinates */
+ pubk_len = 2 * nbytes;
+
+ /* Copy public key from SG list to pubk_buf. */
+ copied = sg_copy_to_buffer(req->src,
+ sg_nents_for_len(req->src, pubk_len),
+ pubk_buf, pubk_len);
+ if (copied != pubk_len)
+ return -EINVAL;
+
+ /* Allocate and initialize public key point. */
+ pk = ecc_alloc_point(curve->g.ndigits);
+ if (!pk)
+ return -ENOMEM;
+
+ ecc_swap_digits(pubk_buf, pk->x, curve->g.ndigits);
+ ecc_swap_digits(&pubk_buf[curve->g.ndigits], pk->y, curve->g.ndigits);
+
+ /*
+	 * Check the public key for the following:
+	 * Check 1: Verify key is not the zero point.
+	 * Check 2: Verify key is in the range [0, p-1].
+	 * Check 3: Verify that y^2 == (x^3 + a·x + b) mod p.
+ */
+ rc = kmb_ocs_ecc_is_pubkey_valid_partial(ecc_dev, curve, pk);
+ if (rc)
+ goto exit_free_pk;
+
+ /* Allocate point for storing computed shared secret. */
+ result = ecc_alloc_point(pk->ndigits);
+ if (!result) {
+ rc = -ENOMEM;
+ goto exit_free_pk;
+ }
+
+ /* Calculate the shared secret.*/
+ rc = kmb_ecc_point_mult(ecc_dev, result, pk, tctx->private_key, curve);
+ if (rc)
+ goto exit_free_result;
+
+ if (ecc_point_is_zero(result)) {
+ rc = -EFAULT;
+ goto exit_free_result;
+ }
+
+ /* Copy shared secret from point to buffer. */
+ ecc_swap_digits(result->x, shared_secret, result->ndigits);
+
+	/* The request might ask for fewer bytes than we have. */
+ nbytes = min_t(size_t, nbytes, req->dst_len);
+
+ copied = sg_copy_from_buffer(req->dst,
+ sg_nents_for_len(req->dst, nbytes),
+ shared_secret, nbytes);
+
+ if (copied != nbytes)
+ rc = -EINVAL;
+
+ memzero_explicit(shared_secret, sizeof(shared_secret));
+
+exit_free_result:
+ ecc_free_point(result);
+
+exit_free_pk:
+ ecc_free_point(pk);
+
+ return rc;
+}
+
+/* Compute public key. */
+static int kmb_ecc_do_public_key(struct ocs_ecc_ctx *tctx,
+ struct kpp_request *req)
+{
+ const struct ecc_curve *curve = tctx->curve;
+ u64 pubk_buf[KMB_ECC_VLI_MAX_DIGITS * 2];
+ struct ecc_point *pk;
+ size_t pubk_len;
+ size_t copied;
+ int rc;
+
+ /* Public key is a point, so it has double the digits. */
+ pubk_len = 2 * digits_to_bytes(curve->g.ndigits);
+
+ pk = ecc_alloc_point(curve->g.ndigits);
+ if (!pk)
+ return -ENOMEM;
+
+ /* Public Key(pk) = priv * G. */
+ rc = kmb_ecc_point_mult(tctx->ecc_dev, pk, &curve->g, tctx->private_key,
+ curve);
+ if (rc)
+ goto exit;
+
+ /* SP800-56A rev 3 5.6.2.1.3 key check */
+ if (kmb_ocs_ecc_is_pubkey_valid_full(tctx->ecc_dev, curve, pk)) {
+ rc = -EAGAIN;
+ goto exit;
+ }
+
+ /* Copy public key from point to buffer. */
+ ecc_swap_digits(pk->x, pubk_buf, pk->ndigits);
+ ecc_swap_digits(pk->y, &pubk_buf[pk->ndigits], pk->ndigits);
+
+ /* Copy public key to req->dst. */
+ copied = sg_copy_from_buffer(req->dst,
+ sg_nents_for_len(req->dst, pubk_len),
+ pubk_buf, pubk_len);
+
+ if (copied != pubk_len)
+ rc = -EINVAL;
+
+exit:
+ ecc_free_point(pk);
+
+ return rc;
+}
+
+static int kmb_ocs_ecc_do_one_request(struct crypto_engine *engine,
+ void *areq)
+{
+ struct kpp_request *req = container_of(areq, struct kpp_request, base);
+ struct ocs_ecc_ctx *tctx = kmb_ocs_ecc_tctx(req);
+ struct ocs_ecc_dev *ecc_dev = tctx->ecc_dev;
+ int rc;
+
+ if (req->src)
+ rc = kmb_ecc_do_shared_secret(tctx, req);
+ else
+ rc = kmb_ecc_do_public_key(tctx, req);
+
+ crypto_finalize_kpp_request(ecc_dev->engine, req, rc);
+
+ return 0;
+}
+
+static int kmb_ocs_ecdh_generate_public_key(struct kpp_request *req)
+{
+ struct ocs_ecc_ctx *tctx = kmb_ocs_ecc_tctx(req);
+ const struct ecc_curve *curve = tctx->curve;
+
+ /* Ensure kmb_ocs_ecdh_set_secret() has been successfully called. */
+ if (!tctx->curve)
+ return -EINVAL;
+
+ /* Ensure dst is present. */
+ if (!req->dst)
+ return -EINVAL;
+
+ /* Check the request dst is big enough to hold the public key. */
+ if (req->dst_len < (2 * digits_to_bytes(curve->g.ndigits)))
+ return -EINVAL;
+
+	/* 'src' must not be present when generating the public key. */
+ if (req->src)
+ return -EINVAL;
+
+ return crypto_transfer_kpp_request_to_engine(tctx->ecc_dev->engine,
+ req);
+}
+
+static int kmb_ocs_ecdh_compute_shared_secret(struct kpp_request *req)
+{
+ struct ocs_ecc_ctx *tctx = kmb_ocs_ecc_tctx(req);
+ const struct ecc_curve *curve = tctx->curve;
+
+ /* Ensure kmb_ocs_ecdh_set_secret() has been successfully called. */
+ if (!tctx->curve)
+ return -EINVAL;
+
+ /* Ensure dst is present. */
+ if (!req->dst)
+ return -EINVAL;
+
+ /* Ensure src is present. */
+ if (!req->src)
+ return -EINVAL;
+
+ /*
+	 * req->src is expected to be the other side's public key, so its length
+ * must be 2 * coordinate size (in bytes).
+ */
+ if (req->src_len != 2 * digits_to_bytes(curve->g.ndigits))
+ return -EINVAL;
+
+ return crypto_transfer_kpp_request_to_engine(tctx->ecc_dev->engine,
+ req);
+}
+
+static int kmb_ecc_tctx_init(struct ocs_ecc_ctx *tctx, unsigned int curve_id)
+{
+ memset(tctx, 0, sizeof(*tctx));
+
+ tctx->ecc_dev = kmb_ocs_ecc_find_dev(tctx);
+
+ if (IS_ERR(tctx->ecc_dev)) {
+ pr_err("Failed to find the device : %ld\n",
+ PTR_ERR(tctx->ecc_dev));
+ return PTR_ERR(tctx->ecc_dev);
+ }
+
+ tctx->curve = ecc_get_curve(curve_id);
+ if (!tctx->curve)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int kmb_ocs_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
+{
+ struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
+
+ return kmb_ecc_tctx_init(tctx, ECC_CURVE_NIST_P256);
+}
+
+static int kmb_ocs_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
+{
+ struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
+
+ return kmb_ecc_tctx_init(tctx, ECC_CURVE_NIST_P384);
+}
+
+static void kmb_ocs_ecdh_exit_tfm(struct crypto_kpp *tfm)
+{
+ struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
+
+ memzero_explicit(tctx->private_key, sizeof(*tctx->private_key));
+}
+
+static unsigned int kmb_ocs_ecdh_max_size(struct crypto_kpp *tfm)
+{
+ struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
+
+ /* Public key is made of two coordinates, so double the digits. */
+ return digits_to_bytes(tctx->curve->g.ndigits) * 2;
+}
+
+static struct kpp_engine_alg ocs_ecdh_p256 = {
+ .base.set_secret = kmb_ocs_ecdh_set_secret,
+ .base.generate_public_key = kmb_ocs_ecdh_generate_public_key,
+ .base.compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret,
+ .base.init = kmb_ocs_ecdh_nist_p256_init_tfm,
+ .base.exit = kmb_ocs_ecdh_exit_tfm,
+ .base.max_size = kmb_ocs_ecdh_max_size,
+ .base.base = {
+ .cra_name = "ecdh-nist-p256",
+ .cra_driver_name = "ecdh-nist-p256-keembay-ocs",
+ .cra_priority = KMB_OCS_ECC_PRIORITY,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct ocs_ecc_ctx),
+ },
+ .op.do_one_request = kmb_ocs_ecc_do_one_request,
+};
+
+static struct kpp_engine_alg ocs_ecdh_p384 = {
+ .base.set_secret = kmb_ocs_ecdh_set_secret,
+ .base.generate_public_key = kmb_ocs_ecdh_generate_public_key,
+ .base.compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret,
+ .base.init = kmb_ocs_ecdh_nist_p384_init_tfm,
+ .base.exit = kmb_ocs_ecdh_exit_tfm,
+ .base.max_size = kmb_ocs_ecdh_max_size,
+ .base.base = {
+ .cra_name = "ecdh-nist-p384",
+ .cra_driver_name = "ecdh-nist-p384-keembay-ocs",
+ .cra_priority = KMB_OCS_ECC_PRIORITY,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct ocs_ecc_ctx),
+ },
+ .op.do_one_request = kmb_ocs_ecc_do_one_request,
+};
+
+static irqreturn_t ocs_ecc_irq_handler(int irq, void *dev_id)
+{
+ struct ocs_ecc_dev *ecc_dev = dev_id;
+ u32 status;
+
+ /*
+ * Read the status register and write it back to clear the
+ * DONE_INT_STATUS bit.
+ */
+ status = ioread32(ecc_dev->base_reg + HW_OFFS_OCS_ECC_ISR);
+ iowrite32(status, ecc_dev->base_reg + HW_OFFS_OCS_ECC_ISR);
+
+ if (!(status & HW_OCS_ECC_ISR_INT_STATUS_DONE))
+ return IRQ_NONE;
+
+ complete(&ecc_dev->irq_done);
+
+ return IRQ_HANDLED;
+}
+
+static int kmb_ocs_ecc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ocs_ecc_dev *ecc_dev;
+ int rc;
+
+ ecc_dev = devm_kzalloc(dev, sizeof(*ecc_dev), GFP_KERNEL);
+ if (!ecc_dev)
+ return -ENOMEM;
+
+ ecc_dev->dev = dev;
+
+ platform_set_drvdata(pdev, ecc_dev);
+
+ INIT_LIST_HEAD(&ecc_dev->list);
+ init_completion(&ecc_dev->irq_done);
+
+ /* Get base register address. */
+ ecc_dev->base_reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ecc_dev->base_reg)) {
+ dev_err(dev, "Failed to get base address\n");
+ rc = PTR_ERR(ecc_dev->base_reg);
+ goto list_del;
+ }
+
+ /* Get and request IRQ */
+ ecc_dev->irq = platform_get_irq(pdev, 0);
+ if (ecc_dev->irq < 0) {
+ rc = ecc_dev->irq;
+ goto list_del;
+ }
+
+ rc = devm_request_threaded_irq(dev, ecc_dev->irq, ocs_ecc_irq_handler,
+ NULL, 0, "keembay-ocs-ecc", ecc_dev);
+ if (rc < 0) {
+ dev_err(dev, "Could not request IRQ\n");
+ goto list_del;
+ }
+
+ /* Add device to the list of OCS ECC devices. */
+ spin_lock(&ocs_ecc.lock);
+ list_add_tail(&ecc_dev->list, &ocs_ecc.dev_list);
+ spin_unlock(&ocs_ecc.lock);
+
+ /* Initialize crypto engine. */
+ ecc_dev->engine = crypto_engine_alloc_init(dev, 1);
+ if (!ecc_dev->engine) {
+ dev_err(dev, "Could not allocate crypto engine\n");
+ rc = -ENOMEM;
+ goto list_del;
+ }
+
+ rc = crypto_engine_start(ecc_dev->engine);
+ if (rc) {
+ dev_err(dev, "Could not start crypto engine\n");
+ goto cleanup;
+ }
+
+ /* Register the KPP algo. */
+ rc = crypto_engine_register_kpp(&ocs_ecdh_p256);
+ if (rc) {
+ dev_err(dev,
+ "Could not register OCS algorithms with Crypto API\n");
+ goto cleanup;
+ }
+
+ rc = crypto_engine_register_kpp(&ocs_ecdh_p384);
+ if (rc) {
+ dev_err(dev,
+ "Could not register OCS algorithms with Crypto API\n");
+ goto ocs_ecdh_p384_error;
+ }
+
+ return 0;
+
+ocs_ecdh_p384_error:
+ crypto_engine_unregister_kpp(&ocs_ecdh_p256);
+
+cleanup:
+ crypto_engine_exit(ecc_dev->engine);
+
+list_del:
+ spin_lock(&ocs_ecc.lock);
+ list_del(&ecc_dev->list);
+ spin_unlock(&ocs_ecc.lock);
+
+ return rc;
+}
+
+static int kmb_ocs_ecc_remove(struct platform_device *pdev)
+{
+ struct ocs_ecc_dev *ecc_dev;
+
+ ecc_dev = platform_get_drvdata(pdev);
+
+ crypto_engine_unregister_kpp(&ocs_ecdh_p384);
+ crypto_engine_unregister_kpp(&ocs_ecdh_p256);
+
+ spin_lock(&ocs_ecc.lock);
+ list_del(&ecc_dev->list);
+ spin_unlock(&ocs_ecc.lock);
+
+ crypto_engine_exit(ecc_dev->engine);
+
+ return 0;
+}
+
+/* Device tree driver match. */
+static const struct of_device_id kmb_ocs_ecc_of_match[] = {
+ {
+ .compatible = "intel,keembay-ocs-ecc",
+ },
+ {}
+};
+
+/* The OCS driver is a platform device. */
+static struct platform_driver kmb_ocs_ecc_driver = {
+ .probe = kmb_ocs_ecc_probe,
+ .remove = kmb_ocs_ecc_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = kmb_ocs_ecc_of_match,
+ },
+};
+module_platform_driver(kmb_ocs_ecc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel Keem Bay OCS ECC Driver");
+MODULE_ALIAS_CRYPTO("ecdh-nist-p256");
+MODULE_ALIAS_CRYPTO("ecdh-nist-p384");
+MODULE_ALIAS_CRYPTO("ecdh-nist-p256-keembay-ocs");
+MODULE_ALIAS_CRYPTO("ecdh-nist-p384-keembay-ocs");
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
new file mode 100644
index 0000000000..daba8ca05d
--- /dev/null
+++ b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
@@ -0,0 +1,1261 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Keem Bay OCS HCU Crypto Driver.
+ *
+ * Copyright (C) 2018-2020 Intel Corporation
+ */
+
+#include <crypto/engine.h>
+#include <crypto/hmac.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha2.h>
+#include <crypto/sm3.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+
+#include "ocs-hcu.h"
+
+#define DRV_NAME "keembay-ocs-hcu"
+
+/* Flag marking a final request. */
+#define REQ_FINAL BIT(0)
+/* Flag marking a HMAC request. */
+#define REQ_FLAGS_HMAC BIT(1)
+/* Flag set when HW HMAC is being used. */
+#define REQ_FLAGS_HMAC_HW BIT(2)
+/* Flag set when SW HMAC is being used. */
+#define REQ_FLAGS_HMAC_SW BIT(3)
+
+/**
+ * struct ocs_hcu_ctx - OCS HCU Transform context.
+ * @hcu_dev: The OCS HCU device used by the transformation.
+ * @key: The key (used only for HMAC transformations).
+ * @key_len: The length of the key.
+ * @is_sm3_tfm: Whether or not this is an SM3 transformation.
+ * @is_hmac_tfm: Whether or not this is a HMAC transformation.
+ */
+struct ocs_hcu_ctx {
+ struct ocs_hcu_dev *hcu_dev;
+ u8 key[SHA512_BLOCK_SIZE];
+ size_t key_len;
+ bool is_sm3_tfm;
+ bool is_hmac_tfm;
+};
+
+/**
+ * struct ocs_hcu_rctx - Context for the request.
+ * @hcu_dev: OCS HCU device to be used to service the request.
+ * @flags: Flags tracking request status.
+ * @algo: Algorithm to use for the request.
+ * @blk_sz: Block size of the transformation / request.
+ * @dig_sz: Digest size of the transformation / request.
+ * @dma_list: OCS DMA linked list.
+ * @hash_ctx: OCS HCU hashing context.
+ * @buffer: Buffer used to store a partial block of data and SW HMAC
+ * artifacts (ipad, opad, etc.).
+ * @buf_cnt: Number of bytes currently stored in the buffer.
+ * @buf_dma_addr: The DMA address of @buffer (when mapped).
+ * @buf_dma_count: The number of bytes in @buffer currently DMA-mapped.
+ * @sg: Head of the scatterlist entries containing data.
+ * @sg_data_total: Total data in the SG list at any time.
+ * @sg_data_offset: Offset into the data of the current individual SG node.
+ * @sg_dma_nents: Number of sg entries mapped in dma_list.
+ */
+struct ocs_hcu_rctx {
+ struct ocs_hcu_dev *hcu_dev;
+ u32 flags;
+ enum ocs_hcu_algo algo;
+ size_t blk_sz;
+ size_t dig_sz;
+ struct ocs_hcu_dma_list *dma_list;
+ struct ocs_hcu_hash_ctx hash_ctx;
+ /*
+ * Buffer is double the block size because we need space for SW HMAC
+ * artifacts, i.e:
+ * - ipad (1 block) + a possible partial block of data.
+ * - opad (1 block) + digest of H(k ^ ipad || m)
+ */
+ u8 buffer[2 * SHA512_BLOCK_SIZE];
+ size_t buf_cnt;
+ dma_addr_t buf_dma_addr;
+ size_t buf_dma_count;
+ struct scatterlist *sg;
+ unsigned int sg_data_total;
+ unsigned int sg_data_offset;
+ unsigned int sg_dma_nents;
+};
+
+/**
+ * struct ocs_hcu_drv - Driver data
+ * @dev_list: The list of HCU devices.
+ * @lock: The lock protecting dev_list.
+ */
+struct ocs_hcu_drv {
+ struct list_head dev_list;
+ spinlock_t lock; /* Protects dev_list. */
+};
+
+static struct ocs_hcu_drv ocs_hcu = {
+ .dev_list = LIST_HEAD_INIT(ocs_hcu.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(ocs_hcu.lock),
+};
+
+/*
+ * Return the total amount of data in the request; that is: the data in the
+ * request buffer + the data in the sg list.
+ */
+static inline unsigned int kmb_get_total_data(struct ocs_hcu_rctx *rctx)
+{
+ return rctx->sg_data_total + rctx->buf_cnt;
+}
+
+/* Move remaining content of scatter-gather list to context buffer. */
+static int flush_sg_to_ocs_buffer(struct ocs_hcu_rctx *rctx)
+{
+ size_t count;
+
+ if (rctx->sg_data_total > (sizeof(rctx->buffer) - rctx->buf_cnt)) {
+ WARN(1, "%s: sg data does not fit in buffer\n", __func__);
+ return -EINVAL;
+ }
+
+ while (rctx->sg_data_total) {
+ if (!rctx->sg) {
+ WARN(1, "%s: unexpected NULL sg\n", __func__);
+ return -EINVAL;
+ }
+ /*
+ * If current sg has been fully processed, skip to the next
+ * one.
+ */
+ if (rctx->sg_data_offset == rctx->sg->length) {
+ rctx->sg = sg_next(rctx->sg);
+ rctx->sg_data_offset = 0;
+ continue;
+ }
+ /*
+		 * Determine how much data to copy from this sg node: the
+		 * minimum of the data left in the node and the total data
+		 * remaining in the request.
+ */
+ count = min(rctx->sg->length - rctx->sg_data_offset,
+ rctx->sg_data_total);
+ /* Copy from scatter-list entry to context buffer. */
+ scatterwalk_map_and_copy(&rctx->buffer[rctx->buf_cnt],
+ rctx->sg, rctx->sg_data_offset,
+ count, 0);
+
+ rctx->sg_data_offset += count;
+ rctx->sg_data_total -= count;
+ rctx->buf_cnt += count;
+ }
+
+ return 0;
+}
+
+static struct ocs_hcu_dev *kmb_ocs_hcu_find_dev(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ocs_hcu_ctx *tctx = crypto_ahash_ctx(tfm);
+
+ /* If the HCU device for the request was previously set, return it. */
+ if (tctx->hcu_dev)
+ return tctx->hcu_dev;
+
+ /*
+ * Otherwise, get the first HCU device available (there should be one
+ * and only one device).
+ */
+ spin_lock_bh(&ocs_hcu.lock);
+ tctx->hcu_dev = list_first_entry_or_null(&ocs_hcu.dev_list,
+ struct ocs_hcu_dev,
+ list);
+ spin_unlock_bh(&ocs_hcu.lock);
+
+ return tctx->hcu_dev;
+}
+
+/* Free OCS DMA linked list and DMA-able context buffer. */
+static void kmb_ocs_hcu_dma_cleanup(struct ahash_request *req,
+ struct ocs_hcu_rctx *rctx)
+{
+ struct ocs_hcu_dev *hcu_dev = rctx->hcu_dev;
+ struct device *dev = hcu_dev->dev;
+
+ /* Unmap rctx->buffer (if mapped). */
+ if (rctx->buf_dma_count) {
+ dma_unmap_single(dev, rctx->buf_dma_addr, rctx->buf_dma_count,
+ DMA_TO_DEVICE);
+ rctx->buf_dma_count = 0;
+ }
+
+ /* Unmap req->src (if mapped). */
+ if (rctx->sg_dma_nents) {
+ dma_unmap_sg(dev, req->src, rctx->sg_dma_nents, DMA_TO_DEVICE);
+ rctx->sg_dma_nents = 0;
+ }
+
+ /* Free dma_list (if allocated). */
+ if (rctx->dma_list) {
+ ocs_hcu_dma_list_free(hcu_dev, rctx->dma_list);
+ rctx->dma_list = NULL;
+ }
+}
+
+/*
+ * Prepare for DMA operation:
+ * - DMA-map request context buffer (if needed)
+ * - DMA-map SG list (only the entries to be processed, see note below)
+ * - Allocate OCS HCU DMA linked list (number of elements = SG entries to
+ * process + context buffer (if not empty)).
+ * - Add DMA-mapped request context buffer to OCS HCU DMA list.
+ * - Add SG entries to DMA list.
+ *
+ * Note: if this is a final request, we process all the data in the SG list,
+ * otherwise we can only process up to the maximum amount of block-aligned data
+ * (the remainder will be put into the context buffer and processed in the next
+ * request).
+ */
+static int kmb_ocs_dma_prepare(struct ahash_request *req)
+{
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
+ struct device *dev = rctx->hcu_dev->dev;
+ unsigned int remainder = 0;
+ unsigned int total;
+ size_t nents;
+ size_t count;
+ int rc;
+ int i;
+
+ /* This function should be called only when there is data to process. */
+ total = kmb_get_total_data(rctx);
+ if (!total)
+ return -EINVAL;
+
+ /*
+ * If this is not a final DMA (terminated DMA), the data passed to the
+ * HCU must be aligned to the block size; compute the remainder data to
+ * be processed in the next request.
+ */
+ if (!(rctx->flags & REQ_FINAL))
+ remainder = total % rctx->blk_sz;
+
+ /* Determine the number of scatter gather list entries to process. */
+ nents = sg_nents_for_len(req->src, rctx->sg_data_total - remainder);
+
+ /* If there are entries to process, map them. */
+ if (nents) {
+ rctx->sg_dma_nents = dma_map_sg(dev, req->src, nents,
+ DMA_TO_DEVICE);
+ if (!rctx->sg_dma_nents) {
+ dev_err(dev, "Failed to MAP SG\n");
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+ /*
+ * The value returned by dma_map_sg() can be < nents; so update
+ * nents accordingly.
+ */
+ nents = rctx->sg_dma_nents;
+ }
+
+ /*
+ * If context buffer is not empty, map it and add extra DMA entry for
+ * it.
+ */
+ if (rctx->buf_cnt) {
+ rctx->buf_dma_addr = dma_map_single(dev, rctx->buffer,
+ rctx->buf_cnt,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, rctx->buf_dma_addr)) {
+ dev_err(dev, "Failed to map request context buffer\n");
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+ rctx->buf_dma_count = rctx->buf_cnt;
+ /* Increase number of dma entries. */
+ nents++;
+ }
+
+ /* Allocate OCS HCU DMA list. */
+ rctx->dma_list = ocs_hcu_dma_list_alloc(rctx->hcu_dev, nents);
+ if (!rctx->dma_list) {
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+
+ /* Add request context buffer (if previously DMA-mapped) */
+ if (rctx->buf_dma_count) {
+ rc = ocs_hcu_dma_list_add_tail(rctx->hcu_dev, rctx->dma_list,
+ rctx->buf_dma_addr,
+ rctx->buf_dma_count);
+ if (rc)
+ goto cleanup;
+ }
+
+ /* Add the SG nodes to be processed to the DMA linked list. */
+ for_each_sg(req->src, rctx->sg, rctx->sg_dma_nents, i) {
+ /*
+ * The number of bytes to add to the list entry is the minimum
+ * between:
+ * - The DMA length of the SG entry.
+ * - The data left to be processed.
+ */
+ count = min(rctx->sg_data_total - remainder,
+ sg_dma_len(rctx->sg) - rctx->sg_data_offset);
+ /*
+ * Do not create a zero length DMA descriptor. Check in case of
+ * zero length SG node.
+ */
+ if (count == 0)
+ continue;
+ /* Add sg to HCU DMA list. */
+ rc = ocs_hcu_dma_list_add_tail(rctx->hcu_dev,
+ rctx->dma_list,
+ rctx->sg->dma_address,
+ count);
+ if (rc)
+ goto cleanup;
+
+ /* Update amount of data remaining in SG list. */
+ rctx->sg_data_total -= count;
+
+ /*
+ * If remaining data is equal to remainder (note: 'less than'
+ * case should never happen in practice), we are done: update
+ * offset and exit the loop.
+ */
+ if (rctx->sg_data_total <= remainder) {
+ WARN_ON(rctx->sg_data_total < remainder);
+ rctx->sg_data_offset += count;
+ break;
+ }
+
+ /*
+		 * If we get here, it is because we need to process the next
+		 * sg in the list; set the offset within the sg to 0.
+ */
+ rctx->sg_data_offset = 0;
+ }
+
+ return 0;
+cleanup:
+ dev_err(dev, "Failed to prepare DMA.\n");
+ kmb_ocs_hcu_dma_cleanup(req, rctx);
+
+ return rc;
+}
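+
+/*
+ * Worked example (illustrative numbers only): for a non-final request with
+ * blk_sz = 64, buf_cnt = 10 and sg_data_total = 100, total = 110 and
+ * remainder = 110 % 64 = 46. The DMA list then covers 64 bytes (10 from the
+ * context buffer plus 54 from the SG list), while the remaining 46 bytes stay
+ * in the SG list and are later flushed to the context buffer.
+ */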
+
+static void kmb_ocs_hcu_secure_cleanup(struct ahash_request *req)
+{
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
+
+ /* Clear buffer of any data. */
+ memzero_explicit(rctx->buffer, sizeof(rctx->buffer));
+}
+
+static int kmb_ocs_hcu_handle_queue(struct ahash_request *req)
+{
+ struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
+
+ if (!hcu_dev)
+ return -ENOENT;
+
+ return crypto_transfer_hash_request_to_engine(hcu_dev->engine, req);
+}
+
+static int prepare_ipad(struct ahash_request *req)
+{
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
+ int i;
+
+ WARN(rctx->buf_cnt, "%s: Context buffer is not empty\n", __func__);
+ WARN(!(rctx->flags & REQ_FLAGS_HMAC_SW),
+ "%s: HMAC_SW flag is not set\n", __func__);
+ /*
+ * Key length must be equal to block size. If key is shorter,
+	 * we pad it with zeros (note: the key cannot be longer, since
+ * longer keys are hashed by kmb_ocs_hcu_setkey()).
+ */
+ if (ctx->key_len > rctx->blk_sz) {
+ WARN(1, "%s: Invalid key length in tfm context\n", __func__);
+ return -EINVAL;
+ }
+ memzero_explicit(&ctx->key[ctx->key_len],
+ rctx->blk_sz - ctx->key_len);
+ ctx->key_len = rctx->blk_sz;
+ /*
+	 * Prepare the IPAD for HMAC. Only done for the first block.
+	 * HMAC(k, m) = H(k ^ opad || H(k ^ ipad || m))
+	 * k ^ ipad will be the first hashed block.
+	 * k ^ opad will be computed in the final request.
+	 * Only needed when not using HW HMAC.
+ */
+ for (i = 0; i < rctx->blk_sz; i++)
+ rctx->buffer[i] = ctx->key[i] ^ HMAC_IPAD_VALUE;
+ rctx->buf_cnt = rctx->blk_sz;
+
+ return 0;
+}
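+
+/*
+ * SW-assisted HMAC flow (sketch of how the pieces below fit together; B is
+ * the block size, D the digest size):
+ *  1. prepare_ipad() fills rctx->buffer with k ^ ipad (B bytes).
+ *  2. update()/finup() requests hash (k ^ ipad || m) on the HCU.
+ *  3. The final step rebuilds rctx->buffer as (k ^ opad) || H(k ^ ipad || m)
+ *     and hashes those B + D bytes to obtain HMAC(k, m).
+ */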
+
+static int kmb_ocs_hcu_do_one_request(struct crypto_engine *engine, void *areq)
+{
+ struct ahash_request *req = container_of(areq, struct ahash_request,
+ base);
+ struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
+ struct ocs_hcu_ctx *tctx = crypto_ahash_ctx(tfm);
+ int rc;
+ int i;
+
+ if (!hcu_dev) {
+ rc = -ENOENT;
+ goto error;
+ }
+
+ /*
+ * If hardware HMAC flag is set, perform HMAC in hardware.
+ *
+ * NOTE: this flag implies REQ_FINAL && kmb_get_total_data(rctx)
+ */
+ if (rctx->flags & REQ_FLAGS_HMAC_HW) {
+ /* Map input data into the HCU DMA linked list. */
+ rc = kmb_ocs_dma_prepare(req);
+ if (rc)
+ goto error;
+
+ rc = ocs_hcu_hmac(hcu_dev, rctx->algo, tctx->key, tctx->key_len,
+ rctx->dma_list, req->result, rctx->dig_sz);
+
+ /* Unmap data and free DMA list regardless of return code. */
+ kmb_ocs_hcu_dma_cleanup(req, rctx);
+
+ /* Process previous return code. */
+ if (rc)
+ goto error;
+
+ goto done;
+ }
+
+ /* Handle update request case. */
+ if (!(rctx->flags & REQ_FINAL)) {
+ /* Update should always have input data. */
+ if (!kmb_get_total_data(rctx))
+ return -EINVAL;
+
+ /* Map input data into the HCU DMA linked list. */
+ rc = kmb_ocs_dma_prepare(req);
+ if (rc)
+ goto error;
+
+ /* Do hashing step. */
+ rc = ocs_hcu_hash_update(hcu_dev, &rctx->hash_ctx,
+ rctx->dma_list);
+
+ /* Unmap data and free DMA list regardless of return code. */
+ kmb_ocs_hcu_dma_cleanup(req, rctx);
+
+ /* Process previous return code. */
+ if (rc)
+ goto error;
+
+ /*
+ * Reset request buffer count (data in the buffer was just
+ * processed).
+ */
+ rctx->buf_cnt = 0;
+ /*
+ * Move remaining sg data into the request buffer, so that it
+ * will be processed during the next request.
+ *
+ * NOTE: we have remaining data if kmb_get_total_data() was not
+ * a multiple of block size.
+ */
+ rc = flush_sg_to_ocs_buffer(rctx);
+ if (rc)
+ goto error;
+
+ goto done;
+ }
+
+ /* If we get here, this is a final request. */
+
+ /* If there is data to process, use finup. */
+ if (kmb_get_total_data(rctx)) {
+ /* Map input data into the HCU DMA linked list. */
+ rc = kmb_ocs_dma_prepare(req);
+ if (rc)
+ goto error;
+
+ /* Do hashing step. */
+ rc = ocs_hcu_hash_finup(hcu_dev, &rctx->hash_ctx,
+ rctx->dma_list,
+ req->result, rctx->dig_sz);
+ /* Free DMA list regardless of return code. */
+ kmb_ocs_hcu_dma_cleanup(req, rctx);
+
+ /* Process previous return code. */
+ if (rc)
+ goto error;
+
+ } else { /* Otherwise (if we have no data), use final. */
+ rc = ocs_hcu_hash_final(hcu_dev, &rctx->hash_ctx, req->result,
+ rctx->dig_sz);
+ if (rc)
+ goto error;
+ }
+
+ /*
+ * If we are finalizing a SW HMAC request, we just computed the result
+ * of: H(k ^ ipad || m).
+ *
+ * We now need to complete the HMAC calculation with the OPAD step,
+ * that is, we need to compute H(k ^ opad || digest), where digest is
+ * the digest we just obtained, i.e., H(k ^ ipad || m).
+ */
+ if (rctx->flags & REQ_FLAGS_HMAC_SW) {
+ /*
+ * Compute k ^ opad and store it in the request buffer (which
+ * is not used anymore at this point).
+		 * Note: the key has already been padded / hashed (so
+		 * keylen == blksz).
+ */
+ WARN_ON(tctx->key_len != rctx->blk_sz);
+ for (i = 0; i < rctx->blk_sz; i++)
+ rctx->buffer[i] = tctx->key[i] ^ HMAC_OPAD_VALUE;
+ /* Now append the digest to the rest of the buffer. */
+ for (i = 0; (i < rctx->dig_sz); i++)
+ rctx->buffer[rctx->blk_sz + i] = req->result[i];
+
+ /* Now hash the buffer to obtain the final HMAC. */
+ rc = ocs_hcu_digest(hcu_dev, rctx->algo, rctx->buffer,
+ rctx->blk_sz + rctx->dig_sz, req->result,
+ rctx->dig_sz);
+ if (rc)
+ goto error;
+ }
+
+ /* Perform secure clean-up. */
+ kmb_ocs_hcu_secure_cleanup(req);
+done:
+ crypto_finalize_hash_request(hcu_dev->engine, req, 0);
+
+ return 0;
+
+error:
+ kmb_ocs_hcu_secure_cleanup(req);
+ return rc;
+}
+
+static int kmb_ocs_hcu_init(struct ahash_request *req)
+{
+ struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ if (!hcu_dev)
+ return -ENOENT;
+
+ /* Initialize entire request context to zero. */
+ memset(rctx, 0, sizeof(*rctx));
+
+ rctx->hcu_dev = hcu_dev;
+ rctx->dig_sz = crypto_ahash_digestsize(tfm);
+
+ switch (rctx->dig_sz) {
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
+ case SHA224_DIGEST_SIZE:
+ rctx->blk_sz = SHA224_BLOCK_SIZE;
+ rctx->algo = OCS_HCU_ALGO_SHA224;
+ break;
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
+ case SHA256_DIGEST_SIZE:
+ rctx->blk_sz = SHA256_BLOCK_SIZE;
+ /*
+ * SHA256 and SM3 have the same digest size: use info from tfm
+ * context to find out which one we should use.
+ */
+ rctx->algo = ctx->is_sm3_tfm ? OCS_HCU_ALGO_SM3 :
+ OCS_HCU_ALGO_SHA256;
+ break;
+ case SHA384_DIGEST_SIZE:
+ rctx->blk_sz = SHA384_BLOCK_SIZE;
+ rctx->algo = OCS_HCU_ALGO_SHA384;
+ break;
+ case SHA512_DIGEST_SIZE:
+ rctx->blk_sz = SHA512_BLOCK_SIZE;
+ rctx->algo = OCS_HCU_ALGO_SHA512;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Initialize intermediate data. */
+ ocs_hcu_hash_init(&rctx->hash_ctx, rctx->algo);
+
+ /* If this a HMAC request, set HMAC flag. */
+ if (ctx->is_hmac_tfm)
+ rctx->flags |= REQ_FLAGS_HMAC;
+
+ return 0;
+}
+
+static int kmb_ocs_hcu_update(struct ahash_request *req)
+{
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
+ int rc;
+
+ if (!req->nbytes)
+ return 0;
+
+ rctx->sg_data_total = req->nbytes;
+ rctx->sg_data_offset = 0;
+ rctx->sg = req->src;
+
+ /*
+ * If we are doing HMAC, then we must use SW-assisted HMAC, since HW
+	 * HMAC does not support context switching (it can only be used
+	 * with finup() or digest()).
+ */
+ if (rctx->flags & REQ_FLAGS_HMAC &&
+ !(rctx->flags & REQ_FLAGS_HMAC_SW)) {
+ rctx->flags |= REQ_FLAGS_HMAC_SW;
+ rc = prepare_ipad(req);
+ if (rc)
+ return rc;
+ }
+
+ /*
+ * If remaining sg_data fits into ctx buffer, just copy it there; we'll
+ * process it at the next update() or final().
+ */
+ if (rctx->sg_data_total <= (sizeof(rctx->buffer) - rctx->buf_cnt))
+ return flush_sg_to_ocs_buffer(rctx);
+
+ return kmb_ocs_hcu_handle_queue(req);
+}
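+
+/*
+ * Buffering example (illustrative numbers only): rctx->buffer is 256 bytes
+ * (2 * SHA512_BLOCK_SIZE) for every algorithm. A first update() of 200 bytes
+ * is simply copied into the buffer; a second update() of 100 bytes no longer
+ * fits (100 > 256 - 200), so the request is queued and kmb_ocs_dma_prepare()
+ * maps the block-aligned part, leaving the remainder for the next request.
+ */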
+
+/* Common logic for kmb_ocs_hcu_final() and kmb_ocs_hcu_finup(). */
+static int kmb_ocs_hcu_fin_common(struct ahash_request *req)
+{
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
+ int rc;
+
+ rctx->flags |= REQ_FINAL;
+
+ /*
+ * If this is a HMAC request and, so far, we didn't have to switch to
+ * SW HMAC, check if we can use HW HMAC.
+ */
+ if (rctx->flags & REQ_FLAGS_HMAC &&
+ !(rctx->flags & REQ_FLAGS_HMAC_SW)) {
+ /*
+		 * If we are here, it means we have not processed any data so
+		 * far, so we can use HW HMAC, but only if there is some data
+		 * to process (since OCS HW HMAC does not support zero-length
+		 * messages) and the key length is supported by the hardware
+		 * (OCS HCU HW only supports keys <= 64 bytes); if HW HMAC
+		 * cannot be used, fall back to SW-assisted HMAC.
+ */
+ if (kmb_get_total_data(rctx) &&
+ ctx->key_len <= OCS_HCU_HW_KEY_LEN) {
+ rctx->flags |= REQ_FLAGS_HMAC_HW;
+ } else {
+ rctx->flags |= REQ_FLAGS_HMAC_SW;
+ rc = prepare_ipad(req);
+ if (rc)
+ return rc;
+ }
+ }
+
+ return kmb_ocs_hcu_handle_queue(req);
+}
+
+static int kmb_ocs_hcu_final(struct ahash_request *req)
+{
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
+
+ rctx->sg_data_total = 0;
+ rctx->sg_data_offset = 0;
+ rctx->sg = NULL;
+
+ return kmb_ocs_hcu_fin_common(req);
+}
+
+static int kmb_ocs_hcu_finup(struct ahash_request *req)
+{
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
+
+ rctx->sg_data_total = req->nbytes;
+ rctx->sg_data_offset = 0;
+ rctx->sg = req->src;
+
+ return kmb_ocs_hcu_fin_common(req);
+}
+
+static int kmb_ocs_hcu_digest(struct ahash_request *req)
+{
+ int rc = 0;
+ struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
+
+ if (!hcu_dev)
+ return -ENOENT;
+
+ rc = kmb_ocs_hcu_init(req);
+ if (rc)
+ return rc;
+
+ rc = kmb_ocs_hcu_finup(req);
+
+ return rc;
+}
+
+static int kmb_ocs_hcu_export(struct ahash_request *req, void *out)
+{
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
+
+ /* Intermediate data is always stored and applied per request. */
+ memcpy(out, rctx, sizeof(*rctx));
+
+ return 0;
+}
+
+static int kmb_ocs_hcu_import(struct ahash_request *req, const void *in)
+{
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
+
+ /* Intermediate data is always stored and applied per request. */
+ memcpy(rctx, in, sizeof(*rctx));
+
+ return 0;
+}
+
+static int kmb_ocs_hcu_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ unsigned int digestsize = crypto_ahash_digestsize(tfm);
+ struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
+ size_t blk_sz = crypto_ahash_blocksize(tfm);
+ struct crypto_ahash *ahash_tfm;
+ struct ahash_request *req;
+ struct crypto_wait wait;
+ struct scatterlist sg;
+ const char *alg_name;
+ int rc;
+
+ /*
+ * Key length must be equal to block size:
+ * - If key is shorter, we are done for now (the key will be padded
+ * later on); this is to maximize the use of HW HMAC (which works
+ * only for keys <= 64 bytes).
+ * - If key is longer, we hash it.
+ */
+ if (keylen <= blk_sz) {
+ memcpy(ctx->key, key, keylen);
+ ctx->key_len = keylen;
+ return 0;
+ }
+
+ switch (digestsize) {
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
+ case SHA224_DIGEST_SIZE:
+ alg_name = "sha224-keembay-ocs";
+ break;
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
+ case SHA256_DIGEST_SIZE:
+ alg_name = ctx->is_sm3_tfm ? "sm3-keembay-ocs" :
+ "sha256-keembay-ocs";
+ break;
+ case SHA384_DIGEST_SIZE:
+ alg_name = "sha384-keembay-ocs";
+ break;
+ case SHA512_DIGEST_SIZE:
+ alg_name = "sha512-keembay-ocs";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
+ if (IS_ERR(ahash_tfm))
+ return PTR_ERR(ahash_tfm);
+
+ req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
+ if (!req) {
+ rc = -ENOMEM;
+ goto err_free_ahash;
+ }
+
+ crypto_init_wait(&wait);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+ crypto_ahash_clear_flags(ahash_tfm, ~0);
+
+ sg_init_one(&sg, key, keylen);
+ ahash_request_set_crypt(req, &sg, ctx->key, keylen);
+
+ rc = crypto_wait_req(crypto_ahash_digest(req), &wait);
+ if (rc == 0)
+ ctx->key_len = digestsize;
+
+ ahash_request_free(req);
+err_free_ahash:
+ crypto_free_ahash(ahash_tfm);
+
+ return rc;
+}
+
+/* Set request size and initialize tfm context. */
+static void __cra_init(struct crypto_tfm *tfm, struct ocs_hcu_ctx *ctx)
+{
+ crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
+ sizeof(struct ocs_hcu_rctx));
+}
+
+static int kmb_ocs_hcu_sha_cra_init(struct crypto_tfm *tfm)
+{
+ struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ __cra_init(tfm, ctx);
+
+ return 0;
+}
+
+static int kmb_ocs_hcu_sm3_cra_init(struct crypto_tfm *tfm)
+{
+ struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ __cra_init(tfm, ctx);
+
+ ctx->is_sm3_tfm = true;
+
+ return 0;
+}
+
+static int kmb_ocs_hcu_hmac_sm3_cra_init(struct crypto_tfm *tfm)
+{
+ struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ __cra_init(tfm, ctx);
+
+ ctx->is_sm3_tfm = true;
+ ctx->is_hmac_tfm = true;
+
+ return 0;
+}
+
+static int kmb_ocs_hcu_hmac_cra_init(struct crypto_tfm *tfm)
+{
+ struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ __cra_init(tfm, ctx);
+
+ ctx->is_hmac_tfm = true;
+
+ return 0;
+}
+
+/* Function called when 'tfm' is de-initialized. */
+static void kmb_ocs_hcu_hmac_cra_exit(struct crypto_tfm *tfm)
+{
+ struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ /* Clear the key. */
+ memzero_explicit(ctx->key, sizeof(ctx->key));
+}
+
+static struct ahash_engine_alg ocs_hcu_algs[] = {
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
+{
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_sha_cra_init,
+ }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
+},
+{
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.setkey = kmb_ocs_hcu_setkey,
+ .base.halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "hmac-sha224-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_hmac_cra_init,
+ .cra_exit = kmb_ocs_hcu_hmac_cra_exit,
+ }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
+},
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
+{
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_sha_cra_init,
+ }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
+},
+{
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.setkey = kmb_ocs_hcu_setkey,
+ .base.halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "hmac-sha256-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_hmac_cra_init,
+ .cra_exit = kmb_ocs_hcu_hmac_cra_exit,
+ }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
+},
+{
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.halg = {
+ .digestsize = SM3_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "sm3",
+ .cra_driver_name = "sm3-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SM3_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_sm3_cra_init,
+ }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
+},
+{
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.setkey = kmb_ocs_hcu_setkey,
+ .base.halg = {
+ .digestsize = SM3_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "hmac(sm3)",
+ .cra_driver_name = "hmac-sm3-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SM3_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_hmac_sm3_cra_init,
+ .cra_exit = kmb_ocs_hcu_hmac_cra_exit,
+ }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
+},
+{
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "sha384-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_sha_cra_init,
+ }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
+},
+{
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.setkey = kmb_ocs_hcu_setkey,
+ .base.halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "hmac-sha384-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_hmac_cra_init,
+ .cra_exit = kmb_ocs_hcu_hmac_cra_exit,
+ }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
+},
+{
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "sha512-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_sha_cra_init,
+ }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
+},
+{
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.setkey = kmb_ocs_hcu_setkey,
+ .base.halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "hmac-sha512-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_hmac_cra_init,
+ .cra_exit = kmb_ocs_hcu_hmac_cra_exit,
+ }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
+},
+};
+
+/* Device tree driver match. */
+static const struct of_device_id kmb_ocs_hcu_of_match[] = {
+ {
+ .compatible = "intel,keembay-ocs-hcu",
+ },
+ {}
+};
+
+static int kmb_ocs_hcu_remove(struct platform_device *pdev)
+{
+ struct ocs_hcu_dev *hcu_dev;
+ int rc;
+
+ hcu_dev = platform_get_drvdata(pdev);
+ if (!hcu_dev)
+ return -ENODEV;
+
+ crypto_engine_unregister_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
+
+ rc = crypto_engine_exit(hcu_dev->engine);
+
+ spin_lock_bh(&ocs_hcu.lock);
+ list_del(&hcu_dev->list);
+ spin_unlock_bh(&ocs_hcu.lock);
+
+ return rc;
+}
+
+static int kmb_ocs_hcu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ocs_hcu_dev *hcu_dev;
+ int rc;
+
+ hcu_dev = devm_kzalloc(dev, sizeof(*hcu_dev), GFP_KERNEL);
+ if (!hcu_dev)
+ return -ENOMEM;
+
+ hcu_dev->dev = dev;
+
+ platform_set_drvdata(pdev, hcu_dev);
+ rc = dma_set_mask_and_coherent(&pdev->dev, OCS_HCU_DMA_BIT_MASK);
+ if (rc)
+ return rc;
+
+ hcu_dev->io_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hcu_dev->io_base))
+ return PTR_ERR(hcu_dev->io_base);
+
+ init_completion(&hcu_dev->irq_done);
+
+ /* Get and request IRQ. */
+ hcu_dev->irq = platform_get_irq(pdev, 0);
+ if (hcu_dev->irq < 0)
+ return hcu_dev->irq;
+
+ rc = devm_request_threaded_irq(&pdev->dev, hcu_dev->irq,
+ ocs_hcu_irq_handler, NULL, 0,
+ "keembay-ocs-hcu", hcu_dev);
+ if (rc < 0) {
+ dev_err(dev, "Could not request IRQ.\n");
+ return rc;
+ }
+
+ INIT_LIST_HEAD(&hcu_dev->list);
+
+ spin_lock_bh(&ocs_hcu.lock);
+ list_add_tail(&hcu_dev->list, &ocs_hcu.dev_list);
+ spin_unlock_bh(&ocs_hcu.lock);
+
+ /* Initialize crypto engine */
+ hcu_dev->engine = crypto_engine_alloc_init(dev, 1);
+ if (!hcu_dev->engine) {
+ rc = -ENOMEM;
+ goto list_del;
+ }
+
+ rc = crypto_engine_start(hcu_dev->engine);
+ if (rc) {
+ dev_err(dev, "Could not start engine.\n");
+ goto cleanup;
+ }
+
+ /* Security infrastructure guarantees OCS clock is enabled. */
+
+ rc = crypto_engine_register_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
+ if (rc) {
+ dev_err(dev, "Could not register algorithms.\n");
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ crypto_engine_exit(hcu_dev->engine);
+list_del:
+ spin_lock_bh(&ocs_hcu.lock);
+ list_del(&hcu_dev->list);
+ spin_unlock_bh(&ocs_hcu.lock);
+
+ return rc;
+}
+
+/* The OCS driver is a platform device. */
+static struct platform_driver kmb_ocs_hcu_driver = {
+ .probe = kmb_ocs_hcu_probe,
+ .remove = kmb_ocs_hcu_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = kmb_ocs_hcu_of_match,
+ },
+};
+
+module_platform_driver(kmb_ocs_hcu_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/intel/keembay/ocs-aes.c b/drivers/crypto/intel/keembay/ocs-aes.c
new file mode 100644
index 0000000000..be9f32fc8f
--- /dev/null
+++ b/drivers/crypto/intel/keembay/ocs-aes.c
@@ -0,0 +1,1489 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Keem Bay OCS AES Crypto Driver.
+ *
+ * Copyright (C) 2018-2020 Intel Corporation
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/swab.h>
+
+#include <asm/byteorder.h>
+#include <asm/errno.h>
+
+#include <crypto/aes.h>
+#include <crypto/gcm.h>
+
+#include "ocs-aes.h"
+
+#define AES_COMMAND_OFFSET 0x0000
+#define AES_KEY_0_OFFSET 0x0004
+#define AES_KEY_1_OFFSET 0x0008
+#define AES_KEY_2_OFFSET 0x000C
+#define AES_KEY_3_OFFSET 0x0010
+#define AES_KEY_4_OFFSET 0x0014
+#define AES_KEY_5_OFFSET 0x0018
+#define AES_KEY_6_OFFSET 0x001C
+#define AES_KEY_7_OFFSET 0x0020
+#define AES_IV_0_OFFSET 0x0024
+#define AES_IV_1_OFFSET 0x0028
+#define AES_IV_2_OFFSET 0x002C
+#define AES_IV_3_OFFSET 0x0030
+#define AES_ACTIVE_OFFSET 0x0034
+#define AES_STATUS_OFFSET 0x0038
+#define AES_KEY_SIZE_OFFSET 0x0044
+#define AES_IER_OFFSET 0x0048
+#define AES_ISR_OFFSET 0x005C
+#define AES_MULTIPURPOSE1_0_OFFSET 0x0200
+#define AES_MULTIPURPOSE1_1_OFFSET 0x0204
+#define AES_MULTIPURPOSE1_2_OFFSET 0x0208
+#define AES_MULTIPURPOSE1_3_OFFSET 0x020C
+#define AES_MULTIPURPOSE2_0_OFFSET 0x0220
+#define AES_MULTIPURPOSE2_1_OFFSET 0x0224
+#define AES_MULTIPURPOSE2_2_OFFSET 0x0228
+#define AES_MULTIPURPOSE2_3_OFFSET 0x022C
+#define AES_BYTE_ORDER_CFG_OFFSET 0x02C0
+#define AES_TLEN_OFFSET 0x0300
+#define AES_T_MAC_0_OFFSET 0x0304
+#define AES_T_MAC_1_OFFSET 0x0308
+#define AES_T_MAC_2_OFFSET 0x030C
+#define AES_T_MAC_3_OFFSET 0x0310
+#define AES_PLEN_OFFSET 0x0314
+#define AES_A_DMA_SRC_ADDR_OFFSET 0x0400
+#define AES_A_DMA_DST_ADDR_OFFSET 0x0404
+#define AES_A_DMA_SRC_SIZE_OFFSET 0x0408
+#define AES_A_DMA_DST_SIZE_OFFSET 0x040C
+#define AES_A_DMA_DMA_MODE_OFFSET 0x0410
+#define AES_A_DMA_NEXT_SRC_DESCR_OFFSET 0x0418
+#define AES_A_DMA_NEXT_DST_DESCR_OFFSET 0x041C
+#define AES_A_DMA_WHILE_ACTIVE_MODE_OFFSET 0x0420
+#define AES_A_DMA_LOG_OFFSET 0x0424
+#define AES_A_DMA_STATUS_OFFSET 0x0428
+#define AES_A_DMA_PERF_CNTR_OFFSET 0x042C
+#define AES_A_DMA_MSI_ISR_OFFSET 0x0480
+#define AES_A_DMA_MSI_IER_OFFSET 0x0484
+#define AES_A_DMA_MSI_MASK_OFFSET 0x0488
+#define AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET 0x0600
+#define AES_A_DMA_OUTBUFFER_READ_FIFO_OFFSET 0x0700
+
+/*
+ * AES_A_DMA_DMA_MODE register.
+ * Default: 0x00000000.
+ * bit[31] ACTIVE
+ * This bit activates the DMA. When the DMA finishes, it resets
+ * this bit to zero.
+ * bit[30:26] Unused by this driver.
+ * bit[25] SRC_LINK_LIST_EN
+ * Source link list enable bit. When the linked list is terminated
+ * this bit is reset by the DMA.
+ * bit[24] DST_LINK_LIST_EN
+ * Destination link list enable bit. When the linked list is
+ * terminated this bit is reset by the DMA.
+ * bit[23:0] Unused by this driver.
+ */
+#define AES_A_DMA_DMA_MODE_ACTIVE BIT(31)
+#define AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN BIT(25)
+#define AES_A_DMA_DMA_MODE_DST_LINK_LIST_EN BIT(24)
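+
+/*
+ * Illustration (based on the bit descriptions above, not a requirement of
+ * this register definition): a transfer that fetches its input via the
+ * source linked list would typically set both ACTIVE and SRC_LINK_LIST_EN,
+ * e.g.:
+ *
+ *	iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |
+ *		  AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN,
+ *		  aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
+ */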
+
+/*
+ * AES_ACTIVE register
+ * default 0x00000000
+ * bit[31:10] Reserved
+ * bit[9] LAST_ADATA
+ * bit[8] LAST_GCX
+ * bit[7:2] Reserved
+ * bit[1] TERMINATION
+ * bit[0] TRIGGER
+ */
+#define AES_ACTIVE_LAST_ADATA BIT(9)
+#define AES_ACTIVE_LAST_CCM_GCM BIT(8)
+#define AES_ACTIVE_TERMINATION BIT(1)
+#define AES_ACTIVE_TRIGGER BIT(0)
+
+#define AES_DISABLE_INT 0x00000000
+#define AES_DMA_CPD_ERR_INT BIT(8)
+#define AES_DMA_OUTBUF_RD_ERR_INT BIT(7)
+#define AES_DMA_OUTBUF_WR_ERR_INT BIT(6)
+#define AES_DMA_INBUF_RD_ERR_INT BIT(5)
+#define AES_DMA_INBUF_WR_ERR_INT BIT(4)
+#define AES_DMA_BAD_COMP_INT BIT(3)
+#define AES_DMA_SAI_INT BIT(2)
+#define AES_DMA_SRC_DONE_INT BIT(0)
+#define AES_COMPLETE_INT BIT(1)
+
+#define AES_DMA_MSI_MASK_CLEAR BIT(0)
+
+#define AES_128_BIT_KEY 0x00000000
+#define AES_256_BIT_KEY BIT(0)
+
+#define AES_DEACTIVATE_PERF_CNTR 0x00000000
+#define AES_ACTIVATE_PERF_CNTR BIT(0)
+
+#define AES_MAX_TAG_SIZE_U32 4
+
+#define OCS_LL_DMA_FLAG_TERMINATE BIT(31)
+
+/*
+ * There is an inconsistency in the documentation. This is documented as an
+ * 11-bit value, but it is actually 10 bits wide.
+ */
+#define AES_DMA_STATUS_INPUT_BUFFER_OCCUPANCY_MASK 0x3FF
+
+/*
+ * During CCM decrypt, the OCS block needs to finish processing the ciphertext
+ * before the tag is written. For 128-bit mode this required delay is 28 OCS
+ * clock cycles. For 256-bit mode it is 36 OCS clock cycles.
+ */
+#define CCM_DECRYPT_DELAY_TAG_CLK_COUNT 36UL
+
+/*
+ * During CCM decrypt there must be a delay of at least 42 OCS clock cycles
+ * between setting the TRIGGER bit in AES_ACTIVE and setting the LAST_CCM_GCM
+ * bit in the same register (as stated in the OCS databook)
+ */
+#define CCM_DECRYPT_DELAY_LAST_GCX_CLK_COUNT 42UL
+
+/* See RFC3610 section 2.2 */
+#define L_PRIME_MIN (1)
+#define L_PRIME_MAX (7)
+/*
+ * CCM IV format from RFC 3610 section 2.3
+ *
+ * Octet Number Contents
+ * ------------ ---------
+ * 0 Flags
+ * 1 ... 15-L Nonce N
+ * 16-L ... 15 Counter i
+ *
+ * Flags = L' = L - 1
+ */
+#define L_PRIME_IDX 0
+#define COUNTER_START(lprime) (16 - ((lprime) + 1))
+#define COUNTER_LEN(lprime) ((lprime) + 1)
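+/*
+ * For example, with iv[0] = L' = 3 (i.e. L = 4), the nonce N occupies
+ * iv[1]..iv[11] and the counter occupies iv[12]..iv[15], so
+ * COUNTER_START(3) = 12 and COUNTER_LEN(3) = 4.
+ */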
+
+enum aes_counter_mode {
+ AES_CTR_M_NO_INC = 0,
+ AES_CTR_M_32_INC = 1,
+ AES_CTR_M_64_INC = 2,
+ AES_CTR_M_128_INC = 3,
+};
+
+/**
+ * struct ocs_dma_linked_list - OCS DMA linked list entry.
+ * @src_addr: Source address of the data.
+ * @src_len: Length of data to be fetched.
+ * @next: Next dma_list to fetch.
+ * @ll_flags: Flags (Freeze / Terminate) for the DMA engine.
+ */
+struct ocs_dma_linked_list {
+ u32 src_addr;
+ u32 src_len;
+ u32 next;
+ u32 ll_flags;
+} __packed;
+
+/*
+ * Set endianness of inputs and outputs
+ * AES_BYTE_ORDER_CFG
+ * default 0x00000000
+ * bit [10] - KEY_HI_LO_SWAP
+ * bit [9] - KEY_HI_SWAP_DWORDS_IN_OCTWORD
+ * bit [8] - KEY_HI_SWAP_BYTES_IN_DWORD
+ * bit [7] - KEY_LO_SWAP_DWORDS_IN_OCTWORD
+ * bit [6] - KEY_LO_SWAP_BYTES_IN_DWORD
+ * bit [5] - IV_SWAP_DWORDS_IN_OCTWORD
+ * bit [4] - IV_SWAP_BYTES_IN_DWORD
+ * bit [3] - DOUT_SWAP_DWORDS_IN_OCTWORD
+ * bit [2] - DOUT_SWAP_BYTES_IN_DWORD
+ * bit [1] - DIN_SWAP_DWORDS_IN_OCTWORD
+ * bit [0] - DIN_SWAP_BYTES_IN_DWORD
+ */
+static inline void aes_a_set_endianness(const struct ocs_aes_dev *aes_dev)
+{
+ iowrite32(0x7FF, aes_dev->base_reg + AES_BYTE_ORDER_CFG_OFFSET);
+}
+
+/* Trigger AES process start. */
+static inline void aes_a_op_trigger(const struct ocs_aes_dev *aes_dev)
+{
+ iowrite32(AES_ACTIVE_TRIGGER, aes_dev->base_reg + AES_ACTIVE_OFFSET);
+}
+
+/* Indicate last bulk of data. */
+static inline void aes_a_op_termination(const struct ocs_aes_dev *aes_dev)
+{
+ iowrite32(AES_ACTIVE_TERMINATION,
+ aes_dev->base_reg + AES_ACTIVE_OFFSET);
+}
+
+/*
+ * Set LAST_CCM_GCM in AES_ACTIVE register and clear all other bits.
+ *
+ * Called when DMA is programmed to fetch the last batch of data.
+ * - For AES-CCM it is called for the last batch of Payload data and Ciphertext
+ * data.
+ * - For AES-GCM, it is called for the last batch of Plaintext data and
+ * Ciphertext data.
+ */
+static inline void aes_a_set_last_gcx(const struct ocs_aes_dev *aes_dev)
+{
+ iowrite32(AES_ACTIVE_LAST_CCM_GCM,
+ aes_dev->base_reg + AES_ACTIVE_OFFSET);
+}
+
+/* Wait for LAST_CCM_GCM bit to be unset. */
+static inline void aes_a_wait_last_gcx(const struct ocs_aes_dev *aes_dev)
+{
+ u32 aes_active_reg;
+
+ do {
+ aes_active_reg = ioread32(aes_dev->base_reg +
+ AES_ACTIVE_OFFSET);
+ } while (aes_active_reg & AES_ACTIVE_LAST_CCM_GCM);
+}
+
+/* Wait until the 10-bit input buffer occupancy field reads zero. */
+static void aes_a_dma_wait_input_buffer_occupancy(const struct ocs_aes_dev *aes_dev)
+{
+ u32 reg;
+
+ do {
+ reg = ioread32(aes_dev->base_reg + AES_A_DMA_STATUS_OFFSET);
+ } while (reg & AES_DMA_STATUS_INPUT_BUFFER_OCCUPANCY_MASK);
+}
+
+/*
+ * Set LAST_CCM_GCM and LAST_ADATA bits in AES_ACTIVE register (and clear all
+ * other bits).
+ *
+ * Called when DMA is programmed to fetch the last batch of Associated Data
+ * (CCM case) or Additional Authenticated Data (GCM case).
+ */
+static inline void aes_a_set_last_gcx_and_adata(const struct ocs_aes_dev *aes_dev)
+{
+ iowrite32(AES_ACTIVE_LAST_ADATA | AES_ACTIVE_LAST_CCM_GCM,
+ aes_dev->base_reg + AES_ACTIVE_OFFSET);
+}
+
+/* Set DMA src and dst transfer size to 0 */
+static inline void aes_a_dma_set_xfer_size_zero(const struct ocs_aes_dev *aes_dev)
+{
+ iowrite32(0, aes_dev->base_reg + AES_A_DMA_SRC_SIZE_OFFSET);
+ iowrite32(0, aes_dev->base_reg + AES_A_DMA_DST_SIZE_OFFSET);
+}
+
+/* Activate DMA for zero-byte transfer case. */
+static inline void aes_a_dma_active(const struct ocs_aes_dev *aes_dev)
+{
+ iowrite32(AES_A_DMA_DMA_MODE_ACTIVE,
+ aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
+}
+
+/* Activate DMA and enable src linked list */
+static inline void aes_a_dma_active_src_ll_en(const struct ocs_aes_dev *aes_dev)
+{
+ iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |
+ AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN,
+ aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
+}
+
+/* Activate DMA and enable dst linked list */
+static inline void aes_a_dma_active_dst_ll_en(const struct ocs_aes_dev *aes_dev)
+{
+ iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |
+ AES_A_DMA_DMA_MODE_DST_LINK_LIST_EN,
+ aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
+}
+
+/* Activate DMA and enable src and dst linked lists */
+static inline void aes_a_dma_active_src_dst_ll_en(const struct ocs_aes_dev *aes_dev)
+{
+ iowrite32(AES_A_DMA_DMA_MODE_ACTIVE |
+ AES_A_DMA_DMA_MODE_SRC_LINK_LIST_EN |
+ AES_A_DMA_DMA_MODE_DST_LINK_LIST_EN,
+ aes_dev->base_reg + AES_A_DMA_DMA_MODE_OFFSET);
+}
+
+/* Reset PERF_CNTR to 0 and activate it */
+static inline void aes_a_dma_reset_and_activate_perf_cntr(const struct ocs_aes_dev *aes_dev)
+{
+ iowrite32(0x00000000, aes_dev->base_reg + AES_A_DMA_PERF_CNTR_OFFSET);
+ iowrite32(AES_ACTIVATE_PERF_CNTR,
+ aes_dev->base_reg + AES_A_DMA_WHILE_ACTIVE_MODE_OFFSET);
+}
+
+/* Wait until PERF_CNTR is > delay, then deactivate it */
+static inline void aes_a_dma_wait_and_deactivate_perf_cntr(const struct ocs_aes_dev *aes_dev,
+ int delay)
+{
+ while (ioread32(aes_dev->base_reg + AES_A_DMA_PERF_CNTR_OFFSET) < delay)
+ ;
+ iowrite32(AES_DEACTIVATE_PERF_CNTR,
+ aes_dev->base_reg + AES_A_DMA_WHILE_ACTIVE_MODE_OFFSET);
+}
+
+/* Disable AES and DMA IRQ. */
+static void aes_irq_disable(struct ocs_aes_dev *aes_dev)
+{
+ u32 isr_val = 0;
+
+ /* Disable interrupts */
+ iowrite32(AES_DISABLE_INT,
+ aes_dev->base_reg + AES_A_DMA_MSI_IER_OFFSET);
+ iowrite32(AES_DISABLE_INT, aes_dev->base_reg + AES_IER_OFFSET);
+
+ /* Clear any pending interrupt */
+ isr_val = ioread32(aes_dev->base_reg + AES_A_DMA_MSI_ISR_OFFSET);
+ if (isr_val)
+ iowrite32(isr_val,
+ aes_dev->base_reg + AES_A_DMA_MSI_ISR_OFFSET);
+
+ isr_val = ioread32(aes_dev->base_reg + AES_A_DMA_MSI_MASK_OFFSET);
+ if (isr_val)
+ iowrite32(isr_val,
+ aes_dev->base_reg + AES_A_DMA_MSI_MASK_OFFSET);
+
+ isr_val = ioread32(aes_dev->base_reg + AES_ISR_OFFSET);
+ if (isr_val)
+ iowrite32(isr_val, aes_dev->base_reg + AES_ISR_OFFSET);
+}
+
+/* Enable AES or DMA IRQ. IRQ is disabled once fired. */
+static void aes_irq_enable(struct ocs_aes_dev *aes_dev, u8 irq)
+{
+ if (irq == AES_COMPLETE_INT) {
+ /* Ensure DMA error interrupts are enabled */
+ iowrite32(AES_DMA_CPD_ERR_INT |
+ AES_DMA_OUTBUF_RD_ERR_INT |
+ AES_DMA_OUTBUF_WR_ERR_INT |
+ AES_DMA_INBUF_RD_ERR_INT |
+ AES_DMA_INBUF_WR_ERR_INT |
+ AES_DMA_BAD_COMP_INT |
+ AES_DMA_SAI_INT,
+ aes_dev->base_reg + AES_A_DMA_MSI_IER_OFFSET);
+ /*
+ * AES_IER
+ * default 0x00000000
+ * bits [31:3] - reserved
+ * bit [2] - EN_SKS_ERR
+ * bit [1] - EN_AES_COMPLETE
+ * bit [0] - reserved
+ */
+ iowrite32(AES_COMPLETE_INT, aes_dev->base_reg + AES_IER_OFFSET);
+ return;
+ }
+ if (irq == AES_DMA_SRC_DONE_INT) {
+ /* Ensure AES interrupts are disabled */
+ iowrite32(AES_DISABLE_INT, aes_dev->base_reg + AES_IER_OFFSET);
+ /*
+ * DMA_MSI_IER
+ * default 0x00000000
+ * bits [31:9] - reserved
+ * bit [8] - CPD_ERR_INT_EN
+ * bit [7] - OUTBUF_RD_ERR_INT_EN
+ * bit [6] - OUTBUF_WR_ERR_INT_EN
+ * bit [5] - INBUF_RD_ERR_INT_EN
+ * bit [4] - INBUF_WR_ERR_INT_EN
+ * bit [3] - BAD_COMP_INT_EN
+ * bit [2] - SAI_INT_EN
+ * bit [1] - DST_DONE_INT_EN
+ * bit [0] - SRC_DONE_INT_EN
+ */
+ iowrite32(AES_DMA_CPD_ERR_INT |
+ AES_DMA_OUTBUF_RD_ERR_INT |
+ AES_DMA_OUTBUF_WR_ERR_INT |
+ AES_DMA_INBUF_RD_ERR_INT |
+ AES_DMA_INBUF_WR_ERR_INT |
+ AES_DMA_BAD_COMP_INT |
+ AES_DMA_SAI_INT |
+ AES_DMA_SRC_DONE_INT,
+ aes_dev->base_reg + AES_A_DMA_MSI_IER_OFFSET);
+ }
+}
+
+/* Enable and wait for IRQ (either from OCS AES engine or DMA) */
+static int ocs_aes_irq_enable_and_wait(struct ocs_aes_dev *aes_dev, u8 irq)
+{
+ int rc;
+
+ reinit_completion(&aes_dev->irq_completion);
+ aes_irq_enable(aes_dev, irq);
+ rc = wait_for_completion_interruptible(&aes_dev->irq_completion);
+ if (rc)
+ return rc;
+
+ return aes_dev->dma_err_mask ? -EIO : 0;
+}
+
+/* Configure DMA to OCS, linked list mode */
+static inline void dma_to_ocs_aes_ll(struct ocs_aes_dev *aes_dev,
+ dma_addr_t dma_list)
+{
+ iowrite32(0, aes_dev->base_reg + AES_A_DMA_SRC_SIZE_OFFSET);
+ iowrite32(dma_list,
+ aes_dev->base_reg + AES_A_DMA_NEXT_SRC_DESCR_OFFSET);
+}
+
+/* Configure DMA from OCS, linked list mode */
+static inline void dma_from_ocs_aes_ll(struct ocs_aes_dev *aes_dev,
+ dma_addr_t dma_list)
+{
+ iowrite32(0, aes_dev->base_reg + AES_A_DMA_DST_SIZE_OFFSET);
+ iowrite32(dma_list,
+ aes_dev->base_reg + AES_A_DMA_NEXT_DST_DESCR_OFFSET);
+}
+
+irqreturn_t ocs_aes_irq_handler(int irq, void *dev_id)
+{
+ struct ocs_aes_dev *aes_dev = dev_id;
+ u32 aes_dma_isr;
+
+ /* Read DMA ISR status. */
+ aes_dma_isr = ioread32(aes_dev->base_reg + AES_A_DMA_MSI_ISR_OFFSET);
+
+ /* Disable and clear interrupts. */
+ aes_irq_disable(aes_dev);
+
+ /* Save DMA error status. */
+ aes_dev->dma_err_mask = aes_dma_isr &
+ (AES_DMA_CPD_ERR_INT |
+ AES_DMA_OUTBUF_RD_ERR_INT |
+ AES_DMA_OUTBUF_WR_ERR_INT |
+ AES_DMA_INBUF_RD_ERR_INT |
+ AES_DMA_INBUF_WR_ERR_INT |
+ AES_DMA_BAD_COMP_INT |
+ AES_DMA_SAI_INT);
+
+ /* Signal IRQ completion. */
+ complete(&aes_dev->irq_completion);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ocs_aes_set_key() - Write key into OCS AES hardware.
+ * @aes_dev: The OCS AES device to write the key to.
+ * @key_size: The size of the key (in bytes).
+ * @key: The key to write.
+ * @cipher: The cipher the key is for.
+ *
+ * For AES @key_size must be either 16 or 32. For SM4 @key_size must be 16.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int ocs_aes_set_key(struct ocs_aes_dev *aes_dev, u32 key_size, const u8 *key,
+ enum ocs_cipher cipher)
+{
+ const u32 *key_u32;
+ u32 val;
+ int i;
+
+ /* OCS AES supports 128-bit and 256-bit keys only. */
+ if (cipher == OCS_AES && !(key_size == 32 || key_size == 16)) {
+ dev_err(aes_dev->dev,
+ "%d-bit keys not supported by AES cipher\n",
+ key_size * 8);
+ return -EINVAL;
+ }
+ /* OCS SM4 supports 128-bit keys only. */
+ if (cipher == OCS_SM4 && key_size != 16) {
+ dev_err(aes_dev->dev,
+ "%d-bit keys not supported for SM4 cipher\n",
+ key_size * 8);
+ return -EINVAL;
+ }
+
+ if (!key)
+ return -EINVAL;
+
+ key_u32 = (const u32 *)key;
+
+ /* Write key to AES_KEY[0-7] registers */
+ for (i = 0; i < (key_size / sizeof(u32)); i++) {
+ iowrite32(key_u32[i],
+ aes_dev->base_reg + AES_KEY_0_OFFSET +
+ (i * sizeof(u32)));
+ }
+ /*
+ * Write key size
+ * bits [31:1] - reserved
+ * bit [0] - AES_KEY_SIZE
+ * 0 - 128 bit key
+ * 1 - 256 bit key
+ */
+ val = (key_size == 16) ? AES_128_BIT_KEY : AES_256_BIT_KEY;
+ iowrite32(val, aes_dev->base_reg + AES_KEY_SIZE_OFFSET);
+
+ return 0;
+}
+
+/* Write AES_COMMAND */
+static inline void set_ocs_aes_command(struct ocs_aes_dev *aes_dev,
+ enum ocs_cipher cipher,
+ enum ocs_mode mode,
+ enum ocs_instruction instruction)
+{
+ u32 val;
+
+ /* AES_COMMAND
+ * default 0x000000CC
+ * bit [14] - CIPHER_SELECT
+ * 0 - AES
+ * 1 - SM4
+ * bits [11:8] - OCS_AES_MODE
+ * 0000 - ECB
+ * 0001 - CBC
+ * 0010 - CTR
+ * 0110 - CCM
+ * 0111 - GCM
+ * 1001 - CTS
+ * bits [7:6] - AES_INSTRUCTION
+ * 00 - ENCRYPT
+ * 01 - DECRYPT
+ * 10 - EXPAND
+ * 11 - BYPASS
+ * bits [3:2] - CTR_M_BITS
+ * 00 - No increment
+ * 01 - Least significant 32 bits are incremented
+ * 10 - Least significant 64 bits are incremented
+ * 11 - Full 128 bits are incremented
+ */
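+	/*
+	 * For example, AES-GCM encrypt (cipher = 0, mode = 7, instruction = 0)
+	 * with full 128-bit counter increment gives
+	 * val = (0 << 14) | (7 << 8) | (0 << 6) | (3 << 2) = 0x070C.
+	 */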
+ val = (cipher << 14) | (mode << 8) | (instruction << 6) |
+ (AES_CTR_M_128_INC << 2);
+ iowrite32(val, aes_dev->base_reg + AES_COMMAND_OFFSET);
+}
+
+static void ocs_aes_init(struct ocs_aes_dev *aes_dev,
+ enum ocs_mode mode,
+ enum ocs_cipher cipher,
+ enum ocs_instruction instruction)
+{
+ /* Ensure interrupts are disabled and pending interrupts cleared. */
+ aes_irq_disable(aes_dev);
+
+ /* Set endianness recommended by data-sheet. */
+ aes_a_set_endianness(aes_dev);
+
+ /* Set AES_COMMAND register. */
+ set_ocs_aes_command(aes_dev, cipher, mode, instruction);
+}
+
+/*
+ * Write the byte length of the last AES/SM4 block of Payload data (without
+ * zero padding and without the length of the MAC) in register AES_PLEN.
+ */
+static inline void ocs_aes_write_last_data_blk_len(struct ocs_aes_dev *aes_dev,
+ u32 size)
+{
+ u32 val;
+
+ if (size == 0) {
+ val = 0;
+ goto exit;
+ }
+
+ val = size % AES_BLOCK_SIZE;
+ if (val == 0)
+ val = AES_BLOCK_SIZE;
+
+exit:
+ iowrite32(val, aes_dev->base_reg + AES_PLEN_OFFSET);
+}
+
+/*
+ * Validate inputs according to mode.
+ * If OK return 0; else return -EINVAL.
+ */
+static int ocs_aes_validate_inputs(dma_addr_t src_dma_list, u32 src_size,
+ const u8 *iv, u32 iv_size,
+ dma_addr_t aad_dma_list, u32 aad_size,
+ const u8 *tag, u32 tag_size,
+ enum ocs_cipher cipher, enum ocs_mode mode,
+ enum ocs_instruction instruction,
+ dma_addr_t dst_dma_list)
+{
+ /* Ensure cipher, mode and instruction are valid. */
+ if (!(cipher == OCS_AES || cipher == OCS_SM4))
+ return -EINVAL;
+
+ if (mode != OCS_MODE_ECB && mode != OCS_MODE_CBC &&
+ mode != OCS_MODE_CTR && mode != OCS_MODE_CCM &&
+ mode != OCS_MODE_GCM && mode != OCS_MODE_CTS)
+ return -EINVAL;
+
+ if (instruction != OCS_ENCRYPT && instruction != OCS_DECRYPT &&
+ instruction != OCS_EXPAND && instruction != OCS_BYPASS)
+ return -EINVAL;
+
+ /*
+ * When instruction is OCS_BYPASS, OCS simply copies data from source
+ * to destination using DMA.
+ *
+ * AES mode is irrelevant, but both source and destination DMA
+	 * linked lists must be defined.
+ */
+ if (instruction == OCS_BYPASS) {
+ if (src_dma_list == DMA_MAPPING_ERROR ||
+ dst_dma_list == DMA_MAPPING_ERROR)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ /*
+ * For performance reasons switch based on mode to limit unnecessary
+ * conditionals for each mode
+ */
+ switch (mode) {
+ case OCS_MODE_ECB:
+ /* Ensure input length is multiple of block size */
+ if (src_size % AES_BLOCK_SIZE != 0)
+ return -EINVAL;
+
+ /* Ensure source and destination linked lists are created */
+ if (src_dma_list == DMA_MAPPING_ERROR ||
+ dst_dma_list == DMA_MAPPING_ERROR)
+ return -EINVAL;
+
+ return 0;
+
+ case OCS_MODE_CBC:
+ /* Ensure input length is multiple of block size */
+ if (src_size % AES_BLOCK_SIZE != 0)
+ return -EINVAL;
+
+ /* Ensure source and destination linked lists are created */
+ if (src_dma_list == DMA_MAPPING_ERROR ||
+ dst_dma_list == DMA_MAPPING_ERROR)
+ return -EINVAL;
+
+ /* Ensure IV is present and block size in length */
+ if (!iv || iv_size != AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ return 0;
+
+ case OCS_MODE_CTR:
+ /* Ensure input length of 1 byte or greater */
+ if (src_size == 0)
+ return -EINVAL;
+
+ /* Ensure source and destination linked lists are created */
+ if (src_dma_list == DMA_MAPPING_ERROR ||
+ dst_dma_list == DMA_MAPPING_ERROR)
+ return -EINVAL;
+
+ /* Ensure IV is present and block size in length */
+ if (!iv || iv_size != AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ return 0;
+
+ case OCS_MODE_CTS:
+ /* Ensure input length >= block size */
+ if (src_size < AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ /* Ensure source and destination linked lists are created */
+ if (src_dma_list == DMA_MAPPING_ERROR ||
+ dst_dma_list == DMA_MAPPING_ERROR)
+ return -EINVAL;
+
+ /* Ensure IV is present and block size in length */
+ if (!iv || iv_size != AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ return 0;
+
+ case OCS_MODE_GCM:
+ /* Ensure IV is present and GCM_AES_IV_SIZE in length */
+ if (!iv || iv_size != GCM_AES_IV_SIZE)
+ return -EINVAL;
+
+ /*
+ * If input data present ensure source and destination linked
+ * lists are created
+ */
+ if (src_size && (src_dma_list == DMA_MAPPING_ERROR ||
+ dst_dma_list == DMA_MAPPING_ERROR))
+ return -EINVAL;
+
+ /* If aad present ensure aad linked list is created */
+ if (aad_size && aad_dma_list == DMA_MAPPING_ERROR)
+ return -EINVAL;
+
+ /* Ensure tag destination is set */
+ if (!tag)
+ return -EINVAL;
+
+ /* Just ensure that tag_size doesn't cause overflows. */
+ if (tag_size > (AES_MAX_TAG_SIZE_U32 * sizeof(u32)))
+ return -EINVAL;
+
+ return 0;
+
+ case OCS_MODE_CCM:
+ /* Ensure IV is present and block size in length */
+ if (!iv || iv_size != AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ /* 2 <= L <= 8, so 1 <= L' <= 7 */
+ if (iv[L_PRIME_IDX] < L_PRIME_MIN ||
+ iv[L_PRIME_IDX] > L_PRIME_MAX)
+ return -EINVAL;
+
+ /* If aad present ensure aad linked list is created */
+ if (aad_size && aad_dma_list == DMA_MAPPING_ERROR)
+ return -EINVAL;
+
+ /* Just ensure that tag_size doesn't cause overflows. */
+ if (tag_size > (AES_MAX_TAG_SIZE_U32 * sizeof(u32)))
+ return -EINVAL;
+
+ if (instruction == OCS_DECRYPT) {
+ /*
+ * If input data present ensure source and destination
+ * linked lists are created
+ */
+ if (src_size && (src_dma_list == DMA_MAPPING_ERROR ||
+ dst_dma_list == DMA_MAPPING_ERROR))
+ return -EINVAL;
+
+ /* Ensure input tag is present */
+ if (!tag)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ /* Instruction == OCS_ENCRYPT */
+
+ /*
+ * Destination linked list always required (for tag even if no
+ * input data)
+ */
+ if (dst_dma_list == DMA_MAPPING_ERROR)
+ return -EINVAL;
+
+ /* If input data present ensure src linked list is created */
+ if (src_size && src_dma_list == DMA_MAPPING_ERROR)
+ return -EINVAL;
+
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * ocs_aes_op() - Perform AES/SM4 operation.
+ * @aes_dev: The OCS AES device to use.
+ * @mode: The mode to use (ECB, CBC, CTR, or CTS).
+ * @cipher: The cipher to use (AES or SM4).
+ * @instruction: The instruction to perform (encrypt or decrypt).
+ * @dst_dma_list: The OCS DMA list mapping output memory.
+ * @src_dma_list: The OCS DMA list mapping input payload data.
+ * @src_size: The amount of data mapped by @src_dma_list.
+ * @iv: The IV vector.
+ * @iv_size: The size (in bytes) of @iv.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int ocs_aes_op(struct ocs_aes_dev *aes_dev,
+ enum ocs_mode mode,
+ enum ocs_cipher cipher,
+ enum ocs_instruction instruction,
+ dma_addr_t dst_dma_list,
+ dma_addr_t src_dma_list,
+ u32 src_size,
+ u8 *iv,
+ u32 iv_size)
+{
+ u32 *iv32;
+ int rc;
+
+ rc = ocs_aes_validate_inputs(src_dma_list, src_size, iv, iv_size, 0, 0,
+ NULL, 0, cipher, mode, instruction,
+ dst_dma_list);
+ if (rc)
+ return rc;
+ /*
+ * ocs_aes_validate_inputs() is a generic check, now ensure mode is not
+ * GCM or CCM.
+ */
+ if (mode == OCS_MODE_GCM || mode == OCS_MODE_CCM)
+ return -EINVAL;
+
+ /* Cast IV to u32 array. */
+ iv32 = (u32 *)iv;
+
+ ocs_aes_init(aes_dev, mode, cipher, instruction);
+
+ if (mode == OCS_MODE_CTS) {
+ /* Write the byte length of the last data block to engine. */
+ ocs_aes_write_last_data_blk_len(aes_dev, src_size);
+ }
+
+ /* ECB is the only mode that doesn't use IV. */
+ if (mode != OCS_MODE_ECB) {
+ iowrite32(iv32[0], aes_dev->base_reg + AES_IV_0_OFFSET);
+ iowrite32(iv32[1], aes_dev->base_reg + AES_IV_1_OFFSET);
+ iowrite32(iv32[2], aes_dev->base_reg + AES_IV_2_OFFSET);
+ iowrite32(iv32[3], aes_dev->base_reg + AES_IV_3_OFFSET);
+ }
+
+ /* Set AES_ACTIVE.TRIGGER to start the operation. */
+ aes_a_op_trigger(aes_dev);
+
+ /* Configure and activate input / output DMA. */
+ dma_to_ocs_aes_ll(aes_dev, src_dma_list);
+ dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
+ aes_a_dma_active_src_dst_ll_en(aes_dev);
+
+ if (mode == OCS_MODE_CTS) {
+ /*
+ * For CTS mode, instruct engine to activate ciphertext
+ * stealing if last block of data is incomplete.
+ */
+ aes_a_set_last_gcx(aes_dev);
+ } else {
+ /* For all other modes, just write the 'termination' bit. */
+ aes_a_op_termination(aes_dev);
+ }
+
+ /* Wait for engine to complete processing. */
+ rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
+ if (rc)
+ return rc;
+
+ if (mode == OCS_MODE_CTR) {
+ /* Read back IV for streaming mode */
+ iv32[0] = ioread32(aes_dev->base_reg + AES_IV_0_OFFSET);
+ iv32[1] = ioread32(aes_dev->base_reg + AES_IV_1_OFFSET);
+ iv32[2] = ioread32(aes_dev->base_reg + AES_IV_2_OFFSET);
+ iv32[3] = ioread32(aes_dev->base_reg + AES_IV_3_OFFSET);
+ }
+
+ return 0;
+}
+
+/* Compute and write J0 to engine registers. */
+static void ocs_aes_gcm_write_j0(const struct ocs_aes_dev *aes_dev,
+ const u8 *iv)
+{
+	const u32 *j0 = (const u32 *)iv;
+
+ /*
+	 * IV must be 12 bytes; other sizes are not supported, as the Linux
+	 * crypto API only expects/allows a 12-byte IV for GCM.
+ */
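+	/*
+	 * Per NIST SP 800-38D, for a 96-bit IV, J0 = IV || 0^31 || 1: the
+	 * constant 1 written below is the final 32-bit word of J0, while the
+	 * byte-swapped IV words form its first three words.
+	 */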
+ iowrite32(0x00000001, aes_dev->base_reg + AES_IV_0_OFFSET);
+ iowrite32(__swab32(j0[2]), aes_dev->base_reg + AES_IV_1_OFFSET);
+ iowrite32(__swab32(j0[1]), aes_dev->base_reg + AES_IV_2_OFFSET);
+ iowrite32(__swab32(j0[0]), aes_dev->base_reg + AES_IV_3_OFFSET);
+}
+
+/* Read GCM tag from engine registers. */
+static inline void ocs_aes_gcm_read_tag(struct ocs_aes_dev *aes_dev,
+ u8 *tag, u32 tag_size)
+{
+ u32 tag_u32[AES_MAX_TAG_SIZE_U32];
+
+ /*
+ * The Authentication Tag T is stored in Little Endian order in the
+ * registers with the most significant bytes stored from AES_T_MAC[3]
+ * downward.
+ */
+ tag_u32[0] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_3_OFFSET));
+ tag_u32[1] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_2_OFFSET));
+ tag_u32[2] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_1_OFFSET));
+ tag_u32[3] = __swab32(ioread32(aes_dev->base_reg + AES_T_MAC_0_OFFSET));
+
+ memcpy(tag, tag_u32, tag_size);
+}
+
+/**
+ * ocs_aes_gcm_op() - Perform GCM operation.
+ * @aes_dev: The OCS AES device to use.
+ * @cipher: The Cipher to use (AES or SM4).
+ * @instruction: The instruction to perform (encrypt or decrypt).
+ * @dst_dma_list: The OCS DMA list mapping output memory.
+ * @src_dma_list: The OCS DMA list mapping input payload data.
+ * @src_size: The amount of data mapped by @src_dma_list.
+ * @iv: The input IV vector.
+ * @aad_dma_list: The OCS DMA list mapping input AAD data.
+ * @aad_size: The amount of data mapped by @aad_dma_list.
+ * @out_tag: Where to store computed tag.
+ * @tag_size: The size (in bytes) of @out_tag.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int ocs_aes_gcm_op(struct ocs_aes_dev *aes_dev,
+ enum ocs_cipher cipher,
+ enum ocs_instruction instruction,
+ dma_addr_t dst_dma_list,
+ dma_addr_t src_dma_list,
+ u32 src_size,
+ const u8 *iv,
+ dma_addr_t aad_dma_list,
+ u32 aad_size,
+ u8 *out_tag,
+ u32 tag_size)
+{
+ u64 bit_len;
+ u32 val;
+ int rc;
+
+ rc = ocs_aes_validate_inputs(src_dma_list, src_size, iv,
+ GCM_AES_IV_SIZE, aad_dma_list,
+ aad_size, out_tag, tag_size, cipher,
+ OCS_MODE_GCM, instruction,
+ dst_dma_list);
+ if (rc)
+ return rc;
+
+ ocs_aes_init(aes_dev, OCS_MODE_GCM, cipher, instruction);
+
+ /* Compute and write J0 to OCS HW. */
+ ocs_aes_gcm_write_j0(aes_dev, iv);
+
+ /* Write out_tag byte length */
+ iowrite32(tag_size, aes_dev->base_reg + AES_TLEN_OFFSET);
+
+ /* Write the byte length of the last plaintext / ciphertext block. */
+ ocs_aes_write_last_data_blk_len(aes_dev, src_size);
+
+ /* Write ciphertext bit length */
+ bit_len = (u64)src_size * 8;
+ val = bit_len & 0xFFFFFFFF;
+ iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_0_OFFSET);
+ val = bit_len >> 32;
+ iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_1_OFFSET);
+
+ /* Write aad bit length */
+ bit_len = (u64)aad_size * 8;
+ val = bit_len & 0xFFFFFFFF;
+ iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_2_OFFSET);
+ val = bit_len >> 32;
+ iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_3_OFFSET);
+
+ /* Set AES_ACTIVE.TRIGGER to start the operation. */
+ aes_a_op_trigger(aes_dev);
+
+ /* Process AAD. */
+ if (aad_size) {
+ /* If aad present, configure DMA to feed it to the engine. */
+ dma_to_ocs_aes_ll(aes_dev, aad_dma_list);
+ aes_a_dma_active_src_ll_en(aes_dev);
+
+		/* Instruct the engine to pad the last block of aad, if needed. */
+ aes_a_set_last_gcx_and_adata(aes_dev);
+
+ /* Wait for DMA transfer to complete. */
+ rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_DMA_SRC_DONE_INT);
+ if (rc)
+ return rc;
+ } else {
+ aes_a_set_last_gcx_and_adata(aes_dev);
+ }
+
+ /* Wait until adata (if present) has been processed. */
+ aes_a_wait_last_gcx(aes_dev);
+ aes_a_dma_wait_input_buffer_occupancy(aes_dev);
+
+ /* Now process payload. */
+ if (src_size) {
+ /* Configure and activate DMA for both input and output data. */
+ dma_to_ocs_aes_ll(aes_dev, src_dma_list);
+ dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
+ aes_a_dma_active_src_dst_ll_en(aes_dev);
+ } else {
+ aes_a_dma_set_xfer_size_zero(aes_dev);
+ aes_a_dma_active(aes_dev);
+ }
+
+	/* Instruct the AES/SM4 engine that payload processing is over. */
+ aes_a_set_last_gcx(aes_dev);
+
+ /* Wait for OCS AES engine to complete processing. */
+ rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
+ if (rc)
+ return rc;
+
+ ocs_aes_gcm_read_tag(aes_dev, out_tag, tag_size);
+
+ return 0;
+}
+
+/* Write encrypted tag to AES/SM4 engine. */
+static void ocs_aes_ccm_write_encrypted_tag(struct ocs_aes_dev *aes_dev,
+ const u8 *in_tag, u32 tag_size)
+{
+ int i;
+
+ /* Ensure DMA input buffer is empty */
+ aes_a_dma_wait_input_buffer_occupancy(aes_dev);
+
+ /*
+ * During CCM decrypt, the OCS block needs to finish processing the
+	 * ciphertext before the tag is written, so a delay is needed after the
+	 * DMA has finished writing the ciphertext.
+ */
+ aes_a_dma_reset_and_activate_perf_cntr(aes_dev);
+ aes_a_dma_wait_and_deactivate_perf_cntr(aes_dev,
+ CCM_DECRYPT_DELAY_TAG_CLK_COUNT);
+
+ /* Write encrypted tag to AES/SM4 engine. */
+ for (i = 0; i < tag_size; i++) {
+ iowrite8(in_tag[i], aes_dev->base_reg +
+ AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET);
+ }
+}
+
+/*
+ * Write B0 CCM block to OCS AES HW.
+ *
+ * Note: B0 format is documented in NIST Special Publication 800-38C
+ * https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38c.pdf
+ * (see Section A.2.1)
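+ *
+ * For example, with adata present, tag_size = 16, iv[0] = L' = 3 (so q = 4)
+ * and cryptlen = 0x1000, the flags octet is
+ * b0[0] = BIT(6) | (((16 - 2) / 2) << 3) | 3 = 0x7B, b0[1..11] holds the
+ * nonce and b0[12..15] = 00 00 10 00 (Q, big endian).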
+ */
+static int ocs_aes_ccm_write_b0(const struct ocs_aes_dev *aes_dev,
+ const u8 *iv, u32 adata_size, u32 tag_size,
+ u32 cryptlen)
+{
+ u8 b0[16]; /* CCM B0 block is 16 bytes long. */
+ int i, q;
+
+ /* Initialize B0 to 0. */
+ memset(b0, 0, sizeof(b0));
+
+ /*
+ * B0[0] is the 'Flags Octet' and has the following structure:
+ * bit 7: Reserved
+ * bit 6: Adata flag
+ * bit 5-3: t value encoded as (t-2)/2
+ * bit 2-0: q value encoded as q - 1
+ */
+ /* If there is AAD data, set the Adata flag. */
+ if (adata_size)
+ b0[0] |= BIT(6);
+ /*
+ * t denotes the octet length of T.
+ * t can only be an element of { 4, 6, 8, 10, 12, 14, 16} and is
+ * encoded as (t - 2) / 2
+ */
+ b0[0] |= (((tag_size - 2) / 2) & 0x7) << 3;
+ /*
+ * q is the octet length of Q.
+ * q can only be an element of {2, 3, 4, 5, 6, 7, 8} and is encoded as
+ * q - 1 == iv[0] & 0x7;
+ */
+ b0[0] |= iv[0] & 0x7;
+ /*
+ * Copy the Nonce N from IV to B0; N is located in iv[1]..iv[15 - q]
+ * and must be copied to b0[1]..b0[15-q].
+ * q == (iv[0] & 0x7) + 1
+ */
+ q = (iv[0] & 0x7) + 1;
+ for (i = 1; i <= 15 - q; i++)
+ b0[i] = iv[i];
+ /*
+ * The rest of B0 must contain Q, i.e., the message length.
+ * Q is encoded in q octets, in big-endian order, so to write it, we
+ * start from the end of B0 and we move backward.
+ */
+ i = sizeof(b0) - 1;
+ while (q) {
+ b0[i] = cryptlen & 0xff;
+ cryptlen >>= 8;
+ i--;
+ q--;
+ }
+ /*
+ * If cryptlen is not zero at this point, it means that its original
+ * value was too big.
+ */
+ if (cryptlen)
+ return -EOVERFLOW;
+ /* Now write B0 to OCS AES input buffer. */
+ for (i = 0; i < sizeof(b0); i++)
+ iowrite8(b0[i], aes_dev->base_reg +
+ AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET);
+ return 0;
+}
+
+/*
+ * Write adata length to OCS AES HW.
+ *
+ * Note: adata len encoding is documented in NIST Special Publication 800-38C
+ * https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38c.pdf
+ * (see Section A.2.2)
+ */
+static void ocs_aes_ccm_write_adata_len(const struct ocs_aes_dev *aes_dev,
+ u64 adata_len)
+{
+ u8 enc_a[10]; /* Maximum encoded size: 10 octets. */
+ int i, len;
+
+ /*
+ * adata_len ('a') is encoded as follows:
+ * If 0 < a < 2^16 - 2^8 ==> 'a' encoded as [a]16, i.e., two octets
+ * (big endian).
+ * If 2^16 - 2^8 ≤ a < 2^32 ==> 'a' encoded as 0xff || 0xfe || [a]32,
+ * i.e., six octets (big endian).
+ * If 2^32 ≤ a < 2^64 ==> 'a' encoded as 0xff || 0xff || [a]64,
+ * i.e., ten octets (big endian).
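+	 *
+	 * For example, adata_len = 0x20 is encoded as the two octets 0x00 0x20,
+	 * while adata_len = 0x00012345 is encoded as the six octets
+	 * 0xff 0xfe 0x00 0x01 0x23 0x45.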
+ */
+ if (adata_len < 65280) {
+ len = 2;
+ *(__be16 *)enc_a = cpu_to_be16(adata_len);
+ } else if (adata_len <= 0xFFFFFFFF) {
+ len = 6;
+ *(__be16 *)enc_a = cpu_to_be16(0xfffe);
+ *(__be32 *)&enc_a[2] = cpu_to_be32(adata_len);
+ } else { /* adata_len >= 2^32 */
+ len = 10;
+ *(__be16 *)enc_a = cpu_to_be16(0xffff);
+ *(__be64 *)&enc_a[2] = cpu_to_be64(adata_len);
+ }
+ for (i = 0; i < len; i++)
+ iowrite8(enc_a[i],
+ aes_dev->base_reg +
+ AES_A_DMA_INBUFFER_WRITE_FIFO_OFFSET);
+}
+
+static int ocs_aes_ccm_do_adata(struct ocs_aes_dev *aes_dev,
+ dma_addr_t adata_dma_list, u32 adata_size)
+{
+ int rc;
+
+ if (!adata_size) {
+		/* Since there is no aad, the LAST_GCX bit can be set now. */
+ aes_a_set_last_gcx_and_adata(aes_dev);
+ goto exit;
+ }
+
+ /* Adata case. */
+
+ /*
+ * Form the encoding of the Associated data length and write it
+ * to the AES/SM4 input buffer.
+ */
+ ocs_aes_ccm_write_adata_len(aes_dev, adata_size);
+
+ /* Configure the AES/SM4 DMA to fetch the Associated Data */
+ dma_to_ocs_aes_ll(aes_dev, adata_dma_list);
+
+ /* Activate DMA to fetch Associated data. */
+ aes_a_dma_active_src_ll_en(aes_dev);
+
+ /* Set LAST_GCX and LAST_ADATA in AES ACTIVE register. */
+ aes_a_set_last_gcx_and_adata(aes_dev);
+
+ /* Wait for DMA transfer to complete. */
+ rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_DMA_SRC_DONE_INT);
+ if (rc)
+ return rc;
+
+exit:
+ /* Wait until adata (if present) has been processed. */
+ aes_a_wait_last_gcx(aes_dev);
+ aes_a_dma_wait_input_buffer_occupancy(aes_dev);
+
+ return 0;
+}
+
+static int ocs_aes_ccm_encrypt_do_payload(struct ocs_aes_dev *aes_dev,
+ dma_addr_t dst_dma_list,
+ dma_addr_t src_dma_list,
+ u32 src_size)
+{
+ if (src_size) {
+ /*
+ * Configure and activate DMA for both input and output
+ * data.
+ */
+ dma_to_ocs_aes_ll(aes_dev, src_dma_list);
+ dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
+ aes_a_dma_active_src_dst_ll_en(aes_dev);
+ } else {
+ /* Configure and activate DMA for output data only. */
+ dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
+ aes_a_dma_active_dst_ll_en(aes_dev);
+ }
+
+ /*
+ * Set the LAST GCX bit in AES_ACTIVE Register to instruct
+ * AES/SM4 engine to pad the last block of data.
+ */
+ aes_a_set_last_gcx(aes_dev);
+
+ /* We are done, wait for IRQ and return. */
+ return ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
+}
+
+static int ocs_aes_ccm_decrypt_do_payload(struct ocs_aes_dev *aes_dev,
+ dma_addr_t dst_dma_list,
+ dma_addr_t src_dma_list,
+ u32 src_size)
+{
+ if (!src_size) {
+ /* Let engine process 0-length input. */
+ aes_a_dma_set_xfer_size_zero(aes_dev);
+ aes_a_dma_active(aes_dev);
+ aes_a_set_last_gcx(aes_dev);
+
+ return 0;
+ }
+
+ /*
+ * Configure and activate DMA for both input and output
+ * data.
+ */
+ dma_to_ocs_aes_ll(aes_dev, src_dma_list);
+ dma_from_ocs_aes_ll(aes_dev, dst_dma_list);
+ aes_a_dma_active_src_dst_ll_en(aes_dev);
+ /*
+ * Set the LAST GCX bit in AES_ACTIVE Register; this allows the
+ * AES/SM4 engine to differentiate between encrypted data and
+ * encrypted MAC.
+ */
+ aes_a_set_last_gcx(aes_dev);
+ /*
+ * Enable DMA DONE interrupt; once DMA transfer is over,
+ * interrupt handler will process the MAC/tag.
+ */
+ return ocs_aes_irq_enable_and_wait(aes_dev, AES_DMA_SRC_DONE_INT);
+}
+
+/*
+ * Compare Tag to Yr.
+ *
+ * Only used at the end of CCM decrypt. If tag == yr, message authentication
+ * has succeeded.
+ */
+static inline int ccm_compare_tag_to_yr(struct ocs_aes_dev *aes_dev,
+ u8 tag_size_bytes)
+{
+ u32 tag[AES_MAX_TAG_SIZE_U32];
+ u32 yr[AES_MAX_TAG_SIZE_U32];
+ u8 i;
+
+ /* Read Tag and Yr from AES registers. */
+ for (i = 0; i < AES_MAX_TAG_SIZE_U32; i++) {
+ tag[i] = ioread32(aes_dev->base_reg +
+ AES_T_MAC_0_OFFSET + (i * sizeof(u32)));
+ yr[i] = ioread32(aes_dev->base_reg +
+ AES_MULTIPURPOSE2_0_OFFSET +
+ (i * sizeof(u32)));
+ }
+
+ return memcmp(tag, yr, tag_size_bytes) ? -EBADMSG : 0;
+}
+
+/**
+ * ocs_aes_ccm_op() - Perform CCM operation.
+ * @aes_dev: The OCS AES device to use.
+ * @cipher: The Cipher to use (AES or SM4).
+ * @instruction: The instruction to perform (encrypt or decrypt).
+ * @dst_dma_list: The OCS DMA list mapping output memory.
+ * @src_dma_list: The OCS DMA list mapping input payload data.
+ * @src_size: The amount of data mapped by @src_dma_list.
+ * @iv: The input IV vector.
+ * @adata_dma_list: The OCS DMA list mapping input A-data.
+ * @adata_size: The amount of data mapped by @adata_dma_list.
+ * @in_tag: Input tag.
+ * @tag_size: The size (in bytes) of @in_tag.
+ *
+ * Note: for encrypt the tag is appended to the ciphertext (in the memory
+ * mapped by @dst_dma_list).
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int ocs_aes_ccm_op(struct ocs_aes_dev *aes_dev,
+ enum ocs_cipher cipher,
+ enum ocs_instruction instruction,
+ dma_addr_t dst_dma_list,
+ dma_addr_t src_dma_list,
+ u32 src_size,
+ u8 *iv,
+ dma_addr_t adata_dma_list,
+ u32 adata_size,
+ u8 *in_tag,
+ u32 tag_size)
+{
+ u32 *iv_32;
+ u8 lprime;
+ int rc;
+
+ rc = ocs_aes_validate_inputs(src_dma_list, src_size, iv,
+ AES_BLOCK_SIZE, adata_dma_list, adata_size,
+ in_tag, tag_size, cipher, OCS_MODE_CCM,
+ instruction, dst_dma_list);
+ if (rc)
+ return rc;
+
+ ocs_aes_init(aes_dev, OCS_MODE_CCM, cipher, instruction);
+
+ /*
+	 * Note: RFC 3610 and NIST 800-38C require a counter value of zero to
+	 * encrypt the auth tag, so ensure this is the case here.
+ */
+ lprime = iv[L_PRIME_IDX];
+ memset(&iv[COUNTER_START(lprime)], 0, COUNTER_LEN(lprime));
+
+ /*
+ * Nonce is already converted to ctr0 before being passed into this
+ * function as iv.
+ */
+ iv_32 = (u32 *)iv;
+ iowrite32(__swab32(iv_32[0]),
+ aes_dev->base_reg + AES_MULTIPURPOSE1_3_OFFSET);
+ iowrite32(__swab32(iv_32[1]),
+ aes_dev->base_reg + AES_MULTIPURPOSE1_2_OFFSET);
+ iowrite32(__swab32(iv_32[2]),
+ aes_dev->base_reg + AES_MULTIPURPOSE1_1_OFFSET);
+ iowrite32(__swab32(iv_32[3]),
+ aes_dev->base_reg + AES_MULTIPURPOSE1_0_OFFSET);
+
+ /* Write MAC/tag length in register AES_TLEN */
+ iowrite32(tag_size, aes_dev->base_reg + AES_TLEN_OFFSET);
+ /*
+ * Write the byte length of the last AES/SM4 block of Payload data
+ * (without zero padding and without the length of the MAC) in register
+ * AES_PLEN.
+ */
+ ocs_aes_write_last_data_blk_len(aes_dev, src_size);
+
+ /* Set AES_ACTIVE.TRIGGER to start the operation. */
+ aes_a_op_trigger(aes_dev);
+
+ aes_a_dma_reset_and_activate_perf_cntr(aes_dev);
+
+ /* Form block B0 and write it to the AES/SM4 input buffer. */
+ rc = ocs_aes_ccm_write_b0(aes_dev, iv, adata_size, tag_size, src_size);
+ if (rc)
+ return rc;
+ /*
+ * Ensure there has been at least CCM_DECRYPT_DELAY_LAST_GCX_CLK_COUNT
+ * clock cycles since TRIGGER bit was set
+ */
+ aes_a_dma_wait_and_deactivate_perf_cntr(aes_dev,
+ CCM_DECRYPT_DELAY_LAST_GCX_CLK_COUNT);
+
+	/* Process Adata. */
+	rc = ocs_aes_ccm_do_adata(aes_dev, adata_dma_list, adata_size);
+	if (rc)
+		return rc;
+
+ /* For Encrypt case we just process the payload and return. */
+ if (instruction == OCS_ENCRYPT) {
+ return ocs_aes_ccm_encrypt_do_payload(aes_dev, dst_dma_list,
+ src_dma_list, src_size);
+ }
+	/* For Decrypt we need to process the payload and then the tag. */
+ rc = ocs_aes_ccm_decrypt_do_payload(aes_dev, dst_dma_list,
+ src_dma_list, src_size);
+ if (rc)
+ return rc;
+
+ /* Process MAC/tag directly: feed tag to engine and wait for IRQ. */
+ ocs_aes_ccm_write_encrypted_tag(aes_dev, in_tag, tag_size);
+ rc = ocs_aes_irq_enable_and_wait(aes_dev, AES_COMPLETE_INT);
+ if (rc)
+ return rc;
+
+ return ccm_compare_tag_to_yr(aes_dev, tag_size);
+}
+
+/**
+ * ocs_create_linked_list_from_sg() - Create OCS DMA linked list from SG list.
+ * @aes_dev: The OCS AES device the list will be created for.
+ * @sg: The SG list OCS DMA linked list will be created from. When
+ * passed to this function, @sg must have been already mapped
+ * with dma_map_sg().
+ * @sg_dma_count: The number of DMA-mapped entries in @sg. This must be the
+ * value returned by dma_map_sg() when @sg was mapped.
+ * @dll_desc: The OCS DMA dma_list to use to store information about the
+ * created linked list.
+ * @data_size: The size of the data (from the SG list) to be mapped into the
+ * OCS DMA linked list.
+ * @data_offset: The offset (within the SG list) of the data to be mapped.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
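+/*
+ * Typical usage (illustrative sketch): DMA-map a scatterlist with
+ * dma_map_sg(), pass the returned entry count here to build the OCS linked
+ * list, hand dll_desc->dma_addr to ocs_aes_op() (or the GCM/CCM variants) as
+ * the source/destination DMA list, and release dll_desc->vaddr with
+ * dma_free_coherent() once the operation has completed.
+ */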
+int ocs_create_linked_list_from_sg(const struct ocs_aes_dev *aes_dev,
+ struct scatterlist *sg,
+ int sg_dma_count,
+ struct ocs_dll_desc *dll_desc,
+ size_t data_size, size_t data_offset)
+{
+ struct ocs_dma_linked_list *ll = NULL;
+ struct scatterlist *sg_tmp;
+ unsigned int tmp;
+ int dma_nents;
+ int i;
+
+ if (!dll_desc || !sg || !aes_dev)
+ return -EINVAL;
+
+	/* Default values for when no dll_desc is created. */
+ dll_desc->vaddr = NULL;
+ dll_desc->dma_addr = DMA_MAPPING_ERROR;
+ dll_desc->size = 0;
+
+ if (data_size == 0)
+ return 0;
+
+ /* Loop over sg_list until we reach entry at specified offset. */
+ while (data_offset >= sg_dma_len(sg)) {
+ data_offset -= sg_dma_len(sg);
+ sg_dma_count--;
+ sg = sg_next(sg);
+ /* If we reach the end of the list, offset was invalid. */
+ if (!sg || sg_dma_count == 0)
+ return -EINVAL;
+ }
+
+ /* Compute number of DMA-mapped SG entries to add into OCS DMA list. */
+ dma_nents = 0;
+ tmp = 0;
+ sg_tmp = sg;
+ while (tmp < data_offset + data_size) {
+ /* If we reach the end of the list, data_size was invalid. */
+ if (!sg_tmp)
+ return -EINVAL;
+ tmp += sg_dma_len(sg_tmp);
+ dma_nents++;
+ sg_tmp = sg_next(sg_tmp);
+ }
+ if (dma_nents > sg_dma_count)
+ return -EINVAL;
+
+ /* Allocate the DMA list, one entry for each SG entry. */
+ dll_desc->size = sizeof(struct ocs_dma_linked_list) * dma_nents;
+ dll_desc->vaddr = dma_alloc_coherent(aes_dev->dev, dll_desc->size,
+ &dll_desc->dma_addr, GFP_KERNEL);
+ if (!dll_desc->vaddr)
+ return -ENOMEM;
+
+ /* Populate DMA linked list entries. */
+ ll = dll_desc->vaddr;
+ for (i = 0; i < dma_nents; i++, sg = sg_next(sg)) {
+ ll[i].src_addr = sg_dma_address(sg) + data_offset;
+ ll[i].src_len = (sg_dma_len(sg) - data_offset) < data_size ?
+ (sg_dma_len(sg) - data_offset) : data_size;
+ data_offset = 0;
+ data_size -= ll[i].src_len;
+ /* Current element points to the DMA address of the next one. */
+ ll[i].next = dll_desc->dma_addr + (sizeof(*ll) * (i + 1));
+ ll[i].ll_flags = 0;
+ }
+ /* Terminate last element. */
+ ll[i - 1].next = 0;
+ ll[i - 1].ll_flags = OCS_LL_DMA_FLAG_TERMINATE;
+
+ return 0;
+}
diff --git a/drivers/crypto/intel/keembay/ocs-aes.h b/drivers/crypto/intel/keembay/ocs-aes.h
new file mode 100644
index 0000000000..c035fc48b7
--- /dev/null
+++ b/drivers/crypto/intel/keembay/ocs-aes.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel Keem Bay OCS AES Crypto Driver.
+ *
+ * Copyright (C) 2018-2020 Intel Corporation
+ */
+
+#ifndef _CRYPTO_OCS_AES_H
+#define _CRYPTO_OCS_AES_H
+
+#include <linux/dma-mapping.h>
+
+enum ocs_cipher {
+ OCS_AES = 0,
+ OCS_SM4 = 1,
+};
+
+enum ocs_mode {
+ OCS_MODE_ECB = 0,
+ OCS_MODE_CBC = 1,
+ OCS_MODE_CTR = 2,
+ OCS_MODE_CCM = 6,
+ OCS_MODE_GCM = 7,
+ OCS_MODE_CTS = 9,
+};
+
+enum ocs_instruction {
+ OCS_ENCRYPT = 0,
+ OCS_DECRYPT = 1,
+ OCS_EXPAND = 2,
+ OCS_BYPASS = 3,
+};
+
+/**
+ * struct ocs_aes_dev - AES device context.
+ * @list: List head for insertion into device list hold
+ * by driver.
+ * @dev: OCS AES device.
+ * @irq: IRQ number.
+ * @base_reg: IO base address of OCS AES.
+ * @irq_copy_completion: Completion to indicate IRQ has been triggered.
+ * @dma_err_mask: Error reported by OCS DMA interrupts.
+ * @engine: Crypto engine for the device.
+ */
+struct ocs_aes_dev {
+ struct list_head list;
+ struct device *dev;
+ int irq;
+ void __iomem *base_reg;
+ struct completion irq_completion;
+ u32 dma_err_mask;
+ struct crypto_engine *engine;
+};
+
+/**
+ * struct ocs_dll_desc - Descriptor of an OCS DMA Linked List.
+ * @vaddr: Virtual address of the linked list head.
+ * @dma_addr: DMA address of the linked list head.
+ * @size: Size (in bytes) of the linked list.
+ */
+struct ocs_dll_desc {
+ void *vaddr;
+ dma_addr_t dma_addr;
+ size_t size;
+};
+
+int ocs_aes_set_key(struct ocs_aes_dev *aes_dev, const u32 key_size,
+ const u8 *key, const enum ocs_cipher cipher);
+
+int ocs_aes_op(struct ocs_aes_dev *aes_dev,
+ enum ocs_mode mode,
+ enum ocs_cipher cipher,
+ enum ocs_instruction instruction,
+ dma_addr_t dst_dma_list,
+ dma_addr_t src_dma_list,
+ u32 src_size,
+ u8 *iv,
+ u32 iv_size);
+
+/**
+ * ocs_aes_bypass_op() - Use OCS DMA to copy data.
+ * @aes_dev: The OCS AES device to use.
+ * @dst_dma_list: The OCS DMA list mapping the memory where input data
+ * will be copied to.
+ * @src_dma_list: The OCS DMA list mapping input data.
+ * @src_size: The amount of data to copy.
+ */
+static inline int ocs_aes_bypass_op(struct ocs_aes_dev *aes_dev,
+ dma_addr_t dst_dma_list,
+ dma_addr_t src_dma_list, u32 src_size)
+{
+ return ocs_aes_op(aes_dev, OCS_MODE_ECB, OCS_AES, OCS_BYPASS,
+ dst_dma_list, src_dma_list, src_size, NULL, 0);
+}
+
+int ocs_aes_gcm_op(struct ocs_aes_dev *aes_dev,
+ enum ocs_cipher cipher,
+ enum ocs_instruction instruction,
+ dma_addr_t dst_dma_list,
+ dma_addr_t src_dma_list,
+ u32 src_size,
+ const u8 *iv,
+ dma_addr_t aad_dma_list,
+ u32 aad_size,
+ u8 *out_tag,
+ u32 tag_size);
+
+int ocs_aes_ccm_op(struct ocs_aes_dev *aes_dev,
+ enum ocs_cipher cipher,
+ enum ocs_instruction instruction,
+ dma_addr_t dst_dma_list,
+ dma_addr_t src_dma_list,
+ u32 src_size,
+ u8 *iv,
+ dma_addr_t adata_dma_list,
+ u32 adata_size,
+ u8 *in_tag,
+ u32 tag_size);
+
+int ocs_create_linked_list_from_sg(const struct ocs_aes_dev *aes_dev,
+ struct scatterlist *sg,
+ int sg_dma_count,
+ struct ocs_dll_desc *dll_desc,
+ size_t data_size,
+ size_t data_offset);
+
+irqreturn_t ocs_aes_irq_handler(int irq, void *dev_id);
+
+#endif
diff --git a/drivers/crypto/intel/keembay/ocs-hcu.c b/drivers/crypto/intel/keembay/ocs-hcu.c
new file mode 100644
index 0000000000..deb9bd460e
--- /dev/null
+++ b/drivers/crypto/intel/keembay/ocs-hcu.c
@@ -0,0 +1,840 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Keem Bay OCS HCU Crypto Driver.
+ *
+ * Copyright (C) 2018-2020 Intel Corporation
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+
+#include <crypto/sha2.h>
+
+#include "ocs-hcu.h"
+
+/* Registers. */
+#define OCS_HCU_MODE 0x00
+#define OCS_HCU_CHAIN 0x04
+#define OCS_HCU_OPERATION 0x08
+#define OCS_HCU_KEY_0 0x0C
+#define OCS_HCU_ISR 0x50
+#define OCS_HCU_IER 0x54
+#define OCS_HCU_STATUS 0x58
+#define OCS_HCU_MSG_LEN_LO 0x60
+#define OCS_HCU_MSG_LEN_HI 0x64
+#define OCS_HCU_KEY_BYTE_ORDER_CFG 0x80
+#define OCS_HCU_DMA_SRC_ADDR 0x400
+#define OCS_HCU_DMA_SRC_SIZE 0x408
+#define OCS_HCU_DMA_DST_SIZE 0x40C
+#define OCS_HCU_DMA_DMA_MODE 0x410
+#define OCS_HCU_DMA_NEXT_SRC_DESCR 0x418
+#define OCS_HCU_DMA_MSI_ISR 0x480
+#define OCS_HCU_DMA_MSI_IER 0x484
+#define OCS_HCU_DMA_MSI_MASK 0x488
+
+/* Register bit definitions. */
+#define HCU_MODE_ALGO_SHIFT 16
+#define HCU_MODE_HMAC_SHIFT 22
+
+#define HCU_STATUS_BUSY BIT(0)
+
+#define HCU_BYTE_ORDER_SWAP BIT(0)
+
+#define HCU_IRQ_HASH_DONE BIT(2)
+#define HCU_IRQ_HASH_ERR_MASK (BIT(3) | BIT(1) | BIT(0))
+
+#define HCU_DMA_IRQ_SRC_DONE BIT(0)
+#define HCU_DMA_IRQ_SAI_ERR BIT(2)
+#define HCU_DMA_IRQ_BAD_COMP_ERR BIT(3)
+#define HCU_DMA_IRQ_INBUF_RD_ERR BIT(4)
+#define HCU_DMA_IRQ_INBUF_WD_ERR BIT(5)
+#define HCU_DMA_IRQ_OUTBUF_WR_ERR BIT(6)
+#define HCU_DMA_IRQ_OUTBUF_RD_ERR BIT(7)
+#define HCU_DMA_IRQ_CRD_ERR BIT(8)
+#define HCU_DMA_IRQ_ERR_MASK (HCU_DMA_IRQ_SAI_ERR | \
+ HCU_DMA_IRQ_BAD_COMP_ERR | \
+ HCU_DMA_IRQ_INBUF_RD_ERR | \
+ HCU_DMA_IRQ_INBUF_WD_ERR | \
+ HCU_DMA_IRQ_OUTBUF_WR_ERR | \
+ HCU_DMA_IRQ_OUTBUF_RD_ERR | \
+ HCU_DMA_IRQ_CRD_ERR)
+
+#define HCU_DMA_SNOOP_MASK (0x7 << 28)
+#define HCU_DMA_SRC_LL_EN BIT(25)
+#define HCU_DMA_EN BIT(31)
+
+#define OCS_HCU_ENDIANNESS_VALUE 0x2A
+
+#define HCU_DMA_MSI_UNMASK BIT(0)
+#define HCU_DMA_MSI_DISABLE 0
+#define HCU_IRQ_DISABLE 0
+
+#define OCS_HCU_START BIT(0)
+#define OCS_HCU_TERMINATE BIT(1)
+
+#define OCS_LL_DMA_FLAG_TERMINATE BIT(31)
+
+#define OCS_HCU_HW_KEY_LEN_U32 (OCS_HCU_HW_KEY_LEN / sizeof(u32))
+
+#define HCU_DATA_WRITE_ENDIANNESS_OFFSET 26
+
+#define OCS_HCU_NUM_CHAINS_SHA256_224_SM3 (SHA256_DIGEST_SIZE / sizeof(u32))
+#define OCS_HCU_NUM_CHAINS_SHA384_512 (SHA512_DIGEST_SIZE / sizeof(u32))
+
+/*
+ * While polling on a busy HCU, wait maximum 200us between one check and the
+ * other.
+ */
+#define OCS_HCU_WAIT_BUSY_RETRY_DELAY_US 200
+/* Wait on a busy HCU for maximum 1 second. */
+#define OCS_HCU_WAIT_BUSY_TIMEOUT_US 1000000
+
+/**
+ * struct ocs_hcu_dma_entry - An entry in an OCS DMA linked list.
+ * @src_addr: Source address of the data.
+ * @src_len: Length of data to be fetched.
+ * @nxt_desc: Next descriptor to fetch.
+ * @ll_flags: Flags (Freeze / Terminate) for the DMA engine.
+ */
+struct ocs_hcu_dma_entry {
+ u32 src_addr;
+ u32 src_len;
+ u32 nxt_desc;
+ u32 ll_flags;
+};
+
+/**
+ * struct ocs_hcu_dma_list - OCS-specific DMA linked list.
+ * @head: The head of the list (points to the array backing the list).
+ * @tail: The current tail of the list; NULL if the list is empty.
+ * @dma_addr: The DMA address of @head (i.e., the DMA address of the backing
+ * array).
+ * @max_nents: Maximum number of entries in the list (i.e., number of elements
+ * in the backing array).
+ *
+ * The OCS DMA list is an array-backed list of OCS DMA descriptors. The array
+ * backing the list is allocated with dma_alloc_coherent() and pointed by
+ * @head.
+ */
+struct ocs_hcu_dma_list {
+ struct ocs_hcu_dma_entry *head;
+ struct ocs_hcu_dma_entry *tail;
+ dma_addr_t dma_addr;
+ size_t max_nents;
+};
+
+static inline u32 ocs_hcu_num_chains(enum ocs_hcu_algo algo)
+{
+ switch (algo) {
+ case OCS_HCU_ALGO_SHA224:
+ case OCS_HCU_ALGO_SHA256:
+ case OCS_HCU_ALGO_SM3:
+ return OCS_HCU_NUM_CHAINS_SHA256_224_SM3;
+ case OCS_HCU_ALGO_SHA384:
+ case OCS_HCU_ALGO_SHA512:
+ return OCS_HCU_NUM_CHAINS_SHA384_512;
+ default:
+ return 0;
+	}
+}
+
+static inline u32 ocs_hcu_digest_size(enum ocs_hcu_algo algo)
+{
+ switch (algo) {
+ case OCS_HCU_ALGO_SHA224:
+ return SHA224_DIGEST_SIZE;
+ case OCS_HCU_ALGO_SHA256:
+ case OCS_HCU_ALGO_SM3:
+		/* SM3 shares the same digest size. */
+ return SHA256_DIGEST_SIZE;
+ case OCS_HCU_ALGO_SHA384:
+ return SHA384_DIGEST_SIZE;
+ case OCS_HCU_ALGO_SHA512:
+ return SHA512_DIGEST_SIZE;
+ default:
+ return 0;
+ }
+}
+
+/**
+ * ocs_hcu_wait_busy() - Wait for the OCS HCU hardware to become usable.
+ * @hcu_dev: OCS HCU device to wait for.
+ *
+ * Return: 0 if the device is free, -ETIMEDOUT if the device is busy and the
+ * internal timeout has expired.
+ */
+static int ocs_hcu_wait_busy(struct ocs_hcu_dev *hcu_dev)
+{
+ long val;
+
+ return readl_poll_timeout(hcu_dev->io_base + OCS_HCU_STATUS, val,
+ !(val & HCU_STATUS_BUSY),
+ OCS_HCU_WAIT_BUSY_RETRY_DELAY_US,
+ OCS_HCU_WAIT_BUSY_TIMEOUT_US);
+}
+
+static void ocs_hcu_done_irq_en(struct ocs_hcu_dev *hcu_dev)
+{
+ /* Clear any pending interrupts. */
+ writel(0xFFFFFFFF, hcu_dev->io_base + OCS_HCU_ISR);
+ hcu_dev->irq_err = false;
+ /* Enable error and HCU done interrupts. */
+ writel(HCU_IRQ_HASH_DONE | HCU_IRQ_HASH_ERR_MASK,
+ hcu_dev->io_base + OCS_HCU_IER);
+}
+
+static void ocs_hcu_dma_irq_en(struct ocs_hcu_dev *hcu_dev)
+{
+ /* Clear any pending interrupts. */
+ writel(0xFFFFFFFF, hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
+ hcu_dev->irq_err = false;
+	/* Enable only the DMA source-completion and error interrupts. */
+ writel(HCU_DMA_IRQ_ERR_MASK | HCU_DMA_IRQ_SRC_DONE,
+ hcu_dev->io_base + OCS_HCU_DMA_MSI_IER);
+ /* Unmask */
+ writel(HCU_DMA_MSI_UNMASK, hcu_dev->io_base + OCS_HCU_DMA_MSI_MASK);
+}
+
+static void ocs_hcu_irq_dis(struct ocs_hcu_dev *hcu_dev)
+{
+ writel(HCU_IRQ_DISABLE, hcu_dev->io_base + OCS_HCU_IER);
+ writel(HCU_DMA_MSI_DISABLE, hcu_dev->io_base + OCS_HCU_DMA_MSI_IER);
+}
+
+static int ocs_hcu_wait_and_disable_irq(struct ocs_hcu_dev *hcu_dev)
+{
+ int rc;
+
+ rc = wait_for_completion_interruptible(&hcu_dev->irq_done);
+ if (rc)
+ goto exit;
+
+ if (hcu_dev->irq_err) {
+ /* Unset flag and return error. */
+ hcu_dev->irq_err = false;
+ rc = -EIO;
+ goto exit;
+ }
+
+exit:
+ ocs_hcu_irq_dis(hcu_dev);
+
+ return rc;
+}
+
+/**
+ * ocs_hcu_get_intermediate_data() - Get intermediate data.
+ * @hcu_dev: The target HCU device.
+ * @data: Where to store the intermediate.
+ * @algo: The algorithm being used.
+ *
+ * This function is used to save the current hashing process state in order to
+ * continue it in the future.
+ *
+ * Note: once all data has been processed, the intermediate data actually
+ * contains the hashing result. So this function is also used to retrieve the
+ * final result of a hashing process.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int ocs_hcu_get_intermediate_data(struct ocs_hcu_dev *hcu_dev,
+ struct ocs_hcu_idata *data,
+ enum ocs_hcu_algo algo)
+{
+ const int n = ocs_hcu_num_chains(algo);
+ u32 *chain;
+ int rc;
+ int i;
+
+ /* Data not requested. */
+ if (!data)
+ return -EINVAL;
+
+ chain = (u32 *)data->digest;
+
+ /* Ensure that the OCS is no longer busy before reading the chains. */
+ rc = ocs_hcu_wait_busy(hcu_dev);
+ if (rc)
+ return rc;
+
+ /*
+	 * This loop is safe because data->digest is an array of
+ * SHA512_DIGEST_SIZE bytes and the maximum value returned by
+ * ocs_hcu_num_chains() is OCS_HCU_NUM_CHAINS_SHA384_512 which is equal
+ * to SHA512_DIGEST_SIZE / sizeof(u32).
+ */
+ for (i = 0; i < n; i++)
+ chain[i] = readl(hcu_dev->io_base + OCS_HCU_CHAIN);
+
+ data->msg_len_lo = readl(hcu_dev->io_base + OCS_HCU_MSG_LEN_LO);
+ data->msg_len_hi = readl(hcu_dev->io_base + OCS_HCU_MSG_LEN_HI);
+
+ return 0;
+}
+
+/**
+ * ocs_hcu_set_intermediate_data() - Set intermediate data.
+ * @hcu_dev: The target HCU device.
+ * @data: The intermediate data to be set.
+ * @algo: The algorithm being used.
+ *
+ * This function is used to continue a previous hashing process.
+ */
+static void ocs_hcu_set_intermediate_data(struct ocs_hcu_dev *hcu_dev,
+ const struct ocs_hcu_idata *data,
+ enum ocs_hcu_algo algo)
+{
+ const int n = ocs_hcu_num_chains(algo);
+ u32 *chain = (u32 *)data->digest;
+ int i;
+
+ /*
+	 * This loop is safe because data->digest is an array of
+ * SHA512_DIGEST_SIZE bytes and the maximum value returned by
+ * ocs_hcu_num_chains() is OCS_HCU_NUM_CHAINS_SHA384_512 which is equal
+ * to SHA512_DIGEST_SIZE / sizeof(u32).
+ */
+ for (i = 0; i < n; i++)
+ writel(chain[i], hcu_dev->io_base + OCS_HCU_CHAIN);
+
+ writel(data->msg_len_lo, hcu_dev->io_base + OCS_HCU_MSG_LEN_LO);
+ writel(data->msg_len_hi, hcu_dev->io_base + OCS_HCU_MSG_LEN_HI);
+}
+
+static int ocs_hcu_get_digest(struct ocs_hcu_dev *hcu_dev,
+ enum ocs_hcu_algo algo, u8 *dgst, size_t dgst_len)
+{
+ u32 *chain;
+ int rc;
+ int i;
+
+ if (!dgst)
+ return -EINVAL;
+
+ /* Length of the output buffer must match the algo digest size. */
+ if (dgst_len != ocs_hcu_digest_size(algo))
+ return -EINVAL;
+
+ /* Ensure that the OCS is no longer busy before reading the chains. */
+ rc = ocs_hcu_wait_busy(hcu_dev);
+ if (rc)
+ return rc;
+
+ chain = (u32 *)dgst;
+ for (i = 0; i < dgst_len / sizeof(u32); i++)
+ chain[i] = readl(hcu_dev->io_base + OCS_HCU_CHAIN);
+
+ return 0;
+}
+
+/**
+ * ocs_hcu_hw_cfg() - Configure the HCU hardware.
+ * @hcu_dev: The HCU device to configure.
+ * @algo: The algorithm to be used by the HCU device.
+ * @use_hmac: Whether or not HW HMAC should be used.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int ocs_hcu_hw_cfg(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
+ bool use_hmac)
+{
+ u32 cfg;
+ int rc;
+
+ if (algo != OCS_HCU_ALGO_SHA256 && algo != OCS_HCU_ALGO_SHA224 &&
+ algo != OCS_HCU_ALGO_SHA384 && algo != OCS_HCU_ALGO_SHA512 &&
+ algo != OCS_HCU_ALGO_SM3)
+ return -EINVAL;
+
+ rc = ocs_hcu_wait_busy(hcu_dev);
+ if (rc)
+ return rc;
+
+ /* Ensure interrupts are disabled. */
+ ocs_hcu_irq_dis(hcu_dev);
+
+ /* Configure endianness, hashing algorithm and HW HMAC (if needed) */
+ cfg = OCS_HCU_ENDIANNESS_VALUE << HCU_DATA_WRITE_ENDIANNESS_OFFSET;
+ cfg |= algo << HCU_MODE_ALGO_SHIFT;
+ if (use_hmac)
+ cfg |= BIT(HCU_MODE_HMAC_SHIFT);
+
+ writel(cfg, hcu_dev->io_base + OCS_HCU_MODE);
+
+ return 0;
+}
+
+/**
+ * ocs_hcu_clear_key() - Clear key stored in OCS HMAC KEY registers.
+ * @hcu_dev: The OCS HCU device whose key registers should be cleared.
+ */
+static void ocs_hcu_clear_key(struct ocs_hcu_dev *hcu_dev)
+{
+ int reg_off;
+
+ /* Clear OCS_HCU_KEY_[0..15] */
+ for (reg_off = 0; reg_off < OCS_HCU_HW_KEY_LEN; reg_off += sizeof(u32))
+ writel(0, hcu_dev->io_base + OCS_HCU_KEY_0 + reg_off);
+}
+
+/**
+ * ocs_hcu_write_key() - Write key to OCS HMAC KEY registers.
+ * @hcu_dev: The OCS HCU device the key should be written to.
+ * @key: The key to be written.
+ * @len: The size of the key to write. It must be OCS_HCU_HW_KEY_LEN.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int ocs_hcu_write_key(struct ocs_hcu_dev *hcu_dev, const u8 *key, size_t len)
+{
+ u32 key_u32[OCS_HCU_HW_KEY_LEN_U32];
+ int i;
+
+ if (len > OCS_HCU_HW_KEY_LEN)
+ return -EINVAL;
+
+ /* Copy key into temporary u32 array. */
+ memcpy(key_u32, key, len);
+
+ /*
+ * Hardware requires all the bytes of the HW Key vector to be
+ * written. So pad with zero until we reach OCS_HCU_HW_KEY_LEN.
+ */
+ memzero_explicit((u8 *)key_u32 + len, OCS_HCU_HW_KEY_LEN - len);
+
+ /*
+ * OCS hardware expects the MSB of the key to be written at the highest
+	 * address of the HCU Key vector; in other words, the key must be
+	 * written in reverse order.
+	 *
+	 * Therefore, we first enable byte swapping for the HCU key vector,
+	 * so that the bytes of each 32-bit word written to OCS_HCU_KEY_[0..15]
+	 * will be swapped:
+ * 3 <---> 0, 2 <---> 1.
+ */
+ writel(HCU_BYTE_ORDER_SWAP,
+ hcu_dev->io_base + OCS_HCU_KEY_BYTE_ORDER_CFG);
+ /*
+ * And then we write the 32-bit words composing the key starting from
+ * the end of the key.
+ */
+ for (i = 0; i < OCS_HCU_HW_KEY_LEN_U32; i++)
+ writel(key_u32[OCS_HCU_HW_KEY_LEN_U32 - 1 - i],
+ hcu_dev->io_base + OCS_HCU_KEY_0 + (sizeof(u32) * i));
+
+ memzero_explicit(key_u32, OCS_HCU_HW_KEY_LEN);
+
+ return 0;
+}
+
+/**
+ * ocs_hcu_ll_dma_start() - Start OCS HCU hashing via DMA
+ * @hcu_dev: The OCS HCU device to use.
+ * @dma_list: The OCS DMA list mapping the data to hash.
+ * @finalize: Whether or not this is the last hashing operation and therefore
+ *		the final hash should be computed even if the data is not
+ *		block-aligned.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int ocs_hcu_ll_dma_start(struct ocs_hcu_dev *hcu_dev,
+ const struct ocs_hcu_dma_list *dma_list,
+ bool finalize)
+{
+ u32 cfg = HCU_DMA_SNOOP_MASK | HCU_DMA_SRC_LL_EN | HCU_DMA_EN;
+ int rc;
+
+ if (!dma_list)
+ return -EINVAL;
+
+ /*
+ * For final requests we use HCU_DONE IRQ to be notified when all input
+ * data has been processed by the HCU; however, we cannot do so for
+ * non-final requests, because we don't get a HCU_DONE IRQ when we
+ * don't terminate the operation.
+ *
+ * Therefore, for non-final requests, we use the DMA IRQ, which
+	 * triggers when the DMA has finished feeding all the input data to the
+ * HCU, but the HCU may still be processing it. This is fine, since we
+ * will wait for the HCU processing to be completed when we try to read
+ * intermediate results, in ocs_hcu_get_intermediate_data().
+ */
+ if (finalize)
+ ocs_hcu_done_irq_en(hcu_dev);
+ else
+ ocs_hcu_dma_irq_en(hcu_dev);
+
+ reinit_completion(&hcu_dev->irq_done);
+ writel(dma_list->dma_addr, hcu_dev->io_base + OCS_HCU_DMA_NEXT_SRC_DESCR);
+ writel(0, hcu_dev->io_base + OCS_HCU_DMA_SRC_SIZE);
+ writel(0, hcu_dev->io_base + OCS_HCU_DMA_DST_SIZE);
+
+ writel(OCS_HCU_START, hcu_dev->io_base + OCS_HCU_OPERATION);
+
+ writel(cfg, hcu_dev->io_base + OCS_HCU_DMA_DMA_MODE);
+
+ if (finalize)
+ writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);
+
+ rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+struct ocs_hcu_dma_list *ocs_hcu_dma_list_alloc(struct ocs_hcu_dev *hcu_dev,
+ int max_nents)
+{
+ struct ocs_hcu_dma_list *dma_list;
+
+ dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);
+ if (!dma_list)
+ return NULL;
+
+ /* Allocate the array of DMA descriptors backing the list (max_nents entries). */
+ dma_list->head = dma_alloc_coherent(hcu_dev->dev,
+ sizeof(*dma_list->head) * max_nents,
+ &dma_list->dma_addr, GFP_KERNEL);
+ if (!dma_list->head) {
+ kfree(dma_list);
+ return NULL;
+ }
+ dma_list->max_nents = max_nents;
+ dma_list->tail = NULL;
+
+ return dma_list;
+}
+
+void ocs_hcu_dma_list_free(struct ocs_hcu_dev *hcu_dev,
+ struct ocs_hcu_dma_list *dma_list)
+{
+ if (!dma_list)
+ return;
+
+ dma_free_coherent(hcu_dev->dev,
+ sizeof(*dma_list->head) * dma_list->max_nents,
+ dma_list->head, dma_list->dma_addr);
+
+ kfree(dma_list);
+}
+
+/* Add a new DMA entry at the end of the OCS DMA list. */
+int ocs_hcu_dma_list_add_tail(struct ocs_hcu_dev *hcu_dev,
+ struct ocs_hcu_dma_list *dma_list,
+ dma_addr_t addr, u32 len)
+{
+ struct device *dev = hcu_dev->dev;
+ struct ocs_hcu_dma_entry *old_tail;
+ struct ocs_hcu_dma_entry *new_tail;
+
+ if (!len)
+ return 0;
+
+ if (!dma_list)
+ return -EINVAL;
+
+ if (addr & ~OCS_HCU_DMA_BIT_MASK) {
+ dev_err(dev,
+ "Unexpected error: Invalid DMA address for OCS HCU\n");
+ return -EINVAL;
+ }
+
+ old_tail = dma_list->tail;
+ new_tail = old_tail ? old_tail + 1 : dma_list->head;
+
+ /* Check if list is full. */
+ if (new_tail - dma_list->head >= dma_list->max_nents)
+ return -ENOMEM;
+
+ /*
+ * If there was an old tail (i.e., this is not the first element we are
+ * adding), un-terminate the old tail and make it point to the new one.
+ */
+ if (old_tail) {
+ old_tail->ll_flags &= ~OCS_LL_DMA_FLAG_TERMINATE;
+ /*
+ * The old tail 'nxt_desc' must point to the DMA address of the
+ * new tail.
+ */
+ old_tail->nxt_desc = dma_list->dma_addr +
+ sizeof(*dma_list->tail) * (new_tail -
+ dma_list->head);
+ }
+
+ new_tail->src_addr = (u32)addr;
+ new_tail->src_len = (u32)len;
+ new_tail->ll_flags = OCS_LL_DMA_FLAG_TERMINATE;
+ new_tail->nxt_desc = 0;
+
+ /* Update list tail with new tail. */
+ dma_list->tail = new_tail;
+
+ return 0;
+}
+
+/**
+ * ocs_hcu_hash_init() - Initialize hash operation context.
+ * @ctx: The context to initialize.
+ * @algo: The hashing algorithm to use.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int ocs_hcu_hash_init(struct ocs_hcu_hash_ctx *ctx, enum ocs_hcu_algo algo)
+{
+ if (!ctx)
+ return -EINVAL;
+
+ ctx->algo = algo;
+ ctx->idata.msg_len_lo = 0;
+ ctx->idata.msg_len_hi = 0;
+ /* No need to set idata.digest to 0. */
+
+ return 0;
+}
+
+/**
+ * ocs_hcu_hash_update() - Perform a hashing iteration.
+ * @hcu_dev: The OCS HCU device to use.
+ * @ctx: The OCS HCU hashing context.
+ * @dma_list: The OCS DMA list mapping the input data to process.
+ *
+ * Return: 0 on success; negative error code otherwise.
+ */
+int ocs_hcu_hash_update(struct ocs_hcu_dev *hcu_dev,
+ struct ocs_hcu_hash_ctx *ctx,
+ const struct ocs_hcu_dma_list *dma_list)
+{
+ int rc;
+
+ if (!hcu_dev || !ctx)
+ return -EINVAL;
+
+ /* Configure the hardware for the current request. */
+ rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
+ if (rc)
+ return rc;
+
+ /* If we already processed some data, idata needs to be set. */
+ if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
+ ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
+
+ /* Start linked-list DMA hashing. */
+ rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, false);
+ if (rc)
+ return rc;
+
+ /* Update idata and return. */
+ return ocs_hcu_get_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
+}
+
+/**
+ * ocs_hcu_hash_finup() - Update and finalize hash computation.
+ * @hcu_dev: The OCS HCU device to use.
+ * @ctx: The OCS HCU hashing context.
+ * @dma_list: The OCS DMA list mapping the input data to process.
+ * @dgst: The buffer where to save the computed digest.
+ * @dgst_len: The length of @dgst.
+ *
+ * Return: 0 on success; negative error code otherwise.
+ */
+int ocs_hcu_hash_finup(struct ocs_hcu_dev *hcu_dev,
+ const struct ocs_hcu_hash_ctx *ctx,
+ const struct ocs_hcu_dma_list *dma_list,
+ u8 *dgst, size_t dgst_len)
+{
+ int rc;
+
+ if (!hcu_dev || !ctx)
+ return -EINVAL;
+
+ /* Configure the hardware for the current request. */
+ rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
+ if (rc)
+ return rc;
+
+ /* If we already processed some data, idata needs to be set. */
+ if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
+ ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
+
+ /* Start linked-list DMA hashing. */
+ rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, true);
+ if (rc)
+ return rc;
+
+ /* Get digest and return. */
+ return ocs_hcu_get_digest(hcu_dev, ctx->algo, dgst, dgst_len);
+}
+
+/**
+ * ocs_hcu_hash_final() - Finalize hash computation.
+ * @hcu_dev: The OCS HCU device to use.
+ * @ctx: The OCS HCU hashing context.
+ * @dgst: The buffer where to save the computed digest.
+ * @dgst_len: The length of @dgst.
+ *
+ * Return: 0 on success; negative error code otherwise.
+ */
+int ocs_hcu_hash_final(struct ocs_hcu_dev *hcu_dev,
+ const struct ocs_hcu_hash_ctx *ctx, u8 *dgst,
+ size_t dgst_len)
+{
+ int rc;
+
+ if (!hcu_dev || !ctx)
+ return -EINVAL;
+
+ /* Configure the hardware for the current request. */
+ rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
+ if (rc)
+ return rc;
+
+ /* If we already processed some data, idata needs to be set. */
+ if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
+ ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
+
+ /*
+ * Enable HCU interrupts, so that HCU_DONE will be triggered once the
+ * final hash is computed.
+ */
+ ocs_hcu_done_irq_en(hcu_dev);
+ reinit_completion(&hcu_dev->irq_done);
+ writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);
+
+ rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
+ if (rc)
+ return rc;
+
+ /* Get digest and return. */
+ return ocs_hcu_get_digest(hcu_dev, ctx->algo, dgst, dgst_len);
+}
+
+/**
+ * ocs_hcu_digest() - Compute hash digest.
+ * @hcu_dev: The OCS HCU device to use.
+ * @algo: The hash algorithm to use.
+ * @data: The input data to process.
+ * @data_len: The length of @data.
+ * @dgst: The buffer where to save the computed digest.
+ * @dgst_len: The length of @dgst.
+ *
+ * Return: 0 on success; negative error code otherwise.
+ */
+int ocs_hcu_digest(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
+ void *data, size_t data_len, u8 *dgst, size_t dgst_len)
+{
+ struct device *dev = hcu_dev->dev;
+ dma_addr_t dma_handle;
+ u32 reg;
+ int rc;
+
+ /* Configure the hardware for the current request. */
+ rc = ocs_hcu_hw_cfg(hcu_dev, algo, false);
+ if (rc)
+ return rc;
+
+ dma_handle = dma_map_single(dev, data, data_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_handle))
+ return -EIO;
+
+ reg = HCU_DMA_SNOOP_MASK | HCU_DMA_EN;
+
+ ocs_hcu_done_irq_en(hcu_dev);
+
+ reinit_completion(&hcu_dev->irq_done);
+
+ writel(dma_handle, hcu_dev->io_base + OCS_HCU_DMA_SRC_ADDR);
+ writel(data_len, hcu_dev->io_base + OCS_HCU_DMA_SRC_SIZE);
+ writel(OCS_HCU_START, hcu_dev->io_base + OCS_HCU_OPERATION);
+ writel(reg, hcu_dev->io_base + OCS_HCU_DMA_DMA_MODE);
+
+ writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);
+
+ rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
+
+ dma_unmap_single(dev, dma_handle, data_len, DMA_TO_DEVICE);
+
+ if (rc)
+ return rc;
+
+ return ocs_hcu_get_digest(hcu_dev, algo, dgst, dgst_len);
+}
+
+/**
+ * ocs_hcu_hmac() - Compute HMAC.
+ * @hcu_dev: The OCS HCU device to use.
+ * @algo: The hash algorithm to use with HMAC.
+ * @key: The key to use.
+ * @key_len: The length of @key.
+ * @dma_list: The OCS DMA list mapping the input data to process.
+ * @dgst: The buffer where to save the computed HMAC.
+ * @dgst_len: The length of @dgst.
+ *
+ * Return: 0 on success; negative error code otherwise.
+ */
+int ocs_hcu_hmac(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
+ const u8 *key, size_t key_len,
+ const struct ocs_hcu_dma_list *dma_list,
+ u8 *dgst, size_t dgst_len)
+{
+ int rc;
+
+ /* Ensure a non-empty key has been provided. */
+ if (!key || key_len == 0)
+ return -EINVAL;
+
+ /* Configure the hardware for the current request. */
+ rc = ocs_hcu_hw_cfg(hcu_dev, algo, true);
+ if (rc)
+ return rc;
+
+ rc = ocs_hcu_write_key(hcu_dev, key, key_len);
+ if (rc)
+ return rc;
+
+ rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, true);
+
+ /* Clear HW key before processing return code. */
+ ocs_hcu_clear_key(hcu_dev);
+
+ if (rc)
+ return rc;
+
+ return ocs_hcu_get_digest(hcu_dev, algo, dgst, dgst_len);
+}
+
+irqreturn_t ocs_hcu_irq_handler(int irq, void *dev_id)
+{
+ struct ocs_hcu_dev *hcu_dev = dev_id;
+ u32 hcu_irq;
+ u32 dma_irq;
+
+ /* Read and clear the HCU interrupt. */
+ hcu_irq = readl(hcu_dev->io_base + OCS_HCU_ISR);
+ writel(hcu_irq, hcu_dev->io_base + OCS_HCU_ISR);
+
+ /* Read and clear the HCU DMA interrupt. */
+ dma_irq = readl(hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
+ writel(dma_irq, hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
+
+ /* Check for errors. */
+ if (hcu_irq & HCU_IRQ_HASH_ERR_MASK || dma_irq & HCU_DMA_IRQ_ERR_MASK) {
+ hcu_dev->irq_err = true;
+ goto complete;
+ }
+
+ /* Check for DONE IRQs. */
+ if (hcu_irq & HCU_IRQ_HASH_DONE || dma_irq & HCU_DMA_IRQ_SRC_DONE)
+ goto complete;
+
+ return IRQ_NONE;
+
+complete:
+ complete(&hcu_dev->irq_done);
+
+ return IRQ_HANDLED;
+}
+
+MODULE_LICENSE("GPL");
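
A minimal usage sketch of the one-shot HMAC path exported above (illustrative only, not part of the patch; the wrapper name and the assumption that the caller already holds a probed struct ocs_hcu_dev and a DMA-mapped input buffer are mine):

static int example_hmac_sha256(struct ocs_hcu_dev *hcu, const u8 *key,
                               size_t key_len, dma_addr_t buf_dma, u32 buf_len,
                               u8 *out, size_t out_len)
{
        struct ocs_hcu_dma_list *dl;
        int rc;

        /* One descriptor is enough for a single contiguous buffer. */
        dl = ocs_hcu_dma_list_alloc(hcu, 1);
        if (!dl)
                return -ENOMEM;

        rc = ocs_hcu_dma_list_add_tail(hcu, dl, buf_dma, buf_len);
        if (!rc)
                rc = ocs_hcu_hmac(hcu, OCS_HCU_ALGO_SHA256, key, key_len,
                                  dl, out, out_len);

        ocs_hcu_dma_list_free(hcu, dl);
        return rc;
}

For scatter-gather input the list is simply allocated with a larger max_nents and ocs_hcu_dma_list_add_tail() is called once per mapped segment; each call moves the OCS_LL_DMA_FLAG_TERMINATE flag to the new tail.
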
diff --git a/drivers/crypto/intel/keembay/ocs-hcu.h b/drivers/crypto/intel/keembay/ocs-hcu.h
new file mode 100644
index 0000000000..fbbbb92a05
--- /dev/null
+++ b/drivers/crypto/intel/keembay/ocs-hcu.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel Keem Bay OCS HCU Crypto Driver.
+ *
+ * Copyright (C) 2018-2020 Intel Corporation
+ */
+
+#include <linux/dma-mapping.h>
+
+#ifndef _CRYPTO_OCS_HCU_H
+#define _CRYPTO_OCS_HCU_H
+
+#define OCS_HCU_DMA_BIT_MASK DMA_BIT_MASK(32)
+
+#define OCS_HCU_HW_KEY_LEN 64
+
+struct ocs_hcu_dma_list;
+
+enum ocs_hcu_algo {
+ OCS_HCU_ALGO_SHA256 = 2,
+ OCS_HCU_ALGO_SHA224 = 3,
+ OCS_HCU_ALGO_SHA384 = 4,
+ OCS_HCU_ALGO_SHA512 = 5,
+ OCS_HCU_ALGO_SM3 = 6,
+};
+
+/**
+ * struct ocs_hcu_dev - OCS HCU device context.
+ * @list: List of device contexts.
+ * @dev: OCS HCU device.
+ * @io_base: Base address of OCS HCU registers.
+ * @engine: Crypto engine for the device.
+ * @irq: IRQ number.
+ * @irq_done: Completion for IRQ.
+ * @irq_err: Flag indicating an IRQ error has happened.
+ */
+struct ocs_hcu_dev {
+ struct list_head list;
+ struct device *dev;
+ void __iomem *io_base;
+ struct crypto_engine *engine;
+ int irq;
+ struct completion irq_done;
+ bool irq_err;
+};
+
+/**
+ * struct ocs_hcu_idata - Intermediate data generated by the HCU.
+ * @msg_len_lo: Length of data the HCU has operated on in bits, low 32b.
+ * @msg_len_hi: Length of data the HCU has operated on in bits, high 32b.
+ * @digest: The digest read from the HCU. If the HCU is terminated, it will
+ * contain the actual hash digest. Otherwise it is the intermediate
+ * state.
+ */
+struct ocs_hcu_idata {
+ u32 msg_len_lo;
+ u32 msg_len_hi;
+ u8 digest[SHA512_DIGEST_SIZE];
+};
+
+/**
+ * struct ocs_hcu_hash_ctx - Context for OCS HCU hashing operation.
+ * @algo: The hashing algorithm being used.
+ * @idata: The current intermediate data.
+ */
+struct ocs_hcu_hash_ctx {
+ enum ocs_hcu_algo algo;
+ struct ocs_hcu_idata idata;
+};
+
+irqreturn_t ocs_hcu_irq_handler(int irq, void *dev_id);
+
+struct ocs_hcu_dma_list *ocs_hcu_dma_list_alloc(struct ocs_hcu_dev *hcu_dev,
+ int max_nents);
+
+void ocs_hcu_dma_list_free(struct ocs_hcu_dev *hcu_dev,
+ struct ocs_hcu_dma_list *dma_list);
+
+int ocs_hcu_dma_list_add_tail(struct ocs_hcu_dev *hcu_dev,
+ struct ocs_hcu_dma_list *dma_list,
+ dma_addr_t addr, u32 len);
+
+int ocs_hcu_hash_init(struct ocs_hcu_hash_ctx *ctx, enum ocs_hcu_algo algo);
+
+int ocs_hcu_hash_update(struct ocs_hcu_dev *hcu_dev,
+ struct ocs_hcu_hash_ctx *ctx,
+ const struct ocs_hcu_dma_list *dma_list);
+
+int ocs_hcu_hash_finup(struct ocs_hcu_dev *hcu_dev,
+ const struct ocs_hcu_hash_ctx *ctx,
+ const struct ocs_hcu_dma_list *dma_list,
+ u8 *dgst, size_t dgst_len);
+
+int ocs_hcu_hash_final(struct ocs_hcu_dev *hcu_dev,
+ const struct ocs_hcu_hash_ctx *ctx, u8 *dgst,
+ size_t dgst_len);
+
+int ocs_hcu_digest(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
+ void *data, size_t data_len, u8 *dgst, size_t dgst_len);
+
+int ocs_hcu_hmac(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
+ const u8 *key, size_t key_len,
+ const struct ocs_hcu_dma_list *dma_list,
+ u8 *dgst, size_t dgst_len);
+
+#endif /* _CRYPTO_OCS_HCU_H */
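
The init/update/finup entry points above imply the usual streaming pattern: one ocs_hcu_hash_ctx per message, with the intermediate state carried in ctx.idata between DMA passes. A minimal sketch (not part of the patch; the helper name and the caller-built DMA lists are assumptions, and non-final chunks are expected to be block-aligned, as noted for the finalize path in ocs-hcu.c):

static int example_sha256_stream(struct ocs_hcu_dev *hcu,
                                 const struct ocs_hcu_dma_list **chunks,
                                 int nr_chunks,
                                 const struct ocs_hcu_dma_list *last,
                                 u8 *dgst, size_t dgst_len)
{
        struct ocs_hcu_hash_ctx ctx;
        int i, rc;

        rc = ocs_hcu_hash_init(&ctx, OCS_HCU_ALGO_SHA256);
        if (rc)
                return rc;

        /* Each update feeds one DMA list and refreshes ctx.idata. */
        for (i = 0; i < nr_chunks; i++) {
                rc = ocs_hcu_hash_update(hcu, &ctx, chunks[i]);
                if (rc)
                        return rc;
        }

        /* Final pass: hash the last list and read back the digest. */
        return ocs_hcu_hash_finup(hcu, &ctx, last, dgst, dgst_len);
}
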
diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig
new file mode 100644
index 0000000000..1220cc86f9
--- /dev/null
+++ b/drivers/crypto/intel/qat/Kconfig
@@ -0,0 +1,97 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config CRYPTO_DEV_QAT
+ tristate
+ select CRYPTO_AEAD
+ select CRYPTO_AUTHENC
+ select CRYPTO_SKCIPHER
+ select CRYPTO_AKCIPHER
+ select CRYPTO_DH
+ select CRYPTO_HMAC
+ select CRYPTO_RSA
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_LIB_AES
+ select FW_LOADER
+ select CRC8
+
+config CRYPTO_DEV_QAT_DH895xCC
+ tristate "Support for Intel(R) DH895xCC"
+ depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
+ select CRYPTO_DEV_QAT
+ help
+ Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology
+ for accelerating crypto and compression workloads.
+
+ To compile this as a module, choose M here: the module
+ will be called qat_dh895xcc.
+
+config CRYPTO_DEV_QAT_C3XXX
+ tristate "Support for Intel(R) C3XXX"
+ depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
+ select CRYPTO_DEV_QAT
+ help
+ Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology
+ for accelerating crypto and compression workloads.
+
+ To compile this as a module, choose M here: the module
+ will be called qat_c3xxx.
+
+config CRYPTO_DEV_QAT_C62X
+ tristate "Support for Intel(R) C62X"
+ depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
+ select CRYPTO_DEV_QAT
+ help
+ Support for Intel(R) C62x with Intel(R) QuickAssist Technology
+ for accelerating crypto and compression workloads.
+
+ To compile this as a module, choose M here: the module
+ will be called qat_c62x.
+
+config CRYPTO_DEV_QAT_4XXX
+ tristate "Support for Intel(R) QAT_4XXX"
+ depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
+ select CRYPTO_DEV_QAT
+ help
+ Support for Intel(R) QuickAssist Technology QAT_4xxx
+ for accelerating crypto and compression workloads.
+
+ To compile this as a module, choose M here: the module
+ will be called qat_4xxx.
+
+config CRYPTO_DEV_QAT_DH895xCCVF
+ tristate "Support for Intel(R) DH895xCC Virtual Function"
+ depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
+ select PCI_IOV
+ select CRYPTO_DEV_QAT
+ help
+ Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology
+ Virtual Function for accelerating crypto and compression workloads.
+
+ To compile this as a module, choose M here: the module
+ will be called qat_dh895xccvf.
+
+config CRYPTO_DEV_QAT_C3XXXVF
+ tristate "Support for Intel(R) C3XXX Virtual Function"
+ depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
+ select PCI_IOV
+ select CRYPTO_DEV_QAT
+ help
+ Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology
+ Virtual Function for accelerating crypto and compression workloads.
+
+ To compile this as a module, choose M here: the module
+ will be called qat_c3xxxvf.
+
+config CRYPTO_DEV_QAT_C62XVF
+ tristate "Support for Intel(R) C62X Virtual Function"
+ depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
+ select PCI_IOV
+ select CRYPTO_DEV_QAT
+ help
+ Support for Intel(R) C62x with Intel(R) QuickAssist Technology
+ Virtual Function for accelerating crypto and compression workloads.
+
+ To compile this as a module, choose M here: the module
+ will be called qat_c62xvf.
diff --git a/drivers/crypto/intel/qat/Makefile b/drivers/crypto/intel/qat/Makefile
new file mode 100644
index 0000000000..258c8a626c
--- /dev/null
+++ b/drivers/crypto/intel/qat/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
+obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/
+obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/
+obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx/
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/
+obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf/
+obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf/
diff --git a/drivers/crypto/intel/qat/qat_4xxx/Makefile b/drivers/crypto/intel/qat/qat_4xxx/Makefile
new file mode 100644
index 0000000000..ff9c8b5897
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_4xxx/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+ccflags-y := -I $(srctree)/$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx.o
+qat_4xxx-objs := adf_drv.o adf_4xxx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
new file mode 100644
index 0000000000..a5691ba0b7
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -0,0 +1,594 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2020 - 2021 Intel Corporation */
+#include <linux/iopoll.h>
+#include <adf_accel_devices.h>
+#include <adf_cfg.h>
+#include <adf_clock.h>
+#include <adf_common_drv.h>
+#include <adf_gen4_dc.h>
+#include <adf_gen4_hw_data.h>
+#include <adf_gen4_pfvf.h>
+#include <adf_gen4_pm.h>
+#include <adf_gen4_timer.h>
+#include "adf_4xxx_hw_data.h"
+#include "adf_cfg_services.h"
+#include "icp_qat_hw.h"
+
+#define ADF_AE_GROUP_0 GENMASK(3, 0)
+#define ADF_AE_GROUP_1 GENMASK(7, 4)
+#define ADF_AE_GROUP_2 BIT(8)
+
+enum adf_fw_objs {
+ ADF_FW_SYM_OBJ,
+ ADF_FW_ASYM_OBJ,
+ ADF_FW_DC_OBJ,
+ ADF_FW_ADMIN_OBJ,
+};
+
+static const char * const adf_4xxx_fw_objs[] = {
+ [ADF_FW_SYM_OBJ] = ADF_4XXX_SYM_OBJ,
+ [ADF_FW_ASYM_OBJ] = ADF_4XXX_ASYM_OBJ,
+ [ADF_FW_DC_OBJ] = ADF_4XXX_DC_OBJ,
+ [ADF_FW_ADMIN_OBJ] = ADF_4XXX_ADMIN_OBJ,
+};
+
+static const char * const adf_402xx_fw_objs[] = {
+ [ADF_FW_SYM_OBJ] = ADF_402XX_SYM_OBJ,
+ [ADF_FW_ASYM_OBJ] = ADF_402XX_ASYM_OBJ,
+ [ADF_FW_DC_OBJ] = ADF_402XX_DC_OBJ,
+ [ADF_FW_ADMIN_OBJ] = ADF_402XX_ADMIN_OBJ,
+};
+
+struct adf_fw_config {
+ u32 ae_mask;
+ enum adf_fw_objs obj;
+};
+
+static const struct adf_fw_config adf_fw_cy_config[] = {
+ {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
+};
+
+static const struct adf_fw_config adf_fw_dc_config[] = {
+ {ADF_AE_GROUP_1, ADF_FW_DC_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
+ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
+};
+
+static const struct adf_fw_config adf_fw_sym_config[] = {
+ {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
+};
+
+static const struct adf_fw_config adf_fw_asym_config[] = {
+ {ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
+};
+
+static const struct adf_fw_config adf_fw_asym_dc_config[] = {
+ {ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
+ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
+};
+
+static const struct adf_fw_config adf_fw_sym_dc_config[] = {
+ {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
+ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
+};
+
+static const struct adf_fw_config adf_fw_dcc_config[] = {
+ {ADF_AE_GROUP_1, ADF_FW_DC_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
+};
+
+static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dc_config));
+static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_config));
+static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_config));
+static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_dc_config));
+static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_dc_config));
+static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dcc_config));
+
+/* Worker thread to service arbiter mappings */
+static const u32 default_thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = {
+ 0x5555555, 0x5555555, 0x5555555, 0x5555555,
+ 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA,
+ 0x0
+};
+
+static const u32 thrd_to_arb_map_dc[ADF_4XXX_MAX_ACCELENGINES] = {
+ 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
+ 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
+ 0x0
+};
+
+static const u32 thrd_to_arb_map_dcc[ADF_4XXX_MAX_ACCELENGINES] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF,
+ 0x0
+};
+
+static struct adf_hw_device_class adf_4xxx_class = {
+ .name = ADF_4XXX_DEVICE_NAME,
+ .type = DEV_4XXX,
+ .instances = 0,
+};
+
+static int get_service_enabled(struct adf_accel_dev *accel_dev)
+{
+ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+ int ret;
+
+ ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED, services);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ ADF_SERVICES_ENABLED " param not found\n");
+ return ret;
+ }
+
+ ret = match_string(adf_cfg_services, ARRAY_SIZE(adf_cfg_services),
+ services);
+ if (ret < 0)
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid value of " ADF_SERVICES_ENABLED " param: %s\n",
+ services);
+
+ return ret;
+}
+
+static u32 get_accel_mask(struct adf_hw_device_data *self)
+{
+ return ADF_4XXX_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+ u32 me_disable = self->fuses;
+
+ return ~me_disable & ADF_4XXX_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+ return ADF_4XXX_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+ if (!self || !self->ae_mask)
+ return 0;
+
+ return hweight32(self->ae_mask);
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_4XXX_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_4XXX_ETR_BAR;
+}
+
+static u32 get_sram_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_4XXX_SRAM_BAR;
+}
+
+/*
+ * The vector routing table is used to select the MSI-X entry to use for each
+ * interrupt source.
+ * The first ADF_4XXX_ETR_MAX_BANKS entries correspond to ring interrupts.
+ * The final entry corresponds to VF2PF or error interrupts.
+ * This vector table could be used to configure one MSI-X entry to be shared
+ * between multiple interrupt sources.
+ *
+ * The default routing is set to have a one-to-one correspondence between the
+ * interrupt source and the MSI-X entry used.
+ */
+static void set_msix_default_rttable(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *csr;
+ int i;
+
+ csr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
+ for (i = 0; i <= ADF_4XXX_ETR_MAX_BANKS; i++)
+ ADF_CSR_WR(csr, ADF_4XXX_MSIX_RTTABLE_OFFSET(i), i);
+}
+
+static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
+ u32 capabilities_sym, capabilities_asym, capabilities_dc;
+ u32 capabilities_dcc;
+ u32 fusectl1;
+
+ /* Read accelerator capabilities mask */
+ pci_read_config_dword(pdev, ADF_4XXX_FUSECTL1_OFFSET, &fusectl1);
+
+ capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_CIPHER |
+ ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
+ ICP_ACCEL_CAPABILITIES_SHA3 |
+ ICP_ACCEL_CAPABILITIES_SHA3_EXT |
+ ICP_ACCEL_CAPABILITIES_HKDF |
+ ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
+ ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
+ ICP_ACCEL_CAPABILITIES_SM3 |
+ ICP_ACCEL_CAPABILITIES_SM4 |
+ ICP_ACCEL_CAPABILITIES_AES_V2;
+
+ /* A set bit in fusectl1 means the feature is OFF in this SKU */
+ if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_HKDF;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+
+ if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+
+ if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+
+ if (fusectl1 & ICP_ACCEL_4XXX_MASK_SMX_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM3;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM4;
+ }
+
+ capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_CIPHER |
+ ICP_ACCEL_CAPABILITIES_SM2 |
+ ICP_ACCEL_CAPABILITIES_ECEDMONT;
+
+ if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) {
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2;
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
+ }
+
+ capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+
+ if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) {
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+ }
+
+ switch (get_service_enabled(accel_dev)) {
+ case SVC_CY:
+ case SVC_CY2:
+ return capabilities_sym | capabilities_asym;
+ case SVC_DC:
+ return capabilities_dc;
+ case SVC_DCC:
+ /*
+ * Sym capabilities are available for chaining operations,
+ * but sym crypto instances cannot be supported
+ */
+ capabilities_dcc = capabilities_dc | capabilities_sym;
+ capabilities_dcc &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ return capabilities_dcc;
+ case SVC_SYM:
+ return capabilities_sym;
+ case SVC_ASYM:
+ return capabilities_asym;
+ case SVC_ASYM_DC:
+ case SVC_DC_ASYM:
+ return capabilities_asym | capabilities_dc;
+ case SVC_SYM_DC:
+ case SVC_DC_SYM:
+ return capabilities_sym | capabilities_dc;
+ default:
+ return 0;
+ }
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+ return DEV_SKU_1;
+}
+
+static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
+{
+ switch (get_service_enabled(accel_dev)) {
+ case SVC_DC:
+ return thrd_to_arb_map_dc;
+ case SVC_DCC:
+ return thrd_to_arb_map_dcc;
+ default:
+ return default_thrd_to_arb_map;
+ }
+}
+
+static void get_arb_info(struct arb_info *arb_info)
+{
+ arb_info->arb_cfg = ADF_4XXX_ARB_CONFIG;
+ arb_info->arb_offset = ADF_4XXX_ARB_OFFSET;
+ arb_info->wt2sam_offset = ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET;
+}
+
+static void get_admin_info(struct admin_info *admin_csrs_info)
+{
+ admin_csrs_info->mailbox_offset = ADF_4XXX_MAILBOX_BASE_OFFSET;
+ admin_csrs_info->admin_msg_ur = ADF_4XXX_ADMINMSGUR_OFFSET;
+ admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET;
+}
+
+static u32 get_heartbeat_clock(struct adf_hw_device_data *self)
+{
+ /* 4XXX uses the KPT counter as the heartbeat (HB) clock. */
+ return ADF_4XXX_KPT_COUNTER_FREQ;
+}
+
+static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+ struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR];
+ void __iomem *csr = misc_bar->virt_addr;
+
+ /* Enable all in errsou3 except VFLR notification on host */
+ ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY);
+}
+
+static void adf_enable_ints(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *addr;
+
+ addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
+
+ /* Enable bundle interrupts */
+ ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET, 0);
+ ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET, 0);
+
+ /* Enable misc interrupts */
+ ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0);
+}
+
+static int adf_init_device(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *addr;
+ u32 status;
+ u32 csr;
+ int ret;
+
+ addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
+
+ /* Temporarily mask PM interrupt */
+ csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2);
+ csr |= ADF_GEN4_PM_SOU;
+ ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr);
+
+ /* Set DRV_ACTIVE bit to power up the device */
+ ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE);
+
+ /* Poll status register to make sure the device is powered up */
+ ret = read_poll_timeout(ADF_CSR_RD, status,
+ status & ADF_GEN4_PM_INIT_STATE,
+ ADF_GEN4_PM_POLL_DELAY_US,
+ ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr,
+ ADF_GEN4_PM_STATUS);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");
+
+ return ret;
+}
+
+static u32 uof_get_num_objs(void)
+{
+ return ARRAY_SIZE(adf_fw_cy_config);
+}
+
+static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev)
+{
+ switch (get_service_enabled(accel_dev)) {
+ case SVC_CY:
+ case SVC_CY2:
+ return adf_fw_cy_config;
+ case SVC_DC:
+ return adf_fw_dc_config;
+ case SVC_DCC:
+ return adf_fw_dcc_config;
+ case SVC_SYM:
+ return adf_fw_sym_config;
+ case SVC_ASYM:
+ return adf_fw_asym_config;
+ case SVC_ASYM_DC:
+ case SVC_DC_ASYM:
+ return adf_fw_asym_dc_config;
+ case SVC_SYM_DC:
+ case SVC_DC_SYM:
+ return adf_fw_sym_dc_config;
+ default:
+ return NULL;
+ }
+}
+
+enum adf_rp_groups {
+ RP_GROUP_0 = 0,
+ RP_GROUP_1,
+ RP_GROUP_COUNT
+};
+
+static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
+{
+ enum adf_cfg_service_type rps[RP_GROUP_COUNT];
+ const struct adf_fw_config *fw_config;
+ u16 ring_to_svc_map;
+ int i, j;
+
+ fw_config = get_fw_config(accel_dev);
+ if (!fw_config)
+ return 0;
+
+ for (i = 0; i < RP_GROUP_COUNT; i++) {
+ switch (fw_config[i].ae_mask) {
+ case ADF_AE_GROUP_0:
+ j = RP_GROUP_0;
+ break;
+ case ADF_AE_GROUP_1:
+ j = RP_GROUP_1;
+ break;
+ default:
+ return 0;
+ }
+
+ switch (fw_config[i].obj) {
+ case ADF_FW_SYM_OBJ:
+ rps[j] = SYM;
+ break;
+ case ADF_FW_ASYM_OBJ:
+ rps[j] = ASYM;
+ break;
+ case ADF_FW_DC_OBJ:
+ rps[j] = COMP;
+ break;
+ default:
+ rps[j] = 0;
+ break;
+ }
+ }
+
+ ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
+ rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
+ rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
+ rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;
+
+ return ring_to_svc_map;
+}
+
+static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
+ const char * const fw_objs[], int num_objs)
+{
+ const struct adf_fw_config *fw_config;
+ int id;
+
+ fw_config = get_fw_config(accel_dev);
+ if (fw_config)
+ id = fw_config[obj_num].obj;
+ else
+ id = -EINVAL;
+
+ if (id < 0 || id >= num_objs)
+ return NULL;
+
+ return fw_objs[id];
+}
+
+static const char *uof_get_name_4xxx(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ int num_fw_objs = ARRAY_SIZE(adf_4xxx_fw_objs);
+
+ return uof_get_name(accel_dev, obj_num, adf_4xxx_fw_objs, num_fw_objs);
+}
+
+static const char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ int num_fw_objs = ARRAY_SIZE(adf_402xx_fw_objs);
+
+ return uof_get_name(accel_dev, obj_num, adf_402xx_fw_objs, num_fw_objs);
+}
+
+static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ const struct adf_fw_config *fw_config;
+
+ fw_config = get_fw_config(accel_dev);
+ if (!fw_config)
+ return 0;
+
+ return fw_config[obj_num].ae_mask;
+}
+
+void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
+{
+ hw_data->dev_class = &adf_4xxx_class;
+ hw_data->instance_id = adf_4xxx_class.instances++;
+ hw_data->num_banks = ADF_4XXX_ETR_MAX_BANKS;
+ hw_data->num_banks_per_vf = ADF_4XXX_NUM_BANKS_PER_VF;
+ hw_data->num_rings_per_bank = ADF_4XXX_NUM_RINGS_PER_BANK;
+ hw_data->num_accel = ADF_4XXX_MAX_ACCELERATORS;
+ hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES;
+ hw_data->num_logical_accel = 1;
+ hw_data->tx_rx_gap = ADF_4XXX_RX_RINGS_OFFSET;
+ hw_data->tx_rings_mask = ADF_4XXX_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP;
+ hw_data->alloc_irq = adf_isr_resource_alloc;
+ hw_data->free_irq = adf_isr_resource_free;
+ hw_data->enable_error_correction = adf_enable_error_correction;
+ hw_data->get_accel_mask = get_accel_mask;
+ hw_data->get_ae_mask = get_ae_mask;
+ hw_data->get_num_accels = get_num_accels;
+ hw_data->get_num_aes = get_num_aes;
+ hw_data->get_sram_bar_id = get_sram_bar_id;
+ hw_data->get_etr_bar_id = get_etr_bar_id;
+ hw_data->get_misc_bar_id = get_misc_bar_id;
+ hw_data->get_arb_info = get_arb_info;
+ hw_data->get_admin_info = get_admin_info;
+ hw_data->get_accel_cap = get_accel_cap;
+ hw_data->get_sku = get_sku;
+ hw_data->init_admin_comms = adf_init_admin_comms;
+ hw_data->exit_admin_comms = adf_exit_admin_comms;
+ hw_data->send_admin_init = adf_send_admin_init;
+ hw_data->init_arb = adf_init_arb;
+ hw_data->exit_arb = adf_exit_arb;
+ hw_data->get_arb_mapping = adf_get_arbiter_mapping;
+ hw_data->enable_ints = adf_enable_ints;
+ hw_data->init_device = adf_init_device;
+ hw_data->reset_device = adf_reset_flr;
+ hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
+ switch (dev_id) {
+ case ADF_402XX_PCI_DEVICE_ID:
+ hw_data->fw_name = ADF_402XX_FW;
+ hw_data->fw_mmp_name = ADF_402XX_MMP;
+ hw_data->uof_get_name = uof_get_name_402xx;
+ break;
+
+ default:
+ hw_data->fw_name = ADF_4XXX_FW;
+ hw_data->fw_mmp_name = ADF_4XXX_MMP;
+ hw_data->uof_get_name = uof_get_name_4xxx;
+ }
+ hw_data->uof_get_num_objs = uof_get_num_objs;
+ hw_data->uof_get_ae_mask = uof_get_ae_mask;
+ hw_data->set_msix_rttable = set_msix_default_rttable;
+ hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
+ hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
+ hw_data->disable_iov = adf_disable_sriov;
+ hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
+ hw_data->enable_pm = adf_gen4_enable_pm;
+ hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
+ hw_data->dev_config = adf_gen4_dev_config;
+ hw_data->start_timer = adf_gen4_timer_start;
+ hw_data->stop_timer = adf_gen4_timer_stop;
+ hw_data->get_hb_clock = get_heartbeat_clock;
+ hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
+
+ adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
+ adf_gen4_init_dc_ops(&hw_data->dc_ops);
+}
+
+void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class->instances--;
+}
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
new file mode 100644
index 0000000000..bb3d95a8fb
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_4XXX_HW_DATA_H_
+#define ADF_4XXX_HW_DATA_H_
+
+#include <linux/units.h>
+#include <adf_accel_devices.h>
+
+/* PCIe configuration space */
+#define ADF_4XXX_SRAM_BAR 0
+#define ADF_4XXX_PMISC_BAR 1
+#define ADF_4XXX_ETR_BAR 2
+#define ADF_4XXX_RX_RINGS_OFFSET 1
+#define ADF_4XXX_TX_RINGS_MASK 0x1
+#define ADF_4XXX_MAX_ACCELERATORS 1
+#define ADF_4XXX_MAX_ACCELENGINES 9
+#define ADF_4XXX_BAR_MASK (BIT(0) | BIT(2) | BIT(4))
+
+/* Physical function fuses */
+#define ADF_4XXX_FUSECTL0_OFFSET (0x2C8)
+#define ADF_4XXX_FUSECTL1_OFFSET (0x2CC)
+#define ADF_4XXX_FUSECTL2_OFFSET (0x2D0)
+#define ADF_4XXX_FUSECTL3_OFFSET (0x2D4)
+#define ADF_4XXX_FUSECTL4_OFFSET (0x2D8)
+#define ADF_4XXX_FUSECTL5_OFFSET (0x2DC)
+
+#define ADF_4XXX_ACCELERATORS_MASK (0x1)
+#define ADF_4XXX_ACCELENGINES_MASK (0x1FF)
+#define ADF_4XXX_ADMIN_AE_MASK (0x100)
+
+#define ADF_4XXX_ETR_MAX_BANKS 64
+
+/* MSIX interrupt */
+#define ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET (0x41A040)
+#define ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET (0x41A044)
+#define ADF_4XXX_SMIAPF_MASK_OFFSET (0x41A084)
+#define ADF_4XXX_MSIX_RTTABLE_OFFSET(i) (0x409000 + ((i) * 0x04))
+
+/* Bank and ring configuration */
+#define ADF_4XXX_NUM_RINGS_PER_BANK 2
+#define ADF_4XXX_NUM_BANKS_PER_VF 4
+
+/* Arbiter configuration */
+#define ADF_4XXX_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0))
+#define ADF_4XXX_ARB_OFFSET (0x0)
+#define ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET (0x400)
+
+/* Admin Interface Reg Offset */
+#define ADF_4XXX_ADMINMSGUR_OFFSET (0x500574)
+#define ADF_4XXX_ADMINMSGLR_OFFSET (0x500578)
+#define ADF_4XXX_MAILBOX_BASE_OFFSET (0x600970)
+
+/* Firmware Binaries */
+#define ADF_4XXX_FW "qat_4xxx.bin"
+#define ADF_4XXX_MMP "qat_4xxx_mmp.bin"
+#define ADF_4XXX_SYM_OBJ "qat_4xxx_sym.bin"
+#define ADF_4XXX_DC_OBJ "qat_4xxx_dc.bin"
+#define ADF_4XXX_ASYM_OBJ "qat_4xxx_asym.bin"
+#define ADF_4XXX_ADMIN_OBJ "qat_4xxx_admin.bin"
+/* Firmware for 402XXX */
+#define ADF_402XX_FW "qat_402xx.bin"
+#define ADF_402XX_MMP "qat_402xx_mmp.bin"
+#define ADF_402XX_SYM_OBJ "qat_402xx_sym.bin"
+#define ADF_402XX_DC_OBJ "qat_402xx_dc.bin"
+#define ADF_402XX_ASYM_OBJ "qat_402xx_asym.bin"
+#define ADF_402XX_ADMIN_OBJ "qat_402xx_admin.bin"
+
+/* Clocks frequency */
+#define ADF_4XXX_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ)
+
+/* qat_4xxx fuse bits are different from old GENs, redefine them */
+enum icp_qat_4xxx_slice_mask {
+ ICP_ACCEL_4XXX_MASK_CIPHER_SLICE = BIT(0),
+ ICP_ACCEL_4XXX_MASK_AUTH_SLICE = BIT(1),
+ ICP_ACCEL_4XXX_MASK_PKE_SLICE = BIT(2),
+ ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE = BIT(3),
+ ICP_ACCEL_4XXX_MASK_UCS_SLICE = BIT(4),
+ ICP_ACCEL_4XXX_MASK_EIA3_SLICE = BIT(5),
+ ICP_ACCEL_4XXX_MASK_SMX_SLICE = BIT(7),
+};
+
+void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id);
+void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data);
+int adf_gen4_dev_config(struct adf_accel_dev *accel_dev);
+
+#endif
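
Worked example (derived from the tables above, not part of the patch): with ServicesEnabled selecting the compression-only "dc" layout, adf_fw_dc_config[] together with uof_get_name_4xxx() and uof_get_ae_mask() resolves the three loadable objects on a 4xxx part as:

        obj_num 0: qat_4xxx_dc.bin     ae_mask 0x0f0  (AEs 4-7, ADF_AE_GROUP_1)
        obj_num 1: qat_4xxx_dc.bin     ae_mask 0x00f  (AEs 0-3, ADF_AE_GROUP_0)
        obj_num 2: qat_4xxx_admin.bin  ae_mask 0x100  (AE 8,    ADF_AE_GROUP_2)
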
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
new file mode 100644
index 0000000000..90f5c1ca7b
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
@@ -0,0 +1,469 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2020 Intel Corporation */
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include <adf_accel_devices.h>
+#include <adf_cfg.h>
+#include <adf_common_drv.h>
+#include <adf_dbgfs.h>
+#include <adf_heartbeat.h>
+
+#include "adf_4xxx_hw_data.h"
+#include "adf_cfg_services.h"
+#include "qat_compression.h"
+#include "qat_crypto.h"
+#include "adf_transport_access_macros.h"
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, ADF_4XXX_PCI_DEVICE_ID), },
+ { PCI_VDEVICE(INTEL, ADF_401XX_PCI_DEVICE_ID), },
+ { PCI_VDEVICE(INTEL, ADF_402XX_PCI_DEVICE_ID), },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+ if (accel_dev->hw_device) {
+ adf_clean_hw_data_4xxx(accel_dev->hw_device);
+ accel_dev->hw_device = NULL;
+ }
+ adf_dbgfs_exit(accel_dev);
+ adf_cfg_dev_remove(accel_dev);
+ adf_devmgr_rm_dev(accel_dev, NULL);
+}
+
+static int adf_cfg_dev_init(struct adf_accel_dev *accel_dev)
+{
+ const char *config;
+ int ret;
+
+ config = accel_dev->accel_id % 2 ? ADF_CFG_DC : ADF_CFG_CY;
+
+ ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
+ if (ret)
+ return ret;
+
+ /* Default configuration is crypto only for even devices
+ * and compression for odd devices
+ */
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED, config,
+ ADF_STR);
+ if (ret)
+ return ret;
+
+ adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_MIN_MS);
+
+ return 0;
+}
+
+static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
+{
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ int banks = GET_MAX_BANKS(accel_dev);
+ int cpus = num_online_cpus();
+ unsigned long bank, val;
+ int instances;
+ int ret;
+ int i;
+
+ if (adf_hw_dev_has_crypto(accel_dev))
+ instances = min(cpus, banks / 2);
+ else
+ instances = 0;
+
+ for (i = 0; i < instances; i++) {
+ val = i;
+ bank = i * 2;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &bank, ADF_DEC);
+ if (ret)
+ goto err;
+
+ bank += 1;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &bank, ADF_DEC);
+ if (ret)
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
+ i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+ val = 128;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 512;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 0;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 0;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 1;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 1;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = ADF_COALESCING_DEF_TIME;
+ snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+ }
+
+ val = i;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+ &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 0;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+ &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n");
+ return ret;
+}
+
+static int adf_comp_dev_config(struct adf_accel_dev *accel_dev)
+{
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ int banks = GET_MAX_BANKS(accel_dev);
+ int cpus = num_online_cpus();
+ unsigned long val;
+ int instances;
+ int ret;
+ int i;
+
+ if (adf_hw_dev_has_compression(accel_dev))
+ instances = min(cpus, banks);
+ else
+ instances = 0;
+
+ for (i = 0; i < instances; i++) {
+ val = i;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 512;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 0;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 1;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = ADF_COALESCING_DEF_TIME;
+ snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+ }
+
+ val = i;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+ &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 0;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+ &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n");
+ return ret;
+}
+
+static int adf_no_dev_config(struct adf_accel_dev *accel_dev)
+{
+ unsigned long val;
+ int ret;
+
+ val = 0;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+ &val, ADF_DEC);
+ if (ret)
+ return ret;
+
+ return adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+ &val, ADF_DEC);
+}
+
+int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
+{
+ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+ int ret;
+
+ ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
+ if (ret)
+ goto err;
+
+ ret = adf_cfg_section_add(accel_dev, "Accelerator0");
+ if (ret)
+ goto err;
+
+ ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED, services);
+ if (ret)
+ goto err;
+
+ ret = sysfs_match_string(adf_cfg_services, services);
+ if (ret < 0)
+ goto err;
+
+ switch (ret) {
+ case SVC_CY:
+ case SVC_CY2:
+ ret = adf_crypto_dev_config(accel_dev);
+ break;
+ case SVC_DC:
+ case SVC_DCC:
+ ret = adf_comp_dev_config(accel_dev);
+ break;
+ default:
+ ret = adf_no_dev_config(accel_dev);
+ break;
+ }
+
+ if (ret)
+ goto err;
+
+ set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+ return ret;
+
+err:
+ dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n");
+ return ret;
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct adf_accel_dev *accel_dev;
+ struct adf_accel_pci *accel_pci_dev;
+ struct adf_hw_device_data *hw_data;
+ unsigned int i, bar_nr;
+ unsigned long bar_mask;
+ struct adf_bar *bar;
+ int ret;
+
+ if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+ /*
+ * If the accelerator is connected to a node with no memory
+ * there is no point in using the accelerator since the remote
+ * memory transaction will be very slow.
+ */
+ dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+ return -EINVAL;
+ }
+
+ accel_dev = devm_kzalloc(&pdev->dev, sizeof(*accel_dev), GFP_KERNEL);
+ if (!accel_dev)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&accel_dev->crypto_list);
+ accel_pci_dev = &accel_dev->accel_pci_dev;
+ accel_pci_dev->pci_dev = pdev;
+
+ /*
+ * Add accel device to accel table
+ * This should be called before adf_cleanup_accel is called
+ */
+ if (adf_devmgr_add_dev(accel_dev, NULL)) {
+ dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+ return -EFAULT;
+ }
+
+ accel_dev->owner = THIS_MODULE;
+ /* Allocate and initialise device hardware meta-data structure */
+ hw_data = devm_kzalloc(&pdev->dev, sizeof(*hw_data), GFP_KERNEL);
+ if (!hw_data) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ accel_dev->hw_device = hw_data;
+ adf_init_hw_data_4xxx(accel_dev->hw_device, ent->device);
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+ pci_read_config_dword(pdev, ADF_4XXX_FUSECTL4_OFFSET, &hw_data->fuses);
+
+ /* Get Accelerators and Accelerators Engines masks */
+ hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+ hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+ accel_pci_dev->sku = hw_data->get_sku(hw_data);
+ /* If the device has no acceleration engines then ignore it */
+ if (!hw_data->accel_mask || !hw_data->ae_mask ||
+ (~hw_data->ae_mask & 0x01)) {
+ dev_err(&pdev->dev, "No acceleration units found.\n");
+ ret = -EFAULT;
+ goto out_err;
+ }
+
+ /* Create device configuration table */
+ ret = adf_cfg_dev_add(accel_dev);
+ if (ret)
+ goto out_err;
+
+ /* Enable PCI device */
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't enable PCI device.\n");
+ goto out_err;
+ }
+
+ /* Set DMA identifier */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(&pdev->dev, "No usable DMA configuration.\n");
+ goto out_err;
+ }
+
+ ret = adf_cfg_dev_init(accel_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize configuration.\n");
+ goto out_err;
+ }
+
+ /* Get accelerator capabilities mask */
+ hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
+ if (!hw_data->accel_capabilities_mask) {
+ dev_err(&pdev->dev, "Failed to get capabilities mask.\n");
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ /* Find and map all the device's BARS */
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_4XXX_BAR_MASK;
+
+ ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to map pci regions.\n");
+ goto out_err;
+ }
+
+ i = 0;
+ for_each_set_bit(bar_nr, &bar_mask, PCI_STD_NUM_BARS) {
+ bar = &accel_pci_dev->pci_bars[i++];
+ bar->virt_addr = pcim_iomap_table(pdev)[bar_nr];
+ }
+
+ pci_set_master(pdev);
+
+ if (pci_save_state(pdev)) {
+ dev_err(&pdev->dev, "Failed to save pci state.\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ adf_dbgfs_init(accel_dev);
+
+ ret = adf_dev_up(accel_dev, true);
+ if (ret)
+ goto out_err_dev_stop;
+
+ ret = adf_sysfs_init(accel_dev);
+ if (ret)
+ goto out_err_dev_stop;
+
+ return ret;
+
+out_err_dev_stop:
+ adf_dev_down(accel_dev, false);
+out_err:
+ adf_cleanup_accel(accel_dev);
+ return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ if (!accel_dev) {
+ pr_err("QAT: Driver removal failed\n");
+ return;
+ }
+ adf_dev_down(accel_dev, false);
+ adf_cleanup_accel(accel_dev);
+}
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_4XXX_DEVICE_NAME,
+ .probe = adf_probe,
+ .remove = adf_remove,
+ .sriov_configure = adf_sriov_configure,
+ .err_handler = &adf_err_handler,
+};
+
+module_pci_driver(adf_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE(ADF_4XXX_FW);
+MODULE_FIRMWARE(ADF_4XXX_MMP);
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
+MODULE_SOFTDEP("pre: crypto-intel_qat");
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/Makefile b/drivers/crypto/intel/qat/qat_c3xxx/Makefile
new file mode 100644
index 0000000000..92ef416ccc
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_c3xxx/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o
+qat_c3xxx-objs := adf_drv.o adf_c3xxx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
new file mode 100644
index 0000000000..9c00c441b6
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2021 Intel Corporation */
+#include <adf_accel_devices.h>
+#include <adf_clock.h>
+#include <adf_common_drv.h>
+#include <adf_gen2_config.h>
+#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
+#include "adf_c3xxx_hw_data.h"
+#include "adf_heartbeat.h"
+#include "icp_qat_hw.h"
+
+/* Worker thread to service arbiter mappings */
+static const u32 thrd_to_arb_map[ADF_C3XXX_MAX_ACCELENGINES] = {
+ 0x12222AAA, 0x11222AAA, 0x12222AAA,
+ 0x11222AAA, 0x12222AAA, 0x11222AAA
+};
+
+static struct adf_hw_device_class c3xxx_class = {
+ .name = ADF_C3XXX_DEVICE_NAME,
+ .type = DEV_C3XXX,
+ .instances = 0
+};
+
+static u32 get_accel_mask(struct adf_hw_device_data *self)
+{
+ u32 straps = self->straps;
+ u32 fuses = self->fuses;
+ u32 accel;
+
+ accel = ~(fuses | straps) >> ADF_C3XXX_ACCELERATORS_REG_OFFSET;
+ accel &= ADF_C3XXX_ACCELERATORS_MASK;
+
+ return accel;
+}
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+ u32 straps = self->straps;
+ u32 fuses = self->fuses;
+ unsigned long disabled;
+ u32 ae_disable;
+ int accel;
+
+ /* If an accel is disabled, then disable the corresponding two AEs */
+ disabled = ~get_accel_mask(self) & ADF_C3XXX_ACCELERATORS_MASK;
+ ae_disable = BIT(1) | BIT(0);
+ for_each_set_bit(accel, &disabled, ADF_C3XXX_MAX_ACCELERATORS)
+ straps |= ae_disable << (accel << 1);
+
+ return ~(fuses | straps) & ADF_C3XXX_ACCELENGINES_MASK;
+}
+
+static u32 get_ts_clock(struct adf_hw_device_data *self)
+{
+ /*
+ * Timestamp update interval is 16 AE clock ticks for c3xxx.
+ */
+ return self->clock_frequency / 16;
+}
+
+static int measure_clock(struct adf_accel_dev *accel_dev)
+{
+ u32 frequency;
+ int ret;
+
+ ret = adf_dev_measure_clock(accel_dev, &frequency, ADF_C3XXX_MIN_AE_FREQ,
+ ADF_C3XXX_MAX_AE_FREQ);
+ if (ret)
+ return ret;
+
+ accel_dev->hw_device->clock_frequency = frequency;
+ return 0;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_C3XXX_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_C3XXX_ETR_BAR;
+}
+
+static u32 get_sram_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_C3XXX_SRAM_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+ int aes = self->get_num_aes(self);
+
+ if (aes == 6)
+ return DEV_SKU_4;
+
+ return DEV_SKU_UNKNOWN;
+}
+
+static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
+{
+ return thrd_to_arb_map;
+}
+
+static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
+{
+ adf_gen2_cfg_iov_thds(accel_dev, enable,
+ ADF_C3XXX_AE2FUNC_MAP_GRP_A_NUM_REGS,
+ ADF_C3XXX_AE2FUNC_MAP_GRP_B_NUM_REGS);
+}
+
+void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class = &c3xxx_class;
+ hw_data->instance_id = c3xxx_class.instances++;
+ hw_data->num_banks = ADF_C3XXX_ETR_MAX_BANKS;
+ hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
+ hw_data->num_accel = ADF_C3XXX_MAX_ACCELERATORS;
+ hw_data->num_logical_accel = 1;
+ hw_data->num_engines = ADF_C3XXX_MAX_ACCELENGINES;
+ hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
+ hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
+ hw_data->alloc_irq = adf_isr_resource_alloc;
+ hw_data->free_irq = adf_isr_resource_free;
+ hw_data->enable_error_correction = adf_gen2_enable_error_correction;
+ hw_data->get_accel_mask = get_accel_mask;
+ hw_data->get_ae_mask = get_ae_mask;
+ hw_data->get_accel_cap = adf_gen2_get_accel_cap;
+ hw_data->get_num_accels = adf_gen2_get_num_accels;
+ hw_data->get_num_aes = adf_gen2_get_num_aes;
+ hw_data->get_sram_bar_id = get_sram_bar_id;
+ hw_data->get_etr_bar_id = get_etr_bar_id;
+ hw_data->get_misc_bar_id = get_misc_bar_id;
+ hw_data->get_admin_info = adf_gen2_get_admin_info;
+ hw_data->get_arb_info = adf_gen2_get_arb_info;
+ hw_data->get_sku = get_sku;
+ hw_data->fw_name = ADF_C3XXX_FW;
+ hw_data->fw_mmp_name = ADF_C3XXX_MMP;
+ hw_data->init_admin_comms = adf_init_admin_comms;
+ hw_data->exit_admin_comms = adf_exit_admin_comms;
+ hw_data->configure_iov_threads = configure_iov_threads;
+ hw_data->send_admin_init = adf_send_admin_init;
+ hw_data->init_arb = adf_init_arb;
+ hw_data->exit_arb = adf_exit_arb;
+ hw_data->get_arb_mapping = adf_get_arbiter_mapping;
+ hw_data->enable_ints = adf_gen2_enable_ints;
+ hw_data->reset_device = adf_reset_flr;
+ hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
+ hw_data->disable_iov = adf_disable_sriov;
+ hw_data->dev_config = adf_gen2_dev_config;
+ hw_data->measure_clock = measure_clock;
+ hw_data->get_hb_clock = get_ts_clock;
+ hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
+ hw_data->check_hb_ctrs = adf_heartbeat_check_ctrs;
+
+ adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
+ adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen2_init_dc_ops(&hw_data->dc_ops);
+}
+
+void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class->instances--;
+}
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h
new file mode 100644
index 0000000000..690c6a1aa1
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_C3XXX_HW_DATA_H_
+#define ADF_C3XXX_HW_DATA_H_
+
+#include <linux/units.h>
+
+/* PCIe configuration space */
+#define ADF_C3XXX_PMISC_BAR 0
+#define ADF_C3XXX_ETR_BAR 1
+#define ADF_C3XXX_SRAM_BAR 0
+#define ADF_C3XXX_MAX_ACCELERATORS 3
+#define ADF_C3XXX_MAX_ACCELENGINES 6
+#define ADF_C3XXX_ACCELERATORS_REG_OFFSET 16
+#define ADF_C3XXX_ACCELERATORS_MASK 0x7
+#define ADF_C3XXX_ACCELENGINES_MASK 0x3F
+#define ADF_C3XXX_ETR_MAX_BANKS 16
+#define ADF_C3XXX_SOFTSTRAP_CSR_OFFSET 0x2EC
+
+/* AE to function mapping */
+#define ADF_C3XXX_AE2FUNC_MAP_GRP_A_NUM_REGS 48
+#define ADF_C3XXX_AE2FUNC_MAP_GRP_B_NUM_REGS 6
+
+/* Clocks frequency */
+#define ADF_C3XXX_AE_FREQ (685 * HZ_PER_MHZ)
+#define ADF_C3XXX_MIN_AE_FREQ (533 * HZ_PER_MHZ)
+#define ADF_C3XXX_MAX_AE_FREQ (685 * HZ_PER_MHZ)
+
+/* Firmware Binary */
+#define ADF_C3XXX_FW "qat_c3xxx.bin"
+#define ADF_C3XXX_MMP "qat_c3xxx_mmp.bin"
+
+void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data);
+#endif
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
new file mode 100644
index 0000000000..468c910209
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include <adf_dbgfs.h>
+#include "adf_c3xxx_hw_data.h"
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX), },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_C3XXX_DEVICE_NAME,
+ .probe = adf_probe,
+ .remove = adf_remove,
+ .sriov_configure = adf_sriov_configure,
+ .err_handler = &adf_err_handler,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+ pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+ pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+ int i;
+
+ for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+ if (bar->virt_addr)
+ pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+ }
+
+ if (accel_dev->hw_device) {
+ switch (accel_pci_dev->pci_dev->device) {
+ case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
+ adf_clean_hw_data_c3xxx(accel_dev->hw_device);
+ break;
+ default:
+ break;
+ }
+ kfree(accel_dev->hw_device);
+ accel_dev->hw_device = NULL;
+ }
+ adf_dbgfs_exit(accel_dev);
+ adf_cfg_dev_remove(accel_dev);
+ adf_devmgr_rm_dev(accel_dev, NULL);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct adf_accel_dev *accel_dev;
+ struct adf_accel_pci *accel_pci_dev;
+ struct adf_hw_device_data *hw_data;
+ unsigned int i, bar_nr;
+ unsigned long bar_mask;
+ int ret;
+
+ switch (ent->device) {
+ case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
+ break;
+ default:
+ dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+ return -ENODEV;
+ }
+
+ if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+ /* If the accelerator is connected to a node with no memory,
+ * there is no point in using the accelerator since the remote
+ * memory transaction will be very slow. */
+ dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+ return -EINVAL;
+ }
+
+ accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!accel_dev)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&accel_dev->crypto_list);
+ accel_pci_dev = &accel_dev->accel_pci_dev;
+ accel_pci_dev->pci_dev = pdev;
+
+ /* Add accel device to accel table.
+ * This should be called before adf_cleanup_accel is called */
+ if (adf_devmgr_add_dev(accel_dev, NULL)) {
+ dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+ kfree(accel_dev);
+ return -EFAULT;
+ }
+
+ accel_dev->owner = THIS_MODULE;
+ /* Allocate and configure device configuration structure */
+ hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!hw_data) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ accel_dev->hw_device = hw_data;
+ adf_init_hw_data_c3xxx(accel_dev->hw_device);
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+ pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
+ &hw_data->fuses);
+ pci_read_config_dword(pdev, ADF_C3XXX_SOFTSTRAP_CSR_OFFSET,
+ &hw_data->straps);
+
+ /* Get accelerator and accelerator engine masks */
+ hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+ hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+ accel_pci_dev->sku = hw_data->get_sku(hw_data);
+ /* If the device has no acceleration engines then ignore it. */
+ if (!hw_data->accel_mask || !hw_data->ae_mask ||
+ ((~hw_data->ae_mask) & 0x01)) {
+ dev_err(&pdev->dev, "No acceleration units found");
+ ret = -EFAULT;
+ goto out_err;
+ }
+
+ /* Create device configuration table */
+ ret = adf_cfg_dev_add(accel_dev);
+ if (ret)
+ goto out_err;
+
+ /* enable PCI device */
+ if (pci_enable_device(pdev)) {
+ ret = -EFAULT;
+ goto out_err;
+ }
+
+ /* set dma identifier */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+ if (ret) {
+ dev_err(&pdev->dev, "No usable DMA configuration\n");
+ goto out_err_disable;
+ }
+
+ if (pci_request_regions(pdev, ADF_C3XXX_DEVICE_NAME)) {
+ ret = -EFAULT;
+ goto out_err_disable;
+ }
+
+ /* Get accelerator capabilities mask */
+ hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
+
+ /* Find and map all the device's BARs */
+ i = 0;
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+ bar->base_addr = pci_resource_start(pdev, bar_nr);
+ if (!bar->base_addr)
+ break;
+ bar->size = pci_resource_len(pdev, bar_nr);
+ bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+ if (!bar->virt_addr) {
+ dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+ ret = -EFAULT;
+ goto out_err_free_reg;
+ }
+ }
+ pci_set_master(pdev);
+
+ if (pci_save_state(pdev)) {
+ dev_err(&pdev->dev, "Failed to save pci state\n");
+ ret = -ENOMEM;
+ goto out_err_free_reg;
+ }
+
+ adf_dbgfs_init(accel_dev);
+
+ ret = adf_dev_up(accel_dev, true);
+ if (ret)
+ goto out_err_dev_stop;
+
+ return ret;
+
+out_err_dev_stop:
+ adf_dev_down(accel_dev, false);
+out_err_free_reg:
+ pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+ pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+ adf_cleanup_accel(accel_dev);
+ kfree(accel_dev);
+ return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ if (!accel_dev) {
+ pr_err("QAT: Driver removal failed\n");
+ return;
+ }
+ adf_dev_down(accel_dev, false);
+ adf_cleanup_accel(accel_dev);
+ adf_cleanup_pci_dev(accel_dev);
+ kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+ request_module("intel_qat");
+
+ if (pci_register_driver(&adf_driver)) {
+ pr_err("QAT: Driver initialization failed\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+ pci_unregister_driver(&adf_driver);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE(ADF_C3XXX_FW);
+MODULE_FIRMWARE(ADF_C3XXX_MMP);
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
new file mode 100644
index 0000000000..b6d76825a9
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o
+qat_c3xxxvf-objs := adf_drv.o adf_c3xxxvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
new file mode 100644
index 0000000000..84d9486e04
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_gen2_config.h>
+#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
+#include <adf_pfvf_vf_msg.h>
+#include "adf_c3xxxvf_hw_data.h"
+
+static struct adf_hw_device_class c3xxxiov_class = {
+ .name = ADF_C3XXXVF_DEVICE_NAME,
+ .type = DEV_C3XXXVF,
+ .instances = 0
+};
+
+static u32 get_accel_mask(struct adf_hw_device_data *self)
+{
+ return ADF_C3XXXIOV_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+ return ADF_C3XXXIOV_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+ return ADF_C3XXXIOV_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+ return ADF_C3XXXIOV_MAX_ACCELENGINES;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_C3XXXIOV_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_C3XXXIOV_ETR_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+ return DEV_SKU_VF;
+}
+
+static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
+static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
+{
+}
+
+void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class = &c3xxxiov_class;
+ hw_data->num_banks = ADF_C3XXXIOV_ETR_MAX_BANKS;
+ hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
+ hw_data->num_accel = ADF_C3XXXIOV_MAX_ACCELERATORS;
+ hw_data->num_logical_accel = 1;
+ hw_data->num_engines = ADF_C3XXXIOV_MAX_ACCELENGINES;
+ hw_data->tx_rx_gap = ADF_C3XXXIOV_RX_RINGS_OFFSET;
+ hw_data->tx_rings_mask = ADF_C3XXXIOV_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
+ hw_data->alloc_irq = adf_vf_isr_resource_alloc;
+ hw_data->free_irq = adf_vf_isr_resource_free;
+ hw_data->enable_error_correction = adf_vf_void_noop;
+ hw_data->init_admin_comms = adf_vf_int_noop;
+ hw_data->exit_admin_comms = adf_vf_void_noop;
+ hw_data->send_admin_init = adf_vf2pf_notify_init;
+ hw_data->init_arb = adf_vf_int_noop;
+ hw_data->exit_arb = adf_vf_void_noop;
+ hw_data->disable_iov = adf_vf2pf_notify_shutdown;
+ hw_data->get_accel_mask = get_accel_mask;
+ hw_data->get_ae_mask = get_ae_mask;
+ hw_data->get_num_accels = get_num_accels;
+ hw_data->get_num_aes = get_num_aes;
+ hw_data->get_etr_bar_id = get_etr_bar_id;
+ hw_data->get_misc_bar_id = get_misc_bar_id;
+ hw_data->get_sku = get_sku;
+ hw_data->enable_ints = adf_vf_void_noop;
+ hw_data->dev_class->instances++;
+ hw_data->dev_config = adf_gen2_dev_config;
+ adf_devmgr_update_class_index(hw_data);
+ adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
+ adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen2_init_dc_ops(&hw_data->dc_ops);
+}
+
+void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class->instances--;
+ adf_devmgr_update_class_index(hw_data);
+}
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
new file mode 100644
index 0000000000..6b4bf181d1
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2015 - 2020 Intel Corporation */
+#ifndef ADF_C3XXXVF_HW_DATA_H_
+#define ADF_C3XXXVF_HW_DATA_H_
+
+#define ADF_C3XXXIOV_PMISC_BAR 1
+#define ADF_C3XXXIOV_ACCELERATORS_MASK 0x1
+#define ADF_C3XXXIOV_ACCELENGINES_MASK 0x1
+#define ADF_C3XXXIOV_MAX_ACCELERATORS 1
+#define ADF_C3XXXIOV_MAX_ACCELENGINES 1
+#define ADF_C3XXXIOV_RX_RINGS_OFFSET 8
+#define ADF_C3XXXIOV_TX_RINGS_MASK 0xFF
+#define ADF_C3XXXIOV_ETR_BAR 0
+#define ADF_C3XXXIOV_ETR_MAX_BANKS 1
+
+void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
+#endif
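The constants above describe a VF with a single 16-ring bank split evenly between request and response rings. A minimal standalone sketch of that layout, with the values inlined and renamed for brevity, assuming (as the gen2 transport code does) that a request ring is paired with the ring tx_rx_gap slots above it:

#include <stdio.h>

/* Inlined copies of the ADF_C3XXXIOV_* values defined above */
#define RX_RINGS_OFFSET 8	/* becomes hw_data->tx_rx_gap */
#define TX_RINGS_MASK 0xFF	/* rings 0-7 carry requests */
#define RINGS_PER_BANK 16	/* ADF_ETR_MAX_RINGS_PER_BANK */

int main(void)
{
	int ring;

	for (ring = 0; ring < RINGS_PER_BANK; ring++) {
		if (TX_RINGS_MASK & (1 << ring))
			printf("ring %2d: TX, response ring %d\n",
			       ring, ring + RX_RINGS_OFFSET);
		else
			printf("ring %2d: RX\n", ring);
	}
	return 0;
}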
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c
new file mode 100644
index 0000000000..d5a0ecca9d
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include <adf_dbgfs.h>
+#include "adf_c3xxxvf_hw_data.h"
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF), },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_C3XXXVF_DEVICE_NAME,
+ .probe = adf_probe,
+ .remove = adf_remove,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+ pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+ pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+ struct adf_accel_dev *pf;
+ int i;
+
+ for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+ if (bar->virt_addr)
+ pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+ }
+
+ if (accel_dev->hw_device) {
+ switch (accel_pci_dev->pci_dev->device) {
+ case PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF:
+ adf_clean_hw_data_c3xxxiov(accel_dev->hw_device);
+ break;
+ default:
+ break;
+ }
+ kfree(accel_dev->hw_device);
+ accel_dev->hw_device = NULL;
+ }
+ adf_dbgfs_exit(accel_dev);
+ adf_cfg_dev_remove(accel_dev);
+ pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
+ adf_devmgr_rm_dev(accel_dev, pf);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct adf_accel_dev *accel_dev;
+ struct adf_accel_dev *pf;
+ struct adf_accel_pci *accel_pci_dev;
+ struct adf_hw_device_data *hw_data;
+ unsigned int i, bar_nr;
+ unsigned long bar_mask;
+ int ret;
+
+ switch (ent->device) {
+ case PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF:
+ break;
+ default:
+ dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+ return -ENODEV;
+ }
+
+ accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!accel_dev)
+ return -ENOMEM;
+
+ accel_dev->is_vf = true;
+ pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
+ accel_pci_dev = &accel_dev->accel_pci_dev;
+ accel_pci_dev->pci_dev = pdev;
+
+ /* Add accel device to accel table */
+ if (adf_devmgr_add_dev(accel_dev, pf)) {
+ dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+ kfree(accel_dev);
+ return -EFAULT;
+ }
+ INIT_LIST_HEAD(&accel_dev->crypto_list);
+
+ accel_dev->owner = THIS_MODULE;
+ /* Allocate and configure device configuration structure */
+ hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!hw_data) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ accel_dev->hw_device = hw_data;
+ adf_init_hw_data_c3xxxiov(accel_dev->hw_device);
+
+ /* Get accelerator and accelerator engine masks */
+ hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+ hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+ accel_pci_dev->sku = hw_data->get_sku(hw_data);
+
+ /* Create device configuration table */
+ ret = adf_cfg_dev_add(accel_dev);
+ if (ret)
+ goto out_err;
+
+ /* enable PCI device */
+ if (pci_enable_device(pdev)) {
+ ret = -EFAULT;
+ goto out_err;
+ }
+
+ /* set dma identifier */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+ if (ret) {
+ dev_err(&pdev->dev, "No usable DMA configuration\n");
+ goto out_err_disable;
+ }
+
+ if (pci_request_regions(pdev, ADF_C3XXXVF_DEVICE_NAME)) {
+ ret = -EFAULT;
+ goto out_err_disable;
+ }
+
+ /* Find and map all the device's BARs */
+ i = 0;
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+ bar->base_addr = pci_resource_start(pdev, bar_nr);
+ if (!bar->base_addr)
+ break;
+ bar->size = pci_resource_len(pdev, bar_nr);
+ bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+ if (!bar->virt_addr) {
+ dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+ ret = -EFAULT;
+ goto out_err_free_reg;
+ }
+ }
+ pci_set_master(pdev);
+ /* Completion for VF2PF request/response message exchange */
+ init_completion(&accel_dev->vf.msg_received);
+
+ adf_dbgfs_init(accel_dev);
+
+ ret = adf_dev_up(accel_dev, false);
+ if (ret)
+ goto out_err_dev_stop;
+
+ return ret;
+
+out_err_dev_stop:
+ adf_dev_down(accel_dev, false);
+out_err_free_reg:
+ pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+ pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+ adf_cleanup_accel(accel_dev);
+ kfree(accel_dev);
+ return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ if (!accel_dev) {
+ pr_err("QAT: Driver removal failed\n");
+ return;
+ }
+ adf_flush_vf_wq(accel_dev);
+ adf_dev_down(accel_dev, false);
+ adf_cleanup_accel(accel_dev);
+ adf_cleanup_pci_dev(accel_dev);
+ kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+ request_module("intel_qat");
+
+ if (pci_register_driver(&adf_driver)) {
+ pr_err("QAT: Driver initialization failed\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+ pci_unregister_driver(&adf_driver);
+ adf_clean_vf_map(true);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/intel/qat/qat_c62x/Makefile b/drivers/crypto/intel/qat/qat_c62x/Makefile
new file mode 100644
index 0000000000..d581f7c87d
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_c62x/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o
+qat_c62x-objs := adf_drv.o adf_c62x_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
new file mode 100644
index 0000000000..355a781693
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2021 Intel Corporation */
+#include <adf_accel_devices.h>
+#include <adf_clock.h>
+#include <adf_common_drv.h>
+#include <adf_gen2_config.h>
+#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
+#include "adf_c62x_hw_data.h"
+#include "adf_heartbeat.h"
+#include "icp_qat_hw.h"
+
+/* Worker thread to service arbiter mappings */
+static const u32 thrd_to_arb_map[ADF_C62X_MAX_ACCELENGINES] = {
+ 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
+ 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA
+};
+
+static struct adf_hw_device_class c62x_class = {
+ .name = ADF_C62X_DEVICE_NAME,
+ .type = DEV_C62X,
+ .instances = 0
+};
+
+static u32 get_accel_mask(struct adf_hw_device_data *self)
+{
+ u32 straps = self->straps;
+ u32 fuses = self->fuses;
+ u32 accel;
+
+ accel = ~(fuses | straps) >> ADF_C62X_ACCELERATORS_REG_OFFSET;
+ accel &= ADF_C62X_ACCELERATORS_MASK;
+
+ return accel;
+}
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+ u32 straps = self->straps;
+ u32 fuses = self->fuses;
+ unsigned long disabled;
+ u32 ae_disable;
+ int accel;
+
+ /* If an accel is disabled, then disable the corresponding two AEs */
+ disabled = ~get_accel_mask(self) & ADF_C62X_ACCELERATORS_MASK;
+ ae_disable = BIT(1) | BIT(0);
+ for_each_set_bit(accel, &disabled, ADF_C62X_MAX_ACCELERATORS)
+ straps |= ae_disable << (accel << 1);
+
+ return ~(fuses | straps) & ADF_C62X_ACCELENGINES_MASK;
+}
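The accelerator-to-engine coupling above is easiest to see with a concrete value. A purely illustrative, standalone rework of the same arithmetic, with the ADF_C62X_* constants inlined and a plain loop in place of for_each_set_bit():

#include <stdint.h>
#include <stdio.h>

#define ACCELERATORS_REG_OFFSET 16	/* ADF_C62X_ACCELERATORS_REG_OFFSET */
#define ACCELERATORS_MASK 0x1F		/* ADF_C62X_ACCELERATORS_MASK */
#define ACCELENGINES_MASK 0x3FF		/* ADF_C62X_ACCELENGINES_MASK */
#define MAX_ACCELERATORS 5		/* ADF_C62X_MAX_ACCELERATORS */

static uint32_t c62x_ae_mask(uint32_t fuses, uint32_t straps)
{
	uint32_t accel = (~(fuses | straps) >> ACCELERATORS_REG_OFFSET) &
			 ACCELERATORS_MASK;
	uint32_t disabled = ~accel & ACCELERATORS_MASK;
	int i;

	/* A disabled accelerator i takes engines 2*i and 2*i + 1 with it */
	for (i = 0; i < MAX_ACCELERATORS; i++)
		if (disabled & (1u << i))
			straps |= 0x3u << (i << 1);

	return ~(fuses | straps) & ACCELENGINES_MASK;
}

int main(void)
{
	/* Accelerator 2 fused off: engines 4 and 5 drop out of the mask */
	uint32_t fuses = 1u << (ACCELERATORS_REG_OFFSET + 2);

	printf("ae_mask = 0x%03x\n", c62x_ae_mask(fuses, 0)); /* prints 0x3cf */
	return 0;
}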
+
+static u32 get_ts_clock(struct adf_hw_device_data *self)
+{
+ /*
+ * Timestamp update interval is 16 AE clock ticks for c62x.
+ */
+ return self->clock_frequency / 16;
+}
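For example, at the nominal 685 MHz engine clock defined in adf_c62x_hw_data.h this gives a timestamp clock of roughly 685 MHz / 16 ≈ 42.8 MHz; at runtime the divisor is applied to the frequency filled in by measure_clock() below rather than to the nominal value.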
+
+static int measure_clock(struct adf_accel_dev *accel_dev)
+{
+ u32 frequency;
+ int ret;
+
+ ret = adf_dev_measure_clock(accel_dev, &frequency, ADF_C62X_MIN_AE_FREQ,
+ ADF_C62X_MAX_AE_FREQ);
+ if (ret)
+ return ret;
+
+ accel_dev->hw_device->clock_frequency = frequency;
+ return 0;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_C62X_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_C62X_ETR_BAR;
+}
+
+static u32 get_sram_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_C62X_SRAM_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+ int aes = self->get_num_aes(self);
+
+ if (aes == 8)
+ return DEV_SKU_2;
+ else if (aes == 10)
+ return DEV_SKU_4;
+
+ return DEV_SKU_UNKNOWN;
+}
+
+static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
+{
+ return thrd_to_arb_map;
+}
+
+static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
+{
+ adf_gen2_cfg_iov_thds(accel_dev, enable,
+ ADF_C62X_AE2FUNC_MAP_GRP_A_NUM_REGS,
+ ADF_C62X_AE2FUNC_MAP_GRP_B_NUM_REGS);
+}
+
+void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class = &c62x_class;
+ hw_data->instance_id = c62x_class.instances++;
+ hw_data->num_banks = ADF_C62X_ETR_MAX_BANKS;
+ hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
+ hw_data->num_accel = ADF_C62X_MAX_ACCELERATORS;
+ hw_data->num_logical_accel = 1;
+ hw_data->num_engines = ADF_C62X_MAX_ACCELENGINES;
+ hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
+ hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
+ hw_data->alloc_irq = adf_isr_resource_alloc;
+ hw_data->free_irq = adf_isr_resource_free;
+ hw_data->enable_error_correction = adf_gen2_enable_error_correction;
+ hw_data->get_accel_mask = get_accel_mask;
+ hw_data->get_ae_mask = get_ae_mask;
+ hw_data->get_accel_cap = adf_gen2_get_accel_cap;
+ hw_data->get_num_accels = adf_gen2_get_num_accels;
+ hw_data->get_num_aes = adf_gen2_get_num_aes;
+ hw_data->get_sram_bar_id = get_sram_bar_id;
+ hw_data->get_etr_bar_id = get_etr_bar_id;
+ hw_data->get_misc_bar_id = get_misc_bar_id;
+ hw_data->get_admin_info = adf_gen2_get_admin_info;
+ hw_data->get_arb_info = adf_gen2_get_arb_info;
+ hw_data->get_sku = get_sku;
+ hw_data->fw_name = ADF_C62X_FW;
+ hw_data->fw_mmp_name = ADF_C62X_MMP;
+ hw_data->init_admin_comms = adf_init_admin_comms;
+ hw_data->exit_admin_comms = adf_exit_admin_comms;
+ hw_data->configure_iov_threads = configure_iov_threads;
+ hw_data->send_admin_init = adf_send_admin_init;
+ hw_data->init_arb = adf_init_arb;
+ hw_data->exit_arb = adf_exit_arb;
+ hw_data->get_arb_mapping = adf_get_arbiter_mapping;
+ hw_data->enable_ints = adf_gen2_enable_ints;
+ hw_data->reset_device = adf_reset_flr;
+ hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
+ hw_data->disable_iov = adf_disable_sriov;
+ hw_data->dev_config = adf_gen2_dev_config;
+ hw_data->measure_clock = measure_clock;
+ hw_data->get_hb_clock = get_ts_clock;
+ hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
+ hw_data->check_hb_ctrs = adf_heartbeat_check_ctrs;
+
+ adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
+ adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen2_init_dc_ops(&hw_data->dc_ops);
+}
+
+void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class->instances--;
+}
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h
new file mode 100644
index 0000000000..13e6ebf6fd
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_C62X_HW_DATA_H_
+#define ADF_C62X_HW_DATA_H_
+
+#include <linux/units.h>
+
+/* PCIe configuration space */
+#define ADF_C62X_SRAM_BAR 0
+#define ADF_C62X_PMISC_BAR 1
+#define ADF_C62X_ETR_BAR 2
+#define ADF_C62X_MAX_ACCELERATORS 5
+#define ADF_C62X_MAX_ACCELENGINES 10
+#define ADF_C62X_ACCELERATORS_REG_OFFSET 16
+#define ADF_C62X_ACCELERATORS_MASK 0x1F
+#define ADF_C62X_ACCELENGINES_MASK 0x3FF
+#define ADF_C62X_ETR_MAX_BANKS 16
+#define ADF_C62X_SOFTSTRAP_CSR_OFFSET 0x2EC
+
+/* AE to function mapping */
+#define ADF_C62X_AE2FUNC_MAP_GRP_A_NUM_REGS 80
+#define ADF_C62X_AE2FUNC_MAP_GRP_B_NUM_REGS 10
+
+/* Clocks frequency */
+#define ADF_C62X_AE_FREQ (685 * HZ_PER_MHZ)
+#define ADF_C62X_MIN_AE_FREQ (533 * HZ_PER_MHZ)
+#define ADF_C62X_MAX_AE_FREQ (800 * HZ_PER_MHZ)
+
+/* Firmware Binary */
+#define ADF_C62X_FW "qat_c62x.bin"
+#define ADF_C62X_MMP "qat_c62x_mmp.bin"
+
+void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data);
+#endif
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
new file mode 100644
index 0000000000..0186921be9
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include <adf_dbgfs.h>
+#include "adf_c62x_hw_data.h"
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X), },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_C62X_DEVICE_NAME,
+ .probe = adf_probe,
+ .remove = adf_remove,
+ .sriov_configure = adf_sriov_configure,
+ .err_handler = &adf_err_handler,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+ pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+ pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+ int i;
+
+ for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+ if (bar->virt_addr)
+ pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+ }
+
+ if (accel_dev->hw_device) {
+ switch (accel_pci_dev->pci_dev->device) {
+ case PCI_DEVICE_ID_INTEL_QAT_C62X:
+ adf_clean_hw_data_c62x(accel_dev->hw_device);
+ break;
+ default:
+ break;
+ }
+ kfree(accel_dev->hw_device);
+ accel_dev->hw_device = NULL;
+ }
+ adf_dbgfs_exit(accel_dev);
+ adf_cfg_dev_remove(accel_dev);
+ adf_devmgr_rm_dev(accel_dev, NULL);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct adf_accel_dev *accel_dev;
+ struct adf_accel_pci *accel_pci_dev;
+ struct adf_hw_device_data *hw_data;
+ unsigned int i, bar_nr;
+ unsigned long bar_mask;
+ int ret;
+
+ switch (ent->device) {
+ case PCI_DEVICE_ID_INTEL_QAT_C62X:
+ break;
+ default:
+ dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+ return -ENODEV;
+ }
+
+ if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+ /* If the accelerator is connected to a node with no memory,
+ * there is no point in using the accelerator since the remote
+ * memory transaction will be very slow. */
+ dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+ return -EINVAL;
+ }
+
+ accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!accel_dev)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&accel_dev->crypto_list);
+ accel_pci_dev = &accel_dev->accel_pci_dev;
+ accel_pci_dev->pci_dev = pdev;
+
+ /* Add accel device to accel table.
+ * This should be called before adf_cleanup_accel is called */
+ if (adf_devmgr_add_dev(accel_dev, NULL)) {
+ dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+ kfree(accel_dev);
+ return -EFAULT;
+ }
+
+ accel_dev->owner = THIS_MODULE;
+ /* Allocate and configure device configuration structure */
+ hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!hw_data) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ accel_dev->hw_device = hw_data;
+ adf_init_hw_data_c62x(accel_dev->hw_device);
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+ pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
+ &hw_data->fuses);
+ pci_read_config_dword(pdev, ADF_C62X_SOFTSTRAP_CSR_OFFSET,
+ &hw_data->straps);
+
+ /* Get accelerator and accelerator engine masks */
+ hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+ hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+ accel_pci_dev->sku = hw_data->get_sku(hw_data);
+ /* If the device has no acceleration engines then ignore it. */
+ if (!hw_data->accel_mask || !hw_data->ae_mask ||
+ ((~hw_data->ae_mask) & 0x01)) {
+ dev_err(&pdev->dev, "No acceleration units found");
+ ret = -EFAULT;
+ goto out_err;
+ }
+
+ /* Create device configuration table */
+ ret = adf_cfg_dev_add(accel_dev);
+ if (ret)
+ goto out_err;
+
+ /* enable PCI device */
+ if (pci_enable_device(pdev)) {
+ ret = -EFAULT;
+ goto out_err;
+ }
+
+ /* set dma identifier */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+ if (ret) {
+ dev_err(&pdev->dev, "No usable DMA configuration\n");
+ goto out_err_disable;
+ }
+
+ if (pci_request_regions(pdev, ADF_C62X_DEVICE_NAME)) {
+ ret = -EFAULT;
+ goto out_err_disable;
+ }
+
+ /* Get accelerator capabilities mask */
+ hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
+
+ /* Find and map all the device's BARs */
+ i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+ bar->base_addr = pci_resource_start(pdev, bar_nr);
+ if (!bar->base_addr)
+ break;
+ bar->size = pci_resource_len(pdev, bar_nr);
+ bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+ if (!bar->virt_addr) {
+ dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+ ret = -EFAULT;
+ goto out_err_free_reg;
+ }
+ }
+ pci_set_master(pdev);
+
+ if (pci_save_state(pdev)) {
+ dev_err(&pdev->dev, "Failed to save pci state\n");
+ ret = -ENOMEM;
+ goto out_err_free_reg;
+ }
+
+ adf_dbgfs_init(accel_dev);
+
+ ret = adf_dev_up(accel_dev, true);
+ if (ret)
+ goto out_err_dev_stop;
+
+ return ret;
+
+out_err_dev_stop:
+ adf_dev_down(accel_dev, false);
+out_err_free_reg:
+ pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+ pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+ adf_cleanup_accel(accel_dev);
+ kfree(accel_dev);
+ return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ if (!accel_dev) {
+ pr_err("QAT: Driver removal failed\n");
+ return;
+ }
+ adf_dev_down(accel_dev, false);
+ adf_cleanup_accel(accel_dev);
+ adf_cleanup_pci_dev(accel_dev);
+ kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+ request_module("intel_qat");
+
+ if (pci_register_driver(&adf_driver)) {
+ pr_err("QAT: Driver initialization failed\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+ pci_unregister_driver(&adf_driver);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE(ADF_C62X_FW);
+MODULE_FIRMWARE(ADF_C62X_MMP);
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/Makefile b/drivers/crypto/intel/qat/qat_c62xvf/Makefile
new file mode 100644
index 0000000000..446c3d6386
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_c62xvf/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o
+qat_c62xvf-objs := adf_drv.o adf_c62xvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
new file mode 100644
index 0000000000..751d7aa57f
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_gen2_config.h>
+#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
+#include <adf_pfvf_vf_msg.h>
+#include "adf_c62xvf_hw_data.h"
+
+static struct adf_hw_device_class c62xiov_class = {
+ .name = ADF_C62XVF_DEVICE_NAME,
+ .type = DEV_C62XVF,
+ .instances = 0
+};
+
+static u32 get_accel_mask(struct adf_hw_device_data *self)
+{
+ return ADF_C62XIOV_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+ return ADF_C62XIOV_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+ return ADF_C62XIOV_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+ return ADF_C62XIOV_MAX_ACCELENGINES;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_C62XIOV_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_C62XIOV_ETR_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+ return DEV_SKU_VF;
+}
+
+static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
+static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
+{
+}
+
+void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class = &c62xiov_class;
+ hw_data->num_banks = ADF_C62XIOV_ETR_MAX_BANKS;
+ hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
+ hw_data->num_accel = ADF_C62XIOV_MAX_ACCELERATORS;
+ hw_data->num_logical_accel = 1;
+ hw_data->num_engines = ADF_C62XIOV_MAX_ACCELENGINES;
+ hw_data->tx_rx_gap = ADF_C62XIOV_RX_RINGS_OFFSET;
+ hw_data->tx_rings_mask = ADF_C62XIOV_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
+ hw_data->alloc_irq = adf_vf_isr_resource_alloc;
+ hw_data->free_irq = adf_vf_isr_resource_free;
+ hw_data->enable_error_correction = adf_vf_void_noop;
+ hw_data->init_admin_comms = adf_vf_int_noop;
+ hw_data->exit_admin_comms = adf_vf_void_noop;
+ hw_data->send_admin_init = adf_vf2pf_notify_init;
+ hw_data->init_arb = adf_vf_int_noop;
+ hw_data->exit_arb = adf_vf_void_noop;
+ hw_data->disable_iov = adf_vf2pf_notify_shutdown;
+ hw_data->get_accel_mask = get_accel_mask;
+ hw_data->get_ae_mask = get_ae_mask;
+ hw_data->get_num_accels = get_num_accels;
+ hw_data->get_num_aes = get_num_aes;
+ hw_data->get_etr_bar_id = get_etr_bar_id;
+ hw_data->get_misc_bar_id = get_misc_bar_id;
+ hw_data->get_sku = get_sku;
+ hw_data->enable_ints = adf_vf_void_noop;
+ hw_data->dev_class->instances++;
+ hw_data->dev_config = adf_gen2_dev_config;
+ adf_devmgr_update_class_index(hw_data);
+ adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
+ adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen2_init_dc_ops(&hw_data->dc_ops);
+}
+
+void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class->instances--;
+ adf_devmgr_update_class_index(hw_data);
+}
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.h b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.h
new file mode 100644
index 0000000000..a1a62c003e
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2015 - 2020 Intel Corporation */
+#ifndef ADF_C62XVF_HW_DATA_H_
+#define ADF_C62XVF_HW_DATA_H_
+
+#define ADF_C62XIOV_PMISC_BAR 1
+#define ADF_C62XIOV_ACCELERATORS_MASK 0x1
+#define ADF_C62XIOV_ACCELENGINES_MASK 0x1
+#define ADF_C62XIOV_MAX_ACCELERATORS 1
+#define ADF_C62XIOV_MAX_ACCELENGINES 1
+#define ADF_C62XIOV_RX_RINGS_OFFSET 8
+#define ADF_C62XIOV_TX_RINGS_MASK 0xFF
+#define ADF_C62XIOV_ETR_BAR 0
+#define ADF_C62XIOV_ETR_MAX_BANKS 1
+
+void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
+#endif
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c
new file mode 100644
index 0000000000..c9ae6c0d0d
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include <adf_dbgfs.h>
+#include "adf_c62xvf_hw_data.h"
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X_VF), },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_C62XVF_DEVICE_NAME,
+ .probe = adf_probe,
+ .remove = adf_remove,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+ pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+ pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+ struct adf_accel_dev *pf;
+ int i;
+
+ for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+ if (bar->virt_addr)
+ pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+ }
+
+ if (accel_dev->hw_device) {
+ switch (accel_pci_dev->pci_dev->device) {
+ case PCI_DEVICE_ID_INTEL_QAT_C62X_VF:
+ adf_clean_hw_data_c62xiov(accel_dev->hw_device);
+ break;
+ default:
+ break;
+ }
+ kfree(accel_dev->hw_device);
+ accel_dev->hw_device = NULL;
+ }
+ adf_dbgfs_exit(accel_dev);
+ adf_cfg_dev_remove(accel_dev);
+ pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
+ adf_devmgr_rm_dev(accel_dev, pf);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct adf_accel_dev *accel_dev;
+ struct adf_accel_dev *pf;
+ struct adf_accel_pci *accel_pci_dev;
+ struct adf_hw_device_data *hw_data;
+ unsigned int i, bar_nr;
+ unsigned long bar_mask;
+ int ret;
+
+ switch (ent->device) {
+ case PCI_DEVICE_ID_INTEL_QAT_C62X_VF:
+ break;
+ default:
+ dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+ return -ENODEV;
+ }
+
+ accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!accel_dev)
+ return -ENOMEM;
+
+ accel_dev->is_vf = true;
+ pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
+ accel_pci_dev = &accel_dev->accel_pci_dev;
+ accel_pci_dev->pci_dev = pdev;
+
+ /* Add accel device to accel table */
+ if (adf_devmgr_add_dev(accel_dev, pf)) {
+ dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+ kfree(accel_dev);
+ return -EFAULT;
+ }
+ INIT_LIST_HEAD(&accel_dev->crypto_list);
+
+ accel_dev->owner = THIS_MODULE;
+ /* Allocate and configure device configuration structure */
+ hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!hw_data) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ accel_dev->hw_device = hw_data;
+ adf_init_hw_data_c62xiov(accel_dev->hw_device);
+
+ /* Get accelerator and accelerator engine masks */
+ hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+ hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+ accel_pci_dev->sku = hw_data->get_sku(hw_data);
+
+ /* Create device configuration table */
+ ret = adf_cfg_dev_add(accel_dev);
+ if (ret)
+ goto out_err;
+
+ /* enable PCI device */
+ if (pci_enable_device(pdev)) {
+ ret = -EFAULT;
+ goto out_err;
+ }
+
+ /* set dma identifier */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+ if (ret) {
+ dev_err(&pdev->dev, "No usable DMA configuration\n");
+ goto out_err_disable;
+ }
+
+ if (pci_request_regions(pdev, ADF_C62XVF_DEVICE_NAME)) {
+ ret = -EFAULT;
+ goto out_err_disable;
+ }
+
+ /* Find and map all the device's BARs */
+ i = 0;
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+ bar->base_addr = pci_resource_start(pdev, bar_nr);
+ if (!bar->base_addr)
+ break;
+ bar->size = pci_resource_len(pdev, bar_nr);
+ bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+ if (!bar->virt_addr) {
+ dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+ ret = -EFAULT;
+ goto out_err_free_reg;
+ }
+ }
+ pci_set_master(pdev);
+ /* Completion for VF2PF request/response message exchange */
+ init_completion(&accel_dev->vf.msg_received);
+
+ adf_dbgfs_init(accel_dev);
+
+ ret = adf_dev_up(accel_dev, false);
+ if (ret)
+ goto out_err_dev_stop;
+
+ return ret;
+
+out_err_dev_stop:
+ adf_dev_down(accel_dev, false);
+out_err_free_reg:
+ pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+ pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+ adf_cleanup_accel(accel_dev);
+ kfree(accel_dev);
+ return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ if (!accel_dev) {
+ pr_err("QAT: Driver removal failed\n");
+ return;
+ }
+ adf_flush_vf_wq(accel_dev);
+ adf_dev_down(accel_dev, false);
+ adf_cleanup_accel(accel_dev);
+ adf_cleanup_pci_dev(accel_dev);
+ kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+ request_module("intel_qat");
+
+ if (pci_register_driver(&adf_driver)) {
+ pr_err("QAT: Driver initialization failed\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+ pci_unregister_driver(&adf_driver);
+ adf_clean_vf_map(true);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
new file mode 100644
index 0000000000..43622c7fca
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
+intel_qat-objs := adf_cfg.o \
+ adf_isr.o \
+ adf_ctl_drv.o \
+ adf_dev_mgr.o \
+ adf_init.o \
+ adf_accel_engine.o \
+ adf_aer.o \
+ adf_transport.o \
+ adf_admin.o \
+ adf_hw_arbiter.o \
+ adf_sysfs.o \
+ adf_gen2_hw_data.o \
+ adf_gen2_config.o \
+ adf_gen4_hw_data.o \
+ adf_gen4_pm.o \
+ adf_gen2_dc.o \
+ adf_gen4_dc.o \
+ adf_gen4_timer.o \
+ adf_clock.o \
+ qat_crypto.o \
+ qat_compression.o \
+ qat_comp_algs.o \
+ qat_algs.o \
+ qat_asym_algs.o \
+ qat_algs_send.o \
+ qat_uclo.o \
+ qat_hal.o \
+ qat_bl.o
+
+intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \
+ adf_fw_counters.o \
+ adf_heartbeat.o \
+ adf_heartbeat_dbgfs.o \
+ adf_dbgfs.o
+
+intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
+ adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \
+ adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \
+ adf_gen2_pfvf.o adf_gen4_pfvf.o
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
new file mode 100644
index 0000000000..79d5a1535e
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -0,0 +1,334 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_ACCEL_DEVICES_H_
+#define ADF_ACCEL_DEVICES_H_
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/ratelimit.h>
+#include "adf_cfg_common.h"
+#include "adf_pfvf_msg.h"
+
+#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
+#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
+#define ADF_C62X_DEVICE_NAME "c6xx"
+#define ADF_C62XVF_DEVICE_NAME "c6xxvf"
+#define ADF_C3XXX_DEVICE_NAME "c3xxx"
+#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
+#define ADF_4XXX_DEVICE_NAME "4xxx"
+#define ADF_4XXX_PCI_DEVICE_ID 0x4940
+#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
+#define ADF_401XX_PCI_DEVICE_ID 0x4942
+#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943
+#define ADF_402XX_PCI_DEVICE_ID 0x4944
+#define ADF_402XXIOV_PCI_DEVICE_ID 0x4945
+#define ADF_DEVICE_FUSECTL_OFFSET 0x40
+#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
+#define ADF_DEVICE_FUSECTL_MASK 0x80000000
+#define ADF_PCI_MAX_BARS 3
+#define ADF_DEVICE_NAME_LENGTH 32
+#define ADF_ETR_MAX_RINGS_PER_BANK 16
+#define ADF_MAX_MSIX_VECTOR_NAME 48
+#define ADF_DEVICE_NAME_PREFIX "qat_"
+
+enum adf_accel_capabilities {
+ ADF_ACCEL_CAPABILITIES_NULL = 0,
+ ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1,
+ ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2,
+ ADF_ACCEL_CAPABILITIES_CIPHER = 4,
+ ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8,
+ ADF_ACCEL_CAPABILITIES_COMPRESSION = 32,
+ ADF_ACCEL_CAPABILITIES_LZS_COMPRESSION = 64,
+ ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
+};
+
+struct adf_bar {
+ resource_size_t base_addr;
+ void __iomem *virt_addr;
+ resource_size_t size;
+};
+
+struct adf_irq {
+ bool enabled;
+ char name[ADF_MAX_MSIX_VECTOR_NAME];
+};
+
+struct adf_accel_msix {
+ struct adf_irq *irqs;
+ u32 num_entries;
+};
+
+struct adf_accel_pci {
+ struct pci_dev *pci_dev;
+ struct adf_accel_msix msix_entries;
+ struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
+ u8 revid;
+ u8 sku;
+};
+
+enum dev_state {
+ DEV_DOWN = 0,
+ DEV_UP
+};
+
+enum dev_sku_info {
+ DEV_SKU_1 = 0,
+ DEV_SKU_2,
+ DEV_SKU_3,
+ DEV_SKU_4,
+ DEV_SKU_VF,
+ DEV_SKU_UNKNOWN,
+};
+
+static inline const char *get_sku_info(enum dev_sku_info info)
+{
+ switch (info) {
+ case DEV_SKU_1:
+ return "SKU1";
+ case DEV_SKU_2:
+ return "SKU2";
+ case DEV_SKU_3:
+ return "SKU3";
+ case DEV_SKU_4:
+ return "SKU4";
+ case DEV_SKU_VF:
+ return "SKUVF";
+ case DEV_SKU_UNKNOWN:
+ default:
+ break;
+ }
+ return "Unknown SKU";
+}
+
+struct adf_hw_device_class {
+ const char *name;
+ const enum adf_device_type type;
+ u32 instances;
+};
+
+struct arb_info {
+ u32 arb_cfg;
+ u32 arb_offset;
+ u32 wt2sam_offset;
+};
+
+struct admin_info {
+ u32 admin_msg_ur;
+ u32 admin_msg_lr;
+ u32 mailbox_offset;
+};
+
+struct adf_hw_csr_ops {
+ u64 (*build_csr_ring_base_addr)(dma_addr_t addr, u32 size);
+ u32 (*read_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
+ u32 ring);
+ void (*write_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
+ u32 ring, u32 value);
+ u32 (*read_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
+ u32 ring);
+ void (*write_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
+ u32 ring, u32 value);
+ u32 (*read_csr_e_stat)(void __iomem *csr_base_addr, u32 bank);
+ void (*write_csr_ring_config)(void __iomem *csr_base_addr, u32 bank,
+ u32 ring, u32 value);
+ void (*write_csr_ring_base)(void __iomem *csr_base_addr, u32 bank,
+ u32 ring, dma_addr_t addr);
+ void (*write_csr_int_flag)(void __iomem *csr_base_addr, u32 bank,
+ u32 value);
+ void (*write_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank);
+ void (*write_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank,
+ u32 value);
+ void (*write_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank,
+ u32 value);
+ void (*write_csr_int_flag_and_col)(void __iomem *csr_base_addr,
+ u32 bank, u32 value);
+ void (*write_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank,
+ u32 value);
+};
+
+struct adf_cfg_device_data;
+struct adf_accel_dev;
+struct adf_etr_data;
+struct adf_etr_ring_data;
+
+struct adf_pfvf_ops {
+ int (*enable_comms)(struct adf_accel_dev *accel_dev);
+ u32 (*get_pf2vf_offset)(u32 i);
+ u32 (*get_vf2pf_offset)(u32 i);
+ void (*enable_vf2pf_interrupts)(void __iomem *pmisc_addr, u32 vf_mask);
+ void (*disable_all_vf2pf_interrupts)(void __iomem *pmisc_addr);
+ u32 (*disable_pending_vf2pf_interrupts)(void __iomem *pmisc_addr);
+ int (*send_msg)(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+ u32 pfvf_offset, struct mutex *csr_lock);
+ struct pfvf_message (*recv_msg)(struct adf_accel_dev *accel_dev,
+ u32 pfvf_offset, u8 compat_ver);
+};
+
+struct adf_dc_ops {
+ void (*build_deflate_ctx)(void *ctx);
+};
+
+struct adf_hw_device_data {
+ struct adf_hw_device_class *dev_class;
+ u32 (*get_accel_mask)(struct adf_hw_device_data *self);
+ u32 (*get_ae_mask)(struct adf_hw_device_data *self);
+ u32 (*get_accel_cap)(struct adf_accel_dev *accel_dev);
+ u32 (*get_sram_bar_id)(struct adf_hw_device_data *self);
+ u32 (*get_misc_bar_id)(struct adf_hw_device_data *self);
+ u32 (*get_etr_bar_id)(struct adf_hw_device_data *self);
+ u32 (*get_num_aes)(struct adf_hw_device_data *self);
+ u32 (*get_num_accels)(struct adf_hw_device_data *self);
+ void (*get_arb_info)(struct arb_info *arb_csrs_info);
+ void (*get_admin_info)(struct admin_info *admin_csrs_info);
+ enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
+ u16 (*get_ring_to_svc_map)(struct adf_accel_dev *accel_dev);
+ int (*alloc_irq)(struct adf_accel_dev *accel_dev);
+ void (*free_irq)(struct adf_accel_dev *accel_dev);
+ void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
+ int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
+ void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
+ int (*send_admin_init)(struct adf_accel_dev *accel_dev);
+ int (*start_timer)(struct adf_accel_dev *accel_dev);
+ void (*stop_timer)(struct adf_accel_dev *accel_dev);
+ void (*check_hb_ctrs)(struct adf_accel_dev *accel_dev);
+ uint32_t (*get_hb_clock)(struct adf_hw_device_data *self);
+ int (*measure_clock)(struct adf_accel_dev *accel_dev);
+ int (*init_arb)(struct adf_accel_dev *accel_dev);
+ void (*exit_arb)(struct adf_accel_dev *accel_dev);
+ const u32 *(*get_arb_mapping)(struct adf_accel_dev *accel_dev);
+ int (*init_device)(struct adf_accel_dev *accel_dev);
+ int (*enable_pm)(struct adf_accel_dev *accel_dev);
+ bool (*handle_pm_interrupt)(struct adf_accel_dev *accel_dev);
+ void (*disable_iov)(struct adf_accel_dev *accel_dev);
+ void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
+ bool enable);
+ void (*enable_ints)(struct adf_accel_dev *accel_dev);
+ void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
+ int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr);
+ void (*reset_device)(struct adf_accel_dev *accel_dev);
+ void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
+ const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
+ u32 (*uof_get_num_objs)(void);
+ u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
+ int (*dev_config)(struct adf_accel_dev *accel_dev);
+ struct adf_pfvf_ops pfvf_ops;
+ struct adf_hw_csr_ops csr_ops;
+ struct adf_dc_ops dc_ops;
+ const char *fw_name;
+ const char *fw_mmp_name;
+ u32 fuses;
+ u32 straps;
+ u32 accel_capabilities_mask;
+ u32 extended_dc_capabilities;
+ u32 clock_frequency;
+ u32 instance_id;
+ u16 accel_mask;
+ u32 ae_mask;
+ u32 admin_ae_mask;
+ u16 tx_rings_mask;
+ u16 ring_to_svc_map;
+ u8 tx_rx_gap;
+ u8 num_banks;
+ u16 num_banks_per_vf;
+ u8 num_rings_per_bank;
+ u8 num_accel;
+ u8 num_logical_accel;
+ u8 num_engines;
+ u32 num_hb_ctrs;
+};
+
+/* CSR write macro */
+#define ADF_CSR_WR(csr_base, csr_offset, val) \
+ __raw_writel(val, csr_base + csr_offset)
+
+/* CSR read macro */
+#define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset)
+
+#define ADF_CFG_NUM_SERVICES 4
+#define ADF_SRV_TYPE_BIT_LEN 3
+#define ADF_SRV_TYPE_MASK 0x7
+#define ADF_AE_ADMIN_THREAD 7
+#define ADF_NUM_THREADS_PER_AE 8
+#define ADF_NUM_PKE_STRAND 2
+#define ADF_AE_STRAND0_THREAD 8
+#define ADF_AE_STRAND1_THREAD 9
+
+#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev)
+#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
+#define GET_HW_DATA(accel_dev) (accel_dev->hw_device)
+#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks)
+#define GET_NUM_RINGS_PER_BANK(accel_dev) \
+ GET_HW_DATA(accel_dev)->num_rings_per_bank
+#define GET_SRV_TYPE(accel_dev, idx) \
+ (((GET_HW_DATA(accel_dev)->ring_to_svc_map) >> (ADF_SRV_TYPE_BIT_LEN * (idx))) \
+ & ADF_SRV_TYPE_MASK)
+#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
+#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops)
+#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops)
+#define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops)
+#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
+
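The GET_SRV_TYPE() macro above extracts one 3-bit service code per index from ring_to_svc_map; each index corresponds to a ring pair within a bank. A minimal standalone decode loop showing the same extraction (the map value here is a made-up example; the real codes come from the service definitions in adf_cfg_common.h and per-generation defaults such as ADF_GEN2_DEFAULT_RING_TO_SRV_MAP):

#include <stdio.h>

#define SRV_TYPE_BIT_LEN 3	/* ADF_SRV_TYPE_BIT_LEN */
#define SRV_TYPE_MASK 0x7	/* ADF_SRV_TYPE_MASK */
#define NUM_SERVICES 4		/* ADF_CFG_NUM_SERVICES */

int main(void)
{
	unsigned int ring_to_svc_map = 0x1234;	/* hypothetical example value */
	int idx;

	for (idx = 0; idx < NUM_SERVICES; idx++) {
		unsigned int svc = (ring_to_svc_map >>
				    (SRV_TYPE_BIT_LEN * idx)) & SRV_TYPE_MASK;

		printf("ring pair %d -> service code %u\n", idx, svc);
	}
	return 0;
}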
+struct adf_admin_comms;
+struct icp_qat_fw_loader_handle;
+struct adf_fw_loader_data {
+ struct icp_qat_fw_loader_handle *fw_loader;
+ const struct firmware *uof_fw;
+ const struct firmware *mmp_fw;
+};
+
+struct adf_accel_vf_info {
+ struct adf_accel_dev *accel_dev;
+ struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
+ struct ratelimit_state vf2pf_ratelimit;
+ u32 vf_nr;
+ bool init;
+ u8 vf_compat_ver;
+};
+
+struct adf_dc_data {
+ u8 *ovf_buff;
+ size_t ovf_buff_sz;
+ dma_addr_t ovf_buff_p;
+};
+
+struct adf_accel_dev {
+ struct adf_etr_data *transport;
+ struct adf_hw_device_data *hw_device;
+ struct adf_cfg_device_data *cfg;
+ struct adf_fw_loader_data *fw_loader;
+ struct adf_admin_comms *admin;
+ struct adf_dc_data *dc_data;
+ struct list_head crypto_list;
+ struct list_head compression_list;
+ unsigned long status;
+ atomic_t ref_count;
+ struct dentry *debugfs_dir;
+ struct dentry *fw_cntr_dbgfile;
+ struct list_head list;
+ struct module *owner;
+ struct adf_accel_pci accel_pci_dev;
+ struct adf_timer *timer;
+ struct adf_heartbeat *heartbeat;
+ union {
+ struct {
+ /* protects VF2PF interrupts access */
+ spinlock_t vf2pf_ints_lock;
+ /* vf_info is non-zero when SR-IOV is init'ed */
+ struct adf_accel_vf_info *vf_info;
+ } pf;
+ struct {
+ bool irq_enabled;
+ char irq_name[ADF_MAX_MSIX_VECTOR_NAME];
+ struct tasklet_struct pf2vf_bh_tasklet;
+ struct mutex vf2pf_lock; /* protect CSR access */
+ struct completion msg_received;
+ struct pfvf_message response; /* temp field holding pf2vf response */
+ u8 pf_compat_ver;
+ } vf;
+ };
+ struct mutex state_lock; /* protect state of the device */
+ bool is_vf;
+ u32 accel_id;
+};
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
new file mode 100644
index 0000000000..6be064dc64
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include "adf_cfg.h"
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_uclo.h"
+
+static int adf_ae_fw_load_images(struct adf_accel_dev *accel_dev, void *fw_addr,
+ u32 fw_size)
+{
+ struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ struct icp_qat_fw_loader_handle *loader;
+ const char *obj_name;
+ u32 num_objs;
+ u32 ae_mask;
+ int i;
+
+ loader = loader_data->fw_loader;
+ num_objs = hw_device->uof_get_num_objs();
+
+ for (i = 0; i < num_objs; i++) {
+ obj_name = hw_device->uof_get_name(accel_dev, i);
+ ae_mask = hw_device->uof_get_ae_mask(accel_dev, i);
+ if (!obj_name || !ae_mask) {
+ dev_err(&GET_DEV(accel_dev), "Invalid UOF image\n");
+ goto out_err;
+ }
+
+ if (qat_uclo_set_cfg_ae_mask(loader, ae_mask)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid mask for UOF image\n");
+ goto out_err;
+ }
+ if (qat_uclo_map_obj(loader, fw_addr, fw_size, obj_name)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to map UOF firmware\n");
+ goto out_err;
+ }
+ if (qat_uclo_wr_all_uimage(loader)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to load UOF firmware\n");
+ goto out_err;
+ }
+ qat_uclo_del_obj(loader);
+ }
+
+ return 0;
+
+out_err:
+ adf_ae_fw_release(accel_dev);
+ return -EFAULT;
+}
+
+int adf_ae_fw_load(struct adf_accel_dev *accel_dev)
+{
+ struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ void *fw_addr, *mmp_addr;
+ u32 fw_size, mmp_size;
+
+ if (!hw_device->fw_name)
+ return 0;
+
+ if (request_firmware(&loader_data->mmp_fw, hw_device->fw_mmp_name,
+ &accel_dev->accel_pci_dev.pci_dev->dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load MMP firmware %s\n",
+ hw_device->fw_mmp_name);
+ return -EFAULT;
+ }
+ if (request_firmware(&loader_data->uof_fw, hw_device->fw_name,
+ &accel_dev->accel_pci_dev.pci_dev->dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load UOF firmware %s\n",
+ hw_device->fw_name);
+ goto out_err;
+ }
+
+ fw_size = loader_data->uof_fw->size;
+ fw_addr = (void *)loader_data->uof_fw->data;
+ mmp_size = loader_data->mmp_fw->size;
+ mmp_addr = (void *)loader_data->mmp_fw->data;
+
+ if (qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load MMP\n");
+ goto out_err;
+ }
+
+ if (hw_device->uof_get_num_objs)
+ return adf_ae_fw_load_images(accel_dev, fw_addr, fw_size);
+
+ if (qat_uclo_map_obj(loader_data->fw_loader, fw_addr, fw_size, NULL)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to map FW\n");
+ goto out_err;
+ }
+ if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load UOF\n");
+ goto out_err;
+ }
+ return 0;
+
+out_err:
+ adf_ae_fw_release(accel_dev);
+ return -EFAULT;
+}
+
+void adf_ae_fw_release(struct adf_accel_dev *accel_dev)
+{
+ struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+ if (!hw_device->fw_name)
+ return;
+
+ qat_uclo_del_obj(loader_data->fw_loader);
+ qat_hal_deinit(loader_data->fw_loader);
+ release_firmware(loader_data->uof_fw);
+ release_firmware(loader_data->mmp_fw);
+ loader_data->uof_fw = NULL;
+ loader_data->mmp_fw = NULL;
+ loader_data->fw_loader = NULL;
+}
+
+int adf_ae_start(struct adf_accel_dev *accel_dev)
+{
+ struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 ae_ctr;
+
+ if (!hw_data->fw_name)
+ return 0;
+
+ ae_ctr = qat_hal_start(loader_data->fw_loader);
+ dev_info(&GET_DEV(accel_dev),
+ "qat_dev%d started %d acceleration engines\n",
+ accel_dev->accel_id, ae_ctr);
+ return 0;
+}
+
+int adf_ae_stop(struct adf_accel_dev *accel_dev)
+{
+ struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
+
+ if (!hw_data->fw_name)
+ return 0;
+
+ for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
+ if (hw_data->ae_mask & (1 << ae)) {
+ qat_hal_stop(loader_data->fw_loader, ae, 0xFF);
+ ae_ctr++;
+ }
+ }
+ dev_info(&GET_DEV(accel_dev),
+ "qat_dev%d stopped %d acceleration engines\n",
+ accel_dev->accel_id, ae_ctr);
+ return 0;
+}
+
+static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae)
+{
+ struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+
+ qat_hal_reset(loader_data->fw_loader);
+ if (qat_hal_clr_reset(loader_data->fw_loader))
+ return -EFAULT;
+
+ return 0;
+}
+
+int adf_ae_init(struct adf_accel_dev *accel_dev)
+{
+ struct adf_fw_loader_data *loader_data;
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+ if (!hw_device->fw_name)
+ return 0;
+
+ loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL);
+ if (!loader_data)
+ return -ENOMEM;
+
+ accel_dev->fw_loader = loader_data;
+ if (qat_hal_init(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to init the AEs\n");
+ kfree(loader_data);
+ return -EFAULT;
+ }
+ if (adf_ae_reset(accel_dev, 0)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to reset the AEs\n");
+ qat_hal_deinit(loader_data->fw_loader);
+ kfree(loader_data);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+int adf_ae_shutdown(struct adf_accel_dev *accel_dev)
+{
+ struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+ if (!hw_device->fw_name)
+ return 0;
+
+ qat_hal_deinit(loader_data->fw_loader);
+ kfree(accel_dev->fw_loader);
+ accel_dev->fw_loader = NULL;
+ return 0;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c
new file mode 100644
index 0000000000..194d64d4b9
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c
@@ -0,0 +1,453 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_heartbeat.h"
+#include "icp_qat_fw_init_admin.h"
+
+#define ADF_ADMIN_MAILBOX_STRIDE 0x1000
+#define ADF_ADMINMSG_LEN 32
+#define ADF_CONST_TABLE_SIZE 1024
+#define ADF_ADMIN_POLL_DELAY_US 20
+#define ADF_ADMIN_POLL_TIMEOUT_US (5 * USEC_PER_SEC)
+#define ADF_ONE_AE 1
+
+static const u8 const_tab[1024] __aligned(1024) = {
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13,
+0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76,
+0x54, 0x32, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab,
+0x89, 0x98, 0xba, 0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0,
+0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x05, 0x9e,
+0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd, 0x17, 0xf7, 0x0e, 0x59, 0x39,
+0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58, 0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe,
+0xfa, 0x4f, 0xa4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae,
+0x85, 0x3c, 0x6e, 0xf3, 0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f,
+0x9b, 0x05, 0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19, 0x05,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
+0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
+0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67, 0x33, 0x26, 0x67, 0xff,
+0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c,
+0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f,
+0xa4, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb,
+0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
+0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51, 0x0e, 0x52,
+0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
+0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13,
+0x7e, 0x21, 0x79, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x18,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x01, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x15, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x02, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x14, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x02,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x24, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x25,
+0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x12, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x43, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x01, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x01,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x2B, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+struct adf_admin_comms {
+ dma_addr_t phy_addr;
+ dma_addr_t const_tbl_addr;
+ void *virt_addr;
+ void *virt_tbl_addr;
+ void __iomem *mailbox_addr;
+ struct mutex lock; /* protects adf_admin_comms struct */
+};
+
+static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae,
+ void *in, void *out)
+{
+ int ret;
+ u32 status;
+ struct adf_admin_comms *admin = accel_dev->admin;
+ int offset = ae * ADF_ADMINMSG_LEN * 2;
+ void __iomem *mailbox = admin->mailbox_addr;
+ int mb_offset = ae * ADF_ADMIN_MAILBOX_STRIDE;
+ struct icp_qat_fw_init_admin_req *request = in;
+
+ mutex_lock(&admin->lock);
+
+ if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
+ mutex_unlock(&admin->lock);
+ return -EAGAIN;
+ }
+
+ memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
+ ADF_CSR_WR(mailbox, mb_offset, 1);
+
+ ret = read_poll_timeout(ADF_CSR_RD, status, status == 0,
+ ADF_ADMIN_POLL_DELAY_US,
+ ADF_ADMIN_POLL_TIMEOUT_US, true,
+ mailbox, mb_offset);
+ if (ret < 0) {
+ /* Response timeout */
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send admin msg %d to accelerator %d\n",
+ request->cmd_id, ae);
+ } else {
+		/* Response received from the admin message; copy the
+		 * response data into the "out" parameter.
+		 */
+ memcpy(out, admin->virt_addr + offset +
+ ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
+ }
+
+ mutex_unlock(&admin->lock);
+ return ret;
+}
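adf_put_admin_msg_sync() keeps each AE's request/response pair in one shared DMA page and uses a per-AE mailbox CSR as a busy flag: the request goes at ae * ADF_ADMINMSG_LEN * 2, the response directly after it, and the mailbox register at ae * ADF_ADMIN_MAILBOX_STRIDE. The stand-alone sketch below replays that offset arithmetic with the constants defined at the top of this file; the AE indices are arbitrary examples.

/*
 * Offset arithmetic used by adf_put_admin_msg_sync(), shown as a
 * stand-alone sketch.  Constants mirror ADF_ADMIN_MAILBOX_STRIDE and
 * ADF_ADMINMSG_LEN above; the AE indices are arbitrary.
 */
#include <stdio.h>

#define ADMIN_MAILBOX_STRIDE 0x1000
#define ADMINMSG_LEN 32

int main(void)
{
	unsigned int ae;

	for (ae = 0; ae < 3; ae++) {
		unsigned int req = ae * ADMINMSG_LEN * 2;      /* request slot  */
		unsigned int rsp = req + ADMINMSG_LEN;         /* response slot */
		unsigned int mbx = ae * ADMIN_MAILBOX_STRIDE;  /* mailbox CSR   */

		printf("AE%u: req @ 0x%03x, resp @ 0x%03x, mailbox @ 0x%04x\n",
		       ae, req, rsp, mbx);
	}
	return 0;
}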
+
+static int adf_send_admin(struct adf_accel_dev *accel_dev,
+ struct icp_qat_fw_init_admin_req *req,
+ struct icp_qat_fw_init_admin_resp *resp,
+ const unsigned long ae_mask)
+{
+ u32 ae;
+
+ for_each_set_bit(ae, &ae_mask, ICP_QAT_HW_AE_DELIMITER)
+ if (adf_put_admin_msg_sync(accel_dev, ae, req, resp) ||
+ resp->status)
+ return -EFAULT;
+
+ return 0;
+}
+
+static int adf_init_ae(struct adf_accel_dev *accel_dev)
+{
+ struct icp_qat_fw_init_admin_req req;
+ struct icp_qat_fw_init_admin_resp resp;
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ u32 ae_mask = hw_device->ae_mask;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+ req.cmd_id = ICP_QAT_FW_INIT_AE;
+
+ return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
+
+static int adf_set_fw_constants(struct adf_accel_dev *accel_dev)
+{
+ struct icp_qat_fw_init_admin_req req;
+ struct icp_qat_fw_init_admin_resp resp;
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ u32 ae_mask = hw_device->admin_ae_mask ?: hw_device->ae_mask;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+ req.cmd_id = ICP_QAT_FW_CONSTANTS_CFG;
+
+ req.init_cfg_sz = ADF_CONST_TABLE_SIZE;
+ req.init_cfg_ptr = accel_dev->admin->const_tbl_addr;
+
+ return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
+
+int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp)
+{
+ struct icp_qat_fw_init_admin_req req = { };
+ struct icp_qat_fw_init_admin_resp resp;
+ unsigned int ae_mask = ADF_ONE_AE;
+ int ret;
+
+ req.cmd_id = ICP_QAT_FW_TIMER_GET;
+ ret = adf_send_admin(accel_dev, &req, &resp, ae_mask);
+ if (ret)
+ return ret;
+
+ *timestamp = resp.timestamp;
+ return 0;
+}
+
+static int adf_set_chaining(struct adf_accel_dev *accel_dev)
+{
+ u32 ae_mask = GET_HW_DATA(accel_dev)->ae_mask;
+ struct icp_qat_fw_init_admin_resp resp = { };
+ struct icp_qat_fw_init_admin_req req = { };
+
+ req.cmd_id = ICP_QAT_FW_DC_CHAIN_INIT;
+
+ return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
+
+static int adf_get_dc_capabilities(struct adf_accel_dev *accel_dev,
+ u32 *capabilities)
+{
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ struct icp_qat_fw_init_admin_resp resp;
+ struct icp_qat_fw_init_admin_req req;
+ unsigned long ae_mask;
+ unsigned long ae;
+ int ret;
+
+ /* Target only service accelerator engines */
+ ae_mask = hw_device->ae_mask & ~hw_device->admin_ae_mask;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+ req.cmd_id = ICP_QAT_FW_COMP_CAPABILITY_GET;
+
+ *capabilities = 0;
+ for_each_set_bit(ae, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
+ ret = adf_send_admin(accel_dev, &req, &resp, 1ULL << ae);
+ if (ret)
+ return ret;
+
+ *capabilities |= resp.extended_features;
+ }
+
+ return 0;
+}
+
+int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u64 *resps)
+{
+ struct icp_qat_fw_init_admin_resp resp = { };
+ struct icp_qat_fw_init_admin_req req = { };
+ int ret;
+
+ req.cmd_id = ICP_QAT_FW_COUNTERS_GET;
+
+ ret = adf_put_admin_msg_sync(accel_dev, ae, &req, &resp);
+ if (ret || resp.status)
+ return -EFAULT;
+
+ *reqs = resp.req_rec_count;
+ *resps = resp.resp_sent_count;
+
+ return 0;
+}
+
+int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt)
+{
+ u32 ae_mask = accel_dev->hw_device->ae_mask;
+ struct icp_qat_fw_init_admin_req req = { };
+ struct icp_qat_fw_init_admin_resp resp = { };
+
+ req.cmd_id = ICP_QAT_FW_SYNC;
+ req.int_timer_ticks = cnt;
+
+ return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
+
+int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks)
+{
+ u32 ae_mask = accel_dev->hw_device->ae_mask;
+ struct icp_qat_fw_init_admin_req req = { };
+ struct icp_qat_fw_init_admin_resp resp;
+
+ req.cmd_id = ICP_QAT_FW_HEARTBEAT_TIMER_SET;
+ req.init_cfg_ptr = accel_dev->heartbeat->dma.phy_addr;
+ req.heartbeat_ticks = ticks;
+
+ return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
+
+static bool is_dcc_enabled(struct adf_accel_dev *accel_dev)
+{
+ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+ int ret;
+
+ ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED, services);
+ if (ret)
+ return false;
+
+ return !strcmp(services, "dcc");
+}
+
+/**
+ * adf_send_admin_init() - Function sends init message to FW
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function sends admin init message to the FW
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_admin_init(struct adf_accel_dev *accel_dev)
+{
+ u32 dc_capabilities = 0;
+ int ret;
+
+ ret = adf_set_fw_constants(accel_dev);
+ if (ret)
+ return ret;
+
+ if (is_dcc_enabled(accel_dev)) {
+ ret = adf_set_chaining(accel_dev);
+ if (ret)
+ return ret;
+ }
+
+ ret = adf_get_dc_capabilities(accel_dev, &dc_capabilities);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Cannot get dc capabilities\n");
+ return ret;
+ }
+ accel_dev->hw_device->extended_dc_capabilities = dc_capabilities;
+
+ return adf_init_ae(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_send_admin_init);
+
+/**
+ * adf_init_admin_pm() - Function sends PM init message to FW
+ * @accel_dev: Pointer to acceleration device.
+ * @idle_delay: QAT HW idle time before power gating is initiated.
+ * 000 - 64us
+ * 001 - 128us
+ * 010 - 256us
+ * 011 - 512us
+ * 100 - 1ms
+ * 101 - 2ms
+ * 110 - 4ms
+ * 111 - 8ms
+ *
+ * Function sends the admin init message to the FW to configure the PM
+ * state.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct icp_qat_fw_init_admin_resp resp = {0};
+ struct icp_qat_fw_init_admin_req req = {0};
+ u32 ae_mask = hw_data->admin_ae_mask;
+
+ if (!accel_dev->admin) {
+ dev_err(&GET_DEV(accel_dev), "adf_admin is not available\n");
+ return -EFAULT;
+ }
+
+ req.cmd_id = ICP_QAT_FW_PM_STATE_CONFIG;
+ req.idle_filter = idle_delay;
+
+ return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
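The idle_delay table in the kernel-doc above doubles from 64 us per step, i.e. idle time = 64 us << idle_delay (with the millisecond entries rounded). That closed form is inferred from the table rather than stated anywhere in this patch; a stand-alone sketch printing the resulting values:

/*
 * Sketch of the idle_delay encoding documented for adf_init_admin_pm():
 * idle time = 64 us << idle_delay.  The closed form is an inference from
 * the table above, not spelled out in the driver.
 */
#include <stdio.h>

int main(void)
{
	unsigned int idle_delay;

	for (idle_delay = 0; idle_delay <= 7; idle_delay++)
		printf("idle_delay=%u -> %u us\n",
		       idle_delay, 64u << idle_delay);
	return 0;
}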
+
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
+{
+ struct adf_admin_comms *admin;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ struct admin_info admin_csrs_info;
+ u32 mailbox_offset, adminmsg_u, adminmsg_l;
+ void __iomem *mailbox;
+ u64 reg_val;
+
+ admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
+ dev_to_node(&GET_DEV(accel_dev)));
+ if (!admin)
+ return -ENOMEM;
+ admin->virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+ &admin->phy_addr, GFP_KERNEL);
+ if (!admin->virt_addr) {
+ dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
+ kfree(admin);
+ return -ENOMEM;
+ }
+
+ admin->virt_tbl_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
+ PAGE_SIZE,
+ &admin->const_tbl_addr,
+ GFP_KERNEL);
+ if (!admin->virt_tbl_addr) {
+ dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n");
+ dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+ admin->virt_addr, admin->phy_addr);
+ kfree(admin);
+ return -ENOMEM;
+ }
+
+ memcpy(admin->virt_tbl_addr, const_tab, sizeof(const_tab));
+ hw_data->get_admin_info(&admin_csrs_info);
+
+ mailbox_offset = admin_csrs_info.mailbox_offset;
+ mailbox = pmisc_addr + mailbox_offset;
+ adminmsg_u = admin_csrs_info.admin_msg_ur;
+ adminmsg_l = admin_csrs_info.admin_msg_lr;
+
+ reg_val = (u64)admin->phy_addr;
+ ADF_CSR_WR(pmisc_addr, adminmsg_u, upper_32_bits(reg_val));
+ ADF_CSR_WR(pmisc_addr, adminmsg_l, lower_32_bits(reg_val));
+
+ mutex_init(&admin->lock);
+ admin->mailbox_addr = mailbox;
+ accel_dev->admin = admin;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_init_admin_comms);
+
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
+{
+ struct adf_admin_comms *admin = accel_dev->admin;
+
+ if (!admin)
+ return;
+
+ if (admin->virt_addr)
+ dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+ admin->virt_addr, admin->phy_addr);
+ if (admin->virt_tbl_addr)
+ dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+ admin->virt_tbl_addr, admin->const_tbl_addr);
+
+ mutex_destroy(&admin->lock);
+ kfree(admin);
+ accel_dev->admin = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_exit_admin_comms);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
new file mode 100644
index 0000000000..a39e70bd4b
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+
+static struct workqueue_struct *device_reset_wq;
+
+static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ dev_info(&pdev->dev, "Acceleration driver hardware error detected.\n");
+ if (!accel_dev) {
+ dev_err(&pdev->dev, "Can't find acceleration device\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ if (state == pci_channel_io_perm_failure) {
+ dev_err(&pdev->dev, "Can't recover from device error\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/* reset dev data */
+struct adf_reset_dev_data {
+ int mode;
+ struct adf_accel_dev *accel_dev;
+ struct completion compl;
+ struct work_struct reset_work;
+};
+
+void adf_reset_sbr(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+ struct pci_dev *parent = pdev->bus->self;
+ u16 bridge_ctl = 0;
+
+ if (!parent)
+ parent = pdev;
+
+ if (!pci_wait_for_pending_transaction(pdev))
+ dev_info(&GET_DEV(accel_dev),
+ "Transaction still in progress. Proceeding\n");
+
+ dev_info(&GET_DEV(accel_dev), "Secondary bus reset\n");
+
+ pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl);
+ bridge_ctl |= PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
+ msleep(100);
+ bridge_ctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
+ msleep(100);
+}
+EXPORT_SYMBOL_GPL(adf_reset_sbr);
+
+void adf_reset_flr(struct adf_accel_dev *accel_dev)
+{
+ pcie_flr(accel_to_pci_dev(accel_dev));
+}
+EXPORT_SYMBOL_GPL(adf_reset_flr);
+
+void adf_dev_restore(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+ if (hw_device->reset_device) {
+ dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
+ accel_dev->accel_id);
+ hw_device->reset_device(accel_dev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ }
+}
+
+static void adf_device_reset_worker(struct work_struct *work)
+{
+ struct adf_reset_dev_data *reset_data =
+ container_of(work, struct adf_reset_dev_data, reset_work);
+ struct adf_accel_dev *accel_dev = reset_data->accel_dev;
+
+ adf_dev_restarting_notify(accel_dev);
+ if (adf_dev_restart(accel_dev)) {
+		/* The device hung and we can't restart it, so stop here */
+ dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
+ if (reset_data->mode == ADF_DEV_RESET_ASYNC)
+ kfree(reset_data);
+ WARN(1, "QAT: device restart failed. Device is unusable\n");
+ return;
+ }
+ adf_dev_restarted_notify(accel_dev);
+ clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+
+ /* The dev is back alive. Notify the caller if in sync mode */
+ if (reset_data->mode == ADF_DEV_RESET_SYNC)
+ complete(&reset_data->compl);
+ else
+ kfree(reset_data);
+}
+
+static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
+ enum adf_dev_reset_mode mode)
+{
+ struct adf_reset_dev_data *reset_data;
+
+ if (!adf_dev_started(accel_dev) ||
+ test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
+ return 0;
+
+ set_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+ reset_data = kzalloc(sizeof(*reset_data), GFP_KERNEL);
+ if (!reset_data)
+ return -ENOMEM;
+ reset_data->accel_dev = accel_dev;
+ init_completion(&reset_data->compl);
+ reset_data->mode = mode;
+ INIT_WORK(&reset_data->reset_work, adf_device_reset_worker);
+ queue_work(device_reset_wq, &reset_data->reset_work);
+
+ /* If in sync mode wait for the result */
+ if (mode == ADF_DEV_RESET_SYNC) {
+ int ret = 0;
+ /* Maximum device reset time is 10 seconds */
+ unsigned long wait_jiffies = msecs_to_jiffies(10000);
+ unsigned long timeout = wait_for_completion_timeout(
+ &reset_data->compl, wait_jiffies);
+ if (!timeout) {
+ dev_err(&GET_DEV(accel_dev),
+ "Reset device timeout expired\n");
+ ret = -EFAULT;
+ }
+ kfree(reset_data);
+ return ret;
+ }
+ return 0;
+}
+
+static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ if (!accel_dev) {
+ pr_err("QAT: Can't find acceleration device\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC))
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void adf_resume(struct pci_dev *pdev)
+{
+ dev_info(&pdev->dev, "Acceleration driver reset completed\n");
+ dev_info(&pdev->dev, "Device is up and running\n");
+}
+
+const struct pci_error_handlers adf_err_handler = {
+ .error_detected = adf_error_detected,
+ .slot_reset = adf_slot_reset,
+ .resume = adf_resume,
+};
+EXPORT_SYMBOL_GPL(adf_err_handler);
+
+int adf_init_aer(void)
+{
+ device_reset_wq = alloc_workqueue("qat_device_reset_wq",
+ WQ_MEM_RECLAIM, 0);
+ return !device_reset_wq ? -EFAULT : 0;
+}
+
+void adf_exit_aer(void)
+{
+ if (device_reset_wq)
+ destroy_workqueue(device_reset_wq);
+ device_reset_wq = NULL;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg.c b/drivers/crypto/intel/qat/qat_common/adf_cfg.c
new file mode 100644
index 0000000000..8836f015c3
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static DEFINE_MUTEX(qat_cfg_read_lock);
+
+static void *qat_dev_cfg_start(struct seq_file *sfile, loff_t *pos)
+{
+ struct adf_cfg_device_data *dev_cfg = sfile->private;
+
+ mutex_lock(&qat_cfg_read_lock);
+ return seq_list_start(&dev_cfg->sec_list, *pos);
+}
+
+static int qat_dev_cfg_show(struct seq_file *sfile, void *v)
+{
+ struct list_head *list;
+ struct adf_cfg_section *sec =
+ list_entry(v, struct adf_cfg_section, list);
+
+ seq_printf(sfile, "[%s]\n", sec->name);
+ list_for_each(list, &sec->param_head) {
+ struct adf_cfg_key_val *ptr =
+ list_entry(list, struct adf_cfg_key_val, list);
+ seq_printf(sfile, "%s = %s\n", ptr->key, ptr->val);
+ }
+ return 0;
+}
+
+static void *qat_dev_cfg_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+ struct adf_cfg_device_data *dev_cfg = sfile->private;
+
+ return seq_list_next(v, &dev_cfg->sec_list, pos);
+}
+
+static void qat_dev_cfg_stop(struct seq_file *sfile, void *v)
+{
+ mutex_unlock(&qat_cfg_read_lock);
+}
+
+static const struct seq_operations qat_dev_cfg_sops = {
+ .start = qat_dev_cfg_start,
+ .next = qat_dev_cfg_next,
+ .stop = qat_dev_cfg_stop,
+ .show = qat_dev_cfg_show
+};
+
+DEFINE_SEQ_ATTRIBUTE(qat_dev_cfg);
+
+/**
+ * adf_cfg_dev_add() - Create an acceleration device configuration table.
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function creates a configuration table for the given acceleration device.
+ * The table stores device specific config values.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
+{
+ struct adf_cfg_device_data *dev_cfg_data;
+
+ dev_cfg_data = kzalloc(sizeof(*dev_cfg_data), GFP_KERNEL);
+ if (!dev_cfg_data)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&dev_cfg_data->sec_list);
+ init_rwsem(&dev_cfg_data->lock);
+ accel_dev->cfg = dev_cfg_data;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_dev_add);
+
+void adf_cfg_dev_dbgfs_add(struct adf_accel_dev *accel_dev)
+{
+ struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+
+ dev_cfg_data->debug = debugfs_create_file("dev_cfg", 0400,
+ accel_dev->debugfs_dir,
+ dev_cfg_data,
+ &qat_dev_cfg_fops);
+}
+
+void adf_cfg_dev_dbgfs_rm(struct adf_accel_dev *accel_dev)
+{
+ struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+
+ if (!dev_cfg_data)
+ return;
+
+ debugfs_remove(dev_cfg_data->debug);
+ dev_cfg_data->debug = NULL;
+}
+
+static void adf_cfg_section_del_all(struct list_head *head);
+
+void adf_cfg_del_all(struct adf_accel_dev *accel_dev)
+{
+ struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+
+ down_write(&dev_cfg_data->lock);
+ adf_cfg_section_del_all(&dev_cfg_data->sec_list);
+ up_write(&dev_cfg_data->lock);
+ clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+}
+
+/**
+ * adf_cfg_dev_remove() - Clears acceleration device configuration table.
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function removes configuration table from the given acceleration device
+ * and frees all allocated memory.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev)
+{
+ struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+
+ if (!dev_cfg_data)
+ return;
+
+ down_write(&dev_cfg_data->lock);
+ adf_cfg_section_del_all(&dev_cfg_data->sec_list);
+ up_write(&dev_cfg_data->lock);
+ kfree(dev_cfg_data);
+ accel_dev->cfg = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_dev_remove);
+
+static void adf_cfg_keyval_add(struct adf_cfg_key_val *new,
+ struct adf_cfg_section *sec)
+{
+ list_add_tail(&new->list, &sec->param_head);
+}
+
+static void adf_cfg_keyval_remove(const char *key, struct adf_cfg_section *sec)
+{
+ struct list_head *head = &sec->param_head;
+ struct list_head *list_ptr, *tmp;
+
+ list_for_each_prev_safe(list_ptr, tmp, head) {
+ struct adf_cfg_key_val *ptr =
+ list_entry(list_ptr, struct adf_cfg_key_val, list);
+
+ if (strncmp(ptr->key, key, sizeof(ptr->key)))
+ continue;
+
+ list_del(list_ptr);
+ kfree(ptr);
+ break;
+ }
+}
+
+static void adf_cfg_keyval_del_all(struct list_head *head)
+{
+ struct list_head *list_ptr, *tmp;
+
+ list_for_each_prev_safe(list_ptr, tmp, head) {
+ struct adf_cfg_key_val *ptr =
+ list_entry(list_ptr, struct adf_cfg_key_val, list);
+ list_del(list_ptr);
+ kfree(ptr);
+ }
+}
+
+static void adf_cfg_section_del_all(struct list_head *head)
+{
+ struct adf_cfg_section *ptr;
+ struct list_head *list, *tmp;
+
+ list_for_each_prev_safe(list, tmp, head) {
+ ptr = list_entry(list, struct adf_cfg_section, list);
+ adf_cfg_keyval_del_all(&ptr->param_head);
+ list_del(list);
+ kfree(ptr);
+ }
+}
+
+static struct adf_cfg_key_val *adf_cfg_key_value_find(struct adf_cfg_section *s,
+ const char *key)
+{
+ struct list_head *list;
+
+ list_for_each(list, &s->param_head) {
+ struct adf_cfg_key_val *ptr =
+ list_entry(list, struct adf_cfg_key_val, list);
+ if (!strcmp(ptr->key, key))
+ return ptr;
+ }
+ return NULL;
+}
+
+static struct adf_cfg_section *adf_cfg_sec_find(struct adf_accel_dev *accel_dev,
+ const char *sec_name)
+{
+ struct adf_cfg_device_data *cfg = accel_dev->cfg;
+ struct list_head *list;
+
+ list_for_each(list, &cfg->sec_list) {
+ struct adf_cfg_section *ptr =
+ list_entry(list, struct adf_cfg_section, list);
+ if (!strcmp(ptr->name, sec_name))
+ return ptr;
+ }
+ return NULL;
+}
+
+static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev,
+ const char *sec_name,
+ const char *key_name,
+ char *val)
+{
+ struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, sec_name);
+ struct adf_cfg_key_val *keyval = NULL;
+
+ if (sec)
+ keyval = adf_cfg_key_value_find(sec, key_name);
+ if (keyval) {
+ memcpy(val, keyval->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES);
+ return 0;
+ }
+ return -ENODATA;
+}
+
+/**
+ * adf_cfg_add_key_value_param() - Add key-value config entry to config table.
+ * @accel_dev: Pointer to acceleration device.
+ * @section_name: Name of the section where the param will be added
+ * @key: The key string
+ * @val: Value for the given @key
+ * @type: Type - string, int or address
+ *
+ * Function adds configuration key - value entry in the appropriate section
+ * in the given acceleration device. If the key exists already, the value
+ * is updated.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
+ const char *section_name,
+ const char *key, const void *val,
+ enum adf_cfg_val_type type)
+{
+ struct adf_cfg_device_data *cfg = accel_dev->cfg;
+ struct adf_cfg_key_val *key_val;
+ struct adf_cfg_section *section = adf_cfg_sec_find(accel_dev,
+ section_name);
+ char temp_val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+
+ if (!section)
+ return -EFAULT;
+
+ key_val = kzalloc(sizeof(*key_val), GFP_KERNEL);
+ if (!key_val)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&key_val->list);
+ strscpy(key_val->key, key, sizeof(key_val->key));
+
+ if (type == ADF_DEC) {
+ snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
+ "%ld", (*((long *)val)));
+ } else if (type == ADF_STR) {
+ strscpy(key_val->val, (char *)val, sizeof(key_val->val));
+ } else if (type == ADF_HEX) {
+ snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
+ "0x%lx", (unsigned long)val);
+ } else {
+ dev_err(&GET_DEV(accel_dev), "Unknown type given.\n");
+ kfree(key_val);
+ return -EINVAL;
+ }
+ key_val->type = type;
+
+	/* Add the key-value pair according to the following policy:
+ * 1. if the key doesn't exist, add it;
+ * 2. if the key already exists with a different value then update it
+ * to the new value (the key is deleted and the newly created
+ * key_val containing the new value is added to the database);
+ * 3. if the key exists with the same value, then return without doing
+ * anything (the newly created key_val is freed).
+ */
+ if (!adf_cfg_key_val_get(accel_dev, section_name, key, temp_val)) {
+ if (strncmp(temp_val, key_val->val, sizeof(temp_val))) {
+ adf_cfg_keyval_remove(key, section);
+ } else {
+ kfree(key_val);
+ return 0;
+ }
+ }
+
+ down_write(&cfg->lock);
+ adf_cfg_keyval_add(key_val, section);
+ up_write(&cfg->lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_add_key_value_param);
+
+/**
+ * adf_cfg_section_add() - Add config section entry to config table.
+ * @accel_dev: Pointer to acceleration device.
+ * @name: Name of the section
+ *
+ * Function adds configuration section where key - value entries
+ * will be stored.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name)
+{
+ struct adf_cfg_device_data *cfg = accel_dev->cfg;
+ struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, name);
+
+ if (sec)
+ return 0;
+
+ sec = kzalloc(sizeof(*sec), GFP_KERNEL);
+ if (!sec)
+ return -ENOMEM;
+
+ strscpy(sec->name, name, sizeof(sec->name));
+ INIT_LIST_HEAD(&sec->param_head);
+ down_write(&cfg->lock);
+ list_add_tail(&sec->list, &cfg->sec_list);
+ up_write(&cfg->lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_section_add);
+
+int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev,
+ const char *section, const char *name,
+ char *value)
+{
+ struct adf_cfg_device_data *cfg = accel_dev->cfg;
+ int ret;
+
+ down_read(&cfg->lock);
+ ret = adf_cfg_key_val_get(accel_dev, section, name, value);
+ up_read(&cfg->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_get_param_value);
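The exported configuration API above is meant to be driven by the QAT device-specific drivers: create the table, add a section, store key-value pairs, then read values back as strings. The sketch below illustrates that flow using only the functions declared in adf_cfg.h and the section/key strings from adf_cfg_strings.h; it is not taken from any in-tree device driver and the helper name is made up.

/*
 * Illustrative sketch (not from any in-tree QAT device driver) of the
 * typical use of the adf_cfg API: create the table, add a section,
 * store two key-value pairs, then read one back as a string.
 */
#include "adf_accel_devices.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"

static int example_configure(struct adf_accel_dev *accel_dev)
{
	char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { };
	unsigned long num_cy = 2;
	int ret;

	ret = adf_cfg_dev_add(accel_dev);
	if (ret)
		return ret;

	ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
	if (ret)
		return ret;

	/* ADF_DEC values are passed as a pointer to a long */
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
					  ADF_NUM_CY, &num_cy, ADF_DEC);
	if (ret)
		return ret;

	/* ADF_STR values are passed as a string */
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
					  ADF_SERVICES_ENABLED, ADF_CFG_CY,
					  ADF_STR);
	if (ret)
		return ret;

	/* Values always come back as strings */
	return adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
				       ADF_SERVICES_ENABLED, services);
}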
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg.h b/drivers/crypto/intel/qat/qat_common/adf_cfg.h
new file mode 100644
index 0000000000..c0c9052b22
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_CFG_H_
+#define ADF_CFG_H_
+
+#include <linux/list.h>
+#include <linux/rwsem.h>
+#include <linux/debugfs.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg_common.h"
+#include "adf_cfg_strings.h"
+
+struct adf_cfg_key_val {
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+ enum adf_cfg_val_type type;
+ struct list_head list;
+};
+
+struct adf_cfg_section {
+ char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
+ struct list_head list;
+ struct list_head param_head;
+};
+
+struct adf_cfg_device_data {
+ struct list_head sec_list;
+ struct dentry *debug;
+ struct rw_semaphore lock;
+};
+
+int adf_cfg_dev_add(struct adf_accel_dev *accel_dev);
+void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev);
+void adf_cfg_dev_dbgfs_add(struct adf_accel_dev *accel_dev);
+void adf_cfg_dev_dbgfs_rm(struct adf_accel_dev *accel_dev);
+int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name);
+void adf_cfg_del_all(struct adf_accel_dev *accel_dev);
+int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
+ const char *section_name,
+ const char *key, const void *val,
+ enum adf_cfg_val_type type);
+int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev,
+ const char *section, const char *name, char *value);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
new file mode 100644
index 0000000000..6e5de1dab9
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_CFG_COMMON_H_
+#define ADF_CFG_COMMON_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define ADF_CFG_MAX_STR_LEN 64
+#define ADF_CFG_MAX_KEY_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
+#define ADF_CFG_MAX_VAL_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
+#define ADF_CFG_MAX_SECTION_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
+#define ADF_CFG_BASE_DEC 10
+#define ADF_CFG_BASE_HEX 16
+#define ADF_CFG_ALL_DEVICES 0xFE
+#define ADF_CFG_NO_DEVICE 0xFF
+#define ADF_CFG_AFFINITY_WHATEVER 0xFF
+#define MAX_DEVICE_NAME_SIZE 32
+#define ADF_MAX_DEVICES (32 * 32)
+#define ADF_DEVS_ARRAY_SIZE BITS_TO_LONGS(ADF_MAX_DEVICES)
+
+#define ADF_CFG_SERV_RING_PAIR_0_SHIFT 0
+#define ADF_CFG_SERV_RING_PAIR_1_SHIFT 3
+#define ADF_CFG_SERV_RING_PAIR_2_SHIFT 6
+#define ADF_CFG_SERV_RING_PAIR_3_SHIFT 9
+enum adf_cfg_service_type {
+ UNUSED = 0,
+ CRYPTO,
+ COMP,
+ SYM,
+ ASYM,
+ USED
+};
+
+enum adf_cfg_val_type {
+ ADF_DEC,
+ ADF_HEX,
+ ADF_STR
+};
+
+enum adf_device_type {
+ DEV_UNKNOWN = 0,
+ DEV_DH895XCC,
+ DEV_DH895XCCVF,
+ DEV_C62X,
+ DEV_C62XVF,
+ DEV_C3XXX,
+ DEV_C3XXXVF,
+ DEV_4XXX,
+};
+
+struct adf_dev_status_info {
+ enum adf_device_type type;
+ __u32 accel_id;
+ __u32 instance_id;
+ __u8 num_ae;
+ __u8 num_accel;
+ __u8 num_logical_accel;
+ __u8 banks_per_accel;
+ __u8 state;
+ __u8 bus;
+ __u8 dev;
+ __u8 fun;
+ char name[MAX_DEVICE_NAME_SIZE];
+};
+
+#define ADF_CTL_IOC_MAGIC 'a'
+#define IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS _IOW(ADF_CTL_IOC_MAGIC, 0, \
+ struct adf_user_cfg_ctl_data)
+#define IOCTL_STOP_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 1, \
+ struct adf_user_cfg_ctl_data)
+#define IOCTL_START_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 2, \
+ struct adf_user_cfg_ctl_data)
+#define IOCTL_STATUS_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 3, __u32)
+#define IOCTL_GET_NUM_DEVICES _IOW(ADF_CTL_IOC_MAGIC, 4, __s32)
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
new file mode 100644
index 0000000000..b353d40c5c
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+#ifndef _ADF_CFG_SERVICES_H_
+#define _ADF_CFG_SERVICES_H_
+
+#include "adf_cfg_strings.h"
+
+enum adf_services {
+ SVC_CY = 0,
+ SVC_CY2,
+ SVC_DC,
+ SVC_DCC,
+ SVC_SYM,
+ SVC_ASYM,
+ SVC_DC_ASYM,
+ SVC_ASYM_DC,
+ SVC_DC_SYM,
+ SVC_SYM_DC,
+};
+
+static const char *const adf_cfg_services[] = {
+ [SVC_CY] = ADF_CFG_CY,
+ [SVC_CY2] = ADF_CFG_ASYM_SYM,
+ [SVC_DC] = ADF_CFG_DC,
+ [SVC_DCC] = ADF_CFG_DCC,
+ [SVC_SYM] = ADF_CFG_SYM,
+ [SVC_ASYM] = ADF_CFG_ASYM,
+ [SVC_DC_ASYM] = ADF_CFG_DC_ASYM,
+ [SVC_ASYM_DC] = ADF_CFG_ASYM_DC,
+ [SVC_DC_SYM] = ADF_CFG_DC_SYM,
+ [SVC_SYM_DC] = ADF_CFG_SYM_DC,
+};
+
+#endif
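The adf_cfg_services[] array maps each adf_services enumerator to the string used in the ServicesEnabled configuration key. A hedged sketch of the reverse lookup (string to enum index) follows; the helper name is made up for illustration and the real driver may resolve the string differently.

/*
 * Illustrative reverse lookup from a ServicesEnabled string to an
 * adf_services index.  The helper name is invented for this example.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include "adf_cfg_services.h"

static int example_services_str_to_enum(const char *name)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adf_cfg_services); i++)
		if (!strcmp(adf_cfg_services[i], name))
			return i;	/* e.g. "dc;sym" -> SVC_DC_SYM */

	return -EINVAL;
}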
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
new file mode 100644
index 0000000000..322b76903a
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_CFG_STRINGS_H_
+#define ADF_CFG_STRINGS_H_
+
+#define ADF_GENERAL_SEC "GENERAL"
+#define ADF_KERNEL_SEC "KERNEL"
+#define ADF_ACCEL_SEC "Accelerator"
+#define ADF_NUM_CY "NumberCyInstances"
+#define ADF_NUM_DC "NumberDcInstances"
+#define ADF_RING_SYM_SIZE "NumConcurrentSymRequests"
+#define ADF_RING_ASYM_SIZE "NumConcurrentAsymRequests"
+#define ADF_RING_DC_SIZE "NumConcurrentRequests"
+#define ADF_RING_ASYM_TX "RingAsymTx"
+#define ADF_RING_SYM_TX "RingSymTx"
+#define ADF_RING_ASYM_RX "RingAsymRx"
+#define ADF_RING_SYM_RX "RingSymRx"
+#define ADF_RING_DC_TX "RingTx"
+#define ADF_RING_DC_RX "RingRx"
+#define ADF_ETRMGR_BANK "Bank"
+#define ADF_RING_SYM_BANK_NUM "BankSymNumber"
+#define ADF_RING_ASYM_BANK_NUM "BankAsymNumber"
+#define ADF_RING_DC_BANK_NUM "BankDcNumber"
+#define ADF_CY "Cy"
+#define ADF_DC "Dc"
+#define ADF_CFG_DC "dc"
+#define ADF_CFG_CY "sym;asym"
+#define ADF_CFG_SYM "sym"
+#define ADF_CFG_ASYM "asym"
+#define ADF_CFG_ASYM_SYM "asym;sym"
+#define ADF_CFG_ASYM_DC "asym;dc"
+#define ADF_CFG_DC_ASYM "dc;asym"
+#define ADF_CFG_SYM_DC "sym;dc"
+#define ADF_CFG_DC_SYM "dc;sym"
+#define ADF_CFG_DCC "dcc"
+#define ADF_SERVICES_ENABLED "ServicesEnabled"
+#define ADF_PM_IDLE_SUPPORT "PmIdleSupport"
+#define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled"
+#define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \
+ ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCING_ENABLED
+#define ADF_ETRMGR_COALESCE_TIMER "InterruptCoalescingTimerNs"
+#define ADF_ETRMGR_COALESCE_TIMER_FORMAT \
+ ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCE_TIMER
+#define ADF_ETRMGR_COALESCING_MSG_ENABLED "InterruptCoalescingNumResponses"
+#define ADF_ETRMGR_COALESCING_MSG_ENABLED_FORMAT \
+ ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCING_MSG_ENABLED
+#define ADF_ETRMGR_CORE_AFFINITY "CoreAffinity"
+#define ADF_ETRMGR_CORE_AFFINITY_FORMAT \
+ ADF_ETRMGR_BANK "%d" ADF_ETRMGR_CORE_AFFINITY
+#define ADF_ACCEL_STR "Accelerator%d"
+#define ADF_HEARTBEAT_TIMER "HeartbeatTimer"
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_user.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_user.h
new file mode 100644
index 0000000000..421f4fb8b4
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_user.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_CFG_USER_H_
+#define ADF_CFG_USER_H_
+
+#include "adf_cfg_common.h"
+#include "adf_cfg_strings.h"
+
+struct adf_user_cfg_key_val {
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+ union {
+ struct adf_user_cfg_key_val *next;
+ __u64 padding3;
+ };
+ enum adf_cfg_val_type type;
+} __packed;
+
+struct adf_user_cfg_section {
+ char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
+ union {
+ struct adf_user_cfg_key_val *params;
+ __u64 padding1;
+ };
+ union {
+ struct adf_user_cfg_section *next;
+ __u64 padding3;
+ };
+} __packed;
+
+struct adf_user_cfg_ctl_data {
+ union {
+ struct adf_user_cfg_section *config_section;
+ __u64 padding;
+ };
+ __u8 device_id;
+} __packed;
+#endif
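These packed structures form the user ABI consumed through the IOCTL_* numbers defined in adf_cfg_common.h: user space builds a linked chain of sections and key-value pairs and submits it with IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS. A user-space sketch is shown below; it assumes these headers are made visible to user space, and the device node path is a placeholder, not a path confirmed by this patch.

/*
 * User-space sketch of the control ABI: one section with one key-value
 * pair submitted via IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS.  The device
 * node path is a placeholder; header visibility is assumed.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "adf_cfg_user.h"	/* pulls in adf_cfg_common.h/strings.h */

int main(void)
{
	struct adf_user_cfg_key_val kv = { .type = ADF_STR };
	struct adf_user_cfg_section sec = { .params = &kv };
	struct adf_user_cfg_ctl_data data = {
		.config_section = &sec,
		.device_id = 0,
	};
	int fd, ret;

	strncpy(kv.key, ADF_SERVICES_ENABLED, sizeof(kv.key) - 1);
	strncpy(kv.val, ADF_CFG_CY, sizeof(kv.val) - 1);
	strncpy(sec.name, ADF_GENERAL_SEC, sizeof(sec.name) - 1);

	fd = open("/dev/qat_adf_ctl", O_RDWR);	/* placeholder node name */
	if (fd < 0)
		return 1;

	ret = ioctl(fd, IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS, &data);
	close(fd);
	return ret ? 1 : 0;
}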
diff --git a/drivers/crypto/intel/qat/qat_common/adf_clock.c b/drivers/crypto/intel/qat/qat_common/adf_clock.c
new file mode 100644
index 0000000000..dc0778691e
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_clock.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/export.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
+#include <linux/time64.h>
+#include <linux/types.h>
+#include <linux/units.h>
+#include <asm/errno.h>
+#include "adf_accel_devices.h"
+#include "adf_clock.h"
+#include "adf_common_drv.h"
+
+#define MEASURE_CLOCK_RETRIES 10
+#define MEASURE_CLOCK_DELAY_US 10000
+#define ME_CLK_DIVIDER 16
+#define MEASURE_CLOCK_DELTA_THRESHOLD_US 100
+
+static inline u64 timespec_to_us(const struct timespec64 *ts)
+{
+ return (u64)DIV_ROUND_CLOSEST_ULL(timespec64_to_ns(ts), NSEC_PER_USEC);
+}
+
+static inline u64 timespec_to_ms(const struct timespec64 *ts)
+{
+ return (u64)DIV_ROUND_CLOSEST_ULL(timespec64_to_ns(ts), NSEC_PER_MSEC);
+}
+
+u64 adf_clock_get_current_time(void)
+{
+ struct timespec64 ts;
+
+ ktime_get_real_ts64(&ts);
+ return timespec_to_ms(&ts);
+}
+
+static int measure_clock(struct adf_accel_dev *accel_dev, u32 *frequency)
+{
+ struct timespec64 ts1, ts2, ts3, ts4;
+ u64 timestamp1, timestamp2, temp;
+ u32 delta_us, tries;
+ int ret;
+
+ tries = MEASURE_CLOCK_RETRIES;
+ do {
+ ktime_get_real_ts64(&ts1);
+ ret = adf_get_fw_timestamp(accel_dev, &timestamp1);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to get fw timestamp\n");
+ return ret;
+ }
+ ktime_get_real_ts64(&ts2);
+ delta_us = timespec_to_us(&ts2) - timespec_to_us(&ts1);
+ } while (delta_us > MEASURE_CLOCK_DELTA_THRESHOLD_US && --tries);
+
+ if (!tries) {
+ dev_err(&GET_DEV(accel_dev), "Excessive clock measure delay\n");
+ return -ETIMEDOUT;
+ }
+
+ fsleep(MEASURE_CLOCK_DELAY_US);
+
+ tries = MEASURE_CLOCK_RETRIES;
+ do {
+ ktime_get_real_ts64(&ts3);
+ if (adf_get_fw_timestamp(accel_dev, &timestamp2)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to get fw timestamp\n");
+ return -EIO;
+ }
+ ktime_get_real_ts64(&ts4);
+ delta_us = timespec_to_us(&ts4) - timespec_to_us(&ts3);
+ } while (delta_us > MEASURE_CLOCK_DELTA_THRESHOLD_US && --tries);
+
+ if (!tries) {
+ dev_err(&GET_DEV(accel_dev), "Excessive clock measure delay\n");
+ return -ETIMEDOUT;
+ }
+
+ delta_us = timespec_to_us(&ts3) - timespec_to_us(&ts1);
+ temp = (timestamp2 - timestamp1) * ME_CLK_DIVIDER * 10;
+ temp = DIV_ROUND_CLOSEST_ULL(temp, delta_us);
+ /*
+ * Enclose the division to allow the preprocessor to precalculate it,
+ * and avoid promoting r-value to 64-bit before division.
+ */
+ *frequency = temp * (HZ_PER_MHZ / 10);
+
+ return 0;
+}
+
+/**
+ * adf_dev_measure_clock() - measures device clock frequency
+ * @accel_dev: Pointer to acceleration device.
+ * @frequency: Pointer to variable where result will be stored
+ * @min: Minimal allowed frequency value
+ * @max: Maximal allowed frequency value
+ *
+ * If the measured frequency falls outside the min/max thresholds, the
+ * returned value is clamped to the crossed threshold.
+ *
+ * This algorithm compares the device firmware timestamp with the kernel
+ * timestamp, so high accuracy cannot be expected from this measurement.
+ *
+ * Return:
+ * * 0 - measurement succeeded
+ * * -ETIMEDOUT - measurement failed
+ */
+int adf_dev_measure_clock(struct adf_accel_dev *accel_dev,
+ u32 *frequency, u32 min, u32 max)
+{
+ int ret;
+ u32 freq;
+
+ ret = measure_clock(accel_dev, &freq);
+ if (ret)
+ return ret;
+
+ *frequency = clamp(freq, min, max);
+
+ if (*frequency != freq)
+ dev_warn(&GET_DEV(accel_dev),
+ "Measured clock %d Hz is out of range, assuming %d\n",
+ freq, *frequency);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_measure_clock);
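measure_clock() derives the device frequency from two firmware timestamps taken roughly MEASURE_CLOCK_DELAY_US apart: frequency ≈ (timestamp2 - timestamp1) * ME_CLK_DIVIDER / delta_us in MHz, with the *10 / /10 split keeping the integer rounding tight. The stand-alone sketch below reworks that arithmetic with hypothetical sample numbers (firmware counts and a 100 ms wall-clock delta).

/*
 * Stand-alone rework of the measure_clock() arithmetic with hypothetical
 * sample numbers: two firmware timestamps 100 ms apart and the divider
 * used in this file.
 */
#include <stdio.h>

#define ME_CLK_DIVIDER 16
#define HZ_PER_MHZ 1000000ULL

int main(void)
{
	unsigned long long timestamp1 = 0;          /* hypothetical fw counts  */
	unsigned long long timestamp2 = 6250000;
	unsigned long long delta_us = 100000;       /* 100 ms wall-clock delta */
	unsigned long long temp;
	unsigned int frequency;

	/* Same scaling as the driver: multiply by 10 before the rounded
	 * division, then fold the factor back in afterwards. */
	temp = (timestamp2 - timestamp1) * ME_CLK_DIVIDER * 10;
	temp = (temp + delta_us / 2) / delta_us;    /* DIV_ROUND_CLOSEST_ULL  */
	frequency = temp * (HZ_PER_MHZ / 10);

	printf("measured frequency: %u Hz\n", frequency);  /* 1000000000 Hz */
	return 0;
}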
diff --git a/drivers/crypto/intel/qat/qat_common/adf_clock.h b/drivers/crypto/intel/qat/qat_common/adf_clock.h
new file mode 100644
index 0000000000..e309bc0dc3
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_clock.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+#ifndef ADF_CLOCK_H
+#define ADF_CLOCK_H
+
+#include <linux/types.h>
+
+struct adf_accel_dev;
+
+int adf_dev_measure_clock(struct adf_accel_dev *accel_dev, u32 *frequency,
+ u32 min, u32 max);
+u64 adf_clock_get_current_time(void);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
new file mode 100644
index 0000000000..79ff798237
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2021 Intel Corporation */
+#ifndef ADF_DRV_H
+#define ADF_DRV_H
+
+#include <linux/list.h>
+#include <linux/pci.h>
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_loader_handle.h"
+#include "icp_qat_hal.h"
+
+#define ADF_MAJOR_VERSION 0
+#define ADF_MINOR_VERSION 6
+#define ADF_BUILD_VERSION 0
+#define ADF_DRV_VERSION __stringify(ADF_MAJOR_VERSION) "." \
+ __stringify(ADF_MINOR_VERSION) "." \
+ __stringify(ADF_BUILD_VERSION)
+
+#define ADF_STATUS_RESTARTING 0
+#define ADF_STATUS_STARTING 1
+#define ADF_STATUS_CONFIGURED 2
+#define ADF_STATUS_STARTED 3
+#define ADF_STATUS_AE_INITIALISED 4
+#define ADF_STATUS_AE_UCODE_LOADED 5
+#define ADF_STATUS_AE_STARTED 6
+#define ADF_STATUS_PF_RUNNING 7
+#define ADF_STATUS_IRQ_ALLOCATED 8
+#define ADF_STATUS_CRYPTO_ALGS_REGISTERED 9
+#define ADF_STATUS_COMP_ALGS_REGISTERED 10
+
+enum adf_dev_reset_mode {
+ ADF_DEV_RESET_ASYNC = 0,
+ ADF_DEV_RESET_SYNC
+};
+
+enum adf_event {
+ ADF_EVENT_INIT = 0,
+ ADF_EVENT_START,
+ ADF_EVENT_STOP,
+ ADF_EVENT_SHUTDOWN,
+ ADF_EVENT_RESTARTING,
+ ADF_EVENT_RESTARTED,
+};
+
+struct service_hndl {
+ int (*event_hld)(struct adf_accel_dev *accel_dev,
+ enum adf_event event);
+ unsigned long init_status[ADF_DEVS_ARRAY_SIZE];
+ unsigned long start_status[ADF_DEVS_ARRAY_SIZE];
+ char *name;
+ struct list_head list;
+};
+
+int adf_service_register(struct service_hndl *service);
+int adf_service_unregister(struct service_hndl *service);
+
+int adf_dev_up(struct adf_accel_dev *accel_dev, bool init_config);
+int adf_dev_down(struct adf_accel_dev *accel_dev, bool cache_config);
+int adf_dev_restart(struct adf_accel_dev *accel_dev);
+
+void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
+void adf_clean_vf_map(bool);
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
+ struct adf_accel_dev *pf);
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
+ struct adf_accel_dev *pf);
+struct list_head *adf_devmgr_get_head(void);
+struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id);
+struct adf_accel_dev *adf_devmgr_get_first(void);
+struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev);
+int adf_devmgr_verify_id(u32 id);
+void adf_devmgr_get_num_dev(u32 *num);
+int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev);
+int adf_dev_started(struct adf_accel_dev *accel_dev);
+int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev);
+int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev);
+int adf_ae_init(struct adf_accel_dev *accel_dev);
+int adf_ae_shutdown(struct adf_accel_dev *accel_dev);
+int adf_ae_fw_load(struct adf_accel_dev *accel_dev);
+void adf_ae_fw_release(struct adf_accel_dev *accel_dev);
+int adf_ae_start(struct adf_accel_dev *accel_dev);
+int adf_ae_stop(struct adf_accel_dev *accel_dev);
+
+extern const struct pci_error_handlers adf_err_handler;
+void adf_reset_sbr(struct adf_accel_dev *accel_dev);
+void adf_reset_flr(struct adf_accel_dev *accel_dev);
+void adf_dev_restore(struct adf_accel_dev *accel_dev);
+int adf_init_aer(void);
+void adf_exit_aer(void);
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
+int adf_send_admin_init(struct adf_accel_dev *accel_dev);
+int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u64 *resps);
+int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay);
+int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt);
+int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks);
+int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp);
+int adf_init_arb(struct adf_accel_dev *accel_dev);
+void adf_exit_arb(struct adf_accel_dev *accel_dev);
+void adf_update_ring_arb(struct adf_etr_ring_data *ring);
+
+int adf_dev_get(struct adf_accel_dev *accel_dev);
+void adf_dev_put(struct adf_accel_dev *accel_dev);
+int adf_dev_in_use(struct adf_accel_dev *accel_dev);
+int adf_init_etr_data(struct adf_accel_dev *accel_dev);
+void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev);
+int qat_crypto_register(void);
+int qat_crypto_unregister(void);
+int qat_crypto_vf_dev_config(struct adf_accel_dev *accel_dev);
+struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
+void qat_crypto_put_instance(struct qat_crypto_instance *inst);
+void qat_alg_callback(void *resp);
+void qat_alg_asym_callback(void *resp);
+int qat_algs_register(void);
+void qat_algs_unregister(void);
+int qat_asym_algs_register(void);
+void qat_asym_algs_unregister(void);
+
+struct qat_compression_instance *qat_compression_get_instance_node(int node);
+void qat_compression_put_instance(struct qat_compression_instance *inst);
+int qat_compression_register(void);
+int qat_compression_unregister(void);
+int qat_comp_algs_register(void);
+void qat_comp_algs_unregister(void);
+void qat_comp_alg_callback(void *resp);
+
+int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
+void adf_isr_resource_free(struct adf_accel_dev *accel_dev);
+int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
+void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev);
+
+int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev);
+
+int adf_sysfs_init(struct adf_accel_dev *accel_dev);
+
+int qat_hal_init(struct adf_accel_dev *accel_dev);
+void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
+int qat_hal_start(struct icp_qat_fw_loader_handle *handle);
+void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+ unsigned int ctx_mask);
+void qat_hal_reset(struct icp_qat_fw_loader_handle *handle);
+int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle);
+void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int ctx_mask);
+int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
+ unsigned int ae);
+int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, enum icp_qat_uof_regtype lm_type,
+ unsigned char mode);
+int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char mode);
+int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char mode);
+void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int ctx_mask, unsigned int upc);
+void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int uaddr,
+ unsigned int words_num, u64 *uword);
+void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+ unsigned int uword_addr, unsigned int words_num,
+ unsigned int *data);
+int qat_hal_get_ins_num(void);
+int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae,
+ struct icp_qat_uof_batch_init *lm_init_header);
+int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned long ctx_mask,
+ enum icp_qat_uof_regtype reg_type,
+ unsigned short reg_num, unsigned int regdata);
+int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned long ctx_mask,
+ enum icp_qat_uof_regtype reg_type,
+ unsigned short reg_num, unsigned int regdata);
+int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned long ctx_mask,
+ enum icp_qat_uof_regtype reg_type,
+ unsigned short reg_num, unsigned int regdata);
+int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned long ctx_mask,
+ unsigned short reg_num, unsigned int regdata);
+void qat_hal_set_ae_tindex_mode(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char mode);
+int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle);
+void qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle);
+int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, void *addr_ptr,
+ int mem_size);
+int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
+ void *addr_ptr, u32 mem_size, const char *obj_name);
+int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
+ unsigned int cfg_ae_mask);
+int adf_init_misc_wq(void);
+void adf_exit_misc_wq(void);
+bool adf_misc_wq_queue_work(struct work_struct *work);
+bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
+ unsigned long delay);
+#if defined(CONFIG_PCI_IOV)
+int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
+void adf_disable_sriov(struct adf_accel_dev *accel_dev);
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask);
+void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev);
+bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev);
+bool adf_recv_and_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 vf_nr);
+int adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev);
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info);
+int adf_init_pf_wq(void);
+void adf_exit_pf_wq(void);
+int adf_init_vf_wq(void);
+void adf_exit_vf_wq(void);
+void adf_flush_vf_wq(struct adf_accel_dev *accel_dev);
+#else
+#define adf_sriov_configure NULL
+
+static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline int adf_init_pf_wq(void)
+{
+ return 0;
+}
+
+static inline void adf_exit_pf_wq(void)
+{
+}
+
+static inline int adf_init_vf_wq(void)
+{
+ return 0;
+}
+
+static inline void adf_exit_vf_wq(void)
+{
+}
+
+#endif
+
+static inline void __iomem *adf_get_pmisc_base(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_bar *pmisc;
+
+ pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+
+ return pmisc->virt_addr;
+}
+
+#endif
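The misc workqueue helpers declared above (adf_init_misc_wq() and friends) back most of the driver's deferred processing. A minimal sketch of queuing work onto it, with the example_* names being placeholders rather than part of this patch, might look like:

/* Sketch only: example_* names are placeholders, not part of this patch. */
#include <linux/printk.h>
#include <linux/workqueue.h>
#include "adf_common_drv.h"

static void example_handler(struct work_struct *work)
{
	/* deferred processing for the device goes here */
}

static DECLARE_WORK(example_work, example_handler);

static void example_kick(void)
{
	/* Runs example_handler() on the driver's private misc workqueue. */
	if (!adf_misc_wq_queue_work(&example_work))
		pr_debug("QAT: example work already queued\n");
}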
diff --git a/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
new file mode 100644
index 0000000000..29c4422f24
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
@@ -0,0 +1,475 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+
+#include <crypto/algapi.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/bitops.h>
+#include <linux/pci.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_cfg_common.h"
+#include "adf_cfg_user.h"
+
+#define ADF_CFG_MAX_SECTION 512
+#define ADF_CFG_MAX_KEY_VAL 256
+
+#define DEVICE_NAME "qat_adf_ctl"
+
+static DEFINE_MUTEX(adf_ctl_lock);
+static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
+
+static const struct file_operations adf_ctl_ops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = adf_ctl_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+};
+
+struct adf_ctl_drv_info {
+ unsigned int major;
+ struct cdev drv_cdev;
+ struct class *drv_class;
+};
+
+static struct adf_ctl_drv_info adf_ctl_drv;
+
+static void adf_chr_drv_destroy(void)
+{
+ device_destroy(adf_ctl_drv.drv_class, MKDEV(adf_ctl_drv.major, 0));
+ cdev_del(&adf_ctl_drv.drv_cdev);
+ class_destroy(adf_ctl_drv.drv_class);
+ unregister_chrdev_region(MKDEV(adf_ctl_drv.major, 0), 1);
+}
+
+static int adf_chr_drv_create(void)
+{
+ dev_t dev_id;
+ struct device *drv_device;
+
+ if (alloc_chrdev_region(&dev_id, 0, 1, DEVICE_NAME)) {
+ pr_err("QAT: unable to allocate chrdev region\n");
+ return -EFAULT;
+ }
+
+ adf_ctl_drv.drv_class = class_create(DEVICE_NAME);
+ if (IS_ERR(adf_ctl_drv.drv_class)) {
+ pr_err("QAT: class_create failed for adf_ctl\n");
+ goto err_chrdev_unreg;
+ }
+ adf_ctl_drv.major = MAJOR(dev_id);
+ cdev_init(&adf_ctl_drv.drv_cdev, &adf_ctl_ops);
+ if (cdev_add(&adf_ctl_drv.drv_cdev, dev_id, 1)) {
+ pr_err("QAT: cdev add failed\n");
+ goto err_class_destr;
+ }
+
+ drv_device = device_create(adf_ctl_drv.drv_class, NULL,
+ MKDEV(adf_ctl_drv.major, 0),
+ NULL, DEVICE_NAME);
+ if (IS_ERR(drv_device)) {
+ pr_err("QAT: failed to create device\n");
+ goto err_cdev_del;
+ }
+ return 0;
+err_cdev_del:
+ cdev_del(&adf_ctl_drv.drv_cdev);
+err_class_destr:
+ class_destroy(adf_ctl_drv.drv_class);
+err_chrdev_unreg:
+ unregister_chrdev_region(dev_id, 1);
+ return -EFAULT;
+}
+
+static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
+ unsigned long arg)
+{
+ struct adf_user_cfg_ctl_data *cfg_data;
+
+ cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL);
+ if (!cfg_data)
+ return -ENOMEM;
+
+ /* Initialize device id to NO DEVICE as 0 is a valid device id */
+ cfg_data->device_id = ADF_CFG_NO_DEVICE;
+
+ if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) {
+ pr_err("QAT: failed to copy from user cfg_data.\n");
+ kfree(cfg_data);
+ return -EIO;
+ }
+
+ *ctl_data = cfg_data;
+ return 0;
+}
+
+static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,
+ const char *section,
+ const struct adf_user_cfg_key_val *key_val)
+{
+ if (key_val->type == ADF_HEX) {
+ long *ptr = (long *)key_val->val;
+ long val = *ptr;
+
+ if (adf_cfg_add_key_value_param(accel_dev, section,
+ key_val->key, (void *)val,
+ key_val->type)) {
+ dev_err(&GET_DEV(accel_dev),
+ "failed to add hex keyvalue.\n");
+ return -EFAULT;
+ }
+ } else {
+ if (adf_cfg_add_key_value_param(accel_dev, section,
+ key_val->key, key_val->val,
+ key_val->type)) {
+ dev_err(&GET_DEV(accel_dev),
+ "failed to add keyvalue.\n");
+ return -EFAULT;
+ }
+ }
+ return 0;
+}
+
+static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
+ struct adf_user_cfg_ctl_data *ctl_data)
+{
+ struct adf_user_cfg_key_val key_val;
+ struct adf_user_cfg_key_val *params_head;
+ struct adf_user_cfg_section section, *section_head;
+ int i, j;
+
+ section_head = ctl_data->config_section;
+
+ for (i = 0; section_head && i < ADF_CFG_MAX_SECTION; i++) {
+ if (copy_from_user(&section, (void __user *)section_head,
+ sizeof(*section_head))) {
+ dev_err(&GET_DEV(accel_dev),
+ "failed to copy section info\n");
+ goto out_err;
+ }
+
+ if (adf_cfg_section_add(accel_dev, section.name)) {
+ dev_err(&GET_DEV(accel_dev),
+ "failed to add section.\n");
+ goto out_err;
+ }
+
+ params_head = section.params;
+
+ for (j = 0; params_head && j < ADF_CFG_MAX_KEY_VAL; j++) {
+ if (copy_from_user(&key_val, (void __user *)params_head,
+ sizeof(key_val))) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to copy keyvalue.\n");
+ goto out_err;
+ }
+ if (adf_add_key_value_data(accel_dev, section.name,
+ &key_val)) {
+ goto out_err;
+ }
+ params_head = key_val.next;
+ }
+ section_head = section.next;
+ }
+ return 0;
+out_err:
+ adf_cfg_del_all(accel_dev);
+ return -EFAULT;
+}
+
+static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ struct adf_user_cfg_ctl_data *ctl_data;
+ struct adf_accel_dev *accel_dev;
+
+ ret = adf_ctl_alloc_resources(&ctl_data, arg);
+ if (ret)
+ return ret;
+
+ accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
+ if (!accel_dev) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (adf_dev_started(accel_dev)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (adf_copy_key_value_data(accel_dev, ctl_data)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+out:
+ kfree(ctl_data);
+ return ret;
+}
+
+static int adf_ctl_is_device_in_use(int id)
+{
+ struct adf_accel_dev *dev;
+
+ list_for_each_entry(dev, adf_devmgr_get_head(), list) {
+ if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+ if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) {
+ dev_info(&GET_DEV(dev),
+ "device qat_dev%d is busy\n",
+ dev->accel_id);
+ return -EBUSY;
+ }
+ }
+ }
+ return 0;
+}
+
+static void adf_ctl_stop_devices(u32 id)
+{
+ struct adf_accel_dev *accel_dev;
+
+ list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
+ if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+ if (!adf_dev_started(accel_dev))
+ continue;
+
+ /* First stop all VFs */
+ if (!accel_dev->is_vf)
+ continue;
+
+ adf_dev_down(accel_dev, false);
+ }
+ }
+
+ list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
+ if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+ if (!adf_dev_started(accel_dev))
+ continue;
+
+ adf_dev_down(accel_dev, false);
+ }
+ }
+}
+
+static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ struct adf_user_cfg_ctl_data *ctl_data;
+
+ ret = adf_ctl_alloc_resources(&ctl_data, arg);
+ if (ret)
+ return ret;
+
+ if (adf_devmgr_verify_id(ctl_data->device_id)) {
+ pr_err("QAT: Device %d not found\n", ctl_data->device_id);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = adf_ctl_is_device_in_use(ctl_data->device_id);
+ if (ret)
+ goto out;
+
+ if (ctl_data->device_id == ADF_CFG_ALL_DEVICES)
+ pr_info("QAT: Stopping all acceleration devices.\n");
+ else
+ pr_info("QAT: Stopping acceleration device qat_dev%d.\n",
+ ctl_data->device_id);
+
+ adf_ctl_stop_devices(ctl_data->device_id);
+
+out:
+ kfree(ctl_data);
+ return ret;
+}
+
+static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ struct adf_user_cfg_ctl_data *ctl_data;
+ struct adf_accel_dev *accel_dev;
+
+ ret = adf_ctl_alloc_resources(&ctl_data, arg);
+ if (ret)
+ return ret;
+
+ ret = -ENODEV;
+ accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
+ if (!accel_dev)
+ goto out;
+
+ dev_info(&GET_DEV(accel_dev),
+ "Starting acceleration device qat_dev%d.\n",
+ ctl_data->device_id);
+
+ ret = adf_dev_up(accel_dev, false);
+
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
+ ctl_data->device_id);
+ adf_dev_down(accel_dev, false);
+ }
+out:
+ kfree(ctl_data);
+ return ret;
+}
+
+static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd,
+ unsigned long arg)
+{
+ u32 num_devices = 0;
+
+ adf_devmgr_get_num_dev(&num_devices);
+ if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct adf_hw_device_data *hw_data;
+ struct adf_dev_status_info dev_info;
+ struct adf_accel_dev *accel_dev;
+
+ if (copy_from_user(&dev_info, (void __user *)arg,
+ sizeof(struct adf_dev_status_info))) {
+ pr_err("QAT: failed to copy from user.\n");
+ return -EFAULT;
+ }
+
+ accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
+ if (!accel_dev)
+ return -ENODEV;
+
+ hw_data = accel_dev->hw_device;
+ dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
+ dev_info.num_ae = hw_data->get_num_aes(hw_data);
+ dev_info.num_accel = hw_data->get_num_accels(hw_data);
+ dev_info.num_logical_accel = hw_data->num_logical_accel;
+ dev_info.banks_per_accel = hw_data->num_banks
+ / hw_data->num_logical_accel;
+ strscpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
+ dev_info.instance_id = hw_data->instance_id;
+ dev_info.type = hw_data->dev_class->type;
+ dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number;
+ dev_info.dev = PCI_SLOT(accel_to_pci_dev(accel_dev)->devfn);
+ dev_info.fun = PCI_FUNC(accel_to_pci_dev(accel_dev)->devfn);
+
+ if (copy_to_user((void __user *)arg, &dev_info,
+ sizeof(struct adf_dev_status_info))) {
+ dev_err(&GET_DEV(accel_dev), "failed to copy status.\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ if (mutex_lock_interruptible(&adf_ctl_lock))
+ return -EFAULT;
+
+ switch (cmd) {
+ case IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS:
+ ret = adf_ctl_ioctl_dev_config(fp, cmd, arg);
+ break;
+
+ case IOCTL_STOP_ACCEL_DEV:
+ ret = adf_ctl_ioctl_dev_stop(fp, cmd, arg);
+ break;
+
+ case IOCTL_START_ACCEL_DEV:
+ ret = adf_ctl_ioctl_dev_start(fp, cmd, arg);
+ break;
+
+ case IOCTL_GET_NUM_DEVICES:
+ ret = adf_ctl_ioctl_get_num_devices(fp, cmd, arg);
+ break;
+
+ case IOCTL_STATUS_ACCEL_DEV:
+ ret = adf_ctl_ioctl_get_status(fp, cmd, arg);
+ break;
+ default:
+ pr_err_ratelimited("QAT: Invalid ioctl %d\n", cmd);
+ ret = -EFAULT;
+ break;
+ }
+ mutex_unlock(&adf_ctl_lock);
+ return ret;
+}
+
+static int __init adf_register_ctl_device_driver(void)
+{
+ if (adf_chr_drv_create())
+ goto err_chr_dev;
+
+ if (adf_init_misc_wq())
+ goto err_misc_wq;
+
+ if (adf_init_aer())
+ goto err_aer;
+
+ if (adf_init_pf_wq())
+ goto err_pf_wq;
+
+ if (adf_init_vf_wq())
+ goto err_vf_wq;
+
+ if (qat_crypto_register())
+ goto err_crypto_register;
+
+ if (qat_compression_register())
+ goto err_compression_register;
+
+ return 0;
+
+err_compression_register:
+ qat_crypto_unregister();
+err_crypto_register:
+ adf_exit_vf_wq();
+err_vf_wq:
+ adf_exit_pf_wq();
+err_pf_wq:
+ adf_exit_aer();
+err_aer:
+ adf_exit_misc_wq();
+err_misc_wq:
+ adf_chr_drv_destroy();
+err_chr_dev:
+ mutex_destroy(&adf_ctl_lock);
+ return -EFAULT;
+}
+
+static void __exit adf_unregister_ctl_device_driver(void)
+{
+ adf_chr_drv_destroy();
+ adf_exit_misc_wq();
+ adf_exit_aer();
+ adf_exit_vf_wq();
+ adf_exit_pf_wq();
+ qat_crypto_unregister();
+ qat_compression_unregister();
+ adf_clean_vf_map(false);
+ mutex_destroy(&adf_ctl_lock);
+}
+
+module_init(adf_register_ctl_device_driver);
+module_exit(adf_unregister_ctl_device_driver);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_ALIAS_CRYPTO("intel_qat");
+MODULE_VERSION(ADF_DRV_VERSION);
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
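The character device registered here is driven entirely through the ioctls dispatched by adf_ctl_ioctl(). As a hedged illustration of the calling convention, the userspace sketch below asks for the number of acceleration devices; it assumes the ioctl request codes are taken from a local copy of adf_cfg_user.h, since no UAPI header is installed by this patch.

/* Hypothetical userspace sketch: count QAT devices via the qat_adf_ctl node.
 * Assumes IOCTL_GET_NUM_DEVICES comes from a local copy of adf_cfg_user.h;
 * the device node name matches DEVICE_NAME above.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "adf_cfg_user.h"	/* provides IOCTL_GET_NUM_DEVICES (assumption) */

int main(void)
{
	uint32_t num_devices = 0;
	int fd = open("/dev/qat_adf_ctl", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The handler copies a u32 device count back to the user pointer. */
	if (ioctl(fd, IOCTL_GET_NUM_DEVICES, &num_devices) < 0) {
		perror("ioctl");
		close(fd);
		return 1;
	}
	printf("QAT devices: %u\n", num_devices);
	close(fd);
	return 0;
}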
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
new file mode 100644
index 0000000000..04845f8d72
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include <linux/debugfs.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+#include "adf_dbgfs.h"
+#include "adf_fw_counters.h"
+#include "adf_heartbeat_dbgfs.h"
+
+/**
+ * adf_dbgfs_init() - add persistent debugfs entries
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * This function creates debugfs entries that are persistent through a device
+ * state change (from up to down or vice versa).
+ */
+void adf_dbgfs_init(struct adf_accel_dev *accel_dev)
+{
+ char name[ADF_DEVICE_NAME_LENGTH];
+ void *ret;
+
+ /* Create dev top level debugfs entry */
+ snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+ accel_dev->hw_device->dev_class->name,
+ pci_name(accel_dev->accel_pci_dev.pci_dev));
+
+ ret = debugfs_create_dir(name, NULL);
+ if (IS_ERR_OR_NULL(ret))
+ return;
+
+ accel_dev->debugfs_dir = ret;
+
+ adf_cfg_dev_dbgfs_add(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_dbgfs_init);
+
+/**
+ * adf_dbgfs_exit() - remove persistent debugfs entries
+ * @accel_dev: Pointer to acceleration device.
+ */
+void adf_dbgfs_exit(struct adf_accel_dev *accel_dev)
+{
+ adf_cfg_dev_dbgfs_rm(accel_dev);
+ debugfs_remove(accel_dev->debugfs_dir);
+}
+EXPORT_SYMBOL_GPL(adf_dbgfs_exit);
+
+/**
+ * adf_dbgfs_add() - add non-persistent debugfs entries
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * This function creates debugfs entries that are not persistent through
+ * a device state change (from up to down or vice versa).
+ */
+void adf_dbgfs_add(struct adf_accel_dev *accel_dev)
+{
+ if (!accel_dev->debugfs_dir)
+ return;
+
+ if (!accel_dev->is_vf) {
+ adf_fw_counters_dbgfs_add(accel_dev);
+ adf_heartbeat_dbgfs_add(accel_dev);
+ }
+}
+
+/**
+ * adf_dbgfs_rm() - remove non-persistent debugfs entries
+ * @accel_dev: Pointer to acceleration device.
+ */
+void adf_dbgfs_rm(struct adf_accel_dev *accel_dev)
+{
+ if (!accel_dev->debugfs_dir)
+ return;
+
+ if (!accel_dev->is_vf) {
+ adf_heartbeat_dbgfs_rm(accel_dev);
+ adf_fw_counters_dbgfs_rm(accel_dev);
+ }
+}
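adf_dbgfs_init()/adf_dbgfs_exit() bracket the lifetime of the per-device debugfs directory, while adf_dbgfs_add()/adf_dbgfs_rm() are intended for every up/down transition. A sketch of that ordering, with hypothetical example_* wrappers standing in for the real caller:

/* Illustrative ordering only: example_* wrappers are not part of this patch. */
#include "adf_accel_devices.h"
#include "adf_dbgfs.h"

static int example_probe(struct adf_accel_dev *accel_dev)
{
	adf_dbgfs_init(accel_dev);	/* persistent: top-level dir + config */
	return 0;
}

static void example_device_up(struct adf_accel_dev *accel_dev)
{
	adf_dbgfs_add(accel_dev);	/* transient: fw_counters, heartbeat */
}

static void example_device_down(struct adf_accel_dev *accel_dev)
{
	adf_dbgfs_rm(accel_dev);	/* removed on every down transition */
}

static void example_remove(struct adf_accel_dev *accel_dev)
{
	adf_dbgfs_exit(accel_dev);	/* tears down the persistent entries */
}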
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.h b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.h
new file mode 100644
index 0000000000..e0cb2c2a2e
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+
+#ifndef ADF_DBGFS_H
+#define ADF_DBGFS_H
+
+#ifdef CONFIG_DEBUG_FS
+void adf_dbgfs_init(struct adf_accel_dev *accel_dev);
+void adf_dbgfs_add(struct adf_accel_dev *accel_dev);
+void adf_dbgfs_rm(struct adf_accel_dev *accel_dev);
+void adf_dbgfs_exit(struct adf_accel_dev *accel_dev);
+#else
+static inline void adf_dbgfs_init(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_dbgfs_add(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_dbgfs_rm(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_dbgfs_exit(struct adf_accel_dev *accel_dev)
+{
+}
+#endif
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
new file mode 100644
index 0000000000..86ee36feef
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static LIST_HEAD(accel_table);
+static LIST_HEAD(vfs_table);
+static DEFINE_MUTEX(table_lock);
+static u32 num_devices;
+static u8 id_map[ADF_MAX_DEVICES];
+
+struct vf_id_map {
+ u32 bdf;
+ u32 id;
+ u32 fake_id;
+ bool attached;
+ struct list_head list;
+};
+
+static int adf_get_vf_id(struct adf_accel_dev *vf)
+{
+ return ((7 * (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)) +
+ PCI_FUNC(accel_to_pci_dev(vf)->devfn) +
+ (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1));
+}
+
+static int adf_get_vf_num(struct adf_accel_dev *vf)
+{
+ return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf);
+}
+
+static struct vf_id_map *adf_find_vf(u32 bdf)
+{
+ struct list_head *itr;
+
+ list_for_each(itr, &vfs_table) {
+ struct vf_id_map *ptr =
+ list_entry(itr, struct vf_id_map, list);
+
+ if (ptr->bdf == bdf)
+ return ptr;
+ }
+ return NULL;
+}
+
+static int adf_get_vf_real_id(u32 fake)
+{
+ struct list_head *itr;
+
+ list_for_each(itr, &vfs_table) {
+ struct vf_id_map *ptr =
+ list_entry(itr, struct vf_id_map, list);
+ if (ptr->fake_id == fake)
+ return ptr->id;
+ }
+ return -1;
+}
+
+/**
+ * adf_clean_vf_map() - Clean VF id mappings
+ * @vf: flag indicating whether mappings are cleaned
+ *	for vfs only or for vfs and pfs
+ *
+ * Function cleans internal ids for virtual functions.
+ */
+void adf_clean_vf_map(bool vf)
+{
+ struct vf_id_map *map;
+ struct list_head *ptr, *tmp;
+
+ mutex_lock(&table_lock);
+ list_for_each_safe(ptr, tmp, &vfs_table) {
+ map = list_entry(ptr, struct vf_id_map, list);
+ if (map->bdf != -1) {
+ id_map[map->id] = 0;
+ num_devices--;
+ }
+
+ if (vf && map->bdf == -1)
+ continue;
+
+ list_del(ptr);
+ kfree(map);
+ }
+ mutex_unlock(&table_lock);
+}
+EXPORT_SYMBOL_GPL(adf_clean_vf_map);
+
+/**
+ * adf_devmgr_update_class_index() - Update internal index
+ * @hw_data: Pointer to internal device data.
+ *
+ * Function updates internal dev index for VFs
+ */
+void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
+{
+ struct adf_hw_device_class *class = hw_data->dev_class;
+ struct list_head *itr;
+ int i = 0;
+
+ list_for_each(itr, &accel_table) {
+ struct adf_accel_dev *ptr =
+ list_entry(itr, struct adf_accel_dev, list);
+
+ if (ptr->hw_device->dev_class == class)
+ ptr->hw_device->instance_id = i++;
+
+ if (i == class->instances)
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index);
+
+static unsigned int adf_find_free_id(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < ADF_MAX_DEVICES; i++) {
+ if (!id_map[i]) {
+ id_map[i] = 1;
+ return i;
+ }
+ }
+ return ADF_MAX_DEVICES + 1;
+}
+
+/**
+ * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
+ * @accel_dev: Pointer to acceleration device.
+ * @pf: Corresponding PF if the accel_dev is a VF
+ *
+ * Function adds acceleration device to the acceleration framework.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
+ struct adf_accel_dev *pf)
+{
+ struct list_head *itr;
+ int ret = 0;
+
+ if (num_devices == ADF_MAX_DEVICES) {
+ dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n",
+ ADF_MAX_DEVICES);
+ return -EFAULT;
+ }
+
+ mutex_lock(&table_lock);
+ atomic_set(&accel_dev->ref_count, 0);
+
+ /* PF on host or VF on guest - optimized to remove redundant is_vf */
+ if (!accel_dev->is_vf || !pf) {
+ struct vf_id_map *map;
+
+ list_for_each(itr, &accel_table) {
+ struct adf_accel_dev *ptr =
+ list_entry(itr, struct adf_accel_dev, list);
+
+ if (ptr == accel_dev) {
+ ret = -EEXIST;
+ goto unlock;
+ }
+ }
+
+ list_add_tail(&accel_dev->list, &accel_table);
+ accel_dev->accel_id = adf_find_free_id();
+ if (accel_dev->accel_id > ADF_MAX_DEVICES) {
+ ret = -EFAULT;
+ goto unlock;
+ }
+ num_devices++;
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ map->bdf = ~0;
+ map->id = accel_dev->accel_id;
+ map->fake_id = map->id;
+ map->attached = true;
+ list_add_tail(&map->list, &vfs_table);
+ } else if (accel_dev->is_vf && pf) {
+ /* VF on host */
+ struct vf_id_map *map;
+
+ map = adf_find_vf(adf_get_vf_num(accel_dev));
+ if (map) {
+ struct vf_id_map *next;
+
+ accel_dev->accel_id = map->id;
+ list_add_tail(&accel_dev->list, &accel_table);
+ map->fake_id++;
+ map->attached = true;
+ next = list_next_entry(map, list);
+ while (next && &next->list != &vfs_table) {
+ next->fake_id++;
+ next = list_next_entry(next, list);
+ }
+
+ ret = 0;
+ goto unlock;
+ }
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ accel_dev->accel_id = adf_find_free_id();
+ if (accel_dev->accel_id > ADF_MAX_DEVICES) {
+ kfree(map);
+ ret = -EFAULT;
+ goto unlock;
+ }
+ num_devices++;
+ list_add_tail(&accel_dev->list, &accel_table);
+ map->bdf = adf_get_vf_num(accel_dev);
+ map->id = accel_dev->accel_id;
+ map->fake_id = map->id;
+ map->attached = true;
+ list_add_tail(&map->list, &vfs_table);
+ }
+ mutex_init(&accel_dev->state_lock);
+unlock:
+ mutex_unlock(&table_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);
+
+struct list_head *adf_devmgr_get_head(void)
+{
+ return &accel_table;
+}
+
+/**
+ * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
+ * @accel_dev: Pointer to acceleration device.
+ * @pf: Corresponding PF if the accel_dev is a VF
+ *
+ * Function removes acceleration device from the acceleration framework.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
+ struct adf_accel_dev *pf)
+{
+ mutex_lock(&table_lock);
+ /* PF on host or VF on guest - optimized to remove redundant is_vf */
+ if (!accel_dev->is_vf || !pf) {
+ id_map[accel_dev->accel_id] = 0;
+ num_devices--;
+ } else if (accel_dev->is_vf && pf) {
+ struct vf_id_map *map, *next;
+
+ map = adf_find_vf(adf_get_vf_num(accel_dev));
+ if (!map) {
+ dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n");
+ goto unlock;
+ }
+ map->fake_id--;
+ map->attached = false;
+ next = list_next_entry(map, list);
+ while (next && &next->list != &vfs_table) {
+ next->fake_id--;
+ next = list_next_entry(next, list);
+ }
+ }
+unlock:
+ mutex_destroy(&accel_dev->state_lock);
+ list_del(&accel_dev->list);
+ mutex_unlock(&table_lock);
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);
+
+struct adf_accel_dev *adf_devmgr_get_first(void)
+{
+ struct adf_accel_dev *dev = NULL;
+
+ if (!list_empty(&accel_table))
+ dev = list_first_entry(&accel_table, struct adf_accel_dev,
+ list);
+ return dev;
+}
+
+/**
+ * adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
+ * @pci_dev: Pointer to PCI device.
+ *
+ * Function returns acceleration device associated with the given PCI device.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: pointer to accel_dev or NULL if not found.
+ */
+struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
+{
+ struct list_head *itr;
+
+ mutex_lock(&table_lock);
+ list_for_each(itr, &accel_table) {
+ struct adf_accel_dev *ptr =
+ list_entry(itr, struct adf_accel_dev, list);
+
+ if (ptr->accel_pci_dev.pci_dev == pci_dev) {
+ mutex_unlock(&table_lock);
+ return ptr;
+ }
+ }
+ mutex_unlock(&table_lock);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
+
+struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id)
+{
+ struct list_head *itr;
+ int real_id;
+
+ mutex_lock(&table_lock);
+ real_id = adf_get_vf_real_id(id);
+ if (real_id < 0)
+ goto unlock;
+
+ id = real_id;
+
+ list_for_each(itr, &accel_table) {
+ struct adf_accel_dev *ptr =
+ list_entry(itr, struct adf_accel_dev, list);
+ if (ptr->accel_id == id) {
+ mutex_unlock(&table_lock);
+ return ptr;
+ }
+ }
+unlock:
+ mutex_unlock(&table_lock);
+ return NULL;
+}
+
+int adf_devmgr_verify_id(u32 id)
+{
+ if (id == ADF_CFG_ALL_DEVICES)
+ return 0;
+
+ if (adf_devmgr_get_dev_by_id(id))
+ return 0;
+
+ return -ENODEV;
+}
+
+static int adf_get_num_dettached_vfs(void)
+{
+ struct list_head *itr;
+ int vfs = 0;
+
+ mutex_lock(&table_lock);
+ list_for_each(itr, &vfs_table) {
+ struct vf_id_map *ptr =
+ list_entry(itr, struct vf_id_map, list);
+ if (ptr->bdf != ~0 && !ptr->attached)
+ vfs++;
+ }
+ mutex_unlock(&table_lock);
+ return vfs;
+}
+
+void adf_devmgr_get_num_dev(u32 *num)
+{
+ *num = num_devices - adf_get_num_dettached_vfs();
+}
+
+/**
+ * adf_dev_in_use() - Check whether accel_dev is currently in use
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when device is in use, 0 otherwise.
+ */
+int adf_dev_in_use(struct adf_accel_dev *accel_dev)
+{
+ return atomic_read(&accel_dev->ref_count) != 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_in_use);
+
+/**
+ * adf_dev_get() - Increment accel_dev reference count
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Increment the accel_dev refcount and, if this takes the first
+ * reference on the device, take a reference on the owning module too.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 when successful, -EFAULT when the module refcount cannot be taken
+ */
+int adf_dev_get(struct adf_accel_dev *accel_dev)
+{
+ if (atomic_add_return(1, &accel_dev->ref_count) == 1)
+ if (!try_module_get(accel_dev->owner))
+ return -EFAULT;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_get);
+
+/**
+ * adf_dev_put() - Decrement accel_dev reference count
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Decrement the accel_dev refcount and, if this drops the last
+ * reference on the device, drop the reference on the owning module too.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_dev_put(struct adf_accel_dev *accel_dev)
+{
+ if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
+ module_put(accel_dev->owner);
+}
+EXPORT_SYMBOL_GPL(adf_dev_put);
+
+/**
+ * adf_devmgr_in_reset() - Check whether device is in reset
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when the device is being reset, 0 otherwise.
+ */
+int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
+{
+ return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_in_reset);
+
+/**
+ * adf_dev_started() - Check whether device has started
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when the device has started, 0 otherwise
+ */
+int adf_dev_started(struct adf_accel_dev *accel_dev)
+{
+ return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
+}
+EXPORT_SYMBOL_GPL(adf_dev_started);
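The device manager expects adf_dev_get() and adf_dev_put() to be paired around any period in which a caller holds on to an accel_dev. A minimal sketch, with example_use_device() as a hypothetical caller:

/* Sketch only: example_use_device() is a hypothetical caller. */
#include <linux/errno.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"

static int example_use_device(u32 accel_id)
{
	struct adf_accel_dev *accel_dev = adf_devmgr_get_dev_by_id(accel_id);
	int ret;

	if (!accel_dev || !adf_dev_started(accel_dev))
		return -ENODEV;

	ret = adf_dev_get(accel_dev);	/* also pins the owning module */
	if (ret)
		return ret;

	/* ... use the device: submit requests, read state, etc. ... */

	adf_dev_put(accel_dev);
	return 0;
}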
diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c
new file mode 100644
index 0000000000..cb6e09ef5c
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/types.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_fw_counters.h"
+
+#define ADF_FW_COUNTERS_MAX_PADDING 16
+
+enum adf_fw_counters_types {
+ ADF_FW_REQUESTS,
+ ADF_FW_RESPONSES,
+ ADF_FW_COUNTERS_COUNT
+};
+
+static const char * const adf_fw_counter_names[] = {
+ [ADF_FW_REQUESTS] = "Requests",
+ [ADF_FW_RESPONSES] = "Responses",
+};
+
+static_assert(ARRAY_SIZE(adf_fw_counter_names) == ADF_FW_COUNTERS_COUNT);
+
+struct adf_ae_counters {
+ u16 ae;
+ u64 values[ADF_FW_COUNTERS_COUNT];
+};
+
+struct adf_fw_counters {
+ u16 ae_count;
+ struct adf_ae_counters ae_counters[];
+};
+
+static void adf_fw_counters_parse_ae_values(struct adf_ae_counters *ae_counters, u32 ae,
+ u64 req_count, u64 resp_count)
+{
+ ae_counters->ae = ae;
+ ae_counters->values[ADF_FW_REQUESTS] = req_count;
+ ae_counters->values[ADF_FW_RESPONSES] = resp_count;
+}
+
+static int adf_fw_counters_load_from_device(struct adf_accel_dev *accel_dev,
+ struct adf_fw_counters *fw_counters)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ unsigned long ae_mask;
+ unsigned int i;
+ unsigned long ae;
+
+ /* Ignore the admin AEs */
+ ae_mask = hw_data->ae_mask & ~hw_data->admin_ae_mask;
+
+ if (hweight_long(ae_mask) > fw_counters->ae_count)
+ return -EINVAL;
+
+ i = 0;
+ for_each_set_bit(ae, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
+ u64 req_count, resp_count;
+ int ret;
+
+ ret = adf_get_ae_fw_counters(accel_dev, ae, &req_count, &resp_count);
+ if (ret)
+ return ret;
+
+ adf_fw_counters_parse_ae_values(&fw_counters->ae_counters[i++], ae,
+ req_count, resp_count);
+ }
+
+ return 0;
+}
+
+static struct adf_fw_counters *adf_fw_counters_allocate(unsigned long ae_count)
+{
+ struct adf_fw_counters *fw_counters;
+
+ if (unlikely(!ae_count))
+ return ERR_PTR(-EINVAL);
+
+ fw_counters = kmalloc(struct_size(fw_counters, ae_counters, ae_count), GFP_KERNEL);
+ if (!fw_counters)
+ return ERR_PTR(-ENOMEM);
+
+ fw_counters->ae_count = ae_count;
+
+ return fw_counters;
+}
+
+/**
+ * adf_fw_counters_get() - Return FW counters for the provided device.
+ * @accel_dev: Pointer to a QAT acceleration device
+ *
+ * Allocates and returns a table of counters containing execution statistics
+ * for each non-admin AE available through the supplied acceleration device.
+ * The caller becomes the owner of such memory and is responsible for
+ * the deallocation through a call to kfree().
+ *
+ * Return: a pointer to a dynamically allocated struct adf_fw_counters
+ * on success, or an ERR_PTR() encoding a negative error code on failure.
+ */
+static struct adf_fw_counters *adf_fw_counters_get(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct adf_fw_counters *fw_counters;
+ unsigned long ae_count;
+ int ret;
+
+ if (!adf_dev_started(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "QAT Device not started\n");
+ return ERR_PTR(-EFAULT);
+ }
+
+ /* Ignore the admin AEs */
+ ae_count = hweight_long(hw_data->ae_mask & ~hw_data->admin_ae_mask);
+
+ fw_counters = adf_fw_counters_allocate(ae_count);
+ if (IS_ERR(fw_counters))
+ return fw_counters;
+
+ ret = adf_fw_counters_load_from_device(accel_dev, fw_counters);
+ if (ret) {
+ kfree(fw_counters);
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to create QAT fw_counters file table [%d].\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ return fw_counters;
+}
+
+static void *qat_fw_counters_seq_start(struct seq_file *sfile, loff_t *pos)
+{
+ struct adf_fw_counters *fw_counters = sfile->private;
+
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
+
+ if (*pos > fw_counters->ae_count)
+ return NULL;
+
+ return &fw_counters->ae_counters[*pos - 1];
+}
+
+static void *qat_fw_counters_seq_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+ struct adf_fw_counters *fw_counters = sfile->private;
+
+ (*pos)++;
+
+ if (*pos > fw_counters->ae_count)
+ return NULL;
+
+ return &fw_counters->ae_counters[*pos - 1];
+}
+
+static void qat_fw_counters_seq_stop(struct seq_file *sfile, void *v) {}
+
+static int qat_fw_counters_seq_show(struct seq_file *sfile, void *v)
+{
+ int i;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(sfile, "AE ");
+ for (i = 0; i < ADF_FW_COUNTERS_COUNT; ++i)
+ seq_printf(sfile, " %*s", ADF_FW_COUNTERS_MAX_PADDING,
+ adf_fw_counter_names[i]);
+ } else {
+ struct adf_ae_counters *ae_counters = (struct adf_ae_counters *)v;
+
+ seq_printf(sfile, "%2d:", ae_counters->ae);
+ for (i = 0; i < ADF_FW_COUNTERS_COUNT; ++i)
+ seq_printf(sfile, " %*llu", ADF_FW_COUNTERS_MAX_PADDING,
+ ae_counters->values[i]);
+ }
+ seq_putc(sfile, '\n');
+
+ return 0;
+}
+
+static const struct seq_operations qat_fw_counters_sops = {
+ .start = qat_fw_counters_seq_start,
+ .next = qat_fw_counters_seq_next,
+ .stop = qat_fw_counters_seq_stop,
+ .show = qat_fw_counters_seq_show,
+};
+
+static int qat_fw_counters_file_open(struct inode *inode, struct file *file)
+{
+ struct adf_accel_dev *accel_dev = inode->i_private;
+ struct seq_file *fw_counters_seq_file;
+ struct adf_fw_counters *fw_counters;
+ int ret;
+
+ fw_counters = adf_fw_counters_get(accel_dev);
+ if (IS_ERR(fw_counters))
+ return PTR_ERR(fw_counters);
+
+ ret = seq_open(file, &qat_fw_counters_sops);
+ if (unlikely(ret)) {
+ kfree(fw_counters);
+ return ret;
+ }
+
+ fw_counters_seq_file = file->private_data;
+ fw_counters_seq_file->private = fw_counters;
+ return ret;
+}
+
+static int qat_fw_counters_file_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+
+ kfree(seq->private);
+ seq->private = NULL;
+
+	return seq_release(inode, file);
+}
+
+static const struct file_operations qat_fw_counters_fops = {
+ .owner = THIS_MODULE,
+ .open = qat_fw_counters_file_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = qat_fw_counters_file_release,
+};
+
+/**
+ * adf_fw_counters_dbgfs_add() - Create a debugfs file containing FW
+ * execution counters.
+ * @accel_dev: Pointer to a QAT acceleration device
+ *
+ * Function creates a file to display a table with statistics for the given
+ * QAT acceleration device. The table stores device specific execution values
+ * for each AE, such as the number of requests sent to the FW and responses
+ * received from the FW.
+ *
+ * Return: void
+ */
+void adf_fw_counters_dbgfs_add(struct adf_accel_dev *accel_dev)
+{
+ accel_dev->fw_cntr_dbgfile = debugfs_create_file("fw_counters", 0400,
+ accel_dev->debugfs_dir,
+ accel_dev,
+ &qat_fw_counters_fops);
+}
+
+/**
+ * adf_fw_counters_dbgfs_rm() - Remove the debugfs file containing FW counters.
+ * @accel_dev: Pointer to a QAT acceleration device.
+ *
+ * Function removes the file providing the table of statistics for the given
+ * QAT acceleration device.
+ *
+ * Return: void
+ */
+void adf_fw_counters_dbgfs_rm(struct adf_accel_dev *accel_dev)
+{
+ debugfs_remove(accel_dev->fw_cntr_dbgfile);
+ accel_dev->fw_cntr_dbgfile = NULL;
+}
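The resulting table is exposed as a plain seq_file, so it can be consumed with ordinary reads. A hedged userspace sketch follows; the directory name combines the device class and PCI address (see adf_dbgfs_init()), so the path below is only an example:

/* Hypothetical userspace sketch: dump the per-AE request/response counters.
 * The debugfs directory name depends on the device class and PCI address,
 * so this path is illustrative only.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/kernel/debug/qat_4xxx_0000:6b:00.0/fw_counters";
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Print the header row and one line per acceleration engine. */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}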
diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_counters.h b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.h
new file mode 100644
index 0000000000..91b3b6a95f
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+#ifndef ADF_FW_COUNTERS_H
+#define ADF_FW_COUNTERS_H
+
+struct adf_accel_dev;
+
+void adf_fw_counters_dbgfs_add(struct adf_accel_dev *accel_dev);
+void adf_fw_counters_dbgfs_rm(struct adf_accel_dev *accel_dev);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_config.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_config.c
new file mode 100644
index 0000000000..c27ff6d18e
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_config.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation */
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "adf_gen2_config.h"
+#include "adf_common_drv.h"
+#include "qat_crypto.h"
+#include "qat_compression.h"
+#include "adf_heartbeat.h"
+#include "adf_transport_access_macros.h"
+
+static int adf_gen2_crypto_dev_config(struct adf_accel_dev *accel_dev)
+{
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ int banks = GET_MAX_BANKS(accel_dev);
+ int cpus = num_online_cpus();
+ unsigned long val;
+ int instances;
+ int ret;
+ int i;
+
+ if (adf_hw_dev_has_crypto(accel_dev))
+ instances = min(cpus, banks);
+ else
+ instances = 0;
+
+ for (i = 0; i < instances; i++) {
+ val = i;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
+ i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+ val = 128;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 512;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 0;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 2;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 8;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 10;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = ADF_COALESCING_DEF_TIME;
+ snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+ }
+
+ val = i;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+ &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ return ret;
+
+err:
+ dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n");
+ return ret;
+}
+
+static int adf_gen2_comp_dev_config(struct adf_accel_dev *accel_dev)
+{
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ int banks = GET_MAX_BANKS(accel_dev);
+ int cpus = num_online_cpus();
+ unsigned long val;
+ int instances;
+ int ret;
+ int i;
+
+ if (adf_hw_dev_has_compression(accel_dev))
+ instances = min(cpus, banks);
+ else
+ instances = 0;
+
+ for (i = 0; i < instances; i++) {
+ val = i;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 512;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 6;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 14;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+ }
+
+ val = i;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+ &val, ADF_DEC);
+ if (ret)
+ return ret;
+
+ return ret;
+
+err:
+ dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n");
+ return ret;
+}
+
+/**
+ * adf_gen2_dev_config() - create dev config required to create instances
+ *
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function creates device configuration required to create instances
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_gen2_dev_config(struct adf_accel_dev *accel_dev)
+{
+ int ret;
+
+ ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
+ if (ret)
+ goto err;
+
+ ret = adf_cfg_section_add(accel_dev, "Accelerator0");
+ if (ret)
+ goto err;
+
+ ret = adf_gen2_crypto_dev_config(accel_dev);
+ if (ret)
+ goto err;
+
+ ret = adf_gen2_comp_dev_config(accel_dev);
+ if (ret)
+ goto err;
+
+ ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
+ if (ret)
+ goto err;
+
+ adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_DEFAULT_MS);
+
+ set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+ return ret;
+
+err:
+ dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n");
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_dev_config);
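adf_gen2_dev_config() is meant to be installed as the per-device configuration callback so the common init path can generate this default layout. A sketch of the hookup, assuming the dev_config member of struct adf_hw_device_data as used by the gen2 device drivers:

/* Illustrative fragment of a gen2 hw_data init routine (assumed usage). */
#include "adf_accel_devices.h"
#include "adf_gen2_config.h"

static void example_init_hw_data(struct adf_hw_device_data *hw_data)
{
	/* Default kernel-section config for crypto and compression instances */
	hw_data->dev_config = adf_gen2_dev_config;
}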
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_config.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_config.h
new file mode 100644
index 0000000000..4bf9da2de6
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_config.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef ADF_GEN2_CONFIG_H_
+#define ADF_GEN2_CONFIG_H_
+
+#include "adf_accel_devices.h"
+
+int adf_gen2_dev_config(struct adf_accel_dev *accel_dev);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c
new file mode 100644
index 0000000000..47261b1c1d
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation */
+#include "adf_accel_devices.h"
+#include "adf_gen2_dc.h"
+#include "icp_qat_fw_comp.h"
+
+static void qat_comp_build_deflate_ctx(void *ctx)
+{
+ struct icp_qat_fw_comp_req *req_tmpl = (struct icp_qat_fw_comp_req *)ctx;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
+ struct icp_qat_fw_comp_cd_hdr *comp_cd_ctrl = &req_tmpl->comp_cd_ctrl;
+
+ memset(req_tmpl, 0, sizeof(*req_tmpl));
+ header->hdr_flags =
+ ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+ header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
+ header->comn_req_flags =
+ ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA,
+ QAT_COMN_PTR_TYPE_SGL);
+ header->serv_specif_flags =
+ ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION,
+ ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+ cd_pars->u.sl.comp_slice_cfg_word[0] =
+ ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_COMPRESS,
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
+ ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
+ ICP_QAT_HW_COMPRESSION_DEPTH_1,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+ req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER;
+ req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC;
+ req_pars->req_par_flags =
+ ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(ICP_QAT_FW_COMP_SOP,
+ ICP_QAT_FW_COMP_EOP,
+ ICP_QAT_FW_COMP_BFINAL,
+ ICP_QAT_FW_COMP_CNV,
+ ICP_QAT_FW_COMP_CNV_RECOVERY,
+ ICP_QAT_FW_COMP_NO_CNV_DFX,
+ ICP_QAT_FW_COMP_CRC_MODE_LEGACY,
+ ICP_QAT_FW_COMP_NO_XXHASH_ACC,
+ ICP_QAT_FW_COMP_CNV_ERROR_NONE,
+ ICP_QAT_FW_COMP_NO_APPEND_CRC,
+ ICP_QAT_FW_COMP_NO_DROP_DATA);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+ ICP_QAT_FW_COMN_CURR_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_COMP);
+
+ /* Fill second half of the template for decompression */
+ memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl));
+ req_tmpl++;
+ header = &req_tmpl->comn_hdr;
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
+ cd_pars = &req_tmpl->cd_pars;
+ cd_pars->u.sl.comp_slice_cfg_word[0] =
+ ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS,
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
+ ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
+ ICP_QAT_HW_COMPRESSION_DEPTH_1,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+}
+
+void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops)
+{
+ dc_ops->build_deflate_ctx = qat_comp_build_deflate_ctx;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_init_dc_ops);
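qat_comp_build_deflate_ctx() fills two request templates back to back (compression first, then decompression), so the ctx buffer passed to build_deflate_ctx must hold at least two struct icp_qat_fw_comp_req entries. A sketch of the wiring and of a correctly sized call, with the dc_ops placement assumed from how the gen2 device drivers use it:

/* Sketch only: field placement of dc_ops is an assumption. */
#include "adf_accel_devices.h"
#include "adf_gen2_dc.h"
#include "icp_qat_fw_comp.h"

static void example_init_dc(struct adf_hw_device_data *hw_data)
{
	adf_gen2_init_dc_ops(&hw_data->dc_ops);
}

/* dc_ctx must be at least 2 * sizeof(struct icp_qat_fw_comp_req):
 * the first half is the compression template, the second decompression.
 */
static void example_build_templates(struct adf_hw_device_data *hw_data,
				    void *dc_ctx)
{
	hw_data->dc_ops.build_deflate_ctx(dc_ctx);
}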
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h
new file mode 100644
index 0000000000..6eae023354
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef ADF_GEN2_DC_H
+#define ADF_GEN2_DC_H
+
+#include "adf_accel_devices.h"
+
+void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops);
+
+#endif /* ADF_GEN2_DC_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
new file mode 100644
index 0000000000..d1884547b5
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2020 Intel Corporation */
+#include "adf_common_drv.h"
+#include "adf_gen2_hw_data.h"
+#include "icp_qat_hw.h"
+#include <linux/pci.h>
+
+u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self)
+{
+ if (!self || !self->accel_mask)
+ return 0;
+
+ return hweight16(self->accel_mask);
+}
+EXPORT_SYMBOL_GPL(adf_gen2_get_num_accels);
+
+u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self)
+{
+ if (!self || !self->ae_mask)
+ return 0;
+
+ return hweight32(self->ae_mask);
+}
+EXPORT_SYMBOL_GPL(adf_gen2_get_num_aes);
+
+void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ unsigned long accel_mask = hw_data->accel_mask;
+ unsigned long ae_mask = hw_data->ae_mask;
+ unsigned int val, i;
+
+ /* Enable Accel Engine error detection & correction */
+ for_each_set_bit(i, &ae_mask, hw_data->num_engines) {
+ val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_AE_CTX_ENABLES(i));
+ val |= ADF_GEN2_ENABLE_AE_ECC_ERR;
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_AE_CTX_ENABLES(i), val);
+ val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_AE_MISC_CONTROL(i));
+ val |= ADF_GEN2_ENABLE_AE_ECC_PARITY_CORR;
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_AE_MISC_CONTROL(i), val);
+ }
+
+ /* Enable shared memory error detection & correction */
+ for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
+ val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_UERRSSMSH(i));
+ val |= ADF_GEN2_ERRSSMSH_EN;
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_UERRSSMSH(i), val);
+ val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_CERRSSMSH(i));
+ val |= ADF_GEN2_ERRSSMSH_EN;
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_CERRSSMSH(i), val);
+ }
+}
+EXPORT_SYMBOL_GPL(adf_gen2_enable_error_correction);
+
+void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
+ int num_a_regs, int num_b_regs)
+{
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ u32 reg;
+ int i;
+
+ /* Set/Unset Valid bit in AE Thread to PCIe Function Mapping Group A */
+ for (i = 0; i < num_a_regs; i++) {
+ reg = READ_CSR_AE2FUNCTION_MAP_A(pmisc_addr, i);
+ if (enable)
+ reg |= AE2FUNCTION_MAP_VALID;
+ else
+ reg &= ~AE2FUNCTION_MAP_VALID;
+ WRITE_CSR_AE2FUNCTION_MAP_A(pmisc_addr, i, reg);
+ }
+
+ /* Set/Unset Valid bit in AE Thread to PCIe Function Mapping Group B */
+ for (i = 0; i < num_b_regs; i++) {
+ reg = READ_CSR_AE2FUNCTION_MAP_B(pmisc_addr, i);
+ if (enable)
+ reg |= AE2FUNCTION_MAP_VALID;
+ else
+ reg &= ~AE2FUNCTION_MAP_VALID;
+ WRITE_CSR_AE2FUNCTION_MAP_B(pmisc_addr, i, reg);
+ }
+}
+EXPORT_SYMBOL_GPL(adf_gen2_cfg_iov_thds);
+
+void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info)
+{
+ admin_csrs_info->mailbox_offset = ADF_MAILBOX_BASE_OFFSET;
+ admin_csrs_info->admin_msg_ur = ADF_ADMINMSGUR_OFFSET;
+ admin_csrs_info->admin_msg_lr = ADF_ADMINMSGLR_OFFSET;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_get_admin_info);
+
+void adf_gen2_get_arb_info(struct arb_info *arb_info)
+{
+ arb_info->arb_cfg = ADF_ARB_CONFIG;
+ arb_info->arb_offset = ADF_ARB_OFFSET;
+ arb_info->wt2sam_offset = ADF_ARB_WRK_2_SER_MAP_OFFSET;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_get_arb_info);
+
+void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *addr = adf_get_pmisc_base(accel_dev);
+ u32 val;
+
+ val = accel_dev->pf.vf_info ? 0 : BIT_ULL(GET_MAX_BANKS(accel_dev)) - 1;
+
+ /* Enable bundle and misc interrupts */
+ ADF_CSR_WR(addr, ADF_GEN2_SMIAPF0_MASK_OFFSET, val);
+ ADF_CSR_WR(addr, ADF_GEN2_SMIAPF1_MASK_OFFSET, ADF_GEN2_SMIA1_MASK);
+}
+EXPORT_SYMBOL_GPL(adf_gen2_enable_ints);
+
+static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
+{
+ return BUILD_RING_BASE_ADDR(addr, size);
+}
+
+static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+ return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ u32 value)
+{
+ WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+ return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ u32 value)
+{
+ WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_E_STAT(csr_base_addr, bank);
+}
+
+static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank,
+ u32 ring, u32 value)
+{
+ WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
+}
+
+static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ dma_addr_t addr)
+{
+ WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
+}
+
+static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, u32 value)
+{
+ WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
+{
+ WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
+}
+
+static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
+}
+
+static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
+}
+
+void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
+{
+ csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
+ csr_ops->read_csr_ring_head = read_csr_ring_head;
+ csr_ops->write_csr_ring_head = write_csr_ring_head;
+ csr_ops->read_csr_ring_tail = read_csr_ring_tail;
+ csr_ops->write_csr_ring_tail = write_csr_ring_tail;
+ csr_ops->read_csr_e_stat = read_csr_e_stat;
+ csr_ops->write_csr_ring_config = write_csr_ring_config;
+ csr_ops->write_csr_ring_base = write_csr_ring_base;
+ csr_ops->write_csr_int_flag = write_csr_int_flag;
+ csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
+ csr_ops->write_csr_int_col_en = write_csr_int_col_en;
+ csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
+ csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
+ csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_init_hw_csr_ops);
+
+u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
+ u32 straps = hw_data->straps;
+ u32 fuses = hw_data->fuses;
+ u32 legfuses;
+ u32 capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
+ ICP_ACCEL_CAPABILITIES_CIPHER |
+ ICP_ACCEL_CAPABILITIES_COMPRESSION;
+
+ /* Read accelerator capabilities mask */
+ pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses);
+
+ /* A set bit in legfuses means the feature is OFF in this SKU */
+ if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) {
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+ if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+ if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) {
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+ if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
+
+ if ((straps | fuses) & ADF_POWERGATE_PKE)
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+
+ if ((straps | fuses) & ADF_POWERGATE_DC)
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
+
+ return capabilities;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_get_accel_cap);
+
+void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ u32 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
+ u32 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
+ unsigned long accel_mask = hw_data->accel_mask;
+ u32 i = 0;
+
+ /* Configures WDT timers */
+ for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
+ /* Enable WDT for sym and dc */
+ ADF_CSR_WR(pmisc_addr, ADF_SSMWDT(i), timer_val);
+ /* Enable WDT for pke */
+ ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKE(i), timer_val_pke);
+ }
+}
+EXPORT_SYMBOL_GPL(adf_gen2_set_ssm_wdtimer);
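These helpers are shared by the gen2 device drivers, which point their hw_data callbacks at them during class init. A hedged sketch of that wiring; the field names are assumed from the surrounding gen2 code rather than taken from this patch:

/* Illustrative fragment of a gen2 class init routine (field names assumed). */
#include "adf_accel_devices.h"
#include "adf_gen2_hw_data.h"

static void example_init_gen2_hw_data(struct adf_hw_device_data *hw_data)
{
	hw_data->get_num_accels = adf_gen2_get_num_accels;
	hw_data->get_num_aes = adf_gen2_get_num_aes;
	hw_data->enable_error_correction = adf_gen2_enable_error_correction;
	hw_data->get_admin_info = adf_gen2_get_admin_info;
	hw_data->get_arb_info = adf_gen2_get_arb_info;
	hw_data->enable_ints = adf_gen2_enable_ints;
	hw_data->get_accel_cap = adf_gen2_get_accel_cap;
	hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;

	adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}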
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
new file mode 100644
index 0000000000..6bd341061d
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2020 Intel Corporation */
+#ifndef ADF_GEN2_HW_DATA_H_
+#define ADF_GEN2_HW_DATA_H_
+
+#include "adf_accel_devices.h"
+#include "adf_cfg_common.h"
+
+/* Transport access */
+#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
+#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
+#define ADF_RING_CSR_RING_CONFIG 0x000
+#define ADF_RING_CSR_RING_LBASE 0x040
+#define ADF_RING_CSR_RING_UBASE 0x080
+#define ADF_RING_CSR_RING_HEAD 0x0C0
+#define ADF_RING_CSR_RING_TAIL 0x100
+#define ADF_RING_CSR_E_STAT 0x14C
+#define ADF_RING_CSR_INT_FLAG 0x170
+#define ADF_RING_CSR_INT_SRCSEL 0x174
+#define ADF_RING_CSR_INT_SRCSEL_2 0x178
+#define ADF_RING_CSR_INT_COL_EN 0x17C
+#define ADF_RING_CSR_INT_COL_CTL 0x180
+#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
+#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
+#define ADF_RING_BUNDLE_SIZE 0x1000
+#define ADF_GEN2_RX_RINGS_OFFSET 8
+#define ADF_GEN2_TX_RINGS_MASK 0xFF
+
+#define BUILD_RING_BASE_ADDR(addr, size) \
+ (((addr) >> 6) & (GENMASK_ULL(63, 0) << (size)))
+#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_HEAD + ((ring) << 2))
+#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_TAIL + ((ring) << 2))
+#define READ_CSR_E_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_E_STAT)
+#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value)
+#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
+do { \
+ u32 l_base = 0, u_base = 0; \
+ l_base = (u32)((value) & 0xFFFFFFFF); \
+ u_base = (u32)(((value) & 0xFFFFFFFF00000000ULL) >> 32); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_LBASE + ((ring) << 2), l_base); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_UBASE + ((ring) << 2), u_base); \
+} while (0)
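
Editor's note: WRITE_CSR_RING_BASE() splits a 64-bit ring base into two 32-bit CSR writes (LBASE/UBASE). A minimal, self-contained illustration of that split, using an arbitrary example value and no hardware access:

    #include <stdio.h>

    int main(void)
    {
    	unsigned long long value = 0x123456789abcd000ULL;	/* hypothetical ring base */
    	unsigned int l_base = (unsigned int)(value & 0xFFFFFFFF);
    	unsigned int u_base = (unsigned int)(value >> 32);

    	printf("LBASE = 0x%08x, UBASE = 0x%08x\n", l_base, u_base);
    	return 0;
    }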
+
+#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
+#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
+#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_INT_FLAG, value)
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+do { \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
+} while (0)
+#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_INT_COL_EN, value)
+#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_INT_COL_CTL, \
+ ADF_RING_CSR_INT_COL_CTL_ENABLE | (value))
+#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_INT_FLAG_AND_COL, value)
+
+/* AE to function map */
+#define AE2FUNCTION_MAP_A_OFFSET (0x3A400 + 0x190)
+#define AE2FUNCTION_MAP_B_OFFSET (0x3A400 + 0x310)
+#define AE2FUNCTION_MAP_REG_SIZE 4
+#define AE2FUNCTION_MAP_VALID BIT(7)
+
+#define READ_CSR_AE2FUNCTION_MAP_A(pmisc_bar_addr, index) \
+ ADF_CSR_RD(pmisc_bar_addr, AE2FUNCTION_MAP_A_OFFSET + \
+ AE2FUNCTION_MAP_REG_SIZE * (index))
+#define WRITE_CSR_AE2FUNCTION_MAP_A(pmisc_bar_addr, index, value) \
+ ADF_CSR_WR(pmisc_bar_addr, AE2FUNCTION_MAP_A_OFFSET + \
+ AE2FUNCTION_MAP_REG_SIZE * (index), value)
+#define READ_CSR_AE2FUNCTION_MAP_B(pmisc_bar_addr, index) \
+ ADF_CSR_RD(pmisc_bar_addr, AE2FUNCTION_MAP_B_OFFSET + \
+ AE2FUNCTION_MAP_REG_SIZE * (index))
+#define WRITE_CSR_AE2FUNCTION_MAP_B(pmisc_bar_addr, index, value) \
+ ADF_CSR_WR(pmisc_bar_addr, AE2FUNCTION_MAP_B_OFFSET + \
+ AE2FUNCTION_MAP_REG_SIZE * (index), value)
+
+/* Admin Interface Offsets */
+#define ADF_ADMINMSGUR_OFFSET (0x3A000 + 0x574)
+#define ADF_ADMINMSGLR_OFFSET (0x3A000 + 0x578)
+#define ADF_MAILBOX_BASE_OFFSET 0x20970
+
+/* Arbiter configuration */
+#define ADF_ARB_OFFSET 0x30000
+#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180
+#define ADF_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0))
+#define ADF_ARB_REG_SLOT 0x1000
+#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
+
+#define WRITE_CSR_RING_SRV_ARB_EN(csr_addr, index, value) \
+ ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
+ (ADF_ARB_REG_SLOT * (index)), value)
+
+/* Power gating */
+#define ADF_POWERGATE_DC BIT(23)
+#define ADF_POWERGATE_PKE BIT(24)
+
+/* Default ring mapping */
+#define ADF_GEN2_DEFAULT_RING_TO_SRV_MAP \
+ (CRYPTO << ADF_CFG_SERV_RING_PAIR_0_SHIFT | \
+ CRYPTO << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
+ UNUSED << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
+ COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
+
+/* WDT timers
+ *
+ * Timeout is in cycles. Clock speed may vary across products, but this
+ * value should correspond to a few milliseconds.
+ */
+#define ADF_SSM_WDT_DEFAULT_VALUE 0x200000
+#define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x2000000
+#define ADF_SSMWDT_OFFSET 0x54
+#define ADF_SSMWDTPKE_OFFSET 0x58
+#define ADF_SSMWDT(i) (ADF_SSMWDT_OFFSET + ((i) * 0x4000))
+#define ADF_SSMWDTPKE(i) (ADF_SSMWDTPKE_OFFSET + ((i) * 0x4000))
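
Editor's note: since the WDT values are raw cycle counts, the resulting wall-clock timeout depends on the SSM clock. A rough back-of-the-envelope conversion, assuming a purely hypothetical 600 MHz clock (the real clock rate is device specific):

    #include <stdio.h>

    int main(void)
    {
    	const unsigned long long wdt_cycles = 0x200000;		/* ADF_SSM_WDT_DEFAULT_VALUE */
    	const unsigned long long wdt_pke_cycles = 0x2000000;	/* ADF_SSM_WDT_PKE_DEFAULT_VALUE */
    	const unsigned long long clk_hz = 600000000ULL;		/* assumed clock, for illustration */

    	printf("sym/dc WDT ~ %.2f ms\n", wdt_cycles * 1000.0 / clk_hz);
    	printf("pke WDT    ~ %.2f ms\n", wdt_pke_cycles * 1000.0 / clk_hz);
    	return 0;
    }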
+
+/* Error detection and correction */
+#define ADF_GEN2_AE_CTX_ENABLES(i) ((i) * 0x1000 + 0x20818)
+#define ADF_GEN2_AE_MISC_CONTROL(i) ((i) * 0x1000 + 0x20960)
+#define ADF_GEN2_ENABLE_AE_ECC_ERR BIT(28)
+#define ADF_GEN2_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
+#define ADF_GEN2_UERRSSMSH(i) ((i) * 0x4000 + 0x18)
+#define ADF_GEN2_CERRSSMSH(i) ((i) * 0x4000 + 0x10)
+#define ADF_GEN2_ERRSSMSH_EN BIT(3)
+
+/* Number of heartbeat counter pairs */
+#define ADF_NUM_HB_CNT_PER_AE ADF_NUM_THREADS_PER_AE
+
+/* Interrupts */
+#define ADF_GEN2_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
+#define ADF_GEN2_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
+#define ADF_GEN2_SMIA1_MASK 0x1
+
+u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self);
+u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self);
+void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev);
+void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
+ int num_a_regs, int num_b_regs);
+void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
+void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info);
+void adf_gen2_get_arb_info(struct arb_info *arb_info);
+void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev);
+u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev);
+void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c
new file mode 100644
index 0000000000..70ef119639
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2021 Intel Corporation */
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen2_pfvf.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_pf_proto.h"
+#include "adf_pfvf_vf_proto.h"
+#include "adf_pfvf_utils.h"
+
+ /* VF2PF interrupts */
+#define ADF_GEN2_VF_MSK 0xFFFF
+#define ADF_GEN2_ERR_REG_VF2PF(vf_src) (((vf_src) & 0x01FFFE00) >> 9)
+#define ADF_GEN2_ERR_MSK_VF2PF(vf_mask) (((vf_mask) & ADF_GEN2_VF_MSK) << 9)
+
+#define ADF_GEN2_PF_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
+#define ADF_GEN2_VF_PF2VF_OFFSET 0x200
+
+#define ADF_GEN2_CSR_IN_USE 0x6AC2
+#define ADF_GEN2_CSR_IN_USE_MASK 0xFFFE
+
+enum gen2_csr_pos {
+ ADF_GEN2_CSR_PF2VF_OFFSET = 0,
+ ADF_GEN2_CSR_VF2PF_OFFSET = 16,
+};
+
+#define ADF_PFVF_GEN2_MSGTYPE_SHIFT 2
+#define ADF_PFVF_GEN2_MSGTYPE_MASK 0x0F
+#define ADF_PFVF_GEN2_MSGDATA_SHIFT 6
+#define ADF_PFVF_GEN2_MSGDATA_MASK 0x3FF
+
+static const struct pfvf_csr_format csr_gen2_fmt = {
+ { ADF_PFVF_GEN2_MSGTYPE_SHIFT, ADF_PFVF_GEN2_MSGTYPE_MASK },
+ { ADF_PFVF_GEN2_MSGDATA_SHIFT, ADF_PFVF_GEN2_MSGDATA_MASK },
+};
+
+#define ADF_PFVF_MSG_RETRY_DELAY 5
+#define ADF_PFVF_MSG_MAX_RETRIES 3
+
+static u32 adf_gen2_pf_get_pfvf_offset(u32 i)
+{
+ return ADF_GEN2_PF_PF2VF_OFFSET(i);
+}
+
+static u32 adf_gen2_vf_get_pfvf_offset(u32 i)
+{
+ return ADF_GEN2_VF_PF2VF_OFFSET;
+}
+
+static void adf_gen2_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
+{
+ /* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
+ if (vf_mask & ADF_GEN2_VF_MSK) {
+ u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+ & ~ADF_GEN2_ERR_MSK_VF2PF(vf_mask);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
+ }
+}
+
+static void adf_gen2_disable_all_vf2pf_interrupts(void __iomem *pmisc_addr)
+{
+ /* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
+ u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+ | ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
+}
+
+static u32 adf_gen2_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
+{
+ u32 sources, disabled, pending;
+ u32 errsou3, errmsk3;
+
+ /* Get the interrupt sources triggered by VFs */
+ errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
+ sources = ADF_GEN2_ERR_REG_VF2PF(errsou3);
+
+ if (!sources)
+ return 0;
+
+ /* Get the already disabled interrupts */
+ errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
+ disabled = ADF_GEN2_ERR_REG_VF2PF(errmsk3);
+
+ pending = sources & ~disabled;
+ if (!pending)
+ return 0;
+
+ /* Due to HW limitations, when disabling the interrupts, we can't
+ * just disable the requested sources, as this would lead to missed
+ * interrupts if ERRSOU3 changes just before writing to ERRMSK3.
+ * To work around it, disable all and re-enable only the sources that
+ * are not currently raised and were not already disabled. Re-enabling will
+ * trigger a new interrupt for the sources that have changed in the
+ * meantime, if any.
+ */
+ errmsk3 |= ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+
+ errmsk3 &= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+
+ /* Return the sources of the (new) interrupt(s) */
+ return pending;
+}
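
Editor's note: ADF_GEN2_ERR_MSK_VF2PF() and ADF_GEN2_ERR_REG_VF2PF() simply move the 16-bit VF mask to and from bits 24:9 of ERRSOU3/ERRMSK3. A self-contained round trip of that packing, with an illustrative mask and no device access:

    #include <stdio.h>

    #define GEN2_VF_MSK		0xFFFF
    #define ERR_MSK_VF2PF(vf_mask)	(((vf_mask) & GEN2_VF_MSK) << 9)
    #define ERR_REG_VF2PF(reg)	(((reg) & 0x01FFFE00) >> 9)

    int main(void)
    {
    	unsigned int vf_mask = 0x0005;	/* VFs 0 and 2, for illustration */
    	unsigned int reg_bits = ERR_MSK_VF2PF(vf_mask);

    	printf("register bits  = 0x%08x\n", reg_bits);			/* 0x00000a00 */
    	printf("recovered mask = 0x%04x\n", ERR_REG_VF2PF(reg_bits));	/* 0x0005 */
    	return 0;
    }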
+
+static u32 gen2_csr_get_int_bit(enum gen2_csr_pos offset)
+{
+ return ADF_PFVF_INT << offset;
+}
+
+static u32 gen2_csr_msg_to_position(u32 csr_msg, enum gen2_csr_pos offset)
+{
+ return (csr_msg & 0xFFFF) << offset;
+}
+
+static u32 gen2_csr_msg_from_position(u32 csr_val, enum gen2_csr_pos offset)
+{
+ return (csr_val >> offset) & 0xFFFF;
+}
+
+static bool gen2_csr_is_in_use(u32 msg, enum gen2_csr_pos offset)
+{
+ return ((msg >> offset) & ADF_GEN2_CSR_IN_USE_MASK) == ADF_GEN2_CSR_IN_USE;
+}
+
+static void gen2_csr_clear_in_use(u32 *msg, enum gen2_csr_pos offset)
+{
+ *msg &= ~(ADF_GEN2_CSR_IN_USE_MASK << offset);
+}
+
+static void gen2_csr_set_in_use(u32 *msg, enum gen2_csr_pos offset)
+{
+ *msg |= (ADF_GEN2_CSR_IN_USE << offset);
+}
+
+static bool is_legacy_user_pfvf_message(u32 msg)
+{
+ return !(msg & ADF_PFVF_MSGORIGIN_SYSTEM);
+}
+
+static bool is_pf2vf_notification(u8 msg_type)
+{
+ switch (msg_type) {
+ case ADF_PF2VF_MSGTYPE_RESTARTING:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_vf2pf_notification(u8 msg_type)
+{
+ switch (msg_type) {
+ case ADF_VF2PF_MSGTYPE_INIT:
+ case ADF_VF2PF_MSGTYPE_SHUTDOWN:
+ return true;
+ default:
+ return false;
+ }
+}
+
+struct pfvf_gen2_params {
+ u32 pfvf_offset;
+ struct mutex *csr_lock; /* lock preventing concurrent access of CSR */
+ enum gen2_csr_pos local_offset;
+ enum gen2_csr_pos remote_offset;
+ bool (*is_notification_message)(u8 msg_type);
+ u8 compat_ver;
+};
+
+static int adf_gen2_pfvf_send(struct adf_accel_dev *accel_dev,
+ struct pfvf_message msg,
+ struct pfvf_gen2_params *params)
+{
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ enum gen2_csr_pos remote_offset = params->remote_offset;
+ enum gen2_csr_pos local_offset = params->local_offset;
+ unsigned int retries = ADF_PFVF_MSG_MAX_RETRIES;
+ struct mutex *lock = params->csr_lock;
+ u32 pfvf_offset = params->pfvf_offset;
+ u32 int_bit;
+ u32 csr_val;
+ u32 csr_msg;
+ int ret;
+
+ /* Gen2 messages, both PF->VF and VF->PF, are all 16 bits long. This
+ * allows us to build and read messages as if they were all 0-based.
+ * However, send and receive are in a single shared 32 bits register,
+ * so we need to shift and/or mask the message half before decoding
+ * it and after encoding it. Which one to shift depends on the
+ * direction.
+ */
+
+ int_bit = gen2_csr_get_int_bit(local_offset);
+
+ csr_msg = adf_pfvf_csr_msg_of(accel_dev, msg, &csr_gen2_fmt);
+ if (unlikely(!csr_msg))
+ return -EINVAL;
+
+ /* Prepare for CSR format, shifting the wire message in place and
+ * setting the in use pattern
+ */
+ csr_msg = gen2_csr_msg_to_position(csr_msg, local_offset);
+ gen2_csr_set_in_use(&csr_msg, remote_offset);
+
+ mutex_lock(lock);
+
+start:
+ /* Check if the PFVF CSR is in use by remote function */
+ csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset);
+ if (gen2_csr_is_in_use(csr_val, local_offset)) {
+ dev_dbg(&GET_DEV(accel_dev),
+ "PFVF CSR in use by remote function\n");
+ goto retry;
+ }
+
+ /* Attempt to get ownership of the PFVF CSR */
+ ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_msg | int_bit);
+
+ /* Wait for confirmation from the remote function that it received the message */
+ ret = read_poll_timeout(ADF_CSR_RD, csr_val, !(csr_val & int_bit),
+ ADF_PFVF_MSG_ACK_DELAY_US,
+ ADF_PFVF_MSG_ACK_MAX_DELAY_US,
+ true, pmisc_addr, pfvf_offset);
+ if (unlikely(ret < 0)) {
+ dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
+ csr_val &= ~int_bit;
+ }
+
+ /* For fire-and-forget notifications, the receiver does not clear
+ * the in-use pattern. This is used to detect collisions.
+ */
+ if (params->is_notification_message(msg.type) && csr_val != csr_msg) {
+ /* Collision must have overwritten the message */
+ dev_err(&GET_DEV(accel_dev),
+ "Collision on notification - PFVF CSR overwritten by remote function\n");
+ goto retry;
+ }
+
+ /* If the far side did not clear the in-use pattern it is either
+ * 1) Notification - message left intact to detect collision
+ * 2) Older protocol (compatibility version < 3) on the far side
+ * where the sender is responsible for clearing the in-use
+ * pattern after the receiver has acknowledged receipt.
+ * In either case, clear the in-use pattern now.
+ */
+ if (gen2_csr_is_in_use(csr_val, remote_offset)) {
+ gen2_csr_clear_in_use(&csr_val, remote_offset);
+ ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val);
+ }
+
+out:
+ mutex_unlock(lock);
+ return ret;
+
+retry:
+ if (--retries) {
+ msleep(ADF_PFVF_MSG_RETRY_DELAY);
+ goto start;
+ } else {
+ ret = -EBUSY;
+ goto out;
+ }
+}
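
Editor's note: to summarise the scheme used by adf_gen2_pfvf_send(), each direction owns one 16-bit half of a single shared 32-bit CSR, and the sender stamps the remote half with the in-use pattern so the other end can detect an ongoing transfer. A self-contained sketch of that layout (the message value is illustrative):

    #include <stdio.h>

    #define CSR_PF2VF_OFFSET	0
    #define CSR_VF2PF_OFFSET	16
    #define CSR_IN_USE		0x6AC2
    #define CSR_IN_USE_MASK		0xFFFE

    int main(void)
    {
    	unsigned int csr = 0;
    	unsigned short wire_msg = 0x1234;	/* illustrative 16-bit PFVF message */

    	/* PF sends: the message goes into the PF2VF half, while the in-use
    	 * pattern is written into the VF2PF half (the remote half).
    	 */
    	csr |= (unsigned int)wire_msg << CSR_PF2VF_OFFSET;
    	csr |= (unsigned int)CSR_IN_USE << CSR_VF2PF_OFFSET;

    	printf("shared CSR        = 0x%08x\n", csr);
    	printf("VF reads message  = 0x%04x\n", (csr >> CSR_PF2VF_OFFSET) & 0xFFFF);
    	printf("in use by remote  = %s\n",
    	       ((csr >> CSR_VF2PF_OFFSET) & CSR_IN_USE_MASK) == CSR_IN_USE ? "yes" : "no");
    	return 0;
    }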
+
+static struct pfvf_message adf_gen2_pfvf_recv(struct adf_accel_dev *accel_dev,
+ struct pfvf_gen2_params *params)
+{
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ enum gen2_csr_pos remote_offset = params->remote_offset;
+ enum gen2_csr_pos local_offset = params->local_offset;
+ u32 pfvf_offset = params->pfvf_offset;
+ struct pfvf_message msg = { 0 };
+ u32 int_bit;
+ u32 csr_val;
+ u16 csr_msg;
+
+ int_bit = gen2_csr_get_int_bit(local_offset);
+
+ /* Read message */
+ csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset);
+ if (!(csr_val & int_bit)) {
+ dev_info(&GET_DEV(accel_dev),
+ "Spurious PFVF interrupt, msg 0x%.8x. Ignored\n", csr_val);
+ return msg;
+ }
+
+ /* Extract the message from the CSR */
+ csr_msg = gen2_csr_msg_from_position(csr_val, local_offset);
+
+ /* Ignore legacy non-system (non-kernel) messages */
+ if (unlikely(is_legacy_user_pfvf_message(csr_msg))) {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Ignored non-system message (0x%.8x);\n", csr_val);
+ /* Because this is a legacy message, the far side is responsible for
+ * clearing the in-use pattern, so do not clear it here.
+ */
+ return msg;
+ }
+
+ /* Return the pfvf_message format */
+ msg = adf_pfvf_message_of(accel_dev, csr_msg, &csr_gen2_fmt);
+
+ /* The in-use pattern is not cleared for notifications (so that
+ * it can be used for collision detection) or older implementations
+ */
+ if (params->compat_ver >= ADF_PFVF_COMPAT_FAST_ACK &&
+ !params->is_notification_message(msg.type))
+ gen2_csr_clear_in_use(&csr_val, remote_offset);
+
+ /* To ACK, clear the INT bit */
+ csr_val &= ~int_bit;
+ ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val);
+
+ return msg;
+}
+
+static int adf_gen2_pf2vf_send(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+ u32 pfvf_offset, struct mutex *csr_lock)
+{
+ struct pfvf_gen2_params params = {
+ .csr_lock = csr_lock,
+ .pfvf_offset = pfvf_offset,
+ .local_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
+ .remote_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
+ .is_notification_message = is_pf2vf_notification,
+ };
+
+ return adf_gen2_pfvf_send(accel_dev, msg, &params);
+}
+
+static int adf_gen2_vf2pf_send(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+ u32 pfvf_offset, struct mutex *csr_lock)
+{
+ struct pfvf_gen2_params params = {
+ .csr_lock = csr_lock,
+ .pfvf_offset = pfvf_offset,
+ .local_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
+ .remote_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
+ .is_notification_message = is_vf2pf_notification,
+ };
+
+ return adf_gen2_pfvf_send(accel_dev, msg, &params);
+}
+
+static struct pfvf_message adf_gen2_pf2vf_recv(struct adf_accel_dev *accel_dev,
+ u32 pfvf_offset, u8 compat_ver)
+{
+ struct pfvf_gen2_params params = {
+ .pfvf_offset = pfvf_offset,
+ .local_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
+ .remote_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
+ .is_notification_message = is_pf2vf_notification,
+ .compat_ver = compat_ver,
+ };
+
+ return adf_gen2_pfvf_recv(accel_dev, &params);
+}
+
+static struct pfvf_message adf_gen2_vf2pf_recv(struct adf_accel_dev *accel_dev,
+ u32 pfvf_offset, u8 compat_ver)
+{
+ struct pfvf_gen2_params params = {
+ .pfvf_offset = pfvf_offset,
+ .local_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
+ .remote_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
+ .is_notification_message = is_vf2pf_notification,
+ .compat_ver = compat_ver,
+ };
+
+ return adf_gen2_pfvf_recv(accel_dev, &params);
+}
+
+void adf_gen2_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+ pfvf_ops->enable_comms = adf_enable_pf2vf_comms;
+ pfvf_ops->get_pf2vf_offset = adf_gen2_pf_get_pfvf_offset;
+ pfvf_ops->get_vf2pf_offset = adf_gen2_pf_get_pfvf_offset;
+ pfvf_ops->enable_vf2pf_interrupts = adf_gen2_enable_vf2pf_interrupts;
+ pfvf_ops->disable_all_vf2pf_interrupts = adf_gen2_disable_all_vf2pf_interrupts;
+ pfvf_ops->disable_pending_vf2pf_interrupts = adf_gen2_disable_pending_vf2pf_interrupts;
+ pfvf_ops->send_msg = adf_gen2_pf2vf_send;
+ pfvf_ops->recv_msg = adf_gen2_vf2pf_recv;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_init_pf_pfvf_ops);
+
+void adf_gen2_init_vf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+ pfvf_ops->enable_comms = adf_enable_vf2pf_comms;
+ pfvf_ops->get_pf2vf_offset = adf_gen2_vf_get_pfvf_offset;
+ pfvf_ops->get_vf2pf_offset = adf_gen2_vf_get_pfvf_offset;
+ pfvf_ops->send_msg = adf_gen2_vf2pf_send;
+ pfvf_ops->recv_msg = adf_gen2_pf2vf_recv;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_init_vf_pfvf_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h
new file mode 100644
index 0000000000..a716545a76
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_GEN2_PFVF_H
+#define ADF_GEN2_PFVF_H
+
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+
+#define ADF_GEN2_ERRSOU3 (0x3A000 + 0x0C)
+#define ADF_GEN2_ERRSOU5 (0x3A000 + 0xD8)
+#define ADF_GEN2_ERRMSK3 (0x3A000 + 0x1C)
+#define ADF_GEN2_ERRMSK5 (0x3A000 + 0xDC)
+
+#if defined(CONFIG_PCI_IOV)
+void adf_gen2_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
+void adf_gen2_init_vf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
+#else
+static inline void adf_gen2_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+ pfvf_ops->enable_comms = adf_pfvf_comms_disabled;
+}
+
+static inline void adf_gen2_init_vf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+ pfvf_ops->enable_comms = adf_pfvf_comms_disabled;
+}
+#endif
+
+#endif /* ADF_GEN2_PFVF_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c
new file mode 100644
index 0000000000..5859238e37
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation */
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_comp.h"
+#include "icp_qat_hw_20_comp.h"
+#include "adf_gen4_dc.h"
+
+static void qat_comp_build_deflate(void *ctx)
+{
+ struct icp_qat_fw_comp_req *req_tmpl =
+ (struct icp_qat_fw_comp_req *)ctx;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
+ struct icp_qat_hw_comp_20_config_csr_upper hw_comp_upper_csr = {0};
+ struct icp_qat_hw_comp_20_config_csr_lower hw_comp_lower_csr = {0};
+ struct icp_qat_hw_decomp_20_config_csr_lower hw_decomp_lower_csr = {0};
+ u32 upper_val;
+ u32 lower_val;
+
+ memset(req_tmpl, 0, sizeof(*req_tmpl));
+ header->hdr_flags =
+ ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+ header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
+ header->comn_req_flags =
+ ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA,
+ QAT_COMN_PTR_TYPE_SGL);
+ header->serv_specif_flags =
+ ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION,
+ ICP_QAT_FW_COMP_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+ hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL;
+ hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77;
+ hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED;
+ hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1;
+ hw_comp_lower_csr.hash_update = ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW;
+ hw_comp_lower_csr.edmm = ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED;
+ hw_comp_upper_csr.nice = ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL;
+ hw_comp_upper_csr.lazy = ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL;
+
+ upper_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(hw_comp_upper_csr);
+ lower_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(hw_comp_lower_csr);
+
+ cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
+ cd_pars->u.sl.comp_slice_cfg_word[1] = upper_val;
+
+ req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER;
+ req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC;
+ req_pars->req_par_flags =
+ ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(ICP_QAT_FW_COMP_SOP,
+ ICP_QAT_FW_COMP_EOP,
+ ICP_QAT_FW_COMP_BFINAL,
+ ICP_QAT_FW_COMP_CNV,
+ ICP_QAT_FW_COMP_CNV_RECOVERY,
+ ICP_QAT_FW_COMP_NO_CNV_DFX,
+ ICP_QAT_FW_COMP_CRC_MODE_LEGACY,
+ ICP_QAT_FW_COMP_NO_XXHASH_ACC,
+ ICP_QAT_FW_COMP_CNV_ERROR_NONE,
+ ICP_QAT_FW_COMP_NO_APPEND_CRC,
+ ICP_QAT_FW_COMP_NO_DROP_DATA);
+
+ /* Fill second half of the template for decompression */
+ memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl));
+ req_tmpl++;
+ header = &req_tmpl->comn_hdr;
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
+ cd_pars = &req_tmpl->cd_pars;
+
+ hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE;
+ lower_val = ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(hw_decomp_lower_csr);
+
+ cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
+ cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
+}
+
+void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops)
+{
+ dc_ops->build_deflate_ctx = qat_comp_build_deflate;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_dc_ops);
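
Editor's note: a hedged usage sketch for the builder above. The context buffer passed to build_deflate_ctx() must hold two request templates back to back, because the function fills the first for compression and then copies and patches the second for decompression. The caller-side code below is illustrative only and assumes kernel context with the same headers:

    /* Sketch only - not part of this patch */
    struct icp_qat_fw_comp_req tmpl[2] = { };
    struct adf_dc_ops dc_ops = { };

    adf_gen4_init_dc_ops(&dc_ops);
    dc_ops.build_deflate_ctx(tmpl);

    /* tmpl[0].comn_hdr.service_cmd_id == ICP_QAT_FW_COMP_CMD_STATIC     (compress)
     * tmpl[1].comn_hdr.service_cmd_id == ICP_QAT_FW_COMP_CMD_DECOMPRESS (decompress)
     */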
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h
new file mode 100644
index 0000000000..0b1a677441
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef ADF_GEN4_DC_H
+#define ADF_GEN4_DC_H
+
+#include "adf_accel_devices.h"
+
+void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops);
+
+#endif /* ADF_GEN4_DC_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
new file mode 100644
index 0000000000..3148a62938
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2020 Intel Corporation */
+#include <linux/iopoll.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen4_hw_data.h"
+
+static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
+{
+ return BUILD_RING_BASE_ADDR(addr, size);
+}
+
+static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+ return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ u32 value)
+{
+ WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+ return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ u32 value)
+{
+ WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_E_STAT(csr_base_addr, bank);
+}
+
+static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ u32 value)
+{
+ WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
+}
+
+static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ dma_addr_t addr)
+{
+ WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
+}
+
+static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
+{
+ WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
+}
+
+static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, u32 value)
+{
+ WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
+}
+
+static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
+}
+
+void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
+{
+ csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
+ csr_ops->read_csr_ring_head = read_csr_ring_head;
+ csr_ops->write_csr_ring_head = write_csr_ring_head;
+ csr_ops->read_csr_ring_tail = read_csr_ring_tail;
+ csr_ops->write_csr_ring_tail = write_csr_ring_tail;
+ csr_ops->read_csr_e_stat = read_csr_e_stat;
+ csr_ops->write_csr_ring_config = write_csr_ring_config;
+ csr_ops->write_csr_ring_base = write_csr_ring_base;
+ csr_ops->write_csr_int_flag = write_csr_int_flag;
+ csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
+ csr_ops->write_csr_int_col_en = write_csr_int_col_en;
+ csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
+ csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
+ csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops);
+
+static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper,
+ u32 *lower)
+{
+ *lower = lower_32_bits(value);
+ *upper = upper_32_bits(value);
+}
+
+void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ u64 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
+ u64 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
+ u32 ssm_wdt_pke_high = 0;
+ u32 ssm_wdt_pke_low = 0;
+ u32 ssm_wdt_high = 0;
+ u32 ssm_wdt_low = 0;
+
+ /* Convert the 64-bit WDT timer value into 32-bit halves for
+ * MMIO writes to the 32-bit CSRs.
+ */
+ adf_gen4_unpack_ssm_wdtimer(timer_val, &ssm_wdt_high, &ssm_wdt_low);
+ adf_gen4_unpack_ssm_wdtimer(timer_val_pke, &ssm_wdt_pke_high,
+ &ssm_wdt_pke_low);
+
+ /* Enable WDT for sym and dc */
+ ADF_CSR_WR(pmisc_addr, ADF_SSMWDTL_OFFSET, ssm_wdt_low);
+ ADF_CSR_WR(pmisc_addr, ADF_SSMWDTH_OFFSET, ssm_wdt_high);
+ /* Enable WDT for pke */
+ ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ssm_wdt_pke_low);
+ ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEH_OFFSET, ssm_wdt_pke_high);
+}
+EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer);
+
+int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_pfvf_comms_disabled);
+
+static int reset_ring_pair(void __iomem *csr, u32 bank_number)
+{
+ u32 status;
+ int ret;
+
+ /* Write rpresetctl register BIT(0) as 1
+ * Since rpresetctl registers have no RW fields, no need to preserve
+ * values for other bits. Just write directly.
+ */
+ ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
+ ADF_WQM_CSR_RPRESETCTL_RESET);
+
+ /* Read rpresetsts register and wait for rp reset to complete */
+ ret = read_poll_timeout(ADF_CSR_RD, status,
+ status & ADF_WQM_CSR_RPRESETSTS_STATUS,
+ ADF_RPRESET_POLL_DELAY_US,
+ ADF_RPRESET_POLL_TIMEOUT_US, true,
+ csr, ADF_WQM_CSR_RPRESETSTS(bank_number));
+ if (!ret) {
+ /* When rp reset is done, clear rpresetsts */
+ ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number),
+ ADF_WQM_CSR_RPRESETSTS_STATUS);
+ }
+
+ return ret;
+}
+
+int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 etr_bar_id = hw_data->get_etr_bar_id(hw_data);
+ void __iomem *csr;
+ int ret;
+
+ if (bank_number >= hw_data->num_banks)
+ return -EINVAL;
+
+ dev_dbg(&GET_DEV(accel_dev),
+ "ring pair reset for bank:%d\n", bank_number);
+
+ csr = (&GET_BARS(accel_dev)[etr_bar_id])->virt_addr;
+ ret = reset_ring_pair(csr, bank_number);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev),
+ "ring pair reset failed (timeout)\n");
+ else
+ dev_dbg(&GET_DEV(accel_dev), "ring pair reset successful\n");
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_ring_pair_reset);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
new file mode 100644
index 0000000000..02d7a019eb
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2020 Intel Corporation */
+#ifndef ADF_GEN4_HW_CSR_DATA_H_
+#define ADF_GEN4_HW_CSR_DATA_H_
+
+#include "adf_accel_devices.h"
+#include "adf_cfg_common.h"
+
+/* Transport access */
+#define ADF_BANK_INT_SRC_SEL_MASK 0x44UL
+#define ADF_RING_CSR_RING_CONFIG 0x1000
+#define ADF_RING_CSR_RING_LBASE 0x1040
+#define ADF_RING_CSR_RING_UBASE 0x1080
+#define ADF_RING_CSR_RING_HEAD 0x0C0
+#define ADF_RING_CSR_RING_TAIL 0x100
+#define ADF_RING_CSR_E_STAT 0x14C
+#define ADF_RING_CSR_INT_FLAG 0x170
+#define ADF_RING_CSR_INT_SRCSEL 0x174
+#define ADF_RING_CSR_INT_COL_CTL 0x180
+#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
+#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
+#define ADF_RING_CSR_INT_COL_EN 0x17C
+#define ADF_RING_CSR_ADDR_OFFSET 0x100000
+#define ADF_RING_BUNDLE_SIZE 0x2000
+
+#define BUILD_RING_BASE_ADDR(addr, size) \
+ ((((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) << 6)
+#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_HEAD + ((ring) << 2))
+#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_TAIL + ((ring) << 2))
+#define READ_CSR_E_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_E_STAT)
+#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value)
+#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
+do { \
+ void __iomem *_csr_base_addr = csr_base_addr; \
+ u32 _bank = bank; \
+ u32 _ring = ring; \
+ dma_addr_t _value = value; \
+ u32 l_base = 0, u_base = 0; \
+ l_base = lower_32_bits(_value); \
+ u_base = upper_32_bits(_value); \
+ ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (_bank) + \
+ ADF_RING_CSR_RING_LBASE + ((_ring) << 2), l_base); \
+ ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (_bank) + \
+ ADF_RING_CSR_RING_UBASE + ((_ring) << 2), u_base); \
+} while (0)
+
+#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
+#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
+#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_FLAG, (value))
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK)
+#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_COL_EN, (value))
+#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_COL_CTL, \
+ ADF_RING_CSR_INT_COL_CTL_ENABLE | (value))
+#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_FLAG_AND_COL, (value))
+
+/* Arbiter configuration */
+#define ADF_RING_CSR_RING_SRV_ARB_EN 0x19C
+
+#define WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_SRV_ARB_EN, (value))
+
+/* Default ring mapping */
+#define ADF_GEN4_DEFAULT_RING_TO_SRV_MAP \
+ (ASYM << ADF_CFG_SERV_RING_PAIR_0_SHIFT | \
+ SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
+ ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
+ SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
+
+/* WDT timers
+ *
+ * Timeout is in cycles. Clock speed may vary across products, but this
+ * value should correspond to a few milliseconds.
+ */
+#define ADF_SSM_WDT_DEFAULT_VALUE 0x7000000ULL
+#define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x8000000
+#define ADF_SSMWDTL_OFFSET 0x54
+#define ADF_SSMWDTH_OFFSET 0x5C
+#define ADF_SSMWDTPKEL_OFFSET 0x58
+#define ADF_SSMWDTPKEH_OFFSET 0x60
+
+/* Ring reset */
+#define ADF_RPRESET_POLL_TIMEOUT_US (5 * USEC_PER_SEC)
+#define ADF_RPRESET_POLL_DELAY_US 20
+#define ADF_WQM_CSR_RPRESETCTL_RESET BIT(0)
+#define ADF_WQM_CSR_RPRESETCTL(bank) (0x6000 + ((bank) << 3))
+#define ADF_WQM_CSR_RPRESETSTS_STATUS BIT(0)
+#define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4)
+
+/* Error source registers */
+#define ADF_GEN4_ERRSOU0 (0x41A200)
+#define ADF_GEN4_ERRSOU1 (0x41A204)
+#define ADF_GEN4_ERRSOU2 (0x41A208)
+#define ADF_GEN4_ERRSOU3 (0x41A20C)
+
+/* Error source mask registers */
+#define ADF_GEN4_ERRMSK0 (0x41A210)
+#define ADF_GEN4_ERRMSK1 (0x41A214)
+#define ADF_GEN4_ERRMSK2 (0x41A218)
+#define ADF_GEN4_ERRMSK3 (0x41A21C)
+
+#define ADF_GEN4_VFLNOTIFY BIT(7)
+
+/* Number of heartbeat counter pairs */
+#define ADF_NUM_HB_CNT_PER_AE ADF_NUM_THREADS_PER_AE
+
+void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
+void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
+int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c
new file mode 100644
index 0000000000..8e8efe93f3
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2021 Intel Corporation */
+#include <linux/iopoll.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen4_pfvf.h"
+#include "adf_pfvf_pf_proto.h"
+#include "adf_pfvf_utils.h"
+
+#define ADF_4XXX_PF2VM_OFFSET(i) (0x40B010 + ((i) * 0x20))
+#define ADF_4XXX_VM2PF_OFFSET(i) (0x40B014 + ((i) * 0x20))
+
+/* VF2PF interrupt source registers */
+#define ADF_4XXX_VM2PF_SOU 0x41A180
+#define ADF_4XXX_VM2PF_MSK 0x41A1C0
+#define ADF_GEN4_VF_MSK 0xFFFF
+
+#define ADF_PFVF_GEN4_MSGTYPE_SHIFT 2
+#define ADF_PFVF_GEN4_MSGTYPE_MASK 0x3F
+#define ADF_PFVF_GEN4_MSGDATA_SHIFT 8
+#define ADF_PFVF_GEN4_MSGDATA_MASK 0xFFFFFF
+
+static const struct pfvf_csr_format csr_gen4_fmt = {
+ { ADF_PFVF_GEN4_MSGTYPE_SHIFT, ADF_PFVF_GEN4_MSGTYPE_MASK },
+ { ADF_PFVF_GEN4_MSGDATA_SHIFT, ADF_PFVF_GEN4_MSGDATA_MASK },
+};
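
Editor's note: the gen4 PFVF CSR carries the whole message in a single 32-bit register, with the type in bits 7:2 and the data in bits 31:8 per the shift/mask values above. A self-contained packing/unpacking example with illustrative contents:

    #include <stdio.h>

    #define GEN4_MSGTYPE_SHIFT	2
    #define GEN4_MSGTYPE_MASK	0x3F
    #define GEN4_MSGDATA_SHIFT	8
    #define GEN4_MSGDATA_MASK	0xFFFFFF

    int main(void)
    {
    	unsigned int type = 0x05;	/* illustrative message type */
    	unsigned int data = 0x00ABCD;	/* illustrative payload */
    	unsigned int csr;

    	csr = ((type & GEN4_MSGTYPE_MASK) << GEN4_MSGTYPE_SHIFT) |
    	      ((data & GEN4_MSGDATA_MASK) << GEN4_MSGDATA_SHIFT);

    	printf("CSR word  = 0x%08x\n", csr);	/* 0x00abcd14 */
    	printf("type back = 0x%02x\n", (csr >> GEN4_MSGTYPE_SHIFT) & GEN4_MSGTYPE_MASK);
    	printf("data back = 0x%06x\n", (csr >> GEN4_MSGDATA_SHIFT) & GEN4_MSGDATA_MASK);
    	return 0;
    }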
+
+static u32 adf_gen4_pf_get_pf2vf_offset(u32 i)
+{
+ return ADF_4XXX_PF2VM_OFFSET(i);
+}
+
+static u32 adf_gen4_pf_get_vf2pf_offset(u32 i)
+{
+ return ADF_4XXX_VM2PF_OFFSET(i);
+}
+
+static void adf_gen4_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
+{
+ u32 val;
+
+ val = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK) & ~vf_mask;
+ ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, val);
+}
+
+static void adf_gen4_disable_all_vf2pf_interrupts(void __iomem *pmisc_addr)
+{
+ ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, ADF_GEN4_VF_MSK);
+}
+
+static u32 adf_gen4_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
+{
+ u32 sources, disabled, pending;
+
+ /* Get the interrupt sources triggered by VFs */
+ sources = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_SOU);
+ if (!sources)
+ return 0;
+
+ /* Get the already disabled interrupts */
+ disabled = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK);
+
+ pending = sources & ~disabled;
+ if (!pending)
+ return 0;
+
+ /* Due to HW limitations, when disabling the interrupts, we can't
+ * just disable the requested sources, as this would lead to missed
+ * interrupts if VM2PF_SOU changes just before writing to VM2PF_MSK.
+ * To work around it, disable all and re-enable only the sources that
+ * are not currently raised and were not already disabled. Re-enabling will
+ * trigger a new interrupt for the sources that have changed in the
+ * meantime, if any.
+ */
+ ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, ADF_GEN4_VF_MSK);
+ ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, disabled | sources);
+
+ /* Return the sources of the (new) interrupt(s) */
+ return pending;
+}
+
+static int adf_gen4_pfvf_send(struct adf_accel_dev *accel_dev,
+ struct pfvf_message msg, u32 pfvf_offset,
+ struct mutex *csr_lock)
+{
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ u32 csr_val;
+ int ret;
+
+ csr_val = adf_pfvf_csr_msg_of(accel_dev, msg, &csr_gen4_fmt);
+ if (unlikely(!csr_val))
+ return -EINVAL;
+
+ mutex_lock(csr_lock);
+
+ ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val | ADF_PFVF_INT);
+
+ /* Wait for confirmation from remote that it received the message */
+ ret = read_poll_timeout(ADF_CSR_RD, csr_val, !(csr_val & ADF_PFVF_INT),
+ ADF_PFVF_MSG_ACK_DELAY_US,
+ ADF_PFVF_MSG_ACK_MAX_DELAY_US,
+ true, pmisc_addr, pfvf_offset);
+ if (ret < 0)
+ dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
+
+ mutex_unlock(csr_lock);
+ return ret;
+}
+
+static struct pfvf_message adf_gen4_pfvf_recv(struct adf_accel_dev *accel_dev,
+ u32 pfvf_offset, u8 compat_ver)
+{
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ struct pfvf_message msg = { 0 };
+ u32 csr_val;
+
+ /* Read message from the CSR */
+ csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset);
+ if (!(csr_val & ADF_PFVF_INT)) {
+ dev_info(&GET_DEV(accel_dev),
+ "Spurious PFVF interrupt, msg 0x%.8x. Ignored\n", csr_val);
+ return msg;
+ }
+
+ /* We can now acknowledge the message reception by clearing the
+ * interrupt bit
+ */
+ ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val & ~ADF_PFVF_INT);
+
+ /* Return the pfvf_message format */
+ return adf_pfvf_message_of(accel_dev, csr_val, &csr_gen4_fmt);
+}
+
+void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+ pfvf_ops->enable_comms = adf_enable_pf2vf_comms;
+ pfvf_ops->get_pf2vf_offset = adf_gen4_pf_get_pf2vf_offset;
+ pfvf_ops->get_vf2pf_offset = adf_gen4_pf_get_vf2pf_offset;
+ pfvf_ops->enable_vf2pf_interrupts = adf_gen4_enable_vf2pf_interrupts;
+ pfvf_ops->disable_all_vf2pf_interrupts = adf_gen4_disable_all_vf2pf_interrupts;
+ pfvf_ops->disable_pending_vf2pf_interrupts = adf_gen4_disable_pending_vf2pf_interrupts;
+ pfvf_ops->send_msg = adf_gen4_pfvf_send;
+ pfvf_ops->recv_msg = adf_gen4_pfvf_recv;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_pf_pfvf_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h
new file mode 100644
index 0000000000..17d1b774d4
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_GEN4_PFVF_H
+#define ADF_GEN4_PFVF_H
+
+#include "adf_accel_devices.h"
+
+#ifdef CONFIG_PCI_IOV
+void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
+#else
+static inline void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+ pfvf_ops->enable_comms = adf_pfvf_comms_disabled;
+}
+#endif
+
+#endif /* ADF_GEN4_PFVF_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c
new file mode 100644
index 0000000000..34c6cd8e27
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2022 Intel Corporation */
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen4_pm.h"
+#include "adf_cfg_strings.h"
+#include "icp_qat_fw_init_admin.h"
+#include "adf_gen4_hw_data.h"
+#include "adf_cfg.h"
+
+enum qat_pm_host_msg {
+ PM_NO_CHANGE = 0,
+ PM_SET_MIN,
+};
+
+struct adf_gen4_pm_data {
+ struct work_struct pm_irq_work;
+ struct adf_accel_dev *accel_dev;
+ u32 pm_int_sts;
+};
+
+static int send_host_msg(struct adf_accel_dev *accel_dev)
+{
+ char pm_idle_support_cfg[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {};
+ void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+ bool pm_idle_support;
+ u32 msg;
+ int ret;
+
+ msg = ADF_CSR_RD(pmisc, ADF_GEN4_PM_HOST_MSG);
+ if (msg & ADF_GEN4_PM_MSG_PENDING)
+ return -EBUSY;
+
+ adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_PM_IDLE_SUPPORT, pm_idle_support_cfg);
+ ret = kstrtobool(pm_idle_support_cfg, &pm_idle_support);
+ if (ret)
+ pm_idle_support = true;
+
+ /* Send HOST_MSG */
+ msg = FIELD_PREP(ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK,
+ pm_idle_support ? PM_SET_MIN : PM_NO_CHANGE);
+ msg |= ADF_GEN4_PM_MSG_PENDING;
+ ADF_CSR_WR(pmisc, ADF_GEN4_PM_HOST_MSG, msg);
+
+ /* Poll status register to make sure the HOST_MSG has been processed */
+ return read_poll_timeout(ADF_CSR_RD, msg,
+ !(msg & ADF_GEN4_PM_MSG_PENDING),
+ ADF_GEN4_PM_MSG_POLL_DELAY_US,
+ ADF_GEN4_PM_POLL_TIMEOUT_US, true, pmisc,
+ ADF_GEN4_PM_HOST_MSG);
+}
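
Editor's note: send_host_msg() above packs the host message as a payload field in bits 28:1 plus a pending flag in bit 0. A minimal userspace equivalent of that packing (firmware behaviour aside; the constants are mirrored locally for illustration):

    #include <stdio.h>

    #define PM_MSG_PENDING		(1u << 0)	/* mirrors ADF_GEN4_PM_MSG_PENDING */
    #define PM_MSG_PAYLOAD_SHIFT	1		/* GENMASK(28, 1) starts at bit 1 */
    #define PM_SET_MIN		1

    int main(void)
    {
    	/* Equivalent of FIELD_PREP(ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK, PM_SET_MIN)
    	 * followed by setting the pending bit, as done in send_host_msg().
    	 */
    	unsigned int msg = (PM_SET_MIN << PM_MSG_PAYLOAD_SHIFT) | PM_MSG_PENDING;

    	printf("HOST_MSG word = 0x%08x\n", msg);	/* 0x00000003 */
    	return 0;
    }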
+
+static void pm_bh_handler(struct work_struct *work)
+{
+ struct adf_gen4_pm_data *pm_data =
+ container_of(work, struct adf_gen4_pm_data, pm_irq_work);
+ struct adf_accel_dev *accel_dev = pm_data->accel_dev;
+ void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+ u32 pm_int_sts = pm_data->pm_int_sts;
+ u32 val;
+
+ /* PM Idle interrupt */
+ if (pm_int_sts & ADF_GEN4_PM_IDLE_STS) {
+ /* Issue host message to FW */
+ if (send_host_msg(accel_dev))
+ dev_warn_ratelimited(&GET_DEV(accel_dev),
+ "Failed to send host msg to FW\n");
+ }
+
+ /* Clear interrupt status */
+ ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, pm_int_sts);
+
+ /* Reenable PM interrupt */
+ val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
+ val &= ~ADF_GEN4_PM_SOU;
+ ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
+
+ kfree(pm_data);
+}
+
+bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+ struct adf_gen4_pm_data *pm_data = NULL;
+ u32 errsou2;
+ u32 errmsk2;
+ u32 val;
+
+ /* Only handle the interrupt triggered by PM */
+ errmsk2 = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
+ if (errmsk2 & ADF_GEN4_PM_SOU)
+ return false;
+
+ errsou2 = ADF_CSR_RD(pmisc, ADF_GEN4_ERRSOU2);
+ if (!(errsou2 & ADF_GEN4_PM_SOU))
+ return false;
+
+ /* Disable interrupt */
+ val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
+ val |= ADF_GEN4_PM_SOU;
+ ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
+
+ val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT);
+
+ pm_data = kzalloc(sizeof(*pm_data), GFP_ATOMIC);
+ if (!pm_data)
+ return false;
+
+ pm_data->pm_int_sts = val;
+ pm_data->accel_dev = accel_dev;
+
+ INIT_WORK(&pm_data->pm_irq_work, pm_bh_handler);
+ adf_misc_wq_queue_work(&pm_data->pm_irq_work);
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_handle_pm_interrupt);
+
+int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+ int ret;
+ u32 val;
+
+ ret = adf_init_admin_pm(accel_dev, ADF_GEN4_PM_DEFAULT_IDLE_FILTER);
+ if (ret)
+ return ret;
+
+ /* Enable default PM interrupts: IDLE, THROTTLE */
+ val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT);
+ val |= ADF_GEN4_PM_INT_EN_DEFAULT;
+
+ /* Clear interrupt status */
+ val |= ADF_GEN4_PM_INT_STS_MASK;
+ ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, val);
+
+ /* Unmask PM Interrupt */
+ val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
+ val &= ~ADF_GEN4_PM_SOU;
+ ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_enable_pm);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
new file mode 100644
index 0000000000..c2768762cc
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef ADF_GEN4_PM_H
+#define ADF_GEN4_PM_H
+
+#include "adf_accel_devices.h"
+
+/* Power management registers */
+#define ADF_GEN4_PM_HOST_MSG (0x50A01C)
+
+/* Power management */
+#define ADF_GEN4_PM_POLL_DELAY_US 20
+#define ADF_GEN4_PM_POLL_TIMEOUT_US USEC_PER_SEC
+#define ADF_GEN4_PM_MSG_POLL_DELAY_US (10 * USEC_PER_MSEC)
+#define ADF_GEN4_PM_STATUS (0x50A00C)
+#define ADF_GEN4_PM_INTERRUPT (0x50A028)
+
+/* Power management source in ERRSOU2 and ERRMSK2 */
+#define ADF_GEN4_PM_SOU BIT(18)
+
+#define ADF_GEN4_PM_IDLE_INT_EN BIT(18)
+#define ADF_GEN4_PM_THROTTLE_INT_EN BIT(19)
+#define ADF_GEN4_PM_DRV_ACTIVE BIT(20)
+#define ADF_GEN4_PM_INIT_STATE BIT(21)
+#define ADF_GEN4_PM_INT_EN_DEFAULT (ADF_GEN4_PM_IDLE_INT_EN | \
+ ADF_GEN4_PM_THROTTLE_INT_EN)
+
+#define ADF_GEN4_PM_THR_STS BIT(0)
+#define ADF_GEN4_PM_IDLE_STS BIT(1)
+#define ADF_GEN4_PM_FW_INT_STS BIT(2)
+#define ADF_GEN4_PM_INT_STS_MASK (ADF_GEN4_PM_THR_STS | \
+ ADF_GEN4_PM_IDLE_STS | \
+ ADF_GEN4_PM_FW_INT_STS)
+
+#define ADF_GEN4_PM_MSG_PENDING BIT(0)
+#define ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK GENMASK(28, 1)
+
+#define ADF_GEN4_PM_DEFAULT_IDLE_FILTER (0x6)
+#define ADF_GEN4_PM_MAX_IDLE_FILTER (0x7)
+#define ADF_GEN4_PM_DEFAULT_IDLE_SUPPORT (0x1)
+
+int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev);
+bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c
new file mode 100644
index 0000000000..646c57922f
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include <linux/container_of.h>
+#include <linux/dev_printk.h>
+#include <linux/export.h>
+#include <linux/jiffies.h>
+#include <linux/ktime.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen4_timer.h"
+
+#define ADF_GEN4_TIMER_PERIOD_MS 200
+
+/* This periodic update is used to trigger HB, RL & TL fw events */
+static void work_handler(struct work_struct *work)
+{
+ struct adf_accel_dev *accel_dev;
+ struct adf_timer *timer_ctx;
+ u32 time_periods;
+
+ timer_ctx = container_of(to_delayed_work(work), struct adf_timer, work_ctx);
+ accel_dev = timer_ctx->accel_dev;
+
+ adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx,
+ msecs_to_jiffies(ADF_GEN4_TIMER_PERIOD_MS));
+
+ time_periods = div_u64(ktime_ms_delta(ktime_get_real(), timer_ctx->initial_ktime),
+ ADF_GEN4_TIMER_PERIOD_MS);
+
+ if (adf_send_admin_tim_sync(accel_dev, time_periods))
+ dev_err(&GET_DEV(accel_dev), "Failed to synchronize qat timer\n");
+}
+
+int adf_gen4_timer_start(struct adf_accel_dev *accel_dev)
+{
+ struct adf_timer *timer_ctx;
+
+ timer_ctx = kzalloc(sizeof(*timer_ctx), GFP_KERNEL);
+ if (!timer_ctx)
+ return -ENOMEM;
+
+ timer_ctx->accel_dev = accel_dev;
+ accel_dev->timer = timer_ctx;
+ timer_ctx->initial_ktime = ktime_get_real();
+
+ INIT_DELAYED_WORK(&timer_ctx->work_ctx, work_handler);
+ adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx,
+ msecs_to_jiffies(ADF_GEN4_TIMER_PERIOD_MS));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_timer_start);
+
+void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev)
+{
+ struct adf_timer *timer_ctx = accel_dev->timer;
+
+ if (!timer_ctx)
+ return;
+
+ cancel_delayed_work_sync(&timer_ctx->work_ctx);
+
+ kfree(timer_ctx);
+ accel_dev->timer = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_timer_stop);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h
new file mode 100644
index 0000000000..66a709e7b3
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+
+#ifndef ADF_GEN4_TIMER_H_
+#define ADF_GEN4_TIMER_H_
+
+#include <linux/ktime.h>
+#include <linux/workqueue.h>
+
+struct adf_accel_dev;
+
+struct adf_timer {
+ struct adf_accel_dev *accel_dev;
+ struct delayed_work work_ctx;
+ ktime_t initial_ktime;
+};
+
+int adf_gen4_timer_start(struct adf_accel_dev *accel_dev);
+void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_GEN4_TIMER_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c
new file mode 100644
index 0000000000..beef9a5f6c
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include <linux/dev_printk.h>
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/kstrtox.h>
+#include <linux/overflow.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <asm/errno.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "adf_clock.h"
+#include "adf_common_drv.h"
+#include "adf_heartbeat.h"
+#include "adf_transport_internal.h"
+#include "icp_qat_fw_init_admin.h"
+
+#define ADF_HB_EMPTY_SIG 0xA5A5A5A5
+
+/* Heartbeat counter pair */
+struct hb_cnt_pair {
+ __u16 resp_heartbeat_cnt;
+ __u16 req_heartbeat_cnt;
+};
+
+static int adf_hb_check_polling_freq(struct adf_accel_dev *accel_dev)
+{
+ u64 curr_time = adf_clock_get_current_time();
+ u64 polling_time = curr_time - accel_dev->heartbeat->last_hb_check_time;
+
+ if (polling_time < accel_dev->heartbeat->hb_timer) {
+ dev_warn(&GET_DEV(accel_dev),
+ "HB polling too frequent. Configured HB timer %d ms\n",
+ accel_dev->heartbeat->hb_timer);
+ return -EINVAL;
+ }
+
+ accel_dev->heartbeat->last_hb_check_time = curr_time;
+ return 0;
+}
+
+/**
+ * validate_hb_ctrs_cnt() - checks if the number of heartbeat counters should
+ * be updated by one to support the currently loaded firmware.
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Return:
+ * * true - hb_ctrs must be increased by ADF_NUM_PKE_STRAND
+ * * false - no changes needed
+ */
+static bool validate_hb_ctrs_cnt(struct adf_accel_dev *accel_dev)
+{
+ const size_t hb_ctrs = accel_dev->hw_device->num_hb_ctrs;
+ const size_t max_aes = accel_dev->hw_device->num_engines;
+ const size_t hb_struct_size = sizeof(struct hb_cnt_pair);
+ const size_t exp_diff_size = array3_size(ADF_NUM_PKE_STRAND, max_aes,
+ hb_struct_size);
+ const size_t dev_ctrs = size_mul(max_aes, hb_ctrs);
+ const size_t stats_size = size_mul(dev_ctrs, hb_struct_size);
+ const u32 exp_diff_cnt = exp_diff_size / sizeof(u32);
+ const u32 stats_el_cnt = stats_size / sizeof(u32);
+ struct hb_cnt_pair *hb_stats = accel_dev->heartbeat->dma.virt_addr;
+ const u32 *mem_to_chk = (u32 *)(hb_stats + dev_ctrs);
+ u32 el_diff_cnt = 0;
+ int i;
+
+ /* count how many u32 elements differ from the empty-signature pattern */
+ for (i = 0; i < stats_el_cnt; i++) {
+ if (mem_to_chk[i] == ADF_HB_EMPTY_SIG)
+ break;
+
+ el_diff_cnt++;
+ }
+
+ return el_diff_cnt && el_diff_cnt == exp_diff_cnt;
+}
+
+void adf_heartbeat_check_ctrs(struct adf_accel_dev *accel_dev)
+{
+ struct hb_cnt_pair *hb_stats = accel_dev->heartbeat->dma.virt_addr;
+ const size_t hb_ctrs = accel_dev->hw_device->num_hb_ctrs;
+ const size_t max_aes = accel_dev->hw_device->num_engines;
+ const size_t dev_ctrs = size_mul(max_aes, hb_ctrs);
+ const size_t stats_size = size_mul(dev_ctrs, sizeof(struct hb_cnt_pair));
+ const size_t mem_items_to_fill = size_mul(stats_size, 2) / sizeof(u32);
+
+ /* fill hb stats memory with pattern */
+ memset32((uint32_t *)hb_stats, ADF_HB_EMPTY_SIG, mem_items_to_fill);
+ accel_dev->heartbeat->ctrs_cnt_checked = false;
+}
+EXPORT_SYMBOL_GPL(adf_heartbeat_check_ctrs);
+
+static int get_timer_ticks(struct adf_accel_dev *accel_dev, unsigned int *value)
+{
+ char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { };
+ u32 timer_ms = ADF_CFG_HB_TIMER_DEFAULT_MS;
+ int cfg_read_status;
+ u32 ticks;
+ int ret;
+
+ cfg_read_status = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_HEARTBEAT_TIMER, timer_str);
+ if (cfg_read_status == 0) {
+ if (kstrtouint(timer_str, 10, &timer_ms))
+ dev_dbg(&GET_DEV(accel_dev),
+ "kstrtouint failed to parse the %s, param value",
+ ADF_HEARTBEAT_TIMER);
+ }
+
+ if (timer_ms < ADF_CFG_HB_TIMER_MIN_MS) {
+ dev_err(&GET_DEV(accel_dev), "Timer cannot be less than %u\n",
+ ADF_CFG_HB_TIMER_MIN_MS);
+ return -EINVAL;
+ }
+
+ /*
+ * On 4xxx devices adf_timer is responsible for HB updates and
+ * its period is fixed to 200ms
+ */
+ if (accel_dev->timer)
+ timer_ms = ADF_CFG_HB_TIMER_MIN_MS;
+
+ ret = adf_heartbeat_ms_to_ticks(accel_dev, timer_ms, &ticks);
+ if (ret)
+ return ret;
+
+ adf_heartbeat_save_cfg_param(accel_dev, timer_ms);
+
+ accel_dev->heartbeat->hb_timer = timer_ms;
+ *value = ticks;
+
+ return 0;
+}
+
+static int check_ae(struct hb_cnt_pair *curr, struct hb_cnt_pair *prev,
+ u16 *count, const size_t hb_ctrs)
+{
+ size_t thr;
+
+ /* loop through all threads in AE */
+ for (thr = 0; thr < hb_ctrs; thr++) {
+ u16 req = curr[thr].req_heartbeat_cnt;
+ u16 resp = curr[thr].resp_heartbeat_cnt;
+ u16 last = prev[thr].resp_heartbeat_cnt;
+
+ if ((thr == ADF_AE_ADMIN_THREAD || req != resp) && resp == last) {
+ u16 retry = ++count[thr];
+
+ if (retry >= ADF_CFG_HB_COUNT_THRESHOLD)
+ return -EIO;
+
+ } else {
+ count[thr] = 0;
+ }
+ }
+ return 0;
+}
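
Editor's note: check_ae() declares a thread unresponsive when its response counter stops moving while requests keep being issued, for ADF_CFG_HB_COUNT_THRESHOLD consecutive polls. A self-contained sketch of that decision for a single non-admin thread (the threshold value here is illustrative, not necessarily the driver's):

    #include <stdio.h>

    #define HB_COUNT_THRESHOLD	3	/* illustrative stand-in for ADF_CFG_HB_COUNT_THRESHOLD */

    struct hb_cnt_pair {
    	unsigned short resp_heartbeat_cnt;
    	unsigned short req_heartbeat_cnt;
    };

    /* Same decision as check_ae() applies to a single non-admin thread */
    static int thread_alive(struct hb_cnt_pair curr, unsigned short last_resp,
    			unsigned short *fail_count)
    {
    	if (curr.req_heartbeat_cnt != curr.resp_heartbeat_cnt &&
    	    curr.resp_heartbeat_cnt == last_resp) {
    		if (++(*fail_count) >= HB_COUNT_THRESHOLD)
    			return 0;	/* considered unresponsive */
    	} else {
    		*fail_count = 0;
    	}
    	return 1;
    }

    int main(void)
    {
    	struct hb_cnt_pair curr = { .resp_heartbeat_cnt = 10, .req_heartbeat_cnt = 12 };
    	unsigned short fails = 0;
    	int i;

    	/* Response counter stuck at 10 while requests keep arriving */
    	for (i = 0; i < 3; i++)
    		printf("poll %d: alive=%d fails=%u\n",
    		       i, thread_alive(curr, 10, &fails), (unsigned)fails);
    	return 0;
    }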
+
+static int adf_hb_get_status(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ struct hb_cnt_pair *live_stats, *last_stats, *curr_stats;
+ const size_t hb_ctrs = hw_device->num_hb_ctrs;
+ const unsigned long ae_mask = hw_device->ae_mask;
+ const size_t max_aes = hw_device->num_engines;
+ const size_t dev_ctrs = size_mul(max_aes, hb_ctrs);
+ const size_t stats_size = size_mul(dev_ctrs, sizeof(*curr_stats));
+ struct hb_cnt_pair *ae_curr_p, *ae_prev_p;
+ u16 *count_fails, *ae_count_p;
+ size_t ae_offset;
+ size_t ae = 0;
+ int ret = 0;
+
+ if (!accel_dev->heartbeat->ctrs_cnt_checked) {
+ if (validate_hb_ctrs_cnt(accel_dev))
+ hw_device->num_hb_ctrs += ADF_NUM_PKE_STRAND;
+
+ accel_dev->heartbeat->ctrs_cnt_checked = true;
+ }
+
+ live_stats = accel_dev->heartbeat->dma.virt_addr;
+ last_stats = live_stats + dev_ctrs;
+ count_fails = (u16 *)(last_stats + dev_ctrs);
+
+ curr_stats = kmemdup(live_stats, stats_size, GFP_KERNEL);
+ if (!curr_stats)
+ return -ENOMEM;
+
+ /* loop through active AEs */
+ for_each_set_bit(ae, &ae_mask, max_aes) {
+ ae_offset = size_mul(ae, hb_ctrs);
+ ae_curr_p = curr_stats + ae_offset;
+ ae_prev_p = last_stats + ae_offset;
+ ae_count_p = count_fails + ae_offset;
+
+ ret = check_ae(ae_curr_p, ae_prev_p, ae_count_p, hb_ctrs);
+ if (ret)
+ break;
+ }
+
+ /* Copy current stats for the next iteration */
+ memcpy(last_stats, curr_stats, stats_size);
+ kfree(curr_stats);
+
+ return ret;
+}
+
+void adf_heartbeat_status(struct adf_accel_dev *accel_dev,
+ enum adf_device_heartbeat_status *hb_status)
+{
+ struct adf_heartbeat *hb;
+
+ if (!adf_dev_started(accel_dev) ||
+ test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) {
+ *hb_status = HB_DEV_UNRESPONSIVE;
+ return;
+ }
+
+ if (adf_hb_check_polling_freq(accel_dev) == -EINVAL) {
+ *hb_status = HB_DEV_UNSUPPORTED;
+ return;
+ }
+
+ hb = accel_dev->heartbeat;
+ hb->hb_sent_counter++;
+
+ if (adf_hb_get_status(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Heartbeat ERROR: QAT is not responding.\n");
+ *hb_status = HB_DEV_UNRESPONSIVE;
+ hb->hb_failed_counter++;
+ return;
+ }
+
+ *hb_status = HB_DEV_ALIVE;
+}
+
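+/*
+ * Worked example for the conversion below (illustrative clock value only):
+ * with a 600 MHz heartbeat clock, clk_per_sec / MSEC_PER_SEC gives 600000
+ * ticks per millisecond, so the default 500 ms period maps to 300000000
+ * ticks sent to the firmware timer.
+ */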
+int adf_heartbeat_ms_to_ticks(struct adf_accel_dev *accel_dev, unsigned int time_ms,
+ u32 *value)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 clk_per_sec;
+
+ /* HB clock may be different than AE clock */
+ if (!hw_data->get_hb_clock)
+ return -EINVAL;
+
+ clk_per_sec = hw_data->get_hb_clock(hw_data);
+ *value = time_ms * (clk_per_sec / MSEC_PER_SEC);
+
+ return 0;
+}
+
+int adf_heartbeat_save_cfg_param(struct adf_accel_dev *accel_dev,
+ unsigned int timer_ms)
+{
+ char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+
+ snprintf(timer_str, sizeof(timer_str), "%u", timer_ms);
+ return adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+ ADF_HEARTBEAT_TIMER, timer_str,
+ ADF_STR);
+}
+EXPORT_SYMBOL_GPL(adf_heartbeat_save_cfg_param);
+
+int adf_heartbeat_init(struct adf_accel_dev *accel_dev)
+{
+ struct adf_heartbeat *hb;
+
+ hb = kzalloc(sizeof(*hb), GFP_KERNEL);
+ if (!hb)
+ goto err_ret;
+
+ hb->dma.virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+ &hb->dma.phy_addr, GFP_KERNEL);
+ if (!hb->dma.virt_addr)
+ goto err_free;
+
+ /*
+ * Set this flag to true by default to avoid unnecessary checks;
+ * it will be cleared on platforms that need such a check.
+ */
+ hb->ctrs_cnt_checked = true;
+ accel_dev->heartbeat = hb;
+
+ return 0;
+
+err_free:
+ kfree(hb);
+err_ret:
+ return -ENOMEM;
+}
+
+int adf_heartbeat_start(struct adf_accel_dev *accel_dev)
+{
+ unsigned int timer_ticks;
+ int ret;
+
+ if (!accel_dev->heartbeat) {
+ dev_warn(&GET_DEV(accel_dev), "Heartbeat instance not found!");
+ return -EFAULT;
+ }
+
+ if (accel_dev->hw_device->check_hb_ctrs)
+ accel_dev->hw_device->check_hb_ctrs(accel_dev);
+
+ ret = get_timer_ticks(accel_dev, &timer_ticks);
+ if (ret)
+ return ret;
+
+ ret = adf_send_admin_hb_timer(accel_dev, timer_ticks);
+ if (ret)
+ dev_warn(&GET_DEV(accel_dev), "Heartbeat not supported!");
+
+ return ret;
+}
+
+void adf_heartbeat_shutdown(struct adf_accel_dev *accel_dev)
+{
+ struct adf_heartbeat *hb = accel_dev->heartbeat;
+
+ if (!hb)
+ return;
+
+ if (hb->dma.virt_addr)
+ dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+ hb->dma.virt_addr, hb->dma.phy_addr);
+
+ kfree(hb);
+ accel_dev->heartbeat = NULL;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h
new file mode 100644
index 0000000000..b22e3cb297
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+
+#ifndef ADF_HEARTBEAT_H_
+#define ADF_HEARTBEAT_H_
+
+#include <linux/types.h>
+
+struct adf_accel_dev;
+struct dentry;
+
+#define ADF_CFG_HB_TIMER_MIN_MS 200
+#define ADF_CFG_HB_TIMER_DEFAULT_MS 500
+#define ADF_CFG_HB_COUNT_THRESHOLD 3
+
+enum adf_device_heartbeat_status {
+ HB_DEV_UNRESPONSIVE = 0,
+ HB_DEV_ALIVE,
+ HB_DEV_UNSUPPORTED,
+};
+
+struct adf_heartbeat {
+ unsigned int hb_sent_counter;
+ unsigned int hb_failed_counter;
+ unsigned int hb_timer;
+ u64 last_hb_check_time;
+ bool ctrs_cnt_checked;
+ struct hb_dma_addr {
+ dma_addr_t phy_addr;
+ void *virt_addr;
+ } dma;
+ struct {
+ struct dentry *base_dir;
+ struct dentry *status;
+ struct dentry *cfg;
+ struct dentry *sent;
+ struct dentry *failed;
+ } dbgfs;
+};
+
+#ifdef CONFIG_DEBUG_FS
+int adf_heartbeat_init(struct adf_accel_dev *accel_dev);
+int adf_heartbeat_start(struct adf_accel_dev *accel_dev);
+void adf_heartbeat_shutdown(struct adf_accel_dev *accel_dev);
+
+int adf_heartbeat_ms_to_ticks(struct adf_accel_dev *accel_dev, unsigned int time_ms,
+ u32 *value);
+int adf_heartbeat_save_cfg_param(struct adf_accel_dev *accel_dev,
+ unsigned int timer_ms);
+void adf_heartbeat_status(struct adf_accel_dev *accel_dev,
+ enum adf_device_heartbeat_status *hb_status);
+void adf_heartbeat_check_ctrs(struct adf_accel_dev *accel_dev);
+
+#else
+static inline int adf_heartbeat_init(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
+static inline int adf_heartbeat_start(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
+static inline void adf_heartbeat_shutdown(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline int adf_heartbeat_save_cfg_param(struct adf_accel_dev *accel_dev,
+ unsigned int timer_ms)
+{
+ return 0;
+}
+
+static inline void adf_heartbeat_check_ctrs(struct adf_accel_dev *accel_dev)
+{
+}
+#endif
+#endif /* ADF_HEARTBEAT_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c
new file mode 100644
index 0000000000..803cbfd838
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/kstrtox.h>
+#include <linux/types.h>
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+#include "adf_heartbeat.h"
+#include "adf_heartbeat_dbgfs.h"
+
+#define HB_OK 0
+#define HB_ERROR -1
+#define HB_STATUS_MAX_STRLEN 4
+#define HB_STATS_MAX_STRLEN 16
+
+static ssize_t adf_hb_stats_read(struct file *file, char __user *user_buffer,
+ size_t count, loff_t *ppos)
+{
+ char buf[HB_STATS_MAX_STRLEN];
+ unsigned int *value;
+ int len;
+
+ if (*ppos > 0)
+ return 0;
+
+ value = file->private_data;
+ len = scnprintf(buf, sizeof(buf), "%u\n", *value);
+
+ return simple_read_from_buffer(user_buffer, count, ppos, buf, len + 1);
+}
+
+static const struct file_operations adf_hb_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = adf_hb_stats_read,
+};
+
+static ssize_t adf_hb_status_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ enum adf_device_heartbeat_status hb_status;
+ char ret_str[HB_STATUS_MAX_STRLEN];
+ struct adf_accel_dev *accel_dev;
+ int ret_code;
+ size_t len;
+
+ if (*ppos > 0)
+ return 0;
+
+ accel_dev = file->private_data;
+ ret_code = HB_OK;
+
+ adf_heartbeat_status(accel_dev, &hb_status);
+
+ if (hb_status != HB_DEV_ALIVE)
+ ret_code = HB_ERROR;
+
+ len = scnprintf(ret_str, sizeof(ret_str), "%d\n", ret_code);
+
+ return simple_read_from_buffer(user_buf, count, ppos, ret_str, len + 1);
+}
+
+static const struct file_operations adf_hb_status_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = adf_hb_status_read,
+};
+
+static ssize_t adf_hb_cfg_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+ struct adf_accel_dev *accel_dev;
+ unsigned int timer_ms;
+ int len;
+
+ if (*ppos > 0)
+ return 0;
+
+ accel_dev = file->private_data;
+ timer_ms = accel_dev->heartbeat->hb_timer;
+ len = scnprintf(timer_str, sizeof(timer_str), "%u\n", timer_ms);
+
+ return simple_read_from_buffer(user_buf, count, ppos, timer_str,
+ len + 1);
+}
+
+static ssize_t adf_hb_cfg_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char input_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { };
+ struct adf_accel_dev *accel_dev;
+ int ret, written_chars;
+ unsigned int timer_ms;
+ u32 ticks;
+
+ accel_dev = file->private_data;
+ timer_ms = ADF_CFG_HB_TIMER_DEFAULT_MS;
+
+ /* last byte left as string termination */
+ if (count > sizeof(input_str) - 1)
+ return -EINVAL;
+
+ written_chars = simple_write_to_buffer(input_str, sizeof(input_str) - 1,
+ ppos, user_buf, count);
+ if (written_chars > 0) {
+ ret = kstrtouint(input_str, 10, &timer_ms);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "heartbeat_cfg: Invalid value\n");
+ return ret;
+ }
+
+ if (timer_ms < ADF_CFG_HB_TIMER_MIN_MS) {
+ dev_err(&GET_DEV(accel_dev),
+ "heartbeat_cfg: Invalid value\n");
+ return -EINVAL;
+ }
+
+ /*
+ * On 4xxx devices adf_timer is responsible for HB updates and
+ * its period is fixed to 200ms
+ */
+ if (accel_dev->timer)
+ timer_ms = ADF_CFG_HB_TIMER_MIN_MS;
+
+ ret = adf_heartbeat_save_cfg_param(accel_dev, timer_ms);
+ if (ret)
+ return ret;
+
+ ret = adf_heartbeat_ms_to_ticks(accel_dev, timer_ms, &ticks);
+ if (ret)
+ return ret;
+
+ ret = adf_send_admin_hb_timer(accel_dev, ticks);
+ if (ret)
+ return ret;
+
+ accel_dev->heartbeat->hb_timer = timer_ms;
+ }
+
+ return written_chars;
+}
+
+static const struct file_operations adf_hb_cfg_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = adf_hb_cfg_read,
+ .write = adf_hb_cfg_write,
+};
+
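+/*
+ * Rough usage sketch for the files created below, assuming debugfs is
+ * mounted at /sys/kernel/debug and <dev dir> is the per-device directory
+ * created by adf_dbgfs.c (its exact name is not defined here):
+ *
+ *   cat  /sys/kernel/debug/<dev dir>/heartbeat/status        # 0 if alive
+ *   echo 500 > /sys/kernel/debug/<dev dir>/heartbeat/config  # period in ms
+ */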
+void adf_heartbeat_dbgfs_add(struct adf_accel_dev *accel_dev)
+{
+ struct adf_heartbeat *hb = accel_dev->heartbeat;
+
+ if (!hb)
+ return;
+
+ hb->dbgfs.base_dir = debugfs_create_dir("heartbeat", accel_dev->debugfs_dir);
+ hb->dbgfs.status = debugfs_create_file("status", 0400, hb->dbgfs.base_dir,
+ accel_dev, &adf_hb_status_fops);
+ hb->dbgfs.sent = debugfs_create_file("queries_sent", 0400, hb->dbgfs.base_dir,
+ &hb->hb_sent_counter, &adf_hb_stats_fops);
+ hb->dbgfs.failed = debugfs_create_file("queries_failed", 0400, hb->dbgfs.base_dir,
+ &hb->hb_failed_counter, &adf_hb_stats_fops);
+ hb->dbgfs.cfg = debugfs_create_file("config", 0600, hb->dbgfs.base_dir,
+ accel_dev, &adf_hb_cfg_fops);
+}
+EXPORT_SYMBOL_GPL(adf_heartbeat_dbgfs_add);
+
+void adf_heartbeat_dbgfs_rm(struct adf_accel_dev *accel_dev)
+{
+ struct adf_heartbeat *hb = accel_dev->heartbeat;
+
+ if (!hb)
+ return;
+
+ debugfs_remove(hb->dbgfs.status);
+ hb->dbgfs.status = NULL;
+ debugfs_remove(hb->dbgfs.sent);
+ hb->dbgfs.sent = NULL;
+ debugfs_remove(hb->dbgfs.failed);
+ hb->dbgfs.failed = NULL;
+ debugfs_remove(hb->dbgfs.cfg);
+ hb->dbgfs.cfg = NULL;
+ debugfs_remove(hb->dbgfs.base_dir);
+ hb->dbgfs.base_dir = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_heartbeat_dbgfs_rm);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.h b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.h
new file mode 100644
index 0000000000..84dd29ea64
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+
+#ifndef ADF_HEARTBEAT_DBGFS_H_
+#define ADF_HEARTBEAT_DBGFS_H_
+
+struct adf_accel_dev;
+
+void adf_heartbeat_dbgfs_add(struct adf_accel_dev *accel_dev);
+void adf_heartbeat_dbgfs_rm(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_HEARTBEAT_DBGFS_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
new file mode 100644
index 0000000000..da69566992
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_transport_internal.h"
+
+#define ADF_ARB_NUM 4
+#define ADF_ARB_REG_SIZE 0x4
+
+#define WRITE_CSR_ARB_SARCONFIG(csr_addr, arb_offset, index, value) \
+ ADF_CSR_WR(csr_addr, (arb_offset) + \
+ (ADF_ARB_REG_SIZE * (index)), value)
+
+#define WRITE_CSR_ARB_WT2SAM(csr_addr, arb_offset, wt_offset, index, value) \
+ ADF_CSR_WR(csr_addr, ((arb_offset) + (wt_offset)) + \
+ (ADF_ARB_REG_SIZE * (index)), value)
+
+int adf_init_arb(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
+ unsigned long ae_mask = hw_data->ae_mask;
+ u32 arb_off, wt_off, arb_cfg;
+ const u32 *thd_2_arb_cfg;
+ struct arb_info info;
+ int arb, i;
+
+ hw_data->get_arb_info(&info);
+ arb_cfg = info.arb_cfg;
+ arb_off = info.arb_offset;
+ wt_off = info.wt2sam_offset;
+
+ /* Service arbiter configured for 32-byte responses and
+ * ring flow control check enabled.
+ */
+ for (arb = 0; arb < ADF_ARB_NUM; arb++)
+ WRITE_CSR_ARB_SARCONFIG(csr, arb_off, arb, arb_cfg);
+
+ /* Map worker threads to service arbiters */
+ thd_2_arb_cfg = hw_data->get_arb_mapping(accel_dev);
+
+ for_each_set_bit(i, &ae_mask, hw_data->num_engines)
+ WRITE_CSR_ARB_WT2SAM(csr, arb_off, wt_off, i, thd_2_arb_cfg[i]);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_init_arb);
+
+void adf_update_ring_arb(struct adf_etr_ring_data *ring)
+{
+ struct adf_accel_dev *accel_dev = ring->bank->accel_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+ u32 tx_ring_mask = hw_data->tx_rings_mask;
+ u32 shift = hw_data->tx_rx_gap;
+ u32 arben, arben_tx, arben_rx;
+ u32 rx_ring_mask;
+
+ /*
+ * Enable arbitration on a ring only if the TX half of the ring mask
+ * matches the RX part. This results in writes to CSR on both TX and
+ * RX update - only one is necessary, but both are done for
+ * simplicity.
+ */
+ rx_ring_mask = tx_ring_mask << shift;
+ arben_tx = (ring->bank->ring_mask & tx_ring_mask) >> 0;
+ arben_rx = (ring->bank->ring_mask & rx_ring_mask) >> shift;
+ arben = arben_tx & arben_rx;
+
+ csr_ops->write_csr_ring_srv_arb_en(ring->bank->csr_addr,
+ ring->bank->bank_number, arben);
+}
+
+void adf_exit_arb(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+ u32 arb_off, wt_off;
+ struct arb_info info;
+ void __iomem *csr;
+ unsigned int i;
+
+ hw_data->get_arb_info(&info);
+ arb_off = info.arb_offset;
+ wt_off = info.wt2sam_offset;
+
+ if (!accel_dev->transport)
+ return;
+
+ csr = accel_dev->transport->banks[0].csr_addr;
+
+ /* Reset arbiter configuration */
+ for (i = 0; i < ADF_ARB_NUM; i++)
+ WRITE_CSR_ARB_SARCONFIG(csr, arb_off, i, 0);
+
+ /* Unmap worker threads from service arbiters */
+ for (i = 0; i < hw_data->num_engines; i++)
+ WRITE_CSR_ARB_WT2SAM(csr, arb_off, wt_off, i, 0);
+
+ /* Disable arbitration on all rings */
+ for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
+ csr_ops->write_csr_ring_srv_arb_en(csr, i, 0);
+}
+EXPORT_SYMBOL_GPL(adf_exit_arb);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
new file mode 100644
index 0000000000..0f9e2d59ce
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_init.c
@@ -0,0 +1,516 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+#include "adf_dbgfs.h"
+#include "adf_heartbeat.h"
+
+static LIST_HEAD(service_table);
+static DEFINE_MUTEX(service_lock);
+
+static void adf_service_add(struct service_hndl *service)
+{
+ mutex_lock(&service_lock);
+ list_add(&service->list, &service_table);
+ mutex_unlock(&service_lock);
+}
+
+int adf_service_register(struct service_hndl *service)
+{
+ memset(service->init_status, 0, sizeof(service->init_status));
+ memset(service->start_status, 0, sizeof(service->start_status));
+ adf_service_add(service);
+ return 0;
+}
+
+static void adf_service_remove(struct service_hndl *service)
+{
+ mutex_lock(&service_lock);
+ list_del(&service->list);
+ mutex_unlock(&service_lock);
+}
+
+int adf_service_unregister(struct service_hndl *service)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(service->init_status); i++) {
+ if (service->init_status[i] || service->start_status[i]) {
+ pr_err("QAT: Could not remove active service\n");
+ return -EFAULT;
+ }
+ }
+ adf_service_remove(service);
+ return 0;
+}
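+
+/*
+ * Minimal registration sketch (hypothetical service name and handler, shown
+ * only to illustrate the API used throughout this file): a subsystem fills
+ * in a struct service_hndl with a name and an event_hld() callback that
+ * handles the ADF_EVENT_* notifications sent below, then registers it once:
+ *
+ *	static struct service_hndl my_service = {
+ *		.name = "my_service",
+ *		.event_hld = my_event_handler,
+ *	};
+ *
+ *	adf_service_register(&my_service);
+ */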
+
+/**
+ * adf_dev_init() - Init data structures and services for the given accel device
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Initialize the ring data structures and the admin comms and arbitration
+ * services.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+static int adf_dev_init(struct adf_accel_dev *accel_dev)
+{
+ struct service_hndl *service;
+ struct list_head *list_itr;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ int ret;
+
+ if (!hw_data) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to init device - hw_data not set\n");
+ return -EFAULT;
+ }
+
+ if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
+ !accel_dev->is_vf) {
+ dev_err(&GET_DEV(accel_dev), "Device not configured\n");
+ return -EFAULT;
+ }
+
+ if (adf_init_etr_data(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed initialize etr\n");
+ return -EFAULT;
+ }
+
+ if (hw_data->init_device && hw_data->init_device(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to initialize device\n");
+ return -EFAULT;
+ }
+
+ if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed initialize admin comms\n");
+ return -EFAULT;
+ }
+
+ if (hw_data->init_arb && hw_data->init_arb(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed initialize hw arbiter\n");
+ return -EFAULT;
+ }
+
+ if (hw_data->get_ring_to_svc_map)
+ hw_data->ring_to_svc_map = hw_data->get_ring_to_svc_map(accel_dev);
+
+ if (adf_ae_init(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to initialise Acceleration Engine\n");
+ return -EFAULT;
+ }
+ set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);
+
+ if (adf_ae_fw_load(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to load acceleration FW\n");
+ return -EFAULT;
+ }
+ set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
+
+ if (hw_data->alloc_irq(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to allocate interrupts\n");
+ return -EFAULT;
+ }
+ set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
+
+ hw_data->enable_ints(accel_dev);
+ hw_data->enable_error_correction(accel_dev);
+
+ ret = hw_data->pfvf_ops.enable_comms(accel_dev);
+ if (ret)
+ return ret;
+
+ if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
+ accel_dev->is_vf) {
+ if (qat_crypto_vf_dev_config(accel_dev))
+ return -EFAULT;
+ }
+
+ adf_heartbeat_init(accel_dev);
+
+ /*
+ * Subservice initialisation is divided into two stages: init and start.
+ * This is to facilitate any ordering dependencies between services
+ * prior to starting any of the accelerators.
+ */
+ list_for_each(list_itr, &service_table) {
+ service = list_entry(list_itr, struct service_hndl, list);
+ if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to initialise service %s\n",
+ service->name);
+ return -EFAULT;
+ }
+ set_bit(accel_dev->accel_id, service->init_status);
+ }
+
+ return 0;
+}
+
+/**
+ * adf_dev_start() - Start acceleration service for the given accel device
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function notifies all the registered services that the acceleration device
+ * is ready to be used.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+static int adf_dev_start(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct service_hndl *service;
+ struct list_head *list_itr;
+ int ret;
+
+ set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+
+ if (adf_ae_start(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "AE Start Failed\n");
+ return -EFAULT;
+ }
+ set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
+
+ if (hw_data->send_admin_init(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to send init message\n");
+ return -EFAULT;
+ }
+
+ if (hw_data->measure_clock) {
+ ret = hw_data->measure_clock(accel_dev);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Failed measure device clock\n");
+ return ret;
+ }
+ }
+
+ /* Set ssm watch dog timer */
+ if (hw_data->set_ssm_wdtimer)
+ hw_data->set_ssm_wdtimer(accel_dev);
+
+ /* Enable Power Management */
+ if (hw_data->enable_pm && hw_data->enable_pm(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to configure Power Management\n");
+ return -EFAULT;
+ }
+
+ if (hw_data->start_timer) {
+ ret = hw_data->start_timer(accel_dev);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Failed to start internal sync timer\n");
+ return ret;
+ }
+ }
+
+ adf_heartbeat_start(accel_dev);
+
+ list_for_each(list_itr, &service_table) {
+ service = list_entry(list_itr, struct service_hndl, list);
+ if (service->event_hld(accel_dev, ADF_EVENT_START)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to start service %s\n",
+ service->name);
+ return -EFAULT;
+ }
+ set_bit(accel_dev->accel_id, service->start_status);
+ }
+
+ clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
+ set_bit(ADF_STATUS_STARTED, &accel_dev->status);
+
+ if (!list_empty(&accel_dev->crypto_list) &&
+ (qat_algs_register() || qat_asym_algs_register())) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to register crypto algs\n");
+ set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+ clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+ return -EFAULT;
+ }
+ set_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);
+
+ if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register()) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to register compression algs\n");
+ set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+ clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+ return -EFAULT;
+ }
+ set_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status);
+
+ adf_dbgfs_add(accel_dev);
+
+ return 0;
+}
+
+/**
+ * adf_dev_stop() - Stop acceleration service for the given accel device
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function notifies all the registered services that the acceleration device
+ * is shutting down.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+static void adf_dev_stop(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct service_hndl *service;
+ struct list_head *list_itr;
+ bool wait = false;
+ int ret;
+
+ if (!adf_dev_started(accel_dev) &&
+ !test_bit(ADF_STATUS_STARTING, &accel_dev->status))
+ return;
+
+ adf_dbgfs_rm(accel_dev);
+
+ clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
+ clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+
+ if (!list_empty(&accel_dev->crypto_list) &&
+ test_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status)) {
+ qat_algs_unregister();
+ qat_asym_algs_unregister();
+ }
+ clear_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);
+
+ if (!list_empty(&accel_dev->compression_list) &&
+ test_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status))
+ qat_comp_algs_unregister();
+ clear_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status);
+
+ list_for_each(list_itr, &service_table) {
+ service = list_entry(list_itr, struct service_hndl, list);
+ if (!test_bit(accel_dev->accel_id, service->start_status))
+ continue;
+ ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
+ if (!ret) {
+ clear_bit(accel_dev->accel_id, service->start_status);
+ } else if (ret == -EAGAIN) {
+ wait = true;
+ clear_bit(accel_dev->accel_id, service->start_status);
+ }
+ }
+
+ if (hw_data->stop_timer)
+ hw_data->stop_timer(accel_dev);
+
+ if (wait)
+ msleep(100);
+
+ if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) {
+ if (adf_ae_stop(accel_dev))
+ dev_err(&GET_DEV(accel_dev), "failed to stop AE\n");
+ else
+ clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
+ }
+}
+
+/**
+ * adf_dev_shutdown() - shutdown acceleration services and data structures
+ * @accel_dev: Pointer to acceleration device
+ *
+ * Cleanup the ring data structures and the admin comms and arbitration
+ * services.
+ */
+static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct service_hndl *service;
+ struct list_head *list_itr;
+
+ if (!hw_data) {
+ dev_err(&GET_DEV(accel_dev),
+ "QAT: Failed to shutdown device - hw_data not set\n");
+ return;
+ }
+
+ if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
+ adf_ae_fw_release(accel_dev);
+ clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
+ }
+
+ if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
+ if (adf_ae_shutdown(accel_dev))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to shutdown Accel Engine\n");
+ else
+ clear_bit(ADF_STATUS_AE_INITIALISED,
+ &accel_dev->status);
+ }
+
+ list_for_each(list_itr, &service_table) {
+ service = list_entry(list_itr, struct service_hndl, list);
+ if (!test_bit(accel_dev->accel_id, service->init_status))
+ continue;
+ if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to shutdown service %s\n",
+ service->name);
+ else
+ clear_bit(accel_dev->accel_id, service->init_status);
+ }
+
+ adf_heartbeat_shutdown(accel_dev);
+
+ hw_data->disable_iov(accel_dev);
+
+ if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
+ hw_data->free_irq(accel_dev);
+ clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
+ }
+
+ /* Delete configuration only if not restarting */
+ if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
+ adf_cfg_del_all(accel_dev);
+
+ if (hw_data->exit_arb)
+ hw_data->exit_arb(accel_dev);
+
+ if (hw_data->exit_admin_comms)
+ hw_data->exit_admin_comms(accel_dev);
+
+ adf_cleanup_etr_data(accel_dev);
+ adf_dev_restore(accel_dev);
+}
+
+int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
+{
+ struct service_hndl *service;
+ struct list_head *list_itr;
+
+ list_for_each(list_itr, &service_table) {
+ service = list_entry(list_itr, struct service_hndl, list);
+ if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to restart service %s.\n",
+ service->name);
+ }
+ return 0;
+}
+
+int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
+{
+ struct service_hndl *service;
+ struct list_head *list_itr;
+
+ list_for_each(list_itr, &service_table) {
+ service = list_entry(list_itr, struct service_hndl, list);
+ if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to restart service %s.\n",
+ service->name);
+ }
+ return 0;
+}
+
+static int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
+{
+ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+ int ret;
+
+ ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED, services);
+
+ adf_dev_stop(accel_dev);
+ adf_dev_shutdown(accel_dev);
+
+ if (!ret) {
+ ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
+ if (ret)
+ return ret;
+
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED,
+ services, ADF_STR);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig)
+{
+ int ret = 0;
+
+ if (!accel_dev)
+ return -EINVAL;
+
+ mutex_lock(&accel_dev->state_lock);
+
+ if (reconfig) {
+ ret = adf_dev_shutdown_cache_cfg(accel_dev);
+ goto out;
+ }
+
+ adf_dev_stop(accel_dev);
+ adf_dev_shutdown(accel_dev);
+
+out:
+ mutex_unlock(&accel_dev->state_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_dev_down);
+
+int adf_dev_up(struct adf_accel_dev *accel_dev, bool config)
+{
+ int ret = 0;
+
+ if (!accel_dev)
+ return -EINVAL;
+
+ mutex_lock(&accel_dev->state_lock);
+
+ if (adf_dev_started(accel_dev)) {
+ dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already up\n",
+ accel_dev->accel_id);
+ ret = -EALREADY;
+ goto out;
+ }
+
+ if (config && GET_HW_DATA(accel_dev)->dev_config) {
+ ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
+ if (unlikely(ret))
+ goto out;
+ }
+
+ ret = adf_dev_init(accel_dev);
+ if (unlikely(ret))
+ goto out;
+
+ ret = adf_dev_start(accel_dev);
+
+out:
+ mutex_unlock(&accel_dev->state_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_dev_up);
+
+int adf_dev_restart(struct adf_accel_dev *accel_dev)
+{
+ int ret = 0;
+
+ if (!accel_dev)
+ return -EFAULT;
+
+ adf_dev_down(accel_dev, false);
+
+ ret = adf_dev_up(accel_dev, false);
+ /* If the device is already up, return success */
+ if (ret == -EALREADY)
+ return 0;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_dev_restart);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c
new file mode 100644
index 0000000000..2aba194a7c
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "adf_cfg_common.h"
+#include "adf_transport_access_macros.h"
+#include "adf_transport_internal.h"
+
+#define ADF_MAX_NUM_VFS 32
+static struct workqueue_struct *adf_misc_wq;
+
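+/*
+ * One MSI-X vector is allocated per ring bank plus a single extra vector
+ * for the AE cluster interrupt, which carries the VF2PF and power
+ * management notifications handled in adf_msix_isr_ae().
+ */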
+static int adf_enable_msix(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 msix_num_entries = hw_data->num_banks + 1;
+ int ret;
+
+ if (hw_data->set_msix_rttable)
+ hw_data->set_msix_rttable(accel_dev);
+
+ ret = pci_alloc_irq_vectors(pci_dev_info->pci_dev, msix_num_entries,
+ msix_num_entries, PCI_IRQ_MSIX);
+ if (unlikely(ret < 0)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to allocate %d MSI-X vectors\n",
+ msix_num_entries);
+ return ret;
+ }
+ return 0;
+}
+
+static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
+{
+ pci_free_irq_vectors(pci_dev_info->pci_dev);
+}
+
+static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
+{
+ struct adf_etr_bank_data *bank = bank_ptr;
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
+
+ csr_ops->write_csr_int_flag_and_col(bank->csr_addr, bank->bank_number,
+ 0);
+ tasklet_hi_schedule(&bank->resp_handler);
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_PCI_IOV
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
+ GET_PFVF_OPS(accel_dev)->enable_vf2pf_interrupts(pmisc_addr, vf_mask);
+ spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
+}
+
+void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
+ GET_PFVF_OPS(accel_dev)->disable_all_vf2pf_interrupts(pmisc_addr);
+ spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
+}
+
+static u32 adf_disable_pending_vf2pf_interrupts(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ u32 pending;
+
+ spin_lock(&accel_dev->pf.vf2pf_ints_lock);
+ pending = GET_PFVF_OPS(accel_dev)->disable_pending_vf2pf_interrupts(pmisc_addr);
+ spin_unlock(&accel_dev->pf.vf2pf_ints_lock);
+
+ return pending;
+}
+
+static bool adf_handle_vf2pf_int(struct adf_accel_dev *accel_dev)
+{
+ bool irq_handled = false;
+ unsigned long vf_mask;
+
+ /* Get the interrupt sources triggered by VFs, except for those already disabled */
+ vf_mask = adf_disable_pending_vf2pf_interrupts(accel_dev);
+ if (vf_mask) {
+ struct adf_accel_vf_info *vf_info;
+ int i;
+
+ /*
+ * Handle VF2PF interrupt unless the VF is malicious and
+ * is attempting to flood the host OS with VF2PF interrupts.
+ */
+ for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
+ vf_info = accel_dev->pf.vf_info + i;
+
+ if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
+ dev_info(&GET_DEV(accel_dev),
+ "Too many ints from VF%d\n",
+ vf_info->vf_nr);
+ continue;
+ }
+
+ adf_schedule_vf2pf_handler(vf_info);
+ irq_handled = true;
+ }
+ }
+ return irq_handled;
+}
+#endif /* CONFIG_PCI_IOV */
+
+static bool adf_handle_pm_int(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+
+ if (hw_data->handle_pm_interrupt &&
+ hw_data->handle_pm_interrupt(accel_dev))
+ return true;
+
+ return false;
+}
+
+static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
+{
+ struct adf_accel_dev *accel_dev = dev_ptr;
+
+#ifdef CONFIG_PCI_IOV
+ /* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
+ if (accel_dev->pf.vf_info && adf_handle_vf2pf_int(accel_dev))
+ return IRQ_HANDLED;
+#endif /* CONFIG_PCI_IOV */
+
+ if (adf_handle_pm_int(accel_dev))
+ return IRQ_HANDLED;
+
+ dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
+ accel_dev->accel_id);
+
+ return IRQ_NONE;
+}
+
+static void adf_free_irqs(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
+ struct adf_etr_data *etr_data = accel_dev->transport;
+ int clust_irq = hw_data->num_banks;
+ int irq, i = 0;
+
+ if (pci_dev_info->msix_entries.num_entries > 1) {
+ for (i = 0; i < hw_data->num_banks; i++) {
+ if (irqs[i].enabled) {
+ irq = pci_irq_vector(pci_dev_info->pci_dev, i);
+ irq_set_affinity_hint(irq, NULL);
+ free_irq(irq, &etr_data->banks[i]);
+ }
+ }
+ }
+
+ if (irqs[i].enabled) {
+ irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
+ free_irq(irq, accel_dev);
+ }
+}
+
+static int adf_request_irqs(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
+ struct adf_etr_data *etr_data = accel_dev->transport;
+ int clust_irq = hw_data->num_banks;
+ int ret, irq, i = 0;
+ char *name;
+
+ /* Request msix irq for all banks unless SR-IOV enabled */
+ if (!accel_dev->pf.vf_info) {
+ for (i = 0; i < hw_data->num_banks; i++) {
+ struct adf_etr_bank_data *bank = &etr_data->banks[i];
+ unsigned int cpu, cpus = num_online_cpus();
+
+ name = irqs[i].name;
+ snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
+ "qat%d-bundle%d", accel_dev->accel_id, i);
+ irq = pci_irq_vector(pci_dev_info->pci_dev, i);
+ if (unlikely(irq < 0)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to get IRQ number of device vector %d - %s\n",
+ i, name);
+ ret = irq;
+ goto err;
+ }
+ ret = request_irq(irq, adf_msix_isr_bundle, 0,
+ &name[0], bank);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to allocate IRQ %d for %s\n",
+ irq, name);
+ goto err;
+ }
+
+ cpu = ((accel_dev->accel_id * hw_data->num_banks) +
+ i) % cpus;
+ irq_set_affinity_hint(irq, get_cpu_mask(cpu));
+ irqs[i].enabled = true;
+ }
+ }
+
+ /* Request msix irq for AE */
+ name = irqs[i].name;
+ snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
+ "qat%d-ae-cluster", accel_dev->accel_id);
+ irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
+ if (unlikely(irq < 0)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to get IRQ number of device vector %d - %s\n",
+ i, name);
+ ret = irq;
+ goto err;
+ }
+ ret = request_irq(irq, adf_msix_isr_ae, 0, &name[0], accel_dev);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to allocate IRQ %d for %s\n", irq, name);
+ goto err;
+ }
+ irqs[i].enabled = true;
+ return ret;
+err:
+ adf_free_irqs(accel_dev);
+ return ret;
+}
+
+static int adf_isr_alloc_msix_vectors_data(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 msix_num_entries = 1;
+ struct adf_irq *irqs;
+
+ /* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
+ if (!accel_dev->pf.vf_info)
+ msix_num_entries += hw_data->num_banks;
+
+ irqs = kzalloc_node(msix_num_entries * sizeof(*irqs),
+ GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
+ if (!irqs)
+ return -ENOMEM;
+
+ accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
+ accel_dev->accel_pci_dev.msix_entries.irqs = irqs;
+ return 0;
+}
+
+static void adf_isr_free_msix_vectors_data(struct adf_accel_dev *accel_dev)
+{
+ kfree(accel_dev->accel_pci_dev.msix_entries.irqs);
+ accel_dev->accel_pci_dev.msix_entries.irqs = NULL;
+}
+
+static int adf_setup_bh(struct adf_accel_dev *accel_dev)
+{
+ struct adf_etr_data *priv_data = accel_dev->transport;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ int i;
+
+ for (i = 0; i < hw_data->num_banks; i++)
+ tasklet_init(&priv_data->banks[i].resp_handler,
+ adf_response_handler,
+ (unsigned long)&priv_data->banks[i]);
+ return 0;
+}
+
+static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
+{
+ struct adf_etr_data *priv_data = accel_dev->transport;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ int i;
+
+ for (i = 0; i < hw_data->num_banks; i++) {
+ tasklet_disable(&priv_data->banks[i].resp_handler);
+ tasklet_kill(&priv_data->banks[i].resp_handler);
+ }
+}
+
+/**
+ * adf_isr_resource_free() - Free IRQ for acceleration device
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function frees interrupts for acceleration device.
+ */
+void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
+{
+ adf_free_irqs(accel_dev);
+ adf_cleanup_bh(accel_dev);
+ adf_disable_msix(&accel_dev->accel_pci_dev);
+ adf_isr_free_msix_vectors_data(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_isr_resource_free);
+
+/**
+ * adf_isr_resource_alloc() - Allocate IRQ for acceleration device
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function allocates interrupts for acceleration device.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
+{
+ int ret;
+
+ ret = adf_isr_alloc_msix_vectors_data(accel_dev);
+ if (ret)
+ goto err_out;
+
+ ret = adf_enable_msix(accel_dev);
+ if (ret)
+ goto err_free_msix_table;
+
+ ret = adf_setup_bh(accel_dev);
+ if (ret)
+ goto err_disable_msix;
+
+ ret = adf_request_irqs(accel_dev);
+ if (ret)
+ goto err_cleanup_bh;
+
+ return 0;
+
+err_cleanup_bh:
+ adf_cleanup_bh(accel_dev);
+
+err_disable_msix:
+ adf_disable_msix(&accel_dev->accel_pci_dev);
+
+err_free_msix_table:
+ adf_isr_free_msix_vectors_data(accel_dev);
+
+err_out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
+
+/**
+ * adf_init_misc_wq() - Init misc workqueue
+ *
+ * Function initializes the 'qat_misc_wq' workqueue for general-purpose work.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int __init adf_init_misc_wq(void)
+{
+ adf_misc_wq = alloc_workqueue("qat_misc_wq", WQ_MEM_RECLAIM, 0);
+
+ return !adf_misc_wq ? -ENOMEM : 0;
+}
+
+void adf_exit_misc_wq(void)
+{
+ if (adf_misc_wq)
+ destroy_workqueue(adf_misc_wq);
+
+ adf_misc_wq = NULL;
+}
+
+bool adf_misc_wq_queue_work(struct work_struct *work)
+{
+ return queue_work(adf_misc_wq, work);
+}
+
+bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
+ unsigned long delay)
+{
+ return queue_delayed_work(adf_misc_wq, work, delay);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h
new file mode 100644
index 0000000000..204a424389
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#ifndef ADF_PFVF_MSG_H
+#define ADF_PFVF_MSG_H
+
+#include <linux/bits.h>
+
+/*
+ * PF<->VF Gen2 Messaging format
+ *
+ * The PF has an array of 32-bit PF2VF registers, one for each VF. The
+ * PF can access all these registers while each VF can access only the one
+ * register associated with that particular VF.
+ *
+ * The register functionally is split into two parts:
+ * The bottom half is for PF->VF messages. In particular when the first
+ * bit of this register (bit 0) gets set an interrupt will be triggered
+ * in the respective VF.
+ * The top half is for VF->PF messages. In particular when the first bit
+ * of this half of register (bit 16) gets set an interrupt will be triggered
+ * in the PF.
+ *
+ * The remaining bits within this register are available to encode messages
+ * and implement a collision control mechanism to prevent concurrent use of
+ * the PF2VF register by both the PF and VF.
+ *
+ * 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
+ * _______________________________________________
+ * | | | | | | | | | | | | | | | | |
+ * +-----------------------------------------------+
+ * \___________________________/ \_________/ ^ ^
+ * ^ ^ | |
+ * | | | VF2PF Int
+ * | | Message Origin
+ * | Message Type
+ * Message-specific Data/Reserved
+ *
+ * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+ * _______________________________________________
+ * | | | | | | | | | | | | | | | | |
+ * +-----------------------------------------------+
+ * \___________________________/ \_________/ ^ ^
+ * ^ ^ | |
+ * | | | PF2VF Int
+ * | | Message Origin
+ * | Message Type
+ * Message-specific Data/Reserved
+ *
+ * Message Origin (Should always be 1)
+ * A legacy out-of-tree QAT driver allowed for a set of messages not supported
+ * by this driver; these had a Msg Origin of 0 and are ignored by this driver.
+ *
+ * When a PF or VF attempts to send a message in the lower or upper 16 bits,
+ * respectively, the other 16 bits are written to first with a defined
+ * IN_USE_BY pattern as part of a collision control scheme (see function
+ * adf_gen2_pfvf_send() in adf_pf2vf_msg.c).
+ *
+ *
+ * PF<->VF Gen4 Messaging format
+ *
+ * Similarly to the gen2 messaging format, 32-bit long registers are used for
+ * communication between PF and VFs. However, each VF and PF share a pair of
+ * 32-bit registers to avoid collisions: one for PF to VF messages and one
+ * for VF to PF messages.
+ *
+ * Both the Interrupt bit and the Message Origin bit retain the same position
+ * and meaning, although non-system messages are now deprecated and not
+ * expected.
+ *
+ * 31 30 9 8 7 6 5 4 3 2 1 0
+ * _______________________________________________
+ * | | | . . . | | | | | | | | | | |
+ * +-----------------------------------------------+
+ * \_____________________/ \_______________/ ^ ^
+ * ^ ^ | |
+ * | | | PF/VF Int
+ * | | Message Origin
+ * | Message Type
+ * Message-specific Data/Reserved
+ *
+ * For both formats, the message reception is acknowledged by lowering the
+ * interrupt bit on the register where the message was sent.
+ */
+
+/* PFVF message common bits */
+#define ADF_PFVF_INT BIT(0)
+#define ADF_PFVF_MSGORIGIN_SYSTEM BIT(1)
+
+/* Different generations have different CSR layouts, use this struct
+ * to abstract these differences away
+ */
+struct pfvf_message {
+ u8 type;
+ u32 data;
+};
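+
+/*
+ * Usage sketch (how the type and data fields are packed into the CSR is
+ * generation specific and only assumed here; see the per-generation
+ * pfvf_ops): callers build this generation-agnostic form, e.g.
+ *
+ *	struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_INIT };
+ *
+ * and the send/recv ops translate it to and from the register layouts
+ * described above, setting ADF_PFVF_MSGORIGIN_SYSTEM and the interrupt
+ * bit on send.
+ */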
+
+/* PF->VF messages */
+enum pf2vf_msgtype {
+ ADF_PF2VF_MSGTYPE_RESTARTING = 0x01,
+ ADF_PF2VF_MSGTYPE_VERSION_RESP = 0x02,
+ ADF_PF2VF_MSGTYPE_BLKMSG_RESP = 0x03,
+/* Values from 0x10 are Gen4 specific; the message type field is only 4 bits wide on Gen2 devices. */
+ ADF_PF2VF_MSGTYPE_RP_RESET_RESP = 0x10,
+};
+
+/* VF->PF messages */
+enum vf2pf_msgtype {
+ ADF_VF2PF_MSGTYPE_INIT = 0x03,
+ ADF_VF2PF_MSGTYPE_SHUTDOWN = 0x04,
+ ADF_VF2PF_MSGTYPE_VERSION_REQ = 0x05,
+ ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ = 0x06,
+ ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ = 0x07,
+ ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ = 0x08,
+ ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ = 0x09,
+/* Values from 0x10 are Gen4 specific; the message type field is only 4 bits wide on Gen2 devices. */
+ ADF_VF2PF_MSGTYPE_RP_RESET = 0x10,
+};
+
+/* VF/PF compatibility version. */
+enum pfvf_compatibility_version {
+ /* Support for extended capabilities */
+ ADF_PFVF_COMPAT_CAPABILITIES = 0x02,
+ /* In-use pattern cleared by receiver */
+ ADF_PFVF_COMPAT_FAST_ACK = 0x03,
+ /* Ring to service mapping support for non-standard mappings */
+ ADF_PFVF_COMPAT_RING_TO_SVC_MAP = 0x04,
+ /* Reference to the latest version */
+ ADF_PFVF_COMPAT_THIS_VERSION = 0x04,
+};
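+
+/*
+ * For example (mirroring the checks in adf_pfvf_pf_proto.c): a VF reporting
+ * compat version 3 to this PF (version 4) is marked ADF_PF2VF_VF_COMPATIBLE,
+ * a VF reporting 0 is ADF_PF2VF_VF_INCOMPATIBLE and a VF reporting a version
+ * newer than 4 is answered with ADF_PF2VF_VF_COMPAT_UNKNOWN.
+ */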
+
+/* PF->VF Version Response */
+#define ADF_PF2VF_VERSION_RESP_VERS_MASK GENMASK(7, 0)
+#define ADF_PF2VF_VERSION_RESP_RESULT_MASK GENMASK(9, 8)
+
+enum pf2vf_compat_response {
+ ADF_PF2VF_VF_COMPATIBLE = 0x01,
+ ADF_PF2VF_VF_INCOMPATIBLE = 0x02,
+ ADF_PF2VF_VF_COMPAT_UNKNOWN = 0x03,
+};
+
+enum ring_reset_result {
+ RPRESET_SUCCESS = 0x00,
+ RPRESET_NOT_SUPPORTED = 0x01,
+ RPRESET_INVAL_BANK = 0x02,
+ RPRESET_TIMEOUT = 0x03,
+};
+
+#define ADF_VF2PF_RNG_RESET_RP_MASK GENMASK(1, 0)
+#define ADF_VF2PF_RNG_RESET_RSVD_MASK GENMASK(25, 2)
+
+/* PF->VF Block Responses */
+#define ADF_PF2VF_BLKMSG_RESP_TYPE_MASK GENMASK(1, 0)
+#define ADF_PF2VF_BLKMSG_RESP_DATA_MASK GENMASK(9, 2)
+
+enum pf2vf_blkmsg_resp_type {
+ ADF_PF2VF_BLKMSG_RESP_TYPE_DATA = 0x00,
+ ADF_PF2VF_BLKMSG_RESP_TYPE_CRC = 0x01,
+ ADF_PF2VF_BLKMSG_RESP_TYPE_ERROR = 0x02,
+};
+
+/* PF->VF Block Error Code */
+enum pf2vf_blkmsg_error {
+ ADF_PF2VF_INVALID_BLOCK_TYPE = 0x00,
+ ADF_PF2VF_INVALID_BYTE_NUM_REQ = 0x01,
+ ADF_PF2VF_PAYLOAD_TRUNCATED = 0x02,
+ ADF_PF2VF_UNSPECIFIED_ERROR = 0x03,
+};
+
+/* VF->PF Block Requests */
+#define ADF_VF2PF_LARGE_BLOCK_TYPE_MASK GENMASK(1, 0)
+#define ADF_VF2PF_LARGE_BLOCK_BYTE_MASK GENMASK(8, 2)
+#define ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK GENMASK(2, 0)
+#define ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK GENMASK(8, 3)
+#define ADF_VF2PF_SMALL_BLOCK_TYPE_MASK GENMASK(3, 0)
+#define ADF_VF2PF_SMALL_BLOCK_BYTE_MASK GENMASK(8, 4)
+#define ADF_VF2PF_BLOCK_CRC_REQ_MASK BIT(9)
+
+/* VF->PF Block Request Types
+ * 0..15 - 32 byte message
+ * 16..23 - 64 byte message
+ * 24..27 - 128 byte message
+ */
+enum vf2pf_blkmsg_req_type {
+ ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY = 0x02,
+ ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP = 0x03,
+};
+
+#define ADF_VF2PF_SMALL_BLOCK_TYPE_MAX \
+ (FIELD_MAX(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK))
+
+#define ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX \
+ (FIELD_MAX(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK) + \
+ ADF_VF2PF_SMALL_BLOCK_TYPE_MAX + 1)
+
+#define ADF_VF2PF_LARGE_BLOCK_TYPE_MAX \
+ (FIELD_MAX(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK) + \
+ ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX)
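+
+/*
+ * How the three request namespaces chain together (derived from the decode
+ * in adf_pfvf_pf_proto.c, not an extra definition): a SMALL request carries
+ * the block type directly (0..15), a MEDIUM request is offset by
+ * ADF_VF2PF_SMALL_BLOCK_TYPE_MAX + 1 (type field 1 selects block 17) and a
+ * LARGE request is offset by ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX + 1 (24..27).
+ */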
+
+#define ADF_VF2PF_SMALL_BLOCK_BYTE_MAX \
+ FIELD_MAX(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK)
+
+#define ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX \
+ FIELD_MAX(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK)
+
+#define ADF_VF2PF_LARGE_BLOCK_BYTE_MAX \
+ FIELD_MAX(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK)
+
+struct pfvf_blkmsg_header {
+ u8 version;
+ u8 payload_size;
+} __packed;
+
+#define ADF_PFVF_BLKMSG_HEADER_SIZE (sizeof(struct pfvf_blkmsg_header))
+#define ADF_PFVF_BLKMSG_PAYLOAD_SIZE(blkmsg) (sizeof(blkmsg) - \
+ ADF_PFVF_BLKMSG_HEADER_SIZE)
+#define ADF_PFVF_BLKMSG_MSG_SIZE(blkmsg) (ADF_PFVF_BLKMSG_HEADER_SIZE + \
+ (blkmsg)->hdr.payload_size)
+#define ADF_PFVF_BLKMSG_MSG_MAX_SIZE 128
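+
+/*
+ * Size check example using the structures defined further down: for
+ * struct capabilities_v2 (a 2-byte header plus two u32 fields, __packed)
+ * ADF_PFVF_BLKMSG_PAYLOAD_SIZE() evaluates to 8 and the full message served
+ * byte by byte to the VF is 10 bytes long.
+ */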
+
+/* PF->VF Block message header bytes */
+#define ADF_PFVF_BLKMSG_VER_BYTE 0
+#define ADF_PFVF_BLKMSG_LEN_BYTE 1
+
+/* PF/VF Capabilities message values */
+enum blkmsg_capabilities_versions {
+ ADF_PFVF_CAPABILITIES_V1_VERSION = 0x01,
+ ADF_PFVF_CAPABILITIES_V2_VERSION = 0x02,
+ ADF_PFVF_CAPABILITIES_V3_VERSION = 0x03,
+};
+
+struct capabilities_v1 {
+ struct pfvf_blkmsg_header hdr;
+ u32 ext_dc_caps;
+} __packed;
+
+struct capabilities_v2 {
+ struct pfvf_blkmsg_header hdr;
+ u32 ext_dc_caps;
+ u32 capabilities;
+} __packed;
+
+struct capabilities_v3 {
+ struct pfvf_blkmsg_header hdr;
+ u32 ext_dc_caps;
+ u32 capabilities;
+ u32 frequency;
+} __packed;
+
+/* PF/VF Ring to service mapping values */
+enum blkmsg_ring_to_svc_versions {
+ ADF_PFVF_RING_TO_SVC_VERSION = 0x01,
+};
+
+struct ring_to_svc_map_v1 {
+ struct pfvf_blkmsg_header hdr;
+ u16 map;
+} __packed;
+
+#endif /* ADF_PFVF_MSG_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c
new file mode 100644
index 0000000000..14c069f0d7
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <linux/pci.h>
+#include "adf_accel_devices.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_pf_msg.h"
+#include "adf_pfvf_pf_proto.h"
+
+void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_vf_info *vf;
+ struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_RESTARTING };
+ int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+
+ for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
+ if (vf->init && adf_send_pf2vf_msg(accel_dev, i, msg))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send restarting msg to VF%d\n", i);
+ }
+}
+
+int adf_pf_capabilities_msg_provider(struct adf_accel_dev *accel_dev,
+ u8 *buffer, u8 compat)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct capabilities_v2 caps_msg;
+
+ caps_msg.ext_dc_caps = hw_data->extended_dc_capabilities;
+ caps_msg.capabilities = hw_data->accel_capabilities_mask;
+
+ caps_msg.hdr.version = ADF_PFVF_CAPABILITIES_V2_VERSION;
+ caps_msg.hdr.payload_size =
+ ADF_PFVF_BLKMSG_PAYLOAD_SIZE(struct capabilities_v2);
+
+ memcpy(buffer, &caps_msg, sizeof(caps_msg));
+
+ return 0;
+}
+
+int adf_pf_ring_to_svc_msg_provider(struct adf_accel_dev *accel_dev,
+ u8 *buffer, u8 compat)
+{
+ struct ring_to_svc_map_v1 rts_map_msg;
+
+ rts_map_msg.map = accel_dev->hw_device->ring_to_svc_map;
+ rts_map_msg.hdr.version = ADF_PFVF_RING_TO_SVC_VERSION;
+ rts_map_msg.hdr.payload_size = ADF_PFVF_BLKMSG_PAYLOAD_SIZE(rts_map_msg);
+
+ memcpy(buffer, &rts_map_msg, sizeof(rts_map_msg));
+
+ return 0;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h
new file mode 100644
index 0000000000..e8982d1ac8
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_PFVF_PF_MSG_H
+#define ADF_PFVF_PF_MSG_H
+
+#include "adf_accel_devices.h"
+
+void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
+
+typedef int (*adf_pf2vf_blkmsg_provider)(struct adf_accel_dev *accel_dev,
+ u8 *buffer, u8 compat);
+
+int adf_pf_capabilities_msg_provider(struct adf_accel_dev *accel_dev,
+ u8 *buffer, u8 compat);
+int adf_pf_ring_to_svc_msg_provider(struct adf_accel_dev *accel_dev,
+ u8 *buffer, u8 compat);
+
+#endif /* ADF_PFVF_PF_MSG_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
new file mode 100644
index 0000000000..388e58bcbc
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <linux/bitfield.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_pf_msg.h"
+#include "adf_pfvf_pf_proto.h"
+#include "adf_pfvf_utils.h"
+
+typedef u8 (*pf2vf_blkmsg_data_getter_fn)(u8 const *blkmsg, u8 byte);
+
+static const adf_pf2vf_blkmsg_provider pf2vf_blkmsg_providers[] = {
+ NULL, /* no message type defined for value 0 */
+ NULL, /* no message type defined for value 1 */
+ adf_pf_capabilities_msg_provider, /* ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY */
+ adf_pf_ring_to_svc_msg_provider, /* ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP */
+};
+
+/**
+ * adf_send_pf2vf_msg() - send PF to VF message
+ * @accel_dev: Pointer to acceleration device
+ * @vf_nr: VF number to which the message will be sent
+ * @msg: Message to send
+ *
+ * This function allows the PF to send a message to a specific VF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_pf2vf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, struct pfvf_message msg)
+{
+ struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
+ u32 pfvf_offset = pfvf_ops->get_pf2vf_offset(vf_nr);
+
+ return pfvf_ops->send_msg(accel_dev, msg, pfvf_offset,
+ &accel_dev->pf.vf_info[vf_nr].pf2vf_lock);
+}
+
+/**
+ * adf_recv_vf2pf_msg() - receive a VF to PF message
+ * @accel_dev: Pointer to acceleration device
+ * @vf_nr: Number of the VF from where the message will be received
+ *
+ * This function allows the PF to receive a message from a specific VF.
+ *
+ * Return: a valid message on success, zero otherwise.
+ */
+static struct pfvf_message adf_recv_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
+ u32 pfvf_offset = pfvf_ops->get_vf2pf_offset(vf_nr);
+
+ return pfvf_ops->recv_msg(accel_dev, pfvf_offset, vf_info->vf_compat_ver);
+}
+
+static adf_pf2vf_blkmsg_provider get_blkmsg_response_provider(u8 type)
+{
+ if (type >= ARRAY_SIZE(pf2vf_blkmsg_providers))
+ return NULL;
+
+ return pf2vf_blkmsg_providers[type];
+}
+
+/* Byte pf2vf_blkmsg_data_getter_fn callback */
+static u8 adf_pf2vf_blkmsg_get_byte(u8 const *blkmsg, u8 index)
+{
+ return blkmsg[index];
+}
+
+/* CRC pf2vf_blkmsg_data_getter_fn callback */
+static u8 adf_pf2vf_blkmsg_get_crc(u8 const *blkmsg, u8 count)
+{
+ /* count is 0-based, turn it into a length */
+ return adf_pfvf_calc_blkmsg_crc(blkmsg, count + 1);
+}
+
+static int adf_pf2vf_blkmsg_get_data(struct adf_accel_vf_info *vf_info,
+ u8 type, u8 byte, u8 max_size, u8 *data,
+ pf2vf_blkmsg_data_getter_fn data_getter)
+{
+ u8 blkmsg[ADF_PFVF_BLKMSG_MSG_MAX_SIZE] = { 0 };
+ struct adf_accel_dev *accel_dev = vf_info->accel_dev;
+ adf_pf2vf_blkmsg_provider provider;
+ u8 msg_size;
+
+ provider = get_blkmsg_response_provider(type);
+
+ if (unlikely(!provider)) {
+ pr_err("QAT: No registered provider for message %d\n", type);
+ *data = ADF_PF2VF_INVALID_BLOCK_TYPE;
+ return -EINVAL;
+ }
+
+ if (unlikely((*provider)(accel_dev, blkmsg, vf_info->vf_compat_ver))) {
+ pr_err("QAT: unknown error from provider for message %d\n", type);
+ *data = ADF_PF2VF_UNSPECIFIED_ERROR;
+ return -EINVAL;
+ }
+
+ msg_size = ADF_PFVF_BLKMSG_HEADER_SIZE + blkmsg[ADF_PFVF_BLKMSG_LEN_BYTE];
+
+ if (unlikely(msg_size >= max_size)) {
+ pr_err("QAT: Invalid size %d provided for message type %d\n",
+ msg_size, type);
+ *data = ADF_PF2VF_PAYLOAD_TRUNCATED;
+ return -EINVAL;
+ }
+
+ if (unlikely(byte >= msg_size)) {
+ pr_err("QAT: Out-of-bound byte number %d (msg size %d)\n",
+ byte, msg_size);
+ *data = ADF_PF2VF_INVALID_BYTE_NUM_REQ;
+ return -EINVAL;
+ }
+
+ *data = data_getter(blkmsg, byte);
+ return 0;
+}
+
+static struct pfvf_message handle_blkmsg_req(struct adf_accel_vf_info *vf_info,
+ struct pfvf_message req)
+{
+ u8 resp_type = ADF_PF2VF_BLKMSG_RESP_TYPE_ERROR;
+ struct pfvf_message resp = { 0 };
+ u8 resp_data = 0;
+ u8 blk_type;
+ u8 blk_byte;
+ u8 byte_max;
+
+ switch (req.type) {
+ case ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ:
+ blk_type = FIELD_GET(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK, req.data)
+ + ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX + 1;
+ blk_byte = FIELD_GET(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK, req.data);
+ byte_max = ADF_VF2PF_LARGE_BLOCK_BYTE_MAX;
+ break;
+ case ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ:
+ blk_type = FIELD_GET(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK, req.data)
+ + ADF_VF2PF_SMALL_BLOCK_TYPE_MAX + 1;
+ blk_byte = FIELD_GET(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK, req.data);
+ byte_max = ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX;
+ break;
+ case ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ:
+ blk_type = FIELD_GET(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK, req.data);
+ blk_byte = FIELD_GET(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK, req.data);
+ byte_max = ADF_VF2PF_SMALL_BLOCK_BYTE_MAX;
+ break;
+ }
+
+ /* Is this a request for CRC or data? */
+ if (FIELD_GET(ADF_VF2PF_BLOCK_CRC_REQ_MASK, req.data)) {
+ dev_dbg(&GET_DEV(vf_info->accel_dev),
+ "BlockMsg of type %d for CRC over %d bytes received from VF%d\n",
+ blk_type, blk_byte + 1, vf_info->vf_nr);
+
+ if (!adf_pf2vf_blkmsg_get_data(vf_info, blk_type, blk_byte,
+ byte_max, &resp_data,
+ adf_pf2vf_blkmsg_get_crc))
+ resp_type = ADF_PF2VF_BLKMSG_RESP_TYPE_CRC;
+ } else {
+ dev_dbg(&GET_DEV(vf_info->accel_dev),
+ "BlockMsg of type %d for data byte %d received from VF%d\n",
+ blk_type, blk_byte, vf_info->vf_nr);
+
+ if (!adf_pf2vf_blkmsg_get_data(vf_info, blk_type, blk_byte,
+ byte_max, &resp_data,
+ adf_pf2vf_blkmsg_get_byte))
+ resp_type = ADF_PF2VF_BLKMSG_RESP_TYPE_DATA;
+ }
+
+ resp.type = ADF_PF2VF_MSGTYPE_BLKMSG_RESP;
+ resp.data = FIELD_PREP(ADF_PF2VF_BLKMSG_RESP_TYPE_MASK, resp_type) |
+ FIELD_PREP(ADF_PF2VF_BLKMSG_RESP_DATA_MASK, resp_data);
+
+ return resp;
+}
+
+static struct pfvf_message handle_rp_reset_req(struct adf_accel_dev *accel_dev, u8 vf_nr,
+ struct pfvf_message req)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct pfvf_message resp = {
+ .type = ADF_PF2VF_MSGTYPE_RP_RESET_RESP,
+ .data = RPRESET_SUCCESS
+ };
+ u32 bank_number;
+ u32 rsvd_field;
+
+ bank_number = FIELD_GET(ADF_VF2PF_RNG_RESET_RP_MASK, req.data);
+ rsvd_field = FIELD_GET(ADF_VF2PF_RNG_RESET_RSVD_MASK, req.data);
+
+ dev_dbg(&GET_DEV(accel_dev),
+ "Ring Pair Reset Message received from VF%d for bank 0x%x\n",
+ vf_nr, bank_number);
+
+ if (!hw_data->ring_pair_reset || rsvd_field) {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Ring Pair Reset for VF%d is not supported\n", vf_nr);
+ resp.data = RPRESET_NOT_SUPPORTED;
+ goto out;
+ }
+
+ if (bank_number >= hw_data->num_banks_per_vf) {
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid bank number (0x%x) from VF%d for Ring Reset\n",
+ bank_number, vf_nr);
+ resp.data = RPRESET_INVAL_BANK;
+ goto out;
+ }
+
+ /* Convert the VF provided value to PF bank number */
+ bank_number = vf_nr * hw_data->num_banks_per_vf + bank_number;
+ if (hw_data->ring_pair_reset(accel_dev, bank_number)) {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Ring pair reset for VF%d failure\n", vf_nr);
+ resp.data = RPRESET_TIMEOUT;
+ goto out;
+ }
+
+ dev_dbg(&GET_DEV(accel_dev),
+ "Ring pair reset for VF%d successfully\n", vf_nr);
+
+out:
+ return resp;
+}
+
+static int adf_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr,
+ struct pfvf_message msg, struct pfvf_message *resp)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+
+ switch (msg.type) {
+ case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
+ {
+ u8 vf_compat_ver = msg.data;
+ u8 compat;
+
+ dev_dbg(&GET_DEV(accel_dev),
+ "VersionRequest received from VF%d (vers %d) to PF (vers %d)\n",
+ vf_nr, vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
+
+ if (vf_compat_ver == 0)
+ compat = ADF_PF2VF_VF_INCOMPATIBLE;
+ else if (vf_compat_ver <= ADF_PFVF_COMPAT_THIS_VERSION)
+ compat = ADF_PF2VF_VF_COMPATIBLE;
+ else
+ compat = ADF_PF2VF_VF_COMPAT_UNKNOWN;
+
+ vf_info->vf_compat_ver = vf_compat_ver;
+
+ resp->type = ADF_PF2VF_MSGTYPE_VERSION_RESP;
+ resp->data = FIELD_PREP(ADF_PF2VF_VERSION_RESP_VERS_MASK,
+ ADF_PFVF_COMPAT_THIS_VERSION) |
+ FIELD_PREP(ADF_PF2VF_VERSION_RESP_RESULT_MASK, compat);
+ }
+ break;
+ case ADF_VF2PF_MSGTYPE_VERSION_REQ:
+ {
+ u8 compat;
+
+ dev_dbg(&GET_DEV(accel_dev),
+ "Legacy VersionRequest received from VF%d to PF (vers 1.1)\n",
+ vf_nr);
+
+ /* legacy driver, VF compat_ver is 0 */
+ vf_info->vf_compat_ver = 0;
+
+ /* PF always newer than legacy VF */
+ compat = ADF_PF2VF_VF_COMPATIBLE;
+
+ /* Set legacy major and minor version to the latest, 1.1 */
+ resp->type = ADF_PF2VF_MSGTYPE_VERSION_RESP;
+ resp->data = FIELD_PREP(ADF_PF2VF_VERSION_RESP_VERS_MASK, 0x11) |
+ FIELD_PREP(ADF_PF2VF_VERSION_RESP_RESULT_MASK, compat);
+ }
+ break;
+ case ADF_VF2PF_MSGTYPE_INIT:
+ {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Init message received from VF%d\n", vf_nr);
+ vf_info->init = true;
+ }
+ break;
+ case ADF_VF2PF_MSGTYPE_SHUTDOWN:
+ {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Shutdown message received from VF%d\n", vf_nr);
+ vf_info->init = false;
+ }
+ break;
+ case ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ:
+ case ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ:
+ case ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ:
+ *resp = handle_blkmsg_req(vf_info, msg);
+ break;
+ case ADF_VF2PF_MSGTYPE_RP_RESET:
+ *resp = handle_rp_reset_req(accel_dev, vf_nr, msg);
+ break;
+ default:
+ dev_dbg(&GET_DEV(accel_dev),
+ "Unknown message from VF%d (type 0x%.4x, data: 0x%.4x)\n",
+ vf_nr, msg.type, msg.data);
+ return -ENOMSG;
+ }
+
+ return 0;
+}
+
+bool adf_recv_and_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct pfvf_message req;
+ struct pfvf_message resp = {0};
+
+ req = adf_recv_vf2pf_msg(accel_dev, vf_nr);
+ if (!req.type) /* Legacy or no message */
+ return true;
+
+ if (adf_handle_vf2pf_msg(accel_dev, vf_nr, req, &resp))
+ return false;
+
+ if (resp.type && adf_send_pf2vf_msg(accel_dev, vf_nr, resp))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send response to VF%d\n", vf_nr);
+
+ return true;
+}
+
+/**
+ * adf_enable_pf2vf_comms() - Function enables communication from PF to VF
+ *
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * This function carries out the necessary steps to setup and start the PFVF
+ * communication channel, if any.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
+{
+ adf_pfvf_crc_init();
+ spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_enable_pf2vf_comms);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.h
new file mode 100644
index 0000000000..165d266d02
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_PFVF_PF_PROTO_H
+#define ADF_PFVF_PF_PROTO_H
+
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+
+int adf_send_pf2vf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, struct pfvf_message msg);
+
+int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_PFVF_PF_PROTO_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.c
new file mode 100644
index 0000000000..c5f6d77d4b
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2021 Intel Corporation */
+#include <linux/crc8.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_utils.h"
+
+/* CRC Calculation */
+DECLARE_CRC8_TABLE(pfvf_crc8_table);
+#define ADF_PFVF_CRC8_POLYNOMIAL 0x97
+
+void adf_pfvf_crc_init(void)
+{
+ crc8_populate_msb(pfvf_crc8_table, ADF_PFVF_CRC8_POLYNOMIAL);
+}
+
+u8 adf_pfvf_calc_blkmsg_crc(u8 const *buf, u8 buf_len)
+{
+ return crc8(pfvf_crc8_table, buf, buf_len, CRC8_INIT_VALUE);
+}
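+
+/*
+ * Both ends must populate the CRC table once, via adf_pfvf_crc_init(), before
+ * computing block message CRCs. Usage sketch (mirrors what
+ * adf_pf2vf_blkmsg_get_crc() and adf_send_vf2pf_blkmsg_req() do):
+ *
+ *	adf_pfvf_crc_init();
+ *	crc = adf_pfvf_calc_blkmsg_crc(blkmsg, msg_len);
+ *
+ * where 'blkmsg'/'msg_len' are the assembled block message and its length.
+ */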
+
+static bool set_value_on_csr_msg(struct adf_accel_dev *accel_dev, u32 *csr_msg,
+ u32 value, const struct pfvf_field_format *fmt)
+{
+ if (unlikely((value & fmt->mask) != value)) {
+ dev_err(&GET_DEV(accel_dev),
+ "PFVF message value 0x%X out of range, %u max allowed\n",
+ value, fmt->mask);
+ return false;
+ }
+
+ *csr_msg |= value << fmt->offset;
+
+ return true;
+}
+
+u32 adf_pfvf_csr_msg_of(struct adf_accel_dev *accel_dev,
+ struct pfvf_message msg,
+ const struct pfvf_csr_format *fmt)
+{
+ u32 csr_msg = 0;
+
+ if (!set_value_on_csr_msg(accel_dev, &csr_msg, msg.type, &fmt->type) ||
+ !set_value_on_csr_msg(accel_dev, &csr_msg, msg.data, &fmt->data))
+ return 0;
+
+ return csr_msg | ADF_PFVF_MSGORIGIN_SYSTEM;
+}
+
+struct pfvf_message adf_pfvf_message_of(struct adf_accel_dev *accel_dev, u32 csr_msg,
+ const struct pfvf_csr_format *fmt)
+{
+ struct pfvf_message msg = { 0 };
+
+ msg.type = (csr_msg >> fmt->type.offset) & fmt->type.mask;
+ msg.data = (csr_msg >> fmt->data.offset) & fmt->data.mask;
+
+ if (unlikely(!msg.type))
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid PFVF msg with no type received\n");
+
+ return msg;
+}
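+
+/*
+ * Example format descriptor (hypothetical layout, for illustration only): a
+ * device that keeps the message type in bits 2-5 and the data in bits 6-15
+ * of the CSR would describe it as
+ *
+ *	static const struct pfvf_csr_format example_fmt = {
+ *		.type = { .offset = 2, .mask = 0x0F },
+ *		.data = { .offset = 6, .mask = 0x3FF },
+ *	};
+ *
+ * and pack/unpack messages with adf_pfvf_csr_msg_of() and
+ * adf_pfvf_message_of(). The real per-generation layouts are defined by the
+ * device-specific pfvf code.
+ */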
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h
new file mode 100644
index 0000000000..2be048e228
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_PFVF_UTILS_H
+#define ADF_PFVF_UTILS_H
+
+#include <linux/types.h>
+#include "adf_pfvf_msg.h"
+
+/* How long to wait for far side to acknowledge receipt */
+#define ADF_PFVF_MSG_ACK_DELAY_US 4
+#define ADF_PFVF_MSG_ACK_MAX_DELAY_US (1 * USEC_PER_SEC)
+
+u8 adf_pfvf_calc_blkmsg_crc(u8 const *buf, u8 buf_len);
+void adf_pfvf_crc_init(void);
+
+struct pfvf_field_format {
+ u8 offset;
+ u32 mask;
+};
+
+struct pfvf_csr_format {
+ struct pfvf_field_format type;
+ struct pfvf_field_format data;
+};
+
+u32 adf_pfvf_csr_msg_of(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+ const struct pfvf_csr_format *fmt);
+struct pfvf_message adf_pfvf_message_of(struct adf_accel_dev *accel_dev, u32 raw_msg,
+ const struct pfvf_csr_format *fmt);
+
+#endif /* ADF_PFVF_UTILS_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c
new file mode 100644
index 0000000000..1141258db4
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <linux/bitfield.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_vf_msg.h"
+#include "adf_pfvf_vf_proto.h"
+
+/**
+ * adf_vf2pf_notify_init() - send init msg to PF
+ * @accel_dev: Pointer to acceleration VF device.
+ *
+ * Function sends an init message from the VF to a PF
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
+{
+ struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_INIT };
+
+ if (adf_send_vf2pf_msg(accel_dev, msg)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send Init event to PF\n");
+ return -EFAULT;
+ }
+ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_vf2pf_notify_init);
+
+/**
+ * adf_vf2pf_notify_shutdown() - send shutdown msg to PF
+ * @accel_dev: Pointer to acceleration VF device.
+ *
+ * Function sends a shutdown message from the VF to a PF
+ *
+ * Return: void
+ */
+void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
+{
+ struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_SHUTDOWN };
+
+ if (test_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status))
+ if (adf_send_vf2pf_msg(accel_dev, msg))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send Shutdown event to PF\n");
+}
+EXPORT_SYMBOL_GPL(adf_vf2pf_notify_shutdown);
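+
+/*
+ * Bring-up/teardown sketch for a VF driver (order matters, since the
+ * ADF_STATUS_PF_RUNNING bit set in adf_vf2pf_notify_init() gates the
+ * shutdown notification):
+ *
+ *	if (adf_vf2pf_notify_init(accel_dev))
+ *		goto err;
+ *	...
+ *	adf_vf2pf_notify_shutdown(accel_dev);
+ */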
+
+int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
+{
+ u8 pf_version;
+ int compat;
+ int ret;
+ struct pfvf_message resp;
+ struct pfvf_message msg = {
+ .type = ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ,
+ .data = ADF_PFVF_COMPAT_THIS_VERSION,
+ };
+
+ BUILD_BUG_ON(ADF_PFVF_COMPAT_THIS_VERSION > 255);
+
+ ret = adf_send_vf2pf_req(accel_dev, msg, &resp);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send Compatibility Version Request.\n");
+ return ret;
+ }
+
+ pf_version = FIELD_GET(ADF_PF2VF_VERSION_RESP_VERS_MASK, resp.data);
+ compat = FIELD_GET(ADF_PF2VF_VERSION_RESP_RESULT_MASK, resp.data);
+
+ /* Response from PF received, check compatibility */
+ switch (compat) {
+ case ADF_PF2VF_VF_COMPATIBLE:
+ break;
+ case ADF_PF2VF_VF_COMPAT_UNKNOWN:
+ /* VF is newer than PF - compatible for now */
+ break;
+ case ADF_PF2VF_VF_INCOMPATIBLE:
+ dev_err(&GET_DEV(accel_dev),
+ "PF (vers %d) and VF (vers %d) are not compatible\n",
+ pf_version, ADF_PFVF_COMPAT_THIS_VERSION);
+ return -EINVAL;
+ default:
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid response from PF; assume not compatible\n");
+ return -EINVAL;
+ }
+
+ accel_dev->vf.pf_compat_ver = pf_version;
+ return 0;
+}
+
+int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct capabilities_v3 cap_msg = { 0 };
+ unsigned int len = sizeof(cap_msg);
+
+ if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_CAPABILITIES)
+ /* The PF is too old to support the extended capabilities */
+ return 0;
+
+ if (adf_send_vf2pf_blkmsg_req(accel_dev, ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY,
+ (u8 *)&cap_msg, &len)) {
+ dev_err(&GET_DEV(accel_dev),
+ "QAT: Failed to get block message response\n");
+ return -EFAULT;
+ }
+
+ switch (cap_msg.hdr.version) {
+ default:
+ /* Newer version received, handle only the known parts */
+ fallthrough;
+ case ADF_PFVF_CAPABILITIES_V3_VERSION:
+ if (likely(len >= sizeof(struct capabilities_v3)))
+ hw_data->clock_frequency = cap_msg.frequency;
+ else
+ dev_info(&GET_DEV(accel_dev), "Could not get frequency");
+ fallthrough;
+ case ADF_PFVF_CAPABILITIES_V2_VERSION:
+ if (likely(len >= sizeof(struct capabilities_v2)))
+ hw_data->accel_capabilities_mask = cap_msg.capabilities;
+ else
+ dev_info(&GET_DEV(accel_dev), "Could not get capabilities");
+ fallthrough;
+ case ADF_PFVF_CAPABILITIES_V1_VERSION:
+ if (likely(len >= sizeof(struct capabilities_v1))) {
+ hw_data->extended_dc_capabilities = cap_msg.ext_dc_caps;
+ } else {
+ dev_err(&GET_DEV(accel_dev),
+ "Capabilities message truncated to %d bytes\n", len);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev)
+{
+ struct ring_to_svc_map_v1 rts_map_msg = { 0 };
+ unsigned int len = sizeof(rts_map_msg);
+
+ if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_RING_TO_SVC_MAP)
+ /* Use already set default mappings */
+ return 0;
+
+ if (adf_send_vf2pf_blkmsg_req(accel_dev, ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP,
+ (u8 *)&rts_map_msg, &len)) {
+ dev_err(&GET_DEV(accel_dev),
+ "QAT: Failed to get block message response\n");
+ return -EFAULT;
+ }
+
+ if (unlikely(len < sizeof(struct ring_to_svc_map_v1))) {
+ dev_err(&GET_DEV(accel_dev),
+ "RING_TO_SVC message truncated to %d bytes\n", len);
+ return -EFAULT;
+ }
+
+ /* Only v1 at present */
+ accel_dev->hw_device->ring_to_svc_map = rts_map_msg.map;
+ return 0;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h
new file mode 100644
index 0000000000..71bc0e3f1d
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_PFVF_VF_MSG_H
+#define ADF_PFVF_VF_MSG_H
+
+#if defined(CONFIG_PCI_IOV)
+int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev);
+void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
+int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev);
+int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev);
+int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev);
+#else
+static inline int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
+static inline void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
+{
+}
+#endif
+
+#endif /* ADF_PFVF_VF_MSG_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c
new file mode 100644
index 0000000000..1015155b63
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/minmax.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_utils.h"
+#include "adf_pfvf_vf_msg.h"
+#include "adf_pfvf_vf_proto.h"
+
+#define ADF_PFVF_MSG_COLLISION_DETECT_DELAY 10
+#define ADF_PFVF_MSG_ACK_DELAY 2
+#define ADF_PFVF_MSG_ACK_MAX_RETRY 100
+
+/* How often to retry if there is no response */
+#define ADF_PFVF_MSG_RESP_RETRIES 5
+#define ADF_PFVF_MSG_RESP_TIMEOUT (ADF_PFVF_MSG_ACK_DELAY * \
+ ADF_PFVF_MSG_ACK_MAX_RETRY + \
+ ADF_PFVF_MSG_COLLISION_DETECT_DELAY)
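+/* With the values above, each request waits up to 2 * 100 + 10 = 210 ms for
+ * a response, i.e. roughly one second across all ADF_PFVF_MSG_RESP_RETRIES
+ * attempts.
+ */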
+
+/**
+ * adf_send_vf2pf_msg() - send VF to PF message
+ * @accel_dev: Pointer to acceleration device
+ * @msg: Message to send
+ *
+ * This function allows the VF to send a message to the PF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, struct pfvf_message msg)
+{
+ struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
+ u32 pfvf_offset = pfvf_ops->get_vf2pf_offset(0);
+
+ return pfvf_ops->send_msg(accel_dev, msg, pfvf_offset,
+ &accel_dev->vf.vf2pf_lock);
+}
+
+/**
+ * adf_recv_pf2vf_msg() - receive a PF to VF message
+ * @accel_dev: Pointer to acceleration device
+ *
+ * This function allows the VF to receive a message from the PF.
+ *
+ * Return: a valid message on success, zero otherwise.
+ */
+static struct pfvf_message adf_recv_pf2vf_msg(struct adf_accel_dev *accel_dev)
+{
+ struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
+ u32 pfvf_offset = pfvf_ops->get_pf2vf_offset(0);
+
+ return pfvf_ops->recv_msg(accel_dev, pfvf_offset, accel_dev->vf.pf_compat_ver);
+}
+
+/**
+ * adf_send_vf2pf_req() - send VF2PF request message
+ * @accel_dev: Pointer to acceleration device.
+ * @msg: Request message to send
+ * @resp: Returned PF response
+ *
+ * This function sends a message that requires a response from the VF to the PF
+ * and waits for a reply.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_vf2pf_req(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+ struct pfvf_message *resp)
+{
+ unsigned long timeout = msecs_to_jiffies(ADF_PFVF_MSG_RESP_TIMEOUT);
+ unsigned int retries = ADF_PFVF_MSG_RESP_RETRIES;
+ int ret;
+
+ reinit_completion(&accel_dev->vf.msg_received);
+
+ /* Send request from VF to PF */
+ do {
+ ret = adf_send_vf2pf_msg(accel_dev, msg);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send request msg to PF\n");
+ return ret;
+ }
+
+ /* Wait for response, if it times out retry */
+ ret = wait_for_completion_timeout(&accel_dev->vf.msg_received,
+ timeout);
+ if (ret) {
+ if (likely(resp))
+ *resp = accel_dev->vf.response;
+
+ /* Once copied, set to an invalid value */
+ accel_dev->vf.response.type = 0;
+
+ return 0;
+ }
+
+ dev_err(&GET_DEV(accel_dev), "PFVF response message timeout\n");
+ } while (--retries);
+
+ return -EIO;
+}
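+
+/*
+ * Note: the response copied out above is not read from the hardware here;
+ * adf_handle_pf2vf_msg() stores it in accel_dev->vf.response and completes
+ * accel_dev->vf.msg_received from the PF2VF interrupt path, which is what
+ * wait_for_completion_timeout() is blocking on.
+ */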
+
+static int adf_vf2pf_blkmsg_data_req(struct adf_accel_dev *accel_dev, bool crc,
+ u8 *type, u8 *data)
+{
+ struct pfvf_message req = { 0 };
+ struct pfvf_message resp = { 0 };
+ u8 blk_type;
+ u8 blk_byte;
+ u8 msg_type;
+ u8 max_data;
+ int err;
+
+ /* Convert the block type to {small, medium, large} size category */
+ if (*type <= ADF_VF2PF_SMALL_BLOCK_TYPE_MAX) {
+ msg_type = ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ;
+ blk_type = FIELD_PREP(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK, *type);
+ blk_byte = FIELD_PREP(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK, *data);
+ max_data = ADF_VF2PF_SMALL_BLOCK_BYTE_MAX;
+ } else if (*type <= ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX) {
+ msg_type = ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ;
+ blk_type = FIELD_PREP(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK,
+ *type - ADF_VF2PF_SMALL_BLOCK_TYPE_MAX);
+ blk_byte = FIELD_PREP(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK, *data);
+ max_data = ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX;
+ } else if (*type <= ADF_VF2PF_LARGE_BLOCK_TYPE_MAX) {
+ msg_type = ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ;
+ blk_type = FIELD_PREP(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK,
+ *type - ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX);
+ blk_byte = FIELD_PREP(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK, *data);
+ max_data = ADF_VF2PF_LARGE_BLOCK_BYTE_MAX;
+ } else {
+ dev_err(&GET_DEV(accel_dev), "Invalid message type %u\n", *type);
+ return -EINVAL;
+ }
+
+ /* Sanity check */
+ if (*data > max_data) {
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid byte %s %u for message type %u\n",
+ crc ? "count" : "index", *data, *type);
+ return -EINVAL;
+ }
+
+ /* Build the block message */
+ req.type = msg_type;
+ req.data = blk_type | blk_byte | FIELD_PREP(ADF_VF2PF_BLOCK_CRC_REQ_MASK, crc);
+
+ err = adf_send_vf2pf_req(accel_dev, req, &resp);
+ if (err)
+ return err;
+
+ *type = FIELD_GET(ADF_PF2VF_BLKMSG_RESP_TYPE_MASK, resp.data);
+ *data = FIELD_GET(ADF_PF2VF_BLKMSG_RESP_DATA_MASK, resp.data);
+
+ return 0;
+}
+
+static int adf_vf2pf_blkmsg_get_byte(struct adf_accel_dev *accel_dev, u8 type,
+ u8 index, u8 *data)
+{
+ int ret;
+
+ ret = adf_vf2pf_blkmsg_data_req(accel_dev, false, &type, &index);
+ if (ret < 0)
+ return ret;
+
+ if (unlikely(type != ADF_PF2VF_BLKMSG_RESP_TYPE_DATA)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Unexpected BLKMSG response type %u, byte 0x%x\n",
+ type, index);
+ return -EFAULT;
+ }
+
+ *data = index;
+ return 0;
+}
+
+static int adf_vf2pf_blkmsg_get_crc(struct adf_accel_dev *accel_dev, u8 type,
+ u8 bytes, u8 *crc)
+{
+ int ret;
+
+ /* The count of bytes refers to a length, however shift it to a 0-based
+ * count to avoid overflows. As a consequence, a request for 0 bytes is
+ * not valid anyway.
+ */
+ --bytes;
+
+ ret = adf_vf2pf_blkmsg_data_req(accel_dev, true, &type, &bytes);
+ if (ret < 0)
+ return ret;
+
+ if (unlikely(type != ADF_PF2VF_BLKMSG_RESP_TYPE_CRC)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Unexpected CRC BLKMSG response type %u, crc 0x%x\n",
+ type, bytes);
+ return -EFAULT;
+ }
+
+ *crc = bytes;
+ return 0;
+}
+
+/**
+ * adf_send_vf2pf_blkmsg_req() - retrieve block message
+ * @accel_dev: Pointer to acceleration VF device.
+ * @type: The block message type, see adf_pfvf_msg.h for allowed values
+ * @buffer: buffer where the received data is to be placed
+ * @buffer_len: buffer length on input, the number of written bytes on output
+ *
+ * Request a message of type 'type' over the block message transport.
+ * This function will send the required number of block message requests and
+ * return the overall content back to the caller through the provided buffer.
+ * The buffer should be large enough to contain the requested message type,
+ * otherwise the response will be truncated.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_vf2pf_blkmsg_req(struct adf_accel_dev *accel_dev, u8 type,
+ u8 *buffer, unsigned int *buffer_len)
+{
+ unsigned int index;
+ unsigned int msg_len;
+ int ret;
+ u8 remote_crc;
+ u8 local_crc;
+
+ if (unlikely(type > ADF_VF2PF_LARGE_BLOCK_TYPE_MAX)) {
+ dev_err(&GET_DEV(accel_dev), "Invalid block message type %d\n",
+ type);
+ return -EINVAL;
+ }
+
+ if (unlikely(*buffer_len < ADF_PFVF_BLKMSG_HEADER_SIZE)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Buffer size too small for a block message\n");
+ return -EINVAL;
+ }
+
+ ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type,
+ ADF_PFVF_BLKMSG_VER_BYTE,
+ &buffer[ADF_PFVF_BLKMSG_VER_BYTE]);
+ if (unlikely(ret))
+ return ret;
+
+ if (unlikely(!buffer[ADF_PFVF_BLKMSG_VER_BYTE])) {
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid version 0 received for block request %u", type);
+ return -EFAULT;
+ }
+
+ ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type,
+ ADF_PFVF_BLKMSG_LEN_BYTE,
+ &buffer[ADF_PFVF_BLKMSG_LEN_BYTE]);
+ if (unlikely(ret))
+ return ret;
+
+ if (unlikely(!buffer[ADF_PFVF_BLKMSG_LEN_BYTE])) {
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid size 0 received for block request %u", type);
+ return -EFAULT;
+ }
+
+ /* We need to pick the minimum since there is no way to request a
+ * specific version. As a consequence any scenario is possible:
+ * - PF has a newer (longer) version which doesn't fit in the buffer
+ * - VF expects a newer (longer) version, so we must not ask for
+ * bytes in excess
+ * - PF and VF share the same version, no problem
+ */
+ msg_len = ADF_PFVF_BLKMSG_HEADER_SIZE + buffer[ADF_PFVF_BLKMSG_LEN_BYTE];
+ msg_len = min(*buffer_len, msg_len);
+
+ /* Get the payload */
+ for (index = ADF_PFVF_BLKMSG_HEADER_SIZE; index < msg_len; index++) {
+ ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type, index,
+ &buffer[index]);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ret = adf_vf2pf_blkmsg_get_crc(accel_dev, type, msg_len, &remote_crc);
+ if (unlikely(ret))
+ return ret;
+
+ local_crc = adf_pfvf_calc_blkmsg_crc(buffer, msg_len);
+ if (unlikely(local_crc != remote_crc)) {
+ dev_err(&GET_DEV(accel_dev),
+ "CRC error on msg type %d. Local %02X, remote %02X\n",
+ type, local_crc, remote_crc);
+ return -EIO;
+ }
+
+ *buffer_len = msg_len;
+ return 0;
+}
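+
+/*
+ * Example caller (illustrative sketch; adf_vf2pf_get_capabilities() in
+ * adf_pfvf_vf_msg.c is a real user of this pattern):
+ *
+ *	struct capabilities_v2 caps = { 0 };
+ *	unsigned int len = sizeof(caps);
+ *
+ *	ret = adf_send_vf2pf_blkmsg_req(accel_dev,
+ *					ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY,
+ *					(u8 *)&caps, &len);
+ *
+ * On success only the first 'len' bytes of 'caps' are valid, since the PF
+ * may implement an older, shorter version of the structure.
+ */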
+
+static bool adf_handle_pf2vf_msg(struct adf_accel_dev *accel_dev,
+ struct pfvf_message msg)
+{
+ switch (msg.type) {
+ case ADF_PF2VF_MSGTYPE_RESTARTING:
+ dev_dbg(&GET_DEV(accel_dev), "Restarting message received from PF\n");
+
+ adf_pf2vf_handle_pf_restarting(accel_dev);
+ return false;
+ case ADF_PF2VF_MSGTYPE_VERSION_RESP:
+ case ADF_PF2VF_MSGTYPE_BLKMSG_RESP:
+ case ADF_PF2VF_MSGTYPE_RP_RESET_RESP:
+ dev_dbg(&GET_DEV(accel_dev),
+ "Response Message received from PF (type 0x%.4x, data 0x%.4x)\n",
+ msg.type, msg.data);
+ accel_dev->vf.response = msg;
+ complete(&accel_dev->vf.msg_received);
+ return true;
+ default:
+ dev_err(&GET_DEV(accel_dev),
+ "Unknown message from PF (type 0x%.4x, data: 0x%.4x)\n",
+ msg.type, msg.data);
+ }
+
+ return false;
+}
+
+bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev)
+{
+ struct pfvf_message msg;
+
+ msg = adf_recv_pf2vf_msg(accel_dev);
+ if (msg.type) /* Valid message received, handle it */
+ return adf_handle_pf2vf_msg(accel_dev, msg);
+
+ /* No replies for PF->VF messages at present */
+
+ return true;
+}
+
+/**
+ * adf_enable_vf2pf_comms() - Function enables communication from VF to PF
+ *
+ * @accel_dev: Pointer to acceleration device virtual function.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+{
+ int ret;
+
+ adf_pfvf_crc_init();
+ adf_enable_pf2vf_interrupts(accel_dev);
+
+ ret = adf_vf2pf_request_version(accel_dev);
+ if (ret)
+ return ret;
+
+ ret = adf_vf2pf_get_capabilities(accel_dev);
+ if (ret)
+ return ret;
+
+ ret = adf_vf2pf_get_ring_to_svc(accel_dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.h
new file mode 100644
index 0000000000..f6ee9b38c0
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_PFVF_VF_PROTO_H
+#define ADF_PFVF_VF_PROTO_H
+
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+
+int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, struct pfvf_message msg);
+int adf_send_vf2pf_req(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+ struct pfvf_message *resp);
+int adf_send_vf2pf_blkmsg_req(struct adf_accel_dev *accel_dev, u8 type,
+ u8 *buffer, unsigned int *buffer_len);
+
+int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_PFVF_VF_PROTO_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
new file mode 100644
index 0000000000..f44025bb6f
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_pfvf_pf_msg.h"
+
+#define ADF_VF2PF_RATELIMIT_INTERVAL 8
+#define ADF_VF2PF_RATELIMIT_BURST 130
+
+static struct workqueue_struct *pf2vf_resp_wq;
+
+struct adf_pf2vf_resp {
+ struct work_struct pf2vf_resp_work;
+ struct adf_accel_vf_info *vf_info;
+};
+
+static void adf_iov_send_resp(struct work_struct *work)
+{
+ struct adf_pf2vf_resp *pf2vf_resp =
+ container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work);
+ struct adf_accel_vf_info *vf_info = pf2vf_resp->vf_info;
+ struct adf_accel_dev *accel_dev = vf_info->accel_dev;
+ u32 vf_nr = vf_info->vf_nr;
+ bool ret;
+
+ ret = adf_recv_and_handle_vf2pf_msg(accel_dev, vf_nr);
+ if (ret)
+ /* re-enable interrupt on PF from this VF */
+ adf_enable_vf2pf_interrupts(accel_dev, 1 << vf_nr);
+
+ kfree(pf2vf_resp);
+}
+
+void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info)
+{
+ struct adf_pf2vf_resp *pf2vf_resp;
+
+ pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
+ if (!pf2vf_resp)
+ return;
+
+ pf2vf_resp->vf_info = vf_info;
+ INIT_WORK(&pf2vf_resp->pf2vf_resp_work, adf_iov_send_resp);
+ queue_work(pf2vf_resp_wq, &pf2vf_resp->pf2vf_resp_work);
+}
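+
+/*
+ * Note: this is expected to run from the PF's VF2PF interrupt handling path
+ * (hence the GFP_ATOMIC allocation above); the actual message handling is
+ * deferred to pf2vf_resp_wq so the interrupt handler can return quickly.
+ */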
+
+static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+ int totalvfs = pci_sriov_get_totalvfs(pdev);
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_accel_vf_info *vf_info;
+ int i;
+
+ for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
+ i++, vf_info++) {
+ /* This ptr will be populated when VFs are created */
+ vf_info->accel_dev = accel_dev;
+ vf_info->vf_nr = i;
+ vf_info->vf_compat_ver = 0;
+
+ mutex_init(&vf_info->pf2vf_lock);
+ ratelimit_state_init(&vf_info->vf2pf_ratelimit,
+ ADF_VF2PF_RATELIMIT_INTERVAL,
+ ADF_VF2PF_RATELIMIT_BURST);
+ }
+
+ /* Set Valid bits in AE Thread to PCIe Function Mapping */
+ if (hw_data->configure_iov_threads)
+ hw_data->configure_iov_threads(accel_dev, true);
+
+ /* Enable VF to PF interrupts for all VFs */
+ adf_enable_vf2pf_interrupts(accel_dev, BIT_ULL(totalvfs) - 1);
+
+ /*
+ * Due to the hardware design, when SR-IOV and the ring arbiter
+ * are enabled all the VFs supported in hardware must be enabled in
+ * order for all the hardware resources (i.e. bundles) to be usable.
+ * When SR-IOV is enabled, each of the VFs will own one bundle.
+ */
+ return pci_enable_sriov(pdev, totalvfs);
+}
+
+/**
+ * adf_disable_sriov() - Disable SRIOV for the device
+ * @accel_dev: Pointer to accel device.
+ *
+ * Function disables SRIOV for the accel device.
+ *
+ * Return: void
+ */
+void adf_disable_sriov(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ int totalvfs = pci_sriov_get_totalvfs(accel_to_pci_dev(accel_dev));
+ struct adf_accel_vf_info *vf;
+ int i;
+
+ if (!accel_dev->pf.vf_info)
+ return;
+
+ adf_pf2vf_notify_restarting(accel_dev);
+ pci_disable_sriov(accel_to_pci_dev(accel_dev));
+
+ /* Disable VF to PF interrupts */
+ adf_disable_all_vf2pf_interrupts(accel_dev);
+
+ /* Clear Valid bits in AE Thread to PCIe Function Mapping */
+ if (hw_data->configure_iov_threads)
+ hw_data->configure_iov_threads(accel_dev, false);
+
+ for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++)
+ mutex_destroy(&vf->pf2vf_lock);
+
+ kfree(accel_dev->pf.vf_info);
+ accel_dev->pf.vf_info = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_disable_sriov);
+
+/**
+ * adf_sriov_configure() - Enable SRIOV for the device
+ * @pdev: Pointer to PCI device.
+ * @numvfs: Number of virtual functions (VFs) to enable.
+ *
+ * Note that the @numvfs parameter is ignored and all VFs supported by the
+ * device are enabled due to the design of the hardware.
+ *
+ * Function enables SRIOV for the PCI device.
+ *
+ * Return: number of VFs enabled on success, error code otherwise.
+ */
+int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+ int totalvfs = pci_sriov_get_totalvfs(pdev);
+ unsigned long val;
+ int ret;
+
+ if (!accel_dev) {
+ dev_err(&pdev->dev, "Failed to find accel_dev\n");
+ return -EFAULT;
+ }
+
+ if (!device_iommu_mapped(&pdev->dev))
+ dev_warn(&pdev->dev, "IOMMU should be enabled for SR-IOV to work correctly\n");
+
+ if (accel_dev->pf.vf_info) {
+ dev_info(&pdev->dev, "Already enabled for this device\n");
+ return -EINVAL;
+ }
+
+ if (adf_dev_started(accel_dev)) {
+ if (adf_devmgr_in_reset(accel_dev) ||
+ adf_dev_in_use(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Device busy\n");
+ return -EBUSY;
+ }
+
+ ret = adf_dev_down(accel_dev, true);
+ if (ret)
+ return ret;
+ }
+
+ if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
+ return -EFAULT;
+ val = 0;
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ ADF_NUM_CY, (void *)&val, ADF_DEC))
+ return -EFAULT;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+ &val, ADF_DEC);
+ if (ret)
+ return ret;
+
+ set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+ /* Allocate memory for VF info structs */
+ accel_dev->pf.vf_info = kcalloc(totalvfs,
+ sizeof(struct adf_accel_vf_info),
+ GFP_KERNEL);
+ if (!accel_dev->pf.vf_info)
+ return -ENOMEM;
+
+ if (adf_dev_up(accel_dev, false)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
+ accel_dev->accel_id);
+ return -EFAULT;
+ }
+
+ ret = adf_enable_sriov(accel_dev);
+ if (ret)
+ return ret;
+
+ return numvfs;
+}
+EXPORT_SYMBOL_GPL(adf_sriov_configure);
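+
+/*
+ * Note: adf_sriov_configure() is typically wired up as the PCI driver's
+ * .sriov_configure callback, so it is driven by writes to the standard
+ * sriov_numvfs sysfs attribute of the PF device.
+ */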
+
+int __init adf_init_pf_wq(void)
+{
+ /* Workqueue for PF2VF responses */
+ pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq", WQ_MEM_RECLAIM, 0);
+
+ return !pf2vf_resp_wq ? -ENOMEM : 0;
+}
+
+void adf_exit_pf_wq(void)
+{
+ if (pf2vf_resp_wq) {
+ destroy_workqueue(pf2vf_resp_wq);
+ pf2vf_resp_wq = NULL;
+ }
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
new file mode 100644
index 0000000000..8f04b0d3c5
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2022 Intel Corporation */
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_cfg_services.h"
+#include "adf_common_drv.h"
+
+static const char * const state_operations[] = {
+ [DEV_DOWN] = "down",
+ [DEV_UP] = "up",
+};
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct adf_accel_dev *accel_dev;
+ char *state;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ state = adf_dev_started(accel_dev) ? "up" : "down";
+ return sysfs_emit(buf, "%s\n", state);
+}
+
+static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct adf_accel_dev *accel_dev;
+ u32 accel_id;
+ int ret;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ accel_id = accel_dev->accel_id;
+
+ if (adf_devmgr_in_reset(accel_dev) || adf_dev_in_use(accel_dev)) {
+ dev_info(dev, "Device qat_dev%d is busy\n", accel_id);
+ return -EBUSY;
+ }
+
+ ret = sysfs_match_string(state_operations, buf);
+ if (ret < 0)
+ return ret;
+
+ switch (ret) {
+ case DEV_DOWN:
+ dev_info(dev, "Stopping device qat_dev%d\n", accel_id);
+
+ if (!adf_dev_started(accel_dev)) {
+ dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n",
+ accel_id);
+
+ break;
+ }
+
+ ret = adf_dev_down(accel_dev, true);
+ if (ret < 0)
+ return -EINVAL;
+
+ break;
+ case DEV_UP:
+ dev_info(dev, "Starting device qat_dev%d\n", accel_id);
+
+ ret = adf_dev_up(accel_dev, true);
+ if (ret == -EALREADY) {
+ break;
+ } else if (ret) {
+ dev_err(dev, "Failed to start device qat_dev%d\n",
+ accel_id);
+ adf_dev_down(accel_dev, true);
+ return ret;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static ssize_t cfg_services_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+ struct adf_accel_dev *accel_dev;
+ int ret;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED, services);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%s\n", services);
+}
+
+static int adf_sysfs_update_dev_config(struct adf_accel_dev *accel_dev,
+ const char *services)
+{
+ return adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED, services,
+ ADF_STR);
+}
+
+static ssize_t cfg_services_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct adf_hw_device_data *hw_data;
+ struct adf_accel_dev *accel_dev;
+ int ret;
+
+ ret = sysfs_match_string(adf_cfg_services, buf);
+ if (ret < 0)
+ return ret;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ if (adf_dev_started(accel_dev)) {
+ dev_info(dev, "Device qat_dev%d must be down to reconfigure the service.\n",
+ accel_dev->accel_id);
+ return -EINVAL;
+ }
+
+ ret = adf_sysfs_update_dev_config(accel_dev, adf_cfg_services[ret]);
+ if (ret < 0)
+ return ret;
+
+ hw_data = GET_HW_DATA(accel_dev);
+
+ /* Update capabilities mask after change in configuration.
+ * A call to this function is required as capabilities are, at the
+ * moment, tied to configuration
+ */
+ hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
+ if (!hw_data->accel_capabilities_mask)
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t pm_idle_enabled_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ char pm_idle_enabled[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {};
+ struct adf_accel_dev *accel_dev;
+ int ret;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_PM_IDLE_SUPPORT, pm_idle_enabled);
+ if (ret)
+ return sysfs_emit(buf, "1\n");
+
+ return sysfs_emit(buf, "%s\n", pm_idle_enabled);
+}
+
+static ssize_t pm_idle_enabled_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long pm_idle_enabled_cfg_val;
+ struct adf_accel_dev *accel_dev;
+ bool pm_idle_enabled;
+ int ret;
+
+ ret = kstrtobool(buf, &pm_idle_enabled);
+ if (ret)
+ return ret;
+
+ pm_idle_enabled_cfg_val = pm_idle_enabled;
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ if (adf_dev_started(accel_dev)) {
+ dev_info(dev, "Device qat_dev%d must be down to set pm_idle_enabled.\n",
+ accel_dev->accel_id);
+ return -EINVAL;
+ }
+
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+ ADF_PM_IDLE_SUPPORT, &pm_idle_enabled_cfg_val,
+ ADF_DEC);
+ if (ret)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR_RW(pm_idle_enabled);
+
+static DEVICE_ATTR_RW(state);
+static DEVICE_ATTR_RW(cfg_services);
+
+static struct attribute *qat_attrs[] = {
+ &dev_attr_state.attr,
+ &dev_attr_cfg_services.attr,
+ &dev_attr_pm_idle_enabled.attr,
+ NULL,
+};
+
+static struct attribute_group qat_group = {
+ .attrs = qat_attrs,
+ .name = "qat",
+};
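+
+/*
+ * The "qat" group is attached to the PCI device, so the attributes are
+ * expected to appear under /sys/bus/pci/devices/<BDF>/qat/. Illustrative
+ * shell usage (the device address and the "dc" service string are examples):
+ *
+ *	echo down > /sys/bus/pci/devices/0000:6b:00.0/qat/state
+ *	echo dc   > /sys/bus/pci/devices/0000:6b:00.0/qat/cfg_services
+ *	echo up   > /sys/bus/pci/devices/0000:6b:00.0/qat/state
+ */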
+
+int adf_sysfs_init(struct adf_accel_dev *accel_dev)
+{
+ int ret;
+
+ ret = devm_device_add_group(&GET_DEV(accel_dev), &qat_group);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to create qat attribute group: %d\n", ret);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_sysfs_init);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport.c b/drivers/crypto/intel/qat/qat_common/adf_transport.c
new file mode 100644
index 0000000000..630d0483c4
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_transport.c
@@ -0,0 +1,577 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/delay.h>
+#include <linux/nospec.h>
+#include "adf_accel_devices.h"
+#include "adf_transport_internal.h"
+#include "adf_transport_access_macros.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+#define ADF_MAX_RING_THRESHOLD 80
+#define ADF_PERCENT(tot, percent) (((tot) * (percent)) / 100)
+
+static inline u32 adf_modulo(u32 data, u32 shift)
+{
+ u32 div = data >> shift;
+ u32 mult = div << shift;
+
+ return data - mult;
+}
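+
+/* adf_modulo(data, shift) computes data % (1 << shift) without a division,
+ * e.g. adf_modulo(300, 8) == 300 - 256 == 44; it is used below to wrap the
+ * ring head and tail byte offsets.
+ */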
+
+static inline int adf_check_ring_alignment(u64 addr, u64 size)
+{
+ if (((size - 1) & addr) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static int adf_verify_ring_size(u32 msg_size, u32 msg_num)
+{
+ int i = ADF_MIN_RING_SIZE;
+
+ for (; i <= ADF_MAX_RING_SIZE; i++)
+ if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
+ return i;
+
+ return ADF_DEFAULT_RING_SIZE;
+}
+
+static int adf_reserve_ring(struct adf_etr_bank_data *bank, u32 ring)
+{
+ spin_lock(&bank->lock);
+ if (bank->ring_mask & (1 << ring)) {
+ spin_unlock(&bank->lock);
+ return -EFAULT;
+ }
+ bank->ring_mask |= (1 << ring);
+ spin_unlock(&bank->lock);
+ return 0;
+}
+
+static void adf_unreserve_ring(struct adf_etr_bank_data *bank, u32 ring)
+{
+ spin_lock(&bank->lock);
+ bank->ring_mask &= ~(1 << ring);
+ spin_unlock(&bank->lock);
+}
+
+static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
+{
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
+
+ spin_lock_bh(&bank->lock);
+ bank->irq_mask |= (1 << ring);
+ spin_unlock_bh(&bank->lock);
+ csr_ops->write_csr_int_col_en(bank->csr_addr, bank->bank_number,
+ bank->irq_mask);
+ csr_ops->write_csr_int_col_ctl(bank->csr_addr, bank->bank_number,
+ bank->irq_coalesc_timer);
+}
+
+static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
+{
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
+
+ spin_lock_bh(&bank->lock);
+ bank->irq_mask &= ~(1 << ring);
+ spin_unlock_bh(&bank->lock);
+ csr_ops->write_csr_int_col_en(bank->csr_addr, bank->bank_number,
+ bank->irq_mask);
+}
+
+bool adf_ring_nearly_full(struct adf_etr_ring_data *ring)
+{
+ return atomic_read(ring->inflights) > ring->threshold;
+}
+
+int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
+{
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
+
+ if (atomic_add_return(1, ring->inflights) >
+ ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
+ atomic_dec(ring->inflights);
+ return -EAGAIN;
+ }
+ spin_lock_bh(&ring->lock);
+ memcpy((void *)((uintptr_t)ring->base_addr + ring->tail), msg,
+ ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
+
+ ring->tail = adf_modulo(ring->tail +
+ ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
+ ADF_RING_SIZE_MODULO(ring->ring_size));
+ csr_ops->write_csr_ring_tail(ring->bank->csr_addr,
+ ring->bank->bank_number, ring->ring_number,
+ ring->tail);
+ spin_unlock_bh(&ring->lock);
+
+ return 0;
+}
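+
+/*
+ * Callers are expected to deal with the -EAGAIN above themselves, e.g.
+ * (sketch of a typical submission loop, names hypothetical):
+ *
+ *	do {
+ *		ret = adf_send_message(tx_ring, (u32 *)req);
+ *	} while (ret == -EAGAIN && --retries);
+ */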
+
+static int adf_handle_response(struct adf_etr_ring_data *ring)
+{
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
+ u32 msg_counter = 0;
+ u32 *msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
+
+ while (*msg != ADF_RING_EMPTY_SIG) {
+ ring->callback((u32 *)msg);
+ atomic_dec(ring->inflights);
+ *msg = ADF_RING_EMPTY_SIG;
+ ring->head = adf_modulo(ring->head +
+ ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
+ ADF_RING_SIZE_MODULO(ring->ring_size));
+ msg_counter++;
+ msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
+ }
+ if (msg_counter > 0) {
+ csr_ops->write_csr_ring_head(ring->bank->csr_addr,
+ ring->bank->bank_number,
+ ring->ring_number, ring->head);
+ }
+ return 0;
+}
+
+static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
+{
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
+ u32 ring_config = BUILD_RING_CONFIG(ring->ring_size);
+
+ csr_ops->write_csr_ring_config(ring->bank->csr_addr,
+ ring->bank->bank_number,
+ ring->ring_number, ring_config);
+}
+
+static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
+{
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
+ u32 ring_config =
+ BUILD_RESP_RING_CONFIG(ring->ring_size,
+ ADF_RING_NEAR_WATERMARK_512,
+ ADF_RING_NEAR_WATERMARK_0);
+
+ csr_ops->write_csr_ring_config(ring->bank->csr_addr,
+ ring->bank->bank_number,
+ ring->ring_number, ring_config);
+}
+
+static int adf_init_ring(struct adf_etr_ring_data *ring)
+{
+ struct adf_etr_bank_data *bank = ring->bank;
+ struct adf_accel_dev *accel_dev = bank->accel_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+ u64 ring_base;
+ u32 ring_size_bytes =
+ ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
+
+ ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
+ ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
+ ring_size_bytes, &ring->dma_addr,
+ GFP_KERNEL);
+ if (!ring->base_addr)
+ return -ENOMEM;
+
+ memset(ring->base_addr, 0x7F, ring_size_bytes);
+ /* The base_addr has to be aligned to the size of the buffer */
+ if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
+ dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
+ dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
+ ring->base_addr, ring->dma_addr);
+ ring->base_addr = NULL;
+ return -EFAULT;
+ }
+
+ if (hw_data->tx_rings_mask & (1 << ring->ring_number))
+ adf_configure_tx_ring(ring);
+ else
+ adf_configure_rx_ring(ring);
+
+ ring_base = csr_ops->build_csr_ring_base_addr(ring->dma_addr,
+ ring->ring_size);
+
+ csr_ops->write_csr_ring_base(ring->bank->csr_addr,
+ ring->bank->bank_number, ring->ring_number,
+ ring_base);
+ spin_lock_init(&ring->lock);
+ return 0;
+}
+
+static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
+{
+ u32 ring_size_bytes =
+ ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
+ ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
+
+ if (ring->base_addr) {
+ memset(ring->base_addr, 0x7F, ring_size_bytes);
+ dma_free_coherent(&GET_DEV(ring->bank->accel_dev),
+ ring_size_bytes, ring->base_addr,
+ ring->dma_addr);
+ }
+}
+
+int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
+ u32 bank_num, u32 num_msgs,
+ u32 msg_size, const char *ring_name,
+ adf_callback_fn callback, int poll_mode,
+ struct adf_etr_ring_data **ring_ptr)
+{
+ struct adf_etr_data *transport_data = accel_dev->transport;
+ u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(accel_dev);
+ struct adf_etr_bank_data *bank;
+ struct adf_etr_ring_data *ring;
+ char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+ int max_inflights;
+ u32 ring_num;
+ int ret;
+
+ if (bank_num >= GET_MAX_BANKS(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Invalid bank number\n");
+ return -EFAULT;
+ }
+ if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
+ dev_err(&GET_DEV(accel_dev), "Invalid msg size\n");
+ return -EFAULT;
+ }
+ if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
+ ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid ring size for given msg size\n");
+ return -EFAULT;
+ }
+ if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
+ dev_err(&GET_DEV(accel_dev), "Section %s, no such entry : %s\n",
+ section, ring_name);
+ return -EFAULT;
+ }
+ if (kstrtouint(val, 10, &ring_num)) {
+ dev_err(&GET_DEV(accel_dev), "Can't get ring number\n");
+ return -EFAULT;
+ }
+ if (ring_num >= num_rings_per_bank) {
+ dev_err(&GET_DEV(accel_dev), "Invalid ring number\n");
+ return -EFAULT;
+ }
+
+ ring_num = array_index_nospec(ring_num, num_rings_per_bank);
+ bank = &transport_data->banks[bank_num];
+ if (adf_reserve_ring(bank, ring_num)) {
+ dev_err(&GET_DEV(accel_dev), "Ring %d, %s already exists.\n",
+ ring_num, ring_name);
+ return -EFAULT;
+ }
+ ring = &bank->rings[ring_num];
+ ring->ring_number = ring_num;
+ ring->bank = bank;
+ ring->callback = callback;
+ ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
+ ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
+ ring->head = 0;
+ ring->tail = 0;
+ max_inflights = ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size);
+ ring->threshold = ADF_PERCENT(max_inflights, ADF_MAX_RING_THRESHOLD);
+ atomic_set(ring->inflights, 0);
+ ret = adf_init_ring(ring);
+ if (ret)
+ goto err;
+
+ /* Enable HW arbitration for the given ring */
+ adf_update_ring_arb(ring);
+
+ if (adf_ring_debugfs_add(ring, ring_name)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Couldn't add ring debugfs entry\n");
+ ret = -EFAULT;
+ goto err;
+ }
+
+ /* Enable interrupts if needed */
+ if (callback && (!poll_mode))
+ adf_enable_ring_irq(bank, ring->ring_number);
+ *ring_ptr = ring;
+ return 0;
+err:
+ adf_cleanup_ring(ring);
+ adf_unreserve_ring(bank, ring_num);
+ adf_update_ring_arb(ring);
+ return ret;
+}
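+
+/*
+ * Typical usage (sketch; the section, ring key names and callback are
+ * hypothetical, in practice they come from the device configuration):
+ *
+ *	ret = adf_create_ring(accel_dev, "SSL", bank_nr, 64, 128, "RingTx0",
+ *			      NULL, 0, &tx_ring);
+ *	if (!ret)
+ *		ret = adf_create_ring(accel_dev, "SSL", bank_nr, 64, 128,
+ *				      "RingRx0", resp_callback, 0, &rx_ring);
+ *
+ * Rings created this way are torn down again with adf_remove_ring().
+ */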
+
+void adf_remove_ring(struct adf_etr_ring_data *ring)
+{
+ struct adf_etr_bank_data *bank = ring->bank;
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
+
+ /* Disable interrupts for the given ring */
+ adf_disable_ring_irq(bank, ring->ring_number);
+
+ /* Clear PCI config space */
+
+ csr_ops->write_csr_ring_config(bank->csr_addr, bank->bank_number,
+ ring->ring_number, 0);
+ csr_ops->write_csr_ring_base(bank->csr_addr, bank->bank_number,
+ ring->ring_number, 0);
+ adf_ring_debugfs_rm(ring);
+ adf_unreserve_ring(bank, ring->ring_number);
+ /* Disable HW arbitration for the given ring */
+ adf_update_ring_arb(ring);
+ adf_cleanup_ring(ring);
+}
+
+static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
+{
+ struct adf_accel_dev *accel_dev = bank->accel_dev;
+ u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(accel_dev);
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+ unsigned long empty_rings;
+ int i;
+
+ empty_rings = csr_ops->read_csr_e_stat(bank->csr_addr,
+ bank->bank_number);
+ empty_rings = ~empty_rings & bank->irq_mask;
+
+ for_each_set_bit(i, &empty_rings, num_rings_per_bank)
+ adf_handle_response(&bank->rings[i]);
+}
+
+void adf_response_handler(uintptr_t bank_addr)
+{
+ struct adf_etr_bank_data *bank = (void *)bank_addr;
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
+
+ /* Handle all the responses and reenable IRQs */
+ adf_ring_response_handler(bank);
+
+ csr_ops->write_csr_int_flag_and_col(bank->csr_addr, bank->bank_number,
+ bank->irq_mask);
+}
+
+static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
+ const char *section, const char *format,
+ u32 key, u32 *value)
+{
+ char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+
+ snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);
+
+ if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
+ return -EFAULT;
+
+ if (kstrtouint(val_buf, 10, value))
+ return -EFAULT;
+ return 0;
+}
+
+static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
+ const char *section,
+ u32 bank_num_in_accel)
+{
+ if (adf_get_cfg_int(bank->accel_dev, section,
+ ADF_ETRMGR_COALESCE_TIMER_FORMAT,
+ bank_num_in_accel, &bank->irq_coalesc_timer))
+ bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
+
+ if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer ||
+ ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer)
+ bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
+}
+
+static int adf_init_bank(struct adf_accel_dev *accel_dev,
+ struct adf_etr_bank_data *bank,
+ u32 bank_num, void __iomem *csr_addr)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u8 num_rings_per_bank = hw_data->num_rings_per_bank;
+ struct adf_hw_csr_ops *csr_ops = &hw_data->csr_ops;
+ u32 irq_mask = BIT(num_rings_per_bank) - 1;
+ struct adf_etr_ring_data *ring;
+ struct adf_etr_ring_data *tx_ring;
+ u32 i, coalesc_enabled = 0;
+ unsigned long ring_mask;
+ int size;
+
+ memset(bank, 0, sizeof(*bank));
+ bank->bank_number = bank_num;
+ bank->csr_addr = csr_addr;
+ bank->accel_dev = accel_dev;
+ spin_lock_init(&bank->lock);
+
+ /* Allocate the rings in the bank */
+ size = num_rings_per_bank * sizeof(struct adf_etr_ring_data);
+ bank->rings = kzalloc_node(size, GFP_KERNEL,
+ dev_to_node(&GET_DEV(accel_dev)));
+ if (!bank->rings)
+ return -ENOMEM;
+
+ /* Always enable IRQ coalescing. This allows the use of
+ * the optimised flag and coalescing register.
+ * If it is disabled in the config file, just use the min time value */
+ if ((adf_get_cfg_int(accel_dev, "Accelerator0",
+ ADF_ETRMGR_COALESCING_ENABLED_FORMAT, bank_num,
+ &coalesc_enabled) == 0) && coalesc_enabled)
+ adf_get_coalesc_timer(bank, "Accelerator0", bank_num);
+ else
+ bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;
+
+ for (i = 0; i < num_rings_per_bank; i++) {
+ csr_ops->write_csr_ring_config(csr_addr, bank_num, i, 0);
+ csr_ops->write_csr_ring_base(csr_addr, bank_num, i, 0);
+
+ ring = &bank->rings[i];
+ if (hw_data->tx_rings_mask & (1 << i)) {
+ ring->inflights =
+ kzalloc_node(sizeof(atomic_t),
+ GFP_KERNEL,
+ dev_to_node(&GET_DEV(accel_dev)));
+ if (!ring->inflights)
+ goto err;
+ } else {
+ if (i < hw_data->tx_rx_gap) {
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid tx rings mask config\n");
+ goto err;
+ }
+ tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
+ ring->inflights = tx_ring->inflights;
+ }
+ }
+ if (adf_bank_debugfs_add(bank)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to add bank debugfs entry\n");
+ goto err;
+ }
+
+ csr_ops->write_csr_int_flag(csr_addr, bank_num, irq_mask);
+ csr_ops->write_csr_int_srcsel(csr_addr, bank_num);
+
+ return 0;
+err:
+ ring_mask = hw_data->tx_rings_mask;
+ for_each_set_bit(i, &ring_mask, num_rings_per_bank) {
+ ring = &bank->rings[i];
+ kfree(ring->inflights);
+ ring->inflights = NULL;
+ }
+ kfree(bank->rings);
+ return -ENOMEM;
+}
+
+/**
+ * adf_init_etr_data() - Initialize transport rings for acceleration device
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function initializes the communication channels (rings) of the
+ * acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_init_etr_data(struct adf_accel_dev *accel_dev)
+{
+ struct adf_etr_data *etr_data;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ void __iomem *csr_addr;
+ u32 size;
+ u32 num_banks = 0;
+ int i, ret;
+
+ etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
+ dev_to_node(&GET_DEV(accel_dev)));
+ if (!etr_data)
+ return -ENOMEM;
+
+ num_banks = GET_MAX_BANKS(accel_dev);
+ size = num_banks * sizeof(struct adf_etr_bank_data);
+ etr_data->banks = kzalloc_node(size, GFP_KERNEL,
+ dev_to_node(&GET_DEV(accel_dev)));
+ if (!etr_data->banks) {
+ ret = -ENOMEM;
+ goto err_bank;
+ }
+
+ accel_dev->transport = etr_data;
+ i = hw_data->get_etr_bar_id(hw_data);
+ csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;
+
+ /* accel_dev->debugfs_dir should always be non-NULL here */
+ etr_data->debug = debugfs_create_dir("transport",
+ accel_dev->debugfs_dir);
+
+ for (i = 0; i < num_banks; i++) {
+ ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
+ csr_addr);
+ if (ret)
+ goto err_bank_all;
+ }
+
+ return 0;
+
+err_bank_all:
+ debugfs_remove(etr_data->debug);
+ kfree(etr_data->banks);
+err_bank:
+ kfree(etr_data);
+ accel_dev->transport = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_init_etr_data);
+
+static void cleanup_bank(struct adf_etr_bank_data *bank)
+{
+ struct adf_accel_dev *accel_dev = bank->accel_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u8 num_rings_per_bank = hw_data->num_rings_per_bank;
+ u32 i;
+
+ for (i = 0; i < num_rings_per_bank; i++) {
+ struct adf_etr_ring_data *ring = &bank->rings[i];
+
+ if (bank->ring_mask & (1 << i))
+ adf_cleanup_ring(ring);
+
+ if (hw_data->tx_rings_mask & (1 << i))
+ kfree(ring->inflights);
+ }
+ kfree(bank->rings);
+ adf_bank_debugfs_rm(bank);
+ memset(bank, 0, sizeof(*bank));
+}
+
+static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
+{
+ struct adf_etr_data *etr_data = accel_dev->transport;
+ u32 i, num_banks = GET_MAX_BANKS(accel_dev);
+
+ for (i = 0; i < num_banks; i++)
+ cleanup_bank(&etr_data->banks[i]);
+}
+
+/**
+ * adf_cleanup_etr_data() - Clear transport rings for acceleration device
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function clears the communication channels (rings) of the
+ * acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
+{
+ struct adf_etr_data *etr_data = accel_dev->transport;
+
+ if (etr_data) {
+ adf_cleanup_etr_handles(accel_dev);
+ debugfs_remove(etr_data->debug);
+ kfree(etr_data->banks->rings);
+ kfree(etr_data->banks);
+ kfree(etr_data);
+ accel_dev->transport = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(adf_cleanup_etr_data);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport.h b/drivers/crypto/intel/qat/qat_common/adf_transport.h
new file mode 100644
index 0000000000..e6ef6f9b76
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_transport.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_TRANSPORT_H
+#define ADF_TRANSPORT_H
+
+#include "adf_accel_devices.h"
+
+struct adf_etr_ring_data;
+
+typedef void (*adf_callback_fn)(void *resp_msg);
+
+int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
+ u32 bank_num, u32 num_msgs, u32 msg_size,
+ const char *ring_name, adf_callback_fn callback,
+ int poll_mode, struct adf_etr_ring_data **ring_ptr);
+
+bool adf_ring_nearly_full(struct adf_etr_ring_data *ring);
+int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg);
+void adf_remove_ring(struct adf_etr_ring_data *ring);
+#endif
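
A usage sketch for the ring API declared above, assuming an interrupt-driven response ring; the section name, ring names, message counts and poll_mode value are placeholders that must match the device configuration, and the example_* identifiers are hypothetical.

static struct adf_etr_ring_data *example_tx_ring;
static struct adf_etr_ring_data *example_rx_ring;

static void example_resp_cb(void *resp_msg)
{
        /* Decode the firmware response taken off the response ring. */
}

static int example_setup_ring_pair(struct adf_accel_dev *accel_dev)
{
        int ret;

        /* Request ring: no callback, 64 messages of 64 bytes each. */
        ret = adf_create_ring(accel_dev, "SSL", 0, 64, 64, "RingTx",
                              NULL, 0, &example_tx_ring);
        if (ret)
                return ret;

        /* Response ring: completions are handed to example_resp_cb(). */
        ret = adf_create_ring(accel_dev, "SSL", 0, 64, 32, "RingRx",
                              example_resp_cb, 0, &example_rx_ring);
        if (ret)
                adf_remove_ring(example_tx_ring);

        return ret;
}

static int example_submit(u32 *fw_msg)
{
        /* Back-pressure check before queuing the request message. */
        if (adf_ring_nearly_full(example_tx_ring))
                return -EBUSY;

        return adf_send_message(example_tx_ring, fw_msg);
}
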
diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/intel/qat/qat_common/adf_transport_access_macros.h
new file mode 100644
index 0000000000..d3667dbd98
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_transport_access_macros.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
+#define ADF_TRANSPORT_ACCESS_MACROS_H
+
+#include "adf_accel_devices.h"
+#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A
+#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05
+#define ADF_COALESCING_MIN_TIME 0x1FF
+#define ADF_COALESCING_MAX_TIME 0xFFFFF
+#define ADF_COALESCING_DEF_TIME 0x27FF
+#define ADF_RING_NEAR_WATERMARK_512 0x08
+#define ADF_RING_NEAR_WATERMARK_0 0x00
+#define ADF_RING_EMPTY_SIG 0x7F7F7F7F
+
+/* Valid internal ring size values */
+#define ADF_RING_SIZE_128 0x01
+#define ADF_RING_SIZE_256 0x02
+#define ADF_RING_SIZE_512 0x03
+#define ADF_RING_SIZE_4K 0x06
+#define ADF_RING_SIZE_16K 0x08
+#define ADF_RING_SIZE_4M 0x10
+#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
+#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
+#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
+
+/* Valid internal msg size values */
+#define ADF_MSG_SIZE_32 0x01
+#define ADF_MSG_SIZE_64 0x02
+#define ADF_MSG_SIZE_128 0x04
+#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
+#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
+
+/* Size to bytes conversion macros for ring and msg size values */
+#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
+#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
+#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
+#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
+
+/* Minimum ring buffer size for memory allocation */
+#define ADF_RING_SIZE_BYTES_MIN(SIZE) \
+ ((SIZE < ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K)) ? \
+ ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K) : SIZE)
+#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
+#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
+ SIZE) & ~0x4)
+/* Max outstanding requests */
+#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
+ ((((1 << (RING_SIZE - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1)
+#define BUILD_RING_CONFIG(size) \
+ ((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
+ | (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+ | size)
+#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \
+ ((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM) \
+ | (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+ | size)
+#endif
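
A few worked expansions of the macros above, written as compile-time checks; a sketch that assumes <linux/build_bug.h> is available in the including unit.

/* ADF_RING_SIZE_16K (0x08) encodes (1 << (8 - 1)) << 7 = 16384 bytes. */
static_assert(ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_16K) == 16384);

/* ADF_MSG_SIZE_64 (0x02) encodes 0x02 << 5 = 64 bytes per message. */
static_assert(ADF_MSG_SIZE_TO_BYTES(ADF_MSG_SIZE_64) == 64);

/* A 16 KB ring of 64-byte messages holds 256 slots; the macro keeps one
 * slot back, so at most 255 requests can be outstanding. */
static_assert(ADF_MAX_INFLIGHTS(ADF_RING_SIZE_16K, ADF_MSG_SIZE_64) == 255);
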
diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
new file mode 100644
index 0000000000..e2dd568b87
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include "adf_accel_devices.h"
+#include "adf_transport_internal.h"
+#include "adf_transport_access_macros.h"
+
+static DEFINE_MUTEX(ring_read_lock);
+static DEFINE_MUTEX(bank_read_lock);
+
+static void *adf_ring_start(struct seq_file *sfile, loff_t *pos)
+{
+ struct adf_etr_ring_data *ring = sfile->private;
+
+ mutex_lock(&ring_read_lock);
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
+
+ if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
+ ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+ return NULL;
+
+ return ring->base_addr +
+ (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
+}
+
+static void *adf_ring_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+ struct adf_etr_ring_data *ring = sfile->private;
+
+ if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
+ ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+ return NULL;
+
+ return ring->base_addr +
+ (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
+}
+
+static int adf_ring_show(struct seq_file *sfile, void *v)
+{
+ struct adf_etr_ring_data *ring = sfile->private;
+ struct adf_etr_bank_data *bank = ring->bank;
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
+ void __iomem *csr = ring->bank->csr_addr;
+
+ if (v == SEQ_START_TOKEN) {
+ int head, tail, empty;
+
+ head = csr_ops->read_csr_ring_head(csr, bank->bank_number,
+ ring->ring_number);
+ tail = csr_ops->read_csr_ring_tail(csr, bank->bank_number,
+ ring->ring_number);
+ empty = csr_ops->read_csr_e_stat(csr, bank->bank_number);
+
+ seq_puts(sfile, "------- Ring configuration -------\n");
+ seq_printf(sfile, "ring name: %s\n",
+ ring->ring_debug->ring_name);
+ seq_printf(sfile, "ring num %d, bank num %d\n",
+ ring->ring_number, ring->bank->bank_number);
+ seq_printf(sfile, "head %x, tail %x, empty: %d\n",
+ head, tail, (empty & 1 << ring->ring_number)
+ >> ring->ring_number);
+ seq_printf(sfile, "ring size %lld, msg size %d\n",
+ (long long)ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size),
+ ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
+ seq_puts(sfile, "----------- Ring data ------------\n");
+ return 0;
+ }
+ seq_hex_dump(sfile, "", DUMP_PREFIX_ADDRESS, 32, 4,
+ v, ADF_MSG_SIZE_TO_BYTES(ring->msg_size), false);
+ return 0;
+}
+
+static void adf_ring_stop(struct seq_file *sfile, void *v)
+{
+ mutex_unlock(&ring_read_lock);
+}
+
+static const struct seq_operations adf_ring_debug_sops = {
+ .start = adf_ring_start,
+ .next = adf_ring_next,
+ .stop = adf_ring_stop,
+ .show = adf_ring_show
+};
+
+DEFINE_SEQ_ATTRIBUTE(adf_ring_debug);
+
+int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
+{
+ struct adf_etr_ring_debug_entry *ring_debug;
+ char entry_name[16];
+
+ ring_debug = kzalloc(sizeof(*ring_debug), GFP_KERNEL);
+ if (!ring_debug)
+ return -ENOMEM;
+
+ strscpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name));
+ snprintf(entry_name, sizeof(entry_name), "ring_%02d",
+ ring->ring_number);
+
+ ring_debug->debug = debugfs_create_file(entry_name, S_IRUSR,
+ ring->bank->bank_debug_dir,
+ ring, &adf_ring_debug_fops);
+ ring->ring_debug = ring_debug;
+ return 0;
+}
+
+void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring)
+{
+ if (ring->ring_debug) {
+ debugfs_remove(ring->ring_debug->debug);
+ kfree(ring->ring_debug);
+ ring->ring_debug = NULL;
+ }
+}
+
+static void *adf_bank_start(struct seq_file *sfile, loff_t *pos)
+{
+ struct adf_etr_bank_data *bank = sfile->private;
+ u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(bank->accel_dev);
+
+ mutex_lock(&bank_read_lock);
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
+
+ if (*pos >= num_rings_per_bank)
+ return NULL;
+
+ return pos;
+}
+
+static void *adf_bank_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+ struct adf_etr_bank_data *bank = sfile->private;
+ u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(bank->accel_dev);
+
+ if (++(*pos) >= num_rings_per_bank)
+ return NULL;
+
+ return pos;
+}
+
+static int adf_bank_show(struct seq_file *sfile, void *v)
+{
+ struct adf_etr_bank_data *bank = sfile->private;
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
+
+ if (v == SEQ_START_TOKEN) {
+ seq_printf(sfile, "------- Bank %d configuration -------\n",
+ bank->bank_number);
+ } else {
+ int ring_id = *((int *)v) - 1;
+ struct adf_etr_ring_data *ring = &bank->rings[ring_id];
+ void __iomem *csr = bank->csr_addr;
+ int head, tail, empty;
+
+ if (!(bank->ring_mask & 1 << ring_id))
+ return 0;
+
+ head = csr_ops->read_csr_ring_head(csr, bank->bank_number,
+ ring->ring_number);
+ tail = csr_ops->read_csr_ring_tail(csr, bank->bank_number,
+ ring->ring_number);
+ empty = csr_ops->read_csr_e_stat(csr, bank->bank_number);
+
+ seq_printf(sfile,
+ "ring num %02d, head %04x, tail %04x, empty: %d\n",
+ ring->ring_number, head, tail,
+ (empty & 1 << ring->ring_number) >>
+ ring->ring_number);
+ }
+ return 0;
+}
+
+static void adf_bank_stop(struct seq_file *sfile, void *v)
+{
+ mutex_unlock(&bank_read_lock);
+}
+
+static const struct seq_operations adf_bank_debug_sops = {
+ .start = adf_bank_start,
+ .next = adf_bank_next,
+ .stop = adf_bank_stop,
+ .show = adf_bank_show
+};
+
+DEFINE_SEQ_ATTRIBUTE(adf_bank_debug);
+
+int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
+{
+ struct adf_accel_dev *accel_dev = bank->accel_dev;
+ struct dentry *parent = accel_dev->transport->debug;
+ char name[16];
+
+ snprintf(name, sizeof(name), "bank_%02d", bank->bank_number);
+ bank->bank_debug_dir = debugfs_create_dir(name, parent);
+ bank->bank_debug_cfg = debugfs_create_file("config", S_IRUSR,
+ bank->bank_debug_dir, bank,
+ &adf_bank_debug_fops);
+ return 0;
+}
+
+void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank)
+{
+ debugfs_remove(bank->bank_debug_cfg);
+ debugfs_remove(bank->bank_debug_dir);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport_internal.h b/drivers/crypto/intel/qat/qat_common/adf_transport_internal.h
new file mode 100644
index 0000000000..8b2c92ba7c
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_transport_internal.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_TRANSPORT_INTRN_H
+#define ADF_TRANSPORT_INTRN_H
+
+#include <linux/interrupt.h>
+#include <linux/spinlock_types.h>
+#include "adf_transport.h"
+
+struct adf_etr_ring_debug_entry {
+ char ring_name[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ struct dentry *debug;
+};
+
+struct adf_etr_ring_data {
+ void *base_addr;
+ atomic_t *inflights;
+ adf_callback_fn callback;
+ struct adf_etr_bank_data *bank;
+ dma_addr_t dma_addr;
+ struct adf_etr_ring_debug_entry *ring_debug;
+ spinlock_t lock; /* protects ring data struct */
+ u16 head;
+ u16 tail;
+ u32 threshold;
+ u8 ring_number;
+ u8 ring_size;
+ u8 msg_size;
+};
+
+struct adf_etr_bank_data {
+ struct adf_etr_ring_data *rings;
+ struct tasklet_struct resp_handler;
+ void __iomem *csr_addr;
+ u32 irq_coalesc_timer;
+ u32 bank_number;
+ u16 ring_mask;
+ u16 irq_mask;
+ spinlock_t lock; /* protects bank data struct */
+ struct adf_accel_dev *accel_dev;
+ struct dentry *bank_debug_dir;
+ struct dentry *bank_debug_cfg;
+};
+
+struct adf_etr_data {
+ struct adf_etr_bank_data *banks;
+ struct dentry *debug;
+};
+
+void adf_response_handler(uintptr_t bank_addr);
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+int adf_bank_debugfs_add(struct adf_etr_bank_data *bank);
+void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank);
+int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name);
+void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring);
+#else
+static inline int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
+{
+ return 0;
+}
+
+#define adf_bank_debugfs_rm(bank) do {} while (0)
+
+static inline int adf_ring_debugfs_add(struct adf_etr_ring_data *ring,
+ const char *name)
+{
+ return 0;
+}
+
+#define adf_ring_debugfs_rm(ring) do {} while (0)
+#endif
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
new file mode 100644
index 0000000000..b05c3957a1
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "adf_cfg_common.h"
+#include "adf_transport_access_macros.h"
+#include "adf_transport_internal.h"
+
+#define ADF_VINTSOU_OFFSET 0x204
+#define ADF_VINTMSK_OFFSET 0x208
+#define ADF_VINTSOU_BUN BIT(0)
+#define ADF_VINTSOU_PF2VF BIT(1)
+
+static struct workqueue_struct *adf_vf_stop_wq;
+
+struct adf_vf_stop_data {
+ struct adf_accel_dev *accel_dev;
+ struct work_struct work;
+};
+
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+
+ ADF_CSR_WR(pmisc_addr, ADF_VINTMSK_OFFSET, 0x0);
+}
+
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+
+ ADF_CSR_WR(pmisc_addr, ADF_VINTMSK_OFFSET, 0x2);
+}
+EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);
+
+static int adf_enable_msi(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+ int stat = pci_alloc_irq_vectors(pci_dev_info->pci_dev, 1, 1,
+ PCI_IRQ_MSI);
+ if (unlikely(stat < 0)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to enable MSI interrupt: %d\n", stat);
+ return stat;
+ }
+
+ return 0;
+}
+
+static void adf_disable_msi(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+ pci_free_irq_vectors(pdev);
+}
+
+static void adf_dev_stop_async(struct work_struct *work)
+{
+ struct adf_vf_stop_data *stop_data =
+ container_of(work, struct adf_vf_stop_data, work);
+ struct adf_accel_dev *accel_dev = stop_data->accel_dev;
+
+ adf_dev_restarting_notify(accel_dev);
+ adf_dev_down(accel_dev, false);
+
+ /* Re-enable PF2VF interrupts */
+ adf_enable_pf2vf_interrupts(accel_dev);
+ kfree(stop_data);
+}
+
+int adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev)
+{
+ struct adf_vf_stop_data *stop_data;
+
+ clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+ stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
+ if (!stop_data) {
+ dev_err(&GET_DEV(accel_dev),
+ "Couldn't schedule stop for vf_%d\n",
+ accel_dev->accel_id);
+ return -ENOMEM;
+ }
+ stop_data->accel_dev = accel_dev;
+ INIT_WORK(&stop_data->work, adf_dev_stop_async);
+ queue_work(adf_vf_stop_wq, &stop_data->work);
+
+ return 0;
+}
+
+static void adf_pf2vf_bh_handler(void *data)
+{
+ struct adf_accel_dev *accel_dev = data;
+ bool ret;
+
+ ret = adf_recv_and_handle_pf2vf_msg(accel_dev);
+ if (ret)
+ /* Re-enable PF2VF interrupts */
+ adf_enable_pf2vf_interrupts(accel_dev);
+}
+
+static int adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev)
+{
+ tasklet_init(&accel_dev->vf.pf2vf_bh_tasklet,
+ (void *)adf_pf2vf_bh_handler, (unsigned long)accel_dev);
+
+ mutex_init(&accel_dev->vf.vf2pf_lock);
+ return 0;
+}
+
+static void adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev)
+{
+ tasklet_disable(&accel_dev->vf.pf2vf_bh_tasklet);
+ tasklet_kill(&accel_dev->vf.pf2vf_bh_tasklet);
+ mutex_destroy(&accel_dev->vf.vf2pf_lock);
+}
+
+static irqreturn_t adf_isr(int irq, void *privdata)
+{
+ struct adf_accel_dev *accel_dev = privdata;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_hw_csr_ops *csr_ops = &hw_data->csr_ops;
+ struct adf_bar *pmisc =
+ &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+ void __iomem *pmisc_bar_addr = pmisc->virt_addr;
+ bool handled = false;
+ u32 v_int, v_mask;
+
+ /* Read VF INT source CSR to determine the source of VF interrupt */
+ v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTSOU_OFFSET);
+
+ /* Read VF INT mask CSR to determine which sources are masked */
+ v_mask = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTMSK_OFFSET);
+
+ /*
+ * Recompute v_int ignoring sources that are masked. This is to
+ * avoid rescheduling the tasklet for interrupts already handled
+ */
+ v_int &= ~v_mask;
+
+ /* Check for PF2VF interrupt */
+ if (v_int & ADF_VINTSOU_PF2VF) {
+ /* Disable PF to VF interrupt */
+ adf_disable_pf2vf_interrupts(accel_dev);
+
+ /* Schedule tasklet to handle interrupt BH */
+ tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
+ handled = true;
+ }
+
+ /* Check bundle interrupt */
+ if (v_int & ADF_VINTSOU_BUN) {
+ struct adf_etr_data *etr_data = accel_dev->transport;
+ struct adf_etr_bank_data *bank = &etr_data->banks[0];
+
+ /* Disable Flag and Coalesce Ring Interrupts */
+ csr_ops->write_csr_int_flag_and_col(bank->csr_addr,
+ bank->bank_number, 0);
+ tasklet_hi_schedule(&bank->resp_handler);
+ handled = true;
+ }
+
+ return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+ unsigned int cpu;
+ int ret;
+
+ snprintf(accel_dev->vf.irq_name, ADF_MAX_MSIX_VECTOR_NAME,
+ "qat_%02x:%02d.%02d", pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
+ ret = request_irq(pdev->irq, adf_isr, 0, accel_dev->vf.irq_name,
+ (void *)accel_dev);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "failed to enable irq for %s\n",
+ accel_dev->vf.irq_name);
+ return ret;
+ }
+ cpu = accel_dev->accel_id % num_online_cpus();
+ irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu));
+ accel_dev->vf.irq_enabled = true;
+
+ return ret;
+}
+
+static int adf_setup_bh(struct adf_accel_dev *accel_dev)
+{
+ struct adf_etr_data *priv_data = accel_dev->transport;
+
+ tasklet_init(&priv_data->banks[0].resp_handler, adf_response_handler,
+ (unsigned long)priv_data->banks);
+ return 0;
+}
+
+static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
+{
+ struct adf_etr_data *priv_data = accel_dev->transport;
+
+ tasklet_disable(&priv_data->banks[0].resp_handler);
+ tasklet_kill(&priv_data->banks[0].resp_handler);
+}
+
+/**
+ * adf_vf_isr_resource_free() - Free IRQ for acceleration device
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function frees the interrupts of the acceleration device virtual function.
+ */
+void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+ if (accel_dev->vf.irq_enabled) {
+ irq_set_affinity_hint(pdev->irq, NULL);
+ free_irq(pdev->irq, accel_dev);
+ }
+ adf_cleanup_bh(accel_dev);
+ adf_cleanup_pf2vf_bh(accel_dev);
+ adf_disable_msi(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_vf_isr_resource_free);
+
+/**
+ * adf_vf_isr_resource_alloc() - Allocate IRQ for acceleration device
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function allocates the interrupts for the acceleration device virtual function.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
+{
+ if (adf_enable_msi(accel_dev))
+ goto err_out;
+
+ if (adf_setup_pf2vf_bh(accel_dev))
+ goto err_disable_msi;
+
+ if (adf_setup_bh(accel_dev))
+ goto err_cleanup_pf2vf_bh;
+
+ if (adf_request_msi_irq(accel_dev))
+ goto err_cleanup_bh;
+
+ return 0;
+
+err_cleanup_bh:
+ adf_cleanup_bh(accel_dev);
+
+err_cleanup_pf2vf_bh:
+ adf_cleanup_pf2vf_bh(accel_dev);
+
+err_disable_msi:
+ adf_disable_msi(accel_dev);
+
+err_out:
+ return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
+
+/**
+ * adf_flush_vf_wq() - Flush workqueue for VF
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function disables the PF/VF interrupts on the VF so that no new messages
+ * are received and flushes the workqueue 'adf_vf_stop_wq'.
+ *
+ * Return: void.
+ */
+void adf_flush_vf_wq(struct adf_accel_dev *accel_dev)
+{
+ adf_disable_pf2vf_interrupts(accel_dev);
+
+ flush_workqueue(adf_vf_stop_wq);
+}
+EXPORT_SYMBOL_GPL(adf_flush_vf_wq);
+
+/**
+ * adf_init_vf_wq() - Init workqueue for VF
+ *
+ * Function initializes the workqueue 'adf_vf_stop_wq' for the VF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int __init adf_init_vf_wq(void)
+{
+ adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0);
+
+ return !adf_vf_stop_wq ? -EFAULT : 0;
+}
+
+void adf_exit_vf_wq(void)
+{
+ if (adf_vf_stop_wq)
+ destroy_workqueue(adf_vf_stop_wq);
+
+ adf_vf_stop_wq = NULL;
+}
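
A sketch of how the exported VF helpers above fit together; the example_* names are hypothetical, and the real wiring lives in the VF PCI drivers, which may order these calls differently.

static int __init example_vf_module_init(void)
{
        /* Create the shared stop workqueue before any VF device probes. */
        return adf_init_vf_wq();
}

static int example_vf_probe(struct adf_accel_dev *accel_dev)
{
        /* Enables MSI, sets up the PF2VF and bundle tasklets and requests
         * the single VF IRQ handled by adf_isr() above. */
        return adf_vf_isr_resource_alloc(accel_dev);
}

static void example_vf_remove(struct adf_accel_dev *accel_dev)
{
        /* Stop accepting PF messages and drain pending restart work before
         * releasing the IRQ, tasklets and MSI vector. */
        adf_flush_vf_wq(accel_dev);
        adf_vf_isr_resource_free(accel_dev);
}

static void __exit example_vf_module_exit(void)
{
        adf_exit_vf_wq();
}
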
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw.h
new file mode 100644
index 0000000000..c141160421
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef _ICP_QAT_FW_H_
+#define _ICP_QAT_FW_H_
+#include <linux/types.h>
+#include "icp_qat_hw.h"
+
+#define QAT_FIELD_SET(flags, val, bitpos, mask) \
+{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \
+ (((val) & (mask)) << (bitpos))) ; }
+
+#define QAT_FIELD_GET(flags, bitpos, mask) \
+ (((flags) >> (bitpos)) & (mask))
+
+#define ICP_QAT_FW_REQ_DEFAULT_SZ 128
+#define ICP_QAT_FW_RESP_DEFAULT_SZ 32
+#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8
+#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF
+#define ICP_QAT_FW_NUM_LONGWORDS_1 1
+#define ICP_QAT_FW_NUM_LONGWORDS_2 2
+#define ICP_QAT_FW_NUM_LONGWORDS_3 3
+#define ICP_QAT_FW_NUM_LONGWORDS_4 4
+#define ICP_QAT_FW_NUM_LONGWORDS_5 5
+#define ICP_QAT_FW_NUM_LONGWORDS_6 6
+#define ICP_QAT_FW_NUM_LONGWORDS_7 7
+#define ICP_QAT_FW_NUM_LONGWORDS_10 10
+#define ICP_QAT_FW_NUM_LONGWORDS_13 13
+#define ICP_QAT_FW_NULL_REQ_SERV_ID 1
+
+enum icp_qat_fw_comn_resp_serv_id {
+ ICP_QAT_FW_COMN_RESP_SERV_NULL,
+ ICP_QAT_FW_COMN_RESP_SERV_CPM_FW,
+ ICP_QAT_FW_COMN_RESP_SERV_DELIMITER
+};
+
+enum icp_qat_fw_comn_request_id {
+ ICP_QAT_FW_COMN_REQ_NULL = 0,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9,
+ ICP_QAT_FW_COMN_REQ_DELIMITER
+};
+
+struct icp_qat_fw_comn_req_hdr_cd_pars {
+ union {
+ struct {
+ __u64 content_desc_addr;
+ __u16 content_desc_resrvd1;
+ __u8 content_desc_params_sz;
+ __u8 content_desc_hdr_resrvd2;
+ __u32 content_desc_resrvd3;
+ } s;
+ struct {
+ __u32 serv_specif_fields[4];
+ } s1;
+ } u;
+};
+
+struct icp_qat_fw_comn_req_mid {
+ __u64 opaque_data;
+ __u64 src_data_addr;
+ __u64 dest_data_addr;
+ __u32 src_length;
+ __u32 dst_length;
+};
+
+struct icp_qat_fw_comn_req_cd_ctrl {
+ __u32 content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5];
+};
+
+struct icp_qat_fw_comn_req_hdr {
+ __u8 resrvd1;
+ __u8 service_cmd_id;
+ __u8 service_type;
+ __u8 hdr_flags;
+ __u16 serv_specif_flags;
+ __u16 comn_req_flags;
+};
+
+struct icp_qat_fw_comn_req_rqpars {
+ __u32 serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13];
+};
+
+struct icp_qat_fw_comn_req {
+ struct icp_qat_fw_comn_req_hdr comn_hdr;
+ struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+ struct icp_qat_fw_comn_req_mid comn_mid;
+ struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+ struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+struct icp_qat_fw_comn_error {
+ __u8 xlat_err_code;
+ __u8 cmp_err_code;
+};
+
+struct icp_qat_fw_comn_resp_hdr {
+ __u8 resrvd1;
+ __u8 service_id;
+ __u8 response_type;
+ __u8 hdr_flags;
+ struct icp_qat_fw_comn_error comn_error;
+ __u8 comn_status;
+ __u8 cmd_id;
+};
+
+struct icp_qat_fw_comn_resp {
+ struct icp_qat_fw_comn_resp_hdr comn_hdr;
+ __u64 opaque_data;
+ __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1
+#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
+#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F
+#define ICP_QAT_FW_COMN_CNV_FLAG_BITPOS 6
+#define ICP_QAT_FW_COMN_CNV_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS 5
+#define ICP_QAT_FW_COMN_CNVNR_FLAG_MASK 0x1
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
+ icp_qat_fw_comn_req_hdr_t.service_type
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \
+ icp_qat_fw_comn_req_hdr_t.service_type = val
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \
+ icp_qat_fw_comn_req_hdr_t.service_cmd_id
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \
+ icp_qat_fw_comn_req_hdr_t.service_cmd_id = val
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \
+ ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags)
+
+#define ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_GET(hdr_flags) \
+ QAT_FIELD_GET(hdr_flags, \
+ ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_CNVNR_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_SET(hdr_t, val) \
+ QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+ ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_CNVNR_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(hdr_flags) \
+ QAT_FIELD_GET(hdr_flags, \
+ ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_CNV_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_CNV_FLAG_SET(hdr_t, val) \
+ QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+ ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_CNV_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \
+ ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \
+ QAT_FIELD_GET(hdr_flags, \
+ ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \
+ (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \
+ QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+ ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \
+ (((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
+ ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
+
+#define QAT_COMN_PTR_TYPE_BITPOS 0
+#define QAT_COMN_PTR_TYPE_MASK 0x1
+#define QAT_COMN_CD_FLD_TYPE_BITPOS 1
+#define QAT_COMN_CD_FLD_TYPE_MASK 0x1
+#define QAT_COMN_PTR_TYPE_FLAT 0x0
+#define QAT_COMN_PTR_TYPE_SGL 0x1
+#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0
+#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1
+
+#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
+ ((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
+ | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+ QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \
+ QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+ QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4
+#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0
+#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0
+#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F
+
+#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \
+ ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+ >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+ { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+ ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK)); }
+
+#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \
+ (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+ { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+ ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); }
+
+#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
+#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
+#define QAT_COMN_RESP_PKE_STATUS_BITPOS 6
+#define QAT_COMN_RESP_PKE_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
+#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
+#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
+#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
+
+#define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \
+ ((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \
+ QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \
+ (((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \
+ QAT_COMN_RESP_CMP_STATUS_BITPOS) | \
+ (((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \
+ QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \
+ (((eolb) & QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \
+ QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS))
+
+#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \
+ QAT_COMN_RESP_CRYPTO_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \
+ QAT_COMN_RESP_CMP_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \
+ QAT_COMN_RESP_XLAT_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \
+ QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK)
+
+#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
+#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1
+#define ERR_CODE_NO_ERROR 0
+#define ERR_CODE_INVALID_BLOCK_TYPE -1
+#define ERR_CODE_NO_MATCH_ONES_COMP -2
+#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3
+#define ERR_CODE_INCOMPLETE_LEN -4
+#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5
+#define ERR_CODE_RPT_GT_SPEC_LEN -6
+#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7
+#define ERR_CODE_INV_DIS_CODE_LEN -8
+#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9
+#define ERR_CODE_DIS_TOO_FAR_BACK -10
+#define ERR_CODE_OVERFLOW_ERROR -11
+#define ERR_CODE_SOFT_ERROR -12
+#define ERR_CODE_FATAL_ERROR -13
+#define ERR_CODE_SSM_ERROR -14
+#define ERR_CODE_ENDPOINT_ERROR -15
+
+enum icp_qat_fw_slice {
+ ICP_QAT_FW_SLICE_NULL = 0,
+ ICP_QAT_FW_SLICE_CIPHER = 1,
+ ICP_QAT_FW_SLICE_AUTH = 2,
+ ICP_QAT_FW_SLICE_DRAM_RD = 3,
+ ICP_QAT_FW_SLICE_DRAM_WR = 4,
+ ICP_QAT_FW_SLICE_COMP = 5,
+ ICP_QAT_FW_SLICE_XLAT = 6,
+ ICP_QAT_FW_SLICE_DELIMITER
+};
+#endif
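
Two small illustrations of the field macros above; the helper names are hypothetical, and the chosen flag combination is only one of many a caller could build.

static u16 example_comn_flags(void)
{
        /* SGL source/destination, 64-bit content-descriptor pointer:
         * (0 << 1) | (1 << 0) == 0x1. */
        return ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
                                           QAT_COMN_PTR_TYPE_SGL);
}

static void example_mark_valid(struct icp_qat_fw_comn_req_hdr *hdr)
{
        /* Sets only the valid bit (bit 7), so hdr_flags becomes 0x80. */
        hdr->hdr_flags =
                ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
}
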
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
new file mode 100644
index 0000000000..a03d43fef2
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
@@ -0,0 +1,404 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef _ICP_QAT_FW_COMP_H_
+#define _ICP_QAT_FW_COMP_H_
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_comp_cmd_id {
+ ICP_QAT_FW_COMP_CMD_STATIC = 0,
+ ICP_QAT_FW_COMP_CMD_DYNAMIC = 1,
+ ICP_QAT_FW_COMP_CMD_DECOMPRESS = 2,
+ ICP_QAT_FW_COMP_CMD_DELIMITER
+};
+
+enum icp_qat_fw_comp_20_cmd_id {
+ ICP_QAT_FW_COMP_20_CMD_LZ4_COMPRESS = 3,
+ ICP_QAT_FW_COMP_20_CMD_LZ4_DECOMPRESS = 4,
+ ICP_QAT_FW_COMP_20_CMD_LZ4S_COMPRESS = 5,
+ ICP_QAT_FW_COMP_20_CMD_LZ4S_DECOMPRESS = 6,
+ ICP_QAT_FW_COMP_20_CMD_XP10_COMPRESS = 7,
+ ICP_QAT_FW_COMP_20_CMD_XP10_DECOMPRESS = 8,
+ ICP_QAT_FW_COMP_20_CMD_RESERVED_9 = 9,
+ ICP_QAT_FW_COMP_23_CMD_ZSTD_COMPRESS = 10,
+ ICP_QAT_FW_COMP_23_CMD_ZSTD_DECOMPRESS = 11,
+ ICP_QAT_FW_COMP_20_CMD_DELIMITER
+};
+
+#define ICP_QAT_FW_COMP_STATELESS_SESSION 0
+#define ICP_QAT_FW_COMP_STATEFUL_SESSION 1
+#define ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST 0
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST 1
+#define ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST 0
+#define ICP_QAT_FW_COMP_ENH_AUTO_SELECT_BEST 1
+#define ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 0
+#define ICP_QAT_FW_COMP_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 1
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF 1
+#define ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF 0
+#define ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS 2
+#define ICP_QAT_FW_COMP_SESSION_TYPE_MASK 0x1
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS 3
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK 0x1
+#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS 4
+#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK 0x1
+#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS 5
+#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK 0x1
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS 7
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK 0x1
+
+#define ICP_QAT_FW_COMP_FLAGS_BUILD(sesstype, autoselect, enhanced_asb, \
+ ret_uncomp, secure_ram) \
+ ((((sesstype) & ICP_QAT_FW_COMP_SESSION_TYPE_MASK) << \
+ ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS) | \
+ (((autoselect) & ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK) << \
+ ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS) | \
+ (((enhanced_asb) & ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK) << \
+ ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS) | \
+ (((ret_uncomp) & ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK) << \
+ ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS) | \
+ (((secure_ram) & ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK) << \
+ ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS))
+
+#define ICP_QAT_FW_COMP_SESSION_TYPE_GET(flags) \
+ QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS, \
+ ICP_QAT_FW_COMP_SESSION_TYPE_MASK)
+
+#define ICP_QAT_FW_COMP_SESSION_TYPE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS, \
+ ICP_QAT_FW_COMP_SESSION_TYPE_MASK)
+
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_GET(flags) \
+ QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS, \
+ ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK)
+
+#define ICP_QAT_FW_COMP_EN_ASB_GET(flags) \
+ QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS, \
+ ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK)
+
+#define ICP_QAT_FW_COMP_RET_UNCOMP_GET(flags) \
+ QAT_FIELD_GET(flags, \
+ ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS, \
+ ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK)
+
+#define ICP_QAT_FW_COMP_SECURE_RAM_USE_GET(flags) \
+ QAT_FIELD_GET(flags, \
+ ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS, \
+ ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK)
+
+struct icp_qat_fw_comp_req_hdr_cd_pars {
+ union {
+ struct {
+ __u64 content_desc_addr;
+ __u16 content_desc_resrvd1;
+ __u8 content_desc_params_sz;
+ __u8 content_desc_hdr_resrvd2;
+ __u32 content_desc_resrvd3;
+ } s;
+ struct {
+ __u32 comp_slice_cfg_word[ICP_QAT_FW_NUM_LONGWORDS_2];
+ __u32 content_desc_resrvd4;
+ } sl;
+ } u;
+};
+
+struct icp_qat_fw_comp_req_params {
+ __u32 comp_len;
+ __u32 out_buffer_sz;
+ union {
+ struct {
+ __u32 initial_crc32;
+ __u32 initial_adler;
+ } legacy;
+ __u64 crc_data_addr;
+ } crc;
+ __u32 req_par_flags;
+ __u32 rsrvd;
+};
+
+#define ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(sop, eop, bfinal, cnv, cnvnr, \
+ cnvdfx, crc, xxhash_acc, \
+ cnv_error_type, append_crc, \
+ drop_data) \
+ ((((sop) & ICP_QAT_FW_COMP_SOP_MASK) << \
+ ICP_QAT_FW_COMP_SOP_BITPOS) | \
+ (((eop) & ICP_QAT_FW_COMP_EOP_MASK) << \
+ ICP_QAT_FW_COMP_EOP_BITPOS) | \
+ (((bfinal) & ICP_QAT_FW_COMP_BFINAL_MASK) \
+ << ICP_QAT_FW_COMP_BFINAL_BITPOS) | \
+ (((cnv) & ICP_QAT_FW_COMP_CNV_MASK) << \
+ ICP_QAT_FW_COMP_CNV_BITPOS) | \
+ (((cnvnr) & ICP_QAT_FW_COMP_CNVNR_MASK) \
+ << ICP_QAT_FW_COMP_CNVNR_BITPOS) | \
+ (((cnvdfx) & ICP_QAT_FW_COMP_CNV_DFX_MASK) \
+ << ICP_QAT_FW_COMP_CNV_DFX_BITPOS) | \
+ (((crc) & ICP_QAT_FW_COMP_CRC_MODE_MASK) \
+ << ICP_QAT_FW_COMP_CRC_MODE_BITPOS) | \
+ (((xxhash_acc) & ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK) \
+ << ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS) | \
+ (((cnv_error_type) & ICP_QAT_FW_COMP_CNV_ERROR_MASK) \
+ << ICP_QAT_FW_COMP_CNV_ERROR_BITPOS) | \
+ (((append_crc) & ICP_QAT_FW_COMP_APPEND_CRC_MASK) \
+ << ICP_QAT_FW_COMP_APPEND_CRC_BITPOS) | \
+ (((drop_data) & ICP_QAT_FW_COMP_DROP_DATA_MASK) \
+ << ICP_QAT_FW_COMP_DROP_DATA_BITPOS))
+
+#define ICP_QAT_FW_COMP_NOT_SOP 0
+#define ICP_QAT_FW_COMP_SOP 1
+#define ICP_QAT_FW_COMP_NOT_EOP 0
+#define ICP_QAT_FW_COMP_EOP 1
+#define ICP_QAT_FW_COMP_NOT_BFINAL 0
+#define ICP_QAT_FW_COMP_BFINAL 1
+#define ICP_QAT_FW_COMP_NO_CNV 0
+#define ICP_QAT_FW_COMP_CNV 1
+#define ICP_QAT_FW_COMP_NO_CNV_RECOVERY 0
+#define ICP_QAT_FW_COMP_CNV_RECOVERY 1
+#define ICP_QAT_FW_COMP_NO_CNV_DFX 0
+#define ICP_QAT_FW_COMP_CNV_DFX 1
+#define ICP_QAT_FW_COMP_CRC_MODE_LEGACY 0
+#define ICP_QAT_FW_COMP_CRC_MODE_E2E 1
+#define ICP_QAT_FW_COMP_NO_XXHASH_ACC 0
+#define ICP_QAT_FW_COMP_XXHASH_ACC 1
+#define ICP_QAT_FW_COMP_APPEND_CRC 1
+#define ICP_QAT_FW_COMP_NO_APPEND_CRC 0
+#define ICP_QAT_FW_COMP_DROP_DATA 1
+#define ICP_QAT_FW_COMP_NO_DROP_DATA 0
+#define ICP_QAT_FW_COMP_SOP_BITPOS 0
+#define ICP_QAT_FW_COMP_SOP_MASK 0x1
+#define ICP_QAT_FW_COMP_EOP_BITPOS 1
+#define ICP_QAT_FW_COMP_EOP_MASK 0x1
+#define ICP_QAT_FW_COMP_BFINAL_BITPOS 6
+#define ICP_QAT_FW_COMP_BFINAL_MASK 0x1
+#define ICP_QAT_FW_COMP_CNV_BITPOS 16
+#define ICP_QAT_FW_COMP_CNV_MASK 0x1
+#define ICP_QAT_FW_COMP_CNVNR_BITPOS 17
+#define ICP_QAT_FW_COMP_CNVNR_MASK 0x1
+#define ICP_QAT_FW_COMP_CNV_DFX_BITPOS 18
+#define ICP_QAT_FW_COMP_CNV_DFX_MASK 0x1
+#define ICP_QAT_FW_COMP_CRC_MODE_BITPOS 19
+#define ICP_QAT_FW_COMP_CRC_MODE_MASK 0x1
+#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS 20
+#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK 0x1
+#define ICP_QAT_FW_COMP_CNV_ERROR_BITPOS 21
+#define ICP_QAT_FW_COMP_CNV_ERROR_MASK 0b111
+#define ICP_QAT_FW_COMP_CNV_ERROR_NONE 0b000
+#define ICP_QAT_FW_COMP_CNV_ERROR_CHECKSUM 0b001
+#define ICP_QAT_FW_COMP_CNV_ERROR_DCPR_OBC_DIFF 0b010
+#define ICP_QAT_FW_COMP_CNV_ERROR_DCPR 0b011
+#define ICP_QAT_FW_COMP_CNV_ERROR_XLT 0b100
+#define ICP_QAT_FW_COMP_CNV_ERROR_DCPR_IBC_DIFF 0b101
+#define ICP_QAT_FW_COMP_APPEND_CRC_BITPOS 24
+#define ICP_QAT_FW_COMP_APPEND_CRC_MASK 0x1
+#define ICP_QAT_FW_COMP_DROP_DATA_BITPOS 25
+#define ICP_QAT_FW_COMP_DROP_DATA_MASK 0x1
+
+#define ICP_QAT_FW_COMP_SOP_GET(flags) \
+ QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_SOP_BITPOS, \
+ ICP_QAT_FW_COMP_SOP_MASK)
+
+#define ICP_QAT_FW_COMP_SOP_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_SOP_BITPOS, \
+ ICP_QAT_FW_COMP_SOP_MASK)
+
+#define ICP_QAT_FW_COMP_EOP_GET(flags) \
+ QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_EOP_BITPOS, \
+ ICP_QAT_FW_COMP_EOP_MASK)
+
+#define ICP_QAT_FW_COMP_EOP_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_EOP_BITPOS, \
+ ICP_QAT_FW_COMP_EOP_MASK)
+
+#define ICP_QAT_FW_COMP_BFINAL_GET(flags) \
+ QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_BFINAL_BITPOS, \
+ ICP_QAT_FW_COMP_BFINAL_MASK)
+
+#define ICP_QAT_FW_COMP_BFINAL_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_BFINAL_BITPOS, \
+ ICP_QAT_FW_COMP_BFINAL_MASK)
+
+#define ICP_QAT_FW_COMP_CNV_GET(flags) \
+ QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNV_BITPOS, \
+ ICP_QAT_FW_COMP_CNV_MASK)
+
+#define ICP_QAT_FW_COMP_CNVNR_GET(flags) \
+ QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNVNR_BITPOS, \
+ ICP_QAT_FW_COMP_CNVNR_MASK)
+
+#define ICP_QAT_FW_COMP_CNV_DFX_GET(flags) \
+ QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNV_DFX_BITPOS, \
+ ICP_QAT_FW_COMP_CNV_DFX_MASK)
+
+#define ICP_QAT_FW_COMP_CNV_DFX_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_CNV_DFX_BITPOS, \
+ ICP_QAT_FW_COMP_CNV_DFX_MASK)
+
+#define ICP_QAT_FW_COMP_CRC_MODE_GET(flags) \
+ QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CRC_MODE_BITPOS, \
+ ICP_QAT_FW_COMP_CRC_MODE_MASK)
+
+#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_GET(flags) \
+ QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS, \
+ ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK)
+
+#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS, \
+ ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK)
+
+#define ICP_QAT_FW_COMP_CNV_ERROR_TYPE_GET(flags) \
+ QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNV_ERROR_BITPOS, \
+ ICP_QAT_FW_COMP_CNV_ERROR_MASK)
+
+#define ICP_QAT_FW_COMP_CNV_ERROR_TYPE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_CNV_ERROR_BITPOS, \
+ ICP_QAT_FW_COMP_CNV_ERROR_MASK)
+
+struct icp_qat_fw_xlt_req_params {
+ __u64 inter_buff_ptr;
+};
+
+struct icp_qat_fw_comp_cd_hdr {
+ __u16 ram_bank_flags;
+ __u8 comp_cfg_offset;
+ __u8 next_curr_id;
+ __u32 resrvd;
+ __u64 comp_state_addr;
+ __u64 ram_banks_addr;
+};
+
+#define COMP_CPR_INITIAL_CRC 0
+#define COMP_CPR_INITIAL_ADLER 1
+
+struct icp_qat_fw_xlt_cd_hdr {
+ __u16 resrvd1;
+ __u8 resrvd2;
+ __u8 next_curr_id;
+ __u32 resrvd3;
+};
+
+struct icp_qat_fw_comp_req {
+ struct icp_qat_fw_comn_req_hdr comn_hdr;
+ struct icp_qat_fw_comp_req_hdr_cd_pars cd_pars;
+ struct icp_qat_fw_comn_req_mid comn_mid;
+ struct icp_qat_fw_comp_req_params comp_pars;
+ union {
+ struct icp_qat_fw_xlt_req_params xlt_pars;
+ __u32 resrvd1[ICP_QAT_FW_NUM_LONGWORDS_2];
+ } u1;
+ __u32 resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2];
+ struct icp_qat_fw_comp_cd_hdr comp_cd_ctrl;
+ union {
+ struct icp_qat_fw_xlt_cd_hdr xlt_cd_ctrl;
+ __u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_2];
+ } u2;
+};
+
+struct icp_qat_fw_resp_comp_pars {
+ __u32 input_byte_counter;
+ __u32 output_byte_counter;
+ union {
+ struct {
+ __u32 curr_crc32;
+ __u32 curr_adler_32;
+ } legacy;
+ __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_2];
+ } crc;
+};
+
+struct icp_qat_fw_comp_state {
+ __u32 rd8_counter;
+ __u32 status_flags;
+ __u32 in_counter;
+ __u32 out_counter;
+ __u64 intermediate_state;
+ __u32 lobc;
+ __u32 replaybc;
+ __u64 pcrc64_poly;
+ __u32 crc32;
+ __u32 adler_xxhash32;
+ __u64 pcrc64_xorout;
+ __u32 out_buf_size;
+ __u32 in_buf_size;
+ __u64 in_pcrc64;
+ __u64 out_pcrc64;
+ __u32 lobs;
+ __u32 libc;
+ __u64 reserved;
+ __u32 xxhash_state[4];
+ __u32 cleartext[4];
+};
+
+struct icp_qat_fw_comp_resp {
+ struct icp_qat_fw_comn_resp_hdr comn_resp;
+ __u64 opaque_data;
+ struct icp_qat_fw_resp_comp_pars comp_resp_pars;
+};
+
+#define QAT_FW_COMP_BANK_FLAG_MASK 0x1
+#define QAT_FW_COMP_BANK_I_BITPOS 8
+#define QAT_FW_COMP_BANK_H_BITPOS 7
+#define QAT_FW_COMP_BANK_G_BITPOS 6
+#define QAT_FW_COMP_BANK_F_BITPOS 5
+#define QAT_FW_COMP_BANK_E_BITPOS 4
+#define QAT_FW_COMP_BANK_D_BITPOS 3
+#define QAT_FW_COMP_BANK_C_BITPOS 2
+#define QAT_FW_COMP_BANK_B_BITPOS 1
+#define QAT_FW_COMP_BANK_A_BITPOS 0
+
+enum icp_qat_fw_comp_bank_enabled {
+ ICP_QAT_FW_COMP_BANK_DISABLED = 0,
+ ICP_QAT_FW_COMP_BANK_ENABLED = 1,
+ ICP_QAT_FW_COMP_BANK_DELIMITER = 2
+};
+
+#define ICP_QAT_FW_COMP_RAM_FLAGS_BUILD(bank_i_enable, bank_h_enable, \
+ bank_g_enable, bank_f_enable, \
+ bank_e_enable, bank_d_enable, \
+ bank_c_enable, bank_b_enable, \
+ bank_a_enable) \
+ ((((bank_i_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+ QAT_FW_COMP_BANK_I_BITPOS) | \
+ (((bank_h_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+ QAT_FW_COMP_BANK_H_BITPOS) | \
+ (((bank_g_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+ QAT_FW_COMP_BANK_G_BITPOS) | \
+ (((bank_f_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+ QAT_FW_COMP_BANK_F_BITPOS) | \
+ (((bank_e_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+ QAT_FW_COMP_BANK_E_BITPOS) | \
+ (((bank_d_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+ QAT_FW_COMP_BANK_D_BITPOS) | \
+ (((bank_c_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+ QAT_FW_COMP_BANK_C_BITPOS) | \
+ (((bank_b_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+ QAT_FW_COMP_BANK_B_BITPOS) | \
+ (((bank_a_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+ QAT_FW_COMP_BANK_A_BITPOS))
+
+struct icp_qat_fw_comp_crc_data_struct {
+ __u32 crc32;
+ union {
+ __u32 adler;
+ __u32 xxhash;
+ } adler_xxhash_u;
+ __u32 cpr_in_crc_lo;
+ __u32 cpr_in_crc_hi;
+ __u32 cpr_out_crc_lo;
+ __u32 cpr_out_crc_hi;
+ __u32 xlt_in_crc_lo;
+ __u32 xlt_in_crc_hi;
+ __u32 xlt_out_crc_lo;
+ __u32 xlt_out_crc_hi;
+ __u32 prog_crc_poly_lo;
+ __u32 prog_crc_poly_hi;
+ __u32 xor_out_lo;
+ __u32 xor_out_hi;
+ __u32 append_crc_lo;
+ __u32 append_crc_hi;
+};
+
+struct xxhash_acc_state_buff {
+ __u32 in_counter;
+ __u32 out_counter;
+ __u32 xxhash_state[4];
+ __u32 clear_txt[4];
+};
+
+#endif
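
An illustrative filler for the compression flag words defined above; the particular combination (stateless single-shot deflate with compress-and-verify plus recovery) is an assumption made for the example, not a statement about what any given firmware requires.

static void example_fill_deflate_flags(struct icp_qat_fw_comp_req *req)
{
        /* Session-level flags placed in the common header. */
        req->comn_hdr.serv_specif_flags =
                ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION,
                                            ICP_QAT_FW_COMP_AUTO_SELECT_BEST,
                                            ICP_QAT_FW_COMP_ENH_AUTO_SELECT_BEST,
                                            ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
                                            ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);

        /* Per-request flags: SOP, EOP and BFINAL for a single-shot job,
         * compress-and-verify with recovery, everything else left off. */
        req->comp_pars.req_par_flags =
                ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(ICP_QAT_FW_COMP_SOP,
                                                      ICP_QAT_FW_COMP_EOP,
                                                      ICP_QAT_FW_COMP_BFINAL,
                                                      ICP_QAT_FW_COMP_CNV,
                                                      ICP_QAT_FW_COMP_CNV_RECOVERY,
                                                      ICP_QAT_FW_COMP_NO_CNV_DFX,
                                                      ICP_QAT_FW_COMP_CRC_MODE_LEGACY,
                                                      ICP_QAT_FW_COMP_NO_XXHASH_ACC,
                                                      ICP_QAT_FW_COMP_CNV_ERROR_NONE,
                                                      ICP_QAT_FW_COMP_NO_APPEND_CRC,
                                                      ICP_QAT_FW_COMP_NO_DROP_DATA);
}
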
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
new file mode 100644
index 0000000000..019a644383
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef _ICP_QAT_FW_INIT_ADMIN_H_
+#define _ICP_QAT_FW_INIT_ADMIN_H_
+
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_init_admin_cmd_id {
+ ICP_QAT_FW_INIT_AE = 0,
+ ICP_QAT_FW_TRNG_ENABLE = 1,
+ ICP_QAT_FW_TRNG_DISABLE = 2,
+ ICP_QAT_FW_CONSTANTS_CFG = 3,
+ ICP_QAT_FW_STATUS_GET = 4,
+ ICP_QAT_FW_COUNTERS_GET = 5,
+ ICP_QAT_FW_LOOPBACK = 6,
+ ICP_QAT_FW_HEARTBEAT_SYNC = 7,
+ ICP_QAT_FW_HEARTBEAT_GET = 8,
+ ICP_QAT_FW_COMP_CAPABILITY_GET = 9,
+ ICP_QAT_FW_DC_CHAIN_INIT = 11,
+ ICP_QAT_FW_HEARTBEAT_TIMER_SET = 13,
+ ICP_QAT_FW_TIMER_GET = 19,
+ ICP_QAT_FW_PM_STATE_CONFIG = 128,
+};
+
+enum icp_qat_fw_init_admin_resp_status {
+ ICP_QAT_FW_INIT_RESP_STATUS_SUCCESS = 0,
+ ICP_QAT_FW_INIT_RESP_STATUS_FAIL
+};
+
+struct icp_qat_fw_init_admin_req {
+ __u16 init_cfg_sz;
+ __u8 resrvd1;
+ __u8 cmd_id;
+ __u32 resrvd2;
+ __u64 opaque_data;
+ __u64 init_cfg_ptr;
+
+ union {
+ struct {
+ __u16 ibuf_size_in_kb;
+ __u16 resrvd3;
+ };
+ struct {
+ __u32 int_timer_ticks;
+ };
+ struct {
+ __u32 heartbeat_ticks;
+ };
+ __u32 idle_filter;
+ };
+
+ __u32 resrvd4;
+} __packed;
+
+struct icp_qat_fw_init_admin_resp {
+ __u8 flags;
+ __u8 resrvd1;
+ __u8 status;
+ __u8 cmd_id;
+ union {
+ __u32 resrvd2;
+ struct {
+ __u16 version_minor_num;
+ __u16 version_major_num;
+ };
+ __u32 extended_features;
+ };
+ __u64 opaque_data;
+ union {
+ __u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_4];
+ struct {
+ __u32 version_patch_num;
+ __u8 context_id;
+ __u8 ae_id;
+ __u16 resrvd4;
+ __u64 resrvd5;
+ };
+ struct {
+ __u64 req_rec_count;
+ __u64 resp_sent_count;
+ };
+ struct {
+ __u16 compression_algos;
+ __u16 checksum_algos;
+ __u32 deflate_capabilities;
+ __u32 resrvd6;
+ __u32 lzs_capabilities;
+ };
+ struct {
+ __u32 cipher_algos;
+ __u32 hash_algos;
+ __u16 keygen_algos;
+ __u16 other;
+ __u16 public_key_algos;
+ __u16 prime_algos;
+ };
+ struct {
+ __u64 timestamp;
+ __u64 resrvd7;
+ };
+ struct {
+ __u32 successful_count;
+ __u32 unsuccessful_count;
+ __u64 resrvd8;
+ };
+ };
+} __packed;
+
+#define ICP_QAT_FW_SYNC ICP_QAT_FW_HEARTBEAT_SYNC
+
+#endif
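
A small sketch of reading back the version fields packed into the response unions; it assumes a successful status-style query populated them and uses the ordinary pr_info() helper.

static void example_report_fw_version(struct icp_qat_fw_init_admin_resp *resp)
{
        /* version_major/minor share a union slot with resrvd2 and
         * extended_features, and version_patch_num sits in the second
         * union, so they are only meaningful for the command that
         * actually returns them. */
        if (resp->status != ICP_QAT_FW_INIT_RESP_STATUS_SUCCESS)
                return;

        pr_info("QAT firmware %u.%u.%u\n", resp->version_major_num,
                resp->version_minor_num, resp->version_patch_num);
}
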
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_la.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_la.h
new file mode 100644
index 0000000000..28fa17f14b
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_la.h
@@ -0,0 +1,367 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef _ICP_QAT_FW_LA_H_
+#define _ICP_QAT_FW_LA_H_
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_la_cmd_id {
+ ICP_QAT_FW_LA_CMD_CIPHER = 0,
+ ICP_QAT_FW_LA_CMD_AUTH = 1,
+ ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2,
+ ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3,
+ ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4,
+ ICP_QAT_FW_LA_CMD_TRNG_TEST = 5,
+ ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6,
+ ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7,
+ ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8,
+ ICP_QAT_FW_LA_CMD_MGF1 = 9,
+ ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10,
+ ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11,
+ ICP_QAT_FW_LA_CMD_DELIMITER = 12
+};
+
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+
+struct icp_qat_fw_la_bulk_req {
+ struct icp_qat_fw_comn_req_hdr comn_hdr;
+ struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+ struct icp_qat_fw_comn_req_mid comn_mid;
+ struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+ struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+#define ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE 1
+#define QAT_LA_SLICE_TYPE_BITPOS 14
+#define QAT_LA_SLICE_TYPE_MASK 0x3
+#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1
+#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1
+#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11
+#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1
+#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0
+#define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10
+#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1
+#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4
+#define ICP_QAT_FW_LA_GCM_PROTO 2
+#define ICP_QAT_FW_LA_CCM_PROTO 1
+#define ICP_QAT_FW_LA_NO_PROTO 0
+#define QAT_LA_PROTO_BITPOS 7
+#define QAT_LA_PROTO_MASK 0x7
+#define ICP_QAT_FW_LA_CMP_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0
+#define QAT_LA_CMP_AUTH_RES_BITPOS 6
+#define QAT_LA_CMP_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_RET_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0
+#define QAT_LA_RET_AUTH_RES_BITPOS 5
+#define QAT_LA_RET_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_UPDATE_STATE 1
+#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0
+#define QAT_LA_UPDATE_STATE_BITPOS 4
+#define QAT_LA_UPDATE_STATE_MASK 0x1
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1
+#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0
+#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1
+#define QAT_LA_CIPH_IV_FLD_BITPOS 2
+#define QAT_LA_CIPH_IV_FLD_MASK 0x1
+#define ICP_QAT_FW_LA_PARTIAL_NONE 0
+#define ICP_QAT_FW_LA_PARTIAL_START 1
+#define ICP_QAT_FW_LA_PARTIAL_MID 3
+#define ICP_QAT_FW_LA_PARTIAL_END 2
+#define QAT_LA_PARTIAL_BITPOS 0
+#define QAT_LA_PARTIAL_MASK 0x3
+#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
+ cmp_auth, ret_auth, update_state, \
+ ciph_iv, ciphcfg, partial) \
+ (((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \
+ QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \
+ ((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \
+ QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \
+ ((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \
+ QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \
+ ((proto & QAT_LA_PROTO_MASK) << \
+ QAT_LA_PROTO_BITPOS) | \
+ ((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \
+ QAT_LA_CMP_AUTH_RES_BITPOS) | \
+ ((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \
+ QAT_LA_RET_AUTH_RES_BITPOS) | \
+ ((update_state & QAT_LA_UPDATE_STATE_MASK) << \
+ QAT_LA_UPDATE_STATE_BITPOS) | \
+ ((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \
+ QAT_LA_CIPH_IV_FLD_BITPOS) | \
+ ((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \
+ QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \
+ ((partial & QAT_LA_PARTIAL_MASK) << \
+ QAT_LA_PARTIAL_BITPOS))
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \
+ QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+ QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+ QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+ QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \
+ QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \
+ QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+ QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \
+ QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \
+ QAT_LA_PARTIAL_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \
+ QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+ QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+ QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+ QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \
+ QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \
+ QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \
+ QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+ QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \
+ QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
+ QAT_LA_PARTIAL_MASK)
+
+#define ICP_QAT_FW_LA_SLICE_TYPE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_SLICE_TYPE_BITPOS, \
+ QAT_LA_SLICE_TYPE_MASK)
+
+struct icp_qat_fw_cipher_req_hdr_cd_pars {
+ union {
+ struct {
+ __u64 content_desc_addr;
+ __u16 content_desc_resrvd1;
+ __u8 content_desc_params_sz;
+ __u8 content_desc_hdr_resrvd2;
+ __u32 content_desc_resrvd3;
+ } s;
+ struct {
+ __u32 cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ } s1;
+ } u;
+};
+
+struct icp_qat_fw_cipher_auth_req_hdr_cd_pars {
+ union {
+ struct {
+ __u64 content_desc_addr;
+ __u16 content_desc_resrvd1;
+ __u8 content_desc_params_sz;
+ __u8 content_desc_hdr_resrvd2;
+ __u32 content_desc_resrvd3;
+ } s;
+ struct {
+ __u32 cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ } sl;
+ } u;
+};
+
+struct icp_qat_fw_cipher_cd_ctrl_hdr {
+ __u8 cipher_state_sz;
+ __u8 cipher_key_sz;
+ __u8 cipher_cfg_offset;
+ __u8 next_curr_id;
+ __u8 cipher_padding_sz;
+ __u8 resrvd1;
+ __u16 resrvd2;
+ __u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3];
+};
+
+struct icp_qat_fw_auth_cd_ctrl_hdr {
+ __u32 resrvd1;
+ __u8 resrvd2;
+ __u8 hash_flags;
+ __u8 hash_cfg_offset;
+ __u8 next_curr_id;
+ __u8 resrvd3;
+ __u8 outer_prefix_sz;
+ __u8 final_sz;
+ __u8 inner_res_sz;
+ __u8 resrvd4;
+ __u8 inner_state1_sz;
+ __u8 inner_state2_offset;
+ __u8 inner_state2_sz;
+ __u8 outer_config_offset;
+ __u8 outer_state1_sz;
+ __u8 outer_res_sz;
+ __u8 outer_prefix_offset;
+};
+
+struct icp_qat_fw_cipher_auth_cd_ctrl_hdr {
+ __u8 cipher_state_sz;
+ __u8 cipher_key_sz;
+ __u8 cipher_cfg_offset;
+ __u8 next_curr_id_cipher;
+ __u8 cipher_padding_sz;
+ __u8 hash_flags;
+ __u8 hash_cfg_offset;
+ __u8 next_curr_id_auth;
+ __u8 resrvd1;
+ __u8 outer_prefix_sz;
+ __u8 final_sz;
+ __u8 inner_res_sz;
+ __u8 resrvd2;
+ __u8 inner_state1_sz;
+ __u8 inner_state2_offset;
+ __u8 inner_state2_sz;
+ __u8 outer_config_offset;
+ __u8 outer_state1_sz;
+ __u8 outer_res_sz;
+ __u8 outer_prefix_offset;
+};
+
+#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1
+#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0
+#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX 240
+#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \
+ (sizeof(struct icp_qat_fw_la_cipher_req_params_t))
+#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0)
+
+struct icp_qat_fw_la_cipher_req_params {
+ __u32 cipher_offset;
+ __u32 cipher_length;
+ union {
+ __u32 cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ struct {
+ __u64 cipher_IV_ptr;
+ __u64 resrvd1;
+ } s;
+ } u;
+};
+
+struct icp_qat_fw_la_auth_req_params {
+ __u32 auth_off;
+ __u32 auth_len;
+ union {
+ __u64 auth_partial_st_prefix;
+ __u64 aad_adr;
+ } u1;
+ __u64 auth_res_addr;
+ union {
+ __u8 inner_prefix_sz;
+ __u8 aad_sz;
+ } u2;
+ __u8 resrvd1;
+ __u8 hash_state_sz;
+ __u8 auth_res_sz;
+} __packed;
+
+struct icp_qat_fw_la_auth_req_params_resrvd_flds {
+ __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_6];
+ union {
+ __u8 inner_prefix_sz;
+ __u8 aad_sz;
+ } u2;
+ __u8 resrvd1;
+ __u16 resrvd2;
+};
+
+struct icp_qat_fw_la_resp {
+ struct icp_qat_fw_comn_resp_hdr comn_resp;
+ __u64 opaque_data;
+ __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \
+ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \
+ ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+ ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \
+ (((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+ ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \
+ ((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+ >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+ ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \
+ (((cd_ctrl_hdr_t)->next_curr_id_auth) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+ ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+
+#endif
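
Illustrative sketch (not part of the patch): how the NEXT_ID/CURR_ID accessors above are typically used to chain the cipher and auth slices of a combined content descriptor. The ICP_QAT_FW_SLICE_* identifiers are assumed to come from icp_qat_fw.h.

static void example_chain_cipher_then_auth(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl)
{
	/* cipher slice runs first and hands its output to the auth slice */
	ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_AUTH);

	/* auth slice is last; its result is written straight back to DRAM */
	ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}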
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
new file mode 100644
index 0000000000..7eb5daef4f
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef __ICP_QAT_FW_LOADER_HANDLE_H__
+#define __ICP_QAT_FW_LOADER_HANDLE_H__
+#include "icp_qat_uclo.h"
+
+struct icp_qat_fw_loader_ae_data {
+ unsigned int state;
+ unsigned int ustore_size;
+ unsigned int free_addr;
+ unsigned int free_size;
+ unsigned int live_ctx_mask;
+};
+
+struct icp_qat_fw_loader_hal_handle {
+ struct icp_qat_fw_loader_ae_data aes[ICP_QAT_UCLO_MAX_AE];
+ unsigned int ae_mask;
+ unsigned int admin_ae_mask;
+ unsigned int slice_mask;
+ unsigned int revision_id;
+ unsigned int ae_max_num;
+ unsigned int upc_mask;
+ unsigned int max_ustore;
+};
+
+struct icp_qat_fw_loader_chip_info {
+ int mmp_sram_size;
+ bool nn;
+ bool lm2lm3;
+ u32 lm_size;
+ u32 icp_rst_csr;
+ u32 icp_rst_mask;
+ u32 glb_clk_enable_csr;
+ u32 misc_ctl_csr;
+ u32 wakeup_event_val;
+ bool fw_auth;
+ bool css_3k;
+ bool tgroup_share_ustore;
+ u32 fcu_ctl_csr;
+ u32 fcu_sts_csr;
+ u32 fcu_dram_addr_hi;
+ u32 fcu_dram_addr_lo;
+ u32 fcu_loaded_ae_csr;
+ u8 fcu_loaded_ae_pos;
+};
+
+struct icp_qat_fw_loader_handle {
+ struct icp_qat_fw_loader_hal_handle *hal_handle;
+ struct icp_qat_fw_loader_chip_info *chip_info;
+ struct pci_dev *pci_dev;
+ void *obj_handle;
+ void *sobj_handle;
+ void *mobj_handle;
+ unsigned int cfg_ae_mask;
+ void __iomem *hal_sram_addr_v;
+ void __iomem *hal_cap_g_ctl_csr_addr_v;
+ void __iomem *hal_cap_ae_xfer_csr_addr_v;
+ void __iomem *hal_cap_ae_local_csr_addr_v;
+ void __iomem *hal_ep_csr_addr_v;
+};
+
+struct icp_firml_dram_desc {
+ void __iomem *dram_base_addr;
+ void *dram_base_addr_v;
+ dma_addr_t dram_bus_addr;
+ u64 dram_size;
+};
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_pke.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_pke.h
new file mode 100644
index 0000000000..9dddae0009
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_pke.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef _ICP_QAT_FW_PKE_
+#define _ICP_QAT_FW_PKE_
+
+#include "icp_qat_fw.h"
+
+struct icp_qat_fw_req_hdr_pke_cd_pars {
+ __u64 content_desc_addr;
+ __u32 content_desc_resrvd;
+ __u32 func_id;
+};
+
+struct icp_qat_fw_req_pke_mid {
+ __u64 opaque;
+ __u64 src_data_addr;
+ __u64 dest_data_addr;
+};
+
+struct icp_qat_fw_req_pke_hdr {
+ __u8 resrvd1;
+ __u8 resrvd2;
+ __u8 service_type;
+ __u8 hdr_flags;
+ __u16 comn_req_flags;
+ __u16 resrvd4;
+ struct icp_qat_fw_req_hdr_pke_cd_pars cd_pars;
+};
+
+struct icp_qat_fw_pke_request {
+ struct icp_qat_fw_req_pke_hdr pke_hdr;
+ struct icp_qat_fw_req_pke_mid pke_mid;
+ __u8 output_param_count;
+ __u8 input_param_count;
+ __u16 resrvd1;
+ __u32 resrvd2;
+ __u64 next_req_adr;
+};
+
+struct icp_qat_fw_resp_pke_hdr {
+ __u8 resrvd1;
+ __u8 resrvd2;
+ __u8 response_type;
+ __u8 hdr_flags;
+ __u16 comn_resp_flags;
+ __u16 resrvd4;
+};
+
+struct icp_qat_fw_pke_resp {
+ struct icp_qat_fw_resp_pke_hdr pke_resp_hdr;
+ __u64 opaque;
+ __u64 src_data_addr;
+ __u64 dest_data_addr;
+};
+
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS 7
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK 0x1
+#define ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(status_word) \
+ QAT_FIELD_GET(((status_word >> ICP_QAT_FW_COMN_ONE_BYTE_SHIFT) & \
+ ICP_QAT_FW_COMN_SINGLE_BYTE_MASK), \
+ QAT_COMN_RESP_PKE_STATUS_BITPOS, \
+ QAT_COMN_RESP_PKE_STATUS_MASK)
+
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(hdr_t, val) \
+ QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS, \
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK)
+#endif
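
Illustrative sketch (not part of the patch): reading the PKE status out of a firmware response with the accessor above. ICP_QAT_FW_COMN_STATUS_FLAG_OK and the shift/mask helpers expanded by the macro are assumed to come from icp_qat_fw.h.

static bool example_pke_resp_ok(const struct icp_qat_fw_pke_resp *resp)
{
	/* the PKE status travels in the common response flags of the header */
	int status = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(resp->pke_resp_hdr.comn_resp_flags);

	return status == ICP_QAT_FW_COMN_STATUS_FLAG_OK;
}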
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hal.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hal.h
new file mode 100644
index 0000000000..20b2ee1fc6
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hal.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef __ICP_QAT_HAL_H
+#define __ICP_QAT_HAL_H
+#include "icp_qat_fw_loader_handle.h"
+
+enum hal_global_csr {
+ MISC_CONTROL = 0xA04,
+ ICP_RESET = 0xA0c,
+ ICP_GLOBAL_CLK_ENABLE = 0xA50
+};
+
+enum {
+ MISC_CONTROL_C4XXX = 0xAA0,
+ ICP_RESET_CPP0 = 0x938,
+ ICP_RESET_CPP1 = 0x93c,
+ ICP_GLOBAL_CLK_ENABLE_CPP0 = 0x964,
+ ICP_GLOBAL_CLK_ENABLE_CPP1 = 0x968
+};
+
+enum hal_ae_csr {
+ USTORE_ADDRESS = 0x000,
+ USTORE_DATA_LOWER = 0x004,
+ USTORE_DATA_UPPER = 0x008,
+ ALU_OUT = 0x010,
+ CTX_ARB_CNTL = 0x014,
+ CTX_ENABLES = 0x018,
+ CC_ENABLE = 0x01c,
+ CSR_CTX_POINTER = 0x020,
+ CTX_STS_INDIRECT = 0x040,
+ ACTIVE_CTX_STATUS = 0x044,
+ CTX_SIG_EVENTS_INDIRECT = 0x048,
+ CTX_SIG_EVENTS_ACTIVE = 0x04c,
+ CTX_WAKEUP_EVENTS_INDIRECT = 0x050,
+ LM_ADDR_0_INDIRECT = 0x060,
+ LM_ADDR_1_INDIRECT = 0x068,
+ LM_ADDR_2_INDIRECT = 0x0cc,
+ LM_ADDR_3_INDIRECT = 0x0d4,
+ INDIRECT_LM_ADDR_0_BYTE_INDEX = 0x0e0,
+ INDIRECT_LM_ADDR_1_BYTE_INDEX = 0x0e8,
+ INDIRECT_LM_ADDR_2_BYTE_INDEX = 0x10c,
+ INDIRECT_LM_ADDR_3_BYTE_INDEX = 0x114,
+ INDIRECT_T_INDEX = 0x0f8,
+ INDIRECT_T_INDEX_BYTE_INDEX = 0x0fc,
+ FUTURE_COUNT_SIGNAL_INDIRECT = 0x078,
+ TIMESTAMP_LOW = 0x0c0,
+ TIMESTAMP_HIGH = 0x0c4,
+ PROFILE_COUNT = 0x144,
+ SIGNATURE_ENABLE = 0x150,
+ AE_MISC_CONTROL = 0x160,
+ LOCAL_CSR_STATUS = 0x180,
+};
+
+enum fcu_csr {
+ FCU_CONTROL = 0x8c0,
+ FCU_STATUS = 0x8c4,
+ FCU_STATUS1 = 0x8c8,
+ FCU_DRAM_ADDR_LO = 0x8cc,
+ FCU_DRAM_ADDR_HI = 0x8d0,
+ FCU_RAMBASE_ADDR_HI = 0x8d4,
+ FCU_RAMBASE_ADDR_LO = 0x8d8
+};
+
+enum fcu_csr_4xxx {
+ FCU_CONTROL_4XXX = 0x1000,
+ FCU_STATUS_4XXX = 0x1004,
+ FCU_ME_BROADCAST_MASK_TYPE = 0x1008,
+ FCU_AE_LOADED_4XXX = 0x1010,
+ FCU_DRAM_ADDR_LO_4XXX = 0x1014,
+ FCU_DRAM_ADDR_HI_4XXX = 0x1018,
+};
+
+enum fcu_cmd {
+ FCU_CTRL_CMD_NOOP = 0,
+ FCU_CTRL_CMD_AUTH = 1,
+ FCU_CTRL_CMD_LOAD = 2,
+ FCU_CTRL_CMD_START = 3
+};
+
+enum fcu_sts {
+ FCU_STS_NO_STS = 0,
+ FCU_STS_VERI_DONE = 1,
+ FCU_STS_LOAD_DONE = 2,
+ FCU_STS_VERI_FAIL = 3,
+ FCU_STS_LOAD_FAIL = 4,
+ FCU_STS_BUSY = 5
+};
+
+#define ALL_AE_MASK 0xFFFFFFFF
+#define UA_ECS (0x1 << 31)
+#define ACS_ABO_BITPOS 31
+#define ACS_ACNO 0x7
+#define CE_ENABLE_BITPOS 0x8
+#define CE_LMADDR_0_GLOBAL_BITPOS 16
+#define CE_LMADDR_1_GLOBAL_BITPOS 17
+#define CE_LMADDR_2_GLOBAL_BITPOS 22
+#define CE_LMADDR_3_GLOBAL_BITPOS 23
+#define CE_T_INDEX_GLOBAL_BITPOS 21
+#define CE_NN_MODE_BITPOS 20
+#define CE_REG_PAR_ERR_BITPOS 25
+#define CE_BREAKPOINT_BITPOS 27
+#define CE_CNTL_STORE_PARITY_ERROR_BITPOS 29
+#define CE_INUSE_CONTEXTS_BITPOS 31
+#define CE_NN_MODE (0x1 << CE_NN_MODE_BITPOS)
+#define CE_INUSE_CONTEXTS (0x1 << CE_INUSE_CONTEXTS_BITPOS)
+#define XCWE_VOLUNTARY (0x1)
+#define LCS_STATUS (0x1)
+#define MMC_SHARE_CS_BITPOS 2
+#define WAKEUP_EVENT 0x10000
+#define FCU_CTRL_BROADCAST_POS 0x4
+#define FCU_CTRL_AE_POS 0x8
+#define FCU_AUTH_STS_MASK 0x7
+#define FCU_STS_DONE_POS 0x9
+#define FCU_STS_AUTHFWLD_POS 0X8
+#define FCU_LOADED_AE_POS 0x16
+#define FW_AUTH_WAIT_PERIOD 10
+#define FW_AUTH_MAX_RETRY 300
+#define ICP_QAT_AE_OFFSET 0x20000
+#define ICP_QAT_CAP_OFFSET (ICP_QAT_AE_OFFSET + 0x10000)
+#define LOCAL_TO_XFER_REG_OFFSET 0x800
+#define ICP_QAT_EP_OFFSET 0x3a000
+#define ICP_QAT_EP_OFFSET_4XXX 0x200000 /* HI MMIO CSRs */
+#define ICP_QAT_AE_OFFSET_4XXX 0x600000
+#define ICP_QAT_CAP_OFFSET_4XXX 0x640000
+#define SET_CAP_CSR(handle, csr, val) \
+ ADF_CSR_WR((handle)->hal_cap_g_ctl_csr_addr_v, csr, val)
+#define GET_CAP_CSR(handle, csr) \
+ ADF_CSR_RD((handle)->hal_cap_g_ctl_csr_addr_v, csr)
+#define AE_CSR(handle, ae) \
+ ((char __iomem *)(handle)->hal_cap_ae_local_csr_addr_v + ((ae) << 12))
+#define AE_CSR_ADDR(handle, ae, csr) (AE_CSR(handle, ae) + (0x3ff & (csr)))
+#define SET_AE_CSR(handle, ae, csr, val) \
+ ADF_CSR_WR(AE_CSR_ADDR(handle, ae, csr), 0, val)
+#define GET_AE_CSR(handle, ae, csr) ADF_CSR_RD(AE_CSR_ADDR(handle, ae, csr), 0)
+#define AE_XFER(handle, ae) \
+ ((char __iomem *)(handle)->hal_cap_ae_xfer_csr_addr_v + ((ae) << 12))
+#define AE_XFER_ADDR(handle, ae, reg) (AE_XFER(handle, ae) + \
+ (((reg) & 0xff) << 2))
+#define SET_AE_XFER(handle, ae, reg, val) \
+ ADF_CSR_WR(AE_XFER_ADDR(handle, ae, reg), 0, val)
+#define SRAM_WRITE(handle, addr, val) \
+ ADF_CSR_WR((handle)->hal_sram_addr_v, addr, val)
+#endif
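
Illustrative sketch (not part of the patch): a typical read-modify-write of an accelerator-engine local CSR using the helpers above. ADF_CSR_RD/ADF_CSR_WR are assumed to be the MMIO accessors provided elsewhere in qat_common (adf_accel_devices.h).

static void example_set_nn_mode(struct icp_qat_fw_loader_handle *handle,
				unsigned char ae)
{
	/* GET_AE_CSR/SET_AE_CSR resolve to base + (ae << 12) + (csr & 0x3ff) */
	unsigned int ctx_enables = GET_AE_CSR(handle, ae, CTX_ENABLES);

	ctx_enables |= CE_NN_MODE;	/* enable next-neighbour mode for this AE */
	SET_AE_CSR(handle, ae, CTX_ENABLES, ctx_enables);
}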
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
new file mode 100644
index 0000000000..0c8883e2cc
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
@@ -0,0 +1,378 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef _ICP_QAT_HW_H_
+#define _ICP_QAT_HW_H_
+
+enum icp_qat_hw_ae_id {
+ ICP_QAT_HW_AE_0 = 0,
+ ICP_QAT_HW_AE_1 = 1,
+ ICP_QAT_HW_AE_2 = 2,
+ ICP_QAT_HW_AE_3 = 3,
+ ICP_QAT_HW_AE_4 = 4,
+ ICP_QAT_HW_AE_5 = 5,
+ ICP_QAT_HW_AE_6 = 6,
+ ICP_QAT_HW_AE_7 = 7,
+ ICP_QAT_HW_AE_8 = 8,
+ ICP_QAT_HW_AE_9 = 9,
+ ICP_QAT_HW_AE_10 = 10,
+ ICP_QAT_HW_AE_11 = 11,
+ ICP_QAT_HW_AE_DELIMITER = 12
+};
+
+enum icp_qat_hw_qat_id {
+ ICP_QAT_HW_QAT_0 = 0,
+ ICP_QAT_HW_QAT_1 = 1,
+ ICP_QAT_HW_QAT_2 = 2,
+ ICP_QAT_HW_QAT_3 = 3,
+ ICP_QAT_HW_QAT_4 = 4,
+ ICP_QAT_HW_QAT_5 = 5,
+ ICP_QAT_HW_QAT_DELIMITER = 6
+};
+
+enum icp_qat_hw_auth_algo {
+ ICP_QAT_HW_AUTH_ALGO_NULL = 0,
+ ICP_QAT_HW_AUTH_ALGO_SHA1 = 1,
+ ICP_QAT_HW_AUTH_ALGO_MD5 = 2,
+ ICP_QAT_HW_AUTH_ALGO_SHA224 = 3,
+ ICP_QAT_HW_AUTH_ALGO_SHA256 = 4,
+ ICP_QAT_HW_AUTH_ALGO_SHA384 = 5,
+ ICP_QAT_HW_AUTH_ALGO_SHA512 = 6,
+ ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7,
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8,
+ ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9,
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10,
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11,
+ ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12,
+ ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13,
+ ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14,
+ ICP_QAT_HW_AUTH_RESERVED_1 = 15,
+ ICP_QAT_HW_AUTH_RESERVED_2 = 16,
+ ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17,
+ ICP_QAT_HW_AUTH_RESERVED_3 = 18,
+ ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19,
+ ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20
+};
+
+enum icp_qat_hw_auth_mode {
+ ICP_QAT_HW_AUTH_MODE0 = 0,
+ ICP_QAT_HW_AUTH_MODE1 = 1,
+ ICP_QAT_HW_AUTH_MODE2 = 2,
+ ICP_QAT_HW_AUTH_MODE_DELIMITER = 3
+};
+
+struct icp_qat_hw_auth_config {
+ __u32 config;
+ __u32 reserved;
+};
+
+struct icp_qat_hw_ucs_cipher_config {
+ __u32 val;
+ __u32 reserved[3];
+};
+
+enum icp_qat_slice_mask {
+ ICP_ACCEL_MASK_CIPHER_SLICE = BIT(0),
+ ICP_ACCEL_MASK_AUTH_SLICE = BIT(1),
+ ICP_ACCEL_MASK_PKE_SLICE = BIT(2),
+ ICP_ACCEL_MASK_COMPRESS_SLICE = BIT(3),
+ ICP_ACCEL_MASK_LZS_SLICE = BIT(4),
+ ICP_ACCEL_MASK_EIA3_SLICE = BIT(5),
+ ICP_ACCEL_MASK_SHA3_SLICE = BIT(6),
+};
+
+enum icp_qat_capabilities_mask {
+ ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = BIT(0),
+ ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = BIT(1),
+ ICP_ACCEL_CAPABILITIES_CIPHER = BIT(2),
+ ICP_ACCEL_CAPABILITIES_AUTHENTICATION = BIT(3),
+ ICP_ACCEL_CAPABILITIES_RESERVED_1 = BIT(4),
+ ICP_ACCEL_CAPABILITIES_COMPRESSION = BIT(5),
+ /* Bits 6-7 are currently reserved */
+ ICP_ACCEL_CAPABILITIES_ZUC = BIT(8),
+ ICP_ACCEL_CAPABILITIES_SHA3 = BIT(9),
+ /* Bits 10-11 are currently reserved */
+ ICP_ACCEL_CAPABILITIES_HKDF = BIT(12),
+ ICP_ACCEL_CAPABILITIES_ECEDMONT = BIT(13),
+ /* Bit 14 is currently reserved */
+ ICP_ACCEL_CAPABILITIES_SHA3_EXT = BIT(15),
+ ICP_ACCEL_CAPABILITIES_AESGCM_SPC = BIT(16),
+ ICP_ACCEL_CAPABILITIES_CHACHA_POLY = BIT(17),
+ ICP_ACCEL_CAPABILITIES_SM2 = BIT(18),
+ ICP_ACCEL_CAPABILITIES_SM3 = BIT(19),
+ ICP_ACCEL_CAPABILITIES_SM4 = BIT(20),
+ /* Bit 21 is currently reserved */
+ ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY = BIT(22),
+ ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64 = BIT(23),
+ ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION = BIT(24),
+ ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION = BIT(25),
+ ICP_ACCEL_CAPABILITIES_AES_V2 = BIT(26)
+};
+
+#define QAT_AUTH_MODE_BITPOS 4
+#define QAT_AUTH_MODE_MASK 0xF
+#define QAT_AUTH_ALGO_BITPOS 0
+#define QAT_AUTH_ALGO_MASK 0xF
+#define QAT_AUTH_CMP_BITPOS 8
+#define QAT_AUTH_CMP_MASK 0x7F
+#define QAT_AUTH_SHA3_PADDING_BITPOS 16
+#define QAT_AUTH_SHA3_PADDING_MASK 0x1
+#define QAT_AUTH_ALGO_SHA3_BITPOS 22
+#define QAT_AUTH_ALGO_SHA3_MASK 0x3
+#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
+ (((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
+ ((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
+ (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \
+ QAT_AUTH_ALGO_SHA3_BITPOS) | \
+ (((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \
+ (algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 1 : 0) \
+ & QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \
+ ((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
+
+struct icp_qat_hw_auth_counter {
+ __be32 counter;
+ __u32 reserved;
+};
+
+#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF
+#define QAT_AUTH_COUNT_BITPOS 0
+#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \
+ (((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS)
+
+struct icp_qat_hw_auth_setup {
+ struct icp_qat_hw_auth_config auth_config;
+ struct icp_qat_hw_auth_counter auth_counter;
+};
+
+#define QAT_HW_DEFAULT_ALIGNMENT 8
+#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & (~(n - 1)))
+#define ICP_QAT_HW_NULL_STATE1_SZ 32
+#define ICP_QAT_HW_MD5_STATE1_SZ 16
+#define ICP_QAT_HW_SHA1_STATE1_SZ 20
+#define ICP_QAT_HW_SHA224_STATE1_SZ 32
+#define ICP_QAT_HW_SHA256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA384_STATE1_SZ 64
+#define ICP_QAT_HW_SHA512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
+#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
+#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_F9_STATE1_SZ 32
+#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16
+#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
+#define ICP_QAT_HW_NULL_STATE2_SZ 32
+#define ICP_QAT_HW_MD5_STATE2_SZ 16
+#define ICP_QAT_HW_SHA1_STATE2_SZ 20
+#define ICP_QAT_HW_SHA224_STATE2_SZ 32
+#define ICP_QAT_HW_SHA256_STATE2_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
+#define ICP_QAT_HW_SHA384_STATE2_SZ 64
+#define ICP_QAT_HW_SHA512_STATE2_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
+#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16
+#define ICP_QAT_HW_F9_IK_SZ 16
+#define ICP_QAT_HW_F9_FK_SZ 16
+#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \
+ ICP_QAT_HW_F9_FK_SZ)
+#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32
+#define ICP_QAT_HW_GALOIS_H_SZ 16
+#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8
+#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16
+
+struct icp_qat_hw_auth_sha512 {
+ struct icp_qat_hw_auth_setup inner_setup;
+ __u8 state1[ICP_QAT_HW_SHA512_STATE1_SZ];
+ struct icp_qat_hw_auth_setup outer_setup;
+ __u8 state2[ICP_QAT_HW_SHA512_STATE2_SZ];
+};
+
+struct icp_qat_hw_auth_algo_blk {
+ struct icp_qat_hw_auth_sha512 sha;
+};
+
+#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0
+#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF
+
+enum icp_qat_hw_cipher_algo {
+ ICP_QAT_HW_CIPHER_ALGO_NULL = 0,
+ ICP_QAT_HW_CIPHER_ALGO_DES = 1,
+ ICP_QAT_HW_CIPHER_ALGO_3DES = 2,
+ ICP_QAT_HW_CIPHER_ALGO_AES128 = 3,
+ ICP_QAT_HW_CIPHER_ALGO_AES192 = 4,
+ ICP_QAT_HW_CIPHER_ALGO_AES256 = 5,
+ ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6,
+ ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7,
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8,
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,
+ ICP_QAT_HW_CIPHER_DELIMITER = 10
+};
+
+enum icp_qat_hw_cipher_mode {
+ ICP_QAT_HW_CIPHER_ECB_MODE = 0,
+ ICP_QAT_HW_CIPHER_CBC_MODE = 1,
+ ICP_QAT_HW_CIPHER_CTR_MODE = 2,
+ ICP_QAT_HW_CIPHER_F8_MODE = 3,
+ ICP_QAT_HW_CIPHER_XTS_MODE = 6,
+ ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7
+};
+
+struct icp_qat_hw_cipher_config {
+ __u32 val;
+ __u32 reserved;
+};
+
+enum icp_qat_hw_cipher_dir {
+ ICP_QAT_HW_CIPHER_ENCRYPT = 0,
+ ICP_QAT_HW_CIPHER_DECRYPT = 1,
+};
+
+enum icp_qat_hw_cipher_convert {
+ ICP_QAT_HW_CIPHER_NO_CONVERT = 0,
+ ICP_QAT_HW_CIPHER_KEY_CONVERT = 1,
+};
+
+#define QAT_CIPHER_MODE_BITPOS 4
+#define QAT_CIPHER_MODE_MASK 0xF
+#define QAT_CIPHER_ALGO_BITPOS 0
+#define QAT_CIPHER_ALGO_MASK 0xF
+#define QAT_CIPHER_CONVERT_BITPOS 9
+#define QAT_CIPHER_CONVERT_MASK 0x1
+#define QAT_CIPHER_DIR_BITPOS 8
+#define QAT_CIPHER_DIR_MASK 0x1
+#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2
+#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2
+#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \
+ (((mode & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \
+ ((algo & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \
+ ((convert & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \
+ ((dir & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS))
+#define ICP_QAT_HW_DES_BLK_SZ 8
+#define ICP_QAT_HW_3DES_BLK_SZ 8
+#define ICP_QAT_HW_NULL_BLK_SZ 8
+#define ICP_QAT_HW_AES_BLK_SZ 16
+#define ICP_QAT_HW_KASUMI_BLK_SZ 8
+#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8
+#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8
+#define ICP_QAT_HW_NULL_KEY_SZ 256
+#define ICP_QAT_HW_DES_KEY_SZ 8
+#define ICP_QAT_HW_3DES_KEY_SZ 24
+#define ICP_QAT_HW_AES_128_KEY_SZ 16
+#define ICP_QAT_HW_AES_192_KEY_SZ 24
+#define ICP_QAT_HW_AES_256_KEY_SZ 32
+#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+ QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+ QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_KASUMI_KEY_SZ 16
+#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+ QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+ QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_ARC4_KEY_SZ 256
+#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16
+#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2
+#define INIT_SHRAM_CONSTANTS_TABLE_SZ 1024
+
+struct icp_qat_hw_cipher_aes256_f8 {
+ struct icp_qat_hw_cipher_config cipher_config;
+ __u8 key[ICP_QAT_HW_AES_256_F8_KEY_SZ];
+};
+
+struct icp_qat_hw_ucs_cipher_aes256_f8 {
+ struct icp_qat_hw_ucs_cipher_config cipher_config;
+ __u8 key[ICP_QAT_HW_AES_256_F8_KEY_SZ];
+};
+
+struct icp_qat_hw_cipher_algo_blk {
+ union {
+ struct icp_qat_hw_cipher_aes256_f8 aes;
+ struct icp_qat_hw_ucs_cipher_aes256_f8 ucs_aes;
+ };
+} __aligned(64);
+
+enum icp_qat_hw_compression_direction {
+ ICP_QAT_HW_COMPRESSION_DIR_COMPRESS = 0,
+ ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS = 1,
+ ICP_QAT_HW_COMPRESSION_DIR_DELIMITER = 2
+};
+
+enum icp_qat_hw_compression_delayed_match {
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED = 0,
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED = 1,
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DELIMITER = 2
+};
+
+enum icp_qat_hw_compression_algo {
+ ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE = 0,
+ ICP_QAT_HW_COMPRESSION_ALGO_LZS = 1,
+ ICP_QAT_HW_COMPRESSION_ALGO_DELIMITER = 2
+};
+
+enum icp_qat_hw_compression_depth {
+ ICP_QAT_HW_COMPRESSION_DEPTH_1 = 0,
+ ICP_QAT_HW_COMPRESSION_DEPTH_4 = 1,
+ ICP_QAT_HW_COMPRESSION_DEPTH_8 = 2,
+ ICP_QAT_HW_COMPRESSION_DEPTH_16 = 3,
+ ICP_QAT_HW_COMPRESSION_DEPTH_128 = 4,
+ ICP_QAT_HW_COMPRESSION_DEPTH_DELIMITER = 5
+};
+
+enum icp_qat_hw_compression_file_type {
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_0 = 0,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_1 = 1,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_2 = 2,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_3 = 3,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_4 = 4,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_DELIMITER = 5
+};
+
+struct icp_qat_hw_compression_config {
+ __u32 lower_val;
+ __u32 upper_val;
+};
+
+#define QAT_COMPRESSION_DIR_BITPOS 4
+#define QAT_COMPRESSION_DIR_MASK 0x7
+#define QAT_COMPRESSION_DELAYED_MATCH_BITPOS 16
+#define QAT_COMPRESSION_DELAYED_MATCH_MASK 0x1
+#define QAT_COMPRESSION_ALGO_BITPOS 31
+#define QAT_COMPRESSION_ALGO_MASK 0x1
+#define QAT_COMPRESSION_DEPTH_BITPOS 28
+#define QAT_COMPRESSION_DEPTH_MASK 0x7
+#define QAT_COMPRESSION_FILE_TYPE_BITPOS 24
+#define QAT_COMPRESSION_FILE_TYPE_MASK 0xF
+
+#define ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(dir, delayed, \
+ algo, depth, filetype) \
+ ((((dir) & QAT_COMPRESSION_DIR_MASK) << \
+ QAT_COMPRESSION_DIR_BITPOS) | \
+ (((delayed) & QAT_COMPRESSION_DELAYED_MATCH_MASK) << \
+ QAT_COMPRESSION_DELAYED_MATCH_BITPOS) | \
+ (((algo) & QAT_COMPRESSION_ALGO_MASK) << \
+ QAT_COMPRESSION_ALGO_BITPOS) | \
+ (((depth) & QAT_COMPRESSION_DEPTH_MASK) << \
+ QAT_COMPRESSION_DEPTH_BITPOS) | \
+ (((filetype) & QAT_COMPRESSION_FILE_TYPE_MASK) << \
+ QAT_COMPRESSION_FILE_TYPE_BITPOS))
+
+#endif
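
Illustrative sketch (not part of the patch): composing the cipher configuration word for AES-256 in CBC mode, encrypt direction, with the build macro above. The returned value is what a driver would place in icp_qat_hw_cipher_config.val.

static inline __u32 example_aes256_cbc_enc_config(void)
{
	/* mode | algo | key-convert | direction, packed per the BITPOS/MASK defines */
	return ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE,
					      ICP_QAT_HW_CIPHER_ALGO_AES256,
					      ICP_QAT_HW_CIPHER_NO_CONVERT,
					      ICP_QAT_HW_CIPHER_ENCRYPT);
}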
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h
new file mode 100644
index 0000000000..7ea8962272
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef _ICP_QAT_HW_20_COMP_H_
+#define _ICP_QAT_HW_20_COMP_H_
+
+#include "icp_qat_hw_20_comp_defs.h"
+#include "icp_qat_fw.h"
+
+struct icp_qat_hw_comp_20_config_csr_lower {
+ enum icp_qat_hw_comp_20_extended_delay_match_mode edmm;
+ enum icp_qat_hw_comp_20_hw_comp_format algo;
+ enum icp_qat_hw_comp_20_search_depth sd;
+ enum icp_qat_hw_comp_20_hbs_control hbs;
+ enum icp_qat_hw_comp_20_abd abd;
+ enum icp_qat_hw_comp_20_lllbd_ctrl lllbd;
+ enum icp_qat_hw_comp_20_min_match_control mmctrl;
+ enum icp_qat_hw_comp_20_skip_hash_collision hash_col;
+ enum icp_qat_hw_comp_20_skip_hash_update hash_update;
+ enum icp_qat_hw_comp_20_byte_skip skip_ctrl;
+};
+
+static inline __u32
+ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(struct icp_qat_hw_comp_20_config_csr_lower csr)
+{
+ u32 val32 = 0;
+
+ QAT_FIELD_SET(val32, csr.algo,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_MASK);
+ QAT_FIELD_SET(val32, csr.sd,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_MASK);
+ QAT_FIELD_SET(val32, csr.edmm,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_MASK);
+ QAT_FIELD_SET(val32, csr.hbs,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_MASK);
+ QAT_FIELD_SET(val32, csr.lllbd,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_MASK);
+ QAT_FIELD_SET(val32, csr.mmctrl,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK);
+ QAT_FIELD_SET(val32, csr.hash_col,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_MASK);
+ QAT_FIELD_SET(val32, csr.hash_update,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_MASK);
+ QAT_FIELD_SET(val32, csr.skip_ctrl,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_MASK);
+ QAT_FIELD_SET(val32, csr.abd, ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_MASK);
+
+ return __builtin_bswap32(val32);
+}
+
+struct icp_qat_hw_comp_20_config_csr_upper {
+ enum icp_qat_hw_comp_20_scb_control scb_ctrl;
+ enum icp_qat_hw_comp_20_rmb_control rmb_ctrl;
+ enum icp_qat_hw_comp_20_som_control som_ctrl;
+ enum icp_qat_hw_comp_20_skip_hash_rd_control skip_hash_ctrl;
+ enum icp_qat_hw_comp_20_scb_unload_control scb_unload_ctrl;
+ enum icp_qat_hw_comp_20_disable_token_fusion_control disable_token_fusion_ctrl;
+ enum icp_qat_hw_comp_20_lbms lbms;
+ enum icp_qat_hw_comp_20_scb_mode_reset_mask scb_mode_reset;
+ __u16 lazy;
+ __u16 nice;
+};
+
+static inline __u32
+ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(struct icp_qat_hw_comp_20_config_csr_upper csr)
+{
+ u32 val32 = 0;
+
+ QAT_FIELD_SET(val32, csr.scb_ctrl,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_MASK);
+ QAT_FIELD_SET(val32, csr.rmb_ctrl,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_MASK);
+ QAT_FIELD_SET(val32, csr.som_ctrl,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_MASK);
+ QAT_FIELD_SET(val32, csr.skip_hash_ctrl,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK);
+ QAT_FIELD_SET(val32, csr.scb_unload_ctrl,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_MASK);
+ QAT_FIELD_SET(val32, csr.disable_token_fusion_ctrl,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_MASK);
+ QAT_FIELD_SET(val32, csr.lbms,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_MASK);
+ QAT_FIELD_SET(val32, csr.scb_mode_reset,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK);
+ QAT_FIELD_SET(val32, csr.lazy,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_MASK);
+ QAT_FIELD_SET(val32, csr.nice,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_BITPOS,
+ ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_MASK);
+
+ return __builtin_bswap32(val32);
+}
+
+struct icp_qat_hw_decomp_20_config_csr_lower {
+ enum icp_qat_hw_decomp_20_hbs_control hbs;
+ enum icp_qat_hw_decomp_20_lbms lbms;
+ enum icp_qat_hw_decomp_20_hw_comp_format algo;
+ enum icp_qat_hw_decomp_20_min_match_control mmctrl;
+ enum icp_qat_hw_decomp_20_lz4_block_checksum_present lbc;
+};
+
+static inline __u32
+ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(struct icp_qat_hw_decomp_20_config_csr_lower csr)
+{
+ u32 val32 = 0;
+
+ QAT_FIELD_SET(val32, csr.hbs,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_MASK);
+ QAT_FIELD_SET(val32, csr.lbms,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_BITPOS,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_MASK);
+ QAT_FIELD_SET(val32, csr.algo,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_BITPOS,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_MASK);
+ QAT_FIELD_SET(val32, csr.mmctrl,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK);
+ QAT_FIELD_SET(val32, csr.lbc,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_BITPOS,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_MASK);
+
+ return __builtin_bswap32(val32);
+}
+
+struct icp_qat_hw_decomp_20_config_csr_upper {
+ enum icp_qat_hw_decomp_20_speculative_decoder_control sdc;
+ enum icp_qat_hw_decomp_20_mini_cam_control mcc;
+};
+
+static inline __u32
+ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_UPPER(struct icp_qat_hw_decomp_20_config_csr_upper csr)
+{
+ u32 val32 = 0;
+
+ QAT_FIELD_SET(val32, csr.sdc,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_BITPOS,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_MASK);
+ QAT_FIELD_SET(val32, csr.mcc,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_BITPOS,
+ ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_MASK);
+
+ return __builtin_bswap32(val32);
+}
+
+#endif
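
Illustrative sketch (not part of the patch): filling the lower compression CSR with the _DEFAULT_VAL settings from icp_qat_hw_20_comp_defs.h and packing it with the builder above; the builder already returns the value byte-swapped for the firmware.

static inline __u32 example_comp_lower_csr_defaults(void)
{
	struct icp_qat_hw_comp_20_config_csr_lower lower = {
		.edmm = ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_DEFAULT_VAL,
		.algo = ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_DEFAULT_VAL,
		.sd = ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_DEFAULT_VAL,
		.hbs = ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL,
		.abd = ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_DEFAULT_VAL,
		.lllbd = ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_DEFAULT_VAL,
		.mmctrl = ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL,
		.hash_col = ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_DEFAULT_VAL,
		.hash_update = ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_DEFAULT_VAL,
		.skip_ctrl = ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_DEFAULT_VAL,
	};

	return ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(lower);
}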
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp_defs.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp_defs.h
new file mode 100644
index 0000000000..208d455428
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp_defs.h
@@ -0,0 +1,300 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef _ICP_QAT_HW_20_COMP_DEFS_H
+#define _ICP_QAT_HW_20_COMP_DEFS_H
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_BITPOS 31
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_MASK 0x1
+
+enum icp_qat_hw_comp_20_scb_control {
+ ICP_QAT_HW_COMP_20_SCB_CONTROL_ENABLE = 0x0,
+ ICP_QAT_HW_COMP_20_SCB_CONTROL_DISABLE = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_SCB_CONTROL_DISABLE
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_BITPOS 30
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_MASK 0x1
+
+enum icp_qat_hw_comp_20_rmb_control {
+ ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_ALL = 0x0,
+ ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_FC_ONLY = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_ALL
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_BITPOS 28
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_MASK 0x3
+
+enum icp_qat_hw_comp_20_som_control {
+ ICP_QAT_HW_COMP_20_SOM_CONTROL_NORMAL_MODE = 0x0,
+ ICP_QAT_HW_COMP_20_SOM_CONTROL_REPLAY_MODE = 0x1,
+ ICP_QAT_HW_COMP_20_SOM_CONTROL_INPUT_CRC = 0x2,
+ ICP_QAT_HW_COMP_20_SOM_CONTROL_RESERVED_MODE = 0x3,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_SOM_CONTROL_NORMAL_MODE
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS 27
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK 0x1
+
+enum icp_qat_hw_comp_20_skip_hash_rd_control {
+ ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_NO_SKIP = 0x0,
+ ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_SKIP_HASH_READS = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_NO_SKIP
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_BITPOS 26
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_MASK 0x1
+
+enum icp_qat_hw_comp_20_scb_unload_control {
+ ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_UNLOAD = 0x0,
+ ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_NO_UNLOAD = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_UNLOAD
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_BITPOS 21
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_MASK 0x1
+
+enum icp_qat_hw_comp_20_disable_token_fusion_control {
+ ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_ENABLE = 0x0,
+ ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_DISABLE = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_ENABLE
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_BITPOS 19
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_MASK 0x3
+
+enum icp_qat_hw_comp_20_lbms {
+ ICP_QAT_HW_COMP_20_LBMS_LBMS_64KB = 0x0,
+ ICP_QAT_HW_COMP_20_LBMS_LBMS_256KB = 0x1,
+ ICP_QAT_HW_COMP_20_LBMS_LBMS_1MB = 0x2,
+ ICP_QAT_HW_COMP_20_LBMS_LBMS_4MB = 0x3,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_LBMS_LBMS_64KB
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS 18
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK 0x1
+
+enum icp_qat_hw_comp_20_scb_mode_reset_mask {
+ ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS = 0x0,
+ ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS_AND_HISTORY = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_BITPOS 9
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_MASK 0x1ff
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL 258
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_BITPOS 0
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_MASK 0x1ff
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL 259
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS 14
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_MASK 0x7
+
+enum icp_qat_hw_comp_20_hbs_control {
+ ICP_QAT_HW_COMP_20_HBS_CONTROL_HBS_IS_32KB = 0x0,
+ ICP_QAT_HW_COMP_23_HBS_CONTROL_HBS_IS_64KB = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_HBS_CONTROL_HBS_IS_32KB
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_BITPOS 13
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_MASK 0x1
+
+enum icp_qat_hw_comp_20_abd {
+ ICP_QAT_HW_COMP_20_ABD_ABD_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_20_ABD_ABD_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_ABD_ABD_ENABLED
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_BITPOS 12
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_MASK 0x1
+
+enum icp_qat_hw_comp_20_lllbd_ctrl {
+ ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_BITPOS 8
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_MASK 0xf
+
+enum icp_qat_hw_comp_20_search_depth {
+ ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1 = 0x1,
+ ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_6 = 0x3,
+ ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_9 = 0x4,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_BITPOS 5
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_MASK 0x7
+
+enum icp_qat_hw_comp_20_hw_comp_format {
+ ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77 = 0x0,
+ ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE = 0x1,
+ ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_LZ4 = 0x2,
+ ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_LZ4S = 0x3,
+ ICP_QAT_HW_COMP_23_HW_COMP_FORMAT_ZSTD = 0x4,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS 4
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK 0x1
+
+enum icp_qat_hw_comp_20_min_match_control {
+ ICP_QAT_HW_COMP_20_MIN_MATCH_CONTROL_MATCH_3B = 0x0,
+ ICP_QAT_HW_COMP_20_MIN_MATCH_CONTROL_MATCH_4B = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_MIN_MATCH_CONTROL_MATCH_3B
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS 3
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_MASK 0x1
+
+enum icp_qat_hw_comp_20_skip_hash_collision {
+ ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_ALLOW = 0x0,
+ ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_DONT_ALLOW = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_ALLOW
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS 2
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_MASK 0x1
+
+enum icp_qat_hw_comp_20_skip_hash_update {
+ ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_ALLOW = 0x0,
+ ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_ALLOW
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_BITPOS 1
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_MASK 0x1
+
+enum icp_qat_hw_comp_20_byte_skip {
+ ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_TOKEN = 0x0,
+ ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_TOKEN
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_BITPOS 0
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_MASK 0x1
+
+enum icp_qat_hw_comp_20_extended_delay_match_mode {
+ ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_DISABLED = 0x0,
+ ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_DISABLED
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_BITPOS 31
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_MASK 0x1
+
+enum icp_qat_hw_decomp_20_speculative_decoder_control {
+ ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_ENABLE = 0x0,
+ ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_DISABLE = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_ENABLE
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_BITPOS 30
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_MASK 0x1
+
+enum icp_qat_hw_decomp_20_mini_cam_control {
+ ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_ENABLE = 0x0,
+ ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_DISABLE = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_ENABLE
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS 14
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_MASK 0x7
+
+enum icp_qat_hw_decomp_20_hbs_control {
+ ICP_QAT_HW_DECOMP_20_HBS_CONTROL_HBS_IS_32KB = 0x0,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_20_HBS_CONTROL_HBS_IS_32KB
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_BITPOS 8
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_MASK 0x3
+
+enum icp_qat_hw_decomp_20_lbms {
+ ICP_QAT_HW_DECOMP_20_LBMS_LBMS_64KB = 0x0,
+ ICP_QAT_HW_DECOMP_20_LBMS_LBMS_256KB = 0x1,
+ ICP_QAT_HW_DECOMP_20_LBMS_LBMS_1MB = 0x2,
+ ICP_QAT_HW_DECOMP_20_LBMS_LBMS_4MB = 0x3,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_20_LBMS_LBMS_64KB
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_BITPOS 5
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_MASK 0x7
+
+enum icp_qat_hw_decomp_20_hw_comp_format {
+ ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE = 0x1,
+ ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_LZ4 = 0x2,
+ ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_LZ4S = 0x3,
+ ICP_QAT_HW_DECOMP_23_HW_DECOMP_FORMAT_ZSTD = 0x4,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS 4
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK 0x1
+
+enum icp_qat_hw_decomp_20_min_match_control {
+ ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_3B = 0x0,
+ ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_4B = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_3B
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_BITPOS 3
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_MASK 0x1
+
+enum icp_qat_hw_decomp_20_lz4_block_checksum_present {
+ ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_ABSENT = 0x0,
+ ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_PRESENT = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_ABSENT
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
new file mode 100644
index 0000000000..69482abdb8
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
@@ -0,0 +1,585 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef __ICP_QAT_UCLO_H__
+#define __ICP_QAT_UCLO_H__
+
+#define ICP_QAT_AC_895XCC_DEV_TYPE 0x00400000
+#define ICP_QAT_AC_C62X_DEV_TYPE 0x01000000
+#define ICP_QAT_AC_C3XXX_DEV_TYPE 0x02000000
+#define ICP_QAT_AC_4XXX_A_DEV_TYPE 0x08000000
+#define ICP_QAT_UCLO_MAX_AE 12
+#define ICP_QAT_UCLO_MAX_CTX 8
+#define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX)
+#define ICP_QAT_UCLO_MAX_USTORE 0x4000
+#define ICP_QAT_UCLO_MAX_XFER_REG 128
+#define ICP_QAT_UCLO_MAX_GPR_REG 128
+#define ICP_QAT_UCLO_MAX_LMEM_REG 1024
+#define ICP_QAT_UCLO_MAX_LMEM_REG_2X 1280
+#define ICP_QAT_UCLO_AE_ALL_CTX 0xff
+#define ICP_QAT_UOF_OBJID_LEN 8
+#define ICP_QAT_UOF_FID 0xc6c2
+#define ICP_QAT_UOF_MAJVER 0x4
+#define ICP_QAT_UOF_MINVER 0x11
+#define ICP_QAT_UOF_OBJS "UOF_OBJS"
+#define ICP_QAT_UOF_STRT "UOF_STRT"
+#define ICP_QAT_UOF_IMAG "UOF_IMAG"
+#define ICP_QAT_UOF_IMEM "UOF_IMEM"
+#define ICP_QAT_UOF_LOCAL_SCOPE 1
+#define ICP_QAT_UOF_INIT_EXPR 0
+#define ICP_QAT_UOF_INIT_REG 1
+#define ICP_QAT_UOF_INIT_REG_CTX 2
+#define ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP 3
+#define ICP_QAT_SUOF_OBJ_ID_LEN 8
+#define ICP_QAT_SUOF_FID 0x53554f46
+#define ICP_QAT_SUOF_MAJVER 0x0
+#define ICP_QAT_SUOF_MINVER 0x1
+#define ICP_QAT_SUOF_OBJ_NAME_LEN 128
+#define ICP_QAT_MOF_OBJ_ID_LEN 8
+#define ICP_QAT_MOF_OBJ_CHUNKID_LEN 8
+#define ICP_QAT_MOF_FID 0x00666f6d
+#define ICP_QAT_MOF_MAJVER 0x0
+#define ICP_QAT_MOF_MINVER 0x1
+#define ICP_QAT_MOF_SYM_OBJS "SYM_OBJS"
+#define ICP_QAT_SUOF_OBJS "SUF_OBJS"
+#define ICP_QAT_SUOF_IMAG "SUF_IMAG"
+#define ICP_QAT_SIMG_AE_INIT_SEQ_LEN (50 * sizeof(unsigned long long))
+#define ICP_QAT_SIMG_AE_INSTS_LEN (0x4000 * sizeof(unsigned long long))
+
+#define DSS_FWSK_MODULUS_LEN 384 /* RSA3K */
+#define DSS_FWSK_EXPONENT_LEN 4
+#define DSS_FWSK_PADDING_LEN 380
+#define DSS_SIGNATURE_LEN 384 /* RSA3K */
+
+#define CSS_FWSK_MODULUS_LEN 256 /* RSA2K */
+#define CSS_FWSK_EXPONENT_LEN 4
+#define CSS_FWSK_PADDING_LEN 252
+#define CSS_SIGNATURE_LEN 256 /* RSA2K */
+
+#define ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) ((handle)->chip_info->css_3k ? \
+ DSS_FWSK_MODULUS_LEN : \
+ CSS_FWSK_MODULUS_LEN)
+
+#define ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle) ((handle)->chip_info->css_3k ? \
+ DSS_FWSK_EXPONENT_LEN : \
+ CSS_FWSK_EXPONENT_LEN)
+
+#define ICP_QAT_CSS_FWSK_PAD_LEN(handle) ((handle)->chip_info->css_3k ? \
+ DSS_FWSK_PADDING_LEN : \
+ CSS_FWSK_PADDING_LEN)
+
+#define ICP_QAT_CSS_FWSK_PUB_LEN(handle) (ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + \
+ ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle) + \
+ ICP_QAT_CSS_FWSK_PAD_LEN(handle))
+
+#define ICP_QAT_CSS_SIGNATURE_LEN(handle) ((handle)->chip_info->css_3k ? \
+ DSS_SIGNATURE_LEN : \
+ CSS_SIGNATURE_LEN)
+
+#define ICP_QAT_CSS_AE_IMG_LEN (sizeof(struct icp_qat_simg_ae_mode) + \
+ ICP_QAT_SIMG_AE_INIT_SEQ_LEN + \
+ ICP_QAT_SIMG_AE_INSTS_LEN)
+#define ICP_QAT_CSS_AE_SIMG_LEN(handle) (sizeof(struct icp_qat_css_hdr) + \
+ ICP_QAT_CSS_FWSK_PUB_LEN(handle) + \
+ ICP_QAT_CSS_SIGNATURE_LEN(handle) + \
+ ICP_QAT_CSS_AE_IMG_LEN)
+#define ICP_QAT_AE_IMG_OFFSET(handle) (sizeof(struct icp_qat_css_hdr) + \
+ ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + \
+ ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle) + \
+ ICP_QAT_CSS_SIGNATURE_LEN(handle))
+#define ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN 0x40000
+#define ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN 0x30000
+
+#define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf)
+#define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf)
+#define ICP_QAT_SHARED_USTORE_MODE(ae_mode) (((ae_mode) >> 0xb) & 0x1)
+#define RELOADABLE_CTX_SHARED_MODE(ae_mode) (((ae_mode) >> 0xc) & 0x1)
+
+#define ICP_QAT_LOC_MEM0_MODE(ae_mode) (((ae_mode) >> 0x8) & 0x1)
+#define ICP_QAT_LOC_MEM1_MODE(ae_mode) (((ae_mode) >> 0x9) & 0x1)
+#define ICP_QAT_LOC_MEM2_MODE(ae_mode) (((ae_mode) >> 0x6) & 0x1)
+#define ICP_QAT_LOC_MEM3_MODE(ae_mode) (((ae_mode) >> 0x7) & 0x1)
+#define ICP_QAT_LOC_TINDEX_MODE(ae_mode) (((ae_mode) >> 0xe) & 0x1)
+
+enum icp_qat_uof_mem_region {
+ ICP_QAT_UOF_SRAM_REGION = 0x0,
+ ICP_QAT_UOF_LMEM_REGION = 0x3,
+ ICP_QAT_UOF_UMEM_REGION = 0x5
+};
+
+enum icp_qat_uof_regtype {
+ ICP_NO_DEST = 0,
+ ICP_GPA_REL = 1,
+ ICP_GPA_ABS = 2,
+ ICP_GPB_REL = 3,
+ ICP_GPB_ABS = 4,
+ ICP_SR_REL = 5,
+ ICP_SR_RD_REL = 6,
+ ICP_SR_WR_REL = 7,
+ ICP_SR_ABS = 8,
+ ICP_SR_RD_ABS = 9,
+ ICP_SR_WR_ABS = 10,
+ ICP_DR_REL = 19,
+ ICP_DR_RD_REL = 20,
+ ICP_DR_WR_REL = 21,
+ ICP_DR_ABS = 22,
+ ICP_DR_RD_ABS = 23,
+ ICP_DR_WR_ABS = 24,
+ ICP_LMEM = 26,
+ ICP_LMEM0 = 27,
+ ICP_LMEM1 = 28,
+ ICP_NEIGH_REL = 31,
+ ICP_LMEM2 = 61,
+ ICP_LMEM3 = 62,
+};
+
+enum icp_qat_css_fwtype {
+ CSS_AE_FIRMWARE = 0,
+ CSS_MMP_FIRMWARE = 1
+};
+
+struct icp_qat_uclo_page {
+ struct icp_qat_uclo_encap_page *encap_page;
+ struct icp_qat_uclo_region *region;
+ unsigned int flags;
+};
+
+struct icp_qat_uclo_region {
+ struct icp_qat_uclo_page *loaded;
+ struct icp_qat_uclo_page *page;
+};
+
+struct icp_qat_uclo_aeslice {
+ struct icp_qat_uclo_region *region;
+ struct icp_qat_uclo_page *page;
+ struct icp_qat_uclo_page *cur_page[ICP_QAT_UCLO_MAX_CTX];
+ struct icp_qat_uclo_encapme *encap_image;
+ unsigned int ctx_mask_assigned;
+ unsigned int new_uaddr[ICP_QAT_UCLO_MAX_CTX];
+};
+
+struct icp_qat_uclo_aedata {
+ unsigned int slice_num;
+ unsigned int eff_ustore_size;
+ struct icp_qat_uclo_aeslice ae_slices[ICP_QAT_UCLO_MAX_CTX];
+};
+
+struct icp_qat_uof_encap_obj {
+ char *beg_uof;
+ struct icp_qat_uof_objhdr *obj_hdr;
+ struct icp_qat_uof_chunkhdr *chunk_hdr;
+ struct icp_qat_uof_varmem_seg *var_mem_seg;
+};
+
+struct icp_qat_uclo_encap_uwblock {
+ unsigned int start_addr;
+ unsigned int words_num;
+ u64 micro_words;
+};
+
+struct icp_qat_uclo_encap_page {
+ unsigned int def_page;
+ unsigned int page_region;
+ unsigned int beg_addr_v;
+ unsigned int beg_addr_p;
+ unsigned int micro_words_num;
+ unsigned int uwblock_num;
+ struct icp_qat_uclo_encap_uwblock *uwblock;
+};
+
+struct icp_qat_uclo_encapme {
+ struct icp_qat_uof_image *img_ptr;
+ struct icp_qat_uclo_encap_page *page;
+ unsigned int ae_reg_num;
+ struct icp_qat_uof_ae_reg *ae_reg;
+ unsigned int init_regsym_num;
+ struct icp_qat_uof_init_regsym *init_regsym;
+ unsigned int sbreak_num;
+ struct icp_qat_uof_sbreak *sbreak;
+ unsigned int uwords_num;
+};
+
+struct icp_qat_uclo_init_mem_table {
+ unsigned int entry_num;
+ struct icp_qat_uof_initmem *init_mem;
+};
+
+struct icp_qat_uclo_objhdr {
+ char *file_buff;
+ unsigned int checksum;
+ unsigned int size;
+};
+
+struct icp_qat_uof_strtable {
+ unsigned int table_len;
+ unsigned int reserved;
+ u64 strings;
+};
+
+struct icp_qat_uclo_objhandle {
+ unsigned int prod_type;
+ unsigned int prod_rev;
+ struct icp_qat_uclo_objhdr *obj_hdr;
+ struct icp_qat_uof_encap_obj encap_uof_obj;
+ struct icp_qat_uof_strtable str_table;
+ struct icp_qat_uclo_encapme ae_uimage[ICP_QAT_UCLO_MAX_UIMAGE];
+ struct icp_qat_uclo_aedata ae_data[ICP_QAT_UCLO_MAX_AE];
+ struct icp_qat_uclo_init_mem_table init_mem_tab;
+ struct icp_qat_uof_batch_init *lm_init_tab[ICP_QAT_UCLO_MAX_AE];
+ struct icp_qat_uof_batch_init *umem_init_tab[ICP_QAT_UCLO_MAX_AE];
+ int uimage_num;
+ int uword_in_bytes;
+ int global_inited;
+ unsigned int ae_num;
+ unsigned int ustore_phy_size;
+ void *obj_buf;
+ u64 *uword_buf;
+};
+
+struct icp_qat_uof_uword_block {
+ unsigned int start_addr;
+ unsigned int words_num;
+ unsigned int uword_offset;
+ unsigned int reserved;
+};
+
+struct icp_qat_uof_filehdr {
+ unsigned short file_id;
+ unsigned short reserved1;
+ char min_ver;
+ char maj_ver;
+ unsigned short reserved2;
+ unsigned short max_chunks;
+ unsigned short num_chunks;
+};
+
+struct icp_qat_uof_filechunkhdr {
+ char chunk_id[ICP_QAT_UOF_OBJID_LEN];
+ unsigned int checksum;
+ unsigned int offset;
+ unsigned int size;
+};
+
+struct icp_qat_uof_objhdr {
+ unsigned int ac_dev_type;
+ unsigned short min_cpu_ver;
+ unsigned short max_cpu_ver;
+ short max_chunks;
+ short num_chunks;
+ unsigned int reserved1;
+ unsigned int reserved2;
+};
+
+struct icp_qat_uof_chunkhdr {
+ char chunk_id[ICP_QAT_UOF_OBJID_LEN];
+ unsigned int offset;
+ unsigned int size;
+};
+
+struct icp_qat_uof_memvar_attr {
+ unsigned int offset_in_byte;
+ unsigned int value;
+};
+
+struct icp_qat_uof_initmem {
+ unsigned int sym_name;
+ char region;
+ char scope;
+ unsigned short reserved1;
+ unsigned int addr;
+ unsigned int num_in_bytes;
+ unsigned int val_attr_num;
+};
+
+struct icp_qat_uof_init_regsym {
+ unsigned int sym_name;
+ char init_type;
+ char value_type;
+ char reg_type;
+ unsigned char ctx;
+ unsigned int reg_addr;
+ unsigned int value;
+};
+
+struct icp_qat_uof_varmem_seg {
+ unsigned int sram_base;
+ unsigned int sram_size;
+ unsigned int sram_alignment;
+ unsigned int sdram_base;
+ unsigned int sdram_size;
+ unsigned int sdram_alignment;
+ unsigned int sdram1_base;
+ unsigned int sdram1_size;
+ unsigned int sdram1_alignment;
+ unsigned int scratch_base;
+ unsigned int scratch_size;
+ unsigned int scratch_alignment;
+};
+
+struct icp_qat_uof_gtid {
+ char tool_id[ICP_QAT_UOF_OBJID_LEN];
+ int tool_ver;
+ unsigned int reserved1;
+ unsigned int reserved2;
+};
+
+struct icp_qat_uof_sbreak {
+ unsigned int page_num;
+ unsigned int virt_uaddr;
+ unsigned char sbreak_type;
+ unsigned char reg_type;
+ unsigned short reserved1;
+ unsigned int addr_offset;
+ unsigned int reg_addr;
+};
+
+struct icp_qat_uof_code_page {
+ unsigned int page_region;
+ unsigned int page_num;
+ unsigned char def_page;
+ unsigned char reserved2;
+ unsigned short reserved1;
+ unsigned int beg_addr_v;
+ unsigned int beg_addr_p;
+ unsigned int neigh_reg_tab_offset;
+ unsigned int uc_var_tab_offset;
+ unsigned int imp_var_tab_offset;
+ unsigned int imp_expr_tab_offset;
+ unsigned int code_area_offset;
+};
+
+struct icp_qat_uof_image {
+ unsigned int img_name;
+ unsigned int ae_assigned;
+ unsigned int ctx_assigned;
+ unsigned int ac_dev_type;
+ unsigned int entry_address;
+ unsigned int fill_pattern[2];
+ unsigned int reloadable_size;
+ unsigned char sensitivity;
+ unsigned char reserved;
+ unsigned short ae_mode;
+ unsigned short max_ver;
+ unsigned short min_ver;
+ unsigned short image_attrib;
+ unsigned short reserved2;
+ unsigned short page_region_num;
+ unsigned short numpages;
+ unsigned int reg_tab_offset;
+ unsigned int init_reg_sym_tab;
+ unsigned int sbreak_tab;
+ unsigned int app_metadata;
+};
+
+struct icp_qat_uof_objtable {
+ unsigned int entry_num;
+};
+
+struct icp_qat_uof_ae_reg {
+ unsigned int name;
+ unsigned int vis_name;
+ unsigned short type;
+ unsigned short addr;
+ unsigned short access_mode;
+ unsigned char visible;
+ unsigned char reserved1;
+ unsigned short ref_count;
+ unsigned short reserved2;
+ unsigned int xo_id;
+};
+
+struct icp_qat_uof_code_area {
+ unsigned int micro_words_num;
+ unsigned int uword_block_tab;
+};
+
+struct icp_qat_uof_batch_init {
+ unsigned int ae;
+ unsigned int addr;
+ unsigned int *value;
+ unsigned int size;
+ struct icp_qat_uof_batch_init *next;
+};
+
+struct icp_qat_suof_img_hdr {
+ char *simg_buf;
+ unsigned long simg_len;
+ char *css_header;
+ char *css_key;
+ char *css_signature;
+ char *css_simg;
+ unsigned long simg_size;
+ unsigned int ae_num;
+ unsigned int ae_mask;
+ unsigned int fw_type;
+ unsigned long simg_name;
+ unsigned long appmeta_data;
+};
+
+struct icp_qat_suof_img_tbl {
+ unsigned int num_simgs;
+ struct icp_qat_suof_img_hdr *simg_hdr;
+};
+
+struct icp_qat_suof_handle {
+ unsigned int file_id;
+ unsigned int check_sum;
+ char min_ver;
+ char maj_ver;
+ char fw_type;
+ char *suof_buf;
+ unsigned int suof_size;
+ char *sym_str;
+ unsigned int sym_size;
+ struct icp_qat_suof_img_tbl img_table;
+};
+
+struct icp_qat_fw_auth_desc {
+ unsigned int img_len;
+ unsigned int ae_mask;
+ unsigned int css_hdr_high;
+ unsigned int css_hdr_low;
+ unsigned int img_high;
+ unsigned int img_low;
+ unsigned int signature_high;
+ unsigned int signature_low;
+ unsigned int fwsk_pub_high;
+ unsigned int fwsk_pub_low;
+ unsigned int img_ae_mode_data_high;
+ unsigned int img_ae_mode_data_low;
+ unsigned int img_ae_init_data_high;
+ unsigned int img_ae_init_data_low;
+ unsigned int img_ae_insts_high;
+ unsigned int img_ae_insts_low;
+};
+
+struct icp_qat_auth_chunk {
+ struct icp_qat_fw_auth_desc fw_auth_desc;
+ u64 chunk_size;
+ u64 chunk_bus_addr;
+};
+
+struct icp_qat_css_hdr {
+ unsigned int module_type;
+ unsigned int header_len;
+ unsigned int header_ver;
+ unsigned int module_id;
+ unsigned int module_vendor;
+ unsigned int date;
+ unsigned int size;
+ unsigned int key_size;
+ unsigned int module_size;
+ unsigned int exponent_size;
+ unsigned int fw_type;
+ unsigned int reserved[21];
+};
+
+struct icp_qat_simg_ae_mode {
+ unsigned int file_id;
+ unsigned short maj_ver;
+ unsigned short min_ver;
+ unsigned int dev_type;
+ unsigned short devmax_ver;
+ unsigned short devmin_ver;
+ unsigned int ae_mask;
+ unsigned int ctx_enables;
+ char fw_type;
+ char ctx_mode;
+ char nn_mode;
+ char lm0_mode;
+ char lm1_mode;
+ char scs_mode;
+ char lm2_mode;
+ char lm3_mode;
+ char tindex_mode;
+ unsigned char reserved[7];
+ char simg_name[256];
+ char appmeta_data[256];
+};
+
+struct icp_qat_suof_filehdr {
+ unsigned int file_id;
+ unsigned int check_sum;
+ char min_ver;
+ char maj_ver;
+ char fw_type;
+ char reserved;
+ unsigned short max_chunks;
+ unsigned short num_chunks;
+};
+
+struct icp_qat_suof_chunk_hdr {
+ char chunk_id[ICP_QAT_SUOF_OBJ_ID_LEN];
+ u64 offset;
+ u64 size;
+};
+
+struct icp_qat_suof_strtable {
+ unsigned int tab_length;
+ unsigned int strings;
+};
+
+struct icp_qat_suof_objhdr {
+ unsigned int img_length;
+ unsigned int reserved;
+};
+
+struct icp_qat_mof_file_hdr {
+ unsigned int file_id;
+ unsigned int checksum;
+ char min_ver;
+ char maj_ver;
+ unsigned short reserved;
+ unsigned short max_chunks;
+ unsigned short num_chunks;
+};
+
+struct icp_qat_mof_chunkhdr {
+ char chunk_id[ICP_QAT_MOF_OBJ_ID_LEN];
+ u64 offset;
+ u64 size;
+};
+
+struct icp_qat_mof_str_table {
+ unsigned int tab_len;
+ unsigned int strings;
+};
+
+struct icp_qat_mof_obj_hdr {
+ unsigned short max_chunks;
+ unsigned short num_chunks;
+ unsigned int reserved;
+};
+
+struct icp_qat_mof_obj_chunkhdr {
+ char chunk_id[ICP_QAT_MOF_OBJ_CHUNKID_LEN];
+ u64 offset;
+ u64 size;
+ unsigned int name;
+ unsigned int reserved;
+};
+
+struct icp_qat_mof_objhdr {
+ char *obj_name;
+ char *obj_buf;
+ unsigned int obj_size;
+};
+
+struct icp_qat_mof_table {
+ unsigned int num_objs;
+ struct icp_qat_mof_objhdr *obj_hdr;
+};
+
+struct icp_qat_mof_handle {
+ unsigned int file_id;
+ unsigned int checksum;
+ char min_ver;
+ char maj_ver;
+ char *mof_buf;
+ u32 mof_size;
+ char *sym_str;
+ unsigned int sym_size;
+ char *uobjs_hdr;
+ char *sobjs_hdr;
+ struct icp_qat_mof_table obj_table;
+};
+#endif
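
Illustrative sketch (not part of the patch): how the signed-image length macros above decompose an authenticated AE image. ICP_QAT_CSS_AE_SIMG_LEN(handle) is the sum of these four parts, with the RSA2K (CSS_*) or RSA3K (DSS_*) lengths selected by chip_info->css_3k.

static inline unsigned int example_ae_simg_len(struct icp_qat_fw_loader_handle *handle)
{
	return sizeof(struct icp_qat_css_hdr) +		/* CSS header */
	       ICP_QAT_CSS_FWSK_PUB_LEN(handle) +	/* FW signing public key */
	       ICP_QAT_CSS_SIGNATURE_LEN(handle) +	/* RSA signature */
	       ICP_QAT_CSS_AE_IMG_LEN;			/* AE mode + init sequence + ustore image */
}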
diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs.c b/drivers/crypto/intel/qat/qat_common/qat_algs.c
new file mode 100644
index 0000000000..3c4bba4a87
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/qat_algs.c
@@ -0,0 +1,1423 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/crypto.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/cipher.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aes.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <crypto/hash.h>
+#include <crypto/hmac.h>
+#include <crypto/algapi.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/xts.h>
+#include <linux/dma-mapping.h>
+#include "adf_accel_devices.h"
+#include "qat_algs_send.h"
+#include "adf_common_drv.h"
+#include "qat_crypto.h"
+#include "icp_qat_hw.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+#include "qat_bl.h"
+
+#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
+ ICP_QAT_HW_CIPHER_NO_CONVERT, \
+ ICP_QAT_HW_CIPHER_ENCRYPT)
+
+#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
+ ICP_QAT_HW_CIPHER_KEY_CONVERT, \
+ ICP_QAT_HW_CIPHER_DECRYPT)
+
+#define QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode) \
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
+ ICP_QAT_HW_CIPHER_NO_CONVERT, \
+ ICP_QAT_HW_CIPHER_DECRYPT)
+
+#define HW_CAP_AES_V2(accel_dev) \
+ (GET_HW_DATA(accel_dev)->accel_capabilities_mask & \
+ ICP_ACCEL_CAPABILITIES_AES_V2)
+
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+/* Common content descriptor */
+struct qat_alg_cd {
+ union {
+ struct qat_enc { /* Encrypt content desc */
+ struct icp_qat_hw_cipher_algo_blk cipher;
+ struct icp_qat_hw_auth_algo_blk hash;
+ } qat_enc_cd;
+ struct qat_dec { /* Decrypt content desc */
+ struct icp_qat_hw_auth_algo_blk hash;
+ struct icp_qat_hw_cipher_algo_blk cipher;
+ } qat_dec_cd;
+ };
+} __aligned(64);
+
+struct qat_alg_aead_ctx {
+ struct qat_alg_cd *enc_cd;
+ struct qat_alg_cd *dec_cd;
+ dma_addr_t enc_cd_paddr;
+ dma_addr_t dec_cd_paddr;
+ struct icp_qat_fw_la_bulk_req enc_fw_req;
+ struct icp_qat_fw_la_bulk_req dec_fw_req;
+ struct crypto_shash *hash_tfm;
+ enum icp_qat_hw_auth_algo qat_hash_alg;
+ struct qat_crypto_instance *inst;
+ union {
+ struct sha1_state sha1;
+ struct sha256_state sha256;
+ struct sha512_state sha512;
+ };
+ char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
+ char opad[SHA512_BLOCK_SIZE];
+};
+
+struct qat_alg_skcipher_ctx {
+ struct icp_qat_hw_cipher_algo_blk *enc_cd;
+ struct icp_qat_hw_cipher_algo_blk *dec_cd;
+ dma_addr_t enc_cd_paddr;
+ dma_addr_t dec_cd_paddr;
+ struct icp_qat_fw_la_bulk_req enc_fw_req;
+ struct icp_qat_fw_la_bulk_req dec_fw_req;
+ struct qat_crypto_instance *inst;
+ struct crypto_skcipher *ftfm;
+ struct crypto_cipher *tweak;
+ bool fallback;
+ int mode;
+};
+
+static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
+{
+ switch (qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ return ICP_QAT_HW_SHA1_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ return ICP_QAT_HW_SHA256_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ return ICP_QAT_HW_SHA512_STATE1_SZ;
+ default:
+ return -EFAULT;
+ }
+}
+
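+/*
+ * Precompute the HMAC ipad/opad partial hashes for the session and
+ * store the exported states into the content descriptor's state1
+ * buffer (inner state first, outer state at an 8-byte aligned offset),
+ * byte-swapped to big endian as expected by the firmware.
+ */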
+static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
+ struct qat_alg_aead_ctx *ctx,
+ const u8 *auth_key,
+ unsigned int auth_keylen)
+{
+ SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
+ int block_size = crypto_shash_blocksize(ctx->hash_tfm);
+ int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
+ __be32 *hash_state_out;
+ __be64 *hash512_state_out;
+ int i, offset;
+
+ memset(ctx->ipad, 0, block_size);
+ memset(ctx->opad, 0, block_size);
+ shash->tfm = ctx->hash_tfm;
+
+ if (auth_keylen > block_size) {
+ int ret = crypto_shash_digest(shash, auth_key,
+ auth_keylen, ctx->ipad);
+ if (ret)
+ return ret;
+
+ memcpy(ctx->opad, ctx->ipad, digest_size);
+ } else {
+ memcpy(ctx->ipad, auth_key, auth_keylen);
+ memcpy(ctx->opad, auth_key, auth_keylen);
+ }
+
+ for (i = 0; i < block_size; i++) {
+ char *ipad_ptr = ctx->ipad + i;
+ char *opad_ptr = ctx->opad + i;
+ *ipad_ptr ^= HMAC_IPAD_VALUE;
+ *opad_ptr ^= HMAC_OPAD_VALUE;
+ }
+
+ if (crypto_shash_init(shash))
+ return -EFAULT;
+
+ if (crypto_shash_update(shash, ctx->ipad, block_size))
+ return -EFAULT;
+
+ hash_state_out = (__be32 *)hash->sha.state1;
+ hash512_state_out = (__be64 *)hash_state_out;
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ if (crypto_shash_export(shash, &ctx->sha1))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+ *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ if (crypto_shash_export(shash, &ctx->sha256))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+ *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ if (crypto_shash_export(shash, &ctx->sha512))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
+ *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
+ break;
+ default:
+ return -EFAULT;
+ }
+
+ if (crypto_shash_init(shash))
+ return -EFAULT;
+
+ if (crypto_shash_update(shash, ctx->opad, block_size))
+ return -EFAULT;
+
+ offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
+ if (offset < 0)
+ return -EFAULT;
+
+ hash_state_out = (__be32 *)(hash->sha.state1 + offset);
+ hash512_state_out = (__be64 *)hash_state_out;
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ if (crypto_shash_export(shash, &ctx->sha1))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+ *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ if (crypto_shash_export(shash, &ctx->sha256))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+ *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ if (crypto_shash_export(shash, &ctx->sha512))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
+ *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
+ break;
+ default:
+ return -EFAULT;
+ }
+ memzero_explicit(ctx->ipad, block_size);
+ memzero_explicit(ctx->opad, block_size);
+ return 0;
+}
+
+static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
+{
+ header->hdr_flags =
+ ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+ header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+ header->comn_req_flags =
+ ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
+ QAT_COMN_PTR_TYPE_SGL);
+ ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_PARTIAL_NONE);
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_PROTO);
+ ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_UPDATE_STATE);
+}
+
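+/*
+ * Build the encrypt (cipher-then-hash) session: the content descriptor
+ * holds the cipher config and key followed by the hash setup, and the
+ * request template chains the cipher slice into the auth slice.
+ */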
+static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
+ int alg,
+ struct crypto_authenc_keys *keys,
+ int mode)
+{
+ struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
+ unsigned int digestsize = crypto_aead_authsize(aead_tfm);
+ struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
+ struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
+ struct icp_qat_hw_auth_algo_blk *hash =
+ (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
+ sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
+ struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
+ struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ void *ptr = &req_tmpl->cd_ctrl;
+ struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+ struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+
+ /* CD setup */
+ cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
+ memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
+ hash->sha.inner_setup.auth_config.config =
+ ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+ ctx->qat_hash_alg, digestsize);
+ hash->sha.inner_setup.auth_counter.counter =
+ cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+
+ if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
+ return -EFAULT;
+
+ /* Request setup */
+ qat_alg_init_common_hdr(header);
+ header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_RET_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+ cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
+ cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
+
+ /* Cipher CD config setup */
+ cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
+ cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
+ cipher_cd_ctrl->cipher_cfg_offset = 0;
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+ /* Auth CD config setup */
+ hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
+ hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
+ hash_cd_ctrl->inner_res_sz = digestsize;
+ hash_cd_ctrl->final_sz = digestsize;
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ hash_cd_ctrl->inner_state1_sz =
+ round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
+ hash_cd_ctrl->inner_state2_sz =
+ round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
+ hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
+ hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
+ break;
+ default:
+ break;
+ }
+ hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
+ ((sizeof(struct icp_qat_hw_auth_setup) +
+ round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
+ ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+ return 0;
+}
+
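+/*
+ * Build the decrypt (hash-then-cipher) session: here the hash setup
+ * comes first in the content descriptor and the auth slice feeds the
+ * cipher slice, with the digest verified via ICP_QAT_FW_LA_CMP_AUTH_RES.
+ */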
+static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
+ int alg,
+ struct crypto_authenc_keys *keys,
+ int mode)
+{
+ struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
+ unsigned int digestsize = crypto_aead_authsize(aead_tfm);
+ struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
+ struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
+ struct icp_qat_hw_cipher_algo_blk *cipher =
+ (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
+ sizeof(struct icp_qat_hw_auth_setup) +
+ roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
+ struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
+ struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ void *ptr = &req_tmpl->cd_ctrl;
+ struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+ struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+ struct icp_qat_fw_la_auth_req_params *auth_param =
+ (struct icp_qat_fw_la_auth_req_params *)
+ ((char *)&req_tmpl->serv_specif_rqpars +
+ sizeof(struct icp_qat_fw_la_cipher_req_params));
+
+ /* CD setup */
+ cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
+ memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
+ hash->sha.inner_setup.auth_config.config =
+ ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+ ctx->qat_hash_alg,
+ digestsize);
+ hash->sha.inner_setup.auth_counter.counter =
+ cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+
+ if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
+ return -EFAULT;
+
+ /* Request setup */
+ qat_alg_init_common_hdr(header);
+ header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_CMP_AUTH_RES);
+ cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
+ cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
+
+ /* Cipher CD config setup */
+ cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
+ cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
+ cipher_cd_ctrl->cipher_cfg_offset =
+ (sizeof(struct icp_qat_hw_auth_setup) +
+ roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+
+ /* Auth CD config setup */
+ hash_cd_ctrl->hash_cfg_offset = 0;
+ hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
+ hash_cd_ctrl->inner_res_sz = digestsize;
+ hash_cd_ctrl->final_sz = digestsize;
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ hash_cd_ctrl->inner_state1_sz =
+ round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
+ hash_cd_ctrl->inner_state2_sz =
+ round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
+ hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
+ hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
+ break;
+ default:
+ break;
+ }
+
+ hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
+ ((sizeof(struct icp_qat_hw_auth_setup) +
+ round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
+ auth_param->auth_res_sz = digestsize;
+ ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+ return 0;
+}
+
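+/*
+ * Fill in the parts of the content descriptor and request template that
+ * are common to encrypt and decrypt: header flags, key material and the
+ * cipher slice chaining. On AES-v2 capable devices XTS and CTR use the
+ * UCS slice, which changes how the key is laid out in the descriptor.
+ */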
+static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
+ struct icp_qat_fw_la_bulk_req *req,
+ struct icp_qat_hw_cipher_algo_blk *cd,
+ const u8 *key, unsigned int keylen)
+{
+ struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
+ struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
+ bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
+ int mode = ctx->mode;
+
+ qat_alg_init_common_hdr(header);
+ header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+ cd_pars->u.s.content_desc_params_sz =
+ sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
+
+ if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
+ ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
+
+ /* Store both XTS keys in the CD; only the first key is sent
+ * to the HW, the second key is used for tweak calculation.
+ */
+ memcpy(cd->ucs_aes.key, key, keylen);
+ keylen = keylen / 2;
+ } else if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
+ ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
+ memcpy(cd->ucs_aes.key, key, keylen);
+ keylen = round_up(keylen, 16);
+ } else {
+ memcpy(cd->aes.key, key, keylen);
+ }
+
+ /* Cipher CD config setup */
+ cd_ctrl->cipher_key_sz = keylen >> 3;
+ cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
+ cd_ctrl->cipher_cfg_offset = 0;
+ ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+}
+
+static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
+ int alg, const u8 *key,
+ unsigned int keylen, int mode)
+{
+ struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
+ struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
+ struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+
+ qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
+ cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
+ enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
+}
+
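+/*
+ * Derive the XTS decryption key from the forward key by expanding it in
+ * software and copying out the last round key(s). Used on the
+ * no-convert path in qat_alg_skcipher_init_dec(), where the HW key
+ * conversion is not available.
+ */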
+static void qat_alg_xts_reverse_key(const u8 *key_forward, unsigned int keylen,
+ u8 *key_reverse)
+{
+ struct crypto_aes_ctx aes_expanded;
+ int nrounds;
+ u8 *key;
+
+ aes_expandkey(&aes_expanded, key_forward, keylen);
+ if (keylen == AES_KEYSIZE_128) {
+ nrounds = 10;
+ key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
+ memcpy(key_reverse, key, AES_BLOCK_SIZE);
+ } else {
+ /* AES_KEYSIZE_256 */
+ nrounds = 14;
+ key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
+ memcpy(key_reverse, key, AES_BLOCK_SIZE);
+ memcpy(key_reverse + AES_BLOCK_SIZE, key - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE);
+ }
+}
+
+static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
+ int alg, const u8 *key,
+ unsigned int keylen, int mode)
+{
+ struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
+ struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
+ struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+ bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
+
+ qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
+ cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
+
+ if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
+ /* Key reversing not supported, set no convert */
+ dec_cd->aes.cipher_config.val =
+ QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode);
+
+ /* In-place key reversal */
+ qat_alg_xts_reverse_key(dec_cd->ucs_aes.key, keylen / 2,
+ dec_cd->ucs_aes.key);
+ } else if (mode != ICP_QAT_HW_CIPHER_CTR_MODE) {
+ dec_cd->aes.cipher_config.val =
+ QAT_AES_HW_CONFIG_DEC(alg, mode);
+ } else {
+ dec_cd->aes.cipher_config.val =
+ QAT_AES_HW_CONFIG_ENC(alg, mode);
+ }
+}
+
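+/*
+ * Map the key length to the QAT cipher algorithm ID. XTS keys are twice
+ * the AES key size since they carry both the cipher key and the tweak
+ * key.
+ */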
+static int qat_alg_validate_key(int key_len, int *alg, int mode)
+{
+ if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
+ switch (key_len) {
+ case AES_KEYSIZE_128:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+ break;
+ case AES_KEYSIZE_192:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
+ break;
+ case AES_KEYSIZE_256:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (key_len) {
+ case AES_KEYSIZE_128 << 1:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+ break;
+ case AES_KEYSIZE_256 << 1:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen, int mode)
+{
+ struct crypto_authenc_keys keys;
+ int alg;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen))
+ goto bad_key;
+
+ if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
+ goto bad_key;
+
+ if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
+ goto error;
+
+ if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
+ goto error;
+
+ memzero_explicit(&keys, sizeof(keys));
+ return 0;
+bad_key:
+ memzero_explicit(&keys, sizeof(keys));
+ return -EINVAL;
+error:
+ memzero_explicit(&keys, sizeof(keys));
+ return -EFAULT;
+}
+
+static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
+ const u8 *key,
+ unsigned int keylen,
+ int mode)
+{
+ int alg;
+
+ if (qat_alg_validate_key(keylen, &alg, mode))
+ return -EINVAL;
+
+ qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
+ qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
+ return 0;
+}
+
+static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+ memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+ memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
+ memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
+
+ return qat_alg_aead_init_sessions(tfm, key, keylen,
+ ICP_QAT_HW_CIPHER_CBC_MODE);
+}
+
+static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct qat_crypto_instance *inst = NULL;
+ int node = numa_node_id();
+ struct device *dev;
+ int ret;
+
+ inst = qat_crypto_get_instance_node(node);
+ if (!inst)
+ return -EINVAL;
+ dev = &GET_DEV(inst->accel_dev);
+ ctx->inst = inst;
+ ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
+ &ctx->enc_cd_paddr,
+ GFP_ATOMIC);
+ if (!ctx->enc_cd) {
+ ret = -ENOMEM;
+ goto out_free_inst;
+ }
+ ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
+ &ctx->dec_cd_paddr,
+ GFP_ATOMIC);
+ if (!ctx->dec_cd) {
+ ret = -ENOMEM;
+ goto out_free_enc;
+ }
+
+ ret = qat_alg_aead_init_sessions(tfm, key, keylen,
+ ICP_QAT_HW_CIPHER_CBC_MODE);
+ if (ret)
+ goto out_free_all;
+
+ return 0;
+
+out_free_all:
+ memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
+ dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+ ctx->dec_cd, ctx->dec_cd_paddr);
+ ctx->dec_cd = NULL;
+out_free_enc:
+ memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
+ dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+ ctx->enc_cd, ctx->enc_cd_paddr);
+ ctx->enc_cd = NULL;
+out_free_inst:
+ ctx->inst = NULL;
+ qat_crypto_put_instance(inst);
+ return ret;
+}
+
+static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ if (ctx->enc_cd)
+ return qat_alg_aead_rekey(tfm, key, keylen);
+ else
+ return qat_alg_aead_newkey(tfm, key, keylen);
+}
+
+static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
+ struct qat_crypto_request *qat_req)
+{
+ struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct aead_request *areq = qat_req->aead_req;
+ u8 stat_field = qat_resp->comn_resp.comn_status;
+ int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
+
+ qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
+ if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+ res = -EBADMSG;
+ aead_request_complete(areq, res);
+}
+
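+/*
+ * Advance the CTR-mode IV by the number of AES blocks processed,
+ * treating it as a 128-bit big-endian counter split into two 64-bit
+ * halves (iv_lo/iv_hi) with manual carry propagation.
+ */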
+static void qat_alg_update_iv_ctr_mode(struct qat_crypto_request *qat_req)
+{
+ struct skcipher_request *sreq = qat_req->skcipher_req;
+ u64 iv_lo_prev;
+ u64 iv_lo;
+ u64 iv_hi;
+
+ memcpy(qat_req->iv, sreq->iv, AES_BLOCK_SIZE);
+
+ iv_lo = be64_to_cpu(qat_req->iv_lo);
+ iv_hi = be64_to_cpu(qat_req->iv_hi);
+
+ iv_lo_prev = iv_lo;
+ iv_lo += DIV_ROUND_UP(sreq->cryptlen, AES_BLOCK_SIZE);
+ if (iv_lo < iv_lo_prev)
+ iv_hi++;
+
+ qat_req->iv_lo = cpu_to_be64(iv_lo);
+ qat_req->iv_hi = cpu_to_be64(iv_hi);
+}
+
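+/*
+ * For CBC the next IV is the last ciphertext block: taken from the
+ * destination when encrypting, or from the source when decrypting.
+ */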
+static void qat_alg_update_iv_cbc_mode(struct qat_crypto_request *qat_req)
+{
+ struct skcipher_request *sreq = qat_req->skcipher_req;
+ int offset = sreq->cryptlen - AES_BLOCK_SIZE;
+ struct scatterlist *sgl;
+
+ if (qat_req->encryption)
+ sgl = sreq->dst;
+ else
+ sgl = sreq->src;
+
+ scatterwalk_map_and_copy(qat_req->iv, sgl, offset, AES_BLOCK_SIZE, 0);
+}
+
+static void qat_alg_update_iv(struct qat_crypto_request *qat_req)
+{
+ struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+
+ switch (ctx->mode) {
+ case ICP_QAT_HW_CIPHER_CTR_MODE:
+ qat_alg_update_iv_ctr_mode(qat_req);
+ break;
+ case ICP_QAT_HW_CIPHER_CBC_MODE:
+ qat_alg_update_iv_cbc_mode(qat_req);
+ break;
+ case ICP_QAT_HW_CIPHER_XTS_MODE:
+ break;
+ default:
+ dev_warn(dev, "Unsupported IV update for cipher mode %d\n",
+ ctx->mode);
+ }
+}
+
+static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
+ struct qat_crypto_request *qat_req)
+{
+ struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct skcipher_request *sreq = qat_req->skcipher_req;
+ u8 stat_field = qat_resp->comn_resp.comn_status;
+ int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
+
+ qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
+ if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+ res = -EINVAL;
+
+ if (qat_req->encryption)
+ qat_alg_update_iv(qat_req);
+
+ memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
+
+ skcipher_request_complete(sreq, res);
+}
+
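+/*
+ * Response ring callback for symmetric requests: dispatch to the
+ * per-request completion handler, then try to resubmit any backlogged
+ * requests now that a ring slot has been freed.
+ */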
+void qat_alg_callback(void *resp)
+{
+ struct icp_qat_fw_la_resp *qat_resp = resp;
+ struct qat_crypto_request *qat_req =
+ (void *)(__force long)qat_resp->opaque_data;
+ struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;
+
+ qat_req->cb(qat_resp, qat_req);
+
+ qat_alg_send_backlog(backlog);
+}
+
+static int qat_alg_send_sym_message(struct qat_crypto_request *qat_req,
+ struct qat_crypto_instance *inst,
+ struct crypto_async_request *base)
+{
+ struct qat_alg_req *alg_req = &qat_req->alg_req;
+
+ alg_req->fw_req = (u32 *)&qat_req->req;
+ alg_req->tx_ring = inst->sym_tx;
+ alg_req->base = base;
+ alg_req->backlog = &inst->backlog;
+
+ return qat_alg_send_message(alg_req);
+}
+
+static int qat_alg_aead_dec(struct aead_request *areq)
+{
+ struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
+ struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_crypto_request *qat_req = aead_request_ctx(areq);
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ struct icp_qat_fw_la_bulk_req *msg;
+ int digest_size = crypto_aead_authsize(aead_tfm);
+ gfp_t f = qat_algs_alloc_flags(&areq->base);
+ int ret;
+ u32 cipher_len;
+
+ cipher_len = areq->cryptlen - digest_size;
+ if (cipher_len % AES_BLOCK_SIZE != 0)
+ return -EINVAL;
+
+ ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
+ &qat_req->buf, NULL, f);
+ if (unlikely(ret))
+ return ret;
+
+ msg = &qat_req->req;
+ *msg = ctx->dec_fw_req;
+ qat_req->aead_ctx = ctx;
+ qat_req->aead_req = areq;
+ qat_req->cb = qat_aead_alg_callback;
+ qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
+ qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+ qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+ cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+ cipher_param->cipher_length = cipher_len;
+ cipher_param->cipher_offset = areq->assoclen;
+ memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
+ auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
+ auth_param->auth_off = 0;
+ auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
+
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
+ if (ret == -ENOSPC)
+ qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
+
+ return ret;
+}
+
+static int qat_alg_aead_enc(struct aead_request *areq)
+{
+ struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
+ struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_crypto_request *qat_req = aead_request_ctx(areq);
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ gfp_t f = qat_algs_alloc_flags(&areq->base);
+ struct icp_qat_fw_la_bulk_req *msg;
+ u8 *iv = areq->iv;
+ int ret;
+
+ if (areq->cryptlen % AES_BLOCK_SIZE != 0)
+ return -EINVAL;
+
+ ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
+ &qat_req->buf, NULL, f);
+ if (unlikely(ret))
+ return ret;
+
+ msg = &qat_req->req;
+ *msg = ctx->enc_fw_req;
+ qat_req->aead_ctx = ctx;
+ qat_req->aead_req = areq;
+ qat_req->cb = qat_aead_alg_callback;
+ qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
+ qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+ qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+ cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+ auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
+
+ memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
+ cipher_param->cipher_length = areq->cryptlen;
+ cipher_param->cipher_offset = areq->assoclen;
+
+ auth_param->auth_off = 0;
+ auth_param->auth_len = areq->assoclen + areq->cryptlen;
+
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
+ if (ret == -ENOSPC)
+ qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
+
+ return ret;
+}
+
+static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
+ const u8 *key, unsigned int keylen,
+ int mode)
+{
+ memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+ memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+ memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
+ memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
+
+ return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
+}
+
+static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
+ const u8 *key, unsigned int keylen,
+ int mode)
+{
+ struct qat_crypto_instance *inst = NULL;
+ struct device *dev;
+ int node = numa_node_id();
+ int ret;
+
+ inst = qat_crypto_get_instance_node(node);
+ if (!inst)
+ return -EINVAL;
+ dev = &GET_DEV(inst->accel_dev);
+ ctx->inst = inst;
+ ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
+ &ctx->enc_cd_paddr,
+ GFP_ATOMIC);
+ if (!ctx->enc_cd) {
+ ret = -ENOMEM;
+ goto out_free_instance;
+ }
+ ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
+ &ctx->dec_cd_paddr,
+ GFP_ATOMIC);
+ if (!ctx->dec_cd) {
+ ret = -ENOMEM;
+ goto out_free_enc;
+ }
+
+ ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
+ if (ret)
+ goto out_free_all;
+
+ return 0;
+
+out_free_all:
+ memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+ dma_free_coherent(dev, sizeof(*ctx->dec_cd),
+ ctx->dec_cd, ctx->dec_cd_paddr);
+ ctx->dec_cd = NULL;
+out_free_enc:
+ memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+ dma_free_coherent(dev, sizeof(*ctx->enc_cd),
+ ctx->enc_cd, ctx->enc_cd_paddr);
+ ctx->enc_cd = NULL;
+out_free_instance:
+ ctx->inst = NULL;
+ qat_crypto_put_instance(inst);
+ return ret;
+}
+
+static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen,
+ int mode)
+{
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ ctx->mode = mode;
+
+ if (ctx->enc_cd)
+ return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
+ else
+ return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
+}
+
+static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return qat_alg_skcipher_setkey(tfm, key, keylen,
+ ICP_QAT_HW_CIPHER_CBC_MODE);
+}
+
+static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return qat_alg_skcipher_setkey(tfm, key, keylen,
+ ICP_QAT_HW_CIPHER_CTR_MODE);
+}
+
+static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
+
+ ret = xts_verify_key(tfm, key, keylen);
+ if (ret)
+ return ret;
+
+ if (keylen >> 1 == AES_KEYSIZE_192) {
+ ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
+ if (ret)
+ return ret;
+
+ ctx->fallback = true;
+
+ return 0;
+ }
+
+ ctx->fallback = false;
+
+ ret = qat_alg_skcipher_setkey(tfm, key, keylen,
+ ICP_QAT_HW_CIPHER_XTS_MODE);
+ if (ret)
+ return ret;
+
+ if (HW_CAP_AES_V2(ctx->inst->accel_dev))
+ ret = crypto_cipher_setkey(ctx->tweak, key + (keylen / 2),
+ keylen / 2);
+
+ return ret;
+}
+
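+/*
+ * Load the request IV into the firmware message. For XTS on AES-v2
+ * capable devices the tweak is pre-encrypted in software with the
+ * second half of the key (ctx->tweak); otherwise the IV is copied
+ * verbatim.
+ */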
+static void qat_alg_set_req_iv(struct qat_crypto_request *qat_req)
+{
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
+ bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
+ u8 *iv = qat_req->skcipher_req->iv;
+
+ cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+
+ if (aes_v2_capable && ctx->mode == ICP_QAT_HW_CIPHER_XTS_MODE)
+ crypto_cipher_encrypt_one(ctx->tweak,
+ (u8 *)cipher_param->u.cipher_IV_array,
+ iv);
+ else
+ memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
+}
+
+static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
+ struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ gfp_t f = qat_algs_alloc_flags(&req->base);
+ struct icp_qat_fw_la_bulk_req *msg;
+ int ret;
+
+ if (req->cryptlen == 0)
+ return 0;
+
+ ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
+ &qat_req->buf, NULL, f);
+ if (unlikely(ret))
+ return ret;
+
+ msg = &qat_req->req;
+ *msg = ctx->enc_fw_req;
+ qat_req->skcipher_ctx = ctx;
+ qat_req->skcipher_req = req;
+ qat_req->cb = qat_skcipher_alg_callback;
+ qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
+ qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+ qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+ qat_req->encryption = true;
+ cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+ cipher_param->cipher_length = req->cryptlen;
+ cipher_param->cipher_offset = 0;
+
+ qat_alg_set_req_iv(qat_req);
+
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
+ if (ret == -ENOSPC)
+ qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
+
+ return ret;
+}
+
+static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
+{
+ if (req->cryptlen % AES_BLOCK_SIZE != 0)
+ return -EINVAL;
+
+ return qat_alg_skcipher_encrypt(req);
+}
+
+static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
+ struct skcipher_request *nreq = skcipher_request_ctx(req);
+
+ if (req->cryptlen < XTS_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (ctx->fallback) {
+ memcpy(nreq, req, sizeof(*req));
+ skcipher_request_set_tfm(nreq, ctx->ftfm);
+ return crypto_skcipher_encrypt(nreq);
+ }
+
+ return qat_alg_skcipher_encrypt(req);
+}
+
+static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
+ struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ gfp_t f = qat_algs_alloc_flags(&req->base);
+ struct icp_qat_fw_la_bulk_req *msg;
+ int ret;
+
+ if (req->cryptlen == 0)
+ return 0;
+
+ ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
+ &qat_req->buf, NULL, f);
+ if (unlikely(ret))
+ return ret;
+
+ msg = &qat_req->req;
+ *msg = ctx->dec_fw_req;
+ qat_req->skcipher_ctx = ctx;
+ qat_req->skcipher_req = req;
+ qat_req->cb = qat_skcipher_alg_callback;
+ qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
+ qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+ qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+ qat_req->encryption = false;
+ cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+ cipher_param->cipher_length = req->cryptlen;
+ cipher_param->cipher_offset = 0;
+
+ qat_alg_set_req_iv(qat_req);
+ qat_alg_update_iv(qat_req);
+
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
+ if (ret == -ENOSPC)
+ qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
+
+ return ret;
+}
+
+static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
+{
+ if (req->cryptlen % AES_BLOCK_SIZE != 0)
+ return -EINVAL;
+
+ return qat_alg_skcipher_decrypt(req);
+}
+
+static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
+ struct skcipher_request *nreq = skcipher_request_ctx(req);
+
+ if (req->cryptlen < XTS_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (ctx->fallback) {
+ memcpy(nreq, req, sizeof(*req));
+ skcipher_request_set_tfm(nreq, ctx->ftfm);
+ return crypto_skcipher_decrypt(nreq);
+ }
+
+ return qat_alg_skcipher_decrypt(req);
+}
+
+static int qat_alg_aead_init(struct crypto_aead *tfm,
+ enum icp_qat_hw_auth_algo hash,
+ const char *hash_name)
+{
+ struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+ if (IS_ERR(ctx->hash_tfm))
+ return PTR_ERR(ctx->hash_tfm);
+ ctx->qat_hash_alg = hash;
+ crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
+ return 0;
+}
+
+static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
+{
+ return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
+}
+
+static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
+{
+ return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
+}
+
+static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
+{
+ return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
+}
+
+static void qat_alg_aead_exit(struct crypto_aead *tfm)
+{
+ struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev;
+
+ crypto_free_shash(ctx->hash_tfm);
+
+ if (!inst)
+ return;
+
+ dev = &GET_DEV(inst->accel_dev);
+ if (ctx->enc_cd) {
+ memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
+ dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+ ctx->enc_cd, ctx->enc_cd_paddr);
+ }
+ if (ctx->dec_cd) {
+ memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
+ dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+ ctx->dec_cd, ctx->dec_cd_paddr);
+ }
+ qat_crypto_put_instance(inst);
+}
+
+static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
+{
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
+ return 0;
+}
+
+static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
+{
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int reqsize;
+
+ ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->ftfm))
+ return PTR_ERR(ctx->ftfm);
+
+ ctx->tweak = crypto_alloc_cipher("aes", 0, 0);
+ if (IS_ERR(ctx->tweak)) {
+ crypto_free_skcipher(ctx->ftfm);
+ return PTR_ERR(ctx->tweak);
+ }
+
+ reqsize = max(sizeof(struct qat_crypto_request),
+ sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(ctx->ftfm));
+ crypto_skcipher_set_reqsize(tfm, reqsize);
+
+ return 0;
+}
+
+static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
+{
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev;
+
+ if (!inst)
+ return;
+
+ dev = &GET_DEV(inst->accel_dev);
+ if (ctx->enc_cd) {
+ memset(ctx->enc_cd, 0,
+ sizeof(struct icp_qat_hw_cipher_algo_blk));
+ dma_free_coherent(dev,
+ sizeof(struct icp_qat_hw_cipher_algo_blk),
+ ctx->enc_cd, ctx->enc_cd_paddr);
+ }
+ if (ctx->dec_cd) {
+ memset(ctx->dec_cd, 0,
+ sizeof(struct icp_qat_hw_cipher_algo_blk));
+ dma_free_coherent(dev,
+ sizeof(struct icp_qat_hw_cipher_algo_blk),
+ ctx->dec_cd, ctx->dec_cd_paddr);
+ }
+ qat_crypto_put_instance(inst);
+}
+
+static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
+{
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (ctx->ftfm)
+ crypto_free_skcipher(ctx->ftfm);
+
+ if (ctx->tweak)
+ crypto_free_cipher(ctx->tweak);
+
+ qat_alg_skcipher_exit_tfm(tfm);
+}
+
+static struct aead_alg qat_aeads[] = { {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "qat_aes_cbc_hmac_sha1",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .init = qat_alg_aead_sha1_init,
+ .exit = qat_alg_aead_exit,
+ .setkey = qat_alg_aead_setkey,
+ .decrypt = qat_alg_aead_dec,
+ .encrypt = qat_alg_aead_enc,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "qat_aes_cbc_hmac_sha256",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .init = qat_alg_aead_sha256_init,
+ .exit = qat_alg_aead_exit,
+ .setkey = qat_alg_aead_setkey,
+ .decrypt = qat_alg_aead_dec,
+ .encrypt = qat_alg_aead_enc,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "qat_aes_cbc_hmac_sha512",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .init = qat_alg_aead_sha512_init,
+ .exit = qat_alg_aead_exit,
+ .setkey = qat_alg_aead_setkey,
+ .decrypt = qat_alg_aead_dec,
+ .encrypt = qat_alg_aead_enc,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+} };
+
+static struct skcipher_alg qat_skciphers[] = { {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "qat_aes_cbc",
+ .base.cra_priority = 4001,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = qat_alg_skcipher_init_tfm,
+ .exit = qat_alg_skcipher_exit_tfm,
+ .setkey = qat_alg_skcipher_cbc_setkey,
+ .decrypt = qat_alg_skcipher_blk_decrypt,
+ .encrypt = qat_alg_skcipher_blk_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+}, {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "qat_aes_ctr",
+ .base.cra_priority = 4001,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = qat_alg_skcipher_init_tfm,
+ .exit = qat_alg_skcipher_exit_tfm,
+ .setkey = qat_alg_skcipher_ctr_setkey,
+ .decrypt = qat_alg_skcipher_decrypt,
+ .encrypt = qat_alg_skcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+}, {
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "qat_aes_xts",
+ .base.cra_priority = 4001,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = qat_alg_skcipher_init_xts_tfm,
+ .exit = qat_alg_skcipher_exit_xts_tfm,
+ .setkey = qat_alg_skcipher_xts_setkey,
+ .decrypt = qat_alg_skcipher_xts_decrypt,
+ .encrypt = qat_alg_skcipher_xts_encrypt,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+} };
+
+int qat_algs_register(void)
+{
+ int ret = 0;
+
+ mutex_lock(&algs_lock);
+ if (++active_devs != 1)
+ goto unlock;
+
+ ret = crypto_register_skciphers(qat_skciphers,
+ ARRAY_SIZE(qat_skciphers));
+ if (ret)
+ goto unlock;
+
+ ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
+ if (ret)
+ goto unreg_algs;
+
+unlock:
+ mutex_unlock(&algs_lock);
+ return ret;
+
+unreg_algs:
+ crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
+ goto unlock;
+}
+
+void qat_algs_unregister(void)
+{
+ mutex_lock(&algs_lock);
+ if (--active_devs != 0)
+ goto unlock;
+
+ crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
+ crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
+
+unlock:
+ mutex_unlock(&algs_lock);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs_send.c b/drivers/crypto/intel/qat/qat_common/qat_algs_send.c
new file mode 100644
index 0000000000..b97b678823
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/qat_algs_send.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2022 Intel Corporation */
+#include <crypto/algapi.h>
+#include "adf_transport.h"
+#include "qat_algs_send.h"
+#include "qat_crypto.h"
+
+#define ADF_MAX_RETRIES 20
+
+static int qat_alg_send_message_retry(struct qat_alg_req *req)
+{
+ int ret = 0, ctr = 0;
+
+ do {
+ ret = adf_send_message(req->tx_ring, req->fw_req);
+ } while (ret == -EAGAIN && ctr++ < ADF_MAX_RETRIES);
+
+ if (ret == -EAGAIN)
+ return -ENOSPC;
+
+ return -EINPROGRESS;
+}
+
+void qat_alg_send_backlog(struct qat_instance_backlog *backlog)
+{
+ struct qat_alg_req *req, *tmp;
+
+ spin_lock_bh(&backlog->lock);
+ list_for_each_entry_safe(req, tmp, &backlog->list, list) {
+ if (adf_send_message(req->tx_ring, req->fw_req)) {
+ /* The HW ring is full. Do nothing.
+ * qat_alg_send_backlog() will be invoked again by
+ * another callback.
+ */
+ break;
+ }
+ list_del(&req->list);
+ crypto_request_complete(req->base, -EINPROGRESS);
+ }
+ spin_unlock_bh(&backlog->lock);
+}
+
+static bool qat_alg_try_enqueue(struct qat_alg_req *req)
+{
+ struct qat_instance_backlog *backlog = req->backlog;
+ struct adf_etr_ring_data *tx_ring = req->tx_ring;
+ u32 *fw_req = req->fw_req;
+
+ /* Check if any request is already backlogged */
+ if (!list_empty(&backlog->list))
+ return false;
+
+ /* Check if ring is nearly full */
+ if (adf_ring_nearly_full(tx_ring))
+ return false;
+
+ /* Try to enqueue to HW ring */
+ if (adf_send_message(tx_ring, fw_req))
+ return false;
+
+ return true;
+}
+
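+/*
+ * Enqueue the request, falling back to the backlog list if the ring is
+ * nearly full or a request is already backlogged. The enqueue attempt
+ * is repeated under the backlog lock to close the race with
+ * qat_alg_send_backlog() draining the list concurrently.
+ */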
+static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
+{
+ struct qat_instance_backlog *backlog = req->backlog;
+ int ret = -EINPROGRESS;
+
+ if (qat_alg_try_enqueue(req))
+ return ret;
+
+ spin_lock_bh(&backlog->lock);
+ if (!qat_alg_try_enqueue(req)) {
+ list_add_tail(&req->list, &backlog->list);
+ ret = -EBUSY;
+ }
+ spin_unlock_bh(&backlog->lock);
+
+ return ret;
+}
+
+int qat_alg_send_message(struct qat_alg_req *req)
+{
+ u32 flags = req->base->flags;
+
+ if (flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+ return qat_alg_send_message_maybacklog(req);
+ else
+ return qat_alg_send_message_retry(req);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs_send.h b/drivers/crypto/intel/qat/qat_common/qat_algs_send.h
new file mode 100644
index 0000000000..0baca16e1e
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/qat_algs_send.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef QAT_ALGS_SEND_H
+#define QAT_ALGS_SEND_H
+
+#include <linux/list.h>
+#include "adf_transport_internal.h"
+
+struct qat_instance_backlog {
+ struct list_head list;
+ spinlock_t lock; /* protects backlog list */
+};
+
+struct qat_alg_req {
+ u32 *fw_req;
+ struct adf_etr_ring_data *tx_ring;
+ struct crypto_async_request *base;
+ struct list_head list;
+ struct qat_instance_backlog *backlog;
+};
+
+int qat_alg_send_message(struct qat_alg_req *req);
+void qat_alg_send_backlog(struct qat_instance_backlog *backlog);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
new file mode 100644
index 0000000000..4128200a90
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
@@ -0,0 +1,1307 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/module.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/akcipher.h>
+#include <crypto/kpp.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/dh.h>
+#include <linux/dma-mapping.h>
+#include <linux/fips.h>
+#include <crypto/scatterwalk.h>
+#include "icp_qat_fw_pke.h"
+#include "adf_accel_devices.h"
+#include "qat_algs_send.h"
+#include "adf_transport.h"
+#include "adf_common_drv.h"
+#include "qat_crypto.h"
+
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+struct qat_rsa_input_params {
+ union {
+ struct {
+ dma_addr_t m;
+ dma_addr_t e;
+ dma_addr_t n;
+ } enc;
+ struct {
+ dma_addr_t c;
+ dma_addr_t d;
+ dma_addr_t n;
+ } dec;
+ struct {
+ dma_addr_t c;
+ dma_addr_t p;
+ dma_addr_t q;
+ dma_addr_t dp;
+ dma_addr_t dq;
+ dma_addr_t qinv;
+ } dec_crt;
+ u64 in_tab[8];
+ };
+} __packed __aligned(64);
+
+struct qat_rsa_output_params {
+ union {
+ struct {
+ dma_addr_t c;
+ } enc;
+ struct {
+ dma_addr_t m;
+ } dec;
+ u64 out_tab[8];
+ };
+} __packed __aligned(64);
+
+struct qat_rsa_ctx {
+ char *n;
+ char *e;
+ char *d;
+ char *p;
+ char *q;
+ char *dp;
+ char *dq;
+ char *qinv;
+ dma_addr_t dma_n;
+ dma_addr_t dma_e;
+ dma_addr_t dma_d;
+ dma_addr_t dma_p;
+ dma_addr_t dma_q;
+ dma_addr_t dma_dp;
+ dma_addr_t dma_dq;
+ dma_addr_t dma_qinv;
+ unsigned int key_sz;
+ bool crt_mode;
+ struct qat_crypto_instance *inst;
+} __packed __aligned(64);
+
+struct qat_dh_input_params {
+ union {
+ struct {
+ dma_addr_t b;
+ dma_addr_t xa;
+ dma_addr_t p;
+ } in;
+ struct {
+ dma_addr_t xa;
+ dma_addr_t p;
+ } in_g2;
+ u64 in_tab[8];
+ };
+} __packed __aligned(64);
+
+struct qat_dh_output_params {
+ union {
+ dma_addr_t r;
+ u64 out_tab[8];
+ };
+} __packed __aligned(64);
+
+struct qat_dh_ctx {
+ char *g;
+ char *xa;
+ char *p;
+ dma_addr_t dma_g;
+ dma_addr_t dma_xa;
+ dma_addr_t dma_p;
+ unsigned int p_size;
+ bool g2;
+ struct qat_crypto_instance *inst;
+} __packed __aligned(64);
+
+struct qat_asym_request {
+ union {
+ struct qat_rsa_input_params rsa;
+ struct qat_dh_input_params dh;
+ } in;
+ union {
+ struct qat_rsa_output_params rsa;
+ struct qat_dh_output_params dh;
+ } out;
+ dma_addr_t phy_in;
+ dma_addr_t phy_out;
+ char *src_align;
+ char *dst_align;
+ struct icp_qat_fw_pke_request req;
+ union {
+ struct qat_rsa_ctx *rsa;
+ struct qat_dh_ctx *dh;
+ } ctx;
+ union {
+ struct akcipher_request *rsa;
+ struct kpp_request *dh;
+ } areq;
+ int err;
+ void (*cb)(struct icp_qat_fw_pke_resp *resp);
+ struct qat_alg_req alg_req;
+} __aligned(64);
+
+static int qat_alg_send_asym_message(struct qat_asym_request *qat_req,
+ struct qat_crypto_instance *inst,
+ struct crypto_async_request *base)
+{
+ struct qat_alg_req *alg_req = &qat_req->alg_req;
+
+ alg_req->fw_req = (u32 *)&qat_req->req;
+ alg_req->tx_ring = inst->pke_tx;
+ alg_req->base = base;
+ alg_req->backlog = &inst->backlog;
+
+ return qat_alg_send_message(alg_req);
+}
+
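+/*
+ * Completion handler for DH requests: unmap the DMA buffers, copy the
+ * result out of the bounce buffer if one was used and complete the kpp
+ * request.
+ */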
+static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
+{
+ struct qat_asym_request *req = (void *)(__force long)resp->opaque;
+ struct kpp_request *areq = req->areq.dh;
+ struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev);
+ int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+ resp->pke_resp_hdr.comn_resp_flags);
+
+ err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
+
+ if (areq->src) {
+ dma_unmap_single(dev, req->in.dh.in.b, req->ctx.dh->p_size,
+ DMA_TO_DEVICE);
+ kfree_sensitive(req->src_align);
+ }
+
+ areq->dst_len = req->ctx.dh->p_size;
+ dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
+ DMA_FROM_DEVICE);
+ if (req->dst_align) {
+ scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
+ areq->dst_len, 1);
+ kfree_sensitive(req->dst_align);
+ }
+
+ dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
+ DMA_TO_DEVICE);
+ dma_unmap_single(dev, req->phy_out,
+ sizeof(struct qat_dh_output_params),
+ DMA_TO_DEVICE);
+
+ kpp_request_complete(areq, err);
+}
+
+#define PKE_DH_1536 0x390c1a49
+#define PKE_DH_G2_1536 0x2e0b1a3e
+#define PKE_DH_2048 0x4d0c1a60
+#define PKE_DH_G2_2048 0x3e0b1a55
+#define PKE_DH_3072 0x510c1a77
+#define PKE_DH_G2_3072 0x3a0b1a6c
+#define PKE_DH_4096 0x690c1a8e
+#define PKE_DH_G2_4096 0x4a0b1a83
+
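+/*
+ * Select the DH firmware function ID for the modulus length in bytes;
+ * the g2 variants are used when the generator is 2.
+ */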
+static unsigned long qat_dh_fn_id(unsigned int len, bool g2)
+{
+ unsigned int bitslen = len << 3;
+
+ switch (bitslen) {
+ case 1536:
+ return g2 ? PKE_DH_G2_1536 : PKE_DH_1536;
+ case 2048:
+ return g2 ? PKE_DH_G2_2048 : PKE_DH_2048;
+ case 3072:
+ return g2 ? PKE_DH_G2_3072 : PKE_DH_3072;
+ case 4096:
+ return g2 ? PKE_DH_G2_4096 : PKE_DH_4096;
+ default:
+ return 0;
+ }
+}
+
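+/*
+ * Perform the DH exponentiation on the device: map (or copy and pad)
+ * the input and output buffers to the modulus size, build the PKE
+ * request and submit it. When no source is supplied the generator g
+ * (or the g2 shortcut) is used as the base.
+ */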
+static int qat_dh_compute_value(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ struct qat_asym_request *qat_req =
+ PTR_ALIGN(kpp_request_ctx(req), 64);
+ struct icp_qat_fw_pke_request *msg = &qat_req->req;
+ gfp_t flags = qat_algs_alloc_flags(&req->base);
+ int n_input_params = 0;
+ u8 *vaddr;
+ int ret;
+
+ if (unlikely(!ctx->xa))
+ return -EINVAL;
+
+ if (req->dst_len < ctx->p_size) {
+ req->dst_len = ctx->p_size;
+ return -EOVERFLOW;
+ }
+
+ if (req->src_len > ctx->p_size)
+ return -EINVAL;
+
+ memset(msg, '\0', sizeof(*msg));
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+ ICP_QAT_FW_COMN_REQ_FLAG_SET);
+
+ msg->pke_hdr.cd_pars.func_id = qat_dh_fn_id(ctx->p_size,
+ !req->src && ctx->g2);
+ if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+ return -EINVAL;
+
+ qat_req->cb = qat_dh_cb;
+ qat_req->ctx.dh = ctx;
+ qat_req->areq.dh = req;
+ msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+ msg->pke_hdr.comn_req_flags =
+ ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+ QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+ /* If no source is provided, use g as the base. */
+ if (req->src) {
+ qat_req->in.dh.in.xa = ctx->dma_xa;
+ qat_req->in.dh.in.p = ctx->dma_p;
+ n_input_params = 3;
+ } else {
+ if (ctx->g2) {
+ qat_req->in.dh.in_g2.xa = ctx->dma_xa;
+ qat_req->in.dh.in_g2.p = ctx->dma_p;
+ n_input_params = 2;
+ } else {
+ qat_req->in.dh.in.b = ctx->dma_g;
+ qat_req->in.dh.in.xa = ctx->dma_xa;
+ qat_req->in.dh.in.p = ctx->dma_p;
+ n_input_params = 3;
+ }
+ }
+
+ ret = -ENOMEM;
+ if (req->src) {
+ /*
+ * src can be of any size in the valid range, but the HW
+ * expects it to be the same size as the modulus p, so if it
+ * differs we need to allocate a new buffer and copy the src
+ * data into it. Otherwise we just map the user-provided
+ * buffer. The data must also be in a contiguous buffer.
+ */
+ if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
+ qat_req->src_align = NULL;
+ vaddr = sg_virt(req->src);
+ } else {
+ int shift = ctx->p_size - req->src_len;
+
+ qat_req->src_align = kzalloc(ctx->p_size, flags);
+ if (unlikely(!qat_req->src_align))
+ return ret;
+
+ scatterwalk_map_and_copy(qat_req->src_align + shift,
+ req->src, 0, req->src_len, 0);
+
+ vaddr = qat_req->src_align;
+ }
+
+ qat_req->in.dh.in.b = dma_map_single(dev, vaddr, ctx->p_size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->in.dh.in.b)))
+ goto unmap_src;
+ }
+ /*
+ * dst can be of any size in the valid range, but the HW expects it
+ * to be the same size as the modulus p, so if it differs we need to
+ * allocate a new buffer for the result. Otherwise we just map the
+ * user-provided buffer. The data must also be in a contiguous
+ * buffer.
+ */
+ if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
+ qat_req->dst_align = NULL;
+ vaddr = sg_virt(req->dst);
+ } else {
+ qat_req->dst_align = kzalloc(ctx->p_size, flags);
+ if (unlikely(!qat_req->dst_align))
+ goto unmap_src;
+
+ vaddr = qat_req->dst_align;
+ }
+ qat_req->out.dh.r = dma_map_single(dev, vaddr, ctx->p_size,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
+ goto unmap_dst;
+
+ qat_req->in.dh.in_tab[n_input_params] = 0;
+ qat_req->out.dh.out_tab[1] = 0;
+ /* Mapping in.in.b or in.in_g2.xa is the same */
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh,
+ sizeof(struct qat_dh_input_params),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
+ goto unmap_dst;
+
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh,
+ sizeof(struct qat_dh_output_params),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
+ goto unmap_in_params;
+
+ msg->pke_mid.src_data_addr = qat_req->phy_in;
+ msg->pke_mid.dest_data_addr = qat_req->phy_out;
+ msg->pke_mid.opaque = (u64)(__force long)qat_req;
+ msg->input_param_count = n_input_params;
+ msg->output_param_count = 1;
+
+ ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
+ if (ret == -ENOSPC)
+ goto unmap_all;
+
+ return ret;
+
+unmap_all:
+ if (!dma_mapping_error(dev, qat_req->phy_out))
+ dma_unmap_single(dev, qat_req->phy_out,
+ sizeof(struct qat_dh_output_params),
+ DMA_TO_DEVICE);
+unmap_in_params:
+ if (!dma_mapping_error(dev, qat_req->phy_in))
+ dma_unmap_single(dev, qat_req->phy_in,
+ sizeof(struct qat_dh_input_params),
+ DMA_TO_DEVICE);
+unmap_dst:
+ if (!dma_mapping_error(dev, qat_req->out.dh.r))
+ dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
+ DMA_FROM_DEVICE);
+ kfree_sensitive(qat_req->dst_align);
+unmap_src:
+ if (req->src) {
+ if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
+ dma_unmap_single(dev, qat_req->in.dh.in.b,
+ ctx->p_size,
+ DMA_TO_DEVICE);
+ kfree_sensitive(qat_req->src_align);
+ }
+ return ret;
+}
+
+static int qat_dh_check_params_length(unsigned int p_len)
+{
+ switch (p_len) {
+ case 1536:
+ case 2048:
+ case 3072:
+ case 4096:
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
+{
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+
+ if (qat_dh_check_params_length(params->p_size << 3))
+ return -EINVAL;
+
+ ctx->p_size = params->p_size;
+ ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
+ if (!ctx->p)
+ return -ENOMEM;
+ memcpy(ctx->p, params->p, ctx->p_size);
+
+ /* If g equals 2 don't copy it */
+ if (params->g_size == 1 && *(char *)params->g == 0x02) {
+ ctx->g2 = true;
+ return 0;
+ }
+
+ ctx->g = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
+ if (!ctx->g)
+ return -ENOMEM;
+ memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
+ params->g_size);
+
+ return 0;
+}
+
+static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
+{
+ if (ctx->g) {
+ memset(ctx->g, 0, ctx->p_size);
+ dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
+ ctx->g = NULL;
+ }
+ if (ctx->xa) {
+ memset(ctx->xa, 0, ctx->p_size);
+ dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
+ ctx->xa = NULL;
+ }
+ if (ctx->p) {
+ memset(ctx->p, 0, ctx->p_size);
+ dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
+ ctx->p = NULL;
+ }
+ ctx->p_size = 0;
+ ctx->g2 = false;
+}
+
+static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
+ unsigned int len)
+{
+ struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+ struct dh params;
+ int ret;
+
+ if (crypto_dh_decode_key(buf, len, &params) < 0)
+ return -EINVAL;
+
+ /* Free old secret if any */
+ qat_dh_clear_ctx(dev, ctx);
+
+ ret = qat_dh_set_params(ctx, &params);
+ if (ret < 0)
+ goto err_clear_ctx;
+
+ ctx->xa = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
+ GFP_KERNEL);
+ if (!ctx->xa) {
+ ret = -ENOMEM;
+ goto err_clear_ctx;
+ }
+ memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
+ params.key_size);
+
+ return 0;
+
+err_clear_ctx:
+ qat_dh_clear_ctx(dev, ctx);
+ return ret;
+}
+
+static unsigned int qat_dh_max_size(struct crypto_kpp *tfm)
+{
+ struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ return ctx->p_size;
+}
+
+static int qat_dh_init_tfm(struct crypto_kpp *tfm)
+{
+ struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct qat_crypto_instance *inst =
+ qat_crypto_get_instance_node(numa_node_id());
+
+ if (!inst)
+ return -EINVAL;
+
+ kpp_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64);
+
+ ctx->p_size = 0;
+ ctx->g2 = false;
+ ctx->inst = inst;
+ return 0;
+}
+
+static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
+{
+ struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+
+ qat_dh_clear_ctx(dev, ctx);
+ qat_crypto_put_instance(ctx->inst);
+}
+
+static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
+{
+ struct qat_asym_request *req = (void *)(__force long)resp->opaque;
+ struct akcipher_request *areq = req->areq.rsa;
+ struct device *dev = &GET_DEV(req->ctx.rsa->inst->accel_dev);
+ int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+ resp->pke_resp_hdr.comn_resp_flags);
+
+ err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
+
+ dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
+ DMA_TO_DEVICE);
+
+ kfree_sensitive(req->src_align);
+
+ areq->dst_len = req->ctx.rsa->key_sz;
+ dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
+ DMA_FROM_DEVICE);
+ if (req->dst_align) {
+ scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
+ areq->dst_len, 1);
+
+ kfree_sensitive(req->dst_align);
+ }
+
+ dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
+ dma_unmap_single(dev, req->phy_out,
+ sizeof(struct qat_rsa_output_params),
+ DMA_TO_DEVICE);
+
+ akcipher_request_complete(areq, err);
+}
+
+void qat_alg_asym_callback(void *_resp)
+{
+ struct icp_qat_fw_pke_resp *resp = _resp;
+ struct qat_asym_request *areq = (void *)(__force long)resp->opaque;
+ struct qat_instance_backlog *backlog = areq->alg_req.backlog;
+
+ areq->cb(resp);
+
+ qat_alg_send_backlog(backlog);
+}
+
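+/*
+ * Firmware function IDs for the RSA encryption primitive (c = m^e mod n),
+ * selected by modulus size in bits and written to pke_hdr.cd_pars.func_id.
+ */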
+#define PKE_RSA_EP_512 0x1c161b21
+#define PKE_RSA_EP_1024 0x35111bf7
+#define PKE_RSA_EP_1536 0x4d111cdc
+#define PKE_RSA_EP_2048 0x6e111dba
+#define PKE_RSA_EP_3072 0x7d111ea3
+#define PKE_RSA_EP_4096 0xa5101f7e
+
+static unsigned long qat_rsa_enc_fn_id(unsigned int len)
+{
+ unsigned int bitslen = len << 3;
+
+ switch (bitslen) {
+ case 512:
+ return PKE_RSA_EP_512;
+ case 1024:
+ return PKE_RSA_EP_1024;
+ case 1536:
+ return PKE_RSA_EP_1536;
+ case 2048:
+ return PKE_RSA_EP_2048;
+ case 3072:
+ return PKE_RSA_EP_3072;
+ case 4096:
+ return PKE_RSA_EP_4096;
+ default:
+ return 0;
+ }
+}
+
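+/*
+ * Firmware function IDs for the RSA decryption primitive (m = c^d mod n),
+ * selected by modulus size in bits.
+ */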
+#define PKE_RSA_DP1_512 0x1c161b3c
+#define PKE_RSA_DP1_1024 0x35111c12
+#define PKE_RSA_DP1_1536 0x4d111cf7
+#define PKE_RSA_DP1_2048 0x6e111dda
+#define PKE_RSA_DP1_3072 0x7d111ebe
+#define PKE_RSA_DP1_4096 0xa5101f98
+
+static unsigned long qat_rsa_dec_fn_id(unsigned int len)
+{
+ unsigned int bitslen = len << 3;
+
+ switch (bitslen) {
+ case 512:
+ return PKE_RSA_DP1_512;
+ case 1024:
+ return PKE_RSA_DP1_1024;
+ case 1536:
+ return PKE_RSA_DP1_1536;
+ case 2048:
+ return PKE_RSA_DP1_2048;
+ case 3072:
+ return PKE_RSA_DP1_3072;
+ case 4096:
+ return PKE_RSA_DP1_4096;
+ default:
+ return 0;
+ }
+}
+
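+/*
+ * Firmware function IDs for the RSA CRT decryption primitive, which takes the
+ * p, q, dp, dq and qinv key components instead of d and n.
+ */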
+#define PKE_RSA_DP2_512 0x1c131b57
+#define PKE_RSA_DP2_1024 0x26131c2d
+#define PKE_RSA_DP2_1536 0x45111d12
+#define PKE_RSA_DP2_2048 0x59121dfa
+#define PKE_RSA_DP2_3072 0x81121ed9
+#define PKE_RSA_DP2_4096 0xb1111fb2
+
+static unsigned long qat_rsa_dec_fn_id_crt(unsigned int len)
+{
+ unsigned int bitslen = len << 3;
+
+ switch (bitslen) {
+ case 512:
+ return PKE_RSA_DP2_512;
+ case 1024:
+ return PKE_RSA_DP2_1024;
+ case 1536:
+ return PKE_RSA_DP2_1536;
+ case 2048:
+ return PKE_RSA_DP2_2048;
+ case 3072:
+ return PKE_RSA_DP2_3072;
+ case 4096:
+ return PKE_RSA_DP2_4096;
+ default:
+ return 0;
+ }
+}
+
+static int qat_rsa_enc(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ struct qat_asym_request *qat_req =
+ PTR_ALIGN(akcipher_request_ctx(req), 64);
+ struct icp_qat_fw_pke_request *msg = &qat_req->req;
+ gfp_t flags = qat_algs_alloc_flags(&req->base);
+ u8 *vaddr;
+ int ret;
+
+ if (unlikely(!ctx->n || !ctx->e))
+ return -EINVAL;
+
+ if (req->dst_len < ctx->key_sz) {
+ req->dst_len = ctx->key_sz;
+ return -EOVERFLOW;
+ }
+
+ if (req->src_len > ctx->key_sz)
+ return -EINVAL;
+
+ memset(msg, '\0', sizeof(*msg));
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+ ICP_QAT_FW_COMN_REQ_FLAG_SET);
+ msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
+ if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+ return -EINVAL;
+
+ qat_req->cb = qat_rsa_cb;
+ qat_req->ctx.rsa = ctx;
+ qat_req->areq.rsa = req;
+ msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+ msg->pke_hdr.comn_req_flags =
+ ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+ QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+ qat_req->in.rsa.enc.e = ctx->dma_e;
+ qat_req->in.rsa.enc.n = ctx->dma_n;
+ ret = -ENOMEM;
+
+	/*
+	 * src can be of any size within the valid range, but the HW expects it
+	 * to be the same size as the modulus n, so if it differs we need to
+	 * allocate a new buffer and copy the source data into it.
+	 * Otherwise we can simply map the user-provided buffer.
+	 * Either way the data must sit in a single contiguous buffer.
+	 */
+ if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
+ qat_req->src_align = NULL;
+ vaddr = sg_virt(req->src);
+ } else {
+ int shift = ctx->key_sz - req->src_len;
+
+ qat_req->src_align = kzalloc(ctx->key_sz, flags);
+ if (unlikely(!qat_req->src_align))
+ return ret;
+
+ scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
+ 0, req->src_len, 0);
+ vaddr = qat_req->src_align;
+ }
+
+ qat_req->in.rsa.enc.m = dma_map_single(dev, vaddr, ctx->key_sz,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
+ goto unmap_src;
+
+ if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
+ qat_req->dst_align = NULL;
+ vaddr = sg_virt(req->dst);
+ } else {
+ qat_req->dst_align = kzalloc(ctx->key_sz, flags);
+ if (unlikely(!qat_req->dst_align))
+ goto unmap_src;
+ vaddr = qat_req->dst_align;
+ }
+
+ qat_req->out.rsa.enc.c = dma_map_single(dev, vaddr, ctx->key_sz,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
+ goto unmap_dst;
+
+ qat_req->in.rsa.in_tab[3] = 0;
+ qat_req->out.rsa.out_tab[1] = 0;
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
+ sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
+ goto unmap_dst;
+
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
+ sizeof(struct qat_rsa_output_params),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
+ goto unmap_in_params;
+
+ msg->pke_mid.src_data_addr = qat_req->phy_in;
+ msg->pke_mid.dest_data_addr = qat_req->phy_out;
+ msg->pke_mid.opaque = (u64)(__force long)qat_req;
+ msg->input_param_count = 3;
+ msg->output_param_count = 1;
+
+ ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
+ if (ret == -ENOSPC)
+ goto unmap_all;
+
+ return ret;
+
+unmap_all:
+ if (!dma_mapping_error(dev, qat_req->phy_out))
+ dma_unmap_single(dev, qat_req->phy_out,
+ sizeof(struct qat_rsa_output_params),
+ DMA_TO_DEVICE);
+unmap_in_params:
+ if (!dma_mapping_error(dev, qat_req->phy_in))
+ dma_unmap_single(dev, qat_req->phy_in,
+ sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
+unmap_dst:
+ if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
+ dma_unmap_single(dev, qat_req->out.rsa.enc.c,
+ ctx->key_sz, DMA_FROM_DEVICE);
+ kfree_sensitive(qat_req->dst_align);
+unmap_src:
+ if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
+ dma_unmap_single(dev, qat_req->in.rsa.enc.m, ctx->key_sz,
+ DMA_TO_DEVICE);
+ kfree_sensitive(qat_req->src_align);
+ return ret;
+}
+
+static int qat_rsa_dec(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ struct qat_asym_request *qat_req =
+ PTR_ALIGN(akcipher_request_ctx(req), 64);
+ struct icp_qat_fw_pke_request *msg = &qat_req->req;
+ gfp_t flags = qat_algs_alloc_flags(&req->base);
+ u8 *vaddr;
+ int ret;
+
+ if (unlikely(!ctx->n || !ctx->d))
+ return -EINVAL;
+
+ if (req->dst_len < ctx->key_sz) {
+ req->dst_len = ctx->key_sz;
+ return -EOVERFLOW;
+ }
+
+ if (req->src_len > ctx->key_sz)
+ return -EINVAL;
+
+ memset(msg, '\0', sizeof(*msg));
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+ ICP_QAT_FW_COMN_REQ_FLAG_SET);
+ msg->pke_hdr.cd_pars.func_id = ctx->crt_mode ?
+ qat_rsa_dec_fn_id_crt(ctx->key_sz) :
+ qat_rsa_dec_fn_id(ctx->key_sz);
+ if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+ return -EINVAL;
+
+ qat_req->cb = qat_rsa_cb;
+ qat_req->ctx.rsa = ctx;
+ qat_req->areq.rsa = req;
+ msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+ msg->pke_hdr.comn_req_flags =
+ ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+ QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+ if (ctx->crt_mode) {
+ qat_req->in.rsa.dec_crt.p = ctx->dma_p;
+ qat_req->in.rsa.dec_crt.q = ctx->dma_q;
+ qat_req->in.rsa.dec_crt.dp = ctx->dma_dp;
+ qat_req->in.rsa.dec_crt.dq = ctx->dma_dq;
+ qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv;
+ } else {
+ qat_req->in.rsa.dec.d = ctx->dma_d;
+ qat_req->in.rsa.dec.n = ctx->dma_n;
+ }
+ ret = -ENOMEM;
+
+	/*
+	 * src can be of any size within the valid range, but the HW expects it
+	 * to be the same size as the modulus n, so if it differs we need to
+	 * allocate a new buffer and copy the source data into it.
+	 * Otherwise we can simply map the user-provided buffer.
+	 * Either way the data must sit in a single contiguous buffer.
+	 */
+ if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
+ qat_req->src_align = NULL;
+ vaddr = sg_virt(req->src);
+ } else {
+ int shift = ctx->key_sz - req->src_len;
+
+ qat_req->src_align = kzalloc(ctx->key_sz, flags);
+ if (unlikely(!qat_req->src_align))
+ return ret;
+
+ scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
+ 0, req->src_len, 0);
+ vaddr = qat_req->src_align;
+ }
+
+ qat_req->in.rsa.dec.c = dma_map_single(dev, vaddr, ctx->key_sz,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
+ goto unmap_src;
+
+ if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
+ qat_req->dst_align = NULL;
+ vaddr = sg_virt(req->dst);
+ } else {
+ qat_req->dst_align = kzalloc(ctx->key_sz, flags);
+ if (unlikely(!qat_req->dst_align))
+ goto unmap_src;
+ vaddr = qat_req->dst_align;
+ }
+ qat_req->out.rsa.dec.m = dma_map_single(dev, vaddr, ctx->key_sz,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
+ goto unmap_dst;
+
+ if (ctx->crt_mode)
+ qat_req->in.rsa.in_tab[6] = 0;
+ else
+ qat_req->in.rsa.in_tab[3] = 0;
+ qat_req->out.rsa.out_tab[1] = 0;
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
+ sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
+ goto unmap_dst;
+
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
+ sizeof(struct qat_rsa_output_params),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
+ goto unmap_in_params;
+
+ msg->pke_mid.src_data_addr = qat_req->phy_in;
+ msg->pke_mid.dest_data_addr = qat_req->phy_out;
+ msg->pke_mid.opaque = (u64)(__force long)qat_req;
+ if (ctx->crt_mode)
+ msg->input_param_count = 6;
+ else
+ msg->input_param_count = 3;
+
+ msg->output_param_count = 1;
+
+ ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
+ if (ret == -ENOSPC)
+ goto unmap_all;
+
+ return ret;
+
+unmap_all:
+ if (!dma_mapping_error(dev, qat_req->phy_out))
+ dma_unmap_single(dev, qat_req->phy_out,
+ sizeof(struct qat_rsa_output_params),
+ DMA_TO_DEVICE);
+unmap_in_params:
+ if (!dma_mapping_error(dev, qat_req->phy_in))
+ dma_unmap_single(dev, qat_req->phy_in,
+ sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
+unmap_dst:
+ if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
+ dma_unmap_single(dev, qat_req->out.rsa.dec.m,
+ ctx->key_sz, DMA_FROM_DEVICE);
+ kfree_sensitive(qat_req->dst_align);
+unmap_src:
+ if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
+ dma_unmap_single(dev, qat_req->in.rsa.dec.c, ctx->key_sz,
+ DMA_TO_DEVICE);
+ kfree_sensitive(qat_req->src_align);
+ return ret;
+}
+
+static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value,
+ size_t vlen)
+{
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ const char *ptr = value;
+ int ret;
+
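+	/* Skip leading zero bytes so key_sz reflects the actual modulus length */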
+ while (!*ptr && vlen) {
+ ptr++;
+ vlen--;
+ }
+
+ ctx->key_sz = vlen;
+ ret = -EINVAL;
+ /* invalid key size provided */
+ if (!qat_rsa_enc_fn_id(ctx->key_sz))
+ goto err;
+
+ ret = -ENOMEM;
+ ctx->n = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
+ if (!ctx->n)
+ goto err;
+
+ memcpy(ctx->n, ptr, ctx->key_sz);
+ return 0;
+err:
+ ctx->key_sz = 0;
+ ctx->n = NULL;
+ return ret;
+}
+
+static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value,
+ size_t vlen)
+{
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ const char *ptr = value;
+
+ while (!*ptr && vlen) {
+ ptr++;
+ vlen--;
+ }
+
+ if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
+ ctx->e = NULL;
+ return -EINVAL;
+ }
+
+ ctx->e = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
+ if (!ctx->e)
+ return -ENOMEM;
+
+ memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
+ return 0;
+}
+
+static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value,
+ size_t vlen)
+{
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ const char *ptr = value;
+ int ret;
+
+ while (!*ptr && vlen) {
+ ptr++;
+ vlen--;
+ }
+
+ ret = -EINVAL;
+ if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
+ goto err;
+
+ ret = -ENOMEM;
+ ctx->d = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
+ if (!ctx->d)
+ goto err;
+
+ memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
+ return 0;
+err:
+ ctx->d = NULL;
+ return ret;
+}
+
+static void qat_rsa_drop_leading_zeros(const char **ptr, unsigned int *len)
+{
+ while (!**ptr && *len) {
+ (*ptr)++;
+ (*len)--;
+ }
+}
+
+static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
+{
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ const char *ptr;
+ unsigned int len;
+ unsigned int half_key_sz = ctx->key_sz / 2;
+
+ /* p */
+ ptr = rsa_key->p;
+ len = rsa_key->p_sz;
+ qat_rsa_drop_leading_zeros(&ptr, &len);
+ if (!len)
+ goto err;
+ ctx->p = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
+ if (!ctx->p)
+ goto err;
+ memcpy(ctx->p + (half_key_sz - len), ptr, len);
+
+ /* q */
+ ptr = rsa_key->q;
+ len = rsa_key->q_sz;
+ qat_rsa_drop_leading_zeros(&ptr, &len);
+ if (!len)
+ goto free_p;
+ ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
+ if (!ctx->q)
+ goto free_p;
+ memcpy(ctx->q + (half_key_sz - len), ptr, len);
+
+ /* dp */
+ ptr = rsa_key->dp;
+ len = rsa_key->dp_sz;
+ qat_rsa_drop_leading_zeros(&ptr, &len);
+ if (!len)
+ goto free_q;
+ ctx->dp = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dp,
+ GFP_KERNEL);
+ if (!ctx->dp)
+ goto free_q;
+ memcpy(ctx->dp + (half_key_sz - len), ptr, len);
+
+ /* dq */
+ ptr = rsa_key->dq;
+ len = rsa_key->dq_sz;
+ qat_rsa_drop_leading_zeros(&ptr, &len);
+ if (!len)
+ goto free_dp;
+ ctx->dq = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dq,
+ GFP_KERNEL);
+ if (!ctx->dq)
+ goto free_dp;
+ memcpy(ctx->dq + (half_key_sz - len), ptr, len);
+
+ /* qinv */
+ ptr = rsa_key->qinv;
+ len = rsa_key->qinv_sz;
+ qat_rsa_drop_leading_zeros(&ptr, &len);
+ if (!len)
+ goto free_dq;
+ ctx->qinv = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
+ GFP_KERNEL);
+ if (!ctx->qinv)
+ goto free_dq;
+ memcpy(ctx->qinv + (half_key_sz - len), ptr, len);
+
+ ctx->crt_mode = true;
+ return;
+
+free_dq:
+ memset(ctx->dq, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
+ ctx->dq = NULL;
+free_dp:
+ memset(ctx->dp, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
+ ctx->dp = NULL;
+free_q:
+ memset(ctx->q, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
+ ctx->q = NULL;
+free_p:
+ memset(ctx->p, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
+ ctx->p = NULL;
+err:
+ ctx->crt_mode = false;
+}
+
+static void qat_rsa_clear_ctx(struct device *dev, struct qat_rsa_ctx *ctx)
+{
+ unsigned int half_key_sz = ctx->key_sz / 2;
+
+ /* Free the old key if any */
+ if (ctx->n)
+ dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
+ if (ctx->e)
+ dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
+ if (ctx->d) {
+ memset(ctx->d, '\0', ctx->key_sz);
+ dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
+ }
+ if (ctx->p) {
+ memset(ctx->p, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
+ }
+ if (ctx->q) {
+ memset(ctx->q, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
+ }
+ if (ctx->dp) {
+ memset(ctx->dp, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
+ }
+ if (ctx->dq) {
+ memset(ctx->dq, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
+ }
+ if (ctx->qinv) {
+ memset(ctx->qinv, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->qinv, ctx->dma_qinv);
+ }
+
+ ctx->n = NULL;
+ ctx->e = NULL;
+ ctx->d = NULL;
+ ctx->p = NULL;
+ ctx->q = NULL;
+ ctx->dp = NULL;
+ ctx->dq = NULL;
+ ctx->qinv = NULL;
+ ctx->crt_mode = false;
+ ctx->key_sz = 0;
+}
+
+static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen, bool private)
+{
+ struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+ struct rsa_key rsa_key;
+ int ret;
+
+ qat_rsa_clear_ctx(dev, ctx);
+
+ if (private)
+ ret = rsa_parse_priv_key(&rsa_key, key, keylen);
+ else
+ ret = rsa_parse_pub_key(&rsa_key, key, keylen);
+ if (ret < 0)
+ goto free;
+
+ ret = qat_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz);
+ if (ret < 0)
+ goto free;
+ ret = qat_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
+ if (ret < 0)
+ goto free;
+ if (private) {
+ ret = qat_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
+ if (ret < 0)
+ goto free;
+ qat_rsa_setkey_crt(ctx, &rsa_key);
+ }
+
+ if (!ctx->n || !ctx->e) {
+ /* invalid key provided */
+ ret = -EINVAL;
+ goto free;
+ }
+ if (private && !ctx->d) {
+ /* invalid private key provided */
+ ret = -EINVAL;
+ goto free;
+ }
+
+ return 0;
+free:
+ qat_rsa_clear_ctx(dev, ctx);
+ return ret;
+}
+
+static int qat_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ return qat_rsa_setkey(tfm, key, keylen, false);
+}
+
+static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ return qat_rsa_setkey(tfm, key, keylen, true);
+}
+
+static unsigned int qat_rsa_max_size(struct crypto_akcipher *tfm)
+{
+ struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ return ctx->key_sz;
+}
+
+static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+ struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct qat_crypto_instance *inst =
+ qat_crypto_get_instance_node(numa_node_id());
+
+ if (!inst)
+ return -EINVAL;
+
+ akcipher_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64);
+
+ ctx->key_sz = 0;
+ ctx->inst = inst;
+ return 0;
+}
+
+static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+ struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+
+ qat_rsa_clear_ctx(dev, ctx);
+ qat_crypto_put_instance(ctx->inst);
+}
+
+static struct akcipher_alg rsa = {
+ .encrypt = qat_rsa_enc,
+ .decrypt = qat_rsa_dec,
+ .set_pub_key = qat_rsa_setpubkey,
+ .set_priv_key = qat_rsa_setprivkey,
+ .max_size = qat_rsa_max_size,
+ .init = qat_rsa_init_tfm,
+ .exit = qat_rsa_exit_tfm,
+ .base = {
+ .cra_name = "rsa",
+ .cra_driver_name = "qat-rsa",
+ .cra_priority = 1000,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct qat_rsa_ctx),
+ },
+};
+
+static struct kpp_alg dh = {
+ .set_secret = qat_dh_set_secret,
+ .generate_public_key = qat_dh_compute_value,
+ .compute_shared_secret = qat_dh_compute_value,
+ .max_size = qat_dh_max_size,
+ .init = qat_dh_init_tfm,
+ .exit = qat_dh_exit_tfm,
+ .base = {
+ .cra_name = "dh",
+ .cra_driver_name = "qat-dh",
+ .cra_priority = 1000,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct qat_dh_ctx),
+ },
+};
+
+int qat_asym_algs_register(void)
+{
+ int ret = 0;
+
+ mutex_lock(&algs_lock);
+ if (++active_devs == 1) {
+ rsa.base.cra_flags = 0;
+ ret = crypto_register_akcipher(&rsa);
+ if (ret)
+ goto unlock;
+ ret = crypto_register_kpp(&dh);
+ }
+unlock:
+ mutex_unlock(&algs_lock);
+ return ret;
+}
+
+void qat_asym_algs_unregister(void)
+{
+ mutex_lock(&algs_lock);
+ if (--active_devs == 0) {
+ crypto_unregister_akcipher(&rsa);
+ crypto_unregister_kpp(&dh);
+ }
+ mutex_unlock(&algs_lock);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.c b/drivers/crypto/intel/qat/qat_common/qat_bl.c
new file mode 100644
index 0000000000..76baed0a76
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/qat_bl.c
@@ -0,0 +1,410 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2014 - 2022 Intel Corporation */
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "qat_bl.h"
+#include "qat_crypto.h"
+
+void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
+ struct qat_request_buffs *buf)
+{
+ struct device *dev = &GET_DEV(accel_dev);
+ struct qat_alg_buf_list *bl = buf->bl;
+ struct qat_alg_buf_list *blout = buf->blout;
+ dma_addr_t blp = buf->blp;
+ dma_addr_t blpout = buf->bloutp;
+ size_t sz = buf->sz;
+ size_t sz_out = buf->sz_out;
+ int bl_dma_dir;
+ int i;
+
+ bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+
+ for (i = 0; i < bl->num_bufs; i++)
+ dma_unmap_single(dev, bl->buffers[i].addr,
+ bl->buffers[i].len, bl_dma_dir);
+
+ dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+
+ if (!buf->sgl_src_valid)
+ kfree(bl);
+
+ if (blp != blpout) {
+ for (i = 0; i < blout->num_mapped_bufs; i++) {
+ dma_unmap_single(dev, blout->buffers[i].addr,
+ blout->buffers[i].len,
+ DMA_FROM_DEVICE);
+ }
+ dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
+
+ if (!buf->sgl_dst_valid)
+ kfree(blout);
+ }
+}
+
+static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
+ struct scatterlist *sgl,
+ struct scatterlist *sglout,
+ struct qat_request_buffs *buf,
+ dma_addr_t extra_dst_buff,
+ size_t sz_extra_dst_buff,
+ unsigned int sskip,
+ unsigned int dskip,
+ gfp_t flags)
+{
+ struct device *dev = &GET_DEV(accel_dev);
+ int i, sg_nctr = 0;
+ int n = sg_nents(sgl);
+ struct qat_alg_buf_list *bufl;
+ struct qat_alg_buf_list *buflout = NULL;
+ dma_addr_t blp = DMA_MAPPING_ERROR;
+ dma_addr_t bloutp = DMA_MAPPING_ERROR;
+ struct scatterlist *sg;
+ size_t sz_out, sz = struct_size(bufl, buffers, n);
+ int node = dev_to_node(&GET_DEV(accel_dev));
+ unsigned int left;
+ int bufl_dma_dir;
+
+ if (unlikely(!n))
+ return -EINVAL;
+
+ buf->sgl_src_valid = false;
+ buf->sgl_dst_valid = false;
+
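+	/*
+	 * Use the buffer list pre-allocated in the request if the scatterlist
+	 * fits in QAT_MAX_BUFF_DESC entries, otherwise allocate one from the
+	 * heap.
+	 */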
+ if (n > QAT_MAX_BUFF_DESC) {
+ bufl = kzalloc_node(sz, flags, node);
+ if (unlikely(!bufl))
+ return -ENOMEM;
+ } else {
+ bufl = &buf->sgl_src.sgl_hdr;
+ memset(bufl, 0, sizeof(struct qat_alg_buf_list));
+ buf->sgl_src_valid = true;
+ }
+
+ bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+
+ for (i = 0; i < n; i++)
+ bufl->buffers[i].addr = DMA_MAPPING_ERROR;
+
+ left = sskip;
+
+ for_each_sg(sgl, sg, n, i) {
+ int y = sg_nctr;
+
+ if (!sg->length)
+ continue;
+
+ if (left >= sg->length) {
+ left -= sg->length;
+ continue;
+ }
+ bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
+ sg->length - left,
+ bufl_dma_dir);
+ bufl->buffers[y].len = sg->length;
+ if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
+ goto err_in;
+ sg_nctr++;
+ if (left) {
+ bufl->buffers[y].len -= left;
+ left = 0;
+ }
+ }
+ bufl->num_bufs = sg_nctr;
+ blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, blp)))
+ goto err_in;
+ buf->bl = bufl;
+ buf->blp = blp;
+ buf->sz = sz;
+ /* Handle out of place operation */
+ if (sgl != sglout) {
+ struct qat_alg_buf *buffers;
+ int extra_buff = extra_dst_buff ? 1 : 0;
+ int n_sglout = sg_nents(sglout);
+
+ n = n_sglout + extra_buff;
+ sz_out = struct_size(buflout, buffers, n);
+ left = dskip;
+
+ sg_nctr = 0;
+
+ if (n > QAT_MAX_BUFF_DESC) {
+ buflout = kzalloc_node(sz_out, flags, node);
+ if (unlikely(!buflout))
+ goto err_in;
+ } else {
+ buflout = &buf->sgl_dst.sgl_hdr;
+ memset(buflout, 0, sizeof(struct qat_alg_buf_list));
+ buf->sgl_dst_valid = true;
+ }
+
+ buffers = buflout->buffers;
+ for (i = 0; i < n; i++)
+ buffers[i].addr = DMA_MAPPING_ERROR;
+
+ for_each_sg(sglout, sg, n_sglout, i) {
+ int y = sg_nctr;
+
+ if (!sg->length)
+ continue;
+
+ if (left >= sg->length) {
+ left -= sg->length;
+ continue;
+ }
+ buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
+ sg->length - left,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
+ goto err_out;
+ buffers[y].len = sg->length;
+ sg_nctr++;
+ if (left) {
+ buffers[y].len -= left;
+ left = 0;
+ }
+ }
+ if (extra_buff) {
+ buffers[sg_nctr].addr = extra_dst_buff;
+ buffers[sg_nctr].len = sz_extra_dst_buff;
+ }
+
+ buflout->num_bufs = sg_nctr;
+ buflout->num_bufs += extra_buff;
+ buflout->num_mapped_bufs = sg_nctr;
+ bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, bloutp)))
+ goto err_out;
+ buf->blout = buflout;
+ buf->bloutp = bloutp;
+ buf->sz_out = sz_out;
+ } else {
+ /* Otherwise set the src and dst to the same address */
+ buf->bloutp = buf->blp;
+ buf->sz_out = 0;
+ }
+ return 0;
+
+err_out:
+ if (!dma_mapping_error(dev, bloutp))
+ dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
+
+ n = sg_nents(sglout);
+ for (i = 0; i < n; i++) {
+ if (buflout->buffers[i].addr == extra_dst_buff)
+ break;
+ if (!dma_mapping_error(dev, buflout->buffers[i].addr))
+ dma_unmap_single(dev, buflout->buffers[i].addr,
+ buflout->buffers[i].len,
+ DMA_FROM_DEVICE);
+ }
+
+ if (!buf->sgl_dst_valid)
+ kfree(buflout);
+
+err_in:
+ if (!dma_mapping_error(dev, blp))
+ dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+
+ n = sg_nents(sgl);
+ for (i = 0; i < n; i++)
+ if (!dma_mapping_error(dev, bufl->buffers[i].addr))
+ dma_unmap_single(dev, bufl->buffers[i].addr,
+ bufl->buffers[i].len,
+ bufl_dma_dir);
+
+ if (!buf->sgl_src_valid)
+ kfree(bufl);
+
+ dev_err(dev, "Failed to map buf for dma\n");
+ return -ENOMEM;
+}
+
+int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
+ struct scatterlist *sgl,
+ struct scatterlist *sglout,
+ struct qat_request_buffs *buf,
+ struct qat_sgl_to_bufl_params *params,
+ gfp_t flags)
+{
+ dma_addr_t extra_dst_buff = 0;
+ size_t sz_extra_dst_buff = 0;
+ unsigned int sskip = 0;
+ unsigned int dskip = 0;
+
+ if (params) {
+ extra_dst_buff = params->extra_dst_buff;
+ sz_extra_dst_buff = params->sz_extra_dst_buff;
+ sskip = params->sskip;
+ dskip = params->dskip;
+ }
+
+ return __qat_bl_sgl_to_bufl(accel_dev, sgl, sglout, buf,
+ extra_dst_buff, sz_extra_dst_buff,
+ sskip, dskip, flags);
+}
+
+static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev,
+ struct qat_alg_buf_list *bl)
+{
+ struct device *dev = &GET_DEV(accel_dev);
+ int n = bl->num_bufs;
+ int i;
+
+ for (i = 0; i < n; i++)
+ if (!dma_mapping_error(dev, bl->buffers[i].addr))
+ dma_unmap_single(dev, bl->buffers[i].addr,
+ bl->buffers[i].len, DMA_FROM_DEVICE);
+}
+
+static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
+ struct scatterlist *sgl,
+ struct qat_alg_buf_list **bl)
+{
+ struct device *dev = &GET_DEV(accel_dev);
+ struct qat_alg_buf_list *bufl;
+ int node = dev_to_node(dev);
+ struct scatterlist *sg;
+ int n, i, sg_nctr;
+ size_t sz;
+
+ n = sg_nents(sgl);
+ sz = struct_size(bufl, buffers, n);
+ bufl = kzalloc_node(sz, GFP_KERNEL, node);
+ if (unlikely(!bufl))
+ return -ENOMEM;
+
+ for (i = 0; i < n; i++)
+ bufl->buffers[i].addr = DMA_MAPPING_ERROR;
+
+ sg_nctr = 0;
+ for_each_sg(sgl, sg, n, i) {
+ int y = sg_nctr;
+
+ if (!sg->length)
+ continue;
+
+ bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg),
+ sg->length,
+ DMA_FROM_DEVICE);
+ bufl->buffers[y].len = sg->length;
+ if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
+ goto err_map;
+ sg_nctr++;
+ }
+ bufl->num_bufs = sg_nctr;
+ bufl->num_mapped_bufs = sg_nctr;
+
+ *bl = bufl;
+
+ return 0;
+
+err_map:
+ for (i = 0; i < n; i++)
+ if (!dma_mapping_error(dev, bufl->buffers[i].addr))
+ dma_unmap_single(dev, bufl->buffers[i].addr,
+ bufl->buffers[i].len,
+ DMA_FROM_DEVICE);
+ kfree(bufl);
+ *bl = NULL;
+
+ return -ENOMEM;
+}
+
+static void qat_bl_sgl_free_unmap(struct adf_accel_dev *accel_dev,
+ struct scatterlist *sgl,
+ struct qat_alg_buf_list *bl,
+ bool free_bl)
+{
+ if (bl) {
+ qat_bl_sgl_unmap(accel_dev, bl);
+
+ if (free_bl)
+ kfree(bl);
+ }
+ if (sgl)
+ sgl_free(sgl);
+}
+
+static int qat_bl_sgl_alloc_map(struct adf_accel_dev *accel_dev,
+ struct scatterlist **sgl,
+ struct qat_alg_buf_list **bl,
+ unsigned int dlen,
+ gfp_t gfp)
+{
+ struct scatterlist *dst;
+ int ret;
+
+ dst = sgl_alloc(dlen, gfp, NULL);
+ if (!dst) {
+		dev_err(&GET_DEV(accel_dev), "sgl_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ ret = qat_bl_sgl_map(accel_dev, dst, bl);
+ if (ret)
+ goto err;
+
+ *sgl = dst;
+
+ return 0;
+
+err:
+ sgl_free(dst);
+ *sgl = NULL;
+ return ret;
+}
+
+int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
+ struct scatterlist **sg,
+ unsigned int dlen,
+ struct qat_request_buffs *qat_bufs,
+ gfp_t gfp)
+{
+ struct device *dev = &GET_DEV(accel_dev);
+ dma_addr_t new_blp = DMA_MAPPING_ERROR;
+ struct qat_alg_buf_list *new_bl;
+ struct scatterlist *new_sg;
+ size_t new_bl_size;
+ int ret;
+
+ ret = qat_bl_sgl_alloc_map(accel_dev, &new_sg, &new_bl, dlen, gfp);
+ if (ret)
+ return ret;
+
+ new_bl_size = struct_size(new_bl, buffers, new_bl->num_bufs);
+
+ /* Map new firmware SGL descriptor */
+ new_blp = dma_map_single(dev, new_bl, new_bl_size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, new_blp)))
+ goto err;
+
+ /* Unmap old firmware SGL descriptor */
+ dma_unmap_single(dev, qat_bufs->bloutp, qat_bufs->sz_out, DMA_TO_DEVICE);
+
+ /* Free and unmap old scatterlist */
+ qat_bl_sgl_free_unmap(accel_dev, *sg, qat_bufs->blout,
+ !qat_bufs->sgl_dst_valid);
+
+ qat_bufs->sgl_dst_valid = false;
+ qat_bufs->blout = new_bl;
+ qat_bufs->bloutp = new_blp;
+ qat_bufs->sz_out = new_bl_size;
+
+ *sg = new_sg;
+
+ return 0;
+err:
+ qat_bl_sgl_free_unmap(accel_dev, new_sg, new_bl, true);
+
+ if (!dma_mapping_error(dev, new_blp))
+ dma_unmap_single(dev, new_blp, new_bl_size, DMA_TO_DEVICE);
+
+ return -ENOMEM;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.h b/drivers/crypto/intel/qat/qat_common/qat_bl.h
new file mode 100644
index 0000000000..d87e4f35ac
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/qat_bl.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2014 - 2022 Intel Corporation */
+#ifndef QAT_BL_H
+#define QAT_BL_H
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+
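+/*
+ * Maximum number of descriptors in the pre-allocated buffer lists embedded
+ * in struct qat_request_buffs.
+ */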
+#define QAT_MAX_BUFF_DESC 4
+
+struct qat_alg_buf {
+ u32 len;
+ u32 resrvd;
+ u64 addr;
+} __packed;
+
+struct qat_alg_buf_list {
+ u64 resrvd;
+ u32 num_bufs;
+ u32 num_mapped_bufs;
+ struct qat_alg_buf buffers[];
+} __packed;
+
+struct qat_alg_fixed_buf_list {
+ struct qat_alg_buf_list sgl_hdr;
+ struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
+} __packed __aligned(64);
+
+struct qat_request_buffs {
+ struct qat_alg_buf_list *bl;
+ dma_addr_t blp;
+ struct qat_alg_buf_list *blout;
+ dma_addr_t bloutp;
+ size_t sz;
+ size_t sz_out;
+ bool sgl_src_valid;
+ bool sgl_dst_valid;
+ struct qat_alg_fixed_buf_list sgl_src;
+ struct qat_alg_fixed_buf_list sgl_dst;
+};
+
+struct qat_sgl_to_bufl_params {
+ dma_addr_t extra_dst_buff;
+ size_t sz_extra_dst_buff;
+ unsigned int sskip;
+ unsigned int dskip;
+};
+
+void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
+ struct qat_request_buffs *buf);
+int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
+ struct scatterlist *sgl,
+ struct scatterlist *sglout,
+ struct qat_request_buffs *buf,
+ struct qat_sgl_to_bufl_params *params,
+ gfp_t flags);
+
+static inline gfp_t qat_algs_alloc_flags(struct crypto_async_request *req)
+{
+ return req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
+}
+
+int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
+ struct scatterlist **newd,
+ unsigned int dlen,
+ struct qat_request_buffs *qat_bufs,
+ gfp_t gfp);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
new file mode 100644
index 0000000000..b533984906
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation */
+#include <linux/crypto.h>
+#include <crypto/acompress.h>
+#include <crypto/internal/acompress.h>
+#include <crypto/scatterwalk.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "qat_bl.h"
+#include "qat_comp_req.h"
+#include "qat_compression.h"
+#include "qat_algs_send.h"
+
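+/* RFC 1950 (zlib) stream framing constants */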
+#define QAT_RFC_1950_HDR_SIZE 2
+#define QAT_RFC_1950_FOOTER_SIZE 4
+#define QAT_RFC_1950_CM_DEFLATE 8
+#define QAT_RFC_1950_CM_DEFLATE_CINFO_32K 7
+#define QAT_RFC_1950_CM_MASK 0x0f
+#define QAT_RFC_1950_CM_OFFSET 4
+#define QAT_RFC_1950_DICT_MASK 0x20
+#define QAT_RFC_1950_COMP_HDR 0x785e
+
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+enum direction {
+ DECOMPRESSION = 0,
+ COMPRESSION = 1,
+};
+
+struct qat_compression_req;
+
+struct qat_compression_ctx {
+ u8 comp_ctx[QAT_COMP_CTX_SIZE];
+ struct qat_compression_instance *inst;
+ int (*qat_comp_callback)(struct qat_compression_req *qat_req, void *resp);
+};
+
+struct qat_dst {
+ bool is_null;
+ int resubmitted;
+};
+
+struct qat_compression_req {
+ u8 req[QAT_COMP_REQ_SIZE];
+ struct qat_compression_ctx *qat_compression_ctx;
+ struct acomp_req *acompress_req;
+ struct qat_request_buffs buf;
+ enum direction dir;
+ int actual_dlen;
+ struct qat_alg_req alg_req;
+ struct work_struct resubmit;
+ struct qat_dst dst;
+};
+
+static int qat_alg_send_dc_message(struct qat_compression_req *qat_req,
+ struct qat_compression_instance *inst,
+ struct crypto_async_request *base)
+{
+ struct qat_alg_req *alg_req = &qat_req->alg_req;
+
+ alg_req->fw_req = (u32 *)&qat_req->req;
+ alg_req->tx_ring = inst->dc_tx;
+ alg_req->base = base;
+ alg_req->backlog = &inst->backlog;
+
+ return qat_alg_send_message(alg_req);
+}
+
+static void qat_comp_resubmit(struct work_struct *work)
+{
+ struct qat_compression_req *qat_req =
+ container_of(work, struct qat_compression_req, resubmit);
+ struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
+ struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
+ struct qat_request_buffs *qat_bufs = &qat_req->buf;
+ struct qat_compression_instance *inst = ctx->inst;
+ struct acomp_req *areq = qat_req->acompress_req;
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
+ unsigned int dlen = CRYPTO_ACOMP_DST_MAX;
+ u8 *req = qat_req->req;
+ dma_addr_t dfbuf;
+ int ret;
+
+ areq->dlen = dlen;
+
+ dev_dbg(&GET_DEV(accel_dev), "[%s][%s] retry NULL dst request - dlen = %d\n",
+ crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
+ qat_req->dir == COMPRESSION ? "comp" : "decomp", dlen);
+
+ ret = qat_bl_realloc_map_new_dst(accel_dev, &areq->dst, dlen, qat_bufs,
+ qat_algs_alloc_flags(&areq->base));
+ if (ret)
+ goto err;
+
+ qat_req->dst.resubmitted = true;
+
+ dfbuf = qat_req->buf.bloutp;
+ qat_comp_override_dst(req, dfbuf, dlen);
+
+ ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
+ if (ret != -ENOSPC)
+ return;
+
+err:
+ qat_bl_free_bufl(accel_dev, qat_bufs);
+ acomp_request_complete(areq, ret);
+}
+
+static int parse_zlib_header(u16 zlib_h)
+{
+ int ret = -EINVAL;
+ __be16 header;
+ u8 *header_p;
+ u8 cmf, flg;
+
+ header = cpu_to_be16(zlib_h);
+ header_p = (u8 *)&header;
+
+ flg = header_p[0];
+ cmf = header_p[1];
+
+ if (cmf >> QAT_RFC_1950_CM_OFFSET > QAT_RFC_1950_CM_DEFLATE_CINFO_32K)
+ return ret;
+
+ if ((cmf & QAT_RFC_1950_CM_MASK) != QAT_RFC_1950_CM_DEFLATE)
+ return ret;
+
+ if (flg & QAT_RFC_1950_DICT_MASK)
+ return ret;
+
+ return 0;
+}
+
+static int qat_comp_rfc1950_callback(struct qat_compression_req *qat_req,
+ void *resp)
+{
+ struct acomp_req *areq = qat_req->acompress_req;
+ enum direction dir = qat_req->dir;
+ __be32 qat_produced_adler;
+
+ qat_produced_adler = cpu_to_be32(qat_comp_get_produced_adler32(resp));
+
+ if (dir == COMPRESSION) {
+ __be16 zlib_header;
+
+ zlib_header = cpu_to_be16(QAT_RFC_1950_COMP_HDR);
+ scatterwalk_map_and_copy(&zlib_header, areq->dst, 0, QAT_RFC_1950_HDR_SIZE, 1);
+ areq->dlen += QAT_RFC_1950_HDR_SIZE;
+
+ scatterwalk_map_and_copy(&qat_produced_adler, areq->dst, areq->dlen,
+ QAT_RFC_1950_FOOTER_SIZE, 1);
+ areq->dlen += QAT_RFC_1950_FOOTER_SIZE;
+ } else {
+ __be32 decomp_adler;
+ int footer_offset;
+ int consumed;
+
+ consumed = qat_comp_get_consumed_ctr(resp);
+ footer_offset = consumed + QAT_RFC_1950_HDR_SIZE;
+ if (footer_offset + QAT_RFC_1950_FOOTER_SIZE > areq->slen)
+ return -EBADMSG;
+
+ scatterwalk_map_and_copy(&decomp_adler, areq->src, footer_offset,
+ QAT_RFC_1950_FOOTER_SIZE, 0);
+
+ if (qat_produced_adler != decomp_adler)
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
+ void *resp)
+{
+ struct acomp_req *areq = qat_req->acompress_req;
+ struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
+ struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
+ struct qat_compression_instance *inst = ctx->inst;
+ int consumed, produced;
+ s8 cmp_err, xlt_err;
+ int res = -EBADMSG;
+ int status;
+ u8 cnv;
+
+ status = qat_comp_get_cmp_status(resp);
+ status |= qat_comp_get_xlt_status(resp);
+ cmp_err = qat_comp_get_cmp_err(resp);
+ xlt_err = qat_comp_get_xlt_err(resp);
+
+ consumed = qat_comp_get_consumed_ctr(resp);
+ produced = qat_comp_get_produced_ctr(resp);
+
+ dev_dbg(&GET_DEV(accel_dev),
+		"[%s][%s][%s] slen = %8d dlen = %8d consumed = %8d produced = %8d cmp_err = %3d xlt_err = %3d\n",
+ crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
+ qat_req->dir == COMPRESSION ? "comp " : "decomp",
+ status ? "ERR" : "OK ",
+ areq->slen, areq->dlen, consumed, produced, cmp_err, xlt_err);
+
+ areq->dlen = 0;
+
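+	/*
+	 * Decompression requests submitted without a destination buffer are
+	 * retried once with a larger buffer if the first attempt overflowed.
+	 */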
+ if (qat_req->dir == DECOMPRESSION && qat_req->dst.is_null) {
+ if (cmp_err == ERR_CODE_OVERFLOW_ERROR) {
+ if (qat_req->dst.resubmitted) {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Output does not fit destination buffer\n");
+ res = -EOVERFLOW;
+ goto end;
+ }
+
+ INIT_WORK(&qat_req->resubmit, qat_comp_resubmit);
+ adf_misc_wq_queue_work(&qat_req->resubmit);
+ return;
+ }
+ }
+
+ if (unlikely(status != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+ goto end;
+
+ if (qat_req->dir == COMPRESSION) {
+ cnv = qat_comp_get_cmp_cnv_flag(resp);
+ if (unlikely(!cnv)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Verified compression not supported\n");
+ goto end;
+ }
+
+ if (unlikely(produced > qat_req->actual_dlen)) {
+ memset(inst->dc_data->ovf_buff, 0,
+ inst->dc_data->ovf_buff_sz);
+ dev_dbg(&GET_DEV(accel_dev),
+ "Actual buffer overflow: produced=%d, dlen=%d\n",
+ produced, qat_req->actual_dlen);
+ goto end;
+ }
+ }
+
+ res = 0;
+ areq->dlen = produced;
+
+ if (ctx->qat_comp_callback)
+ res = ctx->qat_comp_callback(qat_req, resp);
+
+end:
+ qat_bl_free_bufl(accel_dev, &qat_req->buf);
+ acomp_request_complete(areq, res);
+}
+
+void qat_comp_alg_callback(void *resp)
+{
+ struct qat_compression_req *qat_req =
+ (void *)(__force long)qat_comp_get_opaque(resp);
+ struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;
+
+ qat_comp_generic_callback(qat_req, resp);
+
+ qat_alg_send_backlog(backlog);
+}
+
+static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm)
+{
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+ struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_compression_instance *inst;
+ int node;
+
+ if (tfm->node == NUMA_NO_NODE)
+ node = numa_node_id();
+ else
+ node = tfm->node;
+
+ memset(ctx, 0, sizeof(*ctx));
+ inst = qat_compression_get_instance_node(node);
+ if (!inst)
+ return -EINVAL;
+ ctx->inst = inst;
+
+ ctx->inst->build_deflate_ctx(ctx->comp_ctx);
+
+ return 0;
+}
+
+static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm)
+{
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+ struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ qat_compression_put_instance(ctx->inst);
+ memset(ctx, 0, sizeof(*ctx));
+}
+
+static int qat_comp_alg_rfc1950_init_tfm(struct crypto_acomp *acomp_tfm)
+{
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+ struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ret = qat_comp_alg_init_tfm(acomp_tfm);
+ ctx->qat_comp_callback = &qat_comp_rfc1950_callback;
+
+ return ret;
+}
+
+static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum direction dir,
+ unsigned int shdr, unsigned int sftr,
+ unsigned int dhdr, unsigned int dftr)
+{
+ struct qat_compression_req *qat_req = acomp_request_ctx(areq);
+ struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(areq);
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+ struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_compression_instance *inst = ctx->inst;
+ gfp_t f = qat_algs_alloc_flags(&areq->base);
+ struct qat_sgl_to_bufl_params params = {0};
+ int slen = areq->slen - shdr - sftr;
+ int dlen = areq->dlen - dhdr - dftr;
+ dma_addr_t sfbuf, dfbuf;
+ u8 *req = qat_req->req;
+ size_t ovf_buff_sz;
+ int ret;
+
+ params.sskip = shdr;
+ params.dskip = dhdr;
+
+ if (!areq->src || !slen)
+ return -EINVAL;
+
+ if (areq->dst && !dlen)
+ return -EINVAL;
+
+ qat_req->dst.is_null = false;
+
+	/* Handle acomp requests that require the allocation of a destination
+	 * buffer. The destination buffer is sized at double the source buffer
+	 * length (rounded up to a whole page) to fit either the decompressed
+	 * output or any expansion of the data during compression.
+	 */
+ if (!areq->dst) {
+ qat_req->dst.is_null = true;
+
+ dlen = round_up(2 * slen, PAGE_SIZE);
+ areq->dst = sgl_alloc(dlen, f, NULL);
+ if (!areq->dst)
+ return -ENOMEM;
+
+ dlen -= dhdr + dftr;
+ areq->dlen = dlen;
+ qat_req->dst.resubmitted = false;
+ }
+
+ if (dir == COMPRESSION) {
+ params.extra_dst_buff = inst->dc_data->ovf_buff_p;
+ ovf_buff_sz = inst->dc_data->ovf_buff_sz;
+ params.sz_extra_dst_buff = ovf_buff_sz;
+ }
+
+ ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
+ &qat_req->buf, &params, f);
+ if (unlikely(ret))
+ return ret;
+
+ sfbuf = qat_req->buf.blp;
+ dfbuf = qat_req->buf.bloutp;
+ qat_req->qat_compression_ctx = ctx;
+ qat_req->acompress_req = areq;
+ qat_req->dir = dir;
+
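+		/*
+		 * Record the real destination length, then extend it by the
+		 * size of the extra overflow (skid) buffer appended to the
+		 * destination SGL.
+		 */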
+ if (dir == COMPRESSION) {
+ qat_req->actual_dlen = dlen;
+ dlen += ovf_buff_sz;
+ qat_comp_create_compression_req(ctx->comp_ctx, req,
+ (u64)(__force long)sfbuf, slen,
+ (u64)(__force long)dfbuf, dlen,
+ (u64)(__force long)qat_req);
+ } else {
+ qat_comp_create_decompression_req(ctx->comp_ctx, req,
+ (u64)(__force long)sfbuf, slen,
+ (u64)(__force long)dfbuf, dlen,
+ (u64)(__force long)qat_req);
+ }
+
+ ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
+ if (ret == -ENOSPC)
+ qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
+
+ return ret;
+}
+
+static int qat_comp_alg_compress(struct acomp_req *req)
+{
+ return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0, 0, 0);
+}
+
+static int qat_comp_alg_decompress(struct acomp_req *req)
+{
+ return qat_comp_alg_compress_decompress(req, DECOMPRESSION, 0, 0, 0, 0);
+}
+
+static int qat_comp_alg_rfc1950_compress(struct acomp_req *req)
+{
+ if (!req->dst && req->dlen != 0)
+ return -EINVAL;
+
+ if (req->dst && req->dlen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE)
+ return -EINVAL;
+
+ return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0,
+ QAT_RFC_1950_HDR_SIZE,
+ QAT_RFC_1950_FOOTER_SIZE);
+}
+
+static int qat_comp_alg_rfc1950_decompress(struct acomp_req *req)
+{
+ struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+ struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
+ u16 zlib_header;
+ int ret;
+
+ if (req->slen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE)
+ return -EBADMSG;
+
+ scatterwalk_map_and_copy(&zlib_header, req->src, 0, QAT_RFC_1950_HDR_SIZE, 0);
+
+ ret = parse_zlib_header(zlib_header);
+ if (ret) {
+ dev_dbg(&GET_DEV(accel_dev), "Error parsing zlib header\n");
+ return ret;
+ }
+
+ return qat_comp_alg_compress_decompress(req, DECOMPRESSION, QAT_RFC_1950_HDR_SIZE,
+ QAT_RFC_1950_FOOTER_SIZE, 0, 0);
+}
+
+static struct acomp_alg qat_acomp[] = { {
+ .base = {
+ .cra_name = "deflate",
+ .cra_driver_name = "qat_deflate",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
+ .cra_ctxsize = sizeof(struct qat_compression_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .init = qat_comp_alg_init_tfm,
+ .exit = qat_comp_alg_exit_tfm,
+ .compress = qat_comp_alg_compress,
+ .decompress = qat_comp_alg_decompress,
+ .dst_free = sgl_free,
+ .reqsize = sizeof(struct qat_compression_req),
+}, {
+ .base = {
+ .cra_name = "zlib-deflate",
+ .cra_driver_name = "qat_zlib_deflate",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct qat_compression_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .init = qat_comp_alg_rfc1950_init_tfm,
+ .exit = qat_comp_alg_exit_tfm,
+ .compress = qat_comp_alg_rfc1950_compress,
+ .decompress = qat_comp_alg_rfc1950_decompress,
+ .dst_free = sgl_free,
+ .reqsize = sizeof(struct qat_compression_req),
+} };
+
+int qat_comp_algs_register(void)
+{
+ int ret = 0;
+
+ mutex_lock(&algs_lock);
+ if (++active_devs == 1)
+ ret = crypto_register_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
+ mutex_unlock(&algs_lock);
+ return ret;
+}
+
+void qat_comp_algs_unregister(void)
+{
+ mutex_lock(&algs_lock);
+ if (--active_devs == 0)
+ crypto_unregister_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
+ mutex_unlock(&algs_lock);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_req.h b/drivers/crypto/intel/qat/qat_common/qat_comp_req.h
new file mode 100644
index 0000000000..404e32c5e7
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/qat_comp_req.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef _QAT_COMP_REQ_H_
+#define _QAT_COMP_REQ_H_
+
+#include "icp_qat_fw_comp.h"
+
+#define QAT_COMP_REQ_SIZE (sizeof(struct icp_qat_fw_comp_req))
+#define QAT_COMP_CTX_SIZE (QAT_COMP_REQ_SIZE * 2)
+
+static inline void qat_comp_create_req(void *ctx, void *req, u64 src, u32 slen,
+ u64 dst, u32 dlen, u64 opaque)
+{
+ struct icp_qat_fw_comp_req *fw_tmpl = ctx;
+ struct icp_qat_fw_comp_req *fw_req = req;
+ struct icp_qat_fw_comp_req_params *req_pars = &fw_req->comp_pars;
+
+ memcpy(fw_req, fw_tmpl, sizeof(*fw_req));
+ fw_req->comn_mid.src_data_addr = src;
+ fw_req->comn_mid.src_length = slen;
+ fw_req->comn_mid.dest_data_addr = dst;
+ fw_req->comn_mid.dst_length = dlen;
+ fw_req->comn_mid.opaque_data = opaque;
+ req_pars->comp_len = slen;
+ req_pars->out_buffer_sz = dlen;
+}
+
+static inline void qat_comp_override_dst(void *req, u64 dst, u32 dlen)
+{
+ struct icp_qat_fw_comp_req *fw_req = req;
+ struct icp_qat_fw_comp_req_params *req_pars = &fw_req->comp_pars;
+
+ fw_req->comn_mid.dest_data_addr = dst;
+ fw_req->comn_mid.dst_length = dlen;
+ req_pars->out_buffer_sz = dlen;
+}
+
+static inline void qat_comp_create_compression_req(void *ctx, void *req,
+ u64 src, u32 slen,
+ u64 dst, u32 dlen,
+ u64 opaque)
+{
+ qat_comp_create_req(ctx, req, src, slen, dst, dlen, opaque);
+}
+
+static inline void qat_comp_create_decompression_req(void *ctx, void *req,
+ u64 src, u32 slen,
+ u64 dst, u32 dlen,
+ u64 opaque)
+{
+ struct icp_qat_fw_comp_req *fw_tmpl = ctx;
+
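+	/* The ctx buffer holds two templates: compression first, then decompression */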
+ fw_tmpl++;
+ qat_comp_create_req(fw_tmpl, req, src, slen, dst, dlen, opaque);
+}
+
+static inline u32 qat_comp_get_consumed_ctr(void *resp)
+{
+ struct icp_qat_fw_comp_resp *qat_resp = resp;
+
+ return qat_resp->comp_resp_pars.input_byte_counter;
+}
+
+static inline u32 qat_comp_get_produced_ctr(void *resp)
+{
+ struct icp_qat_fw_comp_resp *qat_resp = resp;
+
+ return qat_resp->comp_resp_pars.output_byte_counter;
+}
+
+static inline u32 qat_comp_get_produced_adler32(void *resp)
+{
+ struct icp_qat_fw_comp_resp *qat_resp = resp;
+
+ return qat_resp->comp_resp_pars.crc.legacy.curr_adler_32;
+}
+
+static inline u64 qat_comp_get_opaque(void *resp)
+{
+ struct icp_qat_fw_comp_resp *qat_resp = resp;
+
+ return qat_resp->opaque_data;
+}
+
+static inline s8 qat_comp_get_cmp_err(void *resp)
+{
+ struct icp_qat_fw_comp_resp *qat_resp = resp;
+
+ return qat_resp->comn_resp.comn_error.cmp_err_code;
+}
+
+static inline s8 qat_comp_get_xlt_err(void *resp)
+{
+ struct icp_qat_fw_comp_resp *qat_resp = resp;
+
+ return qat_resp->comn_resp.comn_error.xlat_err_code;
+}
+
+static inline s8 qat_comp_get_cmp_status(void *resp)
+{
+ struct icp_qat_fw_comp_resp *qat_resp = resp;
+	u8 stat_field = qat_resp->comn_resp.comn_status;
+
+	return ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(stat_field);
+}
+
+static inline s8 qat_comp_get_xlt_status(void *resp)
+{
+ struct icp_qat_fw_comp_resp *qat_resp = resp;
+	u8 stat_field = qat_resp->comn_resp.comn_status;
+
+	return ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(stat_field);
+}
+
+static inline u8 qat_comp_get_cmp_cnv_flag(void *resp)
+{
+ struct icp_qat_fw_comp_resp *qat_resp = resp;
+ u8 flags = qat_resp->comn_resp.hdr_flags;
+
+ return ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(flags);
+}
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.c b/drivers/crypto/intel/qat/qat_common/qat_compression.c
new file mode 100644
index 0000000000..7842a9f221
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/qat_compression.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_transport.h"
+#include "adf_transport_access_macros.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "qat_compression.h"
+#include "icp_qat_fw.h"
+
+#define SEC ADF_KERNEL_SEC
+
+static struct service_hndl qat_compression;
+
+void qat_compression_put_instance(struct qat_compression_instance *inst)
+{
+ atomic_dec(&inst->refctr);
+ adf_dev_put(inst->accel_dev);
+}
+
+static int qat_compression_free_instances(struct adf_accel_dev *accel_dev)
+{
+ struct qat_compression_instance *inst;
+ struct list_head *list_ptr, *tmp;
+ int i;
+
+ list_for_each_safe(list_ptr, tmp, &accel_dev->compression_list) {
+ inst = list_entry(list_ptr,
+ struct qat_compression_instance, list);
+
+ for (i = 0; i < atomic_read(&inst->refctr); i++)
+ qat_compression_put_instance(inst);
+
+ if (inst->dc_tx)
+ adf_remove_ring(inst->dc_tx);
+
+ if (inst->dc_rx)
+ adf_remove_ring(inst->dc_rx);
+
+ list_del(list_ptr);
+ kfree(inst);
+ }
+ return 0;
+}
+
+struct qat_compression_instance *qat_compression_get_instance_node(int node)
+{
+ struct qat_compression_instance *inst = NULL;
+ struct adf_accel_dev *accel_dev = NULL;
+ unsigned long best = ~0;
+ struct list_head *itr;
+
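+	/* Pick the least-referenced started device, preferring the requested NUMA node */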
+ list_for_each(itr, adf_devmgr_get_head()) {
+ struct adf_accel_dev *tmp_dev;
+ unsigned long ctr;
+ int tmp_dev_node;
+
+ tmp_dev = list_entry(itr, struct adf_accel_dev, list);
+ tmp_dev_node = dev_to_node(&GET_DEV(tmp_dev));
+
+ if ((node == tmp_dev_node || tmp_dev_node < 0) &&
+ adf_dev_started(tmp_dev) && !list_empty(&tmp_dev->compression_list)) {
+ ctr = atomic_read(&tmp_dev->ref_count);
+ if (best > ctr) {
+ accel_dev = tmp_dev;
+ best = ctr;
+ }
+ }
+ }
+
+ if (!accel_dev) {
+ pr_debug_ratelimited("QAT: Could not find a device on node %d\n", node);
+ /* Get any started device */
+ list_for_each(itr, adf_devmgr_get_head()) {
+ struct adf_accel_dev *tmp_dev;
+
+ tmp_dev = list_entry(itr, struct adf_accel_dev, list);
+ if (adf_dev_started(tmp_dev) &&
+ !list_empty(&tmp_dev->compression_list)) {
+ accel_dev = tmp_dev;
+ break;
+ }
+ }
+ }
+
+ if (!accel_dev)
+ return NULL;
+
+ best = ~0;
+ list_for_each(itr, &accel_dev->compression_list) {
+ struct qat_compression_instance *tmp_inst;
+ unsigned long ctr;
+
+ tmp_inst = list_entry(itr, struct qat_compression_instance, list);
+ ctr = atomic_read(&tmp_inst->refctr);
+ if (best > ctr) {
+ inst = tmp_inst;
+ best = ctr;
+ }
+ }
+ if (inst) {
+ if (adf_dev_get(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
+ return NULL;
+ }
+ atomic_inc(&inst->refctr);
+ }
+ return inst;
+}
+
+static int qat_compression_create_instances(struct adf_accel_dev *accel_dev)
+{
+ struct qat_compression_instance *inst;
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+ unsigned long num_inst, num_msg_dc;
+ unsigned long bank;
+ int msg_size;
+ int ret;
+ int i;
+
+ INIT_LIST_HEAD(&accel_dev->compression_list);
+ strscpy(key, ADF_NUM_DC, sizeof(key));
+ ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
+ if (ret)
+ return ret;
+
+ ret = kstrtoul(val, 10, &num_inst);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_inst; i++) {
+ inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
+ dev_to_node(&GET_DEV(accel_dev)));
+ if (!inst) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ list_add_tail(&inst->list, &accel_dev->compression_list);
+ inst->id = i;
+ atomic_set(&inst->refctr, 0);
+ inst->accel_dev = accel_dev;
+ inst->build_deflate_ctx = GET_DC_OPS(accel_dev)->build_deflate_ctx;
+
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
+ ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
+ if (ret)
+ return ret;
+
+ ret = kstrtoul(val, 10, &bank);
+ if (ret)
+ return ret;
+
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
+ ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
+ if (ret)
+ return ret;
+
+ ret = kstrtoul(val, 10, &num_msg_dc);
+ if (ret)
+ return ret;
+
+ msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
+ ret = adf_create_ring(accel_dev, SEC, bank, num_msg_dc,
+ msg_size, key, NULL, 0, &inst->dc_tx);
+ if (ret)
+ return ret;
+
+ msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
+ ret = adf_create_ring(accel_dev, SEC, bank, num_msg_dc,
+ msg_size, key, qat_comp_alg_callback, 0,
+ &inst->dc_rx);
+ if (ret)
+ return ret;
+
+ inst->dc_data = accel_dev->dc_data;
+ INIT_LIST_HEAD(&inst->backlog.list);
+ spin_lock_init(&inst->backlog.lock);
+ }
+ return 0;
+err:
+ qat_compression_free_instances(accel_dev);
+ return ret;
+}
+
+static int qat_compression_alloc_dc_data(struct adf_accel_dev *accel_dev)
+{
+ struct device *dev = &GET_DEV(accel_dev);
+ dma_addr_t obuff_p = DMA_MAPPING_ERROR;
+ size_t ovf_buff_sz = QAT_COMP_MAX_SKID;
+ struct adf_dc_data *dc_data = NULL;
+ u8 *obuff = NULL;
+
+ dc_data = devm_kzalloc(dev, sizeof(*dc_data), GFP_KERNEL);
+ if (!dc_data)
+ goto err;
+
+ obuff = kzalloc_node(ovf_buff_sz, GFP_KERNEL, dev_to_node(dev));
+ if (!obuff)
+ goto err;
+
+ obuff_p = dma_map_single(dev, obuff, ovf_buff_sz, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, obuff_p)))
+ goto err;
+
+ dc_data->ovf_buff = obuff;
+ dc_data->ovf_buff_p = obuff_p;
+ dc_data->ovf_buff_sz = ovf_buff_sz;
+
+ accel_dev->dc_data = dc_data;
+
+ return 0;
+
+err:
+ accel_dev->dc_data = NULL;
+ kfree(obuff);
+ devm_kfree(dev, dc_data);
+ return -ENOMEM;
+}
+
+static void qat_free_dc_data(struct adf_accel_dev *accel_dev)
+{
+ struct adf_dc_data *dc_data = accel_dev->dc_data;
+ struct device *dev = &GET_DEV(accel_dev);
+
+ if (!dc_data)
+ return;
+
+ dma_unmap_single(dev, dc_data->ovf_buff_p, dc_data->ovf_buff_sz,
+ DMA_FROM_DEVICE);
+ kfree_sensitive(dc_data->ovf_buff);
+ devm_kfree(dev, dc_data);
+ accel_dev->dc_data = NULL;
+}
+
+static int qat_compression_init(struct adf_accel_dev *accel_dev)
+{
+ int ret;
+
+ ret = qat_compression_alloc_dc_data(accel_dev);
+ if (ret)
+ return ret;
+
+ ret = qat_compression_create_instances(accel_dev);
+ if (ret)
+ qat_free_dc_data(accel_dev);
+
+ return ret;
+}
+
+static int qat_compression_shutdown(struct adf_accel_dev *accel_dev)
+{
+ qat_free_dc_data(accel_dev);
+ return qat_compression_free_instances(accel_dev);
+}
+
+static int qat_compression_event_handler(struct adf_accel_dev *accel_dev,
+ enum adf_event event)
+{
+ int ret;
+
+ switch (event) {
+ case ADF_EVENT_INIT:
+ ret = qat_compression_init(accel_dev);
+ break;
+ case ADF_EVENT_SHUTDOWN:
+ ret = qat_compression_shutdown(accel_dev);
+ break;
+ case ADF_EVENT_RESTARTING:
+ case ADF_EVENT_RESTARTED:
+ case ADF_EVENT_START:
+ case ADF_EVENT_STOP:
+ default:
+ ret = 0;
+ }
+ return ret;
+}
+
+int qat_compression_register(void)
+{
+ memset(&qat_compression, 0, sizeof(qat_compression));
+ qat_compression.event_hld = qat_compression_event_handler;
+ qat_compression.name = "qat_compression";
+ return adf_service_register(&qat_compression);
+}
+
+int qat_compression_unregister(void)
+{
+ return adf_service_unregister(&qat_compression);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.h b/drivers/crypto/intel/qat/qat_common/qat_compression.h
new file mode 100644
index 0000000000..aebac2302d
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/qat_compression.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef _QAT_COMPRESSION_H_
+#define _QAT_COMPRESSION_H_
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "qat_algs_send.h"
+
+#define QAT_COMP_MAX_SKID 4096
+
+struct qat_compression_instance {
+ struct adf_etr_ring_data *dc_tx;
+ struct adf_etr_ring_data *dc_rx;
+ struct adf_accel_dev *accel_dev;
+ struct list_head list;
+ unsigned long state;
+ int id;
+ atomic_t refctr;
+ struct qat_instance_backlog backlog;
+ struct adf_dc_data *dc_data;
+ void (*build_deflate_ctx)(void *ctx);
+};
+
+static inline bool adf_hw_dev_has_compression(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ u32 mask = ~hw_device->accel_capabilities_mask;
+
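+	/* A bit set in 'mask' means the corresponding capability is not supported */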
+ if (mask & ADF_ACCEL_CAPABILITIES_COMPRESSION)
+ return false;
+
+ return true;
+}
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/qat_crypto.c b/drivers/crypto/intel/qat/qat_common/qat_crypto.c
new file mode 100644
index 0000000000..40c8e74d1c
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/qat_crypto.c
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_transport.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "adf_gen2_hw_data.h"
+#include "qat_crypto.h"
+#include "icp_qat_fw.h"
+
+#define SEC ADF_KERNEL_SEC
+
+static struct service_hndl qat_crypto;
+
+void qat_crypto_put_instance(struct qat_crypto_instance *inst)
+{
+ atomic_dec(&inst->refctr);
+ adf_dev_put(inst->accel_dev);
+}
+
+static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
+{
+ struct qat_crypto_instance *inst, *tmp;
+ int i;
+
+ list_for_each_entry_safe(inst, tmp, &accel_dev->crypto_list, list) {
+ for (i = 0; i < atomic_read(&inst->refctr); i++)
+ qat_crypto_put_instance(inst);
+
+ if (inst->sym_tx)
+ adf_remove_ring(inst->sym_tx);
+
+ if (inst->sym_rx)
+ adf_remove_ring(inst->sym_rx);
+
+ if (inst->pke_tx)
+ adf_remove_ring(inst->pke_tx);
+
+ if (inst->pke_rx)
+ adf_remove_ring(inst->pke_rx);
+
+ list_del(&inst->list);
+ kfree(inst);
+ }
+ return 0;
+}
+
+struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
+{
+ struct adf_accel_dev *accel_dev = NULL, *tmp_dev;
+ struct qat_crypto_instance *inst = NULL, *tmp_inst;
+ unsigned long best = ~0;
+
+ list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
+ unsigned long ctr;
+
+ if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||
+ dev_to_node(&GET_DEV(tmp_dev)) < 0) &&
+ adf_dev_started(tmp_dev) &&
+ !list_empty(&tmp_dev->crypto_list)) {
+ ctr = atomic_read(&tmp_dev->ref_count);
+ if (best > ctr) {
+ accel_dev = tmp_dev;
+ best = ctr;
+ }
+ }
+ }
+
+ if (!accel_dev) {
+ pr_debug_ratelimited("QAT: Could not find a device on node %d\n", node);
+ /* Get any started device */
+ list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
+ if (adf_dev_started(tmp_dev) &&
+ !list_empty(&tmp_dev->crypto_list)) {
+ accel_dev = tmp_dev;
+ break;
+ }
+ }
+ }
+
+ if (!accel_dev)
+ return NULL;
+
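+	/* Pick the least referenced crypto instance on the chosen device */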
+ best = ~0;
+ list_for_each_entry(tmp_inst, &accel_dev->crypto_list, list) {
+ unsigned long ctr;
+
+ ctr = atomic_read(&tmp_inst->refctr);
+ if (best > ctr) {
+ inst = tmp_inst;
+ best = ctr;
+ }
+ }
+ if (inst) {
+ if (adf_dev_get(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
+ return NULL;
+ }
+ atomic_inc(&inst->refctr);
+ }
+ return inst;
+}
+
+/**
+ * qat_crypto_vf_dev_config()
+ * create dev config required to create crypto inst.
+ *
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function creates device configuration required to create
+ * asym, sym or crypto instances
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int qat_crypto_vf_dev_config(struct adf_accel_dev *accel_dev)
+{
+ u16 ring_to_svc_map = GET_HW_DATA(accel_dev)->ring_to_svc_map;
+
+ if (ring_to_svc_map != ADF_GEN2_DEFAULT_RING_TO_SRV_MAP) {
+ dev_err(&GET_DEV(accel_dev),
+			"Unsupported ring/service mapping present on PF\n");
+ return -EFAULT;
+ }
+
+ return GET_HW_DATA(accel_dev)->dev_config(accel_dev);
+}
+
+static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
+{
+ unsigned long num_inst, num_msg_sym, num_msg_asym;
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+ unsigned long sym_bank, asym_bank;
+ struct qat_crypto_instance *inst;
+ int msg_size;
+ int ret;
+ int i;
+
+ INIT_LIST_HEAD(&accel_dev->crypto_list);
+ ret = adf_cfg_get_param_value(accel_dev, SEC, ADF_NUM_CY, val);
+ if (ret)
+ return ret;
+
+ ret = kstrtoul(val, 0, &num_inst);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_inst; i++) {
+ inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
+ dev_to_node(&GET_DEV(accel_dev)));
+ if (!inst) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ list_add_tail(&inst->list, &accel_dev->crypto_list);
+ inst->id = i;
+ atomic_set(&inst->refctr, 0);
+ inst->accel_dev = accel_dev;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
+ ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
+ if (ret)
+ goto err;
+
+ ret = kstrtoul(val, 10, &sym_bank);
+ if (ret)
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
+ ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
+ if (ret)
+ goto err;
+
+ ret = kstrtoul(val, 10, &asym_bank);
+ if (ret)
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+ ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
+ if (ret)
+ goto err;
+
+ ret = kstrtoul(val, 10, &num_msg_sym);
+ if (ret)
+ goto err;
+
+ num_msg_sym = num_msg_sym >> 1;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+ ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
+ if (ret)
+ goto err;
+
+ ret = kstrtoul(val, 10, &num_msg_asym);
+ if (ret)
+ goto err;
+ num_msg_asym = num_msg_asym >> 1;
+
+ msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+ ret = adf_create_ring(accel_dev, SEC, sym_bank, num_msg_sym,
+ msg_size, key, NULL, 0, &inst->sym_tx);
+ if (ret)
+ goto err;
+
+ msg_size = msg_size >> 1;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+ ret = adf_create_ring(accel_dev, SEC, asym_bank, num_msg_asym,
+ msg_size, key, NULL, 0, &inst->pke_tx);
+ if (ret)
+ goto err;
+
+ msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+ ret = adf_create_ring(accel_dev, SEC, sym_bank, num_msg_sym,
+ msg_size, key, qat_alg_callback, 0,
+ &inst->sym_rx);
+ if (ret)
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+ ret = adf_create_ring(accel_dev, SEC, asym_bank, num_msg_asym,
+ msg_size, key, qat_alg_asym_callback, 0,
+ &inst->pke_rx);
+ if (ret)
+ goto err;
+
+ INIT_LIST_HEAD(&inst->backlog.list);
+ spin_lock_init(&inst->backlog.lock);
+ }
+ return 0;
+err:
+ qat_crypto_free_instances(accel_dev);
+ return ret;
+}
+
+static int qat_crypto_init(struct adf_accel_dev *accel_dev)
+{
+ if (qat_crypto_create_instances(accel_dev))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
+{
+ return qat_crypto_free_instances(accel_dev);
+}
+
+static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
+ enum adf_event event)
+{
+ int ret;
+
+ switch (event) {
+ case ADF_EVENT_INIT:
+ ret = qat_crypto_init(accel_dev);
+ break;
+ case ADF_EVENT_SHUTDOWN:
+ ret = qat_crypto_shutdown(accel_dev);
+ break;
+ case ADF_EVENT_RESTARTING:
+ case ADF_EVENT_RESTARTED:
+ case ADF_EVENT_START:
+ case ADF_EVENT_STOP:
+ default:
+ ret = 0;
+ }
+ return ret;
+}
+
+int qat_crypto_register(void)
+{
+ memset(&qat_crypto, 0, sizeof(qat_crypto));
+ qat_crypto.event_hld = qat_crypto_event_handler;
+ qat_crypto.name = "qat_crypto";
+ return adf_service_register(&qat_crypto);
+}
+
+int qat_crypto_unregister(void)
+{
+ return adf_service_unregister(&qat_crypto);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_crypto.h b/drivers/crypto/intel/qat/qat_common/qat_crypto.h
new file mode 100644
index 0000000000..6a0e961bb9
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/qat_crypto.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef _QAT_CRYPTO_INSTANCE_H_
+#define _QAT_CRYPTO_INSTANCE_H_
+
+#include <crypto/aes.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_la.h"
+#include "qat_algs_send.h"
+#include "qat_bl.h"
+
+struct qat_crypto_instance {
+ struct adf_etr_ring_data *sym_tx;
+ struct adf_etr_ring_data *sym_rx;
+ struct adf_etr_ring_data *pke_tx;
+ struct adf_etr_ring_data *pke_rx;
+ struct adf_accel_dev *accel_dev;
+ struct list_head list;
+ unsigned long state;
+ int id;
+ atomic_t refctr;
+ struct qat_instance_backlog backlog;
+};
+
+struct qat_crypto_request;
+
+struct qat_crypto_request {
+ struct icp_qat_fw_la_bulk_req req;
+ union {
+ struct qat_alg_aead_ctx *aead_ctx;
+ struct qat_alg_skcipher_ctx *skcipher_ctx;
+ };
+ union {
+ struct aead_request *aead_req;
+ struct skcipher_request *skcipher_req;
+ };
+ struct qat_request_buffs buf;
+ void (*cb)(struct icp_qat_fw_la_resp *resp,
+ struct qat_crypto_request *req);
+ union {
+ struct {
+ __be64 iv_hi;
+ __be64 iv_lo;
+ };
+ u8 iv[AES_BLOCK_SIZE];
+ };
+ bool encryption;
+ struct qat_alg_req alg_req;
+};
+
+static inline bool adf_hw_dev_has_crypto(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ u32 mask = ~hw_device->accel_capabilities_mask;
+
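+	/* A bit set in 'mask' means the capability is missing; sym, asym and auth are all required */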
+ if (mask & ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC)
+ return false;
+ if (mask & ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC)
+ return false;
+ if (mask & ADF_ACCEL_CAPABILITIES_AUTHENTICATION)
+ return false;
+
+ return true;
+}
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/qat_hal.c b/drivers/crypto/intel/qat/qat_common/qat_hal.c
new file mode 100644
index 0000000000..cbb946a800
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/qat_hal.c
@@ -0,0 +1,1594 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pci_ids.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_hal.h"
+#include "icp_qat_uclo.h"
+
+#define BAD_REGADDR 0xffff
+#define MAX_RETRY_TIMES 10000
+#define INIT_CTX_ARB_VALUE 0x0
+#define INIT_CTX_ENABLE_VALUE 0x0
+#define INIT_PC_VALUE 0x0
+#define INIT_WAKEUP_EVENTS_VALUE 0x1
+#define INIT_SIG_EVENTS_VALUE 0x1
+#define INIT_CCENABLE_VALUE 0x2000
+#define RST_CSR_QAT_LSB 20
+#define RST_CSR_AE_LSB 0
+#define MC_TIMESTAMP_ENABLE (0x1 << 7)
+
+#define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \
+ (~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \
+ (~(1 << CE_REG_PAR_ERR_BITPOS)))
+#define INSERT_IMMED_GPRA_CONST(inst, const_val) \
+ (inst = ((inst & 0xFFFF00C03FFull) | \
+ ((((const_val) << 12) & 0x0FF00000ull) | \
+ (((const_val) << 10) & 0x0003FC00ull))))
+#define INSERT_IMMED_GPRB_CONST(inst, const_val) \
+ (inst = ((inst & 0xFFFF00FFF00ull) | \
+ ((((const_val) << 12) & 0x0FF00000ull) | \
+ (((const_val) << 0) & 0x000000FFull))))
+
+#define AE(handle, ae) ((handle)->hal_handle->aes[ae])
+
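+/* Microcode template patched and executed to initialize local memory, see qat_hal_batch_wr_lm() */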
+static const u64 inst_4b[] = {
+ 0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull,
+ 0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
+ 0x0A021000000ull
+};
+
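+/* Microcode executed by qat_hal_clear_gpr() to clear the GPRs and transfer registers */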
+static const u64 inst[] = {
+ 0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull,
+ 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
+ 0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull,
+ 0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
+ 0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull,
+ 0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull,
+ 0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull,
+ 0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull,
+ 0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull,
+ 0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull,
+ 0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull,
+ 0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull,
+ 0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull,
+ 0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull,
+ 0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull,
+ 0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull,
+ 0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull,
+ 0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull,
+ 0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull,
+ 0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull,
+ 0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull,
+ 0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull,
+};
+
+void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int ctx_mask)
+{
+ AE(handle, ae).live_ctx_mask = ctx_mask;
+}
+
+#define CSR_RETRY_TIMES 500
+static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int csr)
+{
+ unsigned int iterations = CSR_RETRY_TIMES;
+ int value;
+
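+	/* Retry while LOCAL_CSR_STATUS reports that the CSR access did not complete */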
+ do {
+ value = GET_AE_CSR(handle, ae, csr);
+ if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
+ return value;
+ } while (iterations--);
+
+ pr_err("QAT: Read CSR timeout\n");
+ return 0;
+}
+
+static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int csr,
+ unsigned int value)
+{
+ unsigned int iterations = CSR_RETRY_TIMES;
+
+ do {
+ SET_AE_CSR(handle, ae, csr, value);
+ if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
+ return 0;
+ } while (iterations--);
+
+	pr_err("QAT: Write CSR timeout\n");
+ return -EFAULT;
+}
+
+static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx,
+ unsigned int *events)
+{
+ unsigned int cur_ctx;
+
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
+ qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+ *events = qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT);
+ qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int cycles,
+ int chk_inactive)
+{
+ unsigned int base_cnt = 0, cur_cnt = 0;
+ unsigned int csr = (1 << ACS_ABO_BITPOS);
+ int times = MAX_RETRY_TIMES;
+ int elapsed_cycles = 0;
+
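+	/* PROFILE_COUNT is a free-running 16-bit counter; wraparound is handled below */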
+ base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
+ base_cnt &= 0xffff;
+ while ((int)cycles > elapsed_cycles && times--) {
+ if (chk_inactive)
+ csr = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+
+ cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
+ cur_cnt &= 0xffff;
+ elapsed_cycles = cur_cnt - base_cnt;
+
+ if (elapsed_cycles < 0)
+ elapsed_cycles += 0x10000;
+
+		/* ensure at least 8 cycles have elapsed in wait_cycles */
+ if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS)))
+ return 0;
+ }
+ if (times < 0) {
+		pr_err("QAT: wait_num_cycles timed out\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+#define CLR_BIT(wrd, bit) ((wrd) & ~(1 << (bit)))
+#define SET_BIT(wrd, bit) ((wrd) | 1 << (bit))
+
+int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char mode)
+{
+ unsigned int csr, new_csr;
+
+ if (mode != 4 && mode != 8) {
+ pr_err("QAT: bad ctx mode=%d\n", mode);
+ return -EINVAL;
+ }
+
+	/* Sets the acceleration engine context mode to either four or eight contexts */
+ csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+ csr = IGNORE_W1C_MASK & csr;
+ new_csr = (mode == 4) ?
+ SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
+ CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS);
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+ return 0;
+}
+
+int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char mode)
+{
+ unsigned int csr, new_csr;
+
+ csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+ csr &= IGNORE_W1C_MASK;
+
+ new_csr = (mode) ?
+ SET_BIT(csr, CE_NN_MODE_BITPOS) :
+ CLR_BIT(csr, CE_NN_MODE_BITPOS);
+
+ if (new_csr != csr)
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+
+ return 0;
+}
+
+int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, enum icp_qat_uof_regtype lm_type,
+ unsigned char mode)
+{
+ unsigned int csr, new_csr;
+
+ csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+ csr &= IGNORE_W1C_MASK;
+ switch (lm_type) {
+ case ICP_LMEM0:
+ new_csr = (mode) ?
+ SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) :
+ CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS);
+ break;
+ case ICP_LMEM1:
+ new_csr = (mode) ?
+ SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) :
+ CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS);
+ break;
+ case ICP_LMEM2:
+ new_csr = (mode) ?
+ SET_BIT(csr, CE_LMADDR_2_GLOBAL_BITPOS) :
+ CLR_BIT(csr, CE_LMADDR_2_GLOBAL_BITPOS);
+ break;
+ case ICP_LMEM3:
+ new_csr = (mode) ?
+ SET_BIT(csr, CE_LMADDR_3_GLOBAL_BITPOS) :
+ CLR_BIT(csr, CE_LMADDR_3_GLOBAL_BITPOS);
+ break;
+ default:
+		pr_err("QAT: invalid lmType = 0x%x\n", lm_type);
+ return -EINVAL;
+ }
+
+ if (new_csr != csr)
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+ return 0;
+}
+
+void qat_hal_set_ae_tindex_mode(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char mode)
+{
+ unsigned int csr, new_csr;
+
+ csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+ csr &= IGNORE_W1C_MASK;
+ new_csr = (mode) ?
+ SET_BIT(csr, CE_T_INDEX_GLOBAL_BITPOS) :
+ CLR_BIT(csr, CE_T_INDEX_GLOBAL_BITPOS);
+ if (new_csr != csr)
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+}
+
+static unsigned short qat_hal_get_reg_addr(unsigned int type,
+ unsigned short reg_num)
+{
+ unsigned short reg_addr;
+
+ switch (type) {
+ case ICP_GPA_ABS:
+ case ICP_GPB_ABS:
+ reg_addr = 0x80 | (reg_num & 0x7f);
+ break;
+ case ICP_GPA_REL:
+ case ICP_GPB_REL:
+ reg_addr = reg_num & 0x1f;
+ break;
+ case ICP_SR_RD_REL:
+ case ICP_SR_WR_REL:
+ case ICP_SR_REL:
+ reg_addr = 0x180 | (reg_num & 0x1f);
+ break;
+ case ICP_SR_ABS:
+ reg_addr = 0x140 | ((reg_num & 0x3) << 1);
+ break;
+ case ICP_DR_RD_REL:
+ case ICP_DR_WR_REL:
+ case ICP_DR_REL:
+ reg_addr = 0x1c0 | (reg_num & 0x1f);
+ break;
+ case ICP_DR_ABS:
+ reg_addr = 0x100 | ((reg_num & 0x3) << 1);
+ break;
+ case ICP_NEIGH_REL:
+ reg_addr = 0x280 | (reg_num & 0x1f);
+ break;
+ case ICP_LMEM0:
+ reg_addr = 0x200;
+ break;
+ case ICP_LMEM1:
+ reg_addr = 0x220;
+ break;
+ case ICP_LMEM2:
+ reg_addr = 0x2c0;
+ break;
+ case ICP_LMEM3:
+ reg_addr = 0x2e0;
+ break;
+ case ICP_NO_DEST:
+ reg_addr = 0x300 | (reg_num & 0xff);
+ break;
+ default:
+ reg_addr = BAD_REGADDR;
+ break;
+ }
+ return reg_addr;
+}
+
+void qat_hal_reset(struct icp_qat_fw_loader_handle *handle)
+{
+ unsigned int reset_mask = handle->chip_info->icp_rst_mask;
+ unsigned int reset_csr = handle->chip_info->icp_rst_csr;
+ unsigned int csr_val;
+
+ csr_val = GET_CAP_CSR(handle, reset_csr);
+ csr_val |= reset_mask;
+ SET_CAP_CSR(handle, reset_csr, csr_val);
+}
+
+static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int ctx_mask,
+ unsigned int ae_csr, unsigned int csr_val)
+{
+ unsigned int ctx, cur_ctx;
+
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
+
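+	/* CSR_CTX_POINTER selects which context's indirect CSRs are accessed */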
+ for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+ if (!(ctx_mask & (1 << ctx)))
+ continue;
+ qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+ qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val);
+ }
+
+ qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static unsigned int qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx,
+ unsigned int ae_csr)
+{
+ unsigned int cur_ctx, csr_val;
+
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
+ qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, ae_csr);
+ qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+
+ return csr_val;
+}
+
+static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int ctx_mask,
+ unsigned int events)
+{
+ unsigned int ctx, cur_ctx;
+
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
+ for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+ if (!(ctx_mask & (1 << ctx)))
+ continue;
+ qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+ qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events);
+ }
+ qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int ctx_mask,
+ unsigned int events)
+{
+ unsigned int ctx, cur_ctx;
+
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
+ for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+ if (!(ctx_mask & (1 << ctx)))
+ continue;
+ qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+ qat_hal_wr_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT,
+ events);
+ }
+ qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
+{
+ unsigned long ae_mask = handle->hal_handle->ae_mask;
+ unsigned int base_cnt, cur_cnt;
+ unsigned char ae;
+ int times = MAX_RETRY_TIMES;
+
+ for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+ base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
+ base_cnt &= 0xffff;
+
+ do {
+ cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
+ cur_cnt &= 0xffff;
+ } while (times-- && (cur_cnt == base_cnt));
+
+ if (times < 0) {
+			pr_err("QAT: AE%d is inactive\n", ae);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
+ unsigned int ae)
+{
+ unsigned int enable = 0, active = 0;
+
+ enable = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+ active = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+ if ((enable & (0xff << CE_ENABLE_BITPOS)) ||
+ (active & (1 << ACS_ABO_BITPOS)))
+ return 1;
+ else
+ return 0;
+}
+
+static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
+{
+ unsigned long ae_mask = handle->hal_handle->ae_mask;
+ unsigned int misc_ctl_csr, misc_ctl;
+ unsigned char ae;
+
+ misc_ctl_csr = handle->chip_info->misc_ctl_csr;
+ /* stop the timestamp timers */
+ misc_ctl = GET_CAP_CSR(handle, misc_ctl_csr);
+ if (misc_ctl & MC_TIMESTAMP_ENABLE)
+ SET_CAP_CSR(handle, misc_ctl_csr, misc_ctl &
+ (~MC_TIMESTAMP_ENABLE));
+
+ for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+ qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0);
+ qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0);
+ }
+ /* start timestamp timers */
+ SET_CAP_CSR(handle, misc_ctl_csr, misc_ctl | MC_TIMESTAMP_ENABLE);
+}
+
+#define ESRAM_AUTO_TINIT BIT(2)
+#define ESRAM_AUTO_TINIT_DONE BIT(3)
+#define ESRAM_AUTO_INIT_USED_CYCLES (1640)
+#define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C
+static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
+{
+ void __iomem *csr_addr =
+ (void __iomem *)((uintptr_t)handle->hal_ep_csr_addr_v +
+ ESRAM_AUTO_INIT_CSR_OFFSET);
+ unsigned int csr_val;
+ int times = 30;
+
+ if (handle->pci_dev->device != PCI_DEVICE_ID_INTEL_QAT_DH895XCC)
+ return 0;
+
+ csr_val = ADF_CSR_RD(csr_addr, 0);
+ if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE))
+ return 0;
+
+ csr_val = ADF_CSR_RD(csr_addr, 0);
+ csr_val |= ESRAM_AUTO_TINIT;
+ ADF_CSR_WR(csr_addr, 0, csr_val);
+
+ do {
+ qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0);
+ csr_val = ADF_CSR_RD(csr_addr, 0);
+ } while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--);
+ if (times < 0) {
+		pr_err("QAT: Failed to init eSRAM\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+#define SHRAM_INIT_CYCLES 2060
+int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle)
+{
+ unsigned int clk_csr = handle->chip_info->glb_clk_enable_csr;
+ unsigned int reset_mask = handle->chip_info->icp_rst_mask;
+ unsigned int reset_csr = handle->chip_info->icp_rst_csr;
+ unsigned long ae_mask = handle->hal_handle->ae_mask;
+ unsigned char ae = 0;
+ unsigned int times = 100;
+ unsigned int csr_val;
+
+ /* write to the reset csr */
+ csr_val = GET_CAP_CSR(handle, reset_csr);
+ csr_val &= ~reset_mask;
+ do {
+ SET_CAP_CSR(handle, reset_csr, csr_val);
+ if (!(times--))
+ goto out_err;
+ csr_val = GET_CAP_CSR(handle, reset_csr);
+ csr_val &= reset_mask;
+ } while (csr_val);
+ /* enable clock */
+ csr_val = GET_CAP_CSR(handle, clk_csr);
+ csr_val |= reset_mask;
+ SET_CAP_CSR(handle, clk_csr, csr_val);
+ if (qat_hal_check_ae_alive(handle))
+ goto out_err;
+
+ /* Set undefined power-up/reset states to reasonable default values */
+ for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
+ INIT_CTX_ENABLE_VALUE);
+ qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX,
+ CTX_STS_INDIRECT,
+ handle->hal_handle->upc_mask &
+ INIT_PC_VALUE);
+ qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
+ qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
+ qat_hal_put_wakeup_event(handle, ae,
+ ICP_QAT_UCLO_AE_ALL_CTX,
+ INIT_WAKEUP_EVENTS_VALUE);
+ qat_hal_put_sig_event(handle, ae,
+ ICP_QAT_UCLO_AE_ALL_CTX,
+ INIT_SIG_EVENTS_VALUE);
+ }
+ if (qat_hal_init_esram(handle))
+ goto out_err;
+ if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0))
+ goto out_err;
+ qat_hal_reset_timestamp(handle);
+
+ return 0;
+out_err:
+ pr_err("QAT: failed to get device out of reset\n");
+ return -EFAULT;
+}
+
+static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int ctx_mask)
+{
+ unsigned int ctx;
+
+ ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+ ctx &= IGNORE_W1C_MASK &
+ (~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
+}
+
+static u64 qat_hal_parity_64bit(u64 word)
+{
+ word ^= word >> 1;
+ word ^= word >> 2;
+ word ^= word >> 4;
+ word ^= word >> 8;
+ word ^= word >> 16;
+ word ^= word >> 32;
+ return word & 1;
+}
+
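+/* Compute the seven ECC bits (bits 44-50) over the 44-bit microword */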
+static u64 qat_hal_set_uword_ecc(u64 uword)
+{
+ u64 bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL,
+ bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL,
+ bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL,
+ bit6_mask = 0xdaf69a46910ULL;
+
+	/* clear the ECC bits */
+ uword &= ~(0x7fULL << 0x2C);
+ uword |= qat_hal_parity_64bit(bit0_mask & uword) << 0x2C;
+ uword |= qat_hal_parity_64bit(bit1_mask & uword) << 0x2D;
+ uword |= qat_hal_parity_64bit(bit2_mask & uword) << 0x2E;
+ uword |= qat_hal_parity_64bit(bit3_mask & uword) << 0x2F;
+ uword |= qat_hal_parity_64bit(bit4_mask & uword) << 0x30;
+ uword |= qat_hal_parity_64bit(bit5_mask & uword) << 0x31;
+ uword |= qat_hal_parity_64bit(bit6_mask & uword) << 0x32;
+ return uword;
+}
+
+void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int uaddr,
+ unsigned int words_num, u64 *uword)
+{
+ unsigned int ustore_addr;
+ unsigned int i;
+
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
+ uaddr |= UA_ECS;
+ qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+ for (i = 0; i < words_num; i++) {
+ unsigned int uwrd_lo, uwrd_hi;
+ u64 tmp;
+
+ tmp = qat_hal_set_uword_ecc(uword[i]);
+ uwrd_lo = (unsigned int)(tmp & 0xffffffff);
+ uwrd_hi = (unsigned int)(tmp >> 0x20);
+ qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
+ qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
+ }
+ qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+}
+
+static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int ctx_mask)
+{
+ unsigned int ctx;
+
+ ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+ ctx &= IGNORE_W1C_MASK;
+ ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
+ ctx |= (ctx_mask << CE_ENABLE_BITPOS);
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
+}
+
+static void qat_hal_clear_xfer(struct icp_qat_fw_loader_handle *handle)
+{
+ unsigned long ae_mask = handle->hal_handle->ae_mask;
+ unsigned char ae;
+ unsigned short reg;
+
+ for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+ for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) {
+ qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS,
+ reg, 0);
+ qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS,
+ reg, 0);
+ }
+ }
+}
+
+static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
+{
+ unsigned long ae_mask = handle->hal_handle->ae_mask;
+ unsigned char ae;
+ unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX;
+ int times = MAX_RETRY_TIMES;
+ unsigned int csr_val = 0;
+ unsigned int savctx = 0;
+ int ret = 0;
+
+ for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+ csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
+ csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
+ qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+ csr_val &= IGNORE_W1C_MASK;
+ if (handle->chip_info->nn)
+ csr_val |= CE_NN_MODE;
+
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
+ qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst),
+ (u64 *)inst);
+ qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
+ handle->hal_handle->upc_mask &
+ INIT_PC_VALUE);
+ savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+ qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
+ qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
+ qat_hal_wr_indr_csr(handle, ae, ctx_mask,
+ CTX_SIG_EVENTS_INDIRECT, 0);
+ qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
+ qat_hal_enable_ctx(handle, ae, ctx_mask);
+ }
+ for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+ /* wait for AE to finish */
+ do {
+ ret = qat_hal_wait_cycles(handle, ae, 20, 1);
+ } while (ret && times--);
+
+ if (times < 0) {
+			pr_err("QAT: clear GPR of AE %d failed\n", ae);
+ return -EINVAL;
+ }
+ qat_hal_disable_ctx(handle, ae, ctx_mask);
+ qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
+ savctx & ACS_ACNO);
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
+ INIT_CTX_ENABLE_VALUE);
+ qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
+ handle->hal_handle->upc_mask &
+ INIT_PC_VALUE);
+ qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
+ qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
+ qat_hal_put_wakeup_event(handle, ae, ctx_mask,
+ INIT_WAKEUP_EVENTS_VALUE);
+ qat_hal_put_sig_event(handle, ae, ctx_mask,
+ INIT_SIG_EVENTS_VALUE);
+ }
+ return 0;
+}
+
+static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
+ struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ unsigned int max_en_ae_id = 0;
+ struct adf_bar *sram_bar;
+ unsigned int csr_val = 0;
+ unsigned long ae_mask;
+ unsigned char ae = 0;
+ int ret = 0;
+
+ handle->pci_dev = pci_info->pci_dev;
+ switch (handle->pci_dev->device) {
+ case ADF_4XXX_PCI_DEVICE_ID:
+ case ADF_401XX_PCI_DEVICE_ID:
+ case ADF_402XX_PCI_DEVICE_ID:
+ handle->chip_info->mmp_sram_size = 0;
+ handle->chip_info->nn = false;
+ handle->chip_info->lm2lm3 = true;
+ handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG_2X;
+ handle->chip_info->icp_rst_csr = ICP_RESET_CPP0;
+ handle->chip_info->icp_rst_mask = 0x100015;
+ handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE_CPP0;
+ handle->chip_info->misc_ctl_csr = MISC_CONTROL_C4XXX;
+ handle->chip_info->wakeup_event_val = 0x80000000;
+ handle->chip_info->fw_auth = true;
+ handle->chip_info->css_3k = true;
+ handle->chip_info->tgroup_share_ustore = true;
+ handle->chip_info->fcu_ctl_csr = FCU_CONTROL_4XXX;
+ handle->chip_info->fcu_sts_csr = FCU_STATUS_4XXX;
+ handle->chip_info->fcu_dram_addr_hi = FCU_DRAM_ADDR_HI_4XXX;
+ handle->chip_info->fcu_dram_addr_lo = FCU_DRAM_ADDR_LO_4XXX;
+ handle->chip_info->fcu_loaded_ae_csr = FCU_AE_LOADED_4XXX;
+ handle->chip_info->fcu_loaded_ae_pos = 0;
+
+ handle->hal_cap_g_ctl_csr_addr_v = pmisc_addr + ICP_QAT_CAP_OFFSET_4XXX;
+ handle->hal_cap_ae_xfer_csr_addr_v = pmisc_addr + ICP_QAT_AE_OFFSET_4XXX;
+ handle->hal_ep_csr_addr_v = pmisc_addr + ICP_QAT_EP_OFFSET_4XXX;
+ handle->hal_cap_ae_local_csr_addr_v =
+ (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v
+ + LOCAL_TO_XFER_REG_OFFSET);
+ break;
+ case PCI_DEVICE_ID_INTEL_QAT_C62X:
+ case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
+ handle->chip_info->mmp_sram_size = 0;
+ handle->chip_info->nn = true;
+ handle->chip_info->lm2lm3 = false;
+ handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG;
+ handle->chip_info->icp_rst_csr = ICP_RESET;
+ handle->chip_info->icp_rst_mask = (hw_data->ae_mask << RST_CSR_AE_LSB) |
+ (hw_data->accel_mask << RST_CSR_QAT_LSB);
+ handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE;
+ handle->chip_info->misc_ctl_csr = MISC_CONTROL;
+ handle->chip_info->wakeup_event_val = WAKEUP_EVENT;
+ handle->chip_info->fw_auth = true;
+ handle->chip_info->css_3k = false;
+ handle->chip_info->tgroup_share_ustore = false;
+ handle->chip_info->fcu_ctl_csr = FCU_CONTROL;
+ handle->chip_info->fcu_sts_csr = FCU_STATUS;
+ handle->chip_info->fcu_dram_addr_hi = FCU_DRAM_ADDR_HI;
+ handle->chip_info->fcu_dram_addr_lo = FCU_DRAM_ADDR_LO;
+ handle->chip_info->fcu_loaded_ae_csr = FCU_STATUS;
+ handle->chip_info->fcu_loaded_ae_pos = FCU_LOADED_AE_POS;
+ handle->hal_cap_g_ctl_csr_addr_v = pmisc_addr + ICP_QAT_CAP_OFFSET;
+ handle->hal_cap_ae_xfer_csr_addr_v = pmisc_addr + ICP_QAT_AE_OFFSET;
+ handle->hal_ep_csr_addr_v = pmisc_addr + ICP_QAT_EP_OFFSET;
+ handle->hal_cap_ae_local_csr_addr_v =
+ (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v
+ + LOCAL_TO_XFER_REG_OFFSET);
+ break;
+ case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
+ handle->chip_info->mmp_sram_size = 0x40000;
+ handle->chip_info->nn = true;
+ handle->chip_info->lm2lm3 = false;
+ handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG;
+ handle->chip_info->icp_rst_csr = ICP_RESET;
+ handle->chip_info->icp_rst_mask = (hw_data->ae_mask << RST_CSR_AE_LSB) |
+ (hw_data->accel_mask << RST_CSR_QAT_LSB);
+ handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE;
+ handle->chip_info->misc_ctl_csr = MISC_CONTROL;
+ handle->chip_info->wakeup_event_val = WAKEUP_EVENT;
+ handle->chip_info->fw_auth = false;
+ handle->chip_info->css_3k = false;
+ handle->chip_info->tgroup_share_ustore = false;
+ handle->chip_info->fcu_ctl_csr = 0;
+ handle->chip_info->fcu_sts_csr = 0;
+ handle->chip_info->fcu_dram_addr_hi = 0;
+ handle->chip_info->fcu_dram_addr_lo = 0;
+ handle->chip_info->fcu_loaded_ae_csr = 0;
+ handle->chip_info->fcu_loaded_ae_pos = 0;
+ handle->hal_cap_g_ctl_csr_addr_v = pmisc_addr + ICP_QAT_CAP_OFFSET;
+ handle->hal_cap_ae_xfer_csr_addr_v = pmisc_addr + ICP_QAT_AE_OFFSET;
+ handle->hal_ep_csr_addr_v = pmisc_addr + ICP_QAT_EP_OFFSET;
+ handle->hal_cap_ae_local_csr_addr_v =
+ (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v
+ + LOCAL_TO_XFER_REG_OFFSET);
+ break;
+ default:
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ if (handle->chip_info->mmp_sram_size > 0) {
+ sram_bar =
+ &pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
+ handle->hal_sram_addr_v = sram_bar->virt_addr;
+ }
+ handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid;
+ handle->hal_handle->ae_mask = hw_data->ae_mask;
+ handle->hal_handle->admin_ae_mask = hw_data->admin_ae_mask;
+ handle->hal_handle->slice_mask = hw_data->accel_mask;
+ handle->cfg_ae_mask = ALL_AE_MASK;
+ /* create AE objects */
+ handle->hal_handle->upc_mask = 0x1ffff;
+ handle->hal_handle->max_ustore = 0x4000;
+
+ ae_mask = handle->hal_handle->ae_mask;
+ for_each_set_bit(ae, &ae_mask, ICP_QAT_UCLO_MAX_AE) {
+ handle->hal_handle->aes[ae].free_addr = 0;
+ handle->hal_handle->aes[ae].free_size =
+ handle->hal_handle->max_ustore;
+ handle->hal_handle->aes[ae].ustore_size =
+ handle->hal_handle->max_ustore;
+ handle->hal_handle->aes[ae].live_ctx_mask =
+ ICP_QAT_UCLO_AE_ALL_CTX;
+ max_en_ae_id = ae;
+ }
+ handle->hal_handle->ae_max_num = max_en_ae_id + 1;
+
+ /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
+ for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+ csr_val = qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE);
+ csr_val |= 0x1;
+ qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
+ }
+out_err:
+ return ret;
+}
+
+int qat_hal_init(struct adf_accel_dev *accel_dev)
+{
+ struct icp_qat_fw_loader_handle *handle;
+ int ret = 0;
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL);
+ if (!handle->hal_handle) {
+ ret = -ENOMEM;
+ goto out_hal_handle;
+ }
+
+ handle->chip_info = kzalloc(sizeof(*handle->chip_info), GFP_KERNEL);
+ if (!handle->chip_info) {
+ ret = -ENOMEM;
+ goto out_chip_info;
+ }
+
+ ret = qat_hal_chip_init(handle, accel_dev);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "qat_hal_chip_init error\n");
+ goto out_err;
+ }
+
+ /* take all AEs out of reset */
+ ret = qat_hal_clr_reset(handle);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "qat_hal_clr_reset error\n");
+ goto out_err;
+ }
+
+ qat_hal_clear_xfer(handle);
+ if (!handle->chip_info->fw_auth) {
+ ret = qat_hal_clear_gpr(handle);
+ if (ret)
+ goto out_err;
+ }
+
+ accel_dev->fw_loader->fw_loader = handle;
+ return 0;
+
+out_err:
+ kfree(handle->chip_info);
+out_chip_info:
+ kfree(handle->hal_handle);
+out_hal_handle:
+ kfree(handle);
+ return ret;
+}
+
+void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle)
+{
+ if (!handle)
+ return;
+ kfree(handle->chip_info);
+ kfree(handle->hal_handle);
+ kfree(handle);
+}
+
+int qat_hal_start(struct icp_qat_fw_loader_handle *handle)
+{
+ unsigned long ae_mask = handle->hal_handle->ae_mask;
+ u32 wakeup_val = handle->chip_info->wakeup_event_val;
+ u32 fcu_ctl_csr, fcu_sts_csr;
+ unsigned int fcu_sts;
+ unsigned char ae;
+ u32 ae_ctr = 0;
+ int retry = 0;
+
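+	/* With FW authentication the AEs are started through the FCU; otherwise enable the contexts directly */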
+ if (handle->chip_info->fw_auth) {
+ fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
+ fcu_sts_csr = handle->chip_info->fcu_sts_csr;
+ ae_ctr = hweight32(ae_mask);
+ SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_START);
+ do {
+ msleep(FW_AUTH_WAIT_PERIOD);
+ fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
+ if (((fcu_sts >> FCU_STS_DONE_POS) & 0x1))
+ return ae_ctr;
+ } while (retry++ < FW_AUTH_MAX_RETRY);
+ pr_err("QAT: start error (FCU_STS = 0x%x)\n", fcu_sts);
+ return 0;
+ } else {
+ for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+ qat_hal_put_wakeup_event(handle, ae, 0, wakeup_val);
+ qat_hal_enable_ctx(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX);
+ ae_ctr++;
+ }
+ return ae_ctr;
+ }
+}
+
+void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+ unsigned int ctx_mask)
+{
+ if (!handle->chip_info->fw_auth)
+ qat_hal_disable_ctx(handle, ae, ctx_mask);
+}
+
+void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int ctx_mask, unsigned int upc)
+{
+ qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
+ handle->hal_handle->upc_mask & upc);
+}
+
+static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int uaddr,
+ unsigned int words_num, u64 *uword)
+{
+ unsigned int i, uwrd_lo, uwrd_hi;
+ unsigned int ustore_addr, misc_control;
+
+ misc_control = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
+ qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL,
+ misc_control & 0xfffffffb);
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
+ uaddr |= UA_ECS;
+ for (i = 0; i < words_num; i++) {
+ qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+ uaddr++;
+ uwrd_lo = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER);
+ uwrd_hi = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER);
+ uword[i] = uwrd_hi;
+ uword[i] = (uword[i] << 0x20) | uwrd_lo;
+ }
+ qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control);
+ qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+}
+
+void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int uaddr,
+ unsigned int words_num, unsigned int *data)
+{
+ unsigned int i, ustore_addr;
+
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
+ uaddr |= UA_ECS;
+ qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+ for (i = 0; i < words_num; i++) {
+ unsigned int uwrd_lo, uwrd_hi, tmp;
+
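+		/* Pack the 32-bit value into a ustore word with a parity bit per 16-bit half */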
+ uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) |
+ ((data[i] & 0xff00) << 2) |
+ (0x3 << 8) | (data[i] & 0xff);
+ uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28);
+ uwrd_hi |= (hweight32(data[i] & 0xffff) & 0x1) << 8;
+ tmp = ((data[i] >> 0x10) & 0xffff);
+ uwrd_hi |= (hweight32(tmp) & 0x1) << 9;
+ qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
+ qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
+ }
+ qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+}
+
+#define MAX_EXEC_INST 100
+static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx,
+ u64 *micro_inst, unsigned int inst_num,
+ int code_off, unsigned int max_cycle,
+ unsigned int *endpc)
+{
+ unsigned int ind_lm_addr_byte0 = 0, ind_lm_addr_byte1 = 0;
+ unsigned int ind_lm_addr_byte2 = 0, ind_lm_addr_byte3 = 0;
+ unsigned int ind_t_index = 0, ind_t_index_byte = 0;
+ unsigned int ind_lm_addr0 = 0, ind_lm_addr1 = 0;
+ unsigned int ind_lm_addr2 = 0, ind_lm_addr3 = 0;
+ u64 savuwords[MAX_EXEC_INST];
+ unsigned int ind_cnt_sig;
+ unsigned int ind_sig, act_sig;
+ unsigned int csr_val = 0, newcsr_val;
+ unsigned int savctx;
+ unsigned int savcc, wakeup_events, savpc;
+ unsigned int ctxarb_ctl, ctx_enables;
+
+ if ((inst_num > handle->hal_handle->max_ustore) || !micro_inst) {
+ pr_err("QAT: invalid instruction num %d\n", inst_num);
+ return -EINVAL;
+ }
+ /* save current context */
+ ind_lm_addr0 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT);
+ ind_lm_addr1 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT);
+ ind_lm_addr_byte0 = qat_hal_rd_indr_csr(handle, ae, ctx,
+ INDIRECT_LM_ADDR_0_BYTE_INDEX);
+ ind_lm_addr_byte1 = qat_hal_rd_indr_csr(handle, ae, ctx,
+ INDIRECT_LM_ADDR_1_BYTE_INDEX);
+ if (handle->chip_info->lm2lm3) {
+ ind_lm_addr2 = qat_hal_rd_indr_csr(handle, ae, ctx,
+ LM_ADDR_2_INDIRECT);
+ ind_lm_addr3 = qat_hal_rd_indr_csr(handle, ae, ctx,
+ LM_ADDR_3_INDIRECT);
+ ind_lm_addr_byte2 = qat_hal_rd_indr_csr(handle, ae, ctx,
+ INDIRECT_LM_ADDR_2_BYTE_INDEX);
+ ind_lm_addr_byte3 = qat_hal_rd_indr_csr(handle, ae, ctx,
+ INDIRECT_LM_ADDR_3_BYTE_INDEX);
+ ind_t_index = qat_hal_rd_indr_csr(handle, ae, ctx,
+ INDIRECT_T_INDEX);
+ ind_t_index_byte = qat_hal_rd_indr_csr(handle, ae, ctx,
+ INDIRECT_T_INDEX_BYTE_INDEX);
+ }
+ if (inst_num <= MAX_EXEC_INST)
+ qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
+ qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
+ savpc = qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT);
+ savpc = (savpc & handle->hal_handle->upc_mask) >> 0;
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+ ctx_enables &= IGNORE_W1C_MASK;
+ savcc = qat_hal_rd_ae_csr(handle, ae, CC_ENABLE);
+ savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+ ctxarb_ctl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
+ ind_cnt_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
+ FUTURE_COUNT_SIGNAL_INDIRECT);
+ ind_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
+ CTX_SIG_EVENTS_INDIRECT);
+ act_sig = qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE);
+ /* execute micro codes */
+	/* execute the microcode */
+ qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
+ qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0);
+ qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO);
+ if (code_off)
+ qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff);
+ qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY);
+ qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0);
+ qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
+ qat_hal_enable_ctx(handle, ae, (1 << ctx));
+	/* wait for the microcode to finish */
+ if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0)
+ return -EFAULT;
+ if (endpc) {
+ unsigned int ctx_status;
+
+ ctx_status = qat_hal_rd_indr_csr(handle, ae, ctx,
+ CTX_STS_INDIRECT);
+ *endpc = ctx_status & handle->hal_handle->upc_mask;
+ }
+	/* restore the saved context */
+ qat_hal_disable_ctx(handle, ae, (1 << ctx));
+ if (inst_num <= MAX_EXEC_INST)
+ qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords);
+ qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
+ qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT,
+ handle->hal_handle->upc_mask & savpc);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
+ newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
+ qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
+ qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
+ qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO);
+ qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl);
+ qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+ LM_ADDR_0_INDIRECT, ind_lm_addr0);
+ qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+ LM_ADDR_1_INDIRECT, ind_lm_addr1);
+ qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+ INDIRECT_LM_ADDR_0_BYTE_INDEX, ind_lm_addr_byte0);
+ qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+ INDIRECT_LM_ADDR_1_BYTE_INDEX, ind_lm_addr_byte1);
+ if (handle->chip_info->lm2lm3) {
+ qat_hal_wr_indr_csr(handle, ae, BIT(ctx), LM_ADDR_2_INDIRECT,
+ ind_lm_addr2);
+ qat_hal_wr_indr_csr(handle, ae, BIT(ctx), LM_ADDR_3_INDIRECT,
+ ind_lm_addr3);
+ qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
+ INDIRECT_LM_ADDR_2_BYTE_INDEX,
+ ind_lm_addr_byte2);
+ qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
+ INDIRECT_LM_ADDR_3_BYTE_INDEX,
+ ind_lm_addr_byte3);
+ qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
+ INDIRECT_T_INDEX, ind_t_index);
+ qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
+ INDIRECT_T_INDEX_BYTE_INDEX,
+ ind_t_index_byte);
+ }
+ qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+ FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig);
+ qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+ CTX_SIG_EVENTS_INDIRECT, ind_sig);
+ qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig);
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+
+ return 0;
+}
+
+static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx,
+ enum icp_qat_uof_regtype reg_type,
+ unsigned short reg_num, unsigned int *data)
+{
+ unsigned int savctx, uaddr, uwrd_lo, uwrd_hi;
+ unsigned int ctxarb_cntl, ustore_addr, ctx_enables;
+ unsigned short reg_addr;
+ int status = 0;
+ u64 insts, savuword;
+
+ reg_addr = qat_hal_get_reg_addr(reg_type, reg_num);
+ if (reg_addr == BAD_REGADDR) {
+ pr_err("QAT: bad regaddr=0x%x\n", reg_addr);
+ return -EINVAL;
+ }
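+	/* Build an instruction that routes the register through the ALU so its value can be read back from ALU_OUT */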
+ switch (reg_type) {
+ case ICP_GPA_REL:
+ insts = 0xA070000000ull | (reg_addr & 0x3ff);
+ break;
+ default:
+ insts = (u64)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
+ break;
+ }
+ savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+ ctxarb_cntl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+ ctx_enables &= IGNORE_W1C_MASK;
+ if (ctx != (savctx & ACS_ACNO))
+ qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
+ ctx & ACS_ACNO);
+ qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
+ uaddr = UA_ECS;
+ qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+ insts = qat_hal_set_uword_ecc(insts);
+ uwrd_lo = (unsigned int)(insts & 0xffffffff);
+ uwrd_hi = (unsigned int)(insts >> 0x20);
+ qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
+ qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
+ qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+ /* delay for at least 8 cycles */
+ qat_hal_wait_cycles(handle, ae, 0x8, 0);
+ /*
+ * read ALU output
+ * the instruction should have been executed
+ * prior to clearing the ECS in putUwords
+ */
+ *data = qat_hal_rd_ae_csr(handle, ae, ALU_OUT);
+ qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+ qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
+ if (ctx != (savctx & ACS_ACNO))
+ qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
+ savctx & ACS_ACNO);
+ qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl);
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+
+ return status;
+}
+
+static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx,
+ enum icp_qat_uof_regtype reg_type,
+ unsigned short reg_num, unsigned int data)
+{
+ unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo;
+ u64 insts[] = {
+ 0x0F440000000ull,
+ 0x0F040000000ull,
+ 0x0F0000C0300ull,
+ 0x0E000010000ull
+ };
+ const int num_inst = ARRAY_SIZE(insts), code_off = 1;
+ const int imm_w1 = 0, imm_w0 = 1;
+
+ dest_addr = qat_hal_get_reg_addr(reg_type, reg_num);
+ if (dest_addr == BAD_REGADDR) {
+ pr_err("QAT: bad destAddr=0x%x\n", dest_addr);
+ return -EINVAL;
+ }
+
+ data16lo = 0xffff & data;
+ data16hi = 0xffff & (data >> 0x10);
+ src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
+ (0xff & data16hi));
+ src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
+ (0xff & data16lo));
+ switch (reg_type) {
+ case ICP_GPA_REL:
+ insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
+ ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
+ insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
+ ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
+ break;
+ default:
+ insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
+ ((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
+
+ insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
+ ((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
+ break;
+ }
+
+ return qat_hal_exec_micro_inst(handle, ae, ctx, insts, num_inst,
+ code_off, num_inst * 0x5, NULL);
+}
+
+int qat_hal_get_ins_num(void)
+{
+ return ARRAY_SIZE(inst_4b);
+}
+
+static int qat_hal_concat_micro_code(u64 *micro_inst,
+ unsigned int inst_num, unsigned int size,
+ unsigned int addr, unsigned int *value)
+{
+ int i;
+ unsigned int cur_value;
+ const u64 *inst_arr;
+ int fixup_offset;
+ int usize = 0;
+ int orig_num;
+
+ orig_num = inst_num;
+ cur_value = value[0];
+ inst_arr = inst_4b;
+ usize = ARRAY_SIZE(inst_4b);
+ fixup_offset = inst_num;
+ for (i = 0; i < usize; i++)
+ micro_inst[inst_num++] = inst_arr[i];
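+	/* Patch the copied template with the target address and the value split into two 16-bit immediates */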
+ INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], (addr));
+ fixup_offset++;
+ INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], 0);
+ fixup_offset++;
+ INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0));
+ fixup_offset++;
+ INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0x10));
+
+ return inst_num - orig_num;
+}
+
+static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx,
+ int *pfirst_exec, u64 *micro_inst,
+ unsigned int inst_num)
+{
+ int stat = 0;
+ unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0;
+ unsigned int gprb0 = 0, gprb1 = 0;
+
+ if (*pfirst_exec) {
+ qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0);
+ qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, &gpra1);
+ qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2);
+ qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0);
+ qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, &gprb1);
+ *pfirst_exec = 0;
+ }
+ stat = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, inst_num, 1,
+ inst_num * 0x5, NULL);
+ if (stat != 0)
+ return -EFAULT;
+ qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0);
+ qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1);
+ qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2);
+ qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0);
+ qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1);
+
+ return 0;
+}
+
+int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae,
+ struct icp_qat_uof_batch_init *lm_init_header)
+{
+ struct icp_qat_uof_batch_init *plm_init;
+ u64 *micro_inst_arry;
+ int micro_inst_num;
+ int alloc_inst_size;
+ int first_exec = 1;
+ int stat = 0;
+
+ plm_init = lm_init_header->next;
+ alloc_inst_size = lm_init_header->size;
+ if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore)
+ alloc_inst_size = handle->hal_handle->max_ustore;
+ micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(u64),
+ GFP_KERNEL);
+ if (!micro_inst_arry)
+ return -ENOMEM;
+ micro_inst_num = 0;
+ while (plm_init) {
+ unsigned int addr, *value, size;
+
+ ae = plm_init->ae;
+ addr = plm_init->addr;
+ value = plm_init->value;
+ size = plm_init->size;
+ micro_inst_num += qat_hal_concat_micro_code(micro_inst_arry,
+ micro_inst_num,
+ size, addr, value);
+ plm_init = plm_init->next;
+ }
+	/* execute the microcode */
+ if (micro_inst_arry && micro_inst_num > 0) {
+ micro_inst_arry[micro_inst_num++] = 0x0E000010000ull;
+ stat = qat_hal_exec_micro_init_lm(handle, ae, 0, &first_exec,
+ micro_inst_arry,
+ micro_inst_num);
+ }
+ kfree(micro_inst_arry);
+ return stat;
+}
+
+static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx,
+ enum icp_qat_uof_regtype reg_type,
+ unsigned short reg_num, unsigned int val)
+{
+ int status = 0;
+ unsigned int reg_addr;
+ unsigned int ctx_enables;
+ unsigned short mask;
+ unsigned short dr_offset = 0x10;
+
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+ if (CE_INUSE_CONTEXTS & ctx_enables) {
+ if (ctx & 0x1) {
+			pr_err("QAT: bad 4-ctx mode, ctx=0x%x\n", ctx);
+ return -EINVAL;
+ }
+ mask = 0x1f;
+ dr_offset = 0x20;
+ } else {
+ mask = 0x0f;
+ }
+ if (reg_num & ~mask)
+ return -EINVAL;
+ reg_addr = reg_num + (ctx << 0x5);
+ switch (reg_type) {
+ case ICP_SR_RD_REL:
+ case ICP_SR_REL:
+ SET_AE_XFER(handle, ae, reg_addr, val);
+ break;
+ case ICP_DR_RD_REL:
+ case ICP_DR_REL:
+ SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val);
+ break;
+ default:
+ status = -EINVAL;
+ break;
+ }
+ return status;
+}
+
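+/*
+ * Write @data into a relative write-transfer (or neighbour) register by
+ * executing a short microcode stub on the target context. The value is
+ * staged through GPR B0, which is restored once the stub completes.
+ */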
+static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx,
+ enum icp_qat_uof_regtype reg_type,
+ unsigned short reg_num, unsigned int data)
+{
+ unsigned int gprval, ctx_enables;
+ unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi,
+ data16low;
+ unsigned short reg_mask;
+ int status = 0;
+ u64 micro_inst[] = {
+ 0x0F440000000ull,
+ 0x0F040000000ull,
+ 0x0A000000000ull,
+ 0x0F0000C0300ull,
+ 0x0E000010000ull
+ };
+ const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
+ const unsigned short gprnum = 0, dly = num_inst * 0x5;
+
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+ if (CE_INUSE_CONTEXTS & ctx_enables) {
+ if (ctx & 0x1) {
+ pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx);
+ return -EINVAL;
+ }
+ reg_mask = (unsigned short)~0x1f;
+ } else {
+ reg_mask = (unsigned short)~0xf;
+ }
+ if (reg_num & reg_mask)
+ return -EINVAL;
+ xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num);
+ if (xfr_addr == BAD_REGADDR) {
+ pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
+ return -EINVAL;
+ }
+ status = qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
+ if (status) {
+ pr_err("QAT: failed to read register");
+ return status;
+ }
+ gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
+ data16low = 0xffff & data;
+ data16hi = 0xffff & (data >> 0x10);
+ src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
+ (unsigned short)(0xff & data16hi));
+ src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
+ (unsigned short)(0xff & data16low));
+ micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) |
+ ((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
+ micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) |
+ ((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
+ micro_inst[0x2] = micro_inst[0x2] |
+ ((xfr_addr & 0x3ff) << 20) | ((gpr_addr & 0x3ff) << 10);
+ status = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, num_inst,
+ code_off, dly, NULL);
+ qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval);
+ return status;
+}
+
+static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx,
+ unsigned short nn, unsigned int val)
+{
+ unsigned int ctx_enables;
+ int stat = 0;
+
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+ ctx_enables &= IGNORE_W1C_MASK;
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);
+
+ stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val);
+ qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+ return stat;
+}
+
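+/*
+ * Map an absolute register number to a (context, relative register) pair.
+ * In 4-context mode only the even contexts are addressable; in 8-context
+ * mode all eight contexts are used.
+ */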
+static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle
+ *handle, unsigned char ae,
+ unsigned short absreg_num,
+ unsigned short *relreg,
+ unsigned char *ctx)
+{
+ unsigned int ctx_enables;
+
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+ if (ctx_enables & CE_INUSE_CONTEXTS) {
+ /* 4-ctx mode */
+ *relreg = absreg_num & 0x1F;
+ *ctx = (absreg_num >> 0x4) & 0x6;
+ } else {
+ /* 8-ctx mode */
+ *relreg = absreg_num & 0x0F;
+ *ctx = (absreg_num >> 0x4) & 0x7;
+ }
+ return 0;
+}
+
+int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned long ctx_mask,
+ enum icp_qat_uof_regtype reg_type,
+ unsigned short reg_num, unsigned int regdata)
+{
+ int stat = 0;
+ unsigned short reg;
+ unsigned char ctx = 0;
+ enum icp_qat_uof_regtype type;
+
+ if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG)
+ return -EINVAL;
+
+ do {
+ if (ctx_mask == 0) {
+ qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
+ &ctx);
+ type = reg_type - 1;
+ } else {
+ reg = reg_num;
+ type = reg_type;
+ if (!test_bit(ctx, &ctx_mask))
+ continue;
+ }
+ stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata);
+ if (stat) {
+ pr_err("QAT: write gpr fail\n");
+ return -EINVAL;
+ }
+ } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
+
+ return 0;
+}
+
+int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned long ctx_mask,
+ enum icp_qat_uof_regtype reg_type,
+ unsigned short reg_num, unsigned int regdata)
+{
+ int stat = 0;
+ unsigned short reg;
+ unsigned char ctx = 0;
+ enum icp_qat_uof_regtype type;
+
+ if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
+ return -EINVAL;
+
+ do {
+ if (ctx_mask == 0) {
+ qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
+ &ctx);
+ type = reg_type - 3;
+ } else {
+ reg = reg_num;
+ type = reg_type;
+ if (!test_bit(ctx, &ctx_mask))
+ continue;
+ }
+ stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, type, reg,
+ regdata);
+ if (stat) {
+ pr_err("QAT: write wr xfer fail\n");
+ return -EINVAL;
+ }
+ } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
+
+ return 0;
+}
+
+int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned long ctx_mask,
+ enum icp_qat_uof_regtype reg_type,
+ unsigned short reg_num, unsigned int regdata)
+{
+ int stat = 0;
+ unsigned short reg;
+ unsigned char ctx = 0;
+ enum icp_qat_uof_regtype type;
+
+ if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
+ return -EINVAL;
+
+ do {
+ if (ctx_mask == 0) {
+ qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
+ &ctx);
+ type = reg_type - 3;
+ } else {
+ reg = reg_num;
+ type = reg_type;
+ if (!test_bit(ctx, &ctx_mask))
+ continue;
+ }
+ stat = qat_hal_put_rel_rd_xfer(handle, ae, ctx, type, reg,
+ regdata);
+ if (stat) {
+ pr_err("QAT: write rd xfer fail\n");
+ return -EINVAL;
+ }
+ } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
+
+ return 0;
+}
+
+int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned long ctx_mask,
+ unsigned short reg_num, unsigned int regdata)
+{
+ int stat = 0;
+ unsigned char ctx;
+
+ if (!handle->chip_info->nn) {
+ dev_err(&handle->pci_dev->dev, "QAT: No next neigh in 0x%x\n",
+ handle->pci_dev->device);
+ return -EINVAL;
+ }
+
+ if (ctx_mask == 0)
+ return -EINVAL;
+
+ for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+ if (!test_bit(ctx, &ctx_mask))
+ continue;
+ stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata);
+ if (stat) {
+ pr_err("QAT: write neigh error\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
new file mode 100644
index 0000000000..4bd150d144
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
@@ -0,0 +1,2130 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/pci_ids.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_uclo.h"
+#include "icp_qat_hal.h"
+#include "icp_qat_fw_loader_handle.h"
+
+#define UWORD_CPYBUF_SIZE 1024U
+#define INVLD_UWORD 0xffffffffffull
+#define PID_MINOR_REV 0xf
+#define PID_MAJOR_REV (0xf << 4)
+
+static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
+ unsigned int ae, unsigned int image_num)
+{
+ struct icp_qat_uclo_aedata *ae_data;
+ struct icp_qat_uclo_encapme *encap_image;
+ struct icp_qat_uclo_page *page = NULL;
+ struct icp_qat_uclo_aeslice *ae_slice = NULL;
+
+ ae_data = &obj_handle->ae_data[ae];
+ encap_image = &obj_handle->ae_uimage[image_num];
+ ae_slice = &ae_data->ae_slices[ae_data->slice_num];
+ ae_slice->encap_image = encap_image;
+
+ if (encap_image->img_ptr) {
+ ae_slice->ctx_mask_assigned =
+ encap_image->img_ptr->ctx_assigned;
+ ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
+ } else {
+ ae_slice->ctx_mask_assigned = 0;
+ }
+ ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
+ if (!ae_slice->region)
+ return -ENOMEM;
+ ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
+ if (!ae_slice->page)
+ goto out_err;
+ page = ae_slice->page;
+ page->encap_page = encap_image->page;
+ ae_slice->page->region = ae_slice->region;
+ ae_data->slice_num++;
+ return 0;
+out_err:
+ kfree(ae_slice->region);
+ ae_slice->region = NULL;
+ return -ENOMEM;
+}
+
+static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
+{
+ unsigned int i;
+
+ if (!ae_data) {
+ pr_err("QAT: bad argument, ae_data is NULL\n ");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ae_data->slice_num; i++) {
+ kfree(ae_data->ae_slices[i].region);
+ ae_data->ae_slices[i].region = NULL;
+ kfree(ae_data->ae_slices[i].page);
+ ae_data->ae_slices[i].page = NULL;
+ }
+ return 0;
+}
+
+static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
+ unsigned int str_offset)
+{
+ if (!str_table->table_len || str_offset > str_table->table_len)
+ return NULL;
+ return (char *)(((uintptr_t)(str_table->strings)) + str_offset);
+}
+
+static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
+{
+ int maj = hdr->maj_ver & 0xff;
+ int min = hdr->min_ver & 0xff;
+
+ if (hdr->file_id != ICP_QAT_UOF_FID) {
+ pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
+ return -EINVAL;
+ }
+ if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
+ pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
+ maj, min);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr)
+{
+ int maj = suof_hdr->maj_ver & 0xff;
+ int min = suof_hdr->min_ver & 0xff;
+
+ if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
+ pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
+ return -EINVAL;
+ }
+ if (suof_hdr->fw_type != 0) {
+ pr_err("QAT: unsupported firmware type\n");
+ return -EINVAL;
+ }
+ if (suof_hdr->num_chunks <= 0x1) {
+ pr_err("QAT: SUOF chunk amount is incorrect\n");
+ return -EINVAL;
+ }
+ if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
+ pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
+ maj, min);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
+ unsigned int addr, unsigned int *val,
+ unsigned int num_in_bytes)
+{
+ unsigned int outval;
+ unsigned char *ptr = (unsigned char *)val;
+
+ while (num_in_bytes) {
+ memcpy(&outval, ptr, 4);
+ SRAM_WRITE(handle, addr, outval);
+ num_in_bytes -= 4;
+ ptr += 4;
+ addr += 4;
+ }
+}
+
+static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned int addr,
+ unsigned int *val,
+ unsigned int num_in_bytes)
+{
+ unsigned int outval;
+ unsigned char *ptr = (unsigned char *)val;
+
+ addr >>= 0x2; /* convert to uword address */
+
+ while (num_in_bytes) {
+ memcpy(&outval, ptr, 4);
+ qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
+ num_in_bytes -= 4;
+ ptr += 4;
+ }
+}
+
+static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae,
+ struct icp_qat_uof_batch_init
+ *umem_init_header)
+{
+ struct icp_qat_uof_batch_init *umem_init;
+
+ if (!umem_init_header)
+ return;
+ umem_init = umem_init_header->next;
+ while (umem_init) {
+ unsigned int addr, *value, size;
+
+ ae = umem_init->ae;
+ addr = umem_init->addr;
+ value = umem_init->value;
+ size = umem_init->size;
+ qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
+ umem_init = umem_init->next;
+ }
+}
+
+static void
+qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
+ struct icp_qat_uof_batch_init **base)
+{
+ struct icp_qat_uof_batch_init *umem_init;
+
+ umem_init = *base;
+ while (umem_init) {
+ struct icp_qat_uof_batch_init *pre;
+
+ pre = umem_init;
+ umem_init = umem_init->next;
+ kfree(pre);
+ }
+ *base = NULL;
+}
+
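+/*
+ * Parse the leading decimal digits of an initmem symbol name, e.g. to
+ * recover the target AE index.
+ */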
+static int qat_uclo_parse_num(char *str, unsigned int *num)
+{
+ char buf[16] = {0};
+ unsigned long ae = 0;
+ int i;
+
+ strncpy(buf, str, 15);
+ for (i = 0; i < 16; i++) {
+ if (!isdigit(buf[i])) {
+ buf[i] = '\0';
+ break;
+ }
+ }
+ if ((kstrtoul(buf, 10, &ae)))
+ return -EFAULT;
+
+ *num = (unsigned int)ae;
+ return 0;
+}
+
+static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
+ struct icp_qat_uof_initmem *init_mem,
+ unsigned int size_range, unsigned int *ae)
+{
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+ char *str;
+
+ if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
+ pr_err("QAT: initmem is out of range");
+ return -EINVAL;
+ }
+ if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
+ pr_err("QAT: Memory scope for init_mem error\n");
+ return -EINVAL;
+ }
+ str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
+ if (!str) {
+ pr_err("QAT: AE name assigned in UOF init table is NULL\n");
+ return -EINVAL;
+ }
+ if (qat_uclo_parse_num(str, ae)) {
+ pr_err("QAT: Parse num for AE number failed\n");
+ return -EINVAL;
+ }
+ if (*ae >= ICP_QAT_UCLO_MAX_AE) {
+ pr_err("QAT: ae %d out of range\n", *ae);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
+ *handle, struct icp_qat_uof_initmem
+ *init_mem, unsigned int ae,
+ struct icp_qat_uof_batch_init
+ **init_tab_base)
+{
+ struct icp_qat_uof_batch_init *init_header, *tail;
+ struct icp_qat_uof_batch_init *mem_init, *tail_old;
+ struct icp_qat_uof_memvar_attr *mem_val_attr;
+ unsigned int i, flag = 0;
+
+ mem_val_attr =
+ (struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem +
+ sizeof(struct icp_qat_uof_initmem));
+
+ init_header = *init_tab_base;
+ if (!init_header) {
+ init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
+ if (!init_header)
+ return -ENOMEM;
+ init_header->size = 1;
+ *init_tab_base = init_header;
+ flag = 1;
+ }
+ tail_old = init_header;
+ while (tail_old->next)
+ tail_old = tail_old->next;
+ tail = tail_old;
+ for (i = 0; i < init_mem->val_attr_num; i++) {
+ mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
+ if (!mem_init)
+ goto out_err;
+ mem_init->ae = ae;
+ mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
+ mem_init->value = &mem_val_attr->value;
+ mem_init->size = 4;
+ mem_init->next = NULL;
+ tail->next = mem_init;
+ tail = mem_init;
+ init_header->size += qat_hal_get_ins_num();
+ mem_val_attr++;
+ }
+ return 0;
+out_err:
+ /* Do not free the list head unless we allocated it. */
+ tail_old = tail_old->next;
+ if (flag) {
+ kfree(*init_tab_base);
+ *init_tab_base = NULL;
+ }
+
+ while (tail_old) {
+ mem_init = tail_old->next;
+ kfree(tail_old);
+ tail_old = mem_init;
+ }
+ return -ENOMEM;
+}
+
+static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
+ struct icp_qat_uof_initmem *init_mem)
+{
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+ unsigned int ae;
+
+ if (qat_uclo_fetch_initmem_ae(handle, init_mem,
+ handle->chip_info->lm_size, &ae))
+ return -EINVAL;
+ if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
+ &obj_handle->lm_init_tab[ae]))
+ return -EINVAL;
+ return 0;
+}
+
+static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
+ struct icp_qat_uof_initmem *init_mem)
+{
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+ unsigned int ae, ustore_size, uaddr, i;
+ struct icp_qat_uclo_aedata *aed;
+
+ ustore_size = obj_handle->ustore_phy_size;
+ if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
+ return -EINVAL;
+ if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
+ &obj_handle->umem_init_tab[ae]))
+ return -EINVAL;
+ /* set the highest ustore address referenced */
+ uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
+ aed = &obj_handle->ae_data[ae];
+ for (i = 0; i < aed->slice_num; i++) {
+ if (aed->ae_slices[i].encap_image->uwords_num < uaddr)
+ aed->ae_slices[i].encap_image->uwords_num = uaddr;
+ }
+ return 0;
+}
+
+static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
+ struct icp_qat_uof_initmem *init_mem)
+{
+ switch (init_mem->region) {
+ case ICP_QAT_UOF_LMEM_REGION:
+ if (qat_uclo_init_lmem_seg(handle, init_mem))
+ return -EINVAL;
+ break;
+ case ICP_QAT_UOF_UMEM_REGION:
+ if (qat_uclo_init_umem_seg(handle, init_mem))
+ return -EINVAL;
+ break;
+ default:
+ pr_err("QAT: initmem region error. region type=0x%x\n",
+ init_mem->region);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
+ struct icp_qat_uclo_encapme *image)
+{
+ unsigned int i;
+ struct icp_qat_uclo_encap_page *page;
+ struct icp_qat_uof_image *uof_image;
+ unsigned char ae;
+ unsigned int ustore_size;
+ unsigned int patt_pos;
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+ unsigned long ae_mask = handle->hal_handle->ae_mask;
+ unsigned long cfg_ae_mask = handle->cfg_ae_mask;
+ u64 *fill_data;
+
+ uof_image = image->img_ptr;
+ fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(u64),
+ GFP_KERNEL);
+ if (!fill_data)
+ return -ENOMEM;
+ for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
+ memcpy(&fill_data[i], &uof_image->fill_pattern,
+ sizeof(u64));
+ page = image->page;
+
+ for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+ unsigned long ae_assigned = uof_image->ae_assigned;
+
+ if (!test_bit(ae, &ae_assigned))
+ continue;
+
+ if (!test_bit(ae, &cfg_ae_mask))
+ continue;
+
+ ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
+ patt_pos = page->beg_addr_p + page->micro_words_num;
+
+ qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
+ page->beg_addr_p, &fill_data[0]);
+ qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
+ ustore_size - patt_pos + 1,
+ &fill_data[page->beg_addr_p]);
+ }
+ kfree(fill_data);
+ return 0;
+}
+
+static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
+{
+ int i, ae;
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+ struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
+ unsigned long ae_mask = handle->hal_handle->ae_mask;
+
+ for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
+ if (initmem->num_in_bytes) {
+ if (qat_uclo_init_ae_memory(handle, initmem))
+ return -EINVAL;
+ }
+ initmem = (struct icp_qat_uof_initmem *)((uintptr_t)(
+ (uintptr_t)initmem +
+ sizeof(struct icp_qat_uof_initmem)) +
+ (sizeof(struct icp_qat_uof_memvar_attr) *
+ initmem->val_attr_num));
+ }
+
+ for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+ if (qat_hal_batch_wr_lm(handle, ae,
+ obj_handle->lm_init_tab[ae])) {
+ pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
+ return -EINVAL;
+ }
+ qat_uclo_cleanup_batch_init_list(handle,
+ &obj_handle->lm_init_tab[ae]);
+ qat_uclo_batch_wr_umem(handle, ae,
+ obj_handle->umem_init_tab[ae]);
+ qat_uclo_cleanup_batch_init_list(handle,
+ &obj_handle->umem_init_tab[ae]);
+ }
+ return 0;
+}
+
+static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
+ char *chunk_id, void *cur)
+{
+ int i;
+ struct icp_qat_uof_chunkhdr *chunk_hdr =
+ (struct icp_qat_uof_chunkhdr *)
+ ((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr));
+
+ for (i = 0; i < obj_hdr->num_chunks; i++) {
+ if ((cur < (void *)&chunk_hdr[i]) &&
+ !strncmp(chunk_hdr[i].chunk_id, chunk_id,
+ ICP_QAT_UOF_OBJID_LEN)) {
+ return &chunk_hdr[i];
+ }
+ }
+ return NULL;
+}
+
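+/*
+ * One step of the CRC-16 (CCITT polynomial 0x1021) used to verify UOF,
+ * SUOF and MOF chunks.
+ */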
+static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
+{
+ int i;
+ unsigned int topbit = 1 << 0xF;
+ unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);
+
+ reg ^= inbyte << 0x8;
+ for (i = 0; i < 0x8; i++) {
+ if (reg & topbit)
+ reg = (reg << 1) ^ 0x1021;
+ else
+ reg <<= 1;
+ }
+ return reg & 0xFFFF;
+}
+
+static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
+{
+ unsigned int chksum = 0;
+
+ if (ptr)
+ while (num--)
+ chksum = qat_uclo_calc_checksum(chksum, *ptr++);
+ return chksum;
+}
+
+static struct icp_qat_uclo_objhdr *
+qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
+ char *chunk_id)
+{
+ struct icp_qat_uof_filechunkhdr *file_chunk;
+ struct icp_qat_uclo_objhdr *obj_hdr;
+ char *chunk;
+ int i;
+
+ file_chunk = (struct icp_qat_uof_filechunkhdr *)
+ (buf + sizeof(struct icp_qat_uof_filehdr));
+ for (i = 0; i < file_hdr->num_chunks; i++) {
+ if (!strncmp(file_chunk->chunk_id, chunk_id,
+ ICP_QAT_UOF_OBJID_LEN)) {
+ chunk = buf + file_chunk->offset;
+ if (file_chunk->checksum != qat_uclo_calc_str_checksum(
+ chunk, file_chunk->size))
+ break;
+ obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
+ if (!obj_hdr)
+ break;
+ obj_hdr->file_buff = chunk;
+ obj_hdr->checksum = file_chunk->checksum;
+ obj_hdr->size = file_chunk->size;
+ return obj_hdr;
+ }
+ file_chunk++;
+ }
+ return NULL;
+}
+
+static int
+qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
+ struct icp_qat_uof_image *image)
+{
+ struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
+ struct icp_qat_uof_objtable *neigh_reg_tab;
+ struct icp_qat_uof_code_page *code_page;
+
+ code_page = (struct icp_qat_uof_code_page *)
+ ((char *)image + sizeof(struct icp_qat_uof_image));
+ uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
+ code_page->uc_var_tab_offset);
+ imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
+ code_page->imp_var_tab_offset);
+ imp_expr_tab = (struct icp_qat_uof_objtable *)
+ (encap_uof_obj->beg_uof +
+ code_page->imp_expr_tab_offset);
+ if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
+ imp_expr_tab->entry_num) {
+ pr_err("QAT: UOF can't contain imported variable to be parsed\n");
+ return -EINVAL;
+ }
+ neigh_reg_tab = (struct icp_qat_uof_objtable *)
+ (encap_uof_obj->beg_uof +
+ code_page->neigh_reg_tab_offset);
+ if (neigh_reg_tab->entry_num) {
+ pr_err("QAT: UOF can't contain neighbor register table\n");
+ return -EINVAL;
+ }
+ if (image->numpages > 1) {
+ pr_err("QAT: UOF can't contain multiple pages\n");
+ return -EINVAL;
+ }
+ if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
+ pr_err("QAT: UOF can't use shared control store feature\n");
+ return -EFAULT;
+ }
+ if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
+ pr_err("QAT: UOF can't use reloadable feature\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
+ *encap_uof_obj,
+ struct icp_qat_uof_image *img,
+ struct icp_qat_uclo_encap_page *page)
+{
+ struct icp_qat_uof_code_page *code_page;
+ struct icp_qat_uof_code_area *code_area;
+ struct icp_qat_uof_objtable *uword_block_tab;
+ struct icp_qat_uof_uword_block *uwblock;
+ int i;
+
+ code_page = (struct icp_qat_uof_code_page *)
+ ((char *)img + sizeof(struct icp_qat_uof_image));
+ page->def_page = code_page->def_page;
+ page->page_region = code_page->page_region;
+ page->beg_addr_v = code_page->beg_addr_v;
+ page->beg_addr_p = code_page->beg_addr_p;
+ code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
+ code_page->code_area_offset);
+ page->micro_words_num = code_area->micro_words_num;
+ uword_block_tab = (struct icp_qat_uof_objtable *)
+ (encap_uof_obj->beg_uof +
+ code_area->uword_block_tab);
+ page->uwblock_num = uword_block_tab->entry_num;
+ uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
+ sizeof(struct icp_qat_uof_objtable));
+ page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
+ for (i = 0; i < uword_block_tab->entry_num; i++)
+ page->uwblock[i].micro_words =
+ (uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
+}
+
+static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
+ struct icp_qat_uclo_encapme *ae_uimage,
+ int max_image)
+{
+ int i, j;
+ struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
+ struct icp_qat_uof_image *image;
+ struct icp_qat_uof_objtable *ae_regtab;
+ struct icp_qat_uof_objtable *init_reg_sym_tab;
+ struct icp_qat_uof_objtable *sbreak_tab;
+ struct icp_qat_uof_encap_obj *encap_uof_obj =
+ &obj_handle->encap_uof_obj;
+
+ for (j = 0; j < max_image; j++) {
+ chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
+ ICP_QAT_UOF_IMAG, chunk_hdr);
+ if (!chunk_hdr)
+ break;
+ image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
+ chunk_hdr->offset);
+ ae_regtab = (struct icp_qat_uof_objtable *)
+ (image->reg_tab_offset +
+ obj_handle->obj_hdr->file_buff);
+ ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
+ ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
+ (((char *)ae_regtab) +
+ sizeof(struct icp_qat_uof_objtable));
+ init_reg_sym_tab = (struct icp_qat_uof_objtable *)
+ (image->init_reg_sym_tab +
+ obj_handle->obj_hdr->file_buff);
+ ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
+ ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
+ (((char *)init_reg_sym_tab) +
+ sizeof(struct icp_qat_uof_objtable));
+ sbreak_tab = (struct icp_qat_uof_objtable *)
+ (image->sbreak_tab + obj_handle->obj_hdr->file_buff);
+ ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
+ ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
+ (((char *)sbreak_tab) +
+ sizeof(struct icp_qat_uof_objtable));
+ ae_uimage[j].img_ptr = image;
+ if (qat_uclo_check_image_compat(encap_uof_obj, image))
+ goto out_err;
+ ae_uimage[j].page =
+ kzalloc(sizeof(struct icp_qat_uclo_encap_page),
+ GFP_KERNEL);
+ if (!ae_uimage[j].page)
+ goto out_err;
+ qat_uclo_map_image_page(encap_uof_obj, image,
+ ae_uimage[j].page);
+ }
+ return j;
+out_err:
+ for (i = 0; i < j; i++)
+ kfree(ae_uimage[i].page);
+ return 0;
+}
+
+static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
+{
+ int i, ae;
+ int mflag = 0;
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+ unsigned long ae_mask = handle->hal_handle->ae_mask;
+ unsigned long cfg_ae_mask = handle->cfg_ae_mask;
+
+ for_each_set_bit(ae, &ae_mask, max_ae) {
+ if (!test_bit(ae, &cfg_ae_mask))
+ continue;
+
+ for (i = 0; i < obj_handle->uimage_num; i++) {
+ unsigned long ae_assigned = obj_handle->ae_uimage[i].img_ptr->ae_assigned;
+
+ if (!test_bit(ae, &ae_assigned))
+ continue;
+ mflag = 1;
+ if (qat_uclo_init_ae_data(obj_handle, ae, i))
+ return -EINVAL;
+ }
+ }
+ if (!mflag) {
+ pr_err("QAT: uimage uses AE not set\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static struct icp_qat_uof_strtable *
+qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
+ char *tab_name, struct icp_qat_uof_strtable *str_table)
+{
+ struct icp_qat_uof_chunkhdr *chunk_hdr;
+
+ chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
+ obj_hdr->file_buff, tab_name, NULL);
+ if (chunk_hdr) {
+ int hdr_size;
+
+ memcpy(&str_table->table_len, obj_hdr->file_buff +
+ chunk_hdr->offset, sizeof(str_table->table_len));
+ hdr_size = (char *)&str_table->strings - (char *)str_table;
+ str_table->strings = (uintptr_t)obj_hdr->file_buff +
+ chunk_hdr->offset + hdr_size;
+ return str_table;
+ }
+ return NULL;
+}
+
+static void
+qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
+ struct icp_qat_uclo_init_mem_table *init_mem_tab)
+{
+ struct icp_qat_uof_chunkhdr *chunk_hdr;
+
+ chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
+ ICP_QAT_UOF_IMEM, NULL);
+ if (chunk_hdr) {
+ memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
+ chunk_hdr->offset, sizeof(unsigned int));
+ init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
+ (encap_uof_obj->beg_uof + chunk_hdr->offset +
+ sizeof(unsigned int));
+ }
+}
+
+static unsigned int
+qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
+{
+ switch (handle->pci_dev->device) {
+ case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
+ return ICP_QAT_AC_895XCC_DEV_TYPE;
+ case PCI_DEVICE_ID_INTEL_QAT_C62X:
+ return ICP_QAT_AC_C62X_DEV_TYPE;
+ case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
+ return ICP_QAT_AC_C3XXX_DEV_TYPE;
+ case ADF_4XXX_PCI_DEVICE_ID:
+ case ADF_401XX_PCI_DEVICE_ID:
+ case ADF_402XX_PCI_DEVICE_ID:
+ return ICP_QAT_AC_4XXX_A_DEV_TYPE;
+ default:
+ pr_err("QAT: unsupported device 0x%x\n",
+ handle->pci_dev->device);
+ return 0;
+ }
+}
+
+static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
+{
+ unsigned int maj_ver, prod_type = obj_handle->prod_type;
+
+ if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
+ pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n",
+ obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
+ prod_type);
+ return -EINVAL;
+ }
+ maj_ver = obj_handle->prod_rev & 0xff;
+ if (obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver ||
+ obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver) {
+ pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
+ unsigned char ae, unsigned char ctx_mask,
+ enum icp_qat_uof_regtype reg_type,
+ unsigned short reg_addr, unsigned int value)
+{
+ switch (reg_type) {
+ case ICP_GPA_ABS:
+ case ICP_GPB_ABS:
+ ctx_mask = 0;
+ fallthrough;
+ case ICP_GPA_REL:
+ case ICP_GPB_REL:
+ return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
+ reg_addr, value);
+ case ICP_SR_ABS:
+ case ICP_DR_ABS:
+ case ICP_SR_RD_ABS:
+ case ICP_DR_RD_ABS:
+ ctx_mask = 0;
+ fallthrough;
+ case ICP_SR_REL:
+ case ICP_DR_REL:
+ case ICP_SR_RD_REL:
+ case ICP_DR_RD_REL:
+ return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
+ reg_addr, value);
+ case ICP_SR_WR_ABS:
+ case ICP_DR_WR_ABS:
+ ctx_mask = 0;
+ fallthrough;
+ case ICP_SR_WR_REL:
+ case ICP_DR_WR_REL:
+ return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
+ reg_addr, value);
+ case ICP_NEIGH_REL:
+ return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
+ default:
+ pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
+ unsigned int ae,
+ struct icp_qat_uclo_encapme *encap_ae)
+{
+ unsigned int i;
+ unsigned char ctx_mask;
+ struct icp_qat_uof_init_regsym *init_regsym;
+
+ if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
+ ICP_QAT_UCLO_MAX_CTX)
+ ctx_mask = 0xff;
+ else
+ ctx_mask = 0x55;
+
+ for (i = 0; i < encap_ae->init_regsym_num; i++) {
+ unsigned int exp_res;
+
+ init_regsym = &encap_ae->init_regsym[i];
+ exp_res = init_regsym->value;
+ switch (init_regsym->init_type) {
+ case ICP_QAT_UOF_INIT_REG:
+ qat_uclo_init_reg(handle, ae, ctx_mask,
+ (enum icp_qat_uof_regtype)
+ init_regsym->reg_type,
+ (unsigned short)init_regsym->reg_addr,
+ exp_res);
+ break;
+ case ICP_QAT_UOF_INIT_REG_CTX:
+ /* check if ctx is appropriate for the ctxMode */
+ if (!((1 << init_regsym->ctx) & ctx_mask)) {
+ pr_err("QAT: invalid ctx num = 0x%x\n",
+ init_regsym->ctx);
+ return -EINVAL;
+ }
+ qat_uclo_init_reg(handle, ae,
+ (unsigned char)
+ (1 << init_regsym->ctx),
+ (enum icp_qat_uof_regtype)
+ init_regsym->reg_type,
+ (unsigned short)init_regsym->reg_addr,
+ exp_res);
+ break;
+ case ICP_QAT_UOF_INIT_EXPR:
+ pr_err("QAT: INIT_EXPR feature not supported\n");
+ return -EINVAL;
+ case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
+ pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
+ return -EINVAL;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
+{
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+ unsigned long ae_mask = handle->hal_handle->ae_mask;
+ struct icp_qat_uclo_aedata *aed;
+ unsigned int s, ae;
+
+ if (obj_handle->global_inited)
+ return 0;
+ if (obj_handle->init_mem_tab.entry_num) {
+ if (qat_uclo_init_memory(handle)) {
+ pr_err("QAT: initialize memory failed\n");
+ return -EINVAL;
+ }
+ }
+
+ for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+ aed = &obj_handle->ae_data[ae];
+ for (s = 0; s < aed->slice_num; s++) {
+ if (!aed->ae_slices[s].encap_image)
+ continue;
+ if (qat_uclo_init_reg_sym(handle, ae, aed->ae_slices[s].encap_image))
+ return -EINVAL;
+ }
+ }
+ obj_handle->global_inited = 1;
+ return 0;
+}
+
+static int qat_hal_set_modes(struct icp_qat_fw_loader_handle *handle,
+ struct icp_qat_uclo_objhandle *obj_handle,
+ unsigned char ae,
+ struct icp_qat_uof_image *uof_image)
+{
+ unsigned char mode;
+ int ret;
+
+ mode = ICP_QAT_CTX_MODE(uof_image->ae_mode);
+ ret = qat_hal_set_ae_ctx_mode(handle, ae, mode);
+ if (ret) {
+ pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
+ return ret;
+ }
+ if (handle->chip_info->nn) {
+ mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
+ ret = qat_hal_set_ae_nn_mode(handle, ae, mode);
+ if (ret) {
+ pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
+ return ret;
+ }
+ }
+ mode = ICP_QAT_LOC_MEM0_MODE(uof_image->ae_mode);
+ ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, mode);
+ if (ret) {
+ pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
+ return ret;
+ }
+ mode = ICP_QAT_LOC_MEM1_MODE(uof_image->ae_mode);
+ ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, mode);
+ if (ret) {
+ pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
+ return ret;
+ }
+ if (handle->chip_info->lm2lm3) {
+ mode = ICP_QAT_LOC_MEM2_MODE(uof_image->ae_mode);
+ ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM2, mode);
+ if (ret) {
+ pr_err("QAT: qat_hal_set_ae_lm_mode LMEM2 error\n");
+ return ret;
+ }
+ mode = ICP_QAT_LOC_MEM3_MODE(uof_image->ae_mode);
+ ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM3, mode);
+ if (ret) {
+ pr_err("QAT: qat_hal_set_ae_lm_mode LMEM3 error\n");
+ return ret;
+ }
+ mode = ICP_QAT_LOC_TINDEX_MODE(uof_image->ae_mode);
+ qat_hal_set_ae_tindex_mode(handle, ae, mode);
+ }
+ return 0;
+}
+
+static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
+{
+ struct icp_qat_uof_image *uof_image;
+ struct icp_qat_uclo_aedata *ae_data;
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+ unsigned long ae_mask = handle->hal_handle->ae_mask;
+ unsigned long cfg_ae_mask = handle->cfg_ae_mask;
+ unsigned char ae, s;
+ int error;
+
+ for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+ if (!test_bit(ae, &cfg_ae_mask))
+ continue;
+
+ ae_data = &obj_handle->ae_data[ae];
+ for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
+ ICP_QAT_UCLO_MAX_CTX); s++) {
+ if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
+ continue;
+ uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
+ error = qat_hal_set_modes(handle, obj_handle, ae,
+ uof_image);
+ if (error)
+ return error;
+ }
+ }
+ return 0;
+}
+
+static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
+{
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+ struct icp_qat_uclo_encapme *image;
+ int a;
+
+ for (a = 0; a < obj_handle->uimage_num; a++) {
+ image = &obj_handle->ae_uimage[a];
+ image->uwords_num = image->page->beg_addr_p +
+ image->page->micro_words_num;
+ }
+}
+
+static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
+{
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+ unsigned int ae;
+
+ obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
+ obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
+ obj_handle->obj_hdr->file_buff;
+ obj_handle->uword_in_bytes = 6;
+ obj_handle->prod_type = qat_uclo_get_dev_type(handle);
+ obj_handle->prod_rev = PID_MAJOR_REV |
+ (PID_MINOR_REV & handle->hal_handle->revision_id);
+ if (qat_uclo_check_uof_compat(obj_handle)) {
+ pr_err("QAT: UOF incompatible\n");
+ return -EINVAL;
+ }
+ obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(u64),
+ GFP_KERNEL);
+ if (!obj_handle->uword_buf)
+ return -ENOMEM;
+ obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
+ if (!obj_handle->obj_hdr->file_buff ||
+ !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
+ &obj_handle->str_table)) {
+ pr_err("QAT: UOF doesn't have effective images\n");
+ goto out_err;
+ }
+ obj_handle->uimage_num =
+ qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
+ ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
+ if (!obj_handle->uimage_num)
+ goto out_err;
+ if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
+ pr_err("QAT: Bad object\n");
+ goto out_check_uof_aemask_err;
+ }
+ qat_uclo_init_uword_num(handle);
+ qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
+ &obj_handle->init_mem_tab);
+ if (qat_uclo_set_ae_mode(handle))
+ goto out_check_uof_aemask_err;
+ return 0;
+out_check_uof_aemask_err:
+ for (ae = 0; ae < obj_handle->uimage_num; ae++)
+ kfree(obj_handle->ae_uimage[ae].page);
+out_err:
+ kfree(obj_handle->uword_buf);
+ return -EFAULT;
+}
+
+static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
+ struct icp_qat_suof_filehdr *suof_ptr,
+ int suof_size)
+{
+ unsigned int check_sum = 0;
+ unsigned int min_ver_offset = 0;
+ struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
+
+ suof_handle->file_id = ICP_QAT_SUOF_FID;
+ suof_handle->suof_buf = (char *)suof_ptr;
+ suof_handle->suof_size = suof_size;
+ min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr,
+ min_ver);
+ check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver,
+ min_ver_offset);
+ if (check_sum != suof_ptr->check_sum) {
+ pr_err("QAT: incorrect SUOF checksum\n");
+ return -EINVAL;
+ }
+ suof_handle->check_sum = suof_ptr->check_sum;
+ suof_handle->min_ver = suof_ptr->min_ver;
+ suof_handle->maj_ver = suof_ptr->maj_ver;
+ suof_handle->fw_type = suof_ptr->fw_type;
+ return 0;
+}
+
+static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle,
+ struct icp_qat_suof_img_hdr *suof_img_hdr,
+ struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
+{
+ struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
+ struct icp_qat_simg_ae_mode *ae_mode;
+ struct icp_qat_suof_objhdr *suof_objhdr;
+
+ suof_img_hdr->simg_buf = (suof_handle->suof_buf +
+ suof_chunk_hdr->offset +
+ sizeof(*suof_objhdr));
+ suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t)
+ (suof_handle->suof_buf +
+ suof_chunk_hdr->offset))->img_length;
+
+ suof_img_hdr->css_header = suof_img_hdr->simg_buf;
+ suof_img_hdr->css_key = (suof_img_hdr->css_header +
+ sizeof(struct icp_qat_css_hdr));
+ suof_img_hdr->css_signature = suof_img_hdr->css_key +
+ ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
+ ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle);
+ suof_img_hdr->css_simg = suof_img_hdr->css_signature +
+ ICP_QAT_CSS_SIGNATURE_LEN(handle);
+
+ ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
+ suof_img_hdr->ae_mask = ae_mode->ae_mask;
+ suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name;
+ suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data;
+ suof_img_hdr->fw_type = ae_mode->fw_type;
+}
+
+static void
+qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle,
+ struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
+{
+ char **sym_str = (char **)&suof_handle->sym_str;
+ unsigned int *sym_size = &suof_handle->sym_size;
+ struct icp_qat_suof_strtable *str_table_obj;
+
+ *sym_size = *(unsigned int *)(uintptr_t)
+ (suof_chunk_hdr->offset + suof_handle->suof_buf);
+ *sym_str = (char *)(uintptr_t)
+ (suof_handle->suof_buf + suof_chunk_hdr->offset +
+ sizeof(str_table_obj->tab_length));
+}
+
+static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
+ struct icp_qat_suof_img_hdr *img_hdr)
+{
+ struct icp_qat_simg_ae_mode *img_ae_mode = NULL;
+ unsigned int prod_rev, maj_ver, prod_type;
+
+ prod_type = qat_uclo_get_dev_type(handle);
+ img_ae_mode = (struct icp_qat_simg_ae_mode *)img_hdr->css_simg;
+ prod_rev = PID_MAJOR_REV |
+ (PID_MINOR_REV & handle->hal_handle->revision_id);
+ if (img_ae_mode->dev_type != prod_type) {
+ pr_err("QAT: incompatible product type %x\n",
+ img_ae_mode->dev_type);
+ return -EINVAL;
+ }
+ maj_ver = prod_rev & 0xff;
+ if (maj_ver > img_ae_mode->devmax_ver ||
+ maj_ver < img_ae_mode->devmin_ver) {
+ pr_err("QAT: incompatible device majver 0x%x\n", maj_ver);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle)
+{
+ struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
+
+ kfree(sobj_handle->img_table.simg_hdr);
+ sobj_handle->img_table.simg_hdr = NULL;
+ kfree(handle->sobj_handle);
+ handle->sobj_handle = NULL;
+}
+
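+/*
+ * Swap the image at @img_id with the last entry of the image table so
+ * that it is loaded last.
+ */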
+static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr,
+ unsigned int img_id, unsigned int num_simgs)
+{
+ struct icp_qat_suof_img_hdr img_header;
+
+ if (img_id != num_simgs - 1) {
+ memcpy(&img_header, &suof_img_hdr[num_simgs - 1],
+ sizeof(*suof_img_hdr));
+ memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id],
+ sizeof(*suof_img_hdr));
+ memcpy(&suof_img_hdr[img_id], &img_header,
+ sizeof(*suof_img_hdr));
+ }
+}
+
+static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
+ struct icp_qat_suof_filehdr *suof_ptr,
+ int suof_size)
+{
+ struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
+ struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL;
+ struct icp_qat_suof_img_hdr *suof_img_hdr = NULL;
+ int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE;
+ unsigned int i = 0;
+ struct icp_qat_suof_img_hdr img_header;
+
+ if (!suof_ptr || suof_size == 0) {
+ pr_err("QAT: input parameter SUOF pointer/size is NULL\n");
+ return -EINVAL;
+ }
+ if (qat_uclo_check_suof_format(suof_ptr))
+ return -EINVAL;
+ ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size);
+ if (ret)
+ return ret;
+ suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)
+ ((uintptr_t)suof_ptr + sizeof(*suof_ptr));
+
+ qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr);
+ suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1;
+
+ if (suof_handle->img_table.num_simgs != 0) {
+ suof_img_hdr = kcalloc(suof_handle->img_table.num_simgs,
+ sizeof(img_header),
+ GFP_KERNEL);
+ if (!suof_img_hdr)
+ return -ENOMEM;
+ suof_handle->img_table.simg_hdr = suof_img_hdr;
+
+ for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
+ qat_uclo_map_simg(handle, &suof_img_hdr[i],
+ &suof_chunk_hdr[1 + i]);
+ ret = qat_uclo_check_simg_compat(handle,
+ &suof_img_hdr[i]);
+ if (ret)
+ return ret;
+ suof_img_hdr[i].ae_mask &= handle->cfg_ae_mask;
+ if ((suof_img_hdr[i].ae_mask & 0x1) != 0)
+ ae0_img = i;
+ }
+
+ if (!handle->chip_info->tgroup_share_ustore) {
+ qat_uclo_tail_img(suof_img_hdr, ae0_img,
+ suof_handle->img_table.num_simgs);
+ }
+ }
+ return 0;
+}
+
+#define ADD_ADDR(high, low) ((((u64)high) << 32) + low)
+#define BITS_IN_DWORD 32
+
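+/*
+ * Point the FCU at the authentication descriptor in DRAM, issue the AUTH
+ * command and poll the status CSR until verification completes or the
+ * retry limit is hit.
+ */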
+static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
+ struct icp_qat_fw_auth_desc *desc)
+{
+ u32 fcu_sts, retry = 0;
+ u32 fcu_ctl_csr, fcu_sts_csr;
+ u32 fcu_dram_hi_csr, fcu_dram_lo_csr;
+ u64 bus_addr;
+
+ bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low)
+ - sizeof(struct icp_qat_auth_chunk);
+
+ fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
+ fcu_sts_csr = handle->chip_info->fcu_sts_csr;
+ fcu_dram_hi_csr = handle->chip_info->fcu_dram_addr_hi;
+ fcu_dram_lo_csr = handle->chip_info->fcu_dram_addr_lo;
+
+ SET_CAP_CSR(handle, fcu_dram_hi_csr, (bus_addr >> BITS_IN_DWORD));
+ SET_CAP_CSR(handle, fcu_dram_lo_csr, bus_addr);
+ SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH);
+
+ do {
+ msleep(FW_AUTH_WAIT_PERIOD);
+ fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
+ if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
+ goto auth_fail;
+ if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
+ if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
+ return 0;
+ } while (retry++ < FW_AUTH_MAX_RETRY);
+auth_fail:
+ pr_err("QAT: authentication error (FCU_STATUS = 0x%x),retry = %d\n",
+ fcu_sts & FCU_AUTH_STS_MASK, retry);
+ return -EINVAL;
+}
+
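+/*
+ * On devices with a shared ustore, images that do not target the admin AE
+ * are loaded via the broadcast mechanism.
+ */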
+static bool qat_uclo_is_broadcast(struct icp_qat_fw_loader_handle *handle,
+ int imgid)
+{
+ struct icp_qat_suof_handle *sobj_handle;
+
+ if (!handle->chip_info->tgroup_share_ustore)
+ return false;
+
+ sobj_handle = (struct icp_qat_suof_handle *)handle->sobj_handle;
+ if (handle->hal_handle->admin_ae_mask &
+ sobj_handle->img_table.simg_hdr[imgid].ae_mask)
+ return false;
+
+ return true;
+}
+
+static int qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle,
+ struct icp_qat_fw_auth_desc *desc)
+{
+ unsigned long ae_mask = handle->hal_handle->ae_mask;
+ unsigned long desc_ae_mask = desc->ae_mask;
+ u32 fcu_sts, ae_broadcast_mask = 0;
+ u32 fcu_loaded_csr, ae_loaded;
+ u32 fcu_sts_csr, fcu_ctl_csr;
+ unsigned int ae, retry = 0;
+
+ if (handle->chip_info->tgroup_share_ustore) {
+ fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
+ fcu_sts_csr = handle->chip_info->fcu_sts_csr;
+ fcu_loaded_csr = handle->chip_info->fcu_loaded_ae_csr;
+ } else {
+ pr_err("Chip 0x%x doesn't support broadcast load\n",
+ handle->pci_dev->device);
+ return -EINVAL;
+ }
+
+ for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+ if (qat_hal_check_ae_active(handle, (unsigned char)ae)) {
+ pr_err("QAT: Broadcast load failed. AE is not enabled or active.\n");
+ return -EINVAL;
+ }
+
+ if (test_bit(ae, &desc_ae_mask))
+ ae_broadcast_mask |= 1 << ae;
+ }
+
+ if (ae_broadcast_mask) {
+ SET_CAP_CSR(handle, FCU_ME_BROADCAST_MASK_TYPE,
+ ae_broadcast_mask);
+
+ SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_LOAD);
+
+ do {
+ msleep(FW_AUTH_WAIT_PERIOD);
+ fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
+ fcu_sts &= FCU_AUTH_STS_MASK;
+
+ if (fcu_sts == FCU_STS_LOAD_FAIL) {
+ pr_err("Broadcast load failed: 0x%x)\n", fcu_sts);
+ return -EINVAL;
+ } else if (fcu_sts == FCU_STS_LOAD_DONE) {
+ ae_loaded = GET_CAP_CSR(handle, fcu_loaded_csr);
+ ae_loaded >>= handle->chip_info->fcu_loaded_ae_pos;
+
+ if ((ae_loaded & ae_broadcast_mask) == ae_broadcast_mask)
+ break;
+ }
+ } while (retry++ < FW_AUTH_MAX_RETRY);
+
+ if (retry > FW_AUTH_MAX_RETRY) {
+ pr_err("QAT: broadcast load failed timeout %d\n", retry);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle,
+ struct icp_firml_dram_desc *dram_desc,
+ unsigned int size)
+{
+ void *vptr;
+ dma_addr_t ptr;
+
+ vptr = dma_alloc_coherent(&handle->pci_dev->dev,
+ size, &ptr, GFP_KERNEL);
+ if (!vptr)
+ return -ENOMEM;
+ dram_desc->dram_base_addr_v = vptr;
+ dram_desc->dram_bus_addr = ptr;
+ dram_desc->dram_size = size;
+ return 0;
+}
+
+static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle,
+ struct icp_firml_dram_desc *dram_desc)
+{
+ if (handle && dram_desc && dram_desc->dram_base_addr_v) {
+ dma_free_coherent(&handle->pci_dev->dev,
+ (size_t)(dram_desc->dram_size),
+ dram_desc->dram_base_addr_v,
+ dram_desc->dram_bus_addr);
+ }
+
+ if (dram_desc)
+ memset(dram_desc, 0, sizeof(*dram_desc));
+}
+
+static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
+ struct icp_qat_fw_auth_desc **desc)
+{
+ struct icp_firml_dram_desc dram_desc;
+
+ if (*desc) {
+ dram_desc.dram_base_addr_v = *desc;
+ dram_desc.dram_bus_addr = ((struct icp_qat_auth_chunk *)
+ (*desc))->chunk_bus_addr;
+ dram_desc.dram_size = ((struct icp_qat_auth_chunk *)
+ (*desc))->chunk_size;
+ qat_uclo_simg_free(handle, &dram_desc);
+ }
+}
+
+static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle,
+ char *image, unsigned int size,
+ unsigned int fw_type)
+{
+ char *fw_type_name = fw_type ? "MMP" : "AE";
+ unsigned int css_dword_size = sizeof(u32);
+
+ if (handle->chip_info->fw_auth) {
+ struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
+ unsigned int header_len = ICP_QAT_AE_IMG_OFFSET(handle);
+
+ if ((css_hdr->header_len * css_dword_size) != header_len)
+ goto err;
+ if ((css_hdr->size * css_dword_size) != size)
+ goto err;
+ if (fw_type != css_hdr->fw_type)
+ goto err;
+ if (size <= header_len)
+ goto err;
+ size -= header_len;
+ }
+
+ if (fw_type == CSS_AE_FIRMWARE) {
+ if (size < sizeof(struct icp_qat_simg_ae_mode *) +
+ ICP_QAT_SIMG_AE_INIT_SEQ_LEN)
+ goto err;
+ if (size > ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)
+ goto err;
+ } else if (fw_type == CSS_MMP_FIRMWARE) {
+ if (size > ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN)
+ goto err;
+ } else {
+ pr_err("QAT: Unsupported firmware type\n");
+ return -EINVAL;
+ }
+ return 0;
+
+err:
+ pr_err("QAT: Invalid %s firmware image\n", fw_type_name);
+ return -EINVAL;
+}
+
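+/*
+ * Copy the signed image into a DMA-able buffer laid out as the FCU
+ * expects: CSS header, FW signing key (modulus, padding, exponent),
+ * signature, then the image body. A struct icp_qat_auth_chunk is kept at
+ * the start of the allocation so the buffer can be freed later.
+ */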
+static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
+ char *image, unsigned int size,
+ struct icp_qat_fw_auth_desc **desc)
+{
+ struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
+ struct icp_qat_fw_auth_desc *auth_desc;
+ struct icp_qat_auth_chunk *auth_chunk;
+ u64 virt_addr, bus_addr, virt_base;
+ unsigned int length, simg_offset = sizeof(*auth_chunk);
+ struct icp_qat_simg_ae_mode *simg_ae_mode;
+ struct icp_firml_dram_desc img_desc;
+
+ if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)) {
+ pr_err("QAT: error, input image size overflow %d\n", size);
+ return -EINVAL;
+ }
+ length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
+ ICP_QAT_CSS_AE_SIMG_LEN(handle) + simg_offset :
+ size + ICP_QAT_CSS_FWSK_PAD_LEN(handle) + simg_offset;
+ if (qat_uclo_simg_alloc(handle, &img_desc, length)) {
+ pr_err("QAT: error, allocate continuous dram fail\n");
+ return -ENOMEM;
+ }
+
+ auth_chunk = img_desc.dram_base_addr_v;
+ auth_chunk->chunk_size = img_desc.dram_size;
+ auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
+ virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
+ bus_addr = img_desc.dram_bus_addr + simg_offset;
+ auth_desc = img_desc.dram_base_addr_v;
+ auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+ auth_desc->css_hdr_low = (unsigned int)bus_addr;
+ virt_addr = virt_base;
+
+ memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
+ /* pub key */
+ bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
+ sizeof(*css_hdr);
+ virt_addr = virt_addr + sizeof(*css_hdr);
+
+ auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+ auth_desc->fwsk_pub_low = (unsigned int)bus_addr;
+
+ memcpy((void *)(uintptr_t)virt_addr,
+ (void *)(image + sizeof(*css_hdr)),
+ ICP_QAT_CSS_FWSK_MODULUS_LEN(handle));
+ /* padding */
+ memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
+ 0, ICP_QAT_CSS_FWSK_PAD_LEN(handle));
+
+ /* exponent */
+ memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
+ ICP_QAT_CSS_FWSK_PAD_LEN(handle)),
+ (void *)(image + sizeof(*css_hdr) +
+ ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
+ sizeof(unsigned int));
+
+ /* signature */
+ bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
+ auth_desc->fwsk_pub_low) +
+ ICP_QAT_CSS_FWSK_PUB_LEN(handle);
+ virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN(handle);
+ auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+ auth_desc->signature_low = (unsigned int)bus_addr;
+
+ memcpy((void *)(uintptr_t)virt_addr,
+ (void *)(image + sizeof(*css_hdr) +
+ ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
+ ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle)),
+ ICP_QAT_CSS_SIGNATURE_LEN(handle));
+
+ bus_addr = ADD_ADDR(auth_desc->signature_high,
+ auth_desc->signature_low) +
+ ICP_QAT_CSS_SIGNATURE_LEN(handle);
+ virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
+
+ auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+ auth_desc->img_low = (unsigned int)bus_addr;
+ auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(handle);
+ memcpy((void *)(uintptr_t)virt_addr,
+ (void *)(image + ICP_QAT_AE_IMG_OFFSET(handle)),
+ auth_desc->img_len);
+ virt_addr = virt_base;
+ /* AE firmware */
+ if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
+ CSS_AE_FIRMWARE) {
+ auth_desc->img_ae_mode_data_high = auth_desc->img_high;
+ auth_desc->img_ae_mode_data_low = auth_desc->img_low;
+ bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
+ auth_desc->img_ae_mode_data_low) +
+ sizeof(struct icp_qat_simg_ae_mode);
+
+ auth_desc->img_ae_init_data_high = (unsigned int)
+ (bus_addr >> BITS_IN_DWORD);
+ auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
+ bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
+ auth_desc->img_ae_insts_high = (unsigned int)
+ (bus_addr >> BITS_IN_DWORD);
+ auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
+ virt_addr += sizeof(struct icp_qat_css_hdr);
+ virt_addr += ICP_QAT_CSS_FWSK_PUB_LEN(handle);
+ virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
+ simg_ae_mode = (struct icp_qat_simg_ae_mode *)(uintptr_t)virt_addr;
+ auth_desc->ae_mask = simg_ae_mode->ae_mask & handle->cfg_ae_mask;
+ } else {
+ auth_desc->img_ae_insts_high = auth_desc->img_high;
+ auth_desc->img_ae_insts_low = auth_desc->img_low;
+ }
+ *desc = auth_desc;
+ return 0;
+}
+
+static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
+ struct icp_qat_fw_auth_desc *desc)
+{
+ unsigned long ae_mask = handle->hal_handle->ae_mask;
+ u32 fcu_sts_csr, fcu_ctl_csr;
+ u32 loaded_aes, loaded_csr;
+ unsigned int i;
+ u32 fcu_sts;
+
+ fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
+ fcu_sts_csr = handle->chip_info->fcu_sts_csr;
+ loaded_csr = handle->chip_info->fcu_loaded_ae_csr;
+
+ for_each_set_bit(i, &ae_mask, handle->hal_handle->ae_max_num) {
+ int retry = 0;
+
+ if (!((desc->ae_mask >> i) & 0x1))
+ continue;
+ if (qat_hal_check_ae_active(handle, i)) {
+ pr_err("QAT: AE %d is active\n", i);
+ return -EINVAL;
+ }
+ SET_CAP_CSR(handle, fcu_ctl_csr,
+ (FCU_CTRL_CMD_LOAD |
+ (1 << FCU_CTRL_BROADCAST_POS) |
+ (i << FCU_CTRL_AE_POS)));
+
+ do {
+ msleep(FW_AUTH_WAIT_PERIOD);
+ fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
+ if ((fcu_sts & FCU_AUTH_STS_MASK) ==
+ FCU_STS_LOAD_DONE) {
+ loaded_aes = GET_CAP_CSR(handle, loaded_csr);
+ loaded_aes >>= handle->chip_info->fcu_loaded_ae_pos;
+ if (loaded_aes & (1 << i))
+ break;
+ }
+ } while (retry++ < FW_AUTH_MAX_RETRY);
+ if (retry > FW_AUTH_MAX_RETRY) {
+ pr_err("QAT: firmware load failed timeout %x\n", retry);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
+ void *addr_ptr, int mem_size)
+{
+ struct icp_qat_suof_handle *suof_handle;
+
+ suof_handle = kzalloc(sizeof(*suof_handle), GFP_KERNEL);
+ if (!suof_handle)
+ return -ENOMEM;
+ handle->sobj_handle = suof_handle;
+ if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
+ qat_uclo_del_suof(handle);
+ pr_err("QAT: map SUOF failed\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
+ void *addr_ptr, int mem_size)
+{
+ struct icp_qat_fw_auth_desc *desc = NULL;
+ int status = 0;
+ int ret;
+
+ ret = qat_uclo_check_image(handle, addr_ptr, mem_size, CSS_MMP_FIRMWARE);
+ if (ret)
+ return ret;
+
+ if (handle->chip_info->fw_auth) {
+ status = qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc);
+ if (!status)
+ status = qat_uclo_auth_fw(handle, desc);
+ qat_uclo_ummap_auth_fw(handle, &desc);
+ } else {
+ if (handle->chip_info->mmp_sram_size < mem_size) {
+ pr_err("QAT: MMP size is too large: 0x%x\n", mem_size);
+ return -EFBIG;
+ }
+ qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
+ }
+ return status;
+}
+
+static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
+ void *addr_ptr, int mem_size)
+{
+ struct icp_qat_uof_filehdr *filehdr;
+ struct icp_qat_uclo_objhandle *objhdl;
+
+ objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
+ if (!objhdl)
+ return -ENOMEM;
+ objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
+ if (!objhdl->obj_buf)
+ goto out_objbuf_err;
+ filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
+ if (qat_uclo_check_uof_format(filehdr))
+ goto out_objhdr_err;
+ objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
+ ICP_QAT_UOF_OBJS);
+ if (!objhdl->obj_hdr) {
+ pr_err("QAT: object file chunk is null\n");
+ goto out_objhdr_err;
+ }
+ handle->obj_handle = objhdl;
+ if (qat_uclo_parse_uof_obj(handle))
+ goto out_overlay_obj_err;
+ return 0;
+
+out_overlay_obj_err:
+ handle->obj_handle = NULL;
+ kfree(objhdl->obj_hdr);
+out_objhdr_err:
+ kfree(objhdl->obj_buf);
+out_objbuf_err:
+ kfree(objhdl);
+ return -ENOMEM;
+}
+
+static int qat_uclo_map_mof_file_hdr(struct icp_qat_fw_loader_handle *handle,
+ struct icp_qat_mof_file_hdr *mof_ptr,
+ u32 mof_size)
+{
+ struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;
+ unsigned int min_ver_offset;
+ unsigned int checksum;
+
+ mobj_handle->file_id = ICP_QAT_MOF_FID;
+ mobj_handle->mof_buf = (char *)mof_ptr;
+ mobj_handle->mof_size = mof_size;
+
+ min_ver_offset = mof_size - offsetof(struct icp_qat_mof_file_hdr,
+ min_ver);
+ checksum = qat_uclo_calc_str_checksum(&mof_ptr->min_ver,
+ min_ver_offset);
+ if (checksum != mof_ptr->checksum) {
+ pr_err("QAT: incorrect MOF checksum\n");
+ return -EINVAL;
+ }
+
+ mobj_handle->checksum = mof_ptr->checksum;
+ mobj_handle->min_ver = mof_ptr->min_ver;
+ mobj_handle->maj_ver = mof_ptr->maj_ver;
+ return 0;
+}
+
+static void qat_uclo_del_mof(struct icp_qat_fw_loader_handle *handle)
+{
+ struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;
+
+ kfree(mobj_handle->obj_table.obj_hdr);
+ mobj_handle->obj_table.obj_hdr = NULL;
+ kfree(handle->mobj_handle);
+ handle->mobj_handle = NULL;
+}
+
+static int qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle,
+ const char *obj_name, char **obj_ptr,
+ unsigned int *obj_size)
+{
+ struct icp_qat_mof_objhdr *obj_hdr = mobj_handle->obj_table.obj_hdr;
+ unsigned int i;
+
+ for (i = 0; i < mobj_handle->obj_table.num_objs; i++) {
+ if (!strncmp(obj_hdr[i].obj_name, obj_name,
+ ICP_QAT_SUOF_OBJ_NAME_LEN)) {
+ *obj_ptr = obj_hdr[i].obj_buf;
+ *obj_size = obj_hdr[i].obj_size;
+ return 0;
+ }
+ }
+
+ pr_err("QAT: object %s is not found inside MOF\n", obj_name);
+ return -EINVAL;
+}
+
+static int qat_uclo_map_obj_from_mof(struct icp_qat_mof_handle *mobj_handle,
+ struct icp_qat_mof_objhdr *mobj_hdr,
+ struct icp_qat_mof_obj_chunkhdr *obj_chunkhdr)
+{
+ u8 *obj;
+
+ if (!strncmp(obj_chunkhdr->chunk_id, ICP_QAT_UOF_IMAG,
+ ICP_QAT_MOF_OBJ_CHUNKID_LEN)) {
+ obj = mobj_handle->uobjs_hdr + obj_chunkhdr->offset;
+ } else if (!strncmp(obj_chunkhdr->chunk_id, ICP_QAT_SUOF_IMAG,
+ ICP_QAT_MOF_OBJ_CHUNKID_LEN)) {
+ obj = mobj_handle->sobjs_hdr + obj_chunkhdr->offset;
+ } else {
+ pr_err("QAT: unsupported chunk id\n");
+ return -EINVAL;
+ }
+ mobj_hdr->obj_buf = obj;
+ mobj_hdr->obj_size = (unsigned int)obj_chunkhdr->size;
+ mobj_hdr->obj_name = obj_chunkhdr->name + mobj_handle->sym_str;
+ return 0;
+}
+
+static int qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle)
+{
+ struct icp_qat_mof_obj_chunkhdr *uobj_chunkhdr;
+ struct icp_qat_mof_obj_chunkhdr *sobj_chunkhdr;
+ struct icp_qat_mof_obj_hdr *uobj_hdr;
+ struct icp_qat_mof_obj_hdr *sobj_hdr;
+ struct icp_qat_mof_objhdr *mobj_hdr;
+ unsigned int uobj_chunk_num = 0;
+ unsigned int sobj_chunk_num = 0;
+ unsigned int *valid_chunk;
+ int ret, i;
+
+ uobj_hdr = (struct icp_qat_mof_obj_hdr *)mobj_handle->uobjs_hdr;
+ sobj_hdr = (struct icp_qat_mof_obj_hdr *)mobj_handle->sobjs_hdr;
+ if (uobj_hdr)
+ uobj_chunk_num = uobj_hdr->num_chunks;
+ if (sobj_hdr)
+ sobj_chunk_num = sobj_hdr->num_chunks;
+
+ mobj_hdr = kzalloc((uobj_chunk_num + sobj_chunk_num) *
+ sizeof(*mobj_hdr), GFP_KERNEL);
+ if (!mobj_hdr)
+ return -ENOMEM;
+
+ mobj_handle->obj_table.obj_hdr = mobj_hdr;
+ valid_chunk = &mobj_handle->obj_table.num_objs;
+ uobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *)
+ ((uintptr_t)uobj_hdr + sizeof(*uobj_hdr));
+ sobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *)
+ ((uintptr_t)sobj_hdr + sizeof(*sobj_hdr));
+
+ /* map uof objects */
+ for (i = 0; i < uobj_chunk_num; i++) {
+ ret = qat_uclo_map_obj_from_mof(mobj_handle,
+ &mobj_hdr[*valid_chunk],
+ &uobj_chunkhdr[i]);
+ if (ret)
+ return ret;
+ (*valid_chunk)++;
+ }
+
+ /* map suof objects */
+ for (i = 0; i < sobj_chunk_num; i++) {
+ ret = qat_uclo_map_obj_from_mof(mobj_handle,
+ &mobj_hdr[*valid_chunk],
+ &sobj_chunkhdr[i]);
+ if (ret)
+ return ret;
+ (*valid_chunk)++;
+ }
+
+ if ((uobj_chunk_num + sobj_chunk_num) != *valid_chunk) {
+ pr_err("QAT: inconsistent UOF/SUOF chunk amount\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void qat_uclo_map_mof_symobjs(struct icp_qat_mof_handle *mobj_handle,
+ struct icp_qat_mof_chunkhdr *mof_chunkhdr)
+{
+ char **sym_str = (char **)&mobj_handle->sym_str;
+ unsigned int *sym_size = &mobj_handle->sym_size;
+ struct icp_qat_mof_str_table *str_table_obj;
+
+ *sym_size = *(unsigned int *)(uintptr_t)
+ (mof_chunkhdr->offset + mobj_handle->mof_buf);
+ *sym_str = (char *)(uintptr_t)
+ (mobj_handle->mof_buf + mof_chunkhdr->offset +
+ sizeof(str_table_obj->tab_len));
+}
+
+static void qat_uclo_map_mof_chunk(struct icp_qat_mof_handle *mobj_handle,
+ struct icp_qat_mof_chunkhdr *mof_chunkhdr)
+{
+ char *chunk_id = mof_chunkhdr->chunk_id;
+
+ if (!strncmp(chunk_id, ICP_QAT_MOF_SYM_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
+ qat_uclo_map_mof_symobjs(mobj_handle, mof_chunkhdr);
+ else if (!strncmp(chunk_id, ICP_QAT_UOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
+ mobj_handle->uobjs_hdr = mobj_handle->mof_buf +
+ mof_chunkhdr->offset;
+ else if (!strncmp(chunk_id, ICP_QAT_SUOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
+ mobj_handle->sobjs_hdr = mobj_handle->mof_buf +
+ mof_chunkhdr->offset;
+}
+
+static int qat_uclo_check_mof_format(struct icp_qat_mof_file_hdr *mof_hdr)
+{
+ int maj = mof_hdr->maj_ver & 0xff;
+ int min = mof_hdr->min_ver & 0xff;
+
+ if (mof_hdr->file_id != ICP_QAT_MOF_FID) {
+ pr_err("QAT: invalid header 0x%x\n", mof_hdr->file_id);
+ return -EINVAL;
+ }
+
+ if (mof_hdr->num_chunks <= 0x1) {
+ pr_err("QAT: MOF chunk amount is incorrect\n");
+ return -EINVAL;
+ }
+ if (maj != ICP_QAT_MOF_MAJVER || min != ICP_QAT_MOF_MINVER) {
+ pr_err("QAT: bad MOF version, major 0x%x, minor 0x%x\n",
+ maj, min);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int qat_uclo_map_mof_obj(struct icp_qat_fw_loader_handle *handle,
+ struct icp_qat_mof_file_hdr *mof_ptr,
+ u32 mof_size, const char *obj_name,
+ char **obj_ptr, unsigned int *obj_size)
+{
+ struct icp_qat_mof_chunkhdr *mof_chunkhdr;
+ unsigned int file_id = mof_ptr->file_id;
+ struct icp_qat_mof_handle *mobj_handle;
+ unsigned short chunks_num;
+ unsigned int i;
+ int ret;
+
+ if (file_id == ICP_QAT_UOF_FID || file_id == ICP_QAT_SUOF_FID) {
+ if (obj_ptr)
+ *obj_ptr = (char *)mof_ptr;
+ if (obj_size)
+ *obj_size = mof_size;
+ return 0;
+ }
+ if (qat_uclo_check_mof_format(mof_ptr))
+ return -EINVAL;
+
+ mobj_handle = kzalloc(sizeof(*mobj_handle), GFP_KERNEL);
+ if (!mobj_handle)
+ return -ENOMEM;
+
+ handle->mobj_handle = mobj_handle;
+ ret = qat_uclo_map_mof_file_hdr(handle, mof_ptr, mof_size);
+ if (ret)
+ return ret;
+
+ mof_chunkhdr = (void *)mof_ptr + sizeof(*mof_ptr);
+ chunks_num = mof_ptr->num_chunks;
+
+ /* Parse MOF file chunks */
+ for (i = 0; i < chunks_num; i++)
+ qat_uclo_map_mof_chunk(mobj_handle, &mof_chunkhdr[i]);
+
+ /* The symbol table and at least one of uobjs/sobjs must be available */
+ if (!mobj_handle->sym_str ||
+ (!mobj_handle->uobjs_hdr && !mobj_handle->sobjs_hdr))
+ return -EINVAL;
+
+ ret = qat_uclo_map_objs_from_mof(mobj_handle);
+ if (ret)
+ return ret;
+
+ /* Seek specified uof object in MOF */
+ return qat_uclo_seek_obj_inside_mof(mobj_handle, obj_name,
+ obj_ptr, obj_size);
+}
+
+int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
+ void *addr_ptr, u32 mem_size, const char *obj_name)
+{
+ char *obj_addr;
+ u32 obj_size;
+ int ret;
+
+ BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
+ (sizeof(handle->hal_handle->ae_mask) * 8));
+
+ if (!handle || !addr_ptr || mem_size < 24)
+ return -EINVAL;
+
+ if (obj_name) {
+ ret = qat_uclo_map_mof_obj(handle, addr_ptr, mem_size, obj_name,
+ &obj_addr, &obj_size);
+ if (ret)
+ return ret;
+ } else {
+ obj_addr = addr_ptr;
+ obj_size = mem_size;
+ }
+
+ return (handle->chip_info->fw_auth) ?
+ qat_uclo_map_suof_obj(handle, obj_addr, obj_size) :
+ qat_uclo_map_uof_obj(handle, obj_addr, obj_size);
+}
+
+void qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle)
+{
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+ unsigned int a;
+
+ if (handle->mobj_handle)
+ qat_uclo_del_mof(handle);
+ if (handle->sobj_handle)
+ qat_uclo_del_suof(handle);
+ if (!obj_handle)
+ return;
+
+ kfree(obj_handle->uword_buf);
+ for (a = 0; a < obj_handle->uimage_num; a++)
+ kfree(obj_handle->ae_uimage[a].page);
+
+ for (a = 0; a < handle->hal_handle->ae_max_num; a++)
+ qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
+
+ kfree(obj_handle->obj_hdr);
+ kfree(obj_handle->obj_buf);
+ kfree(obj_handle);
+ handle->obj_handle = NULL;
+}
+
+static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
+ struct icp_qat_uclo_encap_page *encap_page,
+ u64 *uword, unsigned int addr_p,
+ unsigned int raddr, u64 fill)
+{
+ unsigned int i, addr;
+ u64 uwrd = 0;
+
+ if (!encap_page) {
+ *uword = fill;
+ return;
+ }
+ addr = (encap_page->page_region) ? raddr : addr_p;
+ for (i = 0; i < encap_page->uwblock_num; i++) {
+ if (addr >= encap_page->uwblock[i].start_addr &&
+ addr <= encap_page->uwblock[i].start_addr +
+ encap_page->uwblock[i].words_num - 1) {
+ addr -= encap_page->uwblock[i].start_addr;
+ addr *= obj_handle->uword_in_bytes;
+ memcpy(&uwrd, (void *)(((uintptr_t)
+ encap_page->uwblock[i].micro_words) + addr),
+ obj_handle->uword_in_bytes);
+ uwrd = uwrd & GENMASK_ULL(43, 0);
+ }
+ }
+ *uword = uwrd;
+ if (*uword == INVLD_UWORD)
+ *uword = fill;
+}
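qat_uclo_fill_uwords() copies at most uword_in_bytes into a 64-bit scratch value, keeps only the 44-bit microword (GENMASK_ULL(43, 0)) and substitutes the image fill pattern for an invalid word. A userspace sketch of that fixup, with a stand-in for the driver's INVLD_UWORD constant and a made-up fill pattern:

#include <stdint.h>
#include <stdio.h>

#define UWORD_MASK		((1ULL << 44) - 1)	/* GENMASK_ULL(43, 0) */
#define INVLD_UWORD_SKETCH	UWORD_MASK		/* stand-in, not the driver's value */

static uint64_t fixup_uword(uint64_t raw, uint64_t fill)
{
	/* Clear the upper 20 bits, then replace an invalid word by the fill. */
	uint64_t uwrd = raw & UWORD_MASK;

	return (uwrd == INVLD_UWORD_SKETCH) ? fill : uwrd;
}

int main(void)
{
	uint64_t fill = 0x0E000010000ULL;	/* hypothetical fill pattern */

	printf("%#llx\n", (unsigned long long)fixup_uword(0xABC0123456789ULL, fill));
	printf("%#llx\n", (unsigned long long)fixup_uword(~0ULL, fill));
	return 0;
}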
+
+static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
+ struct icp_qat_uclo_encap_page
+ *encap_page, unsigned int ae)
+{
+ unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+ u64 fill_pat;
+
+ /* load the page starting at appropriate ustore address */
+ /* get fill-pattern from an image -- they are all the same */
+ memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
+ sizeof(u64));
+ uw_physical_addr = encap_page->beg_addr_p;
+ uw_relative_addr = 0;
+ words_num = encap_page->micro_words_num;
+ while (words_num) {
+ cpylen = min(words_num, UWORD_CPYBUF_SIZE);
+
+ /* load the buffer */
+ for (i = 0; i < cpylen; i++)
+ qat_uclo_fill_uwords(obj_handle, encap_page,
+ &obj_handle->uword_buf[i],
+ uw_physical_addr + i,
+ uw_relative_addr + i, fill_pat);
+
+ /* copy the buffer to ustore */
+ qat_hal_wr_uwords(handle, (unsigned char)ae,
+ uw_physical_addr, cpylen,
+ obj_handle->uword_buf);
+
+ uw_physical_addr += cpylen;
+ uw_relative_addr += cpylen;
+ words_num -= cpylen;
+ }
+}
+
+static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
+ struct icp_qat_uof_image *image)
+{
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+ unsigned long ae_mask = handle->hal_handle->ae_mask;
+ unsigned long cfg_ae_mask = handle->cfg_ae_mask;
+ unsigned long ae_assigned = image->ae_assigned;
+ struct icp_qat_uclo_aedata *aed;
+ unsigned int ctx_mask, s;
+ struct icp_qat_uclo_page *page;
+ unsigned char ae;
+ int ctx;
+
+ if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
+ ctx_mask = 0xff;
+ else
+ ctx_mask = 0x55;
+ /*
+ * load the default page and set assigned CTX PC
+ * to the entrypoint address
+ */
+ for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+ if (!test_bit(ae, &cfg_ae_mask))
+ continue;
+
+ if (!test_bit(ae, &ae_assigned))
+ continue;
+
+ aed = &obj_handle->ae_data[ae];
+ /* find the slice to which this image is assigned */
+ for (s = 0; s < aed->slice_num; s++) {
+ if (image->ctx_assigned &
+ aed->ae_slices[s].ctx_mask_assigned)
+ break;
+ }
+ if (s >= aed->slice_num)
+ continue;
+ page = aed->ae_slices[s].page;
+ if (!page->encap_page->def_page)
+ continue;
+ qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);
+
+ page = aed->ae_slices[s].page;
+ for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
+ aed->ae_slices[s].cur_page[ctx] =
+ (ctx_mask & (1 << ctx)) ? page : NULL;
+ qat_hal_set_live_ctx(handle, (unsigned char)ae,
+ image->ctx_assigned);
+ qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
+ image->entry_address);
+ }
+}
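The ctx_mask chosen above controls which context slots receive the page: eight-context mode populates all of them, four-context mode only the even ones (0x55). A small sketch of that selection, using illustrative constants in place of the driver's:

#include <stdio.h>

#define MAX_CTX		8
#define EIGHT_CTX_MODE	8	/* stands in for ICP_QAT_UCLO_MAX_CTX */

static unsigned int pick_ctx_mask(int ctx_mode)
{
	return (ctx_mode == EIGHT_CTX_MODE) ? 0xff : 0x55;
}

int main(void)
{
	unsigned int mask = pick_ctx_mask(4);	/* four-context mode */
	int ctx;

	for (ctx = 0; ctx < MAX_CTX; ctx++)
		printf("ctx %d: %s\n", ctx,
		       (mask & (1 << ctx)) ? "page assigned" : "skipped");
	return 0;
}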
+
+static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
+{
+ unsigned int i;
+ struct icp_qat_fw_auth_desc *desc = NULL;
+ struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
+ struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;
+ int ret;
+
+ for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
+ ret = qat_uclo_check_image(handle, simg_hdr[i].simg_buf,
+ simg_hdr[i].simg_len,
+ CSS_AE_FIRMWARE);
+ if (ret)
+ return ret;
+
+ if (qat_uclo_map_auth_fw(handle,
+ (char *)simg_hdr[i].simg_buf,
+ (unsigned int)
+ simg_hdr[i].simg_len,
+ &desc))
+ goto wr_err;
+ if (qat_uclo_auth_fw(handle, desc))
+ goto wr_err;
+ if (qat_uclo_is_broadcast(handle, i)) {
+ if (qat_uclo_broadcast_load_fw(handle, desc))
+ goto wr_err;
+ } else {
+ if (qat_uclo_load_fw(handle, desc))
+ goto wr_err;
+ }
+ qat_uclo_ummap_auth_fw(handle, &desc);
+ }
+ return 0;
+wr_err:
+ qat_uclo_ummap_auth_fw(handle, &desc);
+ return -EINVAL;
+}
+
+static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle)
+{
+ struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+ unsigned int i;
+
+ if (qat_uclo_init_globals(handle))
+ return -EINVAL;
+ for (i = 0; i < obj_handle->uimage_num; i++) {
+ if (!obj_handle->ae_uimage[i].img_ptr)
+ return -EINVAL;
+ if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
+ return -EINVAL;
+ qat_uclo_wr_uimage_page(handle,
+ obj_handle->ae_uimage[i].img_ptr);
+ }
+ return 0;
+}
+
+int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
+{
+ return (handle->chip_info->fw_auth) ? qat_uclo_wr_suof_img(handle) :
+ qat_uclo_wr_uof_img(handle);
+}
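qat_uclo_map_obj() and qat_uclo_wr_all_uimage() dispatch on two properties: whether a named object must first be located inside a MOF container, and whether the device requires authenticated (SUOF) firmware. A runnable sketch that mirrors only that branch structure; the device examples in the comments are assumptions, not taken from this patch:

#include <stdbool.h>
#include <stdio.h>

static const char *pick_load_path(bool has_obj_name, bool fw_auth)
{
	if (has_obj_name)
		printf("MOF container: locate the named UOF/SUOF object first\n");
	return fw_auth ? "qat_uclo_map_suof_obj() + qat_uclo_wr_suof_img()"
		       : "qat_uclo_map_uof_obj() + qat_uclo_wr_uof_img()";
}

int main(void)
{
	/* Assumed examples: an authenticated device vs. a legacy UOF device */
	printf("%s\n", pick_load_path(true, true));
	printf("%s\n", pick_load_path(false, false));
	return 0;
}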
+
+int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
+ unsigned int cfg_ae_mask)
+{
+ if (!cfg_ae_mask)
+ return -EINVAL;
+
+ handle->cfg_ae_mask = cfg_ae_mask;
+ return 0;
+}
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
new file mode 100644
index 0000000000..38d6f8e162
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
+qat_dh895xcc-objs := adf_drv.o adf_dh895xcc_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
new file mode 100644
index 0000000000..09551f9491
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2021 Intel Corporation */
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_gen2_config.h>
+#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
+#include "adf_dh895xcc_hw_data.h"
+#include "adf_heartbeat.h"
+#include "icp_qat_hw.h"
+
+#define ADF_DH895XCC_VF_MSK 0xFFFFFFFF
+
+/* Thread to arbiter mapping table */
+static const u32 thrd_to_arb_map[ADF_DH895XCC_MAX_ACCELENGINES] = {
+ 0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
+ 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
+ 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222
+};
+
+static struct adf_hw_device_class dh895xcc_class = {
+ .name = ADF_DH895XCC_DEVICE_NAME,
+ .type = DEV_DH895XCC,
+ .instances = 0
+};
+
+static u32 get_accel_mask(struct adf_hw_device_data *self)
+{
+ u32 fuses = self->fuses;
+
+ return ~fuses >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET &
+ ADF_DH895XCC_ACCELERATORS_MASK;
+}
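get_accel_mask() treats each set fuse bit as a disabled accelerator: the fuse word is inverted, shifted down to the accelerator field (bit 13) and trimmed to the six possible units. A worked example with a made-up fuse value:

#include <stdint.h>
#include <stdio.h>

#define ACCELERATORS_REG_OFFSET	13	/* same values as the dh895xcc header */
#define ACCELERATORS_MASK	0x3F

int main(void)
{
	uint32_t fuses = 0x00004000;	/* hypothetical: accelerator 1 fused off */
	uint32_t accel_mask = ~fuses >> ACCELERATORS_REG_OFFSET & ACCELERATORS_MASK;

	printf("accel_mask = 0x%02X\n", accel_mask);	/* prints 0x3D */
	return 0;
}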
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+ u32 fuses = self->fuses;
+
+ return ~fuses & ADF_DH895XCC_ACCELENGINES_MASK;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_DH895XCC_PMISC_BAR;
+}
+
+static u32 get_ts_clock(struct adf_hw_device_data *self)
+{
+ /*
+ * Timestamp update interval is 16 AE clock ticks for dh895xcc.
+ */
+ return self->clock_frequency / 16;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_DH895XCC_ETR_BAR;
+}
+
+static u32 get_sram_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_DH895XCC_SRAM_BAR;
+}
+
+static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
+ u32 capabilities;
+ u32 legfuses;
+
+ capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
+ ICP_ACCEL_CAPABILITIES_CIPHER |
+ ICP_ACCEL_CAPABILITIES_COMPRESSION;
+
+ /* Read accelerator capabilities mask */
+ pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses);
+
+ /* A set bit in legfuses means the feature is OFF in this SKU */
+ if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) {
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+ if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+ if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) {
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+ if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
+
+ return capabilities;
+}
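get_accel_cap() starts from the full capability set and strips whatever the legacy fuses switch off, noting that a cipher or auth fuse also clears the combined CIPHER capability. A sketch of that stripping, with stand-in bit positions rather than the driver's ICP_ACCEL_MASK_* values:

#include <stdint.h>
#include <stdio.h>

#define CAP_SYM		(1u << 0)
#define CAP_ASYM	(1u << 1)
#define CAP_AUTH	(1u << 2)
#define CAP_CIPHER	(1u << 3)
#define CAP_COMPRESS	(1u << 4)

#define FUSE_CIPHER	(1u << 0)	/* stand-in legfuse bit positions */
#define FUSE_PKE	(1u << 1)
#define FUSE_AUTH	(1u << 2)
#define FUSE_COMPRESS	(1u << 3)

static uint32_t strip_caps(uint32_t legfuses)
{
	uint32_t caps = CAP_SYM | CAP_ASYM | CAP_AUTH | CAP_CIPHER | CAP_COMPRESS;

	/* A set bit in legfuses means the feature is OFF in this SKU. */
	if (legfuses & FUSE_CIPHER)
		caps &= ~(CAP_SYM | CAP_CIPHER);
	if (legfuses & FUSE_PKE)
		caps &= ~CAP_ASYM;
	if (legfuses & FUSE_AUTH)
		caps &= ~(CAP_AUTH | CAP_CIPHER);
	if (legfuses & FUSE_COMPRESS)
		caps &= ~CAP_COMPRESS;
	return caps;
}

int main(void)
{
	/* Hypothetical SKU with the compression slice fused off */
	printf("caps = 0x%02X\n", strip_caps(FUSE_COMPRESS));	/* prints 0x0F */
	return 0;
}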
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+ int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK)
+ >> ADF_DH895XCC_FUSECTL_SKU_SHIFT;
+
+ switch (sku) {
+ case ADF_DH895XCC_FUSECTL_SKU_1:
+ return DEV_SKU_1;
+ case ADF_DH895XCC_FUSECTL_SKU_2:
+ return DEV_SKU_2;
+ case ADF_DH895XCC_FUSECTL_SKU_3:
+ return DEV_SKU_3;
+ case ADF_DH895XCC_FUSECTL_SKU_4:
+ return DEV_SKU_4;
+ default:
+ return DEV_SKU_UNKNOWN;
+ }
+}
+
+static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
+{
+ return thrd_to_arb_map;
+}
+
+static void enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
+{
+ /* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
+ if (vf_mask & 0xFFFF) {
+ u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+ & ~ADF_DH895XCC_ERR_MSK_VF2PF_L(vf_mask);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
+ }
+
+ /* Enable VF2PF Messaging Ints - VFs 16 through 31 per vf_mask[31:16] */
+ if (vf_mask >> 16) {
+ u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
+ & ~ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
+ }
+}
+
+static void disable_all_vf2pf_interrupts(void __iomem *pmisc_addr)
+{
+ u32 val;
+
+ /* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
+ val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+ | ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
+
+ /* Disable VF2PF interrupts for VFs 16 through 31 per vf_mask[31:16] */
+ val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
+ | ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
+}
+
+static u32 disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
+{
+ u32 sources, pending, disabled;
+ u32 errsou3, errmsk3;
+ u32 errsou5, errmsk5;
+
+ /* Get the interrupt sources triggered by VFs */
+ errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
+ errsou5 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU5);
+ sources = ADF_DH895XCC_ERR_REG_VF2PF_L(errsou3)
+ | ADF_DH895XCC_ERR_REG_VF2PF_U(errsou5);
+
+ if (!sources)
+ return 0;
+
+ /* Get the already disabled interrupts */
+ errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
+ errmsk5 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5);
+ disabled = ADF_DH895XCC_ERR_REG_VF2PF_L(errmsk3)
+ | ADF_DH895XCC_ERR_REG_VF2PF_U(errmsk5);
+
+ pending = sources & ~disabled;
+ if (!pending)
+ return 0;
+
+ /* Due to HW limitations, when disabling the interrupts, we can't
+ * just disable the pending sources, as this would lead to missed
+ * interrupts if the sources change just before writing to ERRMSK3
+ * and ERRMSK5.
+ * To work around it, disable all and re-enable only the interrupts
+ * that are neither pending nor already disabled. Re-enabling will
+ * trigger a new interrupt for the sources that have changed in the
+ * meantime, if any.
+ */
+ errmsk3 |= ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
+ errmsk5 |= ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
+
+ errmsk3 &= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled);
+ errmsk5 &= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
+
+ /* Return the sources of the (new) interrupt(s) */
+ return pending;
+}
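The mask algebra above can be read on plain 32-bit values: a set ERRMSK bit means the interrupt is disabled, everything is masked first, and then only the bits that were neither pending nor already disabled are unmasked again. A standalone sketch of that algebra; it ignores the ERRSOU3/ERRSOU5 bit placement handled by the ADF_DH895XCC_ERR_* macros:

#include <stdint.h>
#include <stdio.h>

static uint32_t disable_pending(uint32_t sources, uint32_t *msk)
{
	uint32_t disabled = *msk;
	uint32_t pending = sources & ~disabled;

	if (!pending)
		return 0;

	*msk = ~0u;			/* first write: disable everything */
	*msk &= sources | disabled;	/* second write: re-enable the rest */
	return pending;
}

int main(void)
{
	uint32_t msk = 0x00000001;	/* VF0 already disabled */
	uint32_t pending = disable_pending(0x00000003, &msk);	/* VF0+VF1 raised */

	/* pending = 0x00000002 (VF1), new mask = 0x00000003 (VF0 and VF1 off) */
	printf("pending = 0x%08X, new mask = 0x%08X\n", pending, msk);
	return 0;
}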
+
+static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
+{
+ adf_gen2_cfg_iov_thds(accel_dev, enable,
+ ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS,
+ ADF_DH895XCC_AE2FUNC_MAP_GRP_B_NUM_REGS);
+}
+
+void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class = &dh895xcc_class;
+ hw_data->instance_id = dh895xcc_class.instances++;
+ hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS;
+ hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
+ hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
+ hw_data->num_logical_accel = 1;
+ hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
+ hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
+ hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
+ hw_data->alloc_irq = adf_isr_resource_alloc;
+ hw_data->free_irq = adf_isr_resource_free;
+ hw_data->enable_error_correction = adf_gen2_enable_error_correction;
+ hw_data->get_accel_mask = get_accel_mask;
+ hw_data->get_ae_mask = get_ae_mask;
+ hw_data->get_accel_cap = get_accel_cap;
+ hw_data->get_num_accels = adf_gen2_get_num_accels;
+ hw_data->get_num_aes = adf_gen2_get_num_aes;
+ hw_data->get_etr_bar_id = get_etr_bar_id;
+ hw_data->get_misc_bar_id = get_misc_bar_id;
+ hw_data->get_admin_info = adf_gen2_get_admin_info;
+ hw_data->get_arb_info = adf_gen2_get_arb_info;
+ hw_data->get_sram_bar_id = get_sram_bar_id;
+ hw_data->get_sku = get_sku;
+ hw_data->fw_name = ADF_DH895XCC_FW;
+ hw_data->fw_mmp_name = ADF_DH895XCC_MMP;
+ hw_data->init_admin_comms = adf_init_admin_comms;
+ hw_data->exit_admin_comms = adf_exit_admin_comms;
+ hw_data->configure_iov_threads = configure_iov_threads;
+ hw_data->send_admin_init = adf_send_admin_init;
+ hw_data->init_arb = adf_init_arb;
+ hw_data->exit_arb = adf_exit_arb;
+ hw_data->get_arb_mapping = adf_get_arbiter_mapping;
+ hw_data->enable_ints = adf_gen2_enable_ints;
+ hw_data->reset_device = adf_reset_sbr;
+ hw_data->disable_iov = adf_disable_sriov;
+ hw_data->dev_config = adf_gen2_dev_config;
+ hw_data->clock_frequency = ADF_DH895X_AE_FREQ;
+ hw_data->get_hb_clock = get_ts_clock;
+ hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
+ hw_data->check_hb_ctrs = adf_heartbeat_check_ctrs;
+
+ adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
+ hw_data->pfvf_ops.enable_vf2pf_interrupts = enable_vf2pf_interrupts;
+ hw_data->pfvf_ops.disable_all_vf2pf_interrupts = disable_all_vf2pf_interrupts;
+ hw_data->pfvf_ops.disable_pending_vf2pf_interrupts = disable_pending_vf2pf_interrupts;
+ adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen2_init_dc_ops(&hw_data->dc_ops);
+}
+
+void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class->instances--;
+}
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
new file mode 100644
index 0000000000..cd3a219854
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_DH895x_HW_DATA_H_
+#define ADF_DH895x_HW_DATA_H_
+
+#include <linux/units.h>
+
+/* PCIe configuration space */
+#define ADF_DH895XCC_SRAM_BAR 0
+#define ADF_DH895XCC_PMISC_BAR 1
+#define ADF_DH895XCC_ETR_BAR 2
+#define ADF_DH895XCC_FUSECTL_SKU_MASK 0x300000
+#define ADF_DH895XCC_FUSECTL_SKU_SHIFT 20
+#define ADF_DH895XCC_FUSECTL_SKU_1 0x0
+#define ADF_DH895XCC_FUSECTL_SKU_2 0x1
+#define ADF_DH895XCC_FUSECTL_SKU_3 0x2
+#define ADF_DH895XCC_FUSECTL_SKU_4 0x3
+#define ADF_DH895XCC_MAX_ACCELERATORS 6
+#define ADF_DH895XCC_MAX_ACCELENGINES 12
+#define ADF_DH895XCC_ACCELERATORS_REG_OFFSET 13
+#define ADF_DH895XCC_ACCELERATORS_MASK 0x3F
+#define ADF_DH895XCC_ACCELENGINES_MASK 0xFFF
+#define ADF_DH895XCC_ETR_MAX_BANKS 32
+
+/* Masks for VF2PF interrupts */
+#define ADF_DH895XCC_ERR_REG_VF2PF_L(vf_src) (((vf_src) & 0x01FFFE00) >> 9)
+#define ADF_DH895XCC_ERR_MSK_VF2PF_L(vf_mask) (((vf_mask) & 0xFFFF) << 9)
+#define ADF_DH895XCC_ERR_REG_VF2PF_U(vf_src) (((vf_src) & 0x0000FFFF) << 16)
+#define ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask) ((vf_mask) >> 16)
+
+/* AE to function mapping */
+#define ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS 96
+#define ADF_DH895XCC_AE2FUNC_MAP_GRP_B_NUM_REGS 12
+
+/* Clocks frequency */
+#define ADF_DH895X_AE_FREQ (933 * HZ_PER_MHZ)
+
+/* FW names */
+#define ADF_DH895XCC_FW "qat_895xcc.bin"
+#define ADF_DH895XCC_MMP "qat_895xcc_mmp.bin"
+
+void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
+#endif
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
new file mode 100644
index 0000000000..1e748e8ce1
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include <adf_dbgfs.h>
+#include "adf_dh895xcc_hw_data.h"
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC), },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_DH895XCC_DEVICE_NAME,
+ .probe = adf_probe,
+ .remove = adf_remove,
+ .sriov_configure = adf_sriov_configure,
+ .err_handler = &adf_err_handler,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+ pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+ pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+ int i;
+
+ for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+ if (bar->virt_addr)
+ pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+ }
+
+ if (accel_dev->hw_device) {
+ switch (accel_pci_dev->pci_dev->device) {
+ case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
+ adf_clean_hw_data_dh895xcc(accel_dev->hw_device);
+ break;
+ default:
+ break;
+ }
+ kfree(accel_dev->hw_device);
+ accel_dev->hw_device = NULL;
+ }
+ adf_dbgfs_exit(accel_dev);
+ adf_cfg_dev_remove(accel_dev);
+ adf_devmgr_rm_dev(accel_dev, NULL);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct adf_accel_dev *accel_dev;
+ struct adf_accel_pci *accel_pci_dev;
+ struct adf_hw_device_data *hw_data;
+ unsigned int i, bar_nr;
+ unsigned long bar_mask;
+ int ret;
+
+ switch (ent->device) {
+ case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
+ break;
+ default:
+ dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+ return -ENODEV;
+ }
+
+ if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+ /*
+ * If the accelerator is connected to a node with no memory,
+ * there is no point in using the accelerator since the remote
+ * memory transactions will be very slow.
+ */
+ dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+ return -EINVAL;
+ }
+
+ accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!accel_dev)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&accel_dev->crypto_list);
+ accel_pci_dev = &accel_dev->accel_pci_dev;
+ accel_pci_dev->pci_dev = pdev;
+
+ /*
+ * Add accel device to accel table.
+ * This must be done before adf_cleanup_accel() is called.
+ */
+ if (adf_devmgr_add_dev(accel_dev, NULL)) {
+ dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+ kfree(accel_dev);
+ return -EFAULT;
+ }
+
+ accel_dev->owner = THIS_MODULE;
+ /* Allocate and configure device configuration structure */
+ hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!hw_data) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ accel_dev->hw_device = hw_data;
+ adf_init_hw_data_dh895xcc(accel_dev->hw_device);
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+ pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
+ &hw_data->fuses);
+
+ /* Get Accelerators and Accelerators Engines masks */
+ hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+ hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+ accel_pci_dev->sku = hw_data->get_sku(hw_data);
+ /* If the device has no acceleration engines then ignore it. */
+ if (!hw_data->accel_mask || !hw_data->ae_mask ||
+ ((~hw_data->ae_mask) & 0x01)) {
+ dev_err(&pdev->dev, "No acceleration units found");
+ ret = -EFAULT;
+ goto out_err;
+ }
+
+ /* Create device configuration table */
+ ret = adf_cfg_dev_add(accel_dev);
+ if (ret)
+ goto out_err;
+
+ pcie_set_readrq(pdev, 1024);
+
+ /* enable PCI device */
+ if (pci_enable_device(pdev)) {
+ ret = -EFAULT;
+ goto out_err;
+ }
+
+ /* set dma identifier */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+ if (ret) {
+ dev_err(&pdev->dev, "No usable DMA configuration\n");
+ goto out_err_disable;
+ }
+
+ if (pci_request_regions(pdev, ADF_DH895XCC_DEVICE_NAME)) {
+ ret = -EFAULT;
+ goto out_err_disable;
+ }
+
+ /* Get accelerator capabilities mask */
+ hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
+
+ /* Find and map all the device's BARS */
+ i = 0;
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+ bar->base_addr = pci_resource_start(pdev, bar_nr);
+ if (!bar->base_addr)
+ break;
+ bar->size = pci_resource_len(pdev, bar_nr);
+ bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+ if (!bar->virt_addr) {
+ dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+ ret = -EFAULT;
+ goto out_err_free_reg;
+ }
+ }
+ pci_set_master(pdev);
+
+ if (pci_save_state(pdev)) {
+ dev_err(&pdev->dev, "Failed to save pci state\n");
+ ret = -ENOMEM;
+ goto out_err_free_reg;
+ }
+
+ adf_dbgfs_init(accel_dev);
+
+ ret = adf_dev_up(accel_dev, true);
+ if (ret)
+ goto out_err_dev_stop;
+
+ return ret;
+
+out_err_dev_stop:
+ adf_dev_down(accel_dev, false);
+out_err_free_reg:
+ pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+ pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+ adf_cleanup_accel(accel_dev);
+ kfree(accel_dev);
+ return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ if (!accel_dev) {
+ pr_err("QAT: Driver removal failed\n");
+ return;
+ }
+ adf_dev_down(accel_dev, false);
+ adf_cleanup_accel(accel_dev);
+ adf_cleanup_pci_dev(accel_dev);
+ kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+ request_module("intel_qat");
+
+ if (pci_register_driver(&adf_driver)) {
+ pr_err("QAT: Driver initialization failed\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+ pci_unregister_driver(&adf_driver);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE(ADF_DH895XCC_FW);
+MODULE_FIRMWARE(ADF_DH895XCC_MMP);
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
new file mode 100644
index 0000000000..0153c85ce7
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o
+qat_dh895xccvf-objs := adf_drv.o adf_dh895xccvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
new file mode 100644
index 0000000000..70e56cc16e
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_gen2_config.h>
+#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
+#include <adf_pfvf_vf_msg.h>
+#include "adf_dh895xccvf_hw_data.h"
+
+static struct adf_hw_device_class dh895xcciov_class = {
+ .name = ADF_DH895XCCVF_DEVICE_NAME,
+ .type = DEV_DH895XCCVF,
+ .instances = 0
+};
+
+static u32 get_accel_mask(struct adf_hw_device_data *self)
+{
+ return ADF_DH895XCCIOV_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+ return ADF_DH895XCCIOV_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+ return ADF_DH895XCCIOV_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+ return ADF_DH895XCCIOV_MAX_ACCELENGINES;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_DH895XCCIOV_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_DH895XCCIOV_ETR_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+ return DEV_SKU_VF;
+}
+
+static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
+static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
+{
+}
+
+void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class = &dh895xcciov_class;
+ hw_data->num_banks = ADF_DH895XCCIOV_ETR_MAX_BANKS;
+ hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
+ hw_data->num_accel = ADF_DH895XCCIOV_MAX_ACCELERATORS;
+ hw_data->num_logical_accel = 1;
+ hw_data->num_engines = ADF_DH895XCCIOV_MAX_ACCELENGINES;
+ hw_data->tx_rx_gap = ADF_DH895XCCIOV_RX_RINGS_OFFSET;
+ hw_data->tx_rings_mask = ADF_DH895XCCIOV_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
+ hw_data->alloc_irq = adf_vf_isr_resource_alloc;
+ hw_data->free_irq = adf_vf_isr_resource_free;
+ hw_data->enable_error_correction = adf_vf_void_noop;
+ hw_data->init_admin_comms = adf_vf_int_noop;
+ hw_data->exit_admin_comms = adf_vf_void_noop;
+ hw_data->send_admin_init = adf_vf2pf_notify_init;
+ hw_data->init_arb = adf_vf_int_noop;
+ hw_data->exit_arb = adf_vf_void_noop;
+ hw_data->disable_iov = adf_vf2pf_notify_shutdown;
+ hw_data->get_accel_mask = get_accel_mask;
+ hw_data->get_ae_mask = get_ae_mask;
+ hw_data->get_num_accels = get_num_accels;
+ hw_data->get_num_aes = get_num_aes;
+ hw_data->get_etr_bar_id = get_etr_bar_id;
+ hw_data->get_misc_bar_id = get_misc_bar_id;
+ hw_data->get_sku = get_sku;
+ hw_data->enable_ints = adf_vf_void_noop;
+ hw_data->dev_class->instances++;
+ hw_data->dev_config = adf_gen2_dev_config;
+ adf_devmgr_update_class_index(hw_data);
+ adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
+ adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen2_init_dc_ops(&hw_data->dc_ops);
+}
+
+void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class->instances--;
+ adf_devmgr_update_class_index(hw_data);
+}
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
new file mode 100644
index 0000000000..6973fa967b
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2015 - 2020 Intel Corporation */
+#ifndef ADF_DH895XVF_HW_DATA_H_
+#define ADF_DH895XVF_HW_DATA_H_
+
+#define ADF_DH895XCCIOV_PMISC_BAR 1
+#define ADF_DH895XCCIOV_ACCELERATORS_MASK 0x1
+#define ADF_DH895XCCIOV_ACCELENGINES_MASK 0x1
+#define ADF_DH895XCCIOV_MAX_ACCELERATORS 1
+#define ADF_DH895XCCIOV_MAX_ACCELENGINES 1
+#define ADF_DH895XCCIOV_RX_RINGS_OFFSET 8
+#define ADF_DH895XCCIOV_TX_RINGS_MASK 0xFF
+#define ADF_DH895XCCIOV_ETR_BAR 0
+#define ADF_DH895XCCIOV_ETR_MAX_BANKS 1
+
+void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
+#endif
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
new file mode 100644
index 0000000000..fefb85ceae
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include <adf_dbgfs.h>
+#include "adf_dh895xccvf_hw_data.h"
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF), },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_DH895XCCVF_DEVICE_NAME,
+ .probe = adf_probe,
+ .remove = adf_remove,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+ pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+ pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+ struct adf_accel_dev *pf;
+ int i;
+
+ for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+ if (bar->virt_addr)
+ pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+ }
+
+ if (accel_dev->hw_device) {
+ switch (accel_pci_dev->pci_dev->device) {
+ case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF:
+ adf_clean_hw_data_dh895xcciov(accel_dev->hw_device);
+ break;
+ default:
+ break;
+ }
+ kfree(accel_dev->hw_device);
+ accel_dev->hw_device = NULL;
+ }
+ adf_dbgfs_exit(accel_dev);
+ adf_cfg_dev_remove(accel_dev);
+ pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
+ adf_devmgr_rm_dev(accel_dev, pf);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct adf_accel_dev *accel_dev;
+ struct adf_accel_dev *pf;
+ struct adf_accel_pci *accel_pci_dev;
+ struct adf_hw_device_data *hw_data;
+ unsigned int i, bar_nr;
+ unsigned long bar_mask;
+ int ret;
+
+ switch (ent->device) {
+ case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF:
+ break;
+ default:
+ dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+ return -ENODEV;
+ }
+
+ accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!accel_dev)
+ return -ENOMEM;
+
+ accel_dev->is_vf = true;
+ pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
+ accel_pci_dev = &accel_dev->accel_pci_dev;
+ accel_pci_dev->pci_dev = pdev;
+
+ /* Add accel device to accel table */
+ if (adf_devmgr_add_dev(accel_dev, pf)) {
+ dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+ kfree(accel_dev);
+ return -EFAULT;
+ }
+ INIT_LIST_HEAD(&accel_dev->crypto_list);
+
+ accel_dev->owner = THIS_MODULE;
+ /* Allocate and configure device configuration structure */
+ hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!hw_data) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ accel_dev->hw_device = hw_data;
+ adf_init_hw_data_dh895xcciov(accel_dev->hw_device);
+
+ /* Get Accelerators and Accelerators Engines masks */
+ hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+ hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+ accel_pci_dev->sku = hw_data->get_sku(hw_data);
+
+ /* Create device configuration table */
+ ret = adf_cfg_dev_add(accel_dev);
+ if (ret)
+ goto out_err;
+
+ /* enable PCI device */
+ if (pci_enable_device(pdev)) {
+ ret = -EFAULT;
+ goto out_err;
+ }
+
+ /* set dma identifier */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+ if (ret) {
+ dev_err(&pdev->dev, "No usable DMA configuration\n");
+ goto out_err_disable;
+ }
+
+ if (pci_request_regions(pdev, ADF_DH895XCCVF_DEVICE_NAME)) {
+ ret = -EFAULT;
+ goto out_err_disable;
+ }
+
+ /* Find and map all the device's BARS */
+ i = 0;
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+ bar->base_addr = pci_resource_start(pdev, bar_nr);
+ if (!bar->base_addr)
+ break;
+ bar->size = pci_resource_len(pdev, bar_nr);
+ bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+ if (!bar->virt_addr) {
+ dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+ ret = -EFAULT;
+ goto out_err_free_reg;
+ }
+ }
+ pci_set_master(pdev);
+ /* Completion for VF2PF request/response message exchange */
+ init_completion(&accel_dev->vf.msg_received);
+
+ adf_dbgfs_init(accel_dev);
+
+ ret = adf_dev_up(accel_dev, false);
+ if (ret)
+ goto out_err_dev_stop;
+
+ return ret;
+
+out_err_dev_stop:
+ adf_dev_down(accel_dev, false);
+out_err_free_reg:
+ pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+ pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+ adf_cleanup_accel(accel_dev);
+ kfree(accel_dev);
+ return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ if (!accel_dev) {
+ pr_err("QAT: Driver removal failed\n");
+ return;
+ }
+ adf_flush_vf_wq(accel_dev);
+ adf_dev_down(accel_dev, false);
+ adf_cleanup_accel(accel_dev);
+ adf_cleanup_pci_dev(accel_dev);
+ kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+ request_module("intel_qat");
+
+ if (pci_register_driver(&adf_driver)) {
+ pr_err("QAT: Driver initialization failed\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+ pci_unregister_driver(&adf_driver);
+ adf_clean_vf_map(true);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);