Diffstat (limited to 'drivers/crypto/ccree')
-rw-r--r--  drivers/crypto/ccree/Makefile            |    8
-rw-r--r--  drivers/crypto/ccree/cc_aead.c           | 2664
-rw-r--r--  drivers/crypto/ccree/cc_aead.h           |  108
-rw-r--r--  drivers/crypto/ccree/cc_buffer_mgr.c     | 1393
-rw-r--r--  drivers/crypto/ccree/cc_buffer_mgr.h     |   70
-rw-r--r--  drivers/crypto/ccree/cc_cipher.c         | 1509
-rw-r--r--  drivers/crypto/ccree/cc_cipher.h         |   39
-rw-r--r--  drivers/crypto/ccree/cc_crypto_ctx.h     |  142
-rw-r--r--  drivers/crypto/ccree/cc_debugfs.c        |  107
-rw-r--r--  drivers/crypto/ccree/cc_debugfs.h        |   28
-rw-r--r--  drivers/crypto/ccree/cc_driver.c         |  679
-rw-r--r--  drivers/crypto/ccree/cc_driver.h         |  235
-rw-r--r--  drivers/crypto/ccree/cc_fips.c           |  154
-rw-r--r--  drivers/crypto/ccree/cc_fips.h           |   38
-rw-r--r--  drivers/crypto/ccree/cc_hash.c           | 2315
-rw-r--r--  drivers/crypto/ccree/cc_hash.h           |  106
-rw-r--r--  drivers/crypto/ccree/cc_host_regs.h      |  282
-rw-r--r--  drivers/crypto/ccree/cc_hw_queue_defs.h  |  633
-rw-r--r--  drivers/crypto/ccree/cc_kernel_regs.h    |  168
-rw-r--r--  drivers/crypto/ccree/cc_lli_defs.h       |   59
-rw-r--r--  drivers/crypto/ccree/cc_pm.c             |   82
-rw-r--r--  drivers/crypto/ccree/cc_pm.h             |   32
-rw-r--r--  drivers/crypto/ccree/cc_request_mgr.c    |  662
-rw-r--r--  drivers/crypto/ccree/cc_request_mgr.h    |   42
-rw-r--r--  drivers/crypto/ccree/cc_sram_mgr.c       |   91
-rw-r--r--  drivers/crypto/ccree/cc_sram_mgr.h       |   52
26 files changed, 11698 insertions, 0 deletions
diff --git a/drivers/crypto/ccree/Makefile b/drivers/crypto/ccree/Makefile
new file mode 100644
index 000000000..5cfda508e
--- /dev/null
+++ b/drivers/crypto/ccree/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2012-2019 ARM Limited (or its affiliates).
+
+obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
+ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_sram_mgr.o
+ccree-$(CONFIG_CRYPTO_FIPS) += cc_fips.o
+ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o
+ccree-$(CONFIG_PM) += cc_pm.o
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
new file mode 100644
index 000000000..35794c727
--- /dev/null
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -0,0 +1,2664 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/gcm.h>
+#include <linux/rtnetlink.h>
+#include <crypto/internal/des.h>
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+#include "cc_aead.h"
+#include "cc_request_mgr.h"
+#include "cc_hash.h"
+#include "cc_sram_mgr.h"
+
+#define template_aead template_u.aead
+
+#define MAX_AEAD_SETKEY_SEQ 12
+#define MAX_AEAD_PROCESS_SEQ 23
+
+#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
+#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
+
+#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
+
+struct cc_aead_handle {
+ u32 sram_workspace_addr;
+ struct list_head aead_list;
+};
+
+struct cc_hmac_s {
+ u8 *padded_authkey;
+ u8 *ipad_opad; /* IPAD, OPAD*/
+ dma_addr_t padded_authkey_dma_addr;
+ dma_addr_t ipad_opad_dma_addr;
+};
+
+struct cc_xcbc_s {
+ u8 *xcbc_keys; /* K1,K2,K3 */
+ dma_addr_t xcbc_keys_dma_addr;
+};
+
+struct cc_aead_ctx {
+ struct cc_drvdata *drvdata;
+ u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
+ u8 *enckey;
+ dma_addr_t enckey_dma_addr;
+ union {
+ struct cc_hmac_s hmac;
+ struct cc_xcbc_s xcbc;
+ } auth_state;
+ unsigned int enc_keylen;
+ unsigned int auth_keylen;
+ unsigned int authsize; /* Actual (possibly truncated) size of the MAC/ICV */
+ unsigned int hash_len;
+ enum drv_cipher_mode cipher_mode;
+ enum cc_flow_mode flow_mode;
+ enum drv_hash_mode auth_mode;
+};
+
+static void cc_aead_exit(struct crypto_aead *tfm)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
+ crypto_tfm_alg_name(&tfm->base));
+
+ /* Unmap enckey buffer */
+ if (ctx->enckey) {
+ dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
+ ctx->enckey_dma_addr);
+ dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
+ &ctx->enckey_dma_addr);
+ ctx->enckey_dma_addr = 0;
+ ctx->enckey = NULL;
+ }
+
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
+ struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
+
+ if (xcbc->xcbc_keys) {
+ dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
+ xcbc->xcbc_keys,
+ xcbc->xcbc_keys_dma_addr);
+ }
+ dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
+ &xcbc->xcbc_keys_dma_addr);
+ xcbc->xcbc_keys_dma_addr = 0;
+ xcbc->xcbc_keys = NULL;
+ } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
+ struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
+
+ if (hmac->ipad_opad) {
+ dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
+ hmac->ipad_opad,
+ hmac->ipad_opad_dma_addr);
+ dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
+ &hmac->ipad_opad_dma_addr);
+ hmac->ipad_opad_dma_addr = 0;
+ hmac->ipad_opad = NULL;
+ }
+ if (hmac->padded_authkey) {
+ dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
+ hmac->padded_authkey,
+ hmac->padded_authkey_dma_addr);
+ dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
+ &hmac->padded_authkey_dma_addr);
+ hmac->padded_authkey_dma_addr = 0;
+ hmac->padded_authkey = NULL;
+ }
+ }
+}
+
+static unsigned int cc_get_aead_hash_len(struct crypto_aead *tfm)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ return cc_get_default_hash_len(ctx->drvdata);
+}
+
+static int cc_aead_init(struct crypto_aead *tfm)
+{
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_crypto_alg *cc_alg =
+ container_of(alg, struct cc_crypto_alg, aead_alg);
+ struct device *dev = drvdata_to_dev(cc_alg->drvdata);
+
+ dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
+ crypto_tfm_alg_name(&tfm->base));
+
+ /* Initialize modes in instance */
+ ctx->cipher_mode = cc_alg->cipher_mode;
+ ctx->flow_mode = cc_alg->flow_mode;
+ ctx->auth_mode = cc_alg->auth_mode;
+ ctx->drvdata = cc_alg->drvdata;
+ crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));
+
+ /* Allocate key buffer, cache line aligned */
+ ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
+ &ctx->enckey_dma_addr, GFP_KERNEL);
+ if (!ctx->enckey) {
+ dev_err(dev, "Failed allocating key buffer\n");
+ goto init_failed;
+ }
+ dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n",
+ ctx->enckey);
+
+ /* Set default authlen value */
+
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
+ struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
+ const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;
+
+ /* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
+ /* (also used temporarily for the user key - up to 256 bits) */
+ xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
+ &xcbc->xcbc_keys_dma_addr,
+ GFP_KERNEL);
+ if (!xcbc->xcbc_keys) {
+ dev_err(dev, "Failed allocating buffer for XCBC keys\n");
+ goto init_failed;
+ }
+ } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
+ struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
+ const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
+ dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;
+
+ /* Allocate dma-coherent buffer for IPAD + OPAD */
+ hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
+ &hmac->ipad_opad_dma_addr,
+ GFP_KERNEL);
+
+ if (!hmac->ipad_opad) {
+ dev_err(dev, "Failed allocating IPAD/OPAD buffer\n");
+ goto init_failed;
+ }
+
+ dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n",
+ hmac->ipad_opad);
+
+ hmac->padded_authkey = dma_alloc_coherent(dev,
+ MAX_HMAC_BLOCK_SIZE,
+ pkey_dma,
+ GFP_KERNEL);
+
+ if (!hmac->padded_authkey) {
+ dev_err(dev, "failed to allocate padded_authkey\n");
+ goto init_failed;
+ }
+ } else {
+ ctx->auth_state.hmac.ipad_opad = NULL;
+ ctx->auth_state.hmac.padded_authkey = NULL;
+ }
+ ctx->hash_len = cc_get_aead_hash_len(tfm);
+
+ return 0;
+
+init_failed:
+ cc_aead_exit(tfm);
+ return -ENOMEM;
+}
+
+static void cc_aead_complete(struct device *dev, void *cc_req, int err)
+{
+ struct aead_request *areq = (struct aead_request *)cc_req;
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ /* BACKLOG notification */
+ if (err == -EINPROGRESS)
+ goto done;
+
+ cc_unmap_aead_request(dev, areq);
+
+ /* Restore ordinary iv pointer */
+ areq->iv = areq_ctx->backup_iv;
+
+ if (err)
+ goto done;
+
+ if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
+ ctx->authsize) != 0) {
+ dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n",
+ ctx->authsize, ctx->cipher_mode);
+ /* In case of payload authentication failure, the decrypted
+ * message MUST NOT be revealed --> zero its memory.
+ */
+ sg_zero_buffer(areq->dst, sg_nents(areq->dst),
+ areq->cryptlen, areq->assoclen);
+ err = -EBADMSG;
+ }
+ /*ENCRYPT*/
+ } else if (areq_ctx->is_icv_fragmented) {
+ u32 skip = areq->cryptlen + areq_ctx->dst_offset;
+
+ cc_copy_sg_portion(dev, areq_ctx->mac_buf, areq_ctx->dst_sgl,
+ skip, (skip + ctx->authsize),
+ CC_SG_FROM_BUF);
+ }
+done:
+ aead_request_complete(areq, err);
+}
+
+static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
+ struct cc_aead_ctx *ctx)
+{
+ /* Load the AES key */
+ hw_desc_init(&desc[0]);
+ /* We use the same buffer for the source/user key as for the
+ * output keys, because the user key is not needed anymore
+ * after this key-load descriptor.
+ */
+ set_din_type(&desc[0], DMA_DLLI,
+ ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
+ NS_BIT);
+ set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
+ set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ set_key_size_aes(&desc[0], ctx->auth_keylen);
+ set_flow_mode(&desc[0], S_DIN_to_AES);
+ set_setup_mode(&desc[0], SETUP_LOAD_KEY0);
+
+ hw_desc_init(&desc[1]);
+ set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[1], DIN_AES_DOUT);
+ set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
+ AES_KEYSIZE_128, NS_BIT, 0);
+
+ hw_desc_init(&desc[2]);
+ set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[2], DIN_AES_DOUT);
+ set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
+ + AES_KEYSIZE_128),
+ AES_KEYSIZE_128, NS_BIT, 0);
+
+ hw_desc_init(&desc[3]);
+ set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[3], DIN_AES_DOUT);
+ set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
+ + 2 * AES_KEYSIZE_128),
+ AES_KEYSIZE_128, NS_BIT, 0);
+
+ return 4;
+}
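/*
 * Illustrative sketch (not part of this patch): the four descriptors built in
 * xcbc_setkey() derive the RFC 3566 AES-XCBC-MAC subkeys K1, K2 and K3 by
 * encrypting the constant blocks 0x01..01, 0x02..02 and 0x03..03 with the
 * user key.  A minimal software equivalent, assuming the kernel's
 * single-block cipher API (crypto_alloc_cipher() et al.) is usable in this
 * context, could look like this hypothetical helper:
 */
static int xcbc_derive_subkeys_sw(const u8 *key, unsigned int keylen,
				  u8 subkeys[3][AES_BLOCK_SIZE])
{
	struct crypto_cipher *aes;
	u8 const_block[AES_BLOCK_SIZE];
	int i, rc;

	aes = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(aes))
		return PTR_ERR(aes);

	rc = crypto_cipher_setkey(aes, key, keylen);
	if (!rc) {
		for (i = 0; i < 3; i++) {
			/* K(i+1) = AES-K(the byte (i + 1) repeated 16 times) */
			memset(const_block, i + 1, AES_BLOCK_SIZE);
			crypto_cipher_encrypt_one(aes, subkeys[i], const_block);
		}
	}

	crypto_free_cipher(aes);
	return rc;
}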
+
+static unsigned int hmac_setkey(struct cc_hw_desc *desc,
+ struct cc_aead_ctx *ctx)
+{
+ unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
+ unsigned int digest_ofs = 0;
+ unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
+ unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
+ struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
+
+ unsigned int idx = 0;
+ int i;
+
+ /* calc derived HMAC key */
+ for (i = 0; i < 2; i++) {
+ /* Load hash initial state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_din_sram(&desc[idx],
+ cc_larval_digest_addr(ctx->drvdata,
+ ctx->auth_mode),
+ digest_size);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the hash current length*/
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_din_const(&desc[idx], 0, ctx->hash_len);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Prepare ipad key */
+ hw_desc_init(&desc[idx]);
+ set_xor_val(&desc[idx], hmac_pad_const[i]);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ idx++;
+
+ /* Perform HASH update */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ hmac->padded_authkey_dma_addr,
+ SHA256_BLOCK_SIZE, NS_BIT);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_xor_active(&desc[idx]);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+
+ /* Get the digest */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_dout_dlli(&desc[idx],
+ (hmac->ipad_opad_dma_addr + digest_ofs),
+ digest_size, NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+ idx++;
+
+ digest_ofs += digest_size;
+ }
+
+ return idx;
+}
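/*
 * Illustrative sketch (not part of this patch): hmac_setkey() precomputes the
 * two HMAC digest states, i.e. the hash state after absorbing (K' XOR ipad)
 * and (K' XOR opad), where K' is the block-sized padded key and ipad/opad are
 * the bytes 0x36 and 0x5c repeated.  The XOR step performed by the
 * xor_val/xor_active descriptors is, in plain C, simply:
 */
static void hmac_xor_pad_sw(const u8 *padded_key, u8 pad_byte, u8 *out,
			    unsigned int block_size)
{
	unsigned int i;

	/* out = padded_key XOR (pad_byte repeated block_size times) */
	for (i = 0; i < block_size; i++)
		out[i] = padded_key[i] ^ pad_byte;
}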
+
+static int validate_keys_sizes(struct cc_aead_ctx *ctx)
+{
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "enc_keylen=%u authkeylen=%u\n",
+ ctx->enc_keylen, ctx->auth_keylen);
+
+ switch (ctx->auth_mode) {
+ case DRV_HASH_SHA1:
+ case DRV_HASH_SHA256:
+ break;
+ case DRV_HASH_XCBC_MAC:
+ if (ctx->auth_keylen != AES_KEYSIZE_128 &&
+ ctx->auth_keylen != AES_KEYSIZE_192 &&
+ ctx->auth_keylen != AES_KEYSIZE_256)
+ return -ENOTSUPP;
+ break;
+ case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth. key */
+ if (ctx->auth_keylen > 0)
+ return -EINVAL;
+ break;
+ default:
+ dev_dbg(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
+ return -EINVAL;
+ }
+ /* Check cipher key size */
+ if (ctx->flow_mode == S_DIN_to_DES) {
+ if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
+ dev_dbg(dev, "Invalid cipher(3DES) key size: %u\n",
+ ctx->enc_keylen);
+ return -EINVAL;
+ }
+ } else { /* Default assumed to be AES ciphers */
+ if (ctx->enc_keylen != AES_KEYSIZE_128 &&
+ ctx->enc_keylen != AES_KEYSIZE_192 &&
+ ctx->enc_keylen != AES_KEYSIZE_256) {
+ dev_dbg(dev, "Invalid cipher(AES) key size: %u\n",
+ ctx->enc_keylen);
+ return -EINVAL;
+ }
+ }
+
+ return 0; /* All tests of keys sizes passed */
+}
+
+/* This function prepares the user key so it can be passed to the HMAC
+ * processing (copy to internal buffer, or hash it in case the key is
+ * longer than the block size).
+ */
+static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
+ unsigned int keylen)
+{
+ dma_addr_t key_dma_addr = 0;
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ u32 larval_addr;
+ struct cc_crypto_req cc_req = {};
+ unsigned int blocksize;
+ unsigned int digestsize;
+ unsigned int hashmode;
+ unsigned int idx = 0;
+ int rc = 0;
+ u8 *key = NULL;
+ struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
+ dma_addr_t padded_authkey_dma_addr =
+ ctx->auth_state.hmac.padded_authkey_dma_addr;
+
+ switch (ctx->auth_mode) { /* auth_key required and >0 */
+ case DRV_HASH_SHA1:
+ blocksize = SHA1_BLOCK_SIZE;
+ digestsize = SHA1_DIGEST_SIZE;
+ hashmode = DRV_HASH_HW_SHA1;
+ break;
+ case DRV_HASH_SHA256:
+ default:
+ blocksize = SHA256_BLOCK_SIZE;
+ digestsize = SHA256_DIGEST_SIZE;
+ hashmode = DRV_HASH_HW_SHA256;
+ }
+
+ if (keylen != 0) {
+
+ key = kmemdup(authkey, keylen, GFP_KERNEL);
+ if (!key)
+ return -ENOMEM;
+
+ key_dma_addr = dma_map_single(dev, key, keylen, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, key_dma_addr)) {
+ dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
+ key, keylen);
+ kfree_sensitive(key);
+ return -ENOMEM;
+ }
+ if (keylen > blocksize) {
+ /* Load hash initial state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hashmode);
+ larval_addr = cc_larval_digest_addr(ctx->drvdata,
+ ctx->auth_mode);
+ set_din_sram(&desc[idx], larval_addr, digestsize);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the hash current length*/
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hashmode);
+ set_din_const(&desc[idx], 0, ctx->hash_len);
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ key_dma_addr, keylen, NS_BIT);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+
+ /* Get hashed key */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hashmode);
+ set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
+ digestsize, NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+ set_cipher_config0(&desc[idx],
+ HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0, (blocksize - digestsize));
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
+ digestsize), (blocksize - digestsize),
+ NS_BIT, 0);
+ idx++;
+ } else {
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
+ keylen, NS_BIT);
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
+ keylen, NS_BIT, 0);
+ idx++;
+
+ if ((blocksize - keylen) != 0) {
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0,
+ (blocksize - keylen));
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx],
+ (padded_authkey_dma_addr +
+ keylen),
+ (blocksize - keylen), NS_BIT, 0);
+ idx++;
+ }
+ }
+ } else {
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0, (blocksize - keylen));
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
+ blocksize, NS_BIT, 0);
+ idx++;
+ }
+
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
+ if (rc)
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+
+ if (key_dma_addr)
+ dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
+
+ kfree_sensitive(key);
+
+ return rc;
+}
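/*
 * Illustrative sketch (not part of this patch): cc_get_plain_hmac_key()
 * implements the standard HMAC key preprocessing: a key longer than the hash
 * block size is first hashed down to the digest size, a shorter key is used
 * as-is, and the result is zero-padded to the block size.  The padding step
 * alone, in plain C (hash step elided, SHA-256 block size assumed):
 */
static void pad_hmac_key_sw(const u8 *key, unsigned int keylen,
			    u8 padded[SHA256_BLOCK_SIZE])
{
	/*
	 * Caller is expected to have already hashed 'key' down to the digest
	 * size if keylen > SHA256_BLOCK_SIZE.
	 */
	memset(padded, 0, SHA256_BLOCK_SIZE);
	memcpy(padded, key, min_t(unsigned int, keylen, SHA256_BLOCK_SIZE));
}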
+
+static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
+ unsigned int seq_len = 0;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ const u8 *enckey, *authkey;
+ int rc;
+
+ dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
+ ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
+
+ /* STAT_PHASE_0: Init and sanity checks */
+
+ if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
+ struct crypto_authenc_keys keys;
+
+ rc = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (rc)
+ return rc;
+ enckey = keys.enckey;
+ authkey = keys.authkey;
+ ctx->enc_keylen = keys.enckeylen;
+ ctx->auth_keylen = keys.authkeylen;
+
+ if (ctx->cipher_mode == DRV_CIPHER_CTR) {
+ /* the nonce is stored in the last bytes of the key */
+ if (ctx->enc_keylen <
+ (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
+ return -EINVAL;
+ /* Copy nonce from last 4 bytes in CTR key to
+ * first 4 bytes in CTR IV
+ */
+ memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
+ CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
+ /* Set CTR key size */
+ ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
+ }
+ } else { /* non-authenc - has just one key */
+ enckey = key;
+ authkey = NULL;
+ ctx->enc_keylen = keylen;
+ ctx->auth_keylen = 0;
+ }
+
+ rc = validate_keys_sizes(ctx);
+ if (rc)
+ return rc;
+
+ /* STAT_PHASE_1: Copy key to ctx */
+
+ /* Get key material */
+ memcpy(ctx->enckey, enckey, ctx->enc_keylen);
+ if (ctx->enc_keylen == 24)
+ memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
+ memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
+ ctx->auth_keylen);
+ } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
+ rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
+ if (rc)
+ return rc;
+ }
+
+ /* STAT_PHASE_2: Create sequence */
+
+ switch (ctx->auth_mode) {
+ case DRV_HASH_SHA1:
+ case DRV_HASH_SHA256:
+ seq_len = hmac_setkey(desc, ctx);
+ break;
+ case DRV_HASH_XCBC_MAC:
+ seq_len = xcbc_setkey(desc, ctx);
+ break;
+ case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
+ break; /* No auth. key setup */
+ default:
+ dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
+ return -ENOTSUPP;
+ }
+
+ /* STAT_PHASE_3: Submit sequence to HW */
+
+ if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
+ if (rc) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ return rc;
+ }
+ }
+
+ /* Update STAT_PHASE_3 */
+ return rc;
+}
+
+static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_authenc_keys keys;
+ int err;
+
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (unlikely(err))
+ return err;
+
+ err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
+ cc_aead_setkey(aead, key, keylen);
+
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+}
+
+static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ if (keylen < 3)
+ return -EINVAL;
+
+ keylen -= 3;
+ memcpy(ctx->ctr_nonce, key + keylen, 3);
+
+ return cc_aead_setkey(tfm, key, keylen);
+}
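/*
 * Illustrative sketch (not part of this patch): for rfc4309(ccm(aes)) the
 * last three key bytes are the CCM nonce salt, so a caller passes
 * "AES key || 3-byte salt".  A hypothetical setkey call for a 16-byte AES
 * key (example values) therefore uses keylen = 19:
 */
static int example_rfc4309_setkey(struct crypto_aead *tfm)
{
	static const u8 key_and_salt[16 + 3] = {
		/* 16-byte AES key */
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
		/* 3-byte nonce salt */
		0xaa, 0xbb, 0xcc,
	};

	return crypto_aead_setkey(tfm, key_and_salt, sizeof(key_and_salt));
}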
+
+static int cc_aead_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ /* Unsupported auth. sizes */
+ if (authsize == 0 ||
+ authsize > crypto_aead_maxauthsize(authenc)) {
+ return -ENOTSUPP;
+ }
+
+ ctx->authsize = authsize;
+ dev_dbg(dev, "authlen=%d\n", ctx->authsize);
+
+ return 0;
+}
+
+static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ switch (authsize) {
+ case 8:
+ case 12:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return cc_aead_setauthsize(authenc, authsize);
+}
+
+static int cc_ccm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ switch (authsize) {
+ case 4:
+ case 6:
+ case 8:
+ case 10:
+ case 12:
+ case 14:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return cc_aead_setauthsize(authenc, authsize);
+}
+
+static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
+ struct cc_hw_desc desc[], unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
+ enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
+ unsigned int idx = *seq_size;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ switch (assoc_dma_type) {
+ case CC_DMA_BUF_DLLI:
+ dev_dbg(dev, "ASSOC buffer type DLLI\n");
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
+ areq_ctx->assoclen, NS_BIT);
+ set_flow_mode(&desc[idx], flow_mode);
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
+ areq_ctx->cryptlen > 0)
+ set_din_not_last_indication(&desc[idx]);
+ break;
+ case CC_DMA_BUF_MLLI:
+ dev_dbg(dev, "ASSOC buffer type MLLI\n");
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
+ areq_ctx->assoc.mlli_nents, NS_BIT);
+ set_flow_mode(&desc[idx], flow_mode);
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
+ areq_ctx->cryptlen > 0)
+ set_din_not_last_indication(&desc[idx]);
+ break;
+ case CC_DMA_BUF_NULL:
+ default:
+ dev_err(dev, "Invalid ASSOC buffer type\n");
+ }
+
+ *seq_size = (++idx);
+}
+
+static void cc_proc_authen_desc(struct aead_request *areq,
+ unsigned int flow_mode,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size, int direct)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
+ enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
+ unsigned int idx = *seq_size;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ switch (data_dma_type) {
+ case CC_DMA_BUF_DLLI:
+ {
+ struct scatterlist *cipher =
+ (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ areq_ctx->dst_sgl : areq_ctx->src_sgl;
+
+ unsigned int offset =
+ (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ areq_ctx->dst_offset : areq_ctx->src_offset;
+ dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n");
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ (sg_dma_address(cipher) + offset),
+ areq_ctx->cryptlen, NS_BIT);
+ set_flow_mode(&desc[idx], flow_mode);
+ break;
+ }
+ case CC_DMA_BUF_MLLI:
+ {
+ /* DOUBLE-PASS flow (the default):
+ * assoc. data + IV + data are compacted into one MLLI table;
+ * if assoclen is ZERO, only the IV is processed.
+ */
+ u32 mlli_addr = areq_ctx->assoc.sram_addr;
+ u32 mlli_nents = areq_ctx->assoc.mlli_nents;
+
+ if (areq_ctx->is_single_pass) {
+ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+ mlli_addr = areq_ctx->dst.sram_addr;
+ mlli_nents = areq_ctx->dst.mlli_nents;
+ } else {
+ mlli_addr = areq_ctx->src.sram_addr;
+ mlli_nents = areq_ctx->src.mlli_nents;
+ }
+ }
+
+ dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n");
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
+ NS_BIT);
+ set_flow_mode(&desc[idx], flow_mode);
+ break;
+ }
+ case CC_DMA_BUF_NULL:
+ default:
+ dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
+ }
+
+ *seq_size = (++idx);
+}
+
+static void cc_proc_cipher_desc(struct aead_request *areq,
+ unsigned int flow_mode,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ unsigned int idx = *seq_size;
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
+ enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ if (areq_ctx->cryptlen == 0)
+ return; /*null processing*/
+
+ switch (data_dma_type) {
+ case CC_DMA_BUF_DLLI:
+ dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ (sg_dma_address(areq_ctx->src_sgl) +
+ areq_ctx->src_offset), areq_ctx->cryptlen,
+ NS_BIT);
+ set_dout_dlli(&desc[idx],
+ (sg_dma_address(areq_ctx->dst_sgl) +
+ areq_ctx->dst_offset),
+ areq_ctx->cryptlen, NS_BIT, 0);
+ set_flow_mode(&desc[idx], flow_mode);
+ break;
+ case CC_DMA_BUF_MLLI:
+ dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
+ areq_ctx->src.mlli_nents, NS_BIT);
+ set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
+ areq_ctx->dst.mlli_nents, NS_BIT, 0);
+ set_flow_mode(&desc[idx], flow_mode);
+ break;
+ case CC_DMA_BUF_NULL:
+ default:
+ dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
+ }
+
+ *seq_size = (++idx);
+}
+
+static void cc_proc_digest_desc(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ unsigned int idx = *seq_size;
+ unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
+ int direct = req_ctx->gen_ctx.op_type;
+
+ /* Get final ICV result */
+ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+ hw_desc_init(&desc[idx]);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
+ NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
+ set_aes_not_hash_mode(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ } else {
+ set_cipher_config0(&desc[idx],
+ HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ set_cipher_mode(&desc[idx], hash_mode);
+ }
+ } else { /*Decrypt*/
+ /* Get ICV out from hardware */
+ hw_desc_init(&desc[idx]);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
+ ctx->authsize, NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ set_cipher_config0(&desc[idx],
+ HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_aes_not_hash_mode(&desc[idx]);
+ } else {
+ set_cipher_mode(&desc[idx], hash_mode);
+ }
+ }
+
+ *seq_size = (++idx);
+}
+
+static void cc_set_cipher_desc(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ unsigned int hw_iv_size = req_ctx->hw_iv_size;
+ unsigned int idx = *seq_size;
+ int direct = req_ctx->gen_ctx.op_type;
+
+ /* Setup cipher state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_config0(&desc[idx], direct);
+ set_flow_mode(&desc[idx], ctx->flow_mode);
+ set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
+ hw_iv_size, NS_BIT);
+ if (ctx->cipher_mode == DRV_CIPHER_CTR)
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ else
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ set_cipher_mode(&desc[idx], ctx->cipher_mode);
+ idx++;
+
+ /* Setup enc. key */
+ hw_desc_init(&desc[idx]);
+ set_cipher_config0(&desc[idx], direct);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ set_flow_mode(&desc[idx], ctx->flow_mode);
+ if (ctx->flow_mode == S_DIN_to_AES) {
+ set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+ ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
+ ctx->enc_keylen), NS_BIT);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ } else {
+ set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+ ctx->enc_keylen, NS_BIT);
+ set_key_size_des(&desc[idx], ctx->enc_keylen);
+ }
+ set_cipher_mode(&desc[idx], ctx->cipher_mode);
+ idx++;
+
+ *seq_size = idx;
+}
+
+static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size, unsigned int data_flow_mode)
+{
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ int direct = req_ctx->gen_ctx.op_type;
+ unsigned int idx = *seq_size;
+
+ if (req_ctx->cryptlen == 0)
+ return; /*null processing*/
+
+ cc_set_cipher_desc(req, desc, &idx);
+ cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
+ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+ /* We must wait for DMA to write all of the ciphertext */
+ hw_desc_init(&desc[idx]);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ idx++;
+ }
+
+ *seq_size = idx;
+}
+
+static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
+ unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
+ unsigned int idx = *seq_size;
+
+ /* Loading hash ipad xor key state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_din_type(&desc[idx], DMA_DLLI,
+ ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
+ NS_BIT);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load init. digest len (64 bytes) */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
+ ctx->hash_len);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ *seq_size = idx;
+}
+
+static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ unsigned int idx = *seq_size;
+
+ /* Loading MAC state */
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_aes_not_hash_mode(&desc[idx]);
+ idx++;
+
+ /* Setup XCBC MAC K1 */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ ctx->auth_state.xcbc.xcbc_keys_dma_addr,
+ AES_KEYSIZE_128, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_aes_not_hash_mode(&desc[idx]);
+ idx++;
+
+ /* Setup XCBC MAC K2 */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
+ AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_aes_not_hash_mode(&desc[idx]);
+ idx++;
+
+ /* Setup XCBC MAC K3 */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
+ 2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_aes_not_hash_mode(&desc[idx]);
+ idx++;
+
+ *seq_size = idx;
+}
+
+static void cc_proc_header_desc(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ unsigned int idx = *seq_size;
+
+ /* Hash associated data */
+ if (areq_ctx->assoclen > 0)
+ cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
+
+ /* Hash IV */
+ *seq_size = idx;
+}
+
+static void cc_proc_scheme_desc(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
+ unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
+ unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
+ unsigned int idx = *seq_size;
+
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
+ ctx->hash_len);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
+ set_cipher_do(&desc[idx], DO_PAD);
+ idx++;
+
+ /* Get final ICV result */
+ hw_desc_init(&desc[idx]);
+ set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
+ digest_size);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ set_cipher_mode(&desc[idx], hash_mode);
+ idx++;
+
+ /* Loading hash opad xor key state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_din_type(&desc[idx], DMA_DLLI,
+ (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
+ digest_size, NS_BIT);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load init. digest len (64 bytes) */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
+ ctx->hash_len);
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Perform HASH update */
+ hw_desc_init(&desc[idx]);
+ set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
+ digest_size);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+
+ *seq_size = idx;
+}
+
+static void cc_mlli_to_sram(struct aead_request *req,
+ struct cc_hw_desc desc[], unsigned int *seq_size)
+{
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ if ((req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
+ req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
+ !req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) {
+ dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
+ ctx->drvdata->mlli_sram_addr,
+ req_ctx->mlli_params.mlli_len);
+ /* Copy MLLI table host-to-sram */
+ hw_desc_init(&desc[*seq_size]);
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ req_ctx->mlli_params.mlli_dma_addr,
+ req_ctx->mlli_params.mlli_len, NS_BIT);
+ set_dout_sram(&desc[*seq_size],
+ ctx->drvdata->mlli_sram_addr,
+ req_ctx->mlli_params.mlli_len);
+ set_flow_mode(&desc[*seq_size], BYPASS);
+ (*seq_size)++;
+ }
+}
+
+static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
+ enum cc_flow_mode setup_flow_mode,
+ bool is_single_pass)
+{
+ enum cc_flow_mode data_flow_mode;
+
+ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+ if (setup_flow_mode == S_DIN_to_AES)
+ data_flow_mode = is_single_pass ?
+ AES_to_HASH_and_DOUT : DIN_AES_DOUT;
+ else
+ data_flow_mode = is_single_pass ?
+ DES_to_HASH_and_DOUT : DIN_DES_DOUT;
+ } else { /* Decrypt */
+ if (setup_flow_mode == S_DIN_to_AES)
+ data_flow_mode = is_single_pass ?
+ AES_and_HASH : DIN_AES_DOUT;
+ else
+ data_flow_mode = is_single_pass ?
+ DES_and_HASH : DIN_DES_DOUT;
+ }
+
+ return data_flow_mode;
+}
+
+static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ int direct = req_ctx->gen_ctx.op_type;
+ unsigned int data_flow_mode =
+ cc_get_data_flow(direct, ctx->flow_mode,
+ req_ctx->is_single_pass);
+
+ if (req_ctx->is_single_pass) {
+ /*
+ * Single-pass flow
+ */
+ cc_set_hmac_desc(req, desc, seq_size);
+ cc_set_cipher_desc(req, desc, seq_size);
+ cc_proc_header_desc(req, desc, seq_size);
+ cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
+ cc_proc_scheme_desc(req, desc, seq_size);
+ cc_proc_digest_desc(req, desc, seq_size);
+ return;
+ }
+
+ /*
+ * Double-pass flow
+ * Fallback for unsupported single-pass modes,
+ * i.e. when the assoc. data length is not a multiple of a word
+ */
+ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+ /* encrypt first.. */
+ cc_proc_cipher(req, desc, seq_size, data_flow_mode);
+ /* authenc after..*/
+ cc_set_hmac_desc(req, desc, seq_size);
+ cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+ cc_proc_scheme_desc(req, desc, seq_size);
+ cc_proc_digest_desc(req, desc, seq_size);
+
+ } else { /*DECRYPT*/
+ /* authenc first..*/
+ cc_set_hmac_desc(req, desc, seq_size);
+ cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+ cc_proc_scheme_desc(req, desc, seq_size);
+ /* decrypt after.. */
+ cc_proc_cipher(req, desc, seq_size, data_flow_mode);
+ /* reading the digest result (which sets the completion bit)
+ * must come after the cipher operation
+ */
+ cc_proc_digest_desc(req, desc, seq_size);
+ }
+}
+
+static void
+cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ int direct = req_ctx->gen_ctx.op_type;
+ unsigned int data_flow_mode =
+ cc_get_data_flow(direct, ctx->flow_mode,
+ req_ctx->is_single_pass);
+
+ if (req_ctx->is_single_pass) {
+ /*
+ * Single-pass flow
+ */
+ cc_set_xcbc_desc(req, desc, seq_size);
+ cc_set_cipher_desc(req, desc, seq_size);
+ cc_proc_header_desc(req, desc, seq_size);
+ cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
+ cc_proc_digest_desc(req, desc, seq_size);
+ return;
+ }
+
+ /*
+ * Double-pass flow
+ * Fallback for unsupported single-pass modes,
+ * i.e. when the assoc. data length is not a multiple of a word
+ */
+ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+ /* encrypt first.. */
+ cc_proc_cipher(req, desc, seq_size, data_flow_mode);
+ /* authenc after.. */
+ cc_set_xcbc_desc(req, desc, seq_size);
+ cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+ cc_proc_digest_desc(req, desc, seq_size);
+ } else { /*DECRYPT*/
+ /* authenc first.. */
+ cc_set_xcbc_desc(req, desc, seq_size);
+ cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+ /* decrypt after..*/
+ cc_proc_cipher(req, desc, seq_size, data_flow_mode);
+ /* reading the digest result (which sets the completion bit)
+ * must come after the cipher operation
+ */
+ cc_proc_digest_desc(req, desc, seq_size);
+ }
+}
+
+static int validate_data_size(struct cc_aead_ctx *ctx,
+ enum drv_crypto_direction direct,
+ struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ unsigned int assoclen = areq_ctx->assoclen;
+ unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
+ (req->cryptlen - ctx->authsize) : req->cryptlen;
+
+ if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
+ req->cryptlen < ctx->authsize)
+ goto data_size_err;
+
+ areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
+
+ switch (ctx->flow_mode) {
+ case S_DIN_to_AES:
+ if (ctx->cipher_mode == DRV_CIPHER_CBC &&
+ !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
+ goto data_size_err;
+ if (ctx->cipher_mode == DRV_CIPHER_CCM)
+ break;
+ if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
+ if (areq_ctx->plaintext_authenticate_only)
+ areq_ctx->is_single_pass = false;
+ break;
+ }
+
+ if (!IS_ALIGNED(assoclen, sizeof(u32)))
+ areq_ctx->is_single_pass = false;
+
+ if (ctx->cipher_mode == DRV_CIPHER_CTR &&
+ !IS_ALIGNED(cipherlen, sizeof(u32)))
+ areq_ctx->is_single_pass = false;
+
+ break;
+ case S_DIN_to_DES:
+ if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
+ goto data_size_err;
+ if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
+ areq_ctx->is_single_pass = false;
+ break;
+ default:
+ dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode);
+ goto data_size_err;
+ }
+
+ return 0;
+
+data_size_err:
+ return -EINVAL;
+}
+
+static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
+{
+ unsigned int len = 0;
+
+ if (header_size == 0)
+ return 0;
+
+ if (header_size < ((1UL << 16) - (1UL << 8))) {
+ len = 2;
+
+ pa0_buff[0] = (header_size >> 8) & 0xFF;
+ pa0_buff[1] = header_size & 0xFF;
+ } else {
+ len = 6;
+
+ pa0_buff[0] = 0xFF;
+ pa0_buff[1] = 0xFE;
+ pa0_buff[2] = (header_size >> 24) & 0xFF;
+ pa0_buff[3] = (header_size >> 16) & 0xFF;
+ pa0_buff[4] = (header_size >> 8) & 0xFF;
+ pa0_buff[5] = header_size & 0xFF;
+ }
+
+ return len;
+}
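/*
 * Illustrative sketch (not part of this patch): format_ccm_a0() emits the
 * RFC 3610 encoding of the AAD length.  For assoclen = 24 it writes the two
 * bytes { 0x00, 0x18 } and returns 2; lengths of 0xff00 and above get the
 * six-byte "0xff 0xfe <32-bit big-endian length>" form instead.  A tiny
 * usage sketch with a hypothetical buffer:
 */
static unsigned int example_format_ccm_a0(void)
{
	u8 a0[6];

	/* Returns 2; a0[0..1] == { 0x00, 0x18 } */
	return format_ccm_a0(a0, 24);
}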
+
+static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
+{
+ __be32 data;
+
+ memset(block, 0, csize);
+ block += csize;
+
+ if (csize >= 4)
+ csize = 4;
+ else if (msglen > (1 << (8 * csize)))
+ return -EOVERFLOW;
+
+ data = cpu_to_be32(msglen);
+ memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+ return 0;
+}
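/*
 * Illustrative sketch (not part of this patch): set_msg_len() writes the
 * message length big-endian into the last csize bytes of a zeroed field and
 * returns -EOVERFLOW when the length does not fit (only possible for
 * csize < 4).  A minimal usage sketch with hypothetical values:
 */
static int example_set_msg_len(void)
{
	u8 len_field[4];

	/* Encodes 0x1234: len_field becomes { 0x00, 0x00, 0x12, 0x34 } */
	return set_msg_len(len_field, 0x1234, sizeof(len_field));
}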
+
+static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ unsigned int idx = *seq_size;
+ unsigned int cipher_flow_mode;
+ dma_addr_t mac_result;
+
+ if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ cipher_flow_mode = AES_to_HASH_and_DOUT;
+ mac_result = req_ctx->mac_buf_dma_addr;
+ } else { /* Encrypt */
+ cipher_flow_mode = AES_and_HASH;
+ mac_result = req_ctx->icv_dma_addr;
+ }
+
+ /* load key */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
+ set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+ ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
+ ctx->enc_keylen), NS_BIT);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* load ctr state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ set_din_type(&desc[idx], DMA_DLLI,
+ req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* load MAC key */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
+ set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+ ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
+ ctx->enc_keylen), NS_BIT);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_aes_not_hash_mode(&desc[idx]);
+ idx++;
+
+ /* load MAC state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_aes_not_hash_mode(&desc[idx]);
+ idx++;
+
+ /* process assoc data */
+ if (req_ctx->assoclen > 0) {
+ cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
+ } else {
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ sg_dma_address(&req_ctx->ccm_adata_sg),
+ AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+ }
+
+ /* process the cipher */
+ if (req_ctx->cryptlen)
+ cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);
+
+ /* Read intermediate MAC */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
+ set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
+ NS_BIT, 0);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_aes_not_hash_mode(&desc[idx]);
+ idx++;
+
+ /* load AES-CTR state (for last MAC calculation)*/
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
+ set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ idx++;
+
+ /* encrypt the "T" value and store MAC in mac_state */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
+ ctx->authsize, NS_BIT);
+ set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ idx++;
+
+ *seq_size = idx;
+ return 0;
+}
+
+static int config_ccm_adata(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ //unsigned int size_of_a = 0, rem_a_size = 0;
+ unsigned int lp = req->iv[0];
+ /* Note: the code assumes that req->iv[0] already contains the value
+ * of L' of RFC 3610
+ */
+ unsigned int l = lp + 1; /* This is L of RFC 3610 (L' + 1). */
+ unsigned int m = ctx->authsize; /* This is M of RFC 3610 (the ICV size). */
+ u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
+ u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
+ u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
+ unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
+ DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ req->cryptlen :
+ (req->cryptlen - ctx->authsize);
+ int rc;
+
+ memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
+ memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);
+
+ /* taken from crypto/ccm.c */
+ /* 2 <= L <= 8, so 1 <= L' <= 7. */
+ if (l < 2 || l > 8) {
+ dev_dbg(dev, "illegal iv value %X\n", req->iv[0]);
+ return -EINVAL;
+ }
+ memcpy(b0, req->iv, AES_BLOCK_SIZE);
+
+ /* format control info per RFC 3610 and
+ * NIST Special Publication 800-38C
+ */
+ *b0 |= (8 * ((m - 2) / 2));
+ if (req_ctx->assoclen > 0)
+ *b0 |= 64; /* Enable bit 6 if Adata exists. */
+
+ rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write l(m) into the last l bytes. */
+ if (rc) {
+ dev_err(dev, "message len overflow detected");
+ return rc;
+ }
+ /* END of "taken from crypto/ccm.c" */
+
+ /* l(a) - size of associated data. */
+ req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen);
+
+ memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
+ req->iv[15] = 1;
+
+ memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
+ ctr_count_0[15] = 0;
+
+ return 0;
+}
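/*
 * Illustrative sketch (not part of this patch): a worked example of the B0
 * flags byte built in config_ccm_adata().  With M = authsize = 8,
 * L' = iv[0] = 3 (so L = 4) and a non-empty AAD, the flags byte becomes
 * 0x03 | 8 * ((8 - 2) / 2) | 0x40 = 0x5b.  A hypothetical helper computing
 * just that byte:
 */
static u8 example_ccm_b0_flags(u8 l_prime, unsigned int authsize, bool have_aad)
{
	u8 flags = l_prime;			/* low 3 bits: L' = L - 1 */

	flags |= 8 * ((authsize - 2) / 2);	/* bits 3..5: (M - 2) / 2 */
	if (have_aad)
		flags |= 64;			/* bit 6: Adata present */

	return flags;
}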
+
+static void cc_proc_rfc4309_ccm(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+
+ /* L' */
+ memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
+ /* For RFC 4309, always use 4 bytes for message length
+ * (at most 2^32-1 bytes).
+ */
+ areq_ctx->ctr_iv[0] = 3;
+
+ /* In RFC 4309 there is an 11-byte nonce+IV part
+ * that we build here.
+ */
+ memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
+ CCM_BLOCK_NONCE_SIZE);
+ memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
+ CCM_BLOCK_IV_SIZE);
+ req->iv = areq_ctx->ctr_iv;
+}
+
+static void cc_set_ghash_desc(struct aead_request *req,
+ struct cc_hw_desc desc[], unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ unsigned int idx = *seq_size;
+
+ /* load key to AES*/
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
+ set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+ ctx->enc_keylen, NS_BIT);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* process one zero block to generate hkey */
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
+ set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
+ NS_BIT, 0);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ idx++;
+
+ /* Memory Barrier */
+ hw_desc_init(&desc[idx]);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ idx++;
+
+ /* Load GHASH subkey */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_aes_not_hash_mode(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Configure the hash engine to work with GHASH.
+ * Since it was not possible to extend the HASH submodes to add GHASH,
+ * the following command is necessary in order to
+ * select GHASH (according to the HW designers).
+ */
+ hw_desc_init(&desc[idx]);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_aes_not_hash_mode(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
+ set_cipher_do(&desc[idx], 1); //1=AES_SK RKEK
+ set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Load the GHASH initial STATE (which is 0); as for any hash,
+ * there is an initial state.
+ */
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_aes_not_hash_mode(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ *seq_size = idx;
+}
+
+static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ unsigned int idx = *seq_size;
+
+ /* load key to AES*/
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
+ set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+ ctx->enc_keylen, NS_BIT);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
+ /* load AES/CTR initial CTR value inc by 2*/
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ set_din_type(&desc[idx], DMA_DLLI,
+ req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
+ NS_BIT);
+ set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+ }
+
+ *seq_size = idx;
+}
+
+static void cc_proc_gcm_result(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ dma_addr_t mac_result;
+ unsigned int idx = *seq_size;
+
+ if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ mac_result = req_ctx->mac_buf_dma_addr;
+ } else { /* Encrypt */
+ mac_result = req_ctx->icv_dma_addr;
+ }
+
+ /* process(ghash) gcm_block_len */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+
+ /* Store GHASH state after GHASH(Associated Data + Cipher +LenBlock) */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
+ NS_BIT, 0);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_aes_not_hash_mode(&desc[idx]);
+
+ idx++;
+
+ /* load AES/CTR initial CTR value inc by 1*/
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT);
+ set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* Memory Barrier */
+ hw_desc_init(&desc[idx]);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ idx++;
+
+ /* process GCTR on stored GHASH and store MAC in mac_state*/
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
+ set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT);
+ set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ idx++;
+
+ *seq_size = idx;
+}
+
+static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ unsigned int cipher_flow_mode;
+
+ /* In RFC 4543 there is no data to encrypt; just copy data from src to dst. */
+ if (req_ctx->plaintext_authenticate_only) {
+ cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
+ cc_set_ghash_desc(req, desc, seq_size);
+ /* process(ghash) assoc data */
+ cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
+ cc_set_gctr_desc(req, desc, seq_size);
+ cc_proc_gcm_result(req, desc, seq_size);
+ return 0;
+ }
+
+ if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ cipher_flow_mode = AES_and_HASH;
+ } else { /* Encrypt */
+ cipher_flow_mode = AES_to_HASH_and_DOUT;
+ }
+
+ // for gcm and rfc4106.
+ cc_set_ghash_desc(req, desc, seq_size);
+ /* process(ghash) assoc data */
+ if (req_ctx->assoclen > 0)
+ cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
+ cc_set_gctr_desc(req, desc, seq_size);
+ /* process(gctr+ghash) */
+ if (req_ctx->cryptlen)
+ cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
+ cc_proc_gcm_result(req, desc, seq_size);
+
+ return 0;
+}
+
+static int config_gcm_context(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
+ DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ req->cryptlen :
+ (req->cryptlen - ctx->authsize);
+ __be32 counter = cpu_to_be32(2);
+
+ dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n",
+ __func__, cryptlen, req_ctx->assoclen, ctx->authsize);
+
+ memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
+
+ memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
+
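+	/* For a 96-bit IV, GCM defines J0 = IV || 0^31 || 1: counter value 1
+	 * (gcm_iv_inc1) encrypts the final GHASH to produce the tag, while
+	 * counter value 2 (gcm_iv_inc2) is the first counter block used to
+	 * encrypt the payload.
+	 */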
+ memcpy(req->iv + 12, &counter, 4);
+ memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
+
+ counter = cpu_to_be32(1);
+ memcpy(req->iv + 12, &counter, 4);
+ memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
+
+ if (!req_ctx->plaintext_authenticate_only) {
+ __be64 temp64;
+
+ temp64 = cpu_to_be64(req_ctx->assoclen * 8);
+ memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
+ temp64 = cpu_to_be64(cryptlen * 8);
+ memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
+ } else {
+		/* RFC4543: all data (AAD, IV, plaintext) is treated as
+		 * additional authenticated data, i.e. nothing is encrypted.
+		 */
+ __be64 temp64;
+
+ temp64 = cpu_to_be64((req_ctx->assoclen + cryptlen) * 8);
+ memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
+ temp64 = 0;
+ memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
+ }
+
+ return 0;
+}
+
+static void cc_proc_rfc4_gcm(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+
+ memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
+ ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
+ memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
+ GCM_BLOCK_RFC4_IV_SIZE);
+ req->iv = areq_ctx->ctr_iv;
+}
+
+static int cc_proc_aead(struct aead_request *req,
+ enum drv_crypto_direction direct)
+{
+ int rc = 0;
+ int seq_len = 0;
+ struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct cc_crypto_req cc_req = {};
+
+ dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
+ ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
+ ctx, req, req->iv, sg_virt(req->src), req->src->offset,
+ sg_virt(req->dst), req->dst->offset, req->cryptlen);
+
+ /* STAT_PHASE_0: Init and sanity checks */
+
+ /* Check data length according to mode */
+ if (validate_data_size(ctx, direct, req)) {
+ dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
+ req->cryptlen, areq_ctx->assoclen);
+ return -EINVAL;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = cc_aead_complete;
+ cc_req.user_arg = req;
+
+ /* Setup request context */
+ areq_ctx->gen_ctx.op_type = direct;
+ areq_ctx->req_authsize = ctx->authsize;
+ areq_ctx->cipher_mode = ctx->cipher_mode;
+
+ /* STAT_PHASE_1: Map buffers */
+
+ if (ctx->cipher_mode == DRV_CIPHER_CTR) {
+ /* Build CTR IV - Copy nonce from last 4 bytes in
+ * CTR key to first 4 bytes in CTR IV
+ */
+ memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
+ CTR_RFC3686_NONCE_SIZE);
+ memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, req->iv,
+ CTR_RFC3686_IV_SIZE);
+ /* Initialize counter portion of counter block */
+ *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
+ CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
+
+ /* Replace with counter iv */
+ req->iv = areq_ctx->ctr_iv;
+ areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
+ } else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
+ (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
+ areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
+ if (areq_ctx->ctr_iv != req->iv) {
+ memcpy(areq_ctx->ctr_iv, req->iv,
+ crypto_aead_ivsize(tfm));
+ req->iv = areq_ctx->ctr_iv;
+ }
+ } else {
+ areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
+ }
+
+ if (ctx->cipher_mode == DRV_CIPHER_CCM) {
+ rc = config_ccm_adata(req);
+ if (rc) {
+ dev_dbg(dev, "config_ccm_adata() returned with a failure %d!",
+ rc);
+ goto exit;
+ }
+ } else {
+ areq_ctx->ccm_hdr_size = ccm_header_size_null;
+ }
+
+ if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
+ rc = config_gcm_context(req);
+ if (rc) {
+ dev_dbg(dev, "config_gcm_context() returned with a failure %d!",
+ rc);
+ goto exit;
+ }
+ }
+
+ rc = cc_map_aead_request(ctx->drvdata, req);
+ if (rc) {
+ dev_err(dev, "map_request() failed\n");
+ goto exit;
+ }
+
+ /* STAT_PHASE_2: Create sequence */
+
+ /* Load MLLI tables to SRAM if necessary */
+ cc_mlli_to_sram(req, desc, &seq_len);
+
+ switch (ctx->auth_mode) {
+ case DRV_HASH_SHA1:
+ case DRV_HASH_SHA256:
+ cc_hmac_authenc(req, desc, &seq_len);
+ break;
+ case DRV_HASH_XCBC_MAC:
+ cc_xcbc_authenc(req, desc, &seq_len);
+ break;
+ case DRV_HASH_NULL:
+ if (ctx->cipher_mode == DRV_CIPHER_CCM)
+ cc_ccm(req, desc, &seq_len);
+ if (ctx->cipher_mode == DRV_CIPHER_GCTR)
+ cc_gcm(req, desc, &seq_len);
+ break;
+ default:
+ dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
+ cc_unmap_aead_request(dev, req);
+ rc = -ENOTSUPP;
+ goto exit;
+ }
+
+ /* STAT_PHASE_3: Lock HW and push sequence */
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
+
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_aead_request(dev, req);
+ }
+
+exit:
+ return rc;
+}
+
+static int cc_aead_encrypt(struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc;
+
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
+
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
+ req->iv = areq_ctx->backup_iv;
+
+ return rc;
+}
+
+static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
+{
+ /* Very similar to cc_aead_encrypt() above. */
+
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc;
+
+ rc = crypto_ipsec_check_assoclen(req->assoclen);
+ if (rc)
+ goto out;
+
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE;
+
+ cc_proc_rfc4309_ccm(req);
+
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
+ req->iv = areq_ctx->backup_iv;
+out:
+ return rc;
+}
+
+static int cc_aead_decrypt(struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc;
+
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
+
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
+ req->iv = areq_ctx->backup_iv;
+
+ return rc;
+}
+
+static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc;
+
+ rc = crypto_ipsec_check_assoclen(req->assoclen);
+ if (rc)
+ goto out;
+
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE;
+
+ cc_proc_rfc4309_ccm(req);
+
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
+ req->iv = areq_ctx->backup_iv;
+
+out:
+ return rc;
+}
+
+static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
+
+ if (keylen < 4)
+ return -EINVAL;
+
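+	/* The trailing 4 bytes of an RFC4106 key are the salt (nonce), not
+	 * key material; keep them for building the per-request IV.
+	 */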
+ keylen -= 4;
+ memcpy(ctx->ctr_nonce, key + keylen, 4);
+
+ return cc_aead_setkey(tfm, key, keylen);
+}
+
+static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
+
+ if (keylen < 4)
+ return -EINVAL;
+
+ keylen -= 4;
+ memcpy(ctx->ctr_nonce, key + keylen, 4);
+
+ return cc_aead_setkey(tfm, key, keylen);
+}
+
+static int cc_gcm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
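+	/* Tag sizes permitted for GCM by NIST SP 800-38D */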
+ switch (authsize) {
+ case 4:
+ case 8:
+ case 12:
+ case 13:
+ case 14:
+ case 15:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return cc_aead_setauthsize(authenc, authsize);
+}
+
+static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "authsize %d\n", authsize);
+
+ switch (authsize) {
+ case 8:
+ case 12:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return cc_aead_setauthsize(authenc, authsize);
+}
+
+static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "authsize %d\n", authsize);
+
+ if (authsize != 16)
+ return -EINVAL;
+
+ return cc_aead_setauthsize(authenc, authsize);
+}
+
+static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc;
+
+ rc = crypto_ipsec_check_assoclen(req->assoclen);
+ if (rc)
+ goto out;
+
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen - GCM_BLOCK_RFC4_IV_SIZE;
+
+ cc_proc_rfc4_gcm(req);
+
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
+ req->iv = areq_ctx->backup_iv;
+out:
+ return rc;
+}
+
+static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc;
+
+ rc = crypto_ipsec_check_assoclen(req->assoclen);
+ if (rc)
+ goto out;
+
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
+	//plaintext is not encrypted with rfc4543
+ areq_ctx->plaintext_authenticate_only = true;
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
+
+ cc_proc_rfc4_gcm(req);
+
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
+ req->iv = areq_ctx->backup_iv;
+out:
+ return rc;
+}
+
+static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc;
+
+ rc = crypto_ipsec_check_assoclen(req->assoclen);
+ if (rc)
+ goto out;
+
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen - GCM_BLOCK_RFC4_IV_SIZE;
+
+ cc_proc_rfc4_gcm(req);
+
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
+ req->iv = areq_ctx->backup_iv;
+out:
+ return rc;
+}
+
+static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc;
+
+ rc = crypto_ipsec_check_assoclen(req->assoclen);
+ if (rc)
+ goto out;
+
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
+	//plaintext is not decrypted with rfc4543
+ areq_ctx->plaintext_authenticate_only = true;
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
+
+ cc_proc_rfc4_gcm(req);
+
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
+ req->iv = areq_ctx->backup_iv;
+out:
+ return rc;
+}
+
+/* aead alg */
+static struct cc_alg_template aead_algs[] = {
+ {
+ .name = "authenc(hmac(sha1),cbc(aes))",
+ .driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_SHA1,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "authenc(hmac(sha1),cbc(des3_ede))",
+ .driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .template_aead = {
+ .setkey = cc_des3_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_DES,
+ .auth_mode = DRV_HASH_SHA1,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "authenc(hmac(sha256),cbc(aes))",
+ .driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_SHA256,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "authenc(hmac(sha256),cbc(des3_ede))",
+ .driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .template_aead = {
+ .setkey = cc_des3_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_DES,
+ .auth_mode = DRV_HASH_SHA256,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "authenc(xcbc(aes),cbc(aes))",
+ .driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_XCBC_MAC,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
+ .blocksize = 1,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_SHA1,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
+ .blocksize = 1,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_SHA256,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
+ .blocksize = 1,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_XCBC_MAC,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "ccm(aes)",
+ .driver_name = "ccm-aes-ccree",
+ .blocksize = 1,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_ccm_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CCM,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_NULL,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "rfc4309(ccm(aes))",
+ .driver_name = "rfc4309-ccm-aes-ccree",
+ .blocksize = 1,
+ .template_aead = {
+ .setkey = cc_rfc4309_ccm_setkey,
+ .setauthsize = cc_rfc4309_ccm_setauthsize,
+ .encrypt = cc_rfc4309_ccm_encrypt,
+ .decrypt = cc_rfc4309_ccm_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = CCM_BLOCK_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CCM,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_NULL,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "gcm(aes)",
+ .driver_name = "gcm-aes-ccree",
+ .blocksize = 1,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_gcm_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = 12,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_GCTR,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_NULL,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "rfc4106(gcm(aes))",
+ .driver_name = "rfc4106-gcm-aes-ccree",
+ .blocksize = 1,
+ .template_aead = {
+ .setkey = cc_rfc4106_gcm_setkey,
+ .setauthsize = cc_rfc4106_gcm_setauthsize,
+ .encrypt = cc_rfc4106_gcm_encrypt,
+ .decrypt = cc_rfc4106_gcm_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_GCTR,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_NULL,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "rfc4543(gcm(aes))",
+ .driver_name = "rfc4543-gcm-aes-ccree",
+ .blocksize = 1,
+ .template_aead = {
+ .setkey = cc_rfc4543_gcm_setkey,
+ .setauthsize = cc_rfc4543_gcm_setauthsize,
+ .encrypt = cc_rfc4543_gcm_encrypt,
+ .decrypt = cc_rfc4543_gcm_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_GCTR,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_NULL,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+};
+
+static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
+ struct device *dev)
+{
+ struct cc_crypto_alg *t_alg;
+ struct aead_alg *alg;
+
+ t_alg = devm_kzalloc(dev, sizeof(*t_alg), GFP_KERNEL);
+ if (!t_alg)
+ return ERR_PTR(-ENOMEM);
+
+ alg = &tmpl->template_aead;
+
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ tmpl->driver_name);
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CC_CRA_PRIO;
+
+ alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_blocksize = tmpl->blocksize;
+ alg->init = cc_aead_init;
+ alg->exit = cc_aead_exit;
+
+ t_alg->aead_alg = *alg;
+
+ t_alg->cipher_mode = tmpl->cipher_mode;
+ t_alg->flow_mode = tmpl->flow_mode;
+ t_alg->auth_mode = tmpl->auth_mode;
+
+ return t_alg;
+}
+
+int cc_aead_free(struct cc_drvdata *drvdata)
+{
+ struct cc_crypto_alg *t_alg, *n;
+ struct cc_aead_handle *aead_handle = drvdata->aead_handle;
+
+ /* Remove registered algs */
+ list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list, entry) {
+ crypto_unregister_aead(&t_alg->aead_alg);
+ list_del(&t_alg->entry);
+ }
+
+ return 0;
+}
+
+int cc_aead_alloc(struct cc_drvdata *drvdata)
+{
+ struct cc_aead_handle *aead_handle;
+ struct cc_crypto_alg *t_alg;
+ int rc = -ENOMEM;
+ int alg;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ aead_handle = devm_kmalloc(dev, sizeof(*aead_handle), GFP_KERNEL);
+ if (!aead_handle) {
+ rc = -ENOMEM;
+ goto fail0;
+ }
+
+ INIT_LIST_HEAD(&aead_handle->aead_list);
+ drvdata->aead_handle = aead_handle;
+
+ aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
+ MAX_HMAC_DIGEST_SIZE);
+
+ if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
+ rc = -ENOMEM;
+ goto fail1;
+ }
+
+ /* Linux crypto */
+ for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
+ if ((aead_algs[alg].min_hw_rev > drvdata->hw_rev) ||
+ !(drvdata->std_bodies & aead_algs[alg].std_body))
+ continue;
+
+ t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
+ if (IS_ERR(t_alg)) {
+ rc = PTR_ERR(t_alg);
+ dev_err(dev, "%s alg allocation failed\n",
+ aead_algs[alg].driver_name);
+ goto fail1;
+ }
+ t_alg->drvdata = drvdata;
+ rc = crypto_register_aead(&t_alg->aead_alg);
+ if (rc) {
+ dev_err(dev, "%s alg registration failed\n",
+ t_alg->aead_alg.base.cra_driver_name);
+ goto fail1;
+ }
+
+ list_add_tail(&t_alg->entry, &aead_handle->aead_list);
+ dev_dbg(dev, "Registered %s\n",
+ t_alg->aead_alg.base.cra_driver_name);
+ }
+
+ return 0;
+
+fail1:
+ cc_aead_free(drvdata);
+fail0:
+ return rc;
+}
diff --git a/drivers/crypto/ccree/cc_aead.h b/drivers/crypto/ccree/cc_aead.h
new file mode 100644
index 000000000..b69591550
--- /dev/null
+++ b/drivers/crypto/ccree/cc_aead.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
+
+/* \file cc_aead.h
+ * ARM CryptoCell AEAD Crypto API
+ */
+
+#ifndef __CC_AEAD_H__
+#define __CC_AEAD_H__
+
+#include <linux/kernel.h>
+#include <crypto/algapi.h>
+#include <crypto/ctr.h>
+
+/* mac_cmp - HW writes 8 B but all bytes hold the same value */
+#define ICV_CMP_SIZE 8
+#define CCM_CONFIG_BUF_SIZE (AES_BLOCK_SIZE * 3)
+#define MAX_MAC_SIZE SHA256_DIGEST_SIZE
+
+/* defines for AES GCM configuration buffer */
+#define GCM_BLOCK_LEN_SIZE 8
+
+#define GCM_BLOCK_RFC4_IV_OFFSET 4
+#define GCM_BLOCK_RFC4_IV_SIZE 8 /* IV size for rfc's */
+#define GCM_BLOCK_RFC4_NONCE_OFFSET 0
+#define GCM_BLOCK_RFC4_NONCE_SIZE 4
+
+/* Offsets into AES CCM configuration buffer */
+#define CCM_B0_OFFSET 0
+#define CCM_A0_OFFSET 16
+#define CCM_CTR_COUNT_0_OFFSET 32
+/* CCM B0 and CTR_COUNT constants. */
+#define CCM_BLOCK_NONCE_OFFSET 1 /* Nonce offset inside B0 and CTR_COUNT */
+#define CCM_BLOCK_NONCE_SIZE 3 /* Nonce size inside B0 and CTR_COUNT */
+#define CCM_BLOCK_IV_OFFSET 4 /* IV offset inside B0 and CTR_COUNT */
+#define CCM_BLOCK_IV_SIZE 8 /* IV size inside B0 and CTR_COUNT */
+
+enum aead_ccm_header_size {
+ ccm_header_size_null = -1,
+ ccm_header_size_zero = 0,
+ ccm_header_size_2 = 2,
+ ccm_header_size_6 = 6,
+ ccm_header_size_max = S32_MAX
+};
+
+struct aead_req_ctx {
+	/* Allocate a full cache line although only 4 bytes are needed, to
+	 * ensure the next field starts on a cache line boundary.
+	 * Used for both the digest HW compare and the CCM/GCM MAC value.
+	 */
+ u8 mac_buf[MAX_MAC_SIZE] ____cacheline_aligned;
+ u8 ctr_iv[AES_BLOCK_SIZE] ____cacheline_aligned;
+
+ //used in gcm
+ u8 gcm_iv_inc1[AES_BLOCK_SIZE] ____cacheline_aligned;
+ u8 gcm_iv_inc2[AES_BLOCK_SIZE] ____cacheline_aligned;
+ u8 hkey[AES_BLOCK_SIZE] ____cacheline_aligned;
+ struct {
+ u8 len_a[GCM_BLOCK_LEN_SIZE] ____cacheline_aligned;
+ u8 len_c[GCM_BLOCK_LEN_SIZE];
+ } gcm_len_block;
+
+ u8 ccm_config[CCM_CONFIG_BUF_SIZE] ____cacheline_aligned;
+ /* HW actual size input */
+ unsigned int hw_iv_size ____cacheline_aligned;
+ /* used to prevent cache coherence problem */
+ u8 backup_mac[MAX_MAC_SIZE];
+ u8 *backup_iv; /* store orig iv */
+ u32 assoclen; /* size of AAD buffer to authenticate */
+ dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
+ /* buffer for internal ccm configurations */
+ dma_addr_t ccm_iv0_dma_addr;
+ dma_addr_t icv_dma_addr; /* Phys. address of ICV */
+
+ //used in gcm
+ /* buffer for internal gcm configurations */
+ dma_addr_t gcm_iv_inc1_dma_addr;
+ /* buffer for internal gcm configurations */
+ dma_addr_t gcm_iv_inc2_dma_addr;
+ dma_addr_t hkey_dma_addr; /* Phys. address of hkey */
+ dma_addr_t gcm_block_len_dma_addr; /* Phys. address of gcm block len */
+
+ u8 *icv_virt_addr; /* Virt. address of ICV */
+ struct async_gen_req_ctx gen_ctx;
+ struct cc_mlli assoc;
+ struct cc_mlli src;
+ struct cc_mlli dst;
+ struct scatterlist *src_sgl;
+ struct scatterlist *dst_sgl;
+ unsigned int src_offset;
+ unsigned int dst_offset;
+ enum cc_req_dma_buf_type assoc_buff_type;
+ enum cc_req_dma_buf_type data_buff_type;
+ struct mlli_params mlli_params;
+ unsigned int cryptlen;
+ struct scatterlist ccm_adata_sg;
+ enum aead_ccm_header_size ccm_hdr_size;
+ unsigned int req_authsize;
+ enum drv_cipher_mode cipher_mode;
+ bool is_icv_fragmented;
+ bool is_single_pass;
+ bool plaintext_authenticate_only; //for gcm_rfc4543
+};
+
+int cc_aead_alloc(struct cc_drvdata *drvdata);
+int cc_aead_free(struct cc_drvdata *drvdata);
+
+#endif /*__CC_AEAD_H__*/
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
new file mode 100644
index 000000000..9efd88f87
--- /dev/null
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -0,0 +1,1393 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
+
+#include <crypto/internal/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+
+#include "cc_buffer_mgr.h"
+#include "cc_lli_defs.h"
+#include "cc_cipher.h"
+#include "cc_hash.h"
+#include "cc_aead.h"
+
+union buffer_array_entry {
+ struct scatterlist *sgl;
+ dma_addr_t buffer_dma;
+};
+
+struct buffer_array {
+ unsigned int num_of_buffers;
+ union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
+ unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
+ int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
+ int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
+ bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
+ u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
+};
+
+static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
+{
+ switch (type) {
+ case CC_DMA_BUF_NULL:
+ return "BUF_NULL";
+ case CC_DMA_BUF_DLLI:
+ return "BUF_DLLI";
+ case CC_DMA_BUF_MLLI:
+ return "BUF_MLLI";
+ default:
+ return "BUF_INVALID";
+ }
+}
+
+/**
+ * cc_copy_mac() - Copy MAC to temporary location
+ *
+ * @dev: device object
+ * @req: aead request object
+ * @dir: [IN] copy from/to sgl
+ */
+static void cc_copy_mac(struct device *dev, struct aead_request *req,
+ enum cc_sg_cpy_direct dir)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ u32 skip = req->assoclen + req->cryptlen;
+
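+	/* The MAC occupies the last req_authsize bytes of the data, i.e. the
+	 * byte range [skip - req_authsize, skip).
+	 */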
+ cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
+ (skip - areq_ctx->req_authsize), skip, dir);
+}
+
+/**
+ * cc_get_sgl_nents() - Get scatterlist number of entries.
+ *
+ * @dev: Device object
+ * @sg_list: SG list
+ * @nbytes: [IN] Total SGL data bytes.
+ * @lbytes: [OUT] Returns the amount of bytes at the last entry
+ *
+ * Return:
+ * Number of entries in the scatterlist
+ */
+static unsigned int cc_get_sgl_nents(struct device *dev,
+ struct scatterlist *sg_list,
+ unsigned int nbytes, u32 *lbytes)
+{
+ unsigned int nents = 0;
+
+ *lbytes = 0;
+
+ while (nbytes && sg_list) {
+ nents++;
+ /* get the number of bytes in the last entry */
+ *lbytes = nbytes;
+ nbytes -= (sg_list->length > nbytes) ?
+ nbytes : sg_list->length;
+ sg_list = sg_next(sg_list);
+ }
+
+ dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
+ return nents;
+}
+
+/**
+ * cc_copy_sg_portion() - Copy scatter list data,
+ * from to_skip to end, to dest and vice versa
+ *
+ * @dev: Device object
+ * @dest: Buffer to copy to/from
+ * @sg: SG list
+ * @to_skip: Number of bytes to skip before copying
+ * @end: Offset of last byte to copy
+ * @direct: Transfer direction (true == from SG list to buffer, false == from
+ * buffer to SG list)
+ */
+void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
+ u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
+{
+ u32 nents;
+
+ nents = sg_nents_for_len(sg, end);
+ sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip,
+ (direct == CC_SG_TO_BUF));
+}
+
+static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
+ u32 buff_size, u32 *curr_nents,
+ u32 **mlli_entry_pp)
+{
+ u32 *mlli_entry_p = *mlli_entry_pp;
+ u32 new_nents;
+
+ /* Verify there is no memory overflow*/
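+	/* Worst case: one LLI entry per full CC_MAX_MLLI_ENTRY_SIZE chunk
+	 * plus one for the trailing partial chunk.
+	 */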
+ new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
+ if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
+ dev_err(dev, "Too many mlli entries. current %d max %d\n",
+ new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
+ return -ENOMEM;
+ }
+
+ /*handle buffer longer than 64 kbytes */
+ while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
+ cc_lli_set_addr(mlli_entry_p, buff_dma);
+ cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
+ dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
+ *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
+ mlli_entry_p[LLI_WORD1_OFFSET]);
+ buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
+ buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
+ mlli_entry_p = mlli_entry_p + 2;
+ (*curr_nents)++;
+ }
+ /*Last entry */
+ cc_lli_set_addr(mlli_entry_p, buff_dma);
+ cc_lli_set_size(mlli_entry_p, buff_size);
+ dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
+ *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
+ mlli_entry_p[LLI_WORD1_OFFSET]);
+ mlli_entry_p = mlli_entry_p + 2;
+ *mlli_entry_pp = mlli_entry_p;
+ (*curr_nents)++;
+ return 0;
+}
+
+static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
+ u32 sgl_data_len, u32 sgl_offset,
+ u32 *curr_nents, u32 **mlli_entry_pp)
+{
+ struct scatterlist *curr_sgl = sgl;
+ u32 *mlli_entry_p = *mlli_entry_pp;
+ s32 rc = 0;
+
+ for ( ; (curr_sgl && sgl_data_len);
+ curr_sgl = sg_next(curr_sgl)) {
+ u32 entry_data_len =
+ (sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
+ sg_dma_len(curr_sgl) - sgl_offset :
+ sgl_data_len;
+ sgl_data_len -= entry_data_len;
+ rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
+ sgl_offset, entry_data_len,
+ curr_nents, &mlli_entry_p);
+ if (rc)
+ return rc;
+
+ sgl_offset = 0;
+ }
+ *mlli_entry_pp = mlli_entry_p;
+ return 0;
+}
+
+static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
+ struct mlli_params *mlli_params, gfp_t flags)
+{
+ u32 *mlli_p;
+ u32 total_nents = 0, prev_total_nents = 0;
+ int rc = 0, i;
+
+ dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);
+
+ /* Allocate memory from the pointed pool */
+ mlli_params->mlli_virt_addr =
+ dma_pool_alloc(mlli_params->curr_pool, flags,
+ &mlli_params->mlli_dma_addr);
+ if (!mlli_params->mlli_virt_addr) {
+ dev_err(dev, "dma_pool_alloc() failed\n");
+ rc = -ENOMEM;
+ goto build_mlli_exit;
+ }
+ /* Point to start of MLLI */
+ mlli_p = mlli_params->mlli_virt_addr;
+ /* go over all SG's and link it to one MLLI table */
+ for (i = 0; i < sg_data->num_of_buffers; i++) {
+ union buffer_array_entry *entry = &sg_data->entry[i];
+ u32 tot_len = sg_data->total_data_len[i];
+ u32 offset = sg_data->offset[i];
+
+ rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len, offset,
+ &total_nents, &mlli_p);
+ if (rc)
+ return rc;
+
+ /* set last bit in the current table */
+ if (sg_data->mlli_nents[i]) {
+ /*Calculate the current MLLI table length for the
+ *length field in the descriptor
+ */
+ *sg_data->mlli_nents[i] +=
+ (total_nents - prev_total_nents);
+ prev_total_nents = total_nents;
+ }
+ }
+
+ /* Set MLLI size for the bypass operation */
+ mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
+
+ dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
+ mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
+ mlli_params->mlli_len);
+
+build_mlli_exit:
+ return rc;
+}
+
+static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
+ unsigned int nents, struct scatterlist *sgl,
+ unsigned int data_len, unsigned int data_offset,
+ bool is_last_table, u32 *mlli_nents)
+{
+ unsigned int index = sgl_data->num_of_buffers;
+
+ dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
+ index, nents, sgl, data_len, is_last_table);
+ sgl_data->nents[index] = nents;
+ sgl_data->entry[index].sgl = sgl;
+ sgl_data->offset[index] = data_offset;
+ sgl_data->total_data_len[index] = data_len;
+ sgl_data->is_last[index] = is_last_table;
+ sgl_data->mlli_nents[index] = mlli_nents;
+ if (sgl_data->mlli_nents[index])
+ *sgl_data->mlli_nents[index] = 0;
+ sgl_data->num_of_buffers++;
+}
+
+static int cc_map_sg(struct device *dev, struct scatterlist *sg,
+ unsigned int nbytes, int direction, u32 *nents,
+ u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
+{
+ int ret = 0;
+
+ if (!nbytes) {
+ *mapped_nents = 0;
+ *lbytes = 0;
+ *nents = 0;
+ return 0;
+ }
+
+ *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
+ if (*nents > max_sg_nents) {
+ *nents = 0;
+ dev_err(dev, "Too many fragments. current %d max %d\n",
+ *nents, max_sg_nents);
+ return -ENOMEM;
+ }
+
+ ret = dma_map_sg(dev, sg, *nents, direction);
+ if (!ret) {
+ *nents = 0;
+ dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
+ return -ENOMEM;
+ }
+
+ *mapped_nents = ret;
+
+ return 0;
+}
+
+static int
+cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
+ u8 *config_data, struct buffer_array *sg_data,
+ unsigned int assoclen)
+{
+ dev_dbg(dev, " handle additional data config set to DLLI\n");
+ /* create sg for the current buffer */
+ sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
+ AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
+ if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
+ dev_err(dev, "dma_map_sg() config buffer failed\n");
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+ &sg_dma_address(&areq_ctx->ccm_adata_sg),
+ sg_page(&areq_ctx->ccm_adata_sg),
+ sg_virt(&areq_ctx->ccm_adata_sg),
+ areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
+ /* prepare for case of MLLI */
+ if (assoclen > 0) {
+ cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
+ (AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
+ 0, false, NULL);
+ }
+ return 0;
+}
+
+static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
+ u8 *curr_buff, u32 curr_buff_cnt,
+ struct buffer_array *sg_data)
+{
+ dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
+ /* create sg for the current buffer */
+ sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
+ if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
+ dev_err(dev, "dma_map_sg() src buffer failed\n");
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+ &sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
+ sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
+ areq_ctx->buff_sg->length);
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
+ areq_ctx->curr_sg = areq_ctx->buff_sg;
+ areq_ctx->in_nents = 0;
+ /* prepare for case of MLLI */
+ cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
+ false, NULL);
+ return 0;
+}
+
+void cc_unmap_cipher_request(struct device *dev, void *ctx,
+ unsigned int ivsize, struct scatterlist *src,
+ struct scatterlist *dst)
+{
+ struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
+
+ if (req_ctx->gen_ctx.iv_dma_addr) {
+ dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
+ &req_ctx->gen_ctx.iv_dma_addr, ivsize);
+ dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
+ ivsize, DMA_BIDIRECTIONAL);
+ }
+ /* Release pool */
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
+ req_ctx->mlli_params.mlli_virt_addr) {
+ dma_pool_free(req_ctx->mlli_params.curr_pool,
+ req_ctx->mlli_params.mlli_virt_addr,
+ req_ctx->mlli_params.mlli_dma_addr);
+ }
+
+ if (src != dst) {
+ dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE);
+ dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
+ dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+ } else {
+ dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+ }
+}
+
+int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
+ unsigned int ivsize, unsigned int nbytes,
+ void *info, struct scatterlist *src,
+ struct scatterlist *dst, gfp_t flags)
+{
+ struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
+ struct mlli_params *mlli_params = &req_ctx->mlli_params;
+ struct device *dev = drvdata_to_dev(drvdata);
+ struct buffer_array sg_data;
+ u32 dummy = 0;
+ int rc = 0;
+ u32 mapped_nents = 0;
+ int src_direction = (src != dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
+
+ req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
+ mlli_params->curr_pool = NULL;
+ sg_data.num_of_buffers = 0;
+
+ /* Map IV buffer */
+ if (ivsize) {
+ dump_byte_array("iv", info, ivsize);
+ req_ctx->gen_ctx.iv_dma_addr =
+ dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
+ dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+ ivsize, info);
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+ ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
+ } else {
+ req_ctx->gen_ctx.iv_dma_addr = 0;
+ }
+
+ /* Map the src SGL */
+ rc = cc_map_sg(dev, src, nbytes, src_direction, &req_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
+ if (rc)
+ goto cipher_exit;
+ if (mapped_nents > 1)
+ req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
+
+ if (src == dst) {
+ /* Handle inplace operation */
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+ req_ctx->out_nents = 0;
+ cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
+ nbytes, 0, true,
+ &req_ctx->in_mlli_nents);
+ }
+ } else {
+ /* Map the dst sg */
+ rc = cc_map_sg(dev, dst, nbytes, DMA_FROM_DEVICE,
+ &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy, &mapped_nents);
+ if (rc)
+ goto cipher_exit;
+ if (mapped_nents > 1)
+ req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
+
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+ cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
+ nbytes, 0, true,
+ &req_ctx->in_mlli_nents);
+ cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
+ nbytes, 0, true,
+ &req_ctx->out_mlli_nents);
+ }
+ }
+
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+ mlli_params->curr_pool = drvdata->mlli_buffs_pool;
+ rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+ if (rc)
+ goto cipher_exit;
+ }
+
+ dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
+ cc_dma_buf_type(req_ctx->dma_buf_type));
+
+ return 0;
+
+cipher_exit:
+ cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+ return rc;
+}
+
+void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ unsigned int hw_iv_size = areq_ctx->hw_iv_size;
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+ int src_direction = (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
+
+ if (areq_ctx->mac_buf_dma_addr) {
+ dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
+ MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
+ }
+
+ if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
+ if (areq_ctx->hkey_dma_addr) {
+ dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
+ AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
+ }
+
+ if (areq_ctx->gcm_block_len_dma_addr) {
+ dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ }
+
+ if (areq_ctx->gcm_iv_inc1_dma_addr) {
+ dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ }
+
+ if (areq_ctx->gcm_iv_inc2_dma_addr) {
+ dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ }
+ }
+
+ if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
+ if (areq_ctx->ccm_iv0_dma_addr) {
+ dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ }
+
+ dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
+ }
+ if (areq_ctx->gen_ctx.iv_dma_addr) {
+ dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
+ hw_iv_size, DMA_BIDIRECTIONAL);
+ kfree_sensitive(areq_ctx->gen_ctx.iv);
+ }
+
+ /* Release pool */
+ if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
+ areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
+ (areq_ctx->mlli_params.mlli_virt_addr)) {
+ dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
+ &areq_ctx->mlli_params.mlli_dma_addr,
+ areq_ctx->mlli_params.mlli_virt_addr);
+ dma_pool_free(areq_ctx->mlli_params.curr_pool,
+ areq_ctx->mlli_params.mlli_virt_addr,
+ areq_ctx->mlli_params.mlli_dma_addr);
+ }
+
+ dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
+ sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
+ areq_ctx->assoclen, req->cryptlen);
+
+ dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction);
+ if (req->src != req->dst) {
+ dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
+ sg_virt(req->dst));
+ dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE);
+ }
+ if (drvdata->coherent &&
+ areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
+ req->src == req->dst) {
+		/* Copy the MAC back from its temporary location to deal with
+		 * possible data overwriting caused by a cache coherence
+		 * problem.
+		 */
+ cc_copy_mac(dev, req, CC_SG_FROM_BUF);
+ }
+}
+
+static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
+ u32 last_entry_data_size)
+{
+ return ((sgl_nents > 1) && (last_entry_data_size < authsize));
+}
+
+static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ bool is_last, bool do_chain)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ unsigned int hw_iv_size = areq_ctx->hw_iv_size;
+ struct device *dev = drvdata_to_dev(drvdata);
+ gfp_t flags = cc_gfp_flags(&req->base);
+ int rc = 0;
+
+ if (!req->iv) {
+ areq_ctx->gen_ctx.iv_dma_addr = 0;
+ areq_ctx->gen_ctx.iv = NULL;
+ goto chain_iv_exit;
+ }
+
+ areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
+ if (!areq_ctx->gen_ctx.iv)
+ return -ENOMEM;
+
+ areq_ctx->gen_ctx.iv_dma_addr =
+ dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
+ dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+ hw_iv_size, req->iv);
+ kfree_sensitive(areq_ctx->gen_ctx.iv);
+ areq_ctx->gen_ctx.iv = NULL;
+ rc = -ENOMEM;
+ goto chain_iv_exit;
+ }
+
+ dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+ hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
+
+chain_iv_exit:
+ return rc;
+}
+
+static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ bool is_last, bool do_chain)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc = 0;
+ int mapped_nents = 0;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ if (!sg_data) {
+ rc = -EINVAL;
+ goto chain_assoc_exit;
+ }
+
+ if (areq_ctx->assoclen == 0) {
+ areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
+ areq_ctx->assoc.nents = 0;
+ areq_ctx->assoc.mlli_nents = 0;
+ dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
+ cc_dma_buf_type(areq_ctx->assoc_buff_type),
+ areq_ctx->assoc.nents);
+ goto chain_assoc_exit;
+ }
+
+ mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen);
+ if (mapped_nents < 0)
+ return mapped_nents;
+
+ if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
+ dev_err(dev, "Too many fragments. current %d max %d\n",
+ mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
+ return -ENOMEM;
+ }
+ areq_ctx->assoc.nents = mapped_nents;
+
+ /* in CCM case we have additional entry for
+ * ccm header configurations
+ */
+ if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
+ if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
+ dev_err(dev, "CCM case.Too many fragments. Current %d max %d\n",
+ (areq_ctx->assoc.nents + 1),
+ LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
+ rc = -ENOMEM;
+ goto chain_assoc_exit;
+ }
+ }
+
+ if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
+ areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
+ else
+ areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
+
+ if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
+ dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
+ cc_dma_buf_type(areq_ctx->assoc_buff_type),
+ areq_ctx->assoc.nents);
+ cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
+ areq_ctx->assoclen, 0, is_last,
+ &areq_ctx->assoc.mlli_nents);
+ areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
+ }
+
+chain_assoc_exit:
+ return rc;
+}
+
+static void cc_prepare_aead_data_dlli(struct aead_request *req,
+ u32 *src_last_bytes, u32 *dst_last_bytes)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
+ unsigned int authsize = areq_ctx->req_authsize;
+ struct scatterlist *sg;
+ ssize_t offset;
+
+ areq_ctx->is_icv_fragmented = false;
+
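+	/* In the DLLI case the mapped data is contiguous, so the ICV simply
+	 * occupies the last authsize bytes of the relevant SGL entry.
+	 */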
+ if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ sg = areq_ctx->src_sgl;
+ offset = *src_last_bytes - authsize;
+ } else {
+ sg = areq_ctx->dst_sgl;
+ offset = *dst_last_bytes - authsize;
+ }
+
+ areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
+ areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
+}
+
+static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ u32 *src_last_bytes, u32 *dst_last_bytes,
+ bool is_last_table)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
+ unsigned int authsize = areq_ctx->req_authsize;
+ struct device *dev = drvdata_to_dev(drvdata);
+ struct scatterlist *sg;
+
+ if (req->src == req->dst) {
+ /*INPLACE*/
+ cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
+ areq_ctx->src_sgl, areq_ctx->cryptlen,
+ areq_ctx->src_offset, is_last_table,
+ &areq_ctx->src.mlli_nents);
+
+ areq_ctx->is_icv_fragmented =
+ cc_is_icv_frag(areq_ctx->src.nents, authsize,
+ *src_last_bytes);
+
+ if (areq_ctx->is_icv_fragmented) {
+		if (areq_ctx->is_icv_fragmented) {
+			/* Backup happens only when the ICV is fragmented; ICV
+			 * verification is then done by CPU compare in order to
+			 * simplify MAC verification upon request completion.
+			 */
+ if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
+				/* On coherent platforms (e.g. ACP) the ICV has
+				 * already been copied for any in-place decrypt
+				 * operation, so skip the copy here.
+				 */
+ if (!drvdata->coherent)
+ cc_copy_mac(dev, req, CC_SG_TO_BUF);
+
+ areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
+ } else {
+ areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
+ areq_ctx->icv_dma_addr =
+ areq_ctx->mac_buf_dma_addr;
+ }
+ } else { /* Contig. ICV */
+ sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
+			/* Should handle the case where the sg is not contiguous. */
+ areq_ctx->icv_dma_addr = sg_dma_address(sg) +
+ (*src_last_bytes - authsize);
+ areq_ctx->icv_virt_addr = sg_virt(sg) +
+ (*src_last_bytes - authsize);
+ }
+
+ } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ /*NON-INPLACE and DECRYPT*/
+ cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
+ areq_ctx->src_sgl, areq_ctx->cryptlen,
+ areq_ctx->src_offset, is_last_table,
+ &areq_ctx->src.mlli_nents);
+ cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
+ areq_ctx->dst_sgl, areq_ctx->cryptlen,
+ areq_ctx->dst_offset, is_last_table,
+ &areq_ctx->dst.mlli_nents);
+
+ areq_ctx->is_icv_fragmented =
+ cc_is_icv_frag(areq_ctx->src.nents, authsize,
+ *src_last_bytes);
+		/* Backup happens only when the ICV is fragmented; ICV
+		 * verification is then done by CPU compare in order to simplify
+		 * MAC verification upon request completion.
+		 */
+ if (areq_ctx->is_icv_fragmented) {
+ cc_copy_mac(dev, req, CC_SG_TO_BUF);
+ areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
+
+ } else { /* Contig. ICV */
+ sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
+			/* Should handle the case where the sg is not contiguous. */
+ areq_ctx->icv_dma_addr = sg_dma_address(sg) +
+ (*src_last_bytes - authsize);
+ areq_ctx->icv_virt_addr = sg_virt(sg) +
+ (*src_last_bytes - authsize);
+ }
+
+ } else {
+ /*NON-INPLACE and ENCRYPT*/
+ cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
+ areq_ctx->dst_sgl, areq_ctx->cryptlen,
+ areq_ctx->dst_offset, is_last_table,
+ &areq_ctx->dst.mlli_nents);
+ cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
+ areq_ctx->src_sgl, areq_ctx->cryptlen,
+ areq_ctx->src_offset, is_last_table,
+ &areq_ctx->src.mlli_nents);
+
+ areq_ctx->is_icv_fragmented =
+ cc_is_icv_frag(areq_ctx->dst.nents, authsize,
+ *dst_last_bytes);
+
+ if (!areq_ctx->is_icv_fragmented) {
+ sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
+ /* Contig. ICV */
+ areq_ctx->icv_dma_addr = sg_dma_address(sg) +
+ (*dst_last_bytes - authsize);
+ areq_ctx->icv_virt_addr = sg_virt(sg) +
+ (*dst_last_bytes - authsize);
+ } else {
+ areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
+ areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
+ }
+ }
+}
+
+static int cc_aead_chain_data(struct cc_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ bool is_last_table, bool do_chain)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct device *dev = drvdata_to_dev(drvdata);
+ enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
+ unsigned int authsize = areq_ctx->req_authsize;
+ unsigned int src_last_bytes = 0, dst_last_bytes = 0;
+ int rc = 0;
+ u32 src_mapped_nents = 0, dst_mapped_nents = 0;
+ u32 offset = 0;
+ /* non-inplace mode */
+ unsigned int size_for_map = req->assoclen + req->cryptlen;
+ u32 sg_index = 0;
+ u32 size_to_skip = req->assoclen;
+ struct scatterlist *sgl;
+
+ offset = size_to_skip;
+
+ if (!sg_data)
+ return -EINVAL;
+
+ areq_ctx->src_sgl = req->src;
+ areq_ctx->dst_sgl = req->dst;
+
+ size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ authsize : 0;
+ src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
+ &src_last_bytes);
+ sg_index = areq_ctx->src_sgl->length;
+	/* Skip the assoc-data part of the src SGL to find where the payload starts */
+ while (src_mapped_nents && (sg_index <= size_to_skip)) {
+ src_mapped_nents--;
+ offset -= areq_ctx->src_sgl->length;
+ sgl = sg_next(areq_ctx->src_sgl);
+ if (!sgl)
+ break;
+ areq_ctx->src_sgl = sgl;
+ sg_index += areq_ctx->src_sgl->length;
+ }
+ if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
+ dev_err(dev, "Too many fragments. current %d max %d\n",
+ src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
+ return -ENOMEM;
+ }
+
+ areq_ctx->src.nents = src_mapped_nents;
+
+ areq_ctx->src_offset = offset;
+
+ if (req->src != req->dst) {
+ size_for_map = req->assoclen + req->cryptlen;
+
+ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
+ size_for_map += authsize;
+ else
+ size_for_map -= authsize;
+
+ rc = cc_map_sg(dev, req->dst, size_for_map, DMA_FROM_DEVICE,
+ &areq_ctx->dst.mapped_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
+ &dst_mapped_nents);
+ if (rc)
+ goto chain_data_exit;
+ }
+
+ dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
+ &dst_last_bytes);
+ sg_index = areq_ctx->dst_sgl->length;
+ offset = size_to_skip;
+
+	/* Skip the assoc-data part of the dst SGL to find where the payload starts */
+ while (dst_mapped_nents && sg_index <= size_to_skip) {
+ dst_mapped_nents--;
+ offset -= areq_ctx->dst_sgl->length;
+ sgl = sg_next(areq_ctx->dst_sgl);
+ if (!sgl)
+ break;
+ areq_ctx->dst_sgl = sgl;
+ sg_index += areq_ctx->dst_sgl->length;
+ }
+ if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
+ dev_err(dev, "Too many fragments. current %d max %d\n",
+ dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
+ return -ENOMEM;
+ }
+ areq_ctx->dst.nents = dst_mapped_nents;
+ areq_ctx->dst_offset = offset;
+ if (src_mapped_nents > 1 ||
+ dst_mapped_nents > 1 ||
+ do_chain) {
+ areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
+ cc_prepare_aead_data_mlli(drvdata, req, sg_data,
+ &src_last_bytes, &dst_last_bytes,
+ is_last_table);
+ } else {
+ areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
+ cc_prepare_aead_data_dlli(req, &src_last_bytes,
+ &dst_last_bytes);
+ }
+
+chain_data_exit:
+ return rc;
+}
+
+static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
+ struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ u32 curr_mlli_size = 0;
+
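+	/* The MLLI tables are laid out back to back in SRAM: the assoc table
+	 * first, followed by the src/dst data tables; each .sram_addr below
+	 * records where the corresponding table starts.
+	 */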
+ if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
+ areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
+ curr_mlli_size = areq_ctx->assoc.mlli_nents *
+ LLI_ENTRY_BYTE_SIZE;
+ }
+
+ if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
+		/* In-place case: dst nents equals src nents */
+ if (req->src == req->dst) {
+ areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
+ areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
+ curr_mlli_size;
+ areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
+ if (!areq_ctx->is_single_pass)
+ areq_ctx->assoc.mlli_nents +=
+ areq_ctx->src.mlli_nents;
+ } else {
+ if (areq_ctx->gen_ctx.op_type ==
+ DRV_CRYPTO_DIRECTION_DECRYPT) {
+ areq_ctx->src.sram_addr =
+ drvdata->mlli_sram_addr +
+ curr_mlli_size;
+ areq_ctx->dst.sram_addr =
+ areq_ctx->src.sram_addr +
+ areq_ctx->src.mlli_nents *
+ LLI_ENTRY_BYTE_SIZE;
+ if (!areq_ctx->is_single_pass)
+ areq_ctx->assoc.mlli_nents +=
+ areq_ctx->src.mlli_nents;
+ } else {
+ areq_ctx->dst.sram_addr =
+ drvdata->mlli_sram_addr +
+ curr_mlli_size;
+ areq_ctx->src.sram_addr =
+ areq_ctx->dst.sram_addr +
+ areq_ctx->dst.mlli_nents *
+ LLI_ENTRY_BYTE_SIZE;
+ if (!areq_ctx->is_single_pass)
+ areq_ctx->assoc.mlli_nents +=
+ areq_ctx->dst.mlli_nents;
+ }
+ }
+ }
+}
+
+int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct mlli_params *mlli_params = &areq_ctx->mlli_params;
+ struct device *dev = drvdata_to_dev(drvdata);
+ struct buffer_array sg_data;
+ unsigned int authsize = areq_ctx->req_authsize;
+ int rc = 0;
+ dma_addr_t dma_addr;
+ u32 mapped_nents = 0;
+ u32 dummy = 0; /*used for the assoc data fragments */
+ u32 size_to_map;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ mlli_params->curr_pool = NULL;
+ sg_data.num_of_buffers = 0;
+
+	/* Copy the MAC to a temporary location to deal with possible
+	 * data overwriting caused by a cache coherence problem.
+	 */
+ if (drvdata->coherent &&
+ areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
+ req->src == req->dst)
+ cc_copy_mac(dev, req, CC_SG_TO_BUF);
+
+	/* Calculate the data size for the cipher; the ICV is excluded on decrypt */
+ areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
+ DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ req->cryptlen :
+ (req->cryptlen - authsize);
+
+ dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
+ MAX_MAC_SIZE, areq_ctx->mac_buf);
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+ areq_ctx->mac_buf_dma_addr = dma_addr;
+
+ if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
+ void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
+
+ dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
+ AES_BLOCK_SIZE, addr);
+ areq_ctx->ccm_iv0_dma_addr = 0;
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+ areq_ctx->ccm_iv0_dma_addr = dma_addr;
+
+ rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
+ &sg_data, areq_ctx->assoclen);
+ if (rc)
+ goto aead_map_failure;
+ }
+
+ if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
+ dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
+ AES_BLOCK_SIZE, areq_ctx->hkey);
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+ areq_ctx->hkey_dma_addr = dma_addr;
+
+ dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
+ AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+ areq_ctx->gcm_block_len_dma_addr = dma_addr;
+
+ dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
+ AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
+ areq_ctx->gcm_iv_inc1_dma_addr = 0;
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+ areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;
+
+ dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
+ AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
+ areq_ctx->gcm_iv_inc2_dma_addr = 0;
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+ areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
+ }
+
+ size_to_map = req->cryptlen + req->assoclen;
+ /* If we do in-place encryption, we also need the auth tag */
+ if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
+ (req->src == req->dst)) {
+ size_to_map += authsize;
+ }
+
+ rc = cc_map_sg(dev, req->src, size_to_map,
+ (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL),
+ &areq_ctx->src.mapped_nents,
+ (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
+ LLI_MAX_NUM_OF_DATA_ENTRIES),
+ &dummy, &mapped_nents);
+ if (rc)
+ goto aead_map_failure;
+
+ if (areq_ctx->is_single_pass) {
+ /*
+ * Create MLLI table for:
+ * (1) Assoc. data
+ * (2) Src/Dst SGLs
+		 * Note: IV is a contiguous buffer (not an SGL)
+ */
+ rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
+ if (rc)
+ goto aead_map_failure;
+ rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
+ if (rc)
+ goto aead_map_failure;
+ rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
+ if (rc)
+ goto aead_map_failure;
+ } else { /* DOUBLE-PASS flow */
+ /*
+ * Prepare MLLI table(s) in this order:
+ *
+ * If ENCRYPT/DECRYPT (inplace):
+ * (1) MLLI table for assoc
+ * (2) IV entry (chained right after end of assoc)
+ * (3) MLLI for src/dst (inplace operation)
+ *
+ * If ENCRYPT (non-inplace)
+ * (1) MLLI table for assoc
+ * (2) IV entry (chained right after end of assoc)
+ * (3) MLLI for dst
+ * (4) MLLI for src
+ *
+ * If DECRYPT (non-inplace)
+ * (1) MLLI table for assoc
+ * (2) IV entry (chained right after end of assoc)
+ * (3) MLLI for src
+ * (4) MLLI for dst
+ */
+ rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
+ if (rc)
+ goto aead_map_failure;
+ rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
+ if (rc)
+ goto aead_map_failure;
+ rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
+ if (rc)
+ goto aead_map_failure;
+ }
+
+	/* MLLI support - start building the MLLI according to the above
+	 * results
+	 */
+ if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
+ areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
+ mlli_params->curr_pool = drvdata->mlli_buffs_pool;
+ rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+ if (rc)
+ goto aead_map_failure;
+
+ cc_update_aead_mlli_nents(drvdata, req);
+ dev_dbg(dev, "assoc params mn %d\n",
+ areq_ctx->assoc.mlli_nents);
+ dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
+ dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
+ }
+ return 0;
+
+aead_map_failure:
+ cc_unmap_aead_request(dev, req);
+ return rc;
+}
+
+int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ bool do_update, gfp_t flags)
+{
+ struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
+ struct device *dev = drvdata_to_dev(drvdata);
+ u8 *curr_buff = cc_hash_buf(areq_ctx);
+ u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
+ struct mlli_params *mlli_params = &areq_ctx->mlli_params;
+ struct buffer_array sg_data;
+ int rc = 0;
+ u32 dummy = 0;
+ u32 mapped_nents = 0;
+
+ dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
+ curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
+ /* Init the type of the dma buffer */
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
+ mlli_params->curr_pool = NULL;
+ sg_data.num_of_buffers = 0;
+ areq_ctx->in_nents = 0;
+
+ if (nbytes == 0 && *curr_buff_cnt == 0) {
+ /* nothing to do */
+ return 0;
+ }
+
+ /* map the previous buffer */
+ if (*curr_buff_cnt) {
+ rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+ &sg_data);
+ if (rc)
+ return rc;
+ }
+
+ if (src && nbytes > 0 && do_update) {
+ rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
+ &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy, &mapped_nents);
+ if (rc)
+ goto unmap_curr_buff;
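+		/*
+		 * A single mapped SG entry with no previously buffered data
+		 * can be handed to the HW directly (DLLI); anything else
+		 * needs an MLLI table.
+		 */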
+ if (src && mapped_nents == 1 &&
+ areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
+ memcpy(areq_ctx->buff_sg, src,
+ sizeof(struct scatterlist));
+ areq_ctx->buff_sg->length = nbytes;
+ areq_ctx->curr_sg = areq_ctx->buff_sg;
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
+ } else {
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
+ }
+ }
+
+	/* Build MLLI */
+ if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
+ mlli_params->curr_pool = drvdata->mlli_buffs_pool;
+ /* add the src data to the sg_data */
+ cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
+ 0, true, &areq_ctx->mlli_nents);
+ rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+ if (rc)
+ goto fail_unmap_din;
+ }
+ /* change the buffer index for the unmap function */
+ areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
+ dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
+ cc_dma_buf_type(areq_ctx->data_dma_buf_type));
+ return 0;
+
+fail_unmap_din:
+ dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
+
+unmap_curr_buff:
+ if (*curr_buff_cnt)
+ dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
+
+ return rc;
+}
+
+int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ unsigned int block_size, gfp_t flags)
+{
+ struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
+ struct device *dev = drvdata_to_dev(drvdata);
+ u8 *curr_buff = cc_hash_buf(areq_ctx);
+ u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
+ u8 *next_buff = cc_next_buf(areq_ctx);
+ u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
+ struct mlli_params *mlli_params = &areq_ctx->mlli_params;
+ unsigned int update_data_len;
+ u32 total_in_len = nbytes + *curr_buff_cnt;
+ struct buffer_array sg_data;
+ unsigned int swap_index = 0;
+ int rc = 0;
+ u32 dummy = 0;
+ u32 mapped_nents = 0;
+
+ dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
+ curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
+ /* Init the type of the dma buffer */
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
+ mlli_params->curr_pool = NULL;
+ areq_ctx->curr_sg = NULL;
+ sg_data.num_of_buffers = 0;
+ areq_ctx->in_nents = 0;
+
+ if (total_in_len < block_size) {
+ dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
+ curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
+ areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
+ sg_copy_to_buffer(src, areq_ctx->in_nents,
+ &curr_buff[*curr_buff_cnt], nbytes);
+ *curr_buff_cnt += nbytes;
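+		/*
+		 * Data was only buffered; returning 1 lets the caller skip
+		 * the HW update pass for this request.
+		 */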
+ return 1;
+ }
+
+	/* Calculate the residue size */
+ *next_buff_cnt = total_in_len & (block_size - 1);
+ /* update data len */
+ update_data_len = total_in_len - *next_buff_cnt;
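+	/*
+	 * Illustrative example: with block_size 64, *curr_buff_cnt 10 and
+	 * nbytes 150, total_in_len is 160, so *next_buff_cnt becomes 32 and
+	 * update_data_len becomes 128 (two full blocks go to the HW).
+	 */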
+
+ dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
+ *next_buff_cnt, update_data_len);
+
+ /* Copy the new residue to next buffer */
+ if (*next_buff_cnt) {
+ dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
+ next_buff, (update_data_len - *curr_buff_cnt),
+ *next_buff_cnt);
+ cc_copy_sg_portion(dev, next_buff, src,
+ (update_data_len - *curr_buff_cnt),
+ nbytes, CC_SG_TO_BUF);
+ /* change the buffer index for next operation */
+ swap_index = 1;
+ }
+
+ if (*curr_buff_cnt) {
+ rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+ &sg_data);
+ if (rc)
+ return rc;
+ /* change the buffer index for next operation */
+ swap_index = 1;
+ }
+
+ if (update_data_len > *curr_buff_cnt) {
+ rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
+ DMA_TO_DEVICE, &areq_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
+ &mapped_nents);
+ if (rc)
+ goto unmap_curr_buff;
+ if (mapped_nents == 1 &&
+ areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
+ /* only one entry in the SG and no previous data */
+ memcpy(areq_ctx->buff_sg, src,
+ sizeof(struct scatterlist));
+ areq_ctx->buff_sg->length = update_data_len;
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
+ areq_ctx->curr_sg = areq_ctx->buff_sg;
+ } else {
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
+ }
+ }
+
+ if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
+ mlli_params->curr_pool = drvdata->mlli_buffs_pool;
+ /* add the src data to the sg_data */
+ cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
+ (update_data_len - *curr_buff_cnt), 0, true,
+ &areq_ctx->mlli_nents);
+ rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+ if (rc)
+ goto fail_unmap_din;
+ }
+ areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
+
+ return 0;
+
+fail_unmap_din:
+ dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
+
+unmap_curr_buff:
+ if (*curr_buff_cnt)
+ dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
+
+ return rc;
+}
+
+void cc_unmap_hash_request(struct device *dev, void *ctx,
+ struct scatterlist *src, bool do_revert)
+{
+ struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
+ u32 *prev_len = cc_next_buf_cnt(areq_ctx);
+
+	/* In case a pool was set, a table was
+	 * allocated and should be released
+	 */
+ if (areq_ctx->mlli_params.curr_pool) {
+ dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
+ &areq_ctx->mlli_params.mlli_dma_addr,
+ areq_ctx->mlli_params.mlli_virt_addr);
+ dma_pool_free(areq_ctx->mlli_params.curr_pool,
+ areq_ctx->mlli_params.mlli_virt_addr,
+ areq_ctx->mlli_params.mlli_dma_addr);
+ }
+
+ if (src && areq_ctx->in_nents) {
+ dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
+ sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
+ dma_unmap_sg(dev, src,
+ areq_ctx->in_nents, DMA_TO_DEVICE);
+ }
+
+ if (*prev_len) {
+ dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
+ sg_virt(areq_ctx->buff_sg),
+ &sg_dma_address(areq_ctx->buff_sg),
+ sg_dma_len(areq_ctx->buff_sg));
+ dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
+ if (!do_revert) {
+ /* clean the previous data length for update
+ * operation
+ */
+ *prev_len = 0;
+ } else {
+ areq_ctx->buff_index ^= 1;
+ }
+ }
+}
+
+int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
+{
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ drvdata->mlli_buffs_pool =
+ dma_pool_create("dx_single_mlli_tables", dev,
+ MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
+ LLI_ENTRY_BYTE_SIZE,
+ MLLI_TABLE_MIN_ALIGNMENT, 0);
+
+ if (!drvdata->mlli_buffs_pool)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
+{
+ dma_pool_destroy(drvdata->mlli_buffs_pool);
+ return 0;
+}
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.h b/drivers/crypto/ccree/cc_buffer_mgr.h
new file mode 100644
index 000000000..653441b65
--- /dev/null
+++ b/drivers/crypto/ccree/cc_buffer_mgr.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
+
+/* \file cc_buffer_mgr.h
+ * Buffer Manager
+ */
+
+#ifndef __CC_BUFFER_MGR_H__
+#define __CC_BUFFER_MGR_H__
+
+#include <crypto/algapi.h>
+
+#include "cc_driver.h"
+
+enum cc_req_dma_buf_type {
+ CC_DMA_BUF_NULL = 0,
+ CC_DMA_BUF_DLLI,
+ CC_DMA_BUF_MLLI
+};
+
+enum cc_sg_cpy_direct {
+ CC_SG_TO_BUF = 0,
+ CC_SG_FROM_BUF = 1
+};
+
+struct cc_mlli {
+ u32 sram_addr;
+ unsigned int mapped_nents;
+ unsigned int nents; //sg nents
+ unsigned int mlli_nents; //mlli nents might be different than the above
+};
+
+struct mlli_params {
+ struct dma_pool *curr_pool;
+ void *mlli_virt_addr;
+ dma_addr_t mlli_dma_addr;
+ u32 mlli_len;
+};
+
+int cc_buffer_mgr_init(struct cc_drvdata *drvdata);
+
+int cc_buffer_mgr_fini(struct cc_drvdata *drvdata);
+
+int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
+ unsigned int ivsize, unsigned int nbytes,
+ void *info, struct scatterlist *src,
+ struct scatterlist *dst, gfp_t flags);
+
+void cc_unmap_cipher_request(struct device *dev, void *ctx, unsigned int ivsize,
+ struct scatterlist *src, struct scatterlist *dst);
+
+int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req);
+
+void cc_unmap_aead_request(struct device *dev, struct aead_request *req);
+
+int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ bool do_update, gfp_t flags);
+
+int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ unsigned int block_size, gfp_t flags);
+
+void cc_unmap_hash_request(struct device *dev, void *ctx,
+ struct scatterlist *src, bool do_revert);
+
+void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
+ u32 to_skip, u32 end, enum cc_sg_cpy_direct direct);
+
+#endif /*__CC_BUFFER_MGR_H__*/
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
new file mode 100644
index 000000000..309da6334
--- /dev/null
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -0,0 +1,1509 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/internal/des.h>
+#include <crypto/xts.h>
+#include <crypto/sm4.h>
+#include <crypto/scatterwalk.h>
+
+#include "cc_driver.h"
+#include "cc_lli_defs.h"
+#include "cc_buffer_mgr.h"
+#include "cc_cipher.h"
+#include "cc_request_mgr.h"
+
+#define MAX_SKCIPHER_SEQ_LEN 6
+
+#define template_skcipher template_u.skcipher
+
+struct cc_user_key_info {
+ u8 *key;
+ dma_addr_t key_dma_addr;
+};
+
+struct cc_hw_key_info {
+ enum cc_hw_crypto_key key1_slot;
+ enum cc_hw_crypto_key key2_slot;
+};
+
+struct cc_cpp_key_info {
+ u8 slot;
+ enum cc_cpp_alg alg;
+};
+
+enum cc_key_type {
+ CC_UNPROTECTED_KEY, /* User key */
+ CC_HW_PROTECTED_KEY, /* HW (FDE) key */
+ CC_POLICY_PROTECTED_KEY, /* CPP key */
+ CC_INVALID_PROTECTED_KEY /* Invalid key */
+};
+
+struct cc_cipher_ctx {
+ struct cc_drvdata *drvdata;
+ int keylen;
+ int cipher_mode;
+ int flow_mode;
+ unsigned int flags;
+ enum cc_key_type key_type;
+ struct cc_user_key_info user;
+ union {
+ struct cc_hw_key_info hw;
+ struct cc_cpp_key_info cpp;
+ };
+ struct crypto_shash *shash_tfm;
+ struct crypto_skcipher *fallback_tfm;
+ bool fallback_on;
+};
+
+static void cc_cipher_complete(struct device *dev, void *cc_req, int err);
+
+static inline enum cc_key_type cc_key_type(struct crypto_tfm *tfm)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+
+ return ctx_p->key_type;
+}
+
+static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
+{
+ switch (ctx_p->flow_mode) {
+ case S_DIN_to_AES:
+ switch (size) {
+ case CC_AES_128_BIT_KEY_SIZE:
+ case CC_AES_192_BIT_KEY_SIZE:
+ if (ctx_p->cipher_mode != DRV_CIPHER_XTS)
+ return 0;
+ break;
+ case CC_AES_256_BIT_KEY_SIZE:
+ return 0;
+ case (CC_AES_192_BIT_KEY_SIZE * 2):
+ case (CC_AES_256_BIT_KEY_SIZE * 2):
+ if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+ ctx_p->cipher_mode == DRV_CIPHER_ESSIV)
+ return 0;
+ break;
+ default:
+ break;
+ }
+ break;
+ case S_DIN_to_DES:
+ if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE)
+ return 0;
+ break;
+ case S_DIN_to_SM4:
+ if (size == SM4_KEY_SIZE)
+ return 0;
+ break;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static int validate_data_size(struct cc_cipher_ctx *ctx_p,
+ unsigned int size)
+{
+ switch (ctx_p->flow_mode) {
+ case S_DIN_to_AES:
+ switch (ctx_p->cipher_mode) {
+ case DRV_CIPHER_XTS:
+ case DRV_CIPHER_CBC_CTS:
+ if (size >= AES_BLOCK_SIZE)
+ return 0;
+ break;
+ case DRV_CIPHER_OFB:
+ case DRV_CIPHER_CTR:
+ return 0;
+ case DRV_CIPHER_ECB:
+ case DRV_CIPHER_CBC:
+ case DRV_CIPHER_ESSIV:
+ if (IS_ALIGNED(size, AES_BLOCK_SIZE))
+ return 0;
+ break;
+ default:
+ break;
+ }
+ break;
+ case S_DIN_to_DES:
+ if (IS_ALIGNED(size, DES_BLOCK_SIZE))
+ return 0;
+ break;
+ case S_DIN_to_SM4:
+ switch (ctx_p->cipher_mode) {
+ case DRV_CIPHER_CTR:
+ return 0;
+ case DRV_CIPHER_ECB:
+ case DRV_CIPHER_CBC:
+ if (IS_ALIGNED(size, SM4_BLOCK_SIZE))
+ return 0;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static int cc_cipher_init(struct crypto_tfm *tfm)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct cc_crypto_alg *cc_alg =
+ container_of(tfm->__crt_alg, struct cc_crypto_alg,
+ skcipher_alg.base);
+ struct device *dev = drvdata_to_dev(cc_alg->drvdata);
+ unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
+ unsigned int fallback_req_size = 0;
+
+ dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p,
+ crypto_tfm_alg_name(tfm));
+
+ ctx_p->cipher_mode = cc_alg->cipher_mode;
+ ctx_p->flow_mode = cc_alg->flow_mode;
+ ctx_p->drvdata = cc_alg->drvdata;
+
+ if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+ const char *name = crypto_tfm_alg_name(tfm);
+
+ /* Alloc hash tfm for essiv */
+ ctx_p->shash_tfm = crypto_alloc_shash("sha256", 0, 0);
+ if (IS_ERR(ctx_p->shash_tfm)) {
+ dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
+ return PTR_ERR(ctx_p->shash_tfm);
+ }
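+		/* ESSIV keeps the user key followed by its SHA-256 digest,
+		 * hence the double-sized key buffer.
+		 */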
+ max_key_buf_size <<= 1;
+
+		/* Alloc fallback tfm for ESSIV when key size != 256 bit */
+ ctx_p->fallback_tfm =
+ crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
+
+ if (IS_ERR(ctx_p->fallback_tfm)) {
+ /* Note we're still allowing registration with no fallback since it's
+ * better to have most modes supported than none at all.
+ */
+			dev_warn(dev, "Error allocating fallback algo %s. Some modes may not be available.\n",
+ name);
+ ctx_p->fallback_tfm = NULL;
+ } else {
+ fallback_req_size = crypto_skcipher_reqsize(ctx_p->fallback_tfm);
+ }
+ }
+
+ crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+ sizeof(struct cipher_req_ctx) + fallback_req_size);
+
+ /* Allocate key buffer, cache line aligned */
+ ctx_p->user.key = kzalloc(max_key_buf_size, GFP_KERNEL);
+ if (!ctx_p->user.key)
+ goto free_fallback;
+
+ dev_dbg(dev, "Allocated key buffer in context. key=@%p\n",
+ ctx_p->user.key);
+
+ /* Map key buffer */
+ ctx_p->user.key_dma_addr = dma_map_single(dev, ctx_p->user.key,
+ max_key_buf_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
+ dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n",
+ max_key_buf_size, ctx_p->user.key);
+ goto free_key;
+ }
+ dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n",
+ max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr);
+
+ return 0;
+
+free_key:
+ kfree(ctx_p->user.key);
+free_fallback:
+ crypto_free_skcipher(ctx_p->fallback_tfm);
+ crypto_free_shash(ctx_p->shash_tfm);
+
+ return -ENOMEM;
+}
+
+static void cc_cipher_exit(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct cc_crypto_alg *cc_alg =
+ container_of(alg, struct cc_crypto_alg,
+ skcipher_alg.base);
+ unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+
+ dev_dbg(dev, "Clearing context @%p for %s\n",
+ crypto_tfm_ctx(tfm), crypto_tfm_alg_name(tfm));
+
+ if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+ /* Free hash tfm for essiv */
+ crypto_free_shash(ctx_p->shash_tfm);
+ ctx_p->shash_tfm = NULL;
+ crypto_free_skcipher(ctx_p->fallback_tfm);
+ ctx_p->fallback_tfm = NULL;
+ }
+
+ /* Unmap key buffer */
+ dma_unmap_single(dev, ctx_p->user.key_dma_addr, max_key_buf_size,
+ DMA_TO_DEVICE);
+ dev_dbg(dev, "Unmapped key buffer key_dma_addr=%pad\n",
+ &ctx_p->user.key_dma_addr);
+
+ /* Free key buffer in context */
+ dev_dbg(dev, "Free key buffer in context. key=@%p\n", ctx_p->user.key);
+ kfree_sensitive(ctx_p->user.key);
+}
+
+struct tdes_keys {
+ u8 key1[DES_KEY_SIZE];
+ u8 key2[DES_KEY_SIZE];
+ u8 key3[DES_KEY_SIZE];
+};
+
+static enum cc_hw_crypto_key cc_slot_to_hw_key(u8 slot_num)
+{
+ switch (slot_num) {
+ case 0:
+ return KFDE0_KEY;
+ case 1:
+ return KFDE1_KEY;
+ case 2:
+ return KFDE2_KEY;
+ case 3:
+ return KFDE3_KEY;
+ }
+ return END_OF_KEYS;
+}
+
+static u8 cc_slot_to_cpp_key(u8 slot_num)
+{
+ return (slot_num - CC_FIRST_CPP_KEY_SLOT);
+}
+
+static inline enum cc_key_type cc_slot_to_key_type(u8 slot_num)
+{
+ if (slot_num >= CC_FIRST_HW_KEY_SLOT && slot_num <= CC_LAST_HW_KEY_SLOT)
+ return CC_HW_PROTECTED_KEY;
+ else if (slot_num >= CC_FIRST_CPP_KEY_SLOT &&
+ slot_num <= CC_LAST_CPP_KEY_SLOT)
+ return CC_POLICY_PROTECTED_KEY;
+ else
+ return CC_INVALID_PROTECTED_KEY;
+}
+
+static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(sktfm);
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ struct cc_hkey_info hki;
+
+ dev_dbg(dev, "Setting HW key in context @%p for %s. keylen=%u\n",
+ ctx_p, crypto_tfm_alg_name(tfm), keylen);
+ dump_byte_array("key", key, keylen);
+
+ /* STAT_PHASE_0: Init and sanity checks */
+
+	/* This checks the size of the protected key token */
+ if (keylen != sizeof(hki)) {
+ dev_err(dev, "Unsupported protected key size %d.\n", keylen);
+ return -EINVAL;
+ }
+
+ memcpy(&hki, key, keylen);
+
+ /* The real key len for crypto op is the size of the HW key
+ * referenced by the HW key slot, not the hardware key token
+ */
+ keylen = hki.keylen;
+
+ if (validate_keys_sizes(ctx_p, keylen)) {
+ dev_dbg(dev, "Unsupported key size %d.\n", keylen);
+ return -EINVAL;
+ }
+
+ ctx_p->keylen = keylen;
+ ctx_p->fallback_on = false;
+
+ switch (cc_slot_to_key_type(hki.hw_key1)) {
+ case CC_HW_PROTECTED_KEY:
+ if (ctx_p->flow_mode == S_DIN_to_SM4) {
+ dev_err(dev, "Only AES HW protected keys are supported\n");
+ return -EINVAL;
+ }
+
+ ctx_p->hw.key1_slot = cc_slot_to_hw_key(hki.hw_key1);
+ if (ctx_p->hw.key1_slot == END_OF_KEYS) {
+ dev_err(dev, "Unsupported hw key1 number (%d)\n",
+ hki.hw_key1);
+ return -EINVAL;
+ }
+
+ if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+ ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+ if (hki.hw_key1 == hki.hw_key2) {
+ dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
+ hki.hw_key1, hki.hw_key2);
+ return -EINVAL;
+ }
+
+ ctx_p->hw.key2_slot = cc_slot_to_hw_key(hki.hw_key2);
+ if (ctx_p->hw.key2_slot == END_OF_KEYS) {
+ dev_err(dev, "Unsupported hw key2 number (%d)\n",
+ hki.hw_key2);
+ return -EINVAL;
+ }
+ }
+
+ ctx_p->key_type = CC_HW_PROTECTED_KEY;
+		dev_dbg(dev, "HW protected key %d/%d set.\n",
+ ctx_p->hw.key1_slot, ctx_p->hw.key2_slot);
+ break;
+
+ case CC_POLICY_PROTECTED_KEY:
+ if (ctx_p->drvdata->hw_rev < CC_HW_REV_713) {
+ dev_err(dev, "CPP keys not supported in this hardware revision.\n");
+ return -EINVAL;
+ }
+
+ if (ctx_p->cipher_mode != DRV_CIPHER_CBC &&
+ ctx_p->cipher_mode != DRV_CIPHER_CTR) {
+ dev_err(dev, "CPP keys only supported in CBC or CTR modes.\n");
+ return -EINVAL;
+ }
+
+ ctx_p->cpp.slot = cc_slot_to_cpp_key(hki.hw_key1);
+ if (ctx_p->flow_mode == S_DIN_to_AES)
+ ctx_p->cpp.alg = CC_CPP_AES;
+		else /* Must be SM4 due to sethkey registration */
+ ctx_p->cpp.alg = CC_CPP_SM4;
+ ctx_p->key_type = CC_POLICY_PROTECTED_KEY;
+ dev_dbg(dev, "policy protected key alg: %d slot: %d.\n",
+ ctx_p->cpp.alg, ctx_p->cpp.slot);
+ break;
+
+ default:
+ dev_err(dev, "Unsupported protected key (%d)\n", hki.hw_key1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(sktfm);
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ struct cc_crypto_alg *cc_alg =
+ container_of(tfm->__crt_alg, struct cc_crypto_alg,
+ skcipher_alg.base);
+ unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
+
+ dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n",
+ ctx_p, crypto_tfm_alg_name(tfm), keylen);
+ dump_byte_array("key", key, keylen);
+
+ /* STAT_PHASE_0: Init and sanity checks */
+
+ if (validate_keys_sizes(ctx_p, keylen)) {
+ dev_dbg(dev, "Invalid key size %d.\n", keylen);
+ return -EINVAL;
+ }
+
+ if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+
+ /* We only support 256 bit ESSIV-CBC-AES keys */
+ if (keylen != AES_KEYSIZE_256) {
+ unsigned int flags = crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_MASK;
+
+ if (likely(ctx_p->fallback_tfm)) {
+ ctx_p->fallback_on = true;
+ crypto_skcipher_clear_flags(ctx_p->fallback_tfm,
+ CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_clear_flags(ctx_p->fallback_tfm, flags);
+ return crypto_skcipher_setkey(ctx_p->fallback_tfm, key, keylen);
+ }
+
+ dev_dbg(dev, "Unsupported key size %d and no fallback.\n", keylen);
+ return -EINVAL;
+ }
+
+ /* Internal ESSIV key buffer is double sized */
+ max_key_buf_size <<= 1;
+ }
+
+ ctx_p->fallback_on = false;
+ ctx_p->key_type = CC_UNPROTECTED_KEY;
+
+ /*
+ * Verify DES weak keys
+ * Note that we're dropping the expanded key since the
+ * HW does the expansion on its own.
+ */
+ if (ctx_p->flow_mode == S_DIN_to_DES) {
+ if ((keylen == DES3_EDE_KEY_SIZE &&
+ verify_skcipher_des3_key(sktfm, key)) ||
+ verify_skcipher_des_key(sktfm, key)) {
+ dev_dbg(dev, "weak DES key");
+ return -EINVAL;
+ }
+ }
+
+ if (ctx_p->cipher_mode == DRV_CIPHER_XTS &&
+ xts_check_key(tfm, key, keylen)) {
+ dev_dbg(dev, "weak XTS key");
+ return -EINVAL;
+ }
+
+ /* STAT_PHASE_1: Copy key to ctx */
+ dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
+ max_key_buf_size, DMA_TO_DEVICE);
+
+ memcpy(ctx_p->user.key, key, keylen);
+
+ if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+ /* sha256 for key2 - use sw implementation */
+ int err;
+
+ err = crypto_shash_tfm_digest(ctx_p->shash_tfm,
+ ctx_p->user.key, keylen,
+ ctx_p->user.key + keylen);
+ if (err) {
+ dev_err(dev, "Failed to hash ESSIV key.\n");
+ return err;
+ }
+
+ keylen <<= 1;
+ }
+ dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr,
+ max_key_buf_size, DMA_TO_DEVICE);
+ ctx_p->keylen = keylen;
+
+ dev_dbg(dev, "return safely");
+ return 0;
+}
+
+static int cc_out_setup_mode(struct cc_cipher_ctx *ctx_p)
+{
+ switch (ctx_p->flow_mode) {
+ case S_DIN_to_AES:
+ return S_AES_to_DOUT;
+ case S_DIN_to_DES:
+ return S_DES_to_DOUT;
+ case S_DIN_to_SM4:
+ return S_SM4_to_DOUT;
+ default:
+ return ctx_p->flow_mode;
+ }
+}
+
+static void cc_setup_readiv_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ unsigned int ivsize, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ int cipher_mode = ctx_p->cipher_mode;
+ int flow_mode = cc_out_setup_mode(ctx_p);
+ int direction = req_ctx->gen_ctx.op_type;
+ dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
+
+ if (ctx_p->key_type == CC_POLICY_PROTECTED_KEY)
+ return;
+
+ switch (cipher_mode) {
+ case DRV_CIPHER_ECB:
+ break;
+ case DRV_CIPHER_CBC:
+ case DRV_CIPHER_CBC_CTS:
+ case DRV_CIPHER_CTR:
+ case DRV_CIPHER_OFB:
+ /* Read next IV */
+ hw_desc_init(&desc[*seq_size]);
+ set_dout_dlli(&desc[*seq_size], iv_dma_addr, ivsize, NS_BIT, 1);
+ set_cipher_config0(&desc[*seq_size], direction);
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ if (cipher_mode == DRV_CIPHER_CTR ||
+ cipher_mode == DRV_CIPHER_OFB) {
+ set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE1);
+ } else {
+ set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE0);
+ }
+ set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+ (*seq_size)++;
+ break;
+ case DRV_CIPHER_XTS:
+ case DRV_CIPHER_ESSIV:
+ /* IV */
+ hw_desc_init(&desc[*seq_size]);
+ set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE1);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_dout_dlli(&desc[*seq_size], iv_dma_addr, CC_AES_BLOCK_SIZE,
+ NS_BIT, 1);
+ set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+ (*seq_size)++;
+ break;
+ default:
+ dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+ }
+}
+
+static void cc_setup_state_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ unsigned int ivsize, unsigned int nbytes,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ int cipher_mode = ctx_p->cipher_mode;
+ int flow_mode = ctx_p->flow_mode;
+ int direction = req_ctx->gen_ctx.op_type;
+ dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
+
+ switch (cipher_mode) {
+ case DRV_CIPHER_ECB:
+ break;
+ case DRV_CIPHER_CBC:
+ case DRV_CIPHER_CBC_CTS:
+ case DRV_CIPHER_CTR:
+ case DRV_CIPHER_OFB:
+ /* Load IV */
+ hw_desc_init(&desc[*seq_size]);
+ set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr, ivsize,
+ NS_BIT);
+ set_cipher_config0(&desc[*seq_size], direction);
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ if (cipher_mode == DRV_CIPHER_CTR ||
+ cipher_mode == DRV_CIPHER_OFB) {
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
+ } else {
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
+ }
+ (*seq_size)++;
+ break;
+ case DRV_CIPHER_XTS:
+ case DRV_CIPHER_ESSIV:
+ break;
+ default:
+ dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+ }
+}
+
+static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ unsigned int ivsize, unsigned int nbytes,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ int cipher_mode = ctx_p->cipher_mode;
+ int flow_mode = ctx_p->flow_mode;
+ int direction = req_ctx->gen_ctx.op_type;
+ dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
+ unsigned int key_len = (ctx_p->keylen / 2);
+ dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
+ unsigned int key_offset = key_len;
+
+ switch (cipher_mode) {
+ case DRV_CIPHER_ECB:
+ break;
+ case DRV_CIPHER_CBC:
+ case DRV_CIPHER_CBC_CTS:
+ case DRV_CIPHER_CTR:
+ case DRV_CIPHER_OFB:
+ break;
+ case DRV_CIPHER_XTS:
+ case DRV_CIPHER_ESSIV:
+
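+		/* For ESSIV the second (tweak) key is the SHA-256 digest of
+		 * the user key, computed at setkey time, so its length is the
+		 * digest size rather than half of the stored key length.
+		 */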
+ if (cipher_mode == DRV_CIPHER_ESSIV)
+ key_len = SHA256_DIGEST_SIZE;
+
+ /* load XEX key */
+ hw_desc_init(&desc[*seq_size]);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
+ set_hw_crypto_key(&desc[*seq_size],
+ ctx_p->hw.key2_slot);
+ } else {
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ (key_dma_addr + key_offset),
+ key_len, NS_BIT);
+ }
+ set_xex_data_unit_size(&desc[*seq_size], nbytes);
+ set_flow_mode(&desc[*seq_size], S_DIN_to_AES2);
+ set_key_size_aes(&desc[*seq_size], key_len);
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
+ (*seq_size)++;
+
+ /* Load IV */
+ hw_desc_init(&desc[*seq_size]);
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ set_key_size_aes(&desc[*seq_size], key_len);
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr,
+ CC_AES_BLOCK_SIZE, NS_BIT);
+ (*seq_size)++;
+ break;
+ default:
+ dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+ }
+}
+
+static int cc_out_flow_mode(struct cc_cipher_ctx *ctx_p)
+{
+ switch (ctx_p->flow_mode) {
+ case S_DIN_to_AES:
+ return DIN_AES_DOUT;
+ case S_DIN_to_DES:
+ return DIN_DES_DOUT;
+ case S_DIN_to_SM4:
+ return DIN_SM4_DOUT;
+ default:
+ return ctx_p->flow_mode;
+ }
+}
+
+static void cc_setup_key_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ unsigned int nbytes, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ int cipher_mode = ctx_p->cipher_mode;
+ int flow_mode = ctx_p->flow_mode;
+ int direction = req_ctx->gen_ctx.op_type;
+ dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
+ unsigned int key_len = ctx_p->keylen;
+ unsigned int din_size;
+
+ switch (cipher_mode) {
+ case DRV_CIPHER_CBC:
+ case DRV_CIPHER_CBC_CTS:
+ case DRV_CIPHER_CTR:
+ case DRV_CIPHER_OFB:
+ case DRV_CIPHER_ECB:
+ /* Load key */
+ hw_desc_init(&desc[*seq_size]);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+
+ if (cc_key_type(tfm) == CC_POLICY_PROTECTED_KEY) {
+ /* We use the AES key size coding for all CPP algs */
+ set_key_size_aes(&desc[*seq_size], key_len);
+ set_cpp_crypto_key(&desc[*seq_size], ctx_p->cpp.slot);
+ flow_mode = cc_out_flow_mode(ctx_p);
+ } else {
+ if (flow_mode == S_DIN_to_AES) {
+ if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
+ set_hw_crypto_key(&desc[*seq_size],
+ ctx_p->hw.key1_slot);
+ } else {
+				/* CC_UNPROTECTED_KEY
+				 * Invalid keys are filtered out in
+				 * setkey()
+				 */
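+				/* 192-bit keys are passed to the HW in a full
+				 * 256-bit (zero-padded) buffer, presumably due
+				 * to a DMA/key-load constraint; the key buffer
+				 * is kzalloc'ed at max size so this is safe.
+				 */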
+ din_size = (key_len == 24) ?
+ AES_MAX_KEY_SIZE : key_len;
+
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ key_dma_addr, din_size,
+ NS_BIT);
+ }
+ set_key_size_aes(&desc[*seq_size], key_len);
+ } else {
+				/* DES */
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ key_dma_addr, key_len, NS_BIT);
+ set_key_size_des(&desc[*seq_size], key_len);
+ }
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
+ }
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ (*seq_size)++;
+ break;
+ case DRV_CIPHER_XTS:
+ case DRV_CIPHER_ESSIV:
+ /* Load AES key */
+ hw_desc_init(&desc[*seq_size]);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
+ set_hw_crypto_key(&desc[*seq_size],
+ ctx_p->hw.key1_slot);
+ } else {
+ set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
+ (key_len / 2), NS_BIT);
+ }
+ set_key_size_aes(&desc[*seq_size], (key_len / 2));
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
+ (*seq_size)++;
+ break;
+ default:
+ dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+ }
+}
+
+static void cc_setup_mlli_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes, void *areq,
+ struct cc_hw_desc desc[], unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+ /* bypass */
+ dev_dbg(dev, " bypass params addr %pad length 0x%X addr 0x%08X\n",
+ &req_ctx->mlli_params.mlli_dma_addr,
+ req_ctx->mlli_params.mlli_len,
+ ctx_p->drvdata->mlli_sram_addr);
+ hw_desc_init(&desc[*seq_size]);
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ req_ctx->mlli_params.mlli_dma_addr,
+ req_ctx->mlli_params.mlli_len, NS_BIT);
+ set_dout_sram(&desc[*seq_size],
+ ctx_p->drvdata->mlli_sram_addr,
+ req_ctx->mlli_params.mlli_len);
+ set_flow_mode(&desc[*seq_size], BYPASS);
+ (*seq_size)++;
+ }
+}
+
+static void cc_setup_flow_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ unsigned int flow_mode = cc_out_flow_mode(ctx_p);
+ bool last_desc = (ctx_p->key_type == CC_POLICY_PROTECTED_KEY ||
+ ctx_p->cipher_mode == DRV_CIPHER_ECB);
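+	/* With a CPP key or in ECB mode no read-IV descriptor is appended,
+	 * so the data descriptor below is the last one in the sequence.
+	 */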
+
+ /* Process */
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
+ dev_dbg(dev, " data params addr %pad length 0x%X\n",
+ &sg_dma_address(src), nbytes);
+ dev_dbg(dev, " data params addr %pad length 0x%X\n",
+ &sg_dma_address(dst), nbytes);
+ hw_desc_init(&desc[*seq_size]);
+ set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
+ nbytes, NS_BIT);
+ set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
+ nbytes, NS_BIT, (!last_desc ? 0 : 1));
+ if (last_desc)
+ set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ (*seq_size)++;
+ } else {
+ hw_desc_init(&desc[*seq_size]);
+ set_din_type(&desc[*seq_size], DMA_MLLI,
+ ctx_p->drvdata->mlli_sram_addr,
+ req_ctx->in_mlli_nents, NS_BIT);
+ if (req_ctx->out_nents == 0) {
+ dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
+ ctx_p->drvdata->mlli_sram_addr,
+ ctx_p->drvdata->mlli_sram_addr);
+ set_dout_mlli(&desc[*seq_size],
+ ctx_p->drvdata->mlli_sram_addr,
+ req_ctx->in_mlli_nents, NS_BIT,
+ (!last_desc ? 0 : 1));
+ } else {
+ dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
+ ctx_p->drvdata->mlli_sram_addr,
+ ctx_p->drvdata->mlli_sram_addr +
+ (u32)LLI_ENTRY_BYTE_SIZE * req_ctx->in_nents);
+ set_dout_mlli(&desc[*seq_size],
+ (ctx_p->drvdata->mlli_sram_addr +
+ (LLI_ENTRY_BYTE_SIZE *
+ req_ctx->in_mlli_nents)),
+ req_ctx->out_mlli_nents, NS_BIT,
+ (!last_desc ? 0 : 1));
+ }
+ if (last_desc)
+ set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ (*seq_size)++;
+ }
+}
+
+static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
+{
+ struct skcipher_request *req = (struct skcipher_request *)cc_req;
+ struct scatterlist *dst = req->dst;
+ struct scatterlist *src = req->src;
+ struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
+
+ if (err != -EINPROGRESS) {
+ /* Not a BACKLOG notification */
+ cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+ memcpy(req->iv, req_ctx->iv, ivsize);
+ kfree_sensitive(req_ctx->iv);
+ }
+
+ skcipher_request_complete(req, err);
+}
+
+static int cc_cipher_process(struct skcipher_request *req,
+ enum drv_crypto_direction direction)
+{
+ struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
+ struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
+ struct scatterlist *dst = req->dst;
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->cryptlen;
+ void *iv = req->iv;
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ struct cc_hw_desc desc[MAX_SKCIPHER_SEQ_LEN];
+ struct cc_crypto_req cc_req = {};
+ int rc;
+ unsigned int seq_len = 0;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ dev_dbg(dev, "%s req=%p iv=%p nbytes=%d\n",
+ ((direction == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ "Encrypt" : "Decrypt"), req, iv, nbytes);
+
+ /* STAT_PHASE_0: Init and sanity checks */
+
+ if (validate_data_size(ctx_p, nbytes)) {
+ dev_dbg(dev, "Unsupported data size %d.\n", nbytes);
+ rc = -EINVAL;
+ goto exit_process;
+ }
+ if (nbytes == 0) {
+ /* No data to process is valid */
+ rc = 0;
+ goto exit_process;
+ }
+
+ if (ctx_p->fallback_on) {
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
+
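+		/* The fallback sub-request is carved out of our request
+		 * context; room for it was reserved in cc_cipher_init() via
+		 * crypto_skcipher_set_reqsize().
+		 */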
+ *subreq = *req;
+ skcipher_request_set_tfm(subreq, ctx_p->fallback_tfm);
+ if (direction == DRV_CRYPTO_DIRECTION_ENCRYPT)
+ return crypto_skcipher_encrypt(subreq);
+ else
+ return crypto_skcipher_decrypt(subreq);
+ }
+
+ /* The IV we are handed may be allocated from the stack so
+ * we must copy it to a DMAable buffer before use.
+ */
+ req_ctx->iv = kmemdup(iv, ivsize, flags);
+ if (!req_ctx->iv) {
+ rc = -ENOMEM;
+ goto exit_process;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = cc_cipher_complete;
+ cc_req.user_arg = req;
+
+ /* Setup CPP operation details */
+ if (ctx_p->key_type == CC_POLICY_PROTECTED_KEY) {
+ cc_req.cpp.is_cpp = true;
+ cc_req.cpp.alg = ctx_p->cpp.alg;
+ cc_req.cpp.slot = ctx_p->cpp.slot;
+ }
+
+ /* Setup request context */
+ req_ctx->gen_ctx.op_type = direction;
+
+ /* STAT_PHASE_1: Map buffers */
+
+ rc = cc_map_cipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes,
+ req_ctx->iv, src, dst, flags);
+ if (rc) {
+ dev_err(dev, "map_request() failed\n");
+ goto exit_process;
+ }
+
+ /* STAT_PHASE_2: Create sequence */
+
+ /* Setup state (IV) */
+ cc_setup_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
+ /* Setup MLLI line, if needed */
+ cc_setup_mlli_desc(tfm, req_ctx, dst, src, nbytes, req, desc, &seq_len);
+ /* Setup key */
+ cc_setup_key_desc(tfm, req_ctx, nbytes, desc, &seq_len);
+ /* Setup state (IV and XEX key) */
+ cc_setup_xex_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
+ /* Data processing */
+ cc_setup_flow_desc(tfm, req_ctx, dst, src, nbytes, desc, &seq_len);
+ /* Read next IV */
+ cc_setup_readiv_desc(tfm, req_ctx, ivsize, desc, &seq_len);
+
+ /* STAT_PHASE_3: Lock HW and push sequence */
+
+ rc = cc_send_request(ctx_p->drvdata, &cc_req, desc, seq_len,
+ &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ /* Failed to send the request or request completed
+ * synchronously
+ */
+ cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+ }
+
+exit_process:
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ kfree_sensitive(req_ctx->iv);
+ }
+
+ return rc;
+}
+
+static int cc_cipher_encrypt(struct skcipher_request *req)
+{
+ struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
+
+ memset(req_ctx, 0, sizeof(*req_ctx));
+
+ return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+}
+
+static int cc_cipher_decrypt(struct skcipher_request *req)
+{
+ struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
+
+ memset(req_ctx, 0, sizeof(*req_ctx));
+
+ return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+}
+
+/* Block cipher alg */
+static const struct cc_alg_template skcipher_algs[] = {
+ {
+ .name = "xts(paes)",
+ .driver_name = "xts-paes-ccree",
+ .blocksize = 1,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_XTS,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ .sec_func = true,
+ },
+ {
+ .name = "essiv(cbc(paes),sha256)",
+ .driver_name = "essiv-paes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_ESSIV,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
+ .sec_func = true,
+ },
+ {
+ .name = "ecb(paes)",
+ .driver_name = "ecb-paes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = 0,
+ },
+ .cipher_mode = DRV_CIPHER_ECB,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
+ .sec_func = true,
+ },
+ {
+ .name = "cbc(paes)",
+ .driver_name = "cbc-paes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
+ .sec_func = true,
+ },
+ {
+ .name = "ofb(paes)",
+ .driver_name = "ofb-paes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_OFB,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
+ .sec_func = true,
+ },
+ {
+ .name = "cts(cbc(paes))",
+ .driver_name = "cts-cbc-paes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC_CTS,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
+ .sec_func = true,
+ },
+ {
+ .name = "ctr(paes)",
+ .driver_name = "ctr-paes-ccree",
+ .blocksize = 1,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
+ .sec_func = true,
+ },
+ {
+ /* See https://www.mail-archive.com/linux-crypto@vger.kernel.org/msg40576.html
+ * for the reason why this differs from the generic
+ * implementation.
+ */
+ .name = "xts(aes)",
+ .driver_name = "xts-aes-ccree",
+ .blocksize = 1,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_XTS,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "essiv(cbc(aes),sha256)",
+ .driver_name = "essiv-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_ESSIV,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "ecb(aes)",
+ .driver_name = "ecb-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = 0,
+ },
+ .cipher_mode = DRV_CIPHER_ECB,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "cbc(aes)",
+ .driver_name = "cbc-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "ofb(aes)",
+ .driver_name = "ofb-aes-ccree",
+ .blocksize = 1,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_OFB,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "cts(cbc(aes))",
+ .driver_name = "cts-cbc-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC_CTS,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "ctr(aes)",
+ .driver_name = "ctr-aes-ccree",
+ .blocksize = 1,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "cbc(des3_ede)",
+ .driver_name = "cbc-3des-ccree",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_DES,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "ecb(des3_ede)",
+ .driver_name = "ecb-3des-ccree",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = 0,
+ },
+ .cipher_mode = DRV_CIPHER_ECB,
+ .flow_mode = S_DIN_to_DES,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "cbc(des)",
+ .driver_name = "cbc-des-ccree",
+ .blocksize = DES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_DES,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "ecb(des)",
+ .driver_name = "ecb-des-ccree",
+ .blocksize = DES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = 0,
+ },
+ .cipher_mode = DRV_CIPHER_ECB,
+ .flow_mode = S_DIN_to_DES,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "cbc(sm4)",
+ .driver_name = "cbc-sm4-ccree",
+ .blocksize = SM4_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_SM4,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
+ },
+ {
+ .name = "ecb(sm4)",
+ .driver_name = "ecb-sm4-ccree",
+ .blocksize = SM4_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = 0,
+ },
+ .cipher_mode = DRV_CIPHER_ECB,
+ .flow_mode = S_DIN_to_SM4,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
+ },
+ {
+ .name = "ctr(sm4)",
+ .driver_name = "ctr-sm4-ccree",
+ .blocksize = 1,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_SM4,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
+ },
+ {
+ .name = "cbc(psm4)",
+ .driver_name = "cbc-psm4-ccree",
+ .blocksize = SM4_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_SM4,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
+ .sec_func = true,
+ },
+ {
+ .name = "ctr(psm4)",
+ .driver_name = "ctr-psm4-ccree",
+ .blocksize = SM4_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_SM4,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
+ .sec_func = true,
+ },
+};
+
+static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
+ struct device *dev)
+{
+ struct cc_crypto_alg *t_alg;
+ struct skcipher_alg *alg;
+
+ t_alg = devm_kzalloc(dev, sizeof(*t_alg), GFP_KERNEL);
+ if (!t_alg)
+ return ERR_PTR(-ENOMEM);
+
+ alg = &t_alg->skcipher_alg;
+
+ memcpy(alg, &tmpl->template_skcipher, sizeof(*alg));
+
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ tmpl->driver_name);
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CC_CRA_PRIO;
+ alg->base.cra_blocksize = tmpl->blocksize;
+ alg->base.cra_alignmask = 0;
+ alg->base.cra_ctxsize = sizeof(struct cc_cipher_ctx);
+
+ alg->base.cra_init = cc_cipher_init;
+ alg->base.cra_exit = cc_cipher_exit;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+ t_alg->cipher_mode = tmpl->cipher_mode;
+ t_alg->flow_mode = tmpl->flow_mode;
+
+ return t_alg;
+}
+
+int cc_cipher_free(struct cc_drvdata *drvdata)
+{
+ struct cc_crypto_alg *t_alg, *n;
+
+ /* Remove registered algs */
+ list_for_each_entry_safe(t_alg, n, &drvdata->alg_list, entry) {
+ crypto_unregister_skcipher(&t_alg->skcipher_alg);
+ list_del(&t_alg->entry);
+ }
+ return 0;
+}
+
+int cc_cipher_alloc(struct cc_drvdata *drvdata)
+{
+ struct cc_crypto_alg *t_alg;
+ struct device *dev = drvdata_to_dev(drvdata);
+ int rc = -ENOMEM;
+ int alg;
+
+ INIT_LIST_HEAD(&drvdata->alg_list);
+
+ /* Linux crypto */
+ dev_dbg(dev, "Number of algorithms = %zu\n",
+ ARRAY_SIZE(skcipher_algs));
+ for (alg = 0; alg < ARRAY_SIZE(skcipher_algs); alg++) {
+ if ((skcipher_algs[alg].min_hw_rev > drvdata->hw_rev) ||
+ !(drvdata->std_bodies & skcipher_algs[alg].std_body) ||
+ (drvdata->sec_disabled && skcipher_algs[alg].sec_func))
+ continue;
+
+ dev_dbg(dev, "creating %s\n", skcipher_algs[alg].driver_name);
+ t_alg = cc_create_alg(&skcipher_algs[alg], dev);
+ if (IS_ERR(t_alg)) {
+ rc = PTR_ERR(t_alg);
+ dev_err(dev, "%s alg allocation failed\n",
+ skcipher_algs[alg].driver_name);
+ goto fail0;
+ }
+ t_alg->drvdata = drvdata;
+
+ dev_dbg(dev, "registering %s\n",
+ skcipher_algs[alg].driver_name);
+ rc = crypto_register_skcipher(&t_alg->skcipher_alg);
+ dev_dbg(dev, "%s alg registration rc = %x\n",
+ t_alg->skcipher_alg.base.cra_driver_name, rc);
+ if (rc) {
+ dev_err(dev, "%s alg registration failed\n",
+ t_alg->skcipher_alg.base.cra_driver_name);
+ goto fail0;
+ }
+
+ list_add_tail(&t_alg->entry, &drvdata->alg_list);
+ dev_dbg(dev, "Registered %s\n",
+ t_alg->skcipher_alg.base.cra_driver_name);
+ }
+ return 0;
+
+fail0:
+ cc_cipher_free(drvdata);
+ return rc;
+}
diff --git a/drivers/crypto/ccree/cc_cipher.h b/drivers/crypto/ccree/cc_cipher.h
new file mode 100644
index 000000000..da3a38707
--- /dev/null
+++ b/drivers/crypto/ccree/cc_cipher.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
+
+/* \file cc_cipher.h
+ * ARM CryptoCell Cipher Crypto API
+ */
+
+#ifndef __CC_CIPHER_H__
+#define __CC_CIPHER_H__
+
+#include <linux/kernel.h>
+#include <crypto/algapi.h>
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+
+struct cipher_req_ctx {
+ struct async_gen_req_ctx gen_ctx;
+ enum cc_req_dma_buf_type dma_buf_type;
+ u32 in_nents;
+ u32 in_mlli_nents;
+ u32 out_nents;
+ u32 out_mlli_nents;
+ u8 *iv;
+ struct mlli_params mlli_params;
+};
+
+int cc_cipher_alloc(struct cc_drvdata *drvdata);
+
+int cc_cipher_free(struct cc_drvdata *drvdata);
+
+struct cc_hkey_info {
+ u16 keylen;
+ u8 hw_key1;
+ u8 hw_key2;
+} __packed;
+
+#define CC_HW_KEY_SIZE sizeof(struct cc_hkey_info)
+
+#endif /*__CC_CIPHER_H__*/
diff --git a/drivers/crypto/ccree/cc_crypto_ctx.h b/drivers/crypto/ccree/cc_crypto_ctx.h
new file mode 100644
index 000000000..bd9a1c089
--- /dev/null
+++ b/drivers/crypto/ccree/cc_crypto_ctx.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
+
+#ifndef _CC_CRYPTO_CTX_H_
+#define _CC_CRYPTO_CTX_H_
+
+#include <linux/types.h>
+
+#define CC_DRV_DES_IV_SIZE 8
+#define CC_DRV_DES_BLOCK_SIZE 8
+
+#define CC_DRV_DES_ONE_KEY_SIZE 8
+#define CC_DRV_DES_DOUBLE_KEY_SIZE 16
+#define CC_DRV_DES_TRIPLE_KEY_SIZE 24
+#define CC_DRV_DES_KEY_SIZE_MAX CC_DRV_DES_TRIPLE_KEY_SIZE
+
+#define CC_AES_IV_SIZE 16
+#define CC_AES_IV_SIZE_WORDS (CC_AES_IV_SIZE >> 2)
+
+#define CC_AES_BLOCK_SIZE 16
+#define CC_AES_BLOCK_SIZE_WORDS 4
+
+#define CC_AES_128_BIT_KEY_SIZE 16
+#define CC_AES_128_BIT_KEY_SIZE_WORDS (CC_AES_128_BIT_KEY_SIZE >> 2)
+#define CC_AES_192_BIT_KEY_SIZE 24
+#define CC_AES_192_BIT_KEY_SIZE_WORDS (CC_AES_192_BIT_KEY_SIZE >> 2)
+#define CC_AES_256_BIT_KEY_SIZE 32
+#define CC_AES_256_BIT_KEY_SIZE_WORDS (CC_AES_256_BIT_KEY_SIZE >> 2)
+#define CC_AES_KEY_SIZE_MAX CC_AES_256_BIT_KEY_SIZE
+#define CC_AES_KEY_SIZE_WORDS_MAX (CC_AES_KEY_SIZE_MAX >> 2)
+
+#define CC_MD5_DIGEST_SIZE 16
+#define CC_SHA1_DIGEST_SIZE 20
+#define CC_SHA224_DIGEST_SIZE 28
+#define CC_SHA256_DIGEST_SIZE 32
+#define CC_SHA256_DIGEST_SIZE_IN_WORDS 8
+#define CC_SHA384_DIGEST_SIZE 48
+#define CC_SHA512_DIGEST_SIZE 64
+
+#define CC_SHA1_BLOCK_SIZE 64
+#define CC_SHA1_BLOCK_SIZE_IN_WORDS 16
+#define CC_MD5_BLOCK_SIZE 64
+#define CC_MD5_BLOCK_SIZE_IN_WORDS 16
+#define CC_SHA224_BLOCK_SIZE 64
+#define CC_SHA256_BLOCK_SIZE 64
+#define CC_SHA256_BLOCK_SIZE_IN_WORDS 16
+#define CC_SHA1_224_256_BLOCK_SIZE 64
+#define CC_SHA384_BLOCK_SIZE 128
+#define CC_SHA512_BLOCK_SIZE 128
+
+#define CC_DIGEST_SIZE_MAX CC_SHA512_DIGEST_SIZE
+#define CC_HASH_BLOCK_SIZE_MAX CC_SHA512_BLOCK_SIZE /*1024b*/
+
+#define CC_HMAC_BLOCK_SIZE_MAX CC_HASH_BLOCK_SIZE_MAX
+
+#define CC_DRV_ALG_MAX_BLOCK_SIZE CC_HASH_BLOCK_SIZE_MAX
+
+#define CC_CPP_NUM_SLOTS 8
+#define CC_CPP_NUM_ALGS 2
+
+enum cc_cpp_alg {
+ CC_CPP_SM4 = 1,
+ CC_CPP_AES = 0
+};
+
+enum drv_engine_type {
+ DRV_ENGINE_NULL = 0,
+ DRV_ENGINE_AES = 1,
+ DRV_ENGINE_DES = 2,
+ DRV_ENGINE_HASH = 3,
+ DRV_ENGINE_RC4 = 4,
+ DRV_ENGINE_DOUT = 5,
+ DRV_ENGINE_RESERVE32B = S32_MAX,
+};
+
+enum drv_crypto_alg {
+ DRV_CRYPTO_ALG_NULL = -1,
+ DRV_CRYPTO_ALG_AES = 0,
+ DRV_CRYPTO_ALG_DES = 1,
+ DRV_CRYPTO_ALG_HASH = 2,
+ DRV_CRYPTO_ALG_C2 = 3,
+ DRV_CRYPTO_ALG_HMAC = 4,
+ DRV_CRYPTO_ALG_AEAD = 5,
+ DRV_CRYPTO_ALG_BYPASS = 6,
+ DRV_CRYPTO_ALG_NUM = 7,
+ DRV_CRYPTO_ALG_RESERVE32B = S32_MAX
+};
+
+enum drv_crypto_direction {
+ DRV_CRYPTO_DIRECTION_NULL = -1,
+ DRV_CRYPTO_DIRECTION_ENCRYPT = 0,
+ DRV_CRYPTO_DIRECTION_DECRYPT = 1,
+ DRV_CRYPTO_DIRECTION_DECRYPT_ENCRYPT = 3,
+ DRV_CRYPTO_DIRECTION_RESERVE32B = S32_MAX
+};
+
+enum drv_cipher_mode {
+ DRV_CIPHER_NULL_MODE = -1,
+ DRV_CIPHER_ECB = 0,
+ DRV_CIPHER_CBC = 1,
+ DRV_CIPHER_CTR = 2,
+ DRV_CIPHER_CBC_MAC = 3,
+ DRV_CIPHER_XTS = 4,
+ DRV_CIPHER_XCBC_MAC = 5,
+ DRV_CIPHER_OFB = 6,
+ DRV_CIPHER_CMAC = 7,
+ DRV_CIPHER_CCM = 8,
+ DRV_CIPHER_CBC_CTS = 11,
+ DRV_CIPHER_GCTR = 12,
+ DRV_CIPHER_ESSIV = 13,
+ DRV_CIPHER_RESERVE32B = S32_MAX
+};
+
+enum drv_hash_mode {
+ DRV_HASH_NULL = -1,
+ DRV_HASH_SHA1 = 0,
+ DRV_HASH_SHA256 = 1,
+ DRV_HASH_SHA224 = 2,
+ DRV_HASH_SHA512 = 3,
+ DRV_HASH_SHA384 = 4,
+ DRV_HASH_MD5 = 5,
+ DRV_HASH_CBC_MAC = 6,
+ DRV_HASH_XCBC_MAC = 7,
+ DRV_HASH_CMAC = 8,
+ DRV_HASH_SM3 = 9,
+ DRV_HASH_MODE_NUM = 10,
+ DRV_HASH_RESERVE32B = S32_MAX
+};
+
+enum drv_hash_hw_mode {
+ DRV_HASH_HW_MD5 = 0,
+ DRV_HASH_HW_SHA1 = 1,
+ DRV_HASH_HW_SHA256 = 2,
+ DRV_HASH_HW_SHA224 = 10,
+ DRV_HASH_HW_SHA512 = 4,
+ DRV_HASH_HW_SHA384 = 12,
+ DRV_HASH_HW_GHASH = 6,
+ DRV_HASH_HW_SM3 = 14,
+ DRV_HASH_HW_RESERVE32B = S32_MAX
+};
+
+#endif /* _CC_CRYPTO_CTX_H_ */
diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
new file mode 100644
index 000000000..8f008f024
--- /dev/null
+++ b/drivers/crypto/ccree/cc_debugfs.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2019 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/stringify.h>
+#include "cc_driver.h"
+#include "cc_crypto_ctx.h"
+#include "cc_debugfs.h"
+
+#define CC_DEBUG_REG(_X) { \
+ .name = __stringify(_X),\
+ .offset = CC_REG(_X) \
+ }
+
+/*
+ * This is a global var for the dentry of the
+ * debugfs ccree/ dir. It is not tied down to
+ * a specific instance of ccree, hence it is
+ * global.
+ */
+static struct dentry *cc_debugfs_dir;
+
+static struct debugfs_reg32 ver_sig_regs[] = {
+ { .name = "SIGNATURE" }, /* Must be 0th */
+ { .name = "VERSION" }, /* Must be 1st */
+};
+
+static const struct debugfs_reg32 pid_cid_regs[] = {
+ CC_DEBUG_REG(PERIPHERAL_ID_0),
+ CC_DEBUG_REG(PERIPHERAL_ID_1),
+ CC_DEBUG_REG(PERIPHERAL_ID_2),
+ CC_DEBUG_REG(PERIPHERAL_ID_3),
+ CC_DEBUG_REG(PERIPHERAL_ID_4),
+ CC_DEBUG_REG(COMPONENT_ID_0),
+ CC_DEBUG_REG(COMPONENT_ID_1),
+ CC_DEBUG_REG(COMPONENT_ID_2),
+ CC_DEBUG_REG(COMPONENT_ID_3),
+};
+
+static const struct debugfs_reg32 debug_regs[] = {
+ CC_DEBUG_REG(HOST_IRR),
+ CC_DEBUG_REG(HOST_POWER_DOWN_EN),
+ CC_DEBUG_REG(AXIM_MON_ERR),
+ CC_DEBUG_REG(DSCRPTR_QUEUE_CONTENT),
+ CC_DEBUG_REG(HOST_IMR),
+ CC_DEBUG_REG(AXIM_CFG),
+ CC_DEBUG_REG(AXIM_CACHE_PARAMS),
+ CC_DEBUG_REG(GPR_HOST),
+ CC_DEBUG_REG(AXIM_MON_COMP),
+};
+
+void __init cc_debugfs_global_init(void)
+{
+ cc_debugfs_dir = debugfs_create_dir("ccree", NULL);
+}
+
+void cc_debugfs_global_fini(void)
+{
+ debugfs_remove(cc_debugfs_dir);
+}
+
+int cc_debugfs_init(struct cc_drvdata *drvdata)
+{
+ struct device *dev = drvdata_to_dev(drvdata);
+ struct debugfs_regset32 *regset, *verset;
+
+ regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+ return -ENOMEM;
+
+ regset->regs = debug_regs;
+ regset->nregs = ARRAY_SIZE(debug_regs);
+ regset->base = drvdata->cc_base;
+ regset->dev = dev;
+
+ drvdata->dir = debugfs_create_dir(drvdata->plat_dev->name,
+ cc_debugfs_dir);
+
+ debugfs_create_regset32("regs", 0400, drvdata->dir, regset);
+ debugfs_create_bool("coherent", 0400, drvdata->dir, &drvdata->coherent);
+
+ verset = devm_kzalloc(dev, sizeof(*verset), GFP_KERNEL);
+ /* Failing here is not important enough to fail the module load */
+ if (!verset)
+ return 0;
+
+ if (drvdata->hw_rev <= CC_HW_REV_712) {
+ ver_sig_regs[0].offset = drvdata->sig_offset;
+ ver_sig_regs[1].offset = drvdata->ver_offset;
+ verset->regs = ver_sig_regs;
+ verset->nregs = ARRAY_SIZE(ver_sig_regs);
+ } else {
+ verset->regs = pid_cid_regs;
+ verset->nregs = ARRAY_SIZE(pid_cid_regs);
+ }
+ verset->base = drvdata->cc_base;
+ verset->dev = dev;
+
+ debugfs_create_regset32("version", 0400, drvdata->dir, verset);
+ return 0;
+}
+
+void cc_debugfs_fini(struct cc_drvdata *drvdata)
+{
+ debugfs_remove_recursive(drvdata->dir);
+}
diff --git a/drivers/crypto/ccree/cc_debugfs.h b/drivers/crypto/ccree/cc_debugfs.h
new file mode 100644
index 000000000..664ff402e
--- /dev/null
+++ b/drivers/crypto/ccree/cc_debugfs.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
+
+#ifndef __CC_DEBUGFS_H__
+#define __CC_DEBUGFS_H__
+
+#ifdef CONFIG_DEBUG_FS
+void cc_debugfs_global_init(void);
+void cc_debugfs_global_fini(void);
+
+int cc_debugfs_init(struct cc_drvdata *drvdata);
+void cc_debugfs_fini(struct cc_drvdata *drvdata);
+
+#else
+
+static inline void cc_debugfs_global_init(void) {}
+static inline void cc_debugfs_global_fini(void) {}
+
+static inline int cc_debugfs_init(struct cc_drvdata *drvdata)
+{
+ return 0;
+}
+
+static inline void cc_debugfs_fini(struct cc_drvdata *drvdata) {}
+
+#endif
+
+#endif /*__CC_DEBUGFS_H__*/
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
new file mode 100644
index 000000000..d489c6f80
--- /dev/null
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -0,0 +1,679 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2019 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/crypto.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+
+#include "cc_driver.h"
+#include "cc_request_mgr.h"
+#include "cc_buffer_mgr.h"
+#include "cc_debugfs.h"
+#include "cc_cipher.h"
+#include "cc_aead.h"
+#include "cc_hash.h"
+#include "cc_sram_mgr.h"
+#include "cc_pm.h"
+#include "cc_fips.h"
+
+bool cc_dump_desc;
+module_param_named(dump_desc, cc_dump_desc, bool, 0600);
+MODULE_PARM_DESC(cc_dump_desc, "Dump descriptors to kernel log as debugging aid");
+bool cc_dump_bytes;
+module_param_named(dump_bytes, cc_dump_bytes, bool, 0600);
+MODULE_PARM_DESC(cc_dump_bytes, "Dump buffers to kernel log as debugging aid");
+
+static bool cc_sec_disable;
+module_param_named(sec_disable, cc_sec_disable, bool, 0600);
+MODULE_PARM_DESC(cc_sec_disable, "Disable security functions");
+
+struct cc_hw_data {
+ char *name;
+ enum cc_hw_rev rev;
+ u32 sig;
+ u32 cidr_0123;
+ u32 pidr_0124;
+ int std_bodies;
+};
+
+#define CC_NUM_IDRS 4
+#define CC_HW_RESET_LOOP_COUNT 10
+
+/* Note: PIDR3 holds CMOD/Rev, so it is ignored for HW identification purposes */
+static const u32 pidr_0124_offsets[CC_NUM_IDRS] = {
+ CC_REG(PERIPHERAL_ID_0), CC_REG(PERIPHERAL_ID_1),
+ CC_REG(PERIPHERAL_ID_2), CC_REG(PERIPHERAL_ID_4)
+};
+
+static const u32 cidr_0123_offsets[CC_NUM_IDRS] = {
+ CC_REG(COMPONENT_ID_0), CC_REG(COMPONENT_ID_1),
+ CC_REG(COMPONENT_ID_2), CC_REG(COMPONENT_ID_3)
+};
+
+/* Hardware revisions defs. */
+
+/* The 703 is an OSCCA-only variant of the 713 */
+static const struct cc_hw_data cc703_hw = {
+ .name = "703", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU,
+ .pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_OSCCA
+};
+
+static const struct cc_hw_data cc713_hw = {
+ .name = "713", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU,
+ .pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_ALL
+};
+
+static const struct cc_hw_data cc712_hw = {
+ .name = "712", .rev = CC_HW_REV_712, .sig = 0xDCC71200U,
+ .std_bodies = CC_STD_ALL
+};
+
+static const struct cc_hw_data cc710_hw = {
+ .name = "710", .rev = CC_HW_REV_710, .sig = 0xDCC63200U,
+ .std_bodies = CC_STD_ALL
+};
+
+static const struct cc_hw_data cc630p_hw = {
+ .name = "630P", .rev = CC_HW_REV_630, .sig = 0xDCC63000U,
+ .std_bodies = CC_STD_ALL
+};
+
+static const struct of_device_id arm_ccree_dev_of_match[] = {
+ { .compatible = "arm,cryptocell-703-ree", .data = &cc703_hw },
+ { .compatible = "arm,cryptocell-713-ree", .data = &cc713_hw },
+ { .compatible = "arm,cryptocell-712-ree", .data = &cc712_hw },
+ { .compatible = "arm,cryptocell-710-ree", .data = &cc710_hw },
+ { .compatible = "arm,cryptocell-630p-ree", .data = &cc630p_hw },
+ {}
+};
+MODULE_DEVICE_TABLE(of, arm_ccree_dev_of_match);
+
+static void init_cc_cache_params(struct cc_drvdata *drvdata)
+{
+ struct device *dev = drvdata_to_dev(drvdata);
+ u32 cache_params, ace_const, val;
+ u64 mask;
+
+ /* compute CC_AXIM_CACHE_PARAMS */
+ cache_params = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));
+ dev_dbg(dev, "Cache params previous: 0x%08X\n", cache_params);
+
+ /* non cached or write-back, write allocate */
+ val = drvdata->coherent ? 0xb : 0x2;
+
+ mask = CC_GENMASK(CC_AXIM_CACHE_PARAMS_AWCACHE);
+ cache_params &= ~mask;
+ cache_params |= FIELD_PREP(mask, val);
+
+ mask = CC_GENMASK(CC_AXIM_CACHE_PARAMS_AWCACHE_LAST);
+ cache_params &= ~mask;
+ cache_params |= FIELD_PREP(mask, val);
+
+ mask = CC_GENMASK(CC_AXIM_CACHE_PARAMS_ARCACHE);
+ cache_params &= ~mask;
+ cache_params |= FIELD_PREP(mask, val);
+
+ drvdata->cache_params = cache_params;
+
+ dev_dbg(dev, "Cache params current: 0x%08X\n", cache_params);
+
+ if (drvdata->hw_rev <= CC_HW_REV_710)
+ return;
+
+ /* compute CC_AXIM_ACE_CONST */
+ ace_const = cc_ioread(drvdata, CC_REG(AXIM_ACE_CONST));
+ dev_dbg(dev, "ACE-const previous: 0x%08X\n", ace_const);
+
+ /* system or outer-sharable */
+ val = drvdata->coherent ? 0x2 : 0x3;
+
+ mask = CC_GENMASK(CC_AXIM_ACE_CONST_ARDOMAIN);
+ ace_const &= ~mask;
+ ace_const |= FIELD_PREP(mask, val);
+
+ mask = CC_GENMASK(CC_AXIM_ACE_CONST_AWDOMAIN);
+ ace_const &= ~mask;
+ ace_const |= FIELD_PREP(mask, val);
+
+ dev_dbg(dev, "ACE-const current: 0x%08X\n", ace_const);
+
+ drvdata->ace_const = ace_const;
+}
+
+static u32 cc_read_idr(struct cc_drvdata *drvdata, const u32 *idr_offsets)
+{
+ int i;
+ union {
+ u8 regs[CC_NUM_IDRS];
+ __le32 val;
+ } idr;
+
+ for (i = 0; i < CC_NUM_IDRS; ++i)
+ idr.regs[i] = cc_ioread(drvdata, idr_offsets[i]);
+
+ return le32_to_cpu(idr.val);
+}
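The ID bytes are packed little-endian, so the first register read lands in the least
significant byte. A minimal sketch of that packing (hypothetical helper, not driver
code), using the component ID bytes that compose the 0xB105F00D value expected for
the 703/713 parts:

/* Hedged illustration only: cc_read_idr() packs the four byte-wide reads
 * into one 32-bit word, LSB first, which is then compared against
 * hw_rev->cidr_0123 / hw_rev->pidr_0124.
 */
static u32 cc_pack_idr_example(void)
{
	const u8 cidr[4] = { 0x0D, 0xF0, 0x05, 0xB1 };	/* CIDR0..CIDR3 */

	return cidr[0] | (cidr[1] << 8) | (cidr[2] << 16) |
	       ((u32)cidr[3] << 24);	/* == 0xB105F00D */
}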
+
+void __dump_byte_array(const char *name, const u8 *buf, size_t len)
+{
+ char prefix[64];
+
+ if (!buf)
+ return;
+
+ snprintf(prefix, sizeof(prefix), "%s[%zu]: ", name, len);
+
+ print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_ADDRESS, 16, 1, buf,
+ len, false);
+}
+
+static irqreturn_t cc_isr(int irq, void *dev_id)
+{
+ struct cc_drvdata *drvdata = (struct cc_drvdata *)dev_id;
+ struct device *dev = drvdata_to_dev(drvdata);
+ u32 irr;
+ u32 imr;
+
+ /* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
+ /* if driver suspended return, probably shared interrupt */
+ if (pm_runtime_suspended(dev))
+ return IRQ_NONE;
+
+ /* read the interrupt status */
+ irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
+ dev_dbg(dev, "Got IRR=0x%08X\n", irr);
+
+ if (irr == 0) /* Probably shared interrupt line */
+ return IRQ_NONE;
+
+ imr = cc_ioread(drvdata, CC_REG(HOST_IMR));
+
+ /* clear interrupt - must be before processing events */
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), irr);
+
+ drvdata->irq = irr;
+ /* Completion interrupt - most probable */
+ if (irr & drvdata->comp_mask) {
+ /* Mask all completion interrupts - will be unmasked in
+ * deferred service handler
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | drvdata->comp_mask);
+ irr &= ~drvdata->comp_mask;
+ complete_request(drvdata);
+ }
+#ifdef CONFIG_CRYPTO_FIPS
+ /* TEE FIPS interrupt */
+ if (irr & CC_GPR0_IRQ_MASK) {
+ /* Mask interrupt - will be unmasked in Deferred service
+ * handler
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_GPR0_IRQ_MASK);
+ irr &= ~CC_GPR0_IRQ_MASK;
+ fips_handler(drvdata);
+ }
+#endif
+ /* AXI error interrupt */
+ if (irr & CC_AXI_ERR_IRQ_MASK) {
+ u32 axi_err;
+
+ /* Read the AXI error ID */
+ axi_err = cc_ioread(drvdata, CC_REG(AXIM_MON_ERR));
+ dev_dbg(dev, "AXI completion error: axim_mon_err=0x%08X\n",
+ axi_err);
+
+ irr &= ~CC_AXI_ERR_IRQ_MASK;
+ }
+
+ if (irr) {
+ dev_dbg_ratelimited(dev, "IRR includes unknown cause bits (0x%08X)\n",
+ irr);
+ /* Just warning */
+ }
+
+ return IRQ_HANDLED;
+}
+
+bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata)
+{
+ unsigned int val;
+ unsigned int i;
+
+	/* 712/710/630 have no reset completion indication, always return true */
+ if (drvdata->hw_rev <= CC_HW_REV_712)
+ return true;
+
+ for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) {
+ /* in cc7x3 NVM_IS_IDLE indicates that CC reset is
+ * completed and device is fully functional
+ */
+ val = cc_ioread(drvdata, CC_REG(NVM_IS_IDLE));
+ if (val & CC_NVM_IS_IDLE_MASK) {
+			/* hw indicates reset completed */
+ return true;
+ }
+		/* allow other processes to be scheduled on the processor */
+ schedule();
+ }
+ /* reset not completed */
+ return false;
+}
+
+int init_cc_regs(struct cc_drvdata *drvdata)
+{
+ unsigned int val;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+	/* Unmask all AXI interrupt sources in the AXI_CFG1 register */
+	/* AXI interrupt configuration is obsolete starting at cc7x3 */
+ if (drvdata->hw_rev <= CC_HW_REV_712) {
+ val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
+ cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
+ dev_dbg(dev, "AXIM_CFG=0x%08X\n",
+ cc_ioread(drvdata, CC_REG(AXIM_CFG)));
+ }
+
+ /* Clear all pending interrupts */
+ val = cc_ioread(drvdata, CC_REG(HOST_IRR));
+ dev_dbg(dev, "IRR=0x%08X\n", val);
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), val);
+
+ /* Unmask relevant interrupt cause */
+ val = drvdata->comp_mask | CC_AXI_ERR_IRQ_MASK;
+
+ if (drvdata->hw_rev >= CC_HW_REV_712)
+ val |= CC_GPR0_IRQ_MASK;
+
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), ~val);
+
+ cc_iowrite(drvdata, CC_REG(AXIM_CACHE_PARAMS), drvdata->cache_params);
+ if (drvdata->hw_rev >= CC_HW_REV_712)
+ cc_iowrite(drvdata, CC_REG(AXIM_ACE_CONST), drvdata->ace_const);
+
+ return 0;
+}
+
+static int init_cc_resources(struct platform_device *plat_dev)
+{
+ struct resource *req_mem_cc_regs = NULL;
+ struct cc_drvdata *new_drvdata;
+ struct device *dev = &plat_dev->dev;
+ struct device_node *np = dev->of_node;
+ u32 val, hw_rev_pidr, sig_cidr;
+ u64 dma_mask;
+ const struct cc_hw_data *hw_rev;
+ struct clk *clk;
+ int irq;
+ int rc = 0;
+
+ new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL);
+ if (!new_drvdata)
+ return -ENOMEM;
+
+ hw_rev = of_device_get_match_data(dev);
+ new_drvdata->hw_rev_name = hw_rev->name;
+ new_drvdata->hw_rev = hw_rev->rev;
+ new_drvdata->std_bodies = hw_rev->std_bodies;
+
+ if (hw_rev->rev >= CC_HW_REV_712) {
+ new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP);
+ new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_712);
+ new_drvdata->ver_offset = CC_REG(HOST_VERSION_712);
+ } else {
+ new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP8);
+ new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_630);
+ new_drvdata->ver_offset = CC_REG(HOST_VERSION_630);
+ }
+
+ new_drvdata->comp_mask = CC_COMP_IRQ_MASK;
+
+ platform_set_drvdata(plat_dev, new_drvdata);
+ new_drvdata->plat_dev = plat_dev;
+
+ clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "Error getting clock\n");
+ new_drvdata->clk = clk;
+
+ new_drvdata->coherent = of_dma_is_coherent(np);
+
+ /* Get device resources */
+ /* First CC registers space */
+ req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
+ /* Map registers space */
+ new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
+ if (IS_ERR(new_drvdata->cc_base))
+ return PTR_ERR(new_drvdata->cc_base);
+
+ dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
+ req_mem_cc_regs);
+ dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
+ &req_mem_cc_regs->start, new_drvdata->cc_base);
+
+ /* Then IRQ */
+ irq = platform_get_irq(plat_dev, 0);
+ if (irq < 0)
+ return irq;
+
+ init_completion(&new_drvdata->hw_queue_avail);
+
+ if (!dev->dma_mask)
+ dev->dma_mask = &dev->coherent_dma_mask;
+
+ dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
+ rc = dma_set_coherent_mask(dev, dma_mask);
+ if (rc) {
+ dev_err(dev, "Failed in dma_set_coherent_mask, mask=%llx\n",
+ dma_mask);
+ return rc;
+ }
+
+ rc = clk_prepare_enable(new_drvdata->clk);
+ if (rc) {
+ dev_err(dev, "Failed to enable clock");
+ return rc;
+ }
+
+ new_drvdata->sec_disabled = cc_sec_disable;
+
+ pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ rc = pm_runtime_get_sync(dev);
+ if (rc < 0) {
+ dev_err(dev, "pm_runtime_get_sync() failed: %d\n", rc);
+ goto post_pm_err;
+ }
+
+ /* Wait for Cryptocell reset completion */
+ if (!cc_wait_for_reset_completion(new_drvdata)) {
+ dev_err(dev, "Cryptocell reset not completed");
+ }
+
+ if (hw_rev->rev <= CC_HW_REV_712) {
+ /* Verify correct mapping */
+ val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
+ if (val != hw_rev->sig) {
+ dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
+ val, hw_rev->sig);
+ rc = -EINVAL;
+ goto post_pm_err;
+ }
+ sig_cidr = val;
+ hw_rev_pidr = cc_ioread(new_drvdata, new_drvdata->ver_offset);
+ } else {
+ /* Verify correct mapping */
+ val = cc_read_idr(new_drvdata, pidr_0124_offsets);
+ if (val != hw_rev->pidr_0124) {
+ dev_err(dev, "Invalid CC PIDR: PIDR0124=0x%08X != expected=0x%08X\n",
+ val, hw_rev->pidr_0124);
+ rc = -EINVAL;
+ goto post_pm_err;
+ }
+ hw_rev_pidr = val;
+
+ val = cc_read_idr(new_drvdata, cidr_0123_offsets);
+ if (val != hw_rev->cidr_0123) {
+ dev_err(dev, "Invalid CC CIDR: CIDR0123=0x%08X != expected=0x%08X\n",
+ val, hw_rev->cidr_0123);
+ rc = -EINVAL;
+ goto post_pm_err;
+ }
+ sig_cidr = val;
+
+ /* Check HW engine configuration */
+ val = cc_ioread(new_drvdata, CC_REG(HOST_REMOVE_INPUT_PINS));
+ switch (val) {
+ case CC_PINS_FULL:
+ /* This is fine */
+ break;
+ case CC_PINS_SLIM:
+ if (new_drvdata->std_bodies & CC_STD_NIST) {
+ dev_warn(dev, "703 mode forced due to HW configuration.\n");
+ new_drvdata->std_bodies = CC_STD_OSCCA;
+ }
+ break;
+ default:
+ dev_err(dev, "Unsupported engines configuration.\n");
+ rc = -EINVAL;
+ goto post_pm_err;
+ }
+
+ /* Check security disable state */
+ val = cc_ioread(new_drvdata, CC_REG(SECURITY_DISABLED));
+ val &= CC_SECURITY_DISABLED_MASK;
+ new_drvdata->sec_disabled |= !!val;
+
+ if (!new_drvdata->sec_disabled) {
+ new_drvdata->comp_mask |= CC_CPP_SM4_ABORT_MASK;
+ if (new_drvdata->std_bodies & CC_STD_NIST)
+ new_drvdata->comp_mask |= CC_CPP_AES_ABORT_MASK;
+ }
+ }
+
+ if (new_drvdata->sec_disabled)
+ dev_info(dev, "Security Disabled mode is in effect. Security functions disabled.\n");
+
+ /* Display HW versions */
+	dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%08X, Driver version %s\n",
+ hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION);
+ /* register the driver isr function */
+ rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "ccree",
+ new_drvdata);
+ if (rc) {
+ dev_err(dev, "Could not register to interrupt %d\n", irq);
+ goto post_pm_err;
+ }
+ dev_dbg(dev, "Registered to IRQ: %d\n", irq);
+
+ init_cc_cache_params(new_drvdata);
+
+ rc = init_cc_regs(new_drvdata);
+ if (rc) {
+ dev_err(dev, "init_cc_regs failed\n");
+ goto post_pm_err;
+ }
+
+ rc = cc_debugfs_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "Failed registering debugfs interface\n");
+ goto post_regs_err;
+ }
+
+ rc = cc_fips_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_fips_init failed 0x%x\n", rc);
+ goto post_debugfs_err;
+ }
+ rc = cc_sram_mgr_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_sram_mgr_init failed\n");
+ goto post_fips_init_err;
+ }
+
+ new_drvdata->mlli_sram_addr =
+ cc_sram_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
+ if (new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR) {
+ rc = -ENOMEM;
+ goto post_fips_init_err;
+ }
+
+ rc = cc_req_mgr_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_req_mgr_init failed\n");
+ goto post_fips_init_err;
+ }
+
+ rc = cc_buffer_mgr_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_buffer_mgr_init failed\n");
+ goto post_req_mgr_err;
+ }
+
+ /* hash must be allocated first due to use of send_request_init()
+ * and dependency of AEAD on it
+ */
+ rc = cc_hash_alloc(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_hash_alloc failed\n");
+ goto post_buf_mgr_err;
+ }
+
+ /* Allocate crypto algs */
+ rc = cc_cipher_alloc(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_cipher_alloc failed\n");
+ goto post_hash_err;
+ }
+
+ rc = cc_aead_alloc(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_aead_alloc failed\n");
+ goto post_cipher_err;
+ }
+
+ /* If we got here and FIPS mode is enabled
+ * it means all FIPS test passed, so let TEE
+ * know we're good.
+ */
+ cc_set_ree_fips_status(new_drvdata, true);
+
+ pm_runtime_put(dev);
+ return 0;
+
+post_cipher_err:
+ cc_cipher_free(new_drvdata);
+post_hash_err:
+ cc_hash_free(new_drvdata);
+post_buf_mgr_err:
+ cc_buffer_mgr_fini(new_drvdata);
+post_req_mgr_err:
+ cc_req_mgr_fini(new_drvdata);
+post_fips_init_err:
+ cc_fips_fini(new_drvdata);
+post_debugfs_err:
+ cc_debugfs_fini(new_drvdata);
+post_regs_err:
+ fini_cc_regs(new_drvdata);
+post_pm_err:
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ clk_disable_unprepare(new_drvdata->clk);
+ return rc;
+}
+
+void fini_cc_regs(struct cc_drvdata *drvdata)
+{
+ /* Mask all interrupts */
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), 0xFFFFFFFF);
+}
+
+static void cleanup_cc_resources(struct platform_device *plat_dev)
+{
+ struct device *dev = &plat_dev->dev;
+ struct cc_drvdata *drvdata =
+ (struct cc_drvdata *)platform_get_drvdata(plat_dev);
+
+ cc_aead_free(drvdata);
+ cc_cipher_free(drvdata);
+ cc_hash_free(drvdata);
+ cc_buffer_mgr_fini(drvdata);
+ cc_req_mgr_fini(drvdata);
+ cc_fips_fini(drvdata);
+ cc_debugfs_fini(drvdata);
+ fini_cc_regs(drvdata);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ clk_disable_unprepare(drvdata->clk);
+}
+
+unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata)
+{
+ if (drvdata->hw_rev >= CC_HW_REV_712)
+ return HASH_LEN_SIZE_712;
+ else
+ return HASH_LEN_SIZE_630;
+}
+
+static int ccree_probe(struct platform_device *plat_dev)
+{
+ int rc;
+ struct device *dev = &plat_dev->dev;
+
+ /* Map registers space */
+ rc = init_cc_resources(plat_dev);
+ if (rc)
+ return rc;
+
+ dev_info(dev, "ARM ccree device initialized\n");
+
+ return 0;
+}
+
+static int ccree_remove(struct platform_device *plat_dev)
+{
+ struct device *dev = &plat_dev->dev;
+
+ dev_dbg(dev, "Releasing ccree resources...\n");
+
+ cleanup_cc_resources(plat_dev);
+
+ dev_info(dev, "ARM ccree device terminated\n");
+
+ return 0;
+}
+
+static struct platform_driver ccree_driver = {
+ .driver = {
+ .name = "ccree",
+ .of_match_table = arm_ccree_dev_of_match,
+#ifdef CONFIG_PM
+ .pm = &ccree_pm,
+#endif
+ },
+ .probe = ccree_probe,
+ .remove = ccree_remove,
+};
+
+static int __init ccree_init(void)
+{
+ int rc;
+
+ cc_debugfs_global_init();
+
+ rc = platform_driver_register(&ccree_driver);
+ if (rc) {
+ cc_debugfs_global_fini();
+ return rc;
+ }
+
+ return 0;
+}
+module_init(ccree_init);
+
+static void __exit ccree_exit(void)
+{
+ platform_driver_unregister(&ccree_driver);
+ cc_debugfs_global_fini();
+}
+module_exit(ccree_exit);
+
+/* Module description */
+MODULE_DESCRIPTION("ARM TrustZone CryptoCell REE Driver");
+MODULE_VERSION(DRV_MODULE_VERSION);
+MODULE_AUTHOR("ARM");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
new file mode 100644
index 000000000..f49579aa1
--- /dev/null
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
+
+/* \file cc_driver.h
+ * ARM CryptoCell Linux Crypto Driver
+ */
+
+#ifndef __CC_DRIVER_H__
+#define __CC_DRIVER_H__
+
+#ifdef COMP_IN_WQ
+#include <linux/workqueue.h>
+#else
+#include <linux/interrupt.h>
+#endif
+#include <linux/dma-mapping.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aes.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+#include "cc_host_regs.h"
+#include "cc_crypto_ctx.h"
+#include "cc_hw_queue_defs.h"
+#include "cc_sram_mgr.h"
+
+extern bool cc_dump_desc;
+extern bool cc_dump_bytes;
+
+#define DRV_MODULE_VERSION "5.0"
+
+enum cc_hw_rev {
+ CC_HW_REV_630 = 630,
+ CC_HW_REV_710 = 710,
+ CC_HW_REV_712 = 712,
+ CC_HW_REV_713 = 713
+};
+
+enum cc_std_body {
+ CC_STD_NIST = 0x1,
+ CC_STD_OSCCA = 0x2,
+ CC_STD_ALL = 0x3
+};
+
+#define CC_PINS_FULL 0x0
+#define CC_PINS_SLIM 0x9F
+
+/* Maximum DMA mask supported by IP */
+#define DMA_BIT_MASK_LEN 48
+
+#define CC_AXI_IRQ_MASK ((1 << CC_AXIM_CFG_BRESPMASK_BIT_SHIFT) | \
+ (1 << CC_AXIM_CFG_RRESPMASK_BIT_SHIFT) | \
+ (1 << CC_AXIM_CFG_INFLTMASK_BIT_SHIFT) | \
+ (1 << CC_AXIM_CFG_COMPMASK_BIT_SHIFT))
+
+#define CC_AXI_ERR_IRQ_MASK BIT(CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT)
+
+#define CC_COMP_IRQ_MASK BIT(CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)
+
+#define CC_SECURITY_DISABLED_MASK BIT(CC_SECURITY_DISABLED_VALUE_BIT_SHIFT)
+
+#define CC_NVM_IS_IDLE_MASK BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)
+
+#define AXIM_MON_COMP_VALUE CC_GENMASK(CC_AXIM_MON_COMP_VALUE)
+
+#define CC_CPP_AES_ABORT_MASK ( \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SHIFT))
+
+#define CC_CPP_SM4_ABORT_MASK ( \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SHIFT))
+
+/* Register name mangling macro */
+#define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET
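A worked expansion of the mangling, plus the typical call pattern it enables (the
symbolic offsets themselves are defined in cc_host_regs.h):

/* CC_REG(HOST_IRR)  expands to  CC_HOST_IRR_REG_OFFSET
 * CC_REG(AXIM_CFG)  expands to  CC_AXIM_CFG_REG_OFFSET
 *
 * so register accesses elsewhere in the driver read as:
 *	irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
 */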
+
+/* TEE FIPS status interrupt */
+#define CC_GPR0_IRQ_MASK BIT(CC_HOST_IRR_GPR0_BIT_SHIFT)
+
+#define CC_CRA_PRIO 400
+
+#define MIN_HW_QUEUE_SIZE 50 /* Minimum size required for proper function */
+
+#define MAX_REQUEST_QUEUE_SIZE 4096
+#define MAX_MLLI_BUFF_SIZE 2080
+
+/* Definitions for HW descriptors DIN/DOUT fields */
+#define NS_BIT 1
+#define AXI_ID 0
+/* AXI_ID is not actually the AXI ID of the transaction but the value of the
+ * AXI_ID field in the HW descriptor. The DMA engine adds 8 to that value.
+ */
+
+struct cc_cpp_req {
+ bool is_cpp;
+ enum cc_cpp_alg alg;
+ u8 slot;
+};
+
+#define CC_MAX_IVGEN_DMA_ADDRESSES 3
+struct cc_crypto_req {
+ void (*user_cb)(struct device *dev, void *req, int err);
+ void *user_arg;
+ struct completion seq_compl; /* request completion */
+ struct cc_cpp_req cpp;
+};
+
+/**
+ * struct cc_drvdata - driver private data context
+ * @cc_base: virt address of the CC registers
+ * @irq: bitmap indicating source of last interrupt
+ */
+struct cc_drvdata {
+ void __iomem *cc_base;
+ int irq;
+ struct completion hw_queue_avail; /* wait for HW queue availability */
+ struct platform_device *plat_dev;
+ u32 mlli_sram_addr;
+ struct dma_pool *mlli_buffs_pool;
+ struct list_head alg_list;
+ void *hash_handle;
+ void *aead_handle;
+ void *request_mgr_handle;
+ void *fips_handle;
+ u32 sram_free_offset; /* offset to non-allocated area in SRAM */
+ struct dentry *dir; /* for debugfs */
+ struct clk *clk;
+ bool coherent;
+ char *hw_rev_name;
+ enum cc_hw_rev hw_rev;
+ u32 axim_mon_offset;
+ u32 sig_offset;
+ u32 ver_offset;
+ int std_bodies;
+ bool sec_disabled;
+ u32 comp_mask;
+ u32 cache_params;
+ u32 ace_const;
+};
+
+struct cc_crypto_alg {
+ struct list_head entry;
+ int cipher_mode;
+ int flow_mode; /* Note: currently, refers to the cipher mode only. */
+ int auth_mode;
+ struct cc_drvdata *drvdata;
+ struct skcipher_alg skcipher_alg;
+ struct aead_alg aead_alg;
+};
+
+struct cc_alg_template {
+ char name[CRYPTO_MAX_ALG_NAME];
+ char driver_name[CRYPTO_MAX_ALG_NAME];
+ unsigned int blocksize;
+ union {
+ struct skcipher_alg skcipher;
+ struct aead_alg aead;
+ } template_u;
+ int cipher_mode;
+ int flow_mode; /* Note: currently, refers to the cipher mode only. */
+ int auth_mode;
+ u32 min_hw_rev;
+ enum cc_std_body std_body;
+ bool sec_func;
+ unsigned int data_unit;
+ struct cc_drvdata *drvdata;
+};
+
+struct async_gen_req_ctx {
+ dma_addr_t iv_dma_addr;
+ u8 *iv;
+ enum drv_crypto_direction op_type;
+};
+
+static inline struct device *drvdata_to_dev(struct cc_drvdata *drvdata)
+{
+ return &drvdata->plat_dev->dev;
+}
+
+void __dump_byte_array(const char *name, const u8 *buf, size_t len);
+static inline void dump_byte_array(const char *name, const u8 *the_array,
+ size_t size)
+{
+ if (cc_dump_bytes)
+ __dump_byte_array(name, the_array, size);
+}
+
+bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata);
+int init_cc_regs(struct cc_drvdata *drvdata);
+void fini_cc_regs(struct cc_drvdata *drvdata);
+unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata);
+
+static inline void cc_iowrite(struct cc_drvdata *drvdata, u32 reg, u32 val)
+{
+ iowrite32(val, (drvdata->cc_base + reg));
+}
+
+static inline u32 cc_ioread(struct cc_drvdata *drvdata, u32 reg)
+{
+ return ioread32(drvdata->cc_base + reg);
+}
+
+static inline gfp_t cc_gfp_flags(struct crypto_async_request *req)
+{
+ return (req->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+}
+
+static inline void set_queue_last_ind(struct cc_drvdata *drvdata,
+ struct cc_hw_desc *pdesc)
+{
+ if (drvdata->hw_rev >= CC_HW_REV_712)
+ set_queue_last_ind_bit(pdesc);
+}
+
+#endif /*__CC_DRIVER_H__*/
diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
new file mode 100644
index 000000000..702aefc21
--- /dev/null
+++ b/drivers/crypto/ccree/cc_fips.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
+
+#include <linux/kernel.h>
+#include <linux/fips.h>
+#include <linux/notifier.h>
+
+#include "cc_driver.h"
+#include "cc_fips.h"
+
+static void fips_dsr(unsigned long devarg);
+
+struct cc_fips_handle {
+ struct tasklet_struct tasklet;
+ struct notifier_block nb;
+ struct cc_drvdata *drvdata;
+};
+
+/* This function is called once at driver entry point to check
+ * whether a TEE FIPS error occurred.
+ */
+static bool cc_get_tee_fips_status(struct cc_drvdata *drvdata)
+{
+ u32 reg;
+
+ reg = cc_ioread(drvdata, CC_REG(GPR_HOST));
+ /* Did the TEE report status? */
+ if (reg & CC_FIPS_SYNC_TEE_STATUS)
+ /* Yes. Is it OK? */
+ return (reg & CC_FIPS_SYNC_MODULE_OK);
+
+ /* No. It's either not in use or will be reported later */
+ return true;
+}
+
+/*
+ * This function pushes the FIPS REE library status towards the TEE library
+ * by writing the status to the HOST_GPR0 register.
+ */
+void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool status)
+{
+ int val = CC_FIPS_SYNC_REE_STATUS;
+
+ if (drvdata->hw_rev < CC_HW_REV_712)
+ return;
+
+ val |= (status ? CC_FIPS_SYNC_MODULE_OK : CC_FIPS_SYNC_MODULE_ERROR);
+
+ cc_iowrite(drvdata, CC_REG(HOST_GPR0), val);
+}
+
+/* Push REE side FIPS test failure to TEE side */
+static int cc_ree_fips_failure(struct notifier_block *nb, unsigned long unused1,
+ void *unused2)
+{
+ struct cc_fips_handle *fips_h =
+ container_of(nb, struct cc_fips_handle, nb);
+ struct cc_drvdata *drvdata = fips_h->drvdata;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ cc_set_ree_fips_status(drvdata, false);
+ dev_info(dev, "Notifying TEE of FIPS test failure...\n");
+
+ return NOTIFY_OK;
+}
+
+void cc_fips_fini(struct cc_drvdata *drvdata)
+{
+ struct cc_fips_handle *fips_h = drvdata->fips_handle;
+
+ if (drvdata->hw_rev < CC_HW_REV_712 || !fips_h)
+ return;
+
+ atomic_notifier_chain_unregister(&fips_fail_notif_chain, &fips_h->nb);
+
+ /* Kill tasklet */
+ tasklet_kill(&fips_h->tasklet);
+ drvdata->fips_handle = NULL;
+}
+
+void fips_handler(struct cc_drvdata *drvdata)
+{
+ struct cc_fips_handle *fips_handle_ptr = drvdata->fips_handle;
+
+ if (drvdata->hw_rev < CC_HW_REV_712)
+ return;
+
+ tasklet_schedule(&fips_handle_ptr->tasklet);
+}
+
+static inline void tee_fips_error(struct device *dev)
+{
+ if (fips_enabled)
+ panic("ccree: TEE reported cryptographic error in fips mode!\n");
+ else
+ dev_err(dev, "TEE reported error!\n");
+}
+
+/*
+ * This function checks whether a CryptoCell TEE FIPS error occurred
+ * and, if so, triggers a system error.
+ */
+void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata)
+{
+ struct device *dev = drvdata_to_dev(p_drvdata);
+
+ if (!cc_get_tee_fips_status(p_drvdata))
+ tee_fips_error(dev);
+}
+
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void fips_dsr(unsigned long devarg)
+{
+ struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
+ u32 irq, val;
+
+ irq = (drvdata->irq & (CC_GPR0_IRQ_MASK));
+
+ if (irq) {
+ cc_tee_handle_fips_error(drvdata);
+ }
+
+ /* after verifying that there is nothing to do,
+ * unmask AXI completion interrupt.
+ */
+ val = (CC_REG(HOST_IMR) & ~irq);
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), val);
+}
+
+/* This function is called once at driver entry point. */
+int cc_fips_init(struct cc_drvdata *p_drvdata)
+{
+ struct cc_fips_handle *fips_h;
+ struct device *dev = drvdata_to_dev(p_drvdata);
+
+ if (p_drvdata->hw_rev < CC_HW_REV_712)
+ return 0;
+
+ fips_h = devm_kzalloc(dev, sizeof(*fips_h), GFP_KERNEL);
+ if (!fips_h)
+ return -ENOMEM;
+
+ p_drvdata->fips_handle = fips_h;
+
+ dev_dbg(dev, "Initializing fips tasklet\n");
+ tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata);
+ fips_h->drvdata = p_drvdata;
+ fips_h->nb.notifier_call = cc_ree_fips_failure;
+ atomic_notifier_chain_register(&fips_fail_notif_chain, &fips_h->nb);
+
+ cc_tee_handle_fips_error(p_drvdata);
+
+ return 0;
+}
diff --git a/drivers/crypto/ccree/cc_fips.h b/drivers/crypto/ccree/cc_fips.h
new file mode 100644
index 000000000..fc33eeb4d
--- /dev/null
+++ b/drivers/crypto/ccree/cc_fips.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
+
+#ifndef __CC_FIPS_H__
+#define __CC_FIPS_H__
+
+#ifdef CONFIG_CRYPTO_FIPS
+
+enum cc_fips_status {
+ CC_FIPS_SYNC_MODULE_OK = 0x0,
+ CC_FIPS_SYNC_MODULE_ERROR = 0x1,
+ CC_FIPS_SYNC_REE_STATUS = 0x4,
+ CC_FIPS_SYNC_TEE_STATUS = 0x8,
+ CC_FIPS_SYNC_STATUS_RESERVE32B = S32_MAX
+};
+
+int cc_fips_init(struct cc_drvdata *p_drvdata);
+void cc_fips_fini(struct cc_drvdata *drvdata);
+void fips_handler(struct cc_drvdata *drvdata);
+void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool ok);
+void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata);
+
+#else /* CONFIG_CRYPTO_FIPS */
+
+static inline int cc_fips_init(struct cc_drvdata *p_drvdata)
+{
+ return 0;
+}
+
+static inline void cc_fips_fini(struct cc_drvdata *drvdata) {}
+static inline void cc_set_ree_fips_status(struct cc_drvdata *drvdata,
+ bool ok) {}
+static inline void fips_handler(struct cc_drvdata *drvdata) {}
+static inline void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata) {}
+
+#endif /* CONFIG_CRYPTO_FIPS */
+
+#endif /*__CC_FIPS_H__*/
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
new file mode 100644
index 000000000..683c9a430
--- /dev/null
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -0,0 +1,2315 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/md5.h>
+#include <crypto/sm3.h>
+#include <crypto/internal/hash.h>
+
+#include "cc_driver.h"
+#include "cc_request_mgr.h"
+#include "cc_buffer_mgr.h"
+#include "cc_hash.h"
+#include "cc_sram_mgr.h"
+
+#define CC_MAX_HASH_SEQ_LEN 12
+#define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
+#define CC_SM3_HASH_LEN_SIZE 8
+
+struct cc_hash_handle {
+	u32 digest_len_sram_addr; /* const value in SRAM */
+ u32 larval_digest_sram_addr; /* const value in SRAM */
+ struct list_head hash_list;
+};
+
+static const u32 cc_digest_len_init[] = {
+ 0x00000040, 0x00000000, 0x00000000, 0x00000000 };
+static const u32 cc_md5_init[] = {
+ SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
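Reusing SHA1_H3..SHA1_H0 for cc_md5_init is intentional: the MD5 initial value words
(0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476) are identical to SHA1_H0..SHA1_H3,
and the array simply lists them in reverse word order.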
+static const u32 cc_sha1_init[] = {
+ SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
+static const u32 cc_sha224_init[] = {
+ SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
+ SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
+static const u32 cc_sha256_init[] = {
+ SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
+ SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
+static const u32 cc_digest_len_sha512_init[] = {
+ 0x00000080, 0x00000000, 0x00000000, 0x00000000 };
+
+/*
+ * Due to the way the HW works, every double word in the SHA384 and SHA512
+ * larval hashes must be stored in hi/lo order
+ */
+#define hilo(x) upper_32_bits(x), lower_32_bits(x)
+static const u32 cc_sha384_init[] = {
+ hilo(SHA384_H7), hilo(SHA384_H6), hilo(SHA384_H5), hilo(SHA384_H4),
+ hilo(SHA384_H3), hilo(SHA384_H2), hilo(SHA384_H1), hilo(SHA384_H0) };
+static const u32 cc_sha512_init[] = {
+ hilo(SHA512_H7), hilo(SHA512_H6), hilo(SHA512_H5), hilo(SHA512_H4),
+ hilo(SHA512_H3), hilo(SHA512_H2), hilo(SHA512_H1), hilo(SHA512_H0) };
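To make the hi/lo ordering concrete, a worked expansion for one constant (assuming
the SHA512_H0 value 0x6a09e667f3bcc908ULL from <crypto/sha2.h>):

/* hilo(SHA512_H0)
 *   -> upper_32_bits(0x6a09e667f3bcc908ULL), lower_32_bits(0x6a09e667f3bcc908ULL)
 *   -> 0x6a09e667, 0xf3bcc908
 * i.e. the high 32-bit word is emitted first, as the HW expects.
 */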
+
+static const u32 cc_sm3_init[] = {
+ SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
+ SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };
+
+static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size);
+
+static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size);
+
+static const void *cc_larval_digest(struct device *dev, u32 mode);
+
+struct cc_hash_alg {
+ struct list_head entry;
+ int hash_mode;
+ int hw_mode;
+ int inter_digestsize;
+ struct cc_drvdata *drvdata;
+ struct ahash_alg ahash_alg;
+};
+
+struct hash_key_req_ctx {
+ u32 keylen;
+ dma_addr_t key_dma_addr;
+ u8 *key;
+};
+
+/* hash per-session context */
+struct cc_hash_ctx {
+ struct cc_drvdata *drvdata;
+	/* holds the origin digest; the digest after "setkey" if HMAC,
+ * the initial digest if HASH.
+ */
+ u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+ u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE] ____cacheline_aligned;
+
+ dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
+ dma_addr_t digest_buff_dma_addr;
+	/* used for hmac with a key larger than the mode block size */
+ struct hash_key_req_ctx key_params;
+ int hash_mode;
+ int hw_mode;
+ int inter_digestsize;
+ unsigned int hash_len;
+ struct completion setkey_comp;
+ bool is_hmac;
+};
+
+static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
+ unsigned int flow_mode, struct cc_hw_desc desc[],
+ bool is_not_last_data, unsigned int *seq_size);
+
+static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
+{
+ if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
+ mode == DRV_HASH_SHA512) {
+ set_bytes_swap(desc, 1);
+ } else {
+ set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ }
+}
+
+static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
+ unsigned int digestsize)
+{
+ state->digest_result_dma_addr =
+ dma_map_single(dev, state->digest_result_buff,
+ digestsize, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
+ dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
+ digestsize);
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
+ digestsize, state->digest_result_buff,
+ &state->digest_result_dma_addr);
+
+ return 0;
+}
+
+static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
+ struct cc_hash_ctx *ctx)
+{
+ bool is_hmac = ctx->is_hmac;
+
+ memset(state, 0, sizeof(*state));
+
+ if (is_hmac) {
+ if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
+ ctx->hw_mode != DRV_CIPHER_CMAC) {
+ dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
+ ctx->inter_digestsize,
+ DMA_BIDIRECTIONAL);
+
+ memcpy(state->digest_buff, ctx->digest_buff,
+ ctx->inter_digestsize);
+ if (ctx->hash_mode == DRV_HASH_SHA512 ||
+ ctx->hash_mode == DRV_HASH_SHA384)
+ memcpy(state->digest_bytes_len,
+ cc_digest_len_sha512_init,
+ ctx->hash_len);
+ else
+ memcpy(state->digest_bytes_len,
+ cc_digest_len_init,
+ ctx->hash_len);
+ }
+
+ if (ctx->hash_mode != DRV_HASH_NULL) {
+ dma_sync_single_for_cpu(dev,
+ ctx->opad_tmp_keys_dma_addr,
+ ctx->inter_digestsize,
+ DMA_BIDIRECTIONAL);
+ memcpy(state->opad_digest_buff,
+ ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
+ }
+ } else { /*hash*/
+ /* Copy the initial digests if hash flow. */
+ const void *larval = cc_larval_digest(dev, ctx->hash_mode);
+
+ memcpy(state->digest_buff, larval, ctx->inter_digestsize);
+ }
+}
+
+static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
+ struct cc_hash_ctx *ctx)
+{
+ bool is_hmac = ctx->is_hmac;
+
+ state->digest_buff_dma_addr =
+ dma_map_single(dev, state->digest_buff,
+ ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
+ dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
+ ctx->inter_digestsize, state->digest_buff);
+ return -EINVAL;
+ }
+ dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
+ ctx->inter_digestsize, state->digest_buff,
+ &state->digest_buff_dma_addr);
+
+ if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
+ state->digest_bytes_len_dma_addr =
+ dma_map_single(dev, state->digest_bytes_len,
+ HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
+ dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
+ HASH_MAX_LEN_SIZE, state->digest_bytes_len);
+ goto unmap_digest_buf;
+ }
+ dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
+ HASH_MAX_LEN_SIZE, state->digest_bytes_len,
+ &state->digest_bytes_len_dma_addr);
+ }
+
+ if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
+ state->opad_digest_dma_addr =
+ dma_map_single(dev, state->opad_digest_buff,
+ ctx->inter_digestsize,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
+ dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
+ ctx->inter_digestsize,
+ state->opad_digest_buff);
+ goto unmap_digest_len;
+ }
+ dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
+ ctx->inter_digestsize, state->opad_digest_buff,
+ &state->opad_digest_dma_addr);
+ }
+
+ return 0;
+
+unmap_digest_len:
+ if (state->digest_bytes_len_dma_addr) {
+ dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
+ HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
+ state->digest_bytes_len_dma_addr = 0;
+ }
+unmap_digest_buf:
+ if (state->digest_buff_dma_addr) {
+ dma_unmap_single(dev, state->digest_buff_dma_addr,
+ ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ state->digest_buff_dma_addr = 0;
+ }
+
+ return -EINVAL;
+}
+
+static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
+ struct cc_hash_ctx *ctx)
+{
+ if (state->digest_buff_dma_addr) {
+ dma_unmap_single(dev, state->digest_buff_dma_addr,
+ ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
+ &state->digest_buff_dma_addr);
+ state->digest_buff_dma_addr = 0;
+ }
+ if (state->digest_bytes_len_dma_addr) {
+ dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
+ HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
+ &state->digest_bytes_len_dma_addr);
+ state->digest_bytes_len_dma_addr = 0;
+ }
+ if (state->opad_digest_dma_addr) {
+ dma_unmap_single(dev, state->opad_digest_dma_addr,
+ ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
+ &state->opad_digest_dma_addr);
+ state->opad_digest_dma_addr = 0;
+ }
+}
+
+static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
+ unsigned int digestsize, u8 *result)
+{
+ if (state->digest_result_dma_addr) {
+ dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
+ DMA_BIDIRECTIONAL);
+		dev_dbg(dev, "unmap digest result buffer va (%pK) pa (%pad) len %u\n",
+ state->digest_result_buff,
+ &state->digest_result_dma_addr, digestsize);
+ memcpy(result, state->digest_result_buff, digestsize);
+ }
+ state->digest_result_dma_addr = 0;
+}
+
+static void cc_update_complete(struct device *dev, void *cc_req, int err)
+{
+ struct ahash_request *req = (struct ahash_request *)cc_req;
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ dev_dbg(dev, "req=%pK\n", req);
+
+ if (err != -EINPROGRESS) {
+ /* Not a BACKLOG notification */
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_req(dev, state, ctx);
+ }
+
+ ahash_request_complete(req, err);
+}
+
+static void cc_digest_complete(struct device *dev, void *cc_req, int err)
+{
+ struct ahash_request *req = (struct ahash_request *)cc_req;
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+
+ dev_dbg(dev, "req=%pK\n", req);
+
+ if (err != -EINPROGRESS) {
+ /* Not a BACKLOG notification */
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ }
+
+ ahash_request_complete(req, err);
+}
+
+static void cc_hash_complete(struct device *dev, void *cc_req, int err)
+{
+ struct ahash_request *req = (struct ahash_request *)cc_req;
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+
+ dev_dbg(dev, "req=%pK\n", req);
+
+ if (err != -EINPROGRESS) {
+ /* Not a BACKLOG notification */
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ }
+
+ ahash_request_complete(req, err);
+}
+
+static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
+ int idx)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+
+ /* Get final MAC result */
+ hw_desc_init(&desc[idx]);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
+ set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
+ NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+ cc_set_endianity(ctx->hash_mode, &desc[idx]);
+ idx++;
+
+ return idx;
+}
+
+static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
+ int idx)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+
+ /* store the hash digest result in the context */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
+ NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ cc_set_endianity(ctx->hash_mode, &desc[idx]);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ idx++;
+
+ /* Loading hash opad xor key state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
+ ctx->inter_digestsize, NS_BIT);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the hash current length */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_sram(&desc[idx],
+ cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
+ ctx->hash_len);
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
+ hw_desc_init(&desc[idx]);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ idx++;
+
+ /* Perform HASH update */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ digestsize, NS_BIT);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+
+ return idx;
+}
+
+static int cc_hash_digest(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->nbytes;
+ u8 *result = req->result;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ bool is_hmac = ctx->is_hmac;
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ u32 larval_digest_addr;
+ int idx = 0;
+ int rc = 0;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
+ nbytes);
+
+ cc_init_req(dev, state, ctx);
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -ENOMEM;
+ }
+
+ if (cc_map_result(dev, state, digestsize)) {
+ dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
+ flags)) {
+ dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_result(dev, state, digestsize, result);
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = cc_digest_complete;
+ cc_req.user_arg = req;
+
+ /* If HMAC then load hash IPAD xor key, if HASH then load initial
+ * digest
+ */
+ hw_desc_init(&desc[idx]);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
+ if (is_hmac) {
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ ctx->inter_digestsize, NS_BIT);
+ } else {
+ larval_digest_addr = cc_larval_digest_addr(ctx->drvdata,
+ ctx->hash_mode);
+ set_din_sram(&desc[idx], larval_digest_addr,
+ ctx->inter_digestsize);
+ }
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the hash current length */
+ hw_desc_init(&desc[idx]);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
+
+ if (is_hmac) {
+ set_din_type(&desc[idx], DMA_DLLI,
+ state->digest_bytes_len_dma_addr,
+ ctx->hash_len, NS_BIT);
+ } else {
+ set_din_const(&desc[idx], 0, ctx->hash_len);
+ if (nbytes)
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ else
+ set_cipher_do(&desc[idx], DO_PAD);
+ }
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
+
+ if (is_hmac) {
+ /* HW last hash block padding (aka. "DO_PAD") */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
+ ctx->hash_len, NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
+ set_cipher_do(&desc[idx], DO_PAD);
+ idx++;
+
+ idx = cc_fin_hmac(desc, req, idx);
+ }
+
+ idx = cc_fin_result(desc, req, idx);
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_result(dev, state, digestsize, result);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
+ struct ahash_req_ctx *state, unsigned int idx)
+{
+ /* Restore hash digest */
+ hw_desc_init(&desc[idx]);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ ctx->inter_digestsize, NS_BIT);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Restore hash current length */
+ hw_desc_init(&desc[idx]);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
+ set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
+ ctx->hash_len, NS_BIT);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
+
+ return idx;
+}
+
+static int cc_hash_update(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->nbytes;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ u32 idx = 0;
+ int rc;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
+ "hmac" : "hash", nbytes);
+
+ if (nbytes == 0) {
+ /* no real updates required */
+ return 0;
+ }
+
+ rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
+ block_size, flags);
+ if (rc) {
+ if (rc == 1) {
+			dev_dbg(dev, "data size does not require HW update %x\n",
+ nbytes);
+ /* No hardware updates are required */
+ return 0;
+ }
+ dev_err(dev, "map_ahash_request_update() failed\n");
+ return -ENOMEM;
+ }
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ cc_unmap_hash_request(dev, state, src, true);
+ return -EINVAL;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = cc_update_complete;
+ cc_req.user_arg = req;
+
+ idx = cc_restore_hash(desc, ctx, state, idx);
+
+ /* store the hash digest result in context */
+ hw_desc_init(&desc[idx]);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
+ set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
+ ctx->inter_digestsize, NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ idx++;
+
+ /* store current hash length in context */
+ hw_desc_init(&desc[idx]);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
+ set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
+ ctx->hash_len, NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
+ idx++;
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_do_finup(struct ahash_request *req, bool update)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->nbytes;
+ u8 *result = req->result;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ bool is_hmac = ctx->is_hmac;
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ unsigned int idx = 0;
+ int rc;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
+ update ? "finup" : "final", nbytes);
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -EINVAL;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
+ flags)) {
+ dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+ if (cc_map_result(dev, state, digestsize)) {
+ dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = cc_hash_complete;
+ cc_req.user_arg = req;
+
+ idx = cc_restore_hash(desc, ctx, state, idx);
+
+ /* Pad the hash */
+ hw_desc_init(&desc[idx]);
+ set_cipher_do(&desc[idx], DO_PAD);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
+ set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
+ ctx->hash_len, NS_BIT, 0);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ idx++;
+
+ if (is_hmac)
+ idx = cc_fin_hmac(desc, req, idx);
+
+ idx = cc_fin_result(desc, req, idx);
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_result(dev, state, digestsize, result);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_hash_finup(struct ahash_request *req)
+{
+ return cc_do_finup(req, true);
+}
+
+static int cc_hash_final(struct ahash_request *req)
+{
+ return cc_do_finup(req, false);
+}
+
+static int cc_hash_init(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
+
+ cc_init_req(dev, state, ctx);
+
+ return 0;
+}
+
+static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
+ unsigned int keylen)
+{
+ unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
+ struct cc_crypto_req cc_req = {};
+ struct cc_hash_ctx *ctx = NULL;
+ int blocksize = 0;
+ int digestsize = 0;
+ int i, idx = 0, rc = 0;
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ u32 larval_addr;
+ struct device *dev;
+
+ ctx = crypto_ahash_ctx(ahash);
+ dev = drvdata_to_dev(ctx->drvdata);
+ dev_dbg(dev, "start keylen: %d", keylen);
+
+ blocksize = crypto_tfm_alg_blocksize(&ahash->base);
+ digestsize = crypto_ahash_digestsize(ahash);
+
+ larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
+
+ /* A zero keylen selects the plain HASH flow; any non-zero keylen
+  * selects the HMAC flow.
+  */
+ ctx->key_params.keylen = keylen;
+ ctx->key_params.key_dma_addr = 0;
+ ctx->is_hmac = true;
+ ctx->key_params.key = NULL;
+
+ if (keylen) {
+ ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
+ if (!ctx->key_params.key)
+ return -ENOMEM;
+
+ ctx->key_params.key_dma_addr =
+ dma_map_single(dev, ctx->key_params.key, keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
+ dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
+ ctx->key_params.key, keylen);
+ kfree_sensitive(ctx->key_params.key);
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
+ &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
+
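+ /* Per HMAC (RFC 2104): a key longer than the block size is first
+  * hashed down to digestsize; the (possibly shorter) key is then
+  * zero-padded up to a full block in the opad_tmp_keys buffer.
+  */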
+ if (keylen > blocksize) {
+ /* Load hash initial state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_sram(&desc[idx], larval_addr,
+ ctx->inter_digestsize);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the current hash length */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_const(&desc[idx], 0, ctx->hash_len);
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ ctx->key_params.key_dma_addr, keylen,
+ NS_BIT);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+
+ /* Get hashed key */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
+ digestsize, NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+ cc_set_endianity(ctx->hash_mode, &desc[idx]);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0, (blocksize - digestsize));
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr +
+ digestsize),
+ (blocksize - digestsize), NS_BIT, 0);
+ idx++;
+ } else {
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ ctx->key_params.key_dma_addr, keylen,
+ NS_BIT);
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
+ keylen, NS_BIT, 0);
+ idx++;
+
+ if ((blocksize - keylen)) {
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0,
+ (blocksize - keylen));
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr +
+ keylen), (blocksize - keylen),
+ NS_BIT, 0);
+ idx++;
+ }
+ }
+ } else {
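+ /* Zero-length key: use an all-zero block as the padded HMAC key */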
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0, blocksize);
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
+ blocksize, NS_BIT, 0);
+ idx++;
+ }
+
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
+ if (rc) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ goto out;
+ }
+
+ /* Calculate the derived HMAC keys: iteration 0 XORs the padded key
+  * with the ipad constant, iteration 1 with the opad constant, and
+  * each result is hashed for one block to form the intermediate digests.
+  */
+ for (idx = 0, i = 0; i < 2; i++) {
+ /* Load hash initial state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the current hash length */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_const(&desc[idx], 0, ctx->hash_len);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Prepare the ipad/opad XOR constant */
+ hw_desc_init(&desc[idx]);
+ set_xor_val(&desc[idx], hmac_pad_const[i]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ idx++;
+
+ /* Perform HASH update */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
+ blocksize, NS_BIT);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_xor_active(&desc[idx]);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+
+ /* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest
+ * of the first HASH "update" state)
+ */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ if (i > 0) /* Not first iteration */
+ set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
+ ctx->inter_digestsize, NS_BIT, 0);
+ else /* First iteration */
+ set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
+ ctx->inter_digestsize, NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ idx++;
+ }
+
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
+
+out:
+ if (ctx->key_params.key_dma_addr) {
+ dma_unmap_single(dev, ctx->key_params.key_dma_addr,
+ ctx->key_params.keylen, DMA_TO_DEVICE);
+ dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
+ &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
+ }
+
+ kfree_sensitive(ctx->key_params.key);
+
+ return rc;
+}
+
+static int cc_xcbc_setkey(struct crypto_ahash *ahash,
+ const u8 *key, unsigned int keylen)
+{
+ struct cc_crypto_req cc_req = {};
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ int rc = 0;
+ unsigned int idx = 0;
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+
+ dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_192:
+ case AES_KEYSIZE_256:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ctx->key_params.keylen = keylen;
+
+ ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
+ if (!ctx->key_params.key)
+ return -ENOMEM;
+
+ ctx->key_params.key_dma_addr =
+ dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
+ dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
+ key, keylen);
+ kfree_sensitive(ctx->key_params.key);
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
+ &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
+
+ ctx->is_hmac = true;
+ /* 1. Load the AES key */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
+ keylen, NS_BIT);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
+ set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ set_key_size_aes(&desc[idx], keylen);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
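+ /* 2. Derive K1, K2 and K3 as in RFC 3566: encrypt the constants
+  * 0x01..01, 0x02..02 and 0x03..03 with the user-supplied AES key.
+  */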
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
+ idx++;
+
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
+
+ dma_unmap_single(dev, ctx->key_params.key_dma_addr,
+ ctx->key_params.keylen, DMA_TO_DEVICE);
+ dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
+ &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
+
+ kfree_sensitive(ctx->key_params.key);
+
+ return rc;
+}
+
+static int cc_cmac_setkey(struct crypto_ahash *ahash,
+ const u8 *key, unsigned int keylen)
+{
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
+
+ ctx->is_hmac = true;
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_192:
+ case AES_KEYSIZE_256:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ctx->key_params.keylen = keylen;
+
+ /* STAT_PHASE_1: Copy key to ctx */
+
+ dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
+ keylen, DMA_TO_DEVICE);
+
+ memcpy(ctx->opad_tmp_keys_buff, key, keylen);
+ if (keylen == 24) {
+ memset(ctx->opad_tmp_keys_buff + 24, 0,
+ CC_AES_KEY_SIZE_MAX - 24);
+ }
+
+ dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
+ keylen, DMA_TO_DEVICE);
+
+ return 0;
+}
+
+static void cc_free_ctx(struct cc_hash_ctx *ctx)
+{
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ if (ctx->digest_buff_dma_addr) {
+ dma_unmap_single(dev, ctx->digest_buff_dma_addr,
+ sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
+ &ctx->digest_buff_dma_addr);
+ ctx->digest_buff_dma_addr = 0;
+ }
+ if (ctx->opad_tmp_keys_dma_addr) {
+ dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
+ sizeof(ctx->opad_tmp_keys_buff),
+ DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
+ &ctx->opad_tmp_keys_dma_addr);
+ ctx->opad_tmp_keys_dma_addr = 0;
+ }
+
+ ctx->key_params.keylen = 0;
+}
+
+static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
+{
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ ctx->key_params.keylen = 0;
+
+ ctx->digest_buff_dma_addr =
+ dma_map_single(dev, ctx->digest_buff, sizeof(ctx->digest_buff),
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
+ dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
+ sizeof(ctx->digest_buff), ctx->digest_buff);
+ goto fail;
+ }
+ dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
+ sizeof(ctx->digest_buff), ctx->digest_buff,
+ &ctx->digest_buff_dma_addr);
+
+ ctx->opad_tmp_keys_dma_addr =
+ dma_map_single(dev, ctx->opad_tmp_keys_buff,
+ sizeof(ctx->opad_tmp_keys_buff),
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
+ dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
+ sizeof(ctx->opad_tmp_keys_buff),
+ ctx->opad_tmp_keys_buff);
+ goto fail;
+ }
+ dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
+ sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
+ &ctx->opad_tmp_keys_dma_addr);
+
+ ctx->is_hmac = false;
+ return 0;
+
+fail:
+ cc_free_ctx(ctx);
+ return -ENOMEM;
+}
+
+static int cc_get_hash_len(struct crypto_tfm *tfm)
+{
+ struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->hash_mode == DRV_HASH_SM3)
+ return CC_SM3_HASH_LEN_SIZE;
+ else
+ return cc_get_default_hash_len(ctx->drvdata);
+}
+
+static int cc_cra_init(struct crypto_tfm *tfm)
+{
+ struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct hash_alg_common *hash_alg_common =
+ container_of(tfm->__crt_alg, struct hash_alg_common, base);
+ struct ahash_alg *ahash_alg =
+ container_of(hash_alg_common, struct ahash_alg, halg);
+ struct cc_hash_alg *cc_alg =
+ container_of(ahash_alg, struct cc_hash_alg, ahash_alg);
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct ahash_req_ctx));
+
+ ctx->hash_mode = cc_alg->hash_mode;
+ ctx->hw_mode = cc_alg->hw_mode;
+ ctx->inter_digestsize = cc_alg->inter_digestsize;
+ ctx->drvdata = cc_alg->drvdata;
+ ctx->hash_len = cc_get_hash_len(tfm);
+ return cc_alloc_ctx(ctx);
+}
+
+static void cc_cra_exit(struct crypto_tfm *tfm)
+{
+ struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "cc_cra_exit\n");
+ cc_free_ctx(ctx);
+}
+
+static int cc_mac_update(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ int rc;
+ u32 idx = 0;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ if (req->nbytes == 0) {
+ /* no real updates required */
+ return 0;
+ }
+
+ state->xcbc_count++;
+
+ rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
+ req->nbytes, block_size, flags);
+ if (rc) {
+ if (rc == 1) {
+ dev_dbg(dev, " data size does not require HW update %x\n",
+ req->nbytes);
+ /* No hardware updates are required */
+ return 0;
+ }
+ dev_err(dev, "map_ahash_request_update() failed\n");
+ return -ENOMEM;
+ }
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ cc_unmap_hash_request(dev, state, req->src, true);
+ return -EINVAL;
+ }
+
+ if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
+ cc_setup_xcbc(req, desc, &idx);
+ else
+ cc_setup_cmac(req, desc, &idx);
+
+ cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
+
+ /* store the hash digest result in context */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
+ ctx->inter_digestsize, NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ set_flow_mode(&desc[idx], S_AES_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ idx++;
+
+ /* Setup request structure */
+ cc_req.user_cb = cc_update_complete;
+ cc_req.user_arg = req;
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_mac_final(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ int idx = 0;
+ int rc = 0;
+ u32 key_size, key_len;
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ gfp_t flags = cc_gfp_flags(&req->base);
+ u32 rem_cnt = *cc_hash_buf_cnt(state);
+
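+ /* For CMAC, a 192-bit key is stored zero-padded to 256 bits by
+  * cc_cmac_setkey(), so key_size spans the padded buffer while
+  * key_len programs the real key length.
+  */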
+ if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
+ key_size = CC_AES_128_BIT_KEY_SIZE;
+ key_len = CC_AES_128_BIT_KEY_SIZE;
+ } else {
+ key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
+ ctx->key_params.keylen;
+ key_len = ctx->key_params.keylen;
+ }
+
+ dev_dbg(dev, "===== final xcbc remainder (%d) ====\n", rem_cnt);
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -EINVAL;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
+ req->nbytes, 0, flags)) {
+ dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ if (cc_map_result(dev, state, digestsize)) {
+ dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = cc_hash_complete;
+ cc_req.user_arg = req;
+
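+ /* If previous updates consumed all data and nothing is buffered,
+  * the last block was already processed as a non-final block; decrypt
+  * the stored state back to block_state XOR M[n] so it can be
+  * re-processed as the final block below.
+  */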
+ if (state->xcbc_count && rem_cnt == 0) {
+ /* Load key for ECB decryption */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
+ set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
+ set_din_type(&desc[idx], DMA_DLLI,
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
+ key_size, NS_BIT);
+ set_key_size_aes(&desc[idx], key_len);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Initiate decryption of block state to previous
+ * block_state-XOR-M[n]
+ */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ CC_AES_BLOCK_SIZE, NS_BIT);
+ set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
+ CC_AES_BLOCK_SIZE, NS_BIT, 0);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ idx++;
+
+ /* Memory Barrier: wait for axi write to complete */
+ hw_desc_init(&desc[idx]);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ idx++;
+ }
+
+ if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
+ cc_setup_xcbc(req, desc, &idx);
+ else
+ cc_setup_cmac(req, desc, &idx);
+
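+ /* Three finalization cases: no data was ever processed (emit the
+  * size-0 MAC), a partial block is still buffered (process it as the
+  * last block), or the data ended on a block boundary (feed an
+  * all-zero block so the restored last block is finalized).
+  */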
+ if (state->xcbc_count == 0) {
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_key_size_aes(&desc[idx], key_len);
+ set_cmac_size0_mode(&desc[idx]);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+ } else if (rem_cnt > 0) {
+ cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
+ } else {
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ idx++;
+ }
+
+ /* Get final MAC result */
+ hw_desc_init(&desc[idx]);
+ set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
+ digestsize, NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ set_flow_mode(&desc[idx], S_AES_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ idx++;
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_mac_finup(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ int idx = 0;
+ int rc = 0;
+ u32 key_len = 0;
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
+ if (state->xcbc_count > 0 && req->nbytes == 0) {
+ dev_dbg(dev, "No data to update. Calling cc_mac_final\n");
+ return cc_mac_final(req);
+ }
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -EINVAL;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
+ req->nbytes, 1, flags)) {
+ dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+ if (cc_map_result(dev, state, digestsize)) {
+ dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = cc_hash_complete;
+ cc_req.user_arg = req;
+
+ if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
+ key_len = CC_AES_128_BIT_KEY_SIZE;
+ cc_setup_xcbc(req, desc, &idx);
+ } else {
+ key_len = ctx->key_params.keylen;
+ cc_setup_cmac(req, desc, &idx);
+ }
+
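+ /* Zero-length message: use the engine's size-0 CMAC/XCBC mode to
+  * produce the MAC of an empty message.
+  */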
+ if (req->nbytes == 0) {
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_key_size_aes(&desc[idx], key_len);
+ set_cmac_size0_mode(&desc[idx]);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+ } else {
+ cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
+ }
+
+ /* Get final MAC result */
+ hw_desc_init(&desc[idx]);
+ set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
+ digestsize, NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ set_flow_mode(&desc[idx], S_AES_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ idx++;
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_mac_digest(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ u32 key_len;
+ unsigned int idx = 0;
+ int rc;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ dev_dbg(dev, "===== digest mac (%d) ====\n", req->nbytes);
+
+ cc_init_req(dev, state, ctx);
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -ENOMEM;
+ }
+ if (cc_map_result(dev, state, digestsize)) {
+ dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
+ req->nbytes, 1, flags)) {
+ dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = cc_digest_complete;
+ cc_req.user_arg = req;
+
+ if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
+ key_len = CC_AES_128_BIT_KEY_SIZE;
+ cc_setup_xcbc(req, desc, &idx);
+ } else {
+ key_len = ctx->key_params.keylen;
+ cc_setup_cmac(req, desc, &idx);
+ }
+
+ if (req->nbytes == 0) {
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_key_size_aes(&desc[idx], key_len);
+ set_cmac_size0_mode(&desc[idx]);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+ } else {
+ cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
+ }
+
+ /* Get final MAC result */
+ hw_desc_init(&desc[idx]);
+ set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
+ CC_AES_BLOCK_SIZE, NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ set_flow_mode(&desc[idx], S_AES_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ idx++;
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_hash_export(struct ahash_request *req, void *out)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ u8 *curr_buff = cc_hash_buf(state);
+ u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
+ const u32 tmp = CC_EXPORT_MAGIC;
+
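+ /* Exported state layout: magic word, intermediate digest, running
+  * length counter, buffered byte count, buffered data.
+  */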
+ memcpy(out, &tmp, sizeof(u32));
+ out += sizeof(u32);
+
+ memcpy(out, state->digest_buff, ctx->inter_digestsize);
+ out += ctx->inter_digestsize;
+
+ memcpy(out, state->digest_bytes_len, ctx->hash_len);
+ out += ctx->hash_len;
+
+ memcpy(out, &curr_buff_cnt, sizeof(u32));
+ out += sizeof(u32);
+
+ memcpy(out, curr_buff, curr_buff_cnt);
+
+ return 0;
+}
+
+static int cc_hash_import(struct ahash_request *req, const void *in)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ u32 tmp;
+
+ memcpy(&tmp, in, sizeof(u32));
+ if (tmp != CC_EXPORT_MAGIC)
+ return -EINVAL;
+ in += sizeof(u32);
+
+ cc_init_req(dev, state, ctx);
+
+ memcpy(state->digest_buff, in, ctx->inter_digestsize);
+ in += ctx->inter_digestsize;
+
+ memcpy(state->digest_bytes_len, in, ctx->hash_len);
+ in += ctx->hash_len;
+
+ /* Sanity check the data as much as possible */
+ memcpy(&tmp, in, sizeof(u32));
+ if (tmp > CC_MAX_HASH_BLCK_SIZE)
+ return -EINVAL;
+ in += sizeof(u32);
+
+ state->buf_cnt[0] = tmp;
+ memcpy(state->buffers[0], in, tmp);
+
+ return 0;
+}
+
+struct cc_hash_template {
+ char name[CRYPTO_MAX_ALG_NAME];
+ char driver_name[CRYPTO_MAX_ALG_NAME];
+ char mac_name[CRYPTO_MAX_ALG_NAME];
+ char mac_driver_name[CRYPTO_MAX_ALG_NAME];
+ unsigned int blocksize;
+ bool is_mac;
+ bool synchronize;
+ struct ahash_alg template_ahash;
+ int hash_mode;
+ int hw_mode;
+ int inter_digestsize;
+ struct cc_drvdata *drvdata;
+ u32 min_hw_rev;
+ enum cc_std_body std_body;
+};
+
+#define CC_STATE_SIZE(_x) \
+ ((_x) + HASH_MAX_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
+
+/* hash descriptors */
+static struct cc_hash_template driver_hash[] = {
+ // Asynchronous hash templates
+ {
+ .name = "sha1",
+ .driver_name = "sha1-ccree",
+ .mac_name = "hmac(sha1)",
+ .mac_driver_name = "hmac-sha1-ccree",
+ .blocksize = SHA1_BLOCK_SIZE,
+ .is_mac = true,
+ .synchronize = false,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_SHA1,
+ .hw_mode = DRV_HASH_HW_SHA1,
+ .inter_digestsize = SHA1_DIGEST_SIZE,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "sha256",
+ .driver_name = "sha256-ccree",
+ .mac_name = "hmac(sha256)",
+ .mac_driver_name = "hmac-sha256-ccree",
+ .blocksize = SHA256_BLOCK_SIZE,
+ .is_mac = true,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
+ },
+ },
+ .hash_mode = DRV_HASH_SHA256,
+ .hw_mode = DRV_HASH_HW_SHA256,
+ .inter_digestsize = SHA256_DIGEST_SIZE,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "sha224",
+ .driver_name = "sha224-ccree",
+ .mac_name = "hmac(sha224)",
+ .mac_driver_name = "hmac-sha224-ccree",
+ .blocksize = SHA224_BLOCK_SIZE,
+ .is_mac = true,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_SHA224,
+ .hw_mode = DRV_HASH_HW_SHA256,
+ .inter_digestsize = SHA256_DIGEST_SIZE,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "sha384",
+ .driver_name = "sha384-ccree",
+ .mac_name = "hmac(sha384)",
+ .mac_driver_name = "hmac-sha384-ccree",
+ .blocksize = SHA384_BLOCK_SIZE,
+ .is_mac = true,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_SHA384,
+ .hw_mode = DRV_HASH_HW_SHA512,
+ .inter_digestsize = SHA512_DIGEST_SIZE,
+ .min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "sha512",
+ .driver_name = "sha512-ccree",
+ .mac_name = "hmac(sha512)",
+ .mac_driver_name = "hmac-sha512-ccree",
+ .blocksize = SHA512_BLOCK_SIZE,
+ .is_mac = true,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_SHA512,
+ .hw_mode = DRV_HASH_HW_SHA512,
+ .inter_digestsize = SHA512_DIGEST_SIZE,
+ .min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "md5",
+ .driver_name = "md5-ccree",
+ .mac_name = "hmac(md5)",
+ .mac_driver_name = "hmac-md5-ccree",
+ .blocksize = MD5_HMAC_BLOCK_SIZE,
+ .is_mac = true,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_MD5,
+ .hw_mode = DRV_HASH_HW_MD5,
+ .inter_digestsize = MD5_DIGEST_SIZE,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "sm3",
+ .driver_name = "sm3-ccree",
+ .blocksize = SM3_BLOCK_SIZE,
+ .is_mac = false,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = SM3_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(SM3_DIGEST_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_SM3,
+ .hw_mode = DRV_HASH_HW_SM3,
+ .inter_digestsize = SM3_DIGEST_SIZE,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
+ },
+ {
+ .mac_name = "xcbc(aes)",
+ .mac_driver_name = "xcbc-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .is_mac = true,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_mac_update,
+ .final = cc_mac_final,
+ .finup = cc_mac_finup,
+ .digest = cc_mac_digest,
+ .setkey = cc_xcbc_setkey,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .halg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_NULL,
+ .hw_mode = DRV_CIPHER_XCBC_MAC,
+ .inter_digestsize = AES_BLOCK_SIZE,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .mac_name = "cmac(aes)",
+ .mac_driver_name = "cmac-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .is_mac = true,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_mac_update,
+ .final = cc_mac_final,
+ .finup = cc_mac_finup,
+ .digest = cc_mac_digest,
+ .setkey = cc_cmac_setkey,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .halg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_NULL,
+ .hw_mode = DRV_CIPHER_CMAC,
+ .inter_digestsize = AES_BLOCK_SIZE,
+ .min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+};
+
+static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
+ struct device *dev, bool keyed)
+{
+ struct cc_hash_alg *t_crypto_alg;
+ struct crypto_alg *alg;
+ struct ahash_alg *halg;
+
+ t_crypto_alg = devm_kzalloc(dev, sizeof(*t_crypto_alg), GFP_KERNEL);
+ if (!t_crypto_alg)
+ return ERR_PTR(-ENOMEM);
+
+ t_crypto_alg->ahash_alg = template->template_ahash;
+ halg = &t_crypto_alg->ahash_alg;
+ alg = &halg->halg.base;
+
+ if (keyed) {
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->mac_name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->mac_driver_name);
+ } else {
+ halg->setkey = NULL;
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->driver_name);
+ }
+ alg->cra_module = THIS_MODULE;
+ alg->cra_ctxsize = sizeof(struct cc_hash_ctx);
+ alg->cra_priority = CC_CRA_PRIO;
+ alg->cra_blocksize = template->blocksize;
+ alg->cra_alignmask = 0;
+ alg->cra_exit = cc_cra_exit;
+
+ alg->cra_init = cc_cra_init;
+ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+ t_crypto_alg->hash_mode = template->hash_mode;
+ t_crypto_alg->hw_mode = template->hw_mode;
+ t_crypto_alg->inter_digestsize = template->inter_digestsize;
+
+ return t_crypto_alg;
+}
+
+static int cc_init_copy_sram(struct cc_drvdata *drvdata, const u32 *data,
+ unsigned int size, u32 *sram_buff_ofs)
+{
+ struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
+ unsigned int larval_seq_len = 0;
+ int rc;
+
+ cc_set_sram_desc(data, *sram_buff_ofs, size / sizeof(*data),
+ larval_seq, &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ return rc;
+
+ *sram_buff_ofs += size;
+ return 0;
+}
+
+int cc_init_hash_sram(struct cc_drvdata *drvdata)
+{
+ struct cc_hash_handle *hash_handle = drvdata->hash_handle;
+ u32 sram_buff_ofs = hash_handle->digest_len_sram_addr;
+ bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
+ bool sm3_supported = (drvdata->hw_rev >= CC_HW_REV_713);
+ int rc = 0;
+
+ /* Copy-to-sram digest-len */
+ rc = cc_init_copy_sram(drvdata, cc_digest_len_init,
+ sizeof(cc_digest_len_init), &sram_buff_ofs);
+ if (rc)
+ goto init_digest_const_err;
+
+ if (large_sha_supported) {
+ /* Copy-to-sram digest-len for sha384/512 */
+ rc = cc_init_copy_sram(drvdata, cc_digest_len_sha512_init,
+ sizeof(cc_digest_len_sha512_init),
+ &sram_buff_ofs);
+ if (rc)
+ goto init_digest_const_err;
+ }
+
+ /* The initial digests offset */
+ hash_handle->larval_digest_sram_addr = sram_buff_ofs;
+
+ /* Copy-to-sram initial SHA* digests */
+ rc = cc_init_copy_sram(drvdata, cc_md5_init, sizeof(cc_md5_init),
+ &sram_buff_ofs);
+ if (rc)
+ goto init_digest_const_err;
+
+ rc = cc_init_copy_sram(drvdata, cc_sha1_init, sizeof(cc_sha1_init),
+ &sram_buff_ofs);
+ if (rc)
+ goto init_digest_const_err;
+
+ rc = cc_init_copy_sram(drvdata, cc_sha224_init, sizeof(cc_sha224_init),
+ &sram_buff_ofs);
+ if (rc)
+ goto init_digest_const_err;
+
+ rc = cc_init_copy_sram(drvdata, cc_sha256_init, sizeof(cc_sha256_init),
+ &sram_buff_ofs);
+ if (rc)
+ goto init_digest_const_err;
+
+ if (sm3_supported) {
+ rc = cc_init_copy_sram(drvdata, cc_sm3_init,
+ sizeof(cc_sm3_init), &sram_buff_ofs);
+ if (rc)
+ goto init_digest_const_err;
+ }
+
+ if (large_sha_supported) {
+ rc = cc_init_copy_sram(drvdata, cc_sha384_init,
+ sizeof(cc_sha384_init), &sram_buff_ofs);
+ if (rc)
+ goto init_digest_const_err;
+
+ rc = cc_init_copy_sram(drvdata, cc_sha512_init,
+ sizeof(cc_sha512_init), &sram_buff_ofs);
+ if (rc)
+ goto init_digest_const_err;
+ }
+
+init_digest_const_err:
+ return rc;
+}
+
+int cc_hash_alloc(struct cc_drvdata *drvdata)
+{
+ struct cc_hash_handle *hash_handle;
+ u32 sram_buff;
+ u32 sram_size_to_alloc;
+ struct device *dev = drvdata_to_dev(drvdata);
+ int rc = 0;
+ int alg;
+
+ hash_handle = devm_kzalloc(dev, sizeof(*hash_handle), GFP_KERNEL);
+ if (!hash_handle)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&hash_handle->hash_list);
+ drvdata->hash_handle = hash_handle;
+
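+ /* Reserve SRAM for the digest-length constants and the larval
+  * digests supported by this HW revision.
+  */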
+ sram_size_to_alloc = sizeof(cc_digest_len_init) +
+ sizeof(cc_md5_init) +
+ sizeof(cc_sha1_init) +
+ sizeof(cc_sha224_init) +
+ sizeof(cc_sha256_init);
+
+ if (drvdata->hw_rev >= CC_HW_REV_713)
+ sram_size_to_alloc += sizeof(cc_sm3_init);
+
+ if (drvdata->hw_rev >= CC_HW_REV_712)
+ sram_size_to_alloc += sizeof(cc_digest_len_sha512_init) +
+ sizeof(cc_sha384_init) + sizeof(cc_sha512_init);
+
+ sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
+ if (sram_buff == NULL_SRAM_ADDR) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ /* The initial digest-len offset */
+ hash_handle->digest_len_sram_addr = sram_buff;
+
+ /* Must be set before algorithm registration, as it is used there */
+ rc = cc_init_hash_sram(drvdata);
+ if (rc) {
+ dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
+ goto fail;
+ }
+
+ /* ahash registration */
+ for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
+ struct cc_hash_alg *t_alg;
+ int hw_mode = driver_hash[alg].hw_mode;
+
+ /* Check that the HW revision and variants are suitable */
+ if ((driver_hash[alg].min_hw_rev > drvdata->hw_rev) ||
+ !(drvdata->std_bodies & driver_hash[alg].std_body))
+ continue;
+
+ if (driver_hash[alg].is_mac) {
+ /* register hmac version */
+ t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
+ if (IS_ERR(t_alg)) {
+ rc = PTR_ERR(t_alg);
+ dev_err(dev, "%s alg allocation failed\n",
+ driver_hash[alg].driver_name);
+ goto fail;
+ }
+ t_alg->drvdata = drvdata;
+
+ rc = crypto_register_ahash(&t_alg->ahash_alg);
+ if (rc) {
+ dev_err(dev, "%s alg registration failed\n",
+ driver_hash[alg].driver_name);
+ goto fail;
+ }
+
+ list_add_tail(&t_alg->entry, &hash_handle->hash_list);
+ }
+ if (hw_mode == DRV_CIPHER_XCBC_MAC ||
+ hw_mode == DRV_CIPHER_CMAC)
+ continue;
+
+ /* register hash version */
+ t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
+ if (IS_ERR(t_alg)) {
+ rc = PTR_ERR(t_alg);
+ dev_err(dev, "%s alg allocation failed\n",
+ driver_hash[alg].driver_name);
+ goto fail;
+ }
+ t_alg->drvdata = drvdata;
+
+ rc = crypto_register_ahash(&t_alg->ahash_alg);
+ if (rc) {
+ dev_err(dev, "%s alg registration failed\n",
+ driver_hash[alg].driver_name);
+ goto fail;
+ }
+
+ list_add_tail(&t_alg->entry, &hash_handle->hash_list);
+ }
+
+ return 0;
+
+fail:
+ cc_hash_free(drvdata);
+ return rc;
+}
+
+int cc_hash_free(struct cc_drvdata *drvdata)
+{
+ struct cc_hash_alg *t_hash_alg, *hash_n;
+ struct cc_hash_handle *hash_handle = drvdata->hash_handle;
+
+ list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list,
+ entry) {
+ crypto_unregister_ahash(&t_hash_alg->ahash_alg);
+ list_del(&t_hash_alg->entry);
+ }
+
+ return 0;
+}
+
+static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ unsigned int idx = *seq_size;
+ struct ahash_req_ctx *state = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ /* Setup XCBC MAC K1 */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
+ XCBC_MAC_K1_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ set_hash_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC, ctx->hash_mode);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* Setup XCBC MAC K2 */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* Setup XCBC MAC K3 */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* Loading MAC state */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ CC_AES_BLOCK_SIZE, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+ *seq_size = idx;
+}
+
+static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ unsigned int idx = *seq_size;
+ struct ahash_req_ctx *state = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ /* Setup CMAC Key */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
+ ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
+ ctx->key_params.keylen), NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], ctx->key_params.keylen);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* Load MAC state */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ CC_AES_BLOCK_SIZE, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], ctx->key_params.keylen);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+ *seq_size = idx;
+}
+
+static void cc_set_desc(struct ahash_req_ctx *areq_ctx,
+ struct cc_hash_ctx *ctx, unsigned int flow_mode,
+ struct cc_hw_desc desc[], bool is_not_last_data,
+ unsigned int *seq_size)
+{
+ unsigned int idx = *seq_size;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) {
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ sg_dma_address(areq_ctx->curr_sg),
+ areq_ctx->curr_sg->length, NS_BIT);
+ set_flow_mode(&desc[idx], flow_mode);
+ idx++;
+ } else {
+ if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
+ dev_dbg(dev, " NULL mode\n");
+ /* nothing to build */
+ return;
+ }
+ /* bypass: DMA the MLLI table into SRAM */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ areq_ctx->mlli_params.mlli_dma_addr,
+ areq_ctx->mlli_params.mlli_len, NS_BIT);
+ set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
+ areq_ctx->mlli_params.mlli_len);
+ set_flow_mode(&desc[idx], BYPASS);
+ idx++;
+ /* process: hash the data described by the MLLI table in SRAM */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_MLLI,
+ ctx->drvdata->mlli_sram_addr,
+ areq_ctx->mlli_nents, NS_BIT);
+ set_flow_mode(&desc[idx], flow_mode);
+ idx++;
+ }
+ if (is_not_last_data)
+ set_din_not_last_indication(&desc[(idx - 1)]);
+ /* return updated desc sequence size */
+ *seq_size = idx;
+}
+
+static const void *cc_larval_digest(struct device *dev, u32 mode)
+{
+ switch (mode) {
+ case DRV_HASH_MD5:
+ return cc_md5_init;
+ case DRV_HASH_SHA1:
+ return cc_sha1_init;
+ case DRV_HASH_SHA224:
+ return cc_sha224_init;
+ case DRV_HASH_SHA256:
+ return cc_sha256_init;
+ case DRV_HASH_SHA384:
+ return cc_sha384_init;
+ case DRV_HASH_SHA512:
+ return cc_sha512_init;
+ case DRV_HASH_SM3:
+ return cc_sm3_init;
+ default:
+ dev_err(dev, "Invalid hash mode (%d)\n", mode);
+ return cc_md5_init;
+ }
+}
+
+/**
+ * cc_larval_digest_addr() - Get the address of the initial digest in SRAM
+ * according to the given hash mode
+ *
+ * @drvdata: Associated device driver context
+ * @mode: The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512/SM3
+ *
+ * Return:
+ * The address of the initial digest in SRAM
+ */
+u32 cc_larval_digest_addr(void *drvdata, u32 mode)
+{
+ struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
+ struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
+ struct device *dev = drvdata_to_dev(_drvdata);
+ bool sm3_supported = (_drvdata->hw_rev >= CC_HW_REV_713);
+ u32 addr;
+
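+ /* The larval digests are laid out back to back in SRAM in the order
+  * copied by cc_init_hash_sram(): MD5, SHA1, SHA224, SHA256, [SM3],
+  * SHA384, SHA512.
+  */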
+ switch (mode) {
+ case DRV_HASH_NULL:
+ break; /*Ignore*/
+ case DRV_HASH_MD5:
+ return (hash_handle->larval_digest_sram_addr);
+ case DRV_HASH_SHA1:
+ return (hash_handle->larval_digest_sram_addr +
+ sizeof(cc_md5_init));
+ case DRV_HASH_SHA224:
+ return (hash_handle->larval_digest_sram_addr +
+ sizeof(cc_md5_init) +
+ sizeof(cc_sha1_init));
+ case DRV_HASH_SHA256:
+ return (hash_handle->larval_digest_sram_addr +
+ sizeof(cc_md5_init) +
+ sizeof(cc_sha1_init) +
+ sizeof(cc_sha224_init));
+ case DRV_HASH_SM3:
+ return (hash_handle->larval_digest_sram_addr +
+ sizeof(cc_md5_init) +
+ sizeof(cc_sha1_init) +
+ sizeof(cc_sha224_init) +
+ sizeof(cc_sha256_init));
+ case DRV_HASH_SHA384:
+ addr = (hash_handle->larval_digest_sram_addr +
+ sizeof(cc_md5_init) +
+ sizeof(cc_sha1_init) +
+ sizeof(cc_sha224_init) +
+ sizeof(cc_sha256_init));
+ if (sm3_supported)
+ addr += sizeof(cc_sm3_init);
+ return addr;
+ case DRV_HASH_SHA512:
+ addr = (hash_handle->larval_digest_sram_addr +
+ sizeof(cc_md5_init) +
+ sizeof(cc_sha1_init) +
+ sizeof(cc_sha224_init) +
+ sizeof(cc_sha256_init) +
+ sizeof(cc_sha384_init));
+ if (sm3_supported)
+ addr += sizeof(cc_sm3_init);
+ return addr;
+ default:
+ dev_err(dev, "Invalid hash mode (%d)\n", mode);
+ }
+
+ /* Fall back to a valid (though incorrect) address to avoid a kernel crash */
+ return hash_handle->larval_digest_sram_addr;
+}
+
+u32 cc_digest_len_addr(void *drvdata, u32 mode)
+{
+ struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
+ struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
+ u32 digest_len_addr = hash_handle->digest_len_sram_addr;
+
+ switch (mode) {
+ case DRV_HASH_SHA1:
+ case DRV_HASH_SHA224:
+ case DRV_HASH_SHA256:
+ case DRV_HASH_MD5:
+ return digest_len_addr;
+ case DRV_HASH_SHA384:
+ case DRV_HASH_SHA512:
+ return digest_len_addr + sizeof(cc_digest_len_init);
+ default:
+ return digest_len_addr; /* fall back to avoid a kernel crash */
+ }
+}
diff --git a/drivers/crypto/ccree/cc_hash.h b/drivers/crypto/ccree/cc_hash.h
new file mode 100644
index 000000000..3d0f2179e
--- /dev/null
+++ b/drivers/crypto/ccree/cc_hash.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
+
+/* \file cc_hash.h
+ * ARM CryptoCell Hash Crypto API
+ */
+
+#ifndef __CC_HASH_H__
+#define __CC_HASH_H__
+
+#include "cc_buffer_mgr.h"
+
+#define HMAC_IPAD_CONST 0x36363636
+#define HMAC_OPAD_CONST 0x5C5C5C5C
+#define HASH_LEN_SIZE_712 16
+#define HASH_LEN_SIZE_630 8
+#define HASH_MAX_LEN_SIZE HASH_LEN_SIZE_712
+#define CC_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
+#define CC_MAX_HASH_BLCK_SIZE SHA512_BLOCK_SIZE
+
+#define XCBC_MAC_K1_OFFSET 0
+#define XCBC_MAC_K2_OFFSET 16
+#define XCBC_MAC_K3_OFFSET 32
+
+#define CC_EXPORT_MAGIC 0xC2EE1070U
+
+/* this struct was taken from drivers/crypto/nx/nx-aes-xcbc.c and it is used
+ * for xcbc/cmac statesize
+ */
+struct aeshash_state {
+ u8 state[AES_BLOCK_SIZE];
+ unsigned int count;
+ u8 buffer[AES_BLOCK_SIZE];
+};
+
+/* ahash state */
+struct ahash_req_ctx {
+ u8 buffers[2][CC_MAX_HASH_BLCK_SIZE] ____cacheline_aligned;
+ u8 digest_result_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+ u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+ u8 opad_digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+ u8 digest_bytes_len[HASH_MAX_LEN_SIZE] ____cacheline_aligned;
+ struct async_gen_req_ctx gen_ctx ____cacheline_aligned;
+ enum cc_req_dma_buf_type data_dma_buf_type;
+ dma_addr_t opad_digest_dma_addr;
+ dma_addr_t digest_buff_dma_addr;
+ dma_addr_t digest_bytes_len_dma_addr;
+ dma_addr_t digest_result_dma_addr;
+ u32 buf_cnt[2];
+ u32 buff_index;
+ u32 xcbc_count; /* count xcbc update operations */
+ struct scatterlist buff_sg[2];
+ struct scatterlist *curr_sg;
+ u32 in_nents;
+ u32 mlli_nents;
+ struct mlli_params mlli_params;
+};
+
+static inline u32 *cc_hash_buf_cnt(struct ahash_req_ctx *state)
+{
+ return &state->buf_cnt[state->buff_index];
+}
+
+static inline u8 *cc_hash_buf(struct ahash_req_ctx *state)
+{
+ return state->buffers[state->buff_index];
+}
+
+static inline u32 *cc_next_buf_cnt(struct ahash_req_ctx *state)
+{
+ return &state->buf_cnt[state->buff_index ^ 1];
+}
+
+static inline u8 *cc_next_buf(struct ahash_req_ctx *state)
+{
+ return state->buffers[state->buff_index ^ 1];
+}
+
+int cc_hash_alloc(struct cc_drvdata *drvdata);
+int cc_init_hash_sram(struct cc_drvdata *drvdata);
+int cc_hash_free(struct cc_drvdata *drvdata);
+
+/**
+ * cc_digest_len_addr() - Gets the address of the initial digest length in SRAM
+ *
+ * @drvdata: Associated device driver context
+ * @mode: The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
+ *
+ * Return:
+ * Returns the address of the initial digest length in SRAM
+ */
+u32 cc_digest_len_addr(void *drvdata, u32 mode);
+
+/**
+ * cc_larval_digest_addr() - Gets the address of the initial digest in SRAM
+ * according to the given hash mode
+ *
+ * @drvdata: Associated device driver context
+ * @mode: The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
+ *
+ * Return:
+ * The address of the initial digest in SRAM
+ */
+u32 cc_larval_digest_addr(void *drvdata, u32 mode);
+
+#endif /*__CC_HASH_H__*/
diff --git a/drivers/crypto/ccree/cc_host_regs.h b/drivers/crypto/ccree/cc_host_regs.h
new file mode 100644
index 000000000..efe3e1d8b
--- /dev/null
+++ b/drivers/crypto/ccree/cc_host_regs.h
@@ -0,0 +1,282 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2019 ARM Limited or its affiliates. */
+
+#ifndef __CC_HOST_H__
+#define __CC_HOST_H__
+
+// --------------------------------------
+// BLOCK: HOST_P
+// --------------------------------------
+
+/* IRR */
+#define CC_HOST_IRR_REG_OFFSET 0xA00UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SHIFT 0x2UL
+#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT 0x3UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT 0x4UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT 0x5UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT 0x6UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT 0x7UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT 0x8UL
+#define CC_HOST_IRR_AXI_ERR_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT 0x9UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT 0xAUL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_GPR0_BIT_SHIFT 0xBUL
+#define CC_HOST_IRR_GPR0_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT 0xCUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT 0xDUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT 0xEUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT 0xFUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT 0x10UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT 0x11UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT 0x12UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SHIFT 0x13UL
+#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT 0x14UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT 0x17UL
+#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SIZE 0x1UL
+#define CC_HOST_SEP_SRAM_THRESHOLD_REG_OFFSET 0xA10UL
+#define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SIZE 0xCUL
+
+/* IMR */
+#define CC_HOST_IMR_REG_OFFSET 0x0A04UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SHIFT 0x2UL
+#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SHIFT 0x3UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SHIFT 0x4UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SHIFT 0x5UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SHIFT 0x6UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SHIFT 0x7UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SHIFT 0x8UL
+#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SHIFT 0x9UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SHIFT 0xAUL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_GPR0_BIT_SHIFT 0xBUL
+#define CC_HOST_IMR_GPR0_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SHIFT 0xCUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SHIFT 0xDUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SHIFT 0xEUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SHIFT 0xFUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SHIFT 0x10UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SHIFT 0x11UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SHIFT 0x12UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SHIFT 0x13UL
+#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SHIFT 0x14UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SHIFT 0x17UL
+#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SIZE 0x1UL
+
+/* ICR */
+#define CC_HOST_ICR_REG_OFFSET 0xA08UL
+#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SHIFT 0x2UL
+#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SIZE 0x1UL
+#define CC_HOST_ICR_AXI_ERR_CLEAR_BIT_SHIFT 0x8UL
+#define CC_HOST_ICR_AXI_ERR_CLEAR_BIT_SIZE 0x1UL
+#define CC_HOST_ICR_GPR_INT_CLEAR_BIT_SHIFT 0xBUL
+#define CC_HOST_ICR_GPR_INT_CLEAR_BIT_SIZE 0x1UL
+#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SHIFT 0x13UL
+#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL
+#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL
+#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL
+#define CC_NVM_IS_IDLE_REG_OFFSET 0x0A10UL
+#define CC_NVM_IS_IDLE_VALUE_BIT_SHIFT 0x0UL
+#define CC_NVM_IS_IDLE_VALUE_BIT_SIZE 0x1UL
+#define CC_SECURITY_DISABLED_REG_OFFSET 0x0A1CUL
+#define CC_SECURITY_DISABLED_VALUE_BIT_SHIFT 0x0UL
+#define CC_SECURITY_DISABLED_VALUE_BIT_SIZE 0x1UL
+#define CC_HOST_SIGNATURE_712_REG_OFFSET 0xA24UL
+#define CC_HOST_SIGNATURE_630_REG_OFFSET 0xAC8UL
+#define CC_HOST_SIGNATURE_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_SIGNATURE_VALUE_BIT_SIZE 0x20UL
+#define CC_HOST_BOOT_REG_OFFSET 0xA28UL
+#define CC_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SHIFT 0x0UL
+#define CC_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SHIFT 0x1UL
+#define CC_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SHIFT 0x2UL
+#define CC_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SHIFT 0x3UL
+#define CC_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SHIFT 0x5UL
+#define CC_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SHIFT 0x6UL
+#define CC_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SIZE 0x3UL
+#define CC_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SHIFT 0x9UL
+#define CC_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SHIFT 0xAUL
+#define CC_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SHIFT 0xBUL
+#define CC_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SHIFT 0xCUL
+#define CC_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SHIFT 0xDUL
+#define CC_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SHIFT 0xEUL
+#define CC_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SHIFT 0xFUL
+#define CC_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SHIFT 0x10UL
+#define CC_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SHIFT 0x11UL
+#define CC_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SHIFT 0x12UL
+#define CC_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SHIFT 0x13UL
+#define CC_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SHIFT 0x14UL
+#define CC_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SHIFT 0x15UL
+#define CC_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SHIFT 0x16UL
+#define CC_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SHIFT 0x17UL
+#define CC_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SHIFT 0x18UL
+#define CC_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SHIFT 0x19UL
+#define CC_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SHIFT 0x1AUL
+#define CC_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SHIFT 0x1BUL
+#define CC_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SHIFT 0x1CUL
+#define CC_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SHIFT 0x1DUL
+#define CC_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SHIFT 0x1EUL
+#define CC_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_VERSION_712_REG_OFFSET 0xA40UL
+#define CC_HOST_VERSION_630_REG_OFFSET 0xAD8UL
+#define CC_HOST_VERSION_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_VERSION_VALUE_BIT_SIZE 0x20UL
+#define CC_HOST_KFDE0_VALID_REG_OFFSET 0xA60UL
+#define CC_HOST_KFDE0_VALID_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_KFDE0_VALID_VALUE_BIT_SIZE 0x1UL
+#define CC_HOST_KFDE1_VALID_REG_OFFSET 0xA64UL
+#define CC_HOST_KFDE1_VALID_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_KFDE1_VALID_VALUE_BIT_SIZE 0x1UL
+#define CC_HOST_KFDE2_VALID_REG_OFFSET 0xA68UL
+#define CC_HOST_KFDE2_VALID_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_KFDE2_VALID_VALUE_BIT_SIZE 0x1UL
+#define CC_HOST_KFDE3_VALID_REG_OFFSET 0xA6CUL
+#define CC_HOST_KFDE3_VALID_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_KFDE3_VALID_VALUE_BIT_SIZE 0x1UL
+#define CC_HOST_GPR0_REG_OFFSET 0xA70UL
+#define CC_HOST_GPR0_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_GPR0_VALUE_BIT_SIZE 0x20UL
+#define CC_GPR_HOST_REG_OFFSET 0xA74UL
+#define CC_GPR_HOST_VALUE_BIT_SHIFT 0x0UL
+#define CC_GPR_HOST_VALUE_BIT_SIZE 0x20UL
+#define CC_HOST_POWER_DOWN_EN_REG_OFFSET 0xA78UL
+#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REG_OFFSET 0x0A7CUL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_ENGINE_BIT_SHIFT 0x0UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_MAC_ENGINE_BIT_SHIFT 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_MAC_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_GHASH_ENGINE_BIT_SHIFT 0x2UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_GHASH_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_DES_ENGINE_BIT_SHIFT 0x3UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_DES_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_HASH_ENGINE_BIT_SHIFT 0x4UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_HASH_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM3_ENGINE_BIT_SHIFT 0x5UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM3_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM4_ENGINE_BIT_SHIFT 0x6UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM4_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_OTP_DISCONNECTED_BIT_SHIFT 0x7UL
+#define CC_HOST_REMOVE_INPUT_PINS_OTP_DISCONNECTED_BIT_SIZE 0x1UL
+// --------------------------------------
+// BLOCK: ID_REGISTERS
+// --------------------------------------
+#define CC_PERIPHERAL_ID_4_REG_OFFSET 0x0FD0UL
+#define CC_PERIPHERAL_ID_4_VALUE_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_4_VALUE_BIT_SIZE 0x4UL
+#define CC_PIDRESERVED0_REG_OFFSET 0x0FD4UL
+#define CC_PIDRESERVED1_REG_OFFSET 0x0FD8UL
+#define CC_PIDRESERVED2_REG_OFFSET 0x0FDCUL
+#define CC_PERIPHERAL_ID_0_REG_OFFSET 0x0FE0UL
+#define CC_PERIPHERAL_ID_0_VALUE_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_0_VALUE_BIT_SIZE 0x8UL
+#define CC_PERIPHERAL_ID_1_REG_OFFSET 0x0FE4UL
+#define CC_PERIPHERAL_ID_1_PART_1_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_1_PART_1_BIT_SIZE 0x4UL
+#define CC_PERIPHERAL_ID_1_DES_0_JEP106_BIT_SHIFT 0x4UL
+#define CC_PERIPHERAL_ID_1_DES_0_JEP106_BIT_SIZE 0x4UL
+#define CC_PERIPHERAL_ID_2_REG_OFFSET 0x0FE8UL
+#define CC_PERIPHERAL_ID_2_DES_1_JEP106_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_2_DES_1_JEP106_BIT_SIZE 0x3UL
+#define CC_PERIPHERAL_ID_2_JEDEC_BIT_SHIFT 0x3UL
+#define CC_PERIPHERAL_ID_2_JEDEC_BIT_SIZE 0x1UL
+#define CC_PERIPHERAL_ID_2_REVISION_BIT_SHIFT 0x4UL
+#define CC_PERIPHERAL_ID_2_REVISION_BIT_SIZE 0x4UL
+#define CC_PERIPHERAL_ID_3_REG_OFFSET 0x0FECUL
+#define CC_PERIPHERAL_ID_3_CMOD_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_3_CMOD_BIT_SIZE 0x4UL
+#define CC_PERIPHERAL_ID_3_REVAND_BIT_SHIFT 0x4UL
+#define CC_PERIPHERAL_ID_3_REVAND_BIT_SIZE 0x4UL
+#define CC_COMPONENT_ID_0_REG_OFFSET 0x0FF0UL
+#define CC_COMPONENT_ID_0_VALUE_BIT_SHIFT 0x0UL
+#define CC_COMPONENT_ID_0_VALUE_BIT_SIZE 0x8UL
+#define CC_COMPONENT_ID_1_REG_OFFSET 0x0FF4UL
+#define CC_COMPONENT_ID_1_PRMBL_1_BIT_SHIFT 0x0UL
+#define CC_COMPONENT_ID_1_PRMBL_1_BIT_SIZE 0x4UL
+#define CC_COMPONENT_ID_1_CLASS_BIT_SHIFT 0x4UL
+#define CC_COMPONENT_ID_1_CLASS_BIT_SIZE 0x4UL
+#define CC_COMPONENT_ID_2_REG_OFFSET 0x0FF8UL
+#define CC_COMPONENT_ID_2_VALUE_BIT_SHIFT 0x0UL
+#define CC_COMPONENT_ID_2_VALUE_BIT_SIZE 0x8UL
+#define CC_COMPONENT_ID_3_REG_OFFSET 0x0FFCUL
+#define CC_COMPONENT_ID_3_VALUE_BIT_SHIFT 0x0UL
+#define CC_COMPONENT_ID_3_VALUE_BIT_SIZE 0x8UL
+// --------------------------------------
+// BLOCK: HOST_SRAM
+// --------------------------------------
+#define CC_SRAM_DATA_REG_OFFSET 0xF00UL
+#define CC_SRAM_DATA_VALUE_BIT_SHIFT 0x0UL
+#define CC_SRAM_DATA_VALUE_BIT_SIZE 0x20UL
+#define CC_SRAM_ADDR_REG_OFFSET 0xF04UL
+#define CC_SRAM_ADDR_VALUE_BIT_SHIFT 0x0UL
+#define CC_SRAM_ADDR_VALUE_BIT_SIZE 0xFUL
+#define CC_SRAM_DATA_READY_REG_OFFSET 0xF08UL
+#define CC_SRAM_DATA_READY_VALUE_BIT_SHIFT 0x0UL
+#define CC_SRAM_DATA_READY_VALUE_BIT_SIZE 0x1UL
+
+#endif //__CC_HOST_H__
diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h
new file mode 100644
index 000000000..15df58c66
--- /dev/null
+++ b/drivers/crypto/ccree/cc_hw_queue_defs.h
@@ -0,0 +1,633 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
+
+#ifndef __CC_HW_QUEUE_DEFS_H__
+#define __CC_HW_QUEUE_DEFS_H__
+
+#include <linux/types.h>
+
+#include "cc_kernel_regs.h"
+#include <linux/bitfield.h>
+
+/******************************************************************************
+ * DEFINITIONS
+ ******************************************************************************/
+
+#define HW_DESC_SIZE_WORDS 6
+/* Define max. available slots in HW queue */
+#define HW_QUEUE_SLOTS_MAX 15
+
+#define CC_REG_LOW(name) (name ## _BIT_SHIFT)
+#define CC_REG_HIGH(name) (CC_REG_LOW(name) + name ## _BIT_SIZE - 1)
+#define CC_GENMASK(name) GENMASK(CC_REG_HIGH(name), CC_REG_LOW(name))
+
+#define CC_HWQ_GENMASK(word, field) \
+ CC_GENMASK(CC_DSCRPTR_QUEUE_WORD ## word ## _ ## field)
+
+#define WORD0_VALUE CC_HWQ_GENMASK(0, VALUE)
+#define WORD0_CPP_CIPHER_MODE CC_HWQ_GENMASK(0, CPP_CIPHER_MODE)
+#define WORD1_DIN_CONST_VALUE CC_HWQ_GENMASK(1, DIN_CONST_VALUE)
+#define WORD1_DIN_DMA_MODE CC_HWQ_GENMASK(1, DIN_DMA_MODE)
+#define WORD1_DIN_SIZE CC_HWQ_GENMASK(1, DIN_SIZE)
+#define WORD1_NOT_LAST CC_HWQ_GENMASK(1, NOT_LAST)
+#define WORD1_NS_BIT CC_HWQ_GENMASK(1, NS_BIT)
+#define WORD1_LOCK_QUEUE CC_HWQ_GENMASK(1, LOCK_QUEUE)
+#define WORD2_VALUE CC_HWQ_GENMASK(2, VALUE)
+#define WORD3_DOUT_DMA_MODE CC_HWQ_GENMASK(3, DOUT_DMA_MODE)
+#define WORD3_DOUT_LAST_IND CC_HWQ_GENMASK(3, DOUT_LAST_IND)
+#define WORD3_DOUT_SIZE CC_HWQ_GENMASK(3, DOUT_SIZE)
+#define WORD3_HASH_XOR_BIT CC_HWQ_GENMASK(3, HASH_XOR_BIT)
+#define WORD3_NS_BIT CC_HWQ_GENMASK(3, NS_BIT)
+#define WORD3_QUEUE_LAST_IND CC_HWQ_GENMASK(3, QUEUE_LAST_IND)
+#define WORD4_ACK_NEEDED CC_HWQ_GENMASK(4, ACK_NEEDED)
+#define WORD4_AES_SEL_N_HASH CC_HWQ_GENMASK(4, AES_SEL_N_HASH)
+#define WORD4_AES_XOR_CRYPTO_KEY CC_HWQ_GENMASK(4, AES_XOR_CRYPTO_KEY)
+#define WORD4_BYTES_SWAP CC_HWQ_GENMASK(4, BYTES_SWAP)
+#define WORD4_CIPHER_CONF0 CC_HWQ_GENMASK(4, CIPHER_CONF0)
+#define WORD4_CIPHER_CONF1 CC_HWQ_GENMASK(4, CIPHER_CONF1)
+#define WORD4_CIPHER_CONF2 CC_HWQ_GENMASK(4, CIPHER_CONF2)
+#define WORD4_CIPHER_DO CC_HWQ_GENMASK(4, CIPHER_DO)
+#define WORD4_CIPHER_MODE CC_HWQ_GENMASK(4, CIPHER_MODE)
+#define WORD4_CMAC_SIZE0 CC_HWQ_GENMASK(4, CMAC_SIZE0)
+#define WORD4_DATA_FLOW_MODE CC_HWQ_GENMASK(4, DATA_FLOW_MODE)
+#define WORD4_KEY_SIZE CC_HWQ_GENMASK(4, KEY_SIZE)
+#define WORD4_SETUP_OPERATION CC_HWQ_GENMASK(4, SETUP_OPERATION)
+#define WORD5_DIN_ADDR_HIGH CC_HWQ_GENMASK(5, DIN_ADDR_HIGH)
+#define WORD5_DOUT_ADDR_HIGH CC_HWQ_GENMASK(5, DOUT_ADDR_HIGH)
+
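As a worked example (editorial illustration, not part of the patch), consider how one of these masks expands for the WORD1 DIN_SIZE field defined in cc_kernel_regs.h (shift 0x2, width 0x18 = 24 bits):

/*
 * CC_REG_LOW(CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE)  == 2
 * CC_REG_HIGH(CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE) == 2 + 24 - 1 == 25
 * WORD1_DIN_SIZE == CC_HWQ_GENMASK(1, DIN_SIZE)
 *                == GENMASK(25, 2) == 0x03fffffc
 *
 * FIELD_PREP(WORD1_DIN_SIZE, nbytes) therefore places the byte count in
 * bits [25:2] of descriptor word 1, which is how set_din_type() below
 * encodes the DIN size.
 */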
+/******************************************************************************
+ * TYPE DEFINITIONS
+ ******************************************************************************/
+
+struct cc_hw_desc {
+ union {
+ u32 word[HW_DESC_SIZE_WORDS];
+ u16 hword[HW_DESC_SIZE_WORDS * 2];
+ };
+};
+
+enum cc_axi_sec {
+ AXI_SECURE = 0,
+ AXI_NOT_SECURE = 1
+};
+
+enum cc_desc_direction {
+ DESC_DIRECTION_ILLEGAL = -1,
+ DESC_DIRECTION_ENCRYPT_ENCRYPT = 0,
+ DESC_DIRECTION_DECRYPT_DECRYPT = 1,
+ DESC_DIRECTION_DECRYPT_ENCRYPT = 3,
+ DESC_DIRECTION_END = S32_MAX,
+};
+
+enum cc_dma_mode {
+ DMA_MODE_NULL = -1,
+ NO_DMA = 0,
+ DMA_SRAM = 1,
+ DMA_DLLI = 2,
+ DMA_MLLI = 3,
+ DMA_MODE_END = S32_MAX,
+};
+
+enum cc_flow_mode {
+ FLOW_MODE_NULL = -1,
+ /* data flows */
+ BYPASS = 0,
+ DIN_AES_DOUT = 1,
+ AES_to_HASH = 2,
+ AES_and_HASH = 3,
+ DIN_DES_DOUT = 4,
+ DES_to_HASH = 5,
+ DES_and_HASH = 6,
+ DIN_HASH = 7,
+ DIN_HASH_and_BYPASS = 8,
+ AESMAC_and_BYPASS = 9,
+ AES_to_HASH_and_DOUT = 10,
+ DIN_RC4_DOUT = 11,
+ DES_to_HASH_and_DOUT = 12,
+ AES_to_AES_to_HASH_and_DOUT = 13,
+ AES_to_AES_to_HASH = 14,
+ AES_to_HASH_and_AES = 15,
+ DIN_SM4_DOUT = 16,
+ DIN_AES_AESMAC = 17,
+ HASH_to_DOUT = 18,
+ /* setup flows */
+ S_DIN_to_AES = 32,
+ S_DIN_to_AES2 = 33,
+ S_DIN_to_DES = 34,
+ S_DIN_to_RC4 = 35,
+ S_DIN_to_SM4 = 36,
+ S_DIN_to_HASH = 37,
+ S_AES_to_DOUT = 38,
+ S_AES2_to_DOUT = 39,
+ S_SM4_to_DOUT = 40,
+ S_RC4_to_DOUT = 41,
+ S_DES_to_DOUT = 42,
+ S_HASH_to_DOUT = 43,
+ SET_FLOW_ID = 44,
+ FLOW_MODE_END = S32_MAX,
+};
+
+enum cc_setup_op {
+ SETUP_LOAD_NOP = 0,
+ SETUP_LOAD_STATE0 = 1,
+ SETUP_LOAD_STATE1 = 2,
+ SETUP_LOAD_STATE2 = 3,
+ SETUP_LOAD_KEY0 = 4,
+ SETUP_LOAD_XEX_KEY = 5,
+ SETUP_WRITE_STATE0 = 8,
+ SETUP_WRITE_STATE1 = 9,
+ SETUP_WRITE_STATE2 = 10,
+ SETUP_WRITE_STATE3 = 11,
+ SETUP_OP_END = S32_MAX,
+};
+
+enum cc_hash_conf_pad {
+ HASH_PADDING_DISABLED = 0,
+ HASH_PADDING_ENABLED = 1,
+ HASH_DIGEST_RESULT_LITTLE_ENDIAN = 2,
+ HASH_CONFIG1_PADDING_RESERVE32 = S32_MAX,
+};
+
+enum cc_aes_mac_selector {
+ AES_SK = 1,
+ AES_CMAC_INIT = 2,
+ AES_CMAC_SIZE0 = 3,
+ AES_MAC_END = S32_MAX,
+};
+
+#define HW_KEY_MASK_CIPHER_DO 0x3
+#define HW_KEY_SHIFT_CIPHER_CFG2 2
+
+/* HwCryptoKey[1:0] is mapped to cipher_do[1:0] */
+/* HwCryptoKey[3:2] is mapped to cipher_config2[1:0] */
+enum cc_hw_crypto_key {
+ USER_KEY = 0, /* 0x0000 */
+ ROOT_KEY = 1, /* 0x0001 */
+ PROVISIONING_KEY = 2, /* 0x0010 */ /* ==KCP */
+ SESSION_KEY = 3, /* 0x0011 */
+ RESERVED_KEY = 4, /* NA */
+ PLATFORM_KEY = 5, /* 0x0101 */
+ CUSTOMER_KEY = 6, /* 0x0110 */
+ KFDE0_KEY = 7, /* 0x0111 */
+ KFDE1_KEY = 9, /* 0x1001 */
+ KFDE2_KEY = 10, /* 0x1010 */
+ KFDE3_KEY = 11, /* 0x1011 */
+ END_OF_KEYS = S32_MAX,
+};
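To make the mapping concrete (editorial illustration only), take KFDE3_KEY, whose value is 11 (binary 1011); set_hw_crypto_key() below splits it as:

/*
 * cipher_do      = 11 & HW_KEY_MASK_CIPHER_DO     = 0b11 (low two bits)
 * cipher_config2 = 11 >> HW_KEY_SHIFT_CIPHER_CFG2 = 0b10 (high two bits)
 */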
+
+#define CC_NUM_HW_KEY_SLOTS 4
+#define CC_FIRST_HW_KEY_SLOT 0
+#define CC_LAST_HW_KEY_SLOT (CC_FIRST_HW_KEY_SLOT + CC_NUM_HW_KEY_SLOTS - 1)
+
+#define CC_NUM_CPP_KEY_SLOTS 8
+#define CC_FIRST_CPP_KEY_SLOT 16
+#define CC_LAST_CPP_KEY_SLOT (CC_FIRST_CPP_KEY_SLOT + \
+ CC_NUM_CPP_KEY_SLOTS - 1)
+
+enum cc_hw_aes_key_size {
+ AES_128_KEY = 0,
+ AES_192_KEY = 1,
+ AES_256_KEY = 2,
+ END_OF_AES_KEYS = S32_MAX,
+};
+
+enum cc_hash_cipher_pad {
+ DO_NOT_PAD = 0,
+ DO_PAD = 1,
+ HASH_CIPHER_DO_PADDING_RESERVE32 = S32_MAX,
+};
+
+#define CC_CPP_DIN_ADDR 0xFF00FF00UL
+#define CC_CPP_DIN_SIZE 0xFF00FFUL
+
+/*****************************/
+/* Descriptor packing macros */
+/*****************************/
+
+/**
+ * hw_desc_init() - Init a HW descriptor struct
+ * @pdesc: pointer to HW descriptor struct
+ */
+static inline void hw_desc_init(struct cc_hw_desc *pdesc)
+{
+ memset(pdesc, 0, sizeof(struct cc_hw_desc));
+}
+
+/**
+ * set_queue_last_ind_bit() - Indicate the end of the current HW descriptor flow
+ * and release the HW engines.
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ */
+static inline void set_queue_last_ind_bit(struct cc_hw_desc *pdesc)
+{
+ pdesc->word[3] |= FIELD_PREP(WORD3_QUEUE_LAST_IND, 1);
+}
+
+/**
+ * set_din_type() - Set the DIN field of a HW descriptor
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ * @dma_mode: The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
+ * @addr: DIN address
+ * @size: Data size in bytes
+ * @axi_sec: AXI secure bit
+ */
+static inline void set_din_type(struct cc_hw_desc *pdesc,
+ enum cc_dma_mode dma_mode, dma_addr_t addr,
+ u32 size, enum cc_axi_sec axi_sec)
+{
+ pdesc->word[0] = lower_32_bits(addr);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ pdesc->word[5] |= FIELD_PREP(WORD5_DIN_ADDR_HIGH, upper_32_bits(addr));
+#endif
+ pdesc->word[1] |= FIELD_PREP(WORD1_DIN_DMA_MODE, dma_mode) |
+ FIELD_PREP(WORD1_DIN_SIZE, size) |
+ FIELD_PREP(WORD1_NS_BIT, axi_sec);
+}
+
+/**
+ * set_din_no_dma() - Set the DIN field of a HW descriptor to NO DMA mode.
+ * Used for NOP descriptor, register patches and other special modes.
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ * @addr: DIN address
+ * @size: Data size in bytes
+ */
+static inline void set_din_no_dma(struct cc_hw_desc *pdesc, u32 addr, u32 size)
+{
+ pdesc->word[0] = addr;
+ pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, size);
+}
+
+/**
+ * set_cpp_crypto_key() - Setup the special CPP descriptor
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ * @slot: Slot number
+ */
+static inline void set_cpp_crypto_key(struct cc_hw_desc *pdesc, u8 slot)
+{
+ pdesc->word[0] |= CC_CPP_DIN_ADDR;
+
+ pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, CC_CPP_DIN_SIZE);
+ pdesc->word[1] |= FIELD_PREP(WORD1_LOCK_QUEUE, 1);
+
+ pdesc->word[4] |= FIELD_PREP(WORD4_SETUP_OPERATION, slot);
+}
+
+/**
+ * set_din_sram() - Set the DIN field of a HW descriptor to SRAM mode.
+ * Note: No need to check SRAM alignment since host requests do not use SRAM and
+ * the adaptor will enforce alignment checks.
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ * @addr: DIN address
+ * @size: Data size in bytes
+ */
+static inline void set_din_sram(struct cc_hw_desc *pdesc, u32 addr, u32 size)
+{
+ pdesc->word[0] = addr;
+ pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, size) |
+ FIELD_PREP(WORD1_DIN_DMA_MODE, DMA_SRAM);
+}
+
+/**
+ * set_din_const() - Set the DIN field of a HW descriptor to CONST mode
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ * @val: DIN const value
+ * @size: Data size in bytes
+ */
+static inline void set_din_const(struct cc_hw_desc *pdesc, u32 val, u32 size)
+{
+ pdesc->word[0] = val;
+ pdesc->word[1] |= FIELD_PREP(WORD1_DIN_CONST_VALUE, 1) |
+ FIELD_PREP(WORD1_DIN_DMA_MODE, DMA_SRAM) |
+ FIELD_PREP(WORD1_DIN_SIZE, size);
+}
+
+/**
+ * set_din_not_last_indication() - Set the DIN not last input data indicator
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ */
+static inline void set_din_not_last_indication(struct cc_hw_desc *pdesc)
+{
+ pdesc->word[1] |= FIELD_PREP(WORD1_NOT_LAST, 1);
+}
+
+/**
+ * set_dout_type() - Set the DOUT field of a HW descriptor
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ * @dma_mode: The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
+ * @addr: DOUT address
+ * @size: Data size in bytes
+ * @axi_sec: AXI secure bit
+ */
+static inline void set_dout_type(struct cc_hw_desc *pdesc,
+ enum cc_dma_mode dma_mode, dma_addr_t addr,
+ u32 size, enum cc_axi_sec axi_sec)
+{
+ pdesc->word[2] = lower_32_bits(addr);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ pdesc->word[5] |= FIELD_PREP(WORD5_DOUT_ADDR_HIGH, upper_32_bits(addr));
+#endif
+ pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_DMA_MODE, dma_mode) |
+ FIELD_PREP(WORD3_DOUT_SIZE, size) |
+ FIELD_PREP(WORD3_NS_BIT, axi_sec);
+}
+
+/**
+ * set_dout_dlli() - Set the DOUT field of a HW descriptor to DLLI type
+ * The LAST INDICATION is provided by the user
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ * @addr: DOUT address
+ * @size: Data size in bytes
+ * @axi_sec: AXI secure bit
+ * @last_ind: The last indication bit
+ */
+static inline void set_dout_dlli(struct cc_hw_desc *pdesc, dma_addr_t addr,
+ u32 size, enum cc_axi_sec axi_sec,
+ u32 last_ind)
+{
+ set_dout_type(pdesc, DMA_DLLI, addr, size, axi_sec);
+ pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_LAST_IND, last_ind);
+}
+
+/**
+ * set_dout_mlli() - Set the DOUT field of a HW descriptor to MLLI type
+ * The LAST INDICATION is provided by the user
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ * @addr: DOUT address
+ * @size: Data size in bytes
+ * @axi_sec: AXI secure bit
+ * @last_ind: The last indication bit
+ */
+static inline void set_dout_mlli(struct cc_hw_desc *pdesc, u32 addr, u32 size,
+ enum cc_axi_sec axi_sec, bool last_ind)
+{
+ set_dout_type(pdesc, DMA_MLLI, addr, size, axi_sec);
+ pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_LAST_IND, last_ind);
+}
+
+/**
+ * set_dout_no_dma() - Set the DOUT field of a HW descriptor to NO DMA mode.
+ * Used for NOP descriptor, register patches and other special modes.
+ *
+ * @pdesc: pointer to HW descriptor struct
+ * @addr: DOUT address
+ * @size: Data size in bytes
+ * @write_enable: Enables a write operation to a register
+ */
+static inline void set_dout_no_dma(struct cc_hw_desc *pdesc, u32 addr,
+ u32 size, bool write_enable)
+{
+ pdesc->word[2] = addr;
+ pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_SIZE, size) |
+ FIELD_PREP(WORD3_DOUT_LAST_IND, write_enable);
+}
+
+/**
+ * set_xor_val() - Set the word for the XOR operation.
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ * @val: XOR data value
+ */
+static inline void set_xor_val(struct cc_hw_desc *pdesc, u32 val)
+{
+ pdesc->word[2] = val;
+}
+
+/**
+ * set_xor_active() - Set the XOR indicator bit in the descriptor
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ */
+static inline void set_xor_active(struct cc_hw_desc *pdesc)
+{
+ pdesc->word[3] |= FIELD_PREP(WORD3_HASH_XOR_BIT, 1);
+}
+
+/**
+ * set_aes_not_hash_mode() - Select the AES engine instead of HASH engine when
+ * setting up combined mode with AES XCBC MAC
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ */
+static inline void set_aes_not_hash_mode(struct cc_hw_desc *pdesc)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_AES_SEL_N_HASH, 1);
+}
+
+/**
+ * set_aes_xor_crypto_key() - Set aes xor crypto key, which in some scenarios
+ * selects the SM3 engine
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ */
+static inline void set_aes_xor_crypto_key(struct cc_hw_desc *pdesc)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_AES_XOR_CRYPTO_KEY, 1);
+}
+
+/**
+ * set_dout_sram() - Set the DOUT field of a HW descriptor to SRAM mode
+ * Note: No need to check SRAM alignment since host requests do not use SRAM and
+ * the adaptor will enforce alignment checks.
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ * @addr: DOUT address
+ * @size: Data size in bytes
+ */
+static inline void set_dout_sram(struct cc_hw_desc *pdesc, u32 addr, u32 size)
+{
+ pdesc->word[2] = addr;
+ pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_DMA_MODE, DMA_SRAM) |
+ FIELD_PREP(WORD3_DOUT_SIZE, size);
+}
+
+/**
+ * set_xex_data_unit_size() - Set the data unit size for XEX mode in
+ * data_out_addr[15:0]
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ * @size: Data unit size for XEX mode
+ */
+static inline void set_xex_data_unit_size(struct cc_hw_desc *pdesc, u32 size)
+{
+ pdesc->word[2] = size;
+}
+
+/**
+ * set_multi2_num_rounds() - Set the number of rounds for Multi2 in
+ * data_out_addr[15:0]
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ * @num: Number of rounds for Multi2
+ */
+static inline void set_multi2_num_rounds(struct cc_hw_desc *pdesc, u32 num)
+{
+ pdesc->word[2] = num;
+}
+
+/**
+ * set_flow_mode() - Set the flow mode.
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ * @mode: Any one of the modes defined in [CC7x-DESC]
+ */
+static inline void set_flow_mode(struct cc_hw_desc *pdesc,
+ enum cc_flow_mode mode)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_DATA_FLOW_MODE, mode);
+}
+
+/**
+ * set_cipher_mode() - Set the cipher mode.
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ * @mode: Any one of the modes defined in [CC7x-DESC]
+ */
+static inline void set_cipher_mode(struct cc_hw_desc *pdesc, int mode)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_MODE, mode);
+}
+
+/**
+ * set_hash_cipher_mode() - Set the cipher mode for hash algorithms.
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ * @cipher_mode: Any one of the modes defined in [CC7x-DESC]
+ * @hash_mode: specifies which hash is being handled
+ */
+static inline void set_hash_cipher_mode(struct cc_hw_desc *pdesc,
+ enum drv_cipher_mode cipher_mode,
+ enum drv_hash_mode hash_mode)
+{
+ set_cipher_mode(pdesc, cipher_mode);
+ if (hash_mode == DRV_HASH_SM3)
+ set_aes_xor_crypto_key(pdesc);
+}
+
+/**
+ * set_cipher_config0() - Set the cipher configuration fields.
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ * @mode: Any one of the modes defined in [CC7x-DESC]
+ */
+static inline void set_cipher_config0(struct cc_hw_desc *pdesc, int mode)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF0, mode);
+}
+
+/**
+ * set_cipher_config1() - Set the cipher configuration fields.
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ * @config: Padding mode
+ */
+static inline void set_cipher_config1(struct cc_hw_desc *pdesc,
+ enum cc_hash_conf_pad config)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF1, config);
+}
+
+/**
+ * set_hw_crypto_key() - Set HW key configuration fields.
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ * @hw_key: The HW key slot as defined in enum cc_hw_crypto_key
+ */
+static inline void set_hw_crypto_key(struct cc_hw_desc *pdesc,
+ enum cc_hw_crypto_key hw_key)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_DO,
+ (hw_key & HW_KEY_MASK_CIPHER_DO)) |
+ FIELD_PREP(WORD4_CIPHER_CONF2,
+ (hw_key >> HW_KEY_SHIFT_CIPHER_CFG2));
+}
+
+/**
+ * set_bytes_swap() - Set byte order of all setup-finalize descriptors.
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ * @config: True to enable byte swapping
+ */
+static inline void set_bytes_swap(struct cc_hw_desc *pdesc, bool config)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_BYTES_SWAP, config);
+}
+
+/**
+ * set_cmac_size0_mode() - Set CMAC_SIZE0 mode.
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ */
+static inline void set_cmac_size0_mode(struct cc_hw_desc *pdesc)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_CMAC_SIZE0, 1);
+}
+
+/**
+ * set_key_size() - Set key size descriptor field.
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ * @size: Key size in bytes (NOT size code)
+ */
+static inline void set_key_size(struct cc_hw_desc *pdesc, u32 size)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_KEY_SIZE, size);
+}
+
+/**
+ * set_key_size_aes() - Set AES key size.
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ * @size: Key size in bytes (NOT size code)
+ */
+static inline void set_key_size_aes(struct cc_hw_desc *pdesc, u32 size)
+{
+ set_key_size(pdesc, ((size >> 3) - 2));
+}
+
+/**
+ * set_key_size_des() - Set DES key size.
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ * @size: Key size in bytes (NOT size code)
+ */
+static inline void set_key_size_des(struct cc_hw_desc *pdesc, u32 size)
+{
+ set_key_size(pdesc, ((size >> 3) - 1));
+}
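For reference (illustration only, not part of the patch), the arithmetic above maps the usual key lengths to the HW size codes as follows:

/*
 * set_key_size_aes(pdesc, 16) -> (16 >> 3) - 2 = 0 (AES_128_KEY)
 * set_key_size_aes(pdesc, 24) -> (24 >> 3) - 2 = 1 (AES_192_KEY)
 * set_key_size_aes(pdesc, 32) -> (32 >> 3) - 2 = 2 (AES_256_KEY)
 * set_key_size_des(pdesc, 8)  -> (8 >> 3)  - 1 = 0 (single DES)
 * set_key_size_des(pdesc, 24) -> (24 >> 3) - 1 = 2 (3DES)
 */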
+
+/**
+ * set_setup_mode() - Set the descriptor setup mode
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ * @mode: Any one of the setup modes defined in [CC7x-DESC]
+ */
+static inline void set_setup_mode(struct cc_hw_desc *pdesc,
+ enum cc_setup_op mode)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_SETUP_OPERATION, mode);
+}
+
+/**
+ * set_cipher_do() - Set the descriptor cipher DO
+ *
+ * @pdesc: Pointer to HW descriptor struct
+ * @config: Any one of the cipher do defined in [CC7x-DESC]
+ */
+static inline void set_cipher_do(struct cc_hw_desc *pdesc,
+ enum cc_hash_cipher_pad config)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_DO,
+ (config & HW_KEY_MASK_CIPHER_DO));
+}
+
+#endif /*__CC_HW_QUEUE_DEFS_H__*/
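The helpers in this header are normally chained to build one descriptor. The following minimal sketch (editorial, not part of the patch) mirrors the dummy BYPASS completion descriptor that cc_req_mgr_init() builds in cc_request_mgr.c; it assumes a valid coherent DMA address in dummy_dma and the NS_BIT macro from cc_driver.h:

struct cc_hw_desc desc;

hw_desc_init(&desc);
/* Read a constant zero word as DIN ... */
set_din_const(&desc, 0, sizeof(u32));
/* ... and write it to dummy_dma via DLLI, flagged as the last DOUT. */
set_dout_dlli(&desc, dummy_dma, sizeof(u32), NS_BIT, 1);
/* Plain data copy, no crypto engine involved. */
set_flow_mode(&desc, BYPASS);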
diff --git a/drivers/crypto/ccree/cc_kernel_regs.h b/drivers/crypto/ccree/cc_kernel_regs.h
new file mode 100644
index 000000000..582bae450
--- /dev/null
+++ b/drivers/crypto/ccree/cc_kernel_regs.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
+
+#ifndef __CC_CRYS_KERNEL_H__
+#define __CC_CRYS_KERNEL_H__
+
+// --------------------------------------
+// BLOCK: DSCRPTR
+// --------------------------------------
+#define CC_DSCRPTR_COMPLETION_COUNTER_REG_OFFSET 0xE00UL
+#define CC_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SIZE 0x6UL
+#define CC_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SHIFT 0x6UL
+#define CC_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_SW_RESET_REG_OFFSET 0xE40UL
+#define CC_DSCRPTR_SW_RESET_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_SW_RESET_VALUE_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_REG_OFFSET 0xE60UL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SIZE 0xAUL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SHIFT 0xAUL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SIZE 0xCUL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SHIFT 0x16UL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SIZE 0x3UL
+#define CC_DSCRPTR_SINGLE_ADDR_EN_REG_OFFSET 0xE64UL
+#define CC_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_MEASURE_CNTR_REG_OFFSET 0xE68UL
+#define CC_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SIZE 0x20UL
+#define CC_DSCRPTR_QUEUE_WORD0_REG_OFFSET 0xE80UL
+#define CC_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SIZE 0x20UL
+#define CC_DSCRPTR_QUEUE_WORD1_REG_OFFSET 0xE84UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SHIFT 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE 0x18UL
+#define CC_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SHIFT 0x1AUL
+#define CC_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SHIFT 0x1BUL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SHIFT 0x1CUL
+#define CC_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SHIFT 0x1DUL
+#define CC_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SHIFT 0x1EUL
+#define CC_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD2_REG_OFFSET 0xE88UL
+#define CC_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SIZE 0x20UL
+#define CC_DSCRPTR_QUEUE_WORD3_REG_OFFSET 0xE8CUL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SHIFT 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SIZE 0x18UL
+#define CC_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SHIFT 0x1AUL
+#define CC_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SHIFT 0x1BUL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SHIFT 0x1DUL
+#define CC_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SHIFT 0x1EUL
+#define CC_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SHIFT 0x1FUL
+#define CC_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_REG_OFFSET 0xE90UL
+#define CC_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SIZE 0x6UL
+#define CC_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SHIFT 0x6UL
+#define CC_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SHIFT 0x7UL
+#define CC_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SHIFT 0x8UL
+#define CC_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SHIFT 0xAUL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SIZE 0x4UL
+#define CC_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SHIFT 0xEUL
+#define CC_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SHIFT 0xFUL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SHIFT 0x11UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SHIFT 0x13UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SHIFT 0x14UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SHIFT 0x16UL
+#define CC_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SHIFT 0x18UL
+#define CC_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SIZE 0x4UL
+#define CC_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SHIFT 0x1CUL
+#define CC_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SHIFT 0x1DUL
+#define CC_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SHIFT 0x1EUL
+#define CC_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SHIFT 0x1FUL
+#define CC_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD5_REG_OFFSET 0xE94UL
+#define CC_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SIZE 0x10UL
+#define CC_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SHIFT 0x10UL
+#define CC_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SIZE 0x10UL
+#define CC_DSCRPTR_QUEUE_WATERMARK_REG_OFFSET 0xE98UL
+#define CC_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SIZE 0xAUL
+#define CC_DSCRPTR_QUEUE_CONTENT_REG_OFFSET 0xE9CUL
+#define CC_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SIZE 0xAUL
+// --------------------------------------
+// BLOCK: AXI_P
+// --------------------------------------
+#define CC_AXIM_MON_INFLIGHT_REG_OFFSET 0xB00UL
+#define CC_AXIM_MON_INFLIGHT_VALUE_BIT_SHIFT 0x0UL
+#define CC_AXIM_MON_INFLIGHT_VALUE_BIT_SIZE 0x8UL
+#define CC_AXIM_MON_INFLIGHTLAST_REG_OFFSET 0xB40UL
+#define CC_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SHIFT 0x0UL
+#define CC_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SIZE 0x8UL
+#define CC_AXIM_MON_COMP_REG_OFFSET 0xB80UL
+#define CC_AXIM_MON_COMP8_REG_OFFSET 0xBA0UL
+#define CC_AXIM_MON_COMP_VALUE_BIT_SHIFT 0x0UL
+#define CC_AXIM_MON_COMP_VALUE_BIT_SIZE 0x10UL
+#define CC_AXIM_MON_ERR_REG_OFFSET 0xBC4UL
+#define CC_AXIM_MON_ERR_BRESP_BIT_SHIFT 0x0UL
+#define CC_AXIM_MON_ERR_BRESP_BIT_SIZE 0x2UL
+#define CC_AXIM_MON_ERR_BID_BIT_SHIFT 0x2UL
+#define CC_AXIM_MON_ERR_BID_BIT_SIZE 0x4UL
+#define CC_AXIM_MON_ERR_RRESP_BIT_SHIFT 0x10UL
+#define CC_AXIM_MON_ERR_RRESP_BIT_SIZE 0x2UL
+#define CC_AXIM_MON_ERR_RID_BIT_SHIFT 0x12UL
+#define CC_AXIM_MON_ERR_RID_BIT_SIZE 0x4UL
+#define CC_AXIM_CFG_REG_OFFSET 0xBE8UL
+#define CC_AXIM_CFG_BRESPMASK_BIT_SHIFT 0x4UL
+#define CC_AXIM_CFG_BRESPMASK_BIT_SIZE 0x1UL
+#define CC_AXIM_CFG_RRESPMASK_BIT_SHIFT 0x5UL
+#define CC_AXIM_CFG_RRESPMASK_BIT_SIZE 0x1UL
+#define CC_AXIM_CFG_INFLTMASK_BIT_SHIFT 0x6UL
+#define CC_AXIM_CFG_INFLTMASK_BIT_SIZE 0x1UL
+#define CC_AXIM_CFG_COMPMASK_BIT_SHIFT 0x7UL
+#define CC_AXIM_CFG_COMPMASK_BIT_SIZE 0x1UL
+#define CC_AXIM_ACE_CONST_REG_OFFSET 0xBECUL
+#define CC_AXIM_ACE_CONST_ARDOMAIN_BIT_SHIFT 0x0UL
+#define CC_AXIM_ACE_CONST_ARDOMAIN_BIT_SIZE 0x2UL
+#define CC_AXIM_ACE_CONST_AWDOMAIN_BIT_SHIFT 0x2UL
+#define CC_AXIM_ACE_CONST_AWDOMAIN_BIT_SIZE 0x2UL
+#define CC_AXIM_ACE_CONST_ARBAR_BIT_SHIFT 0x4UL
+#define CC_AXIM_ACE_CONST_ARBAR_BIT_SIZE 0x2UL
+#define CC_AXIM_ACE_CONST_AWBAR_BIT_SHIFT 0x6UL
+#define CC_AXIM_ACE_CONST_AWBAR_BIT_SIZE 0x2UL
+#define CC_AXIM_ACE_CONST_ARSNOOP_BIT_SHIFT 0x8UL
+#define CC_AXIM_ACE_CONST_ARSNOOP_BIT_SIZE 0x4UL
+#define CC_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SHIFT 0xCUL
+#define CC_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SIZE 0x3UL
+#define CC_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SHIFT 0xFUL
+#define CC_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SIZE 0x3UL
+#define CC_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SHIFT 0x12UL
+#define CC_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SIZE 0x7UL
+#define CC_AXIM_ACE_CONST_AWLEN_VAL_BIT_SHIFT 0x19UL
+#define CC_AXIM_ACE_CONST_AWLEN_VAL_BIT_SIZE 0x4UL
+#define CC_AXIM_CACHE_PARAMS_REG_OFFSET 0xBF0UL
+#define CC_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SHIFT 0x0UL
+#define CC_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SIZE 0x4UL
+#define CC_AXIM_CACHE_PARAMS_AWCACHE_BIT_SHIFT 0x4UL
+#define CC_AXIM_CACHE_PARAMS_AWCACHE_BIT_SIZE 0x4UL
+#define CC_AXIM_CACHE_PARAMS_ARCACHE_BIT_SHIFT 0x8UL
+#define CC_AXIM_CACHE_PARAMS_ARCACHE_BIT_SIZE 0x4UL
+#endif // __CC_CRYS_KERNEL_H__
diff --git a/drivers/crypto/ccree/cc_lli_defs.h b/drivers/crypto/ccree/cc_lli_defs.h
new file mode 100644
index 000000000..f891ab813
--- /dev/null
+++ b/drivers/crypto/ccree/cc_lli_defs.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
+
+#ifndef _CC_LLI_DEFS_H_
+#define _CC_LLI_DEFS_H_
+
+#include <linux/types.h>
+
+/* Max DLLI size
+ * AKA CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE
+ */
+#define DLLI_SIZE_BIT_SIZE 0x18
+
+#define CC_MAX_MLLI_ENTRY_SIZE 0xFFFF
+
+#define LLI_MAX_NUM_OF_DATA_ENTRIES 128
+#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 8
+#define MLLI_TABLE_MIN_ALIGNMENT 4 /* 32 bit alignment */
+#define MAX_NUM_OF_BUFFERS_IN_MLLI 4
+#define MAX_NUM_OF_TOTAL_MLLI_ENTRIES \
+ (2 * LLI_MAX_NUM_OF_DATA_ENTRIES + \
+ LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)
+
+/* Size of entry */
+#define LLI_ENTRY_WORD_SIZE 2
+#define LLI_ENTRY_BYTE_SIZE (LLI_ENTRY_WORD_SIZE * sizeof(u32))
+
+/* Word0[31:0] = ADDR[31:0] */
+#define LLI_WORD0_OFFSET 0
+#define LLI_LADDR_BIT_OFFSET 0
+#define LLI_LADDR_BIT_SIZE 32
+/* Word1[31:16] = ADDR[47:32]; Word1[15:0] = SIZE */
+#define LLI_WORD1_OFFSET 1
+#define LLI_SIZE_BIT_OFFSET 0
+#define LLI_SIZE_BIT_SIZE 16
+#define LLI_HADDR_BIT_OFFSET 16
+#define LLI_HADDR_BIT_SIZE 16
+
+#define LLI_SIZE_MASK GENMASK((LLI_SIZE_BIT_SIZE - 1), LLI_SIZE_BIT_OFFSET)
+#define LLI_HADDR_MASK GENMASK( \
+ (LLI_HADDR_BIT_OFFSET + LLI_HADDR_BIT_SIZE - 1),\
+ LLI_HADDR_BIT_OFFSET)
+
+static inline void cc_lli_set_addr(u32 *lli_p, dma_addr_t addr)
+{
+ lli_p[LLI_WORD0_OFFSET] = (addr & U32_MAX);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ lli_p[LLI_WORD1_OFFSET] &= ~LLI_HADDR_MASK;
+ lli_p[LLI_WORD1_OFFSET] |= FIELD_PREP(LLI_HADDR_MASK, (addr >> 32));
+#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
+}
+
+static inline void cc_lli_set_size(u32 *lli_p, u16 size)
+{
+ lli_p[LLI_WORD1_OFFSET] &= ~LLI_SIZE_MASK;
+ lli_p[LLI_WORD1_OFFSET] |= FIELD_PREP(LLI_SIZE_MASK, size);
+}
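A small usage sketch (editorial, not part of the patch), assuming a 64-bit dma_addr_t so that CONFIG_ARCH_DMA_ADDR_T_64BIT is set:

u32 entry[LLI_ENTRY_WORD_SIZE] = { 0 };
dma_addr_t addr = 0x12345f000ULL;

cc_lli_set_addr(entry, addr);  /* entry[0] = 0x2345f000, entry[1] |= 0x1 << 16 */
cc_lli_set_size(entry, 0x200); /* entry[1] = 0x00010200 */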
+
+#endif /*_CC_LLI_DEFS_H_*/
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
new file mode 100644
index 000000000..6124fbbbe
--- /dev/null
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+#include "cc_request_mgr.h"
+#include "cc_sram_mgr.h"
+#include "cc_hash.h"
+#include "cc_pm.h"
+#include "cc_fips.h"
+
+#define POWER_DOWN_ENABLE 0x01
+#define POWER_DOWN_DISABLE 0x00
+
+static int cc_pm_suspend(struct device *dev)
+{
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
+ fini_cc_regs(drvdata);
+ cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
+ clk_disable_unprepare(drvdata->clk);
+ return 0;
+}
+
+static int cc_pm_resume(struct device *dev)
+{
+ int rc;
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
+ /* Enables the device source clk */
+ rc = clk_prepare_enable(drvdata->clk);
+ if (rc) {
+ dev_err(dev, "failed getting clock back on. We're toast.\n");
+ return rc;
+ }
+ /* wait for Cryptocell reset completion */
+ if (!cc_wait_for_reset_completion(drvdata)) {
+ dev_err(dev, "Cryptocell reset not completed");
+ clk_disable_unprepare(drvdata->clk);
+ return -EBUSY;
+ }
+
+ cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
+ rc = init_cc_regs(drvdata);
+ if (rc) {
+ dev_err(dev, "init_cc_regs (%x)\n", rc);
+ clk_disable_unprepare(drvdata->clk);
+ return rc;
+ }
+ /* check if tee fips error occurred during power down */
+ cc_tee_handle_fips_error(drvdata);
+
+ cc_init_hash_sram(drvdata);
+
+ return 0;
+}
+
+const struct dev_pm_ops ccree_pm = {
+ SET_RUNTIME_PM_OPS(cc_pm_suspend, cc_pm_resume, NULL)
+};
+
+int cc_pm_get(struct device *dev)
+{
+ int rc = pm_runtime_get_sync(dev);
+
+ if (rc < 0) {
+ pm_runtime_put_noidle(dev);
+ return rc;
+ }
+
+ return 0;
+}
+
+void cc_pm_put_suspend(struct device *dev)
+{
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+}
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
new file mode 100644
index 000000000..50cac33de
--- /dev/null
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
+
+/* \file cc_pm.h
+ */
+
+#ifndef __CC_POWER_MGR_H__
+#define __CC_POWER_MGR_H__
+
+#include "cc_driver.h"
+
+#define CC_SUSPEND_TIMEOUT 3000
+
+#if defined(CONFIG_PM)
+
+extern const struct dev_pm_ops ccree_pm;
+
+int cc_pm_get(struct device *dev);
+void cc_pm_put_suspend(struct device *dev);
+
+#else
+
+static inline int cc_pm_get(struct device *dev)
+{
+ return 0;
+}
+
+static inline void cc_pm_put_suspend(struct device *dev) {}
+
+#endif
+
+#endif /*__CC_POWER_MGR_H__*/
diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c
new file mode 100644
index 000000000..887162df5
--- /dev/null
+++ b/drivers/crypto/ccree/cc_request_mgr.c
@@ -0,0 +1,662 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
+
+#include <linux/kernel.h>
+#include <linux/nospec.h>
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+#include "cc_request_mgr.h"
+#include "cc_pm.h"
+
+#define CC_MAX_POLL_ITER 10
+/* The highest descriptor count in use */
+#define CC_MAX_DESC_SEQ_LEN 23
+
+struct cc_req_mgr_handle {
+ /* Request manager resources */
+ unsigned int hw_queue_size; /* HW capability */
+ unsigned int min_free_hw_slots;
+ unsigned int max_used_sw_slots;
+ struct cc_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
+ u32 req_queue_head;
+ u32 req_queue_tail;
+ u32 axi_completed;
+ u32 q_free_slots;
+ /* This lock protects access to HW registers
+ * that must be used by a single request at a time
+ */
+ spinlock_t hw_lock;
+ struct cc_hw_desc compl_desc;
+ u8 *dummy_comp_buff;
+ dma_addr_t dummy_comp_buff_dma;
+
+ /* backlog queue */
+ struct list_head backlog;
+ unsigned int bl_len;
+ spinlock_t bl_lock; /* protect backlog queue */
+
+#ifdef COMP_IN_WQ
+ struct workqueue_struct *workq;
+ struct delayed_work compwork;
+#else
+ struct tasklet_struct comptask;
+#endif
+};
+
+struct cc_bl_item {
+ struct cc_crypto_req creq;
+ struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN];
+ unsigned int len;
+ struct list_head list;
+ bool notif;
+};
+
+static const u32 cc_cpp_int_masks[CC_CPP_NUM_ALGS][CC_CPP_NUM_SLOTS] = {
+ { BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT) },
+ { BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT) }
+};
+
+static void comp_handler(unsigned long devarg);
+#ifdef COMP_IN_WQ
+static void comp_work_handler(struct work_struct *work);
+#endif
+
+static inline u32 cc_cpp_int_mask(enum cc_cpp_alg alg, int slot)
+{
+ alg = array_index_nospec(alg, CC_CPP_NUM_ALGS);
+ slot = array_index_nospec(slot, CC_CPP_NUM_SLOTS);
+
+ return cc_cpp_int_masks[alg][slot];
+}
+
+void cc_req_mgr_fini(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ if (!req_mgr_h)
+ return; /* Not allocated */
+
+ if (req_mgr_h->dummy_comp_buff_dma) {
+ dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff,
+ req_mgr_h->dummy_comp_buff_dma);
+ }
+
+ dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
+ req_mgr_h->min_free_hw_slots));
+ dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);
+
+#ifdef COMP_IN_WQ
+ destroy_workqueue(req_mgr_h->workq);
+#else
+ /* Kill tasklet */
+ tasklet_kill(&req_mgr_h->comptask);
+#endif
+ kfree_sensitive(req_mgr_h);
+ drvdata->request_mgr_handle = NULL;
+}
+
+int cc_req_mgr_init(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *req_mgr_h;
+ struct device *dev = drvdata_to_dev(drvdata);
+ int rc = 0;
+
+ req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
+ if (!req_mgr_h) {
+ rc = -ENOMEM;
+ goto req_mgr_init_err;
+ }
+
+ drvdata->request_mgr_handle = req_mgr_h;
+
+ spin_lock_init(&req_mgr_h->hw_lock);
+ spin_lock_init(&req_mgr_h->bl_lock);
+ INIT_LIST_HEAD(&req_mgr_h->backlog);
+
+#ifdef COMP_IN_WQ
+ dev_dbg(dev, "Initializing completion workqueue\n");
+ req_mgr_h->workq = create_singlethread_workqueue("ccree");
+ if (!req_mgr_h->workq) {
+ dev_err(dev, "Failed creating work queue\n");
+ rc = -ENOMEM;
+ goto req_mgr_init_err;
+ }
+ INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
+#else
+ dev_dbg(dev, "Initializing completion tasklet\n");
+ tasklet_init(&req_mgr_h->comptask, comp_handler,
+ (unsigned long)drvdata);
+#endif
+ req_mgr_h->hw_queue_size = cc_ioread(drvdata,
+ CC_REG(DSCRPTR_QUEUE_SRAM_SIZE));
+ dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
+ if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
+ dev_err(dev, "Invalid HW queue size = %u (Min. required is %u)\n",
+ req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
+ rc = -ENOMEM;
+ goto req_mgr_init_err;
+ }
+ req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
+ req_mgr_h->max_used_sw_slots = 0;
+
+ /* Allocate DMA word for "dummy" completion descriptor use */
+ req_mgr_h->dummy_comp_buff =
+ dma_alloc_coherent(dev, sizeof(u32),
+ &req_mgr_h->dummy_comp_buff_dma,
+ GFP_KERNEL);
+ if (!req_mgr_h->dummy_comp_buff) {
+ dev_err(dev, "Not enough memory to allocate DMA (%zu) dropped buffer\n",
+ sizeof(u32));
+ rc = -ENOMEM;
+ goto req_mgr_init_err;
+ }
+
+ /* Init. "dummy" completion descriptor */
+ hw_desc_init(&req_mgr_h->compl_desc);
+ set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
+ set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
+ sizeof(u32), NS_BIT, 1);
+ set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
+ set_queue_last_ind(drvdata, &req_mgr_h->compl_desc);
+
+ return 0;
+
+req_mgr_init_err:
+ cc_req_mgr_fini(drvdata);
+ return rc;
+}
+
+static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[],
+ unsigned int seq_len)
+{
+ int i, w;
+ void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0);
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ /*
+ * We do indeed write all 6 command words to the same
+ * register. The HW supports this.
+ */
+
+ for (i = 0; i < seq_len; i++) {
+ for (w = 0; w <= 5; w++)
+ writel_relaxed(seq[i].word[w], reg);
+
+ if (cc_dump_desc)
+ dev_dbg(dev, "desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ i, seq[i].word[0], seq[i].word[1],
+ seq[i].word[2], seq[i].word[3],
+ seq[i].word[4], seq[i].word[5]);
+ }
+}
+
+/**
+ * request_mgr_complete() - Completion will take place if and only if user
+ * requested completion by cc_send_sync_request().
+ *
+ * @dev: Device pointer
+ * @dx_compl_h: The completion event to signal
+ * @dummy: unused error code
+ */
+static void request_mgr_complete(struct device *dev, void *dx_compl_h,
+ int dummy)
+{
+ struct completion *this_compl = dx_compl_h;
+
+ complete(this_compl);
+}
+
+static int cc_queues_status(struct cc_drvdata *drvdata,
+ struct cc_req_mgr_handle *req_mgr_h,
+ unsigned int total_seq_len)
+{
+ unsigned long poll_queue;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ /* The SW queue is checked only once, as it will not
+ * change during the poll: the calling thread holds the
+ * hw_lock spinlock.
+ */
+ if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
+ req_mgr_h->req_queue_tail) {
+ dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
+ req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
+ return -ENOSPC;
+ }
+
+ if (req_mgr_h->q_free_slots >= total_seq_len)
+ return 0;
+
+ /* Wait for space in the HW queue. Poll a constant number of iterations. */
+ for (poll_queue = 0; poll_queue < CC_MAX_POLL_ITER ; poll_queue++) {
+ req_mgr_h->q_free_slots =
+ cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
+ if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots)
+ req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
+
+ if (req_mgr_h->q_free_slots >= total_seq_len) {
+ /* If there is enough place return */
+ return 0;
+ }
+
+ dev_dbg(dev, "HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
+ req_mgr_h->q_free_slots, total_seq_len);
+ }
+ /* No room in the HW queue; try again later */
+ dev_dbg(dev, "HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
+ req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE,
+ req_mgr_h->q_free_slots, total_seq_len);
+ return -ENOSPC;
+}
+
+/**
+ * cc_do_send_request() - Enqueue caller request to crypto hardware.
+ * Must be called with the HW lock held and PM running.
+ *
+ * @drvdata: Associated device driver context
+ * @cc_req: The request to enqueue
+ * @desc: The crypto sequence
+ * @len: The crypto sequence length
+ * @add_comp: If "true": add an artificial dout DMA to mark completion
+ *
+ */
+static void cc_do_send_request(struct cc_drvdata *drvdata,
+ struct cc_crypto_req *cc_req,
+ struct cc_hw_desc *desc, unsigned int len,
+ bool add_comp)
+{
+ struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
+ unsigned int used_sw_slots;
+ unsigned int total_seq_len = len; /* initial sequence length */
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ used_sw_slots = ((req_mgr_h->req_queue_head -
+ req_mgr_h->req_queue_tail) &
+ (MAX_REQUEST_QUEUE_SIZE - 1));
+ if (used_sw_slots > req_mgr_h->max_used_sw_slots)
+ req_mgr_h->max_used_sw_slots = used_sw_slots;
+
+ /* Enqueue request - must be locked with HW lock*/
+ req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
+ req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
+ (MAX_REQUEST_QUEUE_SIZE - 1);
+
+ dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);
+
+ /*
+ * We are about to push command to the HW via the command registers
+ * that may reference host memory. We need to issue a memory barrier
+ * to make sure there are no outstanding memory writes
+ */
+ wmb();
+
+ /* STAT_PHASE_4: Push sequence */
+
+ enqueue_seq(drvdata, desc, len);
+
+ if (add_comp) {
+ enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1);
+ total_seq_len++;
+ }
+
+ if (req_mgr_h->q_free_slots < total_seq_len) {
+ /* This situation should never occur. It may indicate a problem
+ * with power resume. Set the free slot count to 0 and hope
+ * for the best.
+ */
+ dev_err(dev, "HW free slot count mismatch.");
+ req_mgr_h->q_free_slots = 0;
+ } else {
+ /* Update the free slots in HW queue */
+ req_mgr_h->q_free_slots -= total_seq_len;
+ }
+}
+
+static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
+ struct cc_bl_item *bli)
+{
+ struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ spin_lock_bh(&mgr->bl_lock);
+ list_add_tail(&bli->list, &mgr->backlog);
+ ++mgr->bl_len;
+ dev_dbg(dev, "+++bl len: %d\n", mgr->bl_len);
+ spin_unlock_bh(&mgr->bl_lock);
+ tasklet_schedule(&mgr->comptask);
+}
+
+static void cc_proc_backlog(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+ struct cc_bl_item *bli;
+ struct cc_crypto_req *creq;
+ void *req;
+ struct device *dev = drvdata_to_dev(drvdata);
+ int rc;
+
+ spin_lock(&mgr->bl_lock);
+
+ while (mgr->bl_len) {
+ bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
+ dev_dbg(dev, "---bl len: %d\n", mgr->bl_len);
+
+ spin_unlock(&mgr->bl_lock);
+
+ creq = &bli->creq;
+ req = creq->user_arg;
+
+ /*
+ * Notify the request we're moving out of the backlog
+ * but only if we haven't done so already.
+ */
+ if (!bli->notif) {
+ creq->user_cb(dev, req, -EINPROGRESS);
+ bli->notif = true;
+ }
+
+ spin_lock(&mgr->hw_lock);
+
+ rc = cc_queues_status(drvdata, mgr, bli->len);
+ if (rc) {
+ /*
+ * There is still no room in the FIFO for
+ * this request. Bail out. We'll return here
+ * on the next completion irq.
+ */
+ spin_unlock(&mgr->hw_lock);
+ return;
+ }
+
+ cc_do_send_request(drvdata, &bli->creq, bli->desc, bli->len,
+ false);
+ spin_unlock(&mgr->hw_lock);
+
+ /* Remove ourselves from the backlog list */
+ spin_lock(&mgr->bl_lock);
+ list_del(&bli->list);
+ --mgr->bl_len;
+ kfree(bli);
+ }
+
+ spin_unlock(&mgr->bl_lock);
+}
+
+int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
+ struct cc_hw_desc *desc, unsigned int len,
+ struct crypto_async_request *req)
+{
+ int rc;
+ struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+ struct device *dev = drvdata_to_dev(drvdata);
+ bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
+ gfp_t flags = cc_gfp_flags(req);
+ struct cc_bl_item *bli;
+
+ rc = cc_pm_get(dev);
+ if (rc) {
+ dev_err(dev, "cc_pm_get returned %x\n", rc);
+ return rc;
+ }
+
+ spin_lock_bh(&mgr->hw_lock);
+ rc = cc_queues_status(drvdata, mgr, len);
+
+#ifdef CC_DEBUG_FORCE_BACKLOG
+ if (backlog_ok)
+ rc = -ENOSPC;
+#endif /* CC_DEBUG_FORCE_BACKLOG */
+
+ if (rc == -ENOSPC && backlog_ok) {
+ spin_unlock_bh(&mgr->hw_lock);
+
+ bli = kmalloc(sizeof(*bli), flags);
+ if (!bli) {
+ cc_pm_put_suspend(dev);
+ return -ENOMEM;
+ }
+
+ memcpy(&bli->creq, cc_req, sizeof(*cc_req));
+ memcpy(&bli->desc, desc, len * sizeof(*desc));
+ bli->len = len;
+ bli->notif = false;
+ cc_enqueue_backlog(drvdata, bli);
+ return -EBUSY;
+ }
+
+ if (!rc) {
+ cc_do_send_request(drvdata, cc_req, desc, len, false);
+ rc = -EINPROGRESS;
+ }
+
+ spin_unlock_bh(&mgr->hw_lock);
+ return rc;
+}
+
+int cc_send_sync_request(struct cc_drvdata *drvdata,
+ struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
+ unsigned int len)
+{
+ int rc;
+ struct device *dev = drvdata_to_dev(drvdata);
+ struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+
+ init_completion(&cc_req->seq_compl);
+ cc_req->user_cb = request_mgr_complete;
+ cc_req->user_arg = &cc_req->seq_compl;
+
+ rc = cc_pm_get(dev);
+ if (rc) {
+ dev_err(dev, "cc_pm_get returned %x\n", rc);
+ return rc;
+ }
+
+ while (true) {
+ spin_lock_bh(&mgr->hw_lock);
+ rc = cc_queues_status(drvdata, mgr, len + 1);
+
+ if (!rc)
+ break;
+
+ spin_unlock_bh(&mgr->hw_lock);
+ wait_for_completion_interruptible(&drvdata->hw_queue_avail);
+ reinit_completion(&drvdata->hw_queue_avail);
+ }
+
+ cc_do_send_request(drvdata, cc_req, desc, len, true);
+ spin_unlock_bh(&mgr->hw_lock);
+ wait_for_completion(&cc_req->seq_compl);
+ return 0;
+}
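+
+/*
+ * Illustrative sketch (not part of the driver): synchronous callers (e.g.
+ * setkey paths) fill desc[]/seq_len themselves and simply block until the
+ * HW has executed the sequence. Both names are assumed for this example.
+ *
+ *	struct cc_crypto_req cc_req = {};
+ *	int rc;
+ *
+ *	rc = cc_send_sync_request(drvdata, &cc_req, desc, seq_len);
+ *
+ * A zero return means the whole sequence has completed on the HW queue;
+ * a non-zero value means the device could not be powered up.
+ */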
+
+/**
+ * send_request_init() - Enqueue caller request to crypto hardware during init
+ * process.
+ * This function assumes it is not called in the middle of a flow, since it
+ * sets the QUEUE_LAST_IND flag in the last descriptor.
+ *
+ * @drvdata: Associated device driver context
+ * @desc: The crypto sequence
+ * @len: The crypto sequence length
+ *
+ * Return:
+ * 0 upon success, negative error code otherwise.
+ */
+int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
+ unsigned int len)
+{
+ struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
+ unsigned int total_seq_len = len; /* initial sequence length */
+ int rc = 0;
+
+ /* Wait for space in the HW and SW FIFOs. Poll for up to FIFO_TIMEOUT. */
+ rc = cc_queues_status(drvdata, req_mgr_h, total_seq_len);
+ if (rc)
+ return rc;
+
+ set_queue_last_ind(drvdata, &desc[(len - 1)]);
+
+ /*
+ * We are about to push command to the HW via the command registers
+ * that may reference host memory. We need to issue a memory barrier
+ * to make sure there are no outstanding memory writes
+ */
+ wmb();
+ enqueue_seq(drvdata, desc, len);
+
+ /* Update the free slots in HW queue */
+ req_mgr_h->q_free_slots =
+ cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
+
+ return 0;
+}
+
+void complete_request(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+
+ complete(&drvdata->hw_queue_avail);
+#ifdef COMP_IN_WQ
+ queue_delayed_work(request_mgr_handle->workq,
+ &request_mgr_handle->compwork, 0);
+#else
+ tasklet_schedule(&request_mgr_handle->comptask);
+#endif
+}
+
+#ifdef COMP_IN_WQ
+static void comp_work_handler(struct work_struct *work)
+{
+ struct cc_drvdata *drvdata =
+ container_of(work, struct cc_drvdata, compwork.work);
+
+ comp_handler((unsigned long)drvdata);
+}
+#endif
+
+static void proc_completions(struct cc_drvdata *drvdata)
+{
+ struct cc_crypto_req *cc_req;
+ struct device *dev = drvdata_to_dev(drvdata);
+ struct cc_req_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+ unsigned int *tail = &request_mgr_handle->req_queue_tail;
+ unsigned int *head = &request_mgr_handle->req_queue_head;
+ int rc;
+ u32 mask;
+
+ while (request_mgr_handle->axi_completed) {
+ request_mgr_handle->axi_completed--;
+
+ /* Dequeue request */
+ if (*head == *tail) {
+ /* We are supposed to handle a completion but our
+ * queue is empty. This is not normal. Return and
+ * hope for the best.
+ */
+ dev_err(dev, "Request queue is empty head == tail %u\n",
+ *head);
+ break;
+ }
+
+ cc_req = &request_mgr_handle->req_queue[*tail];
+
+ if (cc_req->cpp.is_cpp) {
+ dev_dbg(dev, "CPP request completion slot: %d alg:%d\n",
+ cc_req->cpp.slot, cc_req->cpp.alg);
+ mask = cc_cpp_int_mask(cc_req->cpp.alg,
+ cc_req->cpp.slot);
+ rc = (drvdata->irq & mask ? -EPERM : 0);
+ dev_dbg(dev, "Got mask: %x irq: %x rc: %d\n", mask,
+ drvdata->irq, rc);
+ } else {
+ dev_dbg(dev, "None CPP request completion\n");
+ rc = 0;
+ }
+
+ if (cc_req->user_cb)
+ cc_req->user_cb(dev, cc_req->user_arg, rc);
+ *tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
+ dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
+ dev_dbg(dev, "Request completed. axi_completed=%d\n",
+ request_mgr_handle->axi_completed);
+ cc_pm_put_suspend(dev);
+ }
+}
+
+static inline u32 cc_axi_comp_count(struct cc_drvdata *drvdata)
+{
+ return FIELD_GET(AXIM_MON_COMP_VALUE,
+ cc_ioread(drvdata, drvdata->axim_mon_offset));
+}
+
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void comp_handler(unsigned long devarg)
+{
+ struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
+ struct cc_req_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+ struct device *dev = drvdata_to_dev(drvdata);
+ u32 irq;
+
+ dev_dbg(dev, "Completion handler called!\n");
+ irq = (drvdata->irq & drvdata->comp_mask);
+
+ /* Clear the interrupt now, so that it does not fire again once we
+ * unmask it.
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
+
+ /* Avoid race with above clear: Test completion counter once more */
+
+ request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
+
+ dev_dbg(dev, "AXI completion after updated: %d\n",
+ request_mgr_handle->axi_completed);
+
+ while (request_mgr_handle->axi_completed) {
+ do {
+ drvdata->irq |= cc_ioread(drvdata, CC_REG(HOST_IRR));
+ irq = (drvdata->irq & drvdata->comp_mask);
+ proc_completions(drvdata);
+
+ /* At this point (after proc_completions()),
+ * request_mgr_handle->axi_completed is 0.
+ */
+ request_mgr_handle->axi_completed +=
+ cc_axi_comp_count(drvdata);
+ } while (request_mgr_handle->axi_completed > 0);
+
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
+
+ request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
+ }
+
+ /* After verifying that there is nothing left to do, unmask the AXI
+ * completion interrupt.
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_IMR),
+ cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~drvdata->comp_mask);
+
+ cc_proc_backlog(drvdata);
+ dev_dbg(dev, "Comp. handler done.\n");
+}
diff --git a/drivers/crypto/ccree/cc_request_mgr.h b/drivers/crypto/ccree/cc_request_mgr.h
new file mode 100644
index 000000000..ae25ca843
--- /dev/null
+++ b/drivers/crypto/ccree/cc_request_mgr.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
+
+/* \file cc_request_mgr.h
+ * Request Manager
+ */
+
+#ifndef __REQUEST_MGR_H__
+#define __REQUEST_MGR_H__
+
+#include "cc_hw_queue_defs.h"
+
+int cc_req_mgr_init(struct cc_drvdata *drvdata);
+
+/**
+ * cc_send_request() - Enqueue caller request to crypto hardware.
+ *
+ * @drvdata: Associated device driver context
+ * @cc_req: The request to enqueue
+ * @desc: The crypto sequence
+ * @len: The crypto sequence length
+ * @req: Asynchronous crypto request
+ *
+ * Return:
+ * -EINPROGRESS if the request was queued, -EBUSY if it was backlogged, or
+ * a negative error code on failure.
+ */
+int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
+ struct cc_hw_desc *desc, unsigned int len,
+ struct crypto_async_request *req);
+
+int cc_send_sync_request(struct cc_drvdata *drvdata,
+ struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
+ unsigned int len);
+
+int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
+ unsigned int len);
+
+void complete_request(struct cc_drvdata *drvdata);
+
+void cc_req_mgr_fini(struct cc_drvdata *drvdata);
+
+#endif /*__REQUEST_MGR_H__*/
diff --git a/drivers/crypto/ccree/cc_sram_mgr.c b/drivers/crypto/ccree/cc_sram_mgr.c
new file mode 100644
index 000000000..37a958563
--- /dev/null
+++ b/drivers/crypto/ccree/cc_sram_mgr.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
+
+#include "cc_driver.h"
+#include "cc_sram_mgr.h"
+
+/**
+ * cc_sram_mgr_init() - Initializes SRAM pool.
+ * On older hardware revisions the pool starts right after the ROM-reserved
+ * bytes; otherwise it starts at the beginning of SRAM.
+ *
+ * @drvdata: Associated device driver context
+ *
+ * Return:
+ * 0 for success, negative error code for failure.
+ */
+int cc_sram_mgr_init(struct cc_drvdata *drvdata)
+{
+ u32 start = 0;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ if (drvdata->hw_rev < CC_HW_REV_712) {
+ /* Pool starts after ROM bytes */
+ start = cc_ioread(drvdata, CC_REG(HOST_SEP_SRAM_THRESHOLD));
+ if ((start & 0x3) != 0) {
+ dev_err(dev, "Invalid SRAM offset 0x%x\n", start);
+ return -EINVAL;
+ }
+ }
+
+ drvdata->sram_free_offset = start;
+ return 0;
+}
+
+/**
+ * cc_sram_alloc() - Allocate buffer from SRAM pool.
+ *
+ * @drvdata: Associated device driver context
+ * @size: The requested number of bytes to allocate
+ *
+ * Return:
+ * Address offset in SRAM or NULL_SRAM_ADDR for failure.
+ */
+u32 cc_sram_alloc(struct cc_drvdata *drvdata, u32 size)
+{
+ struct device *dev = drvdata_to_dev(drvdata);
+ u32 p;
+
+ if ((size & 0x3)) {
+ dev_err(dev, "Requested buffer size (%u) is not multiple of 4",
+ size);
+ return NULL_SRAM_ADDR;
+ }
+ if (size > (CC_CC_SRAM_SIZE - drvdata->sram_free_offset)) {
+ dev_err(dev, "Not enough space to allocate %u B (at offset %u)\n",
+ size, drvdata->sram_free_offset);
+ return NULL_SRAM_ADDR;
+ }
+
+ p = drvdata->sram_free_offset;
+ drvdata->sram_free_offset += size;
+ dev_dbg(dev, "Allocated %u B @ %u\n", size, p);
+ return p;
+}
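+
+/*
+ * Illustrative sketch (not part of the driver): SRAM allocations must be a
+ * multiple of 4 bytes and are never freed, so callers typically reserve
+ * their slots once at init time. digest_sram is an assumed example name.
+ *
+ *	u32 digest_sram = cc_sram_alloc(drvdata, 4 * sizeof(u32));
+ *
+ *	if (digest_sram == NULL_SRAM_ADDR)
+ *		return -ENOMEM;
+ */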
+
+/**
+ * cc_set_sram_desc() - Create a sequence of const descriptors that writes
+ * the values in the given array into SRAM.
+ * Note: each const value cannot exceed the word size.
+ *
+ * @src: A pointer to an array of words to write as constants.
+ * @dst: The target SRAM buffer to set into
+ * @nelement: The number of words in "src" array
+ * @seq: A pointer to the given IN/OUT descriptor sequence
+ * @seq_len: A pointer to the given IN/OUT sequence length
+ */
+void cc_set_sram_desc(const u32 *src, u32 dst, unsigned int nelement,
+ struct cc_hw_desc *seq, unsigned int *seq_len)
+{
+ u32 i;
+ unsigned int idx = *seq_len;
+
+ for (i = 0; i < nelement; i++, idx++) {
+ hw_desc_init(&seq[idx]);
+ set_din_const(&seq[idx], src[i], sizeof(u32));
+ set_dout_sram(&seq[idx], dst + (i * sizeof(u32)), sizeof(u32));
+ set_flow_mode(&seq[idx], BYPASS);
+ }
+
+ *seq_len = idx;
+}
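+
+/*
+ * Illustrative sketch (not part of the driver): a caller would typically
+ * pair cc_sram_alloc() with cc_set_sram_desc() and push the resulting
+ * BYPASS descriptors to the HW, e.g. via send_request_init() during driver
+ * setup. init_words[] is an assumed example array.
+ *
+ *	static const u32 init_words[] = { 0x01234567, 0x89abcdef };
+ *	struct cc_hw_desc seq[ARRAY_SIZE(init_words)];
+ *	unsigned int seq_len = 0;
+ *	int rc;
+ *	u32 dst = cc_sram_alloc(drvdata, sizeof(init_words));
+ *
+ *	if (dst != NULL_SRAM_ADDR) {
+ *		cc_set_sram_desc(init_words, dst, ARRAY_SIZE(init_words),
+ *				 seq, &seq_len);
+ *		rc = send_request_init(drvdata, seq, seq_len);
+ *	}
+ */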
diff --git a/drivers/crypto/ccree/cc_sram_mgr.h b/drivers/crypto/ccree/cc_sram_mgr.h
new file mode 100644
index 000000000..1c965ef83
--- /dev/null
+++ b/drivers/crypto/ccree/cc_sram_mgr.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
+
+#ifndef __CC_SRAM_MGR_H__
+#define __CC_SRAM_MGR_H__
+
+#ifndef CC_CC_SRAM_SIZE
+#define CC_CC_SRAM_SIZE 4096
+#endif
+
+struct cc_drvdata;
+
+#define NULL_SRAM_ADDR ((u32)-1)
+
+/**
+ * cc_sram_mgr_init() - Initializes SRAM pool.
+ * The first X bytes of SRAM are reserved for ROM usage, so the pool
+ * starts right after them.
+ *
+ * @drvdata: Associated device driver context
+ *
+ * Return:
+ * Zero for success, negative value otherwise.
+ */
+int cc_sram_mgr_init(struct cc_drvdata *drvdata);
+
+/**
+ * cc_sram_alloc() - Allocate buffer from SRAM pool.
+ *
+ * @drvdata: Associated device driver context
+ * @size: The requested number of bytes to allocate
+ *
+ * Return:
+ * Address offset in SRAM or NULL_SRAM_ADDR for failure.
+ */
+u32 cc_sram_alloc(struct cc_drvdata *drvdata, u32 size);
+
+/**
+ * cc_set_sram_desc() - Create a sequence of const descriptors that writes
+ * the values in the given array into SRAM.
+ * Note: each const value cannot exceed the word size.
+ *
+ * @src: A pointer to an array of words to write as constants.
+ * @dst: The target SRAM buffer to set into
+ * @nelement: The number of words in "src" array
+ * @seq: A pointer to the given IN/OUT descriptor sequence
+ * @seq_len: A pointer to the given IN/OUT sequence length
+ */
+void cc_set_sram_desc(const u32 *src, u32 dst, unsigned int nelement,
+ struct cc_hw_desc *seq, unsigned int *seq_len);
+
+#endif /*__CC_SRAM_MGR_H__*/