Diffstat (limited to 'drivers/crypto/xilinx')
-rw-r--r--	drivers/crypto/xilinx/Makefile	3
-rw-r--r--	drivers/crypto/xilinx/zynqmp-aes-gcm.c	449
-rw-r--r--	drivers/crypto/xilinx/zynqmp-sha.c	264
3 files changed, 716 insertions, 0 deletions
diff --git a/drivers/crypto/xilinx/Makefile b/drivers/crypto/xilinx/Makefile
new file mode 100644
index 000000000..730feff5b
--- /dev/null
+++ b/drivers/crypto/xilinx/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES) += zynqmp-aes-gcm.o
+obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_SHA3) += zynqmp-sha.o
diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
new file mode 100644
index 000000000..bf1f421e0
--- /dev/null
+++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
@@ -0,0 +1,449 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx ZynqMP AES Driver.
+ * Copyright (c) 2020 Xilinx Inc.
+ */
+
+#include <crypto/aes.h>
+#include <crypto/engine.h>
+#include <crypto/gcm.h>
+#include <crypto/internal/aead.h>
+#include <crypto/scatterwalk.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define ZYNQMP_DMA_BIT_MASK 32U
+
+#define ZYNQMP_AES_KEY_SIZE AES_KEYSIZE_256
+#define ZYNQMP_AES_AUTH_SIZE 16U
+#define ZYNQMP_KEY_SRC_SEL_KEY_LEN 1U
+#define ZYNQMP_AES_BLK_SIZE 1U
+#define ZYNQMP_AES_MIN_INPUT_BLK_SIZE 4U
+#define ZYNQMP_AES_WORD_LEN 4U
+
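+/* Error codes reported back by the PMU firmware for an AES request. */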
+#define ZYNQMP_AES_GCM_TAG_MISMATCH_ERR 0x01
+#define ZYNQMP_AES_WRONG_KEY_SRC_ERR 0x13
+#define ZYNQMP_AES_PUF_NOT_PROGRAMMED 0xE300
+
+enum zynqmp_aead_op {
+ ZYNQMP_AES_DECRYPT = 0,
+ ZYNQMP_AES_ENCRYPT
+};
+
+enum zynqmp_aead_keysrc {
+ ZYNQMP_AES_KUP_KEY = 0,
+ ZYNQMP_AES_DEV_KEY,
+ ZYNQMP_AES_PUF_KEY
+};
+
+struct zynqmp_aead_drv_ctx {
+ union {
+ struct aead_alg aead;
+ } alg;
+ struct device *dev;
+ struct crypto_engine *engine;
+};
+
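+/*
+ * Request descriptor handed to the PMU firmware over DMA: 64-bit DMA
+ * addresses for the payload, IV and (optional) key, plus the payload
+ * size, cipher direction and key source.
+ */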
+struct zynqmp_aead_hw_req {
+ u64 src;
+ u64 iv;
+ u64 key;
+ u64 dst;
+ u64 size;
+ u64 op;
+ u64 keysrc;
+};
+
+struct zynqmp_aead_tfm_ctx {
+ struct crypto_engine_ctx engine_ctx;
+ struct device *dev;
+ u8 key[ZYNQMP_AES_KEY_SIZE];
+ u8 *iv;
+ u32 keylen;
+ u32 authsize;
+ enum zynqmp_aead_keysrc keysrc;
+ struct crypto_aead *fbk_cipher;
+};
+
+struct zynqmp_aead_req_ctx {
+ enum zynqmp_aead_op op;
+};
+
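+/*
+ * Stage the request in one DMA-coherent bounce buffer laid out as
+ * [payload | IV | KUP key (only when a user-provided key is in use)],
+ * hand a descriptor for it to the firmware, then copy the result back
+ * into the destination scatterlist.
+ */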
+static int zynqmp_aes_aead_cipher(struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct zynqmp_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
+ struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+ struct device *dev = tfm_ctx->dev;
+ struct zynqmp_aead_hw_req *hwreq;
+ dma_addr_t dma_addr_data, dma_addr_hw_req;
+ unsigned int data_size;
+ unsigned int status;
+ int ret;
+ size_t dma_size;
+ char *kbuf;
+ int err;
+
+ if (tfm_ctx->keysrc == ZYNQMP_AES_KUP_KEY)
+ dma_size = req->cryptlen + ZYNQMP_AES_KEY_SIZE
+ + GCM_AES_IV_SIZE;
+ else
+ dma_size = req->cryptlen + GCM_AES_IV_SIZE;
+
+ kbuf = dma_alloc_coherent(dev, dma_size, &dma_addr_data, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ hwreq = dma_alloc_coherent(dev, sizeof(struct zynqmp_aead_hw_req),
+ &dma_addr_hw_req, GFP_KERNEL);
+ if (!hwreq) {
+ dma_free_coherent(dev, dma_size, kbuf, dma_addr_data);
+ return -ENOMEM;
+ }
+
+ data_size = req->cryptlen;
+ scatterwalk_map_and_copy(kbuf, req->src, 0, req->cryptlen, 0);
+ memcpy(kbuf + data_size, req->iv, GCM_AES_IV_SIZE);
+
+ hwreq->src = dma_addr_data;
+ hwreq->dst = dma_addr_data;
+ hwreq->iv = hwreq->src + data_size;
+ hwreq->keysrc = tfm_ctx->keysrc;
+ hwreq->op = rq_ctx->op;
+
+ if (hwreq->op == ZYNQMP_AES_ENCRYPT)
+ hwreq->size = data_size;
+ else
+ hwreq->size = data_size - ZYNQMP_AES_AUTH_SIZE;
+
+ if (hwreq->keysrc == ZYNQMP_AES_KUP_KEY) {
+ memcpy(kbuf + data_size + GCM_AES_IV_SIZE,
+ tfm_ctx->key, ZYNQMP_AES_KEY_SIZE);
+
+ hwreq->key = hwreq->src + data_size + GCM_AES_IV_SIZE;
+ } else {
+ hwreq->key = 0;
+ }
+
+ ret = zynqmp_pm_aes_engine(dma_addr_hw_req, &status);
+
+ if (ret) {
+ dev_err(dev, "ERROR: AES PM API failed\n");
+ err = ret;
+ } else if (status) {
+ switch (status) {
+ case ZYNQMP_AES_GCM_TAG_MISMATCH_ERR:
+ dev_err(dev, "ERROR: Gcm Tag mismatch\n");
+ break;
+ case ZYNQMP_AES_WRONG_KEY_SRC_ERR:
+ dev_err(dev, "ERROR: Wrong KeySrc, enable secure mode\n");
+ break;
+ case ZYNQMP_AES_PUF_NOT_PROGRAMMED:
+ dev_err(dev, "ERROR: PUF is not registered\n");
+ break;
+ default:
+ dev_err(dev, "ERROR: Unknown error\n");
+ break;
+ }
+ err = -status;
+ } else {
+ if (hwreq->op == ZYNQMP_AES_ENCRYPT)
+ data_size = data_size + ZYNQMP_AES_AUTH_SIZE;
+ else
+ data_size = data_size - ZYNQMP_AES_AUTH_SIZE;
+
+ sg_copy_from_buffer(req->dst, sg_nents(req->dst),
+ kbuf, data_size);
+ err = 0;
+ }
+
+ memzero_explicit(kbuf, dma_size);
+ dma_free_coherent(dev, dma_size, kbuf, dma_addr_data);
+ memzero_explicit(hwreq, sizeof(struct zynqmp_aead_hw_req));
+ dma_free_coherent(dev, sizeof(struct zynqmp_aead_hw_req),
+ hwreq, dma_addr_hw_req);
+ return err;
+}
+
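+/*
+ * The hardware only handles GCM with a 16-byte tag, no associated
+ * data and word-aligned payloads of at least one word (plus, for KUP,
+ * a 256-bit key); anything else is routed to the software fallback.
+ */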
+static int zynqmp_fallback_check(struct zynqmp_aead_tfm_ctx *tfm_ctx,
+ struct aead_request *req)
+{
+ int need_fallback = 0;
+ struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+
+ if (tfm_ctx->authsize != ZYNQMP_AES_AUTH_SIZE)
+ need_fallback = 1;
+
+ if (tfm_ctx->keysrc == ZYNQMP_AES_KUP_KEY &&
+ tfm_ctx->keylen != ZYNQMP_AES_KEY_SIZE) {
+ need_fallback = 1;
+ }
+ if (req->assoclen != 0 ||
+ req->cryptlen < ZYNQMP_AES_MIN_INPUT_BLK_SIZE) {
+ need_fallback = 1;
+ }
+ if ((req->cryptlen % ZYNQMP_AES_WORD_LEN) != 0)
+ need_fallback = 1;
+
+ if (rq_ctx->op == ZYNQMP_AES_DECRYPT &&
+ req->cryptlen <= ZYNQMP_AES_AUTH_SIZE) {
+ need_fallback = 1;
+ }
+ return need_fallback;
+}
+
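+/*
+ * crypto_engine worker: route the request either to the software
+ * fallback (reusing the request context as the fallback's
+ * aead_request) or to the hardware, then signal completion.
+ */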
+static int zynqmp_handle_aes_req(struct crypto_engine *engine,
+ void *req)
+{
+ struct aead_request *areq =
+ container_of(req, struct aead_request, base);
+ struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct zynqmp_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
+ struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(areq);
+ struct aead_request *subreq = aead_request_ctx(areq);
+ int need_fallback;
+ int err;
+
+ need_fallback = zynqmp_fallback_check(tfm_ctx, areq);
+
+ if (need_fallback) {
+ aead_request_set_tfm(subreq, tfm_ctx->fbk_cipher);
+
+ aead_request_set_callback(subreq, areq->base.flags,
+ NULL, NULL);
+ aead_request_set_crypt(subreq, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ aead_request_set_ad(subreq, areq->assoclen);
+ if (rq_ctx->op == ZYNQMP_AES_ENCRYPT)
+ err = crypto_aead_encrypt(subreq);
+ else
+ err = crypto_aead_decrypt(subreq);
+ } else {
+ err = zynqmp_aes_aead_cipher(areq);
+ }
+
+ crypto_finalize_aead_request(engine, areq, err);
+ return 0;
+}
+
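+/*
+ * A 1-byte "key" selects a hardware key source (0 = KUP, 1 = device,
+ * 2 = PUF key); a regular 256-bit key is stored in the context and
+ * used as the KUP key. The key material is always forwarded to the
+ * fallback cipher so it can take over when the hardware cannot.
+ */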
+static int zynqmp_aes_aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct zynqmp_aead_tfm_ctx *tfm_ctx =
+ (struct zynqmp_aead_tfm_ctx *)crypto_tfm_ctx(tfm);
+ unsigned char keysrc;
+
+ if (keylen == ZYNQMP_KEY_SRC_SEL_KEY_LEN) {
+ keysrc = *key;
+ if (keysrc == ZYNQMP_AES_KUP_KEY ||
+ keysrc == ZYNQMP_AES_DEV_KEY ||
+ keysrc == ZYNQMP_AES_PUF_KEY) {
+ tfm_ctx->keysrc = (enum zynqmp_aead_keysrc)keysrc;
+ } else {
+ tfm_ctx->keylen = keylen;
+ }
+ } else {
+ tfm_ctx->keylen = keylen;
+ if (keylen == ZYNQMP_AES_KEY_SIZE) {
+ tfm_ctx->keysrc = ZYNQMP_AES_KUP_KEY;
+ memcpy(tfm_ctx->key, key, keylen);
+ }
+ }
+
+ tfm_ctx->fbk_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ tfm_ctx->fbk_cipher->base.crt_flags |= (aead->base.crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+
+ return crypto_aead_setkey(tfm_ctx->fbk_cipher, key, keylen);
+}
+
+static int zynqmp_aes_aead_setauthsize(struct crypto_aead *aead,
+ unsigned int authsize)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct zynqmp_aead_tfm_ctx *tfm_ctx =
+ (struct zynqmp_aead_tfm_ctx *)crypto_tfm_ctx(tfm);
+
+ tfm_ctx->authsize = authsize;
+ return crypto_aead_setauthsize(tfm_ctx->fbk_cipher, authsize);
+}
+
+static int zynqmp_aes_aead_encrypt(struct aead_request *req)
+{
+ struct zynqmp_aead_drv_ctx *drv_ctx;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+
+ rq_ctx->op = ZYNQMP_AES_ENCRYPT;
+ drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead);
+
+ return crypto_transfer_aead_request_to_engine(drv_ctx->engine, req);
+}
+
+static int zynqmp_aes_aead_decrypt(struct aead_request *req)
+{
+ struct zynqmp_aead_drv_ctx *drv_ctx;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+
+ rq_ctx->op = ZYNQMP_AES_DECRYPT;
+ drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead);
+
+ return crypto_transfer_aead_request_to_engine(drv_ctx->engine, req);
+}
+
+static int zynqmp_aes_aead_init(struct crypto_aead *aead)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct zynqmp_aead_tfm_ctx *tfm_ctx =
+ (struct zynqmp_aead_tfm_ctx *)crypto_tfm_ctx(tfm);
+ struct zynqmp_aead_drv_ctx *drv_ctx;
+ struct aead_alg *alg = crypto_aead_alg(aead);
+
+ drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead);
+ tfm_ctx->dev = drv_ctx->dev;
+
+ tfm_ctx->engine_ctx.op.do_one_request = zynqmp_handle_aes_req;
+ tfm_ctx->engine_ctx.op.prepare_request = NULL;
+ tfm_ctx->engine_ctx.op.unprepare_request = NULL;
+
+ tfm_ctx->fbk_cipher = crypto_alloc_aead(drv_ctx->alg.aead.base.cra_name,
+ 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(tfm_ctx->fbk_cipher)) {
+ pr_err("%s() Error: failed to allocate fallback for %s\n",
+ __func__, drv_ctx->alg.aead.base.cra_name);
+ return PTR_ERR(tfm_ctx->fbk_cipher);
+ }
+
+ crypto_aead_set_reqsize(aead,
+ max(sizeof(struct zynqmp_aead_req_ctx),
+ sizeof(struct aead_request) +
+ crypto_aead_reqsize(tfm_ctx->fbk_cipher)));
+ return 0;
+}
+
+static void zynqmp_aes_aead_exit(struct crypto_aead *aead)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct zynqmp_aead_tfm_ctx *tfm_ctx =
+ (struct zynqmp_aead_tfm_ctx *)crypto_tfm_ctx(tfm);
+
+ if (tfm_ctx->fbk_cipher) {
+ crypto_free_aead(tfm_ctx->fbk_cipher);
+ tfm_ctx->fbk_cipher = NULL;
+ }
+ memzero_explicit(tfm_ctx, sizeof(struct zynqmp_aead_tfm_ctx));
+}
+
+static struct zynqmp_aead_drv_ctx aes_drv_ctx = {
+ .alg.aead = {
+ .setkey = zynqmp_aes_aead_setkey,
+ .setauthsize = zynqmp_aes_aead_setauthsize,
+ .encrypt = zynqmp_aes_aead_encrypt,
+ .decrypt = zynqmp_aes_aead_decrypt,
+ .init = zynqmp_aes_aead_init,
+ .exit = zynqmp_aes_aead_exit,
+ .ivsize = GCM_AES_IV_SIZE,
+ .maxauthsize = ZYNQMP_AES_AUTH_SIZE,
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "xilinx-zynqmp-aes-gcm",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = ZYNQMP_AES_BLK_SIZE,
+ .cra_ctxsize = sizeof(struct zynqmp_aead_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ }
+ }
+};
+
+static int zynqmp_aes_aead_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int err;
+
+ /* ZynqMP AES driver supports only one instance */
+ if (!aes_drv_ctx.dev)
+ aes_drv_ctx.dev = dev;
+ else
+ return -ENODEV;
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(ZYNQMP_DMA_BIT_MASK));
+ if (err < 0) {
+ dev_err(dev, "No usable DMA configuration\n");
+ return err;
+ }
+
+ aes_drv_ctx.engine = crypto_engine_alloc_init(dev, 1);
+ if (!aes_drv_ctx.engine) {
+ dev_err(dev, "Cannot alloc AES engine\n");
+ err = -ENOMEM;
+ goto err_engine;
+ }
+
+ err = crypto_engine_start(aes_drv_ctx.engine);
+ if (err) {
+ dev_err(dev, "Cannot start AES engine\n");
+ goto err_engine;
+ }
+
+ err = crypto_register_aead(&aes_drv_ctx.alg.aead);
+ if (err < 0) {
+ dev_err(dev, "Failed to register AEAD alg.\n");
+ goto err_engine;
+ }
+ return 0;
+
+err_engine:
+ if (aes_drv_ctx.engine)
+ crypto_engine_exit(aes_drv_ctx.engine);
+
+ return err;
+}
+
+static int zynqmp_aes_aead_remove(struct platform_device *pdev)
+{
+ crypto_engine_exit(aes_drv_ctx.engine);
+ crypto_unregister_aead(&aes_drv_ctx.alg.aead);
+
+ return 0;
+}
+
+static const struct of_device_id zynqmp_aes_dt_ids[] = {
+ { .compatible = "xlnx,zynqmp-aes" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, zynqmp_aes_dt_ids);
+
+static struct platform_driver zynqmp_aes_driver = {
+ .probe = zynqmp_aes_aead_probe,
+ .remove = zynqmp_aes_aead_remove,
+ .driver = {
+ .name = "zynqmp-aes",
+ .of_match_table = zynqmp_aes_dt_ids,
+ },
+};
+
+module_platform_driver(zynqmp_aes_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c
new file mode 100644
index 000000000..43ff170ff
--- /dev/null
+++ b/drivers/crypto/xilinx/zynqmp-sha.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx ZynqMP SHA Driver.
+ * Copyright (c) 2022 Xilinx Inc.
+ */
+#include <linux/cacheflush.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha3.h>
+#include <linux/crypto.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#define ZYNQMP_DMA_BIT_MASK 32U
+#define ZYNQMP_DMA_ALLOC_FIXED_SIZE 0x1000U
+
+enum zynqmp_sha_op {
+ ZYNQMP_SHA3_INIT = 1,
+ ZYNQMP_SHA3_UPDATE = 2,
+ ZYNQMP_SHA3_FINAL = 4,
+};
+
+struct zynqmp_sha_drv_ctx {
+ struct shash_alg sha3_384;
+ struct device *dev;
+};
+
+struct zynqmp_sha_tfm_ctx {
+ struct device *dev;
+ struct crypto_shash *fbk_tfm;
+};
+
+struct zynqmp_sha_desc_ctx {
+ struct shash_desc fbk_req;
+};
+
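+/*
+ * DMA-coherent bounce buffers shared driver-wide: ubuf stages data
+ * for 4 KiB UPDATE chunks, fbuf receives the final digest.
+ */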
+static dma_addr_t update_dma_addr, final_dma_addr;
+static char *ubuf, *fbuf;
+
+static int zynqmp_sha_init_tfm(struct crypto_shash *hash)
+{
+ const char *fallback_driver_name = crypto_shash_alg_name(hash);
+ struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_shash_ctx(hash);
+ struct shash_alg *alg = crypto_shash_alg(hash);
+ struct crypto_shash *fallback_tfm;
+ struct zynqmp_sha_drv_ctx *drv_ctx;
+
+ drv_ctx = container_of(alg, struct zynqmp_sha_drv_ctx, sha3_384);
+ tfm_ctx->dev = drv_ctx->dev;
+
+ /* Allocate a fallback and abort if it failed. */
+ fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback_tfm))
+ return PTR_ERR(fallback_tfm);
+
+ tfm_ctx->fbk_tfm = fallback_tfm;
+ hash->descsize += crypto_shash_descsize(tfm_ctx->fbk_tfm);
+
+ return 0;
+}
+
+static void zynqmp_sha_exit_tfm(struct crypto_shash *hash)
+{
+ struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_shash_ctx(hash);
+
+ if (tfm_ctx->fbk_tfm) {
+ crypto_free_shash(tfm_ctx->fbk_tfm);
+ tfm_ctx->fbk_tfm = NULL;
+ }
+
+ memzero_explicit(tfm_ctx, sizeof(struct zynqmp_sha_tfm_ctx));
+}
+
+static int zynqmp_sha_init(struct shash_desc *desc)
+{
+ struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+ struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+
+ dctx->fbk_req.tfm = tctx->fbk_tfm;
+ return crypto_shash_init(&dctx->fbk_req);
+}
+
+static int zynqmp_sha_update(struct shash_desc *desc, const u8 *data, unsigned int length)
+{
+ struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ return crypto_shash_update(&dctx->fbk_req, data, length);
+}
+
+static int zynqmp_sha_final(struct shash_desc *desc, u8 *out)
+{
+ struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ return crypto_shash_final(&dctx->fbk_req, out);
+}
+
+static int zynqmp_sha_finup(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out)
+{
+ struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ return crypto_shash_finup(&dctx->fbk_req, data, length, out);
+}
+
+static int zynqmp_sha_import(struct shash_desc *desc, const void *in)
+{
+ struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+ struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+
+ dctx->fbk_req.tfm = tctx->fbk_tfm;
+ return crypto_shash_import(&dctx->fbk_req, in);
+}
+
+static int zynqmp_sha_export(struct shash_desc *desc, void *out)
+{
+ struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ return crypto_shash_export(&dctx->fbk_req, out);
+}
+
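+/*
+ * Only one-shot digests are offloaded to the firmware: data is fed
+ * through the bounce buffer in 4 KiB chunks (INIT, n x UPDATE, FINAL)
+ * and the digest is copied out of the final buffer. Incremental
+ * init/update/final run on the software fallback above.
+ */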
+static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
+{
+ unsigned int remaining_len = len;
+ int update_size;
+ int ret;
+
+ ret = zynqmp_pm_sha_hash(0, 0, ZYNQMP_SHA3_INIT);
+ if (ret)
+ return ret;
+
+ while (remaining_len != 0) {
+ memzero_explicit(ubuf, ZYNQMP_DMA_ALLOC_FIXED_SIZE);
+ if (remaining_len >= ZYNQMP_DMA_ALLOC_FIXED_SIZE) {
+ update_size = ZYNQMP_DMA_ALLOC_FIXED_SIZE;
+ remaining_len -= ZYNQMP_DMA_ALLOC_FIXED_SIZE;
+ } else {
+ update_size = remaining_len;
+ remaining_len = 0;
+ }
+ memcpy(ubuf, data, update_size);
+ flush_icache_range((unsigned long)ubuf, (unsigned long)ubuf + update_size);
+ ret = zynqmp_pm_sha_hash(update_dma_addr, update_size, ZYNQMP_SHA3_UPDATE);
+ if (ret)
+ return ret;
+
+ data += update_size;
+ }
+
+ ret = zynqmp_pm_sha_hash(final_dma_addr, SHA3_384_DIGEST_SIZE, ZYNQMP_SHA3_FINAL);
+ memcpy(out, fbuf, SHA3_384_DIGEST_SIZE);
+ memzero_explicit(fbuf, SHA3_384_DIGEST_SIZE);
+
+ return ret;
+}
+
+static struct zynqmp_sha_drv_ctx sha3_drv_ctx = {
+ .sha3_384 = {
+ .init = zynqmp_sha_init,
+ .update = zynqmp_sha_update,
+ .final = zynqmp_sha_final,
+ .finup = zynqmp_sha_finup,
+ .digest = zynqmp_sha_digest,
+ .export = zynqmp_sha_export,
+ .import = zynqmp_sha_import,
+ .init_tfm = zynqmp_sha_init_tfm,
+ .exit_tfm = zynqmp_sha_exit_tfm,
+ .descsize = sizeof(struct zynqmp_sha_desc_ctx),
+ .statesize = sizeof(struct sha3_state),
+ .digestsize = SHA3_384_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha3-384",
+ .cra_driver_name = "zynqmp-sha3-384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct zynqmp_sha_tfm_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ }
+ }
+};
+
+static int zynqmp_sha_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int err;
+ u32 v;
+
+ /* Verify the hardware is present */
+ err = zynqmp_pm_get_api_version(&v);
+ if (err)
+ return err;
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(ZYNQMP_DMA_BIT_MASK));
+ if (err < 0) {
+ dev_err(dev, "No usable DMA configuration\n");
+ return err;
+ }
+
+ err = crypto_register_shash(&sha3_drv_ctx.sha3_384);
+ if (err < 0) {
+ dev_err(dev, "Failed to register shash alg.\n");
+ return err;
+ }
+
+ sha3_drv_ctx.dev = dev;
+ platform_set_drvdata(pdev, &sha3_drv_ctx);
+
+ ubuf = dma_alloc_coherent(dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, &update_dma_addr, GFP_KERNEL);
+ if (!ubuf) {
+ err = -ENOMEM;
+ goto err_shash;
+ }
+
+ fbuf = dma_alloc_coherent(dev, SHA3_384_DIGEST_SIZE, &final_dma_addr, GFP_KERNEL);
+ if (!fbuf) {
+ err = -ENOMEM;
+ goto err_mem;
+ }
+
+ return 0;
+
+err_mem:
+ dma_free_coherent(sha3_drv_ctx.dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, ubuf, update_dma_addr);
+
+err_shash:
+ crypto_unregister_shash(&sha3_drv_ctx.sha3_384);
+
+ return err;
+}
+
+static int zynqmp_sha_remove(struct platform_device *pdev)
+{
+ struct zynqmp_sha_drv_ctx *drv_ctx = platform_get_drvdata(pdev);
+
+ dma_free_coherent(drv_ctx->dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, ubuf, update_dma_addr);
+ dma_free_coherent(drv_ctx->dev, SHA3_384_DIGEST_SIZE, fbuf, final_dma_addr);
+ crypto_unregister_shash(&drv_ctx->sha3_384);
+
+ return 0;
+}
+
+static struct platform_driver zynqmp_sha_driver = {
+ .probe = zynqmp_sha_probe,
+ .remove = zynqmp_sha_remove,
+ .driver = {
+ .name = "zynqmp-sha3-384",
+ },
+};
+
+module_platform_driver(zynqmp_sha_driver);
+MODULE_DESCRIPTION("ZynqMP SHA3 hardware acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Harsha <harsha.harsha@xilinx.com>");