commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/crypto/allwinner/sun8i-ss
parent    Initial commit. (diff)
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
Adding upstream version 6.6.15.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/crypto/allwinner/sun8i-ss')
 drivers/crypto/allwinner/sun8i-ss/Makefile          |   4
 drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c | 487
 drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c   | 951
 drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c   | 717
 drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c   | 177
 drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h        | 318
 6 files changed, 2654 insertions(+), 0 deletions(-)
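
For context before the diff body: none of the functions added here are called directly by consumers. They are reached through the kernel's generic crypto API, which selects the highest-priority implementation of a given algorithm name. Below is a minimal kernel-side sketch (a hypothetical caller, not part of this commit) of encrypting one AES-CBC block through that API; if cbc-aes-sun8i-ss (cra_priority 400) wins selection and the request meets the driver's constraints, the work is offloaded to the Security System, otherwise the driver's own software fallback or another implementation serves it.

/*
 * Illustrative sketch only: drive "cbc(aes)" through the generic
 * skcipher API. The buffer is heap-allocated because the hardware
 * path DMA-maps it; error handling is kept minimal for brevity.
 */
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_encrypt_one_block(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 key[AES_KEYSIZE_128] = { 0 };	/* all-zero demo key */
	u8 iv[AES_BLOCK_SIZE] = { 0 };
	u8 *buf = NULL;
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out;

	buf = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!buf || !req) {
		err = -ENOMEM;
		goto out;
	}

	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

	/* The hardware path is asynchronous (CRYPTO_ALG_ASYNC): wait for it. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out:
	skcipher_request_free(req);
	kfree(buf);
	crypto_free_skcipher(tfm);
	return err;
}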
diff --git a/drivers/crypto/allwinner/sun8i-ss/Makefile b/drivers/crypto/allwinner/sun8i-ss/Makefile new file mode 100644 index 0000000000..aabfd893c8 --- /dev/null +++ b/drivers/crypto/allwinner/sun8i-ss/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_CRYPTO_DEV_SUN8I_SS) += sun8i-ss.o +sun8i-ss-y += sun8i-ss-core.o sun8i-ss-cipher.o +sun8i-ss-$(CONFIG_CRYPTO_DEV_SUN8I_SS_PRNG) += sun8i-ss-prng.o +sun8i-ss-$(CONFIG_CRYPTO_DEV_SUN8I_SS_HASH) += sun8i-ss-hash.o diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c new file mode 100644 index 0000000000..7fa359725e --- /dev/null +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -0,0 +1,487 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sun8i-ss-cipher.c - hardware cryptographic offloader for + * Allwinner A80/A83T SoC + * + * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com> + * + * This file add support for AES cipher with 128,192,256 bits keysize in + * CBC and ECB mode. + * + * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst + */ + +#include <linux/bottom_half.h> +#include <linux/crypto.h> +#include <linux/dma-mapping.h> +#include <linux/io.h> +#include <linux/pm_runtime.h> +#include <crypto/scatterwalk.h> +#include <crypto/internal/skcipher.h> +#include "sun8i-ss.h" + +static bool sun8i_ss_need_fallback(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct sun8i_ss_alg_template *algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher.base); + struct scatterlist *in_sg = areq->src; + struct scatterlist *out_sg = areq->dst; + struct scatterlist *sg; + unsigned int todo, len; + + if (areq->cryptlen == 0 || areq->cryptlen % 16) { + algt->stat_fb_len++; + return true; + } + + if (sg_nents_for_len(areq->src, areq->cryptlen) > 8 || + sg_nents_for_len(areq->dst, areq->cryptlen) > 8) { + algt->stat_fb_sgnum++; + return true; + } + + len = areq->cryptlen; + sg = areq->src; + while (sg) { + todo = min(len, sg->length); + if ((todo % 16) != 0) { + algt->stat_fb_sglen++; + return true; + } + if (!IS_ALIGNED(sg->offset, 16)) { + algt->stat_fb_align++; + return true; + } + len -= todo; + sg = sg_next(sg); + } + len = areq->cryptlen; + sg = areq->dst; + while (sg) { + todo = min(len, sg->length); + if ((todo % 16) != 0) { + algt->stat_fb_sglen++; + return true; + } + if (!IS_ALIGNED(sg->offset, 16)) { + algt->stat_fb_align++; + return true; + } + len -= todo; + sg = sg_next(sg); + } + + /* SS need same numbers of SG (with same length) for source and destination */ + in_sg = areq->src; + out_sg = areq->dst; + while (in_sg && out_sg) { + if (in_sg->length != out_sg->length) + return true; + in_sg = sg_next(in_sg); + out_sg = sg_next(out_sg); + } + if (in_sg || out_sg) + return true; + return false; +} + +static int sun8i_ss_cipher_fallback(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + int err; + + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) { + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct sun8i_ss_alg_template *algt __maybe_unused; + + algt = container_of(alg, struct sun8i_ss_alg_template, + alg.skcipher.base); + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + algt->stat_fb++; +#endif + } + + 
skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); + skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, + areq->base.complete, areq->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst, + areq->cryptlen, areq->iv); + if (rctx->op_dir & SS_DECRYPTION) + err = crypto_skcipher_decrypt(&rctx->fallback_req); + else + err = crypto_skcipher_encrypt(&rctx->fallback_req); + return err; +} + +static int sun8i_ss_setup_ivs(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun8i_ss_dev *ss = op->ss; + struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + struct scatterlist *sg = areq->src; + unsigned int todo, offset; + unsigned int len = areq->cryptlen; + unsigned int ivsize = crypto_skcipher_ivsize(tfm); + struct sun8i_ss_flow *sf = &ss->flows[rctx->flow]; + int i = 0; + dma_addr_t a; + int err; + + rctx->ivlen = ivsize; + if (rctx->op_dir & SS_DECRYPTION) { + offset = areq->cryptlen - ivsize; + scatterwalk_map_and_copy(sf->biv, areq->src, offset, + ivsize, 0); + } + + /* we need to copy all IVs from source in case DMA is bi-directionnal */ + while (sg && len) { + if (sg_dma_len(sg) == 0) { + sg = sg_next(sg); + continue; + } + if (i == 0) + memcpy(sf->iv[0], areq->iv, ivsize); + a = dma_map_single(ss->dev, sf->iv[i], ivsize, DMA_TO_DEVICE); + if (dma_mapping_error(ss->dev, a)) { + memzero_explicit(sf->iv[i], ivsize); + dev_err(ss->dev, "Cannot DMA MAP IV\n"); + err = -EFAULT; + goto dma_iv_error; + } + rctx->p_iv[i] = a; + /* we need to setup all others IVs only in the decrypt way */ + if (rctx->op_dir == SS_ENCRYPTION) + return 0; + todo = min(len, sg_dma_len(sg)); + len -= todo; + i++; + if (i < MAX_SG) { + offset = sg->length - ivsize; + scatterwalk_map_and_copy(sf->iv[i], sg, offset, ivsize, 0); + } + rctx->niv = i; + sg = sg_next(sg); + } + + return 0; +dma_iv_error: + i--; + while (i >= 0) { + dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE); + memzero_explicit(sf->iv[i], ivsize); + i--; + } + return err; +} + +static int sun8i_ss_cipher(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun8i_ss_dev *ss = op->ss; + struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct sun8i_ss_alg_template *algt; + struct sun8i_ss_flow *sf = &ss->flows[rctx->flow]; + struct scatterlist *sg; + unsigned int todo, len, offset, ivsize; + int nr_sgs = 0; + int nr_sgd = 0; + int err = 0; + int nsgs = sg_nents_for_len(areq->src, areq->cryptlen); + int nsgd = sg_nents_for_len(areq->dst, areq->cryptlen); + int i; + + algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher.base); + + dev_dbg(ss->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__, + crypto_tfm_alg_name(areq->base.tfm), + areq->cryptlen, + rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm), + op->keylen); + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + algt->stat_req++; +#endif + + rctx->op_mode = ss->variant->op_mode[algt->ss_blockmode]; + rctx->method = ss->variant->alg_cipher[algt->ss_algo_id]; + rctx->keylen = op->keylen; + + rctx->p_key = dma_map_single(ss->dev, op->key, op->keylen, DMA_TO_DEVICE); + if (dma_mapping_error(ss->dev, rctx->p_key)) { + dev_err(ss->dev, "Cannot DMA MAP KEY\n"); + err = -EFAULT; + goto theend; + } + + ivsize = 
crypto_skcipher_ivsize(tfm); + if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) { + err = sun8i_ss_setup_ivs(areq); + if (err) + goto theend_key; + } + if (areq->src == areq->dst) { + nr_sgs = dma_map_sg(ss->dev, areq->src, nsgs, DMA_BIDIRECTIONAL); + if (nr_sgs <= 0 || nr_sgs > 8) { + dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs); + err = -EINVAL; + goto theend_iv; + } + nr_sgd = nr_sgs; + } else { + nr_sgs = dma_map_sg(ss->dev, areq->src, nsgs, DMA_TO_DEVICE); + if (nr_sgs <= 0 || nr_sgs > 8) { + dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs); + err = -EINVAL; + goto theend_iv; + } + nr_sgd = dma_map_sg(ss->dev, areq->dst, nsgd, DMA_FROM_DEVICE); + if (nr_sgd <= 0 || nr_sgd > 8) { + dev_err(ss->dev, "Invalid sg number %d\n", nr_sgd); + err = -EINVAL; + goto theend_sgs; + } + } + + len = areq->cryptlen; + i = 0; + sg = areq->src; + while (i < nr_sgs && sg && len) { + if (sg_dma_len(sg) == 0) + goto sgs_next; + rctx->t_src[i].addr = sg_dma_address(sg); + todo = min(len, sg_dma_len(sg)); + rctx->t_src[i].len = todo / 4; + dev_dbg(ss->dev, "%s total=%u SGS(%d %u off=%d) todo=%u\n", __func__, + areq->cryptlen, i, rctx->t_src[i].len, sg->offset, todo); + len -= todo; + i++; +sgs_next: + sg = sg_next(sg); + } + if (len > 0) { + dev_err(ss->dev, "remaining len %d\n", len); + err = -EINVAL; + goto theend_sgs; + } + + len = areq->cryptlen; + i = 0; + sg = areq->dst; + while (i < nr_sgd && sg && len) { + if (sg_dma_len(sg) == 0) + goto sgd_next; + rctx->t_dst[i].addr = sg_dma_address(sg); + todo = min(len, sg_dma_len(sg)); + rctx->t_dst[i].len = todo / 4; + dev_dbg(ss->dev, "%s total=%u SGD(%d %u off=%d) todo=%u\n", __func__, + areq->cryptlen, i, rctx->t_dst[i].len, sg->offset, todo); + len -= todo; + i++; +sgd_next: + sg = sg_next(sg); + } + if (len > 0) { + dev_err(ss->dev, "remaining len %d\n", len); + err = -EINVAL; + goto theend_sgs; + } + + err = sun8i_ss_run_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm)); + +theend_sgs: + if (areq->src == areq->dst) { + dma_unmap_sg(ss->dev, areq->src, nsgs, DMA_BIDIRECTIONAL); + } else { + dma_unmap_sg(ss->dev, areq->src, nsgs, DMA_TO_DEVICE); + dma_unmap_sg(ss->dev, areq->dst, nsgd, DMA_FROM_DEVICE); + } + +theend_iv: + if (areq->iv && ivsize > 0) { + for (i = 0; i < rctx->niv; i++) { + dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE); + memzero_explicit(sf->iv[i], ivsize); + } + + offset = areq->cryptlen - ivsize; + if (rctx->op_dir & SS_DECRYPTION) { + memcpy(areq->iv, sf->biv, ivsize); + memzero_explicit(sf->biv, ivsize); + } else { + scatterwalk_map_and_copy(areq->iv, areq->dst, offset, + ivsize, 0); + } + } + +theend_key: + dma_unmap_single(ss->dev, rctx->p_key, op->keylen, DMA_TO_DEVICE); + +theend: + + return err; +} + +int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq) +{ + int err; + struct skcipher_request *breq = container_of(areq, struct skcipher_request, base); + + err = sun8i_ss_cipher(breq); + local_bh_disable(); + crypto_finalize_skcipher_request(engine, breq, err); + local_bh_enable(); + + return 0; +} + +int sun8i_ss_skdecrypt(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + struct crypto_engine *engine; + int e; + + memset(rctx, 0, sizeof(struct sun8i_cipher_req_ctx)); + rctx->op_dir = SS_DECRYPTION; + + if (sun8i_ss_need_fallback(areq)) + return sun8i_ss_cipher_fallback(areq); + + e = 
sun8i_ss_get_engine_number(op->ss); + engine = op->ss->flows[e].engine; + rctx->flow = e; + + return crypto_transfer_skcipher_request_to_engine(engine, areq); +} + +int sun8i_ss_skencrypt(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + struct crypto_engine *engine; + int e; + + memset(rctx, 0, sizeof(struct sun8i_cipher_req_ctx)); + rctx->op_dir = SS_ENCRYPTION; + + if (sun8i_ss_need_fallback(areq)) + return sun8i_ss_cipher_fallback(areq); + + e = sun8i_ss_get_engine_number(op->ss); + engine = op->ss->flows[e].engine; + rctx->flow = e; + + return crypto_transfer_skcipher_request_to_engine(engine, areq); +} + +int sun8i_ss_cipher_init(struct crypto_tfm *tfm) +{ + struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm); + struct sun8i_ss_alg_template *algt; + const char *name = crypto_tfm_alg_name(tfm); + struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm); + struct skcipher_alg *alg = crypto_skcipher_alg(sktfm); + int err; + + memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx)); + + algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher.base); + op->ss = algt->ss; + + op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(op->fallback_tfm)) { + dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n", + name, PTR_ERR(op->fallback_tfm)); + return PTR_ERR(op->fallback_tfm); + } + + sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) + + crypto_skcipher_reqsize(op->fallback_tfm); + + + memcpy(algt->fbname, + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)), + CRYPTO_MAX_ALG_NAME); + + err = pm_runtime_resume_and_get(op->ss->dev); + if (err < 0) { + dev_err(op->ss->dev, "pm error %d\n", err); + goto error_pm; + } + + return 0; +error_pm: + crypto_free_skcipher(op->fallback_tfm); + return err; +} + +void sun8i_ss_cipher_exit(struct crypto_tfm *tfm) +{ + struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm); + + kfree_sensitive(op->key); + crypto_free_skcipher(op->fallback_tfm); + pm_runtime_put_sync(op->ss->dev); +} + +int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun8i_ss_dev *ss = op->ss; + + switch (keylen) { + case 128 / 8: + break; + case 192 / 8: + break; + case 256 / 8: + break; + default: + dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen); + return -EINVAL; + } + kfree_sensitive(op->key); + op->keylen = keylen; + op->key = kmemdup(key, keylen, GFP_KERNEL); + if (!op->key) + return -ENOMEM; + + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); +} + +int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun8i_ss_dev *ss = op->ss; + + if (unlikely(keylen != 3 * DES_KEY_SIZE)) { + dev_dbg(ss->dev, "Invalid keylen %u\n", keylen); + return -EINVAL; + } + + kfree_sensitive(op->key); + op->keylen = keylen; + op->key = kmemdup(key, keylen, GFP_KERNEL); + if (!op->key) + return -ENOMEM; + + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & 
CRYPTO_TFM_REQ_MASK); + + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); +} diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c new file mode 100644 index 0000000000..4a9587285c --- /dev/null +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c @@ -0,0 +1,951 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sun8i-ss-core.c - hardware cryptographic offloader for + * Allwinner A80/A83T SoC + * + * Copyright (C) 2015-2019 Corentin Labbe <clabbe.montjoie@gmail.com> + * + * Core file which registers crypto algorithms supported by the SecuritySystem + * + * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst + */ + +#include <crypto/engine.h> +#include <crypto/internal/rng.h> +#include <crypto/internal/skcipher.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> + +#include "sun8i-ss.h" + +static const struct ss_variant ss_a80_variant = { + .alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES, + }, + .alg_hash = { SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP, + }, + .op_mode = { SS_OP_ECB, SS_OP_CBC, + }, + .ss_clks = { + { "bus", 0, 300 * 1000 * 1000 }, + { "mod", 0, 300 * 1000 * 1000 }, + } +}; + +static const struct ss_variant ss_a83t_variant = { + .alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES, + }, + .alg_hash = { SS_ALG_MD5, SS_ALG_SHA1, SS_ALG_SHA224, SS_ALG_SHA256, + }, + .op_mode = { SS_OP_ECB, SS_OP_CBC, + }, + .ss_clks = { + { "bus", 0, 300 * 1000 * 1000 }, + { "mod", 0, 300 * 1000 * 1000 }, + } +}; + +/* + * sun8i_ss_get_engine_number() get the next channel slot + * This is a simple round-robin way of getting the next channel + */ +int sun8i_ss_get_engine_number(struct sun8i_ss_dev *ss) +{ + return atomic_inc_return(&ss->flow) % MAXFLOW; +} + +int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx, + const char *name) +{ + int flow = rctx->flow; + unsigned int ivlen = rctx->ivlen; + u32 v = SS_START; + int i; + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + ss->flows[flow].stat_req++; +#endif + + /* choose between stream0/stream1 */ + if (flow) + v |= SS_FLOW1; + else + v |= SS_FLOW0; + + v |= rctx->op_mode; + v |= rctx->method; + + if (rctx->op_dir) + v |= SS_DECRYPTION; + + switch (rctx->keylen) { + case 128 / 8: + v |= SS_AES_128BITS << 7; + break; + case 192 / 8: + v |= SS_AES_192BITS << 7; + break; + case 256 / 8: + v |= SS_AES_256BITS << 7; + break; + } + + for (i = 0; i < MAX_SG; i++) { + if (!rctx->t_dst[i].addr) + break; + + mutex_lock(&ss->mlock); + writel(rctx->p_key, ss->base + SS_KEY_ADR_REG); + + if (ivlen) { + if (rctx->op_dir == SS_ENCRYPTION) { + if (i == 0) + writel(rctx->p_iv[0], ss->base + SS_IV_ADR_REG); + else + writel(rctx->t_dst[i - 1].addr + rctx->t_dst[i - 1].len * 4 - ivlen, ss->base + SS_IV_ADR_REG); + } else { + writel(rctx->p_iv[i], ss->base + SS_IV_ADR_REG); + } + } + + dev_dbg(ss->dev, + "Processing SG %d on flow %d %s ctl=%x %d to %d method=%x opmode=%x opdir=%x srclen=%d\n", + i, flow, name, v, + rctx->t_src[i].len, rctx->t_dst[i].len, + rctx->method, rctx->op_mode, + rctx->op_dir, rctx->t_src[i].len); + + writel(rctx->t_src[i].addr, ss->base + SS_SRC_ADR_REG); + writel(rctx->t_dst[i].addr, ss->base + 
SS_DST_ADR_REG); + writel(rctx->t_src[i].len, ss->base + SS_LEN_ADR_REG); + + reinit_completion(&ss->flows[flow].complete); + ss->flows[flow].status = 0; + wmb(); + + writel(v, ss->base + SS_CTL_REG); + mutex_unlock(&ss->mlock); + wait_for_completion_interruptible_timeout(&ss->flows[flow].complete, + msecs_to_jiffies(2000)); + if (ss->flows[flow].status == 0) { + dev_err(ss->dev, "DMA timeout for %s\n", name); + return -EFAULT; + } + } + + return 0; +} + +static irqreturn_t ss_irq_handler(int irq, void *data) +{ + struct sun8i_ss_dev *ss = (struct sun8i_ss_dev *)data; + int flow = 0; + u32 p; + + p = readl(ss->base + SS_INT_STA_REG); + for (flow = 0; flow < MAXFLOW; flow++) { + if (p & (BIT(flow))) { + writel(BIT(flow), ss->base + SS_INT_STA_REG); + ss->flows[flow].status = 1; + complete(&ss->flows[flow].complete); + } + } + + return IRQ_HANDLED; +} + +static struct sun8i_ss_alg_template ss_algs[] = { +{ + .type = CRYPTO_ALG_TYPE_SKCIPHER, + .ss_algo_id = SS_ID_CIPHER_AES, + .ss_blockmode = SS_ID_OP_CBC, + .alg.skcipher.base = { + .base = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-sun8i-ss", + .cra_priority = 400, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 0xf, + .cra_init = sun8i_ss_cipher_init, + .cra_exit = sun8i_ss_cipher_exit, + }, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = sun8i_ss_aes_setkey, + .encrypt = sun8i_ss_skencrypt, + .decrypt = sun8i_ss_skdecrypt, + }, + .alg.skcipher.op = { + .do_one_request = sun8i_ss_handle_cipher_request, + }, +}, +{ + .type = CRYPTO_ALG_TYPE_SKCIPHER, + .ss_algo_id = SS_ID_CIPHER_AES, + .ss_blockmode = SS_ID_OP_ECB, + .alg.skcipher.base = { + .base = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-sun8i-ss", + .cra_priority = 400, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 0xf, + .cra_init = sun8i_ss_cipher_init, + .cra_exit = sun8i_ss_cipher_exit, + }, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = sun8i_ss_aes_setkey, + .encrypt = sun8i_ss_skencrypt, + .decrypt = sun8i_ss_skdecrypt, + }, + .alg.skcipher.op = { + .do_one_request = sun8i_ss_handle_cipher_request, + }, +}, +{ + .type = CRYPTO_ALG_TYPE_SKCIPHER, + .ss_algo_id = SS_ID_CIPHER_DES3, + .ss_blockmode = SS_ID_OP_CBC, + .alg.skcipher.base = { + .base = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "cbc-des3-sun8i-ss", + .cra_priority = 400, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 0xf, + .cra_init = sun8i_ss_cipher_init, + .cra_exit = sun8i_ss_cipher_exit, + }, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setkey = sun8i_ss_des3_setkey, + .encrypt = sun8i_ss_skencrypt, + .decrypt = sun8i_ss_skdecrypt, + }, + .alg.skcipher.op = { + .do_one_request = sun8i_ss_handle_cipher_request, + }, +}, +{ + .type = CRYPTO_ALG_TYPE_SKCIPHER, + .ss_algo_id = SS_ID_CIPHER_DES3, 
+ .ss_blockmode = SS_ID_OP_ECB, + .alg.skcipher.base = { + .base = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "ecb-des3-sun8i-ss", + .cra_priority = 400, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 0xf, + .cra_init = sun8i_ss_cipher_init, + .cra_exit = sun8i_ss_cipher_exit, + }, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .setkey = sun8i_ss_des3_setkey, + .encrypt = sun8i_ss_skencrypt, + .decrypt = sun8i_ss_skdecrypt, + }, + .alg.skcipher.op = { + .do_one_request = sun8i_ss_handle_cipher_request, + }, +}, +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_PRNG +{ + .type = CRYPTO_ALG_TYPE_RNG, + .alg.rng = { + .base = { + .cra_name = "stdrng", + .cra_driver_name = "sun8i-ss-prng", + .cra_priority = 300, + .cra_ctxsize = sizeof(struct sun8i_ss_rng_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_init = sun8i_ss_prng_init, + .cra_exit = sun8i_ss_prng_exit, + }, + .generate = sun8i_ss_prng_generate, + .seed = sun8i_ss_prng_seed, + .seedsize = PRNG_SEED_SIZE, + } +}, +#endif +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_HASH +{ .type = CRYPTO_ALG_TYPE_AHASH, + .ss_algo_id = SS_ID_HASH_MD5, + .alg.hash.base = { + .init = sun8i_ss_hash_init, + .update = sun8i_ss_hash_update, + .final = sun8i_ss_hash_final, + .finup = sun8i_ss_hash_finup, + .digest = sun8i_ss_hash_digest, + .export = sun8i_ss_hash_export, + .import = sun8i_ss_hash_import, + .init_tfm = sun8i_ss_hash_init_tfm, + .exit_tfm = sun8i_ss_hash_exit_tfm, + .halg = { + .digestsize = MD5_DIGEST_SIZE, + .statesize = sizeof(struct md5_state), + .base = { + .cra_name = "md5", + .cra_driver_name = "md5-sun8i-ss", + .cra_priority = 300, + .cra_alignmask = 3, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx), + .cra_module = THIS_MODULE, + } + } + }, + .alg.hash.op = { + .do_one_request = sun8i_ss_hash_run, + }, +}, +{ .type = CRYPTO_ALG_TYPE_AHASH, + .ss_algo_id = SS_ID_HASH_SHA1, + .alg.hash.base = { + .init = sun8i_ss_hash_init, + .update = sun8i_ss_hash_update, + .final = sun8i_ss_hash_final, + .finup = sun8i_ss_hash_finup, + .digest = sun8i_ss_hash_digest, + .export = sun8i_ss_hash_export, + .import = sun8i_ss_hash_import, + .init_tfm = sun8i_ss_hash_init_tfm, + .exit_tfm = sun8i_ss_hash_exit_tfm, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct sha1_state), + .base = { + .cra_name = "sha1", + .cra_driver_name = "sha1-sun8i-ss", + .cra_priority = 300, + .cra_alignmask = 3, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx), + .cra_module = THIS_MODULE, + } + } + }, + .alg.hash.op = { + .do_one_request = sun8i_ss_hash_run, + }, +}, +{ .type = CRYPTO_ALG_TYPE_AHASH, + .ss_algo_id = SS_ID_HASH_SHA224, + .alg.hash.base = { + .init = sun8i_ss_hash_init, + .update = sun8i_ss_hash_update, + .final = sun8i_ss_hash_final, + .finup = sun8i_ss_hash_finup, + .digest = sun8i_ss_hash_digest, + .export = sun8i_ss_hash_export, + .import = sun8i_ss_hash_import, + .init_tfm = sun8i_ss_hash_init_tfm, + .exit_tfm = sun8i_ss_hash_exit_tfm, + .halg = { + .digestsize = SHA224_DIGEST_SIZE, + .statesize = sizeof(struct sha256_state), + .base = { + .cra_name = 
"sha224", + .cra_driver_name = "sha224-sun8i-ss", + .cra_priority = 300, + .cra_alignmask = 3, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx), + .cra_module = THIS_MODULE, + } + } + }, + .alg.hash.op = { + .do_one_request = sun8i_ss_hash_run, + }, +}, +{ .type = CRYPTO_ALG_TYPE_AHASH, + .ss_algo_id = SS_ID_HASH_SHA256, + .alg.hash.base = { + .init = sun8i_ss_hash_init, + .update = sun8i_ss_hash_update, + .final = sun8i_ss_hash_final, + .finup = sun8i_ss_hash_finup, + .digest = sun8i_ss_hash_digest, + .export = sun8i_ss_hash_export, + .import = sun8i_ss_hash_import, + .init_tfm = sun8i_ss_hash_init_tfm, + .exit_tfm = sun8i_ss_hash_exit_tfm, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct sha256_state), + .base = { + .cra_name = "sha256", + .cra_driver_name = "sha256-sun8i-ss", + .cra_priority = 300, + .cra_alignmask = 3, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx), + .cra_module = THIS_MODULE, + } + } + }, + .alg.hash.op = { + .do_one_request = sun8i_ss_hash_run, + }, +}, +{ .type = CRYPTO_ALG_TYPE_AHASH, + .ss_algo_id = SS_ID_HASH_SHA1, + .alg.hash.base = { + .init = sun8i_ss_hash_init, + .update = sun8i_ss_hash_update, + .final = sun8i_ss_hash_final, + .finup = sun8i_ss_hash_finup, + .digest = sun8i_ss_hash_digest, + .export = sun8i_ss_hash_export, + .import = sun8i_ss_hash_import, + .init_tfm = sun8i_ss_hash_init_tfm, + .exit_tfm = sun8i_ss_hash_exit_tfm, + .setkey = sun8i_ss_hmac_setkey, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct sha1_state), + .base = { + .cra_name = "hmac(sha1)", + .cra_driver_name = "hmac-sha1-sun8i-ss", + .cra_priority = 300, + .cra_alignmask = 3, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx), + .cra_module = THIS_MODULE, + } + } + }, + .alg.hash.op = { + .do_one_request = sun8i_ss_hash_run, + }, +}, +#endif +}; + +static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v) +{ + struct sun8i_ss_dev *ss __maybe_unused = seq->private; + unsigned int i; + + for (i = 0; i < MAXFLOW; i++) + seq_printf(seq, "Channel %d: nreq %lu\n", i, +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + ss->flows[i].stat_req); +#else + 0ul); +#endif + + for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { + if (!ss_algs[i].ss) + continue; + switch (ss_algs[i].type) { + case CRYPTO_ALG_TYPE_SKCIPHER: + seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n", + ss_algs[i].alg.skcipher.base.base.cra_driver_name, + ss_algs[i].alg.skcipher.base.base.cra_name, + ss_algs[i].stat_req, ss_algs[i].stat_fb); + + seq_printf(seq, "\tLast fallback is: %s\n", + ss_algs[i].fbname); + seq_printf(seq, "\tFallback due to length: %lu\n", + ss_algs[i].stat_fb_len); + seq_printf(seq, "\tFallback due to SG length: %lu\n", + ss_algs[i].stat_fb_sglen); + seq_printf(seq, "\tFallback due to alignment: %lu\n", + ss_algs[i].stat_fb_align); + seq_printf(seq, "\tFallback due to SG numbers: %lu\n", + ss_algs[i].stat_fb_sgnum); + break; + case CRYPTO_ALG_TYPE_RNG: + seq_printf(seq, "%s %s reqs=%lu tsize=%lu\n", + ss_algs[i].alg.rng.base.cra_driver_name, + ss_algs[i].alg.rng.base.cra_name, + ss_algs[i].stat_req, ss_algs[i].stat_bytes); + break; + case CRYPTO_ALG_TYPE_AHASH: + 
seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n", + ss_algs[i].alg.hash.base.halg.base.cra_driver_name, + ss_algs[i].alg.hash.base.halg.base.cra_name, + ss_algs[i].stat_req, ss_algs[i].stat_fb); + seq_printf(seq, "\tLast fallback is: %s\n", + ss_algs[i].fbname); + seq_printf(seq, "\tFallback due to length: %lu\n", + ss_algs[i].stat_fb_len); + seq_printf(seq, "\tFallback due to SG length: %lu\n", + ss_algs[i].stat_fb_sglen); + seq_printf(seq, "\tFallback due to alignment: %lu\n", + ss_algs[i].stat_fb_align); + seq_printf(seq, "\tFallback due to SG numbers: %lu\n", + ss_algs[i].stat_fb_sgnum); + break; + } + } + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(sun8i_ss_debugfs); + +static void sun8i_ss_free_flows(struct sun8i_ss_dev *ss, int i) +{ + while (i >= 0) { + crypto_engine_exit(ss->flows[i].engine); + i--; + } +} + +/* + * Allocate the flow list structure + */ +static int allocate_flows(struct sun8i_ss_dev *ss) +{ + int i, j, err; + + ss->flows = devm_kcalloc(ss->dev, MAXFLOW, sizeof(struct sun8i_ss_flow), + GFP_KERNEL); + if (!ss->flows) + return -ENOMEM; + + for (i = 0; i < MAXFLOW; i++) { + init_completion(&ss->flows[i].complete); + + ss->flows[i].biv = devm_kmalloc(ss->dev, AES_BLOCK_SIZE, + GFP_KERNEL); + if (!ss->flows[i].biv) { + err = -ENOMEM; + goto error_engine; + } + + for (j = 0; j < MAX_SG; j++) { + ss->flows[i].iv[j] = devm_kmalloc(ss->dev, AES_BLOCK_SIZE, + GFP_KERNEL); + if (!ss->flows[i].iv[j]) { + err = -ENOMEM; + goto error_engine; + } + } + + /* the padding could be up to two block. */ + ss->flows[i].pad = devm_kmalloc(ss->dev, MAX_PAD_SIZE, + GFP_KERNEL); + if (!ss->flows[i].pad) { + err = -ENOMEM; + goto error_engine; + } + ss->flows[i].result = + devm_kmalloc(ss->dev, max(SHA256_DIGEST_SIZE, + dma_get_cache_alignment()), + GFP_KERNEL); + if (!ss->flows[i].result) { + err = -ENOMEM; + goto error_engine; + } + + ss->flows[i].engine = crypto_engine_alloc_init(ss->dev, true); + if (!ss->flows[i].engine) { + dev_err(ss->dev, "Cannot allocate engine\n"); + i--; + err = -ENOMEM; + goto error_engine; + } + err = crypto_engine_start(ss->flows[i].engine); + if (err) { + dev_err(ss->dev, "Cannot start engine\n"); + goto error_engine; + } + } + return 0; +error_engine: + sun8i_ss_free_flows(ss, i); + return err; +} + +/* + * Power management strategy: The device is suspended unless a TFM exists for + * one of the algorithms proposed by this driver. 
+ */ +static int sun8i_ss_pm_suspend(struct device *dev) +{ + struct sun8i_ss_dev *ss = dev_get_drvdata(dev); + int i; + + reset_control_assert(ss->reset); + for (i = 0; i < SS_MAX_CLOCKS; i++) + clk_disable_unprepare(ss->ssclks[i]); + return 0; +} + +static int sun8i_ss_pm_resume(struct device *dev) +{ + struct sun8i_ss_dev *ss = dev_get_drvdata(dev); + int err, i; + + for (i = 0; i < SS_MAX_CLOCKS; i++) { + if (!ss->variant->ss_clks[i].name) + continue; + err = clk_prepare_enable(ss->ssclks[i]); + if (err) { + dev_err(ss->dev, "Cannot prepare_enable %s\n", + ss->variant->ss_clks[i].name); + goto error; + } + } + err = reset_control_deassert(ss->reset); + if (err) { + dev_err(ss->dev, "Cannot deassert reset control\n"); + goto error; + } + /* enable interrupts for all flows */ + writel(BIT(0) | BIT(1), ss->base + SS_INT_CTL_REG); + + return 0; +error: + sun8i_ss_pm_suspend(dev); + return err; +} + +static const struct dev_pm_ops sun8i_ss_pm_ops = { + SET_RUNTIME_PM_OPS(sun8i_ss_pm_suspend, sun8i_ss_pm_resume, NULL) +}; + +static int sun8i_ss_pm_init(struct sun8i_ss_dev *ss) +{ + int err; + + pm_runtime_use_autosuspend(ss->dev); + pm_runtime_set_autosuspend_delay(ss->dev, 2000); + + err = pm_runtime_set_suspended(ss->dev); + if (err) + return err; + pm_runtime_enable(ss->dev); + return err; +} + +static void sun8i_ss_pm_exit(struct sun8i_ss_dev *ss) +{ + pm_runtime_disable(ss->dev); +} + +static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss) +{ + int ss_method, err, id; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { + ss_algs[i].ss = ss; + switch (ss_algs[i].type) { + case CRYPTO_ALG_TYPE_SKCIPHER: + id = ss_algs[i].ss_algo_id; + ss_method = ss->variant->alg_cipher[id]; + if (ss_method == SS_ID_NOTSUPP) { + dev_info(ss->dev, + "DEBUG: Algo of %s not supported\n", + ss_algs[i].alg.skcipher.base.base.cra_name); + ss_algs[i].ss = NULL; + break; + } + id = ss_algs[i].ss_blockmode; + ss_method = ss->variant->op_mode[id]; + if (ss_method == SS_ID_NOTSUPP) { + dev_info(ss->dev, "DEBUG: Blockmode of %s not supported\n", + ss_algs[i].alg.skcipher.base.base.cra_name); + ss_algs[i].ss = NULL; + break; + } + dev_info(ss->dev, "DEBUG: Register %s\n", + ss_algs[i].alg.skcipher.base.base.cra_name); + err = crypto_engine_register_skcipher(&ss_algs[i].alg.skcipher); + if (err) { + dev_err(ss->dev, "Fail to register %s\n", + ss_algs[i].alg.skcipher.base.base.cra_name); + ss_algs[i].ss = NULL; + return err; + } + break; + case CRYPTO_ALG_TYPE_RNG: + err = crypto_register_rng(&ss_algs[i].alg.rng); + if (err) { + dev_err(ss->dev, "Fail to register %s\n", + ss_algs[i].alg.rng.base.cra_name); + ss_algs[i].ss = NULL; + } + break; + case CRYPTO_ALG_TYPE_AHASH: + id = ss_algs[i].ss_algo_id; + ss_method = ss->variant->alg_hash[id]; + if (ss_method == SS_ID_NOTSUPP) { + dev_info(ss->dev, + "DEBUG: Algo of %s not supported\n", + ss_algs[i].alg.hash.base.halg.base.cra_name); + ss_algs[i].ss = NULL; + break; + } + dev_info(ss->dev, "Register %s\n", + ss_algs[i].alg.hash.base.halg.base.cra_name); + err = crypto_engine_register_ahash(&ss_algs[i].alg.hash); + if (err) { + dev_err(ss->dev, "ERROR: Fail to register %s\n", + ss_algs[i].alg.hash.base.halg.base.cra_name); + ss_algs[i].ss = NULL; + return err; + } + break; + default: + ss_algs[i].ss = NULL; + dev_err(ss->dev, "ERROR: tried to register an unknown algo\n"); + } + } + return 0; +} + +static void sun8i_ss_unregister_algs(struct sun8i_ss_dev *ss) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { + if (!ss_algs[i].ss) + 
continue; + switch (ss_algs[i].type) { + case CRYPTO_ALG_TYPE_SKCIPHER: + dev_info(ss->dev, "Unregister %d %s\n", i, + ss_algs[i].alg.skcipher.base.base.cra_name); + crypto_engine_unregister_skcipher(&ss_algs[i].alg.skcipher); + break; + case CRYPTO_ALG_TYPE_RNG: + dev_info(ss->dev, "Unregister %d %s\n", i, + ss_algs[i].alg.rng.base.cra_name); + crypto_unregister_rng(&ss_algs[i].alg.rng); + break; + case CRYPTO_ALG_TYPE_AHASH: + dev_info(ss->dev, "Unregister %d %s\n", i, + ss_algs[i].alg.hash.base.halg.base.cra_name); + crypto_engine_unregister_ahash(&ss_algs[i].alg.hash); + break; + } + } +} + +static int sun8i_ss_get_clks(struct sun8i_ss_dev *ss) +{ + unsigned long cr; + int err, i; + + for (i = 0; i < SS_MAX_CLOCKS; i++) { + if (!ss->variant->ss_clks[i].name) + continue; + ss->ssclks[i] = devm_clk_get(ss->dev, ss->variant->ss_clks[i].name); + if (IS_ERR(ss->ssclks[i])) { + err = PTR_ERR(ss->ssclks[i]); + dev_err(ss->dev, "Cannot get %s SS clock err=%d\n", + ss->variant->ss_clks[i].name, err); + return err; + } + cr = clk_get_rate(ss->ssclks[i]); + if (!cr) + return -EINVAL; + if (ss->variant->ss_clks[i].freq > 0 && + cr != ss->variant->ss_clks[i].freq) { + dev_info(ss->dev, "Set %s clock to %lu (%lu Mhz) from %lu (%lu Mhz)\n", + ss->variant->ss_clks[i].name, + ss->variant->ss_clks[i].freq, + ss->variant->ss_clks[i].freq / 1000000, + cr, cr / 1000000); + err = clk_set_rate(ss->ssclks[i], ss->variant->ss_clks[i].freq); + if (err) + dev_err(ss->dev, "Fail to set %s clk speed to %lu hz\n", + ss->variant->ss_clks[i].name, + ss->variant->ss_clks[i].freq); + } + if (ss->variant->ss_clks[i].max_freq > 0 && + cr > ss->variant->ss_clks[i].max_freq) + dev_warn(ss->dev, "Frequency for %s (%lu hz) is higher than datasheet's recommendation (%lu hz)", + ss->variant->ss_clks[i].name, cr, + ss->variant->ss_clks[i].max_freq); + } + return 0; +} + +static int sun8i_ss_probe(struct platform_device *pdev) +{ + struct sun8i_ss_dev *ss; + int err, irq; + u32 v; + + ss = devm_kzalloc(&pdev->dev, sizeof(*ss), GFP_KERNEL); + if (!ss) + return -ENOMEM; + + ss->dev = &pdev->dev; + platform_set_drvdata(pdev, ss); + + ss->variant = of_device_get_match_data(&pdev->dev); + if (!ss->variant) { + dev_err(&pdev->dev, "Missing Crypto Engine variant\n"); + return -EINVAL; + } + + ss->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ss->base)) + return PTR_ERR(ss->base); + + err = sun8i_ss_get_clks(ss); + if (err) + return err; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ss->reset = devm_reset_control_get(&pdev->dev, NULL); + if (IS_ERR(ss->reset)) + return dev_err_probe(&pdev->dev, PTR_ERR(ss->reset), + "No reset control found\n"); + + mutex_init(&ss->mlock); + + err = allocate_flows(ss); + if (err) + return err; + + err = sun8i_ss_pm_init(ss); + if (err) + goto error_pm; + + err = devm_request_irq(&pdev->dev, irq, ss_irq_handler, 0, "sun8i-ss", ss); + if (err) { + dev_err(ss->dev, "Cannot request SecuritySystem IRQ (err=%d)\n", err); + goto error_irq; + } + + err = sun8i_ss_register_algs(ss); + if (err) + goto error_alg; + + err = pm_runtime_resume_and_get(ss->dev); + if (err < 0) + goto error_alg; + + v = readl(ss->base + SS_CTL_REG); + v >>= SS_DIE_ID_SHIFT; + v &= SS_DIE_ID_MASK; + dev_info(&pdev->dev, "Security System Die ID %x\n", v); + + pm_runtime_put_sync(ss->dev); + + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) { + struct dentry *dbgfs_dir __maybe_unused; + struct dentry *dbgfs_stats __maybe_unused; + + /* Ignore error of debugfs */ + dbgfs_dir = 
debugfs_create_dir("sun8i-ss", NULL); + dbgfs_stats = debugfs_create_file("stats", 0444, + dbgfs_dir, ss, + &sun8i_ss_debugfs_fops); + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + ss->dbgfs_dir = dbgfs_dir; + ss->dbgfs_stats = dbgfs_stats; +#endif + } + + return 0; +error_alg: + sun8i_ss_unregister_algs(ss); +error_irq: + sun8i_ss_pm_exit(ss); +error_pm: + sun8i_ss_free_flows(ss, MAXFLOW - 1); + return err; +} + +static int sun8i_ss_remove(struct platform_device *pdev) +{ + struct sun8i_ss_dev *ss = platform_get_drvdata(pdev); + + sun8i_ss_unregister_algs(ss); + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + debugfs_remove_recursive(ss->dbgfs_dir); +#endif + + sun8i_ss_free_flows(ss, MAXFLOW - 1); + + sun8i_ss_pm_exit(ss); + + return 0; +} + +static const struct of_device_id sun8i_ss_crypto_of_match_table[] = { + { .compatible = "allwinner,sun8i-a83t-crypto", + .data = &ss_a83t_variant }, + { .compatible = "allwinner,sun9i-a80-crypto", + .data = &ss_a80_variant }, + {} +}; +MODULE_DEVICE_TABLE(of, sun8i_ss_crypto_of_match_table); + +static struct platform_driver sun8i_ss_driver = { + .probe = sun8i_ss_probe, + .remove = sun8i_ss_remove, + .driver = { + .name = "sun8i-ss", + .pm = &sun8i_ss_pm_ops, + .of_match_table = sun8i_ss_crypto_of_match_table, + }, +}; + +module_platform_driver(sun8i_ss_driver); + +MODULE_DESCRIPTION("Allwinner SecuritySystem cryptographic offloader"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Corentin Labbe <clabbe.montjoie@gmail.com>"); diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c new file mode 100644 index 0000000000..d70b105dcf --- /dev/null +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c @@ -0,0 +1,717 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sun8i-ss-hash.c - hardware cryptographic offloader for + * Allwinner A80/A83T SoC + * + * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com> + * + * This file add support for MD5 and SHA1/SHA224/SHA256. 
+ * + * You could find the datasheet in Documentation/arch/arm/sunxi.rst + */ + +#include <crypto/hmac.h> +#include <crypto/internal/hash.h> +#include <crypto/md5.h> +#include <crypto/scatterwalk.h> +#include <crypto/sha1.h> +#include <crypto/sha2.h> +#include <linux/bottom_half.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/pm_runtime.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/string.h> +#include "sun8i-ss.h" + +static int sun8i_ss_hashkey(struct sun8i_ss_hash_tfm_ctx *tfmctx, const u8 *key, + unsigned int keylen) +{ + struct crypto_shash *xtfm; + struct shash_desc *sdesc; + size_t len; + int ret = 0; + + xtfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(xtfm)) + return PTR_ERR(xtfm); + + len = sizeof(*sdesc) + crypto_shash_descsize(xtfm); + sdesc = kmalloc(len, GFP_KERNEL); + if (!sdesc) { + ret = -ENOMEM; + goto err_hashkey_sdesc; + } + sdesc->tfm = xtfm; + + ret = crypto_shash_init(sdesc); + if (ret) { + dev_err(tfmctx->ss->dev, "shash init error ret=%d\n", ret); + goto err_hashkey; + } + ret = crypto_shash_finup(sdesc, key, keylen, tfmctx->key); + if (ret) + dev_err(tfmctx->ss->dev, "shash finup error\n"); +err_hashkey: + kfree(sdesc); +err_hashkey_sdesc: + crypto_free_shash(xtfm); + return ret; +} + +int sun8i_ss_hmac_setkey(struct crypto_ahash *ahash, const u8 *key, + unsigned int keylen) +{ + struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(ahash); + int digestsize, i; + int bs = crypto_ahash_blocksize(ahash); + int ret; + + digestsize = crypto_ahash_digestsize(ahash); + + if (keylen > bs) { + ret = sun8i_ss_hashkey(tfmctx, key, keylen); + if (ret) + return ret; + tfmctx->keylen = digestsize; + } else { + tfmctx->keylen = keylen; + memcpy(tfmctx->key, key, keylen); + } + + tfmctx->ipad = kzalloc(bs, GFP_KERNEL); + if (!tfmctx->ipad) + return -ENOMEM; + tfmctx->opad = kzalloc(bs, GFP_KERNEL); + if (!tfmctx->opad) { + ret = -ENOMEM; + goto err_opad; + } + + memset(tfmctx->key + tfmctx->keylen, 0, bs - tfmctx->keylen); + memcpy(tfmctx->ipad, tfmctx->key, tfmctx->keylen); + memcpy(tfmctx->opad, tfmctx->key, tfmctx->keylen); + for (i = 0; i < bs; i++) { + tfmctx->ipad[i] ^= HMAC_IPAD_VALUE; + tfmctx->opad[i] ^= HMAC_OPAD_VALUE; + } + + ret = crypto_ahash_setkey(tfmctx->fallback_tfm, key, keylen); + if (!ret) + return 0; + + memzero_explicit(tfmctx->key, keylen); + kfree_sensitive(tfmctx->opad); +err_opad: + kfree_sensitive(tfmctx->ipad); + return ret; +} + +int sun8i_ss_hash_init_tfm(struct crypto_ahash *tfm) +{ + struct sun8i_ss_hash_tfm_ctx *op = crypto_ahash_ctx(tfm); + struct ahash_alg *alg = crypto_ahash_alg(tfm); + struct sun8i_ss_alg_template *algt; + int err; + + algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base); + op->ss = algt->ss; + + /* FALLBACK */ + op->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(op->fallback_tfm)) { + dev_err(algt->ss->dev, "Fallback driver could no be loaded\n"); + return PTR_ERR(op->fallback_tfm); + } + + crypto_ahash_set_statesize(tfm, + crypto_ahash_statesize(op->fallback_tfm)); + + crypto_ahash_set_reqsize(tfm, + sizeof(struct sun8i_ss_hash_reqctx) + + crypto_ahash_reqsize(op->fallback_tfm)); + + memcpy(algt->fbname, crypto_ahash_driver_name(op->fallback_tfm), + CRYPTO_MAX_ALG_NAME); + + err = pm_runtime_get_sync(op->ss->dev); + if (err < 0) + goto error_pm; + return 0; +error_pm: + pm_runtime_put_noidle(op->ss->dev); + 
crypto_free_ahash(op->fallback_tfm); + return err; +} + +void sun8i_ss_hash_exit_tfm(struct crypto_ahash *tfm) +{ + struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); + + kfree_sensitive(tfmctx->ipad); + kfree_sensitive(tfmctx->opad); + + crypto_free_ahash(tfmctx->fallback_tfm); + pm_runtime_put_sync_suspend(tfmctx->ss->dev); +} + +int sun8i_ss_hash_init(struct ahash_request *areq) +{ + struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); + + memset(rctx, 0, sizeof(struct sun8i_ss_hash_reqctx)); + + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_init(&rctx->fallback_req); +} + +int sun8i_ss_hash_export(struct ahash_request *areq, void *out) +{ + struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_export(&rctx->fallback_req, out); +} + +int sun8i_ss_hash_import(struct ahash_request *areq, const void *in) +{ + struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_import(&rctx->fallback_req, in); +} + +int sun8i_ss_hash_final(struct ahash_request *areq) +{ + struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + rctx->fallback_req.base.flags = areq->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + rctx->fallback_req.result = areq->result; + + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) { + struct ahash_alg *alg = crypto_ahash_alg(tfm); + struct sun8i_ss_alg_template *algt __maybe_unused; + + algt = container_of(alg, struct sun8i_ss_alg_template, + alg.hash.base); + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + algt->stat_fb++; +#endif + } + + return crypto_ahash_final(&rctx->fallback_req); +} + +int sun8i_ss_hash_update(struct ahash_request *areq) +{ + struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + rctx->fallback_req.base.flags = areq->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + rctx->fallback_req.nbytes = areq->nbytes; + rctx->fallback_req.src = areq->src; + + return crypto_ahash_update(&rctx->fallback_req); +} + +int sun8i_ss_hash_finup(struct ahash_request *areq) +{ + struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + rctx->fallback_req.base.flags = areq->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + + rctx->fallback_req.nbytes = areq->nbytes; + 
rctx->fallback_req.src = areq->src; + rctx->fallback_req.result = areq->result; + + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) { + struct ahash_alg *alg = crypto_ahash_alg(tfm); + struct sun8i_ss_alg_template *algt __maybe_unused; + + algt = container_of(alg, struct sun8i_ss_alg_template, + alg.hash.base); + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + algt->stat_fb++; +#endif + } + + return crypto_ahash_finup(&rctx->fallback_req); +} + +static int sun8i_ss_hash_digest_fb(struct ahash_request *areq) +{ + struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + rctx->fallback_req.base.flags = areq->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + + rctx->fallback_req.nbytes = areq->nbytes; + rctx->fallback_req.src = areq->src; + rctx->fallback_req.result = areq->result; + + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) { + struct ahash_alg *alg = crypto_ahash_alg(tfm); + struct sun8i_ss_alg_template *algt __maybe_unused; + + algt = container_of(alg, struct sun8i_ss_alg_template, + alg.hash.base); + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + algt->stat_fb++; +#endif + } + + return crypto_ahash_digest(&rctx->fallback_req); +} + +static int sun8i_ss_run_hash_task(struct sun8i_ss_dev *ss, + struct sun8i_ss_hash_reqctx *rctx, + const char *name) +{ + int flow = rctx->flow; + u32 v = SS_START; + int i; + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + ss->flows[flow].stat_req++; +#endif + + /* choose between stream0/stream1 */ + if (flow) + v |= SS_FLOW1; + else + v |= SS_FLOW0; + + v |= rctx->method; + + for (i = 0; i < MAX_SG; i++) { + if (!rctx->t_dst[i].addr) + break; + + mutex_lock(&ss->mlock); + if (i > 0) { + v |= BIT(17); + writel(rctx->t_dst[i - 1].addr, ss->base + SS_KEY_ADR_REG); + writel(rctx->t_dst[i - 1].addr, ss->base + SS_IV_ADR_REG); + } + + dev_dbg(ss->dev, + "Processing SG %d on flow %d %s ctl=%x %d to %d method=%x src=%x dst=%x\n", + i, flow, name, v, + rctx->t_src[i].len, rctx->t_dst[i].len, + rctx->method, rctx->t_src[i].addr, rctx->t_dst[i].addr); + + writel(rctx->t_src[i].addr, ss->base + SS_SRC_ADR_REG); + writel(rctx->t_dst[i].addr, ss->base + SS_DST_ADR_REG); + writel(rctx->t_src[i].len, ss->base + SS_LEN_ADR_REG); + writel(BIT(0) | BIT(1), ss->base + SS_INT_CTL_REG); + + reinit_completion(&ss->flows[flow].complete); + ss->flows[flow].status = 0; + wmb(); + + writel(v, ss->base + SS_CTL_REG); + mutex_unlock(&ss->mlock); + wait_for_completion_interruptible_timeout(&ss->flows[flow].complete, + msecs_to_jiffies(2000)); + if (ss->flows[flow].status == 0) { + dev_err(ss->dev, "DMA timeout for %s\n", name); + return -EFAULT; + } + } + + return 0; +} + +static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct ahash_alg *alg = crypto_ahash_alg(tfm); + struct sun8i_ss_alg_template *algt; + struct scatterlist *sg; + + algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base); + + if (areq->nbytes == 0) { + algt->stat_fb_len++; + return true; + } + + if (areq->nbytes >= MAX_PAD_SIZE - 64) { + algt->stat_fb_len++; + return true; + } + + /* we need to reserve one SG for the padding one */ + if (sg_nents(areq->src) > MAX_SG - 1) { + algt->stat_fb_sgnum++; + return true; + } + + sg = areq->src; + while (sg) { + /* SS can operate hash only on full block size + * since SS support only MD5,sha1,sha224 and 
sha256, blocksize + * is always 64 + */ + /* Only the last block could be bounced to the pad buffer */ + if (sg->length % 64 && sg_next(sg)) { + algt->stat_fb_sglen++; + return true; + } + if (!IS_ALIGNED(sg->offset, sizeof(u32))) { + algt->stat_fb_align++; + return true; + } + if (sg->length % 4) { + algt->stat_fb_sglen++; + return true; + } + sg = sg_next(sg); + } + return false; +} + +int sun8i_ss_hash_digest(struct ahash_request *areq) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); + struct ahash_alg *alg = crypto_ahash_alg(tfm); + struct sun8i_ss_alg_template *algt; + struct sun8i_ss_dev *ss; + struct crypto_engine *engine; + int e; + + if (sun8i_ss_hash_need_fallback(areq)) + return sun8i_ss_hash_digest_fb(areq); + + algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base); + ss = algt->ss; + + e = sun8i_ss_get_engine_number(ss); + rctx->flow = e; + engine = ss->flows[e].engine; + + return crypto_transfer_hash_request_to_engine(engine, areq); +} + +static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count, bool le, int bs) +{ + u64 fill, min_fill, j, k; + __be64 *bebits; + __le64 *lebits; + + j = padi; + buf[j++] = cpu_to_le32(0x80); + + if (bs == 64) { + fill = 64 - (byte_count % 64); + min_fill = 2 * sizeof(u32) + sizeof(u32); + } else { + fill = 128 - (byte_count % 128); + min_fill = 4 * sizeof(u32) + sizeof(u32); + } + + if (fill < min_fill) + fill += bs; + + k = j; + j += (fill - min_fill) / sizeof(u32); + if (j * 4 > bufsize) { + pr_err("%s OVERFLOW %llu\n", __func__, j); + return 0; + } + for (; k < j; k++) + buf[k] = 0; + + if (le) { + /* MD5 */ + lebits = (__le64 *)&buf[j]; + *lebits = cpu_to_le64(byte_count << 3); + j += 2; + } else { + if (bs == 64) { + /* sha1 sha224 sha256 */ + bebits = (__be64 *)&buf[j]; + *bebits = cpu_to_be64(byte_count << 3); + j += 2; + } else { + /* sha384 sha512*/ + bebits = (__be64 *)&buf[j]; + *bebits = cpu_to_be64(byte_count >> 61); + j += 2; + bebits = (__be64 *)&buf[j]; + *bebits = cpu_to_be64(byte_count << 3); + j += 2; + } + } + if (j * 4 > bufsize) { + pr_err("%s OVERFLOW %llu\n", __func__, j); + return 0; + } + + return j; +} + +/* sun8i_ss_hash_run - run an ahash request + * Send the data of the request to the SS along with an extra SG with padding + */ +int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) +{ + struct ahash_request *areq = container_of(breq, struct ahash_request, base); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); + struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); + struct ahash_alg *alg = crypto_ahash_alg(tfm); + struct sun8i_ss_alg_template *algt; + struct sun8i_ss_dev *ss; + struct scatterlist *sg; + int bs = crypto_ahash_blocksize(tfm); + int nr_sgs, err, digestsize; + unsigned int len; + u64 byte_count; + void *pad, *result; + int j, i, k, todo; + dma_addr_t addr_res, addr_pad, addr_xpad; + __le32 *bf; + /* HMAC step: + * 0: normal hashing + * 1: IPAD + * 2: OPAD + */ + int hmac = 0; + + algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base); + ss = algt->ss; + + digestsize = crypto_ahash_digestsize(tfm); + if (digestsize == SHA224_DIGEST_SIZE) + digestsize = SHA256_DIGEST_SIZE; + + result = ss->flows[rctx->flow].result; + pad = ss->flows[rctx->flow].pad; + bf = (__le32 *)pad; + + for (i = 0; i < MAX_SG; i++) { + rctx->t_dst[i].addr = 0; + rctx->t_dst[i].len = 0; + } + +#ifdef 
CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + algt->stat_req++; +#endif + + rctx->method = ss->variant->alg_hash[algt->ss_algo_id]; + + nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE); + if (nr_sgs <= 0 || nr_sgs > MAX_SG) { + dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs); + err = -EINVAL; + goto theend; + } + + addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE); + if (dma_mapping_error(ss->dev, addr_res)) { + dev_err(ss->dev, "DMA map dest\n"); + err = -EINVAL; + goto err_dma_result; + } + + j = 0; + len = areq->nbytes; + sg = areq->src; + i = 0; + while (len > 0 && sg) { + if (sg_dma_len(sg) == 0) { + sg = sg_next(sg); + continue; + } + todo = min(len, sg_dma_len(sg)); + /* only the last SG could be with a size not modulo64 */ + if (todo % 64 == 0) { + rctx->t_src[i].addr = sg_dma_address(sg); + rctx->t_src[i].len = todo / 4; + rctx->t_dst[i].addr = addr_res; + rctx->t_dst[i].len = digestsize / 4; + len -= todo; + } else { + scatterwalk_map_and_copy(bf, sg, 0, todo, 0); + j += todo / 4; + len -= todo; + } + sg = sg_next(sg); + i++; + } + if (len > 0) { + dev_err(ss->dev, "remaining len %d\n", len); + err = -EINVAL; + goto theend; + } + + if (j > 0) + i--; + +retry: + byte_count = areq->nbytes; + if (tfmctx->keylen && hmac == 0) { + hmac = 1; + /* shift all SG one slot up, to free slot 0 for IPAD */ + for (k = 6; k >= 0; k--) { + rctx->t_src[k + 1].addr = rctx->t_src[k].addr; + rctx->t_src[k + 1].len = rctx->t_src[k].len; + rctx->t_dst[k + 1].addr = rctx->t_dst[k].addr; + rctx->t_dst[k + 1].len = rctx->t_dst[k].len; + } + addr_xpad = dma_map_single(ss->dev, tfmctx->ipad, bs, DMA_TO_DEVICE); + err = dma_mapping_error(ss->dev, addr_xpad); + if (err) { + dev_err(ss->dev, "Fail to create DMA mapping of ipad\n"); + goto err_dma_xpad; + } + rctx->t_src[0].addr = addr_xpad; + rctx->t_src[0].len = bs / 4; + rctx->t_dst[0].addr = addr_res; + rctx->t_dst[0].len = digestsize / 4; + i++; + byte_count = areq->nbytes + bs; + } + if (tfmctx->keylen && hmac == 2) { + for (i = 0; i < MAX_SG; i++) { + rctx->t_src[i].addr = 0; + rctx->t_src[i].len = 0; + rctx->t_dst[i].addr = 0; + rctx->t_dst[i].len = 0; + } + + addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE); + if (dma_mapping_error(ss->dev, addr_res)) { + dev_err(ss->dev, "Fail to create DMA mapping of result\n"); + err = -EINVAL; + goto err_dma_result; + } + addr_xpad = dma_map_single(ss->dev, tfmctx->opad, bs, DMA_TO_DEVICE); + err = dma_mapping_error(ss->dev, addr_xpad); + if (err) { + dev_err(ss->dev, "Fail to create DMA mapping of opad\n"); + goto err_dma_xpad; + } + rctx->t_src[0].addr = addr_xpad; + rctx->t_src[0].len = bs / 4; + + memcpy(bf, result, digestsize); + j = digestsize / 4; + i = 1; + byte_count = digestsize + bs; + + rctx->t_dst[0].addr = addr_res; + rctx->t_dst[0].len = digestsize / 4; + } + + switch (algt->ss_algo_id) { + case SS_ID_HASH_MD5: + j = hash_pad(bf, 4096, j, byte_count, true, bs); + break; + case SS_ID_HASH_SHA1: + case SS_ID_HASH_SHA224: + case SS_ID_HASH_SHA256: + j = hash_pad(bf, 4096, j, byte_count, false, bs); + break; + } + if (!j) { + err = -EINVAL; + goto theend; + } + + addr_pad = dma_map_single(ss->dev, pad, j * 4, DMA_TO_DEVICE); + if (dma_mapping_error(ss->dev, addr_pad)) { + dev_err(ss->dev, "DMA error on padding SG\n"); + err = -EINVAL; + goto err_dma_pad; + } + rctx->t_src[i].addr = addr_pad; + rctx->t_src[i].len = j; + rctx->t_dst[i].addr = addr_res; + rctx->t_dst[i].len = digestsize / 4; + + err = sun8i_ss_run_hash_task(ss, rctx, 
+
+	/*
+	 * Map/unmap bookkeeping for the HMAC retry loop above.
+	 * The flow starts with hmac == 0 (plain hashing); for a keyed
+	 * TFM it goes through hmac = 1 (IPAD pass) and hmac = 2 (OPAD pass):
+	 *
+	 * MAP src
+	 * MAP res
+	 *
+	 * retry:
+	 * if hmac then hmac = 1
+	 *	MAP xpad (ipad)
+	 * if hmac == 2
+	 *	MAP res
+	 *	MAP xpad (opad)
+	 * MAP pad
+	 * ACTION!
+	 * UNMAP pad
+	 * if hmac
+	 *	UNMAP xpad
+	 * UNMAP res
+	 * if hmac < 2
+	 *	UNMAP src
+	 *
+	 * if hmac == 1 then hmac = 2, goto retry
+	 */
+
+	dma_unmap_single(ss->dev, addr_pad, j * 4, DMA_TO_DEVICE);
+
+err_dma_pad:
+	if (hmac > 0)
+		dma_unmap_single(ss->dev, addr_xpad, bs, DMA_TO_DEVICE);
+err_dma_xpad:
+	dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE);
+err_dma_result:
+	if (hmac < 2)
+		dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
+			     DMA_TO_DEVICE);
+	if (hmac == 1 && !err) {
+		hmac = 2;
+		goto retry;
+	}
+
+	if (!err)
+		memcpy(areq->result, result, crypto_ahash_digestsize(tfm));
+theend:
+	local_bh_disable();
+	crypto_finalize_hash_request(engine, breq, err);
+	local_bh_enable();
+	return 0;
+}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
new file mode 100644
index 0000000000..a923cfc655
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ss-prng.c - hardware cryptographic offloader for
+ * Allwinner A80/A83T SoC
+ *
+ * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
+ *
+ * This file handles the PRNG found in the SS
+ *
+ * You can find a link to the datasheet in Documentation/arch/arm/sunxi.rst
+ */
+#include "sun8i-ss.h"
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/pm_runtime.h>
+#include <crypto/internal/rng.h>
+
+int sun8i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed,
+		       unsigned int slen)
+{
+	struct sun8i_ss_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);
+
+	if (ctx->seed && ctx->slen != slen) {
+		kfree_sensitive(ctx->seed);
+		ctx->slen = 0;
+		ctx->seed = NULL;
+	}
+	if (!ctx->seed)
+		ctx->seed = kmalloc(slen, GFP_KERNEL);
+	if (!ctx->seed)
+		return -ENOMEM;
+
+	memcpy(ctx->seed, seed, slen);
+	ctx->slen = slen;
+
+	return 0;
+}
+
+int sun8i_ss_prng_init(struct crypto_tfm *tfm)
+{
+	struct sun8i_ss_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	memset(ctx, 0, sizeof(struct sun8i_ss_rng_tfm_ctx));
+	return 0;
+}
+
+void sun8i_ss_prng_exit(struct crypto_tfm *tfm)
+{
+	struct sun8i_ss_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	kfree_sensitive(ctx->seed);
+	ctx->seed = NULL;
+	ctx->slen = 0;
+}
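As an editorial aside, this is roughly how another kernel component would reach the seed and generate entry points in this file through the generic crypto_rng API. The sketch assumes the driver's usual registration under the "stdrng" algorithm name; the function is illustrative, not part of the commit:

#include <crypto/rng.h>

static int prng_demo(void)
{
	u8 seed[PRNG_SEED_SIZE] = { 0x01, 0x02 /* ... seed material ... */ };
	u8 out[32];
	struct crypto_rng *rng;
	int err;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* calls ->seed(), i.e. sun8i_ss_prng_seed() */
	err = crypto_rng_reset(rng, seed, sizeof(seed));
	if (!err)
		/* calls ->generate(), i.e. sun8i_ss_prng_generate() below */
		err = crypto_rng_get_bytes(rng, out, sizeof(out));

	crypto_free_rng(rng);
	return err;
}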
+
+int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
+			   unsigned int slen, u8 *dst, unsigned int dlen)
+{
+	struct sun8i_ss_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);
+	struct rng_alg *alg = crypto_rng_alg(tfm);
+	struct sun8i_ss_alg_template *algt;
+	unsigned int todo_with_padding;
+	struct sun8i_ss_dev *ss;
+	dma_addr_t dma_iv, dma_dst;
+	unsigned int todo;
+	int err = 0;
+	int flow;
+	void *d;
+	u32 v;
+
+	algt = container_of(alg, struct sun8i_ss_alg_template, alg.rng);
+	ss = algt->ss;
+
+	if (ctx->slen == 0) {
+		dev_err(ss->dev, "The PRNG is not seeded\n");
+		return -EINVAL;
+	}
+
+	/* The SS does not return an updated seed, so we have to fetch a new
+	 * one: ask for an extra PRNG_SEED_SIZE of data, i.e. dlen + seedsize
+	 * rounded up to a multiple of PRNG_DATA_SIZE.
+	 */
+	todo = dlen + PRNG_SEED_SIZE + PRNG_DATA_SIZE;
+	todo -= todo % PRNG_DATA_SIZE;
+
+	todo_with_padding = ALIGN(todo, dma_get_cache_alignment());
+	if (todo_with_padding < todo || todo < dlen)
+		return -EOVERFLOW;
+
+	d = kzalloc(todo_with_padding, GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+
+	flow = sun8i_ss_get_engine_number(ss);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+	algt->stat_req++;
+	algt->stat_bytes += todo;
+#endif
+
+	v = SS_ALG_PRNG | SS_PRNG_CONTINUE | SS_START;
+	if (flow)
+		v |= SS_FLOW1;
+	else
+		v |= SS_FLOW0;
+
+	dma_iv = dma_map_single(ss->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
+	if (dma_mapping_error(ss->dev, dma_iv)) {
+		dev_err(ss->dev, "Cannot DMA MAP IV\n");
+		err = -EFAULT;
+		goto err_free;
+	}
+
+	dma_dst = dma_map_single(ss->dev, d, todo, DMA_FROM_DEVICE);
+	if (dma_mapping_error(ss->dev, dma_dst)) {
+		dev_err(ss->dev, "Cannot DMA MAP DST\n");
+		err = -EFAULT;
+		goto err_iv;
+	}
+
+	err = pm_runtime_resume_and_get(ss->dev);
+	if (err < 0)
+		goto err_pm;
+	err = 0;
+
+	mutex_lock(&ss->mlock);
+	writel(dma_iv, ss->base + SS_IV_ADR_REG);
+	/* the PRNG acts badly (failing rngtest) without SS_KEY_ADR_REG set */
+	writel(dma_iv, ss->base + SS_KEY_ADR_REG);
+	writel(dma_dst, ss->base + SS_DST_ADR_REG);
+	writel(todo / 4, ss->base + SS_LEN_ADR_REG);
+
+	reinit_completion(&ss->flows[flow].complete);
+	ss->flows[flow].status = 0;
+	/* Be sure all data is written before enabling the task */
+	wmb();
+
+	writel(v, ss->base + SS_CTL_REG);
+
+	wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,
+						  msecs_to_jiffies(todo));
+	if (ss->flows[flow].status == 0) {
+		dev_err(ss->dev, "DMA timeout for PRNG (size=%u)\n", todo);
+		err = -EFAULT;
+	}
+	/* Since cipher and hash go through the crypto engine and we have one
+	 * crypto engine per flow, they are guaranteed to issue only one
+	 * request per flow at a time.
+	 * Since the crypto engine waits for completion before submitting a
+	 * new request, the mutex could in principle be released right after
+	 * the final writel. But the crypto engine cannot handle crypto_rng,
+	 * so we must make sure nothing else uses our flow.
+	 * The easiest way is to hold mlock until the hardware has finished
+	 * our request. A per-flow lock would also work, but at the cost of
+	 * extra complexity.
+	 * The drawback is that no request can be handled for the other flow
+	 */
+	mutex_unlock(&ss->mlock);
+
+	pm_runtime_put(ss->dev);
+
+err_pm:
+	dma_unmap_single(ss->dev, dma_dst, todo, DMA_FROM_DEVICE);
+err_iv:
+	dma_unmap_single(ss->dev, dma_iv, ctx->slen, DMA_TO_DEVICE);
+
+	if (!err) {
+		memcpy(dst, d, dlen);
+		/* Update seed */
+		memcpy(ctx->seed, d + dlen, ctx->slen);
+	}
+err_free:
+	kfree_sensitive(d);
+
+	return err;
+}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
new file mode 100644
index 0000000000..ae66eb45fb
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
@@ -0,0 +1,318 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * sun8i-ss.h - hardware cryptographic offloader for
+ * Allwinner A80/A83T SoC
+ *
+ * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
+ */
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/engine.h>
+#include <crypto/rng.h>
+#include <crypto/skcipher.h>
+#include <linux/atomic.h>
+#include <linux/debugfs.h>
+#include <linux/crypto.h>
+#include <crypto/internal/hash.h>
+#include <crypto/md5.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+
+#define SS_START		1
+
+#define SS_ENCRYPTION		0
+#define SS_DECRYPTION		BIT(6)
+
+#define SS_ALG_AES		0
+#define SS_ALG_DES		(1 << 2)
+#define SS_ALG_3DES		(2 << 2)
+#define SS_ALG_MD5		(3 << 2)
+#define SS_ALG_PRNG		(4 << 2)
+#define SS_ALG_SHA1		(6 << 2)
+#define SS_ALG_SHA224		(7 << 2)
+#define SS_ALG_SHA256		(8 << 2)
+
+#define SS_CTL_REG		0x00
+#define SS_INT_CTL_REG		0x04
+#define SS_INT_STA_REG		0x08
+#define SS_KEY_ADR_REG		0x10
+#define SS_IV_ADR_REG		0x18
+#define SS_SRC_ADR_REG		0x20
+#define SS_DST_ADR_REG		0x28
+#define SS_LEN_ADR_REG		0x30
+
+#define SS_ID_NOTSUPP		0xFF
+
+#define SS_ID_CIPHER_AES	0
+#define SS_ID_CIPHER_DES	1
+#define SS_ID_CIPHER_DES3	2
+#define SS_ID_CIPHER_MAX	3
+
+#define SS_ID_OP_ECB		0
+#define SS_ID_OP_CBC		1
+#define SS_ID_OP_MAX		2
+
+#define SS_AES_128BITS		0
+#define SS_AES_192BITS		1
+#define SS_AES_256BITS		2
+
+#define SS_OP_ECB		0
+#define SS_OP_CBC		(1 << 13)
+
+#define SS_ID_HASH_MD5		0
+#define SS_ID_HASH_SHA1		1
+#define SS_ID_HASH_SHA224	2
+#define SS_ID_HASH_SHA256	3
+#define SS_ID_HASH_MAX		4
+
+#define SS_FLOW0		BIT(30)
+#define SS_FLOW1		BIT(31)
+
+#define SS_PRNG_CONTINUE	BIT(18)
+
+#define MAX_SG			8
+
+#define MAXFLOW			2
+
+#define SS_MAX_CLOCKS		2
+
+#define SS_DIE_ID_SHIFT		20
+#define SS_DIE_ID_MASK		0x07
+
+#define PRNG_DATA_SIZE		(160 / 8)
+#define PRNG_SEED_SIZE		DIV_ROUND_UP(175, 8)
+
+#define MAX_PAD_SIZE		4096
+
+/*
+ * struct ss_clock - Describe clocks used by sun8i-ss
+ * @name: Name of clock needed by this variant
+ * @freq: Frequency to set for each clock
+ * @max_freq: Maximum frequency for each clock
+ */
+struct ss_clock {
+	const char *name;
+	unsigned long freq;
+	unsigned long max_freq;
+};
+
+/*
+ * struct ss_variant - Describe SS capability for each variant hardware
+ * @alg_cipher: list of supported ciphers. For each SS_ID_ this gives the
+ *		corresponding SS_ALG_XXX value
+ * @alg_hash: list of supported hashes. For each SS_ID_ this gives the
+ *	      corresponding SS_ALG_XXX value
+ * @op_mode: list of supported block modes
+ * @ss_clks: list of clocks needed by this variant
+ */
+struct ss_variant {
+	char alg_cipher[SS_ID_CIPHER_MAX];
+	char alg_hash[SS_ID_HASH_MAX];
+	u32 op_mode[SS_ID_OP_MAX];
+	struct ss_clock ss_clks[SS_MAX_CLOCKS];
+};
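For illustration, a filled-in variant table could look like the sketch below. The algorithm lists reuse the SS_ALG_* and SS_OP_* values defined above; the clock names and the 300 MHz rate are placeholders rather than the upstream per-SoC tables, which live in sun8i-ss-core.c:

/* Editorial sketch: a hypothetical ss_variant table, values illustrative. */
static const struct ss_variant ss_demo_variant = {
	.alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES },
	.alg_hash = { SS_ALG_MD5, SS_ALG_SHA1, SS_ALG_SHA224, SS_ALG_SHA256 },
	.op_mode = { SS_OP_ECB, SS_OP_CBC },
	.ss_clks = {
		{ "bus", 0, 300 * 1000 * 1000 },
		{ "mod", 0, 300 * 1000 * 1000 },
	},
};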
+
+struct sginfo {
+	u32 addr;
+	u32 len;
+};
+
+/*
+ * struct sun8i_ss_flow - Information used by each flow
+ * @engine: ptr to the crypto_engine for this flow
+ * @complete: completion for the current task on this flow
+ * @status: set to 1 by interrupt if task is done
+ * @stat_req: number of requests done by this flow
+ * @iv: list of IVs to use for each step
+ * @biv: buffer which contains the backed-up IV
+ * @pad: padding buffer for hash operations
+ * @result: buffer for storing the result of hash operations
+ */
+struct sun8i_ss_flow {
+	struct crypto_engine *engine;
+	struct completion complete;
+	int status;
+	u8 *iv[MAX_SG];
+	u8 *biv;
+	void *pad;
+	void *result;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+	unsigned long stat_req;
+#endif
+};
+
+/*
+ * struct sun8i_ss_dev - main container for all this driver information
+ * @base: base address of SS
+ * @ssclks: clocks used by SS
+ * @reset: pointer to reset controller
+ * @dev: the platform device
+ * @mlock: Control access to device registers
+ * @flows: array of all flows
+ * @flow: flow to use for the next request
+ * @variant: pointer to variant specific data
+ * @dbgfs_dir: Debugfs dentry for statistic directory
+ * @dbgfs_stats: Debugfs dentry for statistic counters
+ */
+struct sun8i_ss_dev {
+	void __iomem *base;
+	struct clk *ssclks[SS_MAX_CLOCKS];
+	struct reset_control *reset;
+	struct device *dev;
+	struct mutex mlock;
+	struct sun8i_ss_flow *flows;
+	atomic_t flow;
+	const struct ss_variant *variant;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+	struct dentry *dbgfs_dir;
+	struct dentry *dbgfs_stats;
+#endif
+};
+
+/*
+ * struct sun8i_cipher_req_ctx - context for a skcipher request
+ * @t_src: list of mapped SGs with their size
+ * @t_dst: list of mapped SGs with their size
+ * @p_key: DMA address of the key
+ * @p_iv: DMA address of the IVs
+ * @niv: Number of IVs DMA mapped
+ * @method: current algorithm for this request
+ * @op_mode: op_mode for this request
+ * @op_dir: direction (encrypt vs decrypt) for this request
+ * @flow: the flow to use for this request
+ * @ivlen: size of IVs
+ * @keylen: keylen for this request
+ * @fallback_req: request struct for invoking the fallback skcipher TFM
+ */
+struct sun8i_cipher_req_ctx {
+	struct sginfo t_src[MAX_SG];
+	struct sginfo t_dst[MAX_SG];
+	u32 p_key;
+	u32 p_iv[MAX_SG];
+	int niv;
+	u32 method;
+	u32 op_mode;
+	u32 op_dir;
+	int flow;
+	unsigned int ivlen;
+	unsigned int keylen;
+	struct skcipher_request fallback_req; // keep at the end
+};
+
+/*
+ * struct sun8i_cipher_tfm_ctx - context for a skcipher TFM
+ * @key: pointer to key data
+ * @keylen: length of the key
+ * @ss: pointer to the private data of the driver handling this TFM
+ * @fallback_tfm: pointer to the fallback TFM
+ */
+struct sun8i_cipher_tfm_ctx {
+	u32 *key;
+	u32 keylen;
+	struct sun8i_ss_dev *ss;
+	struct crypto_skcipher *fallback_tfm;
+};
+
+/*
+ * struct sun8i_ss_rng_tfm_ctx - context for the PRNG TFM
+ * @seed: The seed to use
+ * @slen: The size of the seed
+ */
+struct sun8i_ss_rng_tfm_ctx {
+	void *seed;
+	unsigned int slen;
+};
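The hash TFM context that follows carries ipad/opad buffers which sun8i_ss_hash_run() feeds to the engine during the two HMAC passes. As a hedged sketch, the standard RFC 2104 preparation a setkey implementation performs on such buffers is shown below; the helper name is hypothetical, and the key is assumed to have already been hashed down (if longer than the block size) and zero-padded to bs bytes:

/* Editorial sketch of standard RFC 2104 HMAC pad derivation. */
static void hmac_fill_pads(u8 *ipad, u8 *opad, const u8 *key, int bs)
{
	int i;

	for (i = 0; i < bs; i++) {
		ipad[i] = key[i] ^ 0x36;	/* HMAC_IPAD_VALUE */
		opad[i] = key[i] ^ 0x5c;	/* HMAC_OPAD_VALUE */
	}
}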
+
+/*
+ * struct sun8i_ss_hash_tfm_ctx - context for an ahash TFM
+ * @fallback_tfm: pointer to the fallback TFM
+ * @ss: pointer to the private data of the driver handling this TFM
+ * @ipad: HMAC inner pad buffer
+ * @opad: HMAC outer pad buffer
+ * @key: the HMAC key material
+ * @keylen: length of the key
+ */
+struct sun8i_ss_hash_tfm_ctx {
+	struct crypto_ahash *fallback_tfm;
+	struct sun8i_ss_dev *ss;
+	u8 *ipad;
+	u8 *opad;
+	u8 key[SHA256_BLOCK_SIZE];
+	int keylen;
+};
+
+/*
+ * struct sun8i_ss_hash_reqctx - context for an ahash request
+ * @t_src: list of DMA addresses and sizes for source SGs
+ * @t_dst: list of DMA addresses and sizes for destination SGs
+ * @fallback_req: pre-allocated fallback request
+ * @method: the register value for the algorithm used by this request
+ * @flow: the flow to use for this request
+ */
+struct sun8i_ss_hash_reqctx {
+	struct sginfo t_src[MAX_SG];
+	struct sginfo t_dst[MAX_SG];
+	struct ahash_request fallback_req;
+	u32 method;
+	int flow;
+};
+
+/*
+ * struct sun8i_ss_alg_template - crypto_alg template
+ * @type: the CRYPTO_ALG_TYPE for this template
+ * @ss_algo_id: the SS_ID for this template
+ * @ss_blockmode: the SS_ID of the block mode used by this template
+ * @ss: pointer to the sun8i_ss_dev structure associated with this template
+ * @alg: the crypto algorithm; exactly one of the union members is used
+ * @stat_req: number of requests done on this template
+ * @stat_fb: number of requests which used the fallback
+ * @stat_bytes: total data size handled by this template
+ * @stat_fb_len: number of fallbacks due to an unsupported request length
+ * @stat_fb_sglen: number of fallbacks due to an SG entry with a bad length
+ * @stat_fb_align: number of fallbacks due to misaligned SG data
+ * @stat_fb_sgnum: number of fallbacks due to too many SG entries
+ * @fbname: name of the fallback algorithm
+ */
+struct sun8i_ss_alg_template {
+	u32 type;
+	u32 ss_algo_id;
+	u32 ss_blockmode;
+	struct sun8i_ss_dev *ss;
+	union {
+		struct skcipher_engine_alg skcipher;
+		struct rng_alg rng;
+		struct ahash_engine_alg hash;
+	} alg;
+	unsigned long stat_req;
+	unsigned long stat_fb;
+	unsigned long stat_bytes;
+	unsigned long stat_fb_len;
+	unsigned long stat_fb_sglen;
+	unsigned long stat_fb_align;
+	unsigned long stat_fb_sgnum;
+	char fbname[CRYPTO_MAX_ALG_NAME];
+};
+
+int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+			unsigned int keylen);
+int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
+			 unsigned int keylen);
+int sun8i_ss_cipher_init(struct crypto_tfm *tfm);
+void sun8i_ss_cipher_exit(struct crypto_tfm *tfm);
+int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq);
+int sun8i_ss_skdecrypt(struct skcipher_request *areq);
+int sun8i_ss_skencrypt(struct skcipher_request *areq);
+
+int sun8i_ss_get_engine_number(struct sun8i_ss_dev *ss);
+
+int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx, const char *name);
+int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
+			   unsigned int slen, u8 *dst, unsigned int dlen);
+int sun8i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen);
+int sun8i_ss_prng_init(struct crypto_tfm *tfm);
+void sun8i_ss_prng_exit(struct crypto_tfm *tfm);
+
+int sun8i_ss_hash_init_tfm(struct crypto_ahash *tfm);
+void sun8i_ss_hash_exit_tfm(struct crypto_ahash *tfm);
+int sun8i_ss_hash_init(struct ahash_request *areq);
+int sun8i_ss_hash_export(struct ahash_request *areq, void *out);
+int sun8i_ss_hash_import(struct ahash_request *areq, const void *in);
+int sun8i_ss_hash_final(struct ahash_request *areq);
+int sun8i_ss_hash_update(struct ahash_request *areq);
+int sun8i_ss_hash_finup(struct ahash_request *areq);
+int sun8i_ss_hash_digest(struct ahash_request *areq);
+int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq);
+int sun8i_ss_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
+			 unsigned int keylen);
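Finally, an editorial sketch of driving the hash entry points declared above from another kernel module via the generic ahash API. The synchronous crypto_wait_req() pattern is the stock one; the "hmac(sha1)" name assumes the driver's HMAC registration, and error handling is trimmed:

#include <crypto/hash.h>
#include <linux/crypto.h>

static int hmac_demo(const u8 *key, unsigned int keylen,
		     struct scatterlist *sg, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_ahash("hmac(sha1)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ends up in sun8i_ss_hmac_setkey() when this driver is selected */
	err = crypto_ahash_setkey(tfm, key, keylen);
	if (err)
		goto out_tfm;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, sg, out, len);

	/* ends up in sun8i_ss_hash_digest(), then sun8i_ss_hash_run() */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
out_tfm:
	crypto_free_ahash(tfm);
	return err;
}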