Diffstat (limited to 'drivers/crypto/allwinner')
21 files changed, 7529 insertions, 0 deletions
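The patch below does not expose a private interface: each driver registers its algorithms ("cbc-aes-sun4i-ss", "sha1-sun4i-ss", "sun4i_ss_rng", ...) with the kernel crypto API, so in-kernel users reach the hardware simply by requesting the generic algorithm name and letting priority-based selection pick the offloaded implementation. For orientation only, here is a minimal consumer sketch; it is not part of the patch, the buffer/key/IV names are illustrative, and a real caller must pass DMA-safe (non-stack) memory.

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

/*
 * Hedged sketch: encrypt one AES block through whatever "cbc(aes)"
 * implementation currently has the highest priority (the sun4i-ss
 * skcipher below registers at priority 300). buf, key and iv are
 * hypothetical caller-provided, DMA-safe buffers.
 */
static int example_cbc_aes_one_block(u8 *buf, const u8 *key, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

	/* Wait synchronously for the (possibly asynchronous) request. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}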
diff --git a/drivers/crypto/allwinner/Kconfig b/drivers/crypto/allwinner/Kconfig new file mode 100644 index 000000000..b8e75210a --- /dev/null +++ b/drivers/crypto/allwinner/Kconfig @@ -0,0 +1,139 @@ +config CRYPTO_DEV_ALLWINNER + bool "Support for Allwinner cryptographic offloader" + depends on ARCH_SUNXI || COMPILE_TEST + default y if ARCH_SUNXI + help + Say Y here to get to see options for Allwinner hardware crypto devices + +config CRYPTO_DEV_SUN4I_SS + tristate "Support for Allwinner Security System cryptographic accelerator" + depends on ARCH_SUNXI + depends on PM + depends on CRYPTO_DEV_ALLWINNER + select CRYPTO_MD5 + select CRYPTO_SHA1 + select CRYPTO_AES + select CRYPTO_LIB_DES + select CRYPTO_SKCIPHER + help + Some Allwinner SoC have a crypto accelerator named + Security System. Select this if you want to use it. + The Security System handle AES/DES/3DES ciphers in CBC mode + and SHA1 and MD5 hash algorithms. + + To compile this driver as a module, choose M here: the module + will be called sun4i-ss. + +config CRYPTO_DEV_SUN4I_SS_PRNG + bool "Support for Allwinner Security System PRNG" + depends on CRYPTO_DEV_SUN4I_SS + select CRYPTO_RNG + help + Select this option if you want to provide kernel-side support for + the Pseudo-Random Number Generator found in the Security System. + +config CRYPTO_DEV_SUN4I_SS_DEBUG + bool "Enable sun4i-ss stats" + depends on CRYPTO_DEV_SUN4I_SS + depends on DEBUG_FS + help + Say y to enable sun4i-ss debug stats. + This will create /sys/kernel/debug/sun4i-ss/stats for displaying + the number of requests per algorithm. + +config CRYPTO_DEV_SUN8I_CE + tristate "Support for Allwinner Crypto Engine cryptographic offloader" + select CRYPTO_SKCIPHER + select CRYPTO_ENGINE + select CRYPTO_ECB + select CRYPTO_CBC + select CRYPTO_AES + select CRYPTO_DES + depends on CRYPTO_DEV_ALLWINNER + depends on PM + help + Select y here to have support for the crypto Engine available on + Allwinner SoC H2+, H3, H5, H6, R40 and A64. + The Crypto Engine handle AES/3DES ciphers in ECB/CBC mode. + + To compile this driver as a module, choose M here: the module + will be called sun8i-ce. + +config CRYPTO_DEV_SUN8I_CE_DEBUG + bool "Enable sun8i-ce stats" + depends on CRYPTO_DEV_SUN8I_CE + depends on DEBUG_FS + help + Say y to enable sun8i-ce debug stats. + This will create /sys/kernel/debug/sun8i-ce/stats for displaying + the number of requests per flow and per algorithm. + +config CRYPTO_DEV_SUN8I_CE_HASH + bool "Enable support for hash on sun8i-ce" + depends on CRYPTO_DEV_SUN8I_CE + select CRYPTO_MD5 + select CRYPTO_SHA1 + select CRYPTO_SHA256 + select CRYPTO_SHA512 + help + Say y to enable support for hash algorithms. + +config CRYPTO_DEV_SUN8I_CE_PRNG + bool "Support for Allwinner Crypto Engine PRNG" + depends on CRYPTO_DEV_SUN8I_CE + select CRYPTO_RNG + help + Select this option if you want to provide kernel-side support for + the Pseudo-Random Number Generator found in the Crypto Engine. + +config CRYPTO_DEV_SUN8I_CE_TRNG + bool "Support for Allwinner Crypto Engine TRNG" + depends on CRYPTO_DEV_SUN8I_CE + select HW_RANDOM + help + Select this option if you want to provide kernel-side support for + the True Random Number Generator found in the Crypto Engine. 
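The PRNG/TRNG options above plug into the crypto API's RNG interface (CRYPTO_RNG) and hw_random rather than adding a dedicated character device. As a reference point only, a hedged sketch of an in-kernel consumer pulling bytes from the registered "stdrng" provider follows; it is not part of the patch and the seed/output buffer names are illustrative.

#include <crypto/rng.h>

/*
 * Hedged sketch: seed and read whichever "stdrng" implementation is
 * selected by the crypto API (e.g. the "sun4i_ss_rng" registered by
 * this series when it has the highest priority). out/seed lengths are
 * caller-chosen and purely illustrative.
 */
static int example_prng_read(u8 *out, unsigned int outlen,
			     const u8 *seed, unsigned int slen)
{
	struct crypto_rng *rng;
	int err;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	err = crypto_rng_reset(rng, seed, slen);
	if (!err)
		err = crypto_rng_get_bytes(rng, out, outlen);

	crypto_free_rng(rng);
	return err;
}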
+ +config CRYPTO_DEV_SUN8I_SS + tristate "Support for Allwinner Security System cryptographic offloader" + select CRYPTO_SKCIPHER + select CRYPTO_ENGINE + select CRYPTO_ECB + select CRYPTO_CBC + select CRYPTO_AES + select CRYPTO_DES + depends on CRYPTO_DEV_ALLWINNER + depends on PM + help + Select y here to have support for the Security System available on + Allwinner SoC A80, A83T. + The Security System handle AES/3DES ciphers in ECB/CBC mode. + + To compile this driver as a module, choose M here: the module + will be called sun8i-ss. + +config CRYPTO_DEV_SUN8I_SS_DEBUG + bool "Enable sun8i-ss stats" + depends on CRYPTO_DEV_SUN8I_SS + depends on DEBUG_FS + help + Say y to enable sun8i-ss debug stats. + This will create /sys/kernel/debug/sun8i-ss/stats for displaying + the number of requests per flow and per algorithm. + +config CRYPTO_DEV_SUN8I_SS_PRNG + bool "Support for Allwinner Security System PRNG" + depends on CRYPTO_DEV_SUN8I_SS + select CRYPTO_RNG + help + Select this option if you want to provide kernel-side support for + the Pseudo-Random Number Generator found in the Security System. + +config CRYPTO_DEV_SUN8I_SS_HASH + bool "Enable support for hash on sun8i-ss" + depends on CRYPTO_DEV_SUN8I_SS + select CRYPTO_MD5 + select CRYPTO_SHA1 + select CRYPTO_SHA256 + help + Say y to enable support for hash algorithms. diff --git a/drivers/crypto/allwinner/Makefile b/drivers/crypto/allwinner/Makefile new file mode 100644 index 000000000..6effe864d --- /dev/null +++ b/drivers/crypto/allwinner/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sun4i-ss/ +obj-$(CONFIG_CRYPTO_DEV_SUN8I_CE) += sun8i-ce/ +obj-$(CONFIG_CRYPTO_DEV_SUN8I_SS) += sun8i-ss/ diff --git a/drivers/crypto/allwinner/sun4i-ss/Makefile b/drivers/crypto/allwinner/sun4i-ss/Makefile new file mode 100644 index 000000000..c0a2797d3 --- /dev/null +++ b/drivers/crypto/allwinner/sun4i-ss/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sun4i-ss.o +sun4i-ss-y += sun4i-ss-core.o sun4i-ss-hash.o sun4i-ss-cipher.o +sun4i-ss-$(CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG) += sun4i-ss-prng.o diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c new file mode 100644 index 000000000..10fe9f73a --- /dev/null +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c @@ -0,0 +1,644 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC + * + * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com> + * + * This file add support for AES cipher with 128,192,256 bits + * keysize in CBC and ECB mode. + * Add support also for DES and 3DES in CBC and ECB mode. 
+ * + * You could find the datasheet in Documentation/arm/sunxi.rst + */ +#include "sun4i-ss.h" + +static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_ss_ctx *ss = op->ss; + unsigned int ivsize = crypto_skcipher_ivsize(tfm); + struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq); + u32 mode = ctx->mode; + /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */ + u32 rx_cnt = SS_RX_DEFAULT; + u32 tx_cnt = 0; + u32 spaces; + u32 v; + int err = 0; + unsigned int i; + unsigned int ileft = areq->cryptlen; + unsigned int oleft = areq->cryptlen; + unsigned int todo; + unsigned long pi = 0, po = 0; /* progress for in and out */ + bool miter_err; + struct sg_mapping_iter mi, mo; + unsigned int oi, oo; /* offset for in and out */ + unsigned long flags; + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct sun4i_ss_alg_template *algt; + + if (!areq->cryptlen) + return 0; + + if (!areq->src || !areq->dst) { + dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n"); + return -EINVAL; + } + + if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) { + scatterwalk_map_and_copy(ctx->backup_iv, areq->src, + areq->cryptlen - ivsize, ivsize, 0); + } + + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) { + algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto); + algt->stat_opti++; + algt->stat_bytes += areq->cryptlen; + } + + spin_lock_irqsave(&ss->slock, flags); + + for (i = 0; i < op->keylen / 4; i++) + writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1); + + if (areq->iv) { + for (i = 0; i < 4 && i < ivsize / 4; i++) { + v = *(u32 *)(areq->iv + i * 4); + writesl(ss->base + SS_IV0 + i * 4, &v, 1); + } + } + writel(mode, ss->base + SS_CTL); + + + ileft = areq->cryptlen / 4; + oleft = areq->cryptlen / 4; + oi = 0; + oo = 0; + do { + if (ileft) { + sg_miter_start(&mi, areq->src, sg_nents(areq->src), + SG_MITER_FROM_SG | SG_MITER_ATOMIC); + if (pi) + sg_miter_skip(&mi, pi); + miter_err = sg_miter_next(&mi); + if (!miter_err || !mi.addr) { + dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n"); + err = -EINVAL; + goto release_ss; + } + todo = min(rx_cnt, ileft); + todo = min_t(size_t, todo, (mi.length - oi) / 4); + if (todo) { + ileft -= todo; + writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo); + oi += todo * 4; + } + if (oi == mi.length) { + pi += mi.length; + oi = 0; + } + sg_miter_stop(&mi); + } + + spaces = readl(ss->base + SS_FCSR); + rx_cnt = SS_RXFIFO_SPACES(spaces); + tx_cnt = SS_TXFIFO_SPACES(spaces); + + sg_miter_start(&mo, areq->dst, sg_nents(areq->dst), + SG_MITER_TO_SG | SG_MITER_ATOMIC); + if (po) + sg_miter_skip(&mo, po); + miter_err = sg_miter_next(&mo); + if (!miter_err || !mo.addr) { + dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n"); + err = -EINVAL; + goto release_ss; + } + todo = min(tx_cnt, oleft); + todo = min_t(size_t, todo, (mo.length - oo) / 4); + if (todo) { + oleft -= todo; + readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo); + oo += todo * 4; + } + if (oo == mo.length) { + oo = 0; + po += mo.length; + } + sg_miter_stop(&mo); + } while (oleft); + + if (areq->iv) { + if (mode & SS_DECRYPTION) { + memcpy(areq->iv, ctx->backup_iv, ivsize); + memzero_explicit(ctx->backup_iv, ivsize); + } else { + scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize, + ivsize, 0); + } + } + +release_ss: + writel(0, ss->base + SS_CTL); + 
spin_unlock_irqrestore(&ss->slock, flags); + return err; +} + +static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq); + int err; + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct sun4i_ss_alg_template *algt; + + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) { + algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto); + algt->stat_fb++; + } + + skcipher_request_set_tfm(&ctx->fallback_req, op->fallback_tfm); + skcipher_request_set_callback(&ctx->fallback_req, areq->base.flags, + areq->base.complete, areq->base.data); + skcipher_request_set_crypt(&ctx->fallback_req, areq->src, areq->dst, + areq->cryptlen, areq->iv); + if (ctx->mode & SS_DECRYPTION) + err = crypto_skcipher_decrypt(&ctx->fallback_req); + else + err = crypto_skcipher_encrypt(&ctx->fallback_req); + + return err; +} + +/* Generic function that support SG with size not multiple of 4 */ +static int sun4i_ss_cipher_poll(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_ss_ctx *ss = op->ss; + int no_chunk = 1; + struct scatterlist *in_sg = areq->src; + struct scatterlist *out_sg = areq->dst; + unsigned int ivsize = crypto_skcipher_ivsize(tfm); + struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq); + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct sun4i_ss_alg_template *algt; + u32 mode = ctx->mode; + /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */ + u32 rx_cnt = SS_RX_DEFAULT; + u32 tx_cnt = 0; + u32 v; + u32 spaces; + int err = 0; + unsigned int i; + unsigned int ileft = areq->cryptlen; + unsigned int oleft = areq->cryptlen; + unsigned int todo; + struct sg_mapping_iter mi, mo; + unsigned long pi = 0, po = 0; /* progress for in and out */ + bool miter_err; + unsigned int oi, oo; /* offset for in and out */ + unsigned int ob = 0; /* offset in buf */ + unsigned int obo = 0; /* offset in bufo*/ + unsigned int obl = 0; /* length of data in bufo */ + unsigned long flags; + bool need_fallback = false; + + if (!areq->cryptlen) + return 0; + + if (!areq->src || !areq->dst) { + dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n"); + return -EINVAL; + } + + algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto); + if (areq->cryptlen % algt->alg.crypto.base.cra_blocksize) + need_fallback = true; + + /* + * if we have only SGs with size multiple of 4, + * we can use the SS optimized function + */ + while (in_sg && no_chunk == 1) { + if ((in_sg->length | in_sg->offset) & 3u) + no_chunk = 0; + in_sg = sg_next(in_sg); + } + while (out_sg && no_chunk == 1) { + if ((out_sg->length | out_sg->offset) & 3u) + no_chunk = 0; + out_sg = sg_next(out_sg); + } + + if (no_chunk == 1 && !need_fallback) + return sun4i_ss_opti_poll(areq); + + if (need_fallback) + return sun4i_ss_cipher_poll_fallback(areq); + + if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) { + scatterwalk_map_and_copy(ctx->backup_iv, areq->src, + areq->cryptlen - ivsize, ivsize, 0); + } + + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) { + algt->stat_req++; + algt->stat_bytes += areq->cryptlen; + } + + spin_lock_irqsave(&ss->slock, flags); + + for (i = 0; i < op->keylen / 4; i++) + writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1); + + if (areq->iv) { + for (i = 
0; i < 4 && i < ivsize / 4; i++) { + v = *(u32 *)(areq->iv + i * 4); + writesl(ss->base + SS_IV0 + i * 4, &v, 1); + } + } + writel(mode, ss->base + SS_CTL); + + ileft = areq->cryptlen; + oleft = areq->cryptlen; + oi = 0; + oo = 0; + + while (oleft) { + if (ileft) { + sg_miter_start(&mi, areq->src, sg_nents(areq->src), + SG_MITER_FROM_SG | SG_MITER_ATOMIC); + if (pi) + sg_miter_skip(&mi, pi); + miter_err = sg_miter_next(&mi); + if (!miter_err || !mi.addr) { + dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n"); + err = -EINVAL; + goto release_ss; + } + /* + * todo is the number of consecutive 4byte word that we + * can read from current SG + */ + todo = min(rx_cnt, ileft / 4); + todo = min_t(size_t, todo, (mi.length - oi) / 4); + if (todo && !ob) { + writesl(ss->base + SS_RXFIFO, mi.addr + oi, + todo); + ileft -= todo * 4; + oi += todo * 4; + } else { + /* + * not enough consecutive bytes, so we need to + * linearize in buf. todo is in bytes + * After that copy, if we have a multiple of 4 + * we need to be able to write all buf in one + * pass, so it is why we min() with rx_cnt + */ + todo = min(rx_cnt * 4 - ob, ileft); + todo = min_t(size_t, todo, mi.length - oi); + memcpy(ss->buf + ob, mi.addr + oi, todo); + ileft -= todo; + oi += todo; + ob += todo; + if (!(ob % 4)) { + writesl(ss->base + SS_RXFIFO, ss->buf, + ob / 4); + ob = 0; + } + } + if (oi == mi.length) { + pi += mi.length; + oi = 0; + } + sg_miter_stop(&mi); + } + + spaces = readl(ss->base + SS_FCSR); + rx_cnt = SS_RXFIFO_SPACES(spaces); + tx_cnt = SS_TXFIFO_SPACES(spaces); + + if (!tx_cnt) + continue; + sg_miter_start(&mo, areq->dst, sg_nents(areq->dst), + SG_MITER_TO_SG | SG_MITER_ATOMIC); + if (po) + sg_miter_skip(&mo, po); + miter_err = sg_miter_next(&mo); + if (!miter_err || !mo.addr) { + dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n"); + err = -EINVAL; + goto release_ss; + } + /* todo in 4bytes word */ + todo = min(tx_cnt, oleft / 4); + todo = min_t(size_t, todo, (mo.length - oo) / 4); + + if (todo) { + readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo); + oleft -= todo * 4; + oo += todo * 4; + if (oo == mo.length) { + po += mo.length; + oo = 0; + } + } else { + /* + * read obl bytes in bufo, we read at maximum for + * emptying the device + */ + readsl(ss->base + SS_TXFIFO, ss->bufo, tx_cnt); + obl = tx_cnt * 4; + obo = 0; + do { + /* + * how many bytes we can copy ? 
+ * no more than remaining SG size + * no more than remaining buffer + * no need to test against oleft + */ + todo = min_t(size_t, + mo.length - oo, obl - obo); + memcpy(mo.addr + oo, ss->bufo + obo, todo); + oleft -= todo; + obo += todo; + oo += todo; + if (oo == mo.length) { + po += mo.length; + sg_miter_next(&mo); + oo = 0; + } + } while (obo < obl); + /* bufo must be fully used here */ + } + sg_miter_stop(&mo); + } + if (areq->iv) { + if (mode & SS_DECRYPTION) { + memcpy(areq->iv, ctx->backup_iv, ivsize); + memzero_explicit(ctx->backup_iv, ivsize); + } else { + scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize, + ivsize, 0); + } + } + +release_ss: + writel(0, ss->base + SS_CTL); + spin_unlock_irqrestore(&ss->slock, flags); + + return err; +} + +/* CBC AES */ +int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + + rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION | + op->keymode; + return sun4i_ss_cipher_poll(areq); +} + +int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + + rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION | + op->keymode; + return sun4i_ss_cipher_poll(areq); +} + +/* ECB AES */ +int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + + rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION | + op->keymode; + return sun4i_ss_cipher_poll(areq); +} + +int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + + rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION | + op->keymode; + return sun4i_ss_cipher_poll(areq); +} + +/* CBC DES */ +int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + + rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION | + op->keymode; + return sun4i_ss_cipher_poll(areq); +} + +int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + + rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION | + op->keymode; + return sun4i_ss_cipher_poll(areq); +} + +/* ECB DES */ +int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + + rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION | + op->keymode; + return sun4i_ss_cipher_poll(areq); +} + +int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = 
crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + + rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION | + op->keymode; + return sun4i_ss_cipher_poll(areq); +} + +/* CBC 3DES */ +int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + + rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION | + op->keymode; + return sun4i_ss_cipher_poll(areq); +} + +int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + + rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION | + op->keymode; + return sun4i_ss_cipher_poll(areq); +} + +/* ECB 3DES */ +int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + + rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION | + op->keymode; + return sun4i_ss_cipher_poll(areq); +} + +int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + + rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION | + op->keymode; + return sun4i_ss_cipher_poll(areq); +} + +int sun4i_ss_cipher_init(struct crypto_tfm *tfm) +{ + struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm); + struct sun4i_ss_alg_template *algt; + const char *name = crypto_tfm_alg_name(tfm); + int err; + + memset(op, 0, sizeof(struct sun4i_tfm_ctx)); + + algt = container_of(tfm->__crt_alg, struct sun4i_ss_alg_template, + alg.crypto.base); + op->ss = algt->ss; + + op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(op->fallback_tfm)) { + dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n", + name, PTR_ERR(op->fallback_tfm)); + return PTR_ERR(op->fallback_tfm); + } + + crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), + sizeof(struct sun4i_cipher_req_ctx) + + crypto_skcipher_reqsize(op->fallback_tfm)); + + err = pm_runtime_resume_and_get(op->ss->dev); + if (err < 0) + goto error_pm; + + return 0; +error_pm: + crypto_free_skcipher(op->fallback_tfm); + return err; +} + +void sun4i_ss_cipher_exit(struct crypto_tfm *tfm) +{ + struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm); + + crypto_free_skcipher(op->fallback_tfm); + pm_runtime_put(op->ss->dev); +} + +/* check and set the AES key, prepare the mode to be used */ +int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_ss_ctx *ss = op->ss; + + switch (keylen) { + case 128 / 8: + op->keymode = SS_AES_128BITS; + break; + case 192 / 8: + op->keymode = SS_AES_192BITS; + break; + case 256 / 8: + op->keymode = SS_AES_256BITS; + break; + default: + dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen); + return -EINVAL; + } + op->keylen = keylen; + memcpy(op->key, key, keylen); + + 
crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); +} + +/* check and set the DES key, prepare the mode to be used */ +int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + int err; + + err = verify_skcipher_des_key(tfm, key); + if (err) + return err; + + op->keylen = keylen; + memcpy(op->key, key, keylen); + + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); +} + +/* check and set the 3DES key, prepare the mode to be used */ +int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + int err; + + err = verify_skcipher_des3_key(tfm, key); + if (err) + return err; + + op->keylen = keylen; + memcpy(op->key, key, keylen); + + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); +} diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c new file mode 100644 index 000000000..006e40133 --- /dev/null +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c @@ -0,0 +1,562 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * sun4i-ss-core.c - hardware cryptographic accelerator for Allwinner A20 SoC + * + * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com> + * + * Core file which registers crypto algorithms supported by the SS. 
+ * + * You could find a link for the datasheet in Documentation/arm/sunxi.rst + */ +#include <linux/clk.h> +#include <linux/crypto.h> +#include <linux/debugfs.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <crypto/scatterwalk.h> +#include <linux/scatterlist.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/reset.h> + +#include "sun4i-ss.h" + +static const struct ss_variant ss_a10_variant = { + .sha1_in_be = false, +}; + +static const struct ss_variant ss_a33_variant = { + .sha1_in_be = true, +}; + +static struct sun4i_ss_alg_template ss_algs[] = { +{ .type = CRYPTO_ALG_TYPE_AHASH, + .mode = SS_OP_MD5, + .alg.hash = { + .init = sun4i_hash_init, + .update = sun4i_hash_update, + .final = sun4i_hash_final, + .finup = sun4i_hash_finup, + .digest = sun4i_hash_digest, + .export = sun4i_hash_export_md5, + .import = sun4i_hash_import_md5, + .halg = { + .digestsize = MD5_DIGEST_SIZE, + .statesize = sizeof(struct md5_state), + .base = { + .cra_name = "md5", + .cra_driver_name = "md5-sun4i-ss", + .cra_priority = 300, + .cra_alignmask = 3, + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sun4i_req_ctx), + .cra_module = THIS_MODULE, + .cra_init = sun4i_hash_crainit, + .cra_exit = sun4i_hash_craexit, + } + } + } +}, +{ .type = CRYPTO_ALG_TYPE_AHASH, + .mode = SS_OP_SHA1, + .alg.hash = { + .init = sun4i_hash_init, + .update = sun4i_hash_update, + .final = sun4i_hash_final, + .finup = sun4i_hash_finup, + .digest = sun4i_hash_digest, + .export = sun4i_hash_export_sha1, + .import = sun4i_hash_import_sha1, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct sha1_state), + .base = { + .cra_name = "sha1", + .cra_driver_name = "sha1-sun4i-ss", + .cra_priority = 300, + .cra_alignmask = 3, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sun4i_req_ctx), + .cra_module = THIS_MODULE, + .cra_init = sun4i_hash_crainit, + .cra_exit = sun4i_hash_craexit, + } + } + } +}, +{ .type = CRYPTO_ALG_TYPE_SKCIPHER, + .alg.crypto = { + .setkey = sun4i_ss_aes_setkey, + .encrypt = sun4i_ss_cbc_aes_encrypt, + .decrypt = sun4i_ss_cbc_aes_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-sun4i-ss", + .cra_priority = 300, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, + .cra_ctxsize = sizeof(struct sun4i_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 3, + .cra_init = sun4i_ss_cipher_init, + .cra_exit = sun4i_ss_cipher_exit, + } + } +}, +{ .type = CRYPTO_ALG_TYPE_SKCIPHER, + .alg.crypto = { + .setkey = sun4i_ss_aes_setkey, + .encrypt = sun4i_ss_ecb_aes_encrypt, + .decrypt = sun4i_ss_ecb_aes_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .base = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-sun4i-ss", + .cra_priority = 300, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, + .cra_ctxsize = sizeof(struct sun4i_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 3, + .cra_init = sun4i_ss_cipher_init, + .cra_exit = sun4i_ss_cipher_exit, + } + } +}, +{ .type = CRYPTO_ALG_TYPE_SKCIPHER, + .alg.crypto = { + .setkey = sun4i_ss_des_setkey, + .encrypt = sun4i_ss_cbc_des_encrypt, + .decrypt = sun4i_ss_cbc_des_decrypt, + .min_keysize = DES_KEY_SIZE, + 
.max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .base = { + .cra_name = "cbc(des)", + .cra_driver_name = "cbc-des-sun4i-ss", + .cra_priority = 300, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, + .cra_ctxsize = sizeof(struct sun4i_req_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 3, + .cra_init = sun4i_ss_cipher_init, + .cra_exit = sun4i_ss_cipher_exit, + } + } +}, +{ .type = CRYPTO_ALG_TYPE_SKCIPHER, + .alg.crypto = { + .setkey = sun4i_ss_des_setkey, + .encrypt = sun4i_ss_ecb_des_encrypt, + .decrypt = sun4i_ss_ecb_des_decrypt, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .base = { + .cra_name = "ecb(des)", + .cra_driver_name = "ecb-des-sun4i-ss", + .cra_priority = 300, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, + .cra_ctxsize = sizeof(struct sun4i_req_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 3, + .cra_init = sun4i_ss_cipher_init, + .cra_exit = sun4i_ss_cipher_exit, + } + } +}, +{ .type = CRYPTO_ALG_TYPE_SKCIPHER, + .alg.crypto = { + .setkey = sun4i_ss_des3_setkey, + .encrypt = sun4i_ss_cbc_des3_encrypt, + .decrypt = sun4i_ss_cbc_des3_decrypt, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .base = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "cbc-des3-sun4i-ss", + .cra_priority = 300, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, + .cra_ctxsize = sizeof(struct sun4i_req_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 3, + .cra_init = sun4i_ss_cipher_init, + .cra_exit = sun4i_ss_cipher_exit, + } + } +}, +{ .type = CRYPTO_ALG_TYPE_SKCIPHER, + .alg.crypto = { + .setkey = sun4i_ss_des3_setkey, + .encrypt = sun4i_ss_ecb_des3_encrypt, + .decrypt = sun4i_ss_ecb_des3_decrypt, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .base = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "ecb-des3-sun4i-ss", + .cra_priority = 300, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, + .cra_ctxsize = sizeof(struct sun4i_req_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 3, + .cra_init = sun4i_ss_cipher_init, + .cra_exit = sun4i_ss_cipher_exit, + } + } +}, +#ifdef CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG +{ + .type = CRYPTO_ALG_TYPE_RNG, + .alg.rng = { + .base = { + .cra_name = "stdrng", + .cra_driver_name = "sun4i_ss_rng", + .cra_priority = 300, + .cra_ctxsize = 0, + .cra_module = THIS_MODULE, + }, + .generate = sun4i_ss_prng_generate, + .seed = sun4i_ss_prng_seed, + .seedsize = SS_SEED_LEN / BITS_PER_BYTE, + } +}, +#endif +}; + +static int sun4i_ss_debugfs_show(struct seq_file *seq, void *v) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { + if (!ss_algs[i].ss) + continue; + switch (ss_algs[i].type) { + case CRYPTO_ALG_TYPE_SKCIPHER: + seq_printf(seq, "%s %s reqs=%lu opti=%lu fallback=%lu tsize=%lu\n", + ss_algs[i].alg.crypto.base.cra_driver_name, + ss_algs[i].alg.crypto.base.cra_name, + ss_algs[i].stat_req, ss_algs[i].stat_opti, ss_algs[i].stat_fb, + ss_algs[i].stat_bytes); + break; + case CRYPTO_ALG_TYPE_RNG: + seq_printf(seq, "%s %s reqs=%lu tsize=%lu\n", + ss_algs[i].alg.rng.base.cra_driver_name, + ss_algs[i].alg.rng.base.cra_name, + ss_algs[i].stat_req, ss_algs[i].stat_bytes); + break; + case CRYPTO_ALG_TYPE_AHASH: + seq_printf(seq, "%s %s reqs=%lu\n", + 
ss_algs[i].alg.hash.halg.base.cra_driver_name, + ss_algs[i].alg.hash.halg.base.cra_name, + ss_algs[i].stat_req); + break; + } + } + return 0; +} +DEFINE_SHOW_ATTRIBUTE(sun4i_ss_debugfs); + +/* + * Power management strategy: The device is suspended unless a TFM exists for + * one of the algorithms proposed by this driver. + */ +static int sun4i_ss_pm_suspend(struct device *dev) +{ + struct sun4i_ss_ctx *ss = dev_get_drvdata(dev); + + reset_control_assert(ss->reset); + + clk_disable_unprepare(ss->ssclk); + clk_disable_unprepare(ss->busclk); + return 0; +} + +static int sun4i_ss_pm_resume(struct device *dev) +{ + struct sun4i_ss_ctx *ss = dev_get_drvdata(dev); + + int err; + + err = clk_prepare_enable(ss->busclk); + if (err) { + dev_err(ss->dev, "Cannot prepare_enable busclk\n"); + goto err_enable; + } + + err = clk_prepare_enable(ss->ssclk); + if (err) { + dev_err(ss->dev, "Cannot prepare_enable ssclk\n"); + goto err_enable; + } + + err = reset_control_deassert(ss->reset); + if (err) { + dev_err(ss->dev, "Cannot deassert reset control\n"); + goto err_enable; + } + + return err; +err_enable: + sun4i_ss_pm_suspend(dev); + return err; +} + +static const struct dev_pm_ops sun4i_ss_pm_ops = { + SET_RUNTIME_PM_OPS(sun4i_ss_pm_suspend, sun4i_ss_pm_resume, NULL) +}; + +/* + * When power management is enabled, this function enables the PM and set the + * device as suspended + * When power management is disabled, this function just enables the device + */ +static int sun4i_ss_pm_init(struct sun4i_ss_ctx *ss) +{ + int err; + + pm_runtime_use_autosuspend(ss->dev); + pm_runtime_set_autosuspend_delay(ss->dev, 2000); + + err = pm_runtime_set_suspended(ss->dev); + if (err) + return err; + pm_runtime_enable(ss->dev); + return err; +} + +static void sun4i_ss_pm_exit(struct sun4i_ss_ctx *ss) +{ + pm_runtime_disable(ss->dev); +} + +static int sun4i_ss_probe(struct platform_device *pdev) +{ + u32 v; + int err, i; + unsigned long cr; + const unsigned long cr_ahb = 24 * 1000 * 1000; + const unsigned long cr_mod = 150 * 1000 * 1000; + struct sun4i_ss_ctx *ss; + + if (!pdev->dev.of_node) + return -ENODEV; + + ss = devm_kzalloc(&pdev->dev, sizeof(*ss), GFP_KERNEL); + if (!ss) + return -ENOMEM; + + ss->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ss->base)) { + dev_err(&pdev->dev, "Cannot request MMIO\n"); + return PTR_ERR(ss->base); + } + + ss->variant = of_device_get_match_data(&pdev->dev); + if (!ss->variant) { + dev_err(&pdev->dev, "Missing Security System variant\n"); + return -EINVAL; + } + + ss->ssclk = devm_clk_get(&pdev->dev, "mod"); + if (IS_ERR(ss->ssclk)) { + err = PTR_ERR(ss->ssclk); + dev_err(&pdev->dev, "Cannot get SS clock err=%d\n", err); + return err; + } + dev_dbg(&pdev->dev, "clock ss acquired\n"); + + ss->busclk = devm_clk_get(&pdev->dev, "ahb"); + if (IS_ERR(ss->busclk)) { + err = PTR_ERR(ss->busclk); + dev_err(&pdev->dev, "Cannot get AHB SS clock err=%d\n", err); + return err; + } + dev_dbg(&pdev->dev, "clock ahb_ss acquired\n"); + + ss->reset = devm_reset_control_get_optional(&pdev->dev, "ahb"); + if (IS_ERR(ss->reset)) + return PTR_ERR(ss->reset); + if (!ss->reset) + dev_info(&pdev->dev, "no reset control found\n"); + + /* + * Check that clock have the correct rates given in the datasheet + * Try to set the clock to the maximum allowed + */ + err = clk_set_rate(ss->ssclk, cr_mod); + if (err) { + dev_err(&pdev->dev, "Cannot set clock rate to ssclk\n"); + return err; + } + + /* + * The only impact on clocks below requirement are bad performance, + * so do not print "errors" + * 
warn on Overclocked clocks + */ + cr = clk_get_rate(ss->busclk); + if (cr >= cr_ahb) + dev_dbg(&pdev->dev, "Clock bus %lu (%lu MHz) (must be >= %lu)\n", + cr, cr / 1000000, cr_ahb); + else + dev_warn(&pdev->dev, "Clock bus %lu (%lu MHz) (must be >= %lu)\n", + cr, cr / 1000000, cr_ahb); + + cr = clk_get_rate(ss->ssclk); + if (cr <= cr_mod) + if (cr < cr_mod) + dev_warn(&pdev->dev, "Clock ss %lu (%lu MHz) (must be <= %lu)\n", + cr, cr / 1000000, cr_mod); + else + dev_dbg(&pdev->dev, "Clock ss %lu (%lu MHz) (must be <= %lu)\n", + cr, cr / 1000000, cr_mod); + else + dev_warn(&pdev->dev, "Clock ss is at %lu (%lu MHz) (must be <= %lu)\n", + cr, cr / 1000000, cr_mod); + + ss->dev = &pdev->dev; + platform_set_drvdata(pdev, ss); + + spin_lock_init(&ss->slock); + + err = sun4i_ss_pm_init(ss); + if (err) + return err; + + /* + * Datasheet named it "Die Bonding ID" + * I expect to be a sort of Security System Revision number. + * Since the A80 seems to have an other version of SS + * this info could be useful + */ + + err = pm_runtime_resume_and_get(ss->dev); + if (err < 0) + goto error_pm; + + writel(SS_ENABLED, ss->base + SS_CTL); + v = readl(ss->base + SS_CTL); + v >>= 16; + v &= 0x07; + dev_info(&pdev->dev, "Die ID %d\n", v); + writel(0, ss->base + SS_CTL); + + pm_runtime_put_sync(ss->dev); + + for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { + ss_algs[i].ss = ss; + switch (ss_algs[i].type) { + case CRYPTO_ALG_TYPE_SKCIPHER: + err = crypto_register_skcipher(&ss_algs[i].alg.crypto); + if (err) { + dev_err(ss->dev, "Fail to register %s\n", + ss_algs[i].alg.crypto.base.cra_name); + goto error_alg; + } + break; + case CRYPTO_ALG_TYPE_AHASH: + err = crypto_register_ahash(&ss_algs[i].alg.hash); + if (err) { + dev_err(ss->dev, "Fail to register %s\n", + ss_algs[i].alg.hash.halg.base.cra_name); + goto error_alg; + } + break; + case CRYPTO_ALG_TYPE_RNG: + err = crypto_register_rng(&ss_algs[i].alg.rng); + if (err) { + dev_err(ss->dev, "Fail to register %s\n", + ss_algs[i].alg.rng.base.cra_name); + } + break; + } + } + + /* Ignore error of debugfs */ + ss->dbgfs_dir = debugfs_create_dir("sun4i-ss", NULL); + ss->dbgfs_stats = debugfs_create_file("stats", 0444, ss->dbgfs_dir, ss, + &sun4i_ss_debugfs_fops); + + return 0; +error_alg: + i--; + for (; i >= 0; i--) { + switch (ss_algs[i].type) { + case CRYPTO_ALG_TYPE_SKCIPHER: + crypto_unregister_skcipher(&ss_algs[i].alg.crypto); + break; + case CRYPTO_ALG_TYPE_AHASH: + crypto_unregister_ahash(&ss_algs[i].alg.hash); + break; + case CRYPTO_ALG_TYPE_RNG: + crypto_unregister_rng(&ss_algs[i].alg.rng); + break; + } + } +error_pm: + sun4i_ss_pm_exit(ss); + return err; +} + +static int sun4i_ss_remove(struct platform_device *pdev) +{ + int i; + struct sun4i_ss_ctx *ss = platform_get_drvdata(pdev); + + for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { + switch (ss_algs[i].type) { + case CRYPTO_ALG_TYPE_SKCIPHER: + crypto_unregister_skcipher(&ss_algs[i].alg.crypto); + break; + case CRYPTO_ALG_TYPE_AHASH: + crypto_unregister_ahash(&ss_algs[i].alg.hash); + break; + case CRYPTO_ALG_TYPE_RNG: + crypto_unregister_rng(&ss_algs[i].alg.rng); + break; + } + } + + sun4i_ss_pm_exit(ss); + return 0; +} + +static const struct of_device_id a20ss_crypto_of_match_table[] = { + { .compatible = "allwinner,sun4i-a10-crypto", + .data = &ss_a10_variant + }, + { .compatible = "allwinner,sun8i-a33-crypto", + .data = &ss_a33_variant + }, + {} +}; +MODULE_DEVICE_TABLE(of, a20ss_crypto_of_match_table); + +static struct platform_driver sun4i_ss_driver = { + .probe = sun4i_ss_probe, + .remove = sun4i_ss_remove, 
+ .driver = { + .name = "sun4i-ss", + .pm = &sun4i_ss_pm_ops, + .of_match_table = a20ss_crypto_of_match_table, + }, +}; + +module_platform_driver(sun4i_ss_driver); + +MODULE_ALIAS("platform:sun4i-ss"); +MODULE_DESCRIPTION("Allwinner Security System cryptographic accelerator"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Corentin LABBE <clabbe.montjoie@gmail.com>"); diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c new file mode 100644 index 000000000..d28292762 --- /dev/null +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c @@ -0,0 +1,545 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * sun4i-ss-hash.c - hardware cryptographic accelerator for Allwinner A20 SoC + * + * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com> + * + * This file add support for MD5 and SHA1. + * + * You could find the datasheet in Documentation/arm/sunxi.rst + */ +#include "sun4i-ss.h" +#include <asm/unaligned.h> +#include <linux/scatterlist.h> + +/* This is a totally arbitrary value */ +#define SS_TIMEOUT 100 + +int sun4i_hash_crainit(struct crypto_tfm *tfm) +{ + struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm); + struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); + struct sun4i_ss_alg_template *algt; + int err; + + memset(op, 0, sizeof(struct sun4i_tfm_ctx)); + + algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash); + op->ss = algt->ss; + + err = pm_runtime_resume_and_get(op->ss->dev); + if (err < 0) + return err; + + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct sun4i_req_ctx)); + return 0; +} + +void sun4i_hash_craexit(struct crypto_tfm *tfm) +{ + struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm); + + pm_runtime_put(op->ss->dev); +} + +/* sun4i_hash_init: initialize request context */ +int sun4i_hash_init(struct ahash_request *areq) +{ + struct sun4i_req_ctx *op = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); + struct sun4i_ss_alg_template *algt; + + memset(op, 0, sizeof(struct sun4i_req_ctx)); + + algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash); + op->mode = algt->mode; + + return 0; +} + +int sun4i_hash_export_md5(struct ahash_request *areq, void *out) +{ + struct sun4i_req_ctx *op = ahash_request_ctx(areq); + struct md5_state *octx = out; + int i; + + octx->byte_count = op->byte_count + op->len; + + memcpy(octx->block, op->buf, op->len); + + if (op->byte_count) { + for (i = 0; i < 4; i++) + octx->hash[i] = op->hash[i]; + } else { + octx->hash[0] = SHA1_H0; + octx->hash[1] = SHA1_H1; + octx->hash[2] = SHA1_H2; + octx->hash[3] = SHA1_H3; + } + + return 0; +} + +int sun4i_hash_import_md5(struct ahash_request *areq, const void *in) +{ + struct sun4i_req_ctx *op = ahash_request_ctx(areq); + const struct md5_state *ictx = in; + int i; + + sun4i_hash_init(areq); + + op->byte_count = ictx->byte_count & ~0x3F; + op->len = ictx->byte_count & 0x3F; + + memcpy(op->buf, ictx->block, op->len); + + for (i = 0; i < 4; i++) + op->hash[i] = ictx->hash[i]; + + return 0; +} + +int sun4i_hash_export_sha1(struct ahash_request *areq, void *out) +{ + struct sun4i_req_ctx *op = ahash_request_ctx(areq); + struct sha1_state *octx = out; + int i; + + octx->count = op->byte_count + op->len; + + memcpy(octx->buffer, op->buf, op->len); + + if (op->byte_count) { + for (i = 0; i < 5; i++) + octx->state[i] = op->hash[i]; + } else { + octx->state[0] = SHA1_H0; + octx->state[1] = SHA1_H1; + 
octx->state[2] = SHA1_H2; + octx->state[3] = SHA1_H3; + octx->state[4] = SHA1_H4; + } + + return 0; +} + +int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in) +{ + struct sun4i_req_ctx *op = ahash_request_ctx(areq); + const struct sha1_state *ictx = in; + int i; + + sun4i_hash_init(areq); + + op->byte_count = ictx->count & ~0x3F; + op->len = ictx->count & 0x3F; + + memcpy(op->buf, ictx->buffer, op->len); + + for (i = 0; i < 5; i++) + op->hash[i] = ictx->state[i]; + + return 0; +} + +#define SS_HASH_UPDATE 1 +#define SS_HASH_FINAL 2 + +/* + * sun4i_hash_update: update hash engine + * + * Could be used for both SHA1 and MD5 + * Write data by step of 32bits and put then in the SS. + * + * Since we cannot leave partial data and hash state in the engine, + * we need to get the hash state at the end of this function. + * We can get the hash state every 64 bytes + * + * So the first work is to get the number of bytes to write to SS modulo 64 + * The extra bytes will go to a temporary buffer op->buf storing op->len bytes + * + * So at the begin of update() + * if op->len + areq->nbytes < 64 + * => all data will be written to wait buffer (op->buf) and end=0 + * if not, write all data from op->buf to the device and position end to + * complete to 64bytes + * + * example 1: + * update1 60o => op->len=60 + * update2 60o => need one more word to have 64 bytes + * end=4 + * so write all data from op->buf and one word of SGs + * write remaining data in op->buf + * final state op->len=56 + */ +static int sun4i_hash(struct ahash_request *areq) +{ + /* + * i is the total bytes read from SGs, to be compared to areq->nbytes + * i is important because we cannot rely on SG length since the sum of + * SG->length could be greater than areq->nbytes + * + * end is the position when we need to stop writing to the device, + * to be compared to i + * + * in_i: advancement in the current SG + */ + unsigned int i = 0, end, fill, min_fill, nwait, nbw = 0, j = 0, todo; + unsigned int in_i = 0; + u32 spaces, rx_cnt = SS_RX_DEFAULT, bf[32] = {0}, v, ivmode = 0; + struct sun4i_req_ctx *op = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); + struct sun4i_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); + struct sun4i_ss_ctx *ss = tfmctx->ss; + struct sun4i_ss_alg_template *algt; + struct scatterlist *in_sg = areq->src; + struct sg_mapping_iter mi; + int in_r, err = 0; + size_t copied = 0; + u32 wb = 0; + + dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x", + __func__, crypto_tfm_alg_name(areq->base.tfm), + op->byte_count, areq->nbytes, op->mode, + op->len, op->hash[0]); + + if (unlikely(!areq->nbytes) && !(op->flags & SS_HASH_FINAL)) + return 0; + + /* protect against overflow */ + if (unlikely(areq->nbytes > UINT_MAX - op->len)) { + dev_err(ss->dev, "Cannot process too large request\n"); + return -EINVAL; + } + + if (op->len + areq->nbytes < 64 && !(op->flags & SS_HASH_FINAL)) { + /* linearize data to op->buf */ + copied = sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), + op->buf + op->len, areq->nbytes, 0); + op->len += copied; + return 0; + } + + spin_lock_bh(&ss->slock); + + /* + * if some data have been processed before, + * we need to restore the partial hash state + */ + if (op->byte_count) { + ivmode = SS_IV_ARBITRARY; + for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) + writel(op->hash[i], ss->base + SS_IV0 + i * 4); + } + /* Enable the device */ + writel(op->mode | SS_ENABLED | ivmode, 
ss->base + SS_CTL); + + if (!(op->flags & SS_HASH_UPDATE)) + goto hash_final; + + /* start of handling data */ + if (!(op->flags & SS_HASH_FINAL)) { + end = ((areq->nbytes + op->len) / 64) * 64 - op->len; + + if (end > areq->nbytes || areq->nbytes - end > 63) { + dev_err(ss->dev, "ERROR: Bound error %u %u\n", + end, areq->nbytes); + err = -EINVAL; + goto release_ss; + } + } else { + /* Since we have the flag final, we can go up to modulo 4 */ + if (areq->nbytes < 4) + end = 0; + else + end = ((areq->nbytes + op->len) / 4) * 4 - op->len; + } + + /* TODO if SGlen % 4 and !op->len then DMA */ + i = 1; + while (in_sg && i == 1) { + if (in_sg->length % 4) + i = 0; + in_sg = sg_next(in_sg); + } + if (i == 1 && !op->len && areq->nbytes) + dev_dbg(ss->dev, "We can DMA\n"); + + i = 0; + sg_miter_start(&mi, areq->src, sg_nents(areq->src), + SG_MITER_FROM_SG | SG_MITER_ATOMIC); + sg_miter_next(&mi); + in_i = 0; + + do { + /* + * we need to linearize in two case: + * - the buffer is already used + * - the SG does not have enough byte remaining ( < 4) + */ + if (op->len || (mi.length - in_i) < 4) { + /* + * if we have entered here we have two reason to stop + * - the buffer is full + * - reach the end + */ + while (op->len < 64 && i < end) { + /* how many bytes we can read from current SG */ + in_r = min(end - i, 64 - op->len); + in_r = min_t(size_t, mi.length - in_i, in_r); + memcpy(op->buf + op->len, mi.addr + in_i, in_r); + op->len += in_r; + i += in_r; + in_i += in_r; + if (in_i == mi.length) { + sg_miter_next(&mi); + in_i = 0; + } + } + if (op->len > 3 && !(op->len % 4)) { + /* write buf to the device */ + writesl(ss->base + SS_RXFIFO, op->buf, + op->len / 4); + op->byte_count += op->len; + op->len = 0; + } + } + if (mi.length - in_i > 3 && i < end) { + /* how many bytes we can read from current SG */ + in_r = min_t(size_t, mi.length - in_i, areq->nbytes - i); + in_r = min_t(size_t, ((mi.length - in_i) / 4) * 4, in_r); + /* how many bytes we can write in the device*/ + todo = min3((u32)(end - i) / 4, rx_cnt, (u32)in_r / 4); + writesl(ss->base + SS_RXFIFO, mi.addr + in_i, todo); + op->byte_count += todo * 4; + i += todo * 4; + in_i += todo * 4; + rx_cnt -= todo; + if (!rx_cnt) { + spaces = readl(ss->base + SS_FCSR); + rx_cnt = SS_RXFIFO_SPACES(spaces); + } + if (in_i == mi.length) { + sg_miter_next(&mi); + in_i = 0; + } + } + } while (i < end); + + /* + * Now we have written to the device all that we can, + * store the remaining bytes in op->buf + */ + if ((areq->nbytes - i) < 64) { + while (i < areq->nbytes && in_i < mi.length && op->len < 64) { + /* how many bytes we can read from current SG */ + in_r = min(areq->nbytes - i, 64 - op->len); + in_r = min_t(size_t, mi.length - in_i, in_r); + memcpy(op->buf + op->len, mi.addr + in_i, in_r); + op->len += in_r; + i += in_r; + in_i += in_r; + if (in_i == mi.length) { + sg_miter_next(&mi); + in_i = 0; + } + } + } + + sg_miter_stop(&mi); + + /* + * End of data process + * Now if we have the flag final go to finalize part + * If not, store the partial hash + */ + if (op->flags & SS_HASH_FINAL) + goto hash_final; + + writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL); + i = 0; + do { + v = readl(ss->base + SS_CTL); + i++; + } while (i < SS_TIMEOUT && (v & SS_DATA_END)); + if (unlikely(i >= SS_TIMEOUT)) { + dev_err_ratelimited(ss->dev, + "ERROR: hash end timeout %d>%d ctl=%x len=%u\n", + i, SS_TIMEOUT, v, areq->nbytes); + err = -EIO; + goto release_ss; + } + + /* + * The datasheet isn't very clear about when to retrieve the digest. 
The + * bit SS_DATA_END is cleared when the engine has processed the data and + * when the digest is computed *but* it doesn't mean the digest is + * available in the digest registers. Hence the delay to be sure we can + * read it. + */ + ndelay(1); + + for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) + op->hash[i] = readl(ss->base + SS_MD0 + i * 4); + + goto release_ss; + +/* + * hash_final: finalize hashing operation + * + * If we have some remaining bytes, we write them. + * Then ask the SS for finalizing the hashing operation + * + * I do not check RX FIFO size in this function since the size is 32 + * after each enabling and this function neither write more than 32 words. + * If we come from the update part, we cannot have more than + * 3 remaining bytes to write and SS is fast enough to not care about it. + */ + +hash_final: + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) { + algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash); + algt->stat_req++; + } + + /* write the remaining words of the wait buffer */ + if (op->len) { + nwait = op->len / 4; + if (nwait) { + writesl(ss->base + SS_RXFIFO, op->buf, nwait); + op->byte_count += 4 * nwait; + } + + nbw = op->len - 4 * nwait; + if (nbw) { + wb = le32_to_cpup((__le32 *)(op->buf + nwait * 4)); + wb &= GENMASK((nbw * 8) - 1, 0); + + op->byte_count += nbw; + } + } + + /* write the remaining bytes of the nbw buffer */ + wb |= ((1 << 7) << (nbw * 8)); + ((__le32 *)bf)[j++] = cpu_to_le32(wb); + + /* + * number of space to pad to obtain 64o minus 8(size) minus 4 (final 1) + * I take the operations from other MD5/SHA1 implementations + */ + + /* last block size */ + fill = 64 - (op->byte_count % 64); + min_fill = 2 * sizeof(u32) + (nbw ? 0 : sizeof(u32)); + + /* if we can't fill all data, jump to the next 64 block */ + if (fill < min_fill) + fill += 64; + + j += (fill - min_fill) / sizeof(u32); + + /* write the length of data */ + if (op->mode == SS_OP_SHA1) { + __be64 *bits = (__be64 *)&bf[j]; + *bits = cpu_to_be64(op->byte_count << 3); + j += 2; + } else { + __le64 *bits = (__le64 *)&bf[j]; + *bits = cpu_to_le64(op->byte_count << 3); + j += 2; + } + writesl(ss->base + SS_RXFIFO, bf, j); + + /* Tell the SS to stop the hashing */ + writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL); + + /* + * Wait for SS to finish the hash. + * The timeout could happen only in case of bad overclocking + * or driver bug. + */ + i = 0; + do { + v = readl(ss->base + SS_CTL); + i++; + } while (i < SS_TIMEOUT && (v & SS_DATA_END)); + if (unlikely(i >= SS_TIMEOUT)) { + dev_err_ratelimited(ss->dev, + "ERROR: hash end timeout %d>%d ctl=%x len=%u\n", + i, SS_TIMEOUT, v, areq->nbytes); + err = -EIO; + goto release_ss; + } + + /* + * The datasheet isn't very clear about when to retrieve the digest. The + * bit SS_DATA_END is cleared when the engine has processed the data and + * when the digest is computed *but* it doesn't mean the digest is + * available in the digest registers. Hence the delay to be sure we can + * read it. 
+ */ + ndelay(1); + + /* Get the hash from the device */ + if (op->mode == SS_OP_SHA1) { + for (i = 0; i < 5; i++) { + v = readl(ss->base + SS_MD0 + i * 4); + if (ss->variant->sha1_in_be) + put_unaligned_le32(v, areq->result + i * 4); + else + put_unaligned_be32(v, areq->result + i * 4); + } + } else { + for (i = 0; i < 4; i++) { + v = readl(ss->base + SS_MD0 + i * 4); + put_unaligned_le32(v, areq->result + i * 4); + } + } + +release_ss: + writel(0, ss->base + SS_CTL); + spin_unlock_bh(&ss->slock); + return err; +} + +int sun4i_hash_final(struct ahash_request *areq) +{ + struct sun4i_req_ctx *op = ahash_request_ctx(areq); + + op->flags = SS_HASH_FINAL; + return sun4i_hash(areq); +} + +int sun4i_hash_update(struct ahash_request *areq) +{ + struct sun4i_req_ctx *op = ahash_request_ctx(areq); + + op->flags = SS_HASH_UPDATE; + return sun4i_hash(areq); +} + +/* sun4i_hash_finup: finalize hashing operation after an update */ +int sun4i_hash_finup(struct ahash_request *areq) +{ + struct sun4i_req_ctx *op = ahash_request_ctx(areq); + + op->flags = SS_HASH_UPDATE | SS_HASH_FINAL; + return sun4i_hash(areq); +} + +/* combo of init/update/final functions */ +int sun4i_hash_digest(struct ahash_request *areq) +{ + int err; + struct sun4i_req_ctx *op = ahash_request_ctx(areq); + + err = sun4i_hash_init(areq); + if (err) + return err; + + op->flags = SS_HASH_UPDATE | SS_HASH_FINAL; + return sun4i_hash(areq); +} diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c new file mode 100644 index 000000000..491fcb7b8 --- /dev/null +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#include "sun4i-ss.h" + +int sun4i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed, + unsigned int slen) +{ + struct sun4i_ss_alg_template *algt; + struct rng_alg *alg = crypto_rng_alg(tfm); + + algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng); + memcpy(algt->ss->seed, seed, slen); + + return 0; +} + +int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int dlen) +{ + struct sun4i_ss_alg_template *algt; + struct rng_alg *alg = crypto_rng_alg(tfm); + int i, err; + u32 v; + u32 *data = (u32 *)dst; + const u32 mode = SS_OP_PRNG | SS_PRNG_CONTINUE | SS_ENABLED; + size_t len; + struct sun4i_ss_ctx *ss; + unsigned int todo = (dlen / 4) * 4; + + algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng); + ss = algt->ss; + + err = pm_runtime_resume_and_get(ss->dev); + if (err < 0) + return err; + + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) { + algt->stat_req++; + algt->stat_bytes += todo; + } + + spin_lock_bh(&ss->slock); + + writel(mode, ss->base + SS_CTL); + + while (todo > 0) { + /* write the seed */ + for (i = 0; i < SS_SEED_LEN / BITS_PER_LONG; i++) + writel(ss->seed[i], ss->base + SS_KEY0 + i * 4); + + /* Read the random data */ + len = min_t(size_t, SS_DATA_LEN / BITS_PER_BYTE, todo); + readsl(ss->base + SS_TXFIFO, data, len / 4); + data += len / 4; + todo -= len; + + /* Update the seed */ + for (i = 0; i < SS_SEED_LEN / BITS_PER_LONG; i++) { + v = readl(ss->base + SS_KEY0 + i * 4); + ss->seed[i] = v; + } + } + + writel(0, ss->base + SS_CTL); + spin_unlock_bh(&ss->slock); + + pm_runtime_put(ss->dev); + + return 0; +} diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h new file mode 100644 index 000000000..ba59c7a48 --- /dev/null +++ 
b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h @@ -0,0 +1,236 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * sun4i-ss.h - hardware cryptographic accelerator for Allwinner A20 SoC + * + * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com> + * + * Support AES cipher with 128,192,256 bits keysize. + * Support MD5 and SHA1 hash algorithms. + * Support DES and 3DES + * + * You could find the datasheet in Documentation/arm/sunxi.rst + */ + +#include <linux/clk.h> +#include <linux/crypto.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/reset.h> +#include <crypto/scatterwalk.h> +#include <linux/scatterlist.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/pm_runtime.h> +#include <crypto/md5.h> +#include <crypto/skcipher.h> +#include <crypto/sha1.h> +#include <crypto/hash.h> +#include <crypto/internal/hash.h> +#include <crypto/internal/skcipher.h> +#include <crypto/aes.h> +#include <crypto/internal/des.h> +#include <crypto/internal/rng.h> +#include <crypto/rng.h> + +#define SS_CTL 0x00 +#define SS_KEY0 0x04 +#define SS_KEY1 0x08 +#define SS_KEY2 0x0C +#define SS_KEY3 0x10 +#define SS_KEY4 0x14 +#define SS_KEY5 0x18 +#define SS_KEY6 0x1C +#define SS_KEY7 0x20 + +#define SS_IV0 0x24 +#define SS_IV1 0x28 +#define SS_IV2 0x2C +#define SS_IV3 0x30 + +#define SS_FCSR 0x44 + +#define SS_MD0 0x4C +#define SS_MD1 0x50 +#define SS_MD2 0x54 +#define SS_MD3 0x58 +#define SS_MD4 0x5C + +#define SS_RXFIFO 0x200 +#define SS_TXFIFO 0x204 + +/* SS_CTL configuration values */ + +/* PRNG generator mode - bit 15 */ +#define SS_PRNG_ONESHOT (0 << 15) +#define SS_PRNG_CONTINUE (1 << 15) + +/* IV mode for hash */ +#define SS_IV_ARBITRARY (1 << 14) + +/* SS operation mode - bits 12-13 */ +#define SS_ECB (0 << 12) +#define SS_CBC (1 << 12) +#define SS_CTS (3 << 12) + +/* Counter width for CNT mode - bits 10-11 */ +#define SS_CNT_16BITS (0 << 10) +#define SS_CNT_32BITS (1 << 10) +#define SS_CNT_64BITS (2 << 10) + +/* Key size for AES - bits 8-9 */ +#define SS_AES_128BITS (0 << 8) +#define SS_AES_192BITS (1 << 8) +#define SS_AES_256BITS (2 << 8) + +/* Operation direction - bit 7 */ +#define SS_ENCRYPTION (0 << 7) +#define SS_DECRYPTION (1 << 7) + +/* SS Method - bits 4-6 */ +#define SS_OP_AES (0 << 4) +#define SS_OP_DES (1 << 4) +#define SS_OP_3DES (2 << 4) +#define SS_OP_SHA1 (3 << 4) +#define SS_OP_MD5 (4 << 4) +#define SS_OP_PRNG (5 << 4) + +/* Data end bit - bit 2 */ +#define SS_DATA_END (1 << 2) + +/* PRNG start bit - bit 1 */ +#define SS_PRNG_START (1 << 1) + +/* SS Enable bit - bit 0 */ +#define SS_DISABLED (0 << 0) +#define SS_ENABLED (1 << 0) + +/* SS_FCSR configuration values */ +/* RX FIFO status - bit 30 */ +#define SS_RXFIFO_FREE (1 << 30) + +/* RX FIFO empty spaces - bits 24-29 */ +#define SS_RXFIFO_SPACES(val) (((val) >> 24) & 0x3f) + +/* TX FIFO status - bit 22 */ +#define SS_TXFIFO_AVAILABLE (1 << 22) + +/* TX FIFO available spaces - bits 16-21 */ +#define SS_TXFIFO_SPACES(val) (((val) >> 16) & 0x3f) + +#define SS_RX_MAX 32 +#define SS_RX_DEFAULT SS_RX_MAX +#define SS_TX_MAX 33 + +#define SS_RXFIFO_EMP_INT_PENDING (1 << 10) +#define SS_TXFIFO_AVA_INT_PENDING (1 << 8) +#define SS_RXFIFO_EMP_INT_ENABLE (1 << 2) +#define SS_TXFIFO_AVA_INT_ENABLE (1 << 0) + +#define SS_SEED_LEN 192 +#define SS_DATA_LEN 160 + +/* + * struct ss_variant - Describe SS hardware variant + * @sha1_in_be: The SHA1 digest is given by SS in BE, and so need to be inverted. 
+ */ +struct ss_variant { + bool sha1_in_be; +}; + +struct sun4i_ss_ctx { + const struct ss_variant *variant; + void __iomem *base; + int irq; + struct clk *busclk; + struct clk *ssclk; + struct reset_control *reset; + struct device *dev; + struct resource *res; + char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */ + char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */ + spinlock_t slock; /* control the use of the device */ +#ifdef CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG + u32 seed[SS_SEED_LEN / BITS_PER_LONG]; +#endif + struct dentry *dbgfs_dir; + struct dentry *dbgfs_stats; +}; + +struct sun4i_ss_alg_template { + u32 type; + u32 mode; + union { + struct skcipher_alg crypto; + struct ahash_alg hash; + struct rng_alg rng; + } alg; + struct sun4i_ss_ctx *ss; + unsigned long stat_req; + unsigned long stat_fb; + unsigned long stat_bytes; + unsigned long stat_opti; +}; + +struct sun4i_tfm_ctx { + u32 key[AES_MAX_KEY_SIZE / 4];/* divided by sizeof(u32) */ + u32 keylen; + u32 keymode; + struct sun4i_ss_ctx *ss; + struct crypto_skcipher *fallback_tfm; +}; + +struct sun4i_cipher_req_ctx { + u32 mode; + u8 backup_iv[AES_BLOCK_SIZE]; + struct skcipher_request fallback_req; // keep at the end +}; + +struct sun4i_req_ctx { + u32 mode; + u64 byte_count; /* number of bytes "uploaded" to the device */ + u32 hash[5]; /* for storing SS_IVx register */ + char buf[64]; + unsigned int len; + int flags; +}; + +int sun4i_hash_crainit(struct crypto_tfm *tfm); +void sun4i_hash_craexit(struct crypto_tfm *tfm); +int sun4i_hash_init(struct ahash_request *areq); +int sun4i_hash_update(struct ahash_request *areq); +int sun4i_hash_final(struct ahash_request *areq); +int sun4i_hash_finup(struct ahash_request *areq); +int sun4i_hash_digest(struct ahash_request *areq); +int sun4i_hash_export_md5(struct ahash_request *areq, void *out); +int sun4i_hash_import_md5(struct ahash_request *areq, const void *in); +int sun4i_hash_export_sha1(struct ahash_request *areq, void *out); +int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in); + +int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq); +int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq); +int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq); +int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq); + +int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq); +int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq); +int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq); +int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq); + +int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq); +int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq); +int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq); +int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq); + +int sun4i_ss_cipher_init(struct crypto_tfm *tfm); +void sun4i_ss_cipher_exit(struct crypto_tfm *tfm); +int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen); +int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen); +int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen); +int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int dlen); +int sun4i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen); diff --git a/drivers/crypto/allwinner/sun8i-ce/Makefile b/drivers/crypto/allwinner/sun8i-ce/Makefile new file mode 100644 index 000000000..0842eb2d9 --- /dev/null 
+++ b/drivers/crypto/allwinner/sun8i-ce/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_CRYPTO_DEV_SUN8I_CE) += sun8i-ce.o +sun8i-ce-y += sun8i-ce-core.o sun8i-ce-cipher.o +sun8i-ce-$(CONFIG_CRYPTO_DEV_SUN8I_CE_HASH) += sun8i-ce-hash.o +sun8i-ce-$(CONFIG_CRYPTO_DEV_SUN8I_CE_PRNG) += sun8i-ce-prng.o +sun8i-ce-$(CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG) += sun8i-ce-trng.o diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c new file mode 100644 index 000000000..74b4e910a --- /dev/null +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c @@ -0,0 +1,499 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sun8i-ce-cipher.c - hardware cryptographic offloader for + * Allwinner H3/A64/H5/H2+/H6/R40 SoC + * + * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com> + * + * This file add support for AES cipher with 128,192,256 bits keysize in + * CBC and ECB mode. + * + * You could find a link for the datasheet in Documentation/arm/sunxi.rst + */ + +#include <linux/bottom_half.h> +#include <linux/crypto.h> +#include <linux/dma-mapping.h> +#include <linux/io.h> +#include <linux/pm_runtime.h> +#include <crypto/scatterwalk.h> +#include <crypto/internal/des.h> +#include <crypto/internal/skcipher.h> +#include "sun8i-ce.h" + +static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct scatterlist *sg; + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct sun8i_ce_alg_template *algt; + unsigned int todo, len; + + algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher); + + if (sg_nents_for_len(areq->src, areq->cryptlen) > MAX_SG || + sg_nents_for_len(areq->dst, areq->cryptlen) > MAX_SG) { + algt->stat_fb_maxsg++; + return true; + } + + if (areq->cryptlen < crypto_skcipher_ivsize(tfm)) { + algt->stat_fb_leniv++; + return true; + } + + if (areq->cryptlen == 0) { + algt->stat_fb_len0++; + return true; + } + + if (areq->cryptlen % 16) { + algt->stat_fb_mod16++; + return true; + } + + len = areq->cryptlen; + sg = areq->src; + while (sg) { + if (!IS_ALIGNED(sg->offset, sizeof(u32))) { + algt->stat_fb_srcali++; + return true; + } + todo = min(len, sg->length); + if (todo % 4) { + algt->stat_fb_srclen++; + return true; + } + len -= todo; + sg = sg_next(sg); + } + + len = areq->cryptlen; + sg = areq->dst; + while (sg) { + if (!IS_ALIGNED(sg->offset, sizeof(u32))) { + algt->stat_fb_dstali++; + return true; + } + todo = min(len, sg->length); + if (todo % 4) { + algt->stat_fb_dstlen++; + return true; + } + len -= todo; + sg = sg_next(sg); + } + return false; +} + +static int sun8i_ce_cipher_fallback(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + int err; +#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct sun8i_ce_alg_template *algt; + + algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher); + algt->stat_fb++; +#endif + + skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); + skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, + areq->base.complete, areq->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst, + areq->cryptlen, areq->iv); + if (rctx->op_dir & CE_DECRYPTION) + err = crypto_skcipher_decrypt(&rctx->fallback_req); + else + err = 
crypto_skcipher_encrypt(&rctx->fallback_req); + return err; +} + +static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req) +{ + struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun8i_ce_dev *ce = op->ce; + struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct sun8i_ce_alg_template *algt; + struct sun8i_ce_flow *chan; + struct ce_task *cet; + struct scatterlist *sg; + unsigned int todo, len, offset, ivsize; + u32 common, sym; + int flow, i; + int nr_sgs = 0; + int nr_sgd = 0; + int err = 0; + int ns = sg_nents_for_len(areq->src, areq->cryptlen); + int nd = sg_nents_for_len(areq->dst, areq->cryptlen); + + algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher); + + dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__, + crypto_tfm_alg_name(areq->base.tfm), + areq->cryptlen, + rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm), + op->keylen); + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG + algt->stat_req++; +#endif + + flow = rctx->flow; + + chan = &ce->chanlist[flow]; + + cet = chan->tl; + memset(cet, 0, sizeof(struct ce_task)); + + cet->t_id = cpu_to_le32(flow); + common = ce->variant->alg_cipher[algt->ce_algo_id]; + common |= rctx->op_dir | CE_COMM_INT; + cet->t_common_ctl = cpu_to_le32(common); + /* CTS and recent CE (H6) need length in bytes, in word otherwise */ + if (ce->variant->cipher_t_dlen_in_bytes) + cet->t_dlen = cpu_to_le32(areq->cryptlen); + else + cet->t_dlen = cpu_to_le32(areq->cryptlen / 4); + + sym = ce->variant->op_mode[algt->ce_blockmode]; + len = op->keylen; + switch (len) { + case 128 / 8: + sym |= CE_AES_128BITS; + break; + case 192 / 8: + sym |= CE_AES_192BITS; + break; + case 256 / 8: + sym |= CE_AES_256BITS; + break; + } + + cet->t_sym_ctl = cpu_to_le32(sym); + cet->t_asym_ctl = 0; + + rctx->addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE); + if (dma_mapping_error(ce->dev, rctx->addr_key)) { + dev_err(ce->dev, "Cannot DMA MAP KEY\n"); + err = -EFAULT; + goto theend; + } + cet->t_key = cpu_to_le32(rctx->addr_key); + + ivsize = crypto_skcipher_ivsize(tfm); + if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) { + rctx->ivlen = ivsize; + if (rctx->op_dir & CE_DECRYPTION) { + offset = areq->cryptlen - ivsize; + scatterwalk_map_and_copy(chan->backup_iv, areq->src, + offset, ivsize, 0); + } + memcpy(chan->bounce_iv, areq->iv, ivsize); + rctx->addr_iv = dma_map_single(ce->dev, chan->bounce_iv, rctx->ivlen, + DMA_TO_DEVICE); + if (dma_mapping_error(ce->dev, rctx->addr_iv)) { + dev_err(ce->dev, "Cannot DMA MAP IV\n"); + err = -ENOMEM; + goto theend_iv; + } + cet->t_iv = cpu_to_le32(rctx->addr_iv); + } + + if (areq->src == areq->dst) { + nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_BIDIRECTIONAL); + if (nr_sgs <= 0 || nr_sgs > MAX_SG) { + dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs); + err = -EINVAL; + goto theend_iv; + } + nr_sgd = nr_sgs; + } else { + nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE); + if (nr_sgs <= 0 || nr_sgs > MAX_SG) { + dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs); + err = -EINVAL; + goto theend_iv; + } + nr_sgd = dma_map_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE); + if (nr_sgd <= 0 || nr_sgd > MAX_SG) { + dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd); + err = -EINVAL; + goto theend_sgs; + } + } + + len = 
areq->cryptlen; + for_each_sg(areq->src, sg, nr_sgs, i) { + cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg)); + todo = min(len, sg_dma_len(sg)); + cet->t_src[i].len = cpu_to_le32(todo / 4); + dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__, + areq->cryptlen, i, cet->t_src[i].len, sg->offset, todo); + len -= todo; + } + if (len > 0) { + dev_err(ce->dev, "remaining len %d\n", len); + err = -EINVAL; + goto theend_sgs; + } + + len = areq->cryptlen; + for_each_sg(areq->dst, sg, nr_sgd, i) { + cet->t_dst[i].addr = cpu_to_le32(sg_dma_address(sg)); + todo = min(len, sg_dma_len(sg)); + cet->t_dst[i].len = cpu_to_le32(todo / 4); + dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__, + areq->cryptlen, i, cet->t_dst[i].len, sg->offset, todo); + len -= todo; + } + if (len > 0) { + dev_err(ce->dev, "remaining len %d\n", len); + err = -EINVAL; + goto theend_sgs; + } + + chan->timeout = areq->cryptlen; + rctx->nr_sgs = nr_sgs; + rctx->nr_sgd = nr_sgd; + return 0; + +theend_sgs: + if (areq->src == areq->dst) { + dma_unmap_sg(ce->dev, areq->src, ns, DMA_BIDIRECTIONAL); + } else { + if (nr_sgs > 0) + dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE); + dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE); + } + +theend_iv: + if (areq->iv && ivsize > 0) { + if (rctx->addr_iv) + dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE); + offset = areq->cryptlen - ivsize; + if (rctx->op_dir & CE_DECRYPTION) { + memcpy(areq->iv, chan->backup_iv, ivsize); + memzero_explicit(chan->backup_iv, ivsize); + } else { + scatterwalk_map_and_copy(areq->iv, areq->dst, offset, + ivsize, 0); + } + memzero_explicit(chan->bounce_iv, ivsize); + } + + dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE); + +theend: + return err; +} + +static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq) +{ + struct skcipher_request *breq = container_of(areq, struct skcipher_request, base); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq); + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun8i_ce_dev *ce = op->ce; + struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq); + int flow, err; + + flow = rctx->flow; + err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm)); + local_bh_disable(); + crypto_finalize_skcipher_request(engine, breq, err); + local_bh_enable(); + return 0; +} + +static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_req) +{ + struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun8i_ce_dev *ce = op->ce; + struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + struct sun8i_ce_flow *chan; + struct ce_task *cet; + unsigned int ivsize, offset; + int nr_sgs = rctx->nr_sgs; + int nr_sgd = rctx->nr_sgd; + int flow; + + flow = rctx->flow; + chan = &ce->chanlist[flow]; + cet = chan->tl; + ivsize = crypto_skcipher_ivsize(tfm); + + if (areq->src == areq->dst) { + dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL); + } else { + if (nr_sgs > 0) + dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE); + dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE); + } + + if (areq->iv && ivsize > 0) { + if (cet->t_iv) + dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE); + offset = areq->cryptlen - ivsize; + if (rctx->op_dir & CE_DECRYPTION) { + memcpy(areq->iv, 
chan->backup_iv, ivsize); + memzero_explicit(chan->backup_iv, ivsize); + } else { + scatterwalk_map_and_copy(areq->iv, areq->dst, offset, + ivsize, 0); + } + memzero_explicit(chan->bounce_iv, ivsize); + } + + dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE); + + return 0; +} + +int sun8i_ce_skdecrypt(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + struct crypto_engine *engine; + int e; + + rctx->op_dir = CE_DECRYPTION; + if (sun8i_ce_cipher_need_fallback(areq)) + return sun8i_ce_cipher_fallback(areq); + + e = sun8i_ce_get_engine_number(op->ce); + rctx->flow = e; + engine = op->ce->chanlist[e].engine; + + return crypto_transfer_skcipher_request_to_engine(engine, areq); +} + +int sun8i_ce_skencrypt(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + struct crypto_engine *engine; + int e; + + rctx->op_dir = CE_ENCRYPTION; + if (sun8i_ce_cipher_need_fallback(areq)) + return sun8i_ce_cipher_fallback(areq); + + e = sun8i_ce_get_engine_number(op->ce); + rctx->flow = e; + engine = op->ce->chanlist[e].engine; + + return crypto_transfer_skcipher_request_to_engine(engine, areq); +} + +int sun8i_ce_cipher_init(struct crypto_tfm *tfm) +{ + struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm); + struct sun8i_ce_alg_template *algt; + const char *name = crypto_tfm_alg_name(tfm); + struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm); + struct skcipher_alg *alg = crypto_skcipher_alg(sktfm); + int err; + + memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx)); + + algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher); + op->ce = algt->ce; + + op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(op->fallback_tfm)) { + dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n", + name, PTR_ERR(op->fallback_tfm)); + return PTR_ERR(op->fallback_tfm); + } + + sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) + + crypto_skcipher_reqsize(op->fallback_tfm); + + memcpy(algt->fbname, + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)), + CRYPTO_MAX_ALG_NAME); + + op->enginectx.op.do_one_request = sun8i_ce_cipher_run; + op->enginectx.op.prepare_request = sun8i_ce_cipher_prepare; + op->enginectx.op.unprepare_request = sun8i_ce_cipher_unprepare; + + err = pm_runtime_get_sync(op->ce->dev); + if (err < 0) + goto error_pm; + + return 0; +error_pm: + pm_runtime_put_noidle(op->ce->dev); + crypto_free_skcipher(op->fallback_tfm); + return err; +} + +void sun8i_ce_cipher_exit(struct crypto_tfm *tfm) +{ + struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm); + + kfree_sensitive(op->key); + crypto_free_skcipher(op->fallback_tfm); + pm_runtime_put_sync_suspend(op->ce->dev); +} + +int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun8i_ce_dev *ce = op->ce; + + switch (keylen) { + case 128 / 8: + break; + case 192 / 8: + break; + case 256 / 8: + break; + default: + dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen); + return -EINVAL; + } + kfree_sensitive(op->key); + op->keylen = keylen; + op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA); + if (!op->key) + return 
-ENOMEM; + + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); +} + +int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); + int err; + + err = verify_skcipher_des3_key(tfm, key); + if (err) + return err; + + kfree_sensitive(op->key); + op->keylen = keylen; + op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA); + if (!op->key) + return -ENOMEM; + + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); +} diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c new file mode 100644 index 000000000..9f6594699 --- /dev/null +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c @@ -0,0 +1,1078 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sun8i-ce-core.c - hardware cryptographic offloader for + * Allwinner H3/A64/H5/H2+/H6/R40 SoC + * + * Copyright (C) 2015-2019 Corentin Labbe <clabbe.montjoie@gmail.com> + * + * Core file which registers crypto algorithms supported by the CryptoEngine. + * + * You could find a link for the datasheet in Documentation/arm/sunxi.rst + */ +#include <linux/clk.h> +#include <linux/crypto.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> +#include <crypto/internal/rng.h> +#include <crypto/internal/skcipher.h> + +#include "sun8i-ce.h" + +/* + * mod clock is lower on H3 than other SoC due to some DMA timeout occurring + * with high value. 
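+ * (this is why ce_h3_variant below requests a 50 MHz mod clock while the other variants use 300 MHz)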
+ * If you want to tune mod clock, loading driver and passing selftest is + * insufficient, you need to test with some LUKS test (mount and write to it) + */ +static const struct ce_variant ce_h3_variant = { + .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES, + }, + .alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256, + CE_ALG_SHA384, CE_ALG_SHA512 + }, + .op_mode = { CE_OP_ECB, CE_OP_CBC + }, + .ce_clks = { + { "bus", 0, 200000000 }, + { "mod", 50000000, 0 }, + }, + .esr = ESR_H3, + .prng = CE_ALG_PRNG, + .trng = CE_ID_NOTSUPP, +}; + +static const struct ce_variant ce_h5_variant = { + .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES, + }, + .alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256, + CE_ID_NOTSUPP, CE_ID_NOTSUPP + }, + .op_mode = { CE_OP_ECB, CE_OP_CBC + }, + .ce_clks = { + { "bus", 0, 200000000 }, + { "mod", 300000000, 0 }, + }, + .esr = ESR_H5, + .prng = CE_ALG_PRNG, + .trng = CE_ID_NOTSUPP, +}; + +static const struct ce_variant ce_h6_variant = { + .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES, + }, + .alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256, + CE_ALG_SHA384, CE_ALG_SHA512 + }, + .op_mode = { CE_OP_ECB, CE_OP_CBC + }, + .cipher_t_dlen_in_bytes = true, + .hash_t_dlen_in_bits = true, + .prng_t_dlen_in_bytes = true, + .trng_t_dlen_in_bytes = true, + .ce_clks = { + { "bus", 0, 200000000 }, + { "mod", 300000000, 0 }, + { "ram", 0, 400000000 }, + }, + .esr = ESR_H6, + .prng = CE_ALG_PRNG_V2, + .trng = CE_ALG_TRNG_V2, +}; + +static const struct ce_variant ce_a64_variant = { + .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES, + }, + .alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256, + CE_ID_NOTSUPP, CE_ID_NOTSUPP + }, + .op_mode = { CE_OP_ECB, CE_OP_CBC + }, + .ce_clks = { + { "bus", 0, 200000000 }, + { "mod", 300000000, 0 }, + }, + .esr = ESR_A64, + .prng = CE_ALG_PRNG, + .trng = CE_ID_NOTSUPP, +}; + +static const struct ce_variant ce_d1_variant = { + .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES, + }, + .alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256, + CE_ALG_SHA384, CE_ALG_SHA512 + }, + .op_mode = { CE_OP_ECB, CE_OP_CBC + }, + .ce_clks = { + { "bus", 0, 200000000 }, + { "mod", 300000000, 0 }, + { "ram", 0, 400000000 }, + }, + .esr = ESR_D1, + .prng = CE_ALG_PRNG, + .trng = CE_ALG_TRNG, +}; + +static const struct ce_variant ce_r40_variant = { + .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES, + }, + .alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256, + CE_ID_NOTSUPP, CE_ID_NOTSUPP + }, + .op_mode = { CE_OP_ECB, CE_OP_CBC + }, + .ce_clks = { + { "bus", 0, 200000000 }, + { "mod", 300000000, 0 }, + }, + .esr = ESR_R40, + .prng = CE_ALG_PRNG, + .trng = CE_ID_NOTSUPP, +}; + +/* + * sun8i_ce_get_engine_number() get the next channel slot + * This is a simple round-robin way of getting the next channel + * The flow 3 is reserve for xRNG operations + */ +int sun8i_ce_get_engine_number(struct sun8i_ce_dev *ce) +{ + return atomic_inc_return(&ce->flow) % (MAXFLOW - 1); +} + +int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name) +{ + u32 v; + int err = 0; + struct ce_task *cet = ce->chanlist[flow].tl; + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG + ce->chanlist[flow].stat_req++; +#endif + + mutex_lock(&ce->mlock); + + v = readl(ce->base + CE_ICR); + v |= 1 << flow; + writel(v, ce->base + CE_ICR); + + reinit_completion(&ce->chanlist[flow].complete); + writel(ce->chanlist[flow].t_phy, ce->base + CE_TDQ); + + ce->chanlist[flow].status = 0; + 
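/* status is set back to 1 by the IRQ handler, which also signals the completion, once this task finishes */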
/* Be sure all data is written before enabling the task */ + wmb(); + + /* Only H6 needs to write a part of t_common_ctl along with "1", but since it is ignored + * on older SoCs, we have no reason to complicate things. + */ + v = 1 | ((le32_to_cpu(ce->chanlist[flow].tl->t_common_ctl) & 0x7F) << 8); + writel(v, ce->base + CE_TLR); + mutex_unlock(&ce->mlock); + + wait_for_completion_interruptible_timeout(&ce->chanlist[flow].complete, + msecs_to_jiffies(ce->chanlist[flow].timeout)); + + if (ce->chanlist[flow].status == 0) { + dev_err(ce->dev, "DMA timeout for %s (tm=%d) on flow %d\n", name, + ce->chanlist[flow].timeout, flow); + err = -EFAULT; + } + /* No need to lock for this read, the channel is locked so + * nothing could modify the error value for this channel + */ + v = readl(ce->base + CE_ESR); + switch (ce->variant->esr) { + case ESR_H3: + /* Sadly, the error bit is not per flow */ + if (v) { + dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow); + err = -EFAULT; + print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4, + cet, sizeof(struct ce_task), false); + } + if (v & CE_ERR_ALGO_NOTSUP) + dev_err(ce->dev, "CE ERROR: algorithm not supported\n"); + if (v & CE_ERR_DATALEN) + dev_err(ce->dev, "CE ERROR: data length error\n"); + if (v & CE_ERR_KEYSRAM) + dev_err(ce->dev, "CE ERROR: keysram access error for AES\n"); + break; + case ESR_A64: + case ESR_D1: + case ESR_H5: + case ESR_R40: + v >>= (flow * 4); + v &= 0xF; + if (v) { + dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow); + err = -EFAULT; + print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4, + cet, sizeof(struct ce_task), false); + } + if (v & CE_ERR_ALGO_NOTSUP) + dev_err(ce->dev, "CE ERROR: algorithm not supported\n"); + if (v & CE_ERR_DATALEN) + dev_err(ce->dev, "CE ERROR: data length error\n"); + if (v & CE_ERR_KEYSRAM) + dev_err(ce->dev, "CE ERROR: keysram access error for AES\n"); + break; + case ESR_H6: + v >>= (flow * 8); + v &= 0xFF; + if (v) { + dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow); + err = -EFAULT; + print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4, + cet, sizeof(struct ce_task), false); + } + if (v & CE_ERR_ALGO_NOTSUP) + dev_err(ce->dev, "CE ERROR: algorithm not supported\n"); + if (v & CE_ERR_DATALEN) + dev_err(ce->dev, "CE ERROR: data length error\n"); + if (v & CE_ERR_KEYSRAM) + dev_err(ce->dev, "CE ERROR: keysram access error for AES\n"); + if (v & CE_ERR_ADDR_INVALID) + dev_err(ce->dev, "CE ERROR: address invalid\n"); + if (v & CE_ERR_KEYLADDER) + dev_err(ce->dev, "CE ERROR: key ladder configuration error\n"); + break; + } + + return err; +} + +static irqreturn_t ce_irq_handler(int irq, void *data) +{ + struct sun8i_ce_dev *ce = (struct sun8i_ce_dev *)data; + int flow = 0; + u32 p; + + p = readl(ce->base + CE_ISR); + for (flow = 0; flow < MAXFLOW; flow++) { + if (p & (BIT(flow))) { + writel(BIT(flow), ce->base + CE_ISR); + ce->chanlist[flow].status = 1; + complete(&ce->chanlist[flow].complete); + } + } + + return IRQ_HANDLED; +} + +static struct sun8i_ce_alg_template ce_algs[] = { +{ + .type = CRYPTO_ALG_TYPE_SKCIPHER, + .ce_algo_id = CE_ID_CIPHER_AES, + .ce_blockmode = CE_ID_OP_CBC, + .alg.skcipher = { + .base = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-sun8i-ce", + .cra_priority = 400, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 0xf, + .cra_init = 
sun8i_ce_cipher_init, + .cra_exit = sun8i_ce_cipher_exit, + }, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = sun8i_ce_aes_setkey, + .encrypt = sun8i_ce_skencrypt, + .decrypt = sun8i_ce_skdecrypt, + } +}, +{ + .type = CRYPTO_ALG_TYPE_SKCIPHER, + .ce_algo_id = CE_ID_CIPHER_AES, + .ce_blockmode = CE_ID_OP_ECB, + .alg.skcipher = { + .base = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-sun8i-ce", + .cra_priority = 400, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 0xf, + .cra_init = sun8i_ce_cipher_init, + .cra_exit = sun8i_ce_cipher_exit, + }, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = sun8i_ce_aes_setkey, + .encrypt = sun8i_ce_skencrypt, + .decrypt = sun8i_ce_skdecrypt, + } +}, +{ + .type = CRYPTO_ALG_TYPE_SKCIPHER, + .ce_algo_id = CE_ID_CIPHER_DES3, + .ce_blockmode = CE_ID_OP_CBC, + .alg.skcipher = { + .base = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "cbc-des3-sun8i-ce", + .cra_priority = 400, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 0xf, + .cra_init = sun8i_ce_cipher_init, + .cra_exit = sun8i_ce_cipher_exit, + }, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setkey = sun8i_ce_des3_setkey, + .encrypt = sun8i_ce_skencrypt, + .decrypt = sun8i_ce_skdecrypt, + } +}, +{ + .type = CRYPTO_ALG_TYPE_SKCIPHER, + .ce_algo_id = CE_ID_CIPHER_DES3, + .ce_blockmode = CE_ID_OP_ECB, + .alg.skcipher = { + .base = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "ecb-des3-sun8i-ce", + .cra_priority = 400, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 0xf, + .cra_init = sun8i_ce_cipher_init, + .cra_exit = sun8i_ce_cipher_exit, + }, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .setkey = sun8i_ce_des3_setkey, + .encrypt = sun8i_ce_skencrypt, + .decrypt = sun8i_ce_skdecrypt, + } +}, +#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_HASH +{ .type = CRYPTO_ALG_TYPE_AHASH, + .ce_algo_id = CE_ID_HASH_MD5, + .alg.hash = { + .init = sun8i_ce_hash_init, + .update = sun8i_ce_hash_update, + .final = sun8i_ce_hash_final, + .finup = sun8i_ce_hash_finup, + .digest = sun8i_ce_hash_digest, + .export = sun8i_ce_hash_export, + .import = sun8i_ce_hash_import, + .halg = { + .digestsize = MD5_DIGEST_SIZE, + .statesize = sizeof(struct md5_state), + .base = { + .cra_name = "md5", + .cra_driver_name = "md5-sun8i-ce", + .cra_priority = 300, + .cra_alignmask = 3, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_init = sun8i_ce_hash_crainit, + .cra_exit = sun8i_ce_hash_craexit, + } + } + } +}, +{ .type = CRYPTO_ALG_TYPE_AHASH, + .ce_algo_id = CE_ID_HASH_SHA1, + .alg.hash = { + .init = sun8i_ce_hash_init, + .update = sun8i_ce_hash_update, + .final = sun8i_ce_hash_final, + .finup = sun8i_ce_hash_finup, + .digest = 
sun8i_ce_hash_digest, + .export = sun8i_ce_hash_export, + .import = sun8i_ce_hash_import, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct sha1_state), + .base = { + .cra_name = "sha1", + .cra_driver_name = "sha1-sun8i-ce", + .cra_priority = 300, + .cra_alignmask = 3, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_init = sun8i_ce_hash_crainit, + .cra_exit = sun8i_ce_hash_craexit, + } + } + } +}, +{ .type = CRYPTO_ALG_TYPE_AHASH, + .ce_algo_id = CE_ID_HASH_SHA224, + .alg.hash = { + .init = sun8i_ce_hash_init, + .update = sun8i_ce_hash_update, + .final = sun8i_ce_hash_final, + .finup = sun8i_ce_hash_finup, + .digest = sun8i_ce_hash_digest, + .export = sun8i_ce_hash_export, + .import = sun8i_ce_hash_import, + .halg = { + .digestsize = SHA224_DIGEST_SIZE, + .statesize = sizeof(struct sha256_state), + .base = { + .cra_name = "sha224", + .cra_driver_name = "sha224-sun8i-ce", + .cra_priority = 300, + .cra_alignmask = 3, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_init = sun8i_ce_hash_crainit, + .cra_exit = sun8i_ce_hash_craexit, + } + } + } +}, +{ .type = CRYPTO_ALG_TYPE_AHASH, + .ce_algo_id = CE_ID_HASH_SHA256, + .alg.hash = { + .init = sun8i_ce_hash_init, + .update = sun8i_ce_hash_update, + .final = sun8i_ce_hash_final, + .finup = sun8i_ce_hash_finup, + .digest = sun8i_ce_hash_digest, + .export = sun8i_ce_hash_export, + .import = sun8i_ce_hash_import, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct sha256_state), + .base = { + .cra_name = "sha256", + .cra_driver_name = "sha256-sun8i-ce", + .cra_priority = 300, + .cra_alignmask = 3, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_init = sun8i_ce_hash_crainit, + .cra_exit = sun8i_ce_hash_craexit, + } + } + } +}, +{ .type = CRYPTO_ALG_TYPE_AHASH, + .ce_algo_id = CE_ID_HASH_SHA384, + .alg.hash = { + .init = sun8i_ce_hash_init, + .update = sun8i_ce_hash_update, + .final = sun8i_ce_hash_final, + .finup = sun8i_ce_hash_finup, + .digest = sun8i_ce_hash_digest, + .export = sun8i_ce_hash_export, + .import = sun8i_ce_hash_import, + .halg = { + .digestsize = SHA384_DIGEST_SIZE, + .statesize = sizeof(struct sha512_state), + .base = { + .cra_name = "sha384", + .cra_driver_name = "sha384-sun8i-ce", + .cra_priority = 300, + .cra_alignmask = 3, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA384_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_init = sun8i_ce_hash_crainit, + .cra_exit = sun8i_ce_hash_craexit, + } + } + } +}, +{ .type = CRYPTO_ALG_TYPE_AHASH, + .ce_algo_id = CE_ID_HASH_SHA512, + .alg.hash = { + .init = sun8i_ce_hash_init, + .update = sun8i_ce_hash_update, + .final = sun8i_ce_hash_final, + .finup = sun8i_ce_hash_finup, + .digest = sun8i_ce_hash_digest, + .export = sun8i_ce_hash_export, + .import = sun8i_ce_hash_import, + .halg = { + .digestsize = SHA512_DIGEST_SIZE, + .statesize = sizeof(struct sha512_state), + .base = { + .cra_name = "sha512", + .cra_driver_name = 
"sha512-sun8i-ce", + .cra_priority = 300, + .cra_alignmask = 3, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_init = sun8i_ce_hash_crainit, + .cra_exit = sun8i_ce_hash_craexit, + } + } + } +}, +#endif +#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_PRNG +{ + .type = CRYPTO_ALG_TYPE_RNG, + .alg.rng = { + .base = { + .cra_name = "stdrng", + .cra_driver_name = "sun8i-ce-prng", + .cra_priority = 300, + .cra_ctxsize = sizeof(struct sun8i_ce_rng_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_init = sun8i_ce_prng_init, + .cra_exit = sun8i_ce_prng_exit, + }, + .generate = sun8i_ce_prng_generate, + .seed = sun8i_ce_prng_seed, + .seedsize = PRNG_SEED_SIZE, + } +}, +#endif +}; + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG +static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v) +{ + struct sun8i_ce_dev *ce = seq->private; + unsigned int i; + + for (i = 0; i < MAXFLOW; i++) + seq_printf(seq, "Channel %d: nreq %lu\n", i, ce->chanlist[i].stat_req); + + for (i = 0; i < ARRAY_SIZE(ce_algs); i++) { + if (!ce_algs[i].ce) + continue; + switch (ce_algs[i].type) { + case CRYPTO_ALG_TYPE_SKCIPHER: + seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n", + ce_algs[i].alg.skcipher.base.cra_driver_name, + ce_algs[i].alg.skcipher.base.cra_name, + ce_algs[i].stat_req, ce_algs[i].stat_fb); + seq_printf(seq, "\tLast fallback is: %s\n", + ce_algs[i].fbname); + seq_printf(seq, "\tFallback due to 0 length: %lu\n", + ce_algs[i].stat_fb_len0); + seq_printf(seq, "\tFallback due to length !mod16: %lu\n", + ce_algs[i].stat_fb_mod16); + seq_printf(seq, "\tFallback due to length < IV: %lu\n", + ce_algs[i].stat_fb_leniv); + seq_printf(seq, "\tFallback due to source alignment: %lu\n", + ce_algs[i].stat_fb_srcali); + seq_printf(seq, "\tFallback due to dest alignment: %lu\n", + ce_algs[i].stat_fb_dstali); + seq_printf(seq, "\tFallback due to source length: %lu\n", + ce_algs[i].stat_fb_srclen); + seq_printf(seq, "\tFallback due to dest length: %lu\n", + ce_algs[i].stat_fb_dstlen); + seq_printf(seq, "\tFallback due to SG numbers: %lu\n", + ce_algs[i].stat_fb_maxsg); + break; + case CRYPTO_ALG_TYPE_AHASH: + seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n", + ce_algs[i].alg.hash.halg.base.cra_driver_name, + ce_algs[i].alg.hash.halg.base.cra_name, + ce_algs[i].stat_req, ce_algs[i].stat_fb); + seq_printf(seq, "\tLast fallback is: %s\n", + ce_algs[i].fbname); + seq_printf(seq, "\tFallback due to 0 length: %lu\n", + ce_algs[i].stat_fb_len0); + seq_printf(seq, "\tFallback due to length: %lu\n", + ce_algs[i].stat_fb_srclen); + seq_printf(seq, "\tFallback due to alignment: %lu\n", + ce_algs[i].stat_fb_srcali); + seq_printf(seq, "\tFallback due to SG numbers: %lu\n", + ce_algs[i].stat_fb_maxsg); + break; + case CRYPTO_ALG_TYPE_RNG: + seq_printf(seq, "%s %s reqs=%lu bytes=%lu\n", + ce_algs[i].alg.rng.base.cra_driver_name, + ce_algs[i].alg.rng.base.cra_name, + ce_algs[i].stat_req, ce_algs[i].stat_bytes); + break; + } + } +#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG + seq_printf(seq, "HWRNG %lu %lu\n", + ce->hwrng_stat_req, ce->hwrng_stat_bytes); +#endif + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(sun8i_ce_debugfs); +#endif + +static void sun8i_ce_free_chanlist(struct sun8i_ce_dev *ce, int i) +{ + while (i >= 0) { + crypto_engine_exit(ce->chanlist[i].engine); + if (ce->chanlist[i].tl) + dma_free_coherent(ce->dev, sizeof(struct ce_task), + ce->chanlist[i].tl, + ce->chanlist[i].t_phy); + i--; + } +} + +/* 
+ * Allocate the channel list structure + */ +static int sun8i_ce_allocate_chanlist(struct sun8i_ce_dev *ce) +{ + int i, err; + + ce->chanlist = devm_kcalloc(ce->dev, MAXFLOW, + sizeof(struct sun8i_ce_flow), GFP_KERNEL); + if (!ce->chanlist) + return -ENOMEM; + + for (i = 0; i < MAXFLOW; i++) { + init_completion(&ce->chanlist[i].complete); + + ce->chanlist[i].engine = crypto_engine_alloc_init(ce->dev, true); + if (!ce->chanlist[i].engine) { + dev_err(ce->dev, "Cannot allocate engine\n"); + i--; + err = -ENOMEM; + goto error_engine; + } + err = crypto_engine_start(ce->chanlist[i].engine); + if (err) { + dev_err(ce->dev, "Cannot start engine\n"); + goto error_engine; + } + ce->chanlist[i].tl = dma_alloc_coherent(ce->dev, + sizeof(struct ce_task), + &ce->chanlist[i].t_phy, + GFP_KERNEL); + if (!ce->chanlist[i].tl) { + dev_err(ce->dev, "Cannot get DMA memory for task %d\n", + i); + err = -ENOMEM; + goto error_engine; + } + ce->chanlist[i].bounce_iv = devm_kmalloc(ce->dev, AES_BLOCK_SIZE, + GFP_KERNEL | GFP_DMA); + if (!ce->chanlist[i].bounce_iv) { + err = -ENOMEM; + goto error_engine; + } + ce->chanlist[i].backup_iv = devm_kmalloc(ce->dev, AES_BLOCK_SIZE, + GFP_KERNEL); + if (!ce->chanlist[i].backup_iv) { + err = -ENOMEM; + goto error_engine; + } + } + return 0; +error_engine: + sun8i_ce_free_chanlist(ce, i); + return err; +} + +/* + * Power management strategy: The device is suspended unless a TFM exists for + * one of the algorithms proposed by this driver. + */ +static int sun8i_ce_pm_suspend(struct device *dev) +{ + struct sun8i_ce_dev *ce = dev_get_drvdata(dev); + int i; + + reset_control_assert(ce->reset); + for (i = 0; i < CE_MAX_CLOCKS; i++) + clk_disable_unprepare(ce->ceclks[i]); + return 0; +} + +static int sun8i_ce_pm_resume(struct device *dev) +{ + struct sun8i_ce_dev *ce = dev_get_drvdata(dev); + int err, i; + + for (i = 0; i < CE_MAX_CLOCKS; i++) { + if (!ce->variant->ce_clks[i].name) + continue; + err = clk_prepare_enable(ce->ceclks[i]); + if (err) { + dev_err(ce->dev, "Cannot prepare_enable %s\n", + ce->variant->ce_clks[i].name); + goto error; + } + } + err = reset_control_deassert(ce->reset); + if (err) { + dev_err(ce->dev, "Cannot deassert reset control\n"); + goto error; + } + return 0; +error: + sun8i_ce_pm_suspend(dev); + return err; +} + +static const struct dev_pm_ops sun8i_ce_pm_ops = { + SET_RUNTIME_PM_OPS(sun8i_ce_pm_suspend, sun8i_ce_pm_resume, NULL) +}; + +static int sun8i_ce_pm_init(struct sun8i_ce_dev *ce) +{ + int err; + + pm_runtime_use_autosuspend(ce->dev); + pm_runtime_set_autosuspend_delay(ce->dev, 2000); + + err = pm_runtime_set_suspended(ce->dev); + if (err) + return err; + pm_runtime_enable(ce->dev); + return err; +} + +static void sun8i_ce_pm_exit(struct sun8i_ce_dev *ce) +{ + pm_runtime_disable(ce->dev); +} + +static int sun8i_ce_get_clks(struct sun8i_ce_dev *ce) +{ + unsigned long cr; + int err, i; + + for (i = 0; i < CE_MAX_CLOCKS; i++) { + if (!ce->variant->ce_clks[i].name) + continue; + ce->ceclks[i] = devm_clk_get(ce->dev, ce->variant->ce_clks[i].name); + if (IS_ERR(ce->ceclks[i])) { + err = PTR_ERR(ce->ceclks[i]); + dev_err(ce->dev, "Cannot get %s CE clock err=%d\n", + ce->variant->ce_clks[i].name, err); + return err; + } + cr = clk_get_rate(ce->ceclks[i]); + if (!cr) + return -EINVAL; + if (ce->variant->ce_clks[i].freq > 0 && + cr != ce->variant->ce_clks[i].freq) { + dev_info(ce->dev, "Set %s clock to %lu (%lu Mhz) from %lu (%lu Mhz)\n", + ce->variant->ce_clks[i].name, + ce->variant->ce_clks[i].freq, + ce->variant->ce_clks[i].freq / 1000000, + cr, 
cr / 1000000); + err = clk_set_rate(ce->ceclks[i], ce->variant->ce_clks[i].freq); + if (err) + dev_err(ce->dev, "Fail to set %s clk speed to %lu hz\n", + ce->variant->ce_clks[i].name, + ce->variant->ce_clks[i].freq); + } + if (ce->variant->ce_clks[i].max_freq > 0 && + cr > ce->variant->ce_clks[i].max_freq) + dev_warn(ce->dev, "Frequency for %s (%lu hz) is higher than datasheet's recommendation (%lu hz)", + ce->variant->ce_clks[i].name, cr, + ce->variant->ce_clks[i].max_freq); + } + return 0; +} + +static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce) +{ + int ce_method, err, id; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(ce_algs); i++) { + ce_algs[i].ce = ce; + switch (ce_algs[i].type) { + case CRYPTO_ALG_TYPE_SKCIPHER: + id = ce_algs[i].ce_algo_id; + ce_method = ce->variant->alg_cipher[id]; + if (ce_method == CE_ID_NOTSUPP) { + dev_dbg(ce->dev, + "DEBUG: Algo of %s not supported\n", + ce_algs[i].alg.skcipher.base.cra_name); + ce_algs[i].ce = NULL; + break; + } + id = ce_algs[i].ce_blockmode; + ce_method = ce->variant->op_mode[id]; + if (ce_method == CE_ID_NOTSUPP) { + dev_dbg(ce->dev, "DEBUG: Blockmode of %s not supported\n", + ce_algs[i].alg.skcipher.base.cra_name); + ce_algs[i].ce = NULL; + break; + } + dev_info(ce->dev, "Register %s\n", + ce_algs[i].alg.skcipher.base.cra_name); + err = crypto_register_skcipher(&ce_algs[i].alg.skcipher); + if (err) { + dev_err(ce->dev, "ERROR: Fail to register %s\n", + ce_algs[i].alg.skcipher.base.cra_name); + ce_algs[i].ce = NULL; + return err; + } + break; + case CRYPTO_ALG_TYPE_AHASH: + id = ce_algs[i].ce_algo_id; + ce_method = ce->variant->alg_hash[id]; + if (ce_method == CE_ID_NOTSUPP) { + dev_info(ce->dev, + "DEBUG: Algo of %s not supported\n", + ce_algs[i].alg.hash.halg.base.cra_name); + ce_algs[i].ce = NULL; + break; + } + dev_info(ce->dev, "Register %s\n", + ce_algs[i].alg.hash.halg.base.cra_name); + err = crypto_register_ahash(&ce_algs[i].alg.hash); + if (err) { + dev_err(ce->dev, "ERROR: Fail to register %s\n", + ce_algs[i].alg.hash.halg.base.cra_name); + ce_algs[i].ce = NULL; + return err; + } + break; + case CRYPTO_ALG_TYPE_RNG: + if (ce->variant->prng == CE_ID_NOTSUPP) { + dev_info(ce->dev, + "DEBUG: Algo of %s not supported\n", + ce_algs[i].alg.rng.base.cra_name); + ce_algs[i].ce = NULL; + break; + } + dev_info(ce->dev, "Register %s\n", + ce_algs[i].alg.rng.base.cra_name); + err = crypto_register_rng(&ce_algs[i].alg.rng); + if (err) { + dev_err(ce->dev, "Fail to register %s\n", + ce_algs[i].alg.rng.base.cra_name); + ce_algs[i].ce = NULL; + } + break; + default: + ce_algs[i].ce = NULL; + dev_err(ce->dev, "ERROR: tried to register an unknown algo\n"); + } + } + return 0; +} + +static void sun8i_ce_unregister_algs(struct sun8i_ce_dev *ce) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(ce_algs); i++) { + if (!ce_algs[i].ce) + continue; + switch (ce_algs[i].type) { + case CRYPTO_ALG_TYPE_SKCIPHER: + dev_info(ce->dev, "Unregister %d %s\n", i, + ce_algs[i].alg.skcipher.base.cra_name); + crypto_unregister_skcipher(&ce_algs[i].alg.skcipher); + break; + case CRYPTO_ALG_TYPE_AHASH: + dev_info(ce->dev, "Unregister %d %s\n", i, + ce_algs[i].alg.hash.halg.base.cra_name); + crypto_unregister_ahash(&ce_algs[i].alg.hash); + break; + case CRYPTO_ALG_TYPE_RNG: + dev_info(ce->dev, "Unregister %d %s\n", i, + ce_algs[i].alg.rng.base.cra_name); + crypto_unregister_rng(&ce_algs[i].alg.rng); + break; + } + } +} + +static int sun8i_ce_probe(struct platform_device *pdev) +{ + struct sun8i_ce_dev *ce; + int err, irq; + u32 v; + + ce = 
devm_kzalloc(&pdev->dev, sizeof(*ce), GFP_KERNEL); + if (!ce) + return -ENOMEM; + + ce->dev = &pdev->dev; + platform_set_drvdata(pdev, ce); + + ce->variant = of_device_get_match_data(&pdev->dev); + if (!ce->variant) { + dev_err(&pdev->dev, "Missing Crypto Engine variant\n"); + return -EINVAL; + } + + ce->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ce->base)) + return PTR_ERR(ce->base); + + err = sun8i_ce_get_clks(ce); + if (err) + return err; + + /* Get Non Secure IRQ */ + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ce->reset = devm_reset_control_get(&pdev->dev, NULL); + if (IS_ERR(ce->reset)) + return dev_err_probe(&pdev->dev, PTR_ERR(ce->reset), + "No reset control found\n"); + + mutex_init(&ce->mlock); + mutex_init(&ce->rnglock); + + err = sun8i_ce_allocate_chanlist(ce); + if (err) + return err; + + err = sun8i_ce_pm_init(ce); + if (err) + goto error_pm; + + err = devm_request_irq(&pdev->dev, irq, ce_irq_handler, 0, + "sun8i-ce-ns", ce); + if (err) { + dev_err(ce->dev, "Cannot request CryptoEngine Non-secure IRQ (err=%d)\n", err); + goto error_irq; + } + + err = sun8i_ce_register_algs(ce); + if (err) + goto error_alg; + + err = pm_runtime_resume_and_get(ce->dev); + if (err < 0) + goto error_alg; + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG + sun8i_ce_hwrng_register(ce); +#endif + + v = readl(ce->base + CE_CTR); + v >>= CE_DIE_ID_SHIFT; + v &= CE_DIE_ID_MASK; + dev_info(&pdev->dev, "CryptoEngine Die ID %x\n", v); + + pm_runtime_put_sync(ce->dev); + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG + /* Ignore error of debugfs */ + ce->dbgfs_dir = debugfs_create_dir("sun8i-ce", NULL); + ce->dbgfs_stats = debugfs_create_file("stats", 0444, + ce->dbgfs_dir, ce, + &sun8i_ce_debugfs_fops); +#endif + + return 0; +error_alg: + sun8i_ce_unregister_algs(ce); +error_irq: + sun8i_ce_pm_exit(ce); +error_pm: + sun8i_ce_free_chanlist(ce, MAXFLOW - 1); + return err; +} + +static int sun8i_ce_remove(struct platform_device *pdev) +{ + struct sun8i_ce_dev *ce = platform_get_drvdata(pdev); + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG + sun8i_ce_hwrng_unregister(ce); +#endif + + sun8i_ce_unregister_algs(ce); + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG + debugfs_remove_recursive(ce->dbgfs_dir); +#endif + + sun8i_ce_free_chanlist(ce, MAXFLOW - 1); + + sun8i_ce_pm_exit(ce); + return 0; +} + +static const struct of_device_id sun8i_ce_crypto_of_match_table[] = { + { .compatible = "allwinner,sun8i-h3-crypto", + .data = &ce_h3_variant }, + { .compatible = "allwinner,sun8i-r40-crypto", + .data = &ce_r40_variant }, + { .compatible = "allwinner,sun20i-d1-crypto", + .data = &ce_d1_variant }, + { .compatible = "allwinner,sun50i-a64-crypto", + .data = &ce_a64_variant }, + { .compatible = "allwinner,sun50i-h5-crypto", + .data = &ce_h5_variant }, + { .compatible = "allwinner,sun50i-h6-crypto", + .data = &ce_h6_variant }, + {} +}; +MODULE_DEVICE_TABLE(of, sun8i_ce_crypto_of_match_table); + +static struct platform_driver sun8i_ce_driver = { + .probe = sun8i_ce_probe, + .remove = sun8i_ce_remove, + .driver = { + .name = "sun8i-ce", + .pm = &sun8i_ce_pm_ops, + .of_match_table = sun8i_ce_crypto_of_match_table, + }, +}; + +module_platform_driver(sun8i_ce_driver); + +MODULE_DESCRIPTION("Allwinner Crypto Engine cryptographic offloader"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Corentin Labbe <clabbe.montjoie@gmail.com>"); diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c new file mode 100644 index 000000000..8b5b9b9d0 --- /dev/null +++ 
b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c @@ -0,0 +1,472 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sun8i-ce-hash.c - hardware cryptographic offloader for + * Allwinner H3/A64/H5/H2+/H6/R40 SoC + * + * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com> + * + * This file add support for MD5 and SHA1/SHA224/SHA256/SHA384/SHA512. + * + * You could find the datasheet in Documentation/arm/sunxi.rst + */ +#include <linux/bottom_half.h> +#include <linux/dma-mapping.h> +#include <linux/pm_runtime.h> +#include <linux/scatterlist.h> +#include <crypto/internal/hash.h> +#include <crypto/sha1.h> +#include <crypto/sha2.h> +#include <crypto/md5.h> +#include "sun8i-ce.h" + +int sun8i_ce_hash_crainit(struct crypto_tfm *tfm) +{ + struct sun8i_ce_hash_tfm_ctx *op = crypto_tfm_ctx(tfm); + struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); + struct sun8i_ce_alg_template *algt; + int err; + + memset(op, 0, sizeof(struct sun8i_ce_hash_tfm_ctx)); + + algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash); + op->ce = algt->ce; + + op->enginectx.op.do_one_request = sun8i_ce_hash_run; + op->enginectx.op.prepare_request = NULL; + op->enginectx.op.unprepare_request = NULL; + + /* FALLBACK */ + op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(op->fallback_tfm)) { + dev_err(algt->ce->dev, "Fallback driver could no be loaded\n"); + return PTR_ERR(op->fallback_tfm); + } + + if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm)) + algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm); + + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct sun8i_ce_hash_reqctx) + + crypto_ahash_reqsize(op->fallback_tfm)); + + memcpy(algt->fbname, crypto_tfm_alg_driver_name(&op->fallback_tfm->base), + CRYPTO_MAX_ALG_NAME); + + err = pm_runtime_get_sync(op->ce->dev); + if (err < 0) + goto error_pm; + return 0; +error_pm: + pm_runtime_put_noidle(op->ce->dev); + crypto_free_ahash(op->fallback_tfm); + return err; +} + +void sun8i_ce_hash_craexit(struct crypto_tfm *tfm) +{ + struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm); + + crypto_free_ahash(tfmctx->fallback_tfm); + pm_runtime_put_sync_suspend(tfmctx->ce->dev); +} + +int sun8i_ce_hash_init(struct ahash_request *areq) +{ + struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); + + memset(rctx, 0, sizeof(struct sun8i_ce_hash_reqctx)); + + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_init(&rctx->fallback_req); +} + +int sun8i_ce_hash_export(struct ahash_request *areq, void *out) +{ + struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_export(&rctx->fallback_req, out); +} + +int sun8i_ce_hash_import(struct ahash_request *areq, const void *in) +{ + struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, 
tfmctx->fallback_tfm); + rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_import(&rctx->fallback_req, in); +} + +int sun8i_ce_hash_final(struct ahash_request *areq) +{ + struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); +#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG + struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); + struct sun8i_ce_alg_template *algt; +#endif + + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + rctx->fallback_req.base.flags = areq->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + rctx->fallback_req.result = areq->result; + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG + algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash); + algt->stat_fb++; +#endif + + return crypto_ahash_final(&rctx->fallback_req); +} + +int sun8i_ce_hash_update(struct ahash_request *areq) +{ + struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + rctx->fallback_req.base.flags = areq->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + rctx->fallback_req.nbytes = areq->nbytes; + rctx->fallback_req.src = areq->src; + + return crypto_ahash_update(&rctx->fallback_req); +} + +int sun8i_ce_hash_finup(struct ahash_request *areq) +{ + struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); +#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG + struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); + struct sun8i_ce_alg_template *algt; +#endif + + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + rctx->fallback_req.base.flags = areq->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + + rctx->fallback_req.nbytes = areq->nbytes; + rctx->fallback_req.src = areq->src; + rctx->fallback_req.result = areq->result; +#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG + algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash); + algt->stat_fb++; +#endif + + return crypto_ahash_finup(&rctx->fallback_req); +} + +static int sun8i_ce_hash_digest_fb(struct ahash_request *areq) +{ + struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); +#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG + struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); + struct sun8i_ce_alg_template *algt; +#endif + + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + rctx->fallback_req.base.flags = areq->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + + rctx->fallback_req.nbytes = areq->nbytes; + rctx->fallback_req.src = areq->src; + rctx->fallback_req.result = areq->result; +#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG + algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash); + algt->stat_fb++; +#endif + + return crypto_ahash_digest(&rctx->fallback_req); +} + +static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); + struct sun8i_ce_alg_template *algt; + struct scatterlist *sg; + + algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash); + + 
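/* a zero-length request is always handed to the software fallback */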
if (areq->nbytes == 0) { + algt->stat_fb_len0++; + return true; + } + /* we need to reserve one SG for padding one */ + if (sg_nents_for_len(areq->src, areq->nbytes) > MAX_SG - 1) { + algt->stat_fb_maxsg++; + return true; + } + sg = areq->src; + while (sg) { + if (sg->length % 4) { + algt->stat_fb_srclen++; + return true; + } + if (!IS_ALIGNED(sg->offset, sizeof(u32))) { + algt->stat_fb_srcali++; + return true; + } + sg = sg_next(sg); + } + return false; +} + +int sun8i_ce_hash_digest(struct ahash_request *areq) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); + struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq); + struct sun8i_ce_alg_template *algt; + struct sun8i_ce_dev *ce; + struct crypto_engine *engine; + struct scatterlist *sg; + int nr_sgs, e, i; + + if (sun8i_ce_hash_need_fallback(areq)) + return sun8i_ce_hash_digest_fb(areq); + + nr_sgs = sg_nents_for_len(areq->src, areq->nbytes); + if (nr_sgs > MAX_SG - 1) + return sun8i_ce_hash_digest_fb(areq); + + for_each_sg(areq->src, sg, nr_sgs, i) { + if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32))) + return sun8i_ce_hash_digest_fb(areq); + } + + algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash); + ce = algt->ce; + + e = sun8i_ce_get_engine_number(ce); + rctx->flow = e; + engine = ce->chanlist[e].engine; + + return crypto_transfer_hash_request_to_engine(engine, areq); +} + +static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count, bool le, int bs) +{ + u64 fill, min_fill, j, k; + __be64 *bebits; + __le64 *lebits; + + j = padi; + buf[j++] = cpu_to_le32(0x80); + + if (bs == 64) { + fill = 64 - (byte_count % 64); + min_fill = 2 * sizeof(u32) + sizeof(u32); + } else { + fill = 128 - (byte_count % 128); + min_fill = 4 * sizeof(u32) + sizeof(u32); + } + + if (fill < min_fill) + fill += bs; + + k = j; + j += (fill - min_fill) / sizeof(u32); + if (j * 4 > bufsize) { + pr_err("%s OVERFLOW %llu\n", __func__, j); + return 0; + } + for (; k < j; k++) + buf[k] = 0; + + if (le) { + /* MD5 */ + lebits = (__le64 *)&buf[j]; + *lebits = cpu_to_le64(byte_count << 3); + j += 2; + } else { + if (bs == 64) { + /* sha1 sha224 sha256 */ + bebits = (__be64 *)&buf[j]; + *bebits = cpu_to_be64(byte_count << 3); + j += 2; + } else { + /* sha384 sha512*/ + bebits = (__be64 *)&buf[j]; + *bebits = cpu_to_be64(byte_count >> 61); + j += 2; + bebits = (__be64 *)&buf[j]; + *bebits = cpu_to_be64(byte_count << 3); + j += 2; + } + } + if (j * 4 > bufsize) { + pr_err("%s OVERFLOW %llu\n", __func__, j); + return 0; + } + + return j; +} + +int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) +{ + struct ahash_request *areq = container_of(breq, struct ahash_request, base); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); + struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq); + struct sun8i_ce_alg_template *algt; + struct sun8i_ce_dev *ce; + struct sun8i_ce_flow *chan; + struct ce_task *cet; + struct scatterlist *sg; + int nr_sgs, flow, err; + unsigned int len; + u32 common; + u64 byte_count; + __le32 *bf; + void *buf = NULL; + int j, i, todo; + void *result = NULL; + u64 bs; + int digestsize; + dma_addr_t addr_res, addr_pad; + int ns = sg_nents_for_len(areq->src, areq->nbytes); + + algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash); + ce = algt->ce; + + bs = algt->alg.hash.halg.base.cra_blocksize; + digestsize = algt->alg.hash.halg.digestsize; + 
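/* SHA-224/SHA-384 are truncated variants: the engine writes out the full SHA-256/SHA-512 state, so size the result buffer for the larger digest */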
if (digestsize == SHA224_DIGEST_SIZE) + digestsize = SHA256_DIGEST_SIZE; + if (digestsize == SHA384_DIGEST_SIZE) + digestsize = SHA512_DIGEST_SIZE; + + /* the padding could be up to two block. */ + buf = kzalloc(bs * 2, GFP_KERNEL | GFP_DMA); + if (!buf) { + err = -ENOMEM; + goto theend; + } + bf = (__le32 *)buf; + + result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA); + if (!result) { + err = -ENOMEM; + goto theend; + } + + flow = rctx->flow; + chan = &ce->chanlist[flow]; + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG + algt->stat_req++; +#endif + dev_dbg(ce->dev, "%s %s len=%d\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->nbytes); + + cet = chan->tl; + memset(cet, 0, sizeof(struct ce_task)); + + cet->t_id = cpu_to_le32(flow); + common = ce->variant->alg_hash[algt->ce_algo_id]; + common |= CE_COMM_INT; + cet->t_common_ctl = cpu_to_le32(common); + + cet->t_sym_ctl = 0; + cet->t_asym_ctl = 0; + + nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE); + if (nr_sgs <= 0 || nr_sgs > MAX_SG) { + dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs); + err = -EINVAL; + goto theend; + } + + len = areq->nbytes; + for_each_sg(areq->src, sg, nr_sgs, i) { + cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg)); + todo = min(len, sg_dma_len(sg)); + cet->t_src[i].len = cpu_to_le32(todo / 4); + len -= todo; + } + if (len > 0) { + dev_err(ce->dev, "remaining len %d\n", len); + err = -EINVAL; + goto theend; + } + addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE); + cet->t_dst[0].addr = cpu_to_le32(addr_res); + cet->t_dst[0].len = cpu_to_le32(digestsize / 4); + if (dma_mapping_error(ce->dev, addr_res)) { + dev_err(ce->dev, "DMA map dest\n"); + err = -EINVAL; + goto theend; + } + + byte_count = areq->nbytes; + j = 0; + + switch (algt->ce_algo_id) { + case CE_ID_HASH_MD5: + j = hash_pad(bf, 2 * bs, j, byte_count, true, bs); + break; + case CE_ID_HASH_SHA1: + case CE_ID_HASH_SHA224: + case CE_ID_HASH_SHA256: + j = hash_pad(bf, 2 * bs, j, byte_count, false, bs); + break; + case CE_ID_HASH_SHA384: + case CE_ID_HASH_SHA512: + j = hash_pad(bf, 2 * bs, j, byte_count, false, bs); + break; + } + if (!j) { + err = -EINVAL; + goto theend; + } + + addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE); + cet->t_src[i].addr = cpu_to_le32(addr_pad); + cet->t_src[i].len = cpu_to_le32(j); + if (dma_mapping_error(ce->dev, addr_pad)) { + dev_err(ce->dev, "DMA error on padding SG\n"); + err = -EINVAL; + goto theend; + } + + if (ce->variant->hash_t_dlen_in_bits) + cet->t_dlen = cpu_to_le32((areq->nbytes + j * 4) * 8); + else + cet->t_dlen = cpu_to_le32(areq->nbytes / 4 + j); + + chan->timeout = areq->nbytes; + + err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm)); + + dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE); + dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE); + dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE); + + + memcpy(areq->result, result, algt->alg.hash.halg.digestsize); +theend: + kfree(buf); + kfree(result); + local_bh_disable(); + crypto_finalize_hash_request(engine, breq, err); + local_bh_enable(); + return 0; +} diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c new file mode 100644 index 000000000..b3cc43ea6 --- /dev/null +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sun8i-ce-prng.c - hardware cryptographic offloader for + * Allwinner H3/A64/H5/H2+/H6/R40 SoC + * + * Copyright (C) 2015-2020 
Corentin Labbe <clabbe@baylibre.com> + * + * This file handle the PRNG + * + * You could find a link for the datasheet in Documentation/arm/sunxi.rst + */ +#include "sun8i-ce.h" +#include <linux/dma-mapping.h> +#include <linux/pm_runtime.h> +#include <crypto/internal/rng.h> + +int sun8i_ce_prng_init(struct crypto_tfm *tfm) +{ + struct sun8i_ce_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + + memset(ctx, 0, sizeof(struct sun8i_ce_rng_tfm_ctx)); + return 0; +} + +void sun8i_ce_prng_exit(struct crypto_tfm *tfm) +{ + struct sun8i_ce_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + + kfree_sensitive(ctx->seed); + ctx->seed = NULL; + ctx->slen = 0; +} + +int sun8i_ce_prng_seed(struct crypto_rng *tfm, const u8 *seed, + unsigned int slen) +{ + struct sun8i_ce_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm); + + if (ctx->seed && ctx->slen != slen) { + kfree_sensitive(ctx->seed); + ctx->slen = 0; + ctx->seed = NULL; + } + if (!ctx->seed) + ctx->seed = kmalloc(slen, GFP_KERNEL | GFP_DMA); + if (!ctx->seed) + return -ENOMEM; + + memcpy(ctx->seed, seed, slen); + ctx->slen = slen; + + return 0; +} + +int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int dlen) +{ + struct sun8i_ce_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm); + struct rng_alg *alg = crypto_rng_alg(tfm); + struct sun8i_ce_alg_template *algt; + struct sun8i_ce_dev *ce; + dma_addr_t dma_iv, dma_dst; + int err = 0; + int flow = 3; + unsigned int todo; + struct sun8i_ce_flow *chan; + struct ce_task *cet; + u32 common, sym; + void *d; + + algt = container_of(alg, struct sun8i_ce_alg_template, alg.rng); + ce = algt->ce; + + if (ctx->slen == 0) { + dev_err(ce->dev, "not seeded\n"); + return -EINVAL; + } + + /* we want dlen + seedsize rounded up to a multiple of PRNG_DATA_SIZE */ + todo = dlen + ctx->slen + PRNG_DATA_SIZE * 2; + todo -= todo % PRNG_DATA_SIZE; + + d = kzalloc(todo, GFP_KERNEL | GFP_DMA); + if (!d) { + err = -ENOMEM; + goto err_mem; + } + + dev_dbg(ce->dev, "%s PRNG slen=%u dlen=%u todo=%u multi=%u\n", __func__, + slen, dlen, todo, todo / PRNG_DATA_SIZE); + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG + algt->stat_req++; + algt->stat_bytes += todo; +#endif + + dma_iv = dma_map_single(ce->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE); + if (dma_mapping_error(ce->dev, dma_iv)) { + dev_err(ce->dev, "Cannot DMA MAP IV\n"); + err = -EFAULT; + goto err_iv; + } + + dma_dst = dma_map_single(ce->dev, d, todo, DMA_FROM_DEVICE); + if (dma_mapping_error(ce->dev, dma_dst)) { + dev_err(ce->dev, "Cannot DMA MAP DST\n"); + err = -EFAULT; + goto err_dst; + } + + err = pm_runtime_resume_and_get(ce->dev); + if (err < 0) + goto err_pm; + + mutex_lock(&ce->rnglock); + chan = &ce->chanlist[flow]; + + cet = &chan->tl[0]; + memset(cet, 0, sizeof(struct ce_task)); + + cet->t_id = cpu_to_le32(flow); + common = ce->variant->prng | CE_COMM_INT; + cet->t_common_ctl = cpu_to_le32(common); + + /* recent CE (H6) need length in bytes, in word otherwise */ + if (ce->variant->prng_t_dlen_in_bytes) + cet->t_dlen = cpu_to_le32(todo); + else + cet->t_dlen = cpu_to_le32(todo / 4); + + sym = PRNG_LD; + cet->t_sym_ctl = cpu_to_le32(sym); + cet->t_asym_ctl = 0; + + cet->t_key = cpu_to_le32(dma_iv); + cet->t_iv = cpu_to_le32(dma_iv); + + cet->t_dst[0].addr = cpu_to_le32(dma_dst); + cet->t_dst[0].len = cpu_to_le32(todo / 4); + ce->chanlist[flow].timeout = 2000; + + err = sun8i_ce_run_task(ce, 3, "PRNG"); + mutex_unlock(&ce->rnglock); + + pm_runtime_put(ce->dev); + +err_pm: + dma_unmap_single(ce->dev, dma_dst, todo, DMA_FROM_DEVICE); +err_dst: + 
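+	/* Editor's note: unwind the DMA mappings; on success, dlen bytes are
+	 * copied out and the data generated past dlen re-seeds the PRNG. */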
dma_unmap_single(ce->dev, dma_iv, ctx->slen, DMA_TO_DEVICE); + + if (!err) { + memcpy(dst, d, dlen); + memcpy(ctx->seed, d + dlen, ctx->slen); + } +err_iv: + kfree_sensitive(d); +err_mem: + return err; +} diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c new file mode 100644 index 000000000..c4b0a8b58 --- /dev/null +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sun8i-ce-trng.c - hardware cryptographic offloader for + * Allwinner H3/A64/H5/H2+/H6/R40 SoC + * + * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com> + * + * This file handle the TRNG + * + * You could find a link for the datasheet in Documentation/arm/sunxi.rst + */ +#include "sun8i-ce.h" +#include <linux/dma-mapping.h> +#include <linux/pm_runtime.h> +#include <linux/hw_random.h> +/* + * Note that according to the algorithm ID, 2 versions of the TRNG exists, + * The first present in H3/H5/R40/A64 and the second present in H6. + * This file adds support for both, but only the second is working + * reliabily according to rngtest. + **/ + +static int sun8i_ce_trng_read(struct hwrng *rng, void *data, size_t max, bool wait) +{ + struct sun8i_ce_dev *ce; + dma_addr_t dma_dst; + int err = 0; + int flow = 3; + unsigned int todo; + struct sun8i_ce_flow *chan; + struct ce_task *cet; + u32 common; + void *d; + + ce = container_of(rng, struct sun8i_ce_dev, trng); + + /* round the data length to a multiple of 32*/ + todo = max + 32; + todo -= todo % 32; + + d = kzalloc(todo, GFP_KERNEL | GFP_DMA); + if (!d) + return -ENOMEM; + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG + ce->hwrng_stat_req++; + ce->hwrng_stat_bytes += todo; +#endif + + dma_dst = dma_map_single(ce->dev, d, todo, DMA_FROM_DEVICE); + if (dma_mapping_error(ce->dev, dma_dst)) { + dev_err(ce->dev, "Cannot DMA MAP DST\n"); + err = -EFAULT; + goto err_dst; + } + + err = pm_runtime_resume_and_get(ce->dev); + if (err < 0) + goto err_pm; + + mutex_lock(&ce->rnglock); + chan = &ce->chanlist[flow]; + + cet = &chan->tl[0]; + memset(cet, 0, sizeof(struct ce_task)); + + cet->t_id = cpu_to_le32(flow); + common = ce->variant->trng | CE_COMM_INT; + cet->t_common_ctl = cpu_to_le32(common); + + /* recent CE (H6) need length in bytes, in word otherwise */ + if (ce->variant->trng_t_dlen_in_bytes) + cet->t_dlen = cpu_to_le32(todo); + else + cet->t_dlen = cpu_to_le32(todo / 4); + + cet->t_sym_ctl = 0; + cet->t_asym_ctl = 0; + + cet->t_dst[0].addr = cpu_to_le32(dma_dst); + cet->t_dst[0].len = cpu_to_le32(todo / 4); + ce->chanlist[flow].timeout = todo; + + err = sun8i_ce_run_task(ce, 3, "TRNG"); + mutex_unlock(&ce->rnglock); + + pm_runtime_put(ce->dev); + +err_pm: + dma_unmap_single(ce->dev, dma_dst, todo, DMA_FROM_DEVICE); + + if (!err) { + memcpy(data, d, max); + err = max; + } +err_dst: + kfree_sensitive(d); + return err; +} + +int sun8i_ce_hwrng_register(struct sun8i_ce_dev *ce) +{ + int ret; + + if (ce->variant->trng == CE_ID_NOTSUPP) { + dev_info(ce->dev, "TRNG not supported\n"); + return 0; + } + ce->trng.name = "sun8i Crypto Engine TRNG"; + ce->trng.read = sun8i_ce_trng_read; + ce->trng.quality = 1000; + + ret = hwrng_register(&ce->trng); + if (ret) + dev_err(ce->dev, "Fail to register the TRNG\n"); + return ret; +} + +void sun8i_ce_hwrng_unregister(struct sun8i_ce_dev *ce) +{ + if (ce->variant->trng == CE_ID_NOTSUPP) + return; + hwrng_unregister(&ce->trng); +} diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h 
b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h new file mode 100644 index 000000000..8177aaba4 --- /dev/null +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h @@ -0,0 +1,384 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * sun8i-ce.h - hardware cryptographic offloader for + * Allwinner H3/A64/H5/H2+/H6 SoC + * + * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com> + */ +#include <crypto/aes.h> +#include <crypto/des.h> +#include <crypto/engine.h> +#include <crypto/skcipher.h> +#include <linux/atomic.h> +#include <linux/debugfs.h> +#include <linux/crypto.h> +#include <linux/hw_random.h> +#include <crypto/internal/hash.h> +#include <crypto/md5.h> +#include <crypto/rng.h> +#include <crypto/sha1.h> +#include <crypto/sha2.h> + +/* CE Registers */ +#define CE_TDQ 0x00 +#define CE_CTR 0x04 +#define CE_ICR 0x08 +#define CE_ISR 0x0C +#define CE_TLR 0x10 +#define CE_TSR 0x14 +#define CE_ESR 0x18 +#define CE_CSSGR 0x1C +#define CE_CDSGR 0x20 +#define CE_CSAR 0x24 +#define CE_CDAR 0x28 +#define CE_TPR 0x2C + +/* Used in struct ce_task */ +/* ce_task common */ +#define CE_ENCRYPTION 0 +#define CE_DECRYPTION BIT(8) + +#define CE_COMM_INT BIT(31) + +/* ce_task symmetric */ +#define CE_AES_128BITS 0 +#define CE_AES_192BITS 1 +#define CE_AES_256BITS 2 + +#define CE_OP_ECB 0 +#define CE_OP_CBC (1 << 8) + +#define CE_ALG_AES 0 +#define CE_ALG_DES 1 +#define CE_ALG_3DES 2 +#define CE_ALG_MD5 16 +#define CE_ALG_SHA1 17 +#define CE_ALG_SHA224 18 +#define CE_ALG_SHA256 19 +#define CE_ALG_SHA384 20 +#define CE_ALG_SHA512 21 +#define CE_ALG_TRNG 48 +#define CE_ALG_PRNG 49 +#define CE_ALG_TRNG_V2 0x1c +#define CE_ALG_PRNG_V2 0x1d + +/* Used in ce_variant */ +#define CE_ID_NOTSUPP 0xFF + +#define CE_ID_CIPHER_AES 0 +#define CE_ID_CIPHER_DES 1 +#define CE_ID_CIPHER_DES3 2 +#define CE_ID_CIPHER_MAX 3 + +#define CE_ID_HASH_MD5 0 +#define CE_ID_HASH_SHA1 1 +#define CE_ID_HASH_SHA224 2 +#define CE_ID_HASH_SHA256 3 +#define CE_ID_HASH_SHA384 4 +#define CE_ID_HASH_SHA512 5 +#define CE_ID_HASH_MAX 6 + +#define CE_ID_OP_ECB 0 +#define CE_ID_OP_CBC 1 +#define CE_ID_OP_MAX 2 + +/* Used in CE registers */ +#define CE_ERR_ALGO_NOTSUP BIT(0) +#define CE_ERR_DATALEN BIT(1) +#define CE_ERR_KEYSRAM BIT(2) +#define CE_ERR_ADDR_INVALID BIT(5) +#define CE_ERR_KEYLADDER BIT(6) + +#define ESR_H3 0 +#define ESR_A64 1 +#define ESR_R40 2 +#define ESR_H5 3 +#define ESR_H6 4 +#define ESR_D1 5 + +#define PRNG_DATA_SIZE (160 / 8) +#define PRNG_SEED_SIZE DIV_ROUND_UP(175, 8) +#define PRNG_LD BIT(17) + +#define CE_DIE_ID_SHIFT 16 +#define CE_DIE_ID_MASK 0x07 + +#define MAX_SG 8 + +#define CE_MAX_CLOCKS 3 + +#define MAXFLOW 4 + +/* + * struct ce_clock - Describe clocks used by sun8i-ce + * @name: Name of clock needed by this variant + * @freq: Frequency to set for each clock + * @max_freq: Maximum frequency for each clock (generally given by datasheet) + */ +struct ce_clock { + const char *name; + unsigned long freq; + unsigned long max_freq; +}; + +/* + * struct ce_variant - Describe CE capability for each variant hardware + * @alg_cipher: list of supported ciphers. for each CE_ID_ this will give the + * coresponding CE_ALG_XXX value + * @alg_hash: list of supported hashes. 
for each CE_ID_ this will give the + * corresponding CE_ALG_XXX value + * @op_mode: list of supported block modes + * @cipher_t_dlen_in_bytes: Does the request size for cipher is in + * bytes or words + * @hash_t_dlen_in_bytes: Does the request size for hash is in + * bits or words + * @prng_t_dlen_in_bytes: Does the request size for PRNG is in + * bytes or words + * @trng_t_dlen_in_bytes: Does the request size for TRNG is in + * bytes or words + * @ce_clks: list of clocks needed by this variant + * @esr: The type of error register + * @prng: The CE_ALG_XXX value for the PRNG + * @trng: The CE_ALG_XXX value for the TRNG + */ +struct ce_variant { + char alg_cipher[CE_ID_CIPHER_MAX]; + char alg_hash[CE_ID_HASH_MAX]; + u32 op_mode[CE_ID_OP_MAX]; + bool cipher_t_dlen_in_bytes; + bool hash_t_dlen_in_bits; + bool prng_t_dlen_in_bytes; + bool trng_t_dlen_in_bytes; + struct ce_clock ce_clks[CE_MAX_CLOCKS]; + int esr; + unsigned char prng; + unsigned char trng; +}; + +struct sginfo { + __le32 addr; + __le32 len; +} __packed; + +/* + * struct ce_task - CE Task descriptor + * The structure of this descriptor could be found in the datasheet + */ +struct ce_task { + __le32 t_id; + __le32 t_common_ctl; + __le32 t_sym_ctl; + __le32 t_asym_ctl; + __le32 t_key; + __le32 t_iv; + __le32 t_ctr; + __le32 t_dlen; + struct sginfo t_src[MAX_SG]; + struct sginfo t_dst[MAX_SG]; + __le32 next; + __le32 reserved[3]; +} __packed __aligned(8); + +/* + * struct sun8i_ce_flow - Information used by each flow + * @engine: ptr to the crypto_engine for this flow + * @complete: completion for the current task on this flow + * @status: set to 1 by interrupt if task is done + * @t_phy: Physical address of task + * @tl: pointer to the current ce_task for this flow + * @backup_iv: buffer which contain the next IV to store + * @bounce_iv: buffer which contain the IV + * @stat_req: number of request done by this flow + */ +struct sun8i_ce_flow { + struct crypto_engine *engine; + struct completion complete; + int status; + dma_addr_t t_phy; + int timeout; + struct ce_task *tl; + void *backup_iv; + void *bounce_iv; +#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG + unsigned long stat_req; +#endif +}; + +/* + * struct sun8i_ce_dev - main container for all this driver information + * @base: base address of CE + * @ceclks: clocks used by CE + * @reset: pointer to reset controller + * @dev: the platform device + * @mlock: Control access to device registers + * @rnglock: Control access to the RNG (dedicated channel 3) + * @chanlist: array of all flow + * @flow: flow to use in next request + * @variant: pointer to variant specific data + * @dbgfs_dir: Debugfs dentry for statistic directory + * @dbgfs_stats: Debugfs dentry for statistic counters + */ +struct sun8i_ce_dev { + void __iomem *base; + struct clk *ceclks[CE_MAX_CLOCKS]; + struct reset_control *reset; + struct device *dev; + struct mutex mlock; + struct mutex rnglock; + struct sun8i_ce_flow *chanlist; + atomic_t flow; + const struct ce_variant *variant; +#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG + struct dentry *dbgfs_dir; + struct dentry *dbgfs_stats; +#endif +#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG + struct hwrng trng; +#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG + unsigned long hwrng_stat_req; + unsigned long hwrng_stat_bytes; +#endif +#endif +}; + +/* + * struct sun8i_cipher_req_ctx - context for a skcipher request + * @op_dir: direction (encrypt vs decrypt) for this request + * @flow: the flow to use for this request + * @ivlen: size of bounce_iv + * @nr_sgs: The number of source SG (as 
given by dma_map_sg()) + * @nr_sgd: The number of destination SG (as given by dma_map_sg()) + * @addr_iv: The IV addr returned by dma_map_single, need to unmap later + * @addr_key: The key addr returned by dma_map_single, need to unmap later + * @fallback_req: request struct for invoking the fallback skcipher TFM + */ +struct sun8i_cipher_req_ctx { + u32 op_dir; + int flow; + unsigned int ivlen; + int nr_sgs; + int nr_sgd; + dma_addr_t addr_iv; + dma_addr_t addr_key; + struct skcipher_request fallback_req; // keep at the end +}; + +/* + * struct sun8i_cipher_tfm_ctx - context for a skcipher TFM + * @enginectx: crypto_engine used by this TFM + * @key: pointer to key data + * @keylen: len of the key + * @ce: pointer to the private data of driver handling this TFM + * @fallback_tfm: pointer to the fallback TFM + */ +struct sun8i_cipher_tfm_ctx { + struct crypto_engine_ctx enginectx; + u32 *key; + u32 keylen; + struct sun8i_ce_dev *ce; + struct crypto_skcipher *fallback_tfm; +}; + +/* + * struct sun8i_ce_hash_tfm_ctx - context for an ahash TFM + * @enginectx: crypto_engine used by this TFM + * @ce: pointer to the private data of driver handling this TFM + * @fallback_tfm: pointer to the fallback TFM + */ +struct sun8i_ce_hash_tfm_ctx { + struct crypto_engine_ctx enginectx; + struct sun8i_ce_dev *ce; + struct crypto_ahash *fallback_tfm; +}; + +/* + * struct sun8i_ce_hash_reqctx - context for an ahash request + * @fallback_req: pre-allocated fallback request + * @flow: the flow to use for this request + */ +struct sun8i_ce_hash_reqctx { + struct ahash_request fallback_req; + int flow; +}; + +/* + * struct sun8i_ce_prng_ctx - context for PRNG TFM + * @seed: The seed to use + * @slen: The size of the seed + */ +struct sun8i_ce_rng_tfm_ctx { + void *seed; + unsigned int slen; +}; + +/* + * struct sun8i_ce_alg_template - crypto_alg template + * @type: the CRYPTO_ALG_TYPE for this template + * @ce_algo_id: the CE_ID for this template + * @ce_blockmode: the type of block operation CE_ID + * @ce: pointer to the sun8i_ce_dev structure associated with + * this template + * @alg: one of sub struct must be used + * @stat_req: number of request done on this template + * @stat_fb: number of request which has fallbacked + * @stat_bytes: total data size done by this template + */ +struct sun8i_ce_alg_template { + u32 type; + u32 ce_algo_id; + u32 ce_blockmode; + struct sun8i_ce_dev *ce; + union { + struct skcipher_alg skcipher; + struct ahash_alg hash; + struct rng_alg rng; + } alg; + unsigned long stat_req; + unsigned long stat_fb; + unsigned long stat_bytes; + unsigned long stat_fb_maxsg; + unsigned long stat_fb_leniv; + unsigned long stat_fb_len0; + unsigned long stat_fb_mod16; + unsigned long stat_fb_srcali; + unsigned long stat_fb_srclen; + unsigned long stat_fb_dstali; + unsigned long stat_fb_dstlen; + char fbname[CRYPTO_MAX_ALG_NAME]; +}; + +int sun8i_ce_enqueue(struct crypto_async_request *areq, u32 type); + +int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen); +int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen); +int sun8i_ce_cipher_init(struct crypto_tfm *tfm); +void sun8i_ce_cipher_exit(struct crypto_tfm *tfm); +int sun8i_ce_skdecrypt(struct skcipher_request *areq); +int sun8i_ce_skencrypt(struct skcipher_request *areq); + +int sun8i_ce_get_engine_number(struct sun8i_ce_dev *ce); + +int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name); + +int sun8i_ce_hash_crainit(struct crypto_tfm *tfm); +void 
sun8i_ce_hash_craexit(struct crypto_tfm *tfm); +int sun8i_ce_hash_init(struct ahash_request *areq); +int sun8i_ce_hash_export(struct ahash_request *areq, void *out); +int sun8i_ce_hash_import(struct ahash_request *areq, const void *in); +int sun8i_ce_hash(struct ahash_request *areq); +int sun8i_ce_hash_final(struct ahash_request *areq); +int sun8i_ce_hash_update(struct ahash_request *areq); +int sun8i_ce_hash_finup(struct ahash_request *areq); +int sun8i_ce_hash_digest(struct ahash_request *areq); +int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq); + +int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int dlen); +int sun8i_ce_prng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen); +void sun8i_ce_prng_exit(struct crypto_tfm *tfm); +int sun8i_ce_prng_init(struct crypto_tfm *tfm); + +int sun8i_ce_hwrng_register(struct sun8i_ce_dev *ce); +void sun8i_ce_hwrng_unregister(struct sun8i_ce_dev *ce); diff --git a/drivers/crypto/allwinner/sun8i-ss/Makefile b/drivers/crypto/allwinner/sun8i-ss/Makefile new file mode 100644 index 000000000..aabfd893c --- /dev/null +++ b/drivers/crypto/allwinner/sun8i-ss/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_CRYPTO_DEV_SUN8I_SS) += sun8i-ss.o +sun8i-ss-y += sun8i-ss-core.o sun8i-ss-cipher.o +sun8i-ss-$(CONFIG_CRYPTO_DEV_SUN8I_SS_PRNG) += sun8i-ss-prng.o +sun8i-ss-$(CONFIG_CRYPTO_DEV_SUN8I_SS_HASH) += sun8i-ss-hash.o diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c new file mode 100644 index 000000000..e97fb2036 --- /dev/null +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -0,0 +1,486 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sun8i-ss-cipher.c - hardware cryptographic offloader for + * Allwinner A80/A83T SoC + * + * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com> + * + * This file add support for AES cipher with 128,192,256 bits keysize in + * CBC and ECB mode. 
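+ * Requests the hardware cannot handle (empty or non-block-multiple lengths,
+ * misaligned or mismatched scatterlists) are passed to a fallback skcipher.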
+ * + * You could find a link for the datasheet in Documentation/arm/sunxi.rst + */ + +#include <linux/bottom_half.h> +#include <linux/crypto.h> +#include <linux/dma-mapping.h> +#include <linux/io.h> +#include <linux/pm_runtime.h> +#include <crypto/scatterwalk.h> +#include <crypto/internal/skcipher.h> +#include "sun8i-ss.h" + +static bool sun8i_ss_need_fallback(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct sun8i_ss_alg_template *algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher); + struct scatterlist *in_sg = areq->src; + struct scatterlist *out_sg = areq->dst; + struct scatterlist *sg; + unsigned int todo, len; + + if (areq->cryptlen == 0 || areq->cryptlen % 16) { + algt->stat_fb_len++; + return true; + } + + if (sg_nents_for_len(areq->src, areq->cryptlen) > 8 || + sg_nents_for_len(areq->dst, areq->cryptlen) > 8) { + algt->stat_fb_sgnum++; + return true; + } + + len = areq->cryptlen; + sg = areq->src; + while (sg) { + todo = min(len, sg->length); + if ((todo % 16) != 0) { + algt->stat_fb_sglen++; + return true; + } + if (!IS_ALIGNED(sg->offset, 16)) { + algt->stat_fb_align++; + return true; + } + len -= todo; + sg = sg_next(sg); + } + len = areq->cryptlen; + sg = areq->dst; + while (sg) { + todo = min(len, sg->length); + if ((todo % 16) != 0) { + algt->stat_fb_sglen++; + return true; + } + if (!IS_ALIGNED(sg->offset, 16)) { + algt->stat_fb_align++; + return true; + } + len -= todo; + sg = sg_next(sg); + } + + /* SS need same numbers of SG (with same length) for source and destination */ + in_sg = areq->src; + out_sg = areq->dst; + while (in_sg && out_sg) { + if (in_sg->length != out_sg->length) + return true; + in_sg = sg_next(in_sg); + out_sg = sg_next(out_sg); + } + if (in_sg || out_sg) + return true; + return false; +} + +static int sun8i_ss_cipher_fallback(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + int err; + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct sun8i_ss_alg_template *algt; + + algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher); + algt->stat_fb++; +#endif + skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); + skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, + areq->base.complete, areq->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst, + areq->cryptlen, areq->iv); + if (rctx->op_dir & SS_DECRYPTION) + err = crypto_skcipher_decrypt(&rctx->fallback_req); + else + err = crypto_skcipher_encrypt(&rctx->fallback_req); + return err; +} + +static int sun8i_ss_setup_ivs(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun8i_ss_dev *ss = op->ss; + struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + struct scatterlist *sg = areq->src; + unsigned int todo, offset; + unsigned int len = areq->cryptlen; + unsigned int ivsize = crypto_skcipher_ivsize(tfm); + struct sun8i_ss_flow *sf = &ss->flows[rctx->flow]; + int i = 0; + dma_addr_t a; + int err; + + rctx->ivlen = ivsize; + if (rctx->op_dir & SS_DECRYPTION) { + offset = areq->cryptlen - ivsize; + scatterwalk_map_and_copy(sf->biv, areq->src, offset, + ivsize, 
0); + } + + /* we need to copy all IVs from source in case DMA is bi-directionnal */ + while (sg && len) { + if (sg_dma_len(sg) == 0) { + sg = sg_next(sg); + continue; + } + if (i == 0) + memcpy(sf->iv[0], areq->iv, ivsize); + a = dma_map_single(ss->dev, sf->iv[i], ivsize, DMA_TO_DEVICE); + if (dma_mapping_error(ss->dev, a)) { + memzero_explicit(sf->iv[i], ivsize); + dev_err(ss->dev, "Cannot DMA MAP IV\n"); + err = -EFAULT; + goto dma_iv_error; + } + rctx->p_iv[i] = a; + /* we need to setup all others IVs only in the decrypt way */ + if (rctx->op_dir == SS_ENCRYPTION) + return 0; + todo = min(len, sg_dma_len(sg)); + len -= todo; + i++; + if (i < MAX_SG) { + offset = sg->length - ivsize; + scatterwalk_map_and_copy(sf->iv[i], sg, offset, ivsize, 0); + } + rctx->niv = i; + sg = sg_next(sg); + } + + return 0; +dma_iv_error: + i--; + while (i >= 0) { + dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE); + memzero_explicit(sf->iv[i], ivsize); + i--; + } + return err; +} + +static int sun8i_ss_cipher(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun8i_ss_dev *ss = op->ss; + struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct sun8i_ss_alg_template *algt; + struct sun8i_ss_flow *sf = &ss->flows[rctx->flow]; + struct scatterlist *sg; + unsigned int todo, len, offset, ivsize; + int nr_sgs = 0; + int nr_sgd = 0; + int err = 0; + int nsgs = sg_nents_for_len(areq->src, areq->cryptlen); + int nsgd = sg_nents_for_len(areq->dst, areq->cryptlen); + int i; + + algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher); + + dev_dbg(ss->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__, + crypto_tfm_alg_name(areq->base.tfm), + areq->cryptlen, + rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm), + op->keylen); + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + algt->stat_req++; +#endif + + rctx->op_mode = ss->variant->op_mode[algt->ss_blockmode]; + rctx->method = ss->variant->alg_cipher[algt->ss_algo_id]; + rctx->keylen = op->keylen; + + rctx->p_key = dma_map_single(ss->dev, op->key, op->keylen, DMA_TO_DEVICE); + if (dma_mapping_error(ss->dev, rctx->p_key)) { + dev_err(ss->dev, "Cannot DMA MAP KEY\n"); + err = -EFAULT; + goto theend; + } + + ivsize = crypto_skcipher_ivsize(tfm); + if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) { + err = sun8i_ss_setup_ivs(areq); + if (err) + goto theend_key; + } + if (areq->src == areq->dst) { + nr_sgs = dma_map_sg(ss->dev, areq->src, nsgs, DMA_BIDIRECTIONAL); + if (nr_sgs <= 0 || nr_sgs > 8) { + dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs); + err = -EINVAL; + goto theend_iv; + } + nr_sgd = nr_sgs; + } else { + nr_sgs = dma_map_sg(ss->dev, areq->src, nsgs, DMA_TO_DEVICE); + if (nr_sgs <= 0 || nr_sgs > 8) { + dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs); + err = -EINVAL; + goto theend_iv; + } + nr_sgd = dma_map_sg(ss->dev, areq->dst, nsgd, DMA_FROM_DEVICE); + if (nr_sgd <= 0 || nr_sgd > 8) { + dev_err(ss->dev, "Invalid sg number %d\n", nr_sgd); + err = -EINVAL; + goto theend_sgs; + } + } + + len = areq->cryptlen; + i = 0; + sg = areq->src; + while (i < nr_sgs && sg && len) { + if (sg_dma_len(sg) == 0) + goto sgs_next; + rctx->t_src[i].addr = sg_dma_address(sg); + todo = min(len, sg_dma_len(sg)); + rctx->t_src[i].len = todo / 4; + dev_dbg(ss->dev, "%s total=%u SGS(%d %u off=%d) todo=%u\n", __func__, + areq->cryptlen, i, rctx->t_src[i].len, sg->offset, 
todo); + len -= todo; + i++; +sgs_next: + sg = sg_next(sg); + } + if (len > 0) { + dev_err(ss->dev, "remaining len %d\n", len); + err = -EINVAL; + goto theend_sgs; + } + + len = areq->cryptlen; + i = 0; + sg = areq->dst; + while (i < nr_sgd && sg && len) { + if (sg_dma_len(sg) == 0) + goto sgd_next; + rctx->t_dst[i].addr = sg_dma_address(sg); + todo = min(len, sg_dma_len(sg)); + rctx->t_dst[i].len = todo / 4; + dev_dbg(ss->dev, "%s total=%u SGD(%d %u off=%d) todo=%u\n", __func__, + areq->cryptlen, i, rctx->t_dst[i].len, sg->offset, todo); + len -= todo; + i++; +sgd_next: + sg = sg_next(sg); + } + if (len > 0) { + dev_err(ss->dev, "remaining len %d\n", len); + err = -EINVAL; + goto theend_sgs; + } + + err = sun8i_ss_run_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm)); + +theend_sgs: + if (areq->src == areq->dst) { + dma_unmap_sg(ss->dev, areq->src, nsgs, DMA_BIDIRECTIONAL); + } else { + dma_unmap_sg(ss->dev, areq->src, nsgs, DMA_TO_DEVICE); + dma_unmap_sg(ss->dev, areq->dst, nsgd, DMA_FROM_DEVICE); + } + +theend_iv: + if (areq->iv && ivsize > 0) { + for (i = 0; i < rctx->niv; i++) { + dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE); + memzero_explicit(sf->iv[i], ivsize); + } + + offset = areq->cryptlen - ivsize; + if (rctx->op_dir & SS_DECRYPTION) { + memcpy(areq->iv, sf->biv, ivsize); + memzero_explicit(sf->biv, ivsize); + } else { + scatterwalk_map_and_copy(areq->iv, areq->dst, offset, + ivsize, 0); + } + } + +theend_key: + dma_unmap_single(ss->dev, rctx->p_key, op->keylen, DMA_TO_DEVICE); + +theend: + + return err; +} + +static int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq) +{ + int err; + struct skcipher_request *breq = container_of(areq, struct skcipher_request, base); + + err = sun8i_ss_cipher(breq); + local_bh_disable(); + crypto_finalize_skcipher_request(engine, breq, err); + local_bh_enable(); + + return 0; +} + +int sun8i_ss_skdecrypt(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + struct crypto_engine *engine; + int e; + + memset(rctx, 0, sizeof(struct sun8i_cipher_req_ctx)); + rctx->op_dir = SS_DECRYPTION; + + if (sun8i_ss_need_fallback(areq)) + return sun8i_ss_cipher_fallback(areq); + + e = sun8i_ss_get_engine_number(op->ss); + engine = op->ss->flows[e].engine; + rctx->flow = e; + + return crypto_transfer_skcipher_request_to_engine(engine, areq); +} + +int sun8i_ss_skencrypt(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + struct crypto_engine *engine; + int e; + + memset(rctx, 0, sizeof(struct sun8i_cipher_req_ctx)); + rctx->op_dir = SS_ENCRYPTION; + + if (sun8i_ss_need_fallback(areq)) + return sun8i_ss_cipher_fallback(areq); + + e = sun8i_ss_get_engine_number(op->ss); + engine = op->ss->flows[e].engine; + rctx->flow = e; + + return crypto_transfer_skcipher_request_to_engine(engine, areq); +} + +int sun8i_ss_cipher_init(struct crypto_tfm *tfm) +{ + struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm); + struct sun8i_ss_alg_template *algt; + const char *name = crypto_tfm_alg_name(tfm); + struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm); + struct skcipher_alg *alg = crypto_skcipher_alg(sktfm); + int err; + + memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx)); + + algt = 
container_of(alg, struct sun8i_ss_alg_template, alg.skcipher); + op->ss = algt->ss; + + op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(op->fallback_tfm)) { + dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n", + name, PTR_ERR(op->fallback_tfm)); + return PTR_ERR(op->fallback_tfm); + } + + sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) + + crypto_skcipher_reqsize(op->fallback_tfm); + + + memcpy(algt->fbname, + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)), + CRYPTO_MAX_ALG_NAME); + + op->enginectx.op.do_one_request = sun8i_ss_handle_cipher_request; + op->enginectx.op.prepare_request = NULL; + op->enginectx.op.unprepare_request = NULL; + + err = pm_runtime_resume_and_get(op->ss->dev); + if (err < 0) { + dev_err(op->ss->dev, "pm error %d\n", err); + goto error_pm; + } + + return 0; +error_pm: + crypto_free_skcipher(op->fallback_tfm); + return err; +} + +void sun8i_ss_cipher_exit(struct crypto_tfm *tfm) +{ + struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm); + + kfree_sensitive(op->key); + crypto_free_skcipher(op->fallback_tfm); + pm_runtime_put_sync(op->ss->dev); +} + +int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun8i_ss_dev *ss = op->ss; + + switch (keylen) { + case 128 / 8: + break; + case 192 / 8: + break; + case 256 / 8: + break; + default: + dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen); + return -EINVAL; + } + kfree_sensitive(op->key); + op->keylen = keylen; + op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA); + if (!op->key) + return -ENOMEM; + + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); +} + +int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun8i_ss_dev *ss = op->ss; + + if (unlikely(keylen != 3 * DES_KEY_SIZE)) { + dev_dbg(ss->dev, "Invalid keylen %u\n", keylen); + return -EINVAL; + } + + kfree_sensitive(op->key); + op->keylen = keylen; + op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA); + if (!op->key) + return -ENOMEM; + + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); +} diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c new file mode 100644 index 000000000..ac2329e2b --- /dev/null +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c @@ -0,0 +1,909 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sun8i-ss-core.c - hardware cryptographic offloader for + * Allwinner A80/A83T SoC + * + * Copyright (C) 2015-2019 Corentin Labbe <clabbe.montjoie@gmail.com> + * + * Core file which registers crypto algorithms supported by the SecuritySystem + * + * You could find a link for the datasheet in Documentation/arm/sunxi.rst + */ +#include <linux/clk.h> +#include <linux/crypto.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include 
<linux/pm_runtime.h> +#include <linux/reset.h> +#include <crypto/internal/rng.h> +#include <crypto/internal/skcipher.h> + +#include "sun8i-ss.h" + +static const struct ss_variant ss_a80_variant = { + .alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES, + }, + .alg_hash = { SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP, + }, + .op_mode = { SS_OP_ECB, SS_OP_CBC, + }, + .ss_clks = { + { "bus", 0, 300 * 1000 * 1000 }, + { "mod", 0, 300 * 1000 * 1000 }, + } +}; + +static const struct ss_variant ss_a83t_variant = { + .alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES, + }, + .alg_hash = { SS_ALG_MD5, SS_ALG_SHA1, SS_ALG_SHA224, SS_ALG_SHA256, + }, + .op_mode = { SS_OP_ECB, SS_OP_CBC, + }, + .ss_clks = { + { "bus", 0, 300 * 1000 * 1000 }, + { "mod", 0, 300 * 1000 * 1000 }, + } +}; + +/* + * sun8i_ss_get_engine_number() get the next channel slot + * This is a simple round-robin way of getting the next channel + */ +int sun8i_ss_get_engine_number(struct sun8i_ss_dev *ss) +{ + return atomic_inc_return(&ss->flow) % MAXFLOW; +} + +int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx, + const char *name) +{ + int flow = rctx->flow; + unsigned int ivlen = rctx->ivlen; + u32 v = SS_START; + int i; + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + ss->flows[flow].stat_req++; +#endif + + /* choose between stream0/stream1 */ + if (flow) + v |= SS_FLOW1; + else + v |= SS_FLOW0; + + v |= rctx->op_mode; + v |= rctx->method; + + if (rctx->op_dir) + v |= SS_DECRYPTION; + + switch (rctx->keylen) { + case 128 / 8: + v |= SS_AES_128BITS << 7; + break; + case 192 / 8: + v |= SS_AES_192BITS << 7; + break; + case 256 / 8: + v |= SS_AES_256BITS << 7; + break; + } + + for (i = 0; i < MAX_SG; i++) { + if (!rctx->t_dst[i].addr) + break; + + mutex_lock(&ss->mlock); + writel(rctx->p_key, ss->base + SS_KEY_ADR_REG); + + if (ivlen) { + if (rctx->op_dir == SS_ENCRYPTION) { + if (i == 0) + writel(rctx->p_iv[0], ss->base + SS_IV_ADR_REG); + else + writel(rctx->t_dst[i - 1].addr + rctx->t_dst[i - 1].len * 4 - ivlen, ss->base + SS_IV_ADR_REG); + } else { + writel(rctx->p_iv[i], ss->base + SS_IV_ADR_REG); + } + } + + dev_dbg(ss->dev, + "Processing SG %d on flow %d %s ctl=%x %d to %d method=%x opmode=%x opdir=%x srclen=%d\n", + i, flow, name, v, + rctx->t_src[i].len, rctx->t_dst[i].len, + rctx->method, rctx->op_mode, + rctx->op_dir, rctx->t_src[i].len); + + writel(rctx->t_src[i].addr, ss->base + SS_SRC_ADR_REG); + writel(rctx->t_dst[i].addr, ss->base + SS_DST_ADR_REG); + writel(rctx->t_src[i].len, ss->base + SS_LEN_ADR_REG); + + reinit_completion(&ss->flows[flow].complete); + ss->flows[flow].status = 0; + wmb(); + + writel(v, ss->base + SS_CTL_REG); + mutex_unlock(&ss->mlock); + wait_for_completion_interruptible_timeout(&ss->flows[flow].complete, + msecs_to_jiffies(2000)); + if (ss->flows[flow].status == 0) { + dev_err(ss->dev, "DMA timeout for %s\n", name); + return -EFAULT; + } + } + + return 0; +} + +static irqreturn_t ss_irq_handler(int irq, void *data) +{ + struct sun8i_ss_dev *ss = (struct sun8i_ss_dev *)data; + int flow = 0; + u32 p; + + p = readl(ss->base + SS_INT_STA_REG); + for (flow = 0; flow < MAXFLOW; flow++) { + if (p & (BIT(flow))) { + writel(BIT(flow), ss->base + SS_INT_STA_REG); + ss->flows[flow].status = 1; + complete(&ss->flows[flow].complete); + } + } + + return IRQ_HANDLED; +} + +static struct sun8i_ss_alg_template ss_algs[] = { +{ + .type = CRYPTO_ALG_TYPE_SKCIPHER, + .ss_algo_id = SS_ID_CIPHER_AES, + .ss_blockmode = SS_ID_OP_CBC, + .alg.skcipher = { + .base = { + .cra_name 
= "cbc(aes)", + .cra_driver_name = "cbc-aes-sun8i-ss", + .cra_priority = 400, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 0xf, + .cra_init = sun8i_ss_cipher_init, + .cra_exit = sun8i_ss_cipher_exit, + }, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = sun8i_ss_aes_setkey, + .encrypt = sun8i_ss_skencrypt, + .decrypt = sun8i_ss_skdecrypt, + } +}, +{ + .type = CRYPTO_ALG_TYPE_SKCIPHER, + .ss_algo_id = SS_ID_CIPHER_AES, + .ss_blockmode = SS_ID_OP_ECB, + .alg.skcipher = { + .base = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-sun8i-ss", + .cra_priority = 400, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 0xf, + .cra_init = sun8i_ss_cipher_init, + .cra_exit = sun8i_ss_cipher_exit, + }, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = sun8i_ss_aes_setkey, + .encrypt = sun8i_ss_skencrypt, + .decrypt = sun8i_ss_skdecrypt, + } +}, +{ + .type = CRYPTO_ALG_TYPE_SKCIPHER, + .ss_algo_id = SS_ID_CIPHER_DES3, + .ss_blockmode = SS_ID_OP_CBC, + .alg.skcipher = { + .base = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "cbc-des3-sun8i-ss", + .cra_priority = 400, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 0xf, + .cra_init = sun8i_ss_cipher_init, + .cra_exit = sun8i_ss_cipher_exit, + }, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setkey = sun8i_ss_des3_setkey, + .encrypt = sun8i_ss_skencrypt, + .decrypt = sun8i_ss_skdecrypt, + } +}, +{ + .type = CRYPTO_ALG_TYPE_SKCIPHER, + .ss_algo_id = SS_ID_CIPHER_DES3, + .ss_blockmode = SS_ID_OP_ECB, + .alg.skcipher = { + .base = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "ecb-des3-sun8i-ss", + .cra_priority = 400, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 0xf, + .cra_init = sun8i_ss_cipher_init, + .cra_exit = sun8i_ss_cipher_exit, + }, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .setkey = sun8i_ss_des3_setkey, + .encrypt = sun8i_ss_skencrypt, + .decrypt = sun8i_ss_skdecrypt, + } +}, +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_PRNG +{ + .type = CRYPTO_ALG_TYPE_RNG, + .alg.rng = { + .base = { + .cra_name = "stdrng", + .cra_driver_name = "sun8i-ss-prng", + .cra_priority = 300, + .cra_ctxsize = sizeof(struct sun8i_ss_rng_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_init = sun8i_ss_prng_init, + .cra_exit = sun8i_ss_prng_exit, + }, + .generate = sun8i_ss_prng_generate, + .seed = sun8i_ss_prng_seed, + .seedsize = PRNG_SEED_SIZE, + } +}, +#endif +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_HASH +{ .type = CRYPTO_ALG_TYPE_AHASH, + .ss_algo_id = SS_ID_HASH_MD5, + .alg.hash = { + .init = sun8i_ss_hash_init, + .update = 
sun8i_ss_hash_update, + .final = sun8i_ss_hash_final, + .finup = sun8i_ss_hash_finup, + .digest = sun8i_ss_hash_digest, + .export = sun8i_ss_hash_export, + .import = sun8i_ss_hash_import, + .halg = { + .digestsize = MD5_DIGEST_SIZE, + .statesize = sizeof(struct md5_state), + .base = { + .cra_name = "md5", + .cra_driver_name = "md5-sun8i-ss", + .cra_priority = 300, + .cra_alignmask = 3, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_init = sun8i_ss_hash_crainit, + .cra_exit = sun8i_ss_hash_craexit, + } + } + } +}, +{ .type = CRYPTO_ALG_TYPE_AHASH, + .ss_algo_id = SS_ID_HASH_SHA1, + .alg.hash = { + .init = sun8i_ss_hash_init, + .update = sun8i_ss_hash_update, + .final = sun8i_ss_hash_final, + .finup = sun8i_ss_hash_finup, + .digest = sun8i_ss_hash_digest, + .export = sun8i_ss_hash_export, + .import = sun8i_ss_hash_import, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct sha1_state), + .base = { + .cra_name = "sha1", + .cra_driver_name = "sha1-sun8i-ss", + .cra_priority = 300, + .cra_alignmask = 3, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_init = sun8i_ss_hash_crainit, + .cra_exit = sun8i_ss_hash_craexit, + } + } + } +}, +{ .type = CRYPTO_ALG_TYPE_AHASH, + .ss_algo_id = SS_ID_HASH_SHA224, + .alg.hash = { + .init = sun8i_ss_hash_init, + .update = sun8i_ss_hash_update, + .final = sun8i_ss_hash_final, + .finup = sun8i_ss_hash_finup, + .digest = sun8i_ss_hash_digest, + .export = sun8i_ss_hash_export, + .import = sun8i_ss_hash_import, + .halg = { + .digestsize = SHA224_DIGEST_SIZE, + .statesize = sizeof(struct sha256_state), + .base = { + .cra_name = "sha224", + .cra_driver_name = "sha224-sun8i-ss", + .cra_priority = 300, + .cra_alignmask = 3, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_init = sun8i_ss_hash_crainit, + .cra_exit = sun8i_ss_hash_craexit, + } + } + } +}, +{ .type = CRYPTO_ALG_TYPE_AHASH, + .ss_algo_id = SS_ID_HASH_SHA256, + .alg.hash = { + .init = sun8i_ss_hash_init, + .update = sun8i_ss_hash_update, + .final = sun8i_ss_hash_final, + .finup = sun8i_ss_hash_finup, + .digest = sun8i_ss_hash_digest, + .export = sun8i_ss_hash_export, + .import = sun8i_ss_hash_import, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct sha256_state), + .base = { + .cra_name = "sha256", + .cra_driver_name = "sha256-sun8i-ss", + .cra_priority = 300, + .cra_alignmask = 3, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_init = sun8i_ss_hash_crainit, + .cra_exit = sun8i_ss_hash_craexit, + } + } + } +}, +{ .type = CRYPTO_ALG_TYPE_AHASH, + .ss_algo_id = SS_ID_HASH_SHA1, + .alg.hash = { + .init = sun8i_ss_hash_init, + .update = sun8i_ss_hash_update, + .final = sun8i_ss_hash_final, + .finup = sun8i_ss_hash_finup, + .digest = sun8i_ss_hash_digest, + .export = sun8i_ss_hash_export, + .import = sun8i_ss_hash_import, + .setkey = sun8i_ss_hmac_setkey, + .halg = { + .digestsize = 
SHA1_DIGEST_SIZE, + .statesize = sizeof(struct sha1_state), + .base = { + .cra_name = "hmac(sha1)", + .cra_driver_name = "hmac-sha1-sun8i-ss", + .cra_priority = 300, + .cra_alignmask = 3, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_init = sun8i_ss_hash_crainit, + .cra_exit = sun8i_ss_hash_craexit, + } + } + } +}, +#endif +}; + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG +static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v) +{ + struct sun8i_ss_dev *ss = seq->private; + unsigned int i; + + for (i = 0; i < MAXFLOW; i++) + seq_printf(seq, "Channel %d: nreq %lu\n", i, ss->flows[i].stat_req); + + for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { + if (!ss_algs[i].ss) + continue; + switch (ss_algs[i].type) { + case CRYPTO_ALG_TYPE_SKCIPHER: + seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n", + ss_algs[i].alg.skcipher.base.cra_driver_name, + ss_algs[i].alg.skcipher.base.cra_name, + ss_algs[i].stat_req, ss_algs[i].stat_fb); + + seq_printf(seq, "\tLast fallback is: %s\n", + ss_algs[i].fbname); + seq_printf(seq, "\tFallback due to length: %lu\n", + ss_algs[i].stat_fb_len); + seq_printf(seq, "\tFallback due to SG length: %lu\n", + ss_algs[i].stat_fb_sglen); + seq_printf(seq, "\tFallback due to alignment: %lu\n", + ss_algs[i].stat_fb_align); + seq_printf(seq, "\tFallback due to SG numbers: %lu\n", + ss_algs[i].stat_fb_sgnum); + break; + case CRYPTO_ALG_TYPE_RNG: + seq_printf(seq, "%s %s reqs=%lu tsize=%lu\n", + ss_algs[i].alg.rng.base.cra_driver_name, + ss_algs[i].alg.rng.base.cra_name, + ss_algs[i].stat_req, ss_algs[i].stat_bytes); + break; + case CRYPTO_ALG_TYPE_AHASH: + seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n", + ss_algs[i].alg.hash.halg.base.cra_driver_name, + ss_algs[i].alg.hash.halg.base.cra_name, + ss_algs[i].stat_req, ss_algs[i].stat_fb); + seq_printf(seq, "\tLast fallback is: %s\n", + ss_algs[i].fbname); + seq_printf(seq, "\tFallback due to length: %lu\n", + ss_algs[i].stat_fb_len); + seq_printf(seq, "\tFallback due to SG length: %lu\n", + ss_algs[i].stat_fb_sglen); + seq_printf(seq, "\tFallback due to alignment: %lu\n", + ss_algs[i].stat_fb_align); + seq_printf(seq, "\tFallback due to SG numbers: %lu\n", + ss_algs[i].stat_fb_sgnum); + break; + } + } + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(sun8i_ss_debugfs); +#endif + +static void sun8i_ss_free_flows(struct sun8i_ss_dev *ss, int i) +{ + while (i >= 0) { + crypto_engine_exit(ss->flows[i].engine); + i--; + } +} + +/* + * Allocate the flow list structure + */ +static int allocate_flows(struct sun8i_ss_dev *ss) +{ + int i, j, err; + + ss->flows = devm_kcalloc(ss->dev, MAXFLOW, sizeof(struct sun8i_ss_flow), + GFP_KERNEL); + if (!ss->flows) + return -ENOMEM; + + for (i = 0; i < MAXFLOW; i++) { + init_completion(&ss->flows[i].complete); + + ss->flows[i].biv = devm_kmalloc(ss->dev, AES_BLOCK_SIZE, + GFP_KERNEL | GFP_DMA); + if (!ss->flows[i].biv) { + err = -ENOMEM; + goto error_engine; + } + + for (j = 0; j < MAX_SG; j++) { + ss->flows[i].iv[j] = devm_kmalloc(ss->dev, AES_BLOCK_SIZE, + GFP_KERNEL | GFP_DMA); + if (!ss->flows[i].iv[j]) { + err = -ENOMEM; + goto error_engine; + } + } + + /* the padding could be up to two block. 
*/ + ss->flows[i].pad = devm_kmalloc(ss->dev, MAX_PAD_SIZE, + GFP_KERNEL | GFP_DMA); + if (!ss->flows[i].pad) { + err = -ENOMEM; + goto error_engine; + } + ss->flows[i].result = devm_kmalloc(ss->dev, SHA256_DIGEST_SIZE, + GFP_KERNEL | GFP_DMA); + if (!ss->flows[i].result) { + err = -ENOMEM; + goto error_engine; + } + + ss->flows[i].engine = crypto_engine_alloc_init(ss->dev, true); + if (!ss->flows[i].engine) { + dev_err(ss->dev, "Cannot allocate engine\n"); + i--; + err = -ENOMEM; + goto error_engine; + } + err = crypto_engine_start(ss->flows[i].engine); + if (err) { + dev_err(ss->dev, "Cannot start engine\n"); + goto error_engine; + } + } + return 0; +error_engine: + sun8i_ss_free_flows(ss, i); + return err; +} + +/* + * Power management strategy: The device is suspended unless a TFM exists for + * one of the algorithms proposed by this driver. + */ +static int sun8i_ss_pm_suspend(struct device *dev) +{ + struct sun8i_ss_dev *ss = dev_get_drvdata(dev); + int i; + + reset_control_assert(ss->reset); + for (i = 0; i < SS_MAX_CLOCKS; i++) + clk_disable_unprepare(ss->ssclks[i]); + return 0; +} + +static int sun8i_ss_pm_resume(struct device *dev) +{ + struct sun8i_ss_dev *ss = dev_get_drvdata(dev); + int err, i; + + for (i = 0; i < SS_MAX_CLOCKS; i++) { + if (!ss->variant->ss_clks[i].name) + continue; + err = clk_prepare_enable(ss->ssclks[i]); + if (err) { + dev_err(ss->dev, "Cannot prepare_enable %s\n", + ss->variant->ss_clks[i].name); + goto error; + } + } + err = reset_control_deassert(ss->reset); + if (err) { + dev_err(ss->dev, "Cannot deassert reset control\n"); + goto error; + } + /* enable interrupts for all flows */ + writel(BIT(0) | BIT(1), ss->base + SS_INT_CTL_REG); + + return 0; +error: + sun8i_ss_pm_suspend(dev); + return err; +} + +static const struct dev_pm_ops sun8i_ss_pm_ops = { + SET_RUNTIME_PM_OPS(sun8i_ss_pm_suspend, sun8i_ss_pm_resume, NULL) +}; + +static int sun8i_ss_pm_init(struct sun8i_ss_dev *ss) +{ + int err; + + pm_runtime_use_autosuspend(ss->dev); + pm_runtime_set_autosuspend_delay(ss->dev, 2000); + + err = pm_runtime_set_suspended(ss->dev); + if (err) + return err; + pm_runtime_enable(ss->dev); + return err; +} + +static void sun8i_ss_pm_exit(struct sun8i_ss_dev *ss) +{ + pm_runtime_disable(ss->dev); +} + +static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss) +{ + int ss_method, err, id; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { + ss_algs[i].ss = ss; + switch (ss_algs[i].type) { + case CRYPTO_ALG_TYPE_SKCIPHER: + id = ss_algs[i].ss_algo_id; + ss_method = ss->variant->alg_cipher[id]; + if (ss_method == SS_ID_NOTSUPP) { + dev_info(ss->dev, + "DEBUG: Algo of %s not supported\n", + ss_algs[i].alg.skcipher.base.cra_name); + ss_algs[i].ss = NULL; + break; + } + id = ss_algs[i].ss_blockmode; + ss_method = ss->variant->op_mode[id]; + if (ss_method == SS_ID_NOTSUPP) { + dev_info(ss->dev, "DEBUG: Blockmode of %s not supported\n", + ss_algs[i].alg.skcipher.base.cra_name); + ss_algs[i].ss = NULL; + break; + } + dev_info(ss->dev, "DEBUG: Register %s\n", + ss_algs[i].alg.skcipher.base.cra_name); + err = crypto_register_skcipher(&ss_algs[i].alg.skcipher); + if (err) { + dev_err(ss->dev, "Fail to register %s\n", + ss_algs[i].alg.skcipher.base.cra_name); + ss_algs[i].ss = NULL; + return err; + } + break; + case CRYPTO_ALG_TYPE_RNG: + err = crypto_register_rng(&ss_algs[i].alg.rng); + if (err) { + dev_err(ss->dev, "Fail to register %s\n", + ss_algs[i].alg.rng.base.cra_name); + ss_algs[i].ss = NULL; + } + break; + case CRYPTO_ALG_TYPE_AHASH: + id = 
ss_algs[i].ss_algo_id; + ss_method = ss->variant->alg_hash[id]; + if (ss_method == SS_ID_NOTSUPP) { + dev_info(ss->dev, + "DEBUG: Algo of %s not supported\n", + ss_algs[i].alg.hash.halg.base.cra_name); + ss_algs[i].ss = NULL; + break; + } + dev_info(ss->dev, "Register %s\n", + ss_algs[i].alg.hash.halg.base.cra_name); + err = crypto_register_ahash(&ss_algs[i].alg.hash); + if (err) { + dev_err(ss->dev, "ERROR: Fail to register %s\n", + ss_algs[i].alg.hash.halg.base.cra_name); + ss_algs[i].ss = NULL; + return err; + } + break; + default: + ss_algs[i].ss = NULL; + dev_err(ss->dev, "ERROR: tried to register an unknown algo\n"); + } + } + return 0; +} + +static void sun8i_ss_unregister_algs(struct sun8i_ss_dev *ss) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { + if (!ss_algs[i].ss) + continue; + switch (ss_algs[i].type) { + case CRYPTO_ALG_TYPE_SKCIPHER: + dev_info(ss->dev, "Unregister %d %s\n", i, + ss_algs[i].alg.skcipher.base.cra_name); + crypto_unregister_skcipher(&ss_algs[i].alg.skcipher); + break; + case CRYPTO_ALG_TYPE_RNG: + dev_info(ss->dev, "Unregister %d %s\n", i, + ss_algs[i].alg.rng.base.cra_name); + crypto_unregister_rng(&ss_algs[i].alg.rng); + break; + case CRYPTO_ALG_TYPE_AHASH: + dev_info(ss->dev, "Unregister %d %s\n", i, + ss_algs[i].alg.hash.halg.base.cra_name); + crypto_unregister_ahash(&ss_algs[i].alg.hash); + break; + } + } +} + +static int sun8i_ss_get_clks(struct sun8i_ss_dev *ss) +{ + unsigned long cr; + int err, i; + + for (i = 0; i < SS_MAX_CLOCKS; i++) { + if (!ss->variant->ss_clks[i].name) + continue; + ss->ssclks[i] = devm_clk_get(ss->dev, ss->variant->ss_clks[i].name); + if (IS_ERR(ss->ssclks[i])) { + err = PTR_ERR(ss->ssclks[i]); + dev_err(ss->dev, "Cannot get %s SS clock err=%d\n", + ss->variant->ss_clks[i].name, err); + return err; + } + cr = clk_get_rate(ss->ssclks[i]); + if (!cr) + return -EINVAL; + if (ss->variant->ss_clks[i].freq > 0 && + cr != ss->variant->ss_clks[i].freq) { + dev_info(ss->dev, "Set %s clock to %lu (%lu Mhz) from %lu (%lu Mhz)\n", + ss->variant->ss_clks[i].name, + ss->variant->ss_clks[i].freq, + ss->variant->ss_clks[i].freq / 1000000, + cr, cr / 1000000); + err = clk_set_rate(ss->ssclks[i], ss->variant->ss_clks[i].freq); + if (err) + dev_err(ss->dev, "Fail to set %s clk speed to %lu hz\n", + ss->variant->ss_clks[i].name, + ss->variant->ss_clks[i].freq); + } + if (ss->variant->ss_clks[i].max_freq > 0 && + cr > ss->variant->ss_clks[i].max_freq) + dev_warn(ss->dev, "Frequency for %s (%lu hz) is higher than datasheet's recommendation (%lu hz)", + ss->variant->ss_clks[i].name, cr, + ss->variant->ss_clks[i].max_freq); + } + return 0; +} + +static int sun8i_ss_probe(struct platform_device *pdev) +{ + struct sun8i_ss_dev *ss; + int err, irq; + u32 v; + + ss = devm_kzalloc(&pdev->dev, sizeof(*ss), GFP_KERNEL); + if (!ss) + return -ENOMEM; + + ss->dev = &pdev->dev; + platform_set_drvdata(pdev, ss); + + ss->variant = of_device_get_match_data(&pdev->dev); + if (!ss->variant) { + dev_err(&pdev->dev, "Missing Crypto Engine variant\n"); + return -EINVAL; + } + + ss->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ss->base)) + return PTR_ERR(ss->base); + + err = sun8i_ss_get_clks(ss); + if (err) + return err; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ss->reset = devm_reset_control_get(&pdev->dev, NULL); + if (IS_ERR(ss->reset)) + return dev_err_probe(&pdev->dev, PTR_ERR(ss->reset), + "No reset control found\n"); + + mutex_init(&ss->mlock); + + err = allocate_flows(ss); + if (err) + return err; 
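+	/* Editor's note: flows and their crypto engines are ready; runtime PM,
+	 * the interrupt handler and the algorithms are set up next. */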
+ + err = sun8i_ss_pm_init(ss); + if (err) + goto error_pm; + + err = devm_request_irq(&pdev->dev, irq, ss_irq_handler, 0, "sun8i-ss", ss); + if (err) { + dev_err(ss->dev, "Cannot request SecuritySystem IRQ (err=%d)\n", err); + goto error_irq; + } + + err = sun8i_ss_register_algs(ss); + if (err) + goto error_alg; + + err = pm_runtime_resume_and_get(ss->dev); + if (err < 0) + goto error_alg; + + v = readl(ss->base + SS_CTL_REG); + v >>= SS_DIE_ID_SHIFT; + v &= SS_DIE_ID_MASK; + dev_info(&pdev->dev, "Security System Die ID %x\n", v); + + pm_runtime_put_sync(ss->dev); + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + /* Ignore error of debugfs */ + ss->dbgfs_dir = debugfs_create_dir("sun8i-ss", NULL); + ss->dbgfs_stats = debugfs_create_file("stats", 0444, + ss->dbgfs_dir, ss, + &sun8i_ss_debugfs_fops); +#endif + + return 0; +error_alg: + sun8i_ss_unregister_algs(ss); +error_irq: + sun8i_ss_pm_exit(ss); +error_pm: + sun8i_ss_free_flows(ss, MAXFLOW - 1); + return err; +} + +static int sun8i_ss_remove(struct platform_device *pdev) +{ + struct sun8i_ss_dev *ss = platform_get_drvdata(pdev); + + sun8i_ss_unregister_algs(ss); + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + debugfs_remove_recursive(ss->dbgfs_dir); +#endif + + sun8i_ss_free_flows(ss, MAXFLOW - 1); + + sun8i_ss_pm_exit(ss); + + return 0; +} + +static const struct of_device_id sun8i_ss_crypto_of_match_table[] = { + { .compatible = "allwinner,sun8i-a83t-crypto", + .data = &ss_a83t_variant }, + { .compatible = "allwinner,sun9i-a80-crypto", + .data = &ss_a80_variant }, + {} +}; +MODULE_DEVICE_TABLE(of, sun8i_ss_crypto_of_match_table); + +static struct platform_driver sun8i_ss_driver = { + .probe = sun8i_ss_probe, + .remove = sun8i_ss_remove, + .driver = { + .name = "sun8i-ss", + .pm = &sun8i_ss_pm_ops, + .of_match_table = sun8i_ss_crypto_of_match_table, + }, +}; + +module_platform_driver(sun8i_ss_driver); + +MODULE_DESCRIPTION("Allwinner SecuritySystem cryptographic offloader"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Corentin Labbe <clabbe.montjoie@gmail.com>"); diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c new file mode 100644 index 000000000..36a82b229 --- /dev/null +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c @@ -0,0 +1,709 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sun8i-ss-hash.c - hardware cryptographic offloader for + * Allwinner A80/A83T SoC + * + * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com> + * + * This file add support for MD5 and SHA1/SHA224/SHA256. 
+ * + * You could find the datasheet in Documentation/arm/sunxi.rst + */ +#include <linux/bottom_half.h> +#include <linux/dma-mapping.h> +#include <linux/pm_runtime.h> +#include <linux/scatterlist.h> +#include <crypto/internal/hash.h> +#include <crypto/hmac.h> +#include <crypto/scatterwalk.h> +#include <crypto/sha1.h> +#include <crypto/sha2.h> +#include <crypto/md5.h> +#include "sun8i-ss.h" + +static int sun8i_ss_hashkey(struct sun8i_ss_hash_tfm_ctx *tfmctx, const u8 *key, + unsigned int keylen) +{ + struct crypto_shash *xtfm; + struct shash_desc *sdesc; + size_t len; + int ret = 0; + + xtfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(xtfm)) + return PTR_ERR(xtfm); + + len = sizeof(*sdesc) + crypto_shash_descsize(xtfm); + sdesc = kmalloc(len, GFP_KERNEL); + if (!sdesc) { + ret = -ENOMEM; + goto err_hashkey_sdesc; + } + sdesc->tfm = xtfm; + + ret = crypto_shash_init(sdesc); + if (ret) { + dev_err(tfmctx->ss->dev, "shash init error ret=%d\n", ret); + goto err_hashkey; + } + ret = crypto_shash_finup(sdesc, key, keylen, tfmctx->key); + if (ret) + dev_err(tfmctx->ss->dev, "shash finup error\n"); +err_hashkey: + kfree(sdesc); +err_hashkey_sdesc: + crypto_free_shash(xtfm); + return ret; +} + +int sun8i_ss_hmac_setkey(struct crypto_ahash *ahash, const u8 *key, + unsigned int keylen) +{ + struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(ahash); + struct ahash_alg *alg = __crypto_ahash_alg(ahash->base.__crt_alg); + struct sun8i_ss_alg_template *algt; + int digestsize, i; + int bs = crypto_ahash_blocksize(ahash); + int ret; + + algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash); + digestsize = algt->alg.hash.halg.digestsize; + + if (keylen > bs) { + ret = sun8i_ss_hashkey(tfmctx, key, keylen); + if (ret) + return ret; + tfmctx->keylen = digestsize; + } else { + tfmctx->keylen = keylen; + memcpy(tfmctx->key, key, keylen); + } + + tfmctx->ipad = kzalloc(bs, GFP_KERNEL | GFP_DMA); + if (!tfmctx->ipad) + return -ENOMEM; + tfmctx->opad = kzalloc(bs, GFP_KERNEL | GFP_DMA); + if (!tfmctx->opad) { + ret = -ENOMEM; + goto err_opad; + } + + memset(tfmctx->key + tfmctx->keylen, 0, bs - tfmctx->keylen); + memcpy(tfmctx->ipad, tfmctx->key, tfmctx->keylen); + memcpy(tfmctx->opad, tfmctx->key, tfmctx->keylen); + for (i = 0; i < bs; i++) { + tfmctx->ipad[i] ^= HMAC_IPAD_VALUE; + tfmctx->opad[i] ^= HMAC_OPAD_VALUE; + } + + ret = crypto_ahash_setkey(tfmctx->fallback_tfm, key, keylen); + if (!ret) + return 0; + + memzero_explicit(tfmctx->key, keylen); + kfree_sensitive(tfmctx->opad); +err_opad: + kfree_sensitive(tfmctx->ipad); + return ret; +} + +int sun8i_ss_hash_crainit(struct crypto_tfm *tfm) +{ + struct sun8i_ss_hash_tfm_ctx *op = crypto_tfm_ctx(tfm); + struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); + struct sun8i_ss_alg_template *algt; + int err; + + memset(op, 0, sizeof(struct sun8i_ss_hash_tfm_ctx)); + + algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash); + op->ss = algt->ss; + + op->enginectx.op.do_one_request = sun8i_ss_hash_run; + op->enginectx.op.prepare_request = NULL; + op->enginectx.op.unprepare_request = NULL; + + /* FALLBACK */ + op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(op->fallback_tfm)) { + dev_err(algt->ss->dev, "Fallback driver could no be loaded\n"); + return PTR_ERR(op->fallback_tfm); + } + + if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm)) + algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm); + + 
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct sun8i_ss_hash_reqctx) + + crypto_ahash_reqsize(op->fallback_tfm)); + + memcpy(algt->fbname, crypto_tfm_alg_driver_name(&op->fallback_tfm->base), CRYPTO_MAX_ALG_NAME); + + err = pm_runtime_get_sync(op->ss->dev); + if (err < 0) + goto error_pm; + return 0; +error_pm: + pm_runtime_put_noidle(op->ss->dev); + crypto_free_ahash(op->fallback_tfm); + return err; +} + +void sun8i_ss_hash_craexit(struct crypto_tfm *tfm) +{ + struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm); + + kfree_sensitive(tfmctx->ipad); + kfree_sensitive(tfmctx->opad); + + crypto_free_ahash(tfmctx->fallback_tfm); + pm_runtime_put_sync_suspend(tfmctx->ss->dev); +} + +int sun8i_ss_hash_init(struct ahash_request *areq) +{ + struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); + + memset(rctx, 0, sizeof(struct sun8i_ss_hash_reqctx)); + + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_init(&rctx->fallback_req); +} + +int sun8i_ss_hash_export(struct ahash_request *areq, void *out) +{ + struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_export(&rctx->fallback_req, out); +} + +int sun8i_ss_hash_import(struct ahash_request *areq, const void *in) +{ + struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_import(&rctx->fallback_req, in); +} + +int sun8i_ss_hash_final(struct ahash_request *areq) +{ + struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); + struct sun8i_ss_alg_template *algt; +#endif + + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + rctx->fallback_req.base.flags = areq->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + rctx->fallback_req.result = areq->result; + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash); + algt->stat_fb++; +#endif + + return crypto_ahash_final(&rctx->fallback_req); +} + +int sun8i_ss_hash_update(struct ahash_request *areq) +{ + struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + rctx->fallback_req.base.flags = areq->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + rctx->fallback_req.nbytes = areq->nbytes; + rctx->fallback_req.src = areq->src; + + return crypto_ahash_update(&rctx->fallback_req); +} + +int sun8i_ss_hash_finup(struct ahash_request *areq) +{ + struct sun8i_ss_hash_reqctx *rctx = 
ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); + struct sun8i_ss_alg_template *algt; +#endif + + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + rctx->fallback_req.base.flags = areq->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + + rctx->fallback_req.nbytes = areq->nbytes; + rctx->fallback_req.src = areq->src; + rctx->fallback_req.result = areq->result; +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash); + algt->stat_fb++; +#endif + + return crypto_ahash_finup(&rctx->fallback_req); +} + +static int sun8i_ss_hash_digest_fb(struct ahash_request *areq) +{ + struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); + struct sun8i_ss_alg_template *algt; +#endif + + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + rctx->fallback_req.base.flags = areq->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + + rctx->fallback_req.nbytes = areq->nbytes; + rctx->fallback_req.src = areq->src; + rctx->fallback_req.result = areq->result; +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash); + algt->stat_fb++; +#endif + + return crypto_ahash_digest(&rctx->fallback_req); +} + +static int sun8i_ss_run_hash_task(struct sun8i_ss_dev *ss, + struct sun8i_ss_hash_reqctx *rctx, + const char *name) +{ + int flow = rctx->flow; + u32 v = SS_START; + int i; + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + ss->flows[flow].stat_req++; +#endif + + /* choose between stream0/stream1 */ + if (flow) + v |= SS_FLOW1; + else + v |= SS_FLOW0; + + v |= rctx->method; + + for (i = 0; i < MAX_SG; i++) { + if (!rctx->t_dst[i].addr) + break; + + mutex_lock(&ss->mlock); + if (i > 0) { + v |= BIT(17); + writel(rctx->t_dst[i - 1].addr, ss->base + SS_KEY_ADR_REG); + writel(rctx->t_dst[i - 1].addr, ss->base + SS_IV_ADR_REG); + } + + dev_dbg(ss->dev, + "Processing SG %d on flow %d %s ctl=%x %d to %d method=%x src=%x dst=%x\n", + i, flow, name, v, + rctx->t_src[i].len, rctx->t_dst[i].len, + rctx->method, rctx->t_src[i].addr, rctx->t_dst[i].addr); + + writel(rctx->t_src[i].addr, ss->base + SS_SRC_ADR_REG); + writel(rctx->t_dst[i].addr, ss->base + SS_DST_ADR_REG); + writel(rctx->t_src[i].len, ss->base + SS_LEN_ADR_REG); + writel(BIT(0) | BIT(1), ss->base + SS_INT_CTL_REG); + + reinit_completion(&ss->flows[flow].complete); + ss->flows[flow].status = 0; + wmb(); + + writel(v, ss->base + SS_CTL_REG); + mutex_unlock(&ss->mlock); + wait_for_completion_interruptible_timeout(&ss->flows[flow].complete, + msecs_to_jiffies(2000)); + if (ss->flows[flow].status == 0) { + dev_err(ss->dev, "DMA timeout for %s\n", name); + return -EFAULT; + } + } + + return 0; +} + +static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); + struct sun8i_ss_alg_template *algt; + struct scatterlist *sg; + + algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash); + + if (areq->nbytes == 0) { + algt->stat_fb_len++; + return true; + } + + if (areq->nbytes >= MAX_PAD_SIZE - 64) 
{ + algt->stat_fb_len++; + return true; + } + + /* we need to reserve one SG for the padding one */ + if (sg_nents(areq->src) > MAX_SG - 1) { + algt->stat_fb_sgnum++; + return true; + } + + sg = areq->src; + while (sg) { + /* SS can operate hash only on full block size + * since SS support only MD5,sha1,sha224 and sha256, blocksize + * is always 64 + */ + /* Only the last block could be bounced to the pad buffer */ + if (sg->length % 64 && sg_next(sg)) { + algt->stat_fb_sglen++; + return true; + } + if (!IS_ALIGNED(sg->offset, sizeof(u32))) { + algt->stat_fb_align++; + return true; + } + if (sg->length % 4) { + algt->stat_fb_sglen++; + return true; + } + sg = sg_next(sg); + } + return false; +} + +int sun8i_ss_hash_digest(struct ahash_request *areq) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); + struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); + struct sun8i_ss_alg_template *algt; + struct sun8i_ss_dev *ss; + struct crypto_engine *engine; + int e; + + if (sun8i_ss_hash_need_fallback(areq)) + return sun8i_ss_hash_digest_fb(areq); + + algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash); + ss = algt->ss; + + e = sun8i_ss_get_engine_number(ss); + rctx->flow = e; + engine = ss->flows[e].engine; + + return crypto_transfer_hash_request_to_engine(engine, areq); +} + +static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count, bool le, int bs) +{ + u64 fill, min_fill, j, k; + __be64 *bebits; + __le64 *lebits; + + j = padi; + buf[j++] = cpu_to_le32(0x80); + + if (bs == 64) { + fill = 64 - (byte_count % 64); + min_fill = 2 * sizeof(u32) + sizeof(u32); + } else { + fill = 128 - (byte_count % 128); + min_fill = 4 * sizeof(u32) + sizeof(u32); + } + + if (fill < min_fill) + fill += bs; + + k = j; + j += (fill - min_fill) / sizeof(u32); + if (j * 4 > bufsize) { + pr_err("%s OVERFLOW %llu\n", __func__, j); + return 0; + } + for (; k < j; k++) + buf[k] = 0; + + if (le) { + /* MD5 */ + lebits = (__le64 *)&buf[j]; + *lebits = cpu_to_le64(byte_count << 3); + j += 2; + } else { + if (bs == 64) { + /* sha1 sha224 sha256 */ + bebits = (__be64 *)&buf[j]; + *bebits = cpu_to_be64(byte_count << 3); + j += 2; + } else { + /* sha384 sha512*/ + bebits = (__be64 *)&buf[j]; + *bebits = cpu_to_be64(byte_count >> 61); + j += 2; + bebits = (__be64 *)&buf[j]; + *bebits = cpu_to_be64(byte_count << 3); + j += 2; + } + } + if (j * 4 > bufsize) { + pr_err("%s OVERFLOW %llu\n", __func__, j); + return 0; + } + + return j; +} + +/* sun8i_ss_hash_run - run an ahash request + * Send the data of the request to the SS along with an extra SG with padding + */ +int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) +{ + struct ahash_request *areq = container_of(breq, struct ahash_request, base); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); + struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); + struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); + struct sun8i_ss_alg_template *algt; + struct sun8i_ss_dev *ss; + struct scatterlist *sg; + int bs = crypto_ahash_blocksize(tfm); + int nr_sgs, err, digestsize; + unsigned int len; + u64 byte_count; + void *pad, *result; + int j, i, k, todo; + dma_addr_t addr_res, addr_pad, addr_xpad; + __le32 *bf; + /* HMAC step: + * 0: normal hashing + * 1: IPAD + * 2: OPAD + */ + int hmac = 0; + + algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash); + ss = algt->ss; + 
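To make the arithmetic in hash_pad() above concrete, here is a worked example for the 64-byte-block hashes this driver supports (illustrative commentary only, not part of the patch):

/*
 * Example: a 20-byte message hashed with SHA-256 (bs = 64, le = false).
 *
 *   data copied into the pad buffer   : 20 bytes -> padi = 5 words
 *   0x80 marker word                  :  4 bytes -> j = 6
 *   fill = 64 - (20 % 64)             = 44 bytes of padding in total
 *   min_fill = 2 * sizeof(u32) + 4    = 12 bytes (marker + 64-bit length)
 *   zero words = (44 - 12) / 4        =  8 words -> j = 14
 *   big-endian bit count (20 << 3)    =  8 bytes -> j = 16
 *
 * hash_pad() returns 16, i.e. exactly one full 64-byte block is handed
 * to the engine.
 */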
+ digestsize = algt->alg.hash.halg.digestsize; + if (digestsize == SHA224_DIGEST_SIZE) + digestsize = SHA256_DIGEST_SIZE; + + result = ss->flows[rctx->flow].result; + pad = ss->flows[rctx->flow].pad; + bf = (__le32 *)pad; + + for (i = 0; i < MAX_SG; i++) { + rctx->t_dst[i].addr = 0; + rctx->t_dst[i].len = 0; + } + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + algt->stat_req++; +#endif + + rctx->method = ss->variant->alg_hash[algt->ss_algo_id]; + + nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE); + if (nr_sgs <= 0 || nr_sgs > MAX_SG) { + dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs); + err = -EINVAL; + goto theend; + } + + addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE); + if (dma_mapping_error(ss->dev, addr_res)) { + dev_err(ss->dev, "DMA map dest\n"); + err = -EINVAL; + goto err_dma_result; + } + + j = 0; + len = areq->nbytes; + sg = areq->src; + i = 0; + while (len > 0 && sg) { + if (sg_dma_len(sg) == 0) { + sg = sg_next(sg); + continue; + } + todo = min(len, sg_dma_len(sg)); + /* only the last SG could be with a size not modulo64 */ + if (todo % 64 == 0) { + rctx->t_src[i].addr = sg_dma_address(sg); + rctx->t_src[i].len = todo / 4; + rctx->t_dst[i].addr = addr_res; + rctx->t_dst[i].len = digestsize / 4; + len -= todo; + } else { + scatterwalk_map_and_copy(bf, sg, 0, todo, 0); + j += todo / 4; + len -= todo; + } + sg = sg_next(sg); + i++; + } + if (len > 0) { + dev_err(ss->dev, "remaining len %d\n", len); + err = -EINVAL; + goto theend; + } + + if (j > 0) + i--; + +retry: + byte_count = areq->nbytes; + if (tfmctx->keylen && hmac == 0) { + hmac = 1; + /* shift all SG one slot up, to free slot 0 for IPAD */ + for (k = 6; k >= 0; k--) { + rctx->t_src[k + 1].addr = rctx->t_src[k].addr; + rctx->t_src[k + 1].len = rctx->t_src[k].len; + rctx->t_dst[k + 1].addr = rctx->t_dst[k].addr; + rctx->t_dst[k + 1].len = rctx->t_dst[k].len; + } + addr_xpad = dma_map_single(ss->dev, tfmctx->ipad, bs, DMA_TO_DEVICE); + err = dma_mapping_error(ss->dev, addr_xpad); + if (err) { + dev_err(ss->dev, "Fail to create DMA mapping of ipad\n"); + goto err_dma_xpad; + } + rctx->t_src[0].addr = addr_xpad; + rctx->t_src[0].len = bs / 4; + rctx->t_dst[0].addr = addr_res; + rctx->t_dst[0].len = digestsize / 4; + i++; + byte_count = areq->nbytes + bs; + } + if (tfmctx->keylen && hmac == 2) { + for (i = 0; i < MAX_SG; i++) { + rctx->t_src[i].addr = 0; + rctx->t_src[i].len = 0; + rctx->t_dst[i].addr = 0; + rctx->t_dst[i].len = 0; + } + + addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE); + if (dma_mapping_error(ss->dev, addr_res)) { + dev_err(ss->dev, "Fail to create DMA mapping of result\n"); + err = -EINVAL; + goto err_dma_result; + } + addr_xpad = dma_map_single(ss->dev, tfmctx->opad, bs, DMA_TO_DEVICE); + err = dma_mapping_error(ss->dev, addr_xpad); + if (err) { + dev_err(ss->dev, "Fail to create DMA mapping of opad\n"); + goto err_dma_xpad; + } + rctx->t_src[0].addr = addr_xpad; + rctx->t_src[0].len = bs / 4; + + memcpy(bf, result, digestsize); + j = digestsize / 4; + i = 1; + byte_count = digestsize + bs; + + rctx->t_dst[0].addr = addr_res; + rctx->t_dst[0].len = digestsize / 4; + } + + switch (algt->ss_algo_id) { + case SS_ID_HASH_MD5: + j = hash_pad(bf, 4096, j, byte_count, true, bs); + break; + case SS_ID_HASH_SHA1: + case SS_ID_HASH_SHA224: + case SS_ID_HASH_SHA256: + j = hash_pad(bf, 4096, j, byte_count, false, bs); + break; + } + if (!j) { + err = -EINVAL; + goto theend; + } + + addr_pad = dma_map_single(ss->dev, pad, j * 4, 
DMA_TO_DEVICE); + if (dma_mapping_error(ss->dev, addr_pad)) { + dev_err(ss->dev, "DMA error on padding SG\n"); + err = -EINVAL; + goto err_dma_pad; + } + rctx->t_src[i].addr = addr_pad; + rctx->t_src[i].len = j; + rctx->t_dst[i].addr = addr_res; + rctx->t_dst[i].len = digestsize / 4; + + err = sun8i_ss_run_hash_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm)); + + /* + * mini helper for checking dma map/unmap + * flow start for hmac = 0 (and HMAC = 1) + * HMAC = 0 + * MAP src + * MAP res + * + * retry: + * if hmac then hmac = 1 + * MAP xpad (ipad) + * if hmac == 2 + * MAP res + * MAP xpad (opad) + * MAP pad + * ACTION! + * UNMAP pad + * if hmac + * UNMAP xpad + * UNMAP res + * if hmac < 2 + * UNMAP SRC + * + * if hmac = 1 then hmac = 2 goto retry + */ + + dma_unmap_single(ss->dev, addr_pad, j * 4, DMA_TO_DEVICE); + +err_dma_pad: + if (hmac > 0) + dma_unmap_single(ss->dev, addr_xpad, bs, DMA_TO_DEVICE); +err_dma_xpad: + dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE); +err_dma_result: + if (hmac < 2) + dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src), + DMA_TO_DEVICE); + if (hmac == 1 && !err) { + hmac = 2; + goto retry; + } + + if (!err) + memcpy(areq->result, result, algt->alg.hash.halg.digestsize); +theend: + local_bh_disable(); + crypto_finalize_hash_request(engine, breq, err); + local_bh_enable(); + return 0; +} diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c new file mode 100644 index 000000000..dd677e9ed --- /dev/null +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sun8i-ss-prng.c - hardware cryptographic offloader for + * Allwinner A80/A83T SoC + * + * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com> + * + * This file handle the PRNG found in the SS + * + * You could find a link for the datasheet in Documentation/arm/sunxi.rst + */ +#include "sun8i-ss.h" +#include <linux/dma-mapping.h> +#include <linux/pm_runtime.h> +#include <crypto/internal/rng.h> + +int sun8i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed, + unsigned int slen) +{ + struct sun8i_ss_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm); + + if (ctx->seed && ctx->slen != slen) { + kfree_sensitive(ctx->seed); + ctx->slen = 0; + ctx->seed = NULL; + } + if (!ctx->seed) + ctx->seed = kmalloc(slen, GFP_KERNEL | GFP_DMA); + if (!ctx->seed) + return -ENOMEM; + + memcpy(ctx->seed, seed, slen); + ctx->slen = slen; + + return 0; +} + +int sun8i_ss_prng_init(struct crypto_tfm *tfm) +{ + struct sun8i_ss_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + + memset(ctx, 0, sizeof(struct sun8i_ss_rng_tfm_ctx)); + return 0; +} + +void sun8i_ss_prng_exit(struct crypto_tfm *tfm) +{ + struct sun8i_ss_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + + kfree_sensitive(ctx->seed); + ctx->seed = NULL; + ctx->slen = 0; +} + +int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int dlen) +{ + struct sun8i_ss_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm); + struct rng_alg *alg = crypto_rng_alg(tfm); + struct sun8i_ss_alg_template *algt; + struct sun8i_ss_dev *ss; + dma_addr_t dma_iv, dma_dst; + unsigned int todo; + int err = 0; + int flow; + void *d; + u32 v; + + algt = container_of(alg, struct sun8i_ss_alg_template, alg.rng); + ss = algt->ss; + + if (ctx->slen == 0) { + dev_err(ss->dev, "The PRNG is not seeded\n"); + return -EINVAL; + } + + /* The SS does not give an updated seed, so we need to get a new one. 
+ * So we ask for an extra PRNG_SEED_SIZE bytes of data. + * We want dlen + seedsize rounded up to a multiple of PRNG_DATA_SIZE + */ + todo = dlen + PRNG_SEED_SIZE + PRNG_DATA_SIZE; + todo -= todo % PRNG_DATA_SIZE; + + d = kzalloc(todo, GFP_KERNEL | GFP_DMA); + if (!d) + return -ENOMEM; + + flow = sun8i_ss_get_engine_number(ss); + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + algt->stat_req++; + algt->stat_bytes += todo; +#endif + + v = SS_ALG_PRNG | SS_PRNG_CONTINUE | SS_START; + if (flow) + v |= SS_FLOW1; + else + v |= SS_FLOW0; + + dma_iv = dma_map_single(ss->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE); + if (dma_mapping_error(ss->dev, dma_iv)) { + dev_err(ss->dev, "Cannot DMA MAP IV\n"); + err = -EFAULT; + goto err_free; + } + + dma_dst = dma_map_single(ss->dev, d, todo, DMA_FROM_DEVICE); + if (dma_mapping_error(ss->dev, dma_dst)) { + dev_err(ss->dev, "Cannot DMA MAP DST\n"); + err = -EFAULT; + goto err_iv; + } + + err = pm_runtime_resume_and_get(ss->dev); + if (err < 0) + goto err_pm; + err = 0; + + mutex_lock(&ss->mlock); + writel(dma_iv, ss->base + SS_IV_ADR_REG); + /* the PRNG acts badly (failing rngtest) without SS_KEY_ADR_REG set */ + writel(dma_iv, ss->base + SS_KEY_ADR_REG); + writel(dma_dst, ss->base + SS_DST_ADR_REG); + writel(todo / 4, ss->base + SS_LEN_ADR_REG); + + reinit_completion(&ss->flows[flow].complete); + ss->flows[flow].status = 0; + /* Be sure all data is written before enabling the task */ + wmb(); + + writel(v, ss->base + SS_CTL_REG); + + wait_for_completion_interruptible_timeout(&ss->flows[flow].complete, + msecs_to_jiffies(todo)); + if (ss->flows[flow].status == 0) { + dev_err(ss->dev, "DMA timeout for PRNG (size=%u)\n", todo); + err = -EFAULT; + } + /* Since cipher and hash use the Linux crypto engine, and we have + * a crypto engine per flow, we are sure that they will issue only one + * request per flow. + * Since the crypto engine waits for completion before submitting a new + * one, mlock could be released just after the final writel. + * But the crypto engine cannot handle crypto_rng, so we need to be sure + * nothing else will use our flow. + * The easiest way is to hold mlock until the hardware has finished our request. + * We could have used a per-flow lock, but this would increase + * complexity. + * The drawback is that no request can be handled for the other flow.
+ */ + mutex_unlock(&ss->mlock); + + pm_runtime_put(ss->dev); + +err_pm: + dma_unmap_single(ss->dev, dma_dst, todo, DMA_FROM_DEVICE); +err_iv: + dma_unmap_single(ss->dev, dma_iv, ctx->slen, DMA_TO_DEVICE); + + if (!err) { + memcpy(dst, d, dlen); + /* Update seed */ + memcpy(ctx->seed, d + dlen, ctx->slen); + } +err_free: + kfree_sensitive(d); + + return err; +} diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h new file mode 100644 index 000000000..df6f08f60 --- /dev/null +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h @@ -0,0 +1,327 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * sun8i-ss.h - hardware cryptographic offloader for + * Allwinner A80/A83T SoC + * + * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com> + */ +#include <crypto/aes.h> +#include <crypto/des.h> +#include <crypto/engine.h> +#include <crypto/rng.h> +#include <crypto/skcipher.h> +#include <linux/atomic.h> +#include <linux/debugfs.h> +#include <linux/crypto.h> +#include <crypto/internal/hash.h> +#include <crypto/md5.h> +#include <crypto/sha1.h> +#include <crypto/sha2.h> + +#define SS_START 1 + +#define SS_ENCRYPTION 0 +#define SS_DECRYPTION BIT(6) + +#define SS_ALG_AES 0 +#define SS_ALG_DES (1 << 2) +#define SS_ALG_3DES (2 << 2) +#define SS_ALG_MD5 (3 << 2) +#define SS_ALG_PRNG (4 << 2) +#define SS_ALG_SHA1 (6 << 2) +#define SS_ALG_SHA224 (7 << 2) +#define SS_ALG_SHA256 (8 << 2) + +#define SS_CTL_REG 0x00 +#define SS_INT_CTL_REG 0x04 +#define SS_INT_STA_REG 0x08 +#define SS_KEY_ADR_REG 0x10 +#define SS_IV_ADR_REG 0x18 +#define SS_SRC_ADR_REG 0x20 +#define SS_DST_ADR_REG 0x28 +#define SS_LEN_ADR_REG 0x30 + +#define SS_ID_NOTSUPP 0xFF + +#define SS_ID_CIPHER_AES 0 +#define SS_ID_CIPHER_DES 1 +#define SS_ID_CIPHER_DES3 2 +#define SS_ID_CIPHER_MAX 3 + +#define SS_ID_OP_ECB 0 +#define SS_ID_OP_CBC 1 +#define SS_ID_OP_MAX 2 + +#define SS_AES_128BITS 0 +#define SS_AES_192BITS 1 +#define SS_AES_256BITS 2 + +#define SS_OP_ECB 0 +#define SS_OP_CBC (1 << 13) + +#define SS_ID_HASH_MD5 0 +#define SS_ID_HASH_SHA1 1 +#define SS_ID_HASH_SHA224 2 +#define SS_ID_HASH_SHA256 3 +#define SS_ID_HASH_MAX 4 + +#define SS_FLOW0 BIT(30) +#define SS_FLOW1 BIT(31) + +#define SS_PRNG_CONTINUE BIT(18) + +#define MAX_SG 8 + +#define MAXFLOW 2 + +#define SS_MAX_CLOCKS 2 + +#define SS_DIE_ID_SHIFT 20 +#define SS_DIE_ID_MASK 0x07 + +#define PRNG_DATA_SIZE (160 / 8) +#define PRNG_SEED_SIZE DIV_ROUND_UP(175, 8) + +#define MAX_PAD_SIZE 4096 + +/* + * struct ss_clock - Describe clocks used by sun8i-ss + * @name: Name of clock needed by this variant + * @freq: Frequency to set for each clock + * @max_freq: Maximum frequency for each clock + */ +struct ss_clock { + const char *name; + unsigned long freq; + unsigned long max_freq; +}; + +/* + * struct ss_variant - Describe SS capability for each variant hardware + * @alg_cipher: list of supported ciphers. for each SS_ID_ this will give the + * coresponding SS_ALG_XXX value + * @alg_hash: list of supported hashes. 
for each SS_ID_ this will give the + * corresponding SS_ALG_XXX value + * @op_mode: list of supported block modes + * @ss_clks: list of clock needed by this variant + */ +struct ss_variant { + char alg_cipher[SS_ID_CIPHER_MAX]; + char alg_hash[SS_ID_HASH_MAX]; + u32 op_mode[SS_ID_OP_MAX]; + struct ss_clock ss_clks[SS_MAX_CLOCKS]; +}; + +struct sginfo { + u32 addr; + u32 len; +}; + +/* + * struct sun8i_ss_flow - Information used by each flow + * @engine: ptr to the crypto_engine for this flow + * @complete: completion for the current task on this flow + * @status: set to 1 by interrupt if task is done + * @stat_req: number of request done by this flow + * @iv: list of IV to use for each step + * @biv: buffer which contain the backuped IV + * @pad: padding buffer for hash operations + * @result: buffer for storing the result of hash operations + */ +struct sun8i_ss_flow { + struct crypto_engine *engine; + struct completion complete; + int status; + u8 *iv[MAX_SG]; + u8 *biv; + void *pad; + void *result; +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + unsigned long stat_req; +#endif +}; + +/* + * struct sun8i_ss_dev - main container for all this driver information + * @base: base address of SS + * @ssclks: clocks used by SS + * @reset: pointer to reset controller + * @dev: the platform device + * @mlock: Control access to device registers + * @flows: array of all flow + * @flow: flow to use in next request + * @variant: pointer to variant specific data + * @dbgfs_dir: Debugfs dentry for statistic directory + * @dbgfs_stats: Debugfs dentry for statistic counters + */ +struct sun8i_ss_dev { + void __iomem *base; + struct clk *ssclks[SS_MAX_CLOCKS]; + struct reset_control *reset; + struct device *dev; + struct mutex mlock; + struct sun8i_ss_flow *flows; + atomic_t flow; + const struct ss_variant *variant; +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + struct dentry *dbgfs_dir; + struct dentry *dbgfs_stats; +#endif +}; + +/* + * struct sun8i_cipher_req_ctx - context for a skcipher request + * @t_src: list of mapped SGs with their size + * @t_dst: list of mapped SGs with their size + * @p_key: DMA address of the key + * @p_iv: DMA address of the IVs + * @niv: Number of IVs DMA mapped + * @method: current algorithm for this request + * @op_mode: op_mode for this request + * @op_dir: direction (encrypt vs decrypt) for this request + * @flow: the flow to use for this request + * @ivlen: size of IVs + * @keylen: keylen for this request + * @fallback_req: request struct for invoking the fallback skcipher TFM + */ +struct sun8i_cipher_req_ctx { + struct sginfo t_src[MAX_SG]; + struct sginfo t_dst[MAX_SG]; + u32 p_key; + u32 p_iv[MAX_SG]; + int niv; + u32 method; + u32 op_mode; + u32 op_dir; + int flow; + unsigned int ivlen; + unsigned int keylen; + struct skcipher_request fallback_req; // keep at the end +}; + +/* + * struct sun8i_cipher_tfm_ctx - context for a skcipher TFM + * @enginectx: crypto_engine used by this TFM + * @key: pointer to key data + * @keylen: len of the key + * @ss: pointer to the private data of driver handling this TFM + * @fallback_tfm: pointer to the fallback TFM + * + * enginectx must be the first element + */ +struct sun8i_cipher_tfm_ctx { + struct crypto_engine_ctx enginectx; + u32 *key; + u32 keylen; + struct sun8i_ss_dev *ss; + struct crypto_skcipher *fallback_tfm; +}; + +/* + * struct sun8i_ss_prng_ctx - context for PRNG TFM + * @seed: The seed to use + * @slen: The size of the seed + */ +struct sun8i_ss_rng_tfm_ctx { + void *seed; + unsigned int slen; +}; + +/* + * struct 
sun8i_ss_hash_tfm_ctx - context for an ahash TFM + * @enginectx: crypto_engine used by this TFM + * @fallback_tfm: pointer to the fallback TFM + * @ss: pointer to the private data of driver handling this TFM + * + * enginectx must be the first element + */ +struct sun8i_ss_hash_tfm_ctx { + struct crypto_engine_ctx enginectx; + struct crypto_ahash *fallback_tfm; + struct sun8i_ss_dev *ss; + u8 *ipad; + u8 *opad; + u8 key[SHA256_BLOCK_SIZE]; + int keylen; +}; + +/* + * struct sun8i_ss_hash_reqctx - context for an ahash request + * @t_src: list of DMA address and size for source SGs + * @t_dst: list of DMA address and size for destination SGs + * @fallback_req: pre-allocated fallback request + * @method: the register value for the algorithm used by this request + * @flow: the flow to use for this request + */ +struct sun8i_ss_hash_reqctx { + struct sginfo t_src[MAX_SG]; + struct sginfo t_dst[MAX_SG]; + struct ahash_request fallback_req; + u32 method; + int flow; +}; + +/* + * struct sun8i_ss_alg_template - crypto_alg template + * @type: the CRYPTO_ALG_TYPE for this template + * @ss_algo_id: the SS_ID for this template + * @ss_blockmode: the type of block operation SS_ID + * @ss: pointer to the sun8i_ss_dev structure associated with + * this template + * @alg: one of sub struct must be used + * @stat_req: number of request done on this template + * @stat_fb: number of request which has fallbacked + * @stat_bytes: total data size done by this template + */ +struct sun8i_ss_alg_template { + u32 type; + u32 ss_algo_id; + u32 ss_blockmode; + struct sun8i_ss_dev *ss; + union { + struct skcipher_alg skcipher; + struct rng_alg rng; + struct ahash_alg hash; + } alg; + unsigned long stat_req; + unsigned long stat_fb; + unsigned long stat_bytes; + unsigned long stat_fb_len; + unsigned long stat_fb_sglen; + unsigned long stat_fb_align; + unsigned long stat_fb_sgnum; + char fbname[CRYPTO_MAX_ALG_NAME]; +}; + +int sun8i_ss_enqueue(struct crypto_async_request *areq, u32 type); + +int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen); +int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen); +int sun8i_ss_cipher_init(struct crypto_tfm *tfm); +void sun8i_ss_cipher_exit(struct crypto_tfm *tfm); +int sun8i_ss_skdecrypt(struct skcipher_request *areq); +int sun8i_ss_skencrypt(struct skcipher_request *areq); + +int sun8i_ss_get_engine_number(struct sun8i_ss_dev *ss); + +int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx, const char *name); +int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int dlen); +int sun8i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen); +int sun8i_ss_prng_init(struct crypto_tfm *tfm); +void sun8i_ss_prng_exit(struct crypto_tfm *tfm); + +int sun8i_ss_hash_crainit(struct crypto_tfm *tfm); +void sun8i_ss_hash_craexit(struct crypto_tfm *tfm); +int sun8i_ss_hash_init(struct ahash_request *areq); +int sun8i_ss_hash_export(struct ahash_request *areq, void *out); +int sun8i_ss_hash_import(struct ahash_request *areq, const void *in); +int sun8i_ss_hash_final(struct ahash_request *areq); +int sun8i_ss_hash_update(struct ahash_request *areq); +int sun8i_ss_hash_finup(struct ahash_request *areq); +int sun8i_ss_hash_digest(struct ahash_request *areq); +int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq); +int sun8i_ss_hmac_setkey(struct crypto_ahash *ahash, const u8 *key, + unsigned int keylen); |
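Once loaded, the algorithms this driver registers are reachable through the kernel's usual crypto interfaces; whether a given request is really served by sun8i-ss or by a software implementation depends on algorithm priorities and on the fallback conditions above. As an illustration only (not part of this patch), a userspace program could exercise one of the registered hashes through the generic AF_ALG socket interface:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",   /* ahash/shash algorithms */
		.salg_name   = "sha256", /* may resolve to sun8i-ss or to a software driver */
	};
	unsigned char digest[32];
	const char msg[] = "hello";
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		perror("AF_ALG");
		return 1;
	}
	opfd = accept(tfmfd, NULL, NULL);
	if (opfd < 0) {
		perror("accept");
		return 1;
	}

	/* one-shot hash: send the message, then read back the digest */
	if (write(opfd, msg, strlen(msg)) < 0 || read(opfd, digest, sizeof(digest)) < 0) {
		perror("hash");
		return 1;
	}

	for (unsigned int i = 0; i < sizeof(digest); i++)
		printf("%02x", digest[i]);
	printf("\n");

	close(opfd);
	close(tfmfd);
	return 0;
}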