author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/crypto/rockchip
parent     Initial commit. (diff)
download   linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz
           linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip
Adding upstream version 6.1.76. (upstream/6.1.76, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r--  drivers/crypto/rockchip/Makefile                     5
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto.c            273
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto.h            260
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_ahash.c      442
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_skcipher.c   603
5 files changed, 1583 insertions, 0 deletions
diff --git a/drivers/crypto/rockchip/Makefile b/drivers/crypto/rockchip/Makefile
new file mode 100644
index 000000000..785277aca
--- /dev/null
+++ b/drivers/crypto/rockchip/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rk_crypto.o
+rk_crypto-objs := rk3288_crypto.o \
+ rk3288_crypto_skcipher.o \
+ rk3288_crypto_ahash.o
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
new file mode 100644
index 000000000..14a0aef18
--- /dev/null
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Crypto acceleration support for Rockchip RK3288
+ *
+ * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * Author: Zain Wang <zain.wang@rock-chips.com>
+ *
+ * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
+ */
+
+#include "rk3288_crypto.h"
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/reset.h>
+
+static int rk_crypto_enable_clk(struct rk_crypto_info *dev)
+{
+ int err;
+
+ err = clk_prepare_enable(dev->sclk);
+ if (err) {
+ dev_err(dev->dev, "[%s:%d], Couldn't enable clock sclk\n",
+ __func__, __LINE__);
+ goto err_return;
+ }
+ err = clk_prepare_enable(dev->aclk);
+ if (err) {
+ dev_err(dev->dev, "[%s:%d], Couldn't enable clock aclk\n",
+ __func__, __LINE__);
+ goto err_aclk;
+ }
+ err = clk_prepare_enable(dev->hclk);
+ if (err) {
+ dev_err(dev->dev, "[%s:%d], Couldn't enable clock hclk\n",
+ __func__, __LINE__);
+ goto err_hclk;
+ }
+ err = clk_prepare_enable(dev->dmaclk);
+ if (err) {
+ dev_err(dev->dev, "[%s:%d], Couldn't enable clock dmaclk\n",
+ __func__, __LINE__);
+ goto err_dmaclk;
+ }
+ return err;
+err_dmaclk:
+ clk_disable_unprepare(dev->hclk);
+err_hclk:
+ clk_disable_unprepare(dev->aclk);
+err_aclk:
+ clk_disable_unprepare(dev->sclk);
+err_return:
+ return err;
+}
+
+static void rk_crypto_disable_clk(struct rk_crypto_info *dev)
+{
+ clk_disable_unprepare(dev->dmaclk);
+ clk_disable_unprepare(dev->hclk);
+ clk_disable_unprepare(dev->aclk);
+ clk_disable_unprepare(dev->sclk);
+}
+
+static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
+{
+ struct rk_crypto_info *dev = platform_get_drvdata(dev_id);
+ u32 interrupt_status;
+
+ interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
+ CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);
+
+ dev->status = 1;
+ if (interrupt_status & 0x0a) {
+ dev_warn(dev->dev, "DMA Error\n");
+ dev->status = 0;
+ }
+ complete(&dev->complete);
+
+ return IRQ_HANDLED;
+}
+
+static struct rk_crypto_tmp *rk_cipher_algs[] = {
+ &rk_ecb_aes_alg,
+ &rk_cbc_aes_alg,
+ &rk_ecb_des_alg,
+ &rk_cbc_des_alg,
+ &rk_ecb_des3_ede_alg,
+ &rk_cbc_des3_ede_alg,
+ &rk_ahash_sha1,
+ &rk_ahash_sha256,
+ &rk_ahash_md5,
+};
+
+static int rk_crypto_register(struct rk_crypto_info *crypto_info)
+{
+ unsigned int i, k;
+ int err = 0;
+
+ for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
+ rk_cipher_algs[i]->dev = crypto_info;
+ if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
+ err = crypto_register_skcipher(
+ &rk_cipher_algs[i]->alg.skcipher);
+ else
+ err = crypto_register_ahash(
+ &rk_cipher_algs[i]->alg.hash);
+ if (err)
+ goto err_cipher_algs;
+ }
+ return 0;
+
+err_cipher_algs:
+ for (k = 0; k < i; k++) {
+ if (rk_cipher_algs[k]->type == ALG_TYPE_CIPHER)
+ crypto_unregister_skcipher(&rk_cipher_algs[k]->alg.skcipher);
+ else
+ crypto_unregister_ahash(&rk_cipher_algs[k]->alg.hash);
+ }
+ return err;
+}
+
+static void rk_crypto_unregister(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
+ if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
+ crypto_unregister_skcipher(&rk_cipher_algs[i]->alg.skcipher);
+ else
+ crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
+ }
+}
+
+static void rk_crypto_action(void *data)
+{
+ struct rk_crypto_info *crypto_info = data;
+
+ reset_control_assert(crypto_info->rst);
+}
+
+static const struct of_device_id crypto_of_id_table[] = {
+ { .compatible = "rockchip,rk3288-crypto" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, crypto_of_id_table);
+
+static int rk_crypto_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rk_crypto_info *crypto_info;
+ int err = 0;
+
+ crypto_info = devm_kzalloc(&pdev->dev,
+ sizeof(*crypto_info), GFP_KERNEL);
+ if (!crypto_info) {
+ err = -ENOMEM;
+ goto err_crypto;
+ }
+
+ crypto_info->rst = devm_reset_control_get(dev, "crypto-rst");
+ if (IS_ERR(crypto_info->rst)) {
+ err = PTR_ERR(crypto_info->rst);
+ goto err_crypto;
+ }
+
+ reset_control_assert(crypto_info->rst);
+ usleep_range(10, 20);
+ reset_control_deassert(crypto_info->rst);
+
+ err = devm_add_action_or_reset(dev, rk_crypto_action, crypto_info);
+ if (err)
+ goto err_crypto;
+
+ crypto_info->reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(crypto_info->reg)) {
+ err = PTR_ERR(crypto_info->reg);
+ goto err_crypto;
+ }
+
+ crypto_info->aclk = devm_clk_get(&pdev->dev, "aclk");
+ if (IS_ERR(crypto_info->aclk)) {
+ err = PTR_ERR(crypto_info->aclk);
+ goto err_crypto;
+ }
+
+ crypto_info->hclk = devm_clk_get(&pdev->dev, "hclk");
+ if (IS_ERR(crypto_info->hclk)) {
+ err = PTR_ERR(crypto_info->hclk);
+ goto err_crypto;
+ }
+
+ crypto_info->sclk = devm_clk_get(&pdev->dev, "sclk");
+ if (IS_ERR(crypto_info->sclk)) {
+ err = PTR_ERR(crypto_info->sclk);
+ goto err_crypto;
+ }
+
+ crypto_info->dmaclk = devm_clk_get(&pdev->dev, "apb_pclk");
+ if (IS_ERR(crypto_info->dmaclk)) {
+ err = PTR_ERR(crypto_info->dmaclk);
+ goto err_crypto;
+ }
+
+ crypto_info->irq = platform_get_irq(pdev, 0);
+ if (crypto_info->irq < 0) {
+ dev_warn(&pdev->dev,
+ "control interrupt is not available\n");
+ err = crypto_info->irq;
+ goto err_crypto;
+ }
+
+ err = devm_request_irq(&pdev->dev, crypto_info->irq,
+ rk_crypto_irq_handle, IRQF_SHARED,
+ "rk-crypto", pdev);
+
+ if (err) {
+ dev_err(crypto_info->dev, "irq request failed.\n");
+ goto err_crypto;
+ }
+
+ crypto_info->dev = &pdev->dev;
+ platform_set_drvdata(pdev, crypto_info);
+
+ crypto_info->engine = crypto_engine_alloc_init(&pdev->dev, true);
+ crypto_engine_start(crypto_info->engine);
+ init_completion(&crypto_info->complete);
+
+ rk_crypto_enable_clk(crypto_info);
+
+ err = rk_crypto_register(crypto_info);
+ if (err) {
+ dev_err(dev, "err in register alg");
+ goto err_register_alg;
+ }
+
+ dev_info(dev, "Crypto Accelerator successfully registered\n");
+ return 0;
+
+err_register_alg:
+ crypto_engine_exit(crypto_info->engine);
+err_crypto:
+ dev_err(dev, "Crypto Accelerator not successfully registered\n");
+ return err;
+}
+
+static int rk_crypto_remove(struct platform_device *pdev)
+{
+ struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);
+
+ rk_crypto_unregister();
+ rk_crypto_disable_clk(crypto_tmp);
+ crypto_engine_exit(crypto_tmp->engine);
+ return 0;
+}
+
+static struct platform_driver crypto_driver = {
+ .probe = rk_crypto_probe,
+ .remove = rk_crypto_remove,
+ .driver = {
+ .name = "rk3288-crypto",
+ .of_match_table = crypto_of_id_table,
+ },
+};
+
+module_platform_driver(crypto_driver);
+
+MODULE_AUTHOR("Zain Wang <zain.wang@rock-chips.com>");
+MODULE_DESCRIPTION("Support for Rockchip's cryptographic engine");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
new file mode 100644
index 000000000..045e811b4
--- /dev/null
+++ b/drivers/crypto/rockchip/rk3288_crypto.h
@@ -0,0 +1,260 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __RK3288_CRYPTO_H__
+#define __RK3288_CRYPTO_H__
+
+#include <crypto/aes.h>
+#include <crypto/internal/des.h>
+#include <crypto/algapi.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <crypto/engine.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+
+#include <crypto/md5.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+
+#define _SBF(v, f) ((v) << (f))
+
+/* Crypto control registers */
+#define RK_CRYPTO_INTSTS 0x0000
+#define RK_CRYPTO_PKA_DONE_INT BIT(5)
+#define RK_CRYPTO_HASH_DONE_INT BIT(4)
+#define RK_CRYPTO_HRDMA_ERR_INT BIT(3)
+#define RK_CRYPTO_HRDMA_DONE_INT BIT(2)
+#define RK_CRYPTO_BCDMA_ERR_INT BIT(1)
+#define RK_CRYPTO_BCDMA_DONE_INT BIT(0)
+
+#define RK_CRYPTO_INTENA 0x0004
+#define RK_CRYPTO_PKA_DONE_ENA BIT(5)
+#define RK_CRYPTO_HASH_DONE_ENA BIT(4)
+#define RK_CRYPTO_HRDMA_ERR_ENA BIT(3)
+#define RK_CRYPTO_HRDMA_DONE_ENA BIT(2)
+#define RK_CRYPTO_BCDMA_ERR_ENA BIT(1)
+#define RK_CRYPTO_BCDMA_DONE_ENA BIT(0)
+
+#define RK_CRYPTO_CTRL 0x0008
+#define RK_CRYPTO_WRITE_MASK _SBF(0xFFFF, 16)
+#define RK_CRYPTO_TRNG_FLUSH BIT(9)
+#define RK_CRYPTO_TRNG_START BIT(8)
+#define RK_CRYPTO_PKA_FLUSH BIT(7)
+#define RK_CRYPTO_HASH_FLUSH BIT(6)
+#define RK_CRYPTO_BLOCK_FLUSH BIT(5)
+#define RK_CRYPTO_PKA_START BIT(4)
+#define RK_CRYPTO_HASH_START BIT(3)
+#define RK_CRYPTO_BLOCK_START BIT(2)
+#define RK_CRYPTO_TDES_START BIT(1)
+#define RK_CRYPTO_AES_START BIT(0)
+
+#define RK_CRYPTO_CONF 0x000c
+/* HASH Receive DMA Address Mode: fix | increment */
+#define RK_CRYPTO_HR_ADDR_MODE BIT(8)
+/* Block Transmit DMA Address Mode: fix | increment */
+#define RK_CRYPTO_BT_ADDR_MODE BIT(7)
+/* Block Receive DMA Address Mode: fix | increment */
+#define RK_CRYPTO_BR_ADDR_MODE BIT(6)
+#define RK_CRYPTO_BYTESWAP_HRFIFO BIT(5)
+#define RK_CRYPTO_BYTESWAP_BTFIFO BIT(4)
+#define RK_CRYPTO_BYTESWAP_BRFIFO BIT(3)
+/* AES = 0 OR DES = 1 */
+#define RK_CRYPTO_DESSEL BIT(2)
+#define RK_CYYPTO_HASHINSEL_INDEPENDENT_SOURCE _SBF(0x00, 0)
+#define RK_CYYPTO_HASHINSEL_BLOCK_CIPHER_INPUT _SBF(0x01, 0)
+#define RK_CYYPTO_HASHINSEL_BLOCK_CIPHER_OUTPUT _SBF(0x02, 0)
+
+/* Block Receiving DMA Start Address Register */
+#define RK_CRYPTO_BRDMAS 0x0010
+/* Block Transmitting DMA Start Address Register */
+#define RK_CRYPTO_BTDMAS 0x0014
+/* Block Receiving DMA Length Register */
+#define RK_CRYPTO_BRDMAL 0x0018
+/* Hash Receiving DMA Start Address Register */
+#define RK_CRYPTO_HRDMAS 0x001c
+/* Hash Receiving DMA Length Register */
+#define RK_CRYPTO_HRDMAL 0x0020
+
+/* AES registers */
+#define RK_CRYPTO_AES_CTRL 0x0080
+#define RK_CRYPTO_AES_BYTESWAP_CNT BIT(11)
+#define RK_CRYPTO_AES_BYTESWAP_KEY BIT(10)
+#define RK_CRYPTO_AES_BYTESWAP_IV BIT(9)
+#define RK_CRYPTO_AES_BYTESWAP_DO BIT(8)
+#define RK_CRYPTO_AES_BYTESWAP_DI BIT(7)
+#define RK_CRYPTO_AES_KEY_CHANGE BIT(6)
+#define RK_CRYPTO_AES_ECB_MODE _SBF(0x00, 4)
+#define RK_CRYPTO_AES_CBC_MODE _SBF(0x01, 4)
+#define RK_CRYPTO_AES_CTR_MODE _SBF(0x02, 4)
+#define RK_CRYPTO_AES_128BIT_key _SBF(0x00, 2)
+#define RK_CRYPTO_AES_192BIT_key _SBF(0x01, 2)
+#define RK_CRYPTO_AES_256BIT_key _SBF(0x02, 2)
+/* Slave = 0 / fifo = 1 */
+#define RK_CRYPTO_AES_FIFO_MODE BIT(1)
+/* Encryption = 0 , Decryption = 1 */
+#define RK_CRYPTO_AES_DEC BIT(0)
+
+#define RK_CRYPTO_AES_STS 0x0084
+#define RK_CRYPTO_AES_DONE BIT(0)
+
+/* AES Input Data 0-3 Register */
+#define RK_CRYPTO_AES_DIN_0 0x0088
+#define RK_CRYPTO_AES_DIN_1 0x008c
+#define RK_CRYPTO_AES_DIN_2 0x0090
+#define RK_CRYPTO_AES_DIN_3 0x0094
+
+/* AES output Data 0-3 Register */
+#define RK_CRYPTO_AES_DOUT_0 0x0098
+#define RK_CRYPTO_AES_DOUT_1 0x009c
+#define RK_CRYPTO_AES_DOUT_2 0x00a0
+#define RK_CRYPTO_AES_DOUT_3 0x00a4
+
+/* AES IV Data 0-3 Register */
+#define RK_CRYPTO_AES_IV_0 0x00a8
+#define RK_CRYPTO_AES_IV_1 0x00ac
+#define RK_CRYPTO_AES_IV_2 0x00b0
+#define RK_CRYPTO_AES_IV_3 0x00b4
+
+/* AES Key Data 0-3 Register */
+#define RK_CRYPTO_AES_KEY_0 0x00b8
+#define RK_CRYPTO_AES_KEY_1 0x00bc
+#define RK_CRYPTO_AES_KEY_2 0x00c0
+#define RK_CRYPTO_AES_KEY_3 0x00c4
+#define RK_CRYPTO_AES_KEY_4 0x00c8
+#define RK_CRYPTO_AES_KEY_5 0x00cc
+#define RK_CRYPTO_AES_KEY_6 0x00d0
+#define RK_CRYPTO_AES_KEY_7 0x00d4
+
+/* des/tdes */
+#define RK_CRYPTO_TDES_CTRL 0x0100
+#define RK_CRYPTO_TDES_BYTESWAP_KEY BIT(8)
+#define RK_CRYPTO_TDES_BYTESWAP_IV BIT(7)
+#define RK_CRYPTO_TDES_BYTESWAP_DO BIT(6)
+#define RK_CRYPTO_TDES_BYTESWAP_DI BIT(5)
+/* 0: ECB, 1: CBC */
+#define RK_CRYPTO_TDES_CHAINMODE_CBC BIT(4)
+/* TDES Key Mode, 0 : EDE, 1 : EEE */
+#define RK_CRYPTO_TDES_EEE BIT(3)
+/* 0: DES, 1:TDES */
+#define RK_CRYPTO_TDES_SELECT BIT(2)
+/* 0: Slave, 1:Fifo */
+#define RK_CRYPTO_TDES_FIFO_MODE BIT(1)
+/* Encryption = 0 , Decryption = 1 */
+#define RK_CRYPTO_TDES_DEC BIT(0)
+
+#define RK_CRYPTO_TDES_STS 0x0104
+#define RK_CRYPTO_TDES_DONE BIT(0)
+
+#define RK_CRYPTO_TDES_DIN_0 0x0108
+#define RK_CRYPTO_TDES_DIN_1 0x010c
+#define RK_CRYPTO_TDES_DOUT_0 0x0110
+#define RK_CRYPTO_TDES_DOUT_1 0x0114
+#define RK_CRYPTO_TDES_IV_0 0x0118
+#define RK_CRYPTO_TDES_IV_1 0x011c
+#define RK_CRYPTO_TDES_KEY1_0 0x0120
+#define RK_CRYPTO_TDES_KEY1_1 0x0124
+#define RK_CRYPTO_TDES_KEY2_0 0x0128
+#define RK_CRYPTO_TDES_KEY2_1 0x012c
+#define RK_CRYPTO_TDES_KEY3_0 0x0130
+#define RK_CRYPTO_TDES_KEY3_1 0x0134
+
+/* HASH */
+#define RK_CRYPTO_HASH_CTRL 0x0180
+#define RK_CRYPTO_HASH_SWAP_DO BIT(3)
+#define RK_CRYPTO_HASH_SWAP_DI BIT(2)
+#define RK_CRYPTO_HASH_SHA1 _SBF(0x00, 0)
+#define RK_CRYPTO_HASH_MD5 _SBF(0x01, 0)
+#define RK_CRYPTO_HASH_SHA256 _SBF(0x02, 0)
+#define RK_CRYPTO_HASH_PRNG _SBF(0x03, 0)
+
+#define RK_CRYPTO_HASH_STS 0x0184
+#define RK_CRYPTO_HASH_DONE BIT(0)
+
+#define RK_CRYPTO_HASH_MSG_LEN 0x0188
+#define RK_CRYPTO_HASH_DOUT_0 0x018c
+#define RK_CRYPTO_HASH_DOUT_1 0x0190
+#define RK_CRYPTO_HASH_DOUT_2 0x0194
+#define RK_CRYPTO_HASH_DOUT_3 0x0198
+#define RK_CRYPTO_HASH_DOUT_4 0x019c
+#define RK_CRYPTO_HASH_DOUT_5 0x01a0
+#define RK_CRYPTO_HASH_DOUT_6 0x01a4
+#define RK_CRYPTO_HASH_DOUT_7 0x01a8
+
+#define CRYPTO_READ(dev, offset) \
+ readl_relaxed(((dev)->reg + (offset)))
+#define CRYPTO_WRITE(dev, offset, val) \
+ writel_relaxed((val), ((dev)->reg + (offset)))
+
+struct rk_crypto_info {
+ struct device *dev;
+ struct clk *aclk;
+ struct clk *hclk;
+ struct clk *sclk;
+ struct clk *dmaclk;
+ struct reset_control *rst;
+ void __iomem *reg;
+ int irq;
+
+ struct crypto_engine *engine;
+ struct completion complete;
+ int status;
+};
+
+/* the private variable of hash */
+struct rk_ahash_ctx {
+ struct crypto_engine_ctx enginectx;
+ struct rk_crypto_info *dev;
+ /* for fallback */
+ struct crypto_ahash *fallback_tfm;
+};
+
+/* the private variable of hash for fallback */
+struct rk_ahash_rctx {
+ struct ahash_request fallback_req;
+ u32 mode;
+ int nrsg;
+};
+
+/* the private variable of cipher */
+struct rk_cipher_ctx {
+ struct crypto_engine_ctx enginectx;
+ struct rk_crypto_info *dev;
+ unsigned int keylen;
+ u8 key[AES_MAX_KEY_SIZE];
+ u8 iv[AES_BLOCK_SIZE];
+ struct crypto_skcipher *fallback_tfm;
+};
+
+struct rk_cipher_rctx {
+ u8 backup_iv[AES_BLOCK_SIZE];
+ u32 mode;
+ struct skcipher_request fallback_req; // keep at the end
+};
+
+enum alg_type {
+ ALG_TYPE_HASH,
+ ALG_TYPE_CIPHER,
+};
+
+struct rk_crypto_tmp {
+ struct rk_crypto_info *dev;
+ union {
+ struct skcipher_alg skcipher;
+ struct ahash_alg hash;
+ } alg;
+ enum alg_type type;
+};
+
+extern struct rk_crypto_tmp rk_ecb_aes_alg;
+extern struct rk_crypto_tmp rk_cbc_aes_alg;
+extern struct rk_crypto_tmp rk_ecb_des_alg;
+extern struct rk_crypto_tmp rk_cbc_des_alg;
+extern struct rk_crypto_tmp rk_ecb_des3_ede_alg;
+extern struct rk_crypto_tmp rk_cbc_des3_ede_alg;
+
+extern struct rk_crypto_tmp rk_ahash_sha1;
+extern struct rk_crypto_tmp rk_ahash_sha256;
+extern struct rk_crypto_tmp rk_ahash_md5;
+
+#endif
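
A note on the control registers above: the driver writes them as value-plus-mask pairs, with the upper 16 bits acting as a write-enable mask for the lower 16 bits (see RK_CRYPTO_WRITE_MASK and the RK_CRYPTO_BLOCK_START/RK_CRYPTO_HASH_START writes in the .c files). A small hedged sketch of that idiom; rk_crypto_ctrl_set() is a hypothetical helper, not part of this driver:

	/* Masked write: requested bits in the low half, matching
	 * write-enable bits in the high half; other bits are untouched. */
	static inline void rk_crypto_ctrl_set(struct rk_crypto_info *dev, u32 bits)
	{
		CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, bits | _SBF(bits, 16));
	}

	/* e.g. starting a block-cipher DMA transfer: */
	rk_crypto_ctrl_set(dev, RK_CRYPTO_BLOCK_START);
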
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
new file mode 100644
index 000000000..edd40e16a
--- /dev/null
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -0,0 +1,442 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Crypto acceleration support for Rockchip RK3288
+ *
+ * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * Author: Zain Wang <zain.wang@rock-chips.com>
+ *
+ * Some ideas are from marvell/cesa.c and s5p-sss.c driver.
+ */
+#include <linux/device.h>
+#include <asm/unaligned.h>
+#include "rk3288_crypto.h"
+
+/*
+ * The IC cannot hash a zero-length message, so return the
+ * precomputed hash of the empty message in that case.
+ */
+
+static bool rk_ahash_need_fallback(struct ahash_request *req)
+{
+ struct scatterlist *sg;
+
+ sg = req->src;
+ while (sg) {
+ if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
+ return true;
+ }
+ if (sg->length % 4) {
+ return true;
+ }
+ sg = sg_next(sg);
+ }
+ return false;
+}
+
+static int rk_ahash_digest_fb(struct ahash_request *areq)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ rctx->fallback_req.nbytes = areq->nbytes;
+ rctx->fallback_req.src = areq->src;
+ rctx->fallback_req.result = areq->result;
+
+ return crypto_ahash_digest(&rctx->fallback_req);
+}
+
+static int zero_message_process(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ int rk_digest_size = crypto_ahash_digestsize(tfm);
+
+ switch (rk_digest_size) {
+ case SHA1_DIGEST_SIZE:
+ memcpy(req->result, sha1_zero_message_hash, rk_digest_size);
+ break;
+ case SHA256_DIGEST_SIZE:
+ memcpy(req->result, sha256_zero_message_hash, rk_digest_size);
+ break;
+ case MD5_DIGEST_SIZE:
+ memcpy(req->result, md5_zero_message_hash, rk_digest_size);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void rk_ahash_reg_init(struct ahash_request *req)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct rk_crypto_info *dev = tctx->dev;
+ int reg_status;
+
+ reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
+ RK_CRYPTO_HASH_FLUSH | _SBF(0xffff, 16);
+ CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);
+
+ reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL);
+ reg_status &= (~RK_CRYPTO_HASH_FLUSH);
+ reg_status |= _SBF(0xffff, 16);
+ CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);
+
+ memset_io(dev->reg + RK_CRYPTO_HASH_DOUT_0, 0, 32);
+
+ CRYPTO_WRITE(dev, RK_CRYPTO_INTENA, RK_CRYPTO_HRDMA_ERR_ENA |
+ RK_CRYPTO_HRDMA_DONE_ENA);
+
+ CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, RK_CRYPTO_HRDMA_ERR_INT |
+ RK_CRYPTO_HRDMA_DONE_INT);
+
+ CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, rctx->mode |
+ RK_CRYPTO_HASH_SWAP_DO);
+
+ CRYPTO_WRITE(dev, RK_CRYPTO_CONF, RK_CRYPTO_BYTESWAP_HRFIFO |
+ RK_CRYPTO_BYTESWAP_BRFIFO |
+ RK_CRYPTO_BYTESWAP_BTFIFO);
+
+ CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, req->nbytes);
+}
+
+static int rk_ahash_init(struct ahash_request *req)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_init(&rctx->fallback_req);
+}
+
+static int rk_ahash_update(struct ahash_request *req)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.nbytes = req->nbytes;
+ rctx->fallback_req.src = req->src;
+
+ return crypto_ahash_update(&rctx->fallback_req);
+}
+
+static int rk_ahash_final(struct ahash_request *req)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.result = req->result;
+
+ return crypto_ahash_final(&rctx->fallback_req);
+}
+
+static int rk_ahash_finup(struct ahash_request *req)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ rctx->fallback_req.nbytes = req->nbytes;
+ rctx->fallback_req.src = req->src;
+ rctx->fallback_req.result = req->result;
+
+ return crypto_ahash_finup(&rctx->fallback_req);
+}
+
+static int rk_ahash_import(struct ahash_request *req, const void *in)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_import(&rctx->fallback_req, in);
+}
+
+static int rk_ahash_export(struct ahash_request *req, void *out)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_export(&rctx->fallback_req, out);
+}
+
+static int rk_ahash_digest(struct ahash_request *req)
+{
+ struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+ struct rk_crypto_info *dev = tctx->dev;
+
+ if (rk_ahash_need_fallback(req))
+ return rk_ahash_digest_fb(req);
+
+ if (!req->nbytes)
+ return zero_message_process(req);
+
+ return crypto_transfer_hash_request_to_engine(dev->engine, req);
+}
+
+static void crypto_ahash_dma_start(struct rk_crypto_info *dev, struct scatterlist *sg)
+{
+ CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, sg_dma_address(sg));
+ CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, sg_dma_len(sg) / 4);
+ CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START |
+ (RK_CRYPTO_HASH_START << 16));
+}
+
+static int rk_hash_prepare(struct crypto_engine *engine, void *breq)
+{
+ struct ahash_request *areq = container_of(breq, struct ahash_request, base);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
+ struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
+ int ret;
+
+ ret = dma_map_sg(tctx->dev->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
+ if (ret <= 0)
+ return -EINVAL;
+
+ rctx->nrsg = ret;
+
+ return 0;
+}
+
+static int rk_hash_unprepare(struct crypto_engine *engine, void *breq)
+{
+ struct ahash_request *areq = container_of(breq, struct ahash_request, base);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
+ struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
+
+ dma_unmap_sg(tctx->dev->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE);
+ return 0;
+}
+
+static int rk_hash_run(struct crypto_engine *engine, void *breq)
+{
+ struct ahash_request *areq = container_of(breq, struct ahash_request, base);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
+ struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct scatterlist *sg = areq->src;
+ int err = 0;
+ int i;
+ u32 v;
+
+ rctx->mode = 0;
+
+ switch (crypto_ahash_digestsize(tfm)) {
+ case SHA1_DIGEST_SIZE:
+ rctx->mode = RK_CRYPTO_HASH_SHA1;
+ break;
+ case SHA256_DIGEST_SIZE:
+ rctx->mode = RK_CRYPTO_HASH_SHA256;
+ break;
+ case MD5_DIGEST_SIZE:
+ rctx->mode = RK_CRYPTO_HASH_MD5;
+ break;
+ default:
+ err = -EINVAL;
+ goto theend;
+ }
+
+ rk_ahash_reg_init(areq);
+
+ while (sg) {
+ reinit_completion(&tctx->dev->complete);
+ tctx->dev->status = 0;
+ crypto_ahash_dma_start(tctx->dev, sg);
+ wait_for_completion_interruptible_timeout(&tctx->dev->complete,
+ msecs_to_jiffies(2000));
+ if (!tctx->dev->status) {
+ dev_err(tctx->dev->dev, "DMA timeout\n");
+ err = -EFAULT;
+ goto theend;
+ }
+ sg = sg_next(sg);
+ }
+
+ /*
+ * It takes the engine some time to process the data after the
+ * last DMA transfer has finished.
+ *
+ * The wait depends on the length of that last transfer, so a
+ * fixed delay cannot be used here.
+ * Polling every 10us keeps the CPU from hammering the status
+ * register while still reacting quickly once the hash engine
+ * reports completion.
+ */
+ while (!CRYPTO_READ(tctx->dev, RK_CRYPTO_HASH_STS))
+ udelay(10);
+
+ for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) {
+ v = readl(tctx->dev->reg + RK_CRYPTO_HASH_DOUT_0 + i * 4);
+ put_unaligned_le32(v, areq->result + i * 4);
+ }
+
+theend:
+ local_bh_disable();
+ crypto_finalize_hash_request(engine, breq, err);
+ local_bh_enable();
+
+ return 0;
+}
+
+static int rk_cra_hash_init(struct crypto_tfm *tfm)
+{
+ struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
+ struct rk_crypto_tmp *algt;
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+
+ const char *alg_name = crypto_tfm_alg_name(tfm);
+
+ algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
+
+ tctx->dev = algt->dev;
+
+ /* for fallback */
+ tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(tctx->fallback_tfm)) {
+ dev_err(tctx->dev->dev, "Could not load fallback driver.\n");
+ return PTR_ERR(tctx->fallback_tfm);
+ }
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct rk_ahash_rctx) +
+ crypto_ahash_reqsize(tctx->fallback_tfm));
+
+ tctx->enginectx.op.do_one_request = rk_hash_run;
+ tctx->enginectx.op.prepare_request = rk_hash_prepare;
+ tctx->enginectx.op.unprepare_request = rk_hash_unprepare;
+
+ return 0;
+}
+
+static void rk_cra_hash_exit(struct crypto_tfm *tfm)
+{
+ struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_ahash(tctx->fallback_tfm);
+}
+
+struct rk_crypto_tmp rk_ahash_sha1 = {
+ .type = ALG_TYPE_HASH,
+ .alg.hash = {
+ .init = rk_ahash_init,
+ .update = rk_ahash_update,
+ .final = rk_ahash_final,
+ .finup = rk_ahash_finup,
+ .export = rk_ahash_export,
+ .import = rk_ahash_import,
+ .digest = rk_ahash_digest,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct sha1_state),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "rk-sha1",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct rk_ahash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = rk_cra_hash_init,
+ .cra_exit = rk_cra_hash_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ }
+};
+
+struct rk_crypto_tmp rk_ahash_sha256 = {
+ .type = ALG_TYPE_HASH,
+ .alg.hash = {
+ .init = rk_ahash_init,
+ .update = rk_ahash_update,
+ .final = rk_ahash_final,
+ .finup = rk_ahash_finup,
+ .export = rk_ahash_export,
+ .import = rk_ahash_import,
+ .digest = rk_ahash_digest,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "rk-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct rk_ahash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = rk_cra_hash_init,
+ .cra_exit = rk_cra_hash_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ }
+};
+
+struct rk_crypto_tmp rk_ahash_md5 = {
+ .type = ALG_TYPE_HASH,
+ .alg.hash = {
+ .init = rk_ahash_init,
+ .update = rk_ahash_update,
+ .final = rk_ahash_final,
+ .finup = rk_ahash_finup,
+ .export = rk_ahash_export,
+ .import = rk_ahash_import,
+ .digest = rk_ahash_digest,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct md5_state),
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "rk-md5",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct rk_ahash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = rk_cra_hash_init,
+ .cra_exit = rk_cra_hash_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ }
+};
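
For reference, the hashes registered above can be exercised from userspace through the AF_ALG socket interface (assuming CONFIG_CRYPTO_USER_API_HASH is enabled; whether "rk-sha1" or another implementation actually serves the request is decided by cra_priority). A minimal sketch, error handling omitted:

#include <linux/if_alg.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha1",
	};
	unsigned char digest[20];	/* SHA1 digest size */
	const char msg[] = "abc";
	int tfmfd, opfd, i;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	opfd = accept(tfmfd, NULL, 0);

	write(opfd, msg, strlen(msg));		/* feed the message */
	read(opfd, digest, sizeof(digest));	/* read back the digest */

	for (i = 0; i < (int)sizeof(digest); i++)
		printf("%02x", digest[i]);
	printf("\n");

	close(opfd);
	close(tfmfd);
	return 0;
}
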
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
new file mode 100644
index 000000000..67a7e05d5
--- /dev/null
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -0,0 +1,603 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Crypto acceleration support for Rockchip RK3288
+ *
+ * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * Author: Zain Wang <zain.wang@rock-chips.com>
+ *
+ * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
+ */
+#include <linux/device.h>
+#include <crypto/scatterwalk.h>
+#include "rk3288_crypto.h"
+
+#define RK_CRYPTO_DEC BIT(0)
+
+static int rk_cipher_need_fallback(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ unsigned int bs = crypto_skcipher_blocksize(tfm);
+ struct scatterlist *sgs, *sgd;
+ unsigned int stodo, dtodo, len;
+
+ if (!req->cryptlen)
+ return true;
+
+ len = req->cryptlen;
+ sgs = req->src;
+ sgd = req->dst;
+ while (sgs && sgd) {
+ if (!IS_ALIGNED(sgs->offset, sizeof(u32))) {
+ return true;
+ }
+ if (!IS_ALIGNED(sgd->offset, sizeof(u32))) {
+ return true;
+ }
+ stodo = min(len, sgs->length);
+ if (stodo % bs) {
+ return true;
+ }
+ dtodo = min(len, sgd->length);
+ if (dtodo % bs) {
+ return true;
+ }
+ if (stodo != dtodo) {
+ return true;
+ }
+ len -= stodo;
+ sgs = sg_next(sgs);
+ sgd = sg_next(sgd);
+ }
+ return false;
+}
+
+static int rk_cipher_fallback(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm);
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq);
+ int err;
+
+ skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
+ skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
+ areq->base.complete, areq->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ if (rctx->mode & RK_CRYPTO_DEC)
+ err = crypto_skcipher_decrypt(&rctx->fallback_req);
+ else
+ err = crypto_skcipher_encrypt(&rctx->fallback_req);
+ return err;
+}
+
+static int rk_handle_req(struct rk_crypto_info *dev,
+ struct skcipher_request *req)
+{
+ struct crypto_engine *engine = dev->engine;
+
+ if (rk_cipher_need_fallback(req))
+ return rk_cipher_fallback(req);
+
+ return crypto_transfer_skcipher_request_to_engine(engine, req);
+}
+
+static int rk_aes_setkey(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+ struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+ ctx->keylen = keylen;
+ memcpy(ctx->key, key, keylen);
+
+ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
+}
+
+static int rk_des_setkey(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ int err;
+
+ err = verify_skcipher_des_key(cipher, key);
+ if (err)
+ return err;
+
+ ctx->keylen = keylen;
+ memcpy(ctx->key, key, keylen);
+
+ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
+}
+
+static int rk_tdes_setkey(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ int err;
+
+ err = verify_skcipher_des3_key(cipher, key);
+ if (err)
+ return err;
+
+ ctx->keylen = keylen;
+ memcpy(ctx->key, key, keylen);
+
+ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
+}
+
+static int rk_aes_ecb_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ rctx->mode = RK_CRYPTO_AES_ECB_MODE;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_aes_ecb_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ rctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_aes_cbc_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ rctx->mode = RK_CRYPTO_AES_CBC_MODE;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_aes_cbc_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ rctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des_ecb_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ rctx->mode = 0;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des_ecb_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ rctx->mode = RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des_cbc_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des_cbc_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des3_ede_ecb_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ rctx->mode = RK_CRYPTO_TDES_SELECT;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des3_ede_ecb_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des3_ede_cbc_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
+ return rk_handle_req(dev, req);
+}
+
+static int rk_des3_ede_cbc_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_crypto_info *dev = ctx->dev;
+
+ rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
+ RK_CRYPTO_DEC;
+ return rk_handle_req(dev, req);
+}
+
+static void rk_ablk_hw_init(struct rk_crypto_info *dev, struct skcipher_request *req)
+{
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ u32 block, conf_reg = 0;
+
+ block = crypto_tfm_alg_blocksize(tfm);
+
+ if (block == DES_BLOCK_SIZE) {
+ rctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
+ RK_CRYPTO_TDES_BYTESWAP_KEY |
+ RK_CRYPTO_TDES_BYTESWAP_IV;
+ CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, rctx->mode);
+ memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, ctx->key, ctx->keylen);
+ conf_reg = RK_CRYPTO_DESSEL;
+ } else {
+ rctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
+ RK_CRYPTO_AES_KEY_CHANGE |
+ RK_CRYPTO_AES_BYTESWAP_KEY |
+ RK_CRYPTO_AES_BYTESWAP_IV;
+ if (ctx->keylen == AES_KEYSIZE_192)
+ rctx->mode |= RK_CRYPTO_AES_192BIT_key;
+ else if (ctx->keylen == AES_KEYSIZE_256)
+ rctx->mode |= RK_CRYPTO_AES_256BIT_key;
+ CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, rctx->mode);
+ memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, ctx->key, ctx->keylen);
+ }
+ conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
+ RK_CRYPTO_BYTESWAP_BRFIFO;
+ CRYPTO_WRITE(dev, RK_CRYPTO_CONF, conf_reg);
+ CRYPTO_WRITE(dev, RK_CRYPTO_INTENA,
+ RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA);
+}
+
+static void crypto_dma_start(struct rk_crypto_info *dev,
+ struct scatterlist *sgs,
+ struct scatterlist *sgd, unsigned int todo)
+{
+ CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, sg_dma_address(sgs));
+ CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, todo);
+ CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, sg_dma_address(sgd));
+ CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START |
+ _SBF(RK_CRYPTO_BLOCK_START, 16));
+}
+
+static int rk_cipher_run(struct crypto_engine *engine, void *async_req)
+{
+ struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq);
+ struct scatterlist *sgs, *sgd;
+ int err = 0;
+ int ivsize = crypto_skcipher_ivsize(tfm);
+ int offset;
+ u8 iv[AES_BLOCK_SIZE];
+ u8 biv[AES_BLOCK_SIZE];
+ u8 *ivtouse = areq->iv;
+ unsigned int len = areq->cryptlen;
+ unsigned int todo;
+
+ ivsize = crypto_skcipher_ivsize(tfm);
+ if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
+ if (rctx->mode & RK_CRYPTO_DEC) {
+ offset = areq->cryptlen - ivsize;
+ scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
+ offset, ivsize, 0);
+ }
+ }
+
+ sgs = areq->src;
+ sgd = areq->dst;
+
+ while (sgs && sgd && len) {
+ if (!sgs->length) {
+ sgs = sg_next(sgs);
+ sgd = sg_next(sgd);
+ continue;
+ }
+ if (rctx->mode & RK_CRYPTO_DEC) {
+ /* we backup last block of source to be used as IV at next step */
+ offset = sgs->length - ivsize;
+ scatterwalk_map_and_copy(biv, sgs, offset, ivsize, 0);
+ }
+ if (sgs == sgd) {
+ err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL);
+ if (err <= 0) {
+ err = -EINVAL;
+ goto theend_iv;
+ }
+ } else {
+ err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE);
+ if (err <= 0) {
+ err = -EINVAL;
+ goto theend_iv;
+ }
+ err = dma_map_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE);
+ if (err <= 0) {
+ err = -EINVAL;
+ goto theend_sgs;
+ }
+ }
+ err = 0;
+ rk_ablk_hw_init(ctx->dev, areq);
+ if (ivsize) {
+ if (ivsize == DES_BLOCK_SIZE)
+ memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_IV_0, ivtouse, ivsize);
+ else
+ memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_IV_0, ivtouse, ivsize);
+ }
+ reinit_completion(&ctx->dev->complete);
+ ctx->dev->status = 0;
+
+ todo = min(sg_dma_len(sgs), len);
+ len -= todo;
+ crypto_dma_start(ctx->dev, sgs, sgd, todo / 4);
+ wait_for_completion_interruptible_timeout(&ctx->dev->complete,
+ msecs_to_jiffies(2000));
+ if (!ctx->dev->status) {
+ dev_err(ctx->dev->dev, "DMA timeout\n");
+ err = -EFAULT;
+ goto theend;
+ }
+ if (sgs == sgd) {
+ dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE);
+ dma_unmap_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE);
+ }
+ if (rctx->mode & RK_CRYPTO_DEC) {
+ memcpy(iv, biv, ivsize);
+ ivtouse = iv;
+ } else {
+ offset = sgd->length - ivsize;
+ scatterwalk_map_and_copy(iv, sgd, offset, ivsize, 0);
+ ivtouse = iv;
+ }
+ sgs = sg_next(sgs);
+ sgd = sg_next(sgd);
+ }
+
+ if (areq->iv && ivsize > 0) {
+ offset = areq->cryptlen - ivsize;
+ if (rctx->mode & RK_CRYPTO_DEC) {
+ memcpy(areq->iv, rctx->backup_iv, ivsize);
+ memzero_explicit(rctx->backup_iv, ivsize);
+ } else {
+ scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
+ ivsize, 0);
+ }
+ }
+
+theend:
+ local_bh_disable();
+ crypto_finalize_skcipher_request(engine, areq, err);
+ local_bh_enable();
+ return 0;
+
+theend_sgs:
+ if (sgs == sgd) {
+ dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE);
+ dma_unmap_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE);
+ }
+theend_iv:
+ return err;
+}
+
+static int rk_ablk_init_tfm(struct crypto_skcipher *tfm)
+{
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ const char *name = crypto_tfm_alg_name(&tfm->base);
+ struct rk_crypto_tmp *algt;
+
+ algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+
+ ctx->dev = algt->dev;
+
+ ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback_tfm)) {
+ dev_err(ctx->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(ctx->fallback_tfm));
+ return PTR_ERR(ctx->fallback_tfm);
+ }
+
+ tfm->reqsize = sizeof(struct rk_cipher_rctx) +
+ crypto_skcipher_reqsize(ctx->fallback_tfm);
+
+ ctx->enginectx.op.do_one_request = rk_cipher_run;
+
+ return 0;
+}
+
+static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm)
+{
+ struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ memzero_explicit(ctx->key, ctx->keylen);
+ crypto_free_skcipher(ctx->fallback_tfm);
+}
+
+struct rk_crypto_tmp rk_ecb_aes_alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-rk",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x0f,
+ .base.cra_module = THIS_MODULE,
+
+ .init = rk_ablk_init_tfm,
+ .exit = rk_ablk_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = rk_aes_setkey,
+ .encrypt = rk_aes_ecb_encrypt,
+ .decrypt = rk_aes_ecb_decrypt,
+ }
+};
+
+struct rk_crypto_tmp rk_cbc_aes_alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-rk",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x0f,
+ .base.cra_module = THIS_MODULE,
+
+ .init = rk_ablk_init_tfm,
+ .exit = rk_ablk_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = rk_aes_setkey,
+ .encrypt = rk_aes_cbc_encrypt,
+ .decrypt = rk_aes_cbc_decrypt,
+ }
+};
+
+struct rk_crypto_tmp rk_ecb_des_alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "ecb-des-rk",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x07,
+ .base.cra_module = THIS_MODULE,
+
+ .init = rk_ablk_init_tfm,
+ .exit = rk_ablk_exit_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = rk_des_setkey,
+ .encrypt = rk_des_ecb_encrypt,
+ .decrypt = rk_des_ecb_decrypt,
+ }
+};
+
+struct rk_crypto_tmp rk_cbc_des_alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "cbc-des-rk",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x07,
+ .base.cra_module = THIS_MODULE,
+
+ .init = rk_ablk_init_tfm,
+ .exit = rk_ablk_exit_tfm,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = rk_des_setkey,
+ .encrypt = rk_des_cbc_encrypt,
+ .decrypt = rk_des_cbc_decrypt,
+ }
+};
+
+struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3-ede-rk",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x07,
+ .base.cra_module = THIS_MODULE,
+
+ .init = rk_ablk_init_tfm,
+ .exit = rk_ablk_exit_tfm,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = rk_tdes_setkey,
+ .encrypt = rk_des3_ede_ecb_encrypt,
+ .decrypt = rk_des3_ede_ecb_decrypt,
+ }
+};
+
+struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3-ede-rk",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
+ .base.cra_alignmask = 0x07,
+ .base.cra_module = THIS_MODULE,
+
+ .init = rk_ablk_init_tfm,
+ .exit = rk_ablk_exit_tfm,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = rk_tdes_setkey,
+ .encrypt = rk_des3_ede_cbc_encrypt,
+ .decrypt = rk_des3_ede_cbc_decrypt,
+ }
+};
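
Finally, a hedged sketch of an in-kernel consumer of the "cbc(aes)" algorithm registered above; whether "cbc-aes-rk" actually backs the transform depends on its cra_priority relative to other implementations. example_encrypt_block() is a hypothetical function, and buf must point to DMA-able memory (not the stack):

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int example_encrypt_block(u8 *buf, const u8 *key, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_skcipher(tfm);
		return -ENOMEM;
	}

	err = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (err)
		goto out;

	/* encrypt one block in place */
	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out:
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return err;
}
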