Diffstat (limited to 'drivers/crypto/amcc')
-rw-r--r--  drivers/crypto/amcc/Makefile            |    4
-rw-r--r--  drivers/crypto/amcc/crypto4xx_alg.c     |  721
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c    | 1549
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.h    |  245
-rw-r--r--  drivers/crypto/amcc/crypto4xx_reg_def.h |  282
-rw-r--r--  drivers/crypto/amcc/crypto4xx_sa.h      |  309
-rw-r--r--  drivers/crypto/amcc/crypto4xx_trng.c    |  129
-rw-r--r--  drivers/crypto/amcc/crypto4xx_trng.h    |   25
8 files changed, 3264 insertions, 0 deletions
diff --git a/drivers/crypto/amcc/Makefile b/drivers/crypto/amcc/Makefile
new file mode 100644
index 000000000..d66239913
--- /dev/null
+++ b/drivers/crypto/amcc/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += crypto4xx.o
+crypto4xx-y := crypto4xx_core.o crypto4xx_alg.o
+crypto4xx-$(CONFIG_HW_RANDOM_PPC4XX) += crypto4xx_trng.o
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
new file mode 100644
index 000000000..ded732242
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -0,0 +1,721 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMCC SoC PPC4xx Crypto Driver
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. James Hsiao <jhsiao@amcc.com>
+ *
+ * This file implements the Linux crypto algorithms.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock_types.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <linux/hash.h>
+#include <crypto/internal/hash.h>
+#include <linux/dma-mapping.h>
+#include <crypto/algapi.h>
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/gcm.h>
+#include <crypto/sha1.h>
+#include <crypto/ctr.h>
+#include <crypto/skcipher.h>
+#include "crypto4xx_reg_def.h"
+#include "crypto4xx_core.h"
+#include "crypto4xx_sa.h"
+
+static void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h,
+ u32 save_iv, u32 ld_h, u32 ld_iv,
+ u32 hdr_proc, u32 h, u32 c, u32 pad_type,
+ u32 op_grp, u32 op, u32 dir)
+{
+ sa->sa_command_0.w = 0;
+ sa->sa_command_0.bf.save_hash_state = save_h;
+ sa->sa_command_0.bf.save_iv = save_iv;
+ sa->sa_command_0.bf.load_hash_state = ld_h;
+ sa->sa_command_0.bf.load_iv = ld_iv;
+ sa->sa_command_0.bf.hdr_proc = hdr_proc;
+ sa->sa_command_0.bf.hash_alg = h;
+ sa->sa_command_0.bf.cipher_alg = c;
+ sa->sa_command_0.bf.pad_type = pad_type & 3;
+ sa->sa_command_0.bf.extend_pad = pad_type >> 2;
+ sa->sa_command_0.bf.op_group = op_grp;
+ sa->sa_command_0.bf.opcode = op;
+ sa->sa_command_0.bf.dir = dir;
+}
+
+static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
+ u32 hmac_mc, u32 cfb, u32 esn,
+ u32 sn_mask, u32 mute, u32 cp_pad,
+ u32 cp_pay, u32 cp_hdr)
+{
+ sa->sa_command_1.w = 0;
+ sa->sa_command_1.bf.crypto_mode31 = (cm & 4) >> 2;
+ sa->sa_command_1.bf.crypto_mode9_8 = cm & 3;
+ sa->sa_command_1.bf.feedback_mode = cfb;
+ sa->sa_command_1.bf.sa_rev = 1;
+ sa->sa_command_1.bf.hmac_muting = hmac_mc;
+ sa->sa_command_1.bf.extended_seq_num = esn;
+ sa->sa_command_1.bf.seq_num_mask = sn_mask;
+ sa->sa_command_1.bf.mutable_bit_proc = mute;
+ sa->sa_command_1.bf.copy_pad = cp_pad;
+ sa->sa_command_1.bf.copy_payload = cp_pay;
+ sa->sa_command_1.bf.copy_hdr = cp_hdr;
+}
+
+static inline int crypto4xx_crypt(struct skcipher_request *req,
+ const unsigned int ivlen, bool decrypt,
+ bool check_blocksize)
+{
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
+ __le32 iv[AES_IV_SIZE];
+
+ if (check_blocksize && !IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
+ return -EINVAL;
+
+ if (ivlen)
+ crypto4xx_memcpy_to_le32(iv, req->iv, ivlen);
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, iv, ivlen, decrypt ? ctx->sa_in : ctx->sa_out,
+ ctx->sa_len, 0, NULL);
+}
+
+int crypto4xx_encrypt_noiv_block(struct skcipher_request *req)
+{
+ return crypto4xx_crypt(req, 0, false, true);
+}
+
+int crypto4xx_encrypt_iv_stream(struct skcipher_request *req)
+{
+ return crypto4xx_crypt(req, AES_IV_SIZE, false, false);
+}
+
+int crypto4xx_decrypt_noiv_block(struct skcipher_request *req)
+{
+ return crypto4xx_crypt(req, 0, true, true);
+}
+
+int crypto4xx_decrypt_iv_stream(struct skcipher_request *req)
+{
+ return crypto4xx_crypt(req, AES_IV_SIZE, true, false);
+}
+
+int crypto4xx_encrypt_iv_block(struct skcipher_request *req)
+{
+ return crypto4xx_crypt(req, AES_IV_SIZE, false, true);
+}
+
+int crypto4xx_decrypt_iv_block(struct skcipher_request *req)
+{
+ return crypto4xx_crypt(req, AES_IV_SIZE, true, true);
+}
+
+/*
+ * AES Functions
+ */
+static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
+ const u8 *key,
+ unsigned int keylen,
+ unsigned char cm,
+ u8 fb)
+{
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct dynamic_sa_ctl *sa;
+ int rc;
+
+ if (keylen != AES_KEYSIZE_256 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_128)
+ return -EINVAL;
+
+ /* Create SA */
+ if (ctx->sa_in || ctx->sa_out)
+ crypto4xx_free_sa(ctx);
+
+ rc = crypto4xx_alloc_sa(ctx, SA_AES128_LEN + (keylen-16) / 4);
+ if (rc)
+ return rc;
+
+ /* Setup SA */
+ sa = ctx->sa_in;
+
+ set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_ECB ?
+ SA_NOT_SAVE_IV : SA_SAVE_IV),
+ SA_NOT_LOAD_HASH, (cm == CRYPTO_MODE_ECB ?
+ SA_LOAD_IV_FROM_SA : SA_LOAD_IV_FROM_STATE),
+ SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
+ SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
+ SA_OP_GROUP_BASIC, SA_OPCODE_DECRYPT,
+ DIR_INBOUND);
+
+ set_dynamic_sa_command_1(sa, cm, SA_HASH_MODE_HASH,
+ fb, SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+ crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa),
+ key, keylen);
+ sa->sa_contents.w = SA_AES_CONTENTS | (keylen << 2);
+ sa->sa_command_1.bf.key_len = keylen >> 3;
+
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = ctx->sa_out;
+ sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+ /*
+ * SA_OPCODE_ENCRYPT is the same value as SA_OPCODE_DECRYPT.
+ * it's the DIR_(IN|OUT)BOUND that matters
+ */
+ sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT;
+
+ return 0;
+}
+
+int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CBC,
+ CRYPTO_FEEDBACK_MODE_NO_FB);
+}
+
+int crypto4xx_setkey_aes_cfb(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CFB,
+ CRYPTO_FEEDBACK_MODE_128BIT_CFB);
+}
+
+int crypto4xx_setkey_aes_ecb(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_ECB,
+ CRYPTO_FEEDBACK_MODE_NO_FB);
+}
+
+int crypto4xx_setkey_aes_ofb(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_OFB,
+ CRYPTO_FEEDBACK_MODE_64BIT_OFB);
+}
+
+int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
+ int rc;
+
+ rc = crypto4xx_setkey_aes(cipher, key, keylen - CTR_RFC3686_NONCE_SIZE,
+ CRYPTO_MODE_CTR, CRYPTO_FEEDBACK_MODE_NO_FB);
+ if (rc)
+ return rc;
+
+ ctx->iv_nonce = cpu_to_le32p((u32 *)&key[keylen -
+ CTR_RFC3686_NONCE_SIZE]);
+
+ return 0;
+}
+
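+/*
+ * Per RFC 3686, the 16-byte counter block fed to AES-CTR is laid out
+ * as nonce (4 bytes, extracted from the key at setkey time) ||
+ * per-request IV (8 bytes) || block counter (4 bytes, starting at 1).
+ * The two helpers below assemble exactly that block for the engine.
+ */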
+int crypto4xx_rfc3686_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
+ __le32 iv[AES_IV_SIZE / 4] = {
+ ctx->iv_nonce,
+ cpu_to_le32p((u32 *) req->iv),
+ cpu_to_le32p((u32 *) (req->iv + 4)),
+ cpu_to_le32(1) };
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, iv, AES_IV_SIZE,
+ ctx->sa_out, ctx->sa_len, 0, NULL);
+}
+
+int crypto4xx_rfc3686_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
+ __le32 iv[AES_IV_SIZE / 4] = {
+ ctx->iv_nonce,
+ cpu_to_le32p((u32 *) req->iv),
+ cpu_to_le32p((u32 *) (req->iv + 4)),
+ cpu_to_le32(1) };
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, iv, AES_IV_SIZE,
+ ctx->sa_out, ctx->sa_len, 0, NULL);
+}
+
+static int
+crypto4xx_ctr_crypt(struct skcipher_request *req, bool encrypt)
+{
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
+ size_t iv_len = crypto_skcipher_ivsize(cipher);
+ unsigned int counter = be32_to_cpup((__be32 *)(req->iv + iv_len - 4));
+ unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
+ AES_BLOCK_SIZE;
+
+	/*
+	 * The hardware uses only the last 32 bits of the IV as the counter,
+	 * while the kernel tests (aes_ctr_enc_tv_template[4], for example)
+	 * expect the whole IV to act as a counter. So fall back to the
+	 * software implementation if the counter is going to overflow.
+	 */
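+	/*
+	 * Illustrative example: if the IV's last word is 0xffffffff and
+	 * nblks = 2, then "counter + nblks" wraps to 1 < counter, so the
+	 * sync fallback below handles the carry into the upper IV words.
+	 */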
+ if (counter + nblks < counter) {
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher.cipher);
+ int ret;
+
+ skcipher_request_set_sync_tfm(subreq, ctx->sw_cipher.cipher);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ ret = encrypt ? crypto_skcipher_encrypt(subreq)
+ : crypto_skcipher_decrypt(subreq);
+ skcipher_request_zero(subreq);
+ return ret;
+ }
+
+ return encrypt ? crypto4xx_encrypt_iv_stream(req)
+ : crypto4xx_decrypt_iv_stream(req);
+}
+
+static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
+ struct crypto_skcipher *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ crypto_sync_skcipher_clear_flags(ctx->sw_cipher.cipher,
+ CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(ctx->sw_cipher.cipher,
+ crypto_skcipher_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
+ return crypto_sync_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen);
+}
+
+int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
+ int rc;
+
+ rc = crypto4xx_sk_setup_fallback(ctx, cipher, key, keylen);
+ if (rc)
+ return rc;
+
+ return crypto4xx_setkey_aes(cipher, key, keylen,
+ CRYPTO_MODE_CTR, CRYPTO_FEEDBACK_MODE_NO_FB);
+}
+
+int crypto4xx_encrypt_ctr(struct skcipher_request *req)
+{
+ return crypto4xx_ctr_crypt(req, true);
+}
+
+int crypto4xx_decrypt_ctr(struct skcipher_request *req)
+{
+ return crypto4xx_ctr_crypt(req, false);
+}
+
+static inline bool crypto4xx_aead_need_fallback(struct aead_request *req,
+ unsigned int len,
+ bool is_ccm, bool decrypt)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+
+ /* authsize has to be a multiple of 4 */
+ if (aead->authsize & 3)
+ return true;
+
+ /*
+ * hardware does not handle cases where plaintext
+ * is less than a block.
+ */
+ if (len < AES_BLOCK_SIZE)
+ return true;
+
+ /* assoc len needs to be a multiple of 4 and <= 1020 */
+ if (req->assoclen & 0x3 || req->assoclen > 1020)
+ return true;
+
+ /* CCM supports only counter field length of 2 and 4 bytes */
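+	/*
+	 * (By kernel convention req->iv[0] holds L - 1, cf. RFC 3610, so
+	 * iv[0] == 1 selects a 2-byte and iv[0] == 3 a 4-byte counter
+	 * field.)
+	 */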
+ if (is_ccm && !(req->iv[0] == 1 || req->iv[0] == 3))
+ return true;
+
+ return false;
+}
+
+static int crypto4xx_aead_fallback(struct aead_request *req,
+ struct crypto4xx_ctx *ctx, bool do_decrypt)
+{
+ struct aead_request *subreq = aead_request_ctx(req);
+
+ aead_request_set_tfm(subreq, ctx->sw_cipher.aead);
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+ return do_decrypt ? crypto_aead_decrypt(subreq) :
+ crypto_aead_encrypt(subreq);
+}
+
+static int crypto4xx_aead_setup_fallback(struct crypto4xx_ctx *ctx,
+ struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ crypto_aead_clear_flags(ctx->sw_cipher.aead, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(ctx->sw_cipher.aead,
+ crypto_aead_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
+ return crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen);
+}
+
+/*
+ * AES-CCM Functions
+ */
+
+int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct dynamic_sa_ctl *sa;
+ int rc = 0;
+
+ rc = crypto4xx_aead_setup_fallback(ctx, cipher, key, keylen);
+ if (rc)
+ return rc;
+
+ if (ctx->sa_in || ctx->sa_out)
+ crypto4xx_free_sa(ctx);
+
+ rc = crypto4xx_alloc_sa(ctx, SA_AES128_CCM_LEN + (keylen - 16) / 4);
+ if (rc)
+ return rc;
+
+ /* Setup SA */
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ sa->sa_contents.w = SA_AES_CCM_CONTENTS | (keylen << 2);
+
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
+ SA_CIPHER_ALG_AES,
+ SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
+ SA_OPCODE_HASH_DECRYPT, DIR_INBOUND);
+
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_NOT_COPY_PAD, SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ sa->sa_command_1.bf.key_len = keylen >> 3;
+
+ crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa), key, keylen);
+
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
+ SA_CIPHER_ALG_AES,
+ SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
+ SA_OPCODE_ENCRYPT_HASH, DIR_OUTBOUND);
+
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_COPY_PAD, SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ sa->sa_command_1.bf.key_len = keylen >> 3;
+ return 0;
+}
+
+static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto4xx_aead_reqctx *rctx = aead_request_ctx(req);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ __le32 iv[16];
+ u32 tmp_sa[SA_AES128_CCM_LEN + 4];
+ struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *)tmp_sa;
+ unsigned int len = req->cryptlen;
+
+ if (decrypt)
+ len -= crypto_aead_authsize(aead);
+
+ if (crypto4xx_aead_need_fallback(req, len, true, decrypt))
+ return crypto4xx_aead_fallback(req, ctx, decrypt);
+
+ memcpy(tmp_sa, decrypt ? ctx->sa_in : ctx->sa_out, ctx->sa_len * 4);
+ sa->sa_command_0.bf.digest_len = crypto_aead_authsize(aead) >> 2;
+
+ if (req->iv[0] == 1) {
+ /* CRYPTO_MODE_AES_ICM */
+ sa->sa_command_1.bf.crypto_mode9_8 = 1;
+ }
+
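+	/*
+	 * Build the CCM counter block A0: zero the trailing counter word
+	 * first, then copy in the flags byte and nonce from req->iv; the
+	 * last req->iv[0] + 1 bytes form the counter field and stay zero.
+	 */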
+ iv[3] = cpu_to_le32(0);
+ crypto4xx_memcpy_to_le32(iv, req->iv, 16 - (req->iv[0] + 1));
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ len, iv, sizeof(iv),
+ sa, ctx->sa_len, req->assoclen, rctx->dst);
+}
+
+int crypto4xx_encrypt_aes_ccm(struct aead_request *req)
+{
+ return crypto4xx_crypt_aes_ccm(req, false);
+}
+
+int crypto4xx_decrypt_aes_ccm(struct aead_request *req)
+{
+ return crypto4xx_crypt_aes_ccm(req, true);
+}
+
+int crypto4xx_setauthsize_aead(struct crypto_aead *cipher,
+ unsigned int authsize)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ return crypto_aead_setauthsize(ctx->sw_cipher.aead, authsize);
+}
+
+/*
+ * AES-GCM Functions
+ */
+
+static int crypto4xx_aes_gcm_validate_keylen(unsigned int keylen)
+{
+ switch (keylen) {
+ case 16:
+ case 24:
+ case 32:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
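+/*
+ * GCM defines the GHASH key as H = E_K(0^128). Compute it once in
+ * software at setkey time so it can be preloaded into the SA's inner
+ * digest field for the engine.
+ */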
+static int crypto4xx_compute_gcm_hash_key_sw(__le32 *hash_start, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_aes_ctx ctx;
+ uint8_t src[16] = { 0 };
+ int rc;
+
+ rc = aes_expandkey(&ctx, key, keylen);
+ if (rc) {
+ pr_err("aes_expandkey() failed: %d\n", rc);
+ return rc;
+ }
+
+ aes_encrypt(&ctx, src, src);
+ crypto4xx_memcpy_to_le32(hash_start, src, 16);
+ memzero_explicit(&ctx, sizeof(ctx));
+ return 0;
+}
+
+int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct dynamic_sa_ctl *sa;
+ int rc = 0;
+
+ if (crypto4xx_aes_gcm_validate_keylen(keylen) != 0)
+ return -EINVAL;
+
+ rc = crypto4xx_aead_setup_fallback(ctx, cipher, key, keylen);
+ if (rc)
+ return rc;
+
+ if (ctx->sa_in || ctx->sa_out)
+ crypto4xx_free_sa(ctx);
+
+ rc = crypto4xx_alloc_sa(ctx, SA_AES128_GCM_LEN + (keylen - 16) / 4);
+ if (rc)
+ return rc;
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+
+ sa->sa_contents.w = SA_AES_GCM_CONTENTS | (keylen << 2);
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_GHASH,
+ SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
+ SA_OP_GROUP_BASIC, SA_OPCODE_HASH_DECRYPT,
+ DIR_INBOUND);
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_ON, SA_MC_DISABLE,
+ SA_NOT_COPY_PAD, SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ sa->sa_command_1.bf.key_len = keylen >> 3;
+
+ crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa),
+ key, keylen);
+
+ rc = crypto4xx_compute_gcm_hash_key_sw(get_dynamic_sa_inner_digest(sa),
+ key, keylen);
+ if (rc) {
+ pr_err("GCM hash key setting failed = %d\n", rc);
+ goto err;
+ }
+
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+ sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT_HASH;
+
+ return 0;
+err:
+ crypto4xx_free_sa(ctx);
+ return rc;
+}
+
+static inline int crypto4xx_crypt_aes_gcm(struct aead_request *req,
+ bool decrypt)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto4xx_aead_reqctx *rctx = aead_request_ctx(req);
+ __le32 iv[4];
+ unsigned int len = req->cryptlen;
+
+ if (decrypt)
+ len -= crypto_aead_authsize(crypto_aead_reqtfm(req));
+
+ if (crypto4xx_aead_need_fallback(req, len, false, decrypt))
+ return crypto4xx_aead_fallback(req, ctx, decrypt);
+
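+	/*
+	 * With a 96-bit IV, GCM's initial counter block J0 is simply
+	 * IV || 0x00000001 (NIST SP 800-38D), which is what the two
+	 * lines below construct.
+	 */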
+ crypto4xx_memcpy_to_le32(iv, req->iv, GCM_AES_IV_SIZE);
+ iv[3] = cpu_to_le32(1);
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ len, iv, sizeof(iv),
+ decrypt ? ctx->sa_in : ctx->sa_out,
+ ctx->sa_len, req->assoclen, rctx->dst);
+}
+
+int crypto4xx_encrypt_aes_gcm(struct aead_request *req)
+{
+ return crypto4xx_crypt_aes_gcm(req, false);
+}
+
+int crypto4xx_decrypt_aes_gcm(struct aead_request *req)
+{
+ return crypto4xx_crypt_aes_gcm(req, true);
+}
+
+/*
+ * HASH SHA1 Functions
+ */
+static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
+ unsigned int sa_len,
+ unsigned char ha,
+ unsigned char hm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg;
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct dynamic_sa_hash160 *sa;
+ int rc;
+
+ my_alg = container_of(__crypto_ahash_alg(alg), struct crypto4xx_alg,
+ alg.u.hash);
+ ctx->dev = my_alg->dev;
+
+ /* Create SA */
+ if (ctx->sa_in || ctx->sa_out)
+ crypto4xx_free_sa(ctx);
+
+ rc = crypto4xx_alloc_sa(ctx, sa_len);
+ if (rc)
+ return rc;
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct crypto4xx_ctx));
+ sa = (struct dynamic_sa_hash160 *)ctx->sa_in;
+ set_dynamic_sa_command_0(&sa->ctrl, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
+ SA_NO_HEADER_PROC, ha, SA_CIPHER_ALG_NULL,
+ SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
+ SA_OPCODE_HASH, DIR_INBOUND);
+ set_dynamic_sa_command_1(&sa->ctrl, 0, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+ /* Need to zero hash digest in SA */
+ memset(sa->inner_digest, 0, sizeof(sa->inner_digest));
+ memset(sa->outer_digest, 0, sizeof(sa->outer_digest));
+
+ return 0;
+}
+
+int crypto4xx_hash_init(struct ahash_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ int ds;
+ struct dynamic_sa_ctl *sa;
+
+ sa = ctx->sa_in;
+ ds = crypto_ahash_digestsize(
+ __crypto_ahash_cast(req->base.tfm));
+ sa->sa_command_0.bf.digest_len = ds >> 2;
+ sa->sa_command_0.bf.load_hash_state = SA_LOAD_HASH_FROM_SA;
+
+ return 0;
+}
+
+int crypto4xx_hash_update(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct scatterlist dst;
+ unsigned int ds = crypto_ahash_digestsize(ahash);
+
+ sg_init_one(&dst, req->result, ds);
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
+ req->nbytes, NULL, 0, ctx->sa_in,
+ ctx->sa_len, 0, NULL);
+}
+
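+/*
+ * The engine already wrote the final digest into req->result as part
+ * of crypto4xx_hash_update(), so there is nothing left to do here.
+ */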
+int crypto4xx_hash_final(struct ahash_request *req)
+{
+ return 0;
+}
+
+int crypto4xx_hash_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct scatterlist dst;
+ unsigned int ds = crypto_ahash_digestsize(ahash);
+
+ sg_init_one(&dst, req->result, ds);
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
+ req->nbytes, NULL, 0, ctx->sa_in,
+ ctx->sa_len, 0, NULL);
+}
+
+/*
+ * SHA1 Algorithm
+ */
+int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm)
+{
+ return crypto4xx_hash_alg_init(tfm, SA_HASH160_LEN, SA_HASH_ALG_SHA1,
+ SA_HASH_MODE_HASH);
+}
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
new file mode 100644
index 000000000..50dc78382
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -0,0 +1,1549 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMCC SoC PPC4xx Crypto Driver
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. James Hsiao <jhsiao@amcc.com>
+ *
+ * This file implements AMCC crypto offload Linux device driver for use with
+ * Linux CryptoAPI.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock_types.h>
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <asm/dcr.h>
+#include <asm/dcr-regs.h>
+#include <asm/cacheflush.h>
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/gcm.h>
+#include <crypto/sha1.h>
+#include <crypto/rng.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/rng.h>
+#include <crypto/internal/skcipher.h>
+#include "crypto4xx_reg_def.h"
+#include "crypto4xx_core.h"
+#include "crypto4xx_sa.h"
+#include "crypto4xx_trng.h"
+
+#define PPC4XX_SEC_VERSION_STR "0.5"
+
+/*
+ * PPC4xx Crypto Engine Initialization Routine
+ */
+static void crypto4xx_hw_init(struct crypto4xx_device *dev)
+{
+ union ce_ring_size ring_size;
+ union ce_ring_control ring_ctrl;
+ union ce_part_ring_size part_ring_size;
+ union ce_io_threshold io_threshold;
+ u32 rand_num;
+ union ce_pe_dma_cfg pe_dma_cfg;
+ u32 device_ctrl;
+
+ writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
+	/* set up PE DMA: assert reset on SG, PDR and PE, then release it */
+ pe_dma_cfg.w = 0;
+ pe_dma_cfg.bf.bo_sgpd_en = 1;
+ pe_dma_cfg.bf.bo_data_en = 0;
+ pe_dma_cfg.bf.bo_sa_en = 1;
+ pe_dma_cfg.bf.bo_pd_en = 1;
+ pe_dma_cfg.bf.dynamic_sa_en = 1;
+ pe_dma_cfg.bf.reset_sg = 1;
+ pe_dma_cfg.bf.reset_pdr = 1;
+ pe_dma_cfg.bf.reset_pe = 1;
+ writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
+	/* release the reset of SG, PDR and PE; keep pe_mode disabled for now */
+ pe_dma_cfg.bf.pe_mode = 0;
+ pe_dma_cfg.bf.reset_sg = 0;
+ pe_dma_cfg.bf.reset_pdr = 0;
+ pe_dma_cfg.bf.reset_pe = 0;
+ pe_dma_cfg.bf.bo_td_en = 0;
+ writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
+ writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
+ writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
+ writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
+ get_random_bytes(&rand_num, sizeof(rand_num));
+ writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
+ get_random_bytes(&rand_num, sizeof(rand_num));
+ writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
+ ring_size.w = 0;
+ ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
+ ring_size.bf.ring_size = PPC4XX_NUM_PD;
+ writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
+ ring_ctrl.w = 0;
+ writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
+ device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
+ device_ctrl |= PPC4XX_DC_3DES_EN;
+ writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
+ writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
+ writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
+ part_ring_size.w = 0;
+ part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
+ part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
+ writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
+ writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
+ io_threshold.w = 0;
+ io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
+ io_threshold.bf.input_threshold = PPC4XX_INPUT_THRESHOLD;
+ writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
+ writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
+ writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
+ writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
+ writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
+ writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
+ writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
+ writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
+	/* keep SG and PDR out of reset and now enable the packet engine */
+ pe_dma_cfg.bf.pe_mode = 1;
+ pe_dma_cfg.bf.reset_sg = 0;
+ pe_dma_cfg.bf.reset_pdr = 0;
+ pe_dma_cfg.bf.reset_pe = 0;
+ pe_dma_cfg.bf.bo_td_en = 0;
+ writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
+	/* clear all pending interrupts */
+ writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
+ writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
+ writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
+ writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
+ if (dev->is_revb) {
+ writel(PPC4XX_INT_TIMEOUT_CNT_REVB << 10,
+ dev->ce_base + CRYPTO4XX_INT_TIMEOUT_CNT);
+ writel(PPC4XX_PD_DONE_INT | PPC4XX_TMO_ERR_INT,
+ dev->ce_base + CRYPTO4XX_INT_EN);
+ } else {
+ writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
+ }
+}
+
+int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
+{
+ ctx->sa_in = kcalloc(size, 4, GFP_ATOMIC);
+ if (ctx->sa_in == NULL)
+ return -ENOMEM;
+
+ ctx->sa_out = kcalloc(size, 4, GFP_ATOMIC);
+ if (ctx->sa_out == NULL) {
+ kfree(ctx->sa_in);
+ ctx->sa_in = NULL;
+ return -ENOMEM;
+ }
+
+ ctx->sa_len = size;
+
+ return 0;
+}
+
+void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
+{
+ kfree(ctx->sa_in);
+ ctx->sa_in = NULL;
+ kfree(ctx->sa_out);
+ ctx->sa_out = NULL;
+ ctx->sa_len = 0;
+}
+
+/*
+ * Allocate memory for the packet descriptor ring, its per-descriptor
+ * uinfo array and the shadow SA/state-record pools.
+ */
+static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
+{
+ int i;
+ dev->pdr = dma_alloc_coherent(dev->core_dev->device,
+ sizeof(struct ce_pd) * PPC4XX_NUM_PD,
+ &dev->pdr_pa, GFP_KERNEL);
+ if (!dev->pdr)
+ return -ENOMEM;
+
+ dev->pdr_uinfo = kcalloc(PPC4XX_NUM_PD, sizeof(struct pd_uinfo),
+ GFP_KERNEL);
+ if (!dev->pdr_uinfo) {
+ dma_free_coherent(dev->core_dev->device,
+ sizeof(struct ce_pd) * PPC4XX_NUM_PD,
+ dev->pdr,
+ dev->pdr_pa);
+ return -ENOMEM;
+ }
+ dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
+ sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
+ &dev->shadow_sa_pool_pa,
+ GFP_KERNEL);
+ if (!dev->shadow_sa_pool)
+ return -ENOMEM;
+
+ dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
+ sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
+ &dev->shadow_sr_pool_pa, GFP_KERNEL);
+ if (!dev->shadow_sr_pool)
+ return -ENOMEM;
+ for (i = 0; i < PPC4XX_NUM_PD; i++) {
+ struct ce_pd *pd = &dev->pdr[i];
+ struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[i];
+
+ pd->sa = dev->shadow_sa_pool_pa +
+ sizeof(union shadow_sa_buf) * i;
+
+ /* alloc 256 bytes which is enough for any kind of dynamic sa */
+ pd_uinfo->sa_va = &dev->shadow_sa_pool[i].sa;
+
+ /* alloc state record */
+ pd_uinfo->sr_va = &dev->shadow_sr_pool[i];
+ pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
+ sizeof(struct sa_state_record) * i;
+ }
+
+ return 0;
+}
+
+static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
+{
+ if (dev->pdr)
+ dma_free_coherent(dev->core_dev->device,
+ sizeof(struct ce_pd) * PPC4XX_NUM_PD,
+ dev->pdr, dev->pdr_pa);
+
+ if (dev->shadow_sa_pool)
+ dma_free_coherent(dev->core_dev->device,
+ sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
+ dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
+
+ if (dev->shadow_sr_pool)
+ dma_free_coherent(dev->core_dev->device,
+ sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
+ dev->shadow_sr_pool, dev->shadow_sr_pool_pa);
+
+ kfree(dev->pdr_uinfo);
+}
+
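+/*
+ * Classic ring-buffer allocation: one slot is sacrificed so that
+ * head == tail unambiguously means "empty", while
+ * (head + 1) % PPC4XX_NUM_PD == tail means "full".
+ */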
+static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
+{
+ u32 retval;
+ u32 tmp;
+
+ retval = dev->pdr_head;
+ tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;
+
+ if (tmp == dev->pdr_tail)
+ return ERING_WAS_FULL;
+
+ dev->pdr_head = tmp;
+
+ return retval;
+}
+
+static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
+{
+ struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
+ u32 tail;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->core_dev->lock, flags);
+ pd_uinfo->state = PD_ENTRY_FREE;
+
+ if (dev->pdr_tail != PPC4XX_LAST_PD)
+ dev->pdr_tail++;
+ else
+ dev->pdr_tail = 0;
+ tail = dev->pdr_tail;
+ spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+
+ return tail;
+}
+
+/*
+ * Allocate memory for the gather descriptor ring; no buffers need to
+ * be allocated for this ring, since the gather descriptors point at
+ * the caller's source pages.
+ */
+static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
+{
+ dev->gdr = dma_alloc_coherent(dev->core_dev->device,
+ sizeof(struct ce_gd) * PPC4XX_NUM_GD,
+ &dev->gdr_pa, GFP_KERNEL);
+ if (!dev->gdr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
+{
+ if (dev->gdr)
+ dma_free_coherent(dev->core_dev->device,
+ sizeof(struct ce_gd) * PPC4XX_NUM_GD,
+ dev->gdr, dev->gdr_pa);
+}
+
+/*
+ * When this function is called, preemption and interrupts must be
+ * disabled.
+ */
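+/*
+ * A batch of n gather descriptors is granted only if the new head
+ * would not land inside the occupied [tail, head) region, i.e. the
+ * ring still has n consecutive free slots.
+ */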
+static u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
+{
+ u32 retval;
+ u32 tmp;
+
+ if (n >= PPC4XX_NUM_GD)
+ return ERING_WAS_FULL;
+
+ retval = dev->gdr_head;
+ tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
+ if (dev->gdr_head > dev->gdr_tail) {
+ if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
+ return ERING_WAS_FULL;
+ } else if (dev->gdr_head < dev->gdr_tail) {
+ if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
+ return ERING_WAS_FULL;
+ }
+ dev->gdr_head = tmp;
+
+ return retval;
+}
+
+static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->core_dev->lock, flags);
+ if (dev->gdr_tail == dev->gdr_head) {
+ spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+ return 0;
+ }
+
+ if (dev->gdr_tail != PPC4XX_LAST_GD)
+ dev->gdr_tail++;
+ else
+ dev->gdr_tail = 0;
+
+ spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+
+ return 0;
+}
+
+static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
+ dma_addr_t *gd_dma, u32 idx)
+{
+ *gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;
+
+ return &dev->gdr[idx];
+}
+
+/*
+ * Allocate memory for the scatter descriptor ring and its backing
+ * buffers; each scatter descriptor is pointed at its own
+ * PPC4XX_SD_BUFFER_SIZE slice of the buffer.
+ */
+static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
+{
+ int i;
+
+ dev->scatter_buffer_va =
+ dma_alloc_coherent(dev->core_dev->device,
+ PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
+ &dev->scatter_buffer_pa, GFP_KERNEL);
+ if (!dev->scatter_buffer_va)
+ return -ENOMEM;
+
+ /* alloc memory for scatter descriptor ring */
+ dev->sdr = dma_alloc_coherent(dev->core_dev->device,
+ sizeof(struct ce_sd) * PPC4XX_NUM_SD,
+ &dev->sdr_pa, GFP_KERNEL);
+ if (!dev->sdr)
+ return -ENOMEM;
+
+ for (i = 0; i < PPC4XX_NUM_SD; i++) {
+ dev->sdr[i].ptr = dev->scatter_buffer_pa +
+ PPC4XX_SD_BUFFER_SIZE * i;
+ }
+
+ return 0;
+}
+
+static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
+{
+ if (dev->sdr)
+ dma_free_coherent(dev->core_dev->device,
+ sizeof(struct ce_sd) * PPC4XX_NUM_SD,
+ dev->sdr, dev->sdr_pa);
+
+ if (dev->scatter_buffer_va)
+ dma_free_coherent(dev->core_dev->device,
+ PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
+ dev->scatter_buffer_va,
+ dev->scatter_buffer_pa);
+}
+
+/*
+ * When this function is called, preemption and interrupts must be
+ * disabled.
+ */
+static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
+{
+ u32 retval;
+ u32 tmp;
+
+ if (n >= PPC4XX_NUM_SD)
+ return ERING_WAS_FULL;
+
+ retval = dev->sdr_head;
+ tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
+	if (dev->sdr_head > dev->sdr_tail) {
+ if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
+ return ERING_WAS_FULL;
+ } else if (dev->sdr_head < dev->sdr_tail) {
+ if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
+ return ERING_WAS_FULL;
+	} /* the head == tail (empty ring) case is already taken care of */
+ dev->sdr_head = tmp;
+
+ return retval;
+}
+
+static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->core_dev->lock, flags);
+ if (dev->sdr_tail == dev->sdr_head) {
+ spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+ return 0;
+ }
+ if (dev->sdr_tail != PPC4XX_LAST_SD)
+ dev->sdr_tail++;
+ else
+ dev->sdr_tail = 0;
+ spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+
+ return 0;
+}
+
+static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
+ dma_addr_t *sd_dma, u32 idx)
+{
+ *sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;
+
+ return &dev->sdr[idx];
+}
+
+static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
+ struct ce_pd *pd,
+ struct pd_uinfo *pd_uinfo,
+ u32 nbytes,
+ struct scatterlist *dst)
+{
+ unsigned int first_sd = pd_uinfo->first_sd;
+ unsigned int last_sd;
+ unsigned int overflow = 0;
+ unsigned int to_copy;
+ unsigned int dst_start = 0;
+
+	/*
+	 * Because the scatter buffers are all neatly organized in one
+	 * big contiguous ringbuffer, scatterwalk_map_and_copy() can
+	 * be instructed to copy a range of buffers in one go.
+	 */
+
+ last_sd = (first_sd + pd_uinfo->num_sd);
+ if (last_sd > PPC4XX_LAST_SD) {
+ last_sd = PPC4XX_LAST_SD;
+ overflow = last_sd % PPC4XX_NUM_SD;
+ }
+
+ while (nbytes) {
+ void *buf = dev->scatter_buffer_va +
+ first_sd * PPC4XX_SD_BUFFER_SIZE;
+
+ to_copy = min(nbytes, PPC4XX_SD_BUFFER_SIZE *
+ (1 + last_sd - first_sd));
+ scatterwalk_map_and_copy(buf, dst, dst_start, to_copy, 1);
+ nbytes -= to_copy;
+
+ if (overflow) {
+ first_sd = 0;
+ last_sd = overflow;
+ dst_start += to_copy;
+ overflow = 0;
+ }
+ }
+}
+
+static void crypto4xx_copy_digest_to_dst(void *dst,
+ struct pd_uinfo *pd_uinfo,
+ struct crypto4xx_ctx *ctx)
+{
+ struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+
+ if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
+ memcpy(dst, pd_uinfo->sr_va->save_digest,
+ SA_HASH_ALG_SHA1_DIGEST_SIZE);
+ }
+}
+
+static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
+ struct pd_uinfo *pd_uinfo)
+{
+ int i;
+ if (pd_uinfo->num_gd) {
+ for (i = 0; i < pd_uinfo->num_gd; i++)
+ crypto4xx_put_gd_to_gdr(dev);
+ pd_uinfo->first_gd = 0xffffffff;
+ pd_uinfo->num_gd = 0;
+ }
+ if (pd_uinfo->num_sd) {
+ for (i = 0; i < pd_uinfo->num_sd; i++)
+ crypto4xx_put_sd_to_sdr(dev);
+
+ pd_uinfo->first_sd = 0xffffffff;
+ pd_uinfo->num_sd = 0;
+ }
+}
+
+static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
+ struct pd_uinfo *pd_uinfo,
+ struct ce_pd *pd)
+{
+ struct skcipher_request *req;
+ struct scatterlist *dst;
+
+ req = skcipher_request_cast(pd_uinfo->async_req);
+
+ if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
+ crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
+ req->cryptlen, req->dst);
+ } else {
+ dst = pd_uinfo->dest_va;
+ dma_unmap_page(dev->core_dev->device, pd->dest, dst->length,
+ DMA_FROM_DEVICE);
+ }
+
+ if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+
+ crypto4xx_memcpy_from_le32((u32 *)req->iv,
+ pd_uinfo->sr_va->save_iv,
+ crypto_skcipher_ivsize(skcipher));
+ }
+
+ crypto4xx_ret_sg_desc(dev, pd_uinfo);
+
+ if (pd_uinfo->state & PD_ENTRY_BUSY)
+ skcipher_request_complete(req, -EINPROGRESS);
+ skcipher_request_complete(req, 0);
+}
+
+static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
+ struct pd_uinfo *pd_uinfo)
+{
+ struct crypto4xx_ctx *ctx;
+ struct ahash_request *ahash_req;
+
+ ahash_req = ahash_request_cast(pd_uinfo->async_req);
+ ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(ahash_req));
+
+ crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, ctx);
+ crypto4xx_ret_sg_desc(dev, pd_uinfo);
+
+ if (pd_uinfo->state & PD_ENTRY_BUSY)
+ ahash_request_complete(ahash_req, -EINPROGRESS);
+ ahash_request_complete(ahash_req, 0);
+}
+
+static void crypto4xx_aead_done(struct crypto4xx_device *dev,
+ struct pd_uinfo *pd_uinfo,
+ struct ce_pd *pd)
+{
+ struct aead_request *aead_req = container_of(pd_uinfo->async_req,
+ struct aead_request, base);
+ struct scatterlist *dst = pd_uinfo->dest_va;
+ size_t cp_len = crypto_aead_authsize(
+ crypto_aead_reqtfm(aead_req));
+ u32 icv[AES_BLOCK_SIZE];
+ int err = 0;
+
+ if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
+ crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
+ pd->pd_ctl_len.bf.pkt_len,
+ dst);
+ } else {
+ dma_unmap_page(dev->core_dev->device, pd->dest, dst->length,
+ DMA_FROM_DEVICE);
+ }
+
+ if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
+ /* append icv at the end */
+ crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
+ sizeof(icv));
+
+ scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
+ cp_len, 1);
+ } else {
+ /* check icv at the end */
+ scatterwalk_map_and_copy(icv, aead_req->src,
+ aead_req->assoclen + aead_req->cryptlen -
+ cp_len, cp_len, 0);
+
+ crypto4xx_memcpy_from_le32(icv, icv, sizeof(icv));
+
+ if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
+ err = -EBADMSG;
+ }
+
+ crypto4xx_ret_sg_desc(dev, pd_uinfo);
+
+ if (pd->pd_ctl.bf.status & 0xff) {
+ if (!__ratelimit(&dev->aead_ratelimit)) {
+ if (pd->pd_ctl.bf.status & 2)
+ pr_err("pad fail error\n");
+ if (pd->pd_ctl.bf.status & 4)
+ pr_err("seqnum fail\n");
+ if (pd->pd_ctl.bf.status & 8)
+ pr_err("error _notify\n");
+ pr_err("aead return err status = 0x%02x\n",
+ pd->pd_ctl.bf.status & 0xff);
+ pr_err("pd pad_ctl = 0x%08x\n",
+ pd->pd_ctl.bf.pd_pad_ctl);
+ }
+ err = -EINVAL;
+ }
+
+ if (pd_uinfo->state & PD_ENTRY_BUSY)
+ aead_request_complete(aead_req, -EINPROGRESS);
+
+ aead_request_complete(aead_req, err);
+}
+
+static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
+{
+ struct ce_pd *pd = &dev->pdr[idx];
+ struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
+
+ switch (crypto_tfm_alg_type(pd_uinfo->async_req->tfm)) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ crypto4xx_cipher_done(dev, pd_uinfo, pd);
+ break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ crypto4xx_aead_done(dev, pd_uinfo, pd);
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ crypto4xx_ahash_done(dev, pd_uinfo);
+ break;
+ }
+}
+
+static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
+{
+ crypto4xx_destroy_pdr(core_dev->dev);
+ crypto4xx_destroy_gdr(core_dev->dev);
+ crypto4xx_destroy_sdr(core_dev->dev);
+ iounmap(core_dev->dev->ce_base);
+ kfree(core_dev->dev);
+ kfree(core_dev);
+}
+
+static u32 get_next_gd(u32 current)
+{
+ if (current != PPC4XX_LAST_GD)
+ return current + 1;
+ else
+ return 0;
+}
+
+static u32 get_next_sd(u32 current)
+{
+ if (current != PPC4XX_LAST_SD)
+ return current + 1;
+ else
+ return 0;
+}
+
+int crypto4xx_build_pd(struct crypto_async_request *req,
+ struct crypto4xx_ctx *ctx,
+ struct scatterlist *src,
+ struct scatterlist *dst,
+ const unsigned int datalen,
+ const __le32 *iv, const u32 iv_len,
+ const struct dynamic_sa_ctl *req_sa,
+ const unsigned int sa_len,
+ const unsigned int assoclen,
+ struct scatterlist *_dst)
+{
+ struct crypto4xx_device *dev = ctx->dev;
+ struct dynamic_sa_ctl *sa;
+ struct ce_gd *gd;
+ struct ce_pd *pd;
+ u32 num_gd, num_sd;
+ u32 fst_gd = 0xffffffff;
+ u32 fst_sd = 0xffffffff;
+ u32 pd_entry;
+ unsigned long flags;
+ struct pd_uinfo *pd_uinfo;
+ unsigned int nbytes = datalen;
+ size_t offset_to_sr_ptr;
+ u32 gd_idx = 0;
+ int tmp;
+ bool is_busy, force_sd;
+
+	/*
+	 * There's a very subtle, disguised "bug" in the hardware that
+	 * gets indirectly mentioned in 18.1.3.5 Encryption/Decryption
+	 * of the hardware spec:
+	 * *drum roll* the AES/(T)DES OFB and CFB modes are listed as
+	 * operation modes for >>> "Block ciphers" <<<.
+	 *
+	 * To work around this issue and stop the hardware from causing
+	 * "overran dst buffer" errors on ciphertexts that are not a
+	 * multiple of 16 (AES_BLOCK_SIZE), we force the driver to use
+	 * the scatter buffers.
+	 */
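+	/*
+	 * Illustrative example: a 20-byte AES-CFB request (20 % 16 != 0)
+	 * is steered through the scatter ring, so whatever the engine
+	 * writes past the partial block lands in driver-owned scatter
+	 * buffers instead of overrunning req->dst.
+	 */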
+ force_sd = (req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_CFB
+ || req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_OFB)
+ && (datalen % AES_BLOCK_SIZE);
+
+ /* figure how many gd are needed */
+ tmp = sg_nents_for_len(src, assoclen + datalen);
+ if (tmp < 0) {
+ dev_err(dev->core_dev->device, "Invalid number of src SG.\n");
+ return tmp;
+ }
+ if (tmp == 1)
+ tmp = 0;
+ num_gd = tmp;
+
+ if (assoclen) {
+ nbytes += assoclen;
+ dst = scatterwalk_ffwd(_dst, dst, assoclen);
+ }
+
+ /* figure how many sd are needed */
+ if (sg_is_last(dst) && force_sd == false) {
+ num_sd = 0;
+ } else {
+ if (datalen > PPC4XX_SD_BUFFER_SIZE) {
+ num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
+ if (datalen % PPC4XX_SD_BUFFER_SIZE)
+ num_sd++;
+ } else {
+ num_sd = 1;
+ }
+ }
+
+	/*
+	 * The following section of code needs to be protected: the gather
+	 * and scatter ring allocations have to be made consecutively. If
+	 * we run out of any kind of descriptor, the descriptors already
+	 * obtained must be returned to their original place.
+	 */
+ spin_lock_irqsave(&dev->core_dev->lock, flags);
+	/*
+	 * Let the caller know to slow down once more than 13/16ths = 81%
+	 * of the available data contexts are being used simultaneously.
+	 *
+	 * With PPC4XX_NUM_PD = 256, this will leave a "backlog queue" for
+	 * 31 more contexts before new requests have to be rejected.
+	 */
+ if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
+ is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
+ ((PPC4XX_NUM_PD * 13) / 16);
+ } else {
+		/*
+		 * To fix contention issues between ipsec (no backlog) and
+		 * dm-crypt (backlog), reserve 32 entries for "no backlog"
+		 * data contexts.
+		 */
+ is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
+ ((PPC4XX_NUM_PD * 15) / 16);
+
+ if (is_busy) {
+ spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+ return -EBUSY;
+ }
+ }
+
+ if (num_gd) {
+ fst_gd = crypto4xx_get_n_gd(dev, num_gd);
+ if (fst_gd == ERING_WAS_FULL) {
+ spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+ return -EAGAIN;
+ }
+ }
+ if (num_sd) {
+ fst_sd = crypto4xx_get_n_sd(dev, num_sd);
+ if (fst_sd == ERING_WAS_FULL) {
+ if (num_gd)
+ dev->gdr_head = fst_gd;
+ spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+ return -EAGAIN;
+ }
+ }
+ pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
+ if (pd_entry == ERING_WAS_FULL) {
+ if (num_gd)
+ dev->gdr_head = fst_gd;
+ if (num_sd)
+ dev->sdr_head = fst_sd;
+ spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+ return -EAGAIN;
+ }
+ spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+
+ pd = &dev->pdr[pd_entry];
+ pd->sa_len = sa_len;
+
+ pd_uinfo = &dev->pdr_uinfo[pd_entry];
+ pd_uinfo->num_gd = num_gd;
+ pd_uinfo->num_sd = num_sd;
+ pd_uinfo->dest_va = dst;
+ pd_uinfo->async_req = req;
+
+ if (iv_len)
+ memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);
+
+ sa = pd_uinfo->sa_va;
+ memcpy(sa, req_sa, sa_len * 4);
+
+ sa->sa_command_1.bf.hash_crypto_offset = (assoclen >> 2);
+ offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
+ *(u32 *)((unsigned long)sa + offset_to_sr_ptr) = pd_uinfo->sr_pa;
+
+ if (num_gd) {
+ dma_addr_t gd_dma;
+ struct scatterlist *sg;
+
+ /* get first gd we are going to use */
+ gd_idx = fst_gd;
+ pd_uinfo->first_gd = fst_gd;
+ gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
+ pd->src = gd_dma;
+ /* enable gather */
+ sa->sa_command_0.bf.gather = 1;
+ /* walk the sg, and setup gather array */
+
+ sg = src;
+ while (nbytes) {
+ size_t len;
+
+ len = min(sg->length, nbytes);
+ gd->ptr = dma_map_page(dev->core_dev->device,
+ sg_page(sg), sg->offset, len, DMA_TO_DEVICE);
+ gd->ctl_len.len = len;
+ gd->ctl_len.done = 0;
+ gd->ctl_len.ready = 1;
+ if (len >= nbytes)
+ break;
+
+ nbytes -= sg->length;
+ gd_idx = get_next_gd(gd_idx);
+ gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
+ sg = sg_next(sg);
+ }
+ } else {
+ pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
+ src->offset, min(nbytes, src->length),
+ DMA_TO_DEVICE);
+ /*
+ * Disable gather in sa command
+ */
+ sa->sa_command_0.bf.gather = 0;
+ /*
+ * Indicate gather array is not used
+ */
+ pd_uinfo->first_gd = 0xffffffff;
+ }
+ if (!num_sd) {
+		/*
+		 * We know the application gave us dst as one whole piece of
+		 * memory; no need to use the scatter ring.
+		 */
+ pd_uinfo->first_sd = 0xffffffff;
+ sa->sa_command_0.bf.scatter = 0;
+ pd->dest = (u32)dma_map_page(dev->core_dev->device,
+ sg_page(dst), dst->offset,
+ min(datalen, dst->length),
+ DMA_TO_DEVICE);
+ } else {
+ dma_addr_t sd_dma;
+ struct ce_sd *sd = NULL;
+
+ u32 sd_idx = fst_sd;
+ nbytes = datalen;
+ sa->sa_command_0.bf.scatter = 1;
+ pd_uinfo->first_sd = fst_sd;
+ sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
+ pd->dest = sd_dma;
+ /* setup scatter descriptor */
+ sd->ctl.done = 0;
+ sd->ctl.rdy = 1;
+		/* sd->ptr should be set up by the sd_init routine */
+ if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
+ nbytes -= PPC4XX_SD_BUFFER_SIZE;
+ else
+ nbytes = 0;
+ while (nbytes) {
+ sd_idx = get_next_sd(sd_idx);
+ sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
+ /* setup scatter descriptor */
+ sd->ctl.done = 0;
+ sd->ctl.rdy = 1;
+ if (nbytes >= PPC4XX_SD_BUFFER_SIZE) {
+ nbytes -= PPC4XX_SD_BUFFER_SIZE;
+ } else {
+ /*
+ * SD entry can hold PPC4XX_SD_BUFFER_SIZE,
+ * which is more than nbytes, so done.
+ */
+ nbytes = 0;
+ }
+ }
+ }
+
+ pd->pd_ctl.w = PD_CTL_HOST_READY |
+ ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) ||
+ (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
+ PD_CTL_HASH_FINAL : 0);
+ pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
+ pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);
+
+ wmb();
+ /* write any value to push engine to read a pd */
+ writel(0, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
+ writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
+ return is_busy ? -EBUSY : -EINPROGRESS;
+}
+
+/*
+ * Algorithm Registration Functions
+ */
+static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg,
+ struct crypto4xx_ctx *ctx)
+{
+ ctx->dev = amcc_alg->dev;
+ ctx->sa_in = NULL;
+ ctx->sa_out = NULL;
+ ctx->sa_len = 0;
+}
+
+static int crypto4xx_sk_init(struct crypto_skcipher *sk)
+{
+ struct skcipher_alg *alg = crypto_skcipher_alg(sk);
+ struct crypto4xx_alg *amcc_alg;
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk);
+
+ if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
+ ctx->sw_cipher.cipher =
+ crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->sw_cipher.cipher))
+ return PTR_ERR(ctx->sw_cipher.cipher);
+ }
+
+ amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
+ crypto4xx_ctx_init(amcc_alg, ctx);
+ return 0;
+}
+
+static void crypto4xx_common_exit(struct crypto4xx_ctx *ctx)
+{
+ crypto4xx_free_sa(ctx);
+}
+
+static void crypto4xx_sk_exit(struct crypto_skcipher *sk)
+{
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk);
+
+ crypto4xx_common_exit(ctx);
+ if (ctx->sw_cipher.cipher)
+ crypto_free_sync_skcipher(ctx->sw_cipher.cipher);
+}
+
+static int crypto4xx_aead_init(struct crypto_aead *tfm)
+{
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
+ struct crypto4xx_alg *amcc_alg;
+
+ ctx->sw_cipher.aead = crypto_alloc_aead(alg->base.cra_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->sw_cipher.aead))
+ return PTR_ERR(ctx->sw_cipher.aead);
+
+ amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.aead);
+ crypto4xx_ctx_init(amcc_alg, ctx);
+ crypto_aead_set_reqsize(tfm, max(sizeof(struct aead_request) + 32 +
+ crypto_aead_reqsize(ctx->sw_cipher.aead),
+ sizeof(struct crypto4xx_aead_reqctx)));
+ return 0;
+}
+
+static void crypto4xx_aead_exit(struct crypto_aead *tfm)
+{
+ struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto4xx_common_exit(ctx);
+ crypto_free_aead(ctx->sw_cipher.aead);
+}
+
+static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
+ struct crypto4xx_alg_common *crypto_alg,
+ int array_size)
+{
+ struct crypto4xx_alg *alg;
+ int i;
+ int rc = 0;
+
+ for (i = 0; i < array_size; i++) {
+ alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
+ if (!alg)
+ return -ENOMEM;
+
+ alg->alg = crypto_alg[i];
+ alg->dev = sec_dev;
+
+ switch (alg->alg.type) {
+ case CRYPTO_ALG_TYPE_AEAD:
+ rc = crypto_register_aead(&alg->alg.u.aead);
+ break;
+
+ case CRYPTO_ALG_TYPE_AHASH:
+ rc = crypto_register_ahash(&alg->alg.u.hash);
+ break;
+
+ case CRYPTO_ALG_TYPE_RNG:
+ rc = crypto_register_rng(&alg->alg.u.rng);
+ break;
+
+ default:
+ rc = crypto_register_skcipher(&alg->alg.u.cipher);
+ break;
+ }
+
+ if (rc)
+ kfree(alg);
+ else
+ list_add_tail(&alg->entry, &sec_dev->alg_list);
+ }
+
+ return 0;
+}
+
+static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
+{
+ struct crypto4xx_alg *alg, *tmp;
+
+ list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
+ list_del(&alg->entry);
+ switch (alg->alg.type) {
+ case CRYPTO_ALG_TYPE_AHASH:
+ crypto_unregister_ahash(&alg->alg.u.hash);
+ break;
+
+ case CRYPTO_ALG_TYPE_AEAD:
+ crypto_unregister_aead(&alg->alg.u.aead);
+ break;
+
+ case CRYPTO_ALG_TYPE_RNG:
+ crypto_unregister_rng(&alg->alg.u.rng);
+ break;
+
+ default:
+ crypto_unregister_skcipher(&alg->alg.u.cipher);
+ }
+ kfree(alg);
+ }
+}
+
+static void crypto4xx_bh_tasklet_cb(unsigned long data)
+{
+ struct device *dev = (struct device *)data;
+ struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
+ struct pd_uinfo *pd_uinfo;
+ struct ce_pd *pd;
+ u32 tail = core_dev->dev->pdr_tail;
+ u32 head = core_dev->dev->pdr_head;
+
+ do {
+ pd_uinfo = &core_dev->dev->pdr_uinfo[tail];
+ pd = &core_dev->dev->pdr[tail];
+ if ((pd_uinfo->state & PD_ENTRY_INUSE) &&
+ ((READ_ONCE(pd->pd_ctl.w) &
+ (PD_CTL_PE_DONE | PD_CTL_HOST_READY)) ==
+ PD_CTL_PE_DONE)) {
+ crypto4xx_pd_done(core_dev->dev, tail);
+ tail = crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
+ } else {
+ /* if tail not done, break */
+ break;
+ }
+ } while (head != tail);
+}
+
+/*
+ * Top half of the ISR.
+ */
+static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
+ u32 clr_val)
+{
+ struct device *dev = (struct device *)data;
+ struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
+
+ writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
+ tasklet_schedule(&core_dev->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
+{
+ return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR);
+}
+
+static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data)
+{
+ return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR |
+ PPC4XX_TMO_ERR_INT);
+}
+
+static int ppc4xx_prng_data_read(struct crypto4xx_device *dev,
+ u8 *data, unsigned int max)
+{
+ unsigned int i, curr = 0;
+ u32 val[2];
+
+ do {
+ /* trigger PRN generation */
+ writel(PPC4XX_PRNG_CTRL_AUTO_EN,
+ dev->ce_base + CRYPTO4XX_PRNG_CTRL);
+
+ for (i = 0; i < 1024; i++) {
+ /* usually 19 iterations are enough */
+ if ((readl(dev->ce_base + CRYPTO4XX_PRNG_STAT) &
+ CRYPTO4XX_PRNG_STAT_BUSY))
+ continue;
+
+ val[0] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_0);
+ val[1] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_1);
+ break;
+ }
+ if (i == 1024)
+ return -ETIMEDOUT;
+
+ if ((max - curr) >= 8) {
+ memcpy(data, &val, 8);
+ data += 8;
+ curr += 8;
+ } else {
+ /* copy only remaining bytes */
+ memcpy(data, &val, max - curr);
+ break;
+ }
+ } while (curr < max);
+
+ return curr;
+}
+
+static int crypto4xx_prng_generate(struct crypto_rng *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dstn, unsigned int dlen)
+{
+ struct rng_alg *alg = crypto_rng_alg(tfm);
+ struct crypto4xx_alg *amcc_alg;
+ struct crypto4xx_device *dev;
+ int ret;
+
+ amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.rng);
+ dev = amcc_alg->dev;
+
+ mutex_lock(&dev->core_dev->rng_lock);
+ ret = ppc4xx_prng_data_read(dev, dstn, dlen);
+ mutex_unlock(&dev->core_dev->rng_lock);
+ return ret;
+}
+
+
+static int crypto4xx_prng_seed(struct crypto_rng *tfm, const u8 *seed,
+ unsigned int slen)
+{
+ return 0;
+}
+
+/*
+ * Supported Crypto Algorithms
+ */
+static struct crypto4xx_alg_common crypto4xx_alg[] = {
+ /* Crypto AES modes */
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_IV_SIZE,
+ .setkey = crypto4xx_setkey_aes_cbc,
+ .encrypt = crypto4xx_encrypt_iv_block,
+ .decrypt = crypto4xx_decrypt_iv_block,
+ .init = crypto4xx_sk_init,
+ .exit = crypto4xx_sk_exit,
+ } },
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
+ .base = {
+ .cra_name = "cfb(aes)",
+ .cra_driver_name = "cfb-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_IV_SIZE,
+ .setkey = crypto4xx_setkey_aes_cfb,
+ .encrypt = crypto4xx_encrypt_iv_stream,
+ .decrypt = crypto4xx_decrypt_iv_stream,
+ .init = crypto4xx_sk_init,
+ .exit = crypto4xx_sk_exit,
+ } },
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_IV_SIZE,
+ .setkey = crypto4xx_setkey_aes_ctr,
+ .encrypt = crypto4xx_encrypt_ctr,
+ .decrypt = crypto4xx_decrypt_ctr,
+ .init = crypto4xx_sk_init,
+ .exit = crypto4xx_sk_exit,
+ } },
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
+ .base = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .setkey = crypto4xx_setkey_rfc3686,
+ .encrypt = crypto4xx_rfc3686_encrypt,
+ .decrypt = crypto4xx_rfc3686_decrypt,
+ .init = crypto4xx_sk_init,
+ .exit = crypto4xx_sk_exit,
+ } },
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = crypto4xx_setkey_aes_ecb,
+ .encrypt = crypto4xx_encrypt_noiv_block,
+ .decrypt = crypto4xx_decrypt_noiv_block,
+ .init = crypto4xx_sk_init,
+ .exit = crypto4xx_sk_exit,
+ } },
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
+ .base = {
+ .cra_name = "ofb(aes)",
+ .cra_driver_name = "ofb-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_IV_SIZE,
+ .setkey = crypto4xx_setkey_aes_ofb,
+ .encrypt = crypto4xx_encrypt_iv_stream,
+ .decrypt = crypto4xx_decrypt_iv_stream,
+ .init = crypto4xx_sk_init,
+ .exit = crypto4xx_sk_exit,
+ } },
+
+ /* AEAD */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
+ .setkey = crypto4xx_setkey_aes_ccm,
+ .setauthsize = crypto4xx_setauthsize_aead,
+ .encrypt = crypto4xx_encrypt_aes_ccm,
+ .decrypt = crypto4xx_decrypt_aes_ccm,
+ .init = crypto4xx_aead_init,
+ .exit = crypto4xx_aead_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = 16,
+ .base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "ccm-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ } },
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
+ .setkey = crypto4xx_setkey_aes_gcm,
+ .setauthsize = crypto4xx_setauthsize_aead,
+ .encrypt = crypto4xx_encrypt_aes_gcm,
+ .decrypt = crypto4xx_decrypt_aes_gcm,
+ .init = crypto4xx_aead_init,
+ .exit = crypto4xx_aead_exit,
+ .ivsize = GCM_AES_IV_SIZE,
+ .maxauthsize = 16,
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ } },
+ { .type = CRYPTO_ALG_TYPE_RNG, .u.rng = {
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "crypto4xx_rng",
+ .cra_priority = 300,
+ .cra_ctxsize = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .generate = crypto4xx_prng_generate,
+ .seed = crypto4xx_prng_seed,
+ .seedsize = 0,
+ } },
+};
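+
+/*
+ * Illustrative sketch (not part of the driver): once the table above is
+ * registered, kernel users reach these implementations through the
+ * generic crypto API. A request for "cbc(aes)" resolves to the highest
+ * priority implementation; "cbc-aes-ppc4xx" selects this driver
+ * explicitly. The hypothetical helper below shows one asynchronous
+ * skcipher round trip; error handling is abbreviated.
+ */
+#if 0
+static int example_cbc_aes_encrypt(const u8 *key, unsigned int keylen,
+				   u8 *iv, struct scatterlist *src,
+				   struct scatterlist *dst,
+				   unsigned int nbytes)
+{
+	struct crypto_skcipher *tfm;
+	struct skcipher_request *req;
+	DECLARE_CRYPTO_WAIT(wait);
+	int ret;
+
+	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	ret = crypto_skcipher_setkey(tfm, key, keylen);
+	if (ret)
+		goto out;
+
+	req = skcipher_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* Sleep until the asynchronous request has completed. */
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+				      CRYPTO_TFM_REQ_MAY_SLEEP,
+				      crypto_req_done, &wait);
+	skcipher_request_set_crypt(req, src, dst, nbytes, iv);
+	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+
+	skcipher_request_free(req);
+out:
+	crypto_free_skcipher(tfm);
+	return ret;
+}
+#endif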
+
+/*
+ * Module Initialization Routine
+ */
+static int crypto4xx_probe(struct platform_device *ofdev)
+{
+ int rc;
+ struct resource res;
+ struct device *dev = &ofdev->dev;
+ struct crypto4xx_core_device *core_dev;
+ struct device_node *np;
+ u32 pvr;
+ bool is_revb = true;
+
+ rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
+ if (rc)
+ return -ENODEV;
+
+ np = of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto");
+ if (np) {
+ mtdcri(SDR0, PPC460EX_SDR0_SRST,
+ mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
+ mtdcri(SDR0, PPC460EX_SDR0_SRST,
+ mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
+ } else {
+ np = of_find_compatible_node(NULL, NULL, "amcc,ppc405ex-crypto");
+ if (np) {
+ mtdcri(SDR0, PPC405EX_SDR0_SRST,
+ mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
+ mtdcri(SDR0, PPC405EX_SDR0_SRST,
+ mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
+ is_revb = false;
+ } else {
+ np = of_find_compatible_node(NULL, NULL, "amcc,ppc460sx-crypto");
+ if (np) {
+ mtdcri(SDR0, PPC460SX_SDR0_SRST,
+ mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
+ mtdcri(SDR0, PPC460SX_SDR0_SRST,
+ mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
+ } else {
+				printk(KERN_ERR "Crypto function not supported!\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ of_node_put(np);
+
+ core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
+ if (!core_dev)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, core_dev);
+ core_dev->ofdev = ofdev;
+ core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
+ rc = -ENOMEM;
+ if (!core_dev->dev)
+ goto err_alloc_dev;
+
+	/*
+	 * Older versions of the 460EX/GT have a hardware bug and hence
+	 * do not support hardware-based security interrupt coalescing.
+	 */
+ pvr = mfspr(SPRN_PVR);
+ if (is_revb && ((pvr >> 4) == 0x130218A)) {
+ u32 min = PVR_MIN(pvr);
+
+ if (min < 4) {
+ dev_info(dev, "RevA detected - disable interrupt coalescing\n");
+ is_revb = false;
+ }
+ }
+
+ core_dev->dev->core_dev = core_dev;
+ core_dev->dev->is_revb = is_revb;
+ core_dev->device = dev;
+ mutex_init(&core_dev->rng_lock);
+ spin_lock_init(&core_dev->lock);
+ INIT_LIST_HEAD(&core_dev->dev->alg_list);
+ ratelimit_default_init(&core_dev->dev->aead_ratelimit);
+ rc = crypto4xx_build_sdr(core_dev->dev);
+ if (rc)
+ goto err_build_sdr;
+ rc = crypto4xx_build_pdr(core_dev->dev);
+ if (rc)
+ goto err_build_sdr;
+
+ rc = crypto4xx_build_gdr(core_dev->dev);
+ if (rc)
+ goto err_build_sdr;
+
+ /* Init tasklet for bottom half processing */
+ tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
+ (unsigned long) dev);
+
+ core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
+ if (!core_dev->dev->ce_base) {
+ dev_err(dev, "failed to of_iomap\n");
+ rc = -ENOMEM;
+ goto err_iomap;
+ }
+
+ /* Register for Crypto isr, Crypto Engine IRQ */
+ core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+ rc = request_irq(core_dev->irq, is_revb ?
+ crypto4xx_ce_interrupt_handler_revb :
+ crypto4xx_ce_interrupt_handler, 0,
+ KBUILD_MODNAME, dev);
+ if (rc)
+ goto err_request_irq;
+
+	/* need to set up pdr, rdr, gdr and sdr before this */
+ crypto4xx_hw_init(core_dev->dev);
+
+ /* Register security algorithms with Linux CryptoAPI */
+ rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
+ ARRAY_SIZE(crypto4xx_alg));
+ if (rc)
+ goto err_start_dev;
+
+ ppc4xx_trng_probe(core_dev);
+ return 0;
+
+err_start_dev:
+ free_irq(core_dev->irq, dev);
+err_request_irq:
+ irq_dispose_mapping(core_dev->irq);
+ iounmap(core_dev->dev->ce_base);
+err_iomap:
+ tasklet_kill(&core_dev->tasklet);
+err_build_sdr:
+ crypto4xx_destroy_sdr(core_dev->dev);
+ crypto4xx_destroy_gdr(core_dev->dev);
+ crypto4xx_destroy_pdr(core_dev->dev);
+ kfree(core_dev->dev);
+err_alloc_dev:
+ kfree(core_dev);
+
+ return rc;
+}
+
+static int crypto4xx_remove(struct platform_device *ofdev)
+{
+ struct device *dev = &ofdev->dev;
+ struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
+
+ ppc4xx_trng_remove(core_dev);
+
+ free_irq(core_dev->irq, dev);
+ irq_dispose_mapping(core_dev->irq);
+
+ tasklet_kill(&core_dev->tasklet);
+ /* Un-register with Linux CryptoAPI */
+ crypto4xx_unregister_alg(core_dev->dev);
+ mutex_destroy(&core_dev->rng_lock);
+ /* Free all allocated memory */
+ crypto4xx_stop_all(core_dev);
+
+ return 0;
+}
+
+static const struct of_device_id crypto4xx_match[] = {
+ { .compatible = "amcc,ppc4xx-crypto",},
+ { },
+};
+MODULE_DEVICE_TABLE(of, crypto4xx_match);
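+
+/*
+ * Note: the driver binds to the generic "amcc,ppc4xx-crypto" compatible;
+ * crypto4xx_probe() then looks up the SoC-specific 460EX/405EX/460SX
+ * crypto nodes to select the matching SDR0 reset bits.
+ */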
+
+static struct platform_driver crypto4xx_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = crypto4xx_match,
+ },
+ .probe = crypto4xx_probe,
+ .remove = crypto4xx_remove,
+};
+
+module_platform_driver(crypto4xx_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
+MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
new file mode 100644
index 000000000..56c10668c
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -0,0 +1,245 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * AMCC SoC PPC4xx Crypto Driver
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. James Hsiao <jhsiao@amcc.com>
+ *
+ * This is the header file for the AMCC crypto offload Linux device
+ * driver for use with the Linux CryptoAPI.
+ */
+
+#ifndef __CRYPTO4XX_CORE_H__
+#define __CRYPTO4XX_CORE_H__
+
+#include <linux/ratelimit.h>
+#include <linux/mutex.h>
+#include <linux/scatterlist.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/rng.h>
+#include <crypto/internal/skcipher.h>
+#include "crypto4xx_reg_def.h"
+#include "crypto4xx_sa.h"
+
+#define PPC460SX_SDR0_SRST 0x201
+#define PPC405EX_SDR0_SRST 0x200
+#define PPC460EX_SDR0_SRST 0x201
+#define PPC460EX_CE_RESET 0x08000000
+#define PPC460SX_CE_RESET 0x20000000
+#define PPC405EX_CE_RESET 0x00000008
+
+#define CRYPTO4XX_CRYPTO_PRIORITY 300
+#define PPC4XX_NUM_PD 256
+#define PPC4XX_LAST_PD (PPC4XX_NUM_PD - 1)
+#define PPC4XX_NUM_GD 1024
+#define PPC4XX_LAST_GD (PPC4XX_NUM_GD - 1)
+#define PPC4XX_NUM_SD 256
+#define PPC4XX_LAST_SD (PPC4XX_NUM_SD - 1)
+#define PPC4XX_SD_BUFFER_SIZE 2048
+
+#define PD_ENTRY_BUSY BIT(1)
+#define PD_ENTRY_INUSE BIT(0)
+#define PD_ENTRY_FREE 0
+#define ERING_WAS_FULL 0xffffffff
+
+struct crypto4xx_device;
+
+union shadow_sa_buf {
+ struct dynamic_sa_ctl sa;
+
+ /* alloc 256 bytes which is enough for any kind of dynamic sa */
+ u8 buf[256];
+} __packed;
+
+struct pd_uinfo {
+ struct crypto4xx_device *dev;
+ u32 state;
+	u32 first_gd;		/* first gather descriptor
+				used by this packet */
+	u32 num_gd;		/* number of gather descriptors
+				used by this packet */
+	u32 first_sd;		/* first scatter descriptor
+				used by this packet */
+	u32 num_sd;		/* number of scatter descriptors
+				used by this packet */
+ struct dynamic_sa_ctl *sa_va; /* shadow sa */
+ struct sa_state_record *sr_va; /* state record for shadow sa */
+ u32 sr_pa;
+ struct scatterlist *dest_va;
+ struct crypto_async_request *async_req; /* base crypto request
+ for this packet */
+};
+
+struct crypto4xx_device {
+ struct crypto4xx_core_device *core_dev;
+ void __iomem *ce_base;
+ void __iomem *trng_base;
+
+ struct ce_pd *pdr; /* base address of packet descriptor ring */
+ dma_addr_t pdr_pa; /* physical address of pdr_base_register */
+ struct ce_gd *gdr; /* gather descriptor ring */
+ dma_addr_t gdr_pa; /* physical address of gdr_base_register */
+ struct ce_sd *sdr; /* scatter descriptor ring */
+ dma_addr_t sdr_pa; /* physical address of sdr_base_register */
+ void *scatter_buffer_va;
+ dma_addr_t scatter_buffer_pa;
+
+ union shadow_sa_buf *shadow_sa_pool;
+ dma_addr_t shadow_sa_pool_pa;
+ struct sa_state_record *shadow_sr_pool;
+ dma_addr_t shadow_sr_pool_pa;
+ u32 pdr_tail;
+ u32 pdr_head;
+ u32 gdr_tail;
+ u32 gdr_head;
+ u32 sdr_tail;
+ u32 sdr_head;
+ struct pd_uinfo *pdr_uinfo;
+	struct list_head alg_list;	/* list of algorithms supported
+					by this device */
+ struct ratelimit_state aead_ratelimit;
+ bool is_revb;
+};
+
+struct crypto4xx_core_device {
+ struct device *device;
+ struct platform_device *ofdev;
+ struct crypto4xx_device *dev;
+ struct hwrng *trng;
+ u32 int_status;
+ u32 irq;
+ struct tasklet_struct tasklet;
+ spinlock_t lock;
+ struct mutex rng_lock;
+};
+
+struct crypto4xx_ctx {
+ struct crypto4xx_device *dev;
+ struct dynamic_sa_ctl *sa_in;
+ struct dynamic_sa_ctl *sa_out;
+ __le32 iv_nonce;
+ u32 sa_len;
+ union {
+ struct crypto_sync_skcipher *cipher;
+ struct crypto_aead *aead;
+ } sw_cipher;
+};
+
+struct crypto4xx_aead_reqctx {
+ struct scatterlist dst[2];
+};
+
+struct crypto4xx_alg_common {
+ u32 type;
+ union {
+ struct skcipher_alg cipher;
+ struct ahash_alg hash;
+ struct aead_alg aead;
+ struct rng_alg rng;
+ } u;
+};
+
+struct crypto4xx_alg {
+ struct list_head entry;
+ struct crypto4xx_alg_common alg;
+ struct crypto4xx_device *dev;
+};
+
+int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
+void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
+void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
+int crypto4xx_build_pd(struct crypto_async_request *req,
+ struct crypto4xx_ctx *ctx,
+ struct scatterlist *src,
+ struct scatterlist *dst,
+ const unsigned int datalen,
+ const __le32 *iv, const u32 iv_len,
+ const struct dynamic_sa_ctl *sa,
+ const unsigned int sa_len,
+ const unsigned int assoclen,
+ struct scatterlist *dst_tmp);
+int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_cfb(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_ecb(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_ofb(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_encrypt_ctr(struct skcipher_request *req);
+int crypto4xx_decrypt_ctr(struct skcipher_request *req);
+int crypto4xx_encrypt_iv_stream(struct skcipher_request *req);
+int crypto4xx_decrypt_iv_stream(struct skcipher_request *req);
+int crypto4xx_encrypt_iv_block(struct skcipher_request *req);
+int crypto4xx_decrypt_iv_block(struct skcipher_request *req);
+int crypto4xx_encrypt_noiv_block(struct skcipher_request *req);
+int crypto4xx_decrypt_noiv_block(struct skcipher_request *req);
+int crypto4xx_rfc3686_encrypt(struct skcipher_request *req);
+int crypto4xx_rfc3686_decrypt(struct skcipher_request *req);
+int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
+int crypto4xx_hash_digest(struct ahash_request *req);
+int crypto4xx_hash_final(struct ahash_request *req);
+int crypto4xx_hash_update(struct ahash_request *req);
+int crypto4xx_hash_init(struct ahash_request *req);
+
+/*
+ * Note: Only use this function to copy items that are word-aligned.
+ */
+static inline void crypto4xx_memcpy_swab32(u32 *dst, const void *buf,
+ size_t len)
+{
+ for (; len >= 4; buf += 4, len -= 4)
+ *dst++ = __swab32p((u32 *) buf);
+
+ if (len) {
+ const u8 *tmp = (u8 *)buf;
+
+ switch (len) {
+ case 3:
+ *dst = (tmp[2] << 16) |
+ (tmp[1] << 8) |
+ tmp[0];
+ break;
+ case 2:
+ *dst = (tmp[1] << 8) |
+ tmp[0];
+ break;
+ case 1:
+ *dst = tmp[0];
+ break;
+ default:
+ break;
+ }
+ }
+}
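+
+/*
+ * Example (illustrative): on the big-endian PPC4xx, copying the bytes
+ * { 0x01, 0x02, 0x03, 0x04 } with crypto4xx_memcpy_swab32() stores the
+ * word 0x04030201, i.e. every 32-bit word is byte-swapped while the
+ * word order is preserved. A sub-word tail of 1-3 bytes is packed,
+ * reversed, into the low-order end of one final word.
+ */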
+
+static inline void crypto4xx_memcpy_from_le32(u32 *dst, const void *buf,
+ size_t len)
+{
+ crypto4xx_memcpy_swab32(dst, buf, len);
+}
+
+static inline void crypto4xx_memcpy_to_le32(__le32 *dst, const void *buf,
+ size_t len)
+{
+ crypto4xx_memcpy_swab32((u32 *)dst, buf, len);
+}
+
+int crypto4xx_setauthsize_aead(struct crypto_aead *cipher,
+ unsigned int authsize);
+int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_encrypt_aes_ccm(struct aead_request *req);
+int crypto4xx_decrypt_aes_ccm(struct aead_request *req);
+int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_encrypt_aes_gcm(struct aead_request *req);
+int crypto4xx_decrypt_aes_gcm(struct aead_request *req);
+
+#endif
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h
new file mode 100644
index 000000000..103806122
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -0,0 +1,282 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * AMCC SoC PPC4xx Crypto Driver
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. James Hsiao <jhsiao@amcc.com>
+ *
+ * This file defines the register set for the Security Subsystem.
+ */
+
+#ifndef __CRYPTO4XX_REG_DEF_H__
+#define __CRYPTO4XX_REG_DEF_H__
+
+/* CRYPTO4XX Register offset */
+#define CRYPTO4XX_DESCRIPTOR 0x00000000
+#define CRYPTO4XX_CTRL_STAT 0x00000000
+#define CRYPTO4XX_SOURCE 0x00000004
+#define CRYPTO4XX_DEST 0x00000008
+#define CRYPTO4XX_SA 0x0000000C
+#define CRYPTO4XX_SA_LENGTH 0x00000010
+#define CRYPTO4XX_LENGTH 0x00000014
+
+#define CRYPTO4XX_PE_DMA_CFG 0x00000040
+#define CRYPTO4XX_PE_DMA_STAT 0x00000044
+#define CRYPTO4XX_PDR_BASE 0x00000048
+#define CRYPTO4XX_RDR_BASE 0x0000004c
+#define CRYPTO4XX_RING_SIZE 0x00000050
+#define CRYPTO4XX_RING_CTRL 0x00000054
+#define CRYPTO4XX_INT_RING_STAT 0x00000058
+#define CRYPTO4XX_EXT_RING_STAT 0x0000005c
+#define CRYPTO4XX_IO_THRESHOLD 0x00000060
+#define CRYPTO4XX_GATH_RING_BASE 0x00000064
+#define CRYPTO4XX_SCAT_RING_BASE 0x00000068
+#define CRYPTO4XX_PART_RING_SIZE 0x0000006c
+#define CRYPTO4XX_PART_RING_CFG 0x00000070
+
+#define CRYPTO4XX_PDR_BASE_UADDR 0x00000080
+#define CRYPTO4XX_RDR_BASE_UADDR 0x00000084
+#define CRYPTO4XX_PKT_SRC_UADDR 0x00000088
+#define CRYPTO4XX_PKT_DEST_UADDR 0x0000008c
+#define CRYPTO4XX_SA_UADDR 0x00000090
+#define CRYPTO4XX_GATH_RING_BASE_UADDR 0x000000A0
+#define CRYPTO4XX_SCAT_RING_BASE_UADDR 0x000000A4
+
+#define CRYPTO4XX_SEQ_RD 0x00000408
+#define CRYPTO4XX_SEQ_MASK_RD 0x0000040C
+
+#define CRYPTO4XX_SA_CMD_0 0x00010600
+#define CRYPTO4XX_SA_CMD_1 0x00010604
+
+#define CRYPTO4XX_STATE_PTR 0x000106dc
+#define CRYPTO4XX_STATE_IV 0x00010700
+#define CRYPTO4XX_STATE_HASH_BYTE_CNT_0 0x00010710
+#define CRYPTO4XX_STATE_HASH_BYTE_CNT_1 0x00010714
+
+#define CRYPTO4XX_STATE_IDIGEST_0 0x00010718
+#define CRYPTO4XX_STATE_IDIGEST_1 0x0001071c
+
+#define CRYPTO4XX_DATA_IN 0x00018000
+#define CRYPTO4XX_DATA_OUT 0x0001c000
+
+#define CRYPTO4XX_INT_UNMASK_STAT 0x000500a0
+#define CRYPTO4XX_INT_MASK_STAT 0x000500a4
+#define CRYPTO4XX_INT_CLR 0x000500a4
+#define CRYPTO4XX_INT_EN 0x000500a8
+
+#define CRYPTO4XX_INT_PKA 0x00000002
+#define CRYPTO4XX_INT_PDR_DONE 0x00008000
+#define CRYPTO4XX_INT_MA_WR_ERR 0x00020000
+#define CRYPTO4XX_INT_MA_RD_ERR 0x00010000
+#define CRYPTO4XX_INT_PE_ERR 0x00000200
+#define CRYPTO4XX_INT_USER_DMA_ERR 0x00000040
+#define CRYPTO4XX_INT_SLAVE_ERR 0x00000010
+#define CRYPTO4XX_INT_MASTER_ERR 0x00000008
+#define CRYPTO4XX_INT_ERROR 0x00030258
+
+#define CRYPTO4XX_INT_CFG 0x000500ac
+#define CRYPTO4XX_INT_DESCR_RD 0x000500b0
+#define CRYPTO4XX_INT_DESCR_CNT 0x000500b4
+#define CRYPTO4XX_INT_TIMEOUT_CNT 0x000500b8
+
+#define CRYPTO4XX_DEVICE_CTRL 0x00060080
+#define CRYPTO4XX_DEVICE_ID 0x00060084
+#define CRYPTO4XX_DEVICE_INFO 0x00060088
+#define CRYPTO4XX_DMA_USER_SRC 0x00060094
+#define CRYPTO4XX_DMA_USER_DEST 0x00060098
+#define CRYPTO4XX_DMA_USER_CMD 0x0006009C
+
+#define CRYPTO4XX_DMA_CFG 0x000600d4
+#define CRYPTO4XX_BYTE_ORDER_CFG 0x000600d8
+#define CRYPTO4XX_ENDIAN_CFG 0x000600d8
+
+#define CRYPTO4XX_PRNG_STAT 0x00070000
+#define CRYPTO4XX_PRNG_STAT_BUSY 0x1
+#define CRYPTO4XX_PRNG_CTRL 0x00070004
+#define CRYPTO4XX_PRNG_SEED_L 0x00070008
+#define CRYPTO4XX_PRNG_SEED_H 0x0007000c
+
+#define CRYPTO4XX_PRNG_RES_0 0x00070020
+#define CRYPTO4XX_PRNG_RES_1 0x00070024
+#define CRYPTO4XX_PRNG_RES_2 0x00070028
+#define CRYPTO4XX_PRNG_RES_3 0x0007002C
+
+#define CRYPTO4XX_PRNG_LFSR_L 0x00070030
+#define CRYPTO4XX_PRNG_LFSR_H 0x00070034
+
+/*
+ * Initialize CRYPTO ENGINE registers and memory bases.
+ */
+#define PPC4XX_PDR_POLL 0x3ff
+#define PPC4XX_OUTPUT_THRESHOLD 2
+#define PPC4XX_INPUT_THRESHOLD 2
+#define PPC4XX_PD_SIZE 6
+#define PPC4XX_CTX_DONE_INT 0x2000
+#define PPC4XX_PD_DONE_INT 0x8000
+#define PPC4XX_TMO_ERR_INT 0x40000
+#define PPC4XX_BYTE_ORDER 0x22222
+#define PPC4XX_INTERRUPT_CLR 0x3ffff
+#define PPC4XX_PRNG_CTRL_AUTO_EN 0x3
+#define PPC4XX_DC_3DES_EN 1
+#define PPC4XX_TRNG_EN 0x00020000
+#define PPC4XX_INT_DESCR_CNT 7
+#define PPC4XX_INT_TIMEOUT_CNT 0
+#define PPC4XX_INT_TIMEOUT_CNT_REVB 0x3FF
+#define PPC4XX_INT_CFG 1
+/*
+ * all of the following defines are ad hoc
+ */
+#define PPC4XX_RING_RETRY 100
+#define PPC4XX_RING_POLL 100
+#define PPC4XX_SDR_SIZE PPC4XX_NUM_SD
+#define PPC4XX_GDR_SIZE PPC4XX_NUM_GD
+
+/*
+ * Generic Security Association (SA) with all possible fields. It is
+ * unlikely to ever be used except for reference purposes. The structure
+ * format must not be changed, as the hardware expects the fields to be
+ * laid out exactly as defined. Fields may be removed or reduced, but
+ * their ordering cannot be changed.
+ */
+#define CRYPTO4XX_DMA_CFG_OFFSET 0x40
+union ce_pe_dma_cfg {
+ struct {
+ u32 rsv:7;
+ u32 dir_host:1;
+ u32 rsv1:2;
+ u32 bo_td_en:1;
+ u32 dis_pdr_upd:1;
+ u32 bo_sgpd_en:1;
+ u32 bo_data_en:1;
+ u32 bo_sa_en:1;
+ u32 bo_pd_en:1;
+ u32 rsv2:4;
+ u32 dynamic_sa_en:1;
+ u32 pdr_mode:2;
+ u32 pe_mode:1;
+ u32 rsv3:5;
+ u32 reset_sg:1;
+ u32 reset_pdr:1;
+ u32 reset_pe:1;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+#define CRYPTO4XX_PDR_BASE_OFFSET 0x48
+#define CRYPTO4XX_RDR_BASE_OFFSET 0x4c
+#define CRYPTO4XX_RING_SIZE_OFFSET 0x50
+union ce_ring_size {
+ struct {
+ u32 ring_offset:16;
+ u32 rsv:6;
+ u32 ring_size:10;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+#define CRYPTO4XX_RING_CONTROL_OFFSET 0x54
+union ce_ring_control {
+ struct {
+ u32 continuous:1;
+ u32 rsv:5;
+ u32 ring_retry_divisor:10;
+ u32 rsv1:4;
+ u32 ring_poll_divisor:10;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+#define CRYPTO4XX_IO_THRESHOLD_OFFSET 0x60
+union ce_io_threshold {
+ struct {
+ u32 rsv:6;
+ u32 output_threshold:10;
+ u32 rsv1:6;
+ u32 input_threshold:10;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+#define CRYPTO4XX_GATHER_RING_BASE_OFFSET 0x64
+#define CRYPTO4XX_SCATTER_RING_BASE_OFFSET 0x68
+
+union ce_part_ring_size {
+ struct {
+ u32 sdr_size:16;
+ u32 gdr_size:16;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+#define MAX_BURST_SIZE_32 0
+#define MAX_BURST_SIZE_64 1
+#define MAX_BURST_SIZE_128 2
+#define MAX_BURST_SIZE_256 3
+
+/* gather descriptor control length */
+struct gd_ctl_len {
+ u32 len:16;
+ u32 rsv:14;
+ u32 done:1;
+ u32 ready:1;
+} __attribute__((packed));
+
+struct ce_gd {
+ u32 ptr;
+ struct gd_ctl_len ctl_len;
+} __attribute__((packed));
+
+struct sd_ctl {
+ u32 ctl:30;
+ u32 done:1;
+ u32 rdy:1;
+} __attribute__((packed));
+
+struct ce_sd {
+ u32 ptr;
+ struct sd_ctl ctl;
+} __attribute__((packed));
+
+#define PD_PAD_CTL_32 0x10
+#define PD_PAD_CTL_64 0x20
+#define PD_PAD_CTL_128 0x40
+#define PD_PAD_CTL_256 0x80
+union ce_pd_ctl {
+ struct {
+ u32 pd_pad_ctl:8;
+ u32 status:8;
+ u32 next_hdr:8;
+ u32 rsv:2;
+ u32 cached_sa:1;
+ u32 hash_final:1;
+ u32 init_arc4:1;
+ u32 rsv1:1;
+ u32 pe_done:1;
+ u32 host_ready:1;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+#define PD_CTL_HASH_FINAL BIT(4)
+#define PD_CTL_PE_DONE BIT(1)
+#define PD_CTL_HOST_READY BIT(0)
+
+union ce_pd_ctl_len {
+ struct {
+ u32 bypass:8;
+ u32 pe_done:1;
+ u32 host_ready:1;
+ u32 rsv:2;
+ u32 pkt_len:20;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+struct ce_pd {
+ union ce_pd_ctl pd_ctl;
+ u32 src;
+ u32 dest;
+ u32 sa; /* get from ctx->sa_dma_addr */
+ u32 sa_len; /* only if dynamic sa is used */
+ union ce_pd_ctl_len pd_ctl_len;
+} __attribute__((packed));
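+
+/*
+ * Illustrative sketch (not used by the driver): hand one packet
+ * descriptor to the engine and busy-wait for completion using the
+ * PD_CTL_* bits above. Assumes pd points into the coherent-DMA
+ * descriptor ring, so plain accesses are visible to the hardware.
+ */
+#if 0
+static void example_pd_submit_and_poll(struct ce_pd *pd)
+{
+	pd->pd_ctl.w |= PD_CTL_HOST_READY;	/* engine may fetch it now */
+	while (!(pd->pd_ctl.w & PD_CTL_PE_DONE))
+		cpu_relax();			/* engine still busy */
+	pd->pd_ctl.w &= ~(PD_CTL_PE_DONE | PD_CTL_HOST_READY);
+}
+#endif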
+#endif
diff --git a/drivers/crypto/amcc/crypto4xx_sa.h b/drivers/crypto/amcc/crypto4xx_sa.h
new file mode 100644
index 000000000..e98e4e7ab
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_sa.h
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * AMCC SoC PPC4xx Crypto Driver
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. James Hsiao <jhsiao@amcc.com>
+ *
+ * This file defines the security association (SA) context format.
+ */
+
+#ifndef __CRYPTO4XX_SA_H__
+#define __CRYPTO4XX_SA_H__
+
+#define AES_IV_SIZE 16
+
+/*
+ * Contents of Dynamic Security Association (SA) with all possible fields
+ */
+union dynamic_sa_contents {
+ struct {
+ u32 arc4_state_ptr:1;
+ u32 arc4_ij_ptr:1;
+ u32 state_ptr:1;
+ u32 iv3:1;
+ u32 iv2:1;
+ u32 iv1:1;
+ u32 iv0:1;
+ u32 seq_num_mask3:1;
+ u32 seq_num_mask2:1;
+ u32 seq_num_mask1:1;
+ u32 seq_num_mask0:1;
+ u32 seq_num1:1;
+ u32 seq_num0:1;
+ u32 spi:1;
+ u32 outer_size:5;
+ u32 inner_size:5;
+ u32 key_size:4;
+ u32 cmd_size:4;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+#define DIR_OUTBOUND 0
+#define DIR_INBOUND 1
+#define SA_OP_GROUP_BASIC 0
+#define SA_OPCODE_ENCRYPT 0
+#define SA_OPCODE_DECRYPT 0
+#define SA_OPCODE_ENCRYPT_HASH 1
+#define SA_OPCODE_HASH_DECRYPT 1
+#define SA_OPCODE_HASH 3
+#define SA_CIPHER_ALG_DES 0
+#define SA_CIPHER_ALG_3DES 1
+#define SA_CIPHER_ALG_ARC4 2
+#define SA_CIPHER_ALG_AES 3
+#define SA_CIPHER_ALG_KASUMI 4
+#define SA_CIPHER_ALG_NULL 15
+
+#define SA_HASH_ALG_MD5 0
+#define SA_HASH_ALG_SHA1 1
+#define SA_HASH_ALG_GHASH 12
+#define SA_HASH_ALG_CBC_MAC 14
+#define SA_HASH_ALG_NULL 15
+#define SA_HASH_ALG_SHA1_DIGEST_SIZE 20
+
+#define SA_LOAD_HASH_FROM_SA 0
+#define SA_LOAD_HASH_FROM_STATE 2
+#define SA_NOT_LOAD_HASH 3
+#define SA_LOAD_IV_FROM_SA 0
+#define SA_LOAD_IV_FROM_INPUT 1
+#define SA_LOAD_IV_FROM_STATE 2
+#define SA_LOAD_IV_GEN_IV 3
+
+#define SA_PAD_TYPE_CONSTANT 2
+#define SA_PAD_TYPE_ZERO 3
+#define SA_PAD_TYPE_TLS 5
+#define SA_PAD_TYPE_DTLS 5
+#define SA_NOT_SAVE_HASH 0
+#define SA_SAVE_HASH 1
+#define SA_NOT_SAVE_IV 0
+#define SA_SAVE_IV 1
+#define SA_HEADER_PROC 1
+#define SA_NO_HEADER_PROC 0
+
+union sa_command_0 {
+ struct {
+ u32 scatter:1;
+ u32 gather:1;
+ u32 save_hash_state:1;
+ u32 save_iv:1;
+ u32 load_hash_state:2;
+ u32 load_iv:2;
+ u32 digest_len:4;
+ u32 hdr_proc:1;
+ u32 extend_pad:1;
+ u32 stream_cipher_pad:1;
+ u32 rsv:1;
+ u32 hash_alg:4;
+ u32 cipher_alg:4;
+ u32 pad_type:2;
+ u32 op_group:2;
+ u32 dir:1;
+ u32 opcode:3;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+#define CRYPTO_MODE_ECB 0
+#define CRYPTO_MODE_CBC 1
+#define CRYPTO_MODE_OFB 2
+#define CRYPTO_MODE_CFB 3
+#define CRYPTO_MODE_CTR 4
+
+#define CRYPTO_FEEDBACK_MODE_NO_FB 0
+#define CRYPTO_FEEDBACK_MODE_64BIT_OFB 0
+#define CRYPTO_FEEDBACK_MODE_8BIT_CFB 1
+#define CRYPTO_FEEDBACK_MODE_1BIT_CFB 2
+#define CRYPTO_FEEDBACK_MODE_128BIT_CFB 3
+
+#define SA_AES_KEY_LEN_128 2
+#define SA_AES_KEY_LEN_192 3
+#define SA_AES_KEY_LEN_256 4
+
+#define SA_REV2 1
+/*
+ * The following defines are for bits in sa_command_1.
+ * In basic hash mode this bit selects simple hash or HMAC.
+ * In IPsec mode, this bit controls muting.
+ */
+#define SA_HASH_MODE_HASH 0
+#define SA_HASH_MODE_HMAC 1
+#define SA_MC_ENABLE 0
+#define SA_MC_DISABLE 1
+#define SA_NOT_COPY_HDR 0
+#define SA_COPY_HDR 1
+#define SA_NOT_COPY_PAD 0
+#define SA_COPY_PAD 1
+#define SA_NOT_COPY_PAYLOAD 0
+#define SA_COPY_PAYLOAD 1
+#define SA_EXTENDED_SN_OFF 0
+#define SA_EXTENDED_SN_ON 1
+#define SA_SEQ_MASK_OFF 0
+#define SA_SEQ_MASK_ON 1
+
+union sa_command_1 {
+ struct {
+ u32 crypto_mode31:1;
+ u32 save_arc4_state:1;
+ u32 arc4_stateful:1;
+ u32 key_len:5;
+ u32 hash_crypto_offset:8;
+ u32 sa_rev:2;
+ u32 byte_offset:1;
+ u32 hmac_muting:1;
+ u32 feedback_mode:2;
+ u32 crypto_mode9_8:2;
+ u32 extended_seq_num:1;
+ u32 seq_num_mask:1;
+ u32 mutable_bit_proc:1;
+ u32 ip_version:1;
+ u32 copy_pad:1;
+ u32 copy_payload:1;
+ u32 copy_hdr:1;
+ u32 rsv1:1;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+struct dynamic_sa_ctl {
+ union dynamic_sa_contents sa_contents;
+ union sa_command_0 sa_command_0;
+ union sa_command_1 sa_command_1;
+} __attribute__((packed));
+
+/*
+ * State Record for Security Association (SA)
+ */
+struct sa_state_record {
+ __le32 save_iv[4];
+ __le32 save_hash_byte_cnt[2];
+ union {
+ u32 save_digest[16]; /* for MD5/SHA */
+ __le32 save_digest_le32[16]; /* GHASH / CBC */
+ };
+} __attribute__((packed));
+
+/*
+ * Security Association (SA) for AES128
+ */
+struct dynamic_sa_aes128 {
+ struct dynamic_sa_ctl ctrl;
+ __le32 key[4];
+	__le32 iv[4]; /* for CBC, OFB, and CFB modes */
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+
+#define SA_AES128_LEN (sizeof(struct dynamic_sa_aes128)/4)
+#define SA_AES128_CONTENTS 0x3e000042
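+
+/*
+ * Worked example (assuming the MSB-first bitfield packing used on
+ * big-endian PowerPC): SA_AES128_CONTENTS == 0x3e000042 breaks down as
+ *
+ *	(1 << 29)	state_ptr present
+ *	(0xf << 25)	iv0..iv3 present (four IV words)
+ *	(4 << 4)	key_size = 4 words (16-byte AES-128 key)
+ *	(2 << 0)	cmd_size = 2
+ *
+ * which matches the key[4]/iv[4]/state_ptr layout declared above.
+ */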
+
+/*
+ * Security Association (SA) for AES192
+ */
+struct dynamic_sa_aes192 {
+ struct dynamic_sa_ctl ctrl;
+ __le32 key[6];
+	__le32 iv[4]; /* for CBC, OFB, and CFB modes */
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+
+#define SA_AES192_LEN (sizeof(struct dynamic_sa_aes192)/4)
+#define SA_AES192_CONTENTS 0x3e000062
+
+/*
+ * Security Association (SA) for AES256
+ */
+struct dynamic_sa_aes256 {
+ struct dynamic_sa_ctl ctrl;
+ __le32 key[8];
+	__le32 iv[4]; /* for CBC, OFB, and CFB modes */
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+
+#define SA_AES256_LEN (sizeof(struct dynamic_sa_aes256)/4)
+#define SA_AES256_CONTENTS 0x3e000082
+#define SA_AES_CONTENTS 0x3e000002
+
+/*
+ * Security Association (SA) for AES128 CCM
+ */
+struct dynamic_sa_aes128_ccm {
+ struct dynamic_sa_ctl ctrl;
+ __le32 key[4];
+ __le32 iv[4];
+ u32 state_ptr;
+ u32 reserved;
+} __packed;
+#define SA_AES128_CCM_LEN (sizeof(struct dynamic_sa_aes128_ccm)/4)
+#define SA_AES128_CCM_CONTENTS 0x3e000042
+#define SA_AES_CCM_CONTENTS 0x3e000002
+
+/*
+ * Security Association (SA) for AES128_GCM
+ */
+struct dynamic_sa_aes128_gcm {
+ struct dynamic_sa_ctl ctrl;
+ __le32 key[4];
+ __le32 inner_digest[4];
+ __le32 iv[4];
+ u32 state_ptr;
+ u32 reserved;
+} __packed;
+
+#define SA_AES128_GCM_LEN (sizeof(struct dynamic_sa_aes128_gcm)/4)
+#define SA_AES128_GCM_CONTENTS 0x3e000442
+#define SA_AES_GCM_CONTENTS 0x3e000402
+
+/*
+ * Security Association (SA) for HASH160: HMAC-SHA1
+ */
+struct dynamic_sa_hash160 {
+ struct dynamic_sa_ctl ctrl;
+ __le32 inner_digest[5];
+ __le32 outer_digest[5];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_HASH160_LEN (sizeof(struct dynamic_sa_hash160)/4)
+#define SA_HASH160_CONTENTS 0x2000a502
+
+static inline u32
+get_dynamic_sa_offset_state_ptr_field(struct dynamic_sa_ctl *cts)
+{
+ u32 offset;
+
+ offset = cts->sa_contents.bf.key_size
+ + cts->sa_contents.bf.inner_size
+ + cts->sa_contents.bf.outer_size
+ + cts->sa_contents.bf.spi
+ + cts->sa_contents.bf.seq_num0
+ + cts->sa_contents.bf.seq_num1
+ + cts->sa_contents.bf.seq_num_mask0
+ + cts->sa_contents.bf.seq_num_mask1
+ + cts->sa_contents.bf.seq_num_mask2
+ + cts->sa_contents.bf.seq_num_mask3
+ + cts->sa_contents.bf.iv0
+ + cts->sa_contents.bf.iv1
+ + cts->sa_contents.bf.iv2
+ + cts->sa_contents.bf.iv3;
+
+ return sizeof(struct dynamic_sa_ctl) + offset * 4;
+}
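+
+/*
+ * Example: for the AES-128 SA (SA_AES128_CONTENTS) the only non-zero
+ * terms are key_size = 4 and iv0..iv3 = 1, so the helper above returns
+ * 12 + (4 + 4) * 4 = 44 bytes, which is exactly the offset of state_ptr
+ * behind key[4] and iv[4] in struct dynamic_sa_aes128.
+ */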
+
+static inline __le32 *get_dynamic_sa_key_field(struct dynamic_sa_ctl *cts)
+{
+ return (__le32 *) ((unsigned long)cts + sizeof(struct dynamic_sa_ctl));
+}
+
+static inline __le32 *get_dynamic_sa_inner_digest(struct dynamic_sa_ctl *cts)
+{
+ return (__le32 *) ((unsigned long)cts +
+ sizeof(struct dynamic_sa_ctl) +
+ cts->sa_contents.bf.key_size * 4);
+}
+
+#endif
diff --git a/drivers/crypto/amcc/crypto4xx_trng.c b/drivers/crypto/amcc/crypto4xx_trng.c
new file mode 100644
index 000000000..f10a87e54
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_trng.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generic PowerPC 44x RNG driver
+ *
+ * Copyright 2011 IBM Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+
+#include "crypto4xx_core.h"
+#include "crypto4xx_trng.h"
+#include "crypto4xx_reg_def.h"
+
+#define PPC4XX_TRNG_CTRL 0x0008
+#define PPC4XX_TRNG_CTRL_DALM 0x20
+#define PPC4XX_TRNG_STAT 0x0004
+#define PPC4XX_TRNG_STAT_B 0x1
+#define PPC4XX_TRNG_DATA 0x0000
+
+static int ppc4xx_trng_data_present(struct hwrng *rng, int wait)
+{
+ struct crypto4xx_device *dev = (void *)rng->priv;
+ int busy, i, present = 0;
+
+ for (i = 0; i < 20; i++) {
+ busy = (in_le32(dev->trng_base + PPC4XX_TRNG_STAT) &
+ PPC4XX_TRNG_STAT_B);
+ if (!busy || !wait) {
+ present = 1;
+ break;
+ }
+ udelay(10);
+ }
+ return present;
+}
+
+static int ppc4xx_trng_data_read(struct hwrng *rng, u32 *data)
+{
+ struct crypto4xx_device *dev = (void *)rng->priv;
+ *data = in_le32(dev->trng_base + PPC4XX_TRNG_DATA);
+ return 4;
+}
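+
+/*
+ * Note: with the ->data_present/->data_read pair (rather than ->read),
+ * the hwrng core first polls for availability and then pulls 32 bits
+ * at a time; the return value of 4 is the number of valid bytes in
+ * *data.
+ */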
+
+static void ppc4xx_trng_enable(struct crypto4xx_device *dev, bool enable)
+{
+ u32 device_ctrl;
+
+ device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
+ if (enable)
+ device_ctrl |= PPC4XX_TRNG_EN;
+ else
+ device_ctrl &= ~PPC4XX_TRNG_EN;
+ writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
+}
+
+static const struct of_device_id ppc4xx_trng_match[] = {
+ { .compatible = "ppc4xx-rng", },
+ { .compatible = "amcc,ppc460ex-rng", },
+ { .compatible = "amcc,ppc440epx-rng", },
+ {},
+};
+
+void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev)
+{
+ struct crypto4xx_device *dev = core_dev->dev;
+ struct device_node *trng = NULL;
+ struct hwrng *rng = NULL;
+ int err;
+
+ /* Find the TRNG device node and map it */
+ trng = of_find_matching_node(NULL, ppc4xx_trng_match);
+ if (!trng || !of_device_is_available(trng)) {
+ of_node_put(trng);
+ return;
+ }
+
+ dev->trng_base = of_iomap(trng, 0);
+ of_node_put(trng);
+ if (!dev->trng_base)
+ goto err_out;
+
+ rng = kzalloc(sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ goto err_out;
+
+ rng->name = KBUILD_MODNAME;
+ rng->data_present = ppc4xx_trng_data_present;
+ rng->data_read = ppc4xx_trng_data_read;
+ rng->priv = (unsigned long) dev;
+ core_dev->trng = rng;
+ ppc4xx_trng_enable(dev, true);
+ out_le32(dev->trng_base + PPC4XX_TRNG_CTRL, PPC4XX_TRNG_CTRL_DALM);
+ err = devm_hwrng_register(core_dev->device, core_dev->trng);
+ if (err) {
+ ppc4xx_trng_enable(dev, false);
+ dev_err(core_dev->device, "failed to register hwrng (%d).\n",
+ err);
+ goto err_out;
+ }
+ return;
+
+err_out:
+ iounmap(dev->trng_base);
+ kfree(rng);
+ dev->trng_base = NULL;
+ core_dev->trng = NULL;
+}
+
+void ppc4xx_trng_remove(struct crypto4xx_core_device *core_dev)
+{
+ if (core_dev && core_dev->trng) {
+ struct crypto4xx_device *dev = core_dev->dev;
+
+ devm_hwrng_unregister(core_dev->device, core_dev->trng);
+ ppc4xx_trng_enable(dev, false);
+ iounmap(dev->trng_base);
+ kfree(core_dev->trng);
+ }
+}
+
+MODULE_ALIAS("ppc4xx_rng");
diff --git a/drivers/crypto/amcc/crypto4xx_trng.h b/drivers/crypto/amcc/crypto4xx_trng.h
new file mode 100644
index 000000000..735671627
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_trng.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * AMCC SoC PPC4xx Crypto Driver
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. James Hsiao <jhsiao@amcc.com>
+ *
+ * This is the header file for the PPC4xx true random number
+ * generator (TRNG) support.
+ */
+
+#ifndef __CRYPTO4XX_TRNG_H__
+#define __CRYPTO4XX_TRNG_H__
+
+#ifdef CONFIG_HW_RANDOM_PPC4XX
+void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev);
+void ppc4xx_trng_remove(struct crypto4xx_core_device *core_dev);
+#else
+static inline void ppc4xx_trng_probe(
+ struct crypto4xx_core_device *dev __maybe_unused) { }
+static inline void ppc4xx_trng_remove(
+ struct crypto4xx_core_device *dev __maybe_unused) { }
+#endif
+
+#endif