Diffstat (limited to 'include/crypto/internal')
-rw-r--r--  include/crypto/internal/acompress.h     | 112
-rw-r--r--  include/crypto/internal/aead.h          | 168
-rw-r--r--  include/crypto/internal/akcipher.h      | 155
-rw-r--r--  include/crypto/internal/blake2b.h       | 115
-rw-r--r--  include/crypto/internal/blake2s.h       |  21
-rw-r--r--  include/crypto/internal/chacha.h        |  43
-rw-r--r--  include/crypto/internal/cipher.h        | 220
-rw-r--r--  include/crypto/internal/cryptouser.h    |  16
-rw-r--r--  include/crypto/internal/des.h           | 127
-rw-r--r--  include/crypto/internal/ecc.h           | 281
-rw-r--r--  include/crypto/internal/engine.h        |  74
-rw-r--r--  include/crypto/internal/geniv.h         |  27
-rw-r--r--  include/crypto/internal/hash.h          | 283
-rw-r--r--  include/crypto/internal/kdf_selftest.h  |  71
-rw-r--r--  include/crypto/internal/kpp.h           | 245
-rw-r--r--  include/crypto/internal/poly1305.h      |  34
-rw-r--r--  include/crypto/internal/rng.h           |  40
-rw-r--r--  include/crypto/internal/rsa.h           |  57
-rw-r--r--  include/crypto/internal/scompress.h     | 127
-rw-r--r--  include/crypto/internal/sig.h           |  17
-rw-r--r--  include/crypto/internal/simd.h          |  69
-rw-r--r--  include/crypto/internal/skcipher.h      | 253
22 files changed, 2555 insertions, 0 deletions
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
new file mode 100644
index 0000000000..4ac46bafba
--- /dev/null
+++ b/include/crypto/internal/acompress.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Asynchronous Compression operations
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Weigang Li <weigang.li@intel.com>
+ * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+ */
+#ifndef _CRYPTO_ACOMP_INT_H
+#define _CRYPTO_ACOMP_INT_H
+
+#include <crypto/acompress.h>
+#include <crypto/algapi.h>
+
+/**
+ * struct acomp_alg - asynchronous compression algorithm
+ *
+ * @compress: Function performs a compress operation
+ * @decompress: Function performs a de-compress operation
+ * @dst_free: Frees destination buffer if allocated inside the algorithm
+ * @init: Initialize the cryptographic transformation object.
+ * This function is used to initialize the cryptographic
+ * transformation object. This function is called only once at
+ * the instantiation time, right after the transformation context
+ * was allocated. In case the cryptographic hardware has some
+ * special requirements which need to be handled by software, this
+ * function shall check for the precise requirement of the
+ * transformation and put any software fallbacks in place.
+ * @exit: Deinitialize the cryptographic transformation object. This is a
+ * counterpart to @init, used to remove various changes set in
+ * @init.
+ *
+ * @reqsize: Context size for (de)compression requests
+ * @stat: Statistics for compress algorithm
+ * @base: Common crypto API algorithm data structure
+ * @calg: Common algorithm data structure shared with scomp
+ */
+struct acomp_alg {
+ int (*compress)(struct acomp_req *req);
+ int (*decompress)(struct acomp_req *req);
+ void (*dst_free)(struct scatterlist *dst);
+ int (*init)(struct crypto_acomp *tfm);
+ void (*exit)(struct crypto_acomp *tfm);
+
+ unsigned int reqsize;
+
+ union {
+ struct COMP_ALG_COMMON;
+ struct comp_alg_common calg;
+ };
+};
+
+/*
+ * Transform internal helpers.
+ */
+static inline void *acomp_request_ctx(struct acomp_req *req)
+{
+ return req->__ctx;
+}
+
+static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm)
+{
+ return tfm->base.__crt_ctx;
+}
+
+static inline void acomp_request_complete(struct acomp_req *req,
+ int err)
+{
+ crypto_request_complete(&req->base, err);
+}
+
+static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm)
+{
+ struct acomp_req *req;
+
+ req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
+ if (likely(req))
+ acomp_request_set_tfm(req, tfm);
+ return req;
+}
+
+static inline void __acomp_request_free(struct acomp_req *req)
+{
+ kfree_sensitive(req);
+}
+
+/**
+ * crypto_register_acomp() -- Register asynchronous compression algorithm
+ *
+ * Function registers an implementation of an asynchronous
+ * compression algorithm
+ *
+ * @alg: algorithm definition
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_register_acomp(struct acomp_alg *alg);
+
+/**
+ * crypto_unregister_acomp() -- Unregister asynchronous compression algorithm
+ *
+ * Function unregisters an implementation of an asynchronous
+ * compression algorithm
+ *
+ * @alg: algorithm definition
+ */
+void crypto_unregister_acomp(struct acomp_alg *alg);
+
+int crypto_register_acomps(struct acomp_alg *algs, int count);
+void crypto_unregister_acomps(struct acomp_alg *algs, int count);
+
+#endif
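
Usage sketch (illustrative, not part of this patch): a driver built on the
interface above fills in an acomp_alg and registers it at module load. All
example_* names below are hypothetical and the actual (de)compression logic
is elided.

#include <crypto/internal/acompress.h>
#include <linux/module.h>

static int example_compress(struct acomp_req *req)
{
	/* Hardware- or software-specific compression would go here. */
	return -EOPNOTSUPP;
}

static int example_decompress(struct acomp_req *req)
{
	return -EOPNOTSUPP;
}

static struct acomp_alg example_acomp = {
	.compress	= example_compress,
	.decompress	= example_decompress,
	.base		= {
		.cra_name	 = "deflate",
		.cra_driver_name = "deflate-example",
		.cra_priority	 = 100,
		.cra_module	 = THIS_MODULE,
	},
};

static int __init example_mod_init(void)
{
	return crypto_register_acomp(&example_acomp);
}

static void __exit example_mod_exit(void)
{
	crypto_unregister_acomp(&example_acomp);
}

module_init(example_mod_init);
module_exit(example_mod_exit);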
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
new file mode 100644
index 0000000000..28a95eb318
--- /dev/null
+++ b/include/crypto/internal/aead.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * AEAD: Authenticated Encryption with Associated Data
+ *
+ * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#ifndef _CRYPTO_INTERNAL_AEAD_H
+#define _CRYPTO_INTERNAL_AEAD_H
+
+#include <crypto/aead.h>
+#include <crypto/algapi.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+struct rtattr;
+
+struct aead_instance {
+ void (*free)(struct aead_instance *inst);
+ union {
+ struct {
+ char head[offsetof(struct aead_alg, base)];
+ struct crypto_instance base;
+ } s;
+ struct aead_alg alg;
+ };
+};
+
+struct crypto_aead_spawn {
+ struct crypto_spawn base;
+};
+
+struct aead_queue {
+ struct crypto_queue base;
+};
+
+static inline void *crypto_aead_ctx(struct crypto_aead *tfm)
+{
+ return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline void *crypto_aead_ctx_dma(struct crypto_aead *tfm)
+{
+ return crypto_tfm_ctx_dma(&tfm->base);
+}
+
+static inline struct crypto_instance *aead_crypto_instance(
+ struct aead_instance *inst)
+{
+ return container_of(&inst->alg.base, struct crypto_instance, alg);
+}
+
+static inline struct aead_instance *aead_instance(struct crypto_instance *inst)
+{
+ return container_of(&inst->alg, struct aead_instance, alg.base);
+}
+
+static inline struct aead_instance *aead_alg_instance(struct crypto_aead *aead)
+{
+ return aead_instance(crypto_tfm_alg_instance(&aead->base));
+}
+
+static inline void *aead_instance_ctx(struct aead_instance *inst)
+{
+ return crypto_instance_ctx(aead_crypto_instance(inst));
+}
+
+static inline void *aead_request_ctx(struct aead_request *req)
+{
+ return req->__ctx;
+}
+
+static inline void *aead_request_ctx_dma(struct aead_request *req)
+{
+ unsigned int align = crypto_dma_align();
+
+ if (align <= crypto_tfm_ctx_alignment())
+ align = 1;
+
+ return PTR_ALIGN(aead_request_ctx(req), align);
+}
+
+static inline void aead_request_complete(struct aead_request *req, int err)
+{
+ crypto_request_complete(&req->base, err);
+}
+
+static inline u32 aead_request_flags(struct aead_request *req)
+{
+ return req->base.flags;
+}
+
+static inline struct aead_request *aead_request_cast(
+ struct crypto_async_request *req)
+{
+ return container_of(req, struct aead_request, base);
+}
+
+int crypto_grab_aead(struct crypto_aead_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
+
+static inline void crypto_drop_aead(struct crypto_aead_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct aead_alg *crypto_spawn_aead_alg(
+ struct crypto_aead_spawn *spawn)
+{
+ return container_of(spawn->base.alg, struct aead_alg, base);
+}
+
+static inline struct crypto_aead *crypto_spawn_aead(
+ struct crypto_aead_spawn *spawn)
+{
+ return crypto_spawn_tfm2(&spawn->base);
+}
+
+static inline void crypto_aead_set_reqsize(struct crypto_aead *aead,
+ unsigned int reqsize)
+{
+ aead->reqsize = reqsize;
+}
+
+static inline void crypto_aead_set_reqsize_dma(struct crypto_aead *aead,
+ unsigned int reqsize)
+{
+ reqsize += crypto_dma_align() & ~(crypto_tfm_ctx_alignment() - 1);
+ aead->reqsize = reqsize;
+}
+
+static inline void aead_init_queue(struct aead_queue *queue,
+ unsigned int max_qlen)
+{
+ crypto_init_queue(&queue->base, max_qlen);
+}
+
+static inline unsigned int crypto_aead_alg_chunksize(struct aead_alg *alg)
+{
+ return alg->chunksize;
+}
+
+/**
+ * crypto_aead_chunksize() - obtain chunk size
+ * @tfm: cipher handle
+ *
+ * The block size is set to one for ciphers such as CCM. However,
+ * you still need to provide incremental updates in multiples of
+ * the underlying block size as the IV does not have sub-block
+ * granularity. This is known in this API as the chunk size.
+ *
+ * Return: chunk size in bytes
+ */
+static inline unsigned int crypto_aead_chunksize(struct crypto_aead *tfm)
+{
+ return crypto_aead_alg_chunksize(crypto_aead_alg(tfm));
+}
+
+int crypto_register_aead(struct aead_alg *alg);
+void crypto_unregister_aead(struct aead_alg *alg);
+int crypto_register_aeads(struct aead_alg *algs, int count);
+void crypto_unregister_aeads(struct aead_alg *algs, int count);
+int aead_register_instance(struct crypto_template *tmpl,
+ struct aead_instance *inst);
+
+#endif /* _CRYPTO_INTERNAL_AEAD_H */
+
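Usage sketch (illustrative, not part of this patch): the head/base union in
aead_instance lets the structure alias a generic crypto_instance, so a
template's ->create() callback typically allocates the instance with room
for a spawn and binds it via crypto_grab_aead(). Hypothetical example_*
names; the algorithm setup is abbreviated.

#include <crypto/internal/aead.h>
#include <linux/slab.h>

static int example_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_aead_spawn *spawn;
	struct aead_instance *inst;
	int err;

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;
	spawn = aead_instance_ctx(inst);

	err = crypto_grab_aead(spawn, aead_crypto_instance(inst),
			       crypto_attr_alg_name(tb[1]), 0, 0);
	if (err)
		goto err_free;

	/* ... derive inst->alg from crypto_spawn_aead_alg(spawn) ... */

	err = aead_register_instance(tmpl, inst);
	if (!err)
		return 0;

	crypto_drop_aead(spawn);
err_free:
	kfree(inst);
	return err;
}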
diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h
new file mode 100644
index 0000000000..a0fba4b2ec
--- /dev/null
+++ b/include/crypto/internal/akcipher.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Public Key Encryption
+ *
+ * Copyright (c) 2015, Intel Corporation
+ * Authors: Tadeusz Struk <tadeusz.struk@intel.com>
+ */
+#ifndef _CRYPTO_AKCIPHER_INT_H
+#define _CRYPTO_AKCIPHER_INT_H
+#include <crypto/akcipher.h>
+#include <crypto/algapi.h>
+
+struct akcipher_instance {
+ void (*free)(struct akcipher_instance *inst);
+ union {
+ struct {
+ char head[offsetof(struct akcipher_alg, base)];
+ struct crypto_instance base;
+ } s;
+ struct akcipher_alg alg;
+ };
+};
+
+struct crypto_akcipher_spawn {
+ struct crypto_spawn base;
+};
+
+/*
+ * Transform internal helpers.
+ */
+static inline void *akcipher_request_ctx(struct akcipher_request *req)
+{
+ return req->__ctx;
+}
+
+static inline void *akcipher_request_ctx_dma(struct akcipher_request *req)
+{
+ unsigned int align = crypto_dma_align();
+
+ if (align <= crypto_tfm_ctx_alignment())
+ align = 1;
+
+ return PTR_ALIGN(akcipher_request_ctx(req), align);
+}
+
+static inline void akcipher_set_reqsize(struct crypto_akcipher *akcipher,
+ unsigned int reqsize)
+{
+ akcipher->reqsize = reqsize;
+}
+
+static inline void akcipher_set_reqsize_dma(struct crypto_akcipher *akcipher,
+ unsigned int reqsize)
+{
+ reqsize += crypto_dma_align() & ~(crypto_tfm_ctx_alignment() - 1);
+ akcipher->reqsize = reqsize;
+}
+
+static inline void *akcipher_tfm_ctx(struct crypto_akcipher *tfm)
+{
+ return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline void *akcipher_tfm_ctx_dma(struct crypto_akcipher *tfm)
+{
+ return crypto_tfm_ctx_dma(&tfm->base);
+}
+
+static inline void akcipher_request_complete(struct akcipher_request *req,
+ int err)
+{
+ crypto_request_complete(&req->base, err);
+}
+
+static inline const char *akcipher_alg_name(struct crypto_akcipher *tfm)
+{
+ return crypto_akcipher_tfm(tfm)->__crt_alg->cra_name;
+}
+
+static inline struct crypto_instance *akcipher_crypto_instance(
+ struct akcipher_instance *inst)
+{
+ return container_of(&inst->alg.base, struct crypto_instance, alg);
+}
+
+static inline struct akcipher_instance *akcipher_instance(
+ struct crypto_instance *inst)
+{
+ return container_of(&inst->alg, struct akcipher_instance, alg.base);
+}
+
+static inline struct akcipher_instance *akcipher_alg_instance(
+ struct crypto_akcipher *akcipher)
+{
+ return akcipher_instance(crypto_tfm_alg_instance(&akcipher->base));
+}
+
+static inline void *akcipher_instance_ctx(struct akcipher_instance *inst)
+{
+ return crypto_instance_ctx(akcipher_crypto_instance(inst));
+}
+
+int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
+
+static inline struct crypto_akcipher *crypto_spawn_akcipher(
+ struct crypto_akcipher_spawn *spawn)
+{
+ return crypto_spawn_tfm2(&spawn->base);
+}
+
+static inline void crypto_drop_akcipher(struct crypto_akcipher_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct akcipher_alg *crypto_spawn_akcipher_alg(
+ struct crypto_akcipher_spawn *spawn)
+{
+ return container_of(spawn->base.alg, struct akcipher_alg, base);
+}
+
+/**
+ * crypto_register_akcipher() -- Register public key algorithm
+ *
+ * Function registers an implementation of a public key algorithm
+ *
+ * @alg: algorithm definition
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_register_akcipher(struct akcipher_alg *alg);
+
+/**
+ * crypto_unregister_akcipher() -- Unregister public key algorithm
+ *
+ * Function unregisters an implementation of a public key algorithm
+ *
+ * @alg: algorithm definition
+ */
+void crypto_unregister_akcipher(struct akcipher_alg *alg);
+
+/**
+ * akcipher_register_instance() -- Register public key template instance
+ *
+ * Function registers an implementation of an asymmetric key algorithm
+ * created from a template
+ *
+ * @tmpl: the template from which the algorithm was created
+ * @inst: the template instance
+ */
+int akcipher_register_instance(struct crypto_template *tmpl,
+ struct akcipher_instance *inst);
+#endif
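
Usage sketch (illustrative, not part of this patch): the *_dma helpers above
over-allocate the request context so it can be re-aligned for DMA at run
time. A hedged sketch of the intended driver pattern, with hypothetical
example_* names:

#include <crypto/internal/akcipher.h>
#include <linux/string.h>

struct example_req_ctx {
	u8 hw_desc[64];		/* hypothetical device descriptor */
};

static int example_init_tfm(struct crypto_akcipher *tfm)
{
	/* Reserve extra room so the ctx can be DMA-aligned per request. */
	akcipher_set_reqsize_dma(tfm, sizeof(struct example_req_ctx));
	return 0;
}

static int example_encrypt(struct akcipher_request *req)
{
	struct example_req_ctx *rctx = akcipher_request_ctx_dma(req);

	memset(rctx->hw_desc, 0, sizeof(rctx->hw_desc));
	/* ... program the device from rctx->hw_desc ... */
	return -EINPROGRESS;
}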
diff --git a/include/crypto/internal/blake2b.h b/include/crypto/internal/blake2b.h
new file mode 100644
index 0000000000..982fe5e847
--- /dev/null
+++ b/include/crypto/internal/blake2b.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Helper functions for BLAKE2b implementations.
+ * Keep this in sync with the corresponding BLAKE2s header.
+ */
+
+#ifndef _CRYPTO_INTERNAL_BLAKE2B_H
+#define _CRYPTO_INTERNAL_BLAKE2B_H
+
+#include <crypto/blake2b.h>
+#include <crypto/internal/hash.h>
+#include <linux/string.h>
+
+void blake2b_compress_generic(struct blake2b_state *state,
+ const u8 *block, size_t nblocks, u32 inc);
+
+static inline void blake2b_set_lastblock(struct blake2b_state *state)
+{
+ state->f[0] = -1;
+}
+
+typedef void (*blake2b_compress_t)(struct blake2b_state *state,
+ const u8 *block, size_t nblocks, u32 inc);
+
+static inline void __blake2b_update(struct blake2b_state *state,
+ const u8 *in, size_t inlen,
+ blake2b_compress_t compress)
+{
+ const size_t fill = BLAKE2B_BLOCK_SIZE - state->buflen;
+
+ if (unlikely(!inlen))
+ return;
+ if (inlen > fill) {
+ memcpy(state->buf + state->buflen, in, fill);
+ (*compress)(state, state->buf, 1, BLAKE2B_BLOCK_SIZE);
+ state->buflen = 0;
+ in += fill;
+ inlen -= fill;
+ }
+ if (inlen > BLAKE2B_BLOCK_SIZE) {
+ const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2B_BLOCK_SIZE);
+ /* Hash one less (full) block than strictly possible */
+ (*compress)(state, in, nblocks - 1, BLAKE2B_BLOCK_SIZE);
+ in += BLAKE2B_BLOCK_SIZE * (nblocks - 1);
+ inlen -= BLAKE2B_BLOCK_SIZE * (nblocks - 1);
+ }
+ memcpy(state->buf + state->buflen, in, inlen);
+ state->buflen += inlen;
+}
+
+static inline void __blake2b_final(struct blake2b_state *state, u8 *out,
+ blake2b_compress_t compress)
+{
+ int i;
+
+ blake2b_set_lastblock(state);
+ memset(state->buf + state->buflen, 0,
+ BLAKE2B_BLOCK_SIZE - state->buflen); /* Padding */
+ (*compress)(state, state->buf, 1, state->buflen);
+ for (i = 0; i < ARRAY_SIZE(state->h); i++)
+ __cpu_to_le64s(&state->h[i]);
+ memcpy(out, state->h, state->outlen);
+}
+
+/* Helper functions for shash implementations of BLAKE2b */
+
+struct blake2b_tfm_ctx {
+ u8 key[BLAKE2B_KEY_SIZE];
+ unsigned int keylen;
+};
+
+static inline int crypto_blake2b_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm);
+
+ if (keylen == 0 || keylen > BLAKE2B_KEY_SIZE)
+ return -EINVAL;
+
+ memcpy(tctx->key, key, keylen);
+ tctx->keylen = keylen;
+
+ return 0;
+}
+
+static inline int crypto_blake2b_init(struct shash_desc *desc)
+{
+ const struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct blake2b_state *state = shash_desc_ctx(desc);
+ unsigned int outlen = crypto_shash_digestsize(desc->tfm);
+
+ __blake2b_init(state, outlen, tctx->key, tctx->keylen);
+ return 0;
+}
+
+static inline int crypto_blake2b_update(struct shash_desc *desc,
+ const u8 *in, unsigned int inlen,
+ blake2b_compress_t compress)
+{
+ struct blake2b_state *state = shash_desc_ctx(desc);
+
+ __blake2b_update(state, in, inlen, compress);
+ return 0;
+}
+
+static inline int crypto_blake2b_final(struct shash_desc *desc, u8 *out,
+ blake2b_compress_t compress)
+{
+ struct blake2b_state *state = shash_desc_ctx(desc);
+
+ __blake2b_final(state, out, compress);
+ return 0;
+}
+
+#endif /* _CRYPTO_INTERNAL_BLAKE2B_H */
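
Usage sketch (illustrative, not part of this patch): the helpers are
parameterised over the compression function so that arch-optimised
implementations can reuse the buffering logic. A minimal shash wiring,
assuming the generic compressor; example_* names are hypothetical.

#include <crypto/internal/blake2b.h>

/*
 * Forward all state handling to the shared helpers; only the compression
 * function differs between the generic and arch-optimised variants.
 */
static int example_blake2b_update(struct shash_desc *desc,
				  const u8 *in, unsigned int inlen)
{
	return crypto_blake2b_update(desc, in, inlen,
				     blake2b_compress_generic);
}

static int example_blake2b_final(struct shash_desc *desc, u8 *out)
{
	return crypto_blake2b_final(desc, out, blake2b_compress_generic);
}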
diff --git a/include/crypto/internal/blake2s.h b/include/crypto/internal/blake2s.h
new file mode 100644
index 0000000000..506d56530c
--- /dev/null
+++ b/include/crypto/internal/blake2s.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Helper functions for BLAKE2s implementations.
+ * Keep this in sync with the corresponding BLAKE2b header.
+ */
+
+#ifndef _CRYPTO_INTERNAL_BLAKE2S_H
+#define _CRYPTO_INTERNAL_BLAKE2S_H
+
+#include <crypto/blake2s.h>
+#include <linux/string.h>
+
+void blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
+ size_t nblocks, const u32 inc);
+
+void blake2s_compress(struct blake2s_state *state, const u8 *block,
+ size_t nblocks, const u32 inc);
+
+bool blake2s_selftest(void);
+
+#endif /* _CRYPTO_INTERNAL_BLAKE2S_H */
diff --git a/include/crypto/internal/chacha.h b/include/crypto/internal/chacha.h
new file mode 100644
index 0000000000..b085dc1ac1
--- /dev/null
+++ b/include/crypto/internal/chacha.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _CRYPTO_INTERNAL_CHACHA_H
+#define _CRYPTO_INTERNAL_CHACHA_H
+
+#include <crypto/chacha.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/crypto.h>
+
+struct chacha_ctx {
+ u32 key[8];
+ int nrounds;
+};
+
+static inline int chacha_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keysize, int nrounds)
+{
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int i;
+
+ if (keysize != CHACHA_KEY_SIZE)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
+ ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32));
+
+ ctx->nrounds = nrounds;
+ return 0;
+}
+
+static inline int chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keysize)
+{
+ return chacha_setkey(tfm, key, keysize, 20);
+}
+
+static inline int chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keysize)
+{
+ return chacha_setkey(tfm, key, keysize, 12);
+}
+
+#endif /* _CRYPTO_INTERNAL_CHACHA_H */
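
Usage sketch (illustrative, not part of this patch): the setkey helpers
above plug directly into an skcipher_alg definition. Hypothetical
example_* names; the encrypt/decrypt handlers are elided.

#include <crypto/internal/chacha.h>
#include <linux/module.h>

static struct skcipher_alg example_chacha20 = {
	.base.cra_name		= "chacha20",
	.base.cra_driver_name	= "chacha20-example",
	.base.cra_priority	= 100,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct chacha_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= CHACHA_KEY_SIZE,
	.max_keysize		= CHACHA_KEY_SIZE,
	.ivsize			= CHACHA_IV_SIZE,
	.chunksize		= CHACHA_BLOCK_SIZE,
	.setkey			= chacha20_setkey,
	/* .encrypt/.decrypt would be supplied by the implementation */
};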
diff --git a/include/crypto/internal/cipher.h b/include/crypto/internal/cipher.h
new file mode 100644
index 0000000000..5030f6d2df
--- /dev/null
+++ b/include/crypto/internal/cipher.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * Copyright (c) 2002 David S. Miller (davem@redhat.com)
+ * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
+ * and Nettle, by Niels Möller.
+ */
+
+#ifndef _CRYPTO_INTERNAL_CIPHER_H
+#define _CRYPTO_INTERNAL_CIPHER_H
+
+#include <crypto/algapi.h>
+
+struct crypto_cipher {
+ struct crypto_tfm base;
+};
+
+/**
+ * DOC: Single Block Cipher API
+ *
+ * The single block cipher API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
+ *
+ * Using the single block cipher API calls, operations with the basic cipher
+ * primitive can be implemented. These cipher primitives exclude any block
+ * chaining operations including IV handling.
+ *
+ * The purpose of this single block cipher API is to support the implementation
+ * of templates or other concepts that only need to perform the cipher operation
+ * on one block at a time. Templates invoke the underlying cipher primitive
+ * block-wise and process either the input or the output data of these cipher
+ * operations.
+ */
+
+static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
+{
+ return (struct crypto_cipher *)tfm;
+}
+
+/**
+ * crypto_alloc_cipher() - allocate single block cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * single block cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a single block cipher. The returned struct
+ * crypto_cipher is the cipher handle that is required for any subsequent API
+ * invocation for that single block cipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
+ u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_CIPHER;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+
+ return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
+}
+
+static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
+{
+ return &tfm->base;
+}
+
+/**
+ * crypto_free_cipher() - zeroize and free the single block cipher handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_cipher(struct crypto_cipher *tfm)
+{
+ crypto_free_tfm(crypto_cipher_tfm(tfm));
+}
+
+/**
+ * crypto_has_cipher() - Search for the availability of a single block cipher
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * single block cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the single block cipher is known to the kernel crypto API;
+ * false otherwise
+ */
+static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_CIPHER;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+
+ return crypto_has_alg(alg_name, type, mask);
+}
+
+/**
+ * crypto_cipher_blocksize() - obtain block size for cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the single block cipher referenced with the cipher handle
+ * tfm is returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
+{
+ return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
+}
+
+static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
+{
+ return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
+}
+
+static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
+{
+ return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
+}
+
+static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
+}
+
+static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
+}
+
+/**
+ * crypto_cipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the single block cipher referenced by the
+ * cipher handle.
+ *
+ * Note, the key length determines the cipher variant. Many block ciphers
+ * support several key sizes, such as AES-128 vs. AES-192 vs. AES-256.
+ * When providing a 16 byte key for an AES cipher handle, AES-128
+ * is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+int crypto_cipher_setkey(struct crypto_cipher *tfm,
+ const u8 *key, unsigned int keylen);
+
+/**
+ * crypto_cipher_encrypt_one() - encrypt one block of plaintext
+ * @tfm: cipher handle
+ * @dst: points to the buffer that will be filled with the ciphertext
+ * @src: buffer holding the plaintext to be encrypted
+ *
+ * Invoke the encryption operation of one block. The caller must ensure that
+ * the plaintext and ciphertext buffers are at least one block in size.
+ */
+void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
+ u8 *dst, const u8 *src);
+
+/**
+ * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
+ * @tfm: cipher handle
+ * @dst: points to the buffer that will be filled with the plaintext
+ * @src: buffer holding the ciphertext to be decrypted
+ *
+ * Invoke the decryption operation of one block. The caller must ensure that
+ * the plaintext and ciphertext buffers are at least one block in size.
+ */
+void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
+ u8 *dst, const u8 *src);
+
+struct crypto_cipher *crypto_clone_cipher(struct crypto_cipher *cipher);
+
+struct crypto_cipher_spawn {
+ struct crypto_spawn base;
+};
+
+static inline int crypto_grab_cipher(struct crypto_cipher_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_CIPHER;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+ return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
+}
+
+static inline void crypto_drop_cipher(struct crypto_cipher_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct crypto_alg *crypto_spawn_cipher_alg(
+ struct crypto_cipher_spawn *spawn)
+{
+ return spawn->base.alg;
+}
+
+static inline struct crypto_cipher *crypto_spawn_cipher(
+ struct crypto_cipher_spawn *spawn)
+{
+ u32 type = CRYPTO_ALG_TYPE_CIPHER;
+ u32 mask = CRYPTO_ALG_TYPE_MASK;
+
+ return __crypto_cipher_cast(crypto_spawn_tfm(&spawn->base, type, mask));
+}
+
+static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
+{
+ return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
+}
+
+#endif
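
Usage sketch (illustrative, not part of this patch): a one-block AES
encryption through the single block cipher API documented above. The
example_* helper is hypothetical; real callers are templates such as
block cipher modes.

#include <crypto/aes.h>
#include <crypto/internal/cipher.h>

static int example_encrypt_block(const u8 *key, unsigned int keylen,
				 const u8 in[AES_BLOCK_SIZE],
				 u8 out[AES_BLOCK_SIZE])
{
	struct crypto_cipher *tfm;
	int err;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_cipher_setkey(tfm, key, keylen);
	if (!err)
		crypto_cipher_encrypt_one(tfm, out, in);

	crypto_free_cipher(tfm);
	return err;
}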
diff --git a/include/crypto/internal/cryptouser.h b/include/crypto/internal/cryptouser.h
new file mode 100644
index 0000000000..fd54074332
--- /dev/null
+++ b/include/crypto/internal/cryptouser.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/cryptouser.h>
+#include <net/netlink.h>
+
+struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact);
+
+#ifdef CONFIG_CRYPTO_STATS
+int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs);
+#else
+static inline int crypto_reportstat(struct sk_buff *in_skb,
+ struct nlmsghdr *in_nlh,
+ struct nlattr **attrs)
+{
+ return -ENOTSUPP;
+}
+#endif
diff --git a/include/crypto/internal/des.h b/include/crypto/internal/des.h
new file mode 100644
index 0000000000..723fe5bf16
--- /dev/null
+++ b/include/crypto/internal/des.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DES & Triple DES EDE key verification helpers
+ */
+
+#ifndef __CRYPTO_INTERNAL_DES_H
+#define __CRYPTO_INTERNAL_DES_H
+
+#include <linux/crypto.h>
+#include <linux/fips.h>
+#include <crypto/des.h>
+#include <crypto/aead.h>
+#include <crypto/skcipher.h>
+
+/**
+ * crypto_des_verify_key - Check whether a DES key is weak
+ * @tfm: the crypto algo
+ * @key: the key buffer
+ *
+ * Returns -EINVAL if the key is weak and the crypto TFM does not permit weak
+ * keys. Otherwise, 0 is returned.
+ *
+ * It is the job of the caller to ensure that the size of the key equals
+ * DES_KEY_SIZE.
+ */
+static inline int crypto_des_verify_key(struct crypto_tfm *tfm, const u8 *key)
+{
+ struct des_ctx tmp;
+ int err;
+
+ err = des_expand_key(&tmp, key, DES_KEY_SIZE);
+ if (err == -ENOKEY) {
+ if (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)
+ err = -EINVAL;
+ else
+ err = 0;
+ }
+ memzero_explicit(&tmp, sizeof(tmp));
+ return err;
+}
+
+/*
+ * RFC2451:
+ *
+ * For DES-EDE3, there is no known need to reject weak or
+ * complementation keys. Any weakness is obviated by the use of
+ * multiple keys.
+ *
+ * However, if the first two or last two independent 64-bit keys are
+ * equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the
+ * same as DES. Implementers MUST reject keys that exhibit this
+ * property.
+ *
+ */
+static inline int des3_ede_verify_key(const u8 *key, unsigned int key_len,
+ bool check_weak)
+{
+ int ret = fips_enabled ? -EINVAL : -ENOKEY;
+ u32 K[6];
+
+ memcpy(K, key, DES3_EDE_KEY_SIZE);
+
+ if ((!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
+ !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
+ (fips_enabled || check_weak))
+ goto bad;
+
+ if ((!((K[0] ^ K[4]) | (K[1] ^ K[5]))) && fips_enabled)
+ goto bad;
+
+ ret = 0;
+bad:
+ memzero_explicit(K, DES3_EDE_KEY_SIZE);
+
+ return ret;
+}
+
+/**
+ * crypto_des3_ede_verify_key - Check whether a DES3-EDE key is weak
+ * @tfm: the crypto algo
+ * @key: the key buffer
+ *
+ * Returns -EINVAL if the key is weak and the crypto TFM does not permit weak
+ * keys or when running in FIPS mode. Otherwise, 0 is returned. Note that some
+ * keys are rejected in FIPS mode even if weak keys are permitted by the TFM
+ * flags.
+ *
+ * It is the job of the caller to ensure that the size of the key equals
+ * DES3_EDE_KEY_SIZE.
+ */
+static inline int crypto_des3_ede_verify_key(struct crypto_tfm *tfm,
+ const u8 *key)
+{
+ return des3_ede_verify_key(key, DES3_EDE_KEY_SIZE,
+ crypto_tfm_get_flags(tfm) &
+ CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
+}
+
+static inline int verify_skcipher_des_key(struct crypto_skcipher *tfm,
+ const u8 *key)
+{
+ return crypto_des_verify_key(crypto_skcipher_tfm(tfm), key);
+}
+
+static inline int verify_skcipher_des3_key(struct crypto_skcipher *tfm,
+ const u8 *key)
+{
+ return crypto_des3_ede_verify_key(crypto_skcipher_tfm(tfm), key);
+}
+
+static inline int verify_aead_des_key(struct crypto_aead *tfm, const u8 *key,
+ int keylen)
+{
+ if (keylen != DES_KEY_SIZE)
+ return -EINVAL;
+ return crypto_des_verify_key(crypto_aead_tfm(tfm), key);
+}
+
+static inline int verify_aead_des3_key(struct crypto_aead *tfm, const u8 *key,
+ int keylen)
+{
+ if (keylen != DES3_EDE_KEY_SIZE)
+ return -EINVAL;
+ return crypto_des3_ede_verify_key(crypto_aead_tfm(tfm), key);
+}
+
+#endif /* __CRYPTO_INTERNAL_DES_H */
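
Usage sketch (illustrative, not part of this patch): a typical consumer is
a driver's setkey callback, which rejects weak keys before storing or
programming the key. Hypothetical example_* names; keylen is assumed to be
enforced by the algorithm's min/max keysize.

#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include <linux/string.h>

struct example_ctx {
	u8 key[DES3_EDE_KEY_SIZE];
};

static int example_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct example_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	/* keylen == DES3_EDE_KEY_SIZE is enforced via min/max keysize */
	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);
	return 0;
}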
diff --git a/include/crypto/internal/ecc.h b/include/crypto/internal/ecc.h
new file mode 100644
index 0000000000..4f6c1a6888
--- /dev/null
+++ b/include/crypto/internal/ecc.h
@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) 2013, Kenneth MacKay
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _CRYPTO_ECC_H
+#define _CRYPTO_ECC_H
+
+#include <crypto/ecc_curve.h>
+#include <asm/unaligned.h>
+
+/* One digit is u64 qword. */
+#define ECC_CURVE_NIST_P192_DIGITS 3
+#define ECC_CURVE_NIST_P256_DIGITS 4
+#define ECC_CURVE_NIST_P384_DIGITS 6
+#define ECC_MAX_DIGITS (512 / 64) /* due to ecrdsa */
+
+#define ECC_DIGITS_TO_BYTES_SHIFT 3
+
+#define ECC_MAX_BYTES (ECC_MAX_DIGITS << ECC_DIGITS_TO_BYTES_SHIFT)
+
+#define ECC_POINT_INIT(x, y, ndigits) (struct ecc_point) { x, y, ndigits }
+
+/**
+ * ecc_swap_digits() - Copy ndigits from big endian array to native array
+ * @in: Input array
+ * @out: Output array
+ * @ndigits: Number of digits to copy
+ */
+static inline void ecc_swap_digits(const void *in, u64 *out, unsigned int ndigits)
+{
+ const __be64 *src = (__force __be64 *)in;
+ int i;
+
+ for (i = 0; i < ndigits; i++)
+ out[i] = get_unaligned_be64(&src[ndigits - 1 - i]);
+}
+
+/**
+ * ecc_is_key_valid() - Validate a given ECDH private key
+ *
+ * @curve_id: id representing the curve to use
+ * @ndigits: curve's number of digits
+ * @private_key: private key to be used for the given curve
+ * @private_key_len: private key length
+ *
+ * Returns 0 if the key is acceptable, a negative value otherwise
+ */
+int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
+ const u64 *private_key, unsigned int private_key_len);
+
+/**
+ * ecc_gen_privkey() - Generates an ECC private key.
+ * The private key is a random integer in the range 0 < random < n, where n is a
+ * prime that is the order of the cyclic subgroup generated by the distinguished
+ * point G.
+ * @curve_id: id representing the curve to use
+ * @ndigits: curve number of digits
+ * @private_key: buffer for storing the generated private key
+ *
+ * Returns 0 if the private key was generated successfully, a negative value
+ * if an error occurred.
+ */
+int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits,
+		    u64 *private_key);
+
+/**
+ * ecc_make_pub_key() - Compute an ECC public key
+ *
+ * @curve_id: id representing the curve to use
+ * @ndigits: curve's number of digits
+ * @private_key: pregenerated private key for the given curve
+ * @public_key: buffer for storing the generated public key
+ *
+ * Returns 0 if the public key was generated successfully, a negative value
+ * if an error occurred.
+ */
+int ecc_make_pub_key(const unsigned int curve_id, unsigned int ndigits,
+ const u64 *private_key, u64 *public_key);
+
+/**
+ * crypto_ecdh_shared_secret() - Compute a shared secret
+ *
+ * @curve_id: id representing the curve to use
+ * @ndigits: curve's number of digits
+ * @private_key: private key of part A
+ * @public_key: public key of counterpart B
+ * @secret: buffer for storing the calculated shared secret
+ *
+ * Note: It is recommended that you hash the result of crypto_ecdh_shared_secret
+ * before using it for symmetric encryption or HMAC.
+ *
+ * Returns 0 if the shared secret was generated successfully, a negative value
+ * if an error occurred.
+ */
+int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
+ const u64 *private_key, const u64 *public_key,
+ u64 *secret);
+
+/**
+ * ecc_is_pubkey_valid_partial() - Partial public key validation
+ *
+ * @curve: elliptic curve domain parameters
+ * @pk: public key as a point
+ *
+ * Validate public key according to SP800-56A section 5.6.2.3.4 ECC Partial
+ * Public-Key Validation Routine.
+ *
+ * Note: There is no check that the public key is in the correct elliptic curve
+ * subgroup.
+ *
+ * Return: 0 if validation is successful, -EINVAL if validation fails.
+ */
+int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
+ struct ecc_point *pk);
+
+/**
+ * ecc_is_pubkey_valid_full() - Full public key validation
+ *
+ * @curve: elliptic curve domain parameters
+ * @pk: public key as a point
+ *
+ * Validate public key according to SP800-56A section 5.6.2.3.3 ECC Full
+ * Public-Key Validation Routine.
+ *
+ * Return: 0 if validation is successful, -EINVAL if validation fails.
+ */
+int ecc_is_pubkey_valid_full(const struct ecc_curve *curve,
+ struct ecc_point *pk);
+
+/**
+ * vli_is_zero() - Determine if vli is zero
+ *
+ * @vli: vli to check.
+ * @ndigits: length of the @vli
+ */
+bool vli_is_zero(const u64 *vli, unsigned int ndigits);
+
+/**
+ * vli_cmp() - compare left and right vlis
+ *
+ * @left: vli
+ * @right: vli
+ * @ndigits: length of both vlis
+ *
+ * Returns sign of @left - @right, i.e. -1 if @left < @right,
+ * 0 if @left == @right, 1 if @left > @right.
+ */
+int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits);
+
+/**
+ * vli_sub() - Subtracts right from left
+ *
+ * @result: where to write result
+ * @left: vli
+ * @right: vli
+ * @ndigits: length of all vlis
+ *
+ * Note: can modify in-place.
+ *
+ * Return: carry bit.
+ */
+u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
+ unsigned int ndigits);
+
+/**
+ * vli_from_be64() - Load vli from big-endian u64 array
+ *
+ * @dest: destination vli
+ * @src: source array of u64 BE values
+ * @ndigits: length of both vli and array
+ */
+void vli_from_be64(u64 *dest, const void *src, unsigned int ndigits);
+
+/**
+ * vli_from_le64() - Load vli from little-endian u64 array
+ *
+ * @dest: destination vli
+ * @src: source array of u64 LE values
+ * @ndigits: length of both vli and array
+ */
+void vli_from_le64(u64 *dest, const void *src, unsigned int ndigits);
+
+/**
+ * vli_mod_inv() - Modular inversion
+ *
+ * @result: where to write vli number
+ * @input: vli value to operate on
+ * @mod: modulus
+ * @ndigits: length of all vlis
+ */
+void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
+ unsigned int ndigits);
+
+/**
+ * vli_mod_mult_slow() - Modular multiplication
+ *
+ * @result: where to write result value
+ * @left: vli number to multiply with @right
+ * @right: vli number to multiply with @left
+ * @mod: modulus
+ * @ndigits: length of all vlis
+ *
+ * Note: Assumes that @mod is sufficiently large, e.g. a curve order.
+ */
+void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right,
+ const u64 *mod, unsigned int ndigits);
+
+/**
+ * vli_num_bits() - Counts the number of bits required for vli.
+ *
+ * @vli: vli to check.
+ * @ndigits: Length of the @vli
+ *
+ * Return: The number of bits required to represent @vli.
+ */
+unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits);
+
+/**
+ * ecc_alloc_point() - Allocate ECC point.
+ *
+ * @ndigits: Length of vlis in u64 qwords.
+ *
+ * Return: Pointer to the allocated point or NULL if allocation failed.
+ */
+struct ecc_point *ecc_alloc_point(unsigned int ndigits);
+
+/**
+ * ecc_free_point() - Free ECC point.
+ *
+ * @p: The point to free.
+ */
+void ecc_free_point(struct ecc_point *p);
+
+/**
+ * ecc_point_is_zero() - Check if point is zero.
+ *
+ * @point: Point to check for zero.
+ *
+ * Return: true if point is the point at infinity, false otherwise.
+ */
+bool ecc_point_is_zero(const struct ecc_point *point);
+
+/**
+ * ecc_point_mult_shamir() - Add two points multiplied by scalars
+ *
+ * @result: resulting point
+ * @x: scalar to multiply with @p
+ * @p: point to multiply with @x
+ * @y: scalar to multiply with @q
+ * @q: point to multiply with @y
+ * @curve: curve
+ *
+ * Returns result = x * p + y * q over the curve.
+ * This is faster than computing the two scalar multiplications
+ * separately and then adding them.
+ */
+void ecc_point_mult_shamir(const struct ecc_point *result,
+ const u64 *x, const struct ecc_point *p,
+ const u64 *y, const struct ecc_point *q,
+ const struct ecc_curve *curve);
+
+#endif
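
Usage sketch (illustrative, not part of this patch): ECDH key generation
combines the primitives above in a two-step pattern. This assumes the
ECC_CURVE_NIST_P256 curve id from <crypto/ecc_curve.h>; the example_* name
is hypothetical.

#include <crypto/ecc_curve.h>
#include <crypto/internal/ecc.h>

static int example_p256_keypair(u64 privkey[ECC_CURVE_NIST_P256_DIGITS],
				u64 pubkey[2 * ECC_CURVE_NIST_P256_DIGITS])
{
	const unsigned int ndigits = ECC_CURVE_NIST_P256_DIGITS;
	int err;

	err = ecc_gen_privkey(ECC_CURVE_NIST_P256, ndigits, privkey);
	if (err)
		return err;

	return ecc_make_pub_key(ECC_CURVE_NIST_P256, ndigits, privkey,
				pubkey);
}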
diff --git a/include/crypto/internal/engine.h b/include/crypto/internal/engine.h
new file mode 100644
index 0000000000..fbf4be56cf
--- /dev/null
+++ b/include/crypto/internal/engine.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Crypto engine API
+ *
+ * Copyright (c) 2016 Baolin Wang <baolin.wang@linaro.org>
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _CRYPTO_INTERNAL_ENGINE_H
+#define _CRYPTO_INTERNAL_ENGINE_H
+
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <linux/kthread.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+#define ENGINE_NAME_LEN 30
+
+struct device;
+
+/*
+ * struct crypto_engine - crypto hardware engine
+ * @name: the engine name
+ * @idling: the engine is entering idle state
+ * @busy: request pump is busy
+ * @running: the engine is currently processing requests
+ * @retry_support: indication that the hardware allows re-execution
+ * of a failed backlog request; such requests are requeued at the
+ * head of the queue to preserve ordering
+ * @list: link with the global crypto engine list
+ * @queue_lock: spinlock to synchronise access to request queue
+ * @queue: the crypto queue of the engine
+ * @rt: whether this queue is set to run as a realtime task
+ * @prepare_crypt_hardware: a request will soon arrive from the queue
+ * so the subsystem requests the driver to prepare the hardware
+ * by issuing this call
+ * @unprepare_crypt_hardware: there are currently no more requests on the
+ * queue so the subsystem notifies the driver that it may relax the
+ * hardware by issuing this call
+ * @do_batch_requests: execute a batch of requests. Depends on multiple
+ * requests support.
+ * @kworker: kthread worker struct for request pump
+ * @pump_requests: work struct for scheduling work to the request pump
+ * @priv_data: the engine private data
+ * @cur_req: the request currently being processed
+ */
+struct crypto_engine {
+ char name[ENGINE_NAME_LEN];
+ bool idling;
+ bool busy;
+ bool running;
+
+ bool retry_support;
+
+ struct list_head list;
+ spinlock_t queue_lock;
+ struct crypto_queue queue;
+ struct device *dev;
+
+ bool rt;
+
+ int (*prepare_crypt_hardware)(struct crypto_engine *engine);
+ int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
+ int (*do_batch_requests)(struct crypto_engine *engine);
+
+ struct kthread_worker *kworker;
+ struct kthread_work pump_requests;
+
+ void *priv_data;
+ struct crypto_async_request *cur_req;
+};
+
+#endif
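
Usage sketch (illustrative, not part of this patch): drivers normally
obtain a crypto_engine through the public allocation helpers rather than
filling this struct directly. A hedged sketch of the usual probe-time
bring-up, assuming crypto_engine_alloc_init()/crypto_engine_start() from
<crypto/engine.h>; example_* names are hypothetical.

#include <crypto/engine.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct crypto_engine *engine;

	/* The second argument selects a realtime kworker for the pump. */
	engine = crypto_engine_alloc_init(&pdev->dev, true);
	if (!engine)
		return -ENOMEM;

	return crypto_engine_start(engine);
}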
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
new file mode 100644
index 0000000000..7fd7126f59
--- /dev/null
+++ b/include/crypto/internal/geniv.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * geniv: IV generation
+ *
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#ifndef _CRYPTO_INTERNAL_GENIV_H
+#define _CRYPTO_INTERNAL_GENIV_H
+
+#include <crypto/internal/aead.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+struct aead_geniv_ctx {
+ spinlock_t lock;
+ struct crypto_aead *child;
+ struct crypto_sync_skcipher *sknull;
+ u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
+};
+
+struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
+ struct rtattr **tb);
+int aead_init_geniv(struct crypto_aead *tfm);
+void aead_exit_geniv(struct crypto_aead *tfm);
+
+#endif /* _CRYPTO_INTERNAL_GENIV_H */
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
new file mode 100644
index 0000000000..cf65676e45
--- /dev/null
+++ b/include/crypto/internal/hash.h
@@ -0,0 +1,283 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Hash algorithms.
+ *
+ * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#ifndef _CRYPTO_INTERNAL_HASH_H
+#define _CRYPTO_INTERNAL_HASH_H
+
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+
+struct ahash_request;
+struct scatterlist;
+
+struct crypto_hash_walk {
+ char *data;
+
+ unsigned int offset;
+ unsigned int alignmask;
+
+ struct page *pg;
+ unsigned int entrylen;
+
+ unsigned int total;
+ struct scatterlist *sg;
+
+ unsigned int flags;
+};
+
+struct ahash_instance {
+ void (*free)(struct ahash_instance *inst);
+ union {
+ struct {
+ char head[offsetof(struct ahash_alg, halg.base)];
+ struct crypto_instance base;
+ } s;
+ struct ahash_alg alg;
+ };
+};
+
+struct shash_instance {
+ void (*free)(struct shash_instance *inst);
+ union {
+ struct {
+ char head[offsetof(struct shash_alg, base)];
+ struct crypto_instance base;
+ } s;
+ struct shash_alg alg;
+ };
+};
+
+struct crypto_ahash_spawn {
+ struct crypto_spawn base;
+};
+
+struct crypto_shash_spawn {
+ struct crypto_spawn base;
+};
+
+int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
+int crypto_hash_walk_first(struct ahash_request *req,
+ struct crypto_hash_walk *walk);
+
+static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
+{
+ return !(walk->entrylen | walk->total);
+}
+
+int crypto_register_ahash(struct ahash_alg *alg);
+void crypto_unregister_ahash(struct ahash_alg *alg);
+int crypto_register_ahashes(struct ahash_alg *algs, int count);
+void crypto_unregister_ahashes(struct ahash_alg *algs, int count);
+int ahash_register_instance(struct crypto_template *tmpl,
+ struct ahash_instance *inst);
+
+int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
+ unsigned int keylen);
+
+static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
+{
+ return alg->setkey != shash_no_setkey;
+}
+
+static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg)
+{
+ return crypto_shash_alg_has_setkey(alg) &&
+ !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY);
+}
+
+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg);
+
+int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
+
+static inline void crypto_drop_ahash(struct crypto_ahash_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct hash_alg_common *crypto_spawn_ahash_alg(
+ struct crypto_ahash_spawn *spawn)
+{
+ return __crypto_hash_alg_common(spawn->base.alg);
+}
+
+int crypto_register_shash(struct shash_alg *alg);
+void crypto_unregister_shash(struct shash_alg *alg);
+int crypto_register_shashes(struct shash_alg *algs, int count);
+void crypto_unregister_shashes(struct shash_alg *algs, int count);
+int shash_register_instance(struct crypto_template *tmpl,
+ struct shash_instance *inst);
+void shash_free_singlespawn_instance(struct shash_instance *inst);
+
+int crypto_grab_shash(struct crypto_shash_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
+
+static inline void crypto_drop_shash(struct crypto_shash_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct shash_alg *crypto_spawn_shash_alg(
+ struct crypto_shash_spawn *spawn)
+{
+ return __crypto_shash_alg(spawn->base.alg);
+}
+
+int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc);
+int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc);
+int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc);
+
+static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm)
+{
+ return crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+}
+
+static inline void *crypto_ahash_ctx_dma(struct crypto_ahash *tfm)
+{
+ return crypto_tfm_ctx_dma(crypto_ahash_tfm(tfm));
+}
+
+static inline struct ahash_alg *__crypto_ahash_alg(struct crypto_alg *alg)
+{
+ return container_of(__crypto_hash_alg_common(alg), struct ahash_alg,
+ halg);
+}
+
+static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
+{
+ return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
+ halg);
+}
+
+static inline void crypto_ahash_set_statesize(struct crypto_ahash *tfm,
+ unsigned int size)
+{
+ tfm->statesize = size;
+}
+
+static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm,
+ unsigned int reqsize)
+{
+ tfm->reqsize = reqsize;
+}
+
+static inline void crypto_ahash_set_reqsize_dma(struct crypto_ahash *ahash,
+ unsigned int reqsize)
+{
+ reqsize += crypto_dma_align() & ~(crypto_tfm_ctx_alignment() - 1);
+ ahash->reqsize = reqsize;
+}
+
+static inline struct crypto_instance *ahash_crypto_instance(
+ struct ahash_instance *inst)
+{
+ return &inst->s.base;
+}
+
+static inline struct ahash_instance *ahash_instance(
+ struct crypto_instance *inst)
+{
+ return container_of(inst, struct ahash_instance, s.base);
+}
+
+static inline struct ahash_instance *ahash_alg_instance(
+ struct crypto_ahash *ahash)
+{
+ return ahash_instance(crypto_tfm_alg_instance(&ahash->base));
+}
+
+static inline void *ahash_instance_ctx(struct ahash_instance *inst)
+{
+ return crypto_instance_ctx(ahash_crypto_instance(inst));
+}
+
+static inline void *ahash_request_ctx_dma(struct ahash_request *req)
+{
+ unsigned int align = crypto_dma_align();
+
+ if (align <= crypto_tfm_ctx_alignment())
+ align = 1;
+
+ return PTR_ALIGN(ahash_request_ctx(req), align);
+}
+
+static inline void ahash_request_complete(struct ahash_request *req, int err)
+{
+ crypto_request_complete(&req->base, err);
+}
+
+static inline u32 ahash_request_flags(struct ahash_request *req)
+{
+ return req->base.flags;
+}
+
+static inline struct crypto_ahash *crypto_spawn_ahash(
+ struct crypto_ahash_spawn *spawn)
+{
+ return crypto_spawn_tfm2(&spawn->base);
+}
+
+static inline int ahash_enqueue_request(struct crypto_queue *queue,
+ struct ahash_request *request)
+{
+ return crypto_enqueue_request(queue, &request->base);
+}
+
+static inline struct ahash_request *ahash_dequeue_request(
+ struct crypto_queue *queue)
+{
+ return ahash_request_cast(crypto_dequeue_request(queue));
+}
+
+static inline void *crypto_shash_ctx(struct crypto_shash *tfm)
+{
+ return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline struct crypto_instance *shash_crypto_instance(
+ struct shash_instance *inst)
+{
+ return &inst->s.base;
+}
+
+static inline struct shash_instance *shash_instance(
+ struct crypto_instance *inst)
+{
+ return container_of(inst, struct shash_instance, s.base);
+}
+
+static inline struct shash_instance *shash_alg_instance(
+ struct crypto_shash *shash)
+{
+ return shash_instance(crypto_tfm_alg_instance(&shash->base));
+}
+
+static inline void *shash_instance_ctx(struct shash_instance *inst)
+{
+ return crypto_instance_ctx(shash_crypto_instance(inst));
+}
+
+static inline struct crypto_shash *crypto_spawn_shash(
+ struct crypto_shash_spawn *spawn)
+{
+ return crypto_spawn_tfm2(&spawn->base);
+}
+
+static inline void *crypto_shash_ctx_aligned(struct crypto_shash *tfm)
+{
+ return crypto_tfm_ctx_aligned(&tfm->base);
+}
+
+static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
+{
+ return container_of(tfm, struct crypto_shash, base);
+}
+
+#endif /* _CRYPTO_INTERNAL_HASH_H */
+
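Usage sketch (illustrative, not part of this patch): the enqueue/dequeue
wrappers above adapt the generic crypto_queue to ahash requests. A hedged
sketch of how a driver might serialise submissions; example_* names are
hypothetical and the queue is assumed to have been set up with
crypto_init_queue().

#include <crypto/internal/hash.h>
#include <linux/spinlock.h>

struct example_dev {
	spinlock_t lock;
	struct crypto_queue queue;	/* set up with crypto_init_queue() */
};

static int example_ahash_enqueue(struct example_dev *dev,
				 struct ahash_request *req)
{
	int ret;

	spin_lock_bh(&dev->lock);
	ret = ahash_enqueue_request(&dev->queue, req);
	spin_unlock_bh(&dev->lock);
	return ret;
}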
diff --git a/include/crypto/internal/kdf_selftest.h b/include/crypto/internal/kdf_selftest.h
new file mode 100644
index 0000000000..4d03d2af57
--- /dev/null
+++ b/include/crypto/internal/kdf_selftest.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2021, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _CRYPTO_KDF_SELFTEST_H
+#define _CRYPTO_KDF_SELFTEST_H
+
+#include <crypto/hash.h>
+#include <linux/uio.h>
+
+struct kdf_testvec {
+ unsigned char *key;
+ size_t keylen;
+ unsigned char *ikm;
+ size_t ikmlen;
+ struct kvec info;
+ unsigned char *expected;
+ size_t expectedlen;
+};
+
+static inline int
+kdf_test(const struct kdf_testvec *test, const char *name,
+ int (*crypto_kdf_setkey)(struct crypto_shash *kmd,
+ const u8 *key, size_t keylen,
+ const u8 *ikm, size_t ikmlen),
+ int (*crypto_kdf_generate)(struct crypto_shash *kmd,
+ const struct kvec *info,
+ unsigned int info_nvec,
+ u8 *dst, unsigned int dlen))
+{
+ struct crypto_shash *kmd;
+ int ret;
+ u8 *buf = kzalloc(test->expectedlen, GFP_KERNEL);
+
+ if (!buf)
+ return -ENOMEM;
+
+ kmd = crypto_alloc_shash(name, 0, 0);
+ if (IS_ERR(kmd)) {
+ pr_err("alg: kdf: could not allocate hash handle for %s\n",
+ name);
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ ret = crypto_kdf_setkey(kmd, test->key, test->keylen,
+ test->ikm, test->ikmlen);
+ if (ret) {
+ pr_err("alg: kdf: could not set key derivation key\n");
+ goto err;
+ }
+
+ ret = crypto_kdf_generate(kmd, &test->info, 1, buf, test->expectedlen);
+ if (ret) {
+ pr_err("alg: kdf: could not obtain key data\n");
+ goto err;
+ }
+
+ ret = memcmp(test->expected, buf, test->expectedlen);
+ if (ret)
+ ret = -EINVAL;
+
+err:
+ crypto_free_shash(kmd);
+ kfree(buf);
+ return ret;
+}
+
+#endif /* _CRYPTO_KDF_SELFTEST_H */
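
Usage sketch (illustrative, not part of this patch): a caller passes its
KDF's setkey/generate pair plus a known-answer vector to kdf_test(). All
example_* names and the vector data are placeholders; a real vector carries
published expected output, as the SP800-108 counter KDF's selftest does.

#include <crypto/internal/kdf_selftest.h>

/*
 * Hypothetical KDF under test; the setkey/generate pair matches the
 * function pointer types that kdf_test() expects.
 */
static int example_kdf_setkey(struct crypto_shash *kmd,
			      const u8 *key, size_t keylen,
			      const u8 *ikm, size_t ikmlen);
static int example_kdf_generate(struct crypto_shash *kmd,
				const struct kvec *info,
				unsigned int info_nvec,
				u8 *dst, unsigned int dlen);

/* Placeholder vector data, zeroed for illustration only. */
static unsigned char example_key[32];
static unsigned char example_expected[32];

static const struct kdf_testvec example_tv = {
	.key	     = example_key,
	.keylen	     = sizeof(example_key),
	.info	     = { .iov_base = "example-label", .iov_len = 13 },
	.expected    = example_expected,
	.expectedlen = sizeof(example_expected),
};

static int __init example_kdf_selftest(void)
{
	return kdf_test(&example_tv, "hmac(sha256)",
			example_kdf_setkey, example_kdf_generate);
}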
diff --git a/include/crypto/internal/kpp.h b/include/crypto/internal/kpp.h
new file mode 100644
index 0000000000..0a6db8c4a9
--- /dev/null
+++ b/include/crypto/internal/kpp.h
@@ -0,0 +1,245 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Key-agreement Protocol Primitives (KPP)
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
+ */
+#ifndef _CRYPTO_KPP_INT_H
+#define _CRYPTO_KPP_INT_H
+#include <crypto/kpp.h>
+#include <crypto/algapi.h>
+
+/**
+ * struct kpp_instance - KPP template instance
+ * @free: Callback getting invoked upon instance destruction. Must be set.
+ * @s: Internal. Generic crypto core instance state, properly laid
+ *     out to alias with @alg as needed.
+ * @alg: The &struct kpp_alg implementation provided by the instance.
+ */
+struct kpp_instance {
+ void (*free)(struct kpp_instance *inst);
+ union {
+ struct {
+ char head[offsetof(struct kpp_alg, base)];
+ struct crypto_instance base;
+ } s;
+ struct kpp_alg alg;
+ };
+};
+
+/**
+ * struct crypto_kpp_spawn - KPP algorithm spawn
+ * @base: Internal. Generic crypto core spawn state.
+ *
+ * Template instances can get a hold on some inner KPP algorithm by
+ * binding a &struct crypto_kpp_spawn via
+ * crypto_grab_kpp(). Transforms may subsequently get instantiated
+ * from the referenced inner &struct kpp_alg by means of
+ * crypto_spawn_kpp().
+ */
+struct crypto_kpp_spawn {
+ struct crypto_spawn base;
+};
+
+/*
+ * Transform internal helpers.
+ */
+static inline void *kpp_request_ctx(struct kpp_request *req)
+{
+ return req->__ctx;
+}
+
+static inline void *kpp_request_ctx_dma(struct kpp_request *req)
+{
+ unsigned int align = crypto_dma_align();
+
+ if (align <= crypto_tfm_ctx_alignment())
+ align = 1;
+
+ return PTR_ALIGN(kpp_request_ctx(req), align);
+}
+
+static inline void kpp_set_reqsize(struct crypto_kpp *kpp,
+ unsigned int reqsize)
+{
+ kpp->reqsize = reqsize;
+}
+
+static inline void kpp_set_reqsize_dma(struct crypto_kpp *kpp,
+ unsigned int reqsize)
+{
+ reqsize += crypto_dma_align() & ~(crypto_tfm_ctx_alignment() - 1);
+ kpp->reqsize = reqsize;
+}
+
+static inline void *kpp_tfm_ctx(struct crypto_kpp *tfm)
+{
+ return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline void *kpp_tfm_ctx_dma(struct crypto_kpp *tfm)
+{
+ return crypto_tfm_ctx_dma(&tfm->base);
+}
+
+static inline void kpp_request_complete(struct kpp_request *req, int err)
+{
+ crypto_request_complete(&req->base, err);
+}
+
+static inline const char *kpp_alg_name(struct crypto_kpp *tfm)
+{
+ return crypto_kpp_tfm(tfm)->__crt_alg->cra_name;
+}
+
+/*
+ * Template instance internal helpers.
+ */
+/**
+ * kpp_crypto_instance() - Cast a &struct kpp_instance to the corresponding
+ * generic &struct crypto_instance.
+ * @inst: Pointer to the &struct kpp_instance to be cast.
+ * Return: A pointer to the &struct crypto_instance embedded in @inst.
+ */
+static inline struct crypto_instance *kpp_crypto_instance(
+ struct kpp_instance *inst)
+{
+ return &inst->s.base;
+}
+
+/**
+ * kpp_instance() - Cast a generic &struct crypto_instance to the corresponding
+ * &struct kpp_instance.
+ * @inst: Pointer to the &struct crypto_instance to be cast.
+ * Return: A pointer to the &struct kpp_instance @inst is embedded in.
+ */
+static inline struct kpp_instance *kpp_instance(struct crypto_instance *inst)
+{
+ return container_of(inst, struct kpp_instance, s.base);
+}
+
+/**
+ * kpp_alg_instance() - Get the &struct kpp_instance a given KPP transform has
+ * been instantiated from.
+ * @kpp: The KPP transform instantiated from some &struct kpp_instance.
+ * Return: The &struct kpp_instance associated with @kpp.
+ */
+static inline struct kpp_instance *kpp_alg_instance(struct crypto_kpp *kpp)
+{
+ return kpp_instance(crypto_tfm_alg_instance(&kpp->base));
+}
+
+/**
+ * kpp_instance_ctx() - Get a pointer to a &struct kpp_instance's implementation
+ * specific context data.
+ * @inst: The &struct kpp_instance whose context data to access.
+ *
+ * A KPP template implementation may allocate extra memory beyond the
+ * end of a &struct kpp_instance instantiated from &crypto_template.create().
+ * This function provides a means to obtain a pointer to this area.
+ *
+ * Return: A pointer to the implementation specific context data.
+ */
+static inline void *kpp_instance_ctx(struct kpp_instance *inst)
+{
+ return crypto_instance_ctx(kpp_crypto_instance(inst));
+}
+
+/*
+ * KPP algorithm (un)registration functions.
+ */
+/**
+ * crypto_register_kpp() -- Register key-agreement protocol primitives algorithm
+ *
+ * Function registers an implementation of a key-agreement protocol primitive
+ * algorithm
+ *
+ * @alg: algorithm definition
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_register_kpp(struct kpp_alg *alg);
+
+/**
+ * crypto_unregister_kpp() - Unregister a key-agreement protocol primitive
+ * algorithm.
+ *
+ * Function unregisters an implementation of a key-agreement protocol primitive
+ * algorithm.
+ *
+ * @alg: algorithm definition
+ */
+void crypto_unregister_kpp(struct kpp_alg *alg);
+
+/**
+ * kpp_register_instance() - Register a KPP template instance.
+ * @tmpl: The instantiating template.
+ * @inst: The KPP template instance to be registered.
+ * Return: %0 on success, negative error code otherwise.
+ */
+int kpp_register_instance(struct crypto_template *tmpl,
+ struct kpp_instance *inst);
+
+/*
+ * KPP spawn related functions.
+ */
+/**
+ * crypto_grab_kpp() - Look up a KPP algorithm and bind a spawn to it.
+ * @spawn: The KPP spawn to bind.
+ * @inst: The template instance owning @spawn.
+ * @name: The KPP algorithm name to look up.
+ * @type: The type bitset to pass on to the lookup.
+ * @mask: The mask bitmask to pass on to the lookup.
+ * Return: %0 on success, a negative error code otherwise.
+ */
+int crypto_grab_kpp(struct crypto_kpp_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
+
+/**
+ * crypto_drop_kpp() - Release a spawn previously bound via crypto_grab_kpp().
+ * @spawn: The spawn to release.
+ */
+static inline void crypto_drop_kpp(struct crypto_kpp_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+/**
+ * crypto_spawn_kpp_alg() - Get the algorithm a KPP spawn has been bound to.
+ * @spawn: The spawn to get the referenced &struct kpp_alg for.
+ *
+ * This function, as well as its return value, is safe to use only
+ * after @spawn has been successfully bound via crypto_grab_kpp(),
+ * and only until the template instance owning @spawn has either been
+ * registered successfully or the spawn has been released again via
+ * crypto_drop_spawn().
+ *
+ * Return: A pointer to the &struct kpp_alg referenced from the spawn.
+ */
+static inline struct kpp_alg *crypto_spawn_kpp_alg(
+ struct crypto_kpp_spawn *spawn)
+{
+ return container_of(spawn->base.alg, struct kpp_alg, base);
+}
+
+/**
+ * crypto_spawn_kpp() - Create a transform from a KPP spawn.
+ * @spawn: The spawn previously bound to some &struct kpp_alg via
+ * crypto_grab_kpp().
+ *
+ * Once a &struct crypto_kpp_spawn has been successfully bound to a
+ * &struct kpp_alg via crypto_grab_kpp(), transforms for the latter
+ * may get instantiated from the former by means of this function.
+ *
+ * Return: A pointer to the freshly created KPP transform on success
+ * or an ``ERR_PTR()`` otherwise.
+ */
+static inline struct crypto_kpp *crypto_spawn_kpp(
+ struct crypto_kpp_spawn *spawn)
+{
+ return crypto_spawn_tfm2(&spawn->base);
+}
+
+#endif
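
The spawn and instance helpers above are typically exercised together from a
template's ->create() callback. Below is a minimal sketch, assuming a
hypothetical "my_wrap" template; its context layout, naming, and error
handling are illustrative and not part of this header:

    #include <crypto/internal/kpp.h>
    #include <linux/slab.h>

    /* Hypothetical per-instance context: just the spawn for the inner alg. */
    struct my_wrap_inst_ctx {
            struct crypto_kpp_spawn spawn;
    };

    static int my_wrap_create(struct crypto_template *tmpl, struct rtattr **tb)
    {
            struct my_wrap_inst_ctx *ctx;
            struct kpp_instance *inst;
            struct kpp_alg *alg;
            int err;

            inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
            if (!inst)
                    return -ENOMEM;
            ctx = kpp_instance_ctx(inst);

            /* Look up the inner KPP algorithm named in tb[1] and bind to it. */
            err = crypto_grab_kpp(&ctx->spawn, kpp_crypto_instance(inst),
                                  crypto_attr_alg_name(tb[1]), 0, 0);
            if (err)
                    goto err_free;

            /* Valid from here until registration succeeds or the spawn is
             * dropped again. */
            alg = crypto_spawn_kpp_alg(&ctx->spawn);

            err = -ENAMETOOLONG;
            if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
                         "my_wrap(%s)", alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
                    goto err_drop;

            /* Remaining inst->alg fields (driver name, callbacks, reqsize)
             * and inst->free are omitted in this sketch. */

            err = kpp_register_instance(tmpl, inst);
            if (err)
                    goto err_drop;
            return 0;

    err_drop:
            crypto_drop_kpp(&ctx->spawn);
    err_free:
            kfree(inst);
            return err;
    }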
diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h
new file mode 100644
index 0000000000..196aa769f2
--- /dev/null
+++ b/include/crypto/internal/poly1305.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values for the Poly1305 algorithm
+ */
+
+#ifndef _CRYPTO_INTERNAL_POLY1305_H
+#define _CRYPTO_INTERNAL_POLY1305_H
+
+#include <asm/unaligned.h>
+#include <linux/types.h>
+#include <crypto/poly1305.h>
+
+/*
+ * Poly1305 core functions. These only accept whole blocks; the caller must
+ * handle any needed block buffering and padding. 'hibit' must be 1 for any
+ * full blocks, or 0 for the final block if it had to be padded. If 'nonce' is
+ * non-NULL, then it's added at the end to compute the Poly1305 MAC. Otherwise,
+ * only the ε-almost-∆-universal hash function (not the full MAC) is computed.
+ */
+
+void poly1305_core_setkey(struct poly1305_core_key *key,
+ const u8 raw_key[POLY1305_BLOCK_SIZE]);
+static inline void poly1305_core_init(struct poly1305_state *state)
+{
+ *state = (struct poly1305_state){};
+}
+
+void poly1305_core_blocks(struct poly1305_state *state,
+ const struct poly1305_core_key *key, const void *src,
+ unsigned int nblocks, u32 hibit);
+void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4],
+ void *dst);
+
+#endif
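
To make the block-only contract above concrete, a one-shot MAC over a buffer
can be computed as in the sketch below; poly1305_mac_oneshot() and its
padding logic are illustrative, only the poly1305_core_*() calls and
constants come from this header:

    #include <crypto/internal/poly1305.h>
    #include <linux/string.h>

    /* Compute a Poly1305 MAC: 'raw_key' is the 16-byte r part, 'nonce' the
     * s part as four 32-bit words. */
    static void poly1305_mac_oneshot(const u8 raw_key[POLY1305_BLOCK_SIZE],
                                     const u32 nonce[4],
                                     const u8 *data, size_t len, u8 *mac)
    {
            size_t nblocks = len / POLY1305_BLOCK_SIZE;
            size_t rem = len % POLY1305_BLOCK_SIZE;
            struct poly1305_core_key key;
            struct poly1305_state state;

            poly1305_core_setkey(&key, raw_key);
            poly1305_core_init(&state);

            /* Whole blocks are processed with hibit == 1. */
            if (nblocks)
                    poly1305_core_blocks(&state, &key, data, nblocks, 1);

            /* A trailing partial block is padded with 0x01 then zeroes and
             * processed with hibit == 0. */
            if (rem) {
                    u8 buf[POLY1305_BLOCK_SIZE] = {};

                    memcpy(buf, data + nblocks * POLY1305_BLOCK_SIZE, rem);
                    buf[rem] = 1;
                    poly1305_core_blocks(&state, &key, buf, 1, 0);
            }

            /* Non-NULL nonce: emit the full MAC rather than just the hash. */
            poly1305_core_emit(&state, nonce, mac);
    }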
diff --git a/include/crypto/internal/rng.h b/include/crypto/internal/rng.h
new file mode 100644
index 0000000000..e0711b6a59
--- /dev/null
+++ b/include/crypto/internal/rng.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RNG: Random Number Generator algorithms under the crypto API
+ *
+ * Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com>
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#ifndef _CRYPTO_INTERNAL_RNG_H
+#define _CRYPTO_INTERNAL_RNG_H
+
+#include <crypto/algapi.h>
+#include <crypto/rng.h>
+
+int crypto_register_rng(struct rng_alg *alg);
+void crypto_unregister_rng(struct rng_alg *alg);
+int crypto_register_rngs(struct rng_alg *algs, int count);
+void crypto_unregister_rngs(struct rng_alg *algs, int count);
+
+#if defined(CONFIG_CRYPTO_RNG) || defined(CONFIG_CRYPTO_RNG_MODULE)
+int crypto_del_default_rng(void);
+#else
+static inline int crypto_del_default_rng(void)
+{
+ return 0;
+}
+#endif
+
+static inline void *crypto_rng_ctx(struct crypto_rng *tfm)
+{
+ return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline void crypto_rng_set_entropy(struct crypto_rng *tfm,
+ const u8 *data, unsigned int len)
+{
+ crypto_rng_alg(tfm)->set_ent(tfm, data, len);
+}
+
+#endif
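
For reference, a driver built on these registration helpers would look
roughly like the sketch below; the context, callbacks, and names are
placeholders, not an existing driver:

    #include <crypto/internal/rng.h>
    #include <linux/module.h>

    struct my_rng_ctx {
            u32 state;              /* placeholder driver state */
    };

    /* Placeholder callbacks; bodies omitted. */
    static int my_rng_generate(struct crypto_rng *tfm, const u8 *src,
                               unsigned int slen, u8 *dst, unsigned int dlen);
    static int my_rng_seed(struct crypto_rng *tfm, const u8 *seed,
                           unsigned int slen);

    static struct rng_alg my_rng_algs[] = { {
            .generate       = my_rng_generate,
            .seed           = my_rng_seed,
            .seedsize       = 0,
            .base           = {
                    .cra_name        = "stdrng",
                    .cra_driver_name = "my-hw-rng",
                    .cra_priority    = 300,
                    .cra_ctxsize     = sizeof(struct my_rng_ctx),
                    .cra_module      = THIS_MODULE,
            },
    } };

    static int __init my_rng_mod_init(void)
    {
            return crypto_register_rngs(my_rng_algs, ARRAY_SIZE(my_rng_algs));
    }

    static void __exit my_rng_mod_exit(void)
    {
            crypto_unregister_rngs(my_rng_algs, ARRAY_SIZE(my_rng_algs));
    }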
diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h
new file mode 100644
index 0000000000..e870133f4b
--- /dev/null
+++ b/include/crypto/internal/rsa.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RSA internal helpers
+ *
+ * Copyright (c) 2015, Intel Corporation
+ * Authors: Tadeusz Struk <tadeusz.struk@intel.com>
+ */
+#ifndef _RSA_HELPER_
+#define _RSA_HELPER_
+#include <linux/types.h>
+
+/**
+ * rsa_key - RSA key structure
+ * @n : RSA modulus raw byte stream
+ * @e : RSA public exponent raw byte stream
+ * @d : RSA private exponent raw byte stream
+ * @p : RSA prime factor p of n raw byte stream
+ * @q : RSA prime factor q of n raw byte stream
+ * @dp : RSA exponent d mod (p - 1) raw byte stream
+ * @dq : RSA exponent d mod (q - 1) raw byte stream
+ * @qinv : RSA CRT coefficient q^(-1) mod p raw byte stream
+ * @n_sz : length in bytes of RSA modulus n
+ * @e_sz : length in bytes of RSA public exponent
+ * @d_sz : length in bytes of RSA private exponent
+ * @p_sz : length in bytes of p field
+ * @q_sz : length in bytes of q field
+ * @dp_sz : length in bytes of dp field
+ * @dq_sz : length in bytes of dq field
+ * @qinv_sz : length in bytes of qinv field
+ */
+struct rsa_key {
+ const u8 *n;
+ const u8 *e;
+ const u8 *d;
+ const u8 *p;
+ const u8 *q;
+ const u8 *dp;
+ const u8 *dq;
+ const u8 *qinv;
+ size_t n_sz;
+ size_t e_sz;
+ size_t d_sz;
+ size_t p_sz;
+ size_t q_sz;
+ size_t dp_sz;
+ size_t dq_sz;
+ size_t qinv_sz;
+};
+
+int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key,
+ unsigned int key_len);
+
+int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key,
+ unsigned int key_len);
+
+extern struct crypto_template rsa_pkcs1pad_tmpl;
+#endif
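
A driver's ->set_pub_key() implementation would typically parse the
BER-encoded input into this raw form before handing it to hardware. A
sketch, assuming a hypothetical my_rsa_load_key() helper:

    #include <crypto/akcipher.h>
    #include <crypto/internal/rsa.h>

    /* Hypothetical helper loading the raw key into the device. */
    static int my_rsa_load_key(struct crypto_akcipher *tfm,
                               const struct rsa_key *raw);

    static int my_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
                                  unsigned int keylen)
    {
            struct rsa_key raw_key = {};
            int ret;

            /* On success the n/e pointers reference ranges inside 'key'. */
            ret = rsa_parse_pub_key(&raw_key, key, keylen);
            if (ret)
                    return ret;

            /* raw_key.n/n_sz and raw_key.e/e_sz stay valid only as long as
             * 'key' does; copy out whatever the hardware needs. */
            return my_rsa_load_key(tfm, &raw_key);
    }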
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
new file mode 100644
index 0000000000..858fe3965a
--- /dev/null
+++ b/include/crypto/internal/scompress.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Synchronous Compression operations
+ *
+ * Copyright 2015 LG Electronics Inc.
+ * Copyright (c) 2016, Intel Corporation
+ * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+ */
+#ifndef _CRYPTO_SCOMP_INT_H
+#define _CRYPTO_SCOMP_INT_H
+
+#include <crypto/acompress.h>
+#include <crypto/algapi.h>
+
+#define SCOMP_SCRATCH_SIZE 131072
+
+struct acomp_req;
+
+struct crypto_scomp {
+ struct crypto_tfm base;
+};
+
+/**
+ * struct scomp_alg - synchronous compression algorithm
+ *
+ * @alloc_ctx: Function allocates algorithm specific context
+ * @free_ctx: Function frees context allocated with alloc_ctx
+ * @compress: Function performs a compress operation
+ * @decompress: Function performs a de-compress operation
+ * @stat: Statistics for compress algorithm
+ * @base: Common crypto API algorithm data structure
+ * @calg: Common algorithm data structure shared with acomp
+ */
+struct scomp_alg {
+ void *(*alloc_ctx)(struct crypto_scomp *tfm);
+ void (*free_ctx)(struct crypto_scomp *tfm, void *ctx);
+ int (*compress)(struct crypto_scomp *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen,
+ void *ctx);
+ int (*decompress)(struct crypto_scomp *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen,
+ void *ctx);
+
+ union {
+ struct COMP_ALG_COMMON;
+ struct comp_alg_common calg;
+ };
+};
+
+static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg)
+{
+ return container_of(alg, struct scomp_alg, base);
+}
+
+static inline struct crypto_scomp *__crypto_scomp_tfm(struct crypto_tfm *tfm)
+{
+ return container_of(tfm, struct crypto_scomp, base);
+}
+
+static inline struct crypto_tfm *crypto_scomp_tfm(struct crypto_scomp *tfm)
+{
+ return &tfm->base;
+}
+
+static inline void crypto_free_scomp(struct crypto_scomp *tfm)
+{
+ crypto_destroy_tfm(tfm, crypto_scomp_tfm(tfm));
+}
+
+static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm)
+{
+ return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg);
+}
+
+static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm)
+{
+ return crypto_scomp_alg(tfm)->alloc_ctx(tfm);
+}
+
+static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm,
+ void *ctx)
+{
+ crypto_scomp_alg(tfm)->free_ctx(tfm, ctx);
+}
+
+static inline int crypto_scomp_compress(struct crypto_scomp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx)
+{
+ return crypto_scomp_alg(tfm)->compress(tfm, src, slen, dst, dlen, ctx);
+}
+
+static inline int crypto_scomp_decompress(struct crypto_scomp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen,
+ void *ctx)
+{
+ return crypto_scomp_alg(tfm)->decompress(tfm, src, slen, dst, dlen,
+ ctx);
+}
+
+/**
+ * crypto_register_scomp() - Register a synchronous compression algorithm.
+ *
+ * Function registers an implementation of a synchronous
+ * compression algorithm.
+ *
+ * @alg: algorithm definition
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_register_scomp(struct scomp_alg *alg);
+
+/**
+ * crypto_unregister_scomp() - Unregister a synchronous compression algorithm.
+ *
+ * Function unregisters an implementation of a synchronous
+ * compression algorithm.
+ *
+ * @alg: algorithm definition
+ */
+void crypto_unregister_scomp(struct scomp_alg *alg);
+
+int crypto_register_scomps(struct scomp_alg *algs, int count);
+void crypto_unregister_scomps(struct scomp_alg *algs, int count);
+
+#endif
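
Putting the inline helpers above together, a single synchronous compression
call looks roughly like the sketch below; my_compress_once() is illustrative:

    #include <crypto/internal/scompress.h>
    #include <linux/err.h>

    static int my_compress_once(struct crypto_scomp *tfm,
                                const u8 *src, unsigned int slen,
                                u8 *dst, unsigned int *dlen)
    {
            void *ctx = crypto_scomp_alloc_ctx(tfm);
            int ret;

            if (IS_ERR(ctx))
                    return PTR_ERR(ctx);

            /* On entry *dlen is the dst capacity; on success it holds the
             * number of bytes produced. */
            ret = crypto_scomp_compress(tfm, src, slen, dst, dlen, ctx);

            crypto_scomp_free_ctx(tfm, ctx);
            return ret;
    }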
diff --git a/include/crypto/internal/sig.h b/include/crypto/internal/sig.h
new file mode 100644
index 0000000000..97cb26ef81
--- /dev/null
+++ b/include/crypto/internal/sig.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Public Key Signature Algorithm
+ *
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _CRYPTO_INTERNAL_SIG_H
+#define _CRYPTO_INTERNAL_SIG_H
+
+#include <crypto/algapi.h>
+#include <crypto/sig.h>
+
+static inline void *crypto_sig_ctx(struct crypto_sig *tfm)
+{
+ return crypto_tfm_ctx(&tfm->base);
+}
+#endif
diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h
new file mode 100644
index 0000000000..d2316242a9
--- /dev/null
+++ b/include/crypto/internal/simd.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Shared crypto simd helpers
+ */
+
+#ifndef _CRYPTO_INTERNAL_SIMD_H
+#define _CRYPTO_INTERNAL_SIMD_H
+
+#include <linux/percpu.h>
+#include <linux/types.h>
+
+/* skcipher support */
+
+struct simd_skcipher_alg;
+struct skcipher_alg;
+
+struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
+ const char *drvname,
+ const char *basename);
+struct simd_skcipher_alg *simd_skcipher_create(const char *algname,
+ const char *basename);
+void simd_skcipher_free(struct simd_skcipher_alg *alg);
+
+int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
+ struct simd_skcipher_alg **simd_algs);
+
+void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
+ struct simd_skcipher_alg **simd_algs);
+
+/* AEAD support */
+
+struct simd_aead_alg;
+struct aead_alg;
+
+struct simd_aead_alg *simd_aead_create_compat(const char *algname,
+ const char *drvname,
+ const char *basename);
+struct simd_aead_alg *simd_aead_create(const char *algname,
+ const char *basename);
+void simd_aead_free(struct simd_aead_alg *alg);
+
+int simd_register_aeads_compat(struct aead_alg *algs, int count,
+ struct simd_aead_alg **simd_algs);
+
+void simd_unregister_aeads(struct aead_alg *algs, int count,
+ struct simd_aead_alg **simd_algs);
+
+/*
+ * crypto_simd_usable() - is it allowed at this time to use SIMD instructions or
+ * access the SIMD register file?
+ *
+ * This delegates to may_use_simd(), except that this also returns false if SIMD
+ * in crypto code has been temporarily disabled on this CPU by the crypto
+ * self-tests, in order to test the no-SIMD fallback code. This override is
+ * currently limited to configurations where the extra self-tests are enabled,
+ * because it might be a bit too invasive to be part of the regular self-tests.
+ *
+ * This is a macro so that <asm/simd.h>, which some architectures don't have,
+ * doesn't have to be included directly here.
+ */
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+DECLARE_PER_CPU(bool, crypto_simd_disabled_for_test);
+#define crypto_simd_usable() \
+ (may_use_simd() && !this_cpu_read(crypto_simd_disabled_for_test))
+#else
+#define crypto_simd_usable() may_use_simd()
+#endif
+
+#endif /* _CRYPTO_INTERNAL_SIMD_H */
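
A typical use is gating a SIMD fast path in a glue routine, as in the sketch
below; my_hash_update_simd()/my_hash_update_generic() are hypothetical
helpers, and kernel_fpu_begin()/kernel_fpu_end() are the x86 variants (other
architectures have their own begin/end calls):

    #include <asm/fpu/api.h>        /* kernel_fpu_begin/end, x86 */
    #include <asm/simd.h>
    #include <crypto/internal/simd.h>

    struct my_hash_state;           /* hypothetical driver state */

    static void my_hash_update_simd(struct my_hash_state *st,
                                    const u8 *data, unsigned int len);
    static void my_hash_update_generic(struct my_hash_state *st,
                                       const u8 *data, unsigned int len);

    static void my_hash_update(struct my_hash_state *st,
                               const u8 *data, unsigned int len)
    {
            if (crypto_simd_usable()) {
                    kernel_fpu_begin();
                    my_hash_update_simd(st, data, len);
                    kernel_fpu_end();
            } else {
                    /* Also taken when the extra self-tests have disabled
                     * SIMD on this CPU to exercise the fallback. */
                    my_hash_update_generic(st, data, len);
            }
    }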
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
new file mode 100644
index 0000000000..fb3d9e899f
--- /dev/null
+++ b/include/crypto/internal/skcipher.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Symmetric key ciphers.
+ *
+ * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#ifndef _CRYPTO_INTERNAL_SKCIPHER_H
+#define _CRYPTO_INTERNAL_SKCIPHER_H
+
+#include <crypto/algapi.h>
+#include <crypto/internal/cipher.h>
+#include <crypto/skcipher.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+/*
+ * Set this if your algorithm is sync but needs a reqsize larger
+ * than MAX_SYNC_SKCIPHER_REQSIZE.
+ *
+ * Reuse bit that is specific to hash algorithms.
+ */
+#define CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE CRYPTO_ALG_OPTIONAL_KEY
+
+struct aead_request;
+struct rtattr;
+
+struct skcipher_instance {
+ void (*free)(struct skcipher_instance *inst);
+ union {
+ struct {
+ char head[offsetof(struct skcipher_alg, base)];
+ struct crypto_instance base;
+ } s;
+ struct skcipher_alg alg;
+ };
+};
+
+struct crypto_skcipher_spawn {
+ struct crypto_spawn base;
+};
+
+struct skcipher_walk {
+ union {
+ struct {
+ struct page *page;
+ unsigned long offset;
+ } phys;
+
+ struct {
+ u8 *page;
+ void *addr;
+ } virt;
+ } src, dst;
+
+ struct scatter_walk in;
+ unsigned int nbytes;
+
+ struct scatter_walk out;
+ unsigned int total;
+
+ struct list_head buffers;
+
+ u8 *page;
+ u8 *buffer;
+ u8 *oiv;
+ void *iv;
+
+ unsigned int ivsize;
+
+ int flags;
+ unsigned int blocksize;
+ unsigned int stride;
+ unsigned int alignmask;
+};
+
+static inline struct crypto_instance *skcipher_crypto_instance(
+ struct skcipher_instance *inst)
+{
+ return &inst->s.base;
+}
+
+static inline struct skcipher_instance *skcipher_alg_instance(
+ struct crypto_skcipher *skcipher)
+{
+ return container_of(crypto_skcipher_alg(skcipher),
+ struct skcipher_instance, alg);
+}
+
+static inline void *skcipher_instance_ctx(struct skcipher_instance *inst)
+{
+ return crypto_instance_ctx(skcipher_crypto_instance(inst));
+}
+
+static inline void skcipher_request_complete(struct skcipher_request *req, int err)
+{
+ crypto_request_complete(&req->base, err);
+}
+
+int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
+
+static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct skcipher_alg *crypto_skcipher_spawn_alg(
+ struct crypto_skcipher_spawn *spawn)
+{
+ return container_of(spawn->base.alg, struct skcipher_alg, base);
+}
+
+static inline struct skcipher_alg *crypto_spawn_skcipher_alg(
+ struct crypto_skcipher_spawn *spawn)
+{
+ return crypto_skcipher_spawn_alg(spawn);
+}
+
+static inline struct crypto_skcipher *crypto_spawn_skcipher(
+ struct crypto_skcipher_spawn *spawn)
+{
+ return crypto_spawn_tfm2(&spawn->base);
+}
+
+static inline void crypto_skcipher_set_reqsize(
+ struct crypto_skcipher *skcipher, unsigned int reqsize)
+{
+ skcipher->reqsize = reqsize;
+}
+
+static inline void crypto_skcipher_set_reqsize_dma(
+ struct crypto_skcipher *skcipher, unsigned int reqsize)
+{
+ reqsize += crypto_dma_align() & ~(crypto_tfm_ctx_alignment() - 1);
+ skcipher->reqsize = reqsize;
+}
+
+int crypto_register_skcipher(struct skcipher_alg *alg);
+void crypto_unregister_skcipher(struct skcipher_alg *alg);
+int crypto_register_skciphers(struct skcipher_alg *algs, int count);
+void crypto_unregister_skciphers(struct skcipher_alg *algs, int count);
+int skcipher_register_instance(struct crypto_template *tmpl,
+ struct skcipher_instance *inst);
+
+int skcipher_walk_done(struct skcipher_walk *walk, int err);
+int skcipher_walk_virt(struct skcipher_walk *walk,
+ struct skcipher_request *req,
+ bool atomic);
+int skcipher_walk_async(struct skcipher_walk *walk,
+ struct skcipher_request *req);
+int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
+ struct aead_request *req, bool atomic);
+int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
+ struct aead_request *req, bool atomic);
+void skcipher_walk_complete(struct skcipher_walk *walk, int err);
+
+static inline void skcipher_walk_abort(struct skcipher_walk *walk)
+{
+ skcipher_walk_done(walk, -ECANCELED);
+}
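
The walk interface above is driven from an algorithm's ->encrypt() or
->decrypt() in the canonical loop sketched below; my_cipher_crypt_chunk() is
a hypothetical per-chunk primitive:

    #include <crypto/internal/skcipher.h>
    #include <linux/kernel.h>

    static void my_cipher_crypt_chunk(void *ctx, u8 *dst, const u8 *src,
                                      unsigned int nbytes, u8 *iv);

    static int my_skcipher_encrypt(struct skcipher_request *req)
    {
            struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
            struct skcipher_walk walk;
            int err;

            err = skcipher_walk_virt(&walk, req, false);

            while (walk.nbytes) {
                    unsigned int nbytes = walk.nbytes;

                    /* All but the final chunk must stay stride-aligned. */
                    if (nbytes < walk.total)
                            nbytes = round_down(nbytes, walk.stride);

                    my_cipher_crypt_chunk(crypto_skcipher_ctx(tfm),
                                          walk.dst.virt.addr, walk.src.virt.addr,
                                          nbytes, walk.iv);

                    /* Tell the walk how much of this chunk remains. */
                    err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
            }

            return err;
    }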
+
+static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
+{
+ return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline void *crypto_skcipher_ctx_dma(struct crypto_skcipher *tfm)
+{
+ return crypto_tfm_ctx_dma(&tfm->base);
+}
+
+static inline void *skcipher_request_ctx(struct skcipher_request *req)
+{
+ return req->__ctx;
+}
+
+static inline void *skcipher_request_ctx_dma(struct skcipher_request *req)
+{
+ unsigned int align = crypto_dma_align();
+
+ if (align <= crypto_tfm_ctx_alignment())
+ align = 1;
+
+ return PTR_ALIGN(skcipher_request_ctx(req), align);
+}
+
+static inline u32 skcipher_request_flags(struct skcipher_request *req)
+{
+ return req->base.flags;
+}
+
+static inline unsigned int crypto_skcipher_alg_min_keysize(
+ struct skcipher_alg *alg)
+{
+ return alg->min_keysize;
+}
+
+static inline unsigned int crypto_skcipher_alg_max_keysize(
+ struct skcipher_alg *alg)
+{
+ return alg->max_keysize;
+}
+
+static inline unsigned int crypto_skcipher_alg_walksize(
+ struct skcipher_alg *alg)
+{
+ return alg->walksize;
+}
+
+/**
+ * crypto_skcipher_walksize() - obtain walk size
+ * @tfm: cipher handle
+ *
+ * In some cases, algorithms can only perform optimally when operating on
+ * multiple blocks in parallel. This is reflected by the walksize, which
+ * must be a multiple of the chunksize (or equal to it if the concern
+ * does not apply).
+ *
+ * Return: walk size in bytes
+ */
+static inline unsigned int crypto_skcipher_walksize(
+ struct crypto_skcipher *tfm)
+{
+ return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm));
+}
+
+/* Helpers for simple block cipher modes of operation */
+struct skcipher_ctx_simple {
+ struct crypto_cipher *cipher; /* underlying block cipher */
+};
+static inline struct crypto_cipher *
+skcipher_cipher_simple(struct crypto_skcipher *tfm)
+{
+ struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
+
+ return ctx->cipher;
+}
+
+struct skcipher_instance *skcipher_alloc_instance_simple(
+ struct crypto_template *tmpl, struct rtattr **tb);
+
+static inline struct crypto_alg *skcipher_ialg_simple(
+ struct skcipher_instance *inst)
+{
+ struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
+
+ return crypto_spawn_cipher_alg(spawn);
+}
+
+#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */
+
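
Finally, a mode template built on the simple helpers at the end of this
header needs little more than its encrypt/decrypt callbacks. A sketch, with
my_mode_encrypt()/my_mode_decrypt() as hypothetical callbacks:

    #include <crypto/internal/skcipher.h>

    static int my_mode_encrypt(struct skcipher_request *req);
    static int my_mode_decrypt(struct skcipher_request *req);

    static int my_mode_create(struct crypto_template *tmpl, struct rtattr **tb)
    {
            struct skcipher_instance *inst;
            int err;

            /* Binds a cipher spawn to the block cipher named in tb[] and
             * pre-fills block size, key sizes, and ivsize from it. */
            inst = skcipher_alloc_instance_simple(tmpl, tb);
            if (IS_ERR(inst))
                    return PTR_ERR(inst);

            inst->alg.encrypt = my_mode_encrypt;
            inst->alg.decrypt = my_mode_decrypt;

            err = skcipher_register_instance(tmpl, inst);
            if (err)
                    inst->free(inst);
            return err;
    }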