Diffstat (limited to 'include/crypto/internal')
-rw-r--r-- | include/crypto/internal/acompress.h  |  77
-rw-r--r-- | include/crypto/internal/aead.h       | 171
-rw-r--r-- | include/crypto/internal/akcipher.h   | 133
-rw-r--r-- | include/crypto/internal/blake2s.h    | 129
-rw-r--r-- | include/crypto/internal/chacha.h     |  43
-rw-r--r-- | include/crypto/internal/cryptouser.h |  16
-rw-r--r-- | include/crypto/internal/des.h        | 127
-rw-r--r-- | include/crypto/internal/geniv.h      |  27
-rw-r--r-- | include/crypto/internal/hash.h       | 245
-rw-r--r-- | include/crypto/internal/kpp.h        |  59
-rw-r--r-- | include/crypto/internal/poly1305.h   |  34
-rw-r--r-- | include/crypto/internal/rng.h        |  40
-rw-r--r-- | include/crypto/internal/rsa.h        |  57
-rw-r--r-- | include/crypto/internal/scompress.h  | 121
-rw-r--r-- | include/crypto/internal/simd.h       |  69
-rw-r--r-- | include/crypto/internal/skcipher.h   | 223
16 files changed, 1571 insertions, 0 deletions
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
new file mode 100644
index 000000000..cfc47e188
--- /dev/null
+++ b/include/crypto/internal/acompress.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Asynchronous Compression operations
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Weigang Li <weigang.li@intel.com>
+ *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+ */
+#ifndef _CRYPTO_ACOMP_INT_H
+#define _CRYPTO_ACOMP_INT_H
+#include <crypto/acompress.h>
+
+/*
+ * Transform internal helpers.
+ */
+static inline void *acomp_request_ctx(struct acomp_req *req)
+{
+	return req->__ctx;
+}
+
+static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm)
+{
+	return tfm->base.__crt_ctx;
+}
+
+static inline void acomp_request_complete(struct acomp_req *req,
+					  int err)
+{
+	req->base.complete(&req->base, err);
+}
+
+static inline const char *acomp_alg_name(struct crypto_acomp *tfm)
+{
+	return crypto_acomp_tfm(tfm)->__crt_alg->cra_name;
+}
+
+static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm)
+{
+	struct acomp_req *req;
+
+	req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
+	if (likely(req))
+		acomp_request_set_tfm(req, tfm);
+
+	return req;
+}
+
+static inline void __acomp_request_free(struct acomp_req *req)
+{
+	kfree_sensitive(req);
+}
+
+/**
+ * crypto_register_acomp() -- Register asynchronous compression algorithm
+ *
+ * Function registers an implementation of an asynchronous
+ * compression algorithm
+ *
+ * @alg:	algorithm definition
+ *
+ * Return:	zero on success; error code in case of error
+ */
+int crypto_register_acomp(struct acomp_alg *alg);
+
+/**
+ * crypto_unregister_acomp() -- Unregister asynchronous compression algorithm
+ *
+ * Function unregisters an implementation of an asynchronous
+ * compression algorithm
+ *
+ * @alg:	algorithm definition
+ */
+void crypto_unregister_acomp(struct acomp_alg *alg);
+
+int crypto_register_acomps(struct acomp_alg *algs, int count);
+void crypto_unregister_acomps(struct acomp_alg *algs, int count);
+
+#endif
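
Usage note: a compression driver built on this header fills in a struct acomp_alg and registers it at module init. A minimal sketch follows; the callbacks and the "-mydrv" driver name are hypothetical placeholders, not part of the diff.

	#include <crypto/internal/acompress.h>
	#include <linux/module.h>

	/* Hypothetical callbacks; a real driver performs actual (de)compression. */
	static int my_compress(struct acomp_req *req)
	{
		return -ENOSYS;
	}

	static int my_decompress(struct acomp_req *req)
	{
		return -ENOSYS;
	}

	static struct acomp_alg my_acomp = {
		.compress	= my_compress,
		.decompress	= my_decompress,
		.base		= {
			.cra_name	 = "deflate",
			.cra_driver_name = "deflate-mydrv",
			.cra_priority	 = 300,
			.cra_module	 = THIS_MODULE,
		},
	};

	static int __init my_acomp_init(void)
	{
		return crypto_register_acomp(&my_acomp);
	}

	static void __exit my_acomp_exit(void)
	{
		crypto_unregister_acomp(&my_acomp);
	}

	module_init(my_acomp_init);
	module_exit(my_acomp_exit);
	MODULE_LICENSE("GPL");
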
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
new file mode 100644
index 000000000..27b7b0224
--- /dev/null
+++ b/include/crypto/internal/aead.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * AEAD: Authenticated Encryption with Associated Data
+ *
+ * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#ifndef _CRYPTO_INTERNAL_AEAD_H
+#define _CRYPTO_INTERNAL_AEAD_H
+
+#include <crypto/aead.h>
+#include <crypto/algapi.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+struct rtattr;
+
+struct aead_instance {
+	void (*free)(struct aead_instance *inst);
+	union {
+		struct {
+			char head[offsetof(struct aead_alg, base)];
+			struct crypto_instance base;
+		} s;
+		struct aead_alg alg;
+	};
+};
+
+struct crypto_aead_spawn {
+	struct crypto_spawn base;
+};
+
+struct aead_queue {
+	struct crypto_queue base;
+};
+
+static inline void *crypto_aead_ctx(struct crypto_aead *tfm)
+{
+	return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline struct crypto_instance *aead_crypto_instance(
+	struct aead_instance *inst)
+{
+	return container_of(&inst->alg.base, struct crypto_instance, alg);
+}
+
+static inline struct aead_instance *aead_instance(struct crypto_instance *inst)
+{
+	return container_of(&inst->alg, struct aead_instance, alg.base);
+}
+
+static inline struct aead_instance *aead_alg_instance(struct crypto_aead *aead)
+{
+	return aead_instance(crypto_tfm_alg_instance(&aead->base));
+}
+
+static inline void *aead_instance_ctx(struct aead_instance *inst)
+{
+	return crypto_instance_ctx(aead_crypto_instance(inst));
+}
+
+static inline void *aead_request_ctx(struct aead_request *req)
+{
+	return req->__ctx;
+}
+
+static inline void aead_request_complete(struct aead_request *req, int err)
+{
+	req->base.complete(&req->base, err);
+}
+
+static inline u32 aead_request_flags(struct aead_request *req)
+{
+	return req->base.flags;
+}
+
+static inline struct aead_request *aead_request_cast(
+	struct crypto_async_request *req)
+{
+	return container_of(req, struct aead_request, base);
+}
+
+int crypto_grab_aead(struct crypto_aead_spawn *spawn,
+		     struct crypto_instance *inst,
+		     const char *name, u32 type, u32 mask);
+
+static inline void crypto_drop_aead(struct crypto_aead_spawn *spawn)
+{
+	crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct aead_alg *crypto_spawn_aead_alg(
+	struct crypto_aead_spawn *spawn)
+{
+	return container_of(spawn->base.alg, struct aead_alg, base);
+}
+
+static inline struct crypto_aead *crypto_spawn_aead(
+	struct crypto_aead_spawn *spawn)
+{
+	return crypto_spawn_tfm2(&spawn->base);
+}
+
+static inline void crypto_aead_set_reqsize(struct crypto_aead *aead,
+					   unsigned int reqsize)
+{
+	aead->reqsize = reqsize;
+}
+
+static inline void aead_init_queue(struct aead_queue *queue,
+				   unsigned int max_qlen)
+{
+	crypto_init_queue(&queue->base, max_qlen);
+}
+
+static inline int aead_enqueue_request(struct aead_queue *queue,
+				       struct aead_request *request)
+{
+	return crypto_enqueue_request(&queue->base, &request->base);
+}
+
+static inline struct aead_request *aead_dequeue_request(
+	struct aead_queue *queue)
+{
+	struct crypto_async_request *req;
+
+	req = crypto_dequeue_request(&queue->base);
+
+	return req ? container_of(req, struct aead_request, base) : NULL;
+}
+
+static inline struct aead_request *aead_get_backlog(struct aead_queue *queue)
+{
+	struct crypto_async_request *req;
+
+	req = crypto_get_backlog(&queue->base);
+
+	return req ? container_of(req, struct aead_request, base) : NULL;
+}
+
+static inline unsigned int crypto_aead_alg_chunksize(struct aead_alg *alg)
+{
+	return alg->chunksize;
+}
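
Usage note: the reqsize hook above is what lets an implementation keep per-request state. A minimal sketch with hypothetical names (my_aead_req_ctx is invented for illustration):

	#include <crypto/internal/aead.h>
	#include <linux/string.h>

	struct my_aead_req_ctx {		/* hypothetical per-request state */
		u8 iv[16];
	};

	static int my_aead_init_tfm(struct crypto_aead *tfm)
	{
		/* Make aead_request_ctx() return room for a my_aead_req_ctx. */
		crypto_aead_set_reqsize(tfm, sizeof(struct my_aead_req_ctx));
		return 0;
	}

	static int my_aead_encrypt(struct aead_request *req)
	{
		struct my_aead_req_ctx *rctx = aead_request_ctx(req);

		memcpy(rctx->iv, req->iv, sizeof(rctx->iv));
		return -ENOSYS;			/* real work elided */
	}
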
+/**
+ * crypto_aead_chunksize() - obtain chunk size
+ * @tfm: cipher handle
+ *
+ * The block size is set to one for ciphers such as CCM.  However,
+ * you still need to provide incremental updates in multiples of
+ * the underlying block size as the IV does not have sub-block
+ * granularity.  This is known in this API as the chunk size.
+ *
+ * Return: chunk size in bytes
+ */
+static inline unsigned int crypto_aead_chunksize(struct crypto_aead *tfm)
+{
+	return crypto_aead_alg_chunksize(crypto_aead_alg(tfm));
+}
+
+int crypto_register_aead(struct aead_alg *alg);
+void crypto_unregister_aead(struct aead_alg *alg);
+int crypto_register_aeads(struct aead_alg *algs, int count);
+void crypto_unregister_aeads(struct aead_alg *algs, int count);
+int aead_register_instance(struct crypto_template *tmpl,
+			   struct aead_instance *inst);
+
+#endif /* _CRYPTO_INTERNAL_AEAD_H */
diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h
new file mode 100644
index 000000000..8d3220c9a
--- /dev/null
+++ b/include/crypto/internal/akcipher.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Public Key Encryption
+ *
+ * Copyright (c) 2015, Intel Corporation
+ * Authors: Tadeusz Struk <tadeusz.struk@intel.com>
+ */
+#ifndef _CRYPTO_AKCIPHER_INT_H
+#define _CRYPTO_AKCIPHER_INT_H
+#include <crypto/akcipher.h>
+#include <crypto/algapi.h>
+
+struct akcipher_instance {
+	void (*free)(struct akcipher_instance *inst);
+	union {
+		struct {
+			char head[offsetof(struct akcipher_alg, base)];
+			struct crypto_instance base;
+		} s;
+		struct akcipher_alg alg;
+	};
+};
+
+struct crypto_akcipher_spawn {
+	struct crypto_spawn base;
+};
+
+/*
+ * Transform internal helpers.
+ */
+static inline void *akcipher_request_ctx(struct akcipher_request *req)
+{
+	return req->__ctx;
+}
+
+static inline void akcipher_set_reqsize(struct crypto_akcipher *akcipher,
+					unsigned int reqsize)
+{
+	crypto_akcipher_alg(akcipher)->reqsize = reqsize;
+}
+
+static inline void *akcipher_tfm_ctx(struct crypto_akcipher *tfm)
+{
+	return tfm->base.__crt_ctx;
+}
+
+static inline void akcipher_request_complete(struct akcipher_request *req,
+					     int err)
+{
+	req->base.complete(&req->base, err);
+}
+
+static inline const char *akcipher_alg_name(struct crypto_akcipher *tfm)
+{
+	return crypto_akcipher_tfm(tfm)->__crt_alg->cra_name;
+}
+
+static inline struct crypto_instance *akcipher_crypto_instance(
+	struct akcipher_instance *inst)
+{
+	return container_of(&inst->alg.base, struct crypto_instance, alg);
+}
+
+static inline struct akcipher_instance *akcipher_instance(
+	struct crypto_instance *inst)
+{
+	return container_of(&inst->alg, struct akcipher_instance, alg.base);
+}
+
+static inline struct akcipher_instance *akcipher_alg_instance(
+	struct crypto_akcipher *akcipher)
+{
+	return akcipher_instance(crypto_tfm_alg_instance(&akcipher->base));
+}
+
+static inline void *akcipher_instance_ctx(struct akcipher_instance *inst)
+{
+	return crypto_instance_ctx(akcipher_crypto_instance(inst));
+}
+
+int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn,
+			 struct crypto_instance *inst,
+			 const char *name, u32 type, u32 mask);
+
+static inline struct crypto_akcipher *crypto_spawn_akcipher(
+	struct crypto_akcipher_spawn *spawn)
+{
+	return crypto_spawn_tfm2(&spawn->base);
+}
+
+static inline void crypto_drop_akcipher(struct crypto_akcipher_spawn *spawn)
+{
+	crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct akcipher_alg *crypto_spawn_akcipher_alg(
+	struct crypto_akcipher_spawn *spawn)
+{
+	return container_of(spawn->base.alg, struct akcipher_alg, base);
+}
+
+/**
+ * crypto_register_akcipher() -- Register public key algorithm
+ *
+ * Function registers an implementation of a public key algorithm
+ *
+ * @alg:	algorithm definition
+ *
+ * Return:	zero on success; error code in case of error
+ */
+int crypto_register_akcipher(struct akcipher_alg *alg);
+
+/**
+ * crypto_unregister_akcipher() -- Unregister public key algorithm
+ *
+ * Function unregisters an implementation of a public key algorithm
+ *
+ * @alg:	algorithm definition
+ */
+void crypto_unregister_akcipher(struct akcipher_alg *alg);
+
+/**
+ * akcipher_register_instance() -- Register public key template instance
+ *
+ * Function registers an implementation of an asymmetric key algorithm
+ * created from a template
+ *
+ * @tmpl:	the template from which the algorithm was created
+ * @inst:	the template instance
+ */
+int akcipher_register_instance(struct crypto_template *tmpl,
+			       struct akcipher_instance *inst);
+#endif
diff --git a/include/crypto/internal/blake2s.h b/include/crypto/internal/blake2s.h
new file mode 100644
index 000000000..52363eee2
--- /dev/null
+++ b/include/crypto/internal/blake2s.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Helper functions for BLAKE2s implementations.
+ * Keep this in sync with the corresponding BLAKE2b header.
+ */
+
+#ifndef _CRYPTO_INTERNAL_BLAKE2S_H
+#define _CRYPTO_INTERNAL_BLAKE2S_H
+
+#include <crypto/blake2s.h>
+#include <crypto/internal/hash.h>
+#include <linux/string.h>
+
+void blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
+			      size_t nblocks, const u32 inc);
+
+void blake2s_compress(struct blake2s_state *state, const u8 *block,
+		      size_t nblocks, const u32 inc);
+
+bool blake2s_selftest(void);
+
+static inline void blake2s_set_lastblock(struct blake2s_state *state)
+{
+	state->f[0] = -1;
+}
+
+/* Helper functions for BLAKE2s shared by the library and shash APIs */
+
+static __always_inline void
+__blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen,
+		 bool force_generic)
+{
+	const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
+
+	if (unlikely(!inlen))
+		return;
+	if (inlen > fill) {
+		memcpy(state->buf + state->buflen, in, fill);
+		if (force_generic)
+			blake2s_compress_generic(state, state->buf, 1,
+						 BLAKE2S_BLOCK_SIZE);
+		else
+			blake2s_compress(state, state->buf, 1,
+					 BLAKE2S_BLOCK_SIZE);
+		state->buflen = 0;
+		in += fill;
+		inlen -= fill;
+	}
+	if (inlen > BLAKE2S_BLOCK_SIZE) {
+		const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
+		/* Hash one less (full) block than strictly possible */
+		if (force_generic)
+			blake2s_compress_generic(state, in, nblocks - 1,
+						 BLAKE2S_BLOCK_SIZE);
+		else
+			blake2s_compress(state, in, nblocks - 1,
+					 BLAKE2S_BLOCK_SIZE);
+		in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
+		inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
+	}
+	memcpy(state->buf + state->buflen, in, inlen);
+	state->buflen += inlen;
+}
+
+static __always_inline void
+__blake2s_final(struct blake2s_state *state, u8 *out, bool force_generic)
+{
+	blake2s_set_lastblock(state);
+	memset(state->buf + state->buflen, 0,
+	       BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
+	if (force_generic)
+		blake2s_compress_generic(state, state->buf, 1, state->buflen);
+	else
+		blake2s_compress(state, state->buf, 1, state->buflen);
+	cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
+	memcpy(out, state->h, state->outlen);
+}
+
+/* Helper functions for shash implementations of BLAKE2s */
+
+struct blake2s_tfm_ctx {
+	u8 key[BLAKE2S_KEY_SIZE];
+	unsigned int keylen;
+};
+
+static inline int crypto_blake2s_setkey(struct crypto_shash *tfm,
+					const u8 *key, unsigned int keylen)
+{
+	struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm);
+
+	if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE)
+		return -EINVAL;
+
+	memcpy(tctx->key, key, keylen);
+	tctx->keylen = keylen;
+
+	return 0;
+}
+
+static inline int crypto_blake2s_init(struct shash_desc *desc)
+{
+	const struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+	struct blake2s_state *state = shash_desc_ctx(desc);
+	unsigned int outlen = crypto_shash_digestsize(desc->tfm);
+
+	__blake2s_init(state, outlen, tctx->key, tctx->keylen);
+	return 0;
+}
+
+static inline int crypto_blake2s_update(struct shash_desc *desc,
+					const u8 *in, unsigned int inlen,
+					bool force_generic)
+{
+	struct blake2s_state *state = shash_desc_ctx(desc);
+
+	__blake2s_update(state, in, inlen, force_generic);
+	return 0;
+}
+
+static inline int crypto_blake2s_final(struct shash_desc *desc, u8 *out,
+				       bool force_generic)
+{
+	struct blake2s_state *state = shash_desc_ctx(desc);
+
+	__blake2s_final(state, out, force_generic);
+	return 0;
+}
+
+#endif /* _CRYPTO_INTERNAL_BLAKE2S_H */
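
Usage note: these helpers are designed to slot straight into a struct shash_alg, mirroring how the kernel's generic BLAKE2s driver wires them up. A sketch with a hypothetical driver name; force_generic = true selects the portable compression function:

	#include <crypto/internal/blake2s.h>
	#include <linux/module.h>

	static int my_blake2s_update(struct shash_desc *desc, const u8 *in,
				     unsigned int inlen)
	{
		return crypto_blake2s_update(desc, in, inlen, true);
	}

	static int my_blake2s_final(struct shash_desc *desc, u8 *out)
	{
		return crypto_blake2s_final(desc, out, true);
	}

	static struct shash_alg my_blake2s = {
		.base.cra_name		= "blake2s-256",
		.base.cra_driver_name	= "blake2s-256-mydrv",	/* hypothetical */
		.base.cra_priority	= 100,
		.base.cra_flags		= CRYPTO_ALG_OPTIONAL_KEY,
		.base.cra_blocksize	= BLAKE2S_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct blake2s_tfm_ctx),
		.base.cra_module	= THIS_MODULE,
		.digestsize		= BLAKE2S_256_HASH_SIZE,
		.setkey			= crypto_blake2s_setkey,
		.init			= crypto_blake2s_init,
		.update			= my_blake2s_update,
		.final			= my_blake2s_final,
		.descsize		= sizeof(struct blake2s_state),
	};
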
diff --git a/include/crypto/internal/chacha.h b/include/crypto/internal/chacha.h
new file mode 100644
index 000000000..b085dc1ac
--- /dev/null
+++ b/include/crypto/internal/chacha.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _CRYPTO_INTERNAL_CHACHA_H
+#define _CRYPTO_INTERNAL_CHACHA_H
+
+#include <crypto/chacha.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/crypto.h>
+
+struct chacha_ctx {
+	u32 key[8];
+	int nrounds;
+};
+
+static inline int chacha_setkey(struct crypto_skcipher *tfm, const u8 *key,
+				unsigned int keysize, int nrounds)
+{
+	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int i;
+
+	if (keysize != CHACHA_KEY_SIZE)
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
+		ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32));
+
+	ctx->nrounds = nrounds;
+	return 0;
+}
+
+static inline int chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
+				  unsigned int keysize)
+{
+	return chacha_setkey(tfm, key, keysize, 20);
+}
+
+static inline int chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key,
+				  unsigned int keysize)
+{
+	return chacha_setkey(tfm, key, keysize, 12);
+}
+
+#endif /* _CRYPTO_INTERNAL_CHACHA_H */
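
Usage note: chacha20_setkey() and chacha12_setkey() are meant to be plugged in as the ->setkey of an skcipher_alg, as the in-tree ChaCha drivers do. A sketch with a hypothetical encrypt handler:

	#include <crypto/internal/chacha.h>
	#include <linux/module.h>

	static int my_chacha_crypt(struct skcipher_request *req);	/* hypothetical */

	static struct skcipher_alg my_chacha20 = {
		.base.cra_name		= "chacha20",
		.base.cra_driver_name	= "chacha20-mydrv",	/* hypothetical */
		.base.cra_priority	= 300,
		.base.cra_blocksize	= 1,
		.base.cra_ctxsize	= sizeof(struct chacha_ctx),
		.base.cra_module	= THIS_MODULE,
		.min_keysize		= CHACHA_KEY_SIZE,
		.max_keysize		= CHACHA_KEY_SIZE,
		.ivsize			= CHACHA_IV_SIZE,
		.chunksize		= CHACHA_BLOCK_SIZE,
		.setkey			= chacha20_setkey,
		.encrypt		= my_chacha_crypt,
		.decrypt		= my_chacha_crypt,
	};
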
diff --git a/include/crypto/internal/cryptouser.h b/include/crypto/internal/cryptouser.h
new file mode 100644
index 000000000..fd5407433
--- /dev/null
+++ b/include/crypto/internal/cryptouser.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/cryptouser.h>
+#include <net/netlink.h>
+
+struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact);
+
+#ifdef CONFIG_CRYPTO_STATS
+int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs);
+#else
+static inline int crypto_reportstat(struct sk_buff *in_skb,
+				    struct nlmsghdr *in_nlh,
+				    struct nlattr **attrs)
+{
+	return -ENOTSUPP;
+}
+#endif
diff --git a/include/crypto/internal/des.h b/include/crypto/internal/des.h
new file mode 100644
index 000000000..723fe5bf1
--- /dev/null
+++ b/include/crypto/internal/des.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DES & Triple DES EDE key verification helpers
+ */
+
+#ifndef __CRYPTO_INTERNAL_DES_H
+#define __CRYPTO_INTERNAL_DES_H
+
+#include <linux/crypto.h>
+#include <linux/fips.h>
+#include <crypto/des.h>
+#include <crypto/aead.h>
+#include <crypto/skcipher.h>
+
+/**
+ * crypto_des_verify_key - Check whether a DES key is weak
+ * @tfm: the crypto algo
+ * @key: the key buffer
+ *
+ * Returns -EINVAL if the key is weak and the crypto TFM does not permit weak
+ * keys. Otherwise, 0 is returned.
+ *
+ * It is the job of the caller to ensure that the size of the key equals
+ * DES_KEY_SIZE.
+ */
+static inline int crypto_des_verify_key(struct crypto_tfm *tfm, const u8 *key)
+{
+	struct des_ctx tmp;
+	int err;
+
+	err = des_expand_key(&tmp, key, DES_KEY_SIZE);
+	if (err == -ENOKEY) {
+		if (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)
+			err = -EINVAL;
+		else
+			err = 0;
+	}
+	memzero_explicit(&tmp, sizeof(tmp));
+	return err;
+}
+
+/*
+ * RFC2451:
+ *
+ *   For DES-EDE3, there is no known need to reject weak or
+ *   complementation keys.  Any weakness is obviated by the use of
+ *   multiple keys.
+ *
+ *   However, if the first two or last two independent 64-bit keys are
+ *   equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the
+ *   same as DES.  Implementers MUST reject keys that exhibit this
+ *   property.
+ *
+ */
+static inline int des3_ede_verify_key(const u8 *key, unsigned int key_len,
+				      bool check_weak)
+{
+	int ret = fips_enabled ? -EINVAL : -ENOKEY;
+	u32 K[6];
+
+	memcpy(K, key, DES3_EDE_KEY_SIZE);
+
+	if ((!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
+	     !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
+	    (fips_enabled || check_weak))
+		goto bad;
+
+	if ((!((K[0] ^ K[4]) | (K[1] ^ K[5]))) && fips_enabled)
+		goto bad;
+
+	ret = 0;
+bad:
+	memzero_explicit(K, DES3_EDE_KEY_SIZE);
+
+	return ret;
+}
+
+/**
+ * crypto_des3_ede_verify_key - Check whether a DES3-EDE key is weak
+ * @tfm: the crypto algo
+ * @key: the key buffer
+ *
+ * Returns -EINVAL if the key is weak and the crypto TFM does not permit weak
+ * keys or when running in FIPS mode. Otherwise, 0 is returned. Note that some
+ * keys are rejected in FIPS mode even if weak keys are permitted by the TFM
+ * flags.
+ *
+ * It is the job of the caller to ensure that the size of the key equals
+ * DES3_EDE_KEY_SIZE.
+ */
+static inline int crypto_des3_ede_verify_key(struct crypto_tfm *tfm,
+					     const u8 *key)
+{
+	return des3_ede_verify_key(key, DES3_EDE_KEY_SIZE,
+				   crypto_tfm_get_flags(tfm) &
+				   CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
+}
+
+static inline int verify_skcipher_des_key(struct crypto_skcipher *tfm,
+					  const u8 *key)
+{
+	return crypto_des_verify_key(crypto_skcipher_tfm(tfm), key);
+}
+
+static inline int verify_skcipher_des3_key(struct crypto_skcipher *tfm,
+					   const u8 *key)
+{
+	return crypto_des3_ede_verify_key(crypto_skcipher_tfm(tfm), key);
+}
+
+static inline int verify_aead_des_key(struct crypto_aead *tfm, const u8 *key,
+				      int keylen)
+{
+	if (keylen != DES_KEY_SIZE)
+		return -EINVAL;
+	return crypto_des_verify_key(crypto_aead_tfm(tfm), key);
+}
+
+static inline int verify_aead_des3_key(struct crypto_aead *tfm, const u8 *key,
+				       int keylen)
+{
+	if (keylen != DES3_EDE_KEY_SIZE)
+		return -EINVAL;
+	return crypto_des3_ede_verify_key(crypto_aead_tfm(tfm), key);
+}
+
+#endif /* __CRYPTO_INTERNAL_DES_H */
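
Usage note: a typical skcipher ->setkey combines the length check the helpers require with one of the verify functions above. A minimal sketch; my_des3_ctx is a hypothetical context:

	#include <crypto/internal/des.h>
	#include <linux/string.h>

	struct my_des3_ctx {			/* hypothetical */
		u8 key[DES3_EDE_KEY_SIZE];
	};

	static int my_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
	{
		struct my_des3_ctx *ctx = crypto_skcipher_ctx(tfm);
		int err;

		if (keylen != DES3_EDE_KEY_SIZE)
			return -EINVAL;

		/* Rejects k1 == k2 or k2 == k3 when weak keys are forbidden. */
		err = verify_skcipher_des3_key(tfm, key);
		if (err)
			return err;

		memcpy(ctx->key, key, keylen);
		return 0;
	}
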
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
new file mode 100644
index 000000000..7fd7126f5
--- /dev/null
+++ b/include/crypto/internal/geniv.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * geniv: IV generation
+ *
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#ifndef _CRYPTO_INTERNAL_GENIV_H
+#define _CRYPTO_INTERNAL_GENIV_H
+
+#include <crypto/internal/aead.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+struct aead_geniv_ctx {
+	spinlock_t lock;
+	struct crypto_aead *child;
+	struct crypto_sync_skcipher *sknull;
+	u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
+};
+
+struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
+				       struct rtattr **tb);
+int aead_init_geniv(struct crypto_aead *tfm);
+void aead_exit_geniv(struct crypto_aead *tfm);
+
+#endif /* _CRYPTO_INTERNAL_GENIV_H */
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
new file mode 100644
index 000000000..25806141d
--- /dev/null
+++ b/include/crypto/internal/hash.h
@@ -0,0 +1,245 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Hash algorithms.
+ *
+ * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#ifndef _CRYPTO_INTERNAL_HASH_H
+#define _CRYPTO_INTERNAL_HASH_H
+
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+
+struct ahash_request;
+struct scatterlist;
+
+struct crypto_hash_walk {
+	char *data;
+
+	unsigned int offset;
+	unsigned int alignmask;
+
+	struct page *pg;
+	unsigned int entrylen;
+
+	unsigned int total;
+	struct scatterlist *sg;
+
+	unsigned int flags;
+};
+
+struct ahash_instance {
+	void (*free)(struct ahash_instance *inst);
+	union {
+		struct {
+			char head[offsetof(struct ahash_alg, halg.base)];
+			struct crypto_instance base;
+		} s;
+		struct ahash_alg alg;
+	};
+};
+
+struct shash_instance {
+	void (*free)(struct shash_instance *inst);
+	union {
+		struct {
+			char head[offsetof(struct shash_alg, base)];
+			struct crypto_instance base;
+		} s;
+		struct shash_alg alg;
+	};
+};
+
+struct crypto_ahash_spawn {
+	struct crypto_spawn base;
+};
+
+struct crypto_shash_spawn {
+	struct crypto_spawn base;
+};
+
+int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
+int crypto_hash_walk_first(struct ahash_request *req,
+			   struct crypto_hash_walk *walk);
+
+static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
+{
+	return !(walk->entrylen | walk->total);
+}
+
+int crypto_register_ahash(struct ahash_alg *alg);
+void crypto_unregister_ahash(struct ahash_alg *alg);
+int crypto_register_ahashes(struct ahash_alg *algs, int count);
+void crypto_unregister_ahashes(struct ahash_alg *algs, int count);
+int ahash_register_instance(struct crypto_template *tmpl,
+			    struct ahash_instance *inst);
+
+bool crypto_shash_alg_has_setkey(struct shash_alg *alg);
+
+static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg)
+{
+	return crypto_shash_alg_has_setkey(alg) &&
+		!(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY);
+}
+
+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg);
+
+int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
+		      struct crypto_instance *inst,
+		      const char *name, u32 type, u32 mask);
+
+static inline void crypto_drop_ahash(struct crypto_ahash_spawn *spawn)
+{
+	crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct hash_alg_common *crypto_spawn_ahash_alg(
+	struct crypto_ahash_spawn *spawn)
+{
+	return __crypto_hash_alg_common(spawn->base.alg);
+}
+
+int crypto_register_shash(struct shash_alg *alg);
+void crypto_unregister_shash(struct shash_alg *alg);
+int crypto_register_shashes(struct shash_alg *algs, int count);
+void crypto_unregister_shashes(struct shash_alg *algs, int count);
+int shash_register_instance(struct crypto_template *tmpl,
+			    struct shash_instance *inst);
+void shash_free_singlespawn_instance(struct shash_instance *inst);
+
+int crypto_grab_shash(struct crypto_shash_spawn *spawn,
+		      struct crypto_instance *inst,
+		      const char *name, u32 type, u32 mask);
+
+static inline void crypto_drop_shash(struct crypto_shash_spawn *spawn)
+{
+	crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct shash_alg *crypto_spawn_shash_alg(
+	struct crypto_shash_spawn *spawn)
+{
+	return __crypto_shash_alg(spawn->base.alg);
+}
+
+int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc);
+int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc);
+int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc);
+
+int crypto_init_shash_ops_async(struct crypto_tfm *tfm);
+
+static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm)
+{
+	return crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+}
+
+static inline struct ahash_alg *__crypto_ahash_alg(struct crypto_alg *alg)
+{
+	return container_of(__crypto_hash_alg_common(alg), struct ahash_alg,
+			    halg);
+}
+
+static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm,
+					    unsigned int reqsize)
+{
+	tfm->reqsize = reqsize;
+}
+
+static inline struct crypto_instance *ahash_crypto_instance(
+	struct ahash_instance *inst)
+{
+	return &inst->s.base;
+}
+
+static inline struct ahash_instance *ahash_instance(
+	struct crypto_instance *inst)
+{
+	return container_of(inst, struct ahash_instance, s.base);
+}
+
+static inline struct ahash_instance *ahash_alg_instance(
+	struct crypto_ahash *ahash)
+{
+	return ahash_instance(crypto_tfm_alg_instance(&ahash->base));
+}
+
+static inline void *ahash_instance_ctx(struct ahash_instance *inst)
+{
+	return crypto_instance_ctx(ahash_crypto_instance(inst));
+}
+
+static inline void ahash_request_complete(struct ahash_request *req, int err)
+{
+	req->base.complete(&req->base, err);
+}
+
+static inline u32 ahash_request_flags(struct ahash_request *req)
+{
+	return req->base.flags;
+}
+
+static inline struct crypto_ahash *crypto_spawn_ahash(
+	struct crypto_ahash_spawn *spawn)
+{
+	return crypto_spawn_tfm2(&spawn->base);
+}
+
+static inline int ahash_enqueue_request(struct crypto_queue *queue,
+					struct ahash_request *request)
+{
+	return crypto_enqueue_request(queue, &request->base);
+}
+
+static inline struct ahash_request *ahash_dequeue_request(
+	struct crypto_queue *queue)
+{
+	return ahash_request_cast(crypto_dequeue_request(queue));
+}
+
+static inline void *crypto_shash_ctx(struct crypto_shash *tfm)
+{
+	return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline struct crypto_instance *shash_crypto_instance(
+	struct shash_instance *inst)
+{
+	return &inst->s.base;
+}
+
+static inline struct shash_instance *shash_instance(
+	struct crypto_instance *inst)
+{
+	return container_of(inst, struct shash_instance, s.base);
+}
+
+static inline struct shash_instance *shash_alg_instance(
+	struct crypto_shash *shash)
+{
+	return shash_instance(crypto_tfm_alg_instance(&shash->base));
+}
+
+static inline void *shash_instance_ctx(struct shash_instance *inst)
+{
+	return crypto_instance_ctx(shash_crypto_instance(inst));
+}
+
+static inline struct crypto_shash *crypto_spawn_shash(
+	struct crypto_shash_spawn *spawn)
+{
+	return crypto_spawn_tfm2(&spawn->base);
+}
+
+static inline void *crypto_shash_ctx_aligned(struct crypto_shash *tfm)
+{
+	return crypto_tfm_ctx_aligned(&tfm->base);
+}
+
+static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
+{
+	return container_of(tfm, struct crypto_shash, base);
+}
+
+#endif /* _CRYPTO_INTERNAL_HASH_H */
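
Usage note: a template's ->create() typically grabs an underlying shash through the spawn API declared above. An abbreviated sketch (error handling and algorithm setup are condensed; names are hypothetical):

	#include <crypto/internal/hash.h>
	#include <linux/slab.h>

	static int my_tmpl_create(struct crypto_template *tmpl, struct rtattr **tb)
	{
		struct shash_instance *inst;
		struct crypto_shash_spawn *spawn;
		struct shash_alg *alg;
		int err;

		inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
		if (!inst)
			return -ENOMEM;
		spawn = shash_instance_ctx(inst);

		err = crypto_grab_shash(spawn, shash_crypto_instance(inst),
					crypto_attr_alg_name(tb[1]), 0, 0);
		if (err) {
			kfree(inst);
			return err;
		}
		alg = crypto_spawn_shash_alg(spawn);

		/* ... fill in inst->alg based on 'alg', then register: */
		err = shash_register_instance(tmpl, inst);
		if (err) {
			crypto_drop_shash(spawn);
			kfree(inst);
		}
		return err;
	}
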
diff --git a/include/crypto/internal/kpp.h b/include/crypto/internal/kpp.h
new file mode 100644
index 000000000..659b642ef
--- /dev/null
+++ b/include/crypto/internal/kpp.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Key-agreement Protocol Primitives (KPP)
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
+ */
+#ifndef _CRYPTO_KPP_INT_H
+#define _CRYPTO_KPP_INT_H
+#include <crypto/kpp.h>
+#include <crypto/algapi.h>
+
+/*
+ * Transform internal helpers.
+ */
+static inline void *kpp_request_ctx(struct kpp_request *req)
+{
+	return req->__ctx;
+}
+
+static inline void *kpp_tfm_ctx(struct crypto_kpp *tfm)
+{
+	return tfm->base.__crt_ctx;
+}
+
+static inline void kpp_request_complete(struct kpp_request *req, int err)
+{
+	req->base.complete(&req->base, err);
+}
+
+static inline const char *kpp_alg_name(struct crypto_kpp *tfm)
+{
+	return crypto_kpp_tfm(tfm)->__crt_alg->cra_name;
+}
+
+/**
+ * crypto_register_kpp() -- Register key-agreement protocol primitives algorithm
+ *
+ * Function registers an implementation of a key-agreement protocol primitive
+ * algorithm
+ *
+ * @alg:	algorithm definition
+ *
+ * Return:	zero on success; error code in case of error
+ */
+int crypto_register_kpp(struct kpp_alg *alg);
+
+/**
+ * crypto_unregister_kpp() -- Unregister key-agreement protocol primitive
+ * algorithm
+ *
+ * Function unregisters an implementation of a key-agreement protocol primitive
+ * algorithm
+ *
+ * @alg:	algorithm definition
+ */
+void crypto_unregister_kpp(struct kpp_alg *alg);
+
+#endif
diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h
new file mode 100644
index 000000000..196aa769f
--- /dev/null
+++ b/include/crypto/internal/poly1305.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values for the Poly1305 algorithm
+ */
+
+#ifndef _CRYPTO_INTERNAL_POLY1305_H
+#define _CRYPTO_INTERNAL_POLY1305_H
+
+#include <asm/unaligned.h>
+#include <linux/types.h>
+#include <crypto/poly1305.h>
+
+/*
+ * Poly1305 core functions.  These only accept whole blocks; the caller must
+ * handle any needed block buffering and padding.  'hibit' must be 1 for any
+ * full blocks, or 0 for the final block if it had to be padded.  If 'nonce' is
+ * non-NULL, then it's added at the end to compute the Poly1305 MAC.  Otherwise,
+ * only the ε-almost-∆-universal hash function (not the full MAC) is computed.
+ */
+
+void poly1305_core_setkey(struct poly1305_core_key *key,
+			  const u8 raw_key[POLY1305_BLOCK_SIZE]);
+static inline void poly1305_core_init(struct poly1305_state *state)
+{
+	*state = (struct poly1305_state){};
+}
+
+void poly1305_core_blocks(struct poly1305_state *state,
+			  const struct poly1305_core_key *key, const void *src,
+			  unsigned int nblocks, u32 hibit);
+void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4],
+			void *dst);
+
+#endif
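
Usage note: how the core calls fit together, as a one-shot MAC over a single full block (illustrative only; real callers also buffer and pad partial blocks, as the comment above requires):

	#include <crypto/internal/poly1305.h>

	static void my_poly1305_block_mac(u8 mac[POLY1305_DIGEST_SIZE],
					  const u8 block[POLY1305_BLOCK_SIZE],
					  const u8 raw_key[POLY1305_KEY_SIZE])
	{
		struct poly1305_core_key key;
		struct poly1305_state state;
		u32 nonce[4];

		poly1305_core_setkey(&key, raw_key);	/* first 16 bytes: r */
		nonce[0] = get_unaligned_le32(raw_key + 16);	/* last 16: s */
		nonce[1] = get_unaligned_le32(raw_key + 20);
		nonce[2] = get_unaligned_le32(raw_key + 24);
		nonce[3] = get_unaligned_le32(raw_key + 28);

		poly1305_core_init(&state);
		poly1305_core_blocks(&state, &key, block, 1, 1);
		poly1305_core_emit(&state, nonce, mac);
	}
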
diff --git a/include/crypto/internal/rng.h b/include/crypto/internal/rng.h
new file mode 100644
index 000000000..e0711b6a5
--- /dev/null
+++ b/include/crypto/internal/rng.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RNG: Random Number Generator algorithms under the crypto API
+ *
+ * Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com>
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#ifndef _CRYPTO_INTERNAL_RNG_H
+#define _CRYPTO_INTERNAL_RNG_H
+
+#include <crypto/algapi.h>
+#include <crypto/rng.h>
+
+int crypto_register_rng(struct rng_alg *alg);
+void crypto_unregister_rng(struct rng_alg *alg);
+int crypto_register_rngs(struct rng_alg *algs, int count);
+void crypto_unregister_rngs(struct rng_alg *algs, int count);
+
+#if defined(CONFIG_CRYPTO_RNG) || defined(CONFIG_CRYPTO_RNG_MODULE)
+int crypto_del_default_rng(void);
+#else
+static inline int crypto_del_default_rng(void)
+{
+	return 0;
+}
+#endif
+
+static inline void *crypto_rng_ctx(struct crypto_rng *tfm)
+{
+	return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline void crypto_rng_set_entropy(struct crypto_rng *tfm,
+					  const u8 *data, unsigned int len)
+{
+	crypto_rng_alg(tfm)->set_ent(tfm, data, len);
+}
+
+#endif
diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h
new file mode 100644
index 000000000..e870133f4
--- /dev/null
+++ b/include/crypto/internal/rsa.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RSA internal helpers
+ *
+ * Copyright (c) 2015, Intel Corporation
+ * Authors: Tadeusz Struk <tadeusz.struk@intel.com>
+ */
+#ifndef _RSA_HELPER_
+#define _RSA_HELPER_
+#include <linux/types.h>
+
+/**
+ * rsa_key - RSA key structure
+ * @n		: RSA modulus raw byte stream
+ * @e		: RSA public exponent raw byte stream
+ * @d		: RSA private exponent raw byte stream
+ * @p		: RSA prime factor p of n raw byte stream
+ * @q		: RSA prime factor q of n raw byte stream
+ * @dp		: RSA exponent d mod (p - 1) raw byte stream
+ * @dq		: RSA exponent d mod (q - 1) raw byte stream
+ * @qinv	: RSA CRT coefficient q^(-1) mod p raw byte stream
+ * @n_sz	: length in bytes of RSA modulus n
+ * @e_sz	: length in bytes of RSA public exponent
+ * @d_sz	: length in bytes of RSA private exponent
+ * @p_sz	: length in bytes of p field
+ * @q_sz	: length in bytes of q field
+ * @dp_sz	: length in bytes of dp field
+ * @dq_sz	: length in bytes of dq field
+ * @qinv_sz	: length in bytes of qinv field
+ */
+struct rsa_key {
+	const u8 *n;
+	const u8 *e;
+	const u8 *d;
+	const u8 *p;
+	const u8 *q;
+	const u8 *dp;
+	const u8 *dq;
+	const u8 *qinv;
+	size_t n_sz;
+	size_t e_sz;
+	size_t d_sz;
+	size_t p_sz;
+	size_t q_sz;
+	size_t dp_sz;
+	size_t dq_sz;
+	size_t qinv_sz;
+};
+
+int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key,
+		      unsigned int key_len);
+
+int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key,
+		       unsigned int key_len);
+
+extern struct crypto_template rsa_pkcs1pad_tmpl;
+#endif
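
Usage note: a sketch of an akcipher ->set_pub_key() built on the parser. The rsa_key fields point into the caller's buffer, so a real driver copies them out before returning; the function name is hypothetical:

	#include <crypto/internal/rsa.h>
	#include <crypto/internal/akcipher.h>
	#include <linux/printk.h>

	static int my_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				      unsigned int keylen)
	{
		struct rsa_key raw_key = {};
		int err;

		err = rsa_parse_pub_key(&raw_key, key, keylen);
		if (err)
			return err;

		/* raw_key.n/raw_key.e point into 'key'; copy before it goes away. */
		pr_debug("n: %zu bytes, e: %zu bytes\n", raw_key.n_sz, raw_key.e_sz);
		return 0;
	}
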
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
new file mode 100644
index 000000000..f834274c2
--- /dev/null
+++ b/include/crypto/internal/scompress.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Synchronous Compression operations
+ *
+ * Copyright 2015 LG Electronics Inc.
+ * Copyright (c) 2016, Intel Corporation
+ * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+ */
+#ifndef _CRYPTO_SCOMP_INT_H
+#define _CRYPTO_SCOMP_INT_H
+#include <linux/crypto.h>
+
+#define SCOMP_SCRATCH_SIZE	131072
+
+struct crypto_scomp {
+	struct crypto_tfm base;
+};
+
+/**
+ * struct scomp_alg - synchronous compression algorithm
+ *
+ * @alloc_ctx:	Function allocates algorithm specific context
+ * @free_ctx:	Function frees context allocated with alloc_ctx
+ * @compress:	Function performs a compress operation
+ * @decompress:	Function performs a de-compress operation
+ * @base:	Common crypto API algorithm data structure
+ */
+struct scomp_alg {
+	void *(*alloc_ctx)(struct crypto_scomp *tfm);
+	void (*free_ctx)(struct crypto_scomp *tfm, void *ctx);
+	int (*compress)(struct crypto_scomp *tfm, const u8 *src,
+			unsigned int slen, u8 *dst, unsigned int *dlen,
+			void *ctx);
+	int (*decompress)(struct crypto_scomp *tfm, const u8 *src,
+			  unsigned int slen, u8 *dst, unsigned int *dlen,
+			  void *ctx);
+	struct crypto_alg base;
+};
+
+static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg)
+{
+	return container_of(alg, struct scomp_alg, base);
+}
+
+static inline struct crypto_scomp *__crypto_scomp_tfm(struct crypto_tfm *tfm)
+{
+	return container_of(tfm, struct crypto_scomp, base);
+}
+
+static inline struct crypto_tfm *crypto_scomp_tfm(struct crypto_scomp *tfm)
+{
+	return &tfm->base;
+}
+
+static inline void crypto_free_scomp(struct crypto_scomp *tfm)
+{
+	crypto_destroy_tfm(tfm, crypto_scomp_tfm(tfm));
+}
+
+static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm)
+{
+	return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg);
+}
+
+static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm)
+{
+	return crypto_scomp_alg(tfm)->alloc_ctx(tfm);
+}
+
+static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm,
+					 void *ctx)
+{
+	return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx);
+}
+
+static inline int crypto_scomp_compress(struct crypto_scomp *tfm,
+					const u8 *src, unsigned int slen,
+					u8 *dst, unsigned int *dlen, void *ctx)
+{
+	return crypto_scomp_alg(tfm)->compress(tfm, src, slen, dst, dlen, ctx);
+}
+
+static inline int crypto_scomp_decompress(struct crypto_scomp *tfm,
+					  const u8 *src, unsigned int slen,
+					  u8 *dst, unsigned int *dlen,
+					  void *ctx)
+{
+	return crypto_scomp_alg(tfm)->decompress(tfm, src, slen, dst, dlen,
+						 ctx);
+}
+
+int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
+struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
+void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
+
+/**
+ * crypto_register_scomp() -- Register synchronous compression algorithm
+ *
+ * Function registers an implementation of a synchronous
+ * compression algorithm
+ *
+ * @alg:	algorithm definition
+ *
+ * Return:	zero on success; error code in case of error
+ */
+int crypto_register_scomp(struct scomp_alg *alg);
+
+/**
+ * crypto_unregister_scomp() -- Unregister synchronous compression algorithm
+ *
+ * Function unregisters an implementation of a synchronous
+ * compression algorithm
+ *
+ * @alg:	algorithm definition
+ */
+void crypto_unregister_scomp(struct scomp_alg *alg);
+
+int crypto_register_scomps(struct scomp_alg *algs, int count);
+void crypto_unregister_scomps(struct scomp_alg *algs, int count);
+
+#endif
diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h
new file mode 100644
index 000000000..d2316242a
--- /dev/null
+++ b/include/crypto/internal/simd.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Shared crypto simd helpers
+ */
+
+#ifndef _CRYPTO_INTERNAL_SIMD_H
+#define _CRYPTO_INTERNAL_SIMD_H
+
+#include <linux/percpu.h>
+#include <linux/types.h>
+
+/* skcipher support */
+
+struct simd_skcipher_alg;
+struct skcipher_alg;
+
+struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
+						      const char *drvname,
+						      const char *basename);
+struct simd_skcipher_alg *simd_skcipher_create(const char *algname,
+					       const char *basename);
+void simd_skcipher_free(struct simd_skcipher_alg *alg);
+
+int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
+				   struct simd_skcipher_alg **simd_algs);
+
+void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
+			       struct simd_skcipher_alg **simd_algs);
+
+/* AEAD support */
+
+struct simd_aead_alg;
+struct aead_alg;
+
+struct simd_aead_alg *simd_aead_create_compat(const char *algname,
+					      const char *drvname,
+					      const char *basename);
+struct simd_aead_alg *simd_aead_create(const char *algname,
+				       const char *basename);
+void simd_aead_free(struct simd_aead_alg *alg);
+
+int simd_register_aeads_compat(struct aead_alg *algs, int count,
+			       struct simd_aead_alg **simd_algs);
+
+void simd_unregister_aeads(struct aead_alg *algs, int count,
+			   struct simd_aead_alg **simd_algs);
+
+/*
+ * crypto_simd_usable() - is it allowed at this time to use SIMD instructions or
+ *			  access the SIMD register file?
+ *
+ * This delegates to may_use_simd(), except that this also returns false if SIMD
+ * in crypto code has been temporarily disabled on this CPU by the crypto
+ * self-tests, in order to test the no-SIMD fallback code.  This override is
+ * currently limited to configurations where the extra self-tests are enabled,
+ * because it might be a bit too invasive to be part of the regular self-tests.
+ *
+ * This is a macro so that <asm/simd.h>, which some architectures don't have,
+ * doesn't have to be included directly here.
+ */
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+DECLARE_PER_CPU(bool, crypto_simd_disabled_for_test);
+#define crypto_simd_usable() \
+	(may_use_simd() && !this_cpu_read(crypto_simd_disabled_for_test))
+#else
+#define crypto_simd_usable() may_use_simd()
+#endif
+
+#endif /* _CRYPTO_INTERNAL_SIMD_H */
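
Usage note: the canonical pattern around crypto_simd_usable() is a SIMD fast path with a generic fallback. A sketch; kernel_fpu_begin()/kernel_fpu_end() are the x86 variants (other architectures use different begin/end calls), and the my_* functions are hypothetical:

	#include <crypto/internal/simd.h>
	#include <asm/simd.h>		/* may_use_simd() */
	#include <asm/fpu/api.h>	/* kernel_fpu_begin/end(); x86-specific */

	struct my_state;					/* hypothetical */
	void my_hash_blocks_simd(struct my_state *st, const u8 *data, int nblocks);
	void my_hash_blocks_generic(struct my_state *st, const u8 *data, int nblocks);

	static void my_hash_blocks(struct my_state *st, const u8 *data, int nblocks)
	{
		if (crypto_simd_usable()) {
			kernel_fpu_begin();
			my_hash_blocks_simd(st, data, nblocks);
			kernel_fpu_end();
		} else {
			my_hash_blocks_generic(st, data, nblocks);
		}
	}
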
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
new file mode 100644
index 000000000..10226c12c
--- /dev/null
+++ b/include/crypto/internal/skcipher.h
@@ -0,0 +1,223 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Symmetric key ciphers.
+ *
+ * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#ifndef _CRYPTO_INTERNAL_SKCIPHER_H
+#define _CRYPTO_INTERNAL_SKCIPHER_H
+
+#include <crypto/algapi.h>
+#include <crypto/skcipher.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+struct aead_request;
+struct rtattr;
+
+struct skcipher_instance {
+	void (*free)(struct skcipher_instance *inst);
+	union {
+		struct {
+			char head[offsetof(struct skcipher_alg, base)];
+			struct crypto_instance base;
+		} s;
+		struct skcipher_alg alg;
+	};
+};
+
+struct crypto_skcipher_spawn {
+	struct crypto_spawn base;
+};
+
+struct skcipher_walk {
+	union {
+		struct {
+			struct page *page;
+			unsigned long offset;
+		} phys;
+
+		struct {
+			u8 *page;
+			void *addr;
+		} virt;
+	} src, dst;
+
+	struct scatter_walk in;
+	unsigned int nbytes;
+
+	struct scatter_walk out;
+	unsigned int total;
+
+	struct list_head buffers;
+
+	u8 *page;
+	u8 *buffer;
+	u8 *oiv;
+	void *iv;
+
+	unsigned int ivsize;
+
+	int flags;
+	unsigned int blocksize;
+	unsigned int stride;
+	unsigned int alignmask;
+};
+
+static inline struct crypto_instance *skcipher_crypto_instance(
+	struct skcipher_instance *inst)
+{
+	return &inst->s.base;
+}
+
+static inline struct skcipher_instance *skcipher_alg_instance(
+	struct crypto_skcipher *skcipher)
+{
+	return container_of(crypto_skcipher_alg(skcipher),
+			    struct skcipher_instance, alg);
+}
+
+static inline void *skcipher_instance_ctx(struct skcipher_instance *inst)
+{
+	return crypto_instance_ctx(skcipher_crypto_instance(inst));
+}
+
+static inline void skcipher_request_complete(struct skcipher_request *req, int err)
+{
+	req->base.complete(&req->base, err);
+}
+
+int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
+			 struct crypto_instance *inst,
+			 const char *name, u32 type, u32 mask);
+
+static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
+{
+	crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct skcipher_alg *crypto_skcipher_spawn_alg(
+	struct crypto_skcipher_spawn *spawn)
+{
+	return container_of(spawn->base.alg, struct skcipher_alg, base);
+}
+
+static inline struct skcipher_alg *crypto_spawn_skcipher_alg(
+	struct crypto_skcipher_spawn *spawn)
+{
+	return crypto_skcipher_spawn_alg(spawn);
+}
+
+static inline struct crypto_skcipher *crypto_spawn_skcipher(
+	struct crypto_skcipher_spawn *spawn)
+{
+	return crypto_spawn_tfm2(&spawn->base);
+}
+
+static inline void crypto_skcipher_set_reqsize(
+	struct crypto_skcipher *skcipher, unsigned int reqsize)
+{
+	skcipher->reqsize = reqsize;
+}
+
+int crypto_register_skcipher(struct skcipher_alg *alg);
+void crypto_unregister_skcipher(struct skcipher_alg *alg);
+int crypto_register_skciphers(struct skcipher_alg *algs, int count);
+void crypto_unregister_skciphers(struct skcipher_alg *algs, int count);
+int skcipher_register_instance(struct crypto_template *tmpl,
+			       struct skcipher_instance *inst);
+
+int skcipher_walk_done(struct skcipher_walk *walk, int err);
+int skcipher_walk_virt(struct skcipher_walk *walk,
+		       struct skcipher_request *req,
+		       bool atomic);
+void skcipher_walk_atomise(struct skcipher_walk *walk);
+int skcipher_walk_async(struct skcipher_walk *walk,
+			struct skcipher_request *req);
+int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
+			       struct aead_request *req, bool atomic);
+int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
+			       struct aead_request *req, bool atomic);
+void skcipher_walk_complete(struct skcipher_walk *walk, int err);
+
+static inline void skcipher_walk_abort(struct skcipher_walk *walk)
+{
+	skcipher_walk_done(walk, -ECANCELED);
+}
+
+static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
+{
+	return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline void *skcipher_request_ctx(struct skcipher_request *req)
+{
+	return req->__ctx;
+}
+
+static inline u32 skcipher_request_flags(struct skcipher_request *req)
+{
+	return req->base.flags;
+}
+
+static inline unsigned int crypto_skcipher_alg_min_keysize(
+	struct skcipher_alg *alg)
+{
+	return alg->min_keysize;
+}
+
+static inline unsigned int crypto_skcipher_alg_max_keysize(
+	struct skcipher_alg *alg)
+{
+	return alg->max_keysize;
+}
+
+static inline unsigned int crypto_skcipher_alg_walksize(
+	struct skcipher_alg *alg)
+{
+	return alg->walksize;
+}
+
+/**
+ * crypto_skcipher_walksize() - obtain walk size
+ * @tfm: cipher handle
+ *
+ * In some cases, algorithms can only perform optimally when operating on
+ * multiple blocks in parallel. This is reflected by the walksize, which
+ * must be a multiple of the chunksize (or equal if the concern does not
+ * apply)
+ *
+ * Return: walk size in bytes
+ */
+static inline unsigned int crypto_skcipher_walksize(
+	struct crypto_skcipher *tfm)
+{
+	return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm));
+}
+
+/* Helpers for simple block cipher modes of operation */
+
+struct skcipher_ctx_simple {
+	struct crypto_cipher *cipher;	/* underlying block cipher */
+};
+
+static inline struct crypto_cipher *
+skcipher_cipher_simple(struct crypto_skcipher *tfm)
+{
+	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
+
+	return ctx->cipher;
+}
+
+struct skcipher_instance *skcipher_alloc_instance_simple(
+	struct crypto_template *tmpl, struct rtattr **tb);
+
+static inline struct crypto_alg *skcipher_ialg_simple(
+	struct skcipher_instance *inst)
+{
+	struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
+
+	return crypto_spawn_cipher_alg(spawn);
+}
+
+#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */
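
Usage note: the walk API above is normally consumed in a loop of this shape, mirroring in-tree software ciphers; my_xor_crypt() is a hypothetical stream primitive:

	#include <crypto/internal/skcipher.h>
	#include <linux/kernel.h>	/* round_down() */

	void my_xor_crypt(u8 *dst, const u8 *src, unsigned int len, const u8 *iv);

	static int my_encrypt(struct skcipher_request *req)
	{
		struct skcipher_walk walk;
		int err;

		err = skcipher_walk_virt(&walk, req, false);

		while (walk.nbytes) {
			unsigned int n = walk.nbytes;

			/* Process full strides except on the final chunk. */
			if (n < walk.total)
				n = round_down(n, walk.stride);

			my_xor_crypt(walk.dst.virt.addr, walk.src.virt.addr, n,
				     walk.iv);

			err = skcipher_walk_done(&walk, walk.nbytes - n);
		}
		return err;
	}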