author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:35:05 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:31 +0000
commit     85c675d0d09a45a135bddd15d7b385f8758c32fb (patch)
tree       76267dbc9b9a130337be3640948fe397b04ac629 /include/crypto
parent     Adding upstream version 6.6.15. (diff)
download   linux-85c675d0d09a45a135bddd15d7b385f8758c32fb.tar.xz
           linux-85c675d0d09a45a135bddd15d7b385f8758c32fb.zip
Adding upstream version 6.7.7. (upstream/6.7.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'include/crypto')
-rw-r--r--  include/crypto/aead.h               |  12
-rw-r--r--  include/crypto/akcipher.h           |   4
-rw-r--r--  include/crypto/algapi.h             |   5
-rw-r--r--  include/crypto/engine.h             |   2
-rw-r--r--  include/crypto/hash.h               |  95
-rw-r--r--  include/crypto/hash_info.h          |   1
-rw-r--r--  include/crypto/internal/hash.h      |   9
-rw-r--r--  include/crypto/internal/skcipher.h  | 130
-rw-r--r--  include/crypto/sig.h                |   2
-rw-r--r--  include/crypto/skcipher.h           | 296
10 files changed, 394 insertions, 162 deletions
diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 35e45b854a..51382befbe 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -217,6 +217,18 @@ static inline void crypto_free_aead(struct crypto_aead *tfm) crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm)); } +/** + * crypto_has_aead() - Search for the availability of an aead. + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * aead + * @type: specifies the type of the aead + * @mask: specifies the mask for the aead + * + * Return: true when the aead is known to the kernel crypto API; false + * otherwise + */ +int crypto_has_aead(const char *alg_name, u32 type, u32 mask); + static inline const char *crypto_aead_driver_name(struct crypto_aead *tfm) { return crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)); diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h index 670508f1dc..31c111bebb 100644 --- a/include/crypto/akcipher.h +++ b/include/crypto/akcipher.h @@ -382,7 +382,7 @@ static inline int crypto_akcipher_decrypt(struct akcipher_request *req) * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() * @src: source buffer * @slen: source length - * @dst: destinatino obuffer + * @dst: destination obuffer * @dlen: destination length * * Return: zero on success; error code in case of error @@ -400,7 +400,7 @@ int crypto_akcipher_sync_encrypt(struct crypto_akcipher *tfm, * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() * @src: source buffer * @slen: source length - * @dst: destinatino obuffer + * @dst: destination obuffer * @dlen: destination length * * Return: Output length on success; error code in case of error diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index ca86f4c6ba..7a4a71af65 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -195,11 +195,6 @@ static inline void *crypto_tfm_ctx_align(struct crypto_tfm *tfm, return PTR_ALIGN(crypto_tfm_ctx(tfm), align); } -static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm) -{ - return crypto_tfm_ctx_align(tfm, crypto_tfm_alg_alignmask(tfm) + 1); -} - static inline unsigned int crypto_dma_align(void) { return CRYPTO_DMA_ALIGN; diff --git a/include/crypto/engine.h b/include/crypto/engine.h index 2835069c59..545dbefe3e 100644 --- a/include/crypto/engine.h +++ b/include/crypto/engine.h @@ -78,7 +78,7 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev, bool retry_support, int (*cbk_do_batch)(struct crypto_engine *engine), bool rt, int qlen); -int crypto_engine_exit(struct crypto_engine *engine); +void crypto_engine_exit(struct crypto_engine *engine); int crypto_engine_register_aead(struct aead_engine_alg *alg); void crypto_engine_unregister_aead(struct aead_engine_alg *alg); diff --git a/include/crypto/hash.h b/include/crypto/hash.h index f7c2a22cd7..c7bdbece27 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -250,16 +250,7 @@ struct shash_alg { #undef HASH_ALG_COMMON_STAT struct crypto_ahash { - int (*init)(struct ahash_request *req); - int (*update)(struct ahash_request *req); - int (*final)(struct ahash_request *req); - int (*finup)(struct ahash_request *req); - int (*digest)(struct ahash_request *req); - int (*export)(struct ahash_request *req, void *out); - int (*import)(struct ahash_request *req, const void *in); - int (*setkey)(struct crypto_ahash *tfm, const u8 *key, - unsigned int keylen); - + bool using_shash; /* Underlying algorithm is shash, not ahash */ unsigned int statesize; unsigned int reqsize; struct 
crypto_tfm base; @@ -342,12 +333,6 @@ static inline const char *crypto_ahash_driver_name(struct crypto_ahash *tfm) return crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); } -static inline unsigned int crypto_ahash_alignmask( - struct crypto_ahash *tfm) -{ - return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm)); -} - /** * crypto_ahash_blocksize() - obtain block size for cipher * @tfm: cipher handle @@ -519,10 +504,7 @@ int crypto_ahash_digest(struct ahash_request *req); * * Return: 0 if the export was successful; < 0 if an error occurred */ -static inline int crypto_ahash_export(struct ahash_request *req, void *out) -{ - return crypto_ahash_reqtfm(req)->export(req, out); -} +int crypto_ahash_export(struct ahash_request *req, void *out); /** * crypto_ahash_import() - import message digest state @@ -535,15 +517,7 @@ static inline int crypto_ahash_export(struct ahash_request *req, void *out) * * Return: 0 if the import was successful; < 0 if an error occurred */ -static inline int crypto_ahash_import(struct ahash_request *req, const void *in) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - - if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - return -ENOKEY; - - return tfm->import(req, in); -} +int crypto_ahash_import(struct ahash_request *req, const void *in); /** * crypto_ahash_init() - (re)initialize message digest handle @@ -556,36 +530,7 @@ static inline int crypto_ahash_import(struct ahash_request *req, const void *in) * * Return: see crypto_ahash_final() */ -static inline int crypto_ahash_init(struct ahash_request *req) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - - if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - return -ENOKEY; - - return tfm->init(req); -} - -static inline struct crypto_istat_hash *hash_get_stat( - struct hash_alg_common *alg) -{ -#ifdef CONFIG_CRYPTO_STATS - return &alg->stat; -#else - return NULL; -#endif -} - -static inline int crypto_hash_errstat(struct hash_alg_common *alg, int err) -{ - if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) - return err; - - if (err && err != -EINPROGRESS && err != -EBUSY) - atomic64_inc(&hash_get_stat(alg)->err_cnt); - - return err; -} +int crypto_ahash_init(struct ahash_request *req); /** * crypto_ahash_update() - add data to message digest for processing @@ -598,16 +543,7 @@ static inline int crypto_hash_errstat(struct hash_alg_common *alg, int err) * * Return: see crypto_ahash_final() */ -static inline int crypto_ahash_update(struct ahash_request *req) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct hash_alg_common *alg = crypto_hash_alg_common(tfm); - - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) - atomic64_add(req->nbytes, &hash_get_stat(alg)->hash_tlen); - - return crypto_hash_errstat(alg, tfm->update(req)); -} +int crypto_ahash_update(struct ahash_request *req); /** * DOC: Asynchronous Hash Request Handle @@ -798,12 +734,6 @@ static inline const char *crypto_shash_driver_name(struct crypto_shash *tfm) return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm)); } -static inline unsigned int crypto_shash_alignmask( - struct crypto_shash *tfm) -{ - return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm)); -} - /** * crypto_shash_blocksize() - obtain block size for cipher * @tfm: cipher handle @@ -952,10 +882,7 @@ int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data, * Context: Any context. 
* Return: 0 if the export creation was successful; < 0 if an error occurred */ -static inline int crypto_shash_export(struct shash_desc *desc, void *out) -{ - return crypto_shash_alg(desc->tfm)->export(desc, out); -} +int crypto_shash_export(struct shash_desc *desc, void *out); /** * crypto_shash_import() - import operational state @@ -969,15 +896,7 @@ static inline int crypto_shash_export(struct shash_desc *desc, void *out) * Context: Any context. * Return: 0 if the import was successful; < 0 if an error occurred */ -static inline int crypto_shash_import(struct shash_desc *desc, const void *in) -{ - struct crypto_shash *tfm = desc->tfm; - - if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - return -ENOKEY; - - return crypto_shash_alg(tfm)->import(desc, in); -} +int crypto_shash_import(struct shash_desc *desc, const void *in); /** * crypto_shash_init() - (re)initialize message digest diff --git a/include/crypto/hash_info.h b/include/crypto/hash_info.h index dd4f067850..d6927739f8 100644 --- a/include/crypto/hash_info.h +++ b/include/crypto/hash_info.h @@ -10,6 +10,7 @@ #include <crypto/sha1.h> #include <crypto/sha2.h> +#include <crypto/sha3.h> #include <crypto/md5.h> #include <crypto/streebog.h> diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index cf65676e45..59c707e4de 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -18,15 +18,13 @@ struct crypto_hash_walk { char *data; unsigned int offset; - unsigned int alignmask; + unsigned int flags; struct page *pg; unsigned int entrylen; unsigned int total; struct scatterlist *sg; - - unsigned int flags; }; struct ahash_instance { @@ -269,11 +267,6 @@ static inline struct crypto_shash *crypto_spawn_shash( return crypto_spawn_tfm2(&spawn->base); } -static inline void *crypto_shash_ctx_aligned(struct crypto_shash *tfm) -{ - return crypto_tfm_ctx_aligned(&tfm->base); -} - static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm) { return container_of(tfm, struct crypto_shash, base); diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index fb3d9e899f..7ae42afdcf 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -36,10 +36,25 @@ struct skcipher_instance { }; }; +struct lskcipher_instance { + void (*free)(struct lskcipher_instance *inst); + union { + struct { + char head[offsetof(struct lskcipher_alg, co.base)]; + struct crypto_instance base; + } s; + struct lskcipher_alg alg; + }; +}; + struct crypto_skcipher_spawn { struct crypto_spawn base; }; +struct crypto_lskcipher_spawn { + struct crypto_spawn base; +}; + struct skcipher_walk { union { struct { @@ -80,6 +95,12 @@ static inline struct crypto_instance *skcipher_crypto_instance( return &inst->s.base; } +static inline struct crypto_instance *lskcipher_crypto_instance( + struct lskcipher_instance *inst) +{ + return &inst->s.base; +} + static inline struct skcipher_instance *skcipher_alg_instance( struct crypto_skcipher *skcipher) { @@ -87,11 +108,23 @@ static inline struct skcipher_instance *skcipher_alg_instance( struct skcipher_instance, alg); } +static inline struct lskcipher_instance *lskcipher_alg_instance( + struct crypto_lskcipher *lskcipher) +{ + return container_of(crypto_lskcipher_alg(lskcipher), + struct lskcipher_instance, alg); +} + static inline void *skcipher_instance_ctx(struct skcipher_instance *inst) { return crypto_instance_ctx(skcipher_crypto_instance(inst)); } +static inline void *lskcipher_instance_ctx(struct 
lskcipher_instance *inst) +{ + return crypto_instance_ctx(lskcipher_crypto_instance(inst)); +} + static inline void skcipher_request_complete(struct skcipher_request *req, int err) { crypto_request_complete(&req->base, err); @@ -101,21 +134,36 @@ int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst, const char *name, u32 type, u32 mask); +int crypto_grab_lskcipher(struct crypto_lskcipher_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask); + static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn) { crypto_drop_spawn(&spawn->base); } -static inline struct skcipher_alg *crypto_skcipher_spawn_alg( - struct crypto_skcipher_spawn *spawn) +static inline void crypto_drop_lskcipher(struct crypto_lskcipher_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct lskcipher_alg *crypto_lskcipher_spawn_alg( + struct crypto_lskcipher_spawn *spawn) { - return container_of(spawn->base.alg, struct skcipher_alg, base); + return container_of(spawn->base.alg, struct lskcipher_alg, co.base); } -static inline struct skcipher_alg *crypto_spawn_skcipher_alg( +static inline struct skcipher_alg_common *crypto_spawn_skcipher_alg_common( struct crypto_skcipher_spawn *spawn) { - return crypto_skcipher_spawn_alg(spawn); + return container_of(spawn->base.alg, struct skcipher_alg_common, base); +} + +static inline struct lskcipher_alg *crypto_spawn_lskcipher_alg( + struct crypto_lskcipher_spawn *spawn) +{ + return crypto_lskcipher_spawn_alg(spawn); } static inline struct crypto_skcipher *crypto_spawn_skcipher( @@ -124,6 +172,12 @@ static inline struct crypto_skcipher *crypto_spawn_skcipher( return crypto_spawn_tfm2(&spawn->base); } +static inline struct crypto_lskcipher *crypto_spawn_lskcipher( + struct crypto_lskcipher_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + static inline void crypto_skcipher_set_reqsize( struct crypto_skcipher *skcipher, unsigned int reqsize) { @@ -144,6 +198,13 @@ void crypto_unregister_skciphers(struct skcipher_alg *algs, int count); int skcipher_register_instance(struct crypto_template *tmpl, struct skcipher_instance *inst); +int crypto_register_lskcipher(struct lskcipher_alg *alg); +void crypto_unregister_lskcipher(struct lskcipher_alg *alg); +int crypto_register_lskciphers(struct lskcipher_alg *algs, int count); +void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count); +int lskcipher_register_instance(struct crypto_template *tmpl, + struct lskcipher_instance *inst); + int skcipher_walk_done(struct skcipher_walk *walk, int err); int skcipher_walk_virt(struct skcipher_walk *walk, struct skcipher_request *req, @@ -166,6 +227,11 @@ static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm) return crypto_tfm_ctx(&tfm->base); } +static inline void *crypto_lskcipher_ctx(struct crypto_lskcipher *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + static inline void *crypto_skcipher_ctx_dma(struct crypto_skcipher *tfm) { return crypto_tfm_ctx_dma(&tfm->base); @@ -191,41 +257,6 @@ static inline u32 skcipher_request_flags(struct skcipher_request *req) return req->base.flags; } -static inline unsigned int crypto_skcipher_alg_min_keysize( - struct skcipher_alg *alg) -{ - return alg->min_keysize; -} - -static inline unsigned int crypto_skcipher_alg_max_keysize( - struct skcipher_alg *alg) -{ - return alg->max_keysize; -} - -static inline unsigned int crypto_skcipher_alg_walksize( - struct skcipher_alg *alg) -{ - return alg->walksize; -} - -/** - * 
crypto_skcipher_walksize() - obtain walk size - * @tfm: cipher handle - * - * In some cases, algorithms can only perform optimally when operating on - * multiple blocks in parallel. This is reflected by the walksize, which - * must be a multiple of the chunksize (or equal if the concern does not - * apply) - * - * Return: walk size in bytes - */ -static inline unsigned int crypto_skcipher_walksize( - struct crypto_skcipher *tfm) -{ - return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm)); -} - /* Helpers for simple block cipher modes of operation */ struct skcipher_ctx_simple { struct crypto_cipher *cipher; /* underlying block cipher */ @@ -249,5 +280,24 @@ static inline struct crypto_alg *skcipher_ialg_simple( return crypto_spawn_cipher_alg(spawn); } +static inline struct crypto_lskcipher *lskcipher_cipher_simple( + struct crypto_lskcipher *tfm) +{ + struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm); + + return *ctx; +} + +struct lskcipher_instance *lskcipher_alloc_instance_simple( + struct crypto_template *tmpl, struct rtattr **tb); + +static inline struct lskcipher_alg *lskcipher_ialg_simple( + struct lskcipher_instance *inst) +{ + struct crypto_lskcipher_spawn *spawn = lskcipher_instance_ctx(inst); + + return crypto_lskcipher_spawn_alg(spawn); +} + #endif /* _CRYPTO_INTERNAL_SKCIPHER_H */ diff --git a/include/crypto/sig.h b/include/crypto/sig.h index 641b4714c4..d25186bb2b 100644 --- a/include/crypto/sig.h +++ b/include/crypto/sig.h @@ -79,7 +79,7 @@ int crypto_sig_maxsize(struct crypto_sig *tfm); * @tfm: signature tfm handle allocated with crypto_alloc_sig() * @src: source buffer * @slen: source length - * @dst: destinatino obuffer + * @dst: destination obuffer * @dlen: destination length * * Return: zero on success; error code in case of error diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 080d1ba361..ea18af4834 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -49,6 +49,10 @@ struct crypto_sync_skcipher { struct crypto_skcipher base; }; +struct crypto_lskcipher { + struct crypto_tfm base; +}; + /* * struct crypto_istat_cipher - statistics for cipher algorithm * @encrypt_cnt: number of encrypt requests @@ -65,6 +69,43 @@ struct crypto_istat_cipher { atomic64_t err_cnt; }; +#ifdef CONFIG_CRYPTO_STATS +#define SKCIPHER_ALG_COMMON_STAT struct crypto_istat_cipher stat; +#else +#define SKCIPHER_ALG_COMMON_STAT +#endif + +/* + * struct skcipher_alg_common - common properties of skcipher_alg + * @min_keysize: Minimum key size supported by the transformation. This is the + * smallest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MIN_KEY_SIZE" include/crypto/ + * @max_keysize: Maximum key size supported by the transformation. This is the + * largest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MAX_KEY_SIZE" include/crypto/ + * @ivsize: IV size applicable for transformation. The consumer must provide an + * IV of exactly that size to perform the encrypt or decrypt operation. + * @chunksize: Equal to the block size except for stream ciphers such as + * CTR where it is set to the underlying block size. + * @stat: Statistics for cipher algorithm + * @base: Definition of a generic crypto algorithm. 
+ */ +#define SKCIPHER_ALG_COMMON { \ + unsigned int min_keysize; \ + unsigned int max_keysize; \ + unsigned int ivsize; \ + unsigned int chunksize; \ + \ + SKCIPHER_ALG_COMMON_STAT \ + \ + struct crypto_alg base; \ +} +struct skcipher_alg_common SKCIPHER_ALG_COMMON; + /** * struct skcipher_alg - symmetric key cipher definition * @min_keysize: Minimum key size supported by the transformation. This is the @@ -120,6 +161,7 @@ struct crypto_istat_cipher { * in parallel. Should be a multiple of chunksize. * @stat: Statistics for cipher algorithm * @base: Definition of a generic crypto algorithm. + * @co: see struct skcipher_alg_common * * All fields except @ivsize are mandatory and must be filled. */ @@ -131,17 +173,55 @@ struct skcipher_alg { int (*init)(struct crypto_skcipher *tfm); void (*exit)(struct crypto_skcipher *tfm); - unsigned int min_keysize; - unsigned int max_keysize; - unsigned int ivsize; - unsigned int chunksize; unsigned int walksize; -#ifdef CONFIG_CRYPTO_STATS - struct crypto_istat_cipher stat; -#endif + union { + struct SKCIPHER_ALG_COMMON; + struct skcipher_alg_common co; + }; +}; - struct crypto_alg base; +/** + * struct lskcipher_alg - linear symmetric key cipher definition + * @setkey: Set key for the transformation. This function is used to either + * program a supplied key into the hardware or store the key in the + * transformation context for programming it later. Note that this + * function does modify the transformation context. This function can + * be called multiple times during the existence of the transformation + * object, so one must make sure the key is properly reprogrammed into + * the hardware. This function is also responsible for checking the key + * length for validity. In case a software fallback was put in place in + * the @cra_init call, this function might need to use the fallback if + * the algorithm doesn't support all of the key sizes. + * @encrypt: Encrypt a number of bytes. This function is used to encrypt + * the supplied data. This function shall not modify + * the transformation context, as this function may be called + * in parallel with the same transformation object. Data + * may be left over if length is not a multiple of blocks + * and there is more to come (final == false). The number of + * left-over bytes should be returned in case of success. + * @decrypt: Decrypt a number of bytes. This is a reverse counterpart to + * @encrypt and the conditions are exactly the same. + * @init: Initialize the cryptographic transformation object. This function + * is used to initialize the cryptographic transformation object. + * This function is called only once at the instantiation time, right + * after the transformation context was allocated. + * @exit: Deinitialize the cryptographic transformation object. This is a + * counterpart to @init, used to remove various changes set in + * @init. 
+ * @co: see struct skcipher_alg_common + */ +struct lskcipher_alg { + int (*setkey)(struct crypto_lskcipher *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv, bool final); + int (*decrypt)(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv, bool final); + int (*init)(struct crypto_lskcipher *tfm); + void (*exit)(struct crypto_lskcipher *tfm); + + struct skcipher_alg_common co; }; #define MAX_SYNC_SKCIPHER_REQSIZE 384 @@ -213,12 +293,36 @@ struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name, struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(const char *alg_name, u32 type, u32 mask); + +/** + * crypto_alloc_lskcipher() - allocate linear symmetric key cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * lskcipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for an lskcipher. The returned struct + * crypto_lskcipher is the cipher handle that is required for any subsequent + * API invocation for that lskcipher. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_lskcipher *crypto_alloc_lskcipher(const char *alg_name, + u32 type, u32 mask); + static inline struct crypto_tfm *crypto_skcipher_tfm( struct crypto_skcipher *tfm) { return &tfm->base; } +static inline struct crypto_tfm *crypto_lskcipher_tfm( + struct crypto_lskcipher *tfm) +{ + return &tfm->base; +} + /** * crypto_free_skcipher() - zeroize and free cipher handle * @tfm: cipher handle to be freed @@ -236,6 +340,17 @@ static inline void crypto_free_sync_skcipher(struct crypto_sync_skcipher *tfm) } /** + * crypto_free_lskcipher() - zeroize and free cipher handle + * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. + */ +static inline void crypto_free_lskcipher(struct crypto_lskcipher *tfm) +{ + crypto_destroy_tfm(tfm, crypto_lskcipher_tfm(tfm)); +} + +/** * crypto_has_skcipher() - Search for the availability of an skcipher. 
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the * skcipher @@ -253,6 +368,19 @@ static inline const char *crypto_skcipher_driver_name( return crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)); } +static inline const char *crypto_lskcipher_driver_name( + struct crypto_lskcipher *tfm) +{ + return crypto_tfm_alg_driver_name(crypto_lskcipher_tfm(tfm)); +} + +static inline struct skcipher_alg_common *crypto_skcipher_alg_common( + struct crypto_skcipher *tfm) +{ + return container_of(crypto_skcipher_tfm(tfm)->__crt_alg, + struct skcipher_alg_common, base); +} + static inline struct skcipher_alg *crypto_skcipher_alg( struct crypto_skcipher *tfm) { @@ -260,9 +388,11 @@ static inline struct skcipher_alg *crypto_skcipher_alg( struct skcipher_alg, base); } -static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg) +static inline struct lskcipher_alg *crypto_lskcipher_alg( + struct crypto_lskcipher *tfm) { - return alg->ivsize; + return container_of(crypto_lskcipher_tfm(tfm)->__crt_alg, + struct lskcipher_alg, co.base); } /** @@ -276,7 +406,7 @@ static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg) */ static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm) { - return crypto_skcipher_alg(tfm)->ivsize; + return crypto_skcipher_alg_common(tfm)->ivsize; } static inline unsigned int crypto_sync_skcipher_ivsize( @@ -286,6 +416,21 @@ static inline unsigned int crypto_sync_skcipher_ivsize( } /** + * crypto_lskcipher_ivsize() - obtain IV size + * @tfm: cipher handle + * + * The size of the IV for the lskcipher referenced by the cipher handle is + * returned. This IV size may be zero if the cipher does not need an IV. + * + * Return: IV size in bytes + */ +static inline unsigned int crypto_lskcipher_ivsize( + struct crypto_lskcipher *tfm) +{ + return crypto_lskcipher_alg(tfm)->co.ivsize; +} + +/** * crypto_skcipher_blocksize() - obtain block size of cipher * @tfm: cipher handle * @@ -301,10 +446,20 @@ static inline unsigned int crypto_skcipher_blocksize( return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm)); } -static inline unsigned int crypto_skcipher_alg_chunksize( - struct skcipher_alg *alg) +/** + * crypto_lskcipher_blocksize() - obtain block size of cipher + * @tfm: cipher handle + * + * The block size for the lskcipher referenced with the cipher handle is + * returned. The caller may use that information to allocate appropriate + * memory for the data returned by the encryption or decryption operation + * + * Return: block size of cipher + */ +static inline unsigned int crypto_lskcipher_blocksize( + struct crypto_lskcipher *tfm) { - return alg->chunksize; + return crypto_tfm_alg_blocksize(crypto_lskcipher_tfm(tfm)); } /** @@ -321,7 +476,24 @@ static inline unsigned int crypto_skcipher_alg_chunksize( static inline unsigned int crypto_skcipher_chunksize( struct crypto_skcipher *tfm) { - return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm)); + return crypto_skcipher_alg_common(tfm)->chunksize; +} + +/** + * crypto_lskcipher_chunksize() - obtain chunk size + * @tfm: cipher handle + * + * The block size is set to one for ciphers such as CTR. However, + * you still need to provide incremental updates in multiples of + * the underlying block size as the IV does not have sub-block + * granularity. This is known in this API as the chunk size. 
+ * + * Return: chunk size in bytes + */ +static inline unsigned int crypto_lskcipher_chunksize( + struct crypto_lskcipher *tfm) +{ + return crypto_lskcipher_alg(tfm)->co.chunksize; } static inline unsigned int crypto_sync_skcipher_blocksize( @@ -336,6 +508,12 @@ static inline unsigned int crypto_skcipher_alignmask( return crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm)); } +static inline unsigned int crypto_lskcipher_alignmask( + struct crypto_lskcipher *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_lskcipher_tfm(tfm)); +} + static inline u32 crypto_skcipher_get_flags(struct crypto_skcipher *tfm) { return crypto_tfm_get_flags(crypto_skcipher_tfm(tfm)); @@ -371,6 +549,23 @@ static inline void crypto_sync_skcipher_clear_flags( crypto_skcipher_clear_flags(&tfm->base, flags); } +static inline u32 crypto_lskcipher_get_flags(struct crypto_lskcipher *tfm) +{ + return crypto_tfm_get_flags(crypto_lskcipher_tfm(tfm)); +} + +static inline void crypto_lskcipher_set_flags(struct crypto_lskcipher *tfm, + u32 flags) +{ + crypto_tfm_set_flags(crypto_lskcipher_tfm(tfm), flags); +} + +static inline void crypto_lskcipher_clear_flags(struct crypto_lskcipher *tfm, + u32 flags) +{ + crypto_tfm_clear_flags(crypto_lskcipher_tfm(tfm), flags); +} + /** * crypto_skcipher_setkey() - set key for cipher * @tfm: cipher handle @@ -396,16 +591,47 @@ static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm, return crypto_skcipher_setkey(&tfm->base, key, keylen); } +/** + * crypto_lskcipher_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the lskcipher referenced by the cipher + * handle. + * + * Note, the key length determines the cipher type. Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_lskcipher_setkey(struct crypto_lskcipher *tfm, + const u8 *key, unsigned int keylen); + static inline unsigned int crypto_skcipher_min_keysize( struct crypto_skcipher *tfm) { - return crypto_skcipher_alg(tfm)->min_keysize; + return crypto_skcipher_alg_common(tfm)->min_keysize; } static inline unsigned int crypto_skcipher_max_keysize( struct crypto_skcipher *tfm) { - return crypto_skcipher_alg(tfm)->max_keysize; + return crypto_skcipher_alg_common(tfm)->max_keysize; +} + +static inline unsigned int crypto_lskcipher_min_keysize( + struct crypto_lskcipher *tfm) +{ + return crypto_lskcipher_alg(tfm)->co.min_keysize; +} + +static inline unsigned int crypto_lskcipher_max_keysize( + struct crypto_lskcipher *tfm) +{ + return crypto_lskcipher_alg(tfm)->co.max_keysize; } /** @@ -458,6 +684,42 @@ int crypto_skcipher_encrypt(struct skcipher_request *req); int crypto_skcipher_decrypt(struct skcipher_request *req); /** + * crypto_lskcipher_encrypt() - encrypt plaintext + * @tfm: lskcipher handle + * @src: source buffer + * @dst: destination buffer + * @len: number of bytes to process + * @iv: IV for the cipher operation which must comply with the IV size defined + * by crypto_lskcipher_ivsize + * + * Encrypt plaintext data using the lskcipher handle. 
+ * + * Return: >=0 if the cipher operation was successful, if positive + * then this many bytes have been left unprocessed; + * < 0 if an error occurred + */ +int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv); + +/** + * crypto_lskcipher_decrypt() - decrypt ciphertext + * @tfm: lskcipher handle + * @src: source buffer + * @dst: destination buffer + * @len: number of bytes to process + * @iv: IV for the cipher operation which must comply with the IV size defined + * by crypto_lskcipher_ivsize + * + * Decrypt ciphertext data using the lskcipher handle. + * + * Return: >=0 if the cipher operation was successful, if positive + * then this many bytes have been left unprocessed; + * < 0 if an error occurred + */ +int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv); + +/** * DOC: Symmetric Key Cipher Request Handle * * The skcipher_request data structure contains all pointers to data |
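The aead.h hunk above adds a crypto_has_aead() lookup helper alongside the existing allocation API. A minimal sketch of how a caller might use it; the probe function and the "gcm(aes)" algorithm name are assumptions made purely for illustration, not something mandated by this patch:

```c
#include <linux/err.h>
#include <linux/errno.h>
#include <crypto/aead.h>

/* Illustrative sketch: check for an AEAD before allocating it. */
static int example_probe_aead(void)
{
	struct crypto_aead *tfm;

	if (!crypto_has_aead("gcm(aes)", 0, 0))
		return -ENOENT;		/* not known to the crypto API */

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ... set key, build and submit aead_requests, etc. ... */

	crypto_free_aead(tfm);
	return 0;
}
```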
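In hash.h, crypto_ahash_export()/crypto_ahash_import() (and their shash counterparts) change from inline wrappers around per-tfm function pointers to out-of-line functions; the calling convention for users is unchanged. A hedged example of that unchanged convention, checkpointing and restoring a partial ahash computation (the helper name and the allocation strategy are assumptions for the example):

```c
#include <linux/slab.h>
#include <linux/errno.h>
#include <crypto/hash.h>

/* Illustrative sketch: snapshot and restore an in-progress ahash state. */
static int example_ahash_checkpoint(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	void *state;
	int err;

	state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	err = crypto_ahash_export(req, state);	/* save partial digest state */
	if (!err)
		err = crypto_ahash_import(req, state);	/* resume from the snapshot */

	kfree_sensitive(state);
	return err;
}
```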
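The bulk of the skcipher.h and internal/skcipher.h changes introduce the lskcipher ("linear" symmetric key cipher) interface, which operates on flat buffers rather than scatterlist-based requests. A rough usage sketch under the assumption that "cbc(aes)" is reachable through this interface on the running kernel; the wrapper function, in-place operation, and error handling are illustrative choices only:

```c
#include <linux/err.h>
#include <crypto/skcipher.h>

/* Illustrative sketch: one-shot in-place encryption with the lskcipher API. */
static int example_lskcipher_encrypt(const u8 *key, unsigned int keylen,
				     u8 *buf, unsigned int len, u8 *iv)
{
	struct crypto_lskcipher *tfm;
	int err;

	tfm = crypto_alloc_lskcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_lskcipher_setkey(tfm, key, keylen);
	if (!err)
		/*
		 * In-place encryption of a flat buffer; @iv must be
		 * crypto_lskcipher_ivsize(tfm) bytes.  A positive return
		 * means that many trailing bytes were left unprocessed.
		 */
		err = crypto_lskcipher_encrypt(tfm, buf, buf, len, iv);

	crypto_free_lskcipher(tfm);
	return err;
}
```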