Diffstat
81 files changed, 10179 insertions, 0 deletions
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h new file mode 100644 index 000000000..cb3d6b1c6 --- /dev/null +++ b/include/crypto/acompress.h @@ -0,0 +1,296 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Asynchronous Compression operations + * + * Copyright (c) 2016, Intel Corporation + * Authors: Weigang Li <weigang.li@intel.com> + * Giovanni Cabiddu <giovanni.cabiddu@intel.com> + */ +#ifndef _CRYPTO_ACOMP_H +#define _CRYPTO_ACOMP_H +#include <linux/crypto.h> + +#define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001 + +/** + * struct acomp_req - asynchronous (de)compression request + * + * @base: Common attributes for asynchronous crypto requests + * @src: Source Data + * @dst: Destination data + * @slen: Size of the input buffer + * @dlen: Size of the output buffer and number of bytes produced + * @flags: Internal flags + * @__ctx: Start of private context data + */ +struct acomp_req { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int slen; + unsigned int dlen; + u32 flags; + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +/** + * struct crypto_acomp - user-instantiated objects which encapsulate + * algorithms and core processing logic + * + * @compress: Function performs a compress operation + * @decompress: Function performs a de-compress operation + * @dst_free: Frees destination buffer if allocated inside the + * algorithm + * @reqsize: Context size for (de)compression requests + * @base: Common crypto API algorithm data structure + */ +struct crypto_acomp { + int (*compress)(struct acomp_req *req); + int (*decompress)(struct acomp_req *req); + void (*dst_free)(struct scatterlist *dst); + unsigned int reqsize; + struct crypto_tfm base; +}; + +/** + * struct acomp_alg - asynchronous compression algorithm + * + * @compress: Function performs a compress operation + * @decompress: Function performs a de-compress operation + * @dst_free: Frees destination buffer if allocated inside the algorithm + * @init: Initialize the cryptographic transformation object. + * This function is used to initialize the cryptographic + * transformation object. This function is called only once at + * the instantiation time, right after the transformation context + * was allocated. In case the cryptographic hardware has some + * special requirements which need to be handled by software, this + * function shall check for the precise requirement of the + * transformation and put any software fallbacks in place. + * @exit: Deinitialize the cryptographic transformation object. This is a + * counterpart to @init, used to remove various changes set in + * @init. + * + * @reqsize: Context size for (de)compression requests + * @base: Common crypto API algorithm data structure + */ +struct acomp_alg { + int (*compress)(struct acomp_req *req); + int (*decompress)(struct acomp_req *req); + void (*dst_free)(struct scatterlist *dst); + int (*init)(struct crypto_acomp *tfm); + void (*exit)(struct crypto_acomp *tfm); + unsigned int reqsize; + struct crypto_alg base; +}; + +/** + * DOC: Asynchronous Compression API + * + * The Asynchronous Compression API is used with the algorithms of type + * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto) + */ + +/** + * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * compression algorithm e.g. 
"deflate" + * @type: specifies the type of the algorithm + * @mask: specifies the mask for the algorithm + * + * Allocate a handle for a compression algorithm. The returned struct + * crypto_acomp is the handle that is required for any subsequent + * API invocation for the compression operations. + * + * Return: allocated handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type, + u32 mask); +/** + * crypto_alloc_acomp_node() -- allocate ACOMPRESS tfm handle with desired NUMA node + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * compression algorithm e.g. "deflate" + * @type: specifies the type of the algorithm + * @mask: specifies the mask for the algorithm + * @node: specifies the NUMA node the ZIP hardware belongs to + * + * Allocate a handle for a compression algorithm. Drivers should try to use + * (de)compressors on the specified NUMA node. + * The returned struct crypto_acomp is the handle that is required for any + * subsequent API invocation for the compression operations. + * + * Return: allocated handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type, + u32 mask, int node); + +static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm) +{ + return &tfm->base; +} + +static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg) +{ + return container_of(alg, struct acomp_alg, base); +} + +static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_acomp, base); +} + +static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm) +{ + return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg); +} + +static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm) +{ + return tfm->reqsize; +} + +static inline void acomp_request_set_tfm(struct acomp_req *req, + struct crypto_acomp *tfm) +{ + req->base.tfm = crypto_acomp_tfm(tfm); +} + +static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req) +{ + return __crypto_acomp_tfm(req->base.tfm); +} + +/** + * crypto_free_acomp() -- free ACOMPRESS tfm handle + * + * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp() + * + * If @tfm is a NULL or error pointer, this function does nothing. 
+ */ +static inline void crypto_free_acomp(struct crypto_acomp *tfm) +{ + crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm)); +} + +static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_ACOMPRESS; + mask |= CRYPTO_ALG_TYPE_ACOMPRESS_MASK; + + return crypto_has_alg(alg_name, type, mask); +} + +/** + * acomp_request_alloc() -- allocates asynchronous (de)compression request + * + * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp() + * + * Return: allocated handle in case of success or NULL in case of an error + */ +struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm); + +/** + * acomp_request_free() -- zeroize and free asynchronous (de)compression + * request as well as the output buffer if allocated + * inside the algorithm + * + * @req: request to free + */ +void acomp_request_free(struct acomp_req *req); + +/** + * acomp_request_set_callback() -- Sets an asynchronous callback + * + * Callback will be called when an asynchronous operation on a given + * request is finished. + * + * @req: request that the callback will be set for + * @flgs: specify for instance if the operation may backlog + * @cmlp: callback which will be called + * @data: private data used by the caller + */ +static inline void acomp_request_set_callback(struct acomp_req *req, + u32 flgs, + crypto_completion_t cmpl, + void *data) +{ + req->base.complete = cmpl; + req->base.data = data; + req->base.flags = flgs; +} + +/** + * acomp_request_set_params() -- Sets request parameters + * + * Sets parameters required by an acomp operation + * + * @req: asynchronous compress request + * @src: pointer to input buffer scatterlist + * @dst: pointer to output buffer scatterlist. If this is NULL, the + * acomp layer will allocate the output memory + * @slen: size of the input buffer + * @dlen: size of the output buffer. 
If dst is NULL, this can be used by + * the user to specify the maximum amount of memory to allocate + */ +static inline void acomp_request_set_params(struct acomp_req *req, + struct scatterlist *src, + struct scatterlist *dst, + unsigned int slen, + unsigned int dlen) +{ + req->src = src; + req->dst = dst; + req->slen = slen; + req->dlen = dlen; + + if (!req->dst) + req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT; +} + +/** + * crypto_acomp_compress() -- Invoke asynchronous compress operation + * + * Function invokes the asynchronous compress operation + * + * @req: asynchronous compress request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_acomp_compress(struct acomp_req *req) +{ + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + struct crypto_alg *alg = tfm->base.__crt_alg; + unsigned int slen = req->slen; + int ret; + + crypto_stats_get(alg); + ret = tfm->compress(req); + crypto_stats_compress(slen, ret, alg); + return ret; +} + +/** + * crypto_acomp_decompress() -- Invoke asynchronous decompress operation + * + * Function invokes the asynchronous decompress operation + * + * @req: asynchronous compress request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_acomp_decompress(struct acomp_req *req) +{ + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + struct crypto_alg *alg = tfm->base.__crt_alg; + unsigned int slen = req->slen; + int ret; + + crypto_stats_get(alg); + ret = tfm->decompress(req); + crypto_stats_decompress(slen, ret, alg); + return ret; +} + +#endif diff --git a/include/crypto/aead.h b/include/crypto/aead.h new file mode 100644 index 000000000..14db3bee0 --- /dev/null +++ b/include/crypto/aead.h @@ -0,0 +1,530 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * AEAD: Authenticated Encryption with Associated Data + * + * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#ifndef _CRYPTO_AEAD_H +#define _CRYPTO_AEAD_H + +#include <linux/container_of.h> +#include <linux/crypto.h> +#include <linux/slab.h> +#include <linux/types.h> + +/** + * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API + * + * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD + * (listed as type "aead" in /proc/crypto) + * + * The most prominent examples for this type of encryption is GCM and CCM. + * However, the kernel supports other types of AEAD ciphers which are defined + * with the following cipher string: + * + * authenc(keyed message digest, block cipher) + * + * For example: authenc(hmac(sha256), cbc(aes)) + * + * The example code provided for the symmetric key cipher operation + * applies here as well. Naturally all *skcipher* symbols must be exchanged + * the *aead* pendants discussed in the following. In addition, for the AEAD + * operation, the aead_request_set_ad function must be used to set the + * pointer to the associated data memory location before performing the + * encryption or decryption operation. In case of an encryption, the associated + * data memory is filled during the encryption operation. For decryption, the + * associated data memory must contain data that is used to verify the integrity + * of the decrypted data. Another deviation from the asynchronous block cipher + * operation is that the caller should explicitly check for -EBADMSG of the + * crypto_aead_decrypt. That error indicates an authentication error, i.e. + * a breach in the integrity of the message. 
In essence, that -EBADMSG error + * code is the key bonus an AEAD cipher has over "standard" block chaining + * modes. + * + * Memory Structure: + * + * The source scatterlist must contain the concatenation of + * associated data || plaintext or ciphertext. + * + * The destination scatterlist has the same layout, except that the plaintext + * (resp. ciphertext) will grow (resp. shrink) by the authentication tag size + * during encryption (resp. decryption). + * + * In-place encryption/decryption is enabled by using the same scatterlist + * pointer for both the source and destination. + * + * Even in the out-of-place case, space must be reserved in the destination for + * the associated data, even though it won't be written to. This makes the + * in-place and out-of-place cases more consistent. It is permissible for the + * "destination" associated data to alias the "source" associated data. + * + * As with the other scatterlist crypto APIs, zero-length scatterlist elements + * are not allowed in the used part of the scatterlist. Thus, if there is no + * associated data, the first element must point to the plaintext/ciphertext. + * + * To meet the needs of IPsec, a special quirk applies to rfc4106, rfc4309, + * rfc4543, and rfc7539esp ciphers. For these ciphers, the final 'ivsize' bytes + * of the associated data buffer must contain a second copy of the IV. This is + * in addition to the copy passed to aead_request_set_crypt(). These two IV + * copies must not differ; different implementations of the same algorithm may + * behave differently in that case. Note that the algorithm might not actually + * treat the IV as associated data; nevertheless the length passed to + * aead_request_set_ad() must include it. + */ + +struct crypto_aead; +struct scatterlist; + +/** + * struct aead_request - AEAD request + * @base: Common attributes for async crypto requests + * @assoclen: Length in bytes of associated data for authentication + * @cryptlen: Length of data to be encrypted or decrypted + * @iv: Initialisation vector + * @src: Source data + * @dst: Destination data + * @__ctx: Start of private context data + */ +struct aead_request { + struct crypto_async_request base; + + unsigned int assoclen; + unsigned int cryptlen; + + u8 *iv; + + struct scatterlist *src; + struct scatterlist *dst; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +/** + * struct aead_alg - AEAD cipher definition + * @maxauthsize: Set the maximum authentication tag size supported by the + * transformation. A transformation may support smaller tag sizes. + * As the authentication tag is a message digest to ensure the + * integrity of the encrypted data, a consumer typically wants the + * largest authentication tag possible as defined by this + * variable. + * @setauthsize: Set authentication size for the AEAD transformation. This + * function is used to specify the consumer requested size of the + * authentication tag to be either generated by the transformation + * during encryption or the size of the authentication tag to be + * supplied during the decryption operation. This function is also + * responsible for checking the authentication tag size for + * validity. + * @setkey: see struct skcipher_alg + * @encrypt: see struct skcipher_alg + * @decrypt: see struct skcipher_alg + * @ivsize: see struct skcipher_alg + * @chunksize: see struct skcipher_alg + * @init: Initialize the cryptographic transformation object. This function + * is used to initialize the cryptographic transformation object. 
+ * This function is called only once at the instantiation time, right + * after the transformation context was allocated. In case the + * cryptographic hardware has some special requirements which need to + * be handled by software, this function shall check for the precise + * requirement of the transformation and put any software fallbacks + * in place. + * @exit: Deinitialize the cryptographic transformation object. This is a + * counterpart to @init, used to remove various changes set in + * @init. + * @base: Definition of a generic crypto cipher algorithm. + * + * All fields except @ivsize is mandatory and must be filled. + */ +struct aead_alg { + int (*setkey)(struct crypto_aead *tfm, const u8 *key, + unsigned int keylen); + int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize); + int (*encrypt)(struct aead_request *req); + int (*decrypt)(struct aead_request *req); + int (*init)(struct crypto_aead *tfm); + void (*exit)(struct crypto_aead *tfm); + + unsigned int ivsize; + unsigned int maxauthsize; + unsigned int chunksize; + + struct crypto_alg base; +}; + +struct crypto_aead { + unsigned int authsize; + unsigned int reqsize; + + struct crypto_tfm base; +}; + +static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_aead, base); +} + +/** + * crypto_alloc_aead() - allocate AEAD cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * AEAD cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for an AEAD. The returned struct + * crypto_aead is the cipher handle that is required for any subsequent + * API invocation for that AEAD. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask); + +static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) +{ + return &tfm->base; +} + +/** + * crypto_free_aead() - zeroize and free aead handle + * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. + */ +static inline void crypto_free_aead(struct crypto_aead *tfm) +{ + crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm)); +} + +static inline const char *crypto_aead_driver_name(struct crypto_aead *tfm) +{ + return crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)); +} + +static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm) +{ + return container_of(crypto_aead_tfm(tfm)->__crt_alg, + struct aead_alg, base); +} + +static inline unsigned int crypto_aead_alg_ivsize(struct aead_alg *alg) +{ + return alg->ivsize; +} + +/** + * crypto_aead_ivsize() - obtain IV size + * @tfm: cipher handle + * + * The size of the IV for the aead referenced by the cipher handle is + * returned. This IV size may be zero if the cipher does not need an IV. + * + * Return: IV size in bytes + */ +static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm) +{ + return crypto_aead_alg_ivsize(crypto_aead_alg(tfm)); +} + +/** + * crypto_aead_authsize() - obtain maximum authentication data size + * @tfm: cipher handle + * + * The maximum size of the authentication data for the AEAD cipher referenced + * by the AEAD cipher handle is returned. The authentication data size may be + * zero if the cipher implements a hard-coded maximum. + * + * The authentication data may also be known as "tag value". 
+ * + * Return: authentication data size / tag size in bytes + */ +static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm) +{ + return tfm->authsize; +} + +static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg) +{ + return alg->maxauthsize; +} + +static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead) +{ + return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead)); +} + +/** + * crypto_aead_blocksize() - obtain block size of cipher + * @tfm: cipher handle + * + * The block size for the AEAD referenced with the cipher handle is returned. + * The caller may use that information to allocate appropriate memory for the + * data returned by the encryption or decryption operation + * + * Return: block size of cipher + */ +static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm)); +} + +static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm)); +} + +static inline u32 crypto_aead_get_flags(struct crypto_aead *tfm) +{ + return crypto_tfm_get_flags(crypto_aead_tfm(tfm)); +} + +static inline void crypto_aead_set_flags(struct crypto_aead *tfm, u32 flags) +{ + crypto_tfm_set_flags(crypto_aead_tfm(tfm), flags); +} + +static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags) +{ + crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags); +} + +/** + * crypto_aead_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the AEAD referenced by the cipher + * handle. + * + * Note, the key length determines the cipher type. Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_aead_setkey(struct crypto_aead *tfm, + const u8 *key, unsigned int keylen); + +/** + * crypto_aead_setauthsize() - set authentication data size + * @tfm: cipher handle + * @authsize: size of the authentication data / tag in bytes + * + * Set the authentication data size / tag size. AEAD requires an authentication + * tag (or MAC) in addition to the associated data. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize); + +static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) +{ + return __crypto_aead_cast(req->base.tfm); +} + +/** + * crypto_aead_encrypt() - encrypt plaintext + * @req: reference to the aead_request handle that holds all information + * needed to perform the cipher operation + * + * Encrypt plaintext data using the aead_request handle. That data structure + * and how it is filled with data is discussed with the aead_request_* + * functions. + * + * IMPORTANT NOTE The encryption operation creates the authentication data / + * tag. That data is concatenated with the created ciphertext. + * The ciphertext memory size is therefore the given number of + * block cipher blocks + the size defined by the + * crypto_aead_setauthsize invocation. The caller must ensure + * that sufficient memory is available for the ciphertext and + * the authentication tag. 
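+ *
+ * A rough destination sizing sketch following the memory layout above
+ * (illustrative; assumes the request parameters have already been set)::
+ *
+ *	tfm = crypto_aead_reqtfm(req);
+ *	dst_len = req->assoclen + req->cryptlen + crypto_aead_authsize(tfm);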
+ * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ +int crypto_aead_encrypt(struct aead_request *req); + +/** + * crypto_aead_decrypt() - decrypt ciphertext + * @req: reference to the aead_request handle that holds all information + * needed to perform the cipher operation + * + * Decrypt ciphertext data using the aead_request handle. That data structure + * and how it is filled with data is discussed with the aead_request_* + * functions. + * + * IMPORTANT NOTE The caller must concatenate the ciphertext followed by the + * authentication data / tag. That authentication data / tag + * must have the size defined by the crypto_aead_setauthsize + * invocation. + * + * + * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD + * cipher operation performs the authentication of the data during the + * decryption operation. Therefore, the function returns this error if + * the authentication of the ciphertext was unsuccessful (i.e. the + * integrity of the ciphertext or the associated data was violated); + * < 0 if an error occurred. + */ +int crypto_aead_decrypt(struct aead_request *req); + +/** + * DOC: Asynchronous AEAD Request Handle + * + * The aead_request data structure contains all pointers to data required for + * the AEAD cipher operation. This includes the cipher handle (which can be + * used by multiple aead_request instances), pointer to plaintext and + * ciphertext, asynchronous callback function, etc. It acts as a handle to the + * aead_request_* API calls in a similar way as AEAD handle to the + * crypto_aead_* API calls. + */ + +/** + * crypto_aead_reqsize() - obtain size of the request data structure + * @tfm: cipher handle + * + * Return: number of bytes + */ +static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm) +{ + return tfm->reqsize; +} + +/** + * aead_request_set_tfm() - update cipher handle reference in request + * @req: request handle to be modified + * @tfm: cipher handle that shall be added to the request handle + * + * Allow the caller to replace the existing aead handle in the request + * data structure with a different one. + */ +static inline void aead_request_set_tfm(struct aead_request *req, + struct crypto_aead *tfm) +{ + req->base.tfm = crypto_aead_tfm(tfm); +} + +/** + * aead_request_alloc() - allocate request data structure + * @tfm: cipher handle to be registered with the request + * @gfp: memory allocation flag that is handed to kmalloc by the API call. + * + * Allocate the request data structure that must be used with the AEAD + * encrypt and decrypt API calls. During the allocation, the provided aead + * handle is registered in the request data structure. 
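+ *
+ * A typical allocation sketch (illustrative)::
+ *
+ *	req = aead_request_alloc(tfm, GFP_KERNEL);
+ *	if (!req)
+ *		return -ENOMEM;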
+ * + * Return: allocated request handle in case of success, or NULL if out of memory + */ +static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, + gfp_t gfp) +{ + struct aead_request *req; + + req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp); + + if (likely(req)) + aead_request_set_tfm(req, tfm); + + return req; +} + +/** + * aead_request_free() - zeroize and free request data structure + * @req: request data structure cipher handle to be freed + */ +static inline void aead_request_free(struct aead_request *req) +{ + kfree_sensitive(req); +} + +/** + * aead_request_set_callback() - set asynchronous callback function + * @req: request handle + * @flags: specify zero or an ORing of the flags + * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and + * increase the wait queue beyond the initial maximum size; + * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep + * @compl: callback function pointer to be registered with the request handle + * @data: The data pointer refers to memory that is not used by the kernel + * crypto API, but provided to the callback function for it to use. Here, + * the caller can provide a reference to memory the callback function can + * operate on. As the callback function is invoked asynchronously to the + * related functionality, it may need to access data structures of the + * related functionality which can be referenced using this pointer. The + * callback function can access the memory via the "data" field in the + * crypto_async_request data structure provided to the callback function. + * + * Setting the callback function that is triggered once the cipher operation + * completes + * + * The callback function is registered with the aead_request handle and + * must comply with the following template:: + * + * void callback_function(struct crypto_async_request *req, int error) + */ +static inline void aead_request_set_callback(struct aead_request *req, + u32 flags, + crypto_completion_t compl, + void *data) +{ + req->base.complete = compl; + req->base.data = data; + req->base.flags = flags; +} + +/** + * aead_request_set_crypt - set data buffers + * @req: request handle + * @src: source scatter / gather list + * @dst: destination scatter / gather list + * @cryptlen: number of bytes to process from @src + * @iv: IV for the cipher operation which must comply with the IV size defined + * by crypto_aead_ivsize() + * + * Setting the source data and destination data scatter / gather lists which + * hold the associated data concatenated with the plaintext or ciphertext. See + * below for the authentication tag. + * + * For encryption, the source is treated as the plaintext and the + * destination is the ciphertext. For a decryption operation, the use is + * reversed - the source is the ciphertext and the destination is the plaintext. + * + * The memory structure for cipher operation has the following structure: + * + * - AEAD encryption input: assoc data || plaintext + * - AEAD encryption output: assoc data || ciphertext || auth tag + * - AEAD decryption input: assoc data || ciphertext || auth tag + * - AEAD decryption output: assoc data || plaintext + * + * Albeit the kernel requires the presence of the AAD buffer, however, + * the kernel does not fill the AAD buffer in the output case. If the + * caller wants to have that data buffer filled, the caller must either + * use an in-place cipher operation (i.e. same memory location for + * input/output memory location). 
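+ *
+ * A minimal in-place encryption sketch following the layout above
+ * (names are illustrative; buf must also leave room for the auth tag)::
+ *
+ *	sg_init_one(&sg, buf, assoclen + ptlen + crypto_aead_authsize(tfm));
+ *	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);
+ *	aead_request_set_ad(req, assoclen);
+ *	err = crypto_aead_encrypt(req);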
+ */ +static inline void aead_request_set_crypt(struct aead_request *req, + struct scatterlist *src, + struct scatterlist *dst, + unsigned int cryptlen, u8 *iv) +{ + req->src = src; + req->dst = dst; + req->cryptlen = cryptlen; + req->iv = iv; +} + +/** + * aead_request_set_ad - set associated data information + * @req: request handle + * @assoclen: number of bytes in associated data + * + * Setting the AD information. This function sets the length of + * the associated data. + */ +static inline void aead_request_set_ad(struct aead_request *req, + unsigned int assoclen) +{ + req->assoclen = assoclen; +} + +#endif /* _CRYPTO_AEAD_H */ diff --git a/include/crypto/aes.h b/include/crypto/aes.h new file mode 100644 index 000000000..209072970 --- /dev/null +++ b/include/crypto/aes.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for AES algorithms + */ + +#ifndef _CRYPTO_AES_H +#define _CRYPTO_AES_H + +#include <linux/types.h> +#include <linux/crypto.h> + +#define AES_MIN_KEY_SIZE 16 +#define AES_MAX_KEY_SIZE 32 +#define AES_KEYSIZE_128 16 +#define AES_KEYSIZE_192 24 +#define AES_KEYSIZE_256 32 +#define AES_BLOCK_SIZE 16 +#define AES_MAX_KEYLENGTH (15 * 16) +#define AES_MAX_KEYLENGTH_U32 (AES_MAX_KEYLENGTH / sizeof(u32)) + +/* + * Please ensure that the first two fields are 16-byte aligned + * relative to the start of the structure, i.e., don't move them! + */ +struct crypto_aes_ctx { + u32 key_enc[AES_MAX_KEYLENGTH_U32]; + u32 key_dec[AES_MAX_KEYLENGTH_U32]; + u32 key_length; +}; + +extern const u32 crypto_ft_tab[4][256] ____cacheline_aligned; +extern const u32 crypto_it_tab[4][256] ____cacheline_aligned; + +/* + * validate key length for AES algorithms + */ +static inline int aes_check_keylen(unsigned int keylen) +{ + switch (keylen) { + case AES_KEYSIZE_128: + case AES_KEYSIZE_192: + case AES_KEYSIZE_256: + break; + default: + return -EINVAL; + } + + return 0; +} + +int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len); + +/** + * aes_expandkey - Expands the AES key as described in FIPS-197 + * @ctx: The location where the computed key will be stored. + * @in_key: The supplied key. + * @key_len: The length of the supplied key. + * + * Returns 0 on success. The function fails only if an invalid key size (or + * pointer) is supplied. + * The expanded key size is 240 bytes (max of 14 rounds with a unique 16 bytes + * key schedule plus a 16 bytes key which is used before the first round). + * The decryption key is prepared for the "Equivalent Inverse Cipher" as + * described in FIPS-197. The first slot (16 bytes) of each key (enc or dec) is + * for the initial combination, the second slot for the first round and so on. 
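+ *
+ * A minimal single-block sketch using the helpers declared below
+ * (illustrative; only the key-length error is handled)::
+ *
+ *	struct crypto_aes_ctx ctx;
+ *	int err;
+ *
+ *	err = aes_expandkey(&ctx, key, AES_KEYSIZE_128);
+ *	if (err)
+ *		return err;
+ *	aes_encrypt(&ctx, out, in);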
+ */ +int aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, + unsigned int key_len); + +/** + * aes_encrypt - Encrypt a single AES block + * @ctx: Context struct containing the key schedule + * @out: Buffer to store the ciphertext + * @in: Buffer containing the plaintext + */ +void aes_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in); + +/** + * aes_decrypt - Decrypt a single AES block + * @ctx: Context struct containing the key schedule + * @out: Buffer to store the plaintext + * @in: Buffer containing the ciphertext + */ +void aes_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in); + +extern const u8 crypto_aes_sbox[]; +extern const u8 crypto_aes_inv_sbox[]; + +#endif diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h new file mode 100644 index 000000000..5764b46bd --- /dev/null +++ b/include/crypto/akcipher.h @@ -0,0 +1,418 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Public Key Encryption + * + * Copyright (c) 2015, Intel Corporation + * Authors: Tadeusz Struk <tadeusz.struk@intel.com> + */ +#ifndef _CRYPTO_AKCIPHER_H +#define _CRYPTO_AKCIPHER_H +#include <linux/crypto.h> + +/** + * struct akcipher_request - public key request + * + * @base: Common attributes for async crypto requests + * @src: Source data + * For verify op this is signature + digest, in that case + * total size of @src is @src_len + @dst_len. + * @dst: Destination data (Should be NULL for verify op) + * @src_len: Size of the input buffer + * For verify op it's size of signature part of @src, this part + * is supposed to be operated by cipher. + * @dst_len: Size of @dst buffer (for all ops except verify). + * It needs to be at least as big as the expected result + * depending on the operation. + * After operation it will be updated with the actual size of the + * result. + * In case of error where the dst sgl size was insufficient, + * it will be updated to the size required for the operation. + * For verify op this is size of digest part in @src. + * @__ctx: Start of private context data + */ +struct akcipher_request { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int src_len; + unsigned int dst_len; + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +/** + * struct crypto_akcipher - user-instantiated objects which encapsulate + * algorithms and core processing logic + * + * @base: Common crypto API algorithm data structure + */ +struct crypto_akcipher { + struct crypto_tfm base; +}; + +/** + * struct akcipher_alg - generic public key algorithm + * + * @sign: Function performs a sign operation as defined by public key + * algorithm. In case of error, where the dst_len was insufficient, + * the req->dst_len will be updated to the size required for the + * operation + * @verify: Function performs a complete verify operation as defined by + * public key algorithm, returning verification status. Requires + * digest value as input parameter. + * @encrypt: Function performs an encrypt operation as defined by public key + * algorithm. In case of error, where the dst_len was insufficient, + * the req->dst_len will be updated to the size required for the + * operation + * @decrypt: Function performs a decrypt operation as defined by public key + * algorithm. 
In case of error, where the dst_len was insufficient, + * the req->dst_len will be updated to the size required for the + * operation + * @set_pub_key: Function invokes the algorithm specific set public key + * function, which knows how to decode and interpret + * the BER encoded public key and parameters + * @set_priv_key: Function invokes the algorithm specific set private key + * function, which knows how to decode and interpret + * the BER encoded private key and parameters + * @max_size: Function returns dest buffer size required for a given key. + * @init: Initialize the cryptographic transformation object. + * This function is used to initialize the cryptographic + * transformation object. This function is called only once at + * the instantiation time, right after the transformation context + * was allocated. In case the cryptographic hardware has some + * special requirements which need to be handled by software, this + * function shall check for the precise requirement of the + * transformation and put any software fallbacks in place. + * @exit: Deinitialize the cryptographic transformation object. This is a + * counterpart to @init, used to remove various changes set in + * @init. + * + * @reqsize: Request context size required by algorithm implementation + * @base: Common crypto API algorithm data structure + */ +struct akcipher_alg { + int (*sign)(struct akcipher_request *req); + int (*verify)(struct akcipher_request *req); + int (*encrypt)(struct akcipher_request *req); + int (*decrypt)(struct akcipher_request *req); + int (*set_pub_key)(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen); + int (*set_priv_key)(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen); + unsigned int (*max_size)(struct crypto_akcipher *tfm); + int (*init)(struct crypto_akcipher *tfm); + void (*exit)(struct crypto_akcipher *tfm); + + unsigned int reqsize; + struct crypto_alg base; +}; + +/** + * DOC: Generic Public Key API + * + * The Public Key API is used with the algorithms of type + * CRYPTO_ALG_TYPE_AKCIPHER (listed as type "akcipher" in /proc/crypto) + */ + +/** + * crypto_alloc_akcipher() - allocate AKCIPHER tfm handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * public key algorithm e.g. "rsa" + * @type: specifies the type of the algorithm + * @mask: specifies the mask for the algorithm + * + * Allocate a handle for public key algorithm. The returned struct + * crypto_akcipher is the handle that is required for any subsequent + * API invocation for the public key operations. + * + * Return: allocated handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. 
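+ *
+ * A minimal allocation and key setup sketch, reusing the "rsa" name from
+ * above (illustrative; request handling is not shown)::
+ *
+ *	tfm = crypto_alloc_akcipher("rsa", 0, 0);
+ *	if (IS_ERR(tfm))
+ *		return PTR_ERR(tfm);
+ *	err = crypto_akcipher_set_pub_key(tfm, key, keylen);
+ *	if (!err)
+ *		len = crypto_akcipher_maxsize(tfm);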
+ */ +struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type, + u32 mask); + +static inline struct crypto_tfm *crypto_akcipher_tfm( + struct crypto_akcipher *tfm) +{ + return &tfm->base; +} + +static inline struct akcipher_alg *__crypto_akcipher_alg(struct crypto_alg *alg) +{ + return container_of(alg, struct akcipher_alg, base); +} + +static inline struct crypto_akcipher *__crypto_akcipher_tfm( + struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_akcipher, base); +} + +static inline struct akcipher_alg *crypto_akcipher_alg( + struct crypto_akcipher *tfm) +{ + return __crypto_akcipher_alg(crypto_akcipher_tfm(tfm)->__crt_alg); +} + +static inline unsigned int crypto_akcipher_reqsize(struct crypto_akcipher *tfm) +{ + return crypto_akcipher_alg(tfm)->reqsize; +} + +static inline void akcipher_request_set_tfm(struct akcipher_request *req, + struct crypto_akcipher *tfm) +{ + req->base.tfm = crypto_akcipher_tfm(tfm); +} + +static inline struct crypto_akcipher *crypto_akcipher_reqtfm( + struct akcipher_request *req) +{ + return __crypto_akcipher_tfm(req->base.tfm); +} + +/** + * crypto_free_akcipher() - free AKCIPHER tfm handle + * + * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() + * + * If @tfm is a NULL or error pointer, this function does nothing. + */ +static inline void crypto_free_akcipher(struct crypto_akcipher *tfm) +{ + crypto_destroy_tfm(tfm, crypto_akcipher_tfm(tfm)); +} + +/** + * akcipher_request_alloc() - allocates public key request + * + * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() + * @gfp: allocation flags + * + * Return: allocated handle in case of success or NULL in case of an error. + */ +static inline struct akcipher_request *akcipher_request_alloc( + struct crypto_akcipher *tfm, gfp_t gfp) +{ + struct akcipher_request *req; + + req = kmalloc(sizeof(*req) + crypto_akcipher_reqsize(tfm), gfp); + if (likely(req)) + akcipher_request_set_tfm(req, tfm); + + return req; +} + +/** + * akcipher_request_free() - zeroize and free public key request + * + * @req: request to free + */ +static inline void akcipher_request_free(struct akcipher_request *req) +{ + kfree_sensitive(req); +} + +/** + * akcipher_request_set_callback() - Sets an asynchronous callback. + * + * Callback will be called when an asynchronous operation on a given + * request is finished. 
+ * + * @req: request that the callback will be set for + * @flgs: specify for instance if the operation may backlog + * @cmpl: callback which will be called + * @data: private data used by the caller + */ +static inline void akcipher_request_set_callback(struct akcipher_request *req, + u32 flgs, + crypto_completion_t cmpl, + void *data) +{ + req->base.complete = cmpl; + req->base.data = data; + req->base.flags = flgs; +} + +/** + * akcipher_request_set_crypt() - Sets request parameters + * + * Sets parameters required by crypto operation + * + * @req: public key request + * @src: ptr to input scatter list + * @dst: ptr to output scatter list or NULL for verify op + * @src_len: size of the src input scatter list to be processed + * @dst_len: size of the dst output scatter list or size of signature + * portion in @src for verify op + */ +static inline void akcipher_request_set_crypt(struct akcipher_request *req, + struct scatterlist *src, + struct scatterlist *dst, + unsigned int src_len, + unsigned int dst_len) +{ + req->src = src; + req->dst = dst; + req->src_len = src_len; + req->dst_len = dst_len; +} + +/** + * crypto_akcipher_maxsize() - Get len for output buffer + * + * Function returns the dest buffer size required for a given key. + * Function assumes that the key is already set in the transformation. If this + * function is called without a setkey or with a failed setkey, you will end up + * in a NULL dereference. + * + * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() + */ +static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm) +{ + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + + return alg->max_size(tfm); +} + +/** + * crypto_akcipher_encrypt() - Invoke public key encrypt operation + * + * Function invokes the specific public key encrypt operation for a given + * public key algorithm + * + * @req: asymmetric key request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_akcipher_encrypt(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; + unsigned int src_len = req->src_len; + int ret; + + crypto_stats_get(calg); + ret = alg->encrypt(req); + crypto_stats_akcipher_encrypt(src_len, ret, calg); + return ret; +} + +/** + * crypto_akcipher_decrypt() - Invoke public key decrypt operation + * + * Function invokes the specific public key decrypt operation for a given + * public key algorithm + * + * @req: asymmetric key request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_akcipher_decrypt(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; + unsigned int src_len = req->src_len; + int ret; + + crypto_stats_get(calg); + ret = alg->decrypt(req); + crypto_stats_akcipher_decrypt(src_len, ret, calg); + return ret; +} + +/** + * crypto_akcipher_sign() - Invoke public key sign operation + * + * Function invokes the specific public key sign operation for a given + * public key algorithm + * + * @req: asymmetric key request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_akcipher_sign(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); 
+ struct crypto_alg *calg = tfm->base.__crt_alg; + int ret; + + crypto_stats_get(calg); + ret = alg->sign(req); + crypto_stats_akcipher_sign(ret, calg); + return ret; +} + +/** + * crypto_akcipher_verify() - Invoke public key signature verification + * + * Function invokes the specific public key signature verification operation + * for a given public key algorithm. + * + * @req: asymmetric key request + * + * Note: req->dst should be NULL, req->src should point to SG of size + * (req->src_size + req->dst_size), containing signature (of req->src_size + * length) with appended digest (of req->dst_size length). + * + * Return: zero on verification success; error code in case of error. + */ +static inline int crypto_akcipher_verify(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; + int ret; + + crypto_stats_get(calg); + ret = alg->verify(req); + crypto_stats_akcipher_verify(ret, calg); + return ret; +} + +/** + * crypto_akcipher_set_pub_key() - Invoke set public key operation + * + * Function invokes the algorithm specific set key function, which knows + * how to decode and interpret the encoded key and parameters + * + * @tfm: tfm handle + * @key: BER encoded public key, algo OID, paramlen, BER encoded + * parameters + * @keylen: length of the key (not including other data) + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_akcipher_set_pub_key(struct crypto_akcipher *tfm, + const void *key, + unsigned int keylen) +{ + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + + return alg->set_pub_key(tfm, key, keylen); +} + +/** + * crypto_akcipher_set_priv_key() - Invoke set private key operation + * + * Function invokes the algorithm specific set key function, which knows + * how to decode and interpret the encoded key and parameters + * + * @tfm: tfm handle + * @key: BER encoded private key, algo OID, paramlen, BER encoded + * parameters + * @keylen: length of the key (not including other data) + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_akcipher_set_priv_key(struct crypto_akcipher *tfm, + const void *key, + unsigned int keylen) +{ + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + + return alg->set_priv_key(tfm, key, keylen); +} +#endif diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h new file mode 100644 index 000000000..939a3196b --- /dev/null +++ b/include/crypto/algapi.h @@ -0,0 +1,278 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Cryptographic API for algorithms (i.e., low-level API). + * + * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> + */ +#ifndef _CRYPTO_ALGAPI_H +#define _CRYPTO_ALGAPI_H + +#include <linux/align.h> +#include <linux/crypto.h> +#include <linux/kconfig.h> +#include <linux/list.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +#include <asm/unaligned.h> + +/* + * Maximum values for blocksize and alignmask, used to allocate + * static buffers that are big enough for any combination of + * algs and architectures. Ciphers have a lower maximum size. 
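+ *
+ * For example, a stack buffer guaranteed to fit one block of any
+ * algorithm at any alignment could be declared as (illustrative)::
+ *
+ *	u8 buf[MAX_ALGAPI_BLOCKSIZE + MAX_ALGAPI_ALIGNMASK];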
+ */ +#define MAX_ALGAPI_BLOCKSIZE 160 +#define MAX_ALGAPI_ALIGNMASK 63 +#define MAX_CIPHER_BLOCKSIZE 16 +#define MAX_CIPHER_ALIGNMASK 15 + +struct crypto_aead; +struct crypto_instance; +struct module; +struct notifier_block; +struct rtattr; +struct seq_file; +struct sk_buff; + +struct crypto_type { + unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask); + unsigned int (*extsize)(struct crypto_alg *alg); + int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask); + int (*init_tfm)(struct crypto_tfm *tfm); + void (*show)(struct seq_file *m, struct crypto_alg *alg); + int (*report)(struct sk_buff *skb, struct crypto_alg *alg); + void (*free)(struct crypto_instance *inst); + + unsigned int type; + unsigned int maskclear; + unsigned int maskset; + unsigned int tfmsize; +}; + +struct crypto_instance { + struct crypto_alg alg; + + struct crypto_template *tmpl; + + union { + /* Node in list of instances after registration. */ + struct hlist_node list; + /* List of attached spawns before registration. */ + struct crypto_spawn *spawns; + }; + + struct work_struct free_work; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +struct crypto_template { + struct list_head list; + struct hlist_head instances; + struct module *module; + + int (*create)(struct crypto_template *tmpl, struct rtattr **tb); + + char name[CRYPTO_MAX_ALG_NAME]; +}; + +struct crypto_spawn { + struct list_head list; + struct crypto_alg *alg; + union { + /* Back pointer to instance after registration.*/ + struct crypto_instance *inst; + /* Spawn list pointer prior to registration. */ + struct crypto_spawn *next; + }; + const struct crypto_type *frontend; + u32 mask; + bool dead; + bool registered; +}; + +struct crypto_queue { + struct list_head list; + struct list_head *backlog; + + unsigned int qlen; + unsigned int max_qlen; +}; + +struct scatter_walk { + struct scatterlist *sg; + unsigned int offset; +}; + +struct crypto_attr_alg { + char name[CRYPTO_MAX_ALG_NAME]; +}; + +struct crypto_attr_type { + u32 type; + u32 mask; +}; + +void crypto_mod_put(struct crypto_alg *alg); + +int crypto_register_template(struct crypto_template *tmpl); +int crypto_register_templates(struct crypto_template *tmpls, int count); +void crypto_unregister_template(struct crypto_template *tmpl); +void crypto_unregister_templates(struct crypto_template *tmpls, int count); +struct crypto_template *crypto_lookup_template(const char *name); + +int crypto_register_instance(struct crypto_template *tmpl, + struct crypto_instance *inst); +void crypto_unregister_instance(struct crypto_instance *inst); + +int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst, + const char *name, u32 type, u32 mask); +void crypto_drop_spawn(struct crypto_spawn *spawn); +struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, + u32 mask); +void *crypto_spawn_tfm2(struct crypto_spawn *spawn); + +struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); +int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret); +const char *crypto_attr_alg_name(struct rtattr *rta); +int crypto_inst_setname(struct crypto_instance *inst, const char *name, + struct crypto_alg *alg); + +void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen); +int crypto_enqueue_request(struct crypto_queue *queue, + struct crypto_async_request *request); +void crypto_enqueue_request_head(struct crypto_queue *queue, + struct crypto_async_request *request); +struct crypto_async_request *crypto_dequeue_request(struct crypto_queue 
*queue); +static inline unsigned int crypto_queue_len(struct crypto_queue *queue) +{ + return queue->qlen; +} + +void crypto_inc(u8 *a, unsigned int size); +void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size); + +static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size) +{ + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && + __builtin_constant_p(size) && + (size % sizeof(unsigned long)) == 0) { + unsigned long *d = (unsigned long *)dst; + unsigned long *s = (unsigned long *)src; + unsigned long l; + + while (size > 0) { + l = get_unaligned(d) ^ get_unaligned(s++); + put_unaligned(l, d++); + size -= sizeof(unsigned long); + } + } else { + __crypto_xor(dst, dst, src, size); + } +} + +static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2, + unsigned int size) +{ + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && + __builtin_constant_p(size) && + (size % sizeof(unsigned long)) == 0) { + unsigned long *d = (unsigned long *)dst; + unsigned long *s1 = (unsigned long *)src1; + unsigned long *s2 = (unsigned long *)src2; + unsigned long l; + + while (size > 0) { + l = get_unaligned(s1++) ^ get_unaligned(s2++); + put_unaligned(l, d++); + size -= sizeof(unsigned long); + } + } else { + __crypto_xor(dst, src1, src2, size); + } +} + +static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm) +{ + return PTR_ALIGN(crypto_tfm_ctx(tfm), + crypto_tfm_alg_alignmask(tfm) + 1); +} + +static inline struct crypto_instance *crypto_tfm_alg_instance( + struct crypto_tfm *tfm) +{ + return container_of(tfm->__crt_alg, struct crypto_instance, alg); +} + +static inline void *crypto_instance_ctx(struct crypto_instance *inst) +{ + return inst->__ctx; +} + +static inline struct crypto_async_request *crypto_get_backlog( + struct crypto_queue *queue) +{ + return queue->backlog == &queue->list ? NULL : + container_of(queue->backlog, struct crypto_async_request, list); +} + +static inline u32 crypto_requires_off(struct crypto_attr_type *algt, u32 off) +{ + return (algt->type ^ off) & algt->mask & off; +} + +/* + * When an algorithm uses another algorithm (e.g., if it's an instance of a + * template), these are the flags that should always be set on the "outer" + * algorithm if any "inner" algorithm has them set. + */ +#define CRYPTO_ALG_INHERITED_FLAGS \ + (CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK | \ + CRYPTO_ALG_ALLOCATES_MEMORY) + +/* + * Given the type and mask that specify the flags restrictions on a template + * instance being created, return the mask that should be passed to + * crypto_grab_*() (along with type=0) to honor any request the user made to + * have any of the CRYPTO_ALG_INHERITED_FLAGS clear. + */ +static inline u32 crypto_algt_inherited_mask(struct crypto_attr_type *algt) +{ + return crypto_requires_off(algt, CRYPTO_ALG_INHERITED_FLAGS); +} + +noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size); + +/** + * crypto_memneq - Compare two areas of memory without leaking + * timing information. + * + * @a: One area of memory + * @b: Another area of memory + * @size: The size of the area. + * + * Returns 0 when data is equal, 1 otherwise. + */ +static inline int crypto_memneq(const void *a, const void *b, size_t size) +{ + return __crypto_memneq(a, b, size) != 0UL ? 1 : 0; +} + +int crypto_register_notifier(struct notifier_block *nb); +int crypto_unregister_notifier(struct notifier_block *nb); + +/* Crypto notification events. 
*/ +enum { + CRYPTO_MSG_ALG_REQUEST, + CRYPTO_MSG_ALG_REGISTER, + CRYPTO_MSG_ALG_LOADED, +}; + +static inline void crypto_request_complete(struct crypto_async_request *req, + int err) +{ + crypto_completion_t complete = req->complete; + complete(req, err); +} + +#endif /* _CRYPTO_ALGAPI_H */ diff --git a/include/crypto/arc4.h b/include/crypto/arc4.h new file mode 100644 index 000000000..f3c22fe01 --- /dev/null +++ b/include/crypto/arc4.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Common values for ARC4 Cipher Algorithm + */ + +#ifndef _CRYPTO_ARC4_H +#define _CRYPTO_ARC4_H + +#include <linux/types.h> + +#define ARC4_MIN_KEY_SIZE 1 +#define ARC4_MAX_KEY_SIZE 256 +#define ARC4_BLOCK_SIZE 1 + +struct arc4_ctx { + u32 S[256]; + u32 x, y; +}; + +int arc4_setkey(struct arc4_ctx *ctx, const u8 *in_key, unsigned int key_len); +void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int len); + +#endif /* _CRYPTO_ARC4_H */ diff --git a/include/crypto/aria.h b/include/crypto/aria.h new file mode 100644 index 000000000..254da46cc --- /dev/null +++ b/include/crypto/aria.h @@ -0,0 +1,458 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Cryptographic API. + * + * ARIA Cipher Algorithm. + * + * Documentation of ARIA can be found in RFC 5794. + * Copyright (c) 2022 Taehee Yoo <ap420073@gmail.com> + * Copyright (c) 2022 Taehee Yoo <ap420073@gmail.com> + * + * Information for ARIA + * http://210.104.33.10/ARIA/index-e.html (English) + * http://seed.kisa.or.kr/ (Korean) + * + * Public domain version is distributed above. + */ + +#ifndef _CRYPTO_ARIA_H +#define _CRYPTO_ARIA_H + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/crypto.h> +#include <asm/byteorder.h> + +#define ARIA_MIN_KEY_SIZE 16 +#define ARIA_MAX_KEY_SIZE 32 +#define ARIA_BLOCK_SIZE 16 +#define ARIA_MAX_RD_KEYS 17 +#define ARIA_RD_KEY_WORDS (ARIA_BLOCK_SIZE / sizeof(u32)) + +struct aria_ctx { + u32 enc_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS]; + u32 dec_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS]; + int rounds; + int key_length; +}; + +static const u32 s1[256] = { + 0x00636363, 0x007c7c7c, 0x00777777, 0x007b7b7b, + 0x00f2f2f2, 0x006b6b6b, 0x006f6f6f, 0x00c5c5c5, + 0x00303030, 0x00010101, 0x00676767, 0x002b2b2b, + 0x00fefefe, 0x00d7d7d7, 0x00ababab, 0x00767676, + 0x00cacaca, 0x00828282, 0x00c9c9c9, 0x007d7d7d, + 0x00fafafa, 0x00595959, 0x00474747, 0x00f0f0f0, + 0x00adadad, 0x00d4d4d4, 0x00a2a2a2, 0x00afafaf, + 0x009c9c9c, 0x00a4a4a4, 0x00727272, 0x00c0c0c0, + 0x00b7b7b7, 0x00fdfdfd, 0x00939393, 0x00262626, + 0x00363636, 0x003f3f3f, 0x00f7f7f7, 0x00cccccc, + 0x00343434, 0x00a5a5a5, 0x00e5e5e5, 0x00f1f1f1, + 0x00717171, 0x00d8d8d8, 0x00313131, 0x00151515, + 0x00040404, 0x00c7c7c7, 0x00232323, 0x00c3c3c3, + 0x00181818, 0x00969696, 0x00050505, 0x009a9a9a, + 0x00070707, 0x00121212, 0x00808080, 0x00e2e2e2, + 0x00ebebeb, 0x00272727, 0x00b2b2b2, 0x00757575, + 0x00090909, 0x00838383, 0x002c2c2c, 0x001a1a1a, + 0x001b1b1b, 0x006e6e6e, 0x005a5a5a, 0x00a0a0a0, + 0x00525252, 0x003b3b3b, 0x00d6d6d6, 0x00b3b3b3, + 0x00292929, 0x00e3e3e3, 0x002f2f2f, 0x00848484, + 0x00535353, 0x00d1d1d1, 0x00000000, 0x00ededed, + 0x00202020, 0x00fcfcfc, 0x00b1b1b1, 0x005b5b5b, + 0x006a6a6a, 0x00cbcbcb, 0x00bebebe, 0x00393939, + 0x004a4a4a, 0x004c4c4c, 0x00585858, 0x00cfcfcf, + 0x00d0d0d0, 0x00efefef, 0x00aaaaaa, 0x00fbfbfb, + 0x00434343, 0x004d4d4d, 0x00333333, 0x00858585, + 0x00454545, 0x00f9f9f9, 0x00020202, 0x007f7f7f, + 0x00505050, 0x003c3c3c, 0x009f9f9f, 
0x00a8a8a8, + 0x00515151, 0x00a3a3a3, 0x00404040, 0x008f8f8f, + 0x00929292, 0x009d9d9d, 0x00383838, 0x00f5f5f5, + 0x00bcbcbc, 0x00b6b6b6, 0x00dadada, 0x00212121, + 0x00101010, 0x00ffffff, 0x00f3f3f3, 0x00d2d2d2, + 0x00cdcdcd, 0x000c0c0c, 0x00131313, 0x00ececec, + 0x005f5f5f, 0x00979797, 0x00444444, 0x00171717, + 0x00c4c4c4, 0x00a7a7a7, 0x007e7e7e, 0x003d3d3d, + 0x00646464, 0x005d5d5d, 0x00191919, 0x00737373, + 0x00606060, 0x00818181, 0x004f4f4f, 0x00dcdcdc, + 0x00222222, 0x002a2a2a, 0x00909090, 0x00888888, + 0x00464646, 0x00eeeeee, 0x00b8b8b8, 0x00141414, + 0x00dedede, 0x005e5e5e, 0x000b0b0b, 0x00dbdbdb, + 0x00e0e0e0, 0x00323232, 0x003a3a3a, 0x000a0a0a, + 0x00494949, 0x00060606, 0x00242424, 0x005c5c5c, + 0x00c2c2c2, 0x00d3d3d3, 0x00acacac, 0x00626262, + 0x00919191, 0x00959595, 0x00e4e4e4, 0x00797979, + 0x00e7e7e7, 0x00c8c8c8, 0x00373737, 0x006d6d6d, + 0x008d8d8d, 0x00d5d5d5, 0x004e4e4e, 0x00a9a9a9, + 0x006c6c6c, 0x00565656, 0x00f4f4f4, 0x00eaeaea, + 0x00656565, 0x007a7a7a, 0x00aeaeae, 0x00080808, + 0x00bababa, 0x00787878, 0x00252525, 0x002e2e2e, + 0x001c1c1c, 0x00a6a6a6, 0x00b4b4b4, 0x00c6c6c6, + 0x00e8e8e8, 0x00dddddd, 0x00747474, 0x001f1f1f, + 0x004b4b4b, 0x00bdbdbd, 0x008b8b8b, 0x008a8a8a, + 0x00707070, 0x003e3e3e, 0x00b5b5b5, 0x00666666, + 0x00484848, 0x00030303, 0x00f6f6f6, 0x000e0e0e, + 0x00616161, 0x00353535, 0x00575757, 0x00b9b9b9, + 0x00868686, 0x00c1c1c1, 0x001d1d1d, 0x009e9e9e, + 0x00e1e1e1, 0x00f8f8f8, 0x00989898, 0x00111111, + 0x00696969, 0x00d9d9d9, 0x008e8e8e, 0x00949494, + 0x009b9b9b, 0x001e1e1e, 0x00878787, 0x00e9e9e9, + 0x00cecece, 0x00555555, 0x00282828, 0x00dfdfdf, + 0x008c8c8c, 0x00a1a1a1, 0x00898989, 0x000d0d0d, + 0x00bfbfbf, 0x00e6e6e6, 0x00424242, 0x00686868, + 0x00414141, 0x00999999, 0x002d2d2d, 0x000f0f0f, + 0x00b0b0b0, 0x00545454, 0x00bbbbbb, 0x00161616 +}; + +static const u32 s2[256] = { + 0xe200e2e2, 0x4e004e4e, 0x54005454, 0xfc00fcfc, + 0x94009494, 0xc200c2c2, 0x4a004a4a, 0xcc00cccc, + 0x62006262, 0x0d000d0d, 0x6a006a6a, 0x46004646, + 0x3c003c3c, 0x4d004d4d, 0x8b008b8b, 0xd100d1d1, + 0x5e005e5e, 0xfa00fafa, 0x64006464, 0xcb00cbcb, + 0xb400b4b4, 0x97009797, 0xbe00bebe, 0x2b002b2b, + 0xbc00bcbc, 0x77007777, 0x2e002e2e, 0x03000303, + 0xd300d3d3, 0x19001919, 0x59005959, 0xc100c1c1, + 0x1d001d1d, 0x06000606, 0x41004141, 0x6b006b6b, + 0x55005555, 0xf000f0f0, 0x99009999, 0x69006969, + 0xea00eaea, 0x9c009c9c, 0x18001818, 0xae00aeae, + 0x63006363, 0xdf00dfdf, 0xe700e7e7, 0xbb00bbbb, + 0x00000000, 0x73007373, 0x66006666, 0xfb00fbfb, + 0x96009696, 0x4c004c4c, 0x85008585, 0xe400e4e4, + 0x3a003a3a, 0x09000909, 0x45004545, 0xaa00aaaa, + 0x0f000f0f, 0xee00eeee, 0x10001010, 0xeb00ebeb, + 0x2d002d2d, 0x7f007f7f, 0xf400f4f4, 0x29002929, + 0xac00acac, 0xcf00cfcf, 0xad00adad, 0x91009191, + 0x8d008d8d, 0x78007878, 0xc800c8c8, 0x95009595, + 0xf900f9f9, 0x2f002f2f, 0xce00cece, 0xcd00cdcd, + 0x08000808, 0x7a007a7a, 0x88008888, 0x38003838, + 0x5c005c5c, 0x83008383, 0x2a002a2a, 0x28002828, + 0x47004747, 0xdb00dbdb, 0xb800b8b8, 0xc700c7c7, + 0x93009393, 0xa400a4a4, 0x12001212, 0x53005353, + 0xff00ffff, 0x87008787, 0x0e000e0e, 0x31003131, + 0x36003636, 0x21002121, 0x58005858, 0x48004848, + 0x01000101, 0x8e008e8e, 0x37003737, 0x74007474, + 0x32003232, 0xca00caca, 0xe900e9e9, 0xb100b1b1, + 0xb700b7b7, 0xab00abab, 0x0c000c0c, 0xd700d7d7, + 0xc400c4c4, 0x56005656, 0x42004242, 0x26002626, + 0x07000707, 0x98009898, 0x60006060, 0xd900d9d9, + 0xb600b6b6, 0xb900b9b9, 0x11001111, 0x40004040, + 0xec00ecec, 0x20002020, 0x8c008c8c, 0xbd00bdbd, + 0xa000a0a0, 0xc900c9c9, 0x84008484, 0x04000404, + 
0x49004949, 0x23002323, 0xf100f1f1, 0x4f004f4f, + 0x50005050, 0x1f001f1f, 0x13001313, 0xdc00dcdc, + 0xd800d8d8, 0xc000c0c0, 0x9e009e9e, 0x57005757, + 0xe300e3e3, 0xc300c3c3, 0x7b007b7b, 0x65006565, + 0x3b003b3b, 0x02000202, 0x8f008f8f, 0x3e003e3e, + 0xe800e8e8, 0x25002525, 0x92009292, 0xe500e5e5, + 0x15001515, 0xdd00dddd, 0xfd00fdfd, 0x17001717, + 0xa900a9a9, 0xbf00bfbf, 0xd400d4d4, 0x9a009a9a, + 0x7e007e7e, 0xc500c5c5, 0x39003939, 0x67006767, + 0xfe00fefe, 0x76007676, 0x9d009d9d, 0x43004343, + 0xa700a7a7, 0xe100e1e1, 0xd000d0d0, 0xf500f5f5, + 0x68006868, 0xf200f2f2, 0x1b001b1b, 0x34003434, + 0x70007070, 0x05000505, 0xa300a3a3, 0x8a008a8a, + 0xd500d5d5, 0x79007979, 0x86008686, 0xa800a8a8, + 0x30003030, 0xc600c6c6, 0x51005151, 0x4b004b4b, + 0x1e001e1e, 0xa600a6a6, 0x27002727, 0xf600f6f6, + 0x35003535, 0xd200d2d2, 0x6e006e6e, 0x24002424, + 0x16001616, 0x82008282, 0x5f005f5f, 0xda00dada, + 0xe600e6e6, 0x75007575, 0xa200a2a2, 0xef00efef, + 0x2c002c2c, 0xb200b2b2, 0x1c001c1c, 0x9f009f9f, + 0x5d005d5d, 0x6f006f6f, 0x80008080, 0x0a000a0a, + 0x72007272, 0x44004444, 0x9b009b9b, 0x6c006c6c, + 0x90009090, 0x0b000b0b, 0x5b005b5b, 0x33003333, + 0x7d007d7d, 0x5a005a5a, 0x52005252, 0xf300f3f3, + 0x61006161, 0xa100a1a1, 0xf700f7f7, 0xb000b0b0, + 0xd600d6d6, 0x3f003f3f, 0x7c007c7c, 0x6d006d6d, + 0xed00eded, 0x14001414, 0xe000e0e0, 0xa500a5a5, + 0x3d003d3d, 0x22002222, 0xb300b3b3, 0xf800f8f8, + 0x89008989, 0xde00dede, 0x71007171, 0x1a001a1a, + 0xaf00afaf, 0xba00baba, 0xb500b5b5, 0x81008181 +}; + +static const u32 x1[256] = { + 0x52520052, 0x09090009, 0x6a6a006a, 0xd5d500d5, + 0x30300030, 0x36360036, 0xa5a500a5, 0x38380038, + 0xbfbf00bf, 0x40400040, 0xa3a300a3, 0x9e9e009e, + 0x81810081, 0xf3f300f3, 0xd7d700d7, 0xfbfb00fb, + 0x7c7c007c, 0xe3e300e3, 0x39390039, 0x82820082, + 0x9b9b009b, 0x2f2f002f, 0xffff00ff, 0x87870087, + 0x34340034, 0x8e8e008e, 0x43430043, 0x44440044, + 0xc4c400c4, 0xdede00de, 0xe9e900e9, 0xcbcb00cb, + 0x54540054, 0x7b7b007b, 0x94940094, 0x32320032, + 0xa6a600a6, 0xc2c200c2, 0x23230023, 0x3d3d003d, + 0xeeee00ee, 0x4c4c004c, 0x95950095, 0x0b0b000b, + 0x42420042, 0xfafa00fa, 0xc3c300c3, 0x4e4e004e, + 0x08080008, 0x2e2e002e, 0xa1a100a1, 0x66660066, + 0x28280028, 0xd9d900d9, 0x24240024, 0xb2b200b2, + 0x76760076, 0x5b5b005b, 0xa2a200a2, 0x49490049, + 0x6d6d006d, 0x8b8b008b, 0xd1d100d1, 0x25250025, + 0x72720072, 0xf8f800f8, 0xf6f600f6, 0x64640064, + 0x86860086, 0x68680068, 0x98980098, 0x16160016, + 0xd4d400d4, 0xa4a400a4, 0x5c5c005c, 0xcccc00cc, + 0x5d5d005d, 0x65650065, 0xb6b600b6, 0x92920092, + 0x6c6c006c, 0x70700070, 0x48480048, 0x50500050, + 0xfdfd00fd, 0xeded00ed, 0xb9b900b9, 0xdada00da, + 0x5e5e005e, 0x15150015, 0x46460046, 0x57570057, + 0xa7a700a7, 0x8d8d008d, 0x9d9d009d, 0x84840084, + 0x90900090, 0xd8d800d8, 0xabab00ab, 0x00000000, + 0x8c8c008c, 0xbcbc00bc, 0xd3d300d3, 0x0a0a000a, + 0xf7f700f7, 0xe4e400e4, 0x58580058, 0x05050005, + 0xb8b800b8, 0xb3b300b3, 0x45450045, 0x06060006, + 0xd0d000d0, 0x2c2c002c, 0x1e1e001e, 0x8f8f008f, + 0xcaca00ca, 0x3f3f003f, 0x0f0f000f, 0x02020002, + 0xc1c100c1, 0xafaf00af, 0xbdbd00bd, 0x03030003, + 0x01010001, 0x13130013, 0x8a8a008a, 0x6b6b006b, + 0x3a3a003a, 0x91910091, 0x11110011, 0x41410041, + 0x4f4f004f, 0x67670067, 0xdcdc00dc, 0xeaea00ea, + 0x97970097, 0xf2f200f2, 0xcfcf00cf, 0xcece00ce, + 0xf0f000f0, 0xb4b400b4, 0xe6e600e6, 0x73730073, + 0x96960096, 0xacac00ac, 0x74740074, 0x22220022, + 0xe7e700e7, 0xadad00ad, 0x35350035, 0x85850085, + 0xe2e200e2, 0xf9f900f9, 0x37370037, 0xe8e800e8, + 0x1c1c001c, 0x75750075, 0xdfdf00df, 0x6e6e006e, + 0x47470047, 
0xf1f100f1, 0x1a1a001a, 0x71710071, + 0x1d1d001d, 0x29290029, 0xc5c500c5, 0x89890089, + 0x6f6f006f, 0xb7b700b7, 0x62620062, 0x0e0e000e, + 0xaaaa00aa, 0x18180018, 0xbebe00be, 0x1b1b001b, + 0xfcfc00fc, 0x56560056, 0x3e3e003e, 0x4b4b004b, + 0xc6c600c6, 0xd2d200d2, 0x79790079, 0x20200020, + 0x9a9a009a, 0xdbdb00db, 0xc0c000c0, 0xfefe00fe, + 0x78780078, 0xcdcd00cd, 0x5a5a005a, 0xf4f400f4, + 0x1f1f001f, 0xdddd00dd, 0xa8a800a8, 0x33330033, + 0x88880088, 0x07070007, 0xc7c700c7, 0x31310031, + 0xb1b100b1, 0x12120012, 0x10100010, 0x59590059, + 0x27270027, 0x80800080, 0xecec00ec, 0x5f5f005f, + 0x60600060, 0x51510051, 0x7f7f007f, 0xa9a900a9, + 0x19190019, 0xb5b500b5, 0x4a4a004a, 0x0d0d000d, + 0x2d2d002d, 0xe5e500e5, 0x7a7a007a, 0x9f9f009f, + 0x93930093, 0xc9c900c9, 0x9c9c009c, 0xefef00ef, + 0xa0a000a0, 0xe0e000e0, 0x3b3b003b, 0x4d4d004d, + 0xaeae00ae, 0x2a2a002a, 0xf5f500f5, 0xb0b000b0, + 0xc8c800c8, 0xebeb00eb, 0xbbbb00bb, 0x3c3c003c, + 0x83830083, 0x53530053, 0x99990099, 0x61610061, + 0x17170017, 0x2b2b002b, 0x04040004, 0x7e7e007e, + 0xbaba00ba, 0x77770077, 0xd6d600d6, 0x26260026, + 0xe1e100e1, 0x69690069, 0x14140014, 0x63630063, + 0x55550055, 0x21210021, 0x0c0c000c, 0x7d7d007d +}; + +static const u32 x2[256] = { + 0x30303000, 0x68686800, 0x99999900, 0x1b1b1b00, + 0x87878700, 0xb9b9b900, 0x21212100, 0x78787800, + 0x50505000, 0x39393900, 0xdbdbdb00, 0xe1e1e100, + 0x72727200, 0x09090900, 0x62626200, 0x3c3c3c00, + 0x3e3e3e00, 0x7e7e7e00, 0x5e5e5e00, 0x8e8e8e00, + 0xf1f1f100, 0xa0a0a000, 0xcccccc00, 0xa3a3a300, + 0x2a2a2a00, 0x1d1d1d00, 0xfbfbfb00, 0xb6b6b600, + 0xd6d6d600, 0x20202000, 0xc4c4c400, 0x8d8d8d00, + 0x81818100, 0x65656500, 0xf5f5f500, 0x89898900, + 0xcbcbcb00, 0x9d9d9d00, 0x77777700, 0xc6c6c600, + 0x57575700, 0x43434300, 0x56565600, 0x17171700, + 0xd4d4d400, 0x40404000, 0x1a1a1a00, 0x4d4d4d00, + 0xc0c0c000, 0x63636300, 0x6c6c6c00, 0xe3e3e300, + 0xb7b7b700, 0xc8c8c800, 0x64646400, 0x6a6a6a00, + 0x53535300, 0xaaaaaa00, 0x38383800, 0x98989800, + 0x0c0c0c00, 0xf4f4f400, 0x9b9b9b00, 0xededed00, + 0x7f7f7f00, 0x22222200, 0x76767600, 0xafafaf00, + 0xdddddd00, 0x3a3a3a00, 0x0b0b0b00, 0x58585800, + 0x67676700, 0x88888800, 0x06060600, 0xc3c3c300, + 0x35353500, 0x0d0d0d00, 0x01010100, 0x8b8b8b00, + 0x8c8c8c00, 0xc2c2c200, 0xe6e6e600, 0x5f5f5f00, + 0x02020200, 0x24242400, 0x75757500, 0x93939300, + 0x66666600, 0x1e1e1e00, 0xe5e5e500, 0xe2e2e200, + 0x54545400, 0xd8d8d800, 0x10101000, 0xcecece00, + 0x7a7a7a00, 0xe8e8e800, 0x08080800, 0x2c2c2c00, + 0x12121200, 0x97979700, 0x32323200, 0xababab00, + 0xb4b4b400, 0x27272700, 0x0a0a0a00, 0x23232300, + 0xdfdfdf00, 0xefefef00, 0xcacaca00, 0xd9d9d900, + 0xb8b8b800, 0xfafafa00, 0xdcdcdc00, 0x31313100, + 0x6b6b6b00, 0xd1d1d100, 0xadadad00, 0x19191900, + 0x49494900, 0xbdbdbd00, 0x51515100, 0x96969600, + 0xeeeeee00, 0xe4e4e400, 0xa8a8a800, 0x41414100, + 0xdadada00, 0xffffff00, 0xcdcdcd00, 0x55555500, + 0x86868600, 0x36363600, 0xbebebe00, 0x61616100, + 0x52525200, 0xf8f8f800, 0xbbbbbb00, 0x0e0e0e00, + 0x82828200, 0x48484800, 0x69696900, 0x9a9a9a00, + 0xe0e0e000, 0x47474700, 0x9e9e9e00, 0x5c5c5c00, + 0x04040400, 0x4b4b4b00, 0x34343400, 0x15151500, + 0x79797900, 0x26262600, 0xa7a7a700, 0xdedede00, + 0x29292900, 0xaeaeae00, 0x92929200, 0xd7d7d700, + 0x84848400, 0xe9e9e900, 0xd2d2d200, 0xbababa00, + 0x5d5d5d00, 0xf3f3f300, 0xc5c5c500, 0xb0b0b000, + 0xbfbfbf00, 0xa4a4a400, 0x3b3b3b00, 0x71717100, + 0x44444400, 0x46464600, 0x2b2b2b00, 0xfcfcfc00, + 0xebebeb00, 0x6f6f6f00, 0xd5d5d500, 0xf6f6f600, + 0x14141400, 0xfefefe00, 0x7c7c7c00, 0x70707000, + 0x5a5a5a00, 0x7d7d7d00, 
0xfdfdfd00, 0x2f2f2f00, + 0x18181800, 0x83838300, 0x16161600, 0xa5a5a500, + 0x91919100, 0x1f1f1f00, 0x05050500, 0x95959500, + 0x74747400, 0xa9a9a900, 0xc1c1c100, 0x5b5b5b00, + 0x4a4a4a00, 0x85858500, 0x6d6d6d00, 0x13131300, + 0x07070700, 0x4f4f4f00, 0x4e4e4e00, 0x45454500, + 0xb2b2b200, 0x0f0f0f00, 0xc9c9c900, 0x1c1c1c00, + 0xa6a6a600, 0xbcbcbc00, 0xececec00, 0x73737300, + 0x90909000, 0x7b7b7b00, 0xcfcfcf00, 0x59595900, + 0x8f8f8f00, 0xa1a1a100, 0xf9f9f900, 0x2d2d2d00, + 0xf2f2f200, 0xb1b1b100, 0x00000000, 0x94949400, + 0x37373700, 0x9f9f9f00, 0xd0d0d000, 0x2e2e2e00, + 0x9c9c9c00, 0x6e6e6e00, 0x28282800, 0x3f3f3f00, + 0x80808000, 0xf0f0f000, 0x3d3d3d00, 0xd3d3d300, + 0x25252500, 0x8a8a8a00, 0xb5b5b500, 0xe7e7e700, + 0x42424200, 0xb3b3b300, 0xc7c7c700, 0xeaeaea00, + 0xf7f7f700, 0x4c4c4c00, 0x11111100, 0x33333300, + 0x03030300, 0xa2a2a200, 0xacacac00, 0x60606000 +}; + +static inline u32 rotl32(u32 v, u32 r) +{ + return ((v << r) | (v >> (32 - r))); +} + +static inline u32 rotr32(u32 v, u32 r) +{ + return ((v >> r) | (v << (32 - r))); +} + +static inline u32 bswap32(u32 v) +{ + return ((v << 24) ^ + (v >> 24) ^ + ((v & 0x0000ff00) << 8) ^ + ((v & 0x00ff0000) >> 8)); +} + +static inline u8 get_u8(u32 x, u32 y) +{ + return (x >> ((3 - y) * 8)); +} + +static inline u32 make_u32(u8 v0, u8 v1, u8 v2, u8 v3) +{ + return ((u32)v0 << 24) | ((u32)v1 << 16) | ((u32)v2 << 8) | ((u32)v3); +} + +static inline u32 aria_m(u32 t0) +{ + return rotr32(t0, 8) ^ rotr32(t0 ^ rotr32(t0, 8), 16); +} + +/* S-Box Layer 1 + M */ +static inline void aria_sbox_layer1_with_pre_diff(u32 *t0, u32 *t1, u32 *t2, + u32 *t3) +{ + *t0 = s1[get_u8(*t0, 0)] ^ + s2[get_u8(*t0, 1)] ^ + x1[get_u8(*t0, 2)] ^ + x2[get_u8(*t0, 3)]; + *t1 = s1[get_u8(*t1, 0)] ^ + s2[get_u8(*t1, 1)] ^ + x1[get_u8(*t1, 2)] ^ + x2[get_u8(*t1, 3)]; + *t2 = s1[get_u8(*t2, 0)] ^ + s2[get_u8(*t2, 1)] ^ + x1[get_u8(*t2, 2)] ^ + x2[get_u8(*t2, 3)]; + *t3 = s1[get_u8(*t3, 0)] ^ + s2[get_u8(*t3, 1)] ^ + x1[get_u8(*t3, 2)] ^ + x2[get_u8(*t3, 3)]; +} + +/* S-Box Layer 2 + M */ +static inline void aria_sbox_layer2_with_pre_diff(u32 *t0, u32 *t1, u32 *t2, + u32 *t3) +{ + *t0 = x1[get_u8(*t0, 0)] ^ + x2[get_u8(*t0, 1)] ^ + s1[get_u8(*t0, 2)] ^ + s2[get_u8(*t0, 3)]; + *t1 = x1[get_u8(*t1, 0)] ^ + x2[get_u8(*t1, 1)] ^ + s1[get_u8(*t1, 2)] ^ + s2[get_u8(*t1, 3)]; + *t2 = x1[get_u8(*t2, 0)] ^ + x2[get_u8(*t2, 1)] ^ + s1[get_u8(*t2, 2)] ^ + s2[get_u8(*t2, 3)]; + *t3 = x1[get_u8(*t3, 0)] ^ + x2[get_u8(*t3, 1)] ^ + s1[get_u8(*t3, 2)] ^ + s2[get_u8(*t3, 3)]; +} + +/* Word-level diffusion */ +static inline void aria_diff_word(u32 *t0, u32 *t1, u32 *t2, u32 *t3) +{ + *t1 ^= *t2; + *t2 ^= *t3; + *t0 ^= *t1; + + *t3 ^= *t1; + *t2 ^= *t0; + *t1 ^= *t2; +} + +/* Byte-level diffusion */ +static inline void aria_diff_byte(u32 *t1, u32 *t2, u32 *t3) +{ + *t1 = ((*t1 << 8) & 0xff00ff00) ^ ((*t1 >> 8) & 0x00ff00ff); + *t2 = rotr32(*t2, 16); + *t3 = bswap32(*t3); +} + +/* Key XOR Layer */ +static inline void aria_add_round_key(u32 *rk, u32 *t0, u32 *t1, u32 *t2, + u32 *t3) +{ + *t0 ^= rk[0]; + *t1 ^= rk[1]; + *t2 ^= rk[2]; + *t3 ^= rk[3]; +} +/* Odd round Substitution & Diffusion */ +static inline void aria_subst_diff_odd(u32 *t0, u32 *t1, u32 *t2, u32 *t3) +{ + aria_sbox_layer1_with_pre_diff(t0, t1, t2, t3); + aria_diff_word(t0, t1, t2, t3); + aria_diff_byte(t1, t2, t3); + aria_diff_word(t0, t1, t2, t3); +} + +/* Even round Substitution & Diffusion */ +static inline void aria_subst_diff_even(u32 *t0, u32 *t1, u32 *t2, u32 *t3) +{ + aria_sbox_layer2_with_pre_diff(t0, t1, t2, t3); + 
aria_diff_word(t0, t1, t2, t3); + aria_diff_byte(t3, t0, t1); + aria_diff_word(t0, t1, t2, t3); +} + +/* Q, R Macro expanded ARIA GSRK */ +static inline void aria_gsrk(u32 *rk, u32 *x, u32 *y, u32 n) +{ + int q = 4 - (n / 32); + int r = n % 32; + + rk[0] = (x[0]) ^ + ((y[q % 4]) >> r) ^ + ((y[(q + 3) % 4]) << (32 - r)); + rk[1] = (x[1]) ^ + ((y[(q + 1) % 4]) >> r) ^ + ((y[q % 4]) << (32 - r)); + rk[2] = (x[2]) ^ + ((y[(q + 2) % 4]) >> r) ^ + ((y[(q + 1) % 4]) << (32 - r)); + rk[3] = (x[3]) ^ + ((y[(q + 3) % 4]) >> r) ^ + ((y[(q + 2) % 4]) << (32 - r)); +} + +void aria_encrypt(void *ctx, u8 *out, const u8 *in); +void aria_decrypt(void *ctx, u8 *out, const u8 *in); +int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len); + +#endif diff --git a/include/crypto/authenc.h b/include/crypto/authenc.h new file mode 100644 index 000000000..5f92a9860 --- /dev/null +++ b/include/crypto/authenc.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Authenc: Simple AEAD wrapper for IPsec + * + * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> + */ +#ifndef _CRYPTO_AUTHENC_H +#define _CRYPTO_AUTHENC_H + +#include <linux/types.h> + +enum { + CRYPTO_AUTHENC_KEYA_UNSPEC, + CRYPTO_AUTHENC_KEYA_PARAM, +}; + +struct crypto_authenc_key_param { + __be32 enckeylen; +}; + +struct crypto_authenc_keys { + const u8 *authkey; + const u8 *enckey; + + unsigned int authkeylen; + unsigned int enckeylen; +}; + +int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, + unsigned int keylen); + +#endif /* _CRYPTO_AUTHENC_H */ diff --git a/include/crypto/b128ops.h b/include/crypto/b128ops.h new file mode 100644 index 000000000..0b8e6bc55 --- /dev/null +++ b/include/crypto/b128ops.h @@ -0,0 +1,80 @@ +/* b128ops.h - common 128-bit block operations + * + * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. + * Copyright (c) 2006, Rik Snel <rsnel@cube.dyndns.org> + * + * Based on Dr Brian Gladman's (GPL'd) work published at + * http://fp.gladman.plus.com/cryptography_technology/index.htm + * See the original copyright notice below. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ +/* + --------------------------------------------------------------------------- + Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. All rights reserved. + + LICENSE TERMS + + The free distribution and use of this software in both source and binary + form is allowed (with or without changes) provided that: + + 1. distributions of this source code include the above copyright + notice, this list of conditions and the following disclaimer; + + 2. distributions in binary form include the above copyright + notice, this list of conditions and the following disclaimer + in the documentation and/or other associated materials; + + 3. the copyright holder's name is not used to endorse products + built using this software without specific written permission. + + ALTERNATIVELY, provided that this notice is retained in full, this product + may be distributed under the terms of the GNU General Public License (GPL), + in which case the provisions of the GPL apply INSTEAD OF those given above. 
+ + DISCLAIMER + + This software is provided 'as is' with no explicit or implied warranties + in respect of its properties, including, but not limited to, correctness + and/or fitness for purpose. + --------------------------------------------------------------------------- + Issue Date: 13/06/2006 +*/ + +#ifndef _CRYPTO_B128OPS_H +#define _CRYPTO_B128OPS_H + +#include <linux/types.h> + +typedef struct { + u64 a, b; +} u128; + +typedef struct { + __be64 a, b; +} be128; + +typedef struct { + __le64 b, a; +} le128; + +static inline void u128_xor(u128 *r, const u128 *p, const u128 *q) +{ + r->a = p->a ^ q->a; + r->b = p->b ^ q->b; +} + +static inline void be128_xor(be128 *r, const be128 *p, const be128 *q) +{ + u128_xor((u128 *)r, (u128 *)p, (u128 *)q); +} + +static inline void le128_xor(le128 *r, const le128 *p, const le128 *q) +{ + u128_xor((u128 *)r, (u128 *)p, (u128 *)q); +} + +#endif /* _CRYPTO_B128OPS_H */ diff --git a/include/crypto/blake2b.h b/include/crypto/blake2b.h new file mode 100644 index 000000000..0c0176285 --- /dev/null +++ b/include/crypto/blake2b.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ + +#ifndef _CRYPTO_BLAKE2B_H +#define _CRYPTO_BLAKE2B_H + +#include <linux/bug.h> +#include <linux/types.h> +#include <linux/string.h> + +enum blake2b_lengths { + BLAKE2B_BLOCK_SIZE = 128, + BLAKE2B_HASH_SIZE = 64, + BLAKE2B_KEY_SIZE = 64, + + BLAKE2B_160_HASH_SIZE = 20, + BLAKE2B_256_HASH_SIZE = 32, + BLAKE2B_384_HASH_SIZE = 48, + BLAKE2B_512_HASH_SIZE = 64, +}; + +struct blake2b_state { + /* 'h', 't', and 'f' are used in assembly code, so keep them as-is. */ + u64 h[8]; + u64 t[2]; + u64 f[2]; + u8 buf[BLAKE2B_BLOCK_SIZE]; + unsigned int buflen; + unsigned int outlen; +}; + +enum blake2b_iv { + BLAKE2B_IV0 = 0x6A09E667F3BCC908ULL, + BLAKE2B_IV1 = 0xBB67AE8584CAA73BULL, + BLAKE2B_IV2 = 0x3C6EF372FE94F82BULL, + BLAKE2B_IV3 = 0xA54FF53A5F1D36F1ULL, + BLAKE2B_IV4 = 0x510E527FADE682D1ULL, + BLAKE2B_IV5 = 0x9B05688C2B3E6C1FULL, + BLAKE2B_IV6 = 0x1F83D9ABFB41BD6BULL, + BLAKE2B_IV7 = 0x5BE0CD19137E2179ULL, +}; + +static inline void __blake2b_init(struct blake2b_state *state, size_t outlen, + const void *key, size_t keylen) +{ + state->h[0] = BLAKE2B_IV0 ^ (0x01010000 | keylen << 8 | outlen); + state->h[1] = BLAKE2B_IV1; + state->h[2] = BLAKE2B_IV2; + state->h[3] = BLAKE2B_IV3; + state->h[4] = BLAKE2B_IV4; + state->h[5] = BLAKE2B_IV5; + state->h[6] = BLAKE2B_IV6; + state->h[7] = BLAKE2B_IV7; + state->t[0] = 0; + state->t[1] = 0; + state->f[0] = 0; + state->f[1] = 0; + state->buflen = 0; + state->outlen = outlen; + if (keylen) { + memcpy(state->buf, key, keylen); + memset(&state->buf[keylen], 0, BLAKE2B_BLOCK_SIZE - keylen); + state->buflen = BLAKE2B_BLOCK_SIZE; + } +} + +#endif /* _CRYPTO_BLAKE2B_H */ diff --git a/include/crypto/blake2s.h b/include/crypto/blake2s.h new file mode 100644 index 000000000..f9ffd3919 --- /dev/null +++ b/include/crypto/blake2s.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
+ */ + +#ifndef _CRYPTO_BLAKE2S_H +#define _CRYPTO_BLAKE2S_H + +#include <linux/bug.h> +#include <linux/kconfig.h> +#include <linux/types.h> +#include <linux/string.h> + +enum blake2s_lengths { + BLAKE2S_BLOCK_SIZE = 64, + BLAKE2S_HASH_SIZE = 32, + BLAKE2S_KEY_SIZE = 32, + + BLAKE2S_128_HASH_SIZE = 16, + BLAKE2S_160_HASH_SIZE = 20, + BLAKE2S_224_HASH_SIZE = 28, + BLAKE2S_256_HASH_SIZE = 32, +}; + +struct blake2s_state { + /* 'h', 't', and 'f' are used in assembly code, so keep them as-is. */ + u32 h[8]; + u32 t[2]; + u32 f[2]; + u8 buf[BLAKE2S_BLOCK_SIZE]; + unsigned int buflen; + unsigned int outlen; +}; + +enum blake2s_iv { + BLAKE2S_IV0 = 0x6A09E667UL, + BLAKE2S_IV1 = 0xBB67AE85UL, + BLAKE2S_IV2 = 0x3C6EF372UL, + BLAKE2S_IV3 = 0xA54FF53AUL, + BLAKE2S_IV4 = 0x510E527FUL, + BLAKE2S_IV5 = 0x9B05688CUL, + BLAKE2S_IV6 = 0x1F83D9ABUL, + BLAKE2S_IV7 = 0x5BE0CD19UL, +}; + +static inline void __blake2s_init(struct blake2s_state *state, size_t outlen, + const void *key, size_t keylen) +{ + state->h[0] = BLAKE2S_IV0 ^ (0x01010000 | keylen << 8 | outlen); + state->h[1] = BLAKE2S_IV1; + state->h[2] = BLAKE2S_IV2; + state->h[3] = BLAKE2S_IV3; + state->h[4] = BLAKE2S_IV4; + state->h[5] = BLAKE2S_IV5; + state->h[6] = BLAKE2S_IV6; + state->h[7] = BLAKE2S_IV7; + state->t[0] = 0; + state->t[1] = 0; + state->f[0] = 0; + state->f[1] = 0; + state->buflen = 0; + state->outlen = outlen; + if (keylen) { + memcpy(state->buf, key, keylen); + memset(&state->buf[keylen], 0, BLAKE2S_BLOCK_SIZE - keylen); + state->buflen = BLAKE2S_BLOCK_SIZE; + } +} + +static inline void blake2s_init(struct blake2s_state *state, + const size_t outlen) +{ + __blake2s_init(state, outlen, NULL, 0); +} + +static inline void blake2s_init_key(struct blake2s_state *state, + const size_t outlen, const void *key, + const size_t keylen) +{ + WARN_ON(IS_ENABLED(DEBUG) && (!outlen || outlen > BLAKE2S_HASH_SIZE || + !key || !keylen || keylen > BLAKE2S_KEY_SIZE)); + + __blake2s_init(state, outlen, key, keylen); +} + +void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen); +void blake2s_final(struct blake2s_state *state, u8 *out); + +static inline void blake2s(u8 *out, const u8 *in, const u8 *key, + const size_t outlen, const size_t inlen, + const size_t keylen) +{ + struct blake2s_state state; + + WARN_ON(IS_ENABLED(DEBUG) && ((!in && inlen > 0) || !out || !outlen || + outlen > BLAKE2S_HASH_SIZE || keylen > BLAKE2S_KEY_SIZE || + (!key && keylen))); + + __blake2s_init(&state, outlen, key, keylen); + blake2s_update(&state, in, inlen); + blake2s_final(&state, out); +} + +#endif /* _CRYPTO_BLAKE2S_H */ diff --git a/include/crypto/blowfish.h b/include/crypto/blowfish.h new file mode 100644 index 000000000..9b384670b --- /dev/null +++ b/include/crypto/blowfish.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for blowfish algorithms + */ + +#ifndef _CRYPTO_BLOWFISH_H +#define _CRYPTO_BLOWFISH_H + +#include <linux/types.h> +#include <linux/crypto.h> + +#define BF_BLOCK_SIZE 8 +#define BF_MIN_KEY_SIZE 4 +#define BF_MAX_KEY_SIZE 56 + +struct bf_ctx { + u32 p[18]; + u32 s[1024]; +}; + +int blowfish_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int key_len); + +#endif diff --git a/include/crypto/cast5.h b/include/crypto/cast5.h new file mode 100644 index 000000000..3d4ed4ea9 --- /dev/null +++ b/include/crypto/cast5.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_CAST5_H +#define _CRYPTO_CAST5_H + +#include <linux/types.h> +#include <linux/crypto.h> +#include 
<crypto/cast_common.h> + +#define CAST5_BLOCK_SIZE 8 +#define CAST5_MIN_KEY_SIZE 5 +#define CAST5_MAX_KEY_SIZE 16 + +struct cast5_ctx { + u32 Km[16]; + u8 Kr[16]; + int rr; /* rr ? rounds = 12 : rounds = 16; (rfc 2144) */ +}; + +int cast5_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); + +void __cast5_encrypt(struct cast5_ctx *ctx, u8 *dst, const u8 *src); +void __cast5_decrypt(struct cast5_ctx *ctx, u8 *dst, const u8 *src); + +#endif diff --git a/include/crypto/cast6.h b/include/crypto/cast6.h new file mode 100644 index 000000000..38f490cd5 --- /dev/null +++ b/include/crypto/cast6.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_CAST6_H +#define _CRYPTO_CAST6_H + +#include <linux/types.h> +#include <linux/crypto.h> +#include <crypto/cast_common.h> + +#define CAST6_BLOCK_SIZE 16 +#define CAST6_MIN_KEY_SIZE 16 +#define CAST6_MAX_KEY_SIZE 32 + +struct cast6_ctx { + u32 Km[12][4]; + u8 Kr[12][4]; +}; + +int __cast6_setkey(struct cast6_ctx *ctx, const u8 *key, unsigned int keylen); +int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); + +void __cast6_encrypt(const void *ctx, u8 *dst, const u8 *src); +void __cast6_decrypt(const void *ctx, u8 *dst, const u8 *src); + +#endif diff --git a/include/crypto/cast_common.h b/include/crypto/cast_common.h new file mode 100644 index 000000000..b90090244 --- /dev/null +++ b/include/crypto/cast_common.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_CAST_COMMON_H +#define _CRYPTO_CAST_COMMON_H + +extern const u32 cast_s1[256]; +extern const u32 cast_s2[256]; +extern const u32 cast_s3[256]; +extern const u32 cast_s4[256]; + +#endif diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h new file mode 100644 index 000000000..b3ea73b81 --- /dev/null +++ b/include/crypto/chacha.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values and helper functions for the ChaCha and XChaCha stream ciphers. + * + * XChaCha extends ChaCha's nonce to 192 bits, while provably retaining ChaCha's + * security. Here they share the same key size, tfm context, and setkey + * function; only their IV size and encrypt/decrypt function differ. + * + * The ChaCha paper specifies 20, 12, and 8-round variants. In general, it is + * recommended to use the 20-round variant ChaCha20. However, the other + * variants can be needed in some performance-sensitive scenarios. The generic + * ChaCha code currently allows only the 20 and 12-round variants. 
+ */ + +#ifndef _CRYPTO_CHACHA_H +#define _CRYPTO_CHACHA_H + +#include <asm/unaligned.h> +#include <linux/types.h> + +/* 32-bit stream position, then 96-bit nonce (RFC7539 convention) */ +#define CHACHA_IV_SIZE 16 + +#define CHACHA_KEY_SIZE 32 +#define CHACHA_BLOCK_SIZE 64 +#define CHACHAPOLY_IV_SIZE 12 + +#define CHACHA_STATE_WORDS (CHACHA_BLOCK_SIZE / sizeof(u32)) + +/* 192-bit nonce, then 64-bit stream position */ +#define XCHACHA_IV_SIZE 32 + +void chacha_block_generic(u32 *state, u8 *stream, int nrounds); +static inline void chacha20_block(u32 *state, u8 *stream) +{ + chacha_block_generic(state, stream, 20); +} + +void hchacha_block_arch(const u32 *state, u32 *out, int nrounds); +void hchacha_block_generic(const u32 *state, u32 *out, int nrounds); + +static inline void hchacha_block(const u32 *state, u32 *out, int nrounds) +{ + if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA)) + hchacha_block_arch(state, out, nrounds); + else + hchacha_block_generic(state, out, nrounds); +} + +enum chacha_constants { /* expand 32-byte k */ + CHACHA_CONSTANT_EXPA = 0x61707865U, + CHACHA_CONSTANT_ND_3 = 0x3320646eU, + CHACHA_CONSTANT_2_BY = 0x79622d32U, + CHACHA_CONSTANT_TE_K = 0x6b206574U +}; + +static inline void chacha_init_consts(u32 *state) +{ + state[0] = CHACHA_CONSTANT_EXPA; + state[1] = CHACHA_CONSTANT_ND_3; + state[2] = CHACHA_CONSTANT_2_BY; + state[3] = CHACHA_CONSTANT_TE_K; +} + +void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv); +static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv) +{ + chacha_init_consts(state); + state[4] = key[0]; + state[5] = key[1]; + state[6] = key[2]; + state[7] = key[3]; + state[8] = key[4]; + state[9] = key[5]; + state[10] = key[6]; + state[11] = key[7]; + state[12] = get_unaligned_le32(iv + 0); + state[13] = get_unaligned_le32(iv + 4); + state[14] = get_unaligned_le32(iv + 8); + state[15] = get_unaligned_le32(iv + 12); +} + +static inline void chacha_init(u32 *state, const u32 *key, const u8 *iv) +{ + if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA)) + chacha_init_arch(state, key, iv); + else + chacha_init_generic(state, key, iv); +} + +void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, + unsigned int bytes, int nrounds); +void chacha_crypt_generic(u32 *state, u8 *dst, const u8 *src, + unsigned int bytes, int nrounds); + +static inline void chacha_crypt(u32 *state, u8 *dst, const u8 *src, + unsigned int bytes, int nrounds) +{ + if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA)) + chacha_crypt_arch(state, dst, src, bytes, nrounds); + else + chacha_crypt_generic(state, dst, src, bytes, nrounds); +} + +static inline void chacha20_crypt(u32 *state, u8 *dst, const u8 *src, + unsigned int bytes) +{ + chacha_crypt(state, dst, src, bytes, 20); +} + +#endif /* _CRYPTO_CHACHA_H */ diff --git a/include/crypto/chacha20poly1305.h b/include/crypto/chacha20poly1305.h new file mode 100644 index 000000000..d2ac3ff7d --- /dev/null +++ b/include/crypto/chacha20poly1305.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
+ */ + +#ifndef __CHACHA20POLY1305_H +#define __CHACHA20POLY1305_H + +#include <linux/types.h> +#include <linux/scatterlist.h> + +enum chacha20poly1305_lengths { + XCHACHA20POLY1305_NONCE_SIZE = 24, + CHACHA20POLY1305_KEY_SIZE = 32, + CHACHA20POLY1305_AUTHTAG_SIZE = 16 +}; + +void chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, + const u8 *ad, const size_t ad_len, + const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE]); + +bool __must_check +chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len, + const u8 *ad, const size_t ad_len, const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE]); + +void xchacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, + const u8 *ad, const size_t ad_len, + const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE], + const u8 key[CHACHA20POLY1305_KEY_SIZE]); + +bool __must_check xchacha20poly1305_decrypt( + u8 *dst, const u8 *src, const size_t src_len, const u8 *ad, + const size_t ad_len, const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE], + const u8 key[CHACHA20POLY1305_KEY_SIZE]); + +bool chacha20poly1305_encrypt_sg_inplace(struct scatterlist *src, size_t src_len, + const u8 *ad, const size_t ad_len, + const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE]); + +bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src, size_t src_len, + const u8 *ad, const size_t ad_len, + const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE]); + +bool chacha20poly1305_selftest(void); + +#endif /* __CHACHA20POLY1305_H */ diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h new file mode 100644 index 000000000..796d986e5 --- /dev/null +++ b/include/crypto/cryptd.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Software async crypto daemon + * + * Added AEAD support to cryptd. + * Authors: Tadeusz Struk (tadeusz.struk@intel.com) + * Adrian Hoban <adrian.hoban@intel.com> + * Gabriele Paoloni <gabriele.paoloni@intel.com> + * Aidan O'Mahony (aidan.o.mahony@intel.com) + * Copyright (c) 2010, Intel Corporation. + */ + +#ifndef _CRYPTO_CRYPT_H +#define _CRYPTO_CRYPT_H + +#include <linux/types.h> + +#include <crypto/aead.h> +#include <crypto/hash.h> +#include <crypto/skcipher.h> + +struct cryptd_skcipher { + struct crypto_skcipher base; +}; + +/* alg_name should be algorithm to be cryptd-ed */ +struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name, + u32 type, u32 mask); +struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm); +/* Must be called without moving CPUs. */ +bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm); +void cryptd_free_skcipher(struct cryptd_skcipher *tfm); + +struct cryptd_ahash { + struct crypto_ahash base; +}; + +static inline struct cryptd_ahash *__cryptd_ahash_cast( + struct crypto_ahash *tfm) +{ + return (struct cryptd_ahash *)tfm; +} + +/* alg_name should be algorithm to be cryptd-ed */ +struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name, + u32 type, u32 mask); +struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm); +struct shash_desc *cryptd_shash_desc(struct ahash_request *req); +/* Must be called without moving CPUs. 
*/ +bool cryptd_ahash_queued(struct cryptd_ahash *tfm); +void cryptd_free_ahash(struct cryptd_ahash *tfm); + +struct cryptd_aead { + struct crypto_aead base; +}; + +static inline struct cryptd_aead *__cryptd_aead_cast( + struct crypto_aead *tfm) +{ + return (struct cryptd_aead *)tfm; +} + +struct cryptd_aead *cryptd_alloc_aead(const char *alg_name, + u32 type, u32 mask); + +struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm); +/* Must be called without moving CPUs. */ +bool cryptd_aead_queued(struct cryptd_aead *tfm); + +void cryptd_free_aead(struct cryptd_aead *tfm); + +#endif diff --git a/include/crypto/ctr.h b/include/crypto/ctr.h new file mode 100644 index 000000000..a1c66d100 --- /dev/null +++ b/include/crypto/ctr.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * CTR: Counter mode + * + * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#ifndef _CRYPTO_CTR_H +#define _CRYPTO_CTR_H + +#include <crypto/algapi.h> +#include <crypto/internal/skcipher.h> +#include <linux/string.h> +#include <linux/types.h> + +#define CTR_RFC3686_NONCE_SIZE 4 +#define CTR_RFC3686_IV_SIZE 8 +#define CTR_RFC3686_BLOCK_SIZE 16 + +static inline int crypto_ctr_encrypt_walk(struct skcipher_request *req, + void (*fn)(struct crypto_skcipher *, + const u8 *, u8 *)) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + int blocksize = crypto_skcipher_chunksize(tfm); + u8 buf[MAX_CIPHER_BLOCKSIZE]; + struct skcipher_walk walk; + int err; + + /* avoid integer division due to variable blocksize parameter */ + if (WARN_ON_ONCE(!is_power_of_2(blocksize))) + return -EINVAL; + + err = skcipher_walk_virt(&walk, req, false); + + while (walk.nbytes > 0) { + u8 *dst = walk.dst.virt.addr; + u8 *src = walk.src.virt.addr; + int nbytes = walk.nbytes; + int tail = 0; + + if (nbytes < walk.total) { + tail = walk.nbytes & (blocksize - 1); + nbytes -= tail; + } + + do { + int bsize = min(nbytes, blocksize); + + fn(tfm, walk.iv, buf); + + crypto_xor_cpy(dst, src, buf, bsize); + crypto_inc(walk.iv, blocksize); + + dst += bsize; + src += bsize; + nbytes -= bsize; + } while (nbytes > 0); + + err = skcipher_walk_done(&walk, tail); + } + return err; +} + +#endif /* _CRYPTO_CTR_H */ diff --git a/include/crypto/curve25519.h b/include/crypto/curve25519.h new file mode 100644 index 000000000..ece6a9b5f --- /dev/null +++ b/include/crypto/curve25519.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#ifndef CURVE25519_H +#define CURVE25519_H + +#include <crypto/algapi.h> // For crypto_memneq. 
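[Editorial sketch, not part of the patch] The Curve25519 helpers declared further down in this header combine into a short key-agreement flow; the function name below and its bool-based error handling are illustrative assumptions, and only the helpers and the CURVE25519_KEY_SIZE constant declared in this header are used.

static bool example_x25519(const u8 peer_pub[CURVE25519_KEY_SIZE],
			   u8 shared[CURVE25519_KEY_SIZE])
{
	u8 secret[CURVE25519_KEY_SIZE];
	u8 pub[CURVE25519_KEY_SIZE];

	curve25519_generate_secret(secret);            /* random bytes + clamping */
	if (!curve25519_generate_public(pub, secret))  /* fails for a degenerate secret/result */
		return false;

	/* ... transmit 'pub' to the peer out of band ... */

	/* returns false if the computed shared point is the all-zero (null) point */
	return curve25519(shared, secret, peer_pub);
}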
+#include <linux/types.h> +#include <linux/random.h> + +enum curve25519_lengths { + CURVE25519_KEY_SIZE = 32 +}; + +extern const u8 curve25519_null_point[]; +extern const u8 curve25519_base_point[]; + +void curve25519_generic(u8 out[CURVE25519_KEY_SIZE], + const u8 scalar[CURVE25519_KEY_SIZE], + const u8 point[CURVE25519_KEY_SIZE]); + +void curve25519_arch(u8 out[CURVE25519_KEY_SIZE], + const u8 scalar[CURVE25519_KEY_SIZE], + const u8 point[CURVE25519_KEY_SIZE]); + +void curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE]); + +bool curve25519_selftest(void); + +static inline +bool __must_check curve25519(u8 mypublic[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE], + const u8 basepoint[CURVE25519_KEY_SIZE]) +{ + if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519)) + curve25519_arch(mypublic, secret, basepoint); + else + curve25519_generic(mypublic, secret, basepoint); + return crypto_memneq(mypublic, curve25519_null_point, + CURVE25519_KEY_SIZE); +} + +static inline bool +__must_check curve25519_generate_public(u8 pub[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE]) +{ + if (unlikely(!crypto_memneq(secret, curve25519_null_point, + CURVE25519_KEY_SIZE))) + return false; + + if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519)) + curve25519_base_arch(pub, secret); + else + curve25519_generic(pub, secret, curve25519_base_point); + return crypto_memneq(pub, curve25519_null_point, CURVE25519_KEY_SIZE); +} + +static inline void curve25519_clamp_secret(u8 secret[CURVE25519_KEY_SIZE]) +{ + secret[0] &= 248; + secret[31] = (secret[31] & 127) | 64; +} + +static inline void curve25519_generate_secret(u8 secret[CURVE25519_KEY_SIZE]) +{ + get_random_bytes_wait(secret, CURVE25519_KEY_SIZE); + curve25519_clamp_secret(secret); +} + +#endif /* CURVE25519_H */ diff --git a/include/crypto/des.h b/include/crypto/des.h new file mode 100644 index 000000000..7812b4331 --- /dev/null +++ b/include/crypto/des.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * DES & Triple DES EDE Cipher Algorithms. + */ + +#ifndef __CRYPTO_DES_H +#define __CRYPTO_DES_H + +#include <linux/types.h> + +#define DES_KEY_SIZE 8 +#define DES_EXPKEY_WORDS 32 +#define DES_BLOCK_SIZE 8 + +#define DES3_EDE_KEY_SIZE (3 * DES_KEY_SIZE) +#define DES3_EDE_EXPKEY_WORDS (3 * DES_EXPKEY_WORDS) +#define DES3_EDE_BLOCK_SIZE DES_BLOCK_SIZE + +struct des_ctx { + u32 expkey[DES_EXPKEY_WORDS]; +}; + +struct des3_ede_ctx { + u32 expkey[DES3_EDE_EXPKEY_WORDS]; +}; + +void des_encrypt(const struct des_ctx *ctx, u8 *dst, const u8 *src); +void des_decrypt(const struct des_ctx *ctx, u8 *dst, const u8 *src); + +void des3_ede_encrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src); +void des3_ede_decrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src); + +/** + * des_expand_key - Expand a DES input key into a key schedule + * @ctx: the key schedule + * @key: buffer containing the input key + * @len: size of the buffer contents + * + * Returns 0 on success, -EINVAL if the input key is rejected and -ENOKEY if + * the key is accepted but has been found to be weak. + */ +int des_expand_key(struct des_ctx *ctx, const u8 *key, unsigned int keylen); + +/** + * des3_ede_expand_key - Expand a triple DES input key into a key schedule + * @ctx: the key schedule + * @key: buffer containing the input key + * @len: size of the buffer contents + * + * Returns 0 on success, -EINVAL if the input key is rejected and -ENOKEY if + * the key is accepted but has been found to be weak. 
Note that weak keys will + * be rejected (and -EINVAL will be returned) when running in FIPS mode. + */ +int des3_ede_expand_key(struct des3_ede_ctx *ctx, const u8 *key, + unsigned int keylen); + +#endif /* __CRYPTO_DES_H */ diff --git a/include/crypto/dh.h b/include/crypto/dh.h new file mode 100644 index 000000000..7b863e911 --- /dev/null +++ b/include/crypto/dh.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Diffie-Hellman secret to be used with kpp API along with helper functions + * + * Copyright (c) 2016, Intel Corporation + * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com> + */ +#ifndef _CRYPTO_DH_ +#define _CRYPTO_DH_ + +/** + * DOC: DH Helper Functions + * + * To use DH with the KPP cipher API, the following data structure and + * functions should be used. + * + * To use DH with KPP, the following functions should be used to operate on + * a DH private key. The packet private key that can be set with + * the KPP API function call of crypto_kpp_set_secret. + */ + +/** + * struct dh - define a DH private key + * + * @key: Private DH key + * @p: Diffie-Hellman parameter P + * @g: Diffie-Hellman generator G + * @key_size: Size of the private DH key + * @p_size: Size of DH parameter P + * @g_size: Size of DH generator G + */ +struct dh { + const void *key; + const void *p; + const void *g; + unsigned int key_size; + unsigned int p_size; + unsigned int g_size; +}; + +/** + * crypto_dh_key_len() - Obtain the size of the private DH key + * @params: private DH key + * + * This function returns the packet DH key size. A caller can use that + * with the provided DH private key reference to obtain the required + * memory size to hold a packet key. + * + * Return: size of the key in bytes + */ +unsigned int crypto_dh_key_len(const struct dh *params); + +/** + * crypto_dh_encode_key() - encode the private key + * @buf: Buffer allocated by the caller to hold the packet DH + * private key. The buffer should be at least crypto_dh_key_len + * bytes in size. + * @len: Length of the packet private key buffer + * @params: Buffer with the caller-specified private key + * + * The DH implementations operate on a packet representation of the private + * key. + * + * Return: -EINVAL if buffer has insufficient size, 0 on success + */ +int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params); + +/** + * crypto_dh_decode_key() - decode a private key + * @buf: Buffer holding a packet key that should be decoded + * @len: Length of the packet private key buffer + * @params: Buffer allocated by the caller that is filled with the + * unpacked DH private key. + * + * The unpacking obtains the private key by pointing @p to the correct location + * in @buf. Thus, both pointers refer to the same memory. + * + * Return: -EINVAL if buffer has insufficient size, 0 on success + */ +int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params); + +/** + * __crypto_dh_decode_key() - decode a private key without parameter checks + * @buf: Buffer holding a packet key that should be decoded + * @len: Length of the packet private key buffer + * @params: Buffer allocated by the caller that is filled with the + * unpacked DH private key. + * + * Internal function providing the same services as the exported + * crypto_dh_decode_key(), but without any of those basic parameter + * checks conducted by the latter. 
+ * + * Return: -EINVAL if buffer has insufficient size, 0 on success + */ +int __crypto_dh_decode_key(const char *buf, unsigned int len, + struct dh *params); + +#endif diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h new file mode 100644 index 000000000..af5ad51d3 --- /dev/null +++ b/include/crypto/drbg.h @@ -0,0 +1,286 @@ +/* + * DRBG based on NIST SP800-90A + * + * Copyright Stephan Mueller <smueller@chronox.de>, 2014 + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, and the entire permission notice in its entirety, + * including the disclaimer of warranties. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * ALTERNATIVELY, this product may be distributed under the terms of + * the GNU General Public License, in which case the provisions of the GPL are + * required INSTEAD OF the above restrictions. (This clause is + * necessary due to a potential bad interaction between the GPL and + * the restrictions contained in a BSD-style copyright.) + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF + * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + */ + +#ifndef _DRBG_H +#define _DRBG_H + + +#include <linux/random.h> +#include <linux/scatterlist.h> +#include <crypto/hash.h> +#include <crypto/skcipher.h> +#include <linux/module.h> +#include <linux/crypto.h> +#include <linux/slab.h> +#include <crypto/internal/rng.h> +#include <crypto/rng.h> +#include <linux/fips.h> +#include <linux/mutex.h> +#include <linux/list.h> +#include <linux/workqueue.h> + +/* + * Concatenation Helper and string operation helper + * + * SP800-90A requires the concatenation of different data. To avoid copying + * buffers around or allocate additional memory, the following data structure + * is used to point to the original memory with its size. In addition, it + * is used to build a linked list. The linked list defines the concatenation + * of individual buffers. The order of memory block referenced in that + * linked list determines the order of concatenation. 
+ */ +struct drbg_string { + const unsigned char *buf; + size_t len; + struct list_head list; +}; + +static inline void drbg_string_fill(struct drbg_string *string, + const unsigned char *buf, size_t len) +{ + string->buf = buf; + string->len = len; + INIT_LIST_HEAD(&string->list); +} + +struct drbg_state; +typedef uint32_t drbg_flag_t; + +struct drbg_core { + drbg_flag_t flags; /* flags for the cipher */ + __u8 statelen; /* maximum state length */ + __u8 blocklen_bytes; /* block size of output in bytes */ + char cra_name[CRYPTO_MAX_ALG_NAME]; /* mapping to kernel crypto API */ + /* kernel crypto API backend cipher name */ + char backend_cra_name[CRYPTO_MAX_ALG_NAME]; +}; + +struct drbg_state_ops { + int (*update)(struct drbg_state *drbg, struct list_head *seed, + int reseed); + int (*generate)(struct drbg_state *drbg, + unsigned char *buf, unsigned int buflen, + struct list_head *addtl); + int (*crypto_init)(struct drbg_state *drbg); + int (*crypto_fini)(struct drbg_state *drbg); + +}; + +struct drbg_test_data { + struct drbg_string *testentropy; /* TEST PARAMETER: test entropy */ +}; + +enum drbg_seed_state { + DRBG_SEED_STATE_UNSEEDED, + DRBG_SEED_STATE_PARTIAL, /* Seeded with !rng_is_initialized() */ + DRBG_SEED_STATE_FULL, +}; + +struct drbg_state { + struct mutex drbg_mutex; /* lock around DRBG */ + unsigned char *V; /* internal state 10.1.1.1 1a) */ + unsigned char *Vbuf; + /* hash: static value 10.1.1.1 1b) hmac / ctr: key */ + unsigned char *C; + unsigned char *Cbuf; + /* Number of RNG requests since last reseed -- 10.1.1.1 1c) */ + size_t reseed_ctr; + size_t reseed_threshold; + /* some memory the DRBG can use for its operation */ + unsigned char *scratchpad; + unsigned char *scratchpadbuf; + void *priv_data; /* Cipher handle */ + + struct crypto_skcipher *ctr_handle; /* CTR mode cipher handle */ + struct skcipher_request *ctr_req; /* CTR mode request handle */ + __u8 *outscratchpadbuf; /* CTR mode output scratchpad */ + __u8 *outscratchpad; /* CTR mode aligned outbuf */ + struct crypto_wait ctr_wait; /* CTR mode async wait obj */ + struct scatterlist sg_in, sg_out; /* CTR mode SGLs */ + + enum drbg_seed_state seeded; /* DRBG fully seeded? */ + unsigned long last_seed_time; + bool pr; /* Prediction resistance enabled? */ + bool fips_primed; /* Continuous test primed? */ + unsigned char *prev; /* FIPS 140-2 continuous test value */ + struct crypto_rng *jent; + const struct drbg_state_ops *d_ops; + const struct drbg_core *core; + struct drbg_string test_data; +}; + +static inline __u8 drbg_statelen(struct drbg_state *drbg) +{ + if (drbg && drbg->core) + return drbg->core->statelen; + return 0; +} + +static inline __u8 drbg_blocklen(struct drbg_state *drbg) +{ + if (drbg && drbg->core) + return drbg->core->blocklen_bytes; + return 0; +} + +static inline __u8 drbg_keylen(struct drbg_state *drbg) +{ + if (drbg && drbg->core) + return (drbg->core->statelen - drbg->core->blocklen_bytes); + return 0; +} + +static inline size_t drbg_max_request_bytes(struct drbg_state *drbg) +{ + /* SP800-90A requires the limit 2**19 bits, but we return bytes */ + return (1 << 16); +} + +static inline size_t drbg_max_addtl(struct drbg_state *drbg) +{ + /* SP800-90A requires 2**35 bytes additional info str / pers str */ +#if (__BITS_PER_LONG == 32) + /* + * SP800-90A allows smaller maximum numbers to be returned -- we + * return SIZE_MAX - 1 to allow the verification of the enforcement + * of this value in drbg_healthcheck_sanity. 
+ */ + return (SIZE_MAX - 1); +#else + return (1UL<<35); +#endif +} + +static inline size_t drbg_max_requests(struct drbg_state *drbg) +{ + /* SP800-90A requires 2**48 maximum requests before reseeding */ + return (1<<20); +} + +/* + * This is a wrapper to the kernel crypto API function of + * crypto_rng_generate() to allow the caller to provide additional data. + * + * @drng DRBG handle -- see crypto_rng_get_bytes + * @outbuf output buffer -- see crypto_rng_get_bytes + * @outlen length of output buffer -- see crypto_rng_get_bytes + * @addtl_input additional information string input buffer + * @addtllen length of additional information string buffer + * + * return + * see crypto_rng_get_bytes + */ +static inline int crypto_drbg_get_bytes_addtl(struct crypto_rng *drng, + unsigned char *outbuf, unsigned int outlen, + struct drbg_string *addtl) +{ + return crypto_rng_generate(drng, addtl->buf, addtl->len, + outbuf, outlen); +} + +/* + * TEST code + * + * This is a wrapper to the kernel crypto API function of + * crypto_rng_generate() to allow the caller to provide additional data and + * allow furnishing of test_data + * + * @drng DRBG handle -- see crypto_rng_get_bytes + * @outbuf output buffer -- see crypto_rng_get_bytes + * @outlen length of output buffer -- see crypto_rng_get_bytes + * @addtl_input additional information string input buffer + * @addtllen length of additional information string buffer + * @test_data filled test data + * + * return + * see crypto_rng_get_bytes + */ +static inline int crypto_drbg_get_bytes_addtl_test(struct crypto_rng *drng, + unsigned char *outbuf, unsigned int outlen, + struct drbg_string *addtl, + struct drbg_test_data *test_data) +{ + crypto_rng_set_entropy(drng, test_data->testentropy->buf, + test_data->testentropy->len); + return crypto_rng_generate(drng, addtl->buf, addtl->len, + outbuf, outlen); +} + +/* + * TEST code + * + * This is a wrapper to the kernel crypto API function of + * crypto_rng_reset() to allow the caller to provide test_data + * + * @drng DRBG handle -- see crypto_rng_reset + * @pers personalization string input buffer + * @perslen length of additional information string buffer + * @test_data filled test data + * + * return + * see crypto_rng_reset + */ +static inline int crypto_drbg_reset_test(struct crypto_rng *drng, + struct drbg_string *pers, + struct drbg_test_data *test_data) +{ + crypto_rng_set_entropy(drng, test_data->testentropy->buf, + test_data->testentropy->len); + return crypto_rng_reset(drng, pers->buf, pers->len); +} + +/* DRBG type flags */ +#define DRBG_CTR ((drbg_flag_t)1<<0) +#define DRBG_HMAC ((drbg_flag_t)1<<1) +#define DRBG_HASH ((drbg_flag_t)1<<2) +#define DRBG_TYPE_MASK (DRBG_CTR | DRBG_HMAC | DRBG_HASH) +/* DRBG strength flags */ +#define DRBG_STRENGTH128 ((drbg_flag_t)1<<3) +#define DRBG_STRENGTH192 ((drbg_flag_t)1<<4) +#define DRBG_STRENGTH256 ((drbg_flag_t)1<<5) +#define DRBG_STRENGTH_MASK (DRBG_STRENGTH128 | DRBG_STRENGTH192 | \ + DRBG_STRENGTH256) + +enum drbg_prefixes { + DRBG_PREFIX0 = 0x00, + DRBG_PREFIX1, + DRBG_PREFIX2, + DRBG_PREFIX3 +}; + +#endif /* _DRBG_H */ diff --git a/include/crypto/ecc_curve.h b/include/crypto/ecc_curve.h new file mode 100644 index 000000000..70964781e --- /dev/null +++ b/include/crypto/ecc_curve.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 HiSilicon */ + +#ifndef _CRYTO_ECC_CURVE_H +#define _CRYTO_ECC_CURVE_H + +#include <linux/types.h> + +/** + * struct ecc_point - elliptic curve point in affine coordinates + * + * @x: X coordinate in 
vli form. + * @y: Y coordinate in vli form. + * @ndigits: Length of vlis in u64 qwords. + */ +struct ecc_point { + u64 *x; + u64 *y; + u8 ndigits; +}; + +/** + * struct ecc_curve - definition of elliptic curve + * + * @name: Short name of the curve. + * @g: Generator point of the curve. + * @p: Prime number, if Barrett's reduction is used for this curve + * pre-calculated value 'mu' is appended to the @p after ndigits. + * Use of Barrett's reduction is heuristically determined in + * vli_mmod_fast(). + * @n: Order of the curve group. + * @a: Curve parameter a. + * @b: Curve parameter b. + */ +struct ecc_curve { + char *name; + struct ecc_point g; + u64 *p; + u64 *n; + u64 *a; + u64 *b; +}; + +/** + * ecc_get_curve() - get elliptic curve; + * @curve_id: Curves IDs: + * defined in 'include/crypto/ecdh.h'; + * + * Returns curve if get curve succssful, NULL otherwise + */ +const struct ecc_curve *ecc_get_curve(unsigned int curve_id); + +/** + * ecc_get_curve25519() - get curve25519 curve; + * + * Returns curve25519 + */ +const struct ecc_curve *ecc_get_curve25519(void); + +#endif diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h new file mode 100644 index 000000000..a9f98078d --- /dev/null +++ b/include/crypto/ecdh.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * ECDH params to be used with kpp API + * + * Copyright (c) 2016, Intel Corporation + * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com> + */ +#ifndef _CRYPTO_ECDH_ +#define _CRYPTO_ECDH_ + +/** + * DOC: ECDH Helper Functions + * + * To use ECDH with the KPP cipher API, the following data structure and + * functions should be used. + * + * The ECC curves known to the ECDH implementation are specified in this + * header file. + * + * To use ECDH with KPP, the following functions should be used to operate on + * an ECDH private key. The packet private key that can be set with + * the KPP API function call of crypto_kpp_set_secret. + */ + +/* Curves IDs */ +#define ECC_CURVE_NIST_P192 0x0001 +#define ECC_CURVE_NIST_P256 0x0002 +#define ECC_CURVE_NIST_P384 0x0003 + +/** + * struct ecdh - define an ECDH private key + * + * @key: Private ECDH key + * @key_size: Size of the private ECDH key + */ +struct ecdh { + char *key; + unsigned short key_size; +}; + +/** + * crypto_ecdh_key_len() - Obtain the size of the private ECDH key + * @params: private ECDH key + * + * This function returns the packet ECDH key size. A caller can use that + * with the provided ECDH private key reference to obtain the required + * memory size to hold a packet key. + * + * Return: size of the key in bytes + */ +unsigned int crypto_ecdh_key_len(const struct ecdh *params); + +/** + * crypto_ecdh_encode_key() - encode the private key + * @buf: Buffer allocated by the caller to hold the packet ECDH + * private key. The buffer should be at least crypto_ecdh_key_len + * bytes in size. + * @len: Length of the packet private key buffer + * @p: Buffer with the caller-specified private key + * + * The ECDH implementations operate on a packet representation of the private + * key. + * + * Return: -EINVAL if buffer has insufficient size, 0 on success + */ +int crypto_ecdh_encode_key(char *buf, unsigned int len, const struct ecdh *p); + +/** + * crypto_ecdh_decode_key() - decode a private key + * @buf: Buffer holding a packet key that should be decoded + * @len: Length of the packet private key buffer + * @p: Buffer allocated by the caller that is filled with the + * unpacked ECDH private key. 
+ * + * The unpacking obtains the private key by pointing @p to the correct location + * in @buf. Thus, both pointers refer to the same memory. + * + * Return: -EINVAL if buffer has insufficient size, 0 on success + */ +int crypto_ecdh_decode_key(const char *buf, unsigned int len, struct ecdh *p); + +#endif diff --git a/include/crypto/engine.h b/include/crypto/engine.h new file mode 100644 index 000000000..ae133e98d --- /dev/null +++ b/include/crypto/engine.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Crypto engine API + * + * Copyright (c) 2016 Baolin Wang <baolin.wang@linaro.org> + */ +#ifndef _CRYPTO_ENGINE_H +#define _CRYPTO_ENGINE_H + +#include <linux/crypto.h> +#include <linux/list.h> +#include <linux/kthread.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +#include <crypto/algapi.h> +#include <crypto/aead.h> +#include <crypto/akcipher.h> +#include <crypto/hash.h> +#include <crypto/skcipher.h> +#include <crypto/kpp.h> + +struct device; + +#define ENGINE_NAME_LEN 30 +/* + * struct crypto_engine - crypto hardware engine + * @name: the engine name + * @idling: the engine is entering idle state + * @busy: request pump is busy + * @running: the engine is on working + * @retry_support: indication that the hardware allows re-execution + * of a failed backlog request + * crypto-engine, in head position to keep order + * @list: link with the global crypto engine list + * @queue_lock: spinlock to synchronise access to request queue + * @queue: the crypto queue of the engine + * @rt: whether this queue is set to run as a realtime task + * @prepare_crypt_hardware: a request will soon arrive from the queue + * so the subsystem requests the driver to prepare the hardware + * by issuing this call + * @unprepare_crypt_hardware: there are currently no more requests on the + * queue so the subsystem notifies the driver that it may relax the + * hardware by issuing this call + * @do_batch_requests: execute a batch of requests. Depends on multiple + * requests support. 
+ * @kworker: kthread worker struct for request pump + * @pump_requests: work struct for scheduling work to the request pump + * @priv_data: the engine private data + * @cur_req: the current request which is on processing + */ +struct crypto_engine { + char name[ENGINE_NAME_LEN]; + bool idling; + bool busy; + bool running; + + bool retry_support; + + struct list_head list; + spinlock_t queue_lock; + struct crypto_queue queue; + struct device *dev; + + bool rt; + + int (*prepare_crypt_hardware)(struct crypto_engine *engine); + int (*unprepare_crypt_hardware)(struct crypto_engine *engine); + int (*do_batch_requests)(struct crypto_engine *engine); + + + struct kthread_worker *kworker; + struct kthread_work pump_requests; + + void *priv_data; + struct crypto_async_request *cur_req; +}; + +/* + * struct crypto_engine_op - crypto hardware engine operations + * @prepare__request: do some prepare if need before handle the current request + * @unprepare_request: undo any work done by prepare_request() + * @do_one_request: do encryption for current request + */ +struct crypto_engine_op { + int (*prepare_request)(struct crypto_engine *engine, + void *areq); + int (*unprepare_request)(struct crypto_engine *engine, + void *areq); + int (*do_one_request)(struct crypto_engine *engine, + void *areq); +}; + +struct crypto_engine_ctx { + struct crypto_engine_op op; +}; + +int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine, + struct aead_request *req); +int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine, + struct akcipher_request *req); +int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine, + struct ahash_request *req); +int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine, + struct kpp_request *req); +int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine, + struct skcipher_request *req); +void crypto_finalize_aead_request(struct crypto_engine *engine, + struct aead_request *req, int err); +void crypto_finalize_akcipher_request(struct crypto_engine *engine, + struct akcipher_request *req, int err); +void crypto_finalize_hash_request(struct crypto_engine *engine, + struct ahash_request *req, int err); +void crypto_finalize_kpp_request(struct crypto_engine *engine, + struct kpp_request *req, int err); +void crypto_finalize_skcipher_request(struct crypto_engine *engine, + struct skcipher_request *req, int err); +int crypto_engine_start(struct crypto_engine *engine); +int crypto_engine_stop(struct crypto_engine *engine); +struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt); +struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev, + bool retry_support, + int (*cbk_do_batch)(struct crypto_engine *engine), + bool rt, int qlen); +int crypto_engine_exit(struct crypto_engine *engine); + +#endif /* _CRYPTO_ENGINE_H */ diff --git a/include/crypto/gcm.h b/include/crypto/gcm.h new file mode 100644 index 000000000..9d7eff04f --- /dev/null +++ b/include/crypto/gcm.h @@ -0,0 +1,63 @@ +#ifndef _CRYPTO_GCM_H +#define _CRYPTO_GCM_H + +#include <linux/errno.h> + +#define GCM_AES_IV_SIZE 12 +#define GCM_RFC4106_IV_SIZE 8 +#define GCM_RFC4543_IV_SIZE 8 + +/* + * validate authentication tag for GCM + */ +static inline int crypto_gcm_check_authsize(unsigned int authsize) +{ + switch (authsize) { + case 4: + case 8: + case 12: + case 13: + case 14: + case 15: + case 16: + break; + default: + return -EINVAL; + } + + return 0; +} + +/* + * validate authentication tag for RFC4106 + */ 
+static inline int crypto_rfc4106_check_authsize(unsigned int authsize) +{ + switch (authsize) { + case 8: + case 12: + case 16: + break; + default: + return -EINVAL; + } + + return 0; +} + +/* + * validate assoclen for RFC4106/RFC4543 + */ +static inline int crypto_ipsec_check_assoclen(unsigned int assoclen) +{ + switch (assoclen) { + case 16: + case 20: + break; + default: + return -EINVAL; + } + + return 0; +} +#endif diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h new file mode 100644 index 000000000..81330c644 --- /dev/null +++ b/include/crypto/gf128mul.h @@ -0,0 +1,252 @@ +/* gf128mul.h - GF(2^128) multiplication functions + * + * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. + * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org> + * + * Based on Dr Brian Gladman's (GPL'd) work published at + * http://fp.gladman.plus.com/cryptography_technology/index.htm + * See the original copyright notice below. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ +/* + --------------------------------------------------------------------------- + Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. All rights reserved. + + LICENSE TERMS + + The free distribution and use of this software in both source and binary + form is allowed (with or without changes) provided that: + + 1. distributions of this source code include the above copyright + notice, this list of conditions and the following disclaimer; + + 2. distributions in binary form include the above copyright + notice, this list of conditions and the following disclaimer + in the documentation and/or other associated materials; + + 3. the copyright holder's name is not used to endorse products + built using this software without specific written permission. + + ALTERNATIVELY, provided that this notice is retained in full, this product + may be distributed under the terms of the GNU General Public License (GPL), + in which case the provisions of the GPL apply INSTEAD OF those given above. + + DISCLAIMER + + This software is provided 'as is' with no explicit or implied warranties + in respect of its properties, including, but not limited to, correctness + and/or fitness for purpose. + --------------------------------------------------------------------------- + Issue Date: 31/01/2006 + + An implementation of field multiplication in Galois Field GF(2^128) +*/ + +#ifndef _CRYPTO_GF128MUL_H +#define _CRYPTO_GF128MUL_H + +#include <asm/byteorder.h> +#include <crypto/b128ops.h> +#include <linux/slab.h> + +/* Comment by Rik: + * + * For some background on GF(2^128) see for example: + * http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/gcm/gcm-revised-spec.pdf + * + * The elements of GF(2^128) := GF(2)[X]/(X^128-X^7-X^2-X^1-1) can + * be mapped to computer memory in a variety of ways. Let's examine + * three common cases. + * + * Take a look at the 16 binary octets below in memory order. The msb's + * are left and the lsb's are right. char b[16] is an array and b[0] is + * the first octet. + * + * 10000000 00000000 00000000 00000000 .... 00000000 00000000 00000000 + * b[0] b[1] b[2] b[3] b[13] b[14] b[15] + * + * Every bit is a coefficient of some power of X. We can store the bits + * in every byte in little-endian order and the bytes themselves also in + * little endian order. 
I will call this lle (little-little-endian).
+ * The above buffer represents the polynomial 1, and X^7+X^2+X^1+1 looks
+ * like 11100001 00000000 .... 00000000 = { 0xE1, 0x00, }.
+ * This format was originally implemented in gf128mul and is used
+ * in GCM (Galois/Counter mode) and in ABL (Arbitrary Block Length).
+ *
+ * Another convention says: store the bits in bigendian order and the
+ * bytes also. This is bbe (big-big-endian). Now the buffer above
+ * represents X^127. X^7+X^2+X^1+1 looks like 00000000 .... 10000111,
+ * b[15] = 0x87 and the rest is 0. LRW uses this convention and bbe
+ * is partly implemented.
+ *
+ * Both of the above formats are easy to implement on big-endian
+ * machines.
+ *
+ * XTS and EME (the latter of which is patent encumbered) use the ble
+ * format (bits are stored in big endian order and the bytes in little
+ * endian). The above buffer represents X^7 in this case and the
+ * primitive polynomial is b[0] = 0x87.
+ *
+ * The common machine word-size is smaller than 128 bits, so to make
+ * an efficient implementation we must split into machine word sizes.
+ * This implementation uses 64-bit words for the moment. Machine
+ * endianness comes into play. The lle format in relation to machine
+ * endianness is discussed below by the original author of gf128mul Dr
+ * Brian Gladman.
+ *
+ * Let's look at the bbe and ble format on a little endian machine.
+ *
+ * bbe on a little endian machine u32 x[4]:
+ *
+ * MS x[0] LS MS x[1] LS
+ * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
+ * 103..96 111.104 119.112 127.120 71...64 79...72 87...80 95...88
+ *
+ * MS x[2] LS MS x[3] LS
+ * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
+ * 39...32 47...40 55...48 63...56 07...00 15...08 23...16 31...24
+ *
+ * ble on a little endian machine
+ *
+ * MS x[0] LS MS x[1] LS
+ * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
+ * 31...24 23...16 15...08 07...00 63...56 55...48 47...40 39...32
+ *
+ * MS x[2] LS MS x[3] LS
+ * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
+ * 95...88 87...80 79...72 71...64 127.120 119.112 111.104 103..96
+ *
+ * Multiplications in GF(2^128) are mostly bit-shifts, so you see why
+ * ble (and lbe also) are easier to implement on a little-endian
+ * machine than on a big-endian machine. The converse holds for bbe
+ * and lle.
+ *
+ * Note: to have good alignment, it seems to me that it is sufficient
+ * to keep elements of GF(2^128) in type u64[2]. On 32-bit wordsize
+ * machines this will automatically be aligned to wordsize and on a 64-bit
+ * machine also.
+ */
+/* Multiply a GF(2^128) field element by x. Field elements are
+ held in arrays of bytes in which field bits 8n..8n + 7 are held in
+ byte[n], with lower indexed bits placed in the more numerically
+ significant bit positions within bytes.
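+
+ For example, in this convention the field element 1 is byte[0] = 0x80
+ with all other bytes zero; multiplying it by x moves the bit one
+ position towards the less significant end of the byte, giving
+ byte[0] = 0x40 (the polynomial x). If the x^127 coefficient (the least
+ significant bit of byte[15]) was set before the shift, the overflow is
+ reduced by adding x^7+x^2+x+1, i.e. by XORing 0xe1 into byte[0] - this
+ is the 0xe1 constant used by gf128mul_x_lle() below.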
+ + On little endian machines the bit indexes translate into the bit + positions within four 32-bit words in the following way + + MS x[0] LS MS x[1] LS + ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + 24...31 16...23 08...15 00...07 56...63 48...55 40...47 32...39 + + MS x[2] LS MS x[3] LS + ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + 88...95 80...87 72...79 64...71 120.127 112.119 104.111 96..103 + + On big endian machines the bit indexes translate into the bit + positions within four 32-bit words in the following way + + MS x[0] LS MS x[1] LS + ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + 00...07 08...15 16...23 24...31 32...39 40...47 48...55 56...63 + + MS x[2] LS MS x[3] LS + ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + 64...71 72...79 80...87 88...95 96..103 104.111 112.119 120.127 +*/ + +/* A slow generic version of gf_mul, implemented for lle and bbe + * It multiplies a and b and puts the result in a */ +void gf128mul_lle(be128 *a, const be128 *b); + +void gf128mul_bbe(be128 *a, const be128 *b); + +/* + * The following functions multiply a field element by x in + * the polynomial field representation. They use 64-bit word operations + * to gain speed but compensate for machine endianness and hence work + * correctly on both styles of machine. + * + * They are defined here for performance. + */ + +static inline u64 gf128mul_mask_from_bit(u64 x, int which) +{ + /* a constant-time version of 'x & ((u64)1 << which) ? (u64)-1 : 0' */ + return ((s64)(x << (63 - which)) >> 63); +} + +static inline void gf128mul_x_lle(be128 *r, const be128 *x) +{ + u64 a = be64_to_cpu(x->a); + u64 b = be64_to_cpu(x->b); + + /* equivalent to gf128mul_table_le[(b << 7) & 0xff] << 48 + * (see crypto/gf128mul.c): */ + u64 _tt = gf128mul_mask_from_bit(b, 0) & ((u64)0xe1 << 56); + + r->b = cpu_to_be64((b >> 1) | (a << 63)); + r->a = cpu_to_be64((a >> 1) ^ _tt); +} + +static inline void gf128mul_x_bbe(be128 *r, const be128 *x) +{ + u64 a = be64_to_cpu(x->a); + u64 b = be64_to_cpu(x->b); + + /* equivalent to gf128mul_table_be[a >> 63] (see crypto/gf128mul.c): */ + u64 _tt = gf128mul_mask_from_bit(a, 63) & 0x87; + + r->a = cpu_to_be64((a << 1) | (b >> 63)); + r->b = cpu_to_be64((b << 1) ^ _tt); +} + +/* needed by XTS */ +static inline void gf128mul_x_ble(le128 *r, const le128 *x) +{ + u64 a = le64_to_cpu(x->a); + u64 b = le64_to_cpu(x->b); + + /* equivalent to gf128mul_table_be[b >> 63] (see crypto/gf128mul.c): */ + u64 _tt = gf128mul_mask_from_bit(a, 63) & 0x87; + + r->a = cpu_to_le64((a << 1) | (b >> 63)); + r->b = cpu_to_le64((b << 1) ^ _tt); +} + +/* 4k table optimization */ + +struct gf128mul_4k { + be128 t[256]; +}; + +struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g); +struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g); +void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t); +void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t); +void gf128mul_x8_ble(le128 *r, const le128 *x); +static inline void gf128mul_free_4k(struct gf128mul_4k *t) +{ + kfree_sensitive(t); +} + + +/* 64k table optimization, implemented for bbe */ + +struct gf128mul_64k { + struct gf128mul_4k *t[16]; +}; + +/* First initialize with the constant factor with which you + * want to multiply and then call gf128mul_64k_bbe with the other + * factor in the first argument, and the table in the second. + * Afterwards, the result is stored in *a. 
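+ *
+ * A minimal usage sketch (assumptions: 'h' is a be128 holding the fixed
+ * factor, 'x' is the be128 element to be multiplied in place, and error
+ * handling is reduced to the allocation check):
+ *
+ *     struct gf128mul_64k *t = gf128mul_init_64k_bbe(&h);
+ *
+ *     if (!t)
+ *             return -ENOMEM;
+ *     gf128mul_64k_bbe(&x, t);      <- computes x = x * h in GF(2^128), bbe format
+ *     gf128mul_free_64k(t);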
+ */ +struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g); +void gf128mul_free_64k(struct gf128mul_64k *t); +void gf128mul_64k_bbe(be128 *a, const struct gf128mul_64k *t); + +#endif /* _CRYPTO_GF128MUL_H */ diff --git a/include/crypto/ghash.h b/include/crypto/ghash.h new file mode 100644 index 000000000..f832c9f2a --- /dev/null +++ b/include/crypto/ghash.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for the GHASH hash function + */ + +#ifndef __CRYPTO_GHASH_H__ +#define __CRYPTO_GHASH_H__ + +#include <linux/types.h> +#include <crypto/gf128mul.h> + +#define GHASH_BLOCK_SIZE 16 +#define GHASH_DIGEST_SIZE 16 + +struct ghash_ctx { + struct gf128mul_4k *gf128; +}; + +struct ghash_desc_ctx { + u8 buffer[GHASH_BLOCK_SIZE]; + u32 bytes; +}; + +#endif diff --git a/include/crypto/hash.h b/include/crypto/hash.h new file mode 100644 index 000000000..f5841992d --- /dev/null +++ b/include/crypto/hash.h @@ -0,0 +1,1007 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Hash: Hash algorithms under the crypto API + * + * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#ifndef _CRYPTO_HASH_H +#define _CRYPTO_HASH_H + +#include <linux/crypto.h> +#include <linux/string.h> + +struct crypto_ahash; + +/** + * DOC: Message Digest Algorithm Definitions + * + * These data structures define modular message digest algorithm + * implementations, managed via crypto_register_ahash(), + * crypto_register_shash(), crypto_unregister_ahash() and + * crypto_unregister_shash(). + */ + +/** + * struct hash_alg_common - define properties of message digest + * @digestsize: Size of the result of the transformation. A buffer of this size + * must be available to the @final and @finup calls, so they can + * store the resulting hash into it. For various predefined sizes, + * search include/crypto/ using + * git grep _DIGEST_SIZE include/crypto. + * @statesize: Size of the block for partial state of the transformation. A + * buffer of this size must be passed to the @export function as it + * will save the partial state of the transformation into it. On the + * other side, the @import function will load the state from a + * buffer of this size as well. + * @base: Start of data structure of cipher algorithm. The common data + * structure of crypto_alg contains information common to all ciphers. + * The hash_alg_common data structure now adds the hash-specific + * information. + */ +struct hash_alg_common { + unsigned int digestsize; + unsigned int statesize; + + struct crypto_alg base; +}; + +struct ahash_request { + struct crypto_async_request base; + + unsigned int nbytes; + struct scatterlist *src; + u8 *result; + + /* This field may only be used by the ahash API code. */ + void *priv; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +/** + * struct ahash_alg - asynchronous message digest definition + * @init: **[mandatory]** Initialize the transformation context. Intended only to initialize the + * state of the HASH transformation at the beginning. This shall fill in + * the internal structures used during the entire duration of the whole + * transformation. No data processing happens at this point. Driver code + * implementation must not use req->result. + * @update: **[mandatory]** Push a chunk of data into the driver for transformation. This + * function actually pushes blocks of data from upper layers into the + * driver, which then passes those to the hardware as seen fit. 
This + * function must not finalize the HASH transformation by calculating the + * final message digest as this only adds more data into the + * transformation. This function shall not modify the transformation + * context, as this function may be called in parallel with the same + * transformation object. Data processing can happen synchronously + * [SHASH] or asynchronously [AHASH] at this point. Driver must not use + * req->result. + * @final: **[mandatory]** Retrieve result from the driver. This function finalizes the + * transformation and retrieves the resulting hash from the driver and + * pushes it back to upper layers. No data processing happens at this + * point unless hardware requires it to finish the transformation + * (then the data buffered by the device driver is processed). + * @finup: **[optional]** Combination of @update and @final. This function is effectively a + * combination of @update and @final calls issued in sequence. As some + * hardware cannot do @update and @final separately, this callback was + * added to allow such hardware to be used at least by IPsec. Data + * processing can happen synchronously [SHASH] or asynchronously [AHASH] + * at this point. + * @digest: Combination of @init and @update and @final. This function + * effectively behaves as the entire chain of operations, @init, + * @update and @final issued in sequence. Just like @finup, this was + * added for hardware which cannot do even the @finup, but can only do + * the whole transformation in one run. Data processing can happen + * synchronously [SHASH] or asynchronously [AHASH] at this point. + * @setkey: Set optional key used by the hashing algorithm. Intended to push + * optional key used by the hashing algorithm from upper layers into + * the driver. This function can store the key in the transformation + * context or can outright program it into the hardware. In the former + * case, one must be careful to program the key into the hardware at + * appropriate time and one must be careful that .setkey() can be + * called multiple times during the existence of the transformation + * object. Not all hashing algorithms do implement this function as it + * is only needed for keyed message digests. SHAx/MDx/CRCx do NOT + * implement this function. HMAC(MDx)/HMAC(SHAx)/CMAC(AES) do implement + * this function. This function must be called before any other of the + * @init, @update, @final, @finup, @digest is called. No data + * processing happens at this point. + * @export: Export partial state of the transformation. This function dumps the + * entire state of the ongoing transformation into a provided block of + * data so it can be @import 'ed back later on. This is useful in case + * you want to save partial result of the transformation after + * processing certain amount of data and reload this partial result + * multiple times later on for multiple re-use. No data processing + * happens at this point. Driver must not use req->result. + * @import: Import partial state of the transformation. This function loads the + * entire state of the ongoing transformation from a provided block of + * data so the transformation can continue from this point onward. No + * data processing happens at this point. Driver must not use + * req->result. + * @init_tfm: Initialize the cryptographic transformation object. + * This function is called only once at the instantiation + * time, right after the transformation context was + * allocated. 
In case the cryptographic hardware has + * some special requirements which need to be handled + * by software, this function shall check for the precise + * requirement of the transformation and put any software + * fallbacks in place. + * @exit_tfm: Deinitialize the cryptographic transformation object. + * This is a counterpart to @init_tfm, used to remove + * various changes set in @init_tfm. + * @halg: see struct hash_alg_common + */ +struct ahash_alg { + int (*init)(struct ahash_request *req); + int (*update)(struct ahash_request *req); + int (*final)(struct ahash_request *req); + int (*finup)(struct ahash_request *req); + int (*digest)(struct ahash_request *req); + int (*export)(struct ahash_request *req, void *out); + int (*import)(struct ahash_request *req, const void *in); + int (*setkey)(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen); + int (*init_tfm)(struct crypto_ahash *tfm); + void (*exit_tfm)(struct crypto_ahash *tfm); + + struct hash_alg_common halg; +}; + +struct shash_desc { + struct crypto_shash *tfm; + void *__ctx[] __aligned(ARCH_SLAB_MINALIGN); +}; + +#define HASH_MAX_DIGESTSIZE 64 + +/* + * Worst case is hmac(sha3-224-generic). Its context is a nested 'shash_desc' + * containing a 'struct sha3_state'. + */ +#define HASH_MAX_DESCSIZE (sizeof(struct shash_desc) + 360) + +#define HASH_MAX_STATESIZE 512 + +#define SHASH_DESC_ON_STACK(shash, ctx) \ + char __##shash##_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE] \ + __aligned(__alignof__(struct shash_desc)); \ + struct shash_desc *shash = (struct shash_desc *)__##shash##_desc + +/** + * struct shash_alg - synchronous message digest definition + * @init: see struct ahash_alg + * @update: see struct ahash_alg + * @final: see struct ahash_alg + * @finup: see struct ahash_alg + * @digest: see struct ahash_alg + * @export: see struct ahash_alg + * @import: see struct ahash_alg + * @setkey: see struct ahash_alg + * @init_tfm: Initialize the cryptographic transformation object. + * This function is called only once at the instantiation + * time, right after the transformation context was + * allocated. In case the cryptographic hardware has + * some special requirements which need to be handled + * by software, this function shall check for the precise + * requirement of the transformation and put any software + * fallbacks in place. + * @exit_tfm: Deinitialize the cryptographic transformation object. + * This is a counterpart to @init_tfm, used to remove + * various changes set in @init_tfm. + * @digestsize: see struct ahash_alg + * @statesize: see struct ahash_alg + * @descsize: Size of the operational state for the message digest. This state + * size is the memory size that needs to be allocated for + * shash_desc.__ctx + * @base: internally used + */ +struct shash_alg { + int (*init)(struct shash_desc *desc); + int (*update)(struct shash_desc *desc, const u8 *data, + unsigned int len); + int (*final)(struct shash_desc *desc, u8 *out); + int (*finup)(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out); + int (*digest)(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out); + int (*export)(struct shash_desc *desc, void *out); + int (*import)(struct shash_desc *desc, const void *in); + int (*setkey)(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen); + int (*init_tfm)(struct crypto_shash *tfm); + void (*exit_tfm)(struct crypto_shash *tfm); + + unsigned int descsize; + + /* These fields must match hash_alg_common. 
*/ + unsigned int digestsize + __attribute__ ((aligned(__alignof__(struct hash_alg_common)))); + unsigned int statesize; + + struct crypto_alg base; +}; + +struct crypto_ahash { + int (*init)(struct ahash_request *req); + int (*update)(struct ahash_request *req); + int (*final)(struct ahash_request *req); + int (*finup)(struct ahash_request *req); + int (*digest)(struct ahash_request *req); + int (*export)(struct ahash_request *req, void *out); + int (*import)(struct ahash_request *req, const void *in); + int (*setkey)(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen); + + unsigned int reqsize; + struct crypto_tfm base; +}; + +struct crypto_shash { + unsigned int descsize; + struct crypto_tfm base; +}; + +/** + * DOC: Asynchronous Message Digest API + * + * The asynchronous message digest API is used with the ciphers of type + * CRYPTO_ALG_TYPE_AHASH (listed as type "ahash" in /proc/crypto) + * + * The asynchronous cipher operation discussion provided for the + * CRYPTO_ALG_TYPE_SKCIPHER API applies here as well. + */ + +static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_ahash, base); +} + +/** + * crypto_alloc_ahash() - allocate ahash cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * ahash cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for an ahash. The returned struct + * crypto_ahash is the cipher handle that is required for any subsequent + * API invocation for that ahash. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type, + u32 mask); + +static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm) +{ + return &tfm->base; +} + +/** + * crypto_free_ahash() - zeroize and free the ahash handle + * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. + */ +static inline void crypto_free_ahash(struct crypto_ahash *tfm) +{ + crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm)); +} + +/** + * crypto_has_ahash() - Search for the availability of an ahash. + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * ahash + * @type: specifies the type of the ahash + * @mask: specifies the mask for the ahash + * + * Return: true when the ahash is known to the kernel crypto API; false + * otherwise + */ +int crypto_has_ahash(const char *alg_name, u32 type, u32 mask); + +static inline const char *crypto_ahash_alg_name(struct crypto_ahash *tfm) +{ + return crypto_tfm_alg_name(crypto_ahash_tfm(tfm)); +} + +static inline const char *crypto_ahash_driver_name(struct crypto_ahash *tfm) +{ + return crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); +} + +static inline unsigned int crypto_ahash_alignmask( + struct crypto_ahash *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm)); +} + +/** + * crypto_ahash_blocksize() - obtain block size for cipher + * @tfm: cipher handle + * + * The block size for the message digest cipher referenced with the cipher + * handle is returned. 
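+ *
+ * For example, a "sha256" transform reports a block size of 64 bytes here,
+ * which is distinct from its 32-byte digest size as reported by
+ * crypto_ahash_digestsize().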
+ * + * Return: block size of cipher + */ +static inline unsigned int crypto_ahash_blocksize(struct crypto_ahash *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); +} + +static inline struct hash_alg_common *__crypto_hash_alg_common( + struct crypto_alg *alg) +{ + return container_of(alg, struct hash_alg_common, base); +} + +static inline struct hash_alg_common *crypto_hash_alg_common( + struct crypto_ahash *tfm) +{ + return __crypto_hash_alg_common(crypto_ahash_tfm(tfm)->__crt_alg); +} + +/** + * crypto_ahash_digestsize() - obtain message digest size + * @tfm: cipher handle + * + * The size for the message digest created by the message digest cipher + * referenced with the cipher handle is returned. + * + * + * Return: message digest size of cipher + */ +static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm) +{ + return crypto_hash_alg_common(tfm)->digestsize; +} + +/** + * crypto_ahash_statesize() - obtain size of the ahash state + * @tfm: cipher handle + * + * Return the size of the ahash state. With the crypto_ahash_export() + * function, the caller can export the state into a buffer whose size is + * defined with this function. + * + * Return: size of the ahash state + */ +static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm) +{ + return crypto_hash_alg_common(tfm)->statesize; +} + +static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm) +{ + return crypto_tfm_get_flags(crypto_ahash_tfm(tfm)); +} + +static inline void crypto_ahash_set_flags(struct crypto_ahash *tfm, u32 flags) +{ + crypto_tfm_set_flags(crypto_ahash_tfm(tfm), flags); +} + +static inline void crypto_ahash_clear_flags(struct crypto_ahash *tfm, u32 flags) +{ + crypto_tfm_clear_flags(crypto_ahash_tfm(tfm), flags); +} + +/** + * crypto_ahash_reqtfm() - obtain cipher handle from request + * @req: asynchronous request handle that contains the reference to the ahash + * cipher handle + * + * Return the ahash cipher handle that is registered with the asynchronous + * request handle ahash_request. + * + * Return: ahash cipher handle + */ +static inline struct crypto_ahash *crypto_ahash_reqtfm( + struct ahash_request *req) +{ + return __crypto_ahash_cast(req->base.tfm); +} + +/** + * crypto_ahash_reqsize() - obtain size of the request data structure + * @tfm: cipher handle + * + * Return: size of the request data + */ +static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm) +{ + return tfm->reqsize; +} + +static inline void *ahash_request_ctx(struct ahash_request *req) +{ + return req->__ctx; +} + +/** + * crypto_ahash_setkey - set key for cipher handle + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the ahash cipher. The cipher + * handle must point to a keyed hash in order for this function to succeed. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen); + +/** + * crypto_ahash_finup() - update and finalize message digest + * @req: reference to the ahash_request handle that holds all information + * needed to perform the cipher operation + * + * This function is a "short-hand" for the function calls of + * crypto_ahash_update and crypto_ahash_final. The parameters have the same + * meaning as discussed for those separate functions. 
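+ *
+ * A minimal usage sketch (assumptions: @tfm was allocated with
+ * crypto_alloc_ahash(), 'sg' describes the input data, 'out' has room for
+ * crypto_ahash_digestsize() bytes, and the synchronous wait helpers from
+ * <linux/crypto.h> are used; error handling is trimmed):
+ *
+ *     DECLARE_CRYPTO_WAIT(wait);
+ *     struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
+ *
+ *     ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ *                                crypto_req_done, &wait);
+ *     ahash_request_set_crypt(req, sg, out, nbytes);
+ *     err = crypto_wait_req(crypto_ahash_init(req), &wait);
+ *     if (!err)
+ *             err = crypto_wait_req(crypto_ahash_finup(req), &wait);
+ *     ahash_request_free(req);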
+ * + * Return: see crypto_ahash_final() + */ +int crypto_ahash_finup(struct ahash_request *req); + +/** + * crypto_ahash_final() - calculate message digest + * @req: reference to the ahash_request handle that holds all information + * needed to perform the cipher operation + * + * Finalize the message digest operation and create the message digest + * based on all data added to the cipher handle. The message digest is placed + * into the output buffer registered with the ahash_request handle. + * + * Return: + * 0 if the message digest was successfully calculated; + * -EINPROGRESS if data is fed into hardware (DMA) or queued for later; + * -EBUSY if queue is full and request should be resubmitted later; + * other < 0 if an error occurred + */ +int crypto_ahash_final(struct ahash_request *req); + +/** + * crypto_ahash_digest() - calculate message digest for a buffer + * @req: reference to the ahash_request handle that holds all information + * needed to perform the cipher operation + * + * This function is a "short-hand" for the function calls of crypto_ahash_init, + * crypto_ahash_update and crypto_ahash_final. The parameters have the same + * meaning as discussed for those separate three functions. + * + * Return: see crypto_ahash_final() + */ +int crypto_ahash_digest(struct ahash_request *req); + +/** + * crypto_ahash_export() - extract current message digest state + * @req: reference to the ahash_request handle whose state is exported + * @out: output buffer of sufficient size that can hold the hash state + * + * This function exports the hash state of the ahash_request handle into the + * caller-allocated output buffer out which must have sufficient size (e.g. by + * calling crypto_ahash_statesize()). + * + * Return: 0 if the export was successful; < 0 if an error occurred + */ +static inline int crypto_ahash_export(struct ahash_request *req, void *out) +{ + return crypto_ahash_reqtfm(req)->export(req, out); +} + +/** + * crypto_ahash_import() - import message digest state + * @req: reference to ahash_request handle the state is imported into + * @in: buffer holding the state + * + * This function imports the hash state into the ahash_request handle from the + * input buffer. That buffer should have been generated with the + * crypto_ahash_export function. + * + * Return: 0 if the import was successful; < 0 if an error occurred + */ +static inline int crypto_ahash_import(struct ahash_request *req, const void *in) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return tfm->import(req, in); +} + +/** + * crypto_ahash_init() - (re)initialize message digest handle + * @req: ahash_request handle that already is initialized with all necessary + * data using the ahash_request_* API functions + * + * The call (re-)initializes the message digest referenced by the ahash_request + * handle. Any potentially existing state created by previous operations is + * discarded. + * + * Return: see crypto_ahash_final() + */ +static inline int crypto_ahash_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return tfm->init(req); +} + +/** + * crypto_ahash_update() - add data to message digest for processing + * @req: ahash_request handle that was previously initialized with the + * crypto_ahash_init call. + * + * Updates the message digest state of the &ahash_request handle. 
The input data + * is pointed to by the scatter/gather list registered in the &ahash_request + * handle + * + * Return: see crypto_ahash_final() + */ +static inline int crypto_ahash_update(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct crypto_alg *alg = tfm->base.__crt_alg; + unsigned int nbytes = req->nbytes; + int ret; + + crypto_stats_get(alg); + ret = crypto_ahash_reqtfm(req)->update(req); + crypto_stats_ahash_update(nbytes, ret, alg); + return ret; +} + +/** + * DOC: Asynchronous Hash Request Handle + * + * The &ahash_request data structure contains all pointers to data + * required for the asynchronous cipher operation. This includes the cipher + * handle (which can be used by multiple &ahash_request instances), pointer + * to plaintext and the message digest output buffer, asynchronous callback + * function, etc. It acts as a handle to the ahash_request_* API calls in a + * similar way as ahash handle to the crypto_ahash_* API calls. + */ + +/** + * ahash_request_set_tfm() - update cipher handle reference in request + * @req: request handle to be modified + * @tfm: cipher handle that shall be added to the request handle + * + * Allow the caller to replace the existing ahash handle in the request + * data structure with a different one. + */ +static inline void ahash_request_set_tfm(struct ahash_request *req, + struct crypto_ahash *tfm) +{ + req->base.tfm = crypto_ahash_tfm(tfm); +} + +/** + * ahash_request_alloc() - allocate request data structure + * @tfm: cipher handle to be registered with the request + * @gfp: memory allocation flag that is handed to kmalloc by the API call. + * + * Allocate the request data structure that must be used with the ahash + * message digest API calls. During + * the allocation, the provided ahash handle + * is registered in the request data structure. + * + * Return: allocated request handle in case of success, or NULL if out of memory + */ +static inline struct ahash_request *ahash_request_alloc( + struct crypto_ahash *tfm, gfp_t gfp) +{ + struct ahash_request *req; + + req = kmalloc(sizeof(struct ahash_request) + + crypto_ahash_reqsize(tfm), gfp); + + if (likely(req)) + ahash_request_set_tfm(req, tfm); + + return req; +} + +/** + * ahash_request_free() - zeroize and free the request data structure + * @req: request data structure cipher handle to be freed + */ +static inline void ahash_request_free(struct ahash_request *req) +{ + kfree_sensitive(req); +} + +static inline void ahash_request_zero(struct ahash_request *req) +{ + memzero_explicit(req, sizeof(*req) + + crypto_ahash_reqsize(crypto_ahash_reqtfm(req))); +} + +static inline struct ahash_request *ahash_request_cast( + struct crypto_async_request *req) +{ + return container_of(req, struct ahash_request, base); +} + +/** + * ahash_request_set_callback() - set asynchronous callback function + * @req: request handle + * @flags: specify zero or an ORing of the flags + * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and + * increase the wait queue beyond the initial maximum size; + * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep + * @compl: callback function pointer to be registered with the request handle + * @data: The data pointer refers to memory that is not used by the kernel + * crypto API, but provided to the callback function for it to use. Here, + * the caller can provide a reference to memory the callback function can + * operate on. 
As the callback function is invoked asynchronously to the + * related functionality, it may need to access data structures of the + * related functionality which can be referenced using this pointer. The + * callback function can access the memory via the "data" field in the + * &crypto_async_request data structure provided to the callback function. + * + * This function allows setting the callback function that is triggered once + * the cipher operation completes. + * + * The callback function is registered with the &ahash_request handle and + * must comply with the following template:: + * + * void callback_function(struct crypto_async_request *req, int error) + */ +static inline void ahash_request_set_callback(struct ahash_request *req, + u32 flags, + crypto_completion_t compl, + void *data) +{ + req->base.complete = compl; + req->base.data = data; + req->base.flags = flags; +} + +/** + * ahash_request_set_crypt() - set data buffers + * @req: ahash_request handle to be updated + * @src: source scatter/gather list + * @result: buffer that is filled with the message digest -- the caller must + * ensure that the buffer has sufficient space by, for example, calling + * crypto_ahash_digestsize() + * @nbytes: number of bytes to process from the source scatter/gather list + * + * By using this call, the caller references the source scatter/gather list. + * The source scatter/gather list points to the data the message digest is to + * be calculated for. + */ +static inline void ahash_request_set_crypt(struct ahash_request *req, + struct scatterlist *src, u8 *result, + unsigned int nbytes) +{ + req->src = src; + req->nbytes = nbytes; + req->result = result; +} + +/** + * DOC: Synchronous Message Digest API + * + * The synchronous message digest API is used with the ciphers of type + * CRYPTO_ALG_TYPE_SHASH (listed as type "shash" in /proc/crypto) + * + * The message digest API is able to maintain state information for the + * caller. + * + * The synchronous message digest API can store user-related context in its + * shash_desc request data structure. + */ + +/** + * crypto_alloc_shash() - allocate message digest handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * message digest cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for a message digest. The returned &struct + * crypto_shash is the cipher handle that is required for any subsequent + * API invocation for that message digest. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type, + u32 mask); + +int crypto_has_shash(const char *alg_name, u32 type, u32 mask); + +static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm) +{ + return &tfm->base; +} + +/** + * crypto_free_shash() - zeroize and free the message digest handle + * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. 
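+ *
+ * A typical lifecycle sketch (assuming the "sha256" algorithm is available;
+ * the handle may be used for many digests before being freed):
+ *
+ *     struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
+ *
+ *     if (IS_ERR(tfm))
+ *             return PTR_ERR(tfm);
+ *     ...
+ *     crypto_free_shash(tfm);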
+ */ +static inline void crypto_free_shash(struct crypto_shash *tfm) +{ + crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm)); +} + +static inline const char *crypto_shash_alg_name(struct crypto_shash *tfm) +{ + return crypto_tfm_alg_name(crypto_shash_tfm(tfm)); +} + +static inline const char *crypto_shash_driver_name(struct crypto_shash *tfm) +{ + return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm)); +} + +static inline unsigned int crypto_shash_alignmask( + struct crypto_shash *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm)); +} + +/** + * crypto_shash_blocksize() - obtain block size for cipher + * @tfm: cipher handle + * + * The block size for the message digest cipher referenced with the cipher + * handle is returned. + * + * Return: block size of cipher + */ +static inline unsigned int crypto_shash_blocksize(struct crypto_shash *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_shash_tfm(tfm)); +} + +static inline struct shash_alg *__crypto_shash_alg(struct crypto_alg *alg) +{ + return container_of(alg, struct shash_alg, base); +} + +static inline struct shash_alg *crypto_shash_alg(struct crypto_shash *tfm) +{ + return __crypto_shash_alg(crypto_shash_tfm(tfm)->__crt_alg); +} + +/** + * crypto_shash_digestsize() - obtain message digest size + * @tfm: cipher handle + * + * The size for the message digest created by the message digest cipher + * referenced with the cipher handle is returned. + * + * Return: digest size of cipher + */ +static inline unsigned int crypto_shash_digestsize(struct crypto_shash *tfm) +{ + return crypto_shash_alg(tfm)->digestsize; +} + +static inline unsigned int crypto_shash_statesize(struct crypto_shash *tfm) +{ + return crypto_shash_alg(tfm)->statesize; +} + +static inline u32 crypto_shash_get_flags(struct crypto_shash *tfm) +{ + return crypto_tfm_get_flags(crypto_shash_tfm(tfm)); +} + +static inline void crypto_shash_set_flags(struct crypto_shash *tfm, u32 flags) +{ + crypto_tfm_set_flags(crypto_shash_tfm(tfm), flags); +} + +static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags) +{ + crypto_tfm_clear_flags(crypto_shash_tfm(tfm), flags); +} + +/** + * crypto_shash_descsize() - obtain the operational state size + * @tfm: cipher handle + * + * The size of the operational state the cipher needs during operation is + * returned for the hash referenced with the cipher handle. This size is + * required to calculate the memory requirements to allow the caller allocating + * sufficient memory for operational state. + * + * The operational state is defined with struct shash_desc where the size of + * that data structure is to be calculated as + * sizeof(struct shash_desc) + crypto_shash_descsize(alg) + * + * Return: size of the operational state + */ +static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm) +{ + return tfm->descsize; +} + +static inline void *shash_desc_ctx(struct shash_desc *desc) +{ + return desc->__ctx; +} + +/** + * crypto_shash_setkey() - set key for message digest + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the keyed message digest cipher. The + * cipher handle must point to a keyed message digest cipher in order for this + * function to succeed. + * + * Context: Any context. 
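+ *
+ * For example (sketch, error handling omitted), a keyed transform such as
+ * "hmac(sha256)" is keyed before any digest is computed with it:
+ *
+ *     tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
+ *     err = crypto_shash_setkey(tfm, key, keylen);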
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen); + +/** + * crypto_shash_digest() - calculate message digest for buffer + * @desc: see crypto_shash_final() + * @data: see crypto_shash_update() + * @len: see crypto_shash_update() + * @out: see crypto_shash_final() + * + * This function is a "short-hand" for the function calls of crypto_shash_init, + * crypto_shash_update and crypto_shash_final. The parameters have the same + * meaning as discussed for those separate three functions. + * + * Context: Any context. + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ +int crypto_shash_digest(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out); + +/** + * crypto_shash_tfm_digest() - calculate message digest for buffer + * @tfm: hash transformation object + * @data: see crypto_shash_update() + * @len: see crypto_shash_update() + * @out: see crypto_shash_final() + * + * This is a simplified version of crypto_shash_digest() for users who don't + * want to allocate their own hash descriptor (shash_desc). Instead, + * crypto_shash_tfm_digest() takes a hash transformation object (crypto_shash) + * directly, and it allocates a hash descriptor on the stack internally. + * Note that this stack allocation may be fairly large. + * + * Context: Any context. + * Return: 0 on success; < 0 if an error occurred. + */ +int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data, + unsigned int len, u8 *out); + +/** + * crypto_shash_export() - extract operational state for message digest + * @desc: reference to the operational state handle whose state is exported + * @out: output buffer of sufficient size that can hold the hash state + * + * This function exports the hash state of the operational state handle into the + * caller-allocated output buffer out which must have sufficient size (e.g. by + * calling crypto_shash_descsize). + * + * Context: Any context. + * Return: 0 if the export creation was successful; < 0 if an error occurred + */ +static inline int crypto_shash_export(struct shash_desc *desc, void *out) +{ + return crypto_shash_alg(desc->tfm)->export(desc, out); +} + +/** + * crypto_shash_import() - import operational state + * @desc: reference to the operational state handle the state imported into + * @in: buffer holding the state + * + * This function imports the hash state into the operational state handle from + * the input buffer. That buffer should have been generated with the + * crypto_ahash_export function. + * + * Context: Any context. + * Return: 0 if the import was successful; < 0 if an error occurred + */ +static inline int crypto_shash_import(struct shash_desc *desc, const void *in) +{ + struct crypto_shash *tfm = desc->tfm; + + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return crypto_shash_alg(tfm)->import(desc, in); +} + +/** + * crypto_shash_init() - (re)initialize message digest + * @desc: operational state handle that is already filled + * + * The call (re-)initializes the message digest referenced by the + * operational state handle. Any potentially existing state created by + * previous operations is discarded. + * + * Context: Any context. 
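+ *
+ * A minimal streaming sketch (assumptions: @tfm was allocated with
+ * crypto_alloc_shash(), 'out' holds crypto_shash_digestsize() bytes, and
+ * error handling is omitted):
+ *
+ *     SHASH_DESC_ON_STACK(desc, tfm);
+ *
+ *     desc->tfm = tfm;
+ *     crypto_shash_init(desc);
+ *     crypto_shash_update(desc, data, len);
+ *     crypto_shash_final(desc, out);
+ *     shash_desc_zero(desc);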
+ * Return: 0 if the message digest initialization was successful; < 0 if an + * error occurred + */ +static inline int crypto_shash_init(struct shash_desc *desc) +{ + struct crypto_shash *tfm = desc->tfm; + + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return crypto_shash_alg(tfm)->init(desc); +} + +/** + * crypto_shash_update() - add data to message digest for processing + * @desc: operational state handle that is already initialized + * @data: input data to be added to the message digest + * @len: length of the input data + * + * Updates the message digest state of the operational state handle. + * + * Context: Any context. + * Return: 0 if the message digest update was successful; < 0 if an error + * occurred + */ +int crypto_shash_update(struct shash_desc *desc, const u8 *data, + unsigned int len); + +/** + * crypto_shash_final() - calculate message digest + * @desc: operational state handle that is already filled with data + * @out: output buffer filled with the message digest + * + * Finalize the message digest operation and create the message digest + * based on all data added to the cipher handle. The message digest is placed + * into the output buffer. The caller must ensure that the output buffer is + * large enough by using crypto_shash_digestsize. + * + * Context: Any context. + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ +int crypto_shash_final(struct shash_desc *desc, u8 *out); + +/** + * crypto_shash_finup() - calculate message digest of buffer + * @desc: see crypto_shash_final() + * @data: see crypto_shash_update() + * @len: see crypto_shash_update() + * @out: see crypto_shash_final() + * + * This function is a "short-hand" for the function calls of + * crypto_shash_update and crypto_shash_final. The parameters have the same + * meaning as discussed for those separate functions. + * + * Context: Any context. 
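+ *
+ * For instance (sketch, on an already initialized 'desc'), the last chunk
+ * of a stream can be folded into the finalization instead of issuing a
+ * separate update and final call:
+ *
+ *     crypto_shash_update(desc, hdr, hdr_len);
+ *     crypto_shash_finup(desc, payload, payload_len, out);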
+ * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ +int crypto_shash_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out); + +static inline void shash_desc_zero(struct shash_desc *desc) +{ + memzero_explicit(desc, + sizeof(*desc) + crypto_shash_descsize(desc->tfm)); +} + +#endif /* _CRYPTO_HASH_H */ diff --git a/include/crypto/hash_info.h b/include/crypto/hash_info.h new file mode 100644 index 000000000..dd4f06785 --- /dev/null +++ b/include/crypto/hash_info.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Hash Info: Hash algorithms information + * + * Copyright (c) 2013 Dmitry Kasatkin <d.kasatkin@samsung.com> + */ + +#ifndef _CRYPTO_HASH_INFO_H +#define _CRYPTO_HASH_INFO_H + +#include <crypto/sha1.h> +#include <crypto/sha2.h> +#include <crypto/md5.h> +#include <crypto/streebog.h> + +#include <uapi/linux/hash_info.h> + +/* not defined in include/crypto/ */ +#define RMD128_DIGEST_SIZE 16 +#define RMD160_DIGEST_SIZE 20 +#define RMD256_DIGEST_SIZE 32 +#define RMD320_DIGEST_SIZE 40 + +/* not defined in include/crypto/ */ +#define WP512_DIGEST_SIZE 64 +#define WP384_DIGEST_SIZE 48 +#define WP256_DIGEST_SIZE 32 + +/* not defined in include/crypto/ */ +#define TGR128_DIGEST_SIZE 16 +#define TGR160_DIGEST_SIZE 20 +#define TGR192_DIGEST_SIZE 24 + +/* not defined in include/crypto/ */ +#define SM3256_DIGEST_SIZE 32 + +extern const char *const hash_algo_name[HASH_ALGO__LAST]; +extern const int hash_digest_size[HASH_ALGO__LAST]; + +#endif /* _CRYPTO_HASH_INFO_H */ diff --git a/include/crypto/hmac.h b/include/crypto/hmac.h new file mode 100644 index 000000000..66774132a --- /dev/null +++ b/include/crypto/hmac.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_HMAC_H +#define _CRYPTO_HMAC_H + +#define HMAC_IPAD_VALUE 0x36 +#define HMAC_OPAD_VALUE 0x5c + +#endif /* _CRYPTO_HMAC_H */ diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h new file mode 100644 index 000000000..a406e281a --- /dev/null +++ b/include/crypto/if_alg.h @@ -0,0 +1,250 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * if_alg: User-space algorithm interface + * + * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#ifndef _CRYPTO_IF_ALG_H +#define _CRYPTO_IF_ALG_H + +#include <linux/compiler.h> +#include <linux/completion.h> +#include <linux/if_alg.h> +#include <linux/scatterlist.h> +#include <linux/types.h> +#include <linux/atomic.h> +#include <net/sock.h> + +#include <crypto/aead.h> +#include <crypto/skcipher.h> + +#define ALG_MAX_PAGES 16 + +struct crypto_async_request; + +struct alg_sock { + /* struct sock must be the first member of struct alg_sock */ + struct sock sk; + + struct sock *parent; + + atomic_t refcnt; + atomic_t nokey_refcnt; + + const struct af_alg_type *type; + void *private; +}; + +struct af_alg_control { + struct af_alg_iv *iv; + int op; + unsigned int aead_assoclen; +}; + +struct af_alg_type { + void *(*bind)(const char *name, u32 type, u32 mask); + void (*release)(void *private); + int (*setkey)(void *private, const u8 *key, unsigned int keylen); + int (*setentropy)(void *private, sockptr_t entropy, unsigned int len); + int (*accept)(void *private, struct sock *sk); + int (*accept_nokey)(void *private, struct sock *sk); + int (*setauthsize)(void *private, unsigned int authsize); + + struct proto_ops *ops; + struct proto_ops *ops_nokey; + struct module *owner; + char name[14]; +}; + +struct af_alg_sgl { + struct scatterlist sg[ALG_MAX_PAGES + 1]; + struct 
page *pages[ALG_MAX_PAGES]; + unsigned int npages; +}; + +/* TX SGL entry */ +struct af_alg_tsgl { + struct list_head list; + unsigned int cur; /* Last processed SG entry */ + struct scatterlist sg[]; /* Array of SGs forming the SGL */ +}; + +#define MAX_SGL_ENTS ((4096 - sizeof(struct af_alg_tsgl)) / \ + sizeof(struct scatterlist) - 1) + +/* RX SGL entry */ +struct af_alg_rsgl { + struct af_alg_sgl sgl; + struct list_head list; + size_t sg_num_bytes; /* Bytes of data in that SGL */ +}; + +/** + * struct af_alg_async_req - definition of crypto request + * @iocb: IOCB for AIO operations + * @sk: Socket the request is associated with + * @first_rsgl: First RX SG + * @last_rsgl: Pointer to last RX SG + * @rsgl_list: Track RX SGs + * @tsgl: Private, per request TX SGL of buffers to process + * @tsgl_entries: Number of entries in priv. TX SGL + * @outlen: Number of output bytes generated by crypto op + * @areqlen: Length of this data structure + * @cra_u: Cipher request + */ +struct af_alg_async_req { + struct kiocb *iocb; + struct sock *sk; + + struct af_alg_rsgl first_rsgl; + struct af_alg_rsgl *last_rsgl; + struct list_head rsgl_list; + + struct scatterlist *tsgl; + unsigned int tsgl_entries; + + unsigned int outlen; + unsigned int areqlen; + + union { + struct aead_request aead_req; + struct skcipher_request skcipher_req; + } cra_u; + + /* req ctx trails this struct */ +}; + +/** + * struct af_alg_ctx - definition of the crypto context + * + * The crypto context tracks the input data during the lifetime of an AF_ALG + * socket. + * + * @tsgl_list: Link to TX SGL + * @iv: IV for cipher operation + * @aead_assoclen: Length of AAD for AEAD cipher operations + * @completion: Work queue for synchronous operation + * @used: TX bytes sent to kernel. This variable is used to + * ensure that user space cannot cause the kernel + * to allocate too much memory in sendmsg operation. + * @rcvused: Total RX bytes to be filled by kernel. This variable + * is used to ensure user space cannot cause the kernel + * to allocate too much memory in a recvmsg operation. + * @more: More data to be expected from user space? + * @merge: Shall new data from user space be merged into existing + * SG? + * @enc: Cryptographic operation to be performed when + * recvmsg is invoked. + * @init: True if metadata has been sent. + * @len: Length of memory allocated for this data structure. + * @inflight: Non-zero when AIO requests are in flight. + */ +struct af_alg_ctx { + struct list_head tsgl_list; + + void *iv; + size_t aead_assoclen; + + struct crypto_wait wait; + + size_t used; + atomic_t rcvused; + + bool more; + bool merge; + bool enc; + bool init; + + unsigned int len; + + unsigned int inflight; +}; + +int af_alg_register_type(const struct af_alg_type *type); +int af_alg_unregister_type(const struct af_alg_type *type); + +int af_alg_release(struct socket *sock); +void af_alg_release_parent(struct sock *sk); +int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern); + +int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len); +void af_alg_free_sg(struct af_alg_sgl *sgl); + +static inline struct alg_sock *alg_sk(struct sock *sk) +{ + return (struct alg_sock *)sk; +} + +/** + * Size of available buffer for sending data from user space to kernel. 
+ * + * @sk socket of connection to user space + * @return number of bytes still available + */ +static inline int af_alg_sndbuf(struct sock *sk) +{ + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + + return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) - + ctx->used, 0); +} + +/** + * Can the send buffer still be written to? + * + * @sk socket of connection to user space + * @return true => writable, false => not writable + */ +static inline bool af_alg_writable(struct sock *sk) +{ + return PAGE_SIZE <= af_alg_sndbuf(sk); +} + +/** + * Size of available buffer used by kernel for the RX user space operation. + * + * @sk socket of connection to user space + * @return number of bytes still available + */ +static inline int af_alg_rcvbuf(struct sock *sk) +{ + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + + return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) - + atomic_read(&ctx->rcvused), 0); +} + +/** + * Can the RX buffer still be written to? + * + * @sk socket of connection to user space + * @return true => writable, false => not writable + */ +static inline bool af_alg_readable(struct sock *sk) +{ + return PAGE_SIZE <= af_alg_rcvbuf(sk); +} + +unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset); +void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, + size_t dst_offset); +void af_alg_wmem_wakeup(struct sock *sk); +int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min); +int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, + unsigned int ivsize); +ssize_t af_alg_sendpage(struct socket *sock, struct page *page, + int offset, size_t size, int flags); +void af_alg_free_resources(struct af_alg_async_req *areq); +void af_alg_async_cb(struct crypto_async_request *_req, int err); +__poll_t af_alg_poll(struct file *file, struct socket *sock, + poll_table *wait); +struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk, + unsigned int areqlen); +int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, + struct af_alg_async_req *areq, size_t maxsize, + size_t *outlen); + +#endif /* _CRYPTO_IF_ALG_H */ diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h new file mode 100644 index 000000000..cfc47e188 --- /dev/null +++ b/include/crypto/internal/acompress.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Asynchronous Compression operations + * + * Copyright (c) 2016, Intel Corporation + * Authors: Weigang Li <weigang.li@intel.com> + * Giovanni Cabiddu <giovanni.cabiddu@intel.com> + */ +#ifndef _CRYPTO_ACOMP_INT_H +#define _CRYPTO_ACOMP_INT_H +#include <crypto/acompress.h> + +/* + * Transform internal helpers. 
+ */ +static inline void *acomp_request_ctx(struct acomp_req *req) +{ + return req->__ctx; +} + +static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm) +{ + return tfm->base.__crt_ctx; +} + +static inline void acomp_request_complete(struct acomp_req *req, + int err) +{ + req->base.complete(&req->base, err); +} + +static inline const char *acomp_alg_name(struct crypto_acomp *tfm) +{ + return crypto_acomp_tfm(tfm)->__crt_alg->cra_name; +} + +static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm) +{ + struct acomp_req *req; + + req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL); + if (likely(req)) + acomp_request_set_tfm(req, tfm); + return req; +} + +static inline void __acomp_request_free(struct acomp_req *req) +{ + kfree_sensitive(req); +} + +/** + * crypto_register_acomp() -- Register asynchronous compression algorithm + * + * Function registers an implementation of an asynchronous + * compression algorithm + * + * @alg: algorithm definition + * + * Return: zero on success; error code in case of error + */ +int crypto_register_acomp(struct acomp_alg *alg); + +/** + * crypto_unregister_acomp() -- Unregister asynchronous compression algorithm + * + * Function unregisters an implementation of an asynchronous + * compression algorithm + * + * @alg: algorithm definition + */ +void crypto_unregister_acomp(struct acomp_alg *alg); + +int crypto_register_acomps(struct acomp_alg *algs, int count); +void crypto_unregister_acomps(struct acomp_alg *algs, int count); + +#endif diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h new file mode 100644 index 000000000..d482017f3 --- /dev/null +++ b/include/crypto/internal/aead.h @@ -0,0 +1,146 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * AEAD: Authenticated Encryption with Associated Data + * + * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#ifndef _CRYPTO_INTERNAL_AEAD_H +#define _CRYPTO_INTERNAL_AEAD_H + +#include <crypto/aead.h> +#include <crypto/algapi.h> +#include <linux/stddef.h> +#include <linux/types.h> + +struct rtattr; + +struct aead_instance { + void (*free)(struct aead_instance *inst); + union { + struct { + char head[offsetof(struct aead_alg, base)]; + struct crypto_instance base; + } s; + struct aead_alg alg; + }; +}; + +struct crypto_aead_spawn { + struct crypto_spawn base; +}; + +struct aead_queue { + struct crypto_queue base; +}; + +static inline void *crypto_aead_ctx(struct crypto_aead *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + +static inline struct crypto_instance *aead_crypto_instance( + struct aead_instance *inst) +{ + return container_of(&inst->alg.base, struct crypto_instance, alg); +} + +static inline struct aead_instance *aead_instance(struct crypto_instance *inst) +{ + return container_of(&inst->alg, struct aead_instance, alg.base); +} + +static inline struct aead_instance *aead_alg_instance(struct crypto_aead *aead) +{ + return aead_instance(crypto_tfm_alg_instance(&aead->base)); +} + +static inline void *aead_instance_ctx(struct aead_instance *inst) +{ + return crypto_instance_ctx(aead_crypto_instance(inst)); +} + +static inline void *aead_request_ctx(struct aead_request *req) +{ + return req->__ctx; +} + +static inline void aead_request_complete(struct aead_request *req, int err) +{ + req->base.complete(&req->base, err); +} + +static inline u32 aead_request_flags(struct aead_request *req) +{ + return req->base.flags; +} + +static inline struct aead_request *aead_request_cast( + struct crypto_async_request 
*req) +{ + return container_of(req, struct aead_request, base); +} + +int crypto_grab_aead(struct crypto_aead_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask); + +static inline void crypto_drop_aead(struct crypto_aead_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct aead_alg *crypto_spawn_aead_alg( + struct crypto_aead_spawn *spawn) +{ + return container_of(spawn->base.alg, struct aead_alg, base); +} + +static inline struct crypto_aead *crypto_spawn_aead( + struct crypto_aead_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +static inline void crypto_aead_set_reqsize(struct crypto_aead *aead, + unsigned int reqsize) +{ + aead->reqsize = reqsize; +} + +static inline void aead_init_queue(struct aead_queue *queue, + unsigned int max_qlen) +{ + crypto_init_queue(&queue->base, max_qlen); +} + +static inline unsigned int crypto_aead_alg_chunksize(struct aead_alg *alg) +{ + return alg->chunksize; +} + +/** + * crypto_aead_chunksize() - obtain chunk size + * @tfm: cipher handle + * + * The block size is set to one for ciphers such as CCM. However, + * you still need to provide incremental updates in multiples of + * the underlying block size as the IV does not have sub-block + * granularity. This is known in this API as the chunk size. + * + * Return: chunk size in bytes + */ +static inline unsigned int crypto_aead_chunksize(struct crypto_aead *tfm) +{ + return crypto_aead_alg_chunksize(crypto_aead_alg(tfm)); +} + +int crypto_register_aead(struct aead_alg *alg); +void crypto_unregister_aead(struct aead_alg *alg); +int crypto_register_aeads(struct aead_alg *algs, int count); +void crypto_unregister_aeads(struct aead_alg *algs, int count); +int aead_register_instance(struct crypto_template *tmpl, + struct aead_instance *inst); + +#endif /* _CRYPTO_INTERNAL_AEAD_H */ + diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h new file mode 100644 index 000000000..8d3220c9a --- /dev/null +++ b/include/crypto/internal/akcipher.h @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Public Key Encryption + * + * Copyright (c) 2015, Intel Corporation + * Authors: Tadeusz Struk <tadeusz.struk@intel.com> + */ +#ifndef _CRYPTO_AKCIPHER_INT_H +#define _CRYPTO_AKCIPHER_INT_H +#include <crypto/akcipher.h> +#include <crypto/algapi.h> + +struct akcipher_instance { + void (*free)(struct akcipher_instance *inst); + union { + struct { + char head[offsetof(struct akcipher_alg, base)]; + struct crypto_instance base; + } s; + struct akcipher_alg alg; + }; +}; + +struct crypto_akcipher_spawn { + struct crypto_spawn base; +}; + +/* + * Transform internal helpers. 
+ */ +static inline void *akcipher_request_ctx(struct akcipher_request *req) +{ + return req->__ctx; +} + +static inline void akcipher_set_reqsize(struct crypto_akcipher *akcipher, + unsigned int reqsize) +{ + crypto_akcipher_alg(akcipher)->reqsize = reqsize; +} + +static inline void *akcipher_tfm_ctx(struct crypto_akcipher *tfm) +{ + return tfm->base.__crt_ctx; +} + +static inline void akcipher_request_complete(struct akcipher_request *req, + int err) +{ + req->base.complete(&req->base, err); +} + +static inline const char *akcipher_alg_name(struct crypto_akcipher *tfm) +{ + return crypto_akcipher_tfm(tfm)->__crt_alg->cra_name; +} + +static inline struct crypto_instance *akcipher_crypto_instance( + struct akcipher_instance *inst) +{ + return container_of(&inst->alg.base, struct crypto_instance, alg); +} + +static inline struct akcipher_instance *akcipher_instance( + struct crypto_instance *inst) +{ + return container_of(&inst->alg, struct akcipher_instance, alg.base); +} + +static inline struct akcipher_instance *akcipher_alg_instance( + struct crypto_akcipher *akcipher) +{ + return akcipher_instance(crypto_tfm_alg_instance(&akcipher->base)); +} + +static inline void *akcipher_instance_ctx(struct akcipher_instance *inst) +{ + return crypto_instance_ctx(akcipher_crypto_instance(inst)); +} + +int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask); + +static inline struct crypto_akcipher *crypto_spawn_akcipher( + struct crypto_akcipher_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +static inline void crypto_drop_akcipher(struct crypto_akcipher_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct akcipher_alg *crypto_spawn_akcipher_alg( + struct crypto_akcipher_spawn *spawn) +{ + return container_of(spawn->base.alg, struct akcipher_alg, base); +} + +/** + * crypto_register_akcipher() -- Register public key algorithm + * + * Function registers an implementation of a public key algorithm + * + * @alg: algorithm definition + * + * Return: zero on success; error code in case of error + */ +int crypto_register_akcipher(struct akcipher_alg *alg); + +/** + * crypto_unregister_akcipher() -- Unregister public key algorithm + * + * Function unregisters an implementation of a public key algorithm + * + * @alg: algorithm definition + */ +void crypto_unregister_akcipher(struct akcipher_alg *alg); + +/** + * akcipher_register_instance() -- Register public key template instance + * + * Function registers an implementation of an asymmetric key algorithm + * created from a template + * + * @tmpl: the template from which the algorithm was created + * @inst: the template instance + */ +int akcipher_register_instance(struct crypto_template *tmpl, + struct akcipher_instance *inst); +#endif diff --git a/include/crypto/internal/blake2b.h b/include/crypto/internal/blake2b.h new file mode 100644 index 000000000..982fe5e84 --- /dev/null +++ b/include/crypto/internal/blake2b.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Helper functions for BLAKE2b implementations. + * Keep this in sync with the corresponding BLAKE2s header.
+ */ + +#ifndef _CRYPTO_INTERNAL_BLAKE2B_H +#define _CRYPTO_INTERNAL_BLAKE2B_H + +#include <crypto/blake2b.h> +#include <crypto/internal/hash.h> +#include <linux/string.h> + +void blake2b_compress_generic(struct blake2b_state *state, + const u8 *block, size_t nblocks, u32 inc); + +static inline void blake2b_set_lastblock(struct blake2b_state *state) +{ + state->f[0] = -1; +} + +typedef void (*blake2b_compress_t)(struct blake2b_state *state, + const u8 *block, size_t nblocks, u32 inc); + +static inline void __blake2b_update(struct blake2b_state *state, + const u8 *in, size_t inlen, + blake2b_compress_t compress) +{ + const size_t fill = BLAKE2B_BLOCK_SIZE - state->buflen; + + if (unlikely(!inlen)) + return; + if (inlen > fill) { + memcpy(state->buf + state->buflen, in, fill); + (*compress)(state, state->buf, 1, BLAKE2B_BLOCK_SIZE); + state->buflen = 0; + in += fill; + inlen -= fill; + } + if (inlen > BLAKE2B_BLOCK_SIZE) { + const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2B_BLOCK_SIZE); + /* Hash one less (full) block than strictly possible */ + (*compress)(state, in, nblocks - 1, BLAKE2B_BLOCK_SIZE); + in += BLAKE2B_BLOCK_SIZE * (nblocks - 1); + inlen -= BLAKE2B_BLOCK_SIZE * (nblocks - 1); + } + memcpy(state->buf + state->buflen, in, inlen); + state->buflen += inlen; +} + +static inline void __blake2b_final(struct blake2b_state *state, u8 *out, + blake2b_compress_t compress) +{ + int i; + + blake2b_set_lastblock(state); + memset(state->buf + state->buflen, 0, + BLAKE2B_BLOCK_SIZE - state->buflen); /* Padding */ + (*compress)(state, state->buf, 1, state->buflen); + for (i = 0; i < ARRAY_SIZE(state->h); i++) + __cpu_to_le64s(&state->h[i]); + memcpy(out, state->h, state->outlen); +} + +/* Helper functions for shash implementations of BLAKE2b */ + +struct blake2b_tfm_ctx { + u8 key[BLAKE2B_KEY_SIZE]; + unsigned int keylen; +}; + +static inline int crypto_blake2b_setkey(struct crypto_shash *tfm, + const u8 *key, unsigned int keylen) +{ + struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm); + + if (keylen == 0 || keylen > BLAKE2B_KEY_SIZE) + return -EINVAL; + + memcpy(tctx->key, key, keylen); + tctx->keylen = keylen; + + return 0; +} + +static inline int crypto_blake2b_init(struct shash_desc *desc) +{ + const struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + struct blake2b_state *state = shash_desc_ctx(desc); + unsigned int outlen = crypto_shash_digestsize(desc->tfm); + + __blake2b_init(state, outlen, tctx->key, tctx->keylen); + return 0; +} + +static inline int crypto_blake2b_update(struct shash_desc *desc, + const u8 *in, unsigned int inlen, + blake2b_compress_t compress) +{ + struct blake2b_state *state = shash_desc_ctx(desc); + + __blake2b_update(state, in, inlen, compress); + return 0; +} + +static inline int crypto_blake2b_final(struct shash_desc *desc, u8 *out, + blake2b_compress_t compress) +{ + struct blake2b_state *state = shash_desc_ctx(desc); + + __blake2b_final(state, out, compress); + return 0; +} + +#endif /* _CRYPTO_INTERNAL_BLAKE2B_H */ diff --git a/include/crypto/internal/blake2s.h b/include/crypto/internal/blake2s.h new file mode 100644 index 000000000..506d56530 --- /dev/null +++ b/include/crypto/internal/blake2s.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Helper functions for BLAKE2s implementations. + * Keep this in sync with the corresponding BLAKE2b header. 
+ */ + +#ifndef _CRYPTO_INTERNAL_BLAKE2S_H +#define _CRYPTO_INTERNAL_BLAKE2S_H + +#include <crypto/blake2s.h> +#include <linux/string.h> + +void blake2s_compress_generic(struct blake2s_state *state, const u8 *block, + size_t nblocks, const u32 inc); + +void blake2s_compress(struct blake2s_state *state, const u8 *block, + size_t nblocks, const u32 inc); + +bool blake2s_selftest(void); + +#endif /* _CRYPTO_INTERNAL_BLAKE2S_H */ diff --git a/include/crypto/internal/chacha.h b/include/crypto/internal/chacha.h new file mode 100644 index 000000000..b085dc1ac --- /dev/null +++ b/include/crypto/internal/chacha.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _CRYPTO_INTERNAL_CHACHA_H +#define _CRYPTO_INTERNAL_CHACHA_H + +#include <crypto/chacha.h> +#include <crypto/internal/skcipher.h> +#include <linux/crypto.h> + +struct chacha_ctx { + u32 key[8]; + int nrounds; +}; + +static inline int chacha_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keysize, int nrounds) +{ + struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); + int i; + + if (keysize != CHACHA_KEY_SIZE) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(ctx->key); i++) + ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32)); + + ctx->nrounds = nrounds; + return 0; +} + +static inline int chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keysize) +{ + return chacha_setkey(tfm, key, keysize, 20); +} + +static inline int chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keysize) +{ + return chacha_setkey(tfm, key, keysize, 12); +} + +#endif /* _CRYPTO_CHACHA_H */ diff --git a/include/crypto/internal/cipher.h b/include/crypto/internal/cipher.h new file mode 100644 index 000000000..a9174ba90 --- /dev/null +++ b/include/crypto/internal/cipher.h @@ -0,0 +1,218 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> + * Copyright (c) 2002 David S. Miller (davem@redhat.com) + * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au> + * + * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no> + * and Nettle, by Niels Möller. + */ + +#ifndef _CRYPTO_INTERNAL_CIPHER_H +#define _CRYPTO_INTERNAL_CIPHER_H + +#include <crypto/algapi.h> + +struct crypto_cipher { + struct crypto_tfm base; +}; + +/** + * DOC: Single Block Cipher API + * + * The single block cipher API is used with the ciphers of type + * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto). + * + * Using the single block cipher API calls, operations with the basic cipher + * primitive can be implemented. These cipher primitives exclude any block + * chaining operations including IV handling. + * + * The purpose of this single block cipher API is to support the implementation + * of templates or other concepts that only need to perform the cipher operation + * on one block at a time. Templates invoke the underlying cipher primitive + * block-wise and process either the input or the output data of these cipher + * operations. + */ + +static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm) +{ + return (struct crypto_cipher *)tfm; +} + +/** + * crypto_alloc_cipher() - allocate single block cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * single block cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for a single block cipher. 
The returned struct + * crypto_cipher is the cipher handle that is required for any subsequent API + * invocation for that single block cipher. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name, + u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_CIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + + return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask)); +} + +static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm) +{ + return &tfm->base; +} + +/** + * crypto_free_cipher() - zeroize and free the single block cipher handle + * @tfm: cipher handle to be freed + */ +static inline void crypto_free_cipher(struct crypto_cipher *tfm) +{ + crypto_free_tfm(crypto_cipher_tfm(tfm)); +} + +/** + * crypto_has_cipher() - Search for the availability of a single block cipher + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * single block cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Return: true when the single block cipher is known to the kernel crypto API; + * false otherwise + */ +static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_CIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + + return crypto_has_alg(alg_name, type, mask); +} + +/** + * crypto_cipher_blocksize() - obtain block size for cipher + * @tfm: cipher handle + * + * The block size for the single block cipher referenced with the cipher handle + * tfm is returned. The caller may use that information to allocate appropriate + * memory for the data returned by the encryption or decryption operation + * + * Return: block size of cipher + */ +static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm)); +} + +static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm)); +} + +static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm) +{ + return crypto_tfm_get_flags(crypto_cipher_tfm(tfm)); +} + +static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm, + u32 flags) +{ + crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags); +} + +static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm, + u32 flags) +{ + crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags); +} + +/** + * crypto_cipher_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the single block cipher referenced by the + * cipher handle. + * + * Note, the key length determines the cipher type. Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. 
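+ *
+ * Example (an illustrative sketch only, not taken from an existing driver:
+ * it assumes the generic "aes" cipher is available, uses a 16-byte key so
+ * that AES-128 is selected, and keeps error handling minimal):
+ *
+ *	static int example_encrypt_one_block(const u8 key[16],
+ *					     const u8 in[16], u8 out[16])
+ *	{
+ *		struct crypto_cipher *tfm;
+ *		int err;
+ *
+ *		tfm = crypto_alloc_cipher("aes", 0, 0);
+ *		if (IS_ERR(tfm))
+ *			return PTR_ERR(tfm);
+ *
+ *		err = crypto_cipher_setkey(tfm, key, 16);
+ *		if (!err)
+ *			crypto_cipher_encrypt_one(tfm, out, in);
+ *
+ *		crypto_free_cipher(tfm);
+ *		return err;
+ *	}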
+ * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_cipher_setkey(struct crypto_cipher *tfm, + const u8 *key, unsigned int keylen); + +/** + * crypto_cipher_encrypt_one() - encrypt one block of plaintext + * @tfm: cipher handle + * @dst: points to the buffer that will be filled with the ciphertext + * @src: buffer holding the plaintext to be encrypted + * + * Invoke the encryption operation of one block. The caller must ensure that + * the plaintext and ciphertext buffers are at least one block in size. + */ +void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, + u8 *dst, const u8 *src); + +/** + * crypto_cipher_decrypt_one() - decrypt one block of ciphertext + * @tfm: cipher handle + * @dst: points to the buffer that will be filled with the plaintext + * @src: buffer holding the ciphertext to be decrypted + * + * Invoke the decryption operation of one block. The caller must ensure that + * the plaintext and ciphertext buffers are at least one block in size. + */ +void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, + u8 *dst, const u8 *src); + +struct crypto_cipher_spawn { + struct crypto_spawn base; +}; + +static inline int crypto_grab_cipher(struct crypto_cipher_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_CIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + return crypto_grab_spawn(&spawn->base, inst, name, type, mask); +} + +static inline void crypto_drop_cipher(struct crypto_cipher_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct crypto_alg *crypto_spawn_cipher_alg( + struct crypto_cipher_spawn *spawn) +{ + return spawn->base.alg; +} + +static inline struct crypto_cipher *crypto_spawn_cipher( + struct crypto_cipher_spawn *spawn) +{ + u32 type = CRYPTO_ALG_TYPE_CIPHER; + u32 mask = CRYPTO_ALG_TYPE_MASK; + + return __crypto_cipher_cast(crypto_spawn_tfm(&spawn->base, type, mask)); +} + +static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm) +{ + return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher; +} + +#endif diff --git a/include/crypto/internal/cryptouser.h b/include/crypto/internal/cryptouser.h new file mode 100644 index 000000000..fd5407433 --- /dev/null +++ b/include/crypto/internal/cryptouser.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/cryptouser.h> +#include <net/netlink.h> + +struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact); + +#ifdef CONFIG_CRYPTO_STATS +int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs); +#else +static inline int crypto_reportstat(struct sk_buff *in_skb, + struct nlmsghdr *in_nlh, + struct nlattr **attrs) +{ + return -ENOTSUPP; +} +#endif diff --git a/include/crypto/internal/des.h b/include/crypto/internal/des.h new file mode 100644 index 000000000..723fe5bf1 --- /dev/null +++ b/include/crypto/internal/des.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * DES & Triple DES EDE key verification helpers + */ + +#ifndef __CRYPTO_INTERNAL_DES_H +#define __CRYPTO_INTERNAL_DES_H + +#include <linux/crypto.h> +#include <linux/fips.h> +#include <crypto/des.h> +#include <crypto/aead.h> +#include <crypto/skcipher.h> + +/** + * crypto_des_verify_key - Check whether a DES key is weak + * @tfm: the crypto algo + * @key: the key buffer + * + * Returns -EINVAL if the key is weak and the crypto TFM does not permit weak + * keys. Otherwise, 0 is returned. 
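+ *
+ * Example (an illustrative sketch of a driver ->setkey() that uses the
+ * verify_skcipher_des_key() wrapper declared below; my_hw_load_des_key() is
+ * an assumed driver-specific helper, not part of this header):
+ *
+ *	static int my_des_setkey(struct crypto_skcipher *tfm,
+ *				 const u8 *key, unsigned int keylen)
+ *	{
+ *		int err;
+ *
+ *		err = verify_skcipher_des_key(tfm, key);
+ *		if (err)
+ *			return err;
+ *
+ *		return my_hw_load_des_key(tfm, key);
+ *	}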
+ * + * It is the job of the caller to ensure that the size of the key equals + * DES_KEY_SIZE. + */ +static inline int crypto_des_verify_key(struct crypto_tfm *tfm, const u8 *key) +{ + struct des_ctx tmp; + int err; + + err = des_expand_key(&tmp, key, DES_KEY_SIZE); + if (err == -ENOKEY) { + if (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) + err = -EINVAL; + else + err = 0; + } + memzero_explicit(&tmp, sizeof(tmp)); + return err; +} + +/* + * RFC2451: + * + * For DES-EDE3, there is no known need to reject weak or + * complementation keys. Any weakness is obviated by the use of + * multiple keys. + * + * However, if the first two or last two independent 64-bit keys are + * equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the + * same as DES. Implementers MUST reject keys that exhibit this + * property. + * + */ +static inline int des3_ede_verify_key(const u8 *key, unsigned int key_len, + bool check_weak) +{ + int ret = fips_enabled ? -EINVAL : -ENOKEY; + u32 K[6]; + + memcpy(K, key, DES3_EDE_KEY_SIZE); + + if ((!((K[0] ^ K[2]) | (K[1] ^ K[3])) || + !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && + (fips_enabled || check_weak)) + goto bad; + + if ((!((K[0] ^ K[4]) | (K[1] ^ K[5]))) && fips_enabled) + goto bad; + + ret = 0; +bad: + memzero_explicit(K, DES3_EDE_KEY_SIZE); + + return ret; +} + +/** + * crypto_des3_ede_verify_key - Check whether a DES3-EDE key is weak + * @tfm: the crypto algo + * @key: the key buffer + * + * Returns -EINVAL if the key is weak and the crypto TFM does not permit weak + * keys or when running in FIPS mode. Otherwise, 0 is returned. Note that some + * keys are rejected in FIPS mode even if weak keys are permitted by the TFM + * flags. + * + * It is the job of the caller to ensure that the size of the key equals + * DES3_EDE_KEY_SIZE. + */ +static inline int crypto_des3_ede_verify_key(struct crypto_tfm *tfm, + const u8 *key) +{ + return des3_ede_verify_key(key, DES3_EDE_KEY_SIZE, + crypto_tfm_get_flags(tfm) & + CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); +} + +static inline int verify_skcipher_des_key(struct crypto_skcipher *tfm, + const u8 *key) +{ + return crypto_des_verify_key(crypto_skcipher_tfm(tfm), key); +} + +static inline int verify_skcipher_des3_key(struct crypto_skcipher *tfm, + const u8 *key) +{ + return crypto_des3_ede_verify_key(crypto_skcipher_tfm(tfm), key); +} + +static inline int verify_aead_des_key(struct crypto_aead *tfm, const u8 *key, + int keylen) +{ + if (keylen != DES_KEY_SIZE) + return -EINVAL; + return crypto_des_verify_key(crypto_aead_tfm(tfm), key); +} + +static inline int verify_aead_des3_key(struct crypto_aead *tfm, const u8 *key, + int keylen) +{ + if (keylen != DES3_EDE_KEY_SIZE) + return -EINVAL; + return crypto_des3_ede_verify_key(crypto_aead_tfm(tfm), key); +} + +#endif /* __CRYPTO_INTERNAL_DES_H */ diff --git a/include/crypto/internal/ecc.h b/include/crypto/internal/ecc.h new file mode 100644 index 000000000..4f6c1a688 --- /dev/null +++ b/include/crypto/internal/ecc.h @@ -0,0 +1,281 @@ +/* + * Copyright (c) 2013, Kenneth MacKay + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef _CRYPTO_ECC_H +#define _CRYPTO_ECC_H + +#include <crypto/ecc_curve.h> +#include <asm/unaligned.h> + +/* One digit is u64 qword. */ +#define ECC_CURVE_NIST_P192_DIGITS 3 +#define ECC_CURVE_NIST_P256_DIGITS 4 +#define ECC_CURVE_NIST_P384_DIGITS 6 +#define ECC_MAX_DIGITS (512 / 64) /* due to ecrdsa */ + +#define ECC_DIGITS_TO_BYTES_SHIFT 3 + +#define ECC_MAX_BYTES (ECC_MAX_DIGITS << ECC_DIGITS_TO_BYTES_SHIFT) + +#define ECC_POINT_INIT(x, y, ndigits) (struct ecc_point) { x, y, ndigits } + +/** + * ecc_swap_digits() - Copy ndigits from big endian array to native array + * @in: Input array + * @out: Output array + * @ndigits: Number of digits to copy + */ +static inline void ecc_swap_digits(const void *in, u64 *out, unsigned int ndigits) +{ + const __be64 *src = (__force __be64 *)in; + int i; + + for (i = 0; i < ndigits; i++) + out[i] = get_unaligned_be64(&src[ndigits - 1 - i]); +} + +/** + * ecc_is_key_valid() - Validate a given ECDH private key + * + * @curve_id: id representing the curve to use + * @ndigits: curve's number of digits + * @private_key: private key to be used for the given curve + * @private_key_len: private key length + * + * Returns 0 if the key is acceptable, a negative value otherwise + */ +int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits, + const u64 *private_key, unsigned int private_key_len); + +/** + * ecc_gen_privkey() - Generates an ECC private key. + * The private key is a random integer in the range 0 < random < n, where n is a + * prime that is the order of the cyclic subgroup generated by the distinguished + * point G. + * @curve_id: id representing the curve to use + * @ndigits: curve number of digits + * @private_key: buffer for storing the generated private key + * + * Returns 0 if the private key was generated successfully, a negative value + * if an error occurred. + */ +int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey); + +/** + * ecc_make_pub_key() - Compute an ECC public key + * + * @curve_id: id representing the curve to use + * @ndigits: curve's number of digits + * @private_key: pregenerated private key for the given curve + * @public_key: buffer for storing the generated public key + * + * Returns 0 if the public key was generated successfully, a negative value + * if an error occurred. 
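+ *
+ * Example (an illustrative sketch with condensed error handling; the
+ * ECC_CURVE_NIST_P256 curve id comes from <crypto/ecc_curve.h> and the
+ * public key occupies 2 * ndigits qwords, one set per coordinate):
+ *
+ *	u64 priv[ECC_CURVE_NIST_P256_DIGITS];
+ *	u64 pub[2 * ECC_CURVE_NIST_P256_DIGITS];
+ *	int err;
+ *
+ *	err = ecc_gen_privkey(ECC_CURVE_NIST_P256,
+ *			      ECC_CURVE_NIST_P256_DIGITS, priv);
+ *	if (!err)
+ *		err = ecc_make_pub_key(ECC_CURVE_NIST_P256,
+ *				       ECC_CURVE_NIST_P256_DIGITS, priv, pub);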
+ */ +int ecc_make_pub_key(const unsigned int curve_id, unsigned int ndigits, + const u64 *private_key, u64 *public_key); + +/** + * crypto_ecdh_shared_secret() - Compute a shared secret + * + * @curve_id: id representing the curve to use + * @ndigits: curve's number of digits + * @private_key: private key of party A + * @public_key: public key of counterpart B + * @secret: buffer for storing the calculated shared secret + * + * Note: It is recommended that you hash the result of crypto_ecdh_shared_secret + * before using it for symmetric encryption or HMAC. + * + * Returns 0 if the shared secret was generated successfully, a negative value + * if an error occurred. + */ +int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, + const u64 *private_key, const u64 *public_key, + u64 *secret); + +/** + * ecc_is_pubkey_valid_partial() - Partial public key validation + * + * @curve: elliptic curve domain parameters + * @pk: public key as a point + * + * Validate public key according to SP800-56A section 5.6.2.3.4 ECC Partial + * Public-Key Validation Routine. + * + * Note: There is no check that the public key is in the correct elliptic curve + * subgroup. + * + * Return: 0 if validation is successful, -EINVAL if validation fails. + */ +int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve, + struct ecc_point *pk); + +/** + * ecc_is_pubkey_valid_full() - Full public key validation + * + * @curve: elliptic curve domain parameters + * @pk: public key as a point + * + * Validate public key according to SP800-56A section 5.6.2.3.3 ECC Full + * Public-Key Validation Routine. + * + * Return: 0 if validation is successful, -EINVAL if validation fails. + */ +int ecc_is_pubkey_valid_full(const struct ecc_curve *curve, + struct ecc_point *pk); + +/** + * vli_is_zero() - Determine if vli is zero + * + * @vli: vli to check. + * @ndigits: length of the @vli + */ +bool vli_is_zero(const u64 *vli, unsigned int ndigits); + +/** + * vli_cmp() - compare left and right vlis + * + * @left: vli + * @right: vli + * @ndigits: length of both vlis + * + * Returns sign of @left - @right, i.e. -1 if @left < @right, + * 0 if @left == @right, 1 if @left > @right. + */ +int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits); + +/** + * vli_sub() - Subtracts right from left + * + * @result: where to write result + * @left: vli + * @right: vli + * @ndigits: length of all vlis + * + * Note: can modify in-place. + * + * Return: carry bit.
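+ *
+ * Example (illustrative; a and b are assumed to be previously initialised
+ * vlis of nd digits, and the subtraction is done in place, a = a - b):
+ *
+ *	u64 borrow = vli_sub(a, a, b, nd);
+ *	if (borrow)
+ *		pr_debug("a was smaller than b, the result wrapped around\n");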
+ */ +u64 vli_sub(u64 *result, const u64 *left, const u64 *right, + unsigned int ndigits); + +/** + * vli_from_be64() - Load vli from big-endian u64 array + * + * @dest: destination vli + * @src: source array of u64 BE values + * @ndigits: length of both vli and array + */ +void vli_from_be64(u64 *dest, const void *src, unsigned int ndigits); + +/** + * vli_from_le64() - Load vli from little-endian u64 array + * + * @dest: destination vli + * @src: source array of u64 LE values + * @ndigits: length of both vli and array + */ +void vli_from_le64(u64 *dest, const void *src, unsigned int ndigits); + +/** + * vli_mod_inv() - Modular inversion + * + * @result: where to write vli number + * @input: vli value to operate on + * @mod: modulus + * @ndigits: length of all vlis + */ +void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod, + unsigned int ndigits); + +/** + * vli_mod_mult_slow() - Modular multiplication + * + * @result: where to write result value + * @left: vli number to multiply with @right + * @right: vli number to multiply with @left + * @mod: modulus + * @ndigits: length of all vlis + * + * Note: Assumes that @mod is big enough (e.g. the curve order). + */ +void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right, + const u64 *mod, unsigned int ndigits); + +/** + * vli_num_bits() - Counts the number of bits required for vli. + * + * @vli: vli to check. + * @ndigits: Length of the @vli + * + * Return: The number of bits required to represent @vli. + */ +unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits); + +/** + * ecc_alloc_point() - Allocate ECC point. + * + * @ndigits: Length of vlis in u64 qwords. + * + * Return: Pointer to the allocated point or NULL if allocation failed. + */ +struct ecc_point *ecc_alloc_point(unsigned int ndigits); + +/** + * ecc_free_point() - Free ECC point. + * + * @p: The point to free. + */ +void ecc_free_point(struct ecc_point *p); + +/** + * ecc_point_is_zero() - Check if point is zero. + * + * @point: Point to check for zero. + * + * Return: true if point is the point at infinity, false otherwise. + */ +bool ecc_point_is_zero(const struct ecc_point *point); + +/** + * ecc_point_mult_shamir() - Add two points multiplied by scalars + * + * @result: resulting point + * @x: scalar to multiply with @p + * @p: point to multiply with @x + * @y: scalar to multiply with @q + * @q: point to multiply with @y + * @curve: curve + * + * Returns result = x * p + y * q over the curve. + * This is faster than computing the two multiplications and the addition + * separately.
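+ *
+ * Example (an illustrative ECDSA-style sketch; res, u1, u2 and pub are
+ * assumed to be an allocated result point, two scalars and a public-key
+ * point whose digit counts match @curve):
+ *
+ *	ecc_point_mult_shamir(res, u1, &curve->g, u2, pub, curve);
+ *
+ * after which res holds u1 * g + u2 * pub.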
+ */ +void ecc_point_mult_shamir(const struct ecc_point *result, + const u64 *x, const struct ecc_point *p, + const u64 *y, const struct ecc_point *q, + const struct ecc_curve *curve); + +#endif diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h new file mode 100644 index 000000000..7fd7126f5 --- /dev/null +++ b/include/crypto/internal/geniv.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * geniv: IV generation + * + * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#ifndef _CRYPTO_INTERNAL_GENIV_H +#define _CRYPTO_INTERNAL_GENIV_H + +#include <crypto/internal/aead.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +struct aead_geniv_ctx { + spinlock_t lock; + struct crypto_aead *child; + struct crypto_sync_skcipher *sknull; + u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); +}; + +struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, + struct rtattr **tb); +int aead_init_geniv(struct crypto_aead *tfm); +void aead_exit_geniv(struct crypto_aead *tfm); + +#endif /* _CRYPTO_INTERNAL_GENIV_H */ diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h new file mode 100644 index 000000000..25806141d --- /dev/null +++ b/include/crypto/internal/hash.h @@ -0,0 +1,245 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Hash algorithms. + * + * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#ifndef _CRYPTO_INTERNAL_HASH_H +#define _CRYPTO_INTERNAL_HASH_H + +#include <crypto/algapi.h> +#include <crypto/hash.h> + +struct ahash_request; +struct scatterlist; + +struct crypto_hash_walk { + char *data; + + unsigned int offset; + unsigned int alignmask; + + struct page *pg; + unsigned int entrylen; + + unsigned int total; + struct scatterlist *sg; + + unsigned int flags; +}; + +struct ahash_instance { + void (*free)(struct ahash_instance *inst); + union { + struct { + char head[offsetof(struct ahash_alg, halg.base)]; + struct crypto_instance base; + } s; + struct ahash_alg alg; + }; +}; + +struct shash_instance { + void (*free)(struct shash_instance *inst); + union { + struct { + char head[offsetof(struct shash_alg, base)]; + struct crypto_instance base; + } s; + struct shash_alg alg; + }; +}; + +struct crypto_ahash_spawn { + struct crypto_spawn base; +}; + +struct crypto_shash_spawn { + struct crypto_spawn base; +}; + +int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err); +int crypto_hash_walk_first(struct ahash_request *req, + struct crypto_hash_walk *walk); + +static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk) +{ + return !(walk->entrylen | walk->total); +} + +int crypto_register_ahash(struct ahash_alg *alg); +void crypto_unregister_ahash(struct ahash_alg *alg); +int crypto_register_ahashes(struct ahash_alg *algs, int count); +void crypto_unregister_ahashes(struct ahash_alg *algs, int count); +int ahash_register_instance(struct crypto_template *tmpl, + struct ahash_instance *inst); + +bool crypto_shash_alg_has_setkey(struct shash_alg *alg); + +static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg) +{ + return crypto_shash_alg_has_setkey(alg) && + !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY); +} + +bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg); + +int crypto_grab_ahash(struct crypto_ahash_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask); + +static inline void crypto_drop_ahash(struct crypto_ahash_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + 
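+/*
+ * Example (an illustrative sketch, for exposition only): a typical driver
+ * ->update() walks the request scatterlist with the crypto_hash_walk
+ * helpers declared above; my_hash_process_block() stands in for the
+ * driver-specific block handler (an assumption of the example) and is
+ * expected to return 0 on success or a negative errno.
+ *
+ *	static int my_ahash_update(struct ahash_request *req)
+ *	{
+ *		struct crypto_hash_walk walk;
+ *		int nbytes;
+ *
+ *		for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
+ *		     nbytes = crypto_hash_walk_done(&walk, nbytes))
+ *			nbytes = my_hash_process_block(req, walk.data, nbytes);
+ *
+ *		return nbytes;
+ *	}
+ */
+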
+static inline struct hash_alg_common *crypto_spawn_ahash_alg( + struct crypto_ahash_spawn *spawn) +{ + return __crypto_hash_alg_common(spawn->base.alg); +} + +int crypto_register_shash(struct shash_alg *alg); +void crypto_unregister_shash(struct shash_alg *alg); +int crypto_register_shashes(struct shash_alg *algs, int count); +void crypto_unregister_shashes(struct shash_alg *algs, int count); +int shash_register_instance(struct crypto_template *tmpl, + struct shash_instance *inst); +void shash_free_singlespawn_instance(struct shash_instance *inst); + +int crypto_grab_shash(struct crypto_shash_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask); + +static inline void crypto_drop_shash(struct crypto_shash_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct shash_alg *crypto_spawn_shash_alg( + struct crypto_shash_spawn *spawn) +{ + return __crypto_shash_alg(spawn->base.alg); +} + +int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc); +int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc); +int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc); + +int crypto_init_shash_ops_async(struct crypto_tfm *tfm); + +static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm) +{ + return crypto_tfm_ctx(crypto_ahash_tfm(tfm)); +} + +static inline struct ahash_alg *__crypto_ahash_alg(struct crypto_alg *alg) +{ + return container_of(__crypto_hash_alg_common(alg), struct ahash_alg, + halg); +} + +static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm, + unsigned int reqsize) +{ + tfm->reqsize = reqsize; +} + +static inline struct crypto_instance *ahash_crypto_instance( + struct ahash_instance *inst) +{ + return &inst->s.base; +} + +static inline struct ahash_instance *ahash_instance( + struct crypto_instance *inst) +{ + return container_of(inst, struct ahash_instance, s.base); +} + +static inline struct ahash_instance *ahash_alg_instance( + struct crypto_ahash *ahash) +{ + return ahash_instance(crypto_tfm_alg_instance(&ahash->base)); +} + +static inline void *ahash_instance_ctx(struct ahash_instance *inst) +{ + return crypto_instance_ctx(ahash_crypto_instance(inst)); +} + +static inline void ahash_request_complete(struct ahash_request *req, int err) +{ + req->base.complete(&req->base, err); +} + +static inline u32 ahash_request_flags(struct ahash_request *req) +{ + return req->base.flags; +} + +static inline struct crypto_ahash *crypto_spawn_ahash( + struct crypto_ahash_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +static inline int ahash_enqueue_request(struct crypto_queue *queue, + struct ahash_request *request) +{ + return crypto_enqueue_request(queue, &request->base); +} + +static inline struct ahash_request *ahash_dequeue_request( + struct crypto_queue *queue) +{ + return ahash_request_cast(crypto_dequeue_request(queue)); +} + +static inline void *crypto_shash_ctx(struct crypto_shash *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + +static inline struct crypto_instance *shash_crypto_instance( + struct shash_instance *inst) +{ + return &inst->s.base; +} + +static inline struct shash_instance *shash_instance( + struct crypto_instance *inst) +{ + return container_of(inst, struct shash_instance, s.base); +} + +static inline struct shash_instance *shash_alg_instance( + struct crypto_shash *shash) +{ + return shash_instance(crypto_tfm_alg_instance(&shash->base)); +} + +static inline void *shash_instance_ctx(struct shash_instance *inst) +{ + return 
crypto_instance_ctx(shash_crypto_instance(inst)); +} + +static inline struct crypto_shash *crypto_spawn_shash( + struct crypto_shash_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +static inline void *crypto_shash_ctx_aligned(struct crypto_shash *tfm) +{ + return crypto_tfm_ctx_aligned(&tfm->base); +} + +static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_shash, base); +} + +#endif /* _CRYPTO_INTERNAL_HASH_H */ + diff --git a/include/crypto/internal/kdf_selftest.h b/include/crypto/internal/kdf_selftest.h new file mode 100644 index 000000000..4d03d2af5 --- /dev/null +++ b/include/crypto/internal/kdf_selftest.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Copyright (C) 2021, Stephan Mueller <smueller@chronox.de> + */ + +#ifndef _CRYPTO_KDF_SELFTEST_H +#define _CRYPTO_KDF_SELFTEST_H + +#include <crypto/hash.h> +#include <linux/uio.h> + +struct kdf_testvec { + unsigned char *key; + size_t keylen; + unsigned char *ikm; + size_t ikmlen; + struct kvec info; + unsigned char *expected; + size_t expectedlen; +}; + +static inline int +kdf_test(const struct kdf_testvec *test, const char *name, + int (*crypto_kdf_setkey)(struct crypto_shash *kmd, + const u8 *key, size_t keylen, + const u8 *ikm, size_t ikmlen), + int (*crypto_kdf_generate)(struct crypto_shash *kmd, + const struct kvec *info, + unsigned int info_nvec, + u8 *dst, unsigned int dlen)) +{ + struct crypto_shash *kmd; + int ret; + u8 *buf = kzalloc(test->expectedlen, GFP_KERNEL); + + if (!buf) + return -ENOMEM; + + kmd = crypto_alloc_shash(name, 0, 0); + if (IS_ERR(kmd)) { + pr_err("alg: kdf: could not allocate hash handle for %s\n", + name); + kfree(buf); + return -ENOMEM; + } + + ret = crypto_kdf_setkey(kmd, test->key, test->keylen, + test->ikm, test->ikmlen); + if (ret) { + pr_err("alg: kdf: could not set key derivation key\n"); + goto err; + } + + ret = crypto_kdf_generate(kmd, &test->info, 1, buf, test->expectedlen); + if (ret) { + pr_err("alg: kdf: could not obtain key data\n"); + goto err; + } + + ret = memcmp(test->expected, buf, test->expectedlen); + if (ret) + ret = -EINVAL; + +err: + crypto_free_shash(kmd); + kfree(buf); + return ret; +} + +#endif /* _CRYPTO_KDF_SELFTEST_H */ diff --git a/include/crypto/internal/kpp.h b/include/crypto/internal/kpp.h new file mode 100644 index 000000000..31ff3c198 --- /dev/null +++ b/include/crypto/internal/kpp.h @@ -0,0 +1,223 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Key-agreement Protocol Primitives (KPP) + * + * Copyright (c) 2016, Intel Corporation + * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com> + */ +#ifndef _CRYPTO_KPP_INT_H +#define _CRYPTO_KPP_INT_H +#include <crypto/kpp.h> +#include <crypto/algapi.h> + +/** + * struct kpp_instance - KPP template instance + * @free: Callback getting invoked upon instance destruction. Must be set. + * @s: Internal. Generic crypto core instance state properly layout + * to alias with @alg as needed. + * @alg: The &struct kpp_alg implementation provided by the instance. + */ +struct kpp_instance { + void (*free)(struct kpp_instance *inst); + union { + struct { + char head[offsetof(struct kpp_alg, base)]; + struct crypto_instance base; + } s; + struct kpp_alg alg; + }; +}; + +/** + * struct crypto_kpp_spawn - KPP algorithm spawn + * @base: Internal. Generic crypto core spawn state. + * + * Template instances can get a hold on some inner KPP algorithm by + * binding a &struct crypto_kpp_spawn via + * crypto_grab_kpp(). 
Transforms may subsequently get instantiated + * from the referenced inner &struct kpp_alg by means of + * crypto_spawn_kpp(). + */ +struct crypto_kpp_spawn { + struct crypto_spawn base; +}; + +/* + * Transform internal helpers. + */ +static inline void *kpp_request_ctx(struct kpp_request *req) +{ + return req->__ctx; +} + +static inline void kpp_set_reqsize(struct crypto_kpp *kpp, + unsigned int reqsize) +{ + crypto_kpp_alg(kpp)->reqsize = reqsize; +} + +static inline void *kpp_tfm_ctx(struct crypto_kpp *tfm) +{ + return tfm->base.__crt_ctx; +} + +static inline void kpp_request_complete(struct kpp_request *req, int err) +{ + req->base.complete(&req->base, err); +} + +static inline const char *kpp_alg_name(struct crypto_kpp *tfm) +{ + return crypto_kpp_tfm(tfm)->__crt_alg->cra_name; +} + +/* + * Template instance internal helpers. + */ +/** + * kpp_crypto_instance() - Cast a &struct kpp_instance to the corresponding + * generic &struct crypto_instance. + * @inst: Pointer to the &struct kpp_instance to be cast. + * Return: A pointer to the &struct crypto_instance embedded in @inst. + */ +static inline struct crypto_instance *kpp_crypto_instance( + struct kpp_instance *inst) +{ + return &inst->s.base; +} + +/** + * kpp_instance() - Cast a generic &struct crypto_instance to the corresponding + * &struct kpp_instance. + * @inst: Pointer to the &struct crypto_instance to be cast. + * Return: A pointer to the &struct kpp_instance @inst is embedded in. + */ +static inline struct kpp_instance *kpp_instance(struct crypto_instance *inst) +{ + return container_of(inst, struct kpp_instance, s.base); +} + +/** + * kpp_alg_instance() - Get the &struct kpp_instance a given KPP transform has + * been instantiated from. + * @kpp: The KPP transform instantiated from some &struct kpp_instance. + * Return: The &struct kpp_instance associated with @kpp. + */ +static inline struct kpp_instance *kpp_alg_instance(struct crypto_kpp *kpp) +{ + return kpp_instance(crypto_tfm_alg_instance(&kpp->base)); +} + +/** + * kpp_instance_ctx() - Get a pointer to a &struct kpp_instance's implementation + * specific context data. + * @inst: The &struct kpp_instance whose context data to access. + * + * A KPP template implementation may allocate extra memory beyond the + * end of a &struct kpp_instance instantiated from &crypto_template.create(). + * This function provides a means to obtain a pointer to this area. + * + * Return: A pointer to the implementation specific context data. + */ +static inline void *kpp_instance_ctx(struct kpp_instance *inst) +{ + return crypto_instance_ctx(kpp_crypto_instance(inst)); +} + +/* + * KPP algorithm (un)registration functions. + */ +/** + * crypto_register_kpp() -- Register key-agreement protocol primitives algorithm + * + * Function registers an implementation of a key-agreement protocol primitive + * algorithm + * + * @alg: algorithm definition + * + * Return: zero on success; error code in case of error + */ +int crypto_register_kpp(struct kpp_alg *alg); + +/** + * crypto_unregister_kpp() -- Unregister key-agreement protocol primitive + * algorithm + * + * Function unregisters an implementation of a key-agreement protocol primitive + * algorithm + * + * @alg: algorithm definition + */ +void crypto_unregister_kpp(struct kpp_alg *alg); + +/** + * kpp_register_instance() - Register a KPP template instance. + * @tmpl: The instantiating template. + * @inst: The KPP template instance to be registered. + * Return: %0 on success, negative error code otherwise. 
+ */ +int kpp_register_instance(struct crypto_template *tmpl, + struct kpp_instance *inst); + +/* + * KPP spawn related functions. + */ +/** + * crypto_grab_kpp() - Look up a KPP algorithm and bind a spawn to it. + * @spawn: The KPP spawn to bind. + * @inst: The template instance owning @spawn. + * @name: The KPP algorithm name to look up. + * @type: The type bitset to pass on to the lookup. + * @mask: The mask bitmask to pass on to the lookup. + * Return: %0 on success, a negative error code otherwise. + */ +int crypto_grab_kpp(struct crypto_kpp_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask); + +/** + * crypto_drop_kpp() - Release a spawn previously bound via crypto_grab_kpp(). + * @spawn: The spawn to release. + */ +static inline void crypto_drop_kpp(struct crypto_kpp_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +/** + * crypto_spawn_kpp_alg() - Get the algorithm a KPP spawn has been bound to. + * @spawn: The spawn to get the referenced &struct kpp_alg for. + * + * This function as well as the returned result are safe to use only + * after @spawn has been successfully bound via crypto_grab_kpp() and + * until the template instance owning @spawn has either been + * registered successfully or the spawn has been released again via + * crypto_drop_spawn(). + * + * Return: A pointer to the &struct kpp_alg referenced from the spawn. + */ +static inline struct kpp_alg *crypto_spawn_kpp_alg( + struct crypto_kpp_spawn *spawn) +{ + return container_of(spawn->base.alg, struct kpp_alg, base); +} + +/** + * crypto_spawn_kpp() - Create a transform from a KPP spawn. + * @spawn: The spawn previously bound to some &struct kpp_alg via + * crypto_grab_kpp(). + * + * Once a &struct crypto_kpp_spawn has been successfully bound to a + * &struct kpp_alg via crypto_grab_kpp(), transforms for the latter + * may get instantiated from the former by means of this function. + * + * Return: A pointer to the freshly created KPP transform on success + * or an ``ERR_PTR()`` otherwise. + */ +static inline struct crypto_kpp *crypto_spawn_kpp( + struct crypto_kpp_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +#endif diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h new file mode 100644 index 000000000..196aa769f --- /dev/null +++ b/include/crypto/internal/poly1305.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for the Poly1305 algorithm + */ + +#ifndef _CRYPTO_INTERNAL_POLY1305_H +#define _CRYPTO_INTERNAL_POLY1305_H + +#include <asm/unaligned.h> +#include <linux/types.h> +#include <crypto/poly1305.h> + +/* + * Poly1305 core functions. These only accept whole blocks; the caller must + * handle any needed block buffering and padding. 'hibit' must be 1 for any + * full blocks, or 0 for the final block if it had to be padded. If 'nonce' is + * non-NULL, then it's added at the end to compute the Poly1305 MAC. Otherwise, + * only the ε-almost-∆-universal hash function (not the full MAC) is computed.
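+ *
+ * Example (an illustrative one-shot MAC sketch; raw_key is assumed to be the
+ * full 32-byte Poly1305 key, whose first 16 bytes are the r part and last
+ * 16 bytes the nonce, and msg/len are assumed to describe a whole number of
+ * 16-byte blocks):
+ *
+ *	struct poly1305_core_key rkey;
+ *	struct poly1305_state state;
+ *	u32 nonce[4];
+ *	u8 mac[POLY1305_DIGEST_SIZE];
+ *	int i;
+ *
+ *	poly1305_core_setkey(&rkey, raw_key);
+ *	for (i = 0; i < 4; i++)
+ *		nonce[i] = get_unaligned_le32(raw_key + 16 + i * 4);
+ *
+ *	poly1305_core_init(&state);
+ *	poly1305_core_blocks(&state, &rkey, msg, len / POLY1305_BLOCK_SIZE, 1);
+ *	poly1305_core_emit(&state, nonce, mac);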
+ */ + +void poly1305_core_setkey(struct poly1305_core_key *key, + const u8 raw_key[POLY1305_BLOCK_SIZE]); +static inline void poly1305_core_init(struct poly1305_state *state) +{ + *state = (struct poly1305_state){}; +} + +void poly1305_core_blocks(struct poly1305_state *state, + const struct poly1305_core_key *key, const void *src, + unsigned int nblocks, u32 hibit); +void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4], + void *dst); + +#endif diff --git a/include/crypto/internal/rng.h b/include/crypto/internal/rng.h new file mode 100644 index 000000000..e0711b6a5 --- /dev/null +++ b/include/crypto/internal/rng.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * RNG: Random Number Generator algorithms under the crypto API + * + * Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com> + * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#ifndef _CRYPTO_INTERNAL_RNG_H +#define _CRYPTO_INTERNAL_RNG_H + +#include <crypto/algapi.h> +#include <crypto/rng.h> + +int crypto_register_rng(struct rng_alg *alg); +void crypto_unregister_rng(struct rng_alg *alg); +int crypto_register_rngs(struct rng_alg *algs, int count); +void crypto_unregister_rngs(struct rng_alg *algs, int count); + +#if defined(CONFIG_CRYPTO_RNG) || defined(CONFIG_CRYPTO_RNG_MODULE) +int crypto_del_default_rng(void); +#else +static inline int crypto_del_default_rng(void) +{ + return 0; +} +#endif + +static inline void *crypto_rng_ctx(struct crypto_rng *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + +static inline void crypto_rng_set_entropy(struct crypto_rng *tfm, + const u8 *data, unsigned int len) +{ + crypto_rng_alg(tfm)->set_ent(tfm, data, len); +} + +#endif diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h new file mode 100644 index 000000000..e870133f4 --- /dev/null +++ b/include/crypto/internal/rsa.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * RSA internal helpers + * + * Copyright (c) 2015, Intel Corporation + * Authors: Tadeusz Struk <tadeusz.struk@intel.com> + */ +#ifndef _RSA_HELPER_ +#define _RSA_HELPER_ +#include <linux/types.h> + +/** + * rsa_key - RSA key structure + * @n : RSA modulus raw byte stream + * @e : RSA public exponent raw byte stream + * @d : RSA private exponent raw byte stream + * @p : RSA prime factor p of n raw byte stream + * @q : RSA prime factor q of n raw byte stream + * @dp : RSA exponent d mod (p - 1) raw byte stream + * @dq : RSA exponent d mod (q - 1) raw byte stream + * @qinv : RSA CRT coefficient q^(-1) mod p raw byte stream + * @n_sz : length in bytes of RSA modulus n + * @e_sz : length in bytes of RSA public exponent + * @d_sz : length in bytes of RSA private exponent + * @p_sz : length in bytes of p field + * @q_sz : length in bytes of q field + * @dp_sz : length in bytes of dp field + * @dq_sz : length in bytes of dq field + * @qinv_sz : length in bytes of qinv field + */ +struct rsa_key { + const u8 *n; + const u8 *e; + const u8 *d; + const u8 *p; + const u8 *q; + const u8 *dp; + const u8 *dq; + const u8 *qinv; + size_t n_sz; + size_t e_sz; + size_t d_sz; + size_t p_sz; + size_t q_sz; + size_t dp_sz; + size_t dq_sz; + size_t qinv_sz; +}; + +int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key, + unsigned int key_len); + +int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key, + unsigned int key_len); + +extern struct crypto_template rsa_pkcs1pad_tmpl; +#endif diff --git a/include/crypto/internal/scompress.h 
b/include/crypto/internal/scompress.h new file mode 100644 index 000000000..f834274c2 --- /dev/null +++ b/include/crypto/internal/scompress.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Synchronous Compression operations + * + * Copyright 2015 LG Electronics Inc. + * Copyright (c) 2016, Intel Corporation + * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com> + */ +#ifndef _CRYPTO_SCOMP_INT_H +#define _CRYPTO_SCOMP_INT_H +#include <linux/crypto.h> + +#define SCOMP_SCRATCH_SIZE 131072 + +struct crypto_scomp { + struct crypto_tfm base; +}; + +/** + * struct scomp_alg - synchronous compression algorithm + * + * @alloc_ctx: Function allocates algorithm specific context + * @free_ctx: Function frees context allocated with alloc_ctx + * @compress: Function performs a compress operation + * @decompress: Function performs a de-compress operation + * @base: Common crypto API algorithm data structure + */ +struct scomp_alg { + void *(*alloc_ctx)(struct crypto_scomp *tfm); + void (*free_ctx)(struct crypto_scomp *tfm, void *ctx); + int (*compress)(struct crypto_scomp *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen, + void *ctx); + int (*decompress)(struct crypto_scomp *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen, + void *ctx); + struct crypto_alg base; +}; + +static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg) +{ + return container_of(alg, struct scomp_alg, base); +} + +static inline struct crypto_scomp *__crypto_scomp_tfm(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_scomp, base); +} + +static inline struct crypto_tfm *crypto_scomp_tfm(struct crypto_scomp *tfm) +{ + return &tfm->base; +} + +static inline void crypto_free_scomp(struct crypto_scomp *tfm) +{ + crypto_destroy_tfm(tfm, crypto_scomp_tfm(tfm)); +} + +static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm) +{ + return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg); +} + +static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm) +{ + return crypto_scomp_alg(tfm)->alloc_ctx(tfm); +} + +static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm, + void *ctx) +{ + return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx); +} + +static inline int crypto_scomp_compress(struct crypto_scomp *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen, void *ctx) +{ + return crypto_scomp_alg(tfm)->compress(tfm, src, slen, dst, dlen, ctx); +} + +static inline int crypto_scomp_decompress(struct crypto_scomp *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen, + void *ctx) +{ + return crypto_scomp_alg(tfm)->decompress(tfm, src, slen, dst, dlen, + ctx); +} + +int crypto_init_scomp_ops_async(struct crypto_tfm *tfm); +struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req); +void crypto_acomp_scomp_free_ctx(struct acomp_req *req); + +/** + * crypto_register_scomp() -- Register synchronous compression algorithm + * + * Function registers an implementation of a synchronous + * compression algorithm + * + * @alg: algorithm definition + * + * Return: zero on success; error code in case of error + */ +int crypto_register_scomp(struct scomp_alg *alg); + +/** + * crypto_unregister_scomp() -- Unregister synchronous compression algorithm + * + * Function unregisters an implementation of a synchronous + * compression algorithm + * + * @alg: algorithm definition + */ +void crypto_unregister_scomp(struct scomp_alg *alg); + +int crypto_register_scomps(struct scomp_alg 
*algs, int count); +void crypto_unregister_scomps(struct scomp_alg *algs, int count); + +#endif diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h new file mode 100644 index 000000000..d2316242a --- /dev/null +++ b/include/crypto/internal/simd.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Shared crypto simd helpers + */ + +#ifndef _CRYPTO_INTERNAL_SIMD_H +#define _CRYPTO_INTERNAL_SIMD_H + +#include <linux/percpu.h> +#include <linux/types.h> + +/* skcipher support */ + +struct simd_skcipher_alg; +struct skcipher_alg; + +struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname, + const char *drvname, + const char *basename); +struct simd_skcipher_alg *simd_skcipher_create(const char *algname, + const char *basename); +void simd_skcipher_free(struct simd_skcipher_alg *alg); + +int simd_register_skciphers_compat(struct skcipher_alg *algs, int count, + struct simd_skcipher_alg **simd_algs); + +void simd_unregister_skciphers(struct skcipher_alg *algs, int count, + struct simd_skcipher_alg **simd_algs); + +/* AEAD support */ + +struct simd_aead_alg; +struct aead_alg; + +struct simd_aead_alg *simd_aead_create_compat(const char *algname, + const char *drvname, + const char *basename); +struct simd_aead_alg *simd_aead_create(const char *algname, + const char *basename); +void simd_aead_free(struct simd_aead_alg *alg); + +int simd_register_aeads_compat(struct aead_alg *algs, int count, + struct simd_aead_alg **simd_algs); + +void simd_unregister_aeads(struct aead_alg *algs, int count, + struct simd_aead_alg **simd_algs); + +/* + * crypto_simd_usable() - is it allowed at this time to use SIMD instructions or + * access the SIMD register file? + * + * This delegates to may_use_simd(), except that this also returns false if SIMD + * in crypto code has been temporarily disabled on this CPU by the crypto + * self-tests, in order to test the no-SIMD fallback code. This override is + * currently limited to configurations where the extra self-tests are enabled, + * because it might be a bit too invasive to be part of the regular self-tests. + * + * This is a macro so that <asm/simd.h>, which some architectures don't have, + * doesn't have to be included directly here. + */ +#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS +DECLARE_PER_CPU(bool, crypto_simd_disabled_for_test); +#define crypto_simd_usable() \ + (may_use_simd() && !this_cpu_read(crypto_simd_disabled_for_test)) +#else +#define crypto_simd_usable() may_use_simd() +#endif + +#endif /* _CRYPTO_INTERNAL_SIMD_H */ diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h new file mode 100644 index 000000000..a2339f80a --- /dev/null +++ b/include/crypto/internal/skcipher.h @@ -0,0 +1,223 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Symmetric key ciphers. 
+ * + * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#ifndef _CRYPTO_INTERNAL_SKCIPHER_H +#define _CRYPTO_INTERNAL_SKCIPHER_H + +#include <crypto/algapi.h> +#include <crypto/internal/cipher.h> +#include <crypto/skcipher.h> +#include <linux/list.h> +#include <linux/types.h> + +struct aead_request; +struct rtattr; + +struct skcipher_instance { + void (*free)(struct skcipher_instance *inst); + union { + struct { + char head[offsetof(struct skcipher_alg, base)]; + struct crypto_instance base; + } s; + struct skcipher_alg alg; + }; +}; + +struct crypto_skcipher_spawn { + struct crypto_spawn base; +}; + +struct skcipher_walk { + union { + struct { + struct page *page; + unsigned long offset; + } phys; + + struct { + u8 *page; + void *addr; + } virt; + } src, dst; + + struct scatter_walk in; + unsigned int nbytes; + + struct scatter_walk out; + unsigned int total; + + struct list_head buffers; + + u8 *page; + u8 *buffer; + u8 *oiv; + void *iv; + + unsigned int ivsize; + + int flags; + unsigned int blocksize; + unsigned int stride; + unsigned int alignmask; +}; + +static inline struct crypto_instance *skcipher_crypto_instance( + struct skcipher_instance *inst) +{ + return &inst->s.base; +} + +static inline struct skcipher_instance *skcipher_alg_instance( + struct crypto_skcipher *skcipher) +{ + return container_of(crypto_skcipher_alg(skcipher), + struct skcipher_instance, alg); +} + +static inline void *skcipher_instance_ctx(struct skcipher_instance *inst) +{ + return crypto_instance_ctx(skcipher_crypto_instance(inst)); +} + +static inline void skcipher_request_complete(struct skcipher_request *req, int err) +{ + req->base.complete(&req->base, err); +} + +int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask); + +static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct skcipher_alg *crypto_skcipher_spawn_alg( + struct crypto_skcipher_spawn *spawn) +{ + return container_of(spawn->base.alg, struct skcipher_alg, base); +} + +static inline struct skcipher_alg *crypto_spawn_skcipher_alg( + struct crypto_skcipher_spawn *spawn) +{ + return crypto_skcipher_spawn_alg(spawn); +} + +static inline struct crypto_skcipher *crypto_spawn_skcipher( + struct crypto_skcipher_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +static inline void crypto_skcipher_set_reqsize( + struct crypto_skcipher *skcipher, unsigned int reqsize) +{ + skcipher->reqsize = reqsize; +} + +int crypto_register_skcipher(struct skcipher_alg *alg); +void crypto_unregister_skcipher(struct skcipher_alg *alg); +int crypto_register_skciphers(struct skcipher_alg *algs, int count); +void crypto_unregister_skciphers(struct skcipher_alg *algs, int count); +int skcipher_register_instance(struct crypto_template *tmpl, + struct skcipher_instance *inst); + +int skcipher_walk_done(struct skcipher_walk *walk, int err); +int skcipher_walk_virt(struct skcipher_walk *walk, + struct skcipher_request *req, + bool atomic); +int skcipher_walk_async(struct skcipher_walk *walk, + struct skcipher_request *req); +int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, + struct aead_request *req, bool atomic); +int skcipher_walk_aead_decrypt(struct skcipher_walk *walk, + struct aead_request *req, bool atomic); +void skcipher_walk_complete(struct skcipher_walk *walk, int err); + +static inline void skcipher_walk_abort(struct skcipher_walk *walk) +{ + 
skcipher_walk_done(walk, -ECANCELED); +} + +static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + +static inline void *skcipher_request_ctx(struct skcipher_request *req) +{ + return req->__ctx; +} + +static inline u32 skcipher_request_flags(struct skcipher_request *req) +{ + return req->base.flags; +} + +static inline unsigned int crypto_skcipher_alg_min_keysize( + struct skcipher_alg *alg) +{ + return alg->min_keysize; +} + +static inline unsigned int crypto_skcipher_alg_max_keysize( + struct skcipher_alg *alg) +{ + return alg->max_keysize; +} + +static inline unsigned int crypto_skcipher_alg_walksize( + struct skcipher_alg *alg) +{ + return alg->walksize; +} + +/** + * crypto_skcipher_walksize() - obtain walk size + * @tfm: cipher handle + * + * In some cases, algorithms can only perform optimally when operating on + * multiple blocks in parallel. This is reflected by the walksize, which + * must be a multiple of the chunksize (or equal if the concern does not + * apply) + * + * Return: walk size in bytes + */ +static inline unsigned int crypto_skcipher_walksize( + struct crypto_skcipher *tfm) +{ + return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm)); +} + +/* Helpers for simple block cipher modes of operation */ +struct skcipher_ctx_simple { + struct crypto_cipher *cipher; /* underlying block cipher */ +}; +static inline struct crypto_cipher * +skcipher_cipher_simple(struct crypto_skcipher *tfm) +{ + struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm); + + return ctx->cipher; +} + +struct skcipher_instance *skcipher_alloc_instance_simple( + struct crypto_template *tmpl, struct rtattr **tb); + +static inline struct crypto_alg *skcipher_ialg_simple( + struct skcipher_instance *inst) +{ + struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst); + + return crypto_spawn_cipher_alg(spawn); +} + +#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */ + diff --git a/include/crypto/kdf_sp800108.h b/include/crypto/kdf_sp800108.h new file mode 100644 index 000000000..b7b20a778 --- /dev/null +++ b/include/crypto/kdf_sp800108.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Copyright (C) 2021, Stephan Mueller <smueller@chronox.de> + */ + +#ifndef _CRYPTO_KDF108_H +#define _CRYPTO_KDF108_H + +#include <crypto/hash.h> +#include <linux/uio.h> + +/** + * Counter KDF generate operation according to SP800-108 section 5.1 + * as well as SP800-56A section 5.8.1 (Single-step KDF). + * + * @kmd Keyed message digest whose key was set with crypto_kdf108_setkey or + * unkeyed message digest + * @info optional context and application specific information - this may be + * NULL + * @info_vec number of optional context/application specific information entries + * @dst destination buffer that the caller already allocated + * @dlen length of the destination buffer - the KDF derives that amount of + * bytes. + * + * To comply with SP800-108, the caller must provide Label || 0x00 || Context + * in the info parameter. + * + * @return 0 on success, < 0 on error + */ +int crypto_kdf108_ctr_generate(struct crypto_shash *kmd, + const struct kvec *info, unsigned int info_nvec, + u8 *dst, unsigned int dlen); + +/** + * Counter KDF setkey operation + * + * @kmd Keyed message digest allocated by the caller. The key should not have + * been set. + * @key Seed key to be used to initialize the keyed message digest context. + * @keylen This length of the key buffer. 
+ * @ikm The SP800-108 KDF does not support IKM - this parameter must be NULL + * @ikmlen This parameter must be 0. + * + * According to SP800-108 section 7.2, the seed key must be at least as large as + * the message digest size of the used keyed message digest. This limitation + * is enforced by the implementation. + * + * SP800-108 allows the use of either a HMAC or a hash primitive. When + * the caller intends to use a hash primitive, the call to + * crypto_kdf108_setkey is not required and the key derivation operation can + * immediately performed using crypto_kdf108_ctr_generate after allocating + * a handle. + * + * @return 0 on success, < 0 on error + */ +int crypto_kdf108_setkey(struct crypto_shash *kmd, + const u8 *key, size_t keylen, + const u8 *ikm, size_t ikmlen); + +#endif /* _CRYPTO_KDF108_H */ diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h new file mode 100644 index 000000000..24d01e987 --- /dev/null +++ b/include/crypto/kpp.h @@ -0,0 +1,364 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Key-agreement Protocol Primitives (KPP) + * + * Copyright (c) 2016, Intel Corporation + * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com> + */ + +#ifndef _CRYPTO_KPP_ +#define _CRYPTO_KPP_ +#include <linux/crypto.h> + +/** + * struct kpp_request + * + * @base: Common attributes for async crypto requests + * @src: Source data + * @dst: Destination data + * @src_len: Size of the input buffer + * @dst_len: Size of the output buffer. It needs to be at least + * as big as the expected result depending on the operation + * After operation it will be updated with the actual size of the + * result. In case of error where the dst sgl size was insufficient, + * it will be updated to the size required for the operation. + * @__ctx: Start of private context data + */ +struct kpp_request { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int src_len; + unsigned int dst_len; + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +/** + * struct crypto_kpp - user-instantiated object which encapsulate + * algorithms and core processing logic + * + * @base: Common crypto API algorithm data structure + */ +struct crypto_kpp { + struct crypto_tfm base; +}; + +/** + * struct kpp_alg - generic key-agreement protocol primitives + * + * @set_secret: Function invokes the protocol specific function to + * store the secret private key along with parameters. + * The implementation knows how to decode the buffer + * @generate_public_key: Function generate the public key to be sent to the + * counterpart. In case of error, where output is not big + * enough req->dst_len will be updated to the size + * required + * @compute_shared_secret: Function compute the shared secret as defined by + * the algorithm. The result is given back to the user. + * In case of error, where output is not big enough, + * req->dst_len will be updated to the size required + * @max_size: Function returns the size of the output buffer + * @init: Initialize the object. This is called only once at + * instantiation time. In case the cryptographic hardware + * needs to be initialized. Software fallback should be + * put in place here. + * @exit: Undo everything @init did. 
+ * + * @reqsize: Request context size required by algorithm + * implementation + * @base: Common crypto API algorithm data structure + */ +struct kpp_alg { + int (*set_secret)(struct crypto_kpp *tfm, const void *buffer, + unsigned int len); + int (*generate_public_key)(struct kpp_request *req); + int (*compute_shared_secret)(struct kpp_request *req); + + unsigned int (*max_size)(struct crypto_kpp *tfm); + + int (*init)(struct crypto_kpp *tfm); + void (*exit)(struct crypto_kpp *tfm); + + unsigned int reqsize; + struct crypto_alg base; +}; + +/** + * DOC: Generic Key-agreement Protocol Primitives API + * + * The KPP API is used with the algorithm type + * CRYPTO_ALG_TYPE_KPP (listed as type "kpp" in /proc/crypto) + */ + +/** + * crypto_alloc_kpp() - allocate KPP tfm handle + * @alg_name: is the name of the kpp algorithm (e.g. "dh", "ecdh") + * @type: specifies the type of the algorithm + * @mask: specifies the mask for the algorithm + * + * Allocate a handle for kpp algorithm. The returned struct crypto_kpp + * is required for any following API invocation + * + * Return: allocated handle in case of success; IS_ERR() is true in case of + * an error, PTR_ERR() returns the error code. + */ +struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask); + +int crypto_has_kpp(const char *alg_name, u32 type, u32 mask); + +static inline struct crypto_tfm *crypto_kpp_tfm(struct crypto_kpp *tfm) +{ + return &tfm->base; +} + +static inline struct kpp_alg *__crypto_kpp_alg(struct crypto_alg *alg) +{ + return container_of(alg, struct kpp_alg, base); +} + +static inline struct crypto_kpp *__crypto_kpp_tfm(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_kpp, base); +} + +static inline struct kpp_alg *crypto_kpp_alg(struct crypto_kpp *tfm) +{ + return __crypto_kpp_alg(crypto_kpp_tfm(tfm)->__crt_alg); +} + +static inline unsigned int crypto_kpp_reqsize(struct crypto_kpp *tfm) +{ + return crypto_kpp_alg(tfm)->reqsize; +} + +static inline void kpp_request_set_tfm(struct kpp_request *req, + struct crypto_kpp *tfm) +{ + req->base.tfm = crypto_kpp_tfm(tfm); +} + +static inline struct crypto_kpp *crypto_kpp_reqtfm(struct kpp_request *req) +{ + return __crypto_kpp_tfm(req->base.tfm); +} + +static inline u32 crypto_kpp_get_flags(struct crypto_kpp *tfm) +{ + return crypto_tfm_get_flags(crypto_kpp_tfm(tfm)); +} + +static inline void crypto_kpp_set_flags(struct crypto_kpp *tfm, u32 flags) +{ + crypto_tfm_set_flags(crypto_kpp_tfm(tfm), flags); +} + +/** + * crypto_free_kpp() - free KPP tfm handle + * + * @tfm: KPP tfm handle allocated with crypto_alloc_kpp() + * + * If @tfm is a NULL or error pointer, this function does nothing. + */ +static inline void crypto_free_kpp(struct crypto_kpp *tfm) +{ + crypto_destroy_tfm(tfm, crypto_kpp_tfm(tfm)); +} + +/** + * kpp_request_alloc() - allocates kpp request + * + * @tfm: KPP tfm handle allocated with crypto_alloc_kpp() + * @gfp: allocation flags + * + * Return: allocated handle in case of success or NULL in case of an error. + */ +static inline struct kpp_request *kpp_request_alloc(struct crypto_kpp *tfm, + gfp_t gfp) +{ + struct kpp_request *req; + + req = kmalloc(sizeof(*req) + crypto_kpp_reqsize(tfm), gfp); + if (likely(req)) + kpp_request_set_tfm(req, tfm); + + return req; +} + +/** + * kpp_request_free() - zeroize and free kpp request + * + * @req: request to free + */ +static inline void kpp_request_free(struct kpp_request *req) +{ + kfree_sensitive(req); +} + +/** + * kpp_request_set_callback() - Sets an asynchronous callback. 
+ * + * Callback will be called when an asynchronous operation on a given + * request is finished. + * + * @req: request that the callback will be set for + * @flgs: specify for instance if the operation may backlog + * @cmpl: callback which will be called + * @data: private data used by the caller + */ +static inline void kpp_request_set_callback(struct kpp_request *req, + u32 flgs, + crypto_completion_t cmpl, + void *data) +{ + req->base.complete = cmpl; + req->base.data = data; + req->base.flags = flgs; +} + +/** + * kpp_request_set_input() - Sets input buffer + * + * Sets parameters required by generate_public_key + * + * @req: kpp request + * @input: ptr to input scatter list + * @input_len: size of the input scatter list + */ +static inline void kpp_request_set_input(struct kpp_request *req, + struct scatterlist *input, + unsigned int input_len) +{ + req->src = input; + req->src_len = input_len; +} + +/** + * kpp_request_set_output() - Sets output buffer + * + * Sets parameters required by kpp operation + * + * @req: kpp request + * @output: ptr to output scatter list + * @output_len: size of the output scatter list + */ +static inline void kpp_request_set_output(struct kpp_request *req, + struct scatterlist *output, + unsigned int output_len) +{ + req->dst = output; + req->dst_len = output_len; +} + +enum { + CRYPTO_KPP_SECRET_TYPE_UNKNOWN, + CRYPTO_KPP_SECRET_TYPE_DH, + CRYPTO_KPP_SECRET_TYPE_ECDH, +}; + +/** + * struct kpp_secret - small header for packing secret buffer + * + * @type: define type of secret. Each kpp type will define its own + * @len: specify the len of the secret, include the header, that + * follows the struct + */ +struct kpp_secret { + unsigned short type; + unsigned short len; +}; + +/** + * crypto_kpp_set_secret() - Invoke kpp operation + * + * Function invokes the specific kpp operation for a given alg. + * + * @tfm: tfm handle + * @buffer: Buffer holding the packet representation of the private + * key. The structure of the packet key depends on the particular + * KPP implementation. Packing and unpacking helpers are provided + * for ECDH and DH (see the respective header files for those + * implementations). + * @len: Length of the packet private key buffer. + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm, + const void *buffer, unsigned int len) +{ + struct kpp_alg *alg = crypto_kpp_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; + int ret; + + crypto_stats_get(calg); + ret = alg->set_secret(tfm, buffer, len); + crypto_stats_kpp_set_secret(calg, ret); + return ret; +} + +/** + * crypto_kpp_generate_public_key() - Invoke kpp operation + * + * Function invokes the specific kpp operation for generating the public part + * for a given kpp algorithm. + * + * To generate a private key, the caller should use a random number generator. + * The output of the requested length serves as the private key. 
+ * + * @req: kpp key request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_kpp_generate_public_key(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct kpp_alg *alg = crypto_kpp_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; + int ret; + + crypto_stats_get(calg); + ret = alg->generate_public_key(req); + crypto_stats_kpp_generate_public_key(calg, ret); + return ret; +} + +/** + * crypto_kpp_compute_shared_secret() - Invoke kpp operation + * + * Function invokes the specific kpp operation for computing the shared secret + * for a given kpp algorithm. + * + * @req: kpp key request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct kpp_alg *alg = crypto_kpp_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; + int ret; + + crypto_stats_get(calg); + ret = alg->compute_shared_secret(req); + crypto_stats_kpp_compute_shared_secret(calg, ret); + return ret; +} + +/** + * crypto_kpp_maxsize() - Get len for output buffer + * + * Function returns the output buffer size required for a given key. + * Function assumes that the key is already set in the transformation. If this + * function is called without a setkey or with a failed setkey, you will end up + * in a NULL dereference. + * + * @tfm: KPP tfm handle allocated with crypto_alloc_kpp() + */ +static inline unsigned int crypto_kpp_maxsize(struct crypto_kpp *tfm) +{ + struct kpp_alg *alg = crypto_kpp_alg(tfm); + + return alg->max_size(tfm); +} + +#endif diff --git a/include/crypto/md5.h b/include/crypto/md5.h new file mode 100644 index 000000000..cf9e9dec3 --- /dev/null +++ b/include/crypto/md5.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_MD5_H +#define _CRYPTO_MD5_H + +#include <linux/types.h> + +#define MD5_DIGEST_SIZE 16 +#define MD5_HMAC_BLOCK_SIZE 64 +#define MD5_BLOCK_WORDS 16 +#define MD5_HASH_WORDS 4 + +#define MD5_H0 0x67452301UL +#define MD5_H1 0xefcdab89UL +#define MD5_H2 0x98badcfeUL +#define MD5_H3 0x10325476UL + +extern const u8 md5_zero_message_hash[MD5_DIGEST_SIZE]; + +struct md5_state { + u32 hash[MD5_HASH_WORDS]; + u32 block[MD5_BLOCK_WORDS]; + u64 byte_count; +}; + +#endif diff --git a/include/crypto/nhpoly1305.h b/include/crypto/nhpoly1305.h new file mode 100644 index 000000000..306925fea --- /dev/null +++ b/include/crypto/nhpoly1305.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values and helper functions for the NHPoly1305 hash function. 
+ */ + +#ifndef _NHPOLY1305_H +#define _NHPOLY1305_H + +#include <crypto/hash.h> +#include <crypto/internal/poly1305.h> + +/* NH parameterization: */ + +/* Endianness: little */ +/* Word size: 32 bits (works well on NEON, SSE2, AVX2) */ + +/* Stride: 2 words (optimal on ARM32 NEON; works okay on other CPUs too) */ +#define NH_PAIR_STRIDE 2 +#define NH_MESSAGE_UNIT (NH_PAIR_STRIDE * 2 * sizeof(u32)) + +/* Num passes (Toeplitz iteration count): 4, to give ε = 2^{-128} */ +#define NH_NUM_PASSES 4 +#define NH_HASH_BYTES (NH_NUM_PASSES * sizeof(u64)) + +/* Max message size: 1024 bytes (32x compression factor) */ +#define NH_NUM_STRIDES 64 +#define NH_MESSAGE_WORDS (NH_PAIR_STRIDE * 2 * NH_NUM_STRIDES) +#define NH_MESSAGE_BYTES (NH_MESSAGE_WORDS * sizeof(u32)) +#define NH_KEY_WORDS (NH_MESSAGE_WORDS + \ + NH_PAIR_STRIDE * 2 * (NH_NUM_PASSES - 1)) +#define NH_KEY_BYTES (NH_KEY_WORDS * sizeof(u32)) + +#define NHPOLY1305_KEY_SIZE (POLY1305_BLOCK_SIZE + NH_KEY_BYTES) + +struct nhpoly1305_key { + struct poly1305_core_key poly_key; + u32 nh_key[NH_KEY_WORDS]; +}; + +struct nhpoly1305_state { + + /* Running total of polynomial evaluation */ + struct poly1305_state poly_state; + + /* Partial block buffer */ + u8 buffer[NH_MESSAGE_UNIT]; + unsigned int buflen; + + /* + * Number of bytes remaining until the current NH message reaches + * NH_MESSAGE_BYTES. When nonzero, 'nh_hash' holds the partial NH hash. + */ + unsigned int nh_remaining; + + __le64 nh_hash[NH_NUM_PASSES]; +}; + +typedef void (*nh_t)(const u32 *key, const u8 *message, size_t message_len, + __le64 hash[NH_NUM_PASSES]); + +int crypto_nhpoly1305_setkey(struct crypto_shash *tfm, + const u8 *key, unsigned int keylen); + +int crypto_nhpoly1305_init(struct shash_desc *desc); +int crypto_nhpoly1305_update(struct shash_desc *desc, + const u8 *src, unsigned int srclen); +int crypto_nhpoly1305_update_helper(struct shash_desc *desc, + const u8 *src, unsigned int srclen, + nh_t nh_fn); +int crypto_nhpoly1305_final(struct shash_desc *desc, u8 *dst); +int crypto_nhpoly1305_final_helper(struct shash_desc *desc, u8 *dst, + nh_t nh_fn); + +#endif /* _NHPOLY1305_H */ diff --git a/include/crypto/null.h b/include/crypto/null.h new file mode 100644 index 000000000..0ef577cc0 --- /dev/null +++ b/include/crypto/null.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Values for NULL algorithms */ + +#ifndef _CRYPTO_NULL_H +#define _CRYPTO_NULL_H + +#define NULL_KEY_SIZE 0 +#define NULL_BLOCK_SIZE 1 +#define NULL_DIGEST_SIZE 0 +#define NULL_IV_SIZE 0 + +struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void); +void crypto_put_default_null_skcipher(void); + +#endif diff --git a/include/crypto/padlock.h b/include/crypto/padlock.h new file mode 100644 index 000000000..6de70e88f --- /dev/null +++ b/include/crypto/padlock.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Driver for VIA PadLock + * + * Copyright (c) 2004 Michal Ludvig <michal@logix.cz> + */ + +#ifndef _CRYPTO_PADLOCK_H +#define _CRYPTO_PADLOCK_H + +#define PADLOCK_ALIGNMENT 16 + +#define PFX KBUILD_MODNAME ": " + +#define PADLOCK_CRA_PRIORITY 300 +#define PADLOCK_COMPOSITE_PRIORITY 400 + +#ifdef CONFIG_64BIT +#define STACK_ALIGN 16 +#else +#define STACK_ALIGN 4 +#endif + +#endif /* _CRYPTO_PADLOCK_H */ diff --git a/include/crypto/pcrypt.h b/include/crypto/pcrypt.h new file mode 100644 index 000000000..234d7cf3c --- /dev/null +++ b/include/crypto/pcrypt.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * pcrypt - Parallel crypto 
engine. + * + * Copyright (C) 2009 secunet Security Networks AG + * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com> + */ + +#ifndef _CRYPTO_PCRYPT_H +#define _CRYPTO_PCRYPT_H + +#include <linux/container_of.h> +#include <linux/crypto.h> +#include <linux/padata.h> + +struct pcrypt_request { + struct padata_priv padata; + void *data; + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +static inline void *pcrypt_request_ctx(struct pcrypt_request *req) +{ + return req->__ctx; +} + +static inline +struct padata_priv *pcrypt_request_padata(struct pcrypt_request *req) +{ + return &req->padata; +} + +static inline +struct pcrypt_request *pcrypt_padata_request(struct padata_priv *padata) +{ + return container_of(padata, struct pcrypt_request, padata); +} + +#endif diff --git a/include/crypto/pkcs7.h b/include/crypto/pkcs7.h new file mode 100644 index 000000000..38ec7f5f9 --- /dev/null +++ b/include/crypto/pkcs7.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* PKCS#7 crypto data parser + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _CRYPTO_PKCS7_H +#define _CRYPTO_PKCS7_H + +#include <linux/verification.h> +#include <linux/hash_info.h> +#include <crypto/public_key.h> + +struct key; +struct pkcs7_message; + +/* + * pkcs7_parser.c + */ +extern struct pkcs7_message *pkcs7_parse_message(const void *data, + size_t datalen); +extern void pkcs7_free_message(struct pkcs7_message *pkcs7); + +extern int pkcs7_get_content_data(const struct pkcs7_message *pkcs7, + const void **_data, size_t *_datalen, + size_t *_headerlen); + +/* + * pkcs7_trust.c + */ +extern int pkcs7_validate_trust(struct pkcs7_message *pkcs7, + struct key *trust_keyring); + +/* + * pkcs7_verify.c + */ +extern int pkcs7_verify(struct pkcs7_message *pkcs7, + enum key_being_used_for usage); + +extern int pkcs7_supply_detached_data(struct pkcs7_message *pkcs7, + const void *data, size_t datalen); + +extern int pkcs7_get_digest(struct pkcs7_message *pkcs7, const u8 **buf, + u32 *len, enum hash_algo *hash_algo); + +#endif /* _CRYPTO_PKCS7_H */ diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h new file mode 100644 index 000000000..090692ec3 --- /dev/null +++ b/include/crypto/poly1305.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for the Poly1305 algorithm + */ + +#ifndef _CRYPTO_POLY1305_H +#define _CRYPTO_POLY1305_H + +#include <linux/types.h> +#include <linux/crypto.h> + +#define POLY1305_BLOCK_SIZE 16 +#define POLY1305_KEY_SIZE 32 +#define POLY1305_DIGEST_SIZE 16 + +/* The poly1305_key and poly1305_state types are mostly opaque and + * implementation-defined. Limbs might be in base 2^64 or base 2^26, or + * different yet. The union type provided keeps these 64-bit aligned for the + * case in which this is implemented using 64x64 multiplies. 
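 *
 * A minimal one-shot usage sketch of the helpers declared below; the
 * key, data and mac buffers are illustrative and supplied by the caller:
 *
 *	struct poly1305_desc_ctx desc;
 *
 *	poly1305_init(&desc, key);	/* key is POLY1305_KEY_SIZE bytes */
 *	poly1305_update(&desc, data, len);
 *	poly1305_final(&desc, mac);	/* writes POLY1305_DIGEST_SIZE bytes */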
+ */ + +struct poly1305_key { + union { + u32 r[5]; + u64 r64[3]; + }; +}; + +struct poly1305_core_key { + struct poly1305_key key; + struct poly1305_key precomputed_s; +}; + +struct poly1305_state { + union { + u32 h[5]; + u64 h64[3]; + }; +}; + +struct poly1305_desc_ctx { + /* partial buffer */ + u8 buf[POLY1305_BLOCK_SIZE]; + /* bytes used in partial buffer */ + unsigned int buflen; + /* how many keys have been set in r[] */ + unsigned short rset; + /* whether s[] has been set */ + bool sset; + /* finalize key */ + u32 s[4]; + /* accumulator */ + struct poly1305_state h; + /* key */ + union { + struct poly1305_key opaque_r[CONFIG_CRYPTO_LIB_POLY1305_RSIZE]; + struct poly1305_core_key core_r; + }; +}; + +void poly1305_init_arch(struct poly1305_desc_ctx *desc, + const u8 key[POLY1305_KEY_SIZE]); +void poly1305_init_generic(struct poly1305_desc_ctx *desc, + const u8 key[POLY1305_KEY_SIZE]); + +static inline void poly1305_init(struct poly1305_desc_ctx *desc, const u8 *key) +{ + if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305)) + poly1305_init_arch(desc, key); + else + poly1305_init_generic(desc, key); +} + +void poly1305_update_arch(struct poly1305_desc_ctx *desc, const u8 *src, + unsigned int nbytes); +void poly1305_update_generic(struct poly1305_desc_ctx *desc, const u8 *src, + unsigned int nbytes); + +static inline void poly1305_update(struct poly1305_desc_ctx *desc, + const u8 *src, unsigned int nbytes) +{ + if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305)) + poly1305_update_arch(desc, src, nbytes); + else + poly1305_update_generic(desc, src, nbytes); +} + +void poly1305_final_arch(struct poly1305_desc_ctx *desc, u8 *digest); +void poly1305_final_generic(struct poly1305_desc_ctx *desc, u8 *digest); + +static inline void poly1305_final(struct poly1305_desc_ctx *desc, u8 *digest) +{ + if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305)) + poly1305_final_arch(desc, digest); + else + poly1305_final_generic(desc, digest); +} + +#endif diff --git a/include/crypto/polyval.h b/include/crypto/polyval.h new file mode 100644 index 000000000..1d630f371 --- /dev/null +++ b/include/crypto/polyval.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for the Polyval hash algorithm + * + * Copyright 2021 Google LLC + */ + +#ifndef _CRYPTO_POLYVAL_H +#define _CRYPTO_POLYVAL_H + +#include <linux/types.h> +#include <linux/crypto.h> + +#define POLYVAL_BLOCK_SIZE 16 +#define POLYVAL_DIGEST_SIZE 16 + +void polyval_mul_non4k(u8 *op1, const u8 *op2); + +void polyval_update_non4k(const u8 *key, const u8 *in, + size_t nblocks, u8 *accumulator); + +#endif diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h new file mode 100644 index 000000000..68f7aa2a7 --- /dev/null +++ b/include/crypto/public_key.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Asymmetric public-key algorithm definitions + * + * See Documentation/crypto/asymmetric-keys.rst + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _LINUX_PUBLIC_KEY_H +#define _LINUX_PUBLIC_KEY_H + +#include <linux/keyctl.h> +#include <linux/oid_registry.h> + +/* + * Cryptographic data for the public-key subtype of the asymmetric key type. + * + * Note that this may include private part of the key as well as the public + * part. 
+ */ +struct public_key { + void *key; + u32 keylen; + enum OID algo; + void *params; + u32 paramlen; + bool key_is_private; + const char *id_type; + const char *pkey_algo; +}; + +extern void public_key_free(struct public_key *key); + +/* + * Public key cryptography signature data + */ +struct public_key_signature { + struct asymmetric_key_id *auth_ids[3]; + u8 *s; /* Signature */ + u8 *digest; + u32 s_size; /* Number of bytes in signature */ + u32 digest_size; /* Number of bytes in digest */ + const char *pkey_algo; + const char *hash_algo; + const char *encoding; + const void *data; + unsigned int data_size; +}; + +extern void public_key_signature_free(struct public_key_signature *sig); + +extern struct asymmetric_key_subtype public_key_subtype; + +struct key; +struct key_type; +union key_payload; + +extern int restrict_link_by_signature(struct key *dest_keyring, + const struct key_type *type, + const union key_payload *payload, + struct key *trust_keyring); + +extern int restrict_link_by_key_or_keyring(struct key *dest_keyring, + const struct key_type *type, + const union key_payload *payload, + struct key *trusted); + +extern int restrict_link_by_key_or_keyring_chain(struct key *trust_keyring, + const struct key_type *type, + const union key_payload *payload, + struct key *trusted); + +extern int query_asymmetric_key(const struct kernel_pkey_params *, + struct kernel_pkey_query *); + +extern int encrypt_blob(struct kernel_pkey_params *, const void *, void *); +extern int decrypt_blob(struct kernel_pkey_params *, const void *, void *); +extern int create_signature(struct kernel_pkey_params *, const void *, void *); +extern int verify_signature(const struct key *, + const struct public_key_signature *); + +int public_key_verify_signature(const struct public_key *pkey, + const struct public_key_signature *sig); + +#endif /* _LINUX_PUBLIC_KEY_H */ diff --git a/include/crypto/rng.h b/include/crypto/rng.h new file mode 100644 index 000000000..17bb3673d --- /dev/null +++ b/include/crypto/rng.h @@ -0,0 +1,203 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * RNG: Random Number Generator algorithms under the crypto API + * + * Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com> + * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#ifndef _CRYPTO_RNG_H +#define _CRYPTO_RNG_H + +#include <linux/crypto.h> + +struct crypto_rng; + +/** + * struct rng_alg - random number generator definition + * + * @generate: The function defined by this variable obtains a + * random number. The random number generator transform + * must generate the random number out of the context + * provided with this call, plus any additional data + * if provided to the call. + * @seed: Seed or reseed the random number generator. With the + * invocation of this function call, the random number + * generator shall become ready for generation. If the + * random number generator requires a seed for setting + * up a new state, the seed must be provided by the + * consumer while invoking this function. The required + * size of the seed is defined with @seedsize . + * @set_ent: Set entropy that would otherwise be obtained from + * entropy source. Internal use only. + * @seedsize: The seed size required for a random number generator + * initialization defined with this variable. Some + * random number generators does not require a seed + * as the seeding is implemented internally without + * the need of support by the consumer. In this case, + * the seed size is set to zero. 
+ * @base: Common crypto API algorithm data structure. + */ +struct rng_alg { + int (*generate)(struct crypto_rng *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int dlen); + int (*seed)(struct crypto_rng *tfm, const u8 *seed, unsigned int slen); + void (*set_ent)(struct crypto_rng *tfm, const u8 *data, + unsigned int len); + + unsigned int seedsize; + + struct crypto_alg base; +}; + +struct crypto_rng { + struct crypto_tfm base; +}; + +extern struct crypto_rng *crypto_default_rng; + +int crypto_get_default_rng(void); +void crypto_put_default_rng(void); + +/** + * DOC: Random number generator API + * + * The random number generator API is used with the ciphers of type + * CRYPTO_ALG_TYPE_RNG (listed as type "rng" in /proc/crypto) + */ + +/** + * crypto_alloc_rng() -- allocate RNG handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * message digest cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for a random number generator. The returned struct + * crypto_rng is the cipher handle that is required for any subsequent + * API invocation for that random number generator. + * + * For all random number generators, this call creates a new private copy of + * the random number generator that does not share a state with other + * instances. The only exception is the "krng" random number generator which + * is a kernel crypto API use case for the get_random_bytes() function of the + * /dev/random driver. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask); + +static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm) +{ + return &tfm->base; +} + +/** + * crypto_rng_alg - obtain name of RNG + * @tfm: cipher handle + * + * Return the generic name (cra_name) of the initialized random number generator + * + * Return: generic name string + */ +static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm) +{ + return container_of(crypto_rng_tfm(tfm)->__crt_alg, + struct rng_alg, base); +} + +/** + * crypto_free_rng() - zeroize and free RNG handle + * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. + */ +static inline void crypto_free_rng(struct crypto_rng *tfm) +{ + crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm)); +} + +/** + * crypto_rng_generate() - get random number + * @tfm: cipher handle + * @src: Input buffer holding additional data, may be NULL + * @slen: Length of additional data + * @dst: output buffer holding the random numbers + * @dlen: length of the output buffer + * + * This function fills the caller-allocated buffer with random + * numbers using the random number generator referenced by the + * cipher handle. 
+ * + * Return: 0 function was successful; < 0 if an error occurred + */ +static inline int crypto_rng_generate(struct crypto_rng *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int dlen) +{ + struct crypto_alg *alg = tfm->base.__crt_alg; + int ret; + + crypto_stats_get(alg); + ret = crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen); + crypto_stats_rng_generate(alg, dlen, ret); + return ret; +} + +/** + * crypto_rng_get_bytes() - get random number + * @tfm: cipher handle + * @rdata: output buffer holding the random numbers + * @dlen: length of the output buffer + * + * This function fills the caller-allocated buffer with random numbers using the + * random number generator referenced by the cipher handle. + * + * Return: 0 function was successful; < 0 if an error occurred + */ +static inline int crypto_rng_get_bytes(struct crypto_rng *tfm, + u8 *rdata, unsigned int dlen) +{ + return crypto_rng_generate(tfm, NULL, 0, rdata, dlen); +} + +/** + * crypto_rng_reset() - re-initialize the RNG + * @tfm: cipher handle + * @seed: seed input data + * @slen: length of the seed input data + * + * The reset function completely re-initializes the random number generator + * referenced by the cipher handle by clearing the current state. The new state + * is initialized with the caller provided seed or automatically, depending + * on the random number generator type (the ANSI X9.31 RNG requires + * caller-provided seed, the SP800-90A DRBGs perform an automatic seeding). + * The seed is provided as a parameter to this function call. The provided seed + * should have the length of the seed size defined for the random number + * generator as defined by crypto_rng_seedsize. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, + unsigned int slen); + +/** + * crypto_rng_seedsize() - obtain seed size of RNG + * @tfm: cipher handle + * + * The function returns the seed size for the random number generator + * referenced by the cipher handle. This value may be zero if the random + * number generator does not implement or require a reseeding. For example, + * the SP800-90A DRBGs implement an automated reseeding after reaching a + * pre-defined threshold. + * + * Return: seed size for the random number generator + */ +static inline int crypto_rng_seedsize(struct crypto_rng *tfm) +{ + return crypto_rng_alg(tfm)->seedsize; +} + +#endif diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h new file mode 100644 index 000000000..ccdb05f68 --- /dev/null +++ b/include/crypto/scatterwalk.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Cryptographic scatter and gather helpers. + * + * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> + * Copyright (c) 2002 Adam J. 
Richter <adam@yggdrasil.com> + * Copyright (c) 2004 Jean-Luc Cooke <jlcooke@certainkey.com> + * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#ifndef _CRYPTO_SCATTERWALK_H +#define _CRYPTO_SCATTERWALK_H + +#include <crypto/algapi.h> + +#include <linux/highmem.h> +#include <linux/mm.h> +#include <linux/scatterlist.h> + +static inline void scatterwalk_crypto_chain(struct scatterlist *head, + struct scatterlist *sg, int num) +{ + if (sg) + sg_chain(head, num, sg); + else + sg_mark_end(head); +} + +static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk) +{ + unsigned int len = walk->sg->offset + walk->sg->length - walk->offset; + unsigned int len_this_page = offset_in_page(~walk->offset) + 1; + return len_this_page > len ? len : len_this_page; +} + +static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk, + unsigned int nbytes) +{ + unsigned int len_this_page = scatterwalk_pagelen(walk); + return nbytes > len_this_page ? len_this_page : nbytes; +} + +static inline void scatterwalk_advance(struct scatter_walk *walk, + unsigned int nbytes) +{ + walk->offset += nbytes; +} + +static inline struct page *scatterwalk_page(struct scatter_walk *walk) +{ + return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT); +} + +static inline void scatterwalk_unmap(void *vaddr) +{ + kunmap_atomic(vaddr); +} + +static inline void scatterwalk_start(struct scatter_walk *walk, + struct scatterlist *sg) +{ + walk->sg = sg; + walk->offset = sg->offset; +} + +static inline void *scatterwalk_map(struct scatter_walk *walk) +{ + return kmap_atomic(scatterwalk_page(walk)) + + offset_in_page(walk->offset); +} + +static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out, + unsigned int more) +{ + if (out) { + struct page *page; + + page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT); + flush_dcache_page(page); + } + + if (more && walk->offset >= walk->sg->offset + walk->sg->length) + scatterwalk_start(walk, sg_next(walk->sg)); +} + +static inline void scatterwalk_done(struct scatter_walk *walk, int out, + int more) +{ + if (!more || walk->offset >= walk->sg->offset + walk->sg->length || + !(walk->offset & (PAGE_SIZE - 1))) + scatterwalk_pagedone(walk, out, more); +} + +void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, + size_t nbytes, int out); +void *scatterwalk_map(struct scatter_walk *walk); + +void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, + unsigned int start, unsigned int nbytes, int out); + +struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], + struct scatterlist *src, + unsigned int len); + +#endif /* _CRYPTO_SCATTERWALK_H */ diff --git a/include/crypto/serpent.h b/include/crypto/serpent.h new file mode 100644 index 000000000..75c7eaa20 --- /dev/null +++ b/include/crypto/serpent.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for serpent algorithms + */ + +#ifndef _CRYPTO_SERPENT_H +#define _CRYPTO_SERPENT_H + +#include <linux/types.h> +#include <linux/crypto.h> + +#define SERPENT_MIN_KEY_SIZE 0 +#define SERPENT_MAX_KEY_SIZE 32 +#define SERPENT_EXPKEY_WORDS 132 +#define SERPENT_BLOCK_SIZE 16 + +struct serpent_ctx { + u32 expkey[SERPENT_EXPKEY_WORDS]; +}; + +int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key, + unsigned int keylen); +int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); + +void __serpent_encrypt(const void *ctx, u8 *dst, const u8 *src); +void __serpent_decrypt(const void *ctx, u8 *dst, const u8 *src); + +#endif 
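The low-level serpent helpers above work on a single 16-byte block with the expanded key held in struct serpent_ctx; higher-level users normally reach Serpent through the skcipher API instead. A minimal sketch of a one-block round trip, assuming the caller supplies the key and block buffers (the wrapper function name is illustrative only):

static int serpent_one_block_sketch(const u8 key[SERPENT_MAX_KEY_SIZE],
				    const u8 in[SERPENT_BLOCK_SIZE],
				    u8 out[SERPENT_BLOCK_SIZE])
{
	struct serpent_ctx ctx;
	int err;

	/* Expand the key; any length up to SERPENT_MAX_KEY_SIZE is accepted. */
	err = __serpent_setkey(&ctx, key, SERPENT_MAX_KEY_SIZE);
	if (err)
		return err;

	__serpent_encrypt(&ctx, out, in);	/* arguments are ctx, dst, src */
	__serpent_decrypt(&ctx, out, out);	/* in place; recovers the input */
	return 0;
}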
diff --git a/include/crypto/sha1.h b/include/crypto/sha1.h new file mode 100644 index 000000000..044ecea60 --- /dev/null +++ b/include/crypto/sha1.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for SHA-1 algorithms + */ + +#ifndef _CRYPTO_SHA1_H +#define _CRYPTO_SHA1_H + +#include <linux/types.h> + +#define SHA1_DIGEST_SIZE 20 +#define SHA1_BLOCK_SIZE 64 + +#define SHA1_H0 0x67452301UL +#define SHA1_H1 0xefcdab89UL +#define SHA1_H2 0x98badcfeUL +#define SHA1_H3 0x10325476UL +#define SHA1_H4 0xc3d2e1f0UL + +extern const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE]; + +struct sha1_state { + u32 state[SHA1_DIGEST_SIZE / 4]; + u64 count; + u8 buffer[SHA1_BLOCK_SIZE]; +}; + +struct shash_desc; + +extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data, + unsigned int len); + +extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *hash); + +/* + * An implementation of SHA-1's compression function. Don't use in new code! + * You shouldn't be using SHA-1, and even if you *have* to use SHA-1, this isn't + * the correct way to hash something with SHA-1 (use crypto_shash instead). + */ +#define SHA1_DIGEST_WORDS (SHA1_DIGEST_SIZE / 4) +#define SHA1_WORKSPACE_WORDS 16 +void sha1_init(__u32 *buf); +void sha1_transform(__u32 *digest, const char *data, __u32 *W); + +#endif /* _CRYPTO_SHA1_H */ diff --git a/include/crypto/sha1_base.h b/include/crypto/sha1_base.h new file mode 100644 index 000000000..2e0e7c382 --- /dev/null +++ b/include/crypto/sha1_base.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * sha1_base.h - core logic for SHA-1 implementations + * + * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org> + */ + +#ifndef _CRYPTO_SHA1_BASE_H +#define _CRYPTO_SHA1_BASE_H + +#include <crypto/internal/hash.h> +#include <crypto/sha1.h> +#include <linux/crypto.h> +#include <linux/module.h> +#include <linux/string.h> + +#include <asm/unaligned.h> + +typedef void (sha1_block_fn)(struct sha1_state *sst, u8 const *src, int blocks); + +static inline int sha1_base_init(struct shash_desc *desc) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + + sctx->state[0] = SHA1_H0; + sctx->state[1] = SHA1_H1; + sctx->state[2] = SHA1_H2; + sctx->state[3] = SHA1_H3; + sctx->state[4] = SHA1_H4; + sctx->count = 0; + + return 0; +} + +static inline int sha1_base_do_update(struct shash_desc *desc, + const u8 *data, + unsigned int len, + sha1_block_fn *block_fn) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; + + sctx->count += len; + + if (unlikely((partial + len) >= SHA1_BLOCK_SIZE)) { + int blocks; + + if (partial) { + int p = SHA1_BLOCK_SIZE - partial; + + memcpy(sctx->buffer + partial, data, p); + data += p; + len -= p; + + block_fn(sctx, sctx->buffer, 1); + } + + blocks = len / SHA1_BLOCK_SIZE; + len %= SHA1_BLOCK_SIZE; + + if (blocks) { + block_fn(sctx, data, blocks); + data += blocks * SHA1_BLOCK_SIZE; + } + partial = 0; + } + if (len) + memcpy(sctx->buffer + partial, data, len); + + return 0; +} + +static inline int sha1_base_do_finalize(struct shash_desc *desc, + sha1_block_fn *block_fn) +{ + const int bit_offset = SHA1_BLOCK_SIZE - sizeof(__be64); + struct sha1_state *sctx = shash_desc_ctx(desc); + __be64 *bits = (__be64 *)(sctx->buffer + bit_offset); + unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; + + sctx->buffer[partial++] = 0x80; + if (partial > bit_offset) { + memset(sctx->buffer + partial, 0x0, SHA1_BLOCK_SIZE - partial); + 
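		/*
		 * The 0x80 pad byte left no room for the 64-bit length in
		 * this block: compress the zero-filled block here and emit
		 * the bit count in a fresh block below.
		 */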
partial = 0; + + block_fn(sctx, sctx->buffer, 1); + } + + memset(sctx->buffer + partial, 0x0, bit_offset - partial); + *bits = cpu_to_be64(sctx->count << 3); + block_fn(sctx, sctx->buffer, 1); + + return 0; +} + +static inline int sha1_base_finish(struct shash_desc *desc, u8 *out) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + __be32 *digest = (__be32 *)out; + int i; + + for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++) + put_unaligned_be32(sctx->state[i], digest++); + + memzero_explicit(sctx, sizeof(*sctx)); + return 0; +} + +#endif /* _CRYPTO_SHA1_BASE_H */ diff --git a/include/crypto/sha2.h b/include/crypto/sha2.h new file mode 100644 index 000000000..2838f529f --- /dev/null +++ b/include/crypto/sha2.h @@ -0,0 +1,134 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for SHA-2 algorithms + */ + +#ifndef _CRYPTO_SHA2_H +#define _CRYPTO_SHA2_H + +#include <linux/types.h> + +#define SHA224_DIGEST_SIZE 28 +#define SHA224_BLOCK_SIZE 64 + +#define SHA256_DIGEST_SIZE 32 +#define SHA256_BLOCK_SIZE 64 + +#define SHA384_DIGEST_SIZE 48 +#define SHA384_BLOCK_SIZE 128 + +#define SHA512_DIGEST_SIZE 64 +#define SHA512_BLOCK_SIZE 128 + +#define SHA224_H0 0xc1059ed8UL +#define SHA224_H1 0x367cd507UL +#define SHA224_H2 0x3070dd17UL +#define SHA224_H3 0xf70e5939UL +#define SHA224_H4 0xffc00b31UL +#define SHA224_H5 0x68581511UL +#define SHA224_H6 0x64f98fa7UL +#define SHA224_H7 0xbefa4fa4UL + +#define SHA256_H0 0x6a09e667UL +#define SHA256_H1 0xbb67ae85UL +#define SHA256_H2 0x3c6ef372UL +#define SHA256_H3 0xa54ff53aUL +#define SHA256_H4 0x510e527fUL +#define SHA256_H5 0x9b05688cUL +#define SHA256_H6 0x1f83d9abUL +#define SHA256_H7 0x5be0cd19UL + +#define SHA384_H0 0xcbbb9d5dc1059ed8ULL +#define SHA384_H1 0x629a292a367cd507ULL +#define SHA384_H2 0x9159015a3070dd17ULL +#define SHA384_H3 0x152fecd8f70e5939ULL +#define SHA384_H4 0x67332667ffc00b31ULL +#define SHA384_H5 0x8eb44a8768581511ULL +#define SHA384_H6 0xdb0c2e0d64f98fa7ULL +#define SHA384_H7 0x47b5481dbefa4fa4ULL + +#define SHA512_H0 0x6a09e667f3bcc908ULL +#define SHA512_H1 0xbb67ae8584caa73bULL +#define SHA512_H2 0x3c6ef372fe94f82bULL +#define SHA512_H3 0xa54ff53a5f1d36f1ULL +#define SHA512_H4 0x510e527fade682d1ULL +#define SHA512_H5 0x9b05688c2b3e6c1fULL +#define SHA512_H6 0x1f83d9abfb41bd6bULL +#define SHA512_H7 0x5be0cd19137e2179ULL + +extern const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE]; + +extern const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE]; + +extern const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE]; + +extern const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE]; + +struct sha256_state { + u32 state[SHA256_DIGEST_SIZE / 4]; + u64 count; + u8 buf[SHA256_BLOCK_SIZE]; +}; + +struct sha512_state { + u64 state[SHA512_DIGEST_SIZE / 8]; + u64 count[2]; + u8 buf[SHA512_BLOCK_SIZE]; +}; + +struct shash_desc; + +extern int crypto_sha256_update(struct shash_desc *desc, const u8 *data, + unsigned int len); + +extern int crypto_sha256_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *hash); + +extern int crypto_sha512_update(struct shash_desc *desc, const u8 *data, + unsigned int len); + +extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *hash); + +/* + * Stand-alone implementation of the SHA256 algorithm. It is designed to + * have as little dependencies as possible so it can be used in the + * kexec_file purgatory. In other cases you should generally use the + * hash APIs from include/crypto/hash.h. 
Especially when hashing large + * amounts of data as those APIs may be hw-accelerated. + * + * For details see lib/crypto/sha256.c + */ + +static inline void sha256_init(struct sha256_state *sctx) +{ + sctx->state[0] = SHA256_H0; + sctx->state[1] = SHA256_H1; + sctx->state[2] = SHA256_H2; + sctx->state[3] = SHA256_H3; + sctx->state[4] = SHA256_H4; + sctx->state[5] = SHA256_H5; + sctx->state[6] = SHA256_H6; + sctx->state[7] = SHA256_H7; + sctx->count = 0; +} +void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len); +void sha256_final(struct sha256_state *sctx, u8 *out); +void sha256(const u8 *data, unsigned int len, u8 *out); + +static inline void sha224_init(struct sha256_state *sctx) +{ + sctx->state[0] = SHA224_H0; + sctx->state[1] = SHA224_H1; + sctx->state[2] = SHA224_H2; + sctx->state[3] = SHA224_H3; + sctx->state[4] = SHA224_H4; + sctx->state[5] = SHA224_H5; + sctx->state[6] = SHA224_H6; + sctx->state[7] = SHA224_H7; + sctx->count = 0; +} +void sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len); +void sha224_final(struct sha256_state *sctx, u8 *out); + +#endif /* _CRYPTO_SHA2_H */ diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h new file mode 100644 index 000000000..76173c613 --- /dev/null +++ b/include/crypto/sha256_base.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * sha256_base.h - core logic for SHA-256 implementations + * + * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org> + */ + +#ifndef _CRYPTO_SHA256_BASE_H +#define _CRYPTO_SHA256_BASE_H + +#include <crypto/internal/hash.h> +#include <crypto/sha2.h> +#include <linux/crypto.h> +#include <linux/module.h> +#include <linux/string.h> + +#include <asm/unaligned.h> + +typedef void (sha256_block_fn)(struct sha256_state *sst, u8 const *src, + int blocks); + +static inline int sha224_base_init(struct shash_desc *desc) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + + sha224_init(sctx); + return 0; +} + +static inline int sha256_base_init(struct shash_desc *desc) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + + sha256_init(sctx); + return 0; +} + +static inline int sha256_base_do_update(struct shash_desc *desc, + const u8 *data, + unsigned int len, + sha256_block_fn *block_fn) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; + + sctx->count += len; + + if (unlikely((partial + len) >= SHA256_BLOCK_SIZE)) { + int blocks; + + if (partial) { + int p = SHA256_BLOCK_SIZE - partial; + + memcpy(sctx->buf + partial, data, p); + data += p; + len -= p; + + block_fn(sctx, sctx->buf, 1); + } + + blocks = len / SHA256_BLOCK_SIZE; + len %= SHA256_BLOCK_SIZE; + + if (blocks) { + block_fn(sctx, data, blocks); + data += blocks * SHA256_BLOCK_SIZE; + } + partial = 0; + } + if (len) + memcpy(sctx->buf + partial, data, len); + + return 0; +} + +static inline int sha256_base_do_finalize(struct shash_desc *desc, + sha256_block_fn *block_fn) +{ + const int bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64); + struct sha256_state *sctx = shash_desc_ctx(desc); + __be64 *bits = (__be64 *)(sctx->buf + bit_offset); + unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; + + sctx->buf[partial++] = 0x80; + if (partial > bit_offset) { + memset(sctx->buf + partial, 0x0, SHA256_BLOCK_SIZE - partial); + partial = 0; + + block_fn(sctx, sctx->buf, 1); + } + + memset(sctx->buf + partial, 0x0, bit_offset - partial); + *bits = cpu_to_be64(sctx->count << 3); + block_fn(sctx, sctx->buf, 1); + 
+ return 0; +} + +static inline int sha256_base_finish(struct shash_desc *desc, u8 *out) +{ + unsigned int digest_size = crypto_shash_digestsize(desc->tfm); + struct sha256_state *sctx = shash_desc_ctx(desc); + __be32 *digest = (__be32 *)out; + int i; + + for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be32)) + put_unaligned_be32(sctx->state[i], digest++); + + memzero_explicit(sctx, sizeof(*sctx)); + return 0; +} + +#endif /* _CRYPTO_SHA256_BASE_H */ diff --git a/include/crypto/sha3.h b/include/crypto/sha3.h new file mode 100644 index 000000000..080f60c2e --- /dev/null +++ b/include/crypto/sha3.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for SHA-3 algorithms + */ +#ifndef __CRYPTO_SHA3_H__ +#define __CRYPTO_SHA3_H__ + +#define SHA3_224_DIGEST_SIZE (224 / 8) +#define SHA3_224_BLOCK_SIZE (200 - 2 * SHA3_224_DIGEST_SIZE) + +#define SHA3_256_DIGEST_SIZE (256 / 8) +#define SHA3_256_BLOCK_SIZE (200 - 2 * SHA3_256_DIGEST_SIZE) + +#define SHA3_384_DIGEST_SIZE (384 / 8) +#define SHA3_384_BLOCK_SIZE (200 - 2 * SHA3_384_DIGEST_SIZE) + +#define SHA3_512_DIGEST_SIZE (512 / 8) +#define SHA3_512_BLOCK_SIZE (200 - 2 * SHA3_512_DIGEST_SIZE) + +struct sha3_state { + u64 st[25]; + unsigned int rsiz; + unsigned int rsizw; + + unsigned int partial; + u8 buf[SHA3_224_BLOCK_SIZE]; +}; + +int crypto_sha3_init(struct shash_desc *desc); +int crypto_sha3_update(struct shash_desc *desc, const u8 *data, + unsigned int len); +int crypto_sha3_final(struct shash_desc *desc, u8 *out); + +#endif diff --git a/include/crypto/sha512_base.h b/include/crypto/sha512_base.h new file mode 100644 index 000000000..b370b3340 --- /dev/null +++ b/include/crypto/sha512_base.h @@ -0,0 +1,134 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * sha512_base.h - core logic for SHA-512 implementations + * + * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org> + */ + +#ifndef _CRYPTO_SHA512_BASE_H +#define _CRYPTO_SHA512_BASE_H + +#include <crypto/internal/hash.h> +#include <crypto/sha2.h> +#include <linux/crypto.h> +#include <linux/module.h> +#include <linux/string.h> + +#include <asm/unaligned.h> + +typedef void (sha512_block_fn)(struct sha512_state *sst, u8 const *src, + int blocks); + +static inline int sha384_base_init(struct shash_desc *desc) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + + sctx->state[0] = SHA384_H0; + sctx->state[1] = SHA384_H1; + sctx->state[2] = SHA384_H2; + sctx->state[3] = SHA384_H3; + sctx->state[4] = SHA384_H4; + sctx->state[5] = SHA384_H5; + sctx->state[6] = SHA384_H6; + sctx->state[7] = SHA384_H7; + sctx->count[0] = sctx->count[1] = 0; + + return 0; +} + +static inline int sha512_base_init(struct shash_desc *desc) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + + sctx->state[0] = SHA512_H0; + sctx->state[1] = SHA512_H1; + sctx->state[2] = SHA512_H2; + sctx->state[3] = SHA512_H3; + sctx->state[4] = SHA512_H4; + sctx->state[5] = SHA512_H5; + sctx->state[6] = SHA512_H6; + sctx->state[7] = SHA512_H7; + sctx->count[0] = sctx->count[1] = 0; + + return 0; +} + +static inline int sha512_base_do_update(struct shash_desc *desc, + const u8 *data, + unsigned int len, + sha512_block_fn *block_fn) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; + + sctx->count[0] += len; + if (sctx->count[0] < len) + sctx->count[1]++; + + if (unlikely((partial + len) >= SHA512_BLOCK_SIZE)) { + int blocks; + + if (partial) { + int p = SHA512_BLOCK_SIZE - partial; + + memcpy(sctx->buf + partial, data, 
p); + data += p; + len -= p; + + block_fn(sctx, sctx->buf, 1); + } + + blocks = len / SHA512_BLOCK_SIZE; + len %= SHA512_BLOCK_SIZE; + + if (blocks) { + block_fn(sctx, data, blocks); + data += blocks * SHA512_BLOCK_SIZE; + } + partial = 0; + } + if (len) + memcpy(sctx->buf + partial, data, len); + + return 0; +} + +static inline int sha512_base_do_finalize(struct shash_desc *desc, + sha512_block_fn *block_fn) +{ + const int bit_offset = SHA512_BLOCK_SIZE - sizeof(__be64[2]); + struct sha512_state *sctx = shash_desc_ctx(desc); + __be64 *bits = (__be64 *)(sctx->buf + bit_offset); + unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; + + sctx->buf[partial++] = 0x80; + if (partial > bit_offset) { + memset(sctx->buf + partial, 0x0, SHA512_BLOCK_SIZE - partial); + partial = 0; + + block_fn(sctx, sctx->buf, 1); + } + + memset(sctx->buf + partial, 0x0, bit_offset - partial); + bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61); + bits[1] = cpu_to_be64(sctx->count[0] << 3); + block_fn(sctx, sctx->buf, 1); + + return 0; +} + +static inline int sha512_base_finish(struct shash_desc *desc, u8 *out) +{ + unsigned int digest_size = crypto_shash_digestsize(desc->tfm); + struct sha512_state *sctx = shash_desc_ctx(desc); + __be64 *digest = (__be64 *)out; + int i; + + for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be64)) + put_unaligned_be64(sctx->state[i], digest++); + + memzero_explicit(sctx, sizeof(*sctx)); + return 0; +} + +#endif /* _CRYPTO_SHA512_BASE_H */ diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h new file mode 100644 index 000000000..39f5b67c3 --- /dev/null +++ b/include/crypto/skcipher.h @@ -0,0 +1,590 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Symmetric key ciphers. + * + * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#ifndef _CRYPTO_SKCIPHER_H +#define _CRYPTO_SKCIPHER_H + +#include <linux/container_of.h> +#include <linux/crypto.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/types.h> + +struct scatterlist; + +/** + * struct skcipher_request - Symmetric key cipher request + * @cryptlen: Number of bytes to encrypt or decrypt + * @iv: Initialisation Vector + * @src: Source SG list + * @dst: Destination SG list + * @base: Underlying async request + * @__ctx: Start of private context data + */ +struct skcipher_request { + unsigned int cryptlen; + + u8 *iv; + + struct scatterlist *src; + struct scatterlist *dst; + + struct crypto_async_request base; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +struct crypto_skcipher { + unsigned int reqsize; + + struct crypto_tfm base; +}; + +struct crypto_sync_skcipher { + struct crypto_skcipher base; +}; + +/** + * struct skcipher_alg - symmetric key cipher definition + * @min_keysize: Minimum key size supported by the transformation. This is the + * smallest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MIN_KEY_SIZE" include/crypto/ + * @max_keysize: Maximum key size supported by the transformation. This is the + * largest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MAX_KEY_SIZE" include/crypto/ + * @setkey: Set key for the transformation. 
This function is used to either + * program a supplied key into the hardware or store the key in the + * transformation context for programming it later. Note that this + * function does modify the transformation context. This function can + * be called multiple times during the existence of the transformation + * object, so one must make sure the key is properly reprogrammed into + * the hardware. This function is also responsible for checking the key + * length for validity. In case a software fallback was put in place in + * the @cra_init call, this function might need to use the fallback if + * the algorithm doesn't support all of the key sizes. + * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt + * the supplied scatterlist containing the blocks of data. The crypto + * API consumer is responsible for aligning the entries of the + * scatterlist properly and making sure the chunks are correctly + * sized. In case a software fallback was put in place in the + * @cra_init call, this function might need to use the fallback if + * the algorithm doesn't support all of the key sizes. In case the + * key was stored in transformation context, the key might need to be + * re-programmed into the hardware in this function. This function + * shall not modify the transformation context, as this function may + * be called in parallel with the same transformation object. + * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt + * and the conditions are exactly the same. + * @init: Initialize the cryptographic transformation object. This function + * is used to initialize the cryptographic transformation object. + * This function is called only once at the instantiation time, right + * after the transformation context was allocated. In case the + * cryptographic hardware has some special requirements which need to + * be handled by software, this function shall check for the precise + * requirement of the transformation and put any software fallbacks + * in place. + * @exit: Deinitialize the cryptographic transformation object. This is a + * counterpart to @init, used to remove various changes set in + * @init. + * @ivsize: IV size applicable for transformation. The consumer must provide an + * IV of exactly that size to perform the encrypt or decrypt operation. + * @chunksize: Equal to the block size except for stream ciphers such as + * CTR where it is set to the underlying block size. + * @walksize: Equal to the chunk size except in cases where the algorithm is + * considerably more efficient if it can operate on multiple chunks + * in parallel. Should be a multiple of chunksize. + * @base: Definition of a generic crypto algorithm. + * + * All fields except @ivsize are mandatory and must be filled. + */ +struct skcipher_alg { + int (*setkey)(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct skcipher_request *req); + int (*decrypt)(struct skcipher_request *req); + int (*init)(struct crypto_skcipher *tfm); + void (*exit)(struct crypto_skcipher *tfm); + + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; + unsigned int chunksize; + unsigned int walksize; + + struct crypto_alg base; +}; + +#define MAX_SYNC_SKCIPHER_REQSIZE 384 +/* + * This performs a type-check against the "tfm" argument to make sure + * all users have the correct skcipher tfm for doing on-stack requests. 
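+ *
+ * A minimal, hypothetical usage sketch (it assumes "tfm" was allocated with
+ * crypto_alloc_sync_skcipher() and already keyed via
+ * crypto_sync_skcipher_setkey(), and that "src", "dst", "iv" and "len" have
+ * been prepared by the caller; error handling is omitted):
+ *
+ *	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
+ *
+ *	skcipher_request_set_sync_tfm(req, tfm);
+ *	skcipher_request_set_callback(req, 0, NULL, NULL);
+ *	skcipher_request_set_crypt(req, src, dst, len, iv);
+ *	err = crypto_skcipher_encrypt(req);
+ *	skcipher_request_zero(req);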
+ */
+#define SYNC_SKCIPHER_REQUEST_ON_STACK(name, tfm) \
+	char __##name##_desc[sizeof(struct skcipher_request) + \
+			     MAX_SYNC_SKCIPHER_REQSIZE + \
+			     (!(sizeof((struct crypto_sync_skcipher *)1 == \
+				       (typeof(tfm))1))) \
+			] CRYPTO_MINALIGN_ATTR; \
+	struct skcipher_request *name = (void *)__##name##_desc
+
+/**
+ * DOC: Symmetric Key Cipher API
+ *
+ * The symmetric key cipher API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_SKCIPHER (listed as type "skcipher" in /proc/crypto).
+ *
+ * Asynchronous cipher operations imply that the function invocation for a
+ * cipher request returns immediately, before the operation has completed.
+ * The cipher request is scheduled as a separate kernel thread and therefore
+ * load-balanced on the different CPUs via the process scheduler. To allow
+ * the kernel crypto API to inform the caller about the completion of a cipher
+ * request, the caller must provide a callback function. That function is
+ * invoked with the cipher handle when the request completes.
+ *
+ * To support asynchronous operation, more information than just the cipher
+ * handle must be supplied to the kernel crypto API. That additional
+ * information is given by filling in the skcipher_request data structure.
+ *
+ * For the symmetric key cipher API, the state is maintained with the tfm
+ * cipher handle. A single tfm can be used across multiple calls and in
+ * parallel. For asynchronous block cipher calls, context data supplied and
+ * only used by the caller can be referenced in the request data structure in
+ * addition to the IV used for the cipher request. The maintenance of such
+ * state information would be important for a crypto driver implementer to
+ * have, because when calling the callback function upon completion of the
+ * cipher operation, that callback function may need some information about
+ * which operation just finished if it invoked multiple requests in parallel.
+ * This state information is unused by the kernel crypto API.
+ */
+
+static inline struct crypto_skcipher *__crypto_skcipher_cast(
+	struct crypto_tfm *tfm)
+{
+	return container_of(tfm, struct crypto_skcipher, base);
+}
+
+/**
+ * crypto_alloc_skcipher() - allocate symmetric key cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	      skcipher cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an skcipher. The returned struct
+ * crypto_skcipher is the cipher handle that is required for any subsequent
+ * API invocation for that skcipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ *	   of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
+					      u32 type, u32 mask);
+
+struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(const char *alg_name,
+					      u32 type, u32 mask);
+
+static inline struct crypto_tfm *crypto_skcipher_tfm(
+	struct crypto_skcipher *tfm)
+{
+	return &tfm->base;
+}
+
+/**
+ * crypto_free_skcipher() - zeroize and free cipher handle
+ * @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
+ */ +static inline void crypto_free_skcipher(struct crypto_skcipher *tfm) +{ + crypto_destroy_tfm(tfm, crypto_skcipher_tfm(tfm)); +} + +static inline void crypto_free_sync_skcipher(struct crypto_sync_skcipher *tfm) +{ + crypto_free_skcipher(&tfm->base); +} + +/** + * crypto_has_skcipher() - Search for the availability of an skcipher. + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * skcipher + * @type: specifies the type of the skcipher + * @mask: specifies the mask for the skcipher + * + * Return: true when the skcipher is known to the kernel crypto API; false + * otherwise + */ +int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask); + +static inline const char *crypto_skcipher_driver_name( + struct crypto_skcipher *tfm) +{ + return crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)); +} + +static inline struct skcipher_alg *crypto_skcipher_alg( + struct crypto_skcipher *tfm) +{ + return container_of(crypto_skcipher_tfm(tfm)->__crt_alg, + struct skcipher_alg, base); +} + +static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg) +{ + return alg->ivsize; +} + +/** + * crypto_skcipher_ivsize() - obtain IV size + * @tfm: cipher handle + * + * The size of the IV for the skcipher referenced by the cipher handle is + * returned. This IV size may be zero if the cipher does not need an IV. + * + * Return: IV size in bytes + */ +static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm) +{ + return crypto_skcipher_alg(tfm)->ivsize; +} + +static inline unsigned int crypto_sync_skcipher_ivsize( + struct crypto_sync_skcipher *tfm) +{ + return crypto_skcipher_ivsize(&tfm->base); +} + +/** + * crypto_skcipher_blocksize() - obtain block size of cipher + * @tfm: cipher handle + * + * The block size for the skcipher referenced with the cipher handle is + * returned. The caller may use that information to allocate appropriate + * memory for the data returned by the encryption or decryption operation + * + * Return: block size of cipher + */ +static inline unsigned int crypto_skcipher_blocksize( + struct crypto_skcipher *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm)); +} + +static inline unsigned int crypto_skcipher_alg_chunksize( + struct skcipher_alg *alg) +{ + return alg->chunksize; +} + +/** + * crypto_skcipher_chunksize() - obtain chunk size + * @tfm: cipher handle + * + * The block size is set to one for ciphers such as CTR. However, + * you still need to provide incremental updates in multiples of + * the underlying block size as the IV does not have sub-block + * granularity. This is known in this API as the chunk size. 
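+ *
+ * For instance, a "ctr(aes)" implementation would typically report a block
+ * size of one byte, while its chunk size is 16 bytes (the underlying AES
+ * block size).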
+ * + * Return: chunk size in bytes + */ +static inline unsigned int crypto_skcipher_chunksize( + struct crypto_skcipher *tfm) +{ + return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm)); +} + +static inline unsigned int crypto_sync_skcipher_blocksize( + struct crypto_sync_skcipher *tfm) +{ + return crypto_skcipher_blocksize(&tfm->base); +} + +static inline unsigned int crypto_skcipher_alignmask( + struct crypto_skcipher *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm)); +} + +static inline u32 crypto_skcipher_get_flags(struct crypto_skcipher *tfm) +{ + return crypto_tfm_get_flags(crypto_skcipher_tfm(tfm)); +} + +static inline void crypto_skcipher_set_flags(struct crypto_skcipher *tfm, + u32 flags) +{ + crypto_tfm_set_flags(crypto_skcipher_tfm(tfm), flags); +} + +static inline void crypto_skcipher_clear_flags(struct crypto_skcipher *tfm, + u32 flags) +{ + crypto_tfm_clear_flags(crypto_skcipher_tfm(tfm), flags); +} + +static inline u32 crypto_sync_skcipher_get_flags( + struct crypto_sync_skcipher *tfm) +{ + return crypto_skcipher_get_flags(&tfm->base); +} + +static inline void crypto_sync_skcipher_set_flags( + struct crypto_sync_skcipher *tfm, u32 flags) +{ + crypto_skcipher_set_flags(&tfm->base, flags); +} + +static inline void crypto_sync_skcipher_clear_flags( + struct crypto_sync_skcipher *tfm, u32 flags) +{ + crypto_skcipher_clear_flags(&tfm->base, flags); +} + +/** + * crypto_skcipher_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the skcipher referenced by the cipher + * handle. + * + * Note, the key length determines the cipher type. Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_skcipher_setkey(struct crypto_skcipher *tfm, + const u8 *key, unsigned int keylen); + +static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm, + const u8 *key, unsigned int keylen) +{ + return crypto_skcipher_setkey(&tfm->base, key, keylen); +} + +static inline unsigned int crypto_skcipher_min_keysize( + struct crypto_skcipher *tfm) +{ + return crypto_skcipher_alg(tfm)->min_keysize; +} + +static inline unsigned int crypto_skcipher_max_keysize( + struct crypto_skcipher *tfm) +{ + return crypto_skcipher_alg(tfm)->max_keysize; +} + +/** + * crypto_skcipher_reqtfm() - obtain cipher handle from request + * @req: skcipher_request out of which the cipher handle is to be obtained + * + * Return the crypto_skcipher handle when furnishing an skcipher_request + * data structure. + * + * Return: crypto_skcipher handle + */ +static inline struct crypto_skcipher *crypto_skcipher_reqtfm( + struct skcipher_request *req) +{ + return __crypto_skcipher_cast(req->base.tfm); +} + +static inline struct crypto_sync_skcipher *crypto_sync_skcipher_reqtfm( + struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + + return container_of(tfm, struct crypto_sync_skcipher, base); +} + +/** + * crypto_skcipher_encrypt() - encrypt plaintext + * @req: reference to the skcipher_request handle that holds all information + * needed to perform the cipher operation + * + * Encrypt plaintext data using the skcipher_request handle. 
That data + * structure and how it is filled with data is discussed with the + * skcipher_request_* functions. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ +int crypto_skcipher_encrypt(struct skcipher_request *req); + +/** + * crypto_skcipher_decrypt() - decrypt ciphertext + * @req: reference to the skcipher_request handle that holds all information + * needed to perform the cipher operation + * + * Decrypt ciphertext data using the skcipher_request handle. That data + * structure and how it is filled with data is discussed with the + * skcipher_request_* functions. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ +int crypto_skcipher_decrypt(struct skcipher_request *req); + +/** + * DOC: Symmetric Key Cipher Request Handle + * + * The skcipher_request data structure contains all pointers to data + * required for the symmetric key cipher operation. This includes the cipher + * handle (which can be used by multiple skcipher_request instances), pointer + * to plaintext and ciphertext, asynchronous callback function, etc. It acts + * as a handle to the skcipher_request_* API calls in a similar way as + * skcipher handle to the crypto_skcipher_* API calls. + */ + +/** + * crypto_skcipher_reqsize() - obtain size of the request data structure + * @tfm: cipher handle + * + * Return: number of bytes + */ +static inline unsigned int crypto_skcipher_reqsize(struct crypto_skcipher *tfm) +{ + return tfm->reqsize; +} + +/** + * skcipher_request_set_tfm() - update cipher handle reference in request + * @req: request handle to be modified + * @tfm: cipher handle that shall be added to the request handle + * + * Allow the caller to replace the existing skcipher handle in the request + * data structure with a different one. + */ +static inline void skcipher_request_set_tfm(struct skcipher_request *req, + struct crypto_skcipher *tfm) +{ + req->base.tfm = crypto_skcipher_tfm(tfm); +} + +static inline void skcipher_request_set_sync_tfm(struct skcipher_request *req, + struct crypto_sync_skcipher *tfm) +{ + skcipher_request_set_tfm(req, &tfm->base); +} + +static inline struct skcipher_request *skcipher_request_cast( + struct crypto_async_request *req) +{ + return container_of(req, struct skcipher_request, base); +} + +/** + * skcipher_request_alloc() - allocate request data structure + * @tfm: cipher handle to be registered with the request + * @gfp: memory allocation flag that is handed to kmalloc by the API call. + * + * Allocate the request data structure that must be used with the skcipher + * encrypt and decrypt API calls. During the allocation, the provided skcipher + * handle is registered in the request data structure. 
+ * + * Return: allocated request handle in case of success, or NULL if out of memory + */ +static inline struct skcipher_request *skcipher_request_alloc( + struct crypto_skcipher *tfm, gfp_t gfp) +{ + struct skcipher_request *req; + + req = kmalloc(sizeof(struct skcipher_request) + + crypto_skcipher_reqsize(tfm), gfp); + + if (likely(req)) + skcipher_request_set_tfm(req, tfm); + + return req; +} + +/** + * skcipher_request_free() - zeroize and free request data structure + * @req: request data structure cipher handle to be freed + */ +static inline void skcipher_request_free(struct skcipher_request *req) +{ + kfree_sensitive(req); +} + +static inline void skcipher_request_zero(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + + memzero_explicit(req, sizeof(*req) + crypto_skcipher_reqsize(tfm)); +} + +/** + * skcipher_request_set_callback() - set asynchronous callback function + * @req: request handle + * @flags: specify zero or an ORing of the flags + * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and + * increase the wait queue beyond the initial maximum size; + * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep + * @compl: callback function pointer to be registered with the request handle + * @data: The data pointer refers to memory that is not used by the kernel + * crypto API, but provided to the callback function for it to use. Here, + * the caller can provide a reference to memory the callback function can + * operate on. As the callback function is invoked asynchronously to the + * related functionality, it may need to access data structures of the + * related functionality which can be referenced using this pointer. The + * callback function can access the memory via the "data" field in the + * crypto_async_request data structure provided to the callback function. + * + * This function allows setting the callback function that is triggered once the + * cipher operation completes. + * + * The callback function is registered with the skcipher_request handle and + * must comply with the following template:: + * + * void callback_function(struct crypto_async_request *req, int error) + */ +static inline void skcipher_request_set_callback(struct skcipher_request *req, + u32 flags, + crypto_completion_t compl, + void *data) +{ + req->base.complete = compl; + req->base.data = data; + req->base.flags = flags; +} + +/** + * skcipher_request_set_crypt() - set data buffers + * @req: request handle + * @src: source scatter / gather list + * @dst: destination scatter / gather list + * @cryptlen: number of bytes to process from @src + * @iv: IV for the cipher operation which must comply with the IV size defined + * by crypto_skcipher_ivsize + * + * This function allows setting of the source data and destination data + * scatter / gather lists. + * + * For encryption, the source is treated as the plaintext and the + * destination is the ciphertext. For a decryption operation, the use is + * reversed - the source is the ciphertext and the destination is the plaintext. 
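+ *
+ * A hedged, minimal sketch of how the calls in this header fit together for
+ * a caller that simply waits for completion ("key", "keylen", "iv", "sg_in",
+ * "sg_out" and "nbytes" are assumed to have been prepared by the caller;
+ * DECLARE_CRYPTO_WAIT(), crypto_req_done() and crypto_wait_req() come from
+ * <linux/crypto.h>; error handling is omitted):
+ *
+ *	DECLARE_CRYPTO_WAIT(wait);
+ *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
+ *	struct skcipher_request *req;
+ *
+ *	crypto_skcipher_setkey(tfm, key, keylen);
+ *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
+ *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ *				      CRYPTO_TFM_REQ_MAY_SLEEP,
+ *				      crypto_req_done, &wait);
+ *	skcipher_request_set_crypt(req, sg_in, sg_out, nbytes, iv);
+ *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+ *	skcipher_request_free(req);
+ *	crypto_free_skcipher(tfm);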
+ */ +static inline void skcipher_request_set_crypt( + struct skcipher_request *req, + struct scatterlist *src, struct scatterlist *dst, + unsigned int cryptlen, void *iv) +{ + req->src = src; + req->dst = dst; + req->cryptlen = cryptlen; + req->iv = iv; +} + +#endif /* _CRYPTO_SKCIPHER_H */ + diff --git a/include/crypto/sm2.h b/include/crypto/sm2.h new file mode 100644 index 000000000..af452556d --- /dev/null +++ b/include/crypto/sm2.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * sm2.h - SM2 asymmetric public-key algorithm + * as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012 SM2 and + * described at https://tools.ietf.org/html/draft-shen-sm2-ecdsa-02 + * + * Copyright (c) 2020, Alibaba Group. + * Written by Tianjia Zhang <tianjia.zhang@linux.alibaba.com> + */ + +#ifndef _CRYPTO_SM2_H +#define _CRYPTO_SM2_H + +#include <crypto/sm3.h> +#include <crypto/akcipher.h> + +/* The default user id as specified in GM/T 0009-2012 */ +#define SM2_DEFAULT_USERID "1234567812345678" +#define SM2_DEFAULT_USERID_LEN 16 + +extern int sm2_compute_z_digest(struct crypto_akcipher *tfm, + const unsigned char *id, size_t id_len, + unsigned char dgst[SM3_DIGEST_SIZE]); + +#endif /* _CRYPTO_SM2_H */ diff --git a/include/crypto/sm3.h b/include/crypto/sm3.h new file mode 100644 index 000000000..1f021ad05 --- /dev/null +++ b/include/crypto/sm3.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Common values for SM3 algorithm + * + * Copyright (C) 2017 ARM Limited or its affiliates. + * Copyright (C) 2017 Gilad Ben-Yossef <gilad@benyossef.com> + * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com> + */ + +#ifndef _CRYPTO_SM3_H +#define _CRYPTO_SM3_H + +#include <linux/types.h> + +#define SM3_DIGEST_SIZE 32 +#define SM3_BLOCK_SIZE 64 + +#define SM3_T1 0x79CC4519 +#define SM3_T2 0x7A879D8A + +#define SM3_IVA 0x7380166f +#define SM3_IVB 0x4914b2b9 +#define SM3_IVC 0x172442d7 +#define SM3_IVD 0xda8a0600 +#define SM3_IVE 0xa96f30bc +#define SM3_IVF 0x163138aa +#define SM3_IVG 0xe38dee4d +#define SM3_IVH 0xb0fb0e4e + +extern const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE]; + +struct sm3_state { + u32 state[SM3_DIGEST_SIZE / 4]; + u64 count; + u8 buffer[SM3_BLOCK_SIZE]; +}; + +/* + * Stand-alone implementation of the SM3 algorithm. It is designed to + * have as little dependencies as possible so it can be used in the + * kexec_file purgatory. In other cases you should generally use the + * hash APIs from include/crypto/hash.h. Especially when hashing large + * amounts of data as those APIs may be hw-accelerated. + * + * For details see lib/crypto/sm3.c + */ + +static inline void sm3_init(struct sm3_state *sctx) +{ + sctx->state[0] = SM3_IVA; + sctx->state[1] = SM3_IVB; + sctx->state[2] = SM3_IVC; + sctx->state[3] = SM3_IVD; + sctx->state[4] = SM3_IVE; + sctx->state[5] = SM3_IVF; + sctx->state[6] = SM3_IVG; + sctx->state[7] = SM3_IVH; + sctx->count = 0; +} + +void sm3_update(struct sm3_state *sctx, const u8 *data, unsigned int len); +void sm3_final(struct sm3_state *sctx, u8 *out); + +#endif diff --git a/include/crypto/sm3_base.h b/include/crypto/sm3_base.h new file mode 100644 index 000000000..2f3a32ab9 --- /dev/null +++ b/include/crypto/sm3_base.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * sm3_base.h - core logic for SM3 implementations + * + * Copyright (C) 2017 ARM Limited or its affiliates. 
+ * Written by Gilad Ben-Yossef <gilad@benyossef.com> + */ + +#ifndef _CRYPTO_SM3_BASE_H +#define _CRYPTO_SM3_BASE_H + +#include <crypto/internal/hash.h> +#include <crypto/sm3.h> +#include <linux/crypto.h> +#include <linux/module.h> +#include <linux/string.h> +#include <asm/unaligned.h> + +typedef void (sm3_block_fn)(struct sm3_state *sst, u8 const *src, int blocks); + +static inline int sm3_base_init(struct shash_desc *desc) +{ + struct sm3_state *sctx = shash_desc_ctx(desc); + + sctx->state[0] = SM3_IVA; + sctx->state[1] = SM3_IVB; + sctx->state[2] = SM3_IVC; + sctx->state[3] = SM3_IVD; + sctx->state[4] = SM3_IVE; + sctx->state[5] = SM3_IVF; + sctx->state[6] = SM3_IVG; + sctx->state[7] = SM3_IVH; + sctx->count = 0; + + return 0; +} + +static inline int sm3_base_do_update(struct shash_desc *desc, + const u8 *data, + unsigned int len, + sm3_block_fn *block_fn) +{ + struct sm3_state *sctx = shash_desc_ctx(desc); + unsigned int partial = sctx->count % SM3_BLOCK_SIZE; + + sctx->count += len; + + if (unlikely((partial + len) >= SM3_BLOCK_SIZE)) { + int blocks; + + if (partial) { + int p = SM3_BLOCK_SIZE - partial; + + memcpy(sctx->buffer + partial, data, p); + data += p; + len -= p; + + block_fn(sctx, sctx->buffer, 1); + } + + blocks = len / SM3_BLOCK_SIZE; + len %= SM3_BLOCK_SIZE; + + if (blocks) { + block_fn(sctx, data, blocks); + data += blocks * SM3_BLOCK_SIZE; + } + partial = 0; + } + if (len) + memcpy(sctx->buffer + partial, data, len); + + return 0; +} + +static inline int sm3_base_do_finalize(struct shash_desc *desc, + sm3_block_fn *block_fn) +{ + const int bit_offset = SM3_BLOCK_SIZE - sizeof(__be64); + struct sm3_state *sctx = shash_desc_ctx(desc); + __be64 *bits = (__be64 *)(sctx->buffer + bit_offset); + unsigned int partial = sctx->count % SM3_BLOCK_SIZE; + + sctx->buffer[partial++] = 0x80; + if (partial > bit_offset) { + memset(sctx->buffer + partial, 0x0, SM3_BLOCK_SIZE - partial); + partial = 0; + + block_fn(sctx, sctx->buffer, 1); + } + + memset(sctx->buffer + partial, 0x0, bit_offset - partial); + *bits = cpu_to_be64(sctx->count << 3); + block_fn(sctx, sctx->buffer, 1); + + return 0; +} + +static inline int sm3_base_finish(struct shash_desc *desc, u8 *out) +{ + struct sm3_state *sctx = shash_desc_ctx(desc); + __be32 *digest = (__be32 *)out; + int i; + + for (i = 0; i < SM3_DIGEST_SIZE / sizeof(__be32); i++) + put_unaligned_be32(sctx->state[i], digest++); + + memzero_explicit(sctx, sizeof(*sctx)); + return 0; +} + +#endif /* _CRYPTO_SM3_BASE_H */ diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h new file mode 100644 index 000000000..9656a9a40 --- /dev/null +++ b/include/crypto/sm4.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Common values for the SM4 algorithm + * Copyright (C) 2018 ARM Limited or its affiliates. + * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com> + */ + +#ifndef _CRYPTO_SM4_H +#define _CRYPTO_SM4_H + +#include <linux/types.h> +#include <linux/crypto.h> + +#define SM4_KEY_SIZE 16 +#define SM4_BLOCK_SIZE 16 +#define SM4_RKEY_WORDS 32 + +struct sm4_ctx { + u32 rkey_enc[SM4_RKEY_WORDS]; + u32 rkey_dec[SM4_RKEY_WORDS]; +}; + +extern const u32 crypto_sm4_fk[]; +extern const u32 crypto_sm4_ck[]; +extern const u8 crypto_sm4_sbox[]; + +/** + * sm4_expandkey - Expands the SM4 key as described in GB/T 32907-2016 + * @ctx: The location where the computed key will be stored. + * @in_key: The supplied key. + * @key_len: The length of the supplied key. + * + * Returns 0 on success. 
The function fails only if an invalid key size (or + * pointer) is supplied. + */ +int sm4_expandkey(struct sm4_ctx *ctx, const u8 *in_key, + unsigned int key_len); + +/** + * sm4_crypt_block - Encrypt or decrypt a single SM4 block + * @rk: The rkey_enc for encrypt or rkey_dec for decrypt + * @out: Buffer to store output data + * @in: Buffer containing the input data + */ +void sm4_crypt_block(const u32 *rk, u8 *out, const u8 *in); + +#endif diff --git a/include/crypto/streebog.h b/include/crypto/streebog.h new file mode 100644 index 000000000..cae1b4a01 --- /dev/null +++ b/include/crypto/streebog.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0+ OR BSD-2-Clause */ +/* + * Copyright (c) 2013 Alexey Degtyarev <alexey@renatasystems.org> + * Copyright (c) 2018 Vitaly Chikunov <vt@altlinux.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#ifndef _CRYPTO_STREEBOG_H_ +#define _CRYPTO_STREEBOG_H_ + +#include <linux/types.h> + +#define STREEBOG256_DIGEST_SIZE 32 +#define STREEBOG512_DIGEST_SIZE 64 +#define STREEBOG_BLOCK_SIZE 64 + +struct streebog_uint512 { + __le64 qword[8]; +}; + +struct streebog_state { + union { + u8 buffer[STREEBOG_BLOCK_SIZE]; + struct streebog_uint512 m; + }; + struct streebog_uint512 hash; + struct streebog_uint512 h; + struct streebog_uint512 N; + struct streebog_uint512 Sigma; + size_t fillsize; +}; + +#endif /* !_CRYPTO_STREEBOG_H_ */ diff --git a/include/crypto/twofish.h b/include/crypto/twofish.h new file mode 100644 index 000000000..f6b307a58 --- /dev/null +++ b/include/crypto/twofish.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_TWOFISH_H +#define _CRYPTO_TWOFISH_H + +#include <linux/types.h> + +#define TF_MIN_KEY_SIZE 16 +#define TF_MAX_KEY_SIZE 32 +#define TF_BLOCK_SIZE 16 + +struct crypto_tfm; + +/* Structure for an expanded Twofish key. s contains the key-dependent + * S-boxes composed with the MDS matrix; w contains the eight "whitening" + * subkeys, K[0] through K[7]. k holds the remaining, "round" subkeys. Note + * that k[i] corresponds to what the Twofish paper calls K[i+8]. */ +struct twofish_ctx { + u32 s[4][256], w[8], k[32]; +}; + +int __twofish_setkey(struct twofish_ctx *ctx, const u8 *key, + unsigned int key_len); +int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len); + +#endif diff --git a/include/crypto/xts.h b/include/crypto/xts.h new file mode 100644 index 000000000..0f8dba69f --- /dev/null +++ b/include/crypto/xts.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_XTS_H +#define _CRYPTO_XTS_H + +#include <crypto/b128ops.h> +#include <crypto/internal/skcipher.h> +#include <linux/fips.h> + +#define XTS_BLOCK_SIZE 16 + +static inline int xts_check_key(struct crypto_tfm *tfm, + const u8 *key, unsigned int keylen) +{ + /* + * key consists of keys of equal size concatenated, therefore + * the length must be even. + */ + if (keylen % 2) + return -EINVAL; + + /* ensure that the AES and tweak key are not identical */ + if (fips_enabled && !crypto_memneq(key, key + (keylen / 2), keylen / 2)) + return -EINVAL; + + return 0; +} + +static inline int xts_verify_key(struct crypto_skcipher *tfm, + const u8 *key, unsigned int keylen) +{ + /* + * key consists of keys of equal size concatenated, therefore + * the length must be even. 
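+	 * For example, a 64-byte xts(aes) key would be interpreted as two
+	 * concatenated 32-byte AES-256 subkeys.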
+	 */
+	if (keylen % 2)
+		return -EINVAL;
+
+	/* ensure that the AES and tweak key are not identical */
+	if ((fips_enabled || (crypto_skcipher_get_flags(tfm) &
+			      CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) &&
+	    !crypto_memneq(key, key + (keylen / 2), keylen / 2))
+		return -EINVAL;
+
+	return 0;
+}
+
+#endif /* _CRYPTO_XTS_H */