Diffstat (limited to 'include/crypto')
72 files changed, 8745 insertions, 0 deletions
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h new file mode 100644 index 000000000..cb3d6b1c6 --- /dev/null +++ b/include/crypto/acompress.h @@ -0,0 +1,296 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Asynchronous Compression operations + * + * Copyright (c) 2016, Intel Corporation + * Authors: Weigang Li <weigang.li@intel.com> + * Giovanni Cabiddu <giovanni.cabiddu@intel.com> + */ +#ifndef _CRYPTO_ACOMP_H +#define _CRYPTO_ACOMP_H +#include <linux/crypto.h> + +#define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001 + +/** + * struct acomp_req - asynchronous (de)compression request + * + * @base: Common attributes for asynchronous crypto requests + * @src: Source Data + * @dst: Destination data + * @slen: Size of the input buffer + * @dlen: Size of the output buffer and number of bytes produced + * @flags: Internal flags + * @__ctx: Start of private context data + */ +struct acomp_req { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int slen; + unsigned int dlen; + u32 flags; + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +/** + * struct crypto_acomp - user-instantiated objects which encapsulate + * algorithms and core processing logic + * + * @compress: Function performs a compress operation + * @decompress: Function performs a de-compress operation + * @dst_free: Frees destination buffer if allocated inside the + * algorithm + * @reqsize: Context size for (de)compression requests + * @base: Common crypto API algorithm data structure + */ +struct crypto_acomp { + int (*compress)(struct acomp_req *req); + int (*decompress)(struct acomp_req *req); + void (*dst_free)(struct scatterlist *dst); + unsigned int reqsize; + struct crypto_tfm base; +}; + +/** + * struct acomp_alg - asynchronous compression algorithm + * + * @compress: Function performs a compress operation + * @decompress: Function performs a de-compress operation + * @dst_free: Frees destination buffer if allocated inside the algorithm + * @init: Initialize the cryptographic transformation object. + * This function is used to initialize the cryptographic + * transformation object. This function is called only once at + * the instantiation time, right after the transformation context + * was allocated. In case the cryptographic hardware has some + * special requirements which need to be handled by software, this + * function shall check for the precise requirement of the + * transformation and put any software fallbacks in place. + * @exit: Deinitialize the cryptographic transformation object. This is a + * counterpart to @init, used to remove various changes set in + * @init. + * + * @reqsize: Context size for (de)compression requests + * @base: Common crypto API algorithm data structure + */ +struct acomp_alg { + int (*compress)(struct acomp_req *req); + int (*decompress)(struct acomp_req *req); + void (*dst_free)(struct scatterlist *dst); + int (*init)(struct crypto_acomp *tfm); + void (*exit)(struct crypto_acomp *tfm); + unsigned int reqsize; + struct crypto_alg base; +}; + +/** + * DOC: Asynchronous Compression API + * + * The Asynchronous Compression API is used with the algorithms of type + * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto) + */ + +/** + * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * compression algorithm e.g. 
"deflate" + * @type: specifies the type of the algorithm + * @mask: specifies the mask for the algorithm + * + * Allocate a handle for a compression algorithm. The returned struct + * crypto_acomp is the handle that is required for any subsequent + * API invocation for the compression operations. + * + * Return: allocated handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type, + u32 mask); +/** + * crypto_alloc_acomp_node() -- allocate ACOMPRESS tfm handle with desired NUMA node + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * compression algorithm e.g. "deflate" + * @type: specifies the type of the algorithm + * @mask: specifies the mask for the algorithm + * @node: specifies the NUMA node the ZIP hardware belongs to + * + * Allocate a handle for a compression algorithm. Drivers should try to use + * (de)compressors on the specified NUMA node. + * The returned struct crypto_acomp is the handle that is required for any + * subsequent API invocation for the compression operations. + * + * Return: allocated handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type, + u32 mask, int node); + +static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm) +{ + return &tfm->base; +} + +static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg) +{ + return container_of(alg, struct acomp_alg, base); +} + +static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_acomp, base); +} + +static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm) +{ + return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg); +} + +static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm) +{ + return tfm->reqsize; +} + +static inline void acomp_request_set_tfm(struct acomp_req *req, + struct crypto_acomp *tfm) +{ + req->base.tfm = crypto_acomp_tfm(tfm); +} + +static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req) +{ + return __crypto_acomp_tfm(req->base.tfm); +} + +/** + * crypto_free_acomp() -- free ACOMPRESS tfm handle + * + * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp() + * + * If @tfm is a NULL or error pointer, this function does nothing. 
+ */ +static inline void crypto_free_acomp(struct crypto_acomp *tfm) +{ + crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm)); +} + +static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_ACOMPRESS; + mask |= CRYPTO_ALG_TYPE_ACOMPRESS_MASK; + + return crypto_has_alg(alg_name, type, mask); +} + +/** + * acomp_request_alloc() -- allocates asynchronous (de)compression request + * + * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp() + * + * Return: allocated handle in case of success or NULL in case of an error + */ +struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm); + +/** + * acomp_request_free() -- zeroize and free asynchronous (de)compression + * request as well as the output buffer if allocated + * inside the algorithm + * + * @req: request to free + */ +void acomp_request_free(struct acomp_req *req); + +/** + * acomp_request_set_callback() -- Sets an asynchronous callback + * + * Callback will be called when an asynchronous operation on a given + * request is finished. + * + * @req: request that the callback will be set for + * @flgs: specify for instance if the operation may backlog + * @cmlp: callback which will be called + * @data: private data used by the caller + */ +static inline void acomp_request_set_callback(struct acomp_req *req, + u32 flgs, + crypto_completion_t cmpl, + void *data) +{ + req->base.complete = cmpl; + req->base.data = data; + req->base.flags = flgs; +} + +/** + * acomp_request_set_params() -- Sets request parameters + * + * Sets parameters required by an acomp operation + * + * @req: asynchronous compress request + * @src: pointer to input buffer scatterlist + * @dst: pointer to output buffer scatterlist. If this is NULL, the + * acomp layer will allocate the output memory + * @slen: size of the input buffer + * @dlen: size of the output buffer. 
If dst is NULL, this can be used by + * the user to specify the maximum amount of memory to allocate + */ +static inline void acomp_request_set_params(struct acomp_req *req, + struct scatterlist *src, + struct scatterlist *dst, + unsigned int slen, + unsigned int dlen) +{ + req->src = src; + req->dst = dst; + req->slen = slen; + req->dlen = dlen; + + if (!req->dst) + req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT; +} + +/** + * crypto_acomp_compress() -- Invoke asynchronous compress operation + * + * Function invokes the asynchronous compress operation + * + * @req: asynchronous compress request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_acomp_compress(struct acomp_req *req) +{ + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + struct crypto_alg *alg = tfm->base.__crt_alg; + unsigned int slen = req->slen; + int ret; + + crypto_stats_get(alg); + ret = tfm->compress(req); + crypto_stats_compress(slen, ret, alg); + return ret; +} + +/** + * crypto_acomp_decompress() -- Invoke asynchronous decompress operation + * + * Function invokes the asynchronous decompress operation + * + * @req: asynchronous compress request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_acomp_decompress(struct acomp_req *req) +{ + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + struct crypto_alg *alg = tfm->base.__crt_alg; + unsigned int slen = req->slen; + int ret; + + crypto_stats_get(alg); + ret = tfm->decompress(req); + crypto_stats_decompress(slen, ret, alg); + return ret; +} + +#endif diff --git a/include/crypto/aead.h b/include/crypto/aead.h new file mode 100644 index 000000000..fe956629f --- /dev/null +++ b/include/crypto/aead.h @@ -0,0 +1,523 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * AEAD: Authenticated Encryption with Associated Data + * + * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#ifndef _CRYPTO_AEAD_H +#define _CRYPTO_AEAD_H + +#include <linux/crypto.h> +#include <linux/kernel.h> +#include <linux/slab.h> + +/** + * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API + * + * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD + * (listed as type "aead" in /proc/crypto) + * + * The most prominent examples for this type of encryption is GCM and CCM. + * However, the kernel supports other types of AEAD ciphers which are defined + * with the following cipher string: + * + * authenc(keyed message digest, block cipher) + * + * For example: authenc(hmac(sha256), cbc(aes)) + * + * The example code provided for the symmetric key cipher operation + * applies here as well. Naturally all *skcipher* symbols must be exchanged + * the *aead* pendants discussed in the following. In addition, for the AEAD + * operation, the aead_request_set_ad function must be used to set the + * pointer to the associated data memory location before performing the + * encryption or decryption operation. In case of an encryption, the associated + * data memory is filled during the encryption operation. For decryption, the + * associated data memory must contain data that is used to verify the integrity + * of the decrypted data. Another deviation from the asynchronous block cipher + * operation is that the caller should explicitly check for -EBADMSG of the + * crypto_aead_decrypt. That error indicates an authentication error, i.e. + * a breach in the integrity of the message. 
In essence, that -EBADMSG error + * code is the key bonus an AEAD cipher has over "standard" block chaining + * modes. + * + * Memory Structure: + * + * The source scatterlist must contain the concatenation of + * associated data || plaintext or ciphertext. + * + * The destination scatterlist has the same layout, except that the plaintext + * (resp. ciphertext) will grow (resp. shrink) by the authentication tag size + * during encryption (resp. decryption). + * + * In-place encryption/decryption is enabled by using the same scatterlist + * pointer for both the source and destination. + * + * Even in the out-of-place case, space must be reserved in the destination for + * the associated data, even though it won't be written to. This makes the + * in-place and out-of-place cases more consistent. It is permissible for the + * "destination" associated data to alias the "source" associated data. + * + * As with the other scatterlist crypto APIs, zero-length scatterlist elements + * are not allowed in the used part of the scatterlist. Thus, if there is no + * associated data, the first element must point to the plaintext/ciphertext. + * + * To meet the needs of IPsec, a special quirk applies to rfc4106, rfc4309, + * rfc4543, and rfc7539esp ciphers. For these ciphers, the final 'ivsize' bytes + * of the associated data buffer must contain a second copy of the IV. This is + * in addition to the copy passed to aead_request_set_crypt(). These two IV + * copies must not differ; different implementations of the same algorithm may + * behave differently in that case. Note that the algorithm might not actually + * treat the IV as associated data; nevertheless the length passed to + * aead_request_set_ad() must include it. + */ + +struct crypto_aead; + +/** + * struct aead_request - AEAD request + * @base: Common attributes for async crypto requests + * @assoclen: Length in bytes of associated data for authentication + * @cryptlen: Length of data to be encrypted or decrypted + * @iv: Initialisation vector + * @src: Source data + * @dst: Destination data + * @__ctx: Start of private context data + */ +struct aead_request { + struct crypto_async_request base; + + unsigned int assoclen; + unsigned int cryptlen; + + u8 *iv; + + struct scatterlist *src; + struct scatterlist *dst; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +/** + * struct aead_alg - AEAD cipher definition + * @maxauthsize: Set the maximum authentication tag size supported by the + * transformation. A transformation may support smaller tag sizes. + * As the authentication tag is a message digest to ensure the + * integrity of the encrypted data, a consumer typically wants the + * largest authentication tag possible as defined by this + * variable. + * @setauthsize: Set authentication size for the AEAD transformation. This + * function is used to specify the consumer requested size of the + * authentication tag to be either generated by the transformation + * during encryption or the size of the authentication tag to be + * supplied during the decryption operation. This function is also + * responsible for checking the authentication tag size for + * validity. + * @setkey: see struct skcipher_alg + * @encrypt: see struct skcipher_alg + * @decrypt: see struct skcipher_alg + * @ivsize: see struct skcipher_alg + * @chunksize: see struct skcipher_alg + * @init: Initialize the cryptographic transformation object. This function + * is used to initialize the cryptographic transformation object. 
+ * This function is called only once at the instantiation time, right + * after the transformation context was allocated. In case the + * cryptographic hardware has some special requirements which need to + * be handled by software, this function shall check for the precise + * requirement of the transformation and put any software fallbacks + * in place. + * @exit: Deinitialize the cryptographic transformation object. This is a + * counterpart to @init, used to remove various changes set in + * @init. + * @base: Definition of a generic crypto cipher algorithm. + * + * All fields except @ivsize is mandatory and must be filled. + */ +struct aead_alg { + int (*setkey)(struct crypto_aead *tfm, const u8 *key, + unsigned int keylen); + int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize); + int (*encrypt)(struct aead_request *req); + int (*decrypt)(struct aead_request *req); + int (*init)(struct crypto_aead *tfm); + void (*exit)(struct crypto_aead *tfm); + + unsigned int ivsize; + unsigned int maxauthsize; + unsigned int chunksize; + + struct crypto_alg base; +}; + +struct crypto_aead { + unsigned int authsize; + unsigned int reqsize; + + struct crypto_tfm base; +}; + +static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_aead, base); +} + +/** + * crypto_alloc_aead() - allocate AEAD cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * AEAD cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for an AEAD. The returned struct + * crypto_aead is the cipher handle that is required for any subsequent + * API invocation for that AEAD. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask); + +static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) +{ + return &tfm->base; +} + +/** + * crypto_free_aead() - zeroize and free aead handle + * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. + */ +static inline void crypto_free_aead(struct crypto_aead *tfm) +{ + crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm)); +} + +static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm) +{ + return container_of(crypto_aead_tfm(tfm)->__crt_alg, + struct aead_alg, base); +} + +static inline unsigned int crypto_aead_alg_ivsize(struct aead_alg *alg) +{ + return alg->ivsize; +} + +/** + * crypto_aead_ivsize() - obtain IV size + * @tfm: cipher handle + * + * The size of the IV for the aead referenced by the cipher handle is + * returned. This IV size may be zero if the cipher does not need an IV. + * + * Return: IV size in bytes + */ +static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm) +{ + return crypto_aead_alg_ivsize(crypto_aead_alg(tfm)); +} + +/** + * crypto_aead_authsize() - obtain maximum authentication data size + * @tfm: cipher handle + * + * The maximum size of the authentication data for the AEAD cipher referenced + * by the AEAD cipher handle is returned. The authentication data size may be + * zero if the cipher implements a hard-coded maximum. + * + * The authentication data may also be known as "tag value". 
+ * + * Return: authentication data size / tag size in bytes + */ +static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm) +{ + return tfm->authsize; +} + +static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg) +{ + return alg->maxauthsize; +} + +static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead) +{ + return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead)); +} + +/** + * crypto_aead_blocksize() - obtain block size of cipher + * @tfm: cipher handle + * + * The block size for the AEAD referenced with the cipher handle is returned. + * The caller may use that information to allocate appropriate memory for the + * data returned by the encryption or decryption operation + * + * Return: block size of cipher + */ +static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm)); +} + +static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm)); +} + +static inline u32 crypto_aead_get_flags(struct crypto_aead *tfm) +{ + return crypto_tfm_get_flags(crypto_aead_tfm(tfm)); +} + +static inline void crypto_aead_set_flags(struct crypto_aead *tfm, u32 flags) +{ + crypto_tfm_set_flags(crypto_aead_tfm(tfm), flags); +} + +static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags) +{ + crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags); +} + +/** + * crypto_aead_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the AEAD referenced by the cipher + * handle. + * + * Note, the key length determines the cipher type. Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_aead_setkey(struct crypto_aead *tfm, + const u8 *key, unsigned int keylen); + +/** + * crypto_aead_setauthsize() - set authentication data size + * @tfm: cipher handle + * @authsize: size of the authentication data / tag in bytes + * + * Set the authentication data size / tag size. AEAD requires an authentication + * tag (or MAC) in addition to the associated data. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize); + +static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) +{ + return __crypto_aead_cast(req->base.tfm); +} + +/** + * crypto_aead_encrypt() - encrypt plaintext + * @req: reference to the aead_request handle that holds all information + * needed to perform the cipher operation + * + * Encrypt plaintext data using the aead_request handle. That data structure + * and how it is filled with data is discussed with the aead_request_* + * functions. + * + * IMPORTANT NOTE The encryption operation creates the authentication data / + * tag. That data is concatenated with the created ciphertext. + * The ciphertext memory size is therefore the given number of + * block cipher blocks + the size defined by the + * crypto_aead_setauthsize invocation. The caller must ensure + * that sufficient memory is available for the ciphertext and + * the authentication tag. 
+ * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ +int crypto_aead_encrypt(struct aead_request *req); + +/** + * crypto_aead_decrypt() - decrypt ciphertext + * @req: reference to the aead_request handle that holds all information + * needed to perform the cipher operation + * + * Decrypt ciphertext data using the aead_request handle. That data structure + * and how it is filled with data is discussed with the aead_request_* + * functions. + * + * IMPORTANT NOTE The caller must concatenate the ciphertext followed by the + * authentication data / tag. That authentication data / tag + * must have the size defined by the crypto_aead_setauthsize + * invocation. + * + * + * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD + * cipher operation performs the authentication of the data during the + * decryption operation. Therefore, the function returns this error if + * the authentication of the ciphertext was unsuccessful (i.e. the + * integrity of the ciphertext or the associated data was violated); + * < 0 if an error occurred. + */ +int crypto_aead_decrypt(struct aead_request *req); + +/** + * DOC: Asynchronous AEAD Request Handle + * + * The aead_request data structure contains all pointers to data required for + * the AEAD cipher operation. This includes the cipher handle (which can be + * used by multiple aead_request instances), pointer to plaintext and + * ciphertext, asynchronous callback function, etc. It acts as a handle to the + * aead_request_* API calls in a similar way as AEAD handle to the + * crypto_aead_* API calls. + */ + +/** + * crypto_aead_reqsize() - obtain size of the request data structure + * @tfm: cipher handle + * + * Return: number of bytes + */ +static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm) +{ + return tfm->reqsize; +} + +/** + * aead_request_set_tfm() - update cipher handle reference in request + * @req: request handle to be modified + * @tfm: cipher handle that shall be added to the request handle + * + * Allow the caller to replace the existing aead handle in the request + * data structure with a different one. + */ +static inline void aead_request_set_tfm(struct aead_request *req, + struct crypto_aead *tfm) +{ + req->base.tfm = crypto_aead_tfm(tfm); +} + +/** + * aead_request_alloc() - allocate request data structure + * @tfm: cipher handle to be registered with the request + * @gfp: memory allocation flag that is handed to kmalloc by the API call. + * + * Allocate the request data structure that must be used with the AEAD + * encrypt and decrypt API calls. During the allocation, the provided aead + * handle is registered in the request data structure. 
+ * + * Return: allocated request handle in case of success, or NULL if out of memory + */ +static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, + gfp_t gfp) +{ + struct aead_request *req; + + req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp); + + if (likely(req)) + aead_request_set_tfm(req, tfm); + + return req; +} + +/** + * aead_request_free() - zeroize and free request data structure + * @req: request data structure cipher handle to be freed + */ +static inline void aead_request_free(struct aead_request *req) +{ + kfree_sensitive(req); +} + +/** + * aead_request_set_callback() - set asynchronous callback function + * @req: request handle + * @flags: specify zero or an ORing of the flags + * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and + * increase the wait queue beyond the initial maximum size; + * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep + * @compl: callback function pointer to be registered with the request handle + * @data: The data pointer refers to memory that is not used by the kernel + * crypto API, but provided to the callback function for it to use. Here, + * the caller can provide a reference to memory the callback function can + * operate on. As the callback function is invoked asynchronously to the + * related functionality, it may need to access data structures of the + * related functionality which can be referenced using this pointer. The + * callback function can access the memory via the "data" field in the + * crypto_async_request data structure provided to the callback function. + * + * Setting the callback function that is triggered once the cipher operation + * completes + * + * The callback function is registered with the aead_request handle and + * must comply with the following template:: + * + * void callback_function(struct crypto_async_request *req, int error) + */ +static inline void aead_request_set_callback(struct aead_request *req, + u32 flags, + crypto_completion_t compl, + void *data) +{ + req->base.complete = compl; + req->base.data = data; + req->base.flags = flags; +} + +/** + * aead_request_set_crypt - set data buffers + * @req: request handle + * @src: source scatter / gather list + * @dst: destination scatter / gather list + * @cryptlen: number of bytes to process from @src + * @iv: IV for the cipher operation which must comply with the IV size defined + * by crypto_aead_ivsize() + * + * Setting the source data and destination data scatter / gather lists which + * hold the associated data concatenated with the plaintext or ciphertext. See + * below for the authentication tag. + * + * For encryption, the source is treated as the plaintext and the + * destination is the ciphertext. For a decryption operation, the use is + * reversed - the source is the ciphertext and the destination is the plaintext. + * + * The memory structure for cipher operation has the following structure: + * + * - AEAD encryption input: assoc data || plaintext + * - AEAD encryption output: assoc data || cipherntext || auth tag + * - AEAD decryption input: assoc data || ciphertext || auth tag + * - AEAD decryption output: assoc data || plaintext + * + * Albeit the kernel requires the presence of the AAD buffer, however, + * the kernel does not fill the AAD buffer in the output case. If the + * caller wants to have that data buffer filled, the caller must either + * use an in-place cipher operation (i.e. same memory location for + * input/output memory location). 
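+ *
+ * A hedged usage sketch for encryption (req, sg_src, sg_dst, plaintext_len,
+ * iv and assoclen are placeholder names): @src covers the associated data
+ * followed by plaintext_len bytes of plaintext, and @dst must leave room
+ * for the authentication tag::
+ *
+ *	aead_request_set_crypt(req, sg_src, sg_dst, plaintext_len, iv);
+ *	aead_request_set_ad(req, assoclen);
+ *	err = crypto_aead_encrypt(req);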
+ */ +static inline void aead_request_set_crypt(struct aead_request *req, + struct scatterlist *src, + struct scatterlist *dst, + unsigned int cryptlen, u8 *iv) +{ + req->src = src; + req->dst = dst; + req->cryptlen = cryptlen; + req->iv = iv; +} + +/** + * aead_request_set_ad - set associated data information + * @req: request handle + * @assoclen: number of bytes in associated data + * + * Setting the AD information. This function sets the length of + * the associated data. + */ +static inline void aead_request_set_ad(struct aead_request *req, + unsigned int assoclen) +{ + req->assoclen = assoclen; +} + +#endif /* _CRYPTO_AEAD_H */ diff --git a/include/crypto/aes.h b/include/crypto/aes.h new file mode 100644 index 000000000..209072970 --- /dev/null +++ b/include/crypto/aes.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for AES algorithms + */ + +#ifndef _CRYPTO_AES_H +#define _CRYPTO_AES_H + +#include <linux/types.h> +#include <linux/crypto.h> + +#define AES_MIN_KEY_SIZE 16 +#define AES_MAX_KEY_SIZE 32 +#define AES_KEYSIZE_128 16 +#define AES_KEYSIZE_192 24 +#define AES_KEYSIZE_256 32 +#define AES_BLOCK_SIZE 16 +#define AES_MAX_KEYLENGTH (15 * 16) +#define AES_MAX_KEYLENGTH_U32 (AES_MAX_KEYLENGTH / sizeof(u32)) + +/* + * Please ensure that the first two fields are 16-byte aligned + * relative to the start of the structure, i.e., don't move them! + */ +struct crypto_aes_ctx { + u32 key_enc[AES_MAX_KEYLENGTH_U32]; + u32 key_dec[AES_MAX_KEYLENGTH_U32]; + u32 key_length; +}; + +extern const u32 crypto_ft_tab[4][256] ____cacheline_aligned; +extern const u32 crypto_it_tab[4][256] ____cacheline_aligned; + +/* + * validate key length for AES algorithms + */ +static inline int aes_check_keylen(unsigned int keylen) +{ + switch (keylen) { + case AES_KEYSIZE_128: + case AES_KEYSIZE_192: + case AES_KEYSIZE_256: + break; + default: + return -EINVAL; + } + + return 0; +} + +int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len); + +/** + * aes_expandkey - Expands the AES key as described in FIPS-197 + * @ctx: The location where the computed key will be stored. + * @in_key: The supplied key. + * @key_len: The length of the supplied key. + * + * Returns 0 on success. The function fails only if an invalid key size (or + * pointer) is supplied. + * The expanded key size is 240 bytes (max of 14 rounds with a unique 16 bytes + * key schedule plus a 16 bytes key which is used before the first round). + * The decryption key is prepared for the "Equivalent Inverse Cipher" as + * described in FIPS-197. The first slot (16 bytes) of each key (enc or dec) is + * for the initial combination, the second slot for the first round and so on. 
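+ *
+ * Illustrative single-block use of the AES library interface (variable
+ * names are placeholders; the return value of aes_expandkey() should be
+ * checked in real code)::
+ *
+ *	struct crypto_aes_ctx ctx;
+ *
+ *	err = aes_expandkey(&ctx, key, AES_KEYSIZE_128);
+ *	aes_encrypt(&ctx, out, in);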
+ */ +int aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, + unsigned int key_len); + +/** + * aes_encrypt - Encrypt a single AES block + * @ctx: Context struct containing the key schedule + * @out: Buffer to store the ciphertext + * @in: Buffer containing the plaintext + */ +void aes_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in); + +/** + * aes_decrypt - Decrypt a single AES block + * @ctx: Context struct containing the key schedule + * @out: Buffer to store the plaintext + * @in: Buffer containing the ciphertext + */ +void aes_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in); + +extern const u8 crypto_aes_sbox[]; +extern const u8 crypto_aes_inv_sbox[]; + +#endif diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h new file mode 100644 index 000000000..5764b46bd --- /dev/null +++ b/include/crypto/akcipher.h @@ -0,0 +1,418 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Public Key Encryption + * + * Copyright (c) 2015, Intel Corporation + * Authors: Tadeusz Struk <tadeusz.struk@intel.com> + */ +#ifndef _CRYPTO_AKCIPHER_H +#define _CRYPTO_AKCIPHER_H +#include <linux/crypto.h> + +/** + * struct akcipher_request - public key request + * + * @base: Common attributes for async crypto requests + * @src: Source data + * For verify op this is signature + digest, in that case + * total size of @src is @src_len + @dst_len. + * @dst: Destination data (Should be NULL for verify op) + * @src_len: Size of the input buffer + * For verify op it's size of signature part of @src, this part + * is supposed to be operated by cipher. + * @dst_len: Size of @dst buffer (for all ops except verify). + * It needs to be at least as big as the expected result + * depending on the operation. + * After operation it will be updated with the actual size of the + * result. + * In case of error where the dst sgl size was insufficient, + * it will be updated to the size required for the operation. + * For verify op this is size of digest part in @src. + * @__ctx: Start of private context data + */ +struct akcipher_request { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int src_len; + unsigned int dst_len; + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +/** + * struct crypto_akcipher - user-instantiated objects which encapsulate + * algorithms and core processing logic + * + * @base: Common crypto API algorithm data structure + */ +struct crypto_akcipher { + struct crypto_tfm base; +}; + +/** + * struct akcipher_alg - generic public key algorithm + * + * @sign: Function performs a sign operation as defined by public key + * algorithm. In case of error, where the dst_len was insufficient, + * the req->dst_len will be updated to the size required for the + * operation + * @verify: Function performs a complete verify operation as defined by + * public key algorithm, returning verification status. Requires + * digest value as input parameter. + * @encrypt: Function performs an encrypt operation as defined by public key + * algorithm. In case of error, where the dst_len was insufficient, + * the req->dst_len will be updated to the size required for the + * operation + * @decrypt: Function performs a decrypt operation as defined by public key + * algorithm. 
In case of error, where the dst_len was insufficient, + * the req->dst_len will be updated to the size required for the + * operation + * @set_pub_key: Function invokes the algorithm specific set public key + * function, which knows how to decode and interpret + * the BER encoded public key and parameters + * @set_priv_key: Function invokes the algorithm specific set private key + * function, which knows how to decode and interpret + * the BER encoded private key and parameters + * @max_size: Function returns dest buffer size required for a given key. + * @init: Initialize the cryptographic transformation object. + * This function is used to initialize the cryptographic + * transformation object. This function is called only once at + * the instantiation time, right after the transformation context + * was allocated. In case the cryptographic hardware has some + * special requirements which need to be handled by software, this + * function shall check for the precise requirement of the + * transformation and put any software fallbacks in place. + * @exit: Deinitialize the cryptographic transformation object. This is a + * counterpart to @init, used to remove various changes set in + * @init. + * + * @reqsize: Request context size required by algorithm implementation + * @base: Common crypto API algorithm data structure + */ +struct akcipher_alg { + int (*sign)(struct akcipher_request *req); + int (*verify)(struct akcipher_request *req); + int (*encrypt)(struct akcipher_request *req); + int (*decrypt)(struct akcipher_request *req); + int (*set_pub_key)(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen); + int (*set_priv_key)(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen); + unsigned int (*max_size)(struct crypto_akcipher *tfm); + int (*init)(struct crypto_akcipher *tfm); + void (*exit)(struct crypto_akcipher *tfm); + + unsigned int reqsize; + struct crypto_alg base; +}; + +/** + * DOC: Generic Public Key API + * + * The Public Key API is used with the algorithms of type + * CRYPTO_ALG_TYPE_AKCIPHER (listed as type "akcipher" in /proc/crypto) + */ + +/** + * crypto_alloc_akcipher() - allocate AKCIPHER tfm handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * public key algorithm e.g. "rsa" + * @type: specifies the type of the algorithm + * @mask: specifies the mask for the algorithm + * + * Allocate a handle for public key algorithm. The returned struct + * crypto_akcipher is the handle that is required for any subsequent + * API invocation for the public key operations. + * + * Return: allocated handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. 
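+ *
+ * A typical allocation is paired with an IS_ERR() check, for example
+ * (illustrative sketch only; "rsa" is the example name used above)::
+ *
+ *	tfm = crypto_alloc_akcipher("rsa", 0, 0);
+ *	if (IS_ERR(tfm))
+ *		return PTR_ERR(tfm);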
+ */ +struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type, + u32 mask); + +static inline struct crypto_tfm *crypto_akcipher_tfm( + struct crypto_akcipher *tfm) +{ + return &tfm->base; +} + +static inline struct akcipher_alg *__crypto_akcipher_alg(struct crypto_alg *alg) +{ + return container_of(alg, struct akcipher_alg, base); +} + +static inline struct crypto_akcipher *__crypto_akcipher_tfm( + struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_akcipher, base); +} + +static inline struct akcipher_alg *crypto_akcipher_alg( + struct crypto_akcipher *tfm) +{ + return __crypto_akcipher_alg(crypto_akcipher_tfm(tfm)->__crt_alg); +} + +static inline unsigned int crypto_akcipher_reqsize(struct crypto_akcipher *tfm) +{ + return crypto_akcipher_alg(tfm)->reqsize; +} + +static inline void akcipher_request_set_tfm(struct akcipher_request *req, + struct crypto_akcipher *tfm) +{ + req->base.tfm = crypto_akcipher_tfm(tfm); +} + +static inline struct crypto_akcipher *crypto_akcipher_reqtfm( + struct akcipher_request *req) +{ + return __crypto_akcipher_tfm(req->base.tfm); +} + +/** + * crypto_free_akcipher() - free AKCIPHER tfm handle + * + * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() + * + * If @tfm is a NULL or error pointer, this function does nothing. + */ +static inline void crypto_free_akcipher(struct crypto_akcipher *tfm) +{ + crypto_destroy_tfm(tfm, crypto_akcipher_tfm(tfm)); +} + +/** + * akcipher_request_alloc() - allocates public key request + * + * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() + * @gfp: allocation flags + * + * Return: allocated handle in case of success or NULL in case of an error. + */ +static inline struct akcipher_request *akcipher_request_alloc( + struct crypto_akcipher *tfm, gfp_t gfp) +{ + struct akcipher_request *req; + + req = kmalloc(sizeof(*req) + crypto_akcipher_reqsize(tfm), gfp); + if (likely(req)) + akcipher_request_set_tfm(req, tfm); + + return req; +} + +/** + * akcipher_request_free() - zeroize and free public key request + * + * @req: request to free + */ +static inline void akcipher_request_free(struct akcipher_request *req) +{ + kfree_sensitive(req); +} + +/** + * akcipher_request_set_callback() - Sets an asynchronous callback. + * + * Callback will be called when an asynchronous operation on a given + * request is finished. 
+ * + * @req: request that the callback will be set for + * @flgs: specify for instance if the operation may backlog + * @cmpl: callback which will be called + * @data: private data used by the caller + */ +static inline void akcipher_request_set_callback(struct akcipher_request *req, + u32 flgs, + crypto_completion_t cmpl, + void *data) +{ + req->base.complete = cmpl; + req->base.data = data; + req->base.flags = flgs; +} + +/** + * akcipher_request_set_crypt() - Sets request parameters + * + * Sets parameters required by crypto operation + * + * @req: public key request + * @src: ptr to input scatter list + * @dst: ptr to output scatter list or NULL for verify op + * @src_len: size of the src input scatter list to be processed + * @dst_len: size of the dst output scatter list or size of signature + * portion in @src for verify op + */ +static inline void akcipher_request_set_crypt(struct akcipher_request *req, + struct scatterlist *src, + struct scatterlist *dst, + unsigned int src_len, + unsigned int dst_len) +{ + req->src = src; + req->dst = dst; + req->src_len = src_len; + req->dst_len = dst_len; +} + +/** + * crypto_akcipher_maxsize() - Get len for output buffer + * + * Function returns the dest buffer size required for a given key. + * Function assumes that the key is already set in the transformation. If this + * function is called without a setkey or with a failed setkey, you will end up + * in a NULL dereference. + * + * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() + */ +static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm) +{ + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + + return alg->max_size(tfm); +} + +/** + * crypto_akcipher_encrypt() - Invoke public key encrypt operation + * + * Function invokes the specific public key encrypt operation for a given + * public key algorithm + * + * @req: asymmetric key request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_akcipher_encrypt(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; + unsigned int src_len = req->src_len; + int ret; + + crypto_stats_get(calg); + ret = alg->encrypt(req); + crypto_stats_akcipher_encrypt(src_len, ret, calg); + return ret; +} + +/** + * crypto_akcipher_decrypt() - Invoke public key decrypt operation + * + * Function invokes the specific public key decrypt operation for a given + * public key algorithm + * + * @req: asymmetric key request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_akcipher_decrypt(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; + unsigned int src_len = req->src_len; + int ret; + + crypto_stats_get(calg); + ret = alg->decrypt(req); + crypto_stats_akcipher_decrypt(src_len, ret, calg); + return ret; +} + +/** + * crypto_akcipher_sign() - Invoke public key sign operation + * + * Function invokes the specific public key sign operation for a given + * public key algorithm + * + * @req: asymmetric key request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_akcipher_sign(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); 
+ struct crypto_alg *calg = tfm->base.__crt_alg; + int ret; + + crypto_stats_get(calg); + ret = alg->sign(req); + crypto_stats_akcipher_sign(ret, calg); + return ret; +} + +/** + * crypto_akcipher_verify() - Invoke public key signature verification + * + * Function invokes the specific public key signature verification operation + * for a given public key algorithm. + * + * @req: asymmetric key request + * + * Note: req->dst should be NULL, req->src should point to SG of size + * (req->src_size + req->dst_size), containing signature (of req->src_size + * length) with appended digest (of req->dst_size length). + * + * Return: zero on verification success; error code in case of error. + */ +static inline int crypto_akcipher_verify(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; + int ret; + + crypto_stats_get(calg); + ret = alg->verify(req); + crypto_stats_akcipher_verify(ret, calg); + return ret; +} + +/** + * crypto_akcipher_set_pub_key() - Invoke set public key operation + * + * Function invokes the algorithm specific set key function, which knows + * how to decode and interpret the encoded key and parameters + * + * @tfm: tfm handle + * @key: BER encoded public key, algo OID, paramlen, BER encoded + * parameters + * @keylen: length of the key (not including other data) + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_akcipher_set_pub_key(struct crypto_akcipher *tfm, + const void *key, + unsigned int keylen) +{ + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + + return alg->set_pub_key(tfm, key, keylen); +} + +/** + * crypto_akcipher_set_priv_key() - Invoke set private key operation + * + * Function invokes the algorithm specific set key function, which knows + * how to decode and interpret the encoded key and parameters + * + * @tfm: tfm handle + * @key: BER encoded private key, algo OID, paramlen, BER encoded + * parameters + * @keylen: length of the key (not including other data) + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_akcipher_set_priv_key(struct crypto_akcipher *tfm, + const void *key, + unsigned int keylen) +{ + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + + return alg->set_priv_key(tfm, key, keylen); +} +#endif diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h new file mode 100644 index 000000000..96dbd438c --- /dev/null +++ b/include/crypto/algapi.h @@ -0,0 +1,293 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Cryptographic API for algorithms (i.e., low-level API). + * + * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> + */ +#ifndef _CRYPTO_ALGAPI_H +#define _CRYPTO_ALGAPI_H + +#include <linux/crypto.h> +#include <linux/list.h> +#include <linux/kernel.h> +#include <linux/workqueue.h> + +/* + * Maximum values for blocksize and alignmask, used to allocate + * static buffers that are big enough for any combination of + * algs and architectures. Ciphers have a lower maximum size. 
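+ *
+ * For example, a stack buffer large enough to hold one block of any
+ * simple cipher may be declared as (illustrative)::
+ *
+ *	u8 block[MAX_CIPHER_BLOCKSIZE];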
+ */ +#define MAX_ALGAPI_BLOCKSIZE 160 +#define MAX_ALGAPI_ALIGNMASK 63 +#define MAX_CIPHER_BLOCKSIZE 16 +#define MAX_CIPHER_ALIGNMASK 15 + +struct crypto_aead; +struct crypto_instance; +struct module; +struct rtattr; +struct seq_file; +struct sk_buff; + +struct crypto_type { + unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask); + unsigned int (*extsize)(struct crypto_alg *alg); + int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask); + int (*init_tfm)(struct crypto_tfm *tfm); + void (*show)(struct seq_file *m, struct crypto_alg *alg); + int (*report)(struct sk_buff *skb, struct crypto_alg *alg); + void (*free)(struct crypto_instance *inst); + + unsigned int type; + unsigned int maskclear; + unsigned int maskset; + unsigned int tfmsize; +}; + +struct crypto_instance { + struct crypto_alg alg; + + struct crypto_template *tmpl; + + union { + /* Node in list of instances after registration. */ + struct hlist_node list; + /* List of attached spawns before registration. */ + struct crypto_spawn *spawns; + }; + + struct work_struct free_work; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +struct crypto_template { + struct list_head list; + struct hlist_head instances; + struct module *module; + + int (*create)(struct crypto_template *tmpl, struct rtattr **tb); + + char name[CRYPTO_MAX_ALG_NAME]; +}; + +struct crypto_spawn { + struct list_head list; + struct crypto_alg *alg; + union { + /* Back pointer to instance after registration.*/ + struct crypto_instance *inst; + /* Spawn list pointer prior to registration. */ + struct crypto_spawn *next; + }; + const struct crypto_type *frontend; + u32 mask; + bool dead; + bool registered; +}; + +struct crypto_queue { + struct list_head list; + struct list_head *backlog; + + unsigned int qlen; + unsigned int max_qlen; +}; + +struct scatter_walk { + struct scatterlist *sg; + unsigned int offset; +}; + +void crypto_mod_put(struct crypto_alg *alg); + +int crypto_register_template(struct crypto_template *tmpl); +int crypto_register_templates(struct crypto_template *tmpls, int count); +void crypto_unregister_template(struct crypto_template *tmpl); +void crypto_unregister_templates(struct crypto_template *tmpls, int count); +struct crypto_template *crypto_lookup_template(const char *name); + +int crypto_register_instance(struct crypto_template *tmpl, + struct crypto_instance *inst); +void crypto_unregister_instance(struct crypto_instance *inst); + +int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst, + const char *name, u32 type, u32 mask); +void crypto_drop_spawn(struct crypto_spawn *spawn); +struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, + u32 mask); +void *crypto_spawn_tfm2(struct crypto_spawn *spawn); + +struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); +int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret); +const char *crypto_attr_alg_name(struct rtattr *rta); +int crypto_attr_u32(struct rtattr *rta, u32 *num); +int crypto_inst_setname(struct crypto_instance *inst, const char *name, + struct crypto_alg *alg); + +void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen); +int crypto_enqueue_request(struct crypto_queue *queue, + struct crypto_async_request *request); +void crypto_enqueue_request_head(struct crypto_queue *queue, + struct crypto_async_request *request); +struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue); +static inline unsigned int crypto_queue_len(struct crypto_queue *queue) +{ + return 
queue->qlen; +} + +void crypto_inc(u8 *a, unsigned int size); +void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size); + +static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size) +{ + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && + __builtin_constant_p(size) && + (size % sizeof(unsigned long)) == 0) { + unsigned long *d = (unsigned long *)dst; + unsigned long *s = (unsigned long *)src; + + while (size > 0) { + *d++ ^= *s++; + size -= sizeof(unsigned long); + } + } else { + __crypto_xor(dst, dst, src, size); + } +} + +static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2, + unsigned int size) +{ + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && + __builtin_constant_p(size) && + (size % sizeof(unsigned long)) == 0) { + unsigned long *d = (unsigned long *)dst; + unsigned long *s1 = (unsigned long *)src1; + unsigned long *s2 = (unsigned long *)src2; + + while (size > 0) { + *d++ = *s1++ ^ *s2++; + size -= sizeof(unsigned long); + } + } else { + __crypto_xor(dst, src1, src2, size); + } +} + +static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm) +{ + return PTR_ALIGN(crypto_tfm_ctx(tfm), + crypto_tfm_alg_alignmask(tfm) + 1); +} + +static inline struct crypto_instance *crypto_tfm_alg_instance( + struct crypto_tfm *tfm) +{ + return container_of(tfm->__crt_alg, struct crypto_instance, alg); +} + +static inline void *crypto_instance_ctx(struct crypto_instance *inst) +{ + return inst->__ctx; +} + +struct crypto_cipher_spawn { + struct crypto_spawn base; +}; + +static inline int crypto_grab_cipher(struct crypto_cipher_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_CIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + return crypto_grab_spawn(&spawn->base, inst, name, type, mask); +} + +static inline void crypto_drop_cipher(struct crypto_cipher_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct crypto_alg *crypto_spawn_cipher_alg( + struct crypto_cipher_spawn *spawn) +{ + return spawn->base.alg; +} + +static inline struct crypto_cipher *crypto_spawn_cipher( + struct crypto_cipher_spawn *spawn) +{ + u32 type = CRYPTO_ALG_TYPE_CIPHER; + u32 mask = CRYPTO_ALG_TYPE_MASK; + + return __crypto_cipher_cast(crypto_spawn_tfm(&spawn->base, type, mask)); +} + +static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm) +{ + return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher; +} + +static inline struct crypto_async_request *crypto_get_backlog( + struct crypto_queue *queue) +{ + return queue->backlog == &queue->list ? NULL : + container_of(queue->backlog, struct crypto_async_request, list); +} + +static inline u32 crypto_requires_off(struct crypto_attr_type *algt, u32 off) +{ + return (algt->type ^ off) & algt->mask & off; +} + +/* + * When an algorithm uses another algorithm (e.g., if it's an instance of a + * template), these are the flags that should always be set on the "outer" + * algorithm if any "inner" algorithm has them set. + */ +#define CRYPTO_ALG_INHERITED_FLAGS \ + (CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK | \ + CRYPTO_ALG_ALLOCATES_MEMORY) + +/* + * Given the type and mask that specify the flags restrictions on a template + * instance being created, return the mask that should be passed to + * crypto_grab_*() (along with type=0) to honor any request the user made to + * have any of the CRYPTO_ALG_INHERITED_FLAGS clear. 
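+ *
+ * An illustrative use inside a template's ->create() callback (tb, algt,
+ * mask, spawn, inst and name are placeholders; error handling is
+ * abbreviated)::
+ *
+ *	algt = crypto_get_attr_type(tb);
+ *	if (IS_ERR(algt))
+ *		return PTR_ERR(algt);
+ *	mask = crypto_algt_inherited_mask(algt);
+ *	err = crypto_grab_spawn(&spawn, inst, name, 0, mask);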
+ */ +static inline u32 crypto_algt_inherited_mask(struct crypto_attr_type *algt) +{ + return crypto_requires_off(algt, CRYPTO_ALG_INHERITED_FLAGS); +} + +noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size); + +/** + * crypto_memneq - Compare two areas of memory without leaking + * timing information. + * + * @a: One area of memory + * @b: Another area of memory + * @size: The size of the area. + * + * Returns 0 when data is equal, 1 otherwise. + */ +static inline int crypto_memneq(const void *a, const void *b, size_t size) +{ + return __crypto_memneq(a, b, size) != 0UL ? 1 : 0; +} + +int crypto_register_notifier(struct notifier_block *nb); +int crypto_unregister_notifier(struct notifier_block *nb); + +/* Crypto notification events. */ +enum { + CRYPTO_MSG_ALG_REQUEST, + CRYPTO_MSG_ALG_REGISTER, + CRYPTO_MSG_ALG_LOADED, +}; + +#endif /* _CRYPTO_ALGAPI_H */ diff --git a/include/crypto/arc4.h b/include/crypto/arc4.h new file mode 100644 index 000000000..f3c22fe01 --- /dev/null +++ b/include/crypto/arc4.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Common values for ARC4 Cipher Algorithm + */ + +#ifndef _CRYPTO_ARC4_H +#define _CRYPTO_ARC4_H + +#include <linux/types.h> + +#define ARC4_MIN_KEY_SIZE 1 +#define ARC4_MAX_KEY_SIZE 256 +#define ARC4_BLOCK_SIZE 1 + +struct arc4_ctx { + u32 S[256]; + u32 x, y; +}; + +int arc4_setkey(struct arc4_ctx *ctx, const u8 *in_key, unsigned int key_len); +void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int len); + +#endif /* _CRYPTO_ARC4_H */ diff --git a/include/crypto/asym_tpm_subtype.h b/include/crypto/asym_tpm_subtype.h new file mode 100644 index 000000000..48198c36d --- /dev/null +++ b/include/crypto/asym_tpm_subtype.h @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 +#ifndef _LINUX_ASYM_TPM_SUBTYPE_H +#define _LINUX_ASYM_TPM_SUBTYPE_H + +#include <linux/keyctl.h> + +struct tpm_key { + void *blob; + u32 blob_len; + uint16_t key_len; /* Size in bits of the key */ + const void *pub_key; /* pointer inside blob to the public key bytes */ + uint16_t pub_key_len; /* length of the public key */ +}; + +struct tpm_key *tpm_key_create(const void *blob, uint32_t blob_len); + +extern struct asymmetric_key_subtype asym_tpm_subtype; + +#endif /* _LINUX_ASYM_TPM_SUBTYPE_H */ diff --git a/include/crypto/authenc.h b/include/crypto/authenc.h new file mode 100644 index 000000000..5f92a9860 --- /dev/null +++ b/include/crypto/authenc.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Authenc: Simple AEAD wrapper for IPsec + * + * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> + */ +#ifndef _CRYPTO_AUTHENC_H +#define _CRYPTO_AUTHENC_H + +#include <linux/types.h> + +enum { + CRYPTO_AUTHENC_KEYA_UNSPEC, + CRYPTO_AUTHENC_KEYA_PARAM, +}; + +struct crypto_authenc_key_param { + __be32 enckeylen; +}; + +struct crypto_authenc_keys { + const u8 *authkey; + const u8 *enckey; + + unsigned int authkeylen; + unsigned int enckeylen; +}; + +int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, + unsigned int keylen); + +#endif /* _CRYPTO_AUTHENC_H */ diff --git a/include/crypto/b128ops.h b/include/crypto/b128ops.h new file mode 100644 index 000000000..0b8e6bc55 --- /dev/null +++ b/include/crypto/b128ops.h @@ -0,0 +1,80 @@ +/* b128ops.h - common 128-bit block operations + * + * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. 
+ * Copyright (c) 2006, Rik Snel <rsnel@cube.dyndns.org> + * + * Based on Dr Brian Gladman's (GPL'd) work published at + * http://fp.gladman.plus.com/cryptography_technology/index.htm + * See the original copyright notice below. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ +/* + --------------------------------------------------------------------------- + Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. All rights reserved. + + LICENSE TERMS + + The free distribution and use of this software in both source and binary + form is allowed (with or without changes) provided that: + + 1. distributions of this source code include the above copyright + notice, this list of conditions and the following disclaimer; + + 2. distributions in binary form include the above copyright + notice, this list of conditions and the following disclaimer + in the documentation and/or other associated materials; + + 3. the copyright holder's name is not used to endorse products + built using this software without specific written permission. + + ALTERNATIVELY, provided that this notice is retained in full, this product + may be distributed under the terms of the GNU General Public License (GPL), + in which case the provisions of the GPL apply INSTEAD OF those given above. + + DISCLAIMER + + This software is provided 'as is' with no explicit or implied warranties + in respect of its properties, including, but not limited to, correctness + and/or fitness for purpose. + --------------------------------------------------------------------------- + Issue Date: 13/06/2006 +*/ + +#ifndef _CRYPTO_B128OPS_H +#define _CRYPTO_B128OPS_H + +#include <linux/types.h> + +typedef struct { + u64 a, b; +} u128; + +typedef struct { + __be64 a, b; +} be128; + +typedef struct { + __le64 b, a; +} le128; + +static inline void u128_xor(u128 *r, const u128 *p, const u128 *q) +{ + r->a = p->a ^ q->a; + r->b = p->b ^ q->b; +} + +static inline void be128_xor(be128 *r, const be128 *p, const be128 *q) +{ + u128_xor((u128 *)r, (u128 *)p, (u128 *)q); +} + +static inline void le128_xor(le128 *r, const le128 *p, const le128 *q) +{ + u128_xor((u128 *)r, (u128 *)p, (u128 *)q); +} + +#endif /* _CRYPTO_B128OPS_H */ diff --git a/include/crypto/blake2s.h b/include/crypto/blake2s.h new file mode 100644 index 000000000..4e30e1799 --- /dev/null +++ b/include/crypto/blake2s.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#ifndef _CRYPTO_BLAKE2S_H +#define _CRYPTO_BLAKE2S_H + +#include <linux/bug.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/string.h> + +enum blake2s_lengths { + BLAKE2S_BLOCK_SIZE = 64, + BLAKE2S_HASH_SIZE = 32, + BLAKE2S_KEY_SIZE = 32, + + BLAKE2S_128_HASH_SIZE = 16, + BLAKE2S_160_HASH_SIZE = 20, + BLAKE2S_224_HASH_SIZE = 28, + BLAKE2S_256_HASH_SIZE = 32, +}; + +struct blake2s_state { + /* 'h', 't', and 'f' are used in assembly code, so keep them as-is. 
*/ + u32 h[8]; + u32 t[2]; + u32 f[2]; + u8 buf[BLAKE2S_BLOCK_SIZE]; + unsigned int buflen; + unsigned int outlen; +}; + +enum blake2s_iv { + BLAKE2S_IV0 = 0x6A09E667UL, + BLAKE2S_IV1 = 0xBB67AE85UL, + BLAKE2S_IV2 = 0x3C6EF372UL, + BLAKE2S_IV3 = 0xA54FF53AUL, + BLAKE2S_IV4 = 0x510E527FUL, + BLAKE2S_IV5 = 0x9B05688CUL, + BLAKE2S_IV6 = 0x1F83D9ABUL, + BLAKE2S_IV7 = 0x5BE0CD19UL, +}; + +static inline void __blake2s_init(struct blake2s_state *state, size_t outlen, + const void *key, size_t keylen) +{ + state->h[0] = BLAKE2S_IV0 ^ (0x01010000 | keylen << 8 | outlen); + state->h[1] = BLAKE2S_IV1; + state->h[2] = BLAKE2S_IV2; + state->h[3] = BLAKE2S_IV3; + state->h[4] = BLAKE2S_IV4; + state->h[5] = BLAKE2S_IV5; + state->h[6] = BLAKE2S_IV6; + state->h[7] = BLAKE2S_IV7; + state->t[0] = 0; + state->t[1] = 0; + state->f[0] = 0; + state->f[1] = 0; + state->buflen = 0; + state->outlen = outlen; + if (keylen) { + memcpy(state->buf, key, keylen); + memset(&state->buf[keylen], 0, BLAKE2S_BLOCK_SIZE - keylen); + state->buflen = BLAKE2S_BLOCK_SIZE; + } +} + +static inline void blake2s_init(struct blake2s_state *state, + const size_t outlen) +{ + __blake2s_init(state, outlen, NULL, 0); +} + +static inline void blake2s_init_key(struct blake2s_state *state, + const size_t outlen, const void *key, + const size_t keylen) +{ + WARN_ON(IS_ENABLED(DEBUG) && (!outlen || outlen > BLAKE2S_HASH_SIZE || + !key || !keylen || keylen > BLAKE2S_KEY_SIZE)); + + __blake2s_init(state, outlen, key, keylen); +} + +void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen); +void blake2s_final(struct blake2s_state *state, u8 *out); + +static inline void blake2s(u8 *out, const u8 *in, const u8 *key, + const size_t outlen, const size_t inlen, + const size_t keylen) +{ + struct blake2s_state state; + + WARN_ON(IS_ENABLED(DEBUG) && ((!in && inlen > 0) || !out || !outlen || + outlen > BLAKE2S_HASH_SIZE || keylen > BLAKE2S_KEY_SIZE || + (!key && keylen))); + + __blake2s_init(&state, outlen, key, keylen); + blake2s_update(&state, in, inlen); + blake2s_final(&state, out); +} + +#endif /* _CRYPTO_BLAKE2S_H */ diff --git a/include/crypto/blowfish.h b/include/crypto/blowfish.h new file mode 100644 index 000000000..9b384670b --- /dev/null +++ b/include/crypto/blowfish.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for blowfish algorithms + */ + +#ifndef _CRYPTO_BLOWFISH_H +#define _CRYPTO_BLOWFISH_H + +#include <linux/types.h> +#include <linux/crypto.h> + +#define BF_BLOCK_SIZE 8 +#define BF_MIN_KEY_SIZE 4 +#define BF_MAX_KEY_SIZE 56 + +struct bf_ctx { + u32 p[18]; + u32 s[1024]; +}; + +int blowfish_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int key_len); + +#endif diff --git a/include/crypto/cast5.h b/include/crypto/cast5.h new file mode 100644 index 000000000..3d4ed4ea9 --- /dev/null +++ b/include/crypto/cast5.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_CAST5_H +#define _CRYPTO_CAST5_H + +#include <linux/types.h> +#include <linux/crypto.h> +#include <crypto/cast_common.h> + +#define CAST5_BLOCK_SIZE 8 +#define CAST5_MIN_KEY_SIZE 5 +#define CAST5_MAX_KEY_SIZE 16 + +struct cast5_ctx { + u32 Km[16]; + u8 Kr[16]; + int rr; /* rr ? 
rounds = 12 : rounds = 16; (rfc 2144) */ +}; + +int cast5_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); + +void __cast5_encrypt(struct cast5_ctx *ctx, u8 *dst, const u8 *src); +void __cast5_decrypt(struct cast5_ctx *ctx, u8 *dst, const u8 *src); + +#endif diff --git a/include/crypto/cast6.h b/include/crypto/cast6.h new file mode 100644 index 000000000..38f490cd5 --- /dev/null +++ b/include/crypto/cast6.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_CAST6_H +#define _CRYPTO_CAST6_H + +#include <linux/types.h> +#include <linux/crypto.h> +#include <crypto/cast_common.h> + +#define CAST6_BLOCK_SIZE 16 +#define CAST6_MIN_KEY_SIZE 16 +#define CAST6_MAX_KEY_SIZE 32 + +struct cast6_ctx { + u32 Km[12][4]; + u8 Kr[12][4]; +}; + +int __cast6_setkey(struct cast6_ctx *ctx, const u8 *key, unsigned int keylen); +int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); + +void __cast6_encrypt(const void *ctx, u8 *dst, const u8 *src); +void __cast6_decrypt(const void *ctx, u8 *dst, const u8 *src); + +#endif diff --git a/include/crypto/cast_common.h b/include/crypto/cast_common.h new file mode 100644 index 000000000..b90090244 --- /dev/null +++ b/include/crypto/cast_common.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_CAST_COMMON_H +#define _CRYPTO_CAST_COMMON_H + +extern const u32 cast_s1[256]; +extern const u32 cast_s2[256]; +extern const u32 cast_s3[256]; +extern const u32 cast_s4[256]; + +#endif diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h new file mode 100644 index 000000000..b3ea73b81 --- /dev/null +++ b/include/crypto/chacha.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values and helper functions for the ChaCha and XChaCha stream ciphers. + * + * XChaCha extends ChaCha's nonce to 192 bits, while provably retaining ChaCha's + * security. Here they share the same key size, tfm context, and setkey + * function; only their IV size and encrypt/decrypt function differ. + * + * The ChaCha paper specifies 20, 12, and 8-round variants. In general, it is + * recommended to use the 20-round variant ChaCha20. However, the other + * variants can be needed in some performance-sensitive scenarios. The generic + * ChaCha code currently allows only the 20 and 12-round variants. 
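As a usage sketch of the library interface declared below in this header (illustrative only; the key is assumed to already be loaded as eight little-endian u32 words, and the buffer names are placeholders):

static void example_chacha20(u8 *dst, const u8 *src, unsigned int len,
                             const u32 key[CHACHA_KEY_SIZE / sizeof(u32)],
                             const u8 iv[CHACHA_IV_SIZE])
{
        u32 state[CHACHA_STATE_WORDS];

        chacha_init(state, key, iv);            /* constants, key, then counter + nonce from iv */
        chacha_crypt(state, dst, src, len, 20); /* 20-round ChaCha; XChaCha first derives a subkey via hchacha_block() */
}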
+ */ + +#ifndef _CRYPTO_CHACHA_H +#define _CRYPTO_CHACHA_H + +#include <asm/unaligned.h> +#include <linux/types.h> + +/* 32-bit stream position, then 96-bit nonce (RFC7539 convention) */ +#define CHACHA_IV_SIZE 16 + +#define CHACHA_KEY_SIZE 32 +#define CHACHA_BLOCK_SIZE 64 +#define CHACHAPOLY_IV_SIZE 12 + +#define CHACHA_STATE_WORDS (CHACHA_BLOCK_SIZE / sizeof(u32)) + +/* 192-bit nonce, then 64-bit stream position */ +#define XCHACHA_IV_SIZE 32 + +void chacha_block_generic(u32 *state, u8 *stream, int nrounds); +static inline void chacha20_block(u32 *state, u8 *stream) +{ + chacha_block_generic(state, stream, 20); +} + +void hchacha_block_arch(const u32 *state, u32 *out, int nrounds); +void hchacha_block_generic(const u32 *state, u32 *out, int nrounds); + +static inline void hchacha_block(const u32 *state, u32 *out, int nrounds) +{ + if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA)) + hchacha_block_arch(state, out, nrounds); + else + hchacha_block_generic(state, out, nrounds); +} + +enum chacha_constants { /* expand 32-byte k */ + CHACHA_CONSTANT_EXPA = 0x61707865U, + CHACHA_CONSTANT_ND_3 = 0x3320646eU, + CHACHA_CONSTANT_2_BY = 0x79622d32U, + CHACHA_CONSTANT_TE_K = 0x6b206574U +}; + +static inline void chacha_init_consts(u32 *state) +{ + state[0] = CHACHA_CONSTANT_EXPA; + state[1] = CHACHA_CONSTANT_ND_3; + state[2] = CHACHA_CONSTANT_2_BY; + state[3] = CHACHA_CONSTANT_TE_K; +} + +void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv); +static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv) +{ + chacha_init_consts(state); + state[4] = key[0]; + state[5] = key[1]; + state[6] = key[2]; + state[7] = key[3]; + state[8] = key[4]; + state[9] = key[5]; + state[10] = key[6]; + state[11] = key[7]; + state[12] = get_unaligned_le32(iv + 0); + state[13] = get_unaligned_le32(iv + 4); + state[14] = get_unaligned_le32(iv + 8); + state[15] = get_unaligned_le32(iv + 12); +} + +static inline void chacha_init(u32 *state, const u32 *key, const u8 *iv) +{ + if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA)) + chacha_init_arch(state, key, iv); + else + chacha_init_generic(state, key, iv); +} + +void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, + unsigned int bytes, int nrounds); +void chacha_crypt_generic(u32 *state, u8 *dst, const u8 *src, + unsigned int bytes, int nrounds); + +static inline void chacha_crypt(u32 *state, u8 *dst, const u8 *src, + unsigned int bytes, int nrounds) +{ + if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA)) + chacha_crypt_arch(state, dst, src, bytes, nrounds); + else + chacha_crypt_generic(state, dst, src, bytes, nrounds); +} + +static inline void chacha20_crypt(u32 *state, u8 *dst, const u8 *src, + unsigned int bytes) +{ + chacha_crypt(state, dst, src, bytes, 20); +} + +#endif /* _CRYPTO_CHACHA_H */ diff --git a/include/crypto/chacha20poly1305.h b/include/crypto/chacha20poly1305.h new file mode 100644 index 000000000..d2ac3ff7d --- /dev/null +++ b/include/crypto/chacha20poly1305.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
+ */ + +#ifndef __CHACHA20POLY1305_H +#define __CHACHA20POLY1305_H + +#include <linux/types.h> +#include <linux/scatterlist.h> + +enum chacha20poly1305_lengths { + XCHACHA20POLY1305_NONCE_SIZE = 24, + CHACHA20POLY1305_KEY_SIZE = 32, + CHACHA20POLY1305_AUTHTAG_SIZE = 16 +}; + +void chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, + const u8 *ad, const size_t ad_len, + const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE]); + +bool __must_check +chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len, + const u8 *ad, const size_t ad_len, const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE]); + +void xchacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, + const u8 *ad, const size_t ad_len, + const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE], + const u8 key[CHACHA20POLY1305_KEY_SIZE]); + +bool __must_check xchacha20poly1305_decrypt( + u8 *dst, const u8 *src, const size_t src_len, const u8 *ad, + const size_t ad_len, const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE], + const u8 key[CHACHA20POLY1305_KEY_SIZE]); + +bool chacha20poly1305_encrypt_sg_inplace(struct scatterlist *src, size_t src_len, + const u8 *ad, const size_t ad_len, + const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE]); + +bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src, size_t src_len, + const u8 *ad, const size_t ad_len, + const u64 nonce, + const u8 key[CHACHA20POLY1305_KEY_SIZE]); + +bool chacha20poly1305_selftest(void); + +#endif /* __CHACHA20POLY1305_H */ diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h new file mode 100644 index 000000000..23169f4d8 --- /dev/null +++ b/include/crypto/cryptd.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Software async crypto daemon + * + * Added AEAD support to cryptd. + * Authors: Tadeusz Struk (tadeusz.struk@intel.com) + * Adrian Hoban <adrian.hoban@intel.com> + * Gabriele Paoloni <gabriele.paoloni@intel.com> + * Aidan O'Mahony (aidan.o.mahony@intel.com) + * Copyright (c) 2010, Intel Corporation. + */ + +#ifndef _CRYPTO_CRYPT_H +#define _CRYPTO_CRYPT_H + +#include <linux/kernel.h> +#include <crypto/aead.h> +#include <crypto/hash.h> +#include <crypto/skcipher.h> + +struct cryptd_skcipher { + struct crypto_skcipher base; +}; + +/* alg_name should be algorithm to be cryptd-ed */ +struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name, + u32 type, u32 mask); +struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm); +/* Must be called without moving CPUs. */ +bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm); +void cryptd_free_skcipher(struct cryptd_skcipher *tfm); + +struct cryptd_ahash { + struct crypto_ahash base; +}; + +static inline struct cryptd_ahash *__cryptd_ahash_cast( + struct crypto_ahash *tfm) +{ + return (struct cryptd_ahash *)tfm; +} + +/* alg_name should be algorithm to be cryptd-ed */ +struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name, + u32 type, u32 mask); +struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm); +struct shash_desc *cryptd_shash_desc(struct ahash_request *req); +/* Must be called without moving CPUs. 
*/ +bool cryptd_ahash_queued(struct cryptd_ahash *tfm); +void cryptd_free_ahash(struct cryptd_ahash *tfm); + +struct cryptd_aead { + struct crypto_aead base; +}; + +static inline struct cryptd_aead *__cryptd_aead_cast( + struct crypto_aead *tfm) +{ + return (struct cryptd_aead *)tfm; +} + +struct cryptd_aead *cryptd_alloc_aead(const char *alg_name, + u32 type, u32 mask); + +struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm); +/* Must be called without moving CPUs. */ +bool cryptd_aead_queued(struct cryptd_aead *tfm); + +void cryptd_free_aead(struct cryptd_aead *tfm); + +#endif diff --git a/include/crypto/ctr.h b/include/crypto/ctr.h new file mode 100644 index 000000000..a1c66d100 --- /dev/null +++ b/include/crypto/ctr.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * CTR: Counter mode + * + * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#ifndef _CRYPTO_CTR_H +#define _CRYPTO_CTR_H + +#include <crypto/algapi.h> +#include <crypto/internal/skcipher.h> +#include <linux/string.h> +#include <linux/types.h> + +#define CTR_RFC3686_NONCE_SIZE 4 +#define CTR_RFC3686_IV_SIZE 8 +#define CTR_RFC3686_BLOCK_SIZE 16 + +static inline int crypto_ctr_encrypt_walk(struct skcipher_request *req, + void (*fn)(struct crypto_skcipher *, + const u8 *, u8 *)) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + int blocksize = crypto_skcipher_chunksize(tfm); + u8 buf[MAX_CIPHER_BLOCKSIZE]; + struct skcipher_walk walk; + int err; + + /* avoid integer division due to variable blocksize parameter */ + if (WARN_ON_ONCE(!is_power_of_2(blocksize))) + return -EINVAL; + + err = skcipher_walk_virt(&walk, req, false); + + while (walk.nbytes > 0) { + u8 *dst = walk.dst.virt.addr; + u8 *src = walk.src.virt.addr; + int nbytes = walk.nbytes; + int tail = 0; + + if (nbytes < walk.total) { + tail = walk.nbytes & (blocksize - 1); + nbytes -= tail; + } + + do { + int bsize = min(nbytes, blocksize); + + fn(tfm, walk.iv, buf); + + crypto_xor_cpy(dst, src, buf, bsize); + crypto_inc(walk.iv, blocksize); + + dst += bsize; + src += bsize; + nbytes -= bsize; + } while (nbytes > 0); + + err = skcipher_walk_done(&walk, tail); + } + return err; +} + +#endif /* _CRYPTO_CTR_H */ diff --git a/include/crypto/curve25519.h b/include/crypto/curve25519.h new file mode 100644 index 000000000..4e6dc840b --- /dev/null +++ b/include/crypto/curve25519.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#ifndef CURVE25519_H +#define CURVE25519_H + +#include <crypto/algapi.h> // For crypto_memneq. 
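As a sketch of a typical X25519 exchange built on the helpers declared below (illustrative only; the peer's public key is assumed to come from elsewhere, and the error codes are placeholders):

static int example_x25519(u8 shared[CURVE25519_KEY_SIZE],
                          u8 mypublic[CURVE25519_KEY_SIZE],
                          const u8 peer_public[CURVE25519_KEY_SIZE])
{
        u8 secret[CURVE25519_KEY_SIZE];

        curve25519_generate_secret(secret);             /* random bytes, then clamped */
        if (!curve25519_generate_public(mypublic, secret))
                return -EINVAL;                         /* all-zero secret rejected */
        if (!curve25519(shared, secret, peer_public))
                return -EINVAL;                         /* degenerate shared point */
        return 0;                                       /* mypublic is sent to the peer */
}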
+#include <linux/types.h> +#include <linux/random.h> + +enum curve25519_lengths { + CURVE25519_KEY_SIZE = 32 +}; + +extern const u8 curve25519_null_point[]; +extern const u8 curve25519_base_point[]; + +void curve25519_generic(u8 out[CURVE25519_KEY_SIZE], + const u8 scalar[CURVE25519_KEY_SIZE], + const u8 point[CURVE25519_KEY_SIZE]); + +void curve25519_arch(u8 out[CURVE25519_KEY_SIZE], + const u8 scalar[CURVE25519_KEY_SIZE], + const u8 point[CURVE25519_KEY_SIZE]); + +void curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE]); + +static inline +bool __must_check curve25519(u8 mypublic[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE], + const u8 basepoint[CURVE25519_KEY_SIZE]) +{ + if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519)) + curve25519_arch(mypublic, secret, basepoint); + else + curve25519_generic(mypublic, secret, basepoint); + return crypto_memneq(mypublic, curve25519_null_point, + CURVE25519_KEY_SIZE); +} + +static inline bool +__must_check curve25519_generate_public(u8 pub[CURVE25519_KEY_SIZE], + const u8 secret[CURVE25519_KEY_SIZE]) +{ + if (unlikely(!crypto_memneq(secret, curve25519_null_point, + CURVE25519_KEY_SIZE))) + return false; + + if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519)) + curve25519_base_arch(pub, secret); + else + curve25519_generic(pub, secret, curve25519_base_point); + return crypto_memneq(pub, curve25519_null_point, CURVE25519_KEY_SIZE); +} + +static inline void curve25519_clamp_secret(u8 secret[CURVE25519_KEY_SIZE]) +{ + secret[0] &= 248; + secret[31] = (secret[31] & 127) | 64; +} + +static inline void curve25519_generate_secret(u8 secret[CURVE25519_KEY_SIZE]) +{ + get_random_bytes_wait(secret, CURVE25519_KEY_SIZE); + curve25519_clamp_secret(secret); +} + +#endif /* CURVE25519_H */ diff --git a/include/crypto/des.h b/include/crypto/des.h new file mode 100644 index 000000000..7812b4331 --- /dev/null +++ b/include/crypto/des.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * DES & Triple DES EDE Cipher Algorithms. + */ + +#ifndef __CRYPTO_DES_H +#define __CRYPTO_DES_H + +#include <linux/types.h> + +#define DES_KEY_SIZE 8 +#define DES_EXPKEY_WORDS 32 +#define DES_BLOCK_SIZE 8 + +#define DES3_EDE_KEY_SIZE (3 * DES_KEY_SIZE) +#define DES3_EDE_EXPKEY_WORDS (3 * DES_EXPKEY_WORDS) +#define DES3_EDE_BLOCK_SIZE DES_BLOCK_SIZE + +struct des_ctx { + u32 expkey[DES_EXPKEY_WORDS]; +}; + +struct des3_ede_ctx { + u32 expkey[DES3_EDE_EXPKEY_WORDS]; +}; + +void des_encrypt(const struct des_ctx *ctx, u8 *dst, const u8 *src); +void des_decrypt(const struct des_ctx *ctx, u8 *dst, const u8 *src); + +void des3_ede_encrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src); +void des3_ede_decrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src); + +/** + * des_expand_key - Expand a DES input key into a key schedule + * @ctx: the key schedule + * @key: buffer containing the input key + * @len: size of the buffer contents + * + * Returns 0 on success, -EINVAL if the input key is rejected and -ENOKEY if + * the key is accepted but has been found to be weak. + */ +int des_expand_key(struct des_ctx *ctx, const u8 *key, unsigned int keylen); + +/** + * des3_ede_expand_key - Expand a triple DES input key into a key schedule + * @ctx: the key schedule + * @key: buffer containing the input key + * @len: size of the buffer contents + * + * Returns 0 on success, -EINVAL if the input key is rejected and -ENOKEY if + * the key is accepted but has been found to be weak. 
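As an illustration of the return convention just described (a sketch only; 'key', 'dst', 'src' and the weak-key policy flag are caller-provided placeholders):

struct des_ctx ctx;
int err = des_expand_key(&ctx, key, DES_KEY_SIZE);

if (err == -ENOKEY)                             /* key is usable but weak */
        err = allow_weak_keys ? 0 : -EINVAL;
if (!err)
        des_encrypt(&ctx, dst, src);            /* one DES_BLOCK_SIZE block */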
Note that weak keys will + * be rejected (and -EINVAL will be returned) when running in FIPS mode. + */ +int des3_ede_expand_key(struct des3_ede_ctx *ctx, const u8 *key, + unsigned int keylen); + +#endif /* __CRYPTO_DES_H */ diff --git a/include/crypto/dh.h b/include/crypto/dh.h new file mode 100644 index 000000000..d71e9858a --- /dev/null +++ b/include/crypto/dh.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Diffie-Hellman secret to be used with kpp API along with helper functions + * + * Copyright (c) 2016, Intel Corporation + * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com> + */ +#ifndef _CRYPTO_DH_ +#define _CRYPTO_DH_ + +/** + * DOC: DH Helper Functions + * + * To use DH with the KPP cipher API, the following data structure and + * functions should be used. + * + * To use DH with KPP, the following functions should be used to operate on + * a DH private key. The packet private key that can be set with + * the KPP API function call of crypto_kpp_set_secret. + */ + +/** + * struct dh - define a DH private key + * + * @key: Private DH key + * @p: Diffie-Hellman parameter P + * @q: Diffie-Hellman parameter Q + * @g: Diffie-Hellman generator G + * @key_size: Size of the private DH key + * @p_size: Size of DH parameter P + * @q_size: Size of DH parameter Q + * @g_size: Size of DH generator G + */ +struct dh { + void *key; + void *p; + void *q; + void *g; + unsigned int key_size; + unsigned int p_size; + unsigned int q_size; + unsigned int g_size; +}; + +/** + * crypto_dh_key_len() - Obtain the size of the private DH key + * @params: private DH key + * + * This function returns the packet DH key size. A caller can use that + * with the provided DH private key reference to obtain the required + * memory size to hold a packet key. + * + * Return: size of the key in bytes + */ +unsigned int crypto_dh_key_len(const struct dh *params); + +/** + * crypto_dh_encode_key() - encode the private key + * @buf: Buffer allocated by the caller to hold the packet DH + * private key. The buffer should be at least crypto_dh_key_len + * bytes in size. + * @len: Length of the packet private key buffer + * @params: Buffer with the caller-specified private key + * + * The DH implementations operate on a packet representation of the private + * key. + * + * Return: -EINVAL if buffer has insufficient size, 0 on success + */ +int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params); + +/** + * crypto_dh_decode_key() - decode a private key + * @buf: Buffer holding a packet key that should be decoded + * @len: Length of the packet private key buffer + * @params: Buffer allocated by the caller that is filled with the + * unpacked DH private key. + * + * The unpacking obtains the private key by pointing @p to the correct location + * in @buf. Thus, both pointers refer to the same memory. + * + * Return: -EINVAL if buffer has insufficient size, 0 on success + */ +int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params); + +#endif diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h new file mode 100644 index 000000000..a6c3b8e7d --- /dev/null +++ b/include/crypto/drbg.h @@ -0,0 +1,285 @@ +/* + * DRBG based on NIST SP800-90A + * + * Copyright Stephan Mueller <smueller@chronox.de>, 2014 + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, and the entire permission notice in its entirety, + * including the disclaimer of warranties. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * ALTERNATIVELY, this product may be distributed under the terms of + * the GNU General Public License, in which case the provisions of the GPL are + * required INSTEAD OF the above restrictions. (This clause is + * necessary due to a potential bad interaction between the GPL and + * the restrictions contained in a BSD-style copyright.) + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF + * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + */ + +#ifndef _DRBG_H +#define _DRBG_H + + +#include <linux/random.h> +#include <linux/scatterlist.h> +#include <crypto/hash.h> +#include <crypto/skcipher.h> +#include <linux/module.h> +#include <linux/crypto.h> +#include <linux/slab.h> +#include <crypto/internal/rng.h> +#include <crypto/rng.h> +#include <linux/fips.h> +#include <linux/mutex.h> +#include <linux/list.h> +#include <linux/workqueue.h> + +/* + * Concatenation Helper and string operation helper + * + * SP800-90A requires the concatenation of different data. To avoid copying + * buffers around or allocate additional memory, the following data structure + * is used to point to the original memory with its size. In addition, it + * is used to build a linked list. The linked list defines the concatenation + * of individual buffers. The order of memory block referenced in that + * linked list determines the order of concatenation. 
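As a sketch of the concatenation scheme described above (illustrative only; entropy_buf/nonce_buf and their lengths are caller-provided placeholders, and drbg_string_fill() is declared just below):

LIST_HEAD(seedlist);
struct drbg_string entropy, nonce;

drbg_string_fill(&entropy, entropy_buf, entropy_len);
drbg_string_fill(&nonce, nonce_buf, nonce_len);
list_add_tail(&entropy.list, &seedlist);        /* concatenated first */
list_add_tail(&nonce.list, &seedlist);          /* concatenated second */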
+ */ +struct drbg_string { + const unsigned char *buf; + size_t len; + struct list_head list; +}; + +static inline void drbg_string_fill(struct drbg_string *string, + const unsigned char *buf, size_t len) +{ + string->buf = buf; + string->len = len; + INIT_LIST_HEAD(&string->list); +} + +struct drbg_state; +typedef uint32_t drbg_flag_t; + +struct drbg_core { + drbg_flag_t flags; /* flags for the cipher */ + __u8 statelen; /* maximum state length */ + __u8 blocklen_bytes; /* block size of output in bytes */ + char cra_name[CRYPTO_MAX_ALG_NAME]; /* mapping to kernel crypto API */ + /* kernel crypto API backend cipher name */ + char backend_cra_name[CRYPTO_MAX_ALG_NAME]; +}; + +struct drbg_state_ops { + int (*update)(struct drbg_state *drbg, struct list_head *seed, + int reseed); + int (*generate)(struct drbg_state *drbg, + unsigned char *buf, unsigned int buflen, + struct list_head *addtl); + int (*crypto_init)(struct drbg_state *drbg); + int (*crypto_fini)(struct drbg_state *drbg); + +}; + +struct drbg_test_data { + struct drbg_string *testentropy; /* TEST PARAMETER: test entropy */ +}; + +enum drbg_seed_state { + DRBG_SEED_STATE_UNSEEDED, + DRBG_SEED_STATE_PARTIAL, /* Seeded with !rng_is_initialized() */ + DRBG_SEED_STATE_FULL, +}; + +struct drbg_state { + struct mutex drbg_mutex; /* lock around DRBG */ + unsigned char *V; /* internal state 10.1.1.1 1a) */ + unsigned char *Vbuf; + /* hash: static value 10.1.1.1 1b) hmac / ctr: key */ + unsigned char *C; + unsigned char *Cbuf; + /* Number of RNG requests since last reseed -- 10.1.1.1 1c) */ + size_t reseed_ctr; + size_t reseed_threshold; + /* some memory the DRBG can use for its operation */ + unsigned char *scratchpad; + unsigned char *scratchpadbuf; + void *priv_data; /* Cipher handle */ + + struct crypto_skcipher *ctr_handle; /* CTR mode cipher handle */ + struct skcipher_request *ctr_req; /* CTR mode request handle */ + __u8 *outscratchpadbuf; /* CTR mode output scratchpad */ + __u8 *outscratchpad; /* CTR mode aligned outbuf */ + struct crypto_wait ctr_wait; /* CTR mode async wait obj */ + struct scatterlist sg_in, sg_out; /* CTR mode SGLs */ + + enum drbg_seed_state seeded; /* DRBG fully seeded? */ + bool pr; /* Prediction resistance enabled? */ + bool fips_primed; /* Continuous test primed? */ + unsigned char *prev; /* FIPS 140-2 continuous test value */ + struct crypto_rng *jent; + const struct drbg_state_ops *d_ops; + const struct drbg_core *core; + struct drbg_string test_data; +}; + +static inline __u8 drbg_statelen(struct drbg_state *drbg) +{ + if (drbg && drbg->core) + return drbg->core->statelen; + return 0; +} + +static inline __u8 drbg_blocklen(struct drbg_state *drbg) +{ + if (drbg && drbg->core) + return drbg->core->blocklen_bytes; + return 0; +} + +static inline __u8 drbg_keylen(struct drbg_state *drbg) +{ + if (drbg && drbg->core) + return (drbg->core->statelen - drbg->core->blocklen_bytes); + return 0; +} + +static inline size_t drbg_max_request_bytes(struct drbg_state *drbg) +{ + /* SP800-90A requires the limit 2**19 bits, but we return bytes */ + return (1 << 16); +} + +static inline size_t drbg_max_addtl(struct drbg_state *drbg) +{ + /* SP800-90A requires 2**35 bytes additional info str / pers str */ +#if (__BITS_PER_LONG == 32) + /* + * SP800-90A allows smaller maximum numbers to be returned -- we + * return SIZE_MAX - 1 to allow the verification of the enforcement + * of this value in drbg_healthcheck_sanity. 
+ */ + return (SIZE_MAX - 1); +#else + return (1UL<<35); +#endif +} + +static inline size_t drbg_max_requests(struct drbg_state *drbg) +{ + /* SP800-90A requires 2**48 maximum requests before reseeding */ + return (1<<20); +} + +/* + * This is a wrapper to the kernel crypto API function of + * crypto_rng_generate() to allow the caller to provide additional data. + * + * @drng DRBG handle -- see crypto_rng_get_bytes + * @outbuf output buffer -- see crypto_rng_get_bytes + * @outlen length of output buffer -- see crypto_rng_get_bytes + * @addtl_input additional information string input buffer + * @addtllen length of additional information string buffer + * + * return + * see crypto_rng_get_bytes + */ +static inline int crypto_drbg_get_bytes_addtl(struct crypto_rng *drng, + unsigned char *outbuf, unsigned int outlen, + struct drbg_string *addtl) +{ + return crypto_rng_generate(drng, addtl->buf, addtl->len, + outbuf, outlen); +} + +/* + * TEST code + * + * This is a wrapper to the kernel crypto API function of + * crypto_rng_generate() to allow the caller to provide additional data and + * allow furnishing of test_data + * + * @drng DRBG handle -- see crypto_rng_get_bytes + * @outbuf output buffer -- see crypto_rng_get_bytes + * @outlen length of output buffer -- see crypto_rng_get_bytes + * @addtl_input additional information string input buffer + * @addtllen length of additional information string buffer + * @test_data filled test data + * + * return + * see crypto_rng_get_bytes + */ +static inline int crypto_drbg_get_bytes_addtl_test(struct crypto_rng *drng, + unsigned char *outbuf, unsigned int outlen, + struct drbg_string *addtl, + struct drbg_test_data *test_data) +{ + crypto_rng_set_entropy(drng, test_data->testentropy->buf, + test_data->testentropy->len); + return crypto_rng_generate(drng, addtl->buf, addtl->len, + outbuf, outlen); +} + +/* + * TEST code + * + * This is a wrapper to the kernel crypto API function of + * crypto_rng_reset() to allow the caller to provide test_data + * + * @drng DRBG handle -- see crypto_rng_reset + * @pers personalization string input buffer + * @perslen length of additional information string buffer + * @test_data filled test data + * + * return + * see crypto_rng_reset + */ +static inline int crypto_drbg_reset_test(struct crypto_rng *drng, + struct drbg_string *pers, + struct drbg_test_data *test_data) +{ + crypto_rng_set_entropy(drng, test_data->testentropy->buf, + test_data->testentropy->len); + return crypto_rng_reset(drng, pers->buf, pers->len); +} + +/* DRBG type flags */ +#define DRBG_CTR ((drbg_flag_t)1<<0) +#define DRBG_HMAC ((drbg_flag_t)1<<1) +#define DRBG_HASH ((drbg_flag_t)1<<2) +#define DRBG_TYPE_MASK (DRBG_CTR | DRBG_HMAC | DRBG_HASH) +/* DRBG strength flags */ +#define DRBG_STRENGTH128 ((drbg_flag_t)1<<3) +#define DRBG_STRENGTH192 ((drbg_flag_t)1<<4) +#define DRBG_STRENGTH256 ((drbg_flag_t)1<<5) +#define DRBG_STRENGTH_MASK (DRBG_STRENGTH128 | DRBG_STRENGTH192 | \ + DRBG_STRENGTH256) + +enum drbg_prefixes { + DRBG_PREFIX0 = 0x00, + DRBG_PREFIX1, + DRBG_PREFIX2, + DRBG_PREFIX3 +}; + +#endif /* _DRBG_H */ diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h new file mode 100644 index 000000000..a5b805b55 --- /dev/null +++ b/include/crypto/ecdh.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * ECDH params to be used with kpp API + * + * Copyright (c) 2016, Intel Corporation + * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com> + */ +#ifndef _CRYPTO_ECDH_ +#define _CRYPTO_ECDH_ + +/** + * DOC: ECDH 
Helper Functions + * + * To use ECDH with the KPP cipher API, the following data structure and + * functions should be used. + * + * The ECC curves known to the ECDH implementation are specified in this + * header file. + * + * To use ECDH with KPP, the following functions should be used to operate on + * an ECDH private key. The packet private key that can be set with + * the KPP API function call of crypto_kpp_set_secret. + */ + +/* Curves IDs */ +#define ECC_CURVE_NIST_P192 0x0001 +#define ECC_CURVE_NIST_P256 0x0002 + +/** + * struct ecdh - define an ECDH private key + * + * @curve_id: ECC curve the key is based on. + * @key: Private ECDH key + * @key_size: Size of the private ECDH key + */ +struct ecdh { + unsigned short curve_id; + char *key; + unsigned short key_size; +}; + +/** + * crypto_ecdh_key_len() - Obtain the size of the private ECDH key + * @params: private ECDH key + * + * This function returns the packet ECDH key size. A caller can use that + * with the provided ECDH private key reference to obtain the required + * memory size to hold a packet key. + * + * Return: size of the key in bytes + */ +unsigned int crypto_ecdh_key_len(const struct ecdh *params); + +/** + * crypto_ecdh_encode_key() - encode the private key + * @buf: Buffer allocated by the caller to hold the packet ECDH + * private key. The buffer should be at least crypto_ecdh_key_len + * bytes in size. + * @len: Length of the packet private key buffer + * @p: Buffer with the caller-specified private key + * + * The ECDH implementations operate on a packet representation of the private + * key. + * + * Return: -EINVAL if buffer has insufficient size, 0 on success + */ +int crypto_ecdh_encode_key(char *buf, unsigned int len, const struct ecdh *p); + +/** + * crypto_ecdh_decode_key() - decode a private key + * @buf: Buffer holding a packet key that should be decoded + * @len: Length of the packet private key buffer + * @p: Buffer allocated by the caller that is filled with the + * unpacked ECDH private key. + * + * The unpacking obtains the private key by pointing @p to the correct location + * in @buf. Thus, both pointers refer to the same memory. 
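As a sketch of how such a key might be packed and handed to the kpp API (illustrative only; private_key_buf, its length, kpp_tfm and the surrounding error handling are placeholders):

struct ecdh p = {
        .curve_id = ECC_CURVE_NIST_P256,
        .key      = private_key_buf,
        .key_size = private_key_len,
};
unsigned int len = crypto_ecdh_key_len(&p);
char *packed = kmalloc(len, GFP_KERNEL);
int err = -ENOMEM;

if (packed) {
        err = crypto_ecdh_encode_key(packed, len, &p);
        if (!err)
                err = crypto_kpp_set_secret(kpp_tfm, packed, len);
        kfree_sensitive(packed);
}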
+ * + * Return: -EINVAL if buffer has insufficient size, 0 on success + */ +int crypto_ecdh_decode_key(const char *buf, unsigned int len, struct ecdh *p); + +#endif diff --git a/include/crypto/engine.h b/include/crypto/engine.h new file mode 100644 index 000000000..3f06e40d0 --- /dev/null +++ b/include/crypto/engine.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Crypto engine API + * + * Copyright (c) 2016 Baolin Wang <baolin.wang@linaro.org> + */ +#ifndef _CRYPTO_ENGINE_H +#define _CRYPTO_ENGINE_H + +#include <linux/crypto.h> +#include <linux/list.h> +#include <linux/kernel.h> +#include <linux/kthread.h> +#include <crypto/algapi.h> +#include <crypto/aead.h> +#include <crypto/akcipher.h> +#include <crypto/hash.h> +#include <crypto/skcipher.h> + +#define ENGINE_NAME_LEN 30 +/* + * struct crypto_engine - crypto hardware engine + * @name: the engine name + * @idling: the engine is entering idle state + * @busy: request pump is busy + * @running: the engine is currently running + * @retry_support: indication that the hardware allows re-execution + * of a failed backlog request + * @list: link with the global crypto engine list + * @queue_lock: spinlock to synchronise access to request queue + * @queue: the crypto queue of the engine + * @rt: whether this queue is set to run as a realtime task + * @prepare_crypt_hardware: a request will soon arrive from the queue + * so the subsystem requests the driver to prepare the hardware + * by issuing this call + * @unprepare_crypt_hardware: there are currently no more requests on the + * queue so the subsystem notifies the driver that it may relax the + * hardware by issuing this call + * @do_batch_requests: execute a batch of requests. Depends on multiple + * requests support.
+ * @kworker: kthread worker struct for request pump + * @pump_requests: work struct for scheduling work to the request pump + * @priv_data: the engine private data + * @cur_req: the request currently being processed + */ +struct crypto_engine { + char name[ENGINE_NAME_LEN]; + bool idling; + bool busy; + bool running; + + bool retry_support; + + struct list_head list; + spinlock_t queue_lock; + struct crypto_queue queue; + struct device *dev; + + bool rt; + + int (*prepare_crypt_hardware)(struct crypto_engine *engine); + int (*unprepare_crypt_hardware)(struct crypto_engine *engine); + int (*do_batch_requests)(struct crypto_engine *engine); + + + struct kthread_worker *kworker; + struct kthread_work pump_requests; + + void *priv_data; + struct crypto_async_request *cur_req; +}; + +/* + * struct crypto_engine_op - crypto hardware engine operations + * @prepare_request: do any preparation, if needed, before handling the current request + * @unprepare_request: undo any work done by prepare_request() + * @do_one_request: do encryption for current request + */ +struct crypto_engine_op { + int (*prepare_request)(struct crypto_engine *engine, + void *areq); + int (*unprepare_request)(struct crypto_engine *engine, + void *areq); + int (*do_one_request)(struct crypto_engine *engine, + void *areq); +}; + +struct crypto_engine_ctx { + struct crypto_engine_op op; +}; + +int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine, + struct aead_request *req); +int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine, + struct akcipher_request *req); +int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine, + struct ahash_request *req); +int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine, + struct skcipher_request *req); +void crypto_finalize_aead_request(struct crypto_engine *engine, + struct aead_request *req, int err); +void crypto_finalize_akcipher_request(struct crypto_engine *engine, + struct akcipher_request *req, int err); +void crypto_finalize_hash_request(struct crypto_engine *engine, + struct ahash_request *req, int err); +void crypto_finalize_skcipher_request(struct crypto_engine *engine, + struct skcipher_request *req, int err); +int crypto_engine_start(struct crypto_engine *engine); +int crypto_engine_stop(struct crypto_engine *engine); +struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt); +struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev, + bool retry_support, + int (*cbk_do_batch)(struct crypto_engine *engine), + bool rt, int qlen); +int crypto_engine_exit(struct crypto_engine *engine); + +#endif /* _CRYPTO_ENGINE_H */ diff --git a/include/crypto/gcm.h b/include/crypto/gcm.h new file mode 100644 index 000000000..9d7eff04f --- /dev/null +++ b/include/crypto/gcm.h @@ -0,0 +1,63 @@ +#ifndef _CRYPTO_GCM_H +#define _CRYPTO_GCM_H + +#include <linux/errno.h> + +#define GCM_AES_IV_SIZE 12 +#define GCM_RFC4106_IV_SIZE 8 +#define GCM_RFC4543_IV_SIZE 8 + +/* + * validate authentication tag size for GCM + */ +static inline int crypto_gcm_check_authsize(unsigned int authsize) +{ + switch (authsize) { + case 4: + case 8: + case 12: + case 13: + case 14: + case 15: + case 16: + break; + default: + return -EINVAL; + } + + return 0; +} + +/* + * validate authentication tag size for RFC4106 + */ +static inline int crypto_rfc4106_check_authsize(unsigned int authsize) +{ + switch (authsize) { + case 8: + case 12: + case 16: + break; + default: + return -EINVAL; + } + + return 0; +} + +/* + * validate
assoclen for RFC4106/RFC4543 + */ +static inline int crypto_ipsec_check_assoclen(unsigned int assoclen) +{ + switch (assoclen) { + case 16: + case 20: + break; + default: + return -EINVAL; + } + + return 0; +} +#endif diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h new file mode 100644 index 000000000..81330c644 --- /dev/null +++ b/include/crypto/gf128mul.h @@ -0,0 +1,252 @@ +/* gf128mul.h - GF(2^128) multiplication functions + * + * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. + * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org> + * + * Based on Dr Brian Gladman's (GPL'd) work published at + * http://fp.gladman.plus.com/cryptography_technology/index.htm + * See the original copyright notice below. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ +/* + --------------------------------------------------------------------------- + Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. All rights reserved. + + LICENSE TERMS + + The free distribution and use of this software in both source and binary + form is allowed (with or without changes) provided that: + + 1. distributions of this source code include the above copyright + notice, this list of conditions and the following disclaimer; + + 2. distributions in binary form include the above copyright + notice, this list of conditions and the following disclaimer + in the documentation and/or other associated materials; + + 3. the copyright holder's name is not used to endorse products + built using this software without specific written permission. + + ALTERNATIVELY, provided that this notice is retained in full, this product + may be distributed under the terms of the GNU General Public License (GPL), + in which case the provisions of the GPL apply INSTEAD OF those given above. + + DISCLAIMER + + This software is provided 'as is' with no explicit or implied warranties + in respect of its properties, including, but not limited to, correctness + and/or fitness for purpose. + --------------------------------------------------------------------------- + Issue Date: 31/01/2006 + + An implementation of field multiplication in Galois Field GF(2^128) +*/ + +#ifndef _CRYPTO_GF128MUL_H +#define _CRYPTO_GF128MUL_H + +#include <asm/byteorder.h> +#include <crypto/b128ops.h> +#include <linux/slab.h> + +/* Comment by Rik: + * + * For some background on GF(2^128) see for example: + * http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/gcm/gcm-revised-spec.pdf + * + * The elements of GF(2^128) := GF(2)[X]/(X^128-X^7-X^2-X^1-1) can + * be mapped to computer memory in a variety of ways. Let's examine + * three common cases. + * + * Take a look at the 16 binary octets below in memory order. The msb's + * are left and the lsb's are right. char b[16] is an array and b[0] is + * the first octet. + * + * 10000000 00000000 00000000 00000000 .... 00000000 00000000 00000000 + * b[0] b[1] b[2] b[3] b[13] b[14] b[15] + * + * Every bit is a coefficient of some power of X. We can store the bits + * in every byte in little-endian order and the bytes themselves also in + * little endian order. I will call this lle (little-little-endian). + * The above buffer represents the polynomial 1, and X^7+X^2+X^1+1 looks + * like 11100001 00000000 .... 00000000 = { 0xE1, 0x00, }. 
+ * This format was originally implemented in gf128mul and is used + * in GCM (Galois/Counter mode) and in ABL (Arbitrary Block Length). + * + * Another convention says: store the bits in bigendian order and the + * bytes also. This is bbe (big-big-endian). Now the buffer above + * represents X^127. X^7+X^2+X^1+1 looks like 00000000 .... 10000111, + * b[15] = 0x87 and the rest is 0. LRW uses this convention and bbe + * is partly implemented. + * + * Both of the above formats are easy to implement on big-endian + * machines. + * + * XTS and EME (the latter of which is patent encumbered) use the ble + * format (bits are stored in big endian order and the bytes in little + * endian). The above buffer represents X^7 in this case and the + * primitive polynomial is b[0] = 0x87. + * + * The common machine word-size is smaller than 128 bits, so to make + * an efficient implementation we must split into machine word sizes. + * This implementation uses 64-bit words for the moment. Machine + * endianness comes into play. The lle format in relation to machine + * endianness is discussed below by the original author of gf128mul Dr + * Brian Gladman. + * + * Let's look at the bbe and ble format on a little endian machine. + * + * bbe on a little endian machine u32 x[4]: + * + * MS x[0] LS MS x[1] LS + * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + * 103..96 111.104 119.112 127.120 71...64 79...72 87...80 95...88 + * + * MS x[2] LS MS x[3] LS + * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + * 39...32 47...40 55...48 63...56 07...00 15...08 23...16 31...24 + * + * ble on a little endian machine + * + * MS x[0] LS MS x[1] LS + * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + * 31...24 23...16 15...08 07...00 63...56 55...48 47...40 39...32 + * + * MS x[2] LS MS x[3] LS + * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + * 95...88 87...80 79...72 71...64 127.120 119.112 111.104 103..96 + * + * Multiplications in GF(2^128) are mostly bit-shifts, so you see why + * ble (and lbe also) are easier to implement on a little-endian + * machine than on a big-endian machine. The converse holds for bbe + * and lle. + * + * Note: to have good alignment, it seems to me that it is sufficient + * to keep elements of GF(2^128) in type u64[2]. On 32-bit wordsize + * machines this will automatically be aligned to wordsize and on a 64-bit + * machine also. + */ +/* Multiply a GF(2^128) field element by x. Field elements are + held in arrays of bytes in which field bits 8n..8n + 7 are held in + byte[n], with lower indexed bits placed in the more numerically + significant bit positions within bytes.
+ + On little endian machines the bit indexes translate into the bit + positions within four 32-bit words in the following way + + MS x[0] LS MS x[1] LS + ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + 24...31 16...23 08...15 00...07 56...63 48...55 40...47 32...39 + + MS x[2] LS MS x[3] LS + ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + 88...95 80...87 72...79 64...71 120.127 112.119 104.111 96..103 + + On big endian machines the bit indexes translate into the bit + positions within four 32-bit words in the following way + + MS x[0] LS MS x[1] LS + ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + 00...07 08...15 16...23 24...31 32...39 40...47 48...55 56...63 + + MS x[2] LS MS x[3] LS + ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls + 64...71 72...79 80...87 88...95 96..103 104.111 112.119 120.127 +*/ + +/* A slow generic version of gf_mul, implemented for lle and bbe + * It multiplies a and b and puts the result in a */ +void gf128mul_lle(be128 *a, const be128 *b); + +void gf128mul_bbe(be128 *a, const be128 *b); + +/* + * The following functions multiply a field element by x in + * the polynomial field representation. They use 64-bit word operations + * to gain speed but compensate for machine endianness and hence work + * correctly on both styles of machine. + * + * They are defined here for performance. + */ + +static inline u64 gf128mul_mask_from_bit(u64 x, int which) +{ + /* a constant-time version of 'x & ((u64)1 << which) ? (u64)-1 : 0' */ + return ((s64)(x << (63 - which)) >> 63); +} + +static inline void gf128mul_x_lle(be128 *r, const be128 *x) +{ + u64 a = be64_to_cpu(x->a); + u64 b = be64_to_cpu(x->b); + + /* equivalent to gf128mul_table_le[(b << 7) & 0xff] << 48 + * (see crypto/gf128mul.c): */ + u64 _tt = gf128mul_mask_from_bit(b, 0) & ((u64)0xe1 << 56); + + r->b = cpu_to_be64((b >> 1) | (a << 63)); + r->a = cpu_to_be64((a >> 1) ^ _tt); +} + +static inline void gf128mul_x_bbe(be128 *r, const be128 *x) +{ + u64 a = be64_to_cpu(x->a); + u64 b = be64_to_cpu(x->b); + + /* equivalent to gf128mul_table_be[a >> 63] (see crypto/gf128mul.c): */ + u64 _tt = gf128mul_mask_from_bit(a, 63) & 0x87; + + r->a = cpu_to_be64((a << 1) | (b >> 63)); + r->b = cpu_to_be64((b << 1) ^ _tt); +} + +/* needed by XTS */ +static inline void gf128mul_x_ble(le128 *r, const le128 *x) +{ + u64 a = le64_to_cpu(x->a); + u64 b = le64_to_cpu(x->b); + + /* equivalent to gf128mul_table_be[b >> 63] (see crypto/gf128mul.c): */ + u64 _tt = gf128mul_mask_from_bit(a, 63) & 0x87; + + r->a = cpu_to_le64((a << 1) | (b >> 63)); + r->b = cpu_to_le64((b << 1) ^ _tt); +} + +/* 4k table optimization */ + +struct gf128mul_4k { + be128 t[256]; +}; + +struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g); +struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g); +void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t); +void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t); +void gf128mul_x8_ble(le128 *r, const le128 *x); +static inline void gf128mul_free_4k(struct gf128mul_4k *t) +{ + kfree_sensitive(t); +} + + +/* 64k table optimization, implemented for bbe */ + +struct gf128mul_64k { + struct gf128mul_4k *t[16]; +}; + +/* First initialize with the constant factor with which you + * want to multiply and then call gf128mul_64k_bbe with the other + * factor in the first argument, and the table in the second. + * Afterwards, the result is stored in *a. 
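A sketch of that call sequence, using the functions declared just below (illustrative only; 'h' is the constant factor and 'x' the per-call factor, both caller-provided):

static int example_mul_64k(be128 *x, const be128 *h)
{
        struct gf128mul_64k *table = gf128mul_init_64k_bbe(h);

        if (!table)
                return -ENOMEM;
        gf128mul_64k_bbe(x, table);     /* *x = *x * *h in GF(2^128), bbe convention */
        gf128mul_free_64k(table);       /* in practice the table is cached and reused */
        return 0;
}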
+ */ +struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g); +void gf128mul_free_64k(struct gf128mul_64k *t); +void gf128mul_64k_bbe(be128 *a, const struct gf128mul_64k *t); + +#endif /* _CRYPTO_GF128MUL_H */ diff --git a/include/crypto/ghash.h b/include/crypto/ghash.h new file mode 100644 index 000000000..f832c9f2a --- /dev/null +++ b/include/crypto/ghash.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for the GHASH hash function + */ + +#ifndef __CRYPTO_GHASH_H__ +#define __CRYPTO_GHASH_H__ + +#include <linux/types.h> +#include <crypto/gf128mul.h> + +#define GHASH_BLOCK_SIZE 16 +#define GHASH_DIGEST_SIZE 16 + +struct ghash_ctx { + struct gf128mul_4k *gf128; +}; + +struct ghash_desc_ctx { + u8 buffer[GHASH_BLOCK_SIZE]; + u32 bytes; +}; + +#endif diff --git a/include/crypto/hash.h b/include/crypto/hash.h new file mode 100644 index 000000000..b2bc1e46e --- /dev/null +++ b/include/crypto/hash.h @@ -0,0 +1,1005 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Hash: Hash algorithms under the crypto API + * + * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#ifndef _CRYPTO_HASH_H +#define _CRYPTO_HASH_H + +#include <linux/crypto.h> +#include <linux/string.h> + +struct crypto_ahash; + +/** + * DOC: Message Digest Algorithm Definitions + * + * These data structures define modular message digest algorithm + * implementations, managed via crypto_register_ahash(), + * crypto_register_shash(), crypto_unregister_ahash() and + * crypto_unregister_shash(). + */ + +/** + * struct hash_alg_common - define properties of message digest + * @digestsize: Size of the result of the transformation. A buffer of this size + * must be available to the @final and @finup calls, so they can + * store the resulting hash into it. For various predefined sizes, + * search include/crypto/ using + * git grep _DIGEST_SIZE include/crypto. + * @statesize: Size of the block for partial state of the transformation. A + * buffer of this size must be passed to the @export function as it + * will save the partial state of the transformation into it. On the + * other side, the @import function will load the state from a + * buffer of this size as well. + * @base: Start of data structure of cipher algorithm. The common data + * structure of crypto_alg contains information common to all ciphers. + * The hash_alg_common data structure now adds the hash-specific + * information. + */ +struct hash_alg_common { + unsigned int digestsize; + unsigned int statesize; + + struct crypto_alg base; +}; + +struct ahash_request { + struct crypto_async_request base; + + unsigned int nbytes; + struct scatterlist *src; + u8 *result; + + /* This field may only be used by the ahash API code. */ + void *priv; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +/** + * struct ahash_alg - asynchronous message digest definition + * @init: **[mandatory]** Initialize the transformation context. Intended only to initialize the + * state of the HASH transformation at the beginning. This shall fill in + * the internal structures used during the entire duration of the whole + * transformation. No data processing happens at this point. Driver code + * implementation must not use req->result. + * @update: **[mandatory]** Push a chunk of data into the driver for transformation. This + * function actually pushes blocks of data from upper layers into the + * driver, which then passes those to the hardware as seen fit. 
This + * function must not finalize the HASH transformation by calculating the + * final message digest as this only adds more data into the + * transformation. This function shall not modify the transformation + * context, as this function may be called in parallel with the same + * transformation object. Data processing can happen synchronously + * [SHASH] or asynchronously [AHASH] at this point. Driver must not use + * req->result. + * @final: **[mandatory]** Retrieve result from the driver. This function finalizes the + * transformation and retrieves the resulting hash from the driver and + * pushes it back to upper layers. No data processing happens at this + * point unless hardware requires it to finish the transformation + * (then the data buffered by the device driver is processed). + * @finup: **[optional]** Combination of @update and @final. This function is effectively a + * combination of @update and @final calls issued in sequence. As some + * hardware cannot do @update and @final separately, this callback was + * added to allow such hardware to be used at least by IPsec. Data + * processing can happen synchronously [SHASH] or asynchronously [AHASH] + * at this point. + * @digest: Combination of @init and @update and @final. This function + * effectively behaves as the entire chain of operations, @init, + * @update and @final issued in sequence. Just like @finup, this was + * added for hardware which cannot do even the @finup, but can only do + * the whole transformation in one run. Data processing can happen + * synchronously [SHASH] or asynchronously [AHASH] at this point. + * @setkey: Set optional key used by the hashing algorithm. Intended to push + * optional key used by the hashing algorithm from upper layers into + * the driver. This function can store the key in the transformation + * context or can outright program it into the hardware. In the former + * case, one must be careful to program the key into the hardware at + * appropriate time and one must be careful that .setkey() can be + * called multiple times during the existence of the transformation + * object. Not all hashing algorithms do implement this function as it + * is only needed for keyed message digests. SHAx/MDx/CRCx do NOT + * implement this function. HMAC(MDx)/HMAC(SHAx)/CMAC(AES) do implement + * this function. This function must be called before any other of the + * @init, @update, @final, @finup, @digest is called. No data + * processing happens at this point. + * @export: Export partial state of the transformation. This function dumps the + * entire state of the ongoing transformation into a provided block of + * data so it can be @import 'ed back later on. This is useful in case + * you want to save partial result of the transformation after + * processing certain amount of data and reload this partial result + * multiple times later on for multiple re-use. No data processing + * happens at this point. Driver must not use req->result. + * @import: Import partial state of the transformation. This function loads the + * entire state of the ongoing transformation from a provided block of + * data so the transformation can continue from this point onward. No + * data processing happens at this point. Driver must not use + * req->result. + * @init_tfm: Initialize the cryptographic transformation object. + * This function is called only once at the instantiation + * time, right after the transformation context was + * allocated. 
In case the cryptographic hardware has + * some special requirements which need to be handled + * by software, this function shall check for the precise + * requirement of the transformation and put any software + * fallbacks in place. + * @exit_tfm: Deinitialize the cryptographic transformation object. + * This is a counterpart to @init_tfm, used to remove + * various changes set in @init_tfm. + * @halg: see struct hash_alg_common + */ +struct ahash_alg { + int (*init)(struct ahash_request *req); + int (*update)(struct ahash_request *req); + int (*final)(struct ahash_request *req); + int (*finup)(struct ahash_request *req); + int (*digest)(struct ahash_request *req); + int (*export)(struct ahash_request *req, void *out); + int (*import)(struct ahash_request *req, const void *in); + int (*setkey)(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen); + int (*init_tfm)(struct crypto_ahash *tfm); + void (*exit_tfm)(struct crypto_ahash *tfm); + + struct hash_alg_common halg; +}; + +struct shash_desc { + struct crypto_shash *tfm; + void *__ctx[] __aligned(ARCH_SLAB_MINALIGN); +}; + +#define HASH_MAX_DIGESTSIZE 64 + +/* + * Worst case is hmac(sha3-224-generic). Its context is a nested 'shash_desc' + * containing a 'struct sha3_state'. + */ +#define HASH_MAX_DESCSIZE (sizeof(struct shash_desc) + 360) + +#define HASH_MAX_STATESIZE 512 + +#define SHASH_DESC_ON_STACK(shash, ctx) \ + char __##shash##_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE] \ + __aligned(__alignof__(struct shash_desc)); \ + struct shash_desc *shash = (struct shash_desc *)__##shash##_desc + +/** + * struct shash_alg - synchronous message digest definition + * @init: see struct ahash_alg + * @update: see struct ahash_alg + * @final: see struct ahash_alg + * @finup: see struct ahash_alg + * @digest: see struct ahash_alg + * @export: see struct ahash_alg + * @import: see struct ahash_alg + * @setkey: see struct ahash_alg + * @init_tfm: Initialize the cryptographic transformation object. + * This function is called only once at the instantiation + * time, right after the transformation context was + * allocated. In case the cryptographic hardware has + * some special requirements which need to be handled + * by software, this function shall check for the precise + * requirement of the transformation and put any software + * fallbacks in place. + * @exit_tfm: Deinitialize the cryptographic transformation object. + * This is a counterpart to @init_tfm, used to remove + * various changes set in @init_tfm. + * @digestsize: see struct ahash_alg + * @statesize: see struct ahash_alg + * @descsize: Size of the operational state for the message digest. This state + * size is the memory size that needs to be allocated for + * shash_desc.__ctx + * @base: internally used + */ +struct shash_alg { + int (*init)(struct shash_desc *desc); + int (*update)(struct shash_desc *desc, const u8 *data, + unsigned int len); + int (*final)(struct shash_desc *desc, u8 *out); + int (*finup)(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out); + int (*digest)(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out); + int (*export)(struct shash_desc *desc, void *out); + int (*import)(struct shash_desc *desc, const void *in); + int (*setkey)(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen); + int (*init_tfm)(struct crypto_shash *tfm); + void (*exit_tfm)(struct crypto_shash *tfm); + + unsigned int descsize; + + /* These fields must match hash_alg_common. 
*/ + unsigned int digestsize + __attribute__ ((aligned(__alignof__(struct hash_alg_common)))); + unsigned int statesize; + + struct crypto_alg base; +}; + +struct crypto_ahash { + int (*init)(struct ahash_request *req); + int (*update)(struct ahash_request *req); + int (*final)(struct ahash_request *req); + int (*finup)(struct ahash_request *req); + int (*digest)(struct ahash_request *req); + int (*export)(struct ahash_request *req, void *out); + int (*import)(struct ahash_request *req, const void *in); + int (*setkey)(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen); + + unsigned int reqsize; + struct crypto_tfm base; +}; + +struct crypto_shash { + unsigned int descsize; + struct crypto_tfm base; +}; + +/** + * DOC: Asynchronous Message Digest API + * + * The asynchronous message digest API is used with the ciphers of type + * CRYPTO_ALG_TYPE_AHASH (listed as type "ahash" in /proc/crypto) + * + * The asynchronous cipher operation discussion provided for the + * CRYPTO_ALG_TYPE_SKCIPHER API applies here as well. + */ + +static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_ahash, base); +} + +/** + * crypto_alloc_ahash() - allocate ahash cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * ahash cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for an ahash. The returned struct + * crypto_ahash is the cipher handle that is required for any subsequent + * API invocation for that ahash. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type, + u32 mask); + +static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm) +{ + return &tfm->base; +} + +/** + * crypto_free_ahash() - zeroize and free the ahash handle + * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. + */ +static inline void crypto_free_ahash(struct crypto_ahash *tfm) +{ + crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm)); +} + +/** + * crypto_has_ahash() - Search for the availability of an ahash. + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * ahash + * @type: specifies the type of the ahash + * @mask: specifies the mask for the ahash + * + * Return: true when the ahash is known to the kernel crypto API; false + * otherwise + */ +int crypto_has_ahash(const char *alg_name, u32 type, u32 mask); + +static inline const char *crypto_ahash_alg_name(struct crypto_ahash *tfm) +{ + return crypto_tfm_alg_name(crypto_ahash_tfm(tfm)); +} + +static inline const char *crypto_ahash_driver_name(struct crypto_ahash *tfm) +{ + return crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); +} + +static inline unsigned int crypto_ahash_alignmask( + struct crypto_ahash *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm)); +} + +/** + * crypto_ahash_blocksize() - obtain block size for cipher + * @tfm: cipher handle + * + * The block size for the message digest cipher referenced with the cipher + * handle is returned. 
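+ *
+ * (HMAC is one consumer of this value: the HMAC key, hashed down first if it
+ * is longer than one block, is zero-padded to exactly this block size before
+ * being XORed with HMAC_IPAD_VALUE and HMAC_OPAD_VALUE.)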
+ * + * Return: block size of cipher + */ +static inline unsigned int crypto_ahash_blocksize(struct crypto_ahash *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); +} + +static inline struct hash_alg_common *__crypto_hash_alg_common( + struct crypto_alg *alg) +{ + return container_of(alg, struct hash_alg_common, base); +} + +static inline struct hash_alg_common *crypto_hash_alg_common( + struct crypto_ahash *tfm) +{ + return __crypto_hash_alg_common(crypto_ahash_tfm(tfm)->__crt_alg); +} + +/** + * crypto_ahash_digestsize() - obtain message digest size + * @tfm: cipher handle + * + * The size for the message digest created by the message digest cipher + * referenced with the cipher handle is returned. + * + * + * Return: message digest size of cipher + */ +static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm) +{ + return crypto_hash_alg_common(tfm)->digestsize; +} + +/** + * crypto_ahash_statesize() - obtain size of the ahash state + * @tfm: cipher handle + * + * Return the size of the ahash state. With the crypto_ahash_export() + * function, the caller can export the state into a buffer whose size is + * defined with this function. + * + * Return: size of the ahash state + */ +static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm) +{ + return crypto_hash_alg_common(tfm)->statesize; +} + +static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm) +{ + return crypto_tfm_get_flags(crypto_ahash_tfm(tfm)); +} + +static inline void crypto_ahash_set_flags(struct crypto_ahash *tfm, u32 flags) +{ + crypto_tfm_set_flags(crypto_ahash_tfm(tfm), flags); +} + +static inline void crypto_ahash_clear_flags(struct crypto_ahash *tfm, u32 flags) +{ + crypto_tfm_clear_flags(crypto_ahash_tfm(tfm), flags); +} + +/** + * crypto_ahash_reqtfm() - obtain cipher handle from request + * @req: asynchronous request handle that contains the reference to the ahash + * cipher handle + * + * Return the ahash cipher handle that is registered with the asynchronous + * request handle ahash_request. + * + * Return: ahash cipher handle + */ +static inline struct crypto_ahash *crypto_ahash_reqtfm( + struct ahash_request *req) +{ + return __crypto_ahash_cast(req->base.tfm); +} + +/** + * crypto_ahash_reqsize() - obtain size of the request data structure + * @tfm: cipher handle + * + * Return: size of the request data + */ +static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm) +{ + return tfm->reqsize; +} + +static inline void *ahash_request_ctx(struct ahash_request *req) +{ + return req->__ctx; +} + +/** + * crypto_ahash_setkey - set key for cipher handle + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the ahash cipher. The cipher + * handle must point to a keyed hash in order for this function to succeed. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen); + +/** + * crypto_ahash_finup() - update and finalize message digest + * @req: reference to the ahash_request handle that holds all information + * needed to perform the cipher operation + * + * This function is a "short-hand" for the function calls of + * crypto_ahash_update and crypto_ahash_final. The parameters have the same + * meaning as discussed for those separate functions. 
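+ *
+ * A minimal usage sketch, assuming a "sha256" ahash is available and that
+ * data/len describe a caller-supplied flat buffer::
+ *
+ *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
+ *	struct ahash_request *req;
+ *	struct scatterlist sg;
+ *	DECLARE_CRYPTO_WAIT(wait);
+ *	u8 out[SHA256_DIGEST_SIZE];
+ *	int err;
+ *
+ *	if (IS_ERR(tfm))
+ *		return PTR_ERR(tfm);
+ *	req = ahash_request_alloc(tfm, GFP_KERNEL);
+ *	if (!req) {
+ *		crypto_free_ahash(tfm);
+ *		return -ENOMEM;
+ *	}
+ *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ *				   crypto_req_done, &wait);
+ *	sg_init_one(&sg, data, len);
+ *	ahash_request_set_crypt(req, &sg, out, len);
+ *	err = crypto_wait_req(crypto_ahash_init(req), &wait);
+ *	if (!err)
+ *		err = crypto_wait_req(crypto_ahash_finup(req), &wait);
+ *	ahash_request_free(req);
+ *	crypto_free_ahash(tfm);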
+ * + * Return: see crypto_ahash_final() + */ +int crypto_ahash_finup(struct ahash_request *req); + +/** + * crypto_ahash_final() - calculate message digest + * @req: reference to the ahash_request handle that holds all information + * needed to perform the cipher operation + * + * Finalize the message digest operation and create the message digest + * based on all data added to the cipher handle. The message digest is placed + * into the output buffer registered with the ahash_request handle. + * + * Return: + * 0 if the message digest was successfully calculated; + * -EINPROGRESS if data is feeded into hardware (DMA) or queued for later; + * -EBUSY if queue is full and request should be resubmitted later; + * other < 0 if an error occurred + */ +int crypto_ahash_final(struct ahash_request *req); + +/** + * crypto_ahash_digest() - calculate message digest for a buffer + * @req: reference to the ahash_request handle that holds all information + * needed to perform the cipher operation + * + * This function is a "short-hand" for the function calls of crypto_ahash_init, + * crypto_ahash_update and crypto_ahash_final. The parameters have the same + * meaning as discussed for those separate three functions. + * + * Return: see crypto_ahash_final() + */ +int crypto_ahash_digest(struct ahash_request *req); + +/** + * crypto_ahash_export() - extract current message digest state + * @req: reference to the ahash_request handle whose state is exported + * @out: output buffer of sufficient size that can hold the hash state + * + * This function exports the hash state of the ahash_request handle into the + * caller-allocated output buffer out which must have sufficient size (e.g. by + * calling crypto_ahash_statesize()). + * + * Return: 0 if the export was successful; < 0 if an error occurred + */ +static inline int crypto_ahash_export(struct ahash_request *req, void *out) +{ + return crypto_ahash_reqtfm(req)->export(req, out); +} + +/** + * crypto_ahash_import() - import message digest state + * @req: reference to ahash_request handle the state is imported into + * @in: buffer holding the state + * + * This function imports the hash state into the ahash_request handle from the + * input buffer. That buffer should have been generated with the + * crypto_ahash_export function. + * + * Return: 0 if the import was successful; < 0 if an error occurred + */ +static inline int crypto_ahash_import(struct ahash_request *req, const void *in) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return tfm->import(req, in); +} + +/** + * crypto_ahash_init() - (re)initialize message digest handle + * @req: ahash_request handle that already is initialized with all necessary + * data using the ahash_request_* API functions + * + * The call (re-)initializes the message digest referenced by the ahash_request + * handle. Any potentially existing state created by previous operations is + * discarded. + * + * Return: see crypto_ahash_final() + */ +static inline int crypto_ahash_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return tfm->init(req); +} + +/** + * crypto_ahash_update() - add data to message digest for processing + * @req: ahash_request handle that was previously initialized with the + * crypto_ahash_init call. + * + * Updates the message digest state of the &ahash_request handle. 
The input data + * is pointed to by the scatter/gather list registered in the &ahash_request + * handle + * + * Return: see crypto_ahash_final() + */ +static inline int crypto_ahash_update(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct crypto_alg *alg = tfm->base.__crt_alg; + unsigned int nbytes = req->nbytes; + int ret; + + crypto_stats_get(alg); + ret = crypto_ahash_reqtfm(req)->update(req); + crypto_stats_ahash_update(nbytes, ret, alg); + return ret; +} + +/** + * DOC: Asynchronous Hash Request Handle + * + * The &ahash_request data structure contains all pointers to data + * required for the asynchronous cipher operation. This includes the cipher + * handle (which can be used by multiple &ahash_request instances), pointer + * to plaintext and the message digest output buffer, asynchronous callback + * function, etc. It acts as a handle to the ahash_request_* API calls in a + * similar way as ahash handle to the crypto_ahash_* API calls. + */ + +/** + * ahash_request_set_tfm() - update cipher handle reference in request + * @req: request handle to be modified + * @tfm: cipher handle that shall be added to the request handle + * + * Allow the caller to replace the existing ahash handle in the request + * data structure with a different one. + */ +static inline void ahash_request_set_tfm(struct ahash_request *req, + struct crypto_ahash *tfm) +{ + req->base.tfm = crypto_ahash_tfm(tfm); +} + +/** + * ahash_request_alloc() - allocate request data structure + * @tfm: cipher handle to be registered with the request + * @gfp: memory allocation flag that is handed to kmalloc by the API call. + * + * Allocate the request data structure that must be used with the ahash + * message digest API calls. During + * the allocation, the provided ahash handle + * is registered in the request data structure. + * + * Return: allocated request handle in case of success, or NULL if out of memory + */ +static inline struct ahash_request *ahash_request_alloc( + struct crypto_ahash *tfm, gfp_t gfp) +{ + struct ahash_request *req; + + req = kmalloc(sizeof(struct ahash_request) + + crypto_ahash_reqsize(tfm), gfp); + + if (likely(req)) + ahash_request_set_tfm(req, tfm); + + return req; +} + +/** + * ahash_request_free() - zeroize and free the request data structure + * @req: request data structure cipher handle to be freed + */ +static inline void ahash_request_free(struct ahash_request *req) +{ + kfree_sensitive(req); +} + +static inline void ahash_request_zero(struct ahash_request *req) +{ + memzero_explicit(req, sizeof(*req) + + crypto_ahash_reqsize(crypto_ahash_reqtfm(req))); +} + +static inline struct ahash_request *ahash_request_cast( + struct crypto_async_request *req) +{ + return container_of(req, struct ahash_request, base); +} + +/** + * ahash_request_set_callback() - set asynchronous callback function + * @req: request handle + * @flags: specify zero or an ORing of the flags + * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and + * increase the wait queue beyond the initial maximum size; + * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep + * @compl: callback function pointer to be registered with the request handle + * @data: The data pointer refers to memory that is not used by the kernel + * crypto API, but provided to the callback function for it to use. Here, + * the caller can provide a reference to memory the callback function can + * operate on. 
As the callback function is invoked asynchronously to the + * related functionality, it may need to access data structures of the + * related functionality which can be referenced using this pointer. The + * callback function can access the memory via the "data" field in the + * &crypto_async_request data structure provided to the callback function. + * + * This function allows setting the callback function that is triggered once + * the cipher operation completes. + * + * The callback function is registered with the &ahash_request handle and + * must comply with the following template:: + * + * void callback_function(struct crypto_async_request *req, int error) + */ +static inline void ahash_request_set_callback(struct ahash_request *req, + u32 flags, + crypto_completion_t compl, + void *data) +{ + req->base.complete = compl; + req->base.data = data; + req->base.flags = flags; +} + +/** + * ahash_request_set_crypt() - set data buffers + * @req: ahash_request handle to be updated + * @src: source scatter/gather list + * @result: buffer that is filled with the message digest -- the caller must + * ensure that the buffer has sufficient space by, for example, calling + * crypto_ahash_digestsize() + * @nbytes: number of bytes to process from the source scatter/gather list + * + * By using this call, the caller references the source scatter/gather list. + * The source scatter/gather list points to the data the message digest is to + * be calculated for. + */ +static inline void ahash_request_set_crypt(struct ahash_request *req, + struct scatterlist *src, u8 *result, + unsigned int nbytes) +{ + req->src = src; + req->nbytes = nbytes; + req->result = result; +} + +/** + * DOC: Synchronous Message Digest API + * + * The synchronous message digest API is used with the ciphers of type + * CRYPTO_ALG_TYPE_SHASH (listed as type "shash" in /proc/crypto) + * + * The message digest API is able to maintain state information for the + * caller. + * + * The synchronous message digest API can store user-related context in its + * shash_desc request data structure. + */ + +/** + * crypto_alloc_shash() - allocate message digest handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * message digest cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for a message digest. The returned &struct + * crypto_shash is the cipher handle that is required for any subsequent + * API invocation for that message digest. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type, + u32 mask); + +static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm) +{ + return &tfm->base; +} + +/** + * crypto_free_shash() - zeroize and free the message digest handle + * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. 
+ */ +static inline void crypto_free_shash(struct crypto_shash *tfm) +{ + crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm)); +} + +static inline const char *crypto_shash_alg_name(struct crypto_shash *tfm) +{ + return crypto_tfm_alg_name(crypto_shash_tfm(tfm)); +} + +static inline const char *crypto_shash_driver_name(struct crypto_shash *tfm) +{ + return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm)); +} + +static inline unsigned int crypto_shash_alignmask( + struct crypto_shash *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm)); +} + +/** + * crypto_shash_blocksize() - obtain block size for cipher + * @tfm: cipher handle + * + * The block size for the message digest cipher referenced with the cipher + * handle is returned. + * + * Return: block size of cipher + */ +static inline unsigned int crypto_shash_blocksize(struct crypto_shash *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_shash_tfm(tfm)); +} + +static inline struct shash_alg *__crypto_shash_alg(struct crypto_alg *alg) +{ + return container_of(alg, struct shash_alg, base); +} + +static inline struct shash_alg *crypto_shash_alg(struct crypto_shash *tfm) +{ + return __crypto_shash_alg(crypto_shash_tfm(tfm)->__crt_alg); +} + +/** + * crypto_shash_digestsize() - obtain message digest size + * @tfm: cipher handle + * + * The size for the message digest created by the message digest cipher + * referenced with the cipher handle is returned. + * + * Return: digest size of cipher + */ +static inline unsigned int crypto_shash_digestsize(struct crypto_shash *tfm) +{ + return crypto_shash_alg(tfm)->digestsize; +} + +static inline unsigned int crypto_shash_statesize(struct crypto_shash *tfm) +{ + return crypto_shash_alg(tfm)->statesize; +} + +static inline u32 crypto_shash_get_flags(struct crypto_shash *tfm) +{ + return crypto_tfm_get_flags(crypto_shash_tfm(tfm)); +} + +static inline void crypto_shash_set_flags(struct crypto_shash *tfm, u32 flags) +{ + crypto_tfm_set_flags(crypto_shash_tfm(tfm), flags); +} + +static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags) +{ + crypto_tfm_clear_flags(crypto_shash_tfm(tfm), flags); +} + +/** + * crypto_shash_descsize() - obtain the operational state size + * @tfm: cipher handle + * + * The size of the operational state the cipher needs during operation is + * returned for the hash referenced with the cipher handle. This size is + * required to calculate the memory requirements to allow the caller allocating + * sufficient memory for operational state. + * + * The operational state is defined with struct shash_desc where the size of + * that data structure is to be calculated as + * sizeof(struct shash_desc) + crypto_shash_descsize(alg) + * + * Return: size of the operational state + */ +static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm) +{ + return tfm->descsize; +} + +static inline void *shash_desc_ctx(struct shash_desc *desc) +{ + return desc->__ctx; +} + +/** + * crypto_shash_setkey() - set key for message digest + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the keyed message digest cipher. The + * cipher handle must point to a keyed message digest cipher in order for this + * function to succeed. + * + * Context: Any context. 
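+ *
+ * A minimal sketch of keying a MAC, assuming an "hmac(sha256)" shash and a
+ * caller-supplied key/keylen and data/len::
+ *
+ *	struct crypto_shash *tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
+ *	u8 mac[SHA256_DIGEST_SIZE];
+ *	int err;
+ *
+ *	if (IS_ERR(tfm))
+ *		return PTR_ERR(tfm);
+ *	err = crypto_shash_setkey(tfm, key, keylen);
+ *	if (!err)
+ *		err = crypto_shash_tfm_digest(tfm, data, len, mac);
+ *	crypto_free_shash(tfm);
+ *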
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen); + +/** + * crypto_shash_digest() - calculate message digest for buffer + * @desc: see crypto_shash_final() + * @data: see crypto_shash_update() + * @len: see crypto_shash_update() + * @out: see crypto_shash_final() + * + * This function is a "short-hand" for the function calls of crypto_shash_init, + * crypto_shash_update and crypto_shash_final. The parameters have the same + * meaning as discussed for those separate three functions. + * + * Context: Any context. + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ +int crypto_shash_digest(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out); + +/** + * crypto_shash_tfm_digest() - calculate message digest for buffer + * @tfm: hash transformation object + * @data: see crypto_shash_update() + * @len: see crypto_shash_update() + * @out: see crypto_shash_final() + * + * This is a simplified version of crypto_shash_digest() for users who don't + * want to allocate their own hash descriptor (shash_desc). Instead, + * crypto_shash_tfm_digest() takes a hash transformation object (crypto_shash) + * directly, and it allocates a hash descriptor on the stack internally. + * Note that this stack allocation may be fairly large. + * + * Context: Any context. + * Return: 0 on success; < 0 if an error occurred. + */ +int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data, + unsigned int len, u8 *out); + +/** + * crypto_shash_export() - extract operational state for message digest + * @desc: reference to the operational state handle whose state is exported + * @out: output buffer of sufficient size that can hold the hash state + * + * This function exports the hash state of the operational state handle into the + * caller-allocated output buffer out which must have sufficient size (e.g. by + * calling crypto_shash_descsize). + * + * Context: Any context. + * Return: 0 if the export creation was successful; < 0 if an error occurred + */ +static inline int crypto_shash_export(struct shash_desc *desc, void *out) +{ + return crypto_shash_alg(desc->tfm)->export(desc, out); +} + +/** + * crypto_shash_import() - import operational state + * @desc: reference to the operational state handle the state imported into + * @in: buffer holding the state + * + * This function imports the hash state into the operational state handle from + * the input buffer. That buffer should have been generated with the + * crypto_ahash_export function. + * + * Context: Any context. + * Return: 0 if the import was successful; < 0 if an error occurred + */ +static inline int crypto_shash_import(struct shash_desc *desc, const void *in) +{ + struct crypto_shash *tfm = desc->tfm; + + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return crypto_shash_alg(tfm)->import(desc, in); +} + +/** + * crypto_shash_init() - (re)initialize message digest + * @desc: operational state handle that is already filled + * + * The call (re-)initializes the message digest referenced by the + * operational state handle. Any potentially existing state created by + * previous operations is discarded. + * + * Context: Any context. 
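+ *
+ * A minimal sketch of an incremental hash over two caller-supplied buffers,
+ * assuming @tfm was obtained with crypto_alloc_shash() and using the on-stack
+ * descriptor helper from this header::
+ *
+ *	SHASH_DESC_ON_STACK(desc, tfm);
+ *	u8 out[HASH_MAX_DIGESTSIZE];
+ *	int err;
+ *
+ *	desc->tfm = tfm;
+ *	err = crypto_shash_init(desc);
+ *	if (!err)
+ *		err = crypto_shash_update(desc, buf1, len1);
+ *	if (!err)
+ *		err = crypto_shash_update(desc, buf2, len2);
+ *	if (!err)
+ *		err = crypto_shash_final(desc, out);
+ *	shash_desc_zero(desc);
+ *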
+ * Return: 0 if the message digest initialization was successful; < 0 if an + * error occurred + */ +static inline int crypto_shash_init(struct shash_desc *desc) +{ + struct crypto_shash *tfm = desc->tfm; + + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return crypto_shash_alg(tfm)->init(desc); +} + +/** + * crypto_shash_update() - add data to message digest for processing + * @desc: operational state handle that is already initialized + * @data: input data to be added to the message digest + * @len: length of the input data + * + * Updates the message digest state of the operational state handle. + * + * Context: Any context. + * Return: 0 if the message digest update was successful; < 0 if an error + * occurred + */ +int crypto_shash_update(struct shash_desc *desc, const u8 *data, + unsigned int len); + +/** + * crypto_shash_final() - calculate message digest + * @desc: operational state handle that is already filled with data + * @out: output buffer filled with the message digest + * + * Finalize the message digest operation and create the message digest + * based on all data added to the cipher handle. The message digest is placed + * into the output buffer. The caller must ensure that the output buffer is + * large enough by using crypto_shash_digestsize. + * + * Context: Any context. + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ +int crypto_shash_final(struct shash_desc *desc, u8 *out); + +/** + * crypto_shash_finup() - calculate message digest of buffer + * @desc: see crypto_shash_final() + * @data: see crypto_shash_update() + * @len: see crypto_shash_update() + * @out: see crypto_shash_final() + * + * This function is a "short-hand" for the function calls of + * crypto_shash_update and crypto_shash_final. The parameters have the same + * meaning as discussed for those separate functions. + * + * Context: Any context. 
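+ *
+ * For example, after absorbing a header with crypto_shash_update(), a
+ * trailing payload can be hashed and the digest produced in a single call::
+ *
+ *	err = crypto_shash_finup(desc, payload, payload_len, out);
+ *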
+ * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ +int crypto_shash_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out); + +static inline void shash_desc_zero(struct shash_desc *desc) +{ + memzero_explicit(desc, + sizeof(*desc) + crypto_shash_descsize(desc->tfm)); +} + +#endif /* _CRYPTO_HASH_H */ diff --git a/include/crypto/hash_info.h b/include/crypto/hash_info.h new file mode 100644 index 000000000..eb9d2e368 --- /dev/null +++ b/include/crypto/hash_info.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Hash Info: Hash algorithms information + * + * Copyright (c) 2013 Dmitry Kasatkin <d.kasatkin@samsung.com> + */ + +#ifndef _CRYPTO_HASH_INFO_H +#define _CRYPTO_HASH_INFO_H + +#include <crypto/sha.h> +#include <crypto/md5.h> +#include <crypto/streebog.h> + +#include <uapi/linux/hash_info.h> + +/* not defined in include/crypto/ */ +#define RMD128_DIGEST_SIZE 16 +#define RMD160_DIGEST_SIZE 20 +#define RMD256_DIGEST_SIZE 32 +#define RMD320_DIGEST_SIZE 40 + +/* not defined in include/crypto/ */ +#define WP512_DIGEST_SIZE 64 +#define WP384_DIGEST_SIZE 48 +#define WP256_DIGEST_SIZE 32 + +/* not defined in include/crypto/ */ +#define TGR128_DIGEST_SIZE 16 +#define TGR160_DIGEST_SIZE 20 +#define TGR192_DIGEST_SIZE 24 + +/* not defined in include/crypto/ */ +#define SM3256_DIGEST_SIZE 32 + +extern const char *const hash_algo_name[HASH_ALGO__LAST]; +extern const int hash_digest_size[HASH_ALGO__LAST]; + +#endif /* _CRYPTO_HASH_INFO_H */ diff --git a/include/crypto/hmac.h b/include/crypto/hmac.h new file mode 100644 index 000000000..66774132a --- /dev/null +++ b/include/crypto/hmac.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_HMAC_H +#define _CRYPTO_HMAC_H + +#define HMAC_IPAD_VALUE 0x36 +#define HMAC_OPAD_VALUE 0x5c + +#endif /* _CRYPTO_HMAC_H */ diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h new file mode 100644 index 000000000..a406e281a --- /dev/null +++ b/include/crypto/if_alg.h @@ -0,0 +1,250 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * if_alg: User-space algorithm interface + * + * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#ifndef _CRYPTO_IF_ALG_H +#define _CRYPTO_IF_ALG_H + +#include <linux/compiler.h> +#include <linux/completion.h> +#include <linux/if_alg.h> +#include <linux/scatterlist.h> +#include <linux/types.h> +#include <linux/atomic.h> +#include <net/sock.h> + +#include <crypto/aead.h> +#include <crypto/skcipher.h> + +#define ALG_MAX_PAGES 16 + +struct crypto_async_request; + +struct alg_sock { + /* struct sock must be the first member of struct alg_sock */ + struct sock sk; + + struct sock *parent; + + atomic_t refcnt; + atomic_t nokey_refcnt; + + const struct af_alg_type *type; + void *private; +}; + +struct af_alg_control { + struct af_alg_iv *iv; + int op; + unsigned int aead_assoclen; +}; + +struct af_alg_type { + void *(*bind)(const char *name, u32 type, u32 mask); + void (*release)(void *private); + int (*setkey)(void *private, const u8 *key, unsigned int keylen); + int (*setentropy)(void *private, sockptr_t entropy, unsigned int len); + int (*accept)(void *private, struct sock *sk); + int (*accept_nokey)(void *private, struct sock *sk); + int (*setauthsize)(void *private, unsigned int authsize); + + struct proto_ops *ops; + struct proto_ops *ops_nokey; + struct module *owner; + char name[14]; +}; + +struct af_alg_sgl { + struct scatterlist sg[ALG_MAX_PAGES + 1]; + struct page *pages[ALG_MAX_PAGES]; + 
unsigned int npages; +}; + +/* TX SGL entry */ +struct af_alg_tsgl { + struct list_head list; + unsigned int cur; /* Last processed SG entry */ + struct scatterlist sg[]; /* Array of SGs forming the SGL */ +}; + +#define MAX_SGL_ENTS ((4096 - sizeof(struct af_alg_tsgl)) / \ + sizeof(struct scatterlist) - 1) + +/* RX SGL entry */ +struct af_alg_rsgl { + struct af_alg_sgl sgl; + struct list_head list; + size_t sg_num_bytes; /* Bytes of data in that SGL */ +}; + +/** + * struct af_alg_async_req - definition of crypto request + * @iocb: IOCB for AIO operations + * @sk: Socket the request is associated with + * @first_rsgl: First RX SG + * @last_rsgl: Pointer to last RX SG + * @rsgl_list: Track RX SGs + * @tsgl: Private, per request TX SGL of buffers to process + * @tsgl_entries: Number of entries in priv. TX SGL + * @outlen: Number of output bytes generated by crypto op + * @areqlen: Length of this data structure + * @cra_u: Cipher request + */ +struct af_alg_async_req { + struct kiocb *iocb; + struct sock *sk; + + struct af_alg_rsgl first_rsgl; + struct af_alg_rsgl *last_rsgl; + struct list_head rsgl_list; + + struct scatterlist *tsgl; + unsigned int tsgl_entries; + + unsigned int outlen; + unsigned int areqlen; + + union { + struct aead_request aead_req; + struct skcipher_request skcipher_req; + } cra_u; + + /* req ctx trails this struct */ +}; + +/** + * struct af_alg_ctx - definition of the crypto context + * + * The crypto context tracks the input data during the lifetime of an AF_ALG + * socket. + * + * @tsgl_list: Link to TX SGL + * @iv: IV for cipher operation + * @aead_assoclen: Length of AAD for AEAD cipher operations + * @completion: Work queue for synchronous operation + * @used: TX bytes sent to kernel. This variable is used to + * ensure that user space cannot cause the kernel + * to allocate too much memory in sendmsg operation. + * @rcvused: Total RX bytes to be filled by kernel. This variable + * is used to ensure user space cannot cause the kernel + * to allocate too much memory in a recvmsg operation. + * @more: More data to be expected from user space? + * @merge: Shall new data from user space be merged into existing + * SG? + * @enc: Cryptographic operation to be performed when + * recvmsg is invoked. + * @init: True if metadata has been sent. + * @len: Length of memory allocated for this data structure. + * @inflight: Non-zero when AIO requests are in flight. + */ +struct af_alg_ctx { + struct list_head tsgl_list; + + void *iv; + size_t aead_assoclen; + + struct crypto_wait wait; + + size_t used; + atomic_t rcvused; + + bool more; + bool merge; + bool enc; + bool init; + + unsigned int len; + + unsigned int inflight; +}; + +int af_alg_register_type(const struct af_alg_type *type); +int af_alg_unregister_type(const struct af_alg_type *type); + +int af_alg_release(struct socket *sock); +void af_alg_release_parent(struct sock *sk); +int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern); + +int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len); +void af_alg_free_sg(struct af_alg_sgl *sgl); + +static inline struct alg_sock *alg_sk(struct sock *sk) +{ + return (struct alg_sock *)sk; +} + +/** + * Size of available buffer for sending data from user space to kernel. 
+ * + * @sk socket of connection to user space + * @return number of bytes still available + */ +static inline int af_alg_sndbuf(struct sock *sk) +{ + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + + return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) - + ctx->used, 0); +} + +/** + * Can the send buffer still be written to? + * + * @sk socket of connection to user space + * @return true => writable, false => not writable + */ +static inline bool af_alg_writable(struct sock *sk) +{ + return PAGE_SIZE <= af_alg_sndbuf(sk); +} + +/** + * Size of available buffer used by kernel for the RX user space operation. + * + * @sk socket of connection to user space + * @return number of bytes still available + */ +static inline int af_alg_rcvbuf(struct sock *sk) +{ + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + + return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) - + atomic_read(&ctx->rcvused), 0); +} + +/** + * Can the RX buffer still be written to? + * + * @sk socket of connection to user space + * @return true => writable, false => not writable + */ +static inline bool af_alg_readable(struct sock *sk) +{ + return PAGE_SIZE <= af_alg_rcvbuf(sk); +} + +unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset); +void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, + size_t dst_offset); +void af_alg_wmem_wakeup(struct sock *sk); +int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min); +int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, + unsigned int ivsize); +ssize_t af_alg_sendpage(struct socket *sock, struct page *page, + int offset, size_t size, int flags); +void af_alg_free_resources(struct af_alg_async_req *areq); +void af_alg_async_cb(struct crypto_async_request *_req, int err); +__poll_t af_alg_poll(struct file *file, struct socket *sock, + poll_table *wait); +struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk, + unsigned int areqlen); +int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, + struct af_alg_async_req *areq, size_t maxsize, + size_t *outlen); + +#endif /* _CRYPTO_IF_ALG_H */ diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h new file mode 100644 index 000000000..cfc47e188 --- /dev/null +++ b/include/crypto/internal/acompress.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Asynchronous Compression operations + * + * Copyright (c) 2016, Intel Corporation + * Authors: Weigang Li <weigang.li@intel.com> + * Giovanni Cabiddu <giovanni.cabiddu@intel.com> + */ +#ifndef _CRYPTO_ACOMP_INT_H +#define _CRYPTO_ACOMP_INT_H +#include <crypto/acompress.h> + +/* + * Transform internal helpers. 
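+ *
+ * For example, a driver's ->compress() callback can recover its private
+ * per-transform and per-request state through the helpers below; my_tfm_ctx
+ * and my_req_ctx are placeholder driver structures:
+ *
+ *	static int my_acomp_compress(struct acomp_req *req)
+ *	{
+ *		struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+ *		struct my_tfm_ctx *ctx = acomp_tfm_ctx(tfm);
+ *		struct my_req_ctx *rctx = acomp_request_ctx(req);
+ *
+ *		... program req->src/req->slen and req->dst/req->dlen ...
+ *
+ *		return -EINPROGRESS;
+ *	}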
+ */ +static inline void *acomp_request_ctx(struct acomp_req *req) +{ + return req->__ctx; +} + +static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm) +{ + return tfm->base.__crt_ctx; +} + +static inline void acomp_request_complete(struct acomp_req *req, + int err) +{ + req->base.complete(&req->base, err); +} + +static inline const char *acomp_alg_name(struct crypto_acomp *tfm) +{ + return crypto_acomp_tfm(tfm)->__crt_alg->cra_name; +} + +static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm) +{ + struct acomp_req *req; + + req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL); + if (likely(req)) + acomp_request_set_tfm(req, tfm); + return req; +} + +static inline void __acomp_request_free(struct acomp_req *req) +{ + kfree_sensitive(req); +} + +/** + * crypto_register_acomp() -- Register asynchronous compression algorithm + * + * Function registers an implementation of an asynchronous + * compression algorithm + * + * @alg: algorithm definition + * + * Return: zero on success; error code in case of error + */ +int crypto_register_acomp(struct acomp_alg *alg); + +/** + * crypto_unregister_acomp() -- Unregister asynchronous compression algorithm + * + * Function unregisters an implementation of an asynchronous + * compression algorithm + * + * @alg: algorithm definition + */ +void crypto_unregister_acomp(struct acomp_alg *alg); + +int crypto_register_acomps(struct acomp_alg *algs, int count); +void crypto_unregister_acomps(struct acomp_alg *algs, int count); + +#endif diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h new file mode 100644 index 000000000..27b7b0224 --- /dev/null +++ b/include/crypto/internal/aead.h @@ -0,0 +1,171 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * AEAD: Authenticated Encryption with Associated Data + * + * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#ifndef _CRYPTO_INTERNAL_AEAD_H +#define _CRYPTO_INTERNAL_AEAD_H + +#include <crypto/aead.h> +#include <crypto/algapi.h> +#include <linux/stddef.h> +#include <linux/types.h> + +struct rtattr; + +struct aead_instance { + void (*free)(struct aead_instance *inst); + union { + struct { + char head[offsetof(struct aead_alg, base)]; + struct crypto_instance base; + } s; + struct aead_alg alg; + }; +}; + +struct crypto_aead_spawn { + struct crypto_spawn base; +}; + +struct aead_queue { + struct crypto_queue base; +}; + +static inline void *crypto_aead_ctx(struct crypto_aead *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + +static inline struct crypto_instance *aead_crypto_instance( + struct aead_instance *inst) +{ + return container_of(&inst->alg.base, struct crypto_instance, alg); +} + +static inline struct aead_instance *aead_instance(struct crypto_instance *inst) +{ + return container_of(&inst->alg, struct aead_instance, alg.base); +} + +static inline struct aead_instance *aead_alg_instance(struct crypto_aead *aead) +{ + return aead_instance(crypto_tfm_alg_instance(&aead->base)); +} + +static inline void *aead_instance_ctx(struct aead_instance *inst) +{ + return crypto_instance_ctx(aead_crypto_instance(inst)); +} + +static inline void *aead_request_ctx(struct aead_request *req) +{ + return req->__ctx; +} + +static inline void aead_request_complete(struct aead_request *req, int err) +{ + req->base.complete(&req->base, err); +} + +static inline u32 aead_request_flags(struct aead_request *req) +{ + return req->base.flags; +} + +static inline struct aead_request *aead_request_cast( + struct crypto_async_request 
*req) +{ + return container_of(req, struct aead_request, base); +} + +int crypto_grab_aead(struct crypto_aead_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask); + +static inline void crypto_drop_aead(struct crypto_aead_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct aead_alg *crypto_spawn_aead_alg( + struct crypto_aead_spawn *spawn) +{ + return container_of(spawn->base.alg, struct aead_alg, base); +} + +static inline struct crypto_aead *crypto_spawn_aead( + struct crypto_aead_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +static inline void crypto_aead_set_reqsize(struct crypto_aead *aead, + unsigned int reqsize) +{ + aead->reqsize = reqsize; +} + +static inline void aead_init_queue(struct aead_queue *queue, + unsigned int max_qlen) +{ + crypto_init_queue(&queue->base, max_qlen); +} + +static inline int aead_enqueue_request(struct aead_queue *queue, + struct aead_request *request) +{ + return crypto_enqueue_request(&queue->base, &request->base); +} + +static inline struct aead_request *aead_dequeue_request( + struct aead_queue *queue) +{ + struct crypto_async_request *req; + + req = crypto_dequeue_request(&queue->base); + + return req ? container_of(req, struct aead_request, base) : NULL; +} + +static inline struct aead_request *aead_get_backlog(struct aead_queue *queue) +{ + struct crypto_async_request *req; + + req = crypto_get_backlog(&queue->base); + + return req ? container_of(req, struct aead_request, base) : NULL; +} + +static inline unsigned int crypto_aead_alg_chunksize(struct aead_alg *alg) +{ + return alg->chunksize; +} + +/** + * crypto_aead_chunksize() - obtain chunk size + * @tfm: cipher handle + * + * The block size is set to one for ciphers such as CCM. However, + * you still need to provide incremental updates in multiples of + * the underlying block size as the IV does not have sub-block + * granularity. This is known in this API as the chunk size. + * + * Return: chunk size in bytes + */ +static inline unsigned int crypto_aead_chunksize(struct crypto_aead *tfm) +{ + return crypto_aead_alg_chunksize(crypto_aead_alg(tfm)); +} + +int crypto_register_aead(struct aead_alg *alg); +void crypto_unregister_aead(struct aead_alg *alg); +int crypto_register_aeads(struct aead_alg *algs, int count); +void crypto_unregister_aeads(struct aead_alg *algs, int count); +int aead_register_instance(struct crypto_template *tmpl, + struct aead_instance *inst); + +#endif /* _CRYPTO_INTERNAL_AEAD_H */ + diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h new file mode 100644 index 000000000..8d3220c9a --- /dev/null +++ b/include/crypto/internal/akcipher.h @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Public Key Encryption + * + * Copyright (c) 2015, Intel Corporation + * Authors: Tadeusz Struk <tadeusz.struk@intel.com> + */ +#ifndef _CRYPTO_AKCIPHER_INT_H +#define _CRYPTO_AKCIPHER_INT_H +#include <crypto/akcipher.h> +#include <crypto/algapi.h> + +struct akcipher_instance { + void (*free)(struct akcipher_instance *inst); + union { + struct { + char head[offsetof(struct akcipher_alg, base)]; + struct crypto_instance base; + } s; + struct akcipher_alg alg; + }; +}; + +struct crypto_akcipher_spawn { + struct crypto_spawn base; +}; + +/* + * Transform internal helpers. 
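+ *
+ * These are meant for akcipher implementations and templates rather than API
+ * users; for example, an implementation advertises the size of its
+ * per-request context with akcipher_set_reqsize() and later retrieves that
+ * context in its operation callbacks via akcipher_request_ctx().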
+ */ +static inline void *akcipher_request_ctx(struct akcipher_request *req) +{ + return req->__ctx; +} + +static inline void akcipher_set_reqsize(struct crypto_akcipher *akcipher, + unsigned int reqsize) +{ + crypto_akcipher_alg(akcipher)->reqsize = reqsize; +} + +static inline void *akcipher_tfm_ctx(struct crypto_akcipher *tfm) +{ + return tfm->base.__crt_ctx; +} + +static inline void akcipher_request_complete(struct akcipher_request *req, + int err) +{ + req->base.complete(&req->base, err); +} + +static inline const char *akcipher_alg_name(struct crypto_akcipher *tfm) +{ + return crypto_akcipher_tfm(tfm)->__crt_alg->cra_name; +} + +static inline struct crypto_instance *akcipher_crypto_instance( + struct akcipher_instance *inst) +{ + return container_of(&inst->alg.base, struct crypto_instance, alg); +} + +static inline struct akcipher_instance *akcipher_instance( + struct crypto_instance *inst) +{ + return container_of(&inst->alg, struct akcipher_instance, alg.base); +} + +static inline struct akcipher_instance *akcipher_alg_instance( + struct crypto_akcipher *akcipher) +{ + return akcipher_instance(crypto_tfm_alg_instance(&akcipher->base)); +} + +static inline void *akcipher_instance_ctx(struct akcipher_instance *inst) +{ + return crypto_instance_ctx(akcipher_crypto_instance(inst)); +} + +int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask); + +static inline struct crypto_akcipher *crypto_spawn_akcipher( + struct crypto_akcipher_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +static inline void crypto_drop_akcipher(struct crypto_akcipher_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct akcipher_alg *crypto_spawn_akcipher_alg( + struct crypto_akcipher_spawn *spawn) +{ + return container_of(spawn->base.alg, struct akcipher_alg, base); +} + +/** + * crypto_register_akcipher() -- Register public key algorithm + * + * Function registers an implementation of a public key verify algorithm + * + * @alg: algorithm definition + * + * Return: zero on success; error code in case of error + */ +int crypto_register_akcipher(struct akcipher_alg *alg); + +/** + * crypto_unregister_akcipher() -- Unregister public key algorithm + * + * Function unregisters an implementation of a public key verify algorithm + * + * @alg: algorithm definition + */ +void crypto_unregister_akcipher(struct akcipher_alg *alg); + +/** + * akcipher_register_instance() -- Unregister public key template instance + * + * Function registers an implementation of an asymmetric key algorithm + * created from a template + * + * @tmpl: the template from which the algorithm was created + * @inst: the template instance + */ +int akcipher_register_instance(struct crypto_template *tmpl, + struct akcipher_instance *inst); +#endif diff --git a/include/crypto/internal/blake2s.h b/include/crypto/internal/blake2s.h new file mode 100644 index 000000000..52363eee2 --- /dev/null +++ b/include/crypto/internal/blake2s.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Helper functions for BLAKE2s implementations. + * Keep this in sync with the corresponding BLAKE2b header. 
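+ *
+ * For example, an accelerated shash implementation can simply wrap the
+ * helpers below, passing force_generic=false so that the arch-specific
+ * blake2s_compress() is used (my_blake2s_update/my_blake2s_final are
+ * placeholder names):
+ *
+ *	static int my_blake2s_update(struct shash_desc *desc, const u8 *in,
+ *				     unsigned int inlen)
+ *	{
+ *		return crypto_blake2s_update(desc, in, inlen, false);
+ *	}
+ *
+ *	static int my_blake2s_final(struct shash_desc *desc, u8 *out)
+ *	{
+ *		return crypto_blake2s_final(desc, out, false);
+ *	}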
+ */ + +#ifndef _CRYPTO_INTERNAL_BLAKE2S_H +#define _CRYPTO_INTERNAL_BLAKE2S_H + +#include <crypto/blake2s.h> +#include <crypto/internal/hash.h> +#include <linux/string.h> + +void blake2s_compress_generic(struct blake2s_state *state, const u8 *block, + size_t nblocks, const u32 inc); + +void blake2s_compress(struct blake2s_state *state, const u8 *block, + size_t nblocks, const u32 inc); + +bool blake2s_selftest(void); + +static inline void blake2s_set_lastblock(struct blake2s_state *state) +{ + state->f[0] = -1; +} + +/* Helper functions for BLAKE2s shared by the library and shash APIs */ + +static __always_inline void +__blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen, + bool force_generic) +{ + const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen; + + if (unlikely(!inlen)) + return; + if (inlen > fill) { + memcpy(state->buf + state->buflen, in, fill); + if (force_generic) + blake2s_compress_generic(state, state->buf, 1, + BLAKE2S_BLOCK_SIZE); + else + blake2s_compress(state, state->buf, 1, + BLAKE2S_BLOCK_SIZE); + state->buflen = 0; + in += fill; + inlen -= fill; + } + if (inlen > BLAKE2S_BLOCK_SIZE) { + const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE); + /* Hash one less (full) block than strictly possible */ + if (force_generic) + blake2s_compress_generic(state, in, nblocks - 1, + BLAKE2S_BLOCK_SIZE); + else + blake2s_compress(state, in, nblocks - 1, + BLAKE2S_BLOCK_SIZE); + in += BLAKE2S_BLOCK_SIZE * (nblocks - 1); + inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1); + } + memcpy(state->buf + state->buflen, in, inlen); + state->buflen += inlen; +} + +static __always_inline void +__blake2s_final(struct blake2s_state *state, u8 *out, bool force_generic) +{ + blake2s_set_lastblock(state); + memset(state->buf + state->buflen, 0, + BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */ + if (force_generic) + blake2s_compress_generic(state, state->buf, 1, state->buflen); + else + blake2s_compress(state, state->buf, 1, state->buflen); + cpu_to_le32_array(state->h, ARRAY_SIZE(state->h)); + memcpy(out, state->h, state->outlen); +} + +/* Helper functions for shash implementations of BLAKE2s */ + +struct blake2s_tfm_ctx { + u8 key[BLAKE2S_KEY_SIZE]; + unsigned int keylen; +}; + +static inline int crypto_blake2s_setkey(struct crypto_shash *tfm, + const u8 *key, unsigned int keylen) +{ + struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm); + + if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE) + return -EINVAL; + + memcpy(tctx->key, key, keylen); + tctx->keylen = keylen; + + return 0; +} + +static inline int crypto_blake2s_init(struct shash_desc *desc) +{ + const struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + struct blake2s_state *state = shash_desc_ctx(desc); + unsigned int outlen = crypto_shash_digestsize(desc->tfm); + + __blake2s_init(state, outlen, tctx->key, tctx->keylen); + return 0; +} + +static inline int crypto_blake2s_update(struct shash_desc *desc, + const u8 *in, unsigned int inlen, + bool force_generic) +{ + struct blake2s_state *state = shash_desc_ctx(desc); + + __blake2s_update(state, in, inlen, force_generic); + return 0; +} + +static inline int crypto_blake2s_final(struct shash_desc *desc, u8 *out, + bool force_generic) +{ + struct blake2s_state *state = shash_desc_ctx(desc); + + __blake2s_final(state, out, force_generic); + return 0; +} + +#endif /* _CRYPTO_INTERNAL_BLAKE2S_H */ diff --git a/include/crypto/internal/chacha.h b/include/crypto/internal/chacha.h new file mode 100644 index 000000000..b085dc1ac --- /dev/null +++ 
b/include/crypto/internal/chacha.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _CRYPTO_INTERNAL_CHACHA_H +#define _CRYPTO_INTERNAL_CHACHA_H + +#include <crypto/chacha.h> +#include <crypto/internal/skcipher.h> +#include <linux/crypto.h> + +struct chacha_ctx { + u32 key[8]; + int nrounds; +}; + +static inline int chacha_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keysize, int nrounds) +{ + struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); + int i; + + if (keysize != CHACHA_KEY_SIZE) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(ctx->key); i++) + ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32)); + + ctx->nrounds = nrounds; + return 0; +} + +static inline int chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keysize) +{ + return chacha_setkey(tfm, key, keysize, 20); +} + +static inline int chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keysize) +{ + return chacha_setkey(tfm, key, keysize, 12); +} + +#endif /* _CRYPTO_CHACHA_H */ diff --git a/include/crypto/internal/cryptouser.h b/include/crypto/internal/cryptouser.h new file mode 100644 index 000000000..fd5407433 --- /dev/null +++ b/include/crypto/internal/cryptouser.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/cryptouser.h> +#include <net/netlink.h> + +struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact); + +#ifdef CONFIG_CRYPTO_STATS +int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs); +#else +static inline int crypto_reportstat(struct sk_buff *in_skb, + struct nlmsghdr *in_nlh, + struct nlattr **attrs) +{ + return -ENOTSUPP; +} +#endif diff --git a/include/crypto/internal/des.h b/include/crypto/internal/des.h new file mode 100644 index 000000000..723fe5bf1 --- /dev/null +++ b/include/crypto/internal/des.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * DES & Triple DES EDE key verification helpers + */ + +#ifndef __CRYPTO_INTERNAL_DES_H +#define __CRYPTO_INTERNAL_DES_H + +#include <linux/crypto.h> +#include <linux/fips.h> +#include <crypto/des.h> +#include <crypto/aead.h> +#include <crypto/skcipher.h> + +/** + * crypto_des_verify_key - Check whether a DES key is weak + * @tfm: the crypto algo + * @key: the key buffer + * + * Returns -EINVAL if the key is weak and the crypto TFM does not permit weak + * keys. Otherwise, 0 is returned. + * + * It is the job of the caller to ensure that the size of the key equals + * DES_KEY_SIZE. + */ +static inline int crypto_des_verify_key(struct crypto_tfm *tfm, const u8 *key) +{ + struct des_ctx tmp; + int err; + + err = des_expand_key(&tmp, key, DES_KEY_SIZE); + if (err == -ENOKEY) { + if (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) + err = -EINVAL; + else + err = 0; + } + memzero_explicit(&tmp, sizeof(tmp)); + return err; +} + +/* + * RFC2451: + * + * For DES-EDE3, there is no known need to reject weak or + * complementation keys. Any weakness is obviated by the use of + * multiple keys. + * + * However, if the first two or last two independent 64-bit keys are + * equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the + * same as DES. Implementers MUST reject keys that exhibit this + * property. + * + */ +static inline int des3_ede_verify_key(const u8 *key, unsigned int key_len, + bool check_weak) +{ + int ret = fips_enabled ? 
-EINVAL : -ENOKEY; + u32 K[6]; + + memcpy(K, key, DES3_EDE_KEY_SIZE); + + if ((!((K[0] ^ K[2]) | (K[1] ^ K[3])) || + !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && + (fips_enabled || check_weak)) + goto bad; + + if ((!((K[0] ^ K[4]) | (K[1] ^ K[5]))) && fips_enabled) + goto bad; + + ret = 0; +bad: + memzero_explicit(K, DES3_EDE_KEY_SIZE); + + return ret; +} + +/** + * crypto_des3_ede_verify_key - Check whether a DES3-EDE key is weak + * @tfm: the crypto algo + * @key: the key buffer + * + * Returns -EINVAL if the key is weak and the crypto TFM does not permit weak + * keys or when running in FIPS mode. Otherwise, 0 is returned. Note that some + * keys are rejected in FIPS mode even if weak keys are permitted by the TFM + * flags. + * + * It is the job of the caller to ensure that the size of the key equals + * DES3_EDE_KEY_SIZE. + */ +static inline int crypto_des3_ede_verify_key(struct crypto_tfm *tfm, + const u8 *key) +{ + return des3_ede_verify_key(key, DES3_EDE_KEY_SIZE, + crypto_tfm_get_flags(tfm) & + CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); +} + +static inline int verify_skcipher_des_key(struct crypto_skcipher *tfm, + const u8 *key) +{ + return crypto_des_verify_key(crypto_skcipher_tfm(tfm), key); +} + +static inline int verify_skcipher_des3_key(struct crypto_skcipher *tfm, + const u8 *key) +{ + return crypto_des3_ede_verify_key(crypto_skcipher_tfm(tfm), key); +} + +static inline int verify_aead_des_key(struct crypto_aead *tfm, const u8 *key, + int keylen) +{ + if (keylen != DES_KEY_SIZE) + return -EINVAL; + return crypto_des_verify_key(crypto_aead_tfm(tfm), key); +} + +static inline int verify_aead_des3_key(struct crypto_aead *tfm, const u8 *key, + int keylen) +{ + if (keylen != DES3_EDE_KEY_SIZE) + return -EINVAL; + return crypto_des3_ede_verify_key(crypto_aead_tfm(tfm), key); +} + +#endif /* __CRYPTO_INTERNAL_DES_H */ diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h new file mode 100644 index 000000000..7fd7126f5 --- /dev/null +++ b/include/crypto/internal/geniv.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * geniv: IV generation + * + * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#ifndef _CRYPTO_INTERNAL_GENIV_H +#define _CRYPTO_INTERNAL_GENIV_H + +#include <crypto/internal/aead.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +struct aead_geniv_ctx { + spinlock_t lock; + struct crypto_aead *child; + struct crypto_sync_skcipher *sknull; + u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); +}; + +struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, + struct rtattr **tb); +int aead_init_geniv(struct crypto_aead *tfm); +void aead_exit_geniv(struct crypto_aead *tfm); + +#endif /* _CRYPTO_INTERNAL_GENIV_H */ diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h new file mode 100644 index 000000000..25806141d --- /dev/null +++ b/include/crypto/internal/hash.h @@ -0,0 +1,245 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Hash algorithms. 
+ * + * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#ifndef _CRYPTO_INTERNAL_HASH_H +#define _CRYPTO_INTERNAL_HASH_H + +#include <crypto/algapi.h> +#include <crypto/hash.h> + +struct ahash_request; +struct scatterlist; + +struct crypto_hash_walk { + char *data; + + unsigned int offset; + unsigned int alignmask; + + struct page *pg; + unsigned int entrylen; + + unsigned int total; + struct scatterlist *sg; + + unsigned int flags; +}; + +struct ahash_instance { + void (*free)(struct ahash_instance *inst); + union { + struct { + char head[offsetof(struct ahash_alg, halg.base)]; + struct crypto_instance base; + } s; + struct ahash_alg alg; + }; +}; + +struct shash_instance { + void (*free)(struct shash_instance *inst); + union { + struct { + char head[offsetof(struct shash_alg, base)]; + struct crypto_instance base; + } s; + struct shash_alg alg; + }; +}; + +struct crypto_ahash_spawn { + struct crypto_spawn base; +}; + +struct crypto_shash_spawn { + struct crypto_spawn base; +}; + +int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err); +int crypto_hash_walk_first(struct ahash_request *req, + struct crypto_hash_walk *walk); + +static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk) +{ + return !(walk->entrylen | walk->total); +} + +int crypto_register_ahash(struct ahash_alg *alg); +void crypto_unregister_ahash(struct ahash_alg *alg); +int crypto_register_ahashes(struct ahash_alg *algs, int count); +void crypto_unregister_ahashes(struct ahash_alg *algs, int count); +int ahash_register_instance(struct crypto_template *tmpl, + struct ahash_instance *inst); + +bool crypto_shash_alg_has_setkey(struct shash_alg *alg); + +static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg) +{ + return crypto_shash_alg_has_setkey(alg) && + !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY); +} + +bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg); + +int crypto_grab_ahash(struct crypto_ahash_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask); + +static inline void crypto_drop_ahash(struct crypto_ahash_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct hash_alg_common *crypto_spawn_ahash_alg( + struct crypto_ahash_spawn *spawn) +{ + return __crypto_hash_alg_common(spawn->base.alg); +} + +int crypto_register_shash(struct shash_alg *alg); +void crypto_unregister_shash(struct shash_alg *alg); +int crypto_register_shashes(struct shash_alg *algs, int count); +void crypto_unregister_shashes(struct shash_alg *algs, int count); +int shash_register_instance(struct crypto_template *tmpl, + struct shash_instance *inst); +void shash_free_singlespawn_instance(struct shash_instance *inst); + +int crypto_grab_shash(struct crypto_shash_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask); + +static inline void crypto_drop_shash(struct crypto_shash_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct shash_alg *crypto_spawn_shash_alg( + struct crypto_shash_spawn *spawn) +{ + return __crypto_shash_alg(spawn->base.alg); +} + +int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc); +int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc); +int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc); + +int crypto_init_shash_ops_async(struct crypto_tfm *tfm); + +static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm) +{ + return crypto_tfm_ctx(crypto_ahash_tfm(tfm)); +} + +static inline 
struct ahash_alg *__crypto_ahash_alg(struct crypto_alg *alg) +{ + return container_of(__crypto_hash_alg_common(alg), struct ahash_alg, + halg); +} + +static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm, + unsigned int reqsize) +{ + tfm->reqsize = reqsize; +} + +static inline struct crypto_instance *ahash_crypto_instance( + struct ahash_instance *inst) +{ + return &inst->s.base; +} + +static inline struct ahash_instance *ahash_instance( + struct crypto_instance *inst) +{ + return container_of(inst, struct ahash_instance, s.base); +} + +static inline struct ahash_instance *ahash_alg_instance( + struct crypto_ahash *ahash) +{ + return ahash_instance(crypto_tfm_alg_instance(&ahash->base)); +} + +static inline void *ahash_instance_ctx(struct ahash_instance *inst) +{ + return crypto_instance_ctx(ahash_crypto_instance(inst)); +} + +static inline void ahash_request_complete(struct ahash_request *req, int err) +{ + req->base.complete(&req->base, err); +} + +static inline u32 ahash_request_flags(struct ahash_request *req) +{ + return req->base.flags; +} + +static inline struct crypto_ahash *crypto_spawn_ahash( + struct crypto_ahash_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +static inline int ahash_enqueue_request(struct crypto_queue *queue, + struct ahash_request *request) +{ + return crypto_enqueue_request(queue, &request->base); +} + +static inline struct ahash_request *ahash_dequeue_request( + struct crypto_queue *queue) +{ + return ahash_request_cast(crypto_dequeue_request(queue)); +} + +static inline void *crypto_shash_ctx(struct crypto_shash *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + +static inline struct crypto_instance *shash_crypto_instance( + struct shash_instance *inst) +{ + return &inst->s.base; +} + +static inline struct shash_instance *shash_instance( + struct crypto_instance *inst) +{ + return container_of(inst, struct shash_instance, s.base); +} + +static inline struct shash_instance *shash_alg_instance( + struct crypto_shash *shash) +{ + return shash_instance(crypto_tfm_alg_instance(&shash->base)); +} + +static inline void *shash_instance_ctx(struct shash_instance *inst) +{ + return crypto_instance_ctx(shash_crypto_instance(inst)); +} + +static inline struct crypto_shash *crypto_spawn_shash( + struct crypto_shash_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +static inline void *crypto_shash_ctx_aligned(struct crypto_shash *tfm) +{ + return crypto_tfm_ctx_aligned(&tfm->base); +} + +static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_shash, base); +} + +#endif /* _CRYPTO_INTERNAL_HASH_H */ + diff --git a/include/crypto/internal/kpp.h b/include/crypto/internal/kpp.h new file mode 100644 index 000000000..659b642ef --- /dev/null +++ b/include/crypto/internal/kpp.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Key-agreement Protocol Primitives (KPP) + * + * Copyright (c) 2016, Intel Corporation + * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com> + */ +#ifndef _CRYPTO_KPP_INT_H +#define _CRYPTO_KPP_INT_H +#include <crypto/kpp.h> +#include <crypto/algapi.h> + +/* + * Transform internal helpers. 
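+ *
+ * An illustrative sketch of how a KPP driver might use these helpers from
+ * its ->generate_public_key() callback; 'my_tfm_ctx', 'my_req_ctx' and
+ * 'my_do_generate' are hypothetical driver-private names, not part of this
+ * header:
+ *
+ *	static int my_generate_public_key(struct kpp_request *req)
+ *	{
+ *		struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ *		struct my_tfm_ctx *tctx = kpp_tfm_ctx(tfm);
+ *		struct my_req_ctx *rctx = kpp_request_ctx(req);
+ *
+ *		return my_do_generate(tctx, rctx, req->dst, req->dst_len);
+ *	}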
+ */ +static inline void *kpp_request_ctx(struct kpp_request *req) +{ + return req->__ctx; +} + +static inline void *kpp_tfm_ctx(struct crypto_kpp *tfm) +{ + return tfm->base.__crt_ctx; +} + +static inline void kpp_request_complete(struct kpp_request *req, int err) +{ + req->base.complete(&req->base, err); +} + +static inline const char *kpp_alg_name(struct crypto_kpp *tfm) +{ + return crypto_kpp_tfm(tfm)->__crt_alg->cra_name; +} + +/** + * crypto_register_kpp() -- Register key-agreement protocol primitives algorithm + * + * Function registers an implementation of a key-agreement protocol primitive + * algorithm + * + * @alg: algorithm definition + * + * Return: zero on success; error code in case of error + */ +int crypto_register_kpp(struct kpp_alg *alg); + +/** + * crypto_unregister_kpp() -- Unregister key-agreement protocol primitive + * algorithm + * + * Function unregisters an implementation of a key-agreement protocol primitive + * algorithm + * + * @alg: algorithm definition + */ +void crypto_unregister_kpp(struct kpp_alg *alg); + +#endif diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h new file mode 100644 index 000000000..196aa769f --- /dev/null +++ b/include/crypto/internal/poly1305.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for the Poly1305 algorithm + */ + +#ifndef _CRYPTO_INTERNAL_POLY1305_H +#define _CRYPTO_INTERNAL_POLY1305_H + +#include <asm/unaligned.h> +#include <linux/types.h> +#include <crypto/poly1305.h> + +/* + * Poly1305 core functions. These only accept whole blocks; the caller must + * handle any needed block buffering and padding. 'hibit' must be 1 for any + * full blocks, or 0 for the final block if it had to be padded. If 'nonce' is + * non-NULL, then it's added at the end to compute the Poly1305 MAC. Otherwise, + * only the ε-almost-∆-universal hash function (not the full MAC) is computed. 
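+ *
+ * A minimal sketch of a full MAC computed with the core functions, assuming
+ * the message is a whole number of blocks and 'raw_key' is the usual 32-byte
+ * Poly1305 key (first half r, second half s); 'msg', 'nblocks' and 'mac' are
+ * caller-provided:
+ *
+ *	struct poly1305_core_key key;
+ *	struct poly1305_state state;
+ *	u32 nonce[4];
+ *	int i;
+ *
+ *	poly1305_core_setkey(&key, raw_key);
+ *	for (i = 0; i < 4; i++)
+ *		nonce[i] = get_unaligned_le32(raw_key + 16 + i * 4);
+ *	poly1305_core_init(&state);
+ *	poly1305_core_blocks(&state, &key, msg, nblocks, 1);
+ *	poly1305_core_emit(&state, nonce, mac);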
+ */ + +void poly1305_core_setkey(struct poly1305_core_key *key, + const u8 raw_key[POLY1305_BLOCK_SIZE]); +static inline void poly1305_core_init(struct poly1305_state *state) +{ + *state = (struct poly1305_state){}; +} + +void poly1305_core_blocks(struct poly1305_state *state, + const struct poly1305_core_key *key, const void *src, + unsigned int nblocks, u32 hibit); +void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4], + void *dst); + +#endif diff --git a/include/crypto/internal/rng.h b/include/crypto/internal/rng.h new file mode 100644 index 000000000..e0711b6a5 --- /dev/null +++ b/include/crypto/internal/rng.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * RNG: Random Number Generator algorithms under the crypto API + * + * Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com> + * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#ifndef _CRYPTO_INTERNAL_RNG_H +#define _CRYPTO_INTERNAL_RNG_H + +#include <crypto/algapi.h> +#include <crypto/rng.h> + +int crypto_register_rng(struct rng_alg *alg); +void crypto_unregister_rng(struct rng_alg *alg); +int crypto_register_rngs(struct rng_alg *algs, int count); +void crypto_unregister_rngs(struct rng_alg *algs, int count); + +#if defined(CONFIG_CRYPTO_RNG) || defined(CONFIG_CRYPTO_RNG_MODULE) +int crypto_del_default_rng(void); +#else +static inline int crypto_del_default_rng(void) +{ + return 0; +} +#endif + +static inline void *crypto_rng_ctx(struct crypto_rng *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + +static inline void crypto_rng_set_entropy(struct crypto_rng *tfm, + const u8 *data, unsigned int len) +{ + crypto_rng_alg(tfm)->set_ent(tfm, data, len); +} + +#endif diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h new file mode 100644 index 000000000..e870133f4 --- /dev/null +++ b/include/crypto/internal/rsa.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * RSA internal helpers + * + * Copyright (c) 2015, Intel Corporation + * Authors: Tadeusz Struk <tadeusz.struk@intel.com> + */ +#ifndef _RSA_HELPER_ +#define _RSA_HELPER_ +#include <linux/types.h> + +/** + * rsa_key - RSA key structure + * @n : RSA modulus raw byte stream + * @e : RSA public exponent raw byte stream + * @d : RSA private exponent raw byte stream + * @p : RSA prime factor p of n raw byte stream + * @q : RSA prime factor q of n raw byte stream + * @dp : RSA exponent d mod (p - 1) raw byte stream + * @dq : RSA exponent d mod (q - 1) raw byte stream + * @qinv : RSA CRT coefficient q^(-1) mod p raw byte stream + * @n_sz : length in bytes of RSA modulus n + * @e_sz : length in bytes of RSA public exponent + * @d_sz : length in bytes of RSA private exponent + * @p_sz : length in bytes of p field + * @q_sz : length in bytes of q field + * @dp_sz : length in bytes of dp field + * @dq_sz : length in bytes of dq field + * @qinv_sz : length in bytes of qinv field + */ +struct rsa_key { + const u8 *n; + const u8 *e; + const u8 *d; + const u8 *p; + const u8 *q; + const u8 *dp; + const u8 *dq; + const u8 *qinv; + size_t n_sz; + size_t e_sz; + size_t d_sz; + size_t p_sz; + size_t q_sz; + size_t dp_sz; + size_t dq_sz; + size_t qinv_sz; +}; + +int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key, + unsigned int key_len); + +int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key, + unsigned int key_len); + +extern struct crypto_template rsa_pkcs1pad_tmpl; +#endif diff --git a/include/crypto/internal/scompress.h 
b/include/crypto/internal/scompress.h new file mode 100644 index 000000000..f834274c2 --- /dev/null +++ b/include/crypto/internal/scompress.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Synchronous Compression operations + * + * Copyright 2015 LG Electronics Inc. + * Copyright (c) 2016, Intel Corporation + * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com> + */ +#ifndef _CRYPTO_SCOMP_INT_H +#define _CRYPTO_SCOMP_INT_H +#include <linux/crypto.h> + +#define SCOMP_SCRATCH_SIZE 131072 + +struct crypto_scomp { + struct crypto_tfm base; +}; + +/** + * struct scomp_alg - synchronous compression algorithm + * + * @alloc_ctx: Function allocates algorithm specific context + * @free_ctx: Function frees context allocated with alloc_ctx + * @compress: Function performs a compress operation + * @decompress: Function performs a de-compress operation + * @base: Common crypto API algorithm data structure + */ +struct scomp_alg { + void *(*alloc_ctx)(struct crypto_scomp *tfm); + void (*free_ctx)(struct crypto_scomp *tfm, void *ctx); + int (*compress)(struct crypto_scomp *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen, + void *ctx); + int (*decompress)(struct crypto_scomp *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen, + void *ctx); + struct crypto_alg base; +}; + +static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg) +{ + return container_of(alg, struct scomp_alg, base); +} + +static inline struct crypto_scomp *__crypto_scomp_tfm(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_scomp, base); +} + +static inline struct crypto_tfm *crypto_scomp_tfm(struct crypto_scomp *tfm) +{ + return &tfm->base; +} + +static inline void crypto_free_scomp(struct crypto_scomp *tfm) +{ + crypto_destroy_tfm(tfm, crypto_scomp_tfm(tfm)); +} + +static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm) +{ + return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg); +} + +static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm) +{ + return crypto_scomp_alg(tfm)->alloc_ctx(tfm); +} + +static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm, + void *ctx) +{ + return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx); +} + +static inline int crypto_scomp_compress(struct crypto_scomp *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen, void *ctx) +{ + return crypto_scomp_alg(tfm)->compress(tfm, src, slen, dst, dlen, ctx); +} + +static inline int crypto_scomp_decompress(struct crypto_scomp *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen, + void *ctx) +{ + return crypto_scomp_alg(tfm)->decompress(tfm, src, slen, dst, dlen, + ctx); +} + +int crypto_init_scomp_ops_async(struct crypto_tfm *tfm); +struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req); +void crypto_acomp_scomp_free_ctx(struct acomp_req *req); + +/** + * crypto_register_scomp() -- Register synchronous compression algorithm + * + * Function registers an implementation of a synchronous + * compression algorithm + * + * @alg: algorithm definition + * + * Return: zero on success; error code in case of error + */ +int crypto_register_scomp(struct scomp_alg *alg); + +/** + * crypto_unregister_scomp() -- Unregister synchronous compression algorithm + * + * Function unregisters an implementation of a synchronous + * compression algorithm + * + * @alg: algorithm definition + */ +void crypto_unregister_scomp(struct scomp_alg *alg); + +int crypto_register_scomps(struct scomp_alg 
*algs, int count); +void crypto_unregister_scomps(struct scomp_alg *algs, int count); + +#endif diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h new file mode 100644 index 000000000..d2316242a --- /dev/null +++ b/include/crypto/internal/simd.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Shared crypto simd helpers + */ + +#ifndef _CRYPTO_INTERNAL_SIMD_H +#define _CRYPTO_INTERNAL_SIMD_H + +#include <linux/percpu.h> +#include <linux/types.h> + +/* skcipher support */ + +struct simd_skcipher_alg; +struct skcipher_alg; + +struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname, + const char *drvname, + const char *basename); +struct simd_skcipher_alg *simd_skcipher_create(const char *algname, + const char *basename); +void simd_skcipher_free(struct simd_skcipher_alg *alg); + +int simd_register_skciphers_compat(struct skcipher_alg *algs, int count, + struct simd_skcipher_alg **simd_algs); + +void simd_unregister_skciphers(struct skcipher_alg *algs, int count, + struct simd_skcipher_alg **simd_algs); + +/* AEAD support */ + +struct simd_aead_alg; +struct aead_alg; + +struct simd_aead_alg *simd_aead_create_compat(const char *algname, + const char *drvname, + const char *basename); +struct simd_aead_alg *simd_aead_create(const char *algname, + const char *basename); +void simd_aead_free(struct simd_aead_alg *alg); + +int simd_register_aeads_compat(struct aead_alg *algs, int count, + struct simd_aead_alg **simd_algs); + +void simd_unregister_aeads(struct aead_alg *algs, int count, + struct simd_aead_alg **simd_algs); + +/* + * crypto_simd_usable() - is it allowed at this time to use SIMD instructions or + * access the SIMD register file? + * + * This delegates to may_use_simd(), except that this also returns false if SIMD + * in crypto code has been temporarily disabled on this CPU by the crypto + * self-tests, in order to test the no-SIMD fallback code. This override is + * currently limited to configurations where the extra self-tests are enabled, + * because it might be a bit too invasive to be part of the regular self-tests. + * + * This is a macro so that <asm/simd.h>, which some architectures don't have, + * doesn't have to be included directly here. + */ +#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS +DECLARE_PER_CPU(bool, crypto_simd_disabled_for_test); +#define crypto_simd_usable() \ + (may_use_simd() && !this_cpu_read(crypto_simd_disabled_for_test)) +#else +#define crypto_simd_usable() may_use_simd() +#endif + +#endif /* _CRYPTO_INTERNAL_SIMD_H */ diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h new file mode 100644 index 000000000..10226c12c --- /dev/null +++ b/include/crypto/internal/skcipher.h @@ -0,0 +1,223 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Symmetric key ciphers. 
+ * + * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#ifndef _CRYPTO_INTERNAL_SKCIPHER_H +#define _CRYPTO_INTERNAL_SKCIPHER_H + +#include <crypto/algapi.h> +#include <crypto/skcipher.h> +#include <linux/list.h> +#include <linux/types.h> + +struct aead_request; +struct rtattr; + +struct skcipher_instance { + void (*free)(struct skcipher_instance *inst); + union { + struct { + char head[offsetof(struct skcipher_alg, base)]; + struct crypto_instance base; + } s; + struct skcipher_alg alg; + }; +}; + +struct crypto_skcipher_spawn { + struct crypto_spawn base; +}; + +struct skcipher_walk { + union { + struct { + struct page *page; + unsigned long offset; + } phys; + + struct { + u8 *page; + void *addr; + } virt; + } src, dst; + + struct scatter_walk in; + unsigned int nbytes; + + struct scatter_walk out; + unsigned int total; + + struct list_head buffers; + + u8 *page; + u8 *buffer; + u8 *oiv; + void *iv; + + unsigned int ivsize; + + int flags; + unsigned int blocksize; + unsigned int stride; + unsigned int alignmask; +}; + +static inline struct crypto_instance *skcipher_crypto_instance( + struct skcipher_instance *inst) +{ + return &inst->s.base; +} + +static inline struct skcipher_instance *skcipher_alg_instance( + struct crypto_skcipher *skcipher) +{ + return container_of(crypto_skcipher_alg(skcipher), + struct skcipher_instance, alg); +} + +static inline void *skcipher_instance_ctx(struct skcipher_instance *inst) +{ + return crypto_instance_ctx(skcipher_crypto_instance(inst)); +} + +static inline void skcipher_request_complete(struct skcipher_request *req, int err) +{ + req->base.complete(&req->base, err); +} + +int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask); + +static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct skcipher_alg *crypto_skcipher_spawn_alg( + struct crypto_skcipher_spawn *spawn) +{ + return container_of(spawn->base.alg, struct skcipher_alg, base); +} + +static inline struct skcipher_alg *crypto_spawn_skcipher_alg( + struct crypto_skcipher_spawn *spawn) +{ + return crypto_skcipher_spawn_alg(spawn); +} + +static inline struct crypto_skcipher *crypto_spawn_skcipher( + struct crypto_skcipher_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +static inline void crypto_skcipher_set_reqsize( + struct crypto_skcipher *skcipher, unsigned int reqsize) +{ + skcipher->reqsize = reqsize; +} + +int crypto_register_skcipher(struct skcipher_alg *alg); +void crypto_unregister_skcipher(struct skcipher_alg *alg); +int crypto_register_skciphers(struct skcipher_alg *algs, int count); +void crypto_unregister_skciphers(struct skcipher_alg *algs, int count); +int skcipher_register_instance(struct crypto_template *tmpl, + struct skcipher_instance *inst); + +int skcipher_walk_done(struct skcipher_walk *walk, int err); +int skcipher_walk_virt(struct skcipher_walk *walk, + struct skcipher_request *req, + bool atomic); +void skcipher_walk_atomise(struct skcipher_walk *walk); +int skcipher_walk_async(struct skcipher_walk *walk, + struct skcipher_request *req); +int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, + struct aead_request *req, bool atomic); +int skcipher_walk_aead_decrypt(struct skcipher_walk *walk, + struct aead_request *req, bool atomic); +void skcipher_walk_complete(struct skcipher_walk *walk, int err); + +static inline void skcipher_walk_abort(struct skcipher_walk *walk) +{ 
+ skcipher_walk_done(walk, -ECANCELED); +} + +static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + +static inline void *skcipher_request_ctx(struct skcipher_request *req) +{ + return req->__ctx; +} + +static inline u32 skcipher_request_flags(struct skcipher_request *req) +{ + return req->base.flags; +} + +static inline unsigned int crypto_skcipher_alg_min_keysize( + struct skcipher_alg *alg) +{ + return alg->min_keysize; +} + +static inline unsigned int crypto_skcipher_alg_max_keysize( + struct skcipher_alg *alg) +{ + return alg->max_keysize; +} + +static inline unsigned int crypto_skcipher_alg_walksize( + struct skcipher_alg *alg) +{ + return alg->walksize; +} + +/** + * crypto_skcipher_walksize() - obtain walk size + * @tfm: cipher handle + * + * In some cases, algorithms can only perform optimally when operating on + * multiple blocks in parallel. This is reflected by the walksize, which + * must be a multiple of the chunksize (or equal if the concern does not + * apply) + * + * Return: walk size in bytes + */ +static inline unsigned int crypto_skcipher_walksize( + struct crypto_skcipher *tfm) +{ + return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm)); +} + +/* Helpers for simple block cipher modes of operation */ +struct skcipher_ctx_simple { + struct crypto_cipher *cipher; /* underlying block cipher */ +}; +static inline struct crypto_cipher * +skcipher_cipher_simple(struct crypto_skcipher *tfm) +{ + struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm); + + return ctx->cipher; +} + +struct skcipher_instance *skcipher_alloc_instance_simple( + struct crypto_template *tmpl, struct rtattr **tb); + +static inline struct crypto_alg *skcipher_ialg_simple( + struct skcipher_instance *inst) +{ + struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst); + + return crypto_spawn_cipher_alg(spawn); +} + +#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */ + diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h new file mode 100644 index 000000000..cccceadc1 --- /dev/null +++ b/include/crypto/kpp.h @@ -0,0 +1,362 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Key-agreement Protocol Primitives (KPP) + * + * Copyright (c) 2016, Intel Corporation + * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com> + */ + +#ifndef _CRYPTO_KPP_ +#define _CRYPTO_KPP_ +#include <linux/crypto.h> + +/** + * struct kpp_request + * + * @base: Common attributes for async crypto requests + * @src: Source data + * @dst: Destination data + * @src_len: Size of the input buffer + * @dst_len: Size of the output buffer. It needs to be at least + * as big as the expected result depending on the operation + * After operation it will be updated with the actual size of the + * result. In case of error where the dst sgl size was insufficient, + * it will be updated to the size required for the operation. 
+ * @__ctx: Start of private context data + */ +struct kpp_request { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int src_len; + unsigned int dst_len; + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +/** + * struct crypto_kpp - user-instantiated object which encapsulate + * algorithms and core processing logic + * + * @base: Common crypto API algorithm data structure + */ +struct crypto_kpp { + struct crypto_tfm base; +}; + +/** + * struct kpp_alg - generic key-agreement protocol primitives + * + * @set_secret: Function invokes the protocol specific function to + * store the secret private key along with parameters. + * The implementation knows how to decode the buffer + * @generate_public_key: Function generate the public key to be sent to the + * counterpart. In case of error, where output is not big + * enough req->dst_len will be updated to the size + * required + * @compute_shared_secret: Function compute the shared secret as defined by + * the algorithm. The result is given back to the user. + * In case of error, where output is not big enough, + * req->dst_len will be updated to the size required + * @max_size: Function returns the size of the output buffer + * @init: Initialize the object. This is called only once at + * instantiation time. In case the cryptographic hardware + * needs to be initialized. Software fallback should be + * put in place here. + * @exit: Undo everything @init did. + * + * @reqsize: Request context size required by algorithm + * implementation + * @base: Common crypto API algorithm data structure + */ +struct kpp_alg { + int (*set_secret)(struct crypto_kpp *tfm, const void *buffer, + unsigned int len); + int (*generate_public_key)(struct kpp_request *req); + int (*compute_shared_secret)(struct kpp_request *req); + + unsigned int (*max_size)(struct crypto_kpp *tfm); + + int (*init)(struct crypto_kpp *tfm); + void (*exit)(struct crypto_kpp *tfm); + + unsigned int reqsize; + struct crypto_alg base; +}; + +/** + * DOC: Generic Key-agreement Protocol Primitives API + * + * The KPP API is used with the algorithm type + * CRYPTO_ALG_TYPE_KPP (listed as type "kpp" in /proc/crypto) + */ + +/** + * crypto_alloc_kpp() - allocate KPP tfm handle + * @alg_name: is the name of the kpp algorithm (e.g. "dh", "ecdh") + * @type: specifies the type of the algorithm + * @mask: specifies the mask for the algorithm + * + * Allocate a handle for kpp algorithm. The returned struct crypto_kpp + * is required for any following API invocation + * + * Return: allocated handle in case of success; IS_ERR() is true in case of + * an error, PTR_ERR() returns the error code. 
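+ *
+ * A minimal usage sketch (error handling abbreviated):
+ *
+ *	struct crypto_kpp *tfm = crypto_alloc_kpp("ecdh", 0, 0);
+ *
+ *	if (IS_ERR(tfm))
+ *		return PTR_ERR(tfm);
+ *	...
+ *	crypto_free_kpp(tfm);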
+ */ +struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask); + +static inline struct crypto_tfm *crypto_kpp_tfm(struct crypto_kpp *tfm) +{ + return &tfm->base; +} + +static inline struct kpp_alg *__crypto_kpp_alg(struct crypto_alg *alg) +{ + return container_of(alg, struct kpp_alg, base); +} + +static inline struct crypto_kpp *__crypto_kpp_tfm(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_kpp, base); +} + +static inline struct kpp_alg *crypto_kpp_alg(struct crypto_kpp *tfm) +{ + return __crypto_kpp_alg(crypto_kpp_tfm(tfm)->__crt_alg); +} + +static inline unsigned int crypto_kpp_reqsize(struct crypto_kpp *tfm) +{ + return crypto_kpp_alg(tfm)->reqsize; +} + +static inline void kpp_request_set_tfm(struct kpp_request *req, + struct crypto_kpp *tfm) +{ + req->base.tfm = crypto_kpp_tfm(tfm); +} + +static inline struct crypto_kpp *crypto_kpp_reqtfm(struct kpp_request *req) +{ + return __crypto_kpp_tfm(req->base.tfm); +} + +static inline u32 crypto_kpp_get_flags(struct crypto_kpp *tfm) +{ + return crypto_tfm_get_flags(crypto_kpp_tfm(tfm)); +} + +static inline void crypto_kpp_set_flags(struct crypto_kpp *tfm, u32 flags) +{ + crypto_tfm_set_flags(crypto_kpp_tfm(tfm), flags); +} + +/** + * crypto_free_kpp() - free KPP tfm handle + * + * @tfm: KPP tfm handle allocated with crypto_alloc_kpp() + * + * If @tfm is a NULL or error pointer, this function does nothing. + */ +static inline void crypto_free_kpp(struct crypto_kpp *tfm) +{ + crypto_destroy_tfm(tfm, crypto_kpp_tfm(tfm)); +} + +/** + * kpp_request_alloc() - allocates kpp request + * + * @tfm: KPP tfm handle allocated with crypto_alloc_kpp() + * @gfp: allocation flags + * + * Return: allocated handle in case of success or NULL in case of an error. + */ +static inline struct kpp_request *kpp_request_alloc(struct crypto_kpp *tfm, + gfp_t gfp) +{ + struct kpp_request *req; + + req = kmalloc(sizeof(*req) + crypto_kpp_reqsize(tfm), gfp); + if (likely(req)) + kpp_request_set_tfm(req, tfm); + + return req; +} + +/** + * kpp_request_free() - zeroize and free kpp request + * + * @req: request to free + */ +static inline void kpp_request_free(struct kpp_request *req) +{ + kfree_sensitive(req); +} + +/** + * kpp_request_set_callback() - Sets an asynchronous callback. + * + * Callback will be called when an asynchronous operation on a given + * request is finished. 
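+ *
+ * For example (sketch; 'my_complete' and 'my_data' are caller-provided
+ * names, not part of this API):
+ *
+ *	kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ *				 my_complete, my_data);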
+ * + * @req: request that the callback will be set for + * @flgs: specify for instance if the operation may backlog + * @cmpl: callback which will be called + * @data: private data used by the caller + */ +static inline void kpp_request_set_callback(struct kpp_request *req, + u32 flgs, + crypto_completion_t cmpl, + void *data) +{ + req->base.complete = cmpl; + req->base.data = data; + req->base.flags = flgs; +} + +/** + * kpp_request_set_input() - Sets input buffer + * + * Sets parameters required by generate_public_key + * + * @req: kpp request + * @input: ptr to input scatter list + * @input_len: size of the input scatter list + */ +static inline void kpp_request_set_input(struct kpp_request *req, + struct scatterlist *input, + unsigned int input_len) +{ + req->src = input; + req->src_len = input_len; +} + +/** + * kpp_request_set_output() - Sets output buffer + * + * Sets parameters required by kpp operation + * + * @req: kpp request + * @output: ptr to output scatter list + * @output_len: size of the output scatter list + */ +static inline void kpp_request_set_output(struct kpp_request *req, + struct scatterlist *output, + unsigned int output_len) +{ + req->dst = output; + req->dst_len = output_len; +} + +enum { + CRYPTO_KPP_SECRET_TYPE_UNKNOWN, + CRYPTO_KPP_SECRET_TYPE_DH, + CRYPTO_KPP_SECRET_TYPE_ECDH, +}; + +/** + * struct kpp_secret - small header for packing secret buffer + * + * @type: define type of secret. Each kpp type will define its own + * @len: specify the len of the secret, include the header, that + * follows the struct + */ +struct kpp_secret { + unsigned short type; + unsigned short len; +}; + +/** + * crypto_kpp_set_secret() - Invoke kpp operation + * + * Function invokes the specific kpp operation for a given alg. + * + * @tfm: tfm handle + * @buffer: Buffer holding the packet representation of the private + * key. The structure of the packet key depends on the particular + * KPP implementation. Packing and unpacking helpers are provided + * for ECDH and DH (see the respective header files for those + * implementations). + * @len: Length of the packet private key buffer. + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm, + const void *buffer, unsigned int len) +{ + struct kpp_alg *alg = crypto_kpp_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; + int ret; + + crypto_stats_get(calg); + ret = alg->set_secret(tfm, buffer, len); + crypto_stats_kpp_set_secret(calg, ret); + return ret; +} + +/** + * crypto_kpp_generate_public_key() - Invoke kpp operation + * + * Function invokes the specific kpp operation for generating the public part + * for a given kpp algorithm. + * + * To generate a private key, the caller should use a random number generator. + * The output of the requested length serves as the private key. + * + * @req: kpp key request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_kpp_generate_public_key(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct kpp_alg *alg = crypto_kpp_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; + int ret; + + crypto_stats_get(calg); + ret = alg->generate_public_key(req); + crypto_stats_kpp_generate_public_key(calg, ret); + return ret; +} + +/** + * crypto_kpp_compute_shared_secret() - Invoke kpp operation + * + * Function invokes the specific kpp operation for computing the shared secret + * for a given kpp algorithm. 
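+ *
+ * A simplified sketch of the overall exchange, with scatterlist setup and
+ * error handling omitted ('packed_secret', 'peer_pub_sg' and 'secret_sg'
+ * are caller-provided):
+ *
+ *	crypto_kpp_set_secret(tfm, packed_secret, secret_len);
+ *	kpp_request_set_input(req, peer_pub_sg, peer_pub_len);
+ *	kpp_request_set_output(req, secret_sg, secret_out_len);
+ *	err = crypto_kpp_compute_shared_secret(req);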
+ * + * @req: kpp key request + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct kpp_alg *alg = crypto_kpp_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; + int ret; + + crypto_stats_get(calg); + ret = alg->compute_shared_secret(req); + crypto_stats_kpp_compute_shared_secret(calg, ret); + return ret; +} + +/** + * crypto_kpp_maxsize() - Get len for output buffer + * + * Function returns the output buffer size required for a given key. + * Function assumes that the key is already set in the transformation. If this + * function is called without a setkey or with a failed setkey, you will end up + * in a NULL dereference. + * + * @tfm: KPP tfm handle allocated with crypto_alloc_kpp() + */ +static inline unsigned int crypto_kpp_maxsize(struct crypto_kpp *tfm) +{ + struct kpp_alg *alg = crypto_kpp_alg(tfm); + + return alg->max_size(tfm); +} + +#endif diff --git a/include/crypto/md5.h b/include/crypto/md5.h new file mode 100644 index 000000000..cf9e9dec3 --- /dev/null +++ b/include/crypto/md5.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_MD5_H +#define _CRYPTO_MD5_H + +#include <linux/types.h> + +#define MD5_DIGEST_SIZE 16 +#define MD5_HMAC_BLOCK_SIZE 64 +#define MD5_BLOCK_WORDS 16 +#define MD5_HASH_WORDS 4 + +#define MD5_H0 0x67452301UL +#define MD5_H1 0xefcdab89UL +#define MD5_H2 0x98badcfeUL +#define MD5_H3 0x10325476UL + +extern const u8 md5_zero_message_hash[MD5_DIGEST_SIZE]; + +struct md5_state { + u32 hash[MD5_HASH_WORDS]; + u32 block[MD5_BLOCK_WORDS]; + u64 byte_count; +}; + +#endif diff --git a/include/crypto/nhpoly1305.h b/include/crypto/nhpoly1305.h new file mode 100644 index 000000000..306925fea --- /dev/null +++ b/include/crypto/nhpoly1305.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values and helper functions for the NHPoly1305 hash function. + */ + +#ifndef _NHPOLY1305_H +#define _NHPOLY1305_H + +#include <crypto/hash.h> +#include <crypto/internal/poly1305.h> + +/* NH parameterization: */ + +/* Endianness: little */ +/* Word size: 32 bits (works well on NEON, SSE2, AVX2) */ + +/* Stride: 2 words (optimal on ARM32 NEON; works okay on other CPUs too) */ +#define NH_PAIR_STRIDE 2 +#define NH_MESSAGE_UNIT (NH_PAIR_STRIDE * 2 * sizeof(u32)) + +/* Num passes (Toeplitz iteration count): 4, to give ε = 2^{-128} */ +#define NH_NUM_PASSES 4 +#define NH_HASH_BYTES (NH_NUM_PASSES * sizeof(u64)) + +/* Max message size: 1024 bytes (32x compression factor) */ +#define NH_NUM_STRIDES 64 +#define NH_MESSAGE_WORDS (NH_PAIR_STRIDE * 2 * NH_NUM_STRIDES) +#define NH_MESSAGE_BYTES (NH_MESSAGE_WORDS * sizeof(u32)) +#define NH_KEY_WORDS (NH_MESSAGE_WORDS + \ + NH_PAIR_STRIDE * 2 * (NH_NUM_PASSES - 1)) +#define NH_KEY_BYTES (NH_KEY_WORDS * sizeof(u32)) + +#define NHPOLY1305_KEY_SIZE (POLY1305_BLOCK_SIZE + NH_KEY_BYTES) + +struct nhpoly1305_key { + struct poly1305_core_key poly_key; + u32 nh_key[NH_KEY_WORDS]; +}; + +struct nhpoly1305_state { + + /* Running total of polynomial evaluation */ + struct poly1305_state poly_state; + + /* Partial block buffer */ + u8 buffer[NH_MESSAGE_UNIT]; + unsigned int buflen; + + /* + * Number of bytes remaining until the current NH message reaches + * NH_MESSAGE_BYTES. When nonzero, 'nh_hash' holds the partial NH hash. 
+ */ + unsigned int nh_remaining; + + __le64 nh_hash[NH_NUM_PASSES]; +}; + +typedef void (*nh_t)(const u32 *key, const u8 *message, size_t message_len, + __le64 hash[NH_NUM_PASSES]); + +int crypto_nhpoly1305_setkey(struct crypto_shash *tfm, + const u8 *key, unsigned int keylen); + +int crypto_nhpoly1305_init(struct shash_desc *desc); +int crypto_nhpoly1305_update(struct shash_desc *desc, + const u8 *src, unsigned int srclen); +int crypto_nhpoly1305_update_helper(struct shash_desc *desc, + const u8 *src, unsigned int srclen, + nh_t nh_fn); +int crypto_nhpoly1305_final(struct shash_desc *desc, u8 *dst); +int crypto_nhpoly1305_final_helper(struct shash_desc *desc, u8 *dst, + nh_t nh_fn); + +#endif /* _NHPOLY1305_H */ diff --git a/include/crypto/null.h b/include/crypto/null.h new file mode 100644 index 000000000..0ef577cc0 --- /dev/null +++ b/include/crypto/null.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Values for NULL algorithms */ + +#ifndef _CRYPTO_NULL_H +#define _CRYPTO_NULL_H + +#define NULL_KEY_SIZE 0 +#define NULL_BLOCK_SIZE 1 +#define NULL_DIGEST_SIZE 0 +#define NULL_IV_SIZE 0 + +struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void); +void crypto_put_default_null_skcipher(void); + +#endif diff --git a/include/crypto/padlock.h b/include/crypto/padlock.h new file mode 100644 index 000000000..6de70e88f --- /dev/null +++ b/include/crypto/padlock.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Driver for VIA PadLock + * + * Copyright (c) 2004 Michal Ludvig <michal@logix.cz> + */ + +#ifndef _CRYPTO_PADLOCK_H +#define _CRYPTO_PADLOCK_H + +#define PADLOCK_ALIGNMENT 16 + +#define PFX KBUILD_MODNAME ": " + +#define PADLOCK_CRA_PRIORITY 300 +#define PADLOCK_COMPOSITE_PRIORITY 400 + +#ifdef CONFIG_64BIT +#define STACK_ALIGN 16 +#else +#define STACK_ALIGN 4 +#endif + +#endif /* _CRYPTO_PADLOCK_H */ diff --git a/include/crypto/pcrypt.h b/include/crypto/pcrypt.h new file mode 100644 index 000000000..b9bc34361 --- /dev/null +++ b/include/crypto/pcrypt.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * pcrypt - Parallel crypto engine. + * + * Copyright (C) 2009 secunet Security Networks AG + * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com> + */ + +#ifndef _CRYPTO_PCRYPT_H +#define _CRYPTO_PCRYPT_H + +#include <linux/crypto.h> +#include <linux/kernel.h> +#include <linux/padata.h> + +struct pcrypt_request { + struct padata_priv padata; + void *data; + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +static inline void *pcrypt_request_ctx(struct pcrypt_request *req) +{ + return req->__ctx; +} + +static inline +struct padata_priv *pcrypt_request_padata(struct pcrypt_request *req) +{ + return &req->padata; +} + +static inline +struct pcrypt_request *pcrypt_padata_request(struct padata_priv *padata) +{ + return container_of(padata, struct pcrypt_request, padata); +} + +#endif diff --git a/include/crypto/pkcs7.h b/include/crypto/pkcs7.h new file mode 100644 index 000000000..38ec7f5f9 --- /dev/null +++ b/include/crypto/pkcs7.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* PKCS#7 crypto data parser + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _CRYPTO_PKCS7_H +#define _CRYPTO_PKCS7_H + +#include <linux/verification.h> +#include <linux/hash_info.h> +#include <crypto/public_key.h> + +struct key; +struct pkcs7_message; + +/* + * pkcs7_parser.c + */ +extern struct pkcs7_message *pkcs7_parse_message(const void *data, + size_t datalen); +extern void pkcs7_free_message(struct pkcs7_message *pkcs7); + +extern int pkcs7_get_content_data(const struct pkcs7_message *pkcs7, + const void **_data, size_t *_datalen, + size_t *_headerlen); + +/* + * pkcs7_trust.c + */ +extern int pkcs7_validate_trust(struct pkcs7_message *pkcs7, + struct key *trust_keyring); + +/* + * pkcs7_verify.c + */ +extern int pkcs7_verify(struct pkcs7_message *pkcs7, + enum key_being_used_for usage); + +extern int pkcs7_supply_detached_data(struct pkcs7_message *pkcs7, + const void *data, size_t datalen); + +extern int pkcs7_get_digest(struct pkcs7_message *pkcs7, const u8 **buf, + u32 *len, enum hash_algo *hash_algo); + +#endif /* _CRYPTO_PKCS7_H */ diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h new file mode 100644 index 000000000..090692ec3 --- /dev/null +++ b/include/crypto/poly1305.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for the Poly1305 algorithm + */ + +#ifndef _CRYPTO_POLY1305_H +#define _CRYPTO_POLY1305_H + +#include <linux/types.h> +#include <linux/crypto.h> + +#define POLY1305_BLOCK_SIZE 16 +#define POLY1305_KEY_SIZE 32 +#define POLY1305_DIGEST_SIZE 16 + +/* The poly1305_key and poly1305_state types are mostly opaque and + * implementation-defined. Limbs might be in base 2^64 or base 2^26, or + * different yet. The union type provided keeps these 64-bit aligned for the + * case in which this is implemented using 64x64 multiplies. 
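+ *
+ * Callers normally use the opaque types below through the one-shot helpers
+ * declared later in this header; a minimal sketch, where 'key' is
+ * POLY1305_KEY_SIZE bytes, 'data'/'len' describe the message and 'mac'
+ * receives POLY1305_DIGEST_SIZE bytes:
+ *
+ *	struct poly1305_desc_ctx desc;
+ *
+ *	poly1305_init(&desc, key);
+ *	poly1305_update(&desc, data, len);
+ *	poly1305_final(&desc, mac);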
+ */ + +struct poly1305_key { + union { + u32 r[5]; + u64 r64[3]; + }; +}; + +struct poly1305_core_key { + struct poly1305_key key; + struct poly1305_key precomputed_s; +}; + +struct poly1305_state { + union { + u32 h[5]; + u64 h64[3]; + }; +}; + +struct poly1305_desc_ctx { + /* partial buffer */ + u8 buf[POLY1305_BLOCK_SIZE]; + /* bytes used in partial buffer */ + unsigned int buflen; + /* how many keys have been set in r[] */ + unsigned short rset; + /* whether s[] has been set */ + bool sset; + /* finalize key */ + u32 s[4]; + /* accumulator */ + struct poly1305_state h; + /* key */ + union { + struct poly1305_key opaque_r[CONFIG_CRYPTO_LIB_POLY1305_RSIZE]; + struct poly1305_core_key core_r; + }; +}; + +void poly1305_init_arch(struct poly1305_desc_ctx *desc, + const u8 key[POLY1305_KEY_SIZE]); +void poly1305_init_generic(struct poly1305_desc_ctx *desc, + const u8 key[POLY1305_KEY_SIZE]); + +static inline void poly1305_init(struct poly1305_desc_ctx *desc, const u8 *key) +{ + if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305)) + poly1305_init_arch(desc, key); + else + poly1305_init_generic(desc, key); +} + +void poly1305_update_arch(struct poly1305_desc_ctx *desc, const u8 *src, + unsigned int nbytes); +void poly1305_update_generic(struct poly1305_desc_ctx *desc, const u8 *src, + unsigned int nbytes); + +static inline void poly1305_update(struct poly1305_desc_ctx *desc, + const u8 *src, unsigned int nbytes) +{ + if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305)) + poly1305_update_arch(desc, src, nbytes); + else + poly1305_update_generic(desc, src, nbytes); +} + +void poly1305_final_arch(struct poly1305_desc_ctx *desc, u8 *digest); +void poly1305_final_generic(struct poly1305_desc_ctx *desc, u8 *digest); + +static inline void poly1305_final(struct poly1305_desc_ctx *desc, u8 *digest) +{ + if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305)) + poly1305_final_arch(desc, digest); + else + poly1305_final_generic(desc, digest); +} + +#endif diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h new file mode 100644 index 000000000..f5bd80858 --- /dev/null +++ b/include/crypto/public_key.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Asymmetric public-key algorithm definitions + * + * See Documentation/crypto/asymmetric-keys.rst + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _LINUX_PUBLIC_KEY_H +#define _LINUX_PUBLIC_KEY_H + +#include <linux/keyctl.h> +#include <linux/oid_registry.h> +#include <crypto/akcipher.h> + +/* + * Cryptographic data for the public-key subtype of the asymmetric key type. + * + * Note that this may include private part of the key as well as the public + * part. 
+ */ +struct public_key { + void *key; + u32 keylen; + enum OID algo; + void *params; + u32 paramlen; + bool key_is_private; + const char *id_type; + const char *pkey_algo; +}; + +extern void public_key_free(struct public_key *key); + +/* + * Public key cryptography signature data + */ +struct public_key_signature { + struct asymmetric_key_id *auth_ids[2]; + u8 *s; /* Signature */ + u8 *digest; + u32 s_size; /* Number of bytes in signature */ + u32 digest_size; /* Number of bytes in digest */ + const char *pkey_algo; + const char *hash_algo; + const char *encoding; + const void *data; + unsigned int data_size; +}; + +extern void public_key_signature_free(struct public_key_signature *sig); + +extern struct asymmetric_key_subtype public_key_subtype; + +struct key; +struct key_type; +union key_payload; + +extern int restrict_link_by_signature(struct key *dest_keyring, + const struct key_type *type, + const union key_payload *payload, + struct key *trust_keyring); + +extern int restrict_link_by_key_or_keyring(struct key *dest_keyring, + const struct key_type *type, + const union key_payload *payload, + struct key *trusted); + +extern int restrict_link_by_key_or_keyring_chain(struct key *trust_keyring, + const struct key_type *type, + const union key_payload *payload, + struct key *trusted); + +extern int query_asymmetric_key(const struct kernel_pkey_params *, + struct kernel_pkey_query *); + +extern int encrypt_blob(struct kernel_pkey_params *, const void *, void *); +extern int decrypt_blob(struct kernel_pkey_params *, const void *, void *); +extern int create_signature(struct kernel_pkey_params *, const void *, void *); +extern int verify_signature(const struct key *, + const struct public_key_signature *); + +int public_key_verify_signature(const struct public_key *pkey, + const struct public_key_signature *sig); + +#endif /* _LINUX_PUBLIC_KEY_H */ diff --git a/include/crypto/rng.h b/include/crypto/rng.h new file mode 100644 index 000000000..17bb3673d --- /dev/null +++ b/include/crypto/rng.h @@ -0,0 +1,203 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * RNG: Random Number Generator algorithms under the crypto API + * + * Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com> + * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#ifndef _CRYPTO_RNG_H +#define _CRYPTO_RNG_H + +#include <linux/crypto.h> + +struct crypto_rng; + +/** + * struct rng_alg - random number generator definition + * + * @generate: The function defined by this variable obtains a + * random number. The random number generator transform + * must generate the random number out of the context + * provided with this call, plus any additional data + * if provided to the call. + * @seed: Seed or reseed the random number generator. With the + * invocation of this function call, the random number + * generator shall become ready for generation. If the + * random number generator requires a seed for setting + * up a new state, the seed must be provided by the + * consumer while invoking this function. The required + * size of the seed is defined with @seedsize . + * @set_ent: Set entropy that would otherwise be obtained from + * entropy source. Internal use only. + * @seedsize: The seed size required for a random number generator + * initialization defined with this variable. Some + * random number generators does not require a seed + * as the seeding is implemented internally without + * the need of support by the consumer. In this case, + * the seed size is set to zero. 
+ * @base: Common crypto API algorithm data structure. + */ +struct rng_alg { + int (*generate)(struct crypto_rng *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int dlen); + int (*seed)(struct crypto_rng *tfm, const u8 *seed, unsigned int slen); + void (*set_ent)(struct crypto_rng *tfm, const u8 *data, + unsigned int len); + + unsigned int seedsize; + + struct crypto_alg base; +}; + +struct crypto_rng { + struct crypto_tfm base; +}; + +extern struct crypto_rng *crypto_default_rng; + +int crypto_get_default_rng(void); +void crypto_put_default_rng(void); + +/** + * DOC: Random number generator API + * + * The random number generator API is used with the ciphers of type + * CRYPTO_ALG_TYPE_RNG (listed as type "rng" in /proc/crypto) + */ + +/** + * crypto_alloc_rng() -- allocate RNG handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * message digest cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for a random number generator. The returned struct + * crypto_rng is the cipher handle that is required for any subsequent + * API invocation for that random number generator. + * + * For all random number generators, this call creates a new private copy of + * the random number generator that does not share a state with other + * instances. The only exception is the "krng" random number generator which + * is a kernel crypto API use case for the get_random_bytes() function of the + * /dev/random driver. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask); + +static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm) +{ + return &tfm->base; +} + +/** + * crypto_rng_alg - obtain name of RNG + * @tfm: cipher handle + * + * Return the generic name (cra_name) of the initialized random number generator + * + * Return: generic name string + */ +static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm) +{ + return container_of(crypto_rng_tfm(tfm)->__crt_alg, + struct rng_alg, base); +} + +/** + * crypto_free_rng() - zeroize and free RNG handle + * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. + */ +static inline void crypto_free_rng(struct crypto_rng *tfm) +{ + crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm)); +} + +/** + * crypto_rng_generate() - get random number + * @tfm: cipher handle + * @src: Input buffer holding additional data, may be NULL + * @slen: Length of additional data + * @dst: output buffer holding the random numbers + * @dlen: length of the output buffer + * + * This function fills the caller-allocated buffer with random + * numbers using the random number generator referenced by the + * cipher handle. 
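+ *
+ * For instance, using the kernel's default RNG instance (sketch; 'buf' and
+ * 'buflen' are caller-provided, error handling omitted):
+ *
+ *	crypto_get_default_rng();
+ *	crypto_rng_generate(crypto_default_rng, NULL, 0, buf, buflen);
+ *	crypto_put_default_rng();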
+ * + * Return: 0 function was successful; < 0 if an error occurred + */ +static inline int crypto_rng_generate(struct crypto_rng *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int dlen) +{ + struct crypto_alg *alg = tfm->base.__crt_alg; + int ret; + + crypto_stats_get(alg); + ret = crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen); + crypto_stats_rng_generate(alg, dlen, ret); + return ret; +} + +/** + * crypto_rng_get_bytes() - get random number + * @tfm: cipher handle + * @rdata: output buffer holding the random numbers + * @dlen: length of the output buffer + * + * This function fills the caller-allocated buffer with random numbers using the + * random number generator referenced by the cipher handle. + * + * Return: 0 function was successful; < 0 if an error occurred + */ +static inline int crypto_rng_get_bytes(struct crypto_rng *tfm, + u8 *rdata, unsigned int dlen) +{ + return crypto_rng_generate(tfm, NULL, 0, rdata, dlen); +} + +/** + * crypto_rng_reset() - re-initialize the RNG + * @tfm: cipher handle + * @seed: seed input data + * @slen: length of the seed input data + * + * The reset function completely re-initializes the random number generator + * referenced by the cipher handle by clearing the current state. The new state + * is initialized with the caller provided seed or automatically, depending + * on the random number generator type (the ANSI X9.31 RNG requires + * caller-provided seed, the SP800-90A DRBGs perform an automatic seeding). + * The seed is provided as a parameter to this function call. The provided seed + * should have the length of the seed size defined for the random number + * generator as defined by crypto_rng_seedsize. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, + unsigned int slen); + +/** + * crypto_rng_seedsize() - obtain seed size of RNG + * @tfm: cipher handle + * + * The function returns the seed size for the random number generator + * referenced by the cipher handle. This value may be zero if the random + * number generator does not implement or require a reseeding. For example, + * the SP800-90A DRBGs implement an automated reseeding after reaching a + * pre-defined threshold. + * + * Return: seed size for the random number generator + */ +static inline int crypto_rng_seedsize(struct crypto_rng *tfm) +{ + return crypto_rng_alg(tfm)->seedsize; +} + +#endif diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h new file mode 100644 index 000000000..c837d0775 --- /dev/null +++ b/include/crypto/scatterwalk.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Cryptographic scatter and gather helpers. + * + * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> + * Copyright (c) 2002 Adam J. 
Richter <adam@yggdrasil.com> + * Copyright (c) 2004 Jean-Luc Cooke <jlcooke@certainkey.com> + * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#ifndef _CRYPTO_SCATTERWALK_H +#define _CRYPTO_SCATTERWALK_H + +#include <crypto/algapi.h> +#include <linux/highmem.h> +#include <linux/kernel.h> +#include <linux/scatterlist.h> + +static inline void scatterwalk_crypto_chain(struct scatterlist *head, + struct scatterlist *sg, int num) +{ + if (sg) + sg_chain(head, num, sg); + else + sg_mark_end(head); +} + +static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk) +{ + unsigned int len = walk->sg->offset + walk->sg->length - walk->offset; + unsigned int len_this_page = offset_in_page(~walk->offset) + 1; + return len_this_page > len ? len : len_this_page; +} + +static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk, + unsigned int nbytes) +{ + unsigned int len_this_page = scatterwalk_pagelen(walk); + return nbytes > len_this_page ? len_this_page : nbytes; +} + +static inline void scatterwalk_advance(struct scatter_walk *walk, + unsigned int nbytes) +{ + walk->offset += nbytes; +} + +static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk, + unsigned int alignmask) +{ + return !(walk->offset & alignmask); +} + +static inline struct page *scatterwalk_page(struct scatter_walk *walk) +{ + return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT); +} + +static inline void scatterwalk_unmap(void *vaddr) +{ + kunmap_atomic(vaddr); +} + +static inline void scatterwalk_start(struct scatter_walk *walk, + struct scatterlist *sg) +{ + walk->sg = sg; + walk->offset = sg->offset; +} + +static inline void *scatterwalk_map(struct scatter_walk *walk) +{ + return kmap_atomic(scatterwalk_page(walk)) + + offset_in_page(walk->offset); +} + +static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out, + unsigned int more) +{ + if (out) { + struct page *page; + + page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT); + /* Test ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE first as + * PageSlab cannot be optimised away per se due to + * use of volatile pointer. 
+ */ + if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page)) + flush_dcache_page(page); + } + + if (more && walk->offset >= walk->sg->offset + walk->sg->length) + scatterwalk_start(walk, sg_next(walk->sg)); +} + +static inline void scatterwalk_done(struct scatter_walk *walk, int out, + int more) +{ + if (!more || walk->offset >= walk->sg->offset + walk->sg->length || + !(walk->offset & (PAGE_SIZE - 1))) + scatterwalk_pagedone(walk, out, more); +} + +void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, + size_t nbytes, int out); +void *scatterwalk_map(struct scatter_walk *walk); + +void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, + unsigned int start, unsigned int nbytes, int out); + +struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], + struct scatterlist *src, + unsigned int len); + +#endif /* _CRYPTO_SCATTERWALK_H */ diff --git a/include/crypto/serpent.h b/include/crypto/serpent.h new file mode 100644 index 000000000..75c7eaa20 --- /dev/null +++ b/include/crypto/serpent.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for serpent algorithms + */ + +#ifndef _CRYPTO_SERPENT_H +#define _CRYPTO_SERPENT_H + +#include <linux/types.h> +#include <linux/crypto.h> + +#define SERPENT_MIN_KEY_SIZE 0 +#define SERPENT_MAX_KEY_SIZE 32 +#define SERPENT_EXPKEY_WORDS 132 +#define SERPENT_BLOCK_SIZE 16 + +struct serpent_ctx { + u32 expkey[SERPENT_EXPKEY_WORDS]; +}; + +int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key, + unsigned int keylen); +int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); + +void __serpent_encrypt(const void *ctx, u8 *dst, const u8 *src); +void __serpent_decrypt(const void *ctx, u8 *dst, const u8 *src); + +#endif diff --git a/include/crypto/sha.h b/include/crypto/sha.h new file mode 100644 index 000000000..4ff3da816 --- /dev/null +++ b/include/crypto/sha.h @@ -0,0 +1,167 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for SHA algorithms + */ + +#ifndef _CRYPTO_SHA_H +#define _CRYPTO_SHA_H + +#include <linux/types.h> + +#define SHA1_DIGEST_SIZE 20 +#define SHA1_BLOCK_SIZE 64 + +#define SHA224_DIGEST_SIZE 28 +#define SHA224_BLOCK_SIZE 64 + +#define SHA256_DIGEST_SIZE 32 +#define SHA256_BLOCK_SIZE 64 + +#define SHA384_DIGEST_SIZE 48 +#define SHA384_BLOCK_SIZE 128 + +#define SHA512_DIGEST_SIZE 64 +#define SHA512_BLOCK_SIZE 128 + +#define SHA1_H0 0x67452301UL +#define SHA1_H1 0xefcdab89UL +#define SHA1_H2 0x98badcfeUL +#define SHA1_H3 0x10325476UL +#define SHA1_H4 0xc3d2e1f0UL + +#define SHA224_H0 0xc1059ed8UL +#define SHA224_H1 0x367cd507UL +#define SHA224_H2 0x3070dd17UL +#define SHA224_H3 0xf70e5939UL +#define SHA224_H4 0xffc00b31UL +#define SHA224_H5 0x68581511UL +#define SHA224_H6 0x64f98fa7UL +#define SHA224_H7 0xbefa4fa4UL + +#define SHA256_H0 0x6a09e667UL +#define SHA256_H1 0xbb67ae85UL +#define SHA256_H2 0x3c6ef372UL +#define SHA256_H3 0xa54ff53aUL +#define SHA256_H4 0x510e527fUL +#define SHA256_H5 0x9b05688cUL +#define SHA256_H6 0x1f83d9abUL +#define SHA256_H7 0x5be0cd19UL + +#define SHA384_H0 0xcbbb9d5dc1059ed8ULL +#define SHA384_H1 0x629a292a367cd507ULL +#define SHA384_H2 0x9159015a3070dd17ULL +#define SHA384_H3 0x152fecd8f70e5939ULL +#define SHA384_H4 0x67332667ffc00b31ULL +#define SHA384_H5 0x8eb44a8768581511ULL +#define SHA384_H6 0xdb0c2e0d64f98fa7ULL +#define SHA384_H7 0x47b5481dbefa4fa4ULL + +#define SHA512_H0 0x6a09e667f3bcc908ULL +#define SHA512_H1 0xbb67ae8584caa73bULL +#define SHA512_H2 0x3c6ef372fe94f82bULL +#define SHA512_H3 
0xa54ff53a5f1d36f1ULL +#define SHA512_H4 0x510e527fade682d1ULL +#define SHA512_H5 0x9b05688c2b3e6c1fULL +#define SHA512_H6 0x1f83d9abfb41bd6bULL +#define SHA512_H7 0x5be0cd19137e2179ULL + +extern const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE]; + +extern const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE]; + +extern const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE]; + +extern const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE]; + +extern const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE]; + +struct sha1_state { + u32 state[SHA1_DIGEST_SIZE / 4]; + u64 count; + u8 buffer[SHA1_BLOCK_SIZE]; +}; + +struct sha256_state { + u32 state[SHA256_DIGEST_SIZE / 4]; + u64 count; + u8 buf[SHA256_BLOCK_SIZE]; +}; + +struct sha512_state { + u64 state[SHA512_DIGEST_SIZE / 8]; + u64 count[2]; + u8 buf[SHA512_BLOCK_SIZE]; +}; + +struct shash_desc; + +extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data, + unsigned int len); + +extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *hash); + +extern int crypto_sha256_update(struct shash_desc *desc, const u8 *data, + unsigned int len); + +extern int crypto_sha256_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *hash); + +extern int crypto_sha512_update(struct shash_desc *desc, const u8 *data, + unsigned int len); + +extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *hash); + +/* + * An implementation of SHA-1's compression function. Don't use in new code! + * You shouldn't be using SHA-1, and even if you *have* to use SHA-1, this isn't + * the correct way to hash something with SHA-1 (use crypto_shash instead). + */ +#define SHA1_DIGEST_WORDS (SHA1_DIGEST_SIZE / 4) +#define SHA1_WORKSPACE_WORDS 16 +void sha1_init(__u32 *buf); +void sha1_transform(__u32 *digest, const char *data, __u32 *W); + +/* + * Stand-alone implementation of the SHA256 algorithm. It is designed to + * have as little dependencies as possible so it can be used in the + * kexec_file purgatory. In other cases you should generally use the + * hash APIs from include/crypto/hash.h. Especially when hashing large + * amounts of data as those APIs may be hw-accelerated. 
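+ *
+ * Typical stand-alone use (sketch; 'data' and 'len' are caller-provided):
+ *
+ *	struct sha256_state sctx;
+ *	u8 digest[SHA256_DIGEST_SIZE];
+ *
+ *	sha256_init(&sctx);
+ *	sha256_update(&sctx, data, len);
+ *	sha256_final(&sctx, digest);
+ *
+ * or, for a single buffer, simply sha256(data, len, digest).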
+ * + * For details see lib/crypto/sha256.c + */ + +static inline void sha256_init(struct sha256_state *sctx) +{ + sctx->state[0] = SHA256_H0; + sctx->state[1] = SHA256_H1; + sctx->state[2] = SHA256_H2; + sctx->state[3] = SHA256_H3; + sctx->state[4] = SHA256_H4; + sctx->state[5] = SHA256_H5; + sctx->state[6] = SHA256_H6; + sctx->state[7] = SHA256_H7; + sctx->count = 0; +} +void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len); +void sha256_final(struct sha256_state *sctx, u8 *out); +void sha256(const u8 *data, unsigned int len, u8 *out); + +static inline void sha224_init(struct sha256_state *sctx) +{ + sctx->state[0] = SHA224_H0; + sctx->state[1] = SHA224_H1; + sctx->state[2] = SHA224_H2; + sctx->state[3] = SHA224_H3; + sctx->state[4] = SHA224_H4; + sctx->state[5] = SHA224_H5; + sctx->state[6] = SHA224_H6; + sctx->state[7] = SHA224_H7; + sctx->count = 0; +} +void sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len); +void sha224_final(struct sha256_state *sctx, u8 *out); + +#endif diff --git a/include/crypto/sha1_base.h b/include/crypto/sha1_base.h new file mode 100644 index 000000000..20fd1f746 --- /dev/null +++ b/include/crypto/sha1_base.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * sha1_base.h - core logic for SHA-1 implementations + * + * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org> + */ + +#ifndef _CRYPTO_SHA1_BASE_H +#define _CRYPTO_SHA1_BASE_H + +#include <crypto/internal/hash.h> +#include <crypto/sha.h> +#include <linux/crypto.h> +#include <linux/module.h> + +#include <asm/unaligned.h> + +typedef void (sha1_block_fn)(struct sha1_state *sst, u8 const *src, int blocks); + +static inline int sha1_base_init(struct shash_desc *desc) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + + sctx->state[0] = SHA1_H0; + sctx->state[1] = SHA1_H1; + sctx->state[2] = SHA1_H2; + sctx->state[3] = SHA1_H3; + sctx->state[4] = SHA1_H4; + sctx->count = 0; + + return 0; +} + +static inline int sha1_base_do_update(struct shash_desc *desc, + const u8 *data, + unsigned int len, + sha1_block_fn *block_fn) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; + + sctx->count += len; + + if (unlikely((partial + len) >= SHA1_BLOCK_SIZE)) { + int blocks; + + if (partial) { + int p = SHA1_BLOCK_SIZE - partial; + + memcpy(sctx->buffer + partial, data, p); + data += p; + len -= p; + + block_fn(sctx, sctx->buffer, 1); + } + + blocks = len / SHA1_BLOCK_SIZE; + len %= SHA1_BLOCK_SIZE; + + if (blocks) { + block_fn(sctx, data, blocks); + data += blocks * SHA1_BLOCK_SIZE; + } + partial = 0; + } + if (len) + memcpy(sctx->buffer + partial, data, len); + + return 0; +} + +static inline int sha1_base_do_finalize(struct shash_desc *desc, + sha1_block_fn *block_fn) +{ + const int bit_offset = SHA1_BLOCK_SIZE - sizeof(__be64); + struct sha1_state *sctx = shash_desc_ctx(desc); + __be64 *bits = (__be64 *)(sctx->buffer + bit_offset); + unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; + + sctx->buffer[partial++] = 0x80; + if (partial > bit_offset) { + memset(sctx->buffer + partial, 0x0, SHA1_BLOCK_SIZE - partial); + partial = 0; + + block_fn(sctx, sctx->buffer, 1); + } + + memset(sctx->buffer + partial, 0x0, bit_offset - partial); + *bits = cpu_to_be64(sctx->count << 3); + block_fn(sctx, sctx->buffer, 1); + + return 0; +} + +static inline int sha1_base_finish(struct shash_desc *desc, u8 *out) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + __be32 *digest = (__be32 *)out; + 
int i; + + for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++) + put_unaligned_be32(sctx->state[i], digest++); + + *sctx = (struct sha1_state){}; + return 0; +} + +#endif /* _CRYPTO_SHA1_BASE_H */ diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h new file mode 100644 index 000000000..6ded11078 --- /dev/null +++ b/include/crypto/sha256_base.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * sha256_base.h - core logic for SHA-256 implementations + * + * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org> + */ + +#ifndef _CRYPTO_SHA256_BASE_H +#define _CRYPTO_SHA256_BASE_H + +#include <crypto/internal/hash.h> +#include <crypto/sha.h> +#include <linux/crypto.h> +#include <linux/module.h> + +#include <asm/unaligned.h> + +typedef void (sha256_block_fn)(struct sha256_state *sst, u8 const *src, + int blocks); + +static inline int sha224_base_init(struct shash_desc *desc) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + + sha224_init(sctx); + return 0; +} + +static inline int sha256_base_init(struct shash_desc *desc) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + + sha256_init(sctx); + return 0; +} + +static inline int sha256_base_do_update(struct shash_desc *desc, + const u8 *data, + unsigned int len, + sha256_block_fn *block_fn) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; + + sctx->count += len; + + if (unlikely((partial + len) >= SHA256_BLOCK_SIZE)) { + int blocks; + + if (partial) { + int p = SHA256_BLOCK_SIZE - partial; + + memcpy(sctx->buf + partial, data, p); + data += p; + len -= p; + + block_fn(sctx, sctx->buf, 1); + } + + blocks = len / SHA256_BLOCK_SIZE; + len %= SHA256_BLOCK_SIZE; + + if (blocks) { + block_fn(sctx, data, blocks); + data += blocks * SHA256_BLOCK_SIZE; + } + partial = 0; + } + if (len) + memcpy(sctx->buf + partial, data, len); + + return 0; +} + +static inline int sha256_base_do_finalize(struct shash_desc *desc, + sha256_block_fn *block_fn) +{ + const int bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64); + struct sha256_state *sctx = shash_desc_ctx(desc); + __be64 *bits = (__be64 *)(sctx->buf + bit_offset); + unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; + + sctx->buf[partial++] = 0x80; + if (partial > bit_offset) { + memset(sctx->buf + partial, 0x0, SHA256_BLOCK_SIZE - partial); + partial = 0; + + block_fn(sctx, sctx->buf, 1); + } + + memset(sctx->buf + partial, 0x0, bit_offset - partial); + *bits = cpu_to_be64(sctx->count << 3); + block_fn(sctx, sctx->buf, 1); + + return 0; +} + +static inline int sha256_base_finish(struct shash_desc *desc, u8 *out) +{ + unsigned int digest_size = crypto_shash_digestsize(desc->tfm); + struct sha256_state *sctx = shash_desc_ctx(desc); + __be32 *digest = (__be32 *)out; + int i; + + for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be32)) + put_unaligned_be32(sctx->state[i], digest++); + + *sctx = (struct sha256_state){}; + return 0; +} + +#endif /* _CRYPTO_SHA256_BASE_H */ diff --git a/include/crypto/sha3.h b/include/crypto/sha3.h new file mode 100644 index 000000000..080f60c2e --- /dev/null +++ b/include/crypto/sha3.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for SHA-3 algorithms + */ +#ifndef __CRYPTO_SHA3_H__ +#define __CRYPTO_SHA3_H__ + +#define SHA3_224_DIGEST_SIZE (224 / 8) +#define SHA3_224_BLOCK_SIZE (200 - 2 * SHA3_224_DIGEST_SIZE) + +#define SHA3_256_DIGEST_SIZE (256 / 8) +#define SHA3_256_BLOCK_SIZE (200 - 2 * SHA3_256_DIGEST_SIZE) + 
+#define SHA3_384_DIGEST_SIZE (384 / 8) +#define SHA3_384_BLOCK_SIZE (200 - 2 * SHA3_384_DIGEST_SIZE) + +#define SHA3_512_DIGEST_SIZE (512 / 8) +#define SHA3_512_BLOCK_SIZE (200 - 2 * SHA3_512_DIGEST_SIZE) + +struct sha3_state { + u64 st[25]; + unsigned int rsiz; + unsigned int rsizw; + + unsigned int partial; + u8 buf[SHA3_224_BLOCK_SIZE]; +}; + +int crypto_sha3_init(struct shash_desc *desc); +int crypto_sha3_update(struct shash_desc *desc, const u8 *data, + unsigned int len); +int crypto_sha3_final(struct shash_desc *desc, u8 *out); + +#endif diff --git a/include/crypto/sha512_base.h b/include/crypto/sha512_base.h new file mode 100644 index 000000000..fb19c7749 --- /dev/null +++ b/include/crypto/sha512_base.h @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * sha512_base.h - core logic for SHA-512 implementations + * + * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org> + */ + +#ifndef _CRYPTO_SHA512_BASE_H +#define _CRYPTO_SHA512_BASE_H + +#include <crypto/internal/hash.h> +#include <crypto/sha.h> +#include <linux/crypto.h> +#include <linux/module.h> + +#include <asm/unaligned.h> + +typedef void (sha512_block_fn)(struct sha512_state *sst, u8 const *src, + int blocks); + +static inline int sha384_base_init(struct shash_desc *desc) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + + sctx->state[0] = SHA384_H0; + sctx->state[1] = SHA384_H1; + sctx->state[2] = SHA384_H2; + sctx->state[3] = SHA384_H3; + sctx->state[4] = SHA384_H4; + sctx->state[5] = SHA384_H5; + sctx->state[6] = SHA384_H6; + sctx->state[7] = SHA384_H7; + sctx->count[0] = sctx->count[1] = 0; + + return 0; +} + +static inline int sha512_base_init(struct shash_desc *desc) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + + sctx->state[0] = SHA512_H0; + sctx->state[1] = SHA512_H1; + sctx->state[2] = SHA512_H2; + sctx->state[3] = SHA512_H3; + sctx->state[4] = SHA512_H4; + sctx->state[5] = SHA512_H5; + sctx->state[6] = SHA512_H6; + sctx->state[7] = SHA512_H7; + sctx->count[0] = sctx->count[1] = 0; + + return 0; +} + +static inline int sha512_base_do_update(struct shash_desc *desc, + const u8 *data, + unsigned int len, + sha512_block_fn *block_fn) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; + + sctx->count[0] += len; + if (sctx->count[0] < len) + sctx->count[1]++; + + if (unlikely((partial + len) >= SHA512_BLOCK_SIZE)) { + int blocks; + + if (partial) { + int p = SHA512_BLOCK_SIZE - partial; + + memcpy(sctx->buf + partial, data, p); + data += p; + len -= p; + + block_fn(sctx, sctx->buf, 1); + } + + blocks = len / SHA512_BLOCK_SIZE; + len %= SHA512_BLOCK_SIZE; + + if (blocks) { + block_fn(sctx, data, blocks); + data += blocks * SHA512_BLOCK_SIZE; + } + partial = 0; + } + if (len) + memcpy(sctx->buf + partial, data, len); + + return 0; +} + +static inline int sha512_base_do_finalize(struct shash_desc *desc, + sha512_block_fn *block_fn) +{ + const int bit_offset = SHA512_BLOCK_SIZE - sizeof(__be64[2]); + struct sha512_state *sctx = shash_desc_ctx(desc); + __be64 *bits = (__be64 *)(sctx->buf + bit_offset); + unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; + + sctx->buf[partial++] = 0x80; + if (partial > bit_offset) { + memset(sctx->buf + partial, 0x0, SHA512_BLOCK_SIZE - partial); + partial = 0; + + block_fn(sctx, sctx->buf, 1); + } + + memset(sctx->buf + partial, 0x0, bit_offset - partial); + bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61); + bits[1] = cpu_to_be64(sctx->count[0] << 3); + 
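	/*
+	 * The 128-bit big-endian bit count (count[1]:count[0] bytes shifted
+	 * left by 3, with the bits shifted out of count[0] folded into the
+	 * high word) now fills the last 16 bytes; hash the final padded block.
+	 */
+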
block_fn(sctx, sctx->buf, 1); + + return 0; +} + +static inline int sha512_base_finish(struct shash_desc *desc, u8 *out) +{ + unsigned int digest_size = crypto_shash_digestsize(desc->tfm); + struct sha512_state *sctx = shash_desc_ctx(desc); + __be64 *digest = (__be64 *)out; + int i; + + for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be64)) + put_unaligned_be64(sctx->state[i], digest++); + + *sctx = (struct sha512_state){}; + return 0; +} + +#endif /* _CRYPTO_SHA512_BASE_H */ diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h new file mode 100644 index 000000000..ef0fc9ed4 --- /dev/null +++ b/include/crypto/skcipher.h @@ -0,0 +1,586 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Symmetric key ciphers. + * + * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#ifndef _CRYPTO_SKCIPHER_H +#define _CRYPTO_SKCIPHER_H + +#include <linux/crypto.h> +#include <linux/kernel.h> +#include <linux/slab.h> + +/** + * struct skcipher_request - Symmetric key cipher request + * @cryptlen: Number of bytes to encrypt or decrypt + * @iv: Initialisation Vector + * @src: Source SG list + * @dst: Destination SG list + * @base: Underlying async request + * @__ctx: Start of private context data + */ +struct skcipher_request { + unsigned int cryptlen; + + u8 *iv; + + struct scatterlist *src; + struct scatterlist *dst; + + struct crypto_async_request base; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +struct crypto_skcipher { + unsigned int reqsize; + + struct crypto_tfm base; +}; + +struct crypto_sync_skcipher { + struct crypto_skcipher base; +}; + +/** + * struct skcipher_alg - symmetric key cipher definition + * @min_keysize: Minimum key size supported by the transformation. This is the + * smallest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MIN_KEY_SIZE" include/crypto/ + * @max_keysize: Maximum key size supported by the transformation. This is the + * largest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MAX_KEY_SIZE" include/crypto/ + * @setkey: Set key for the transformation. This function is used to either + * program a supplied key into the hardware or store the key in the + * transformation context for programming it later. Note that this + * function does modify the transformation context. This function can + * be called multiple times during the existence of the transformation + * object, so one must make sure the key is properly reprogrammed into + * the hardware. This function is also responsible for checking the key + * length for validity. In case a software fallback was put in place in + * the @cra_init call, this function might need to use the fallback if + * the algorithm doesn't support all of the key sizes. + * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt + * the supplied scatterlist containing the blocks of data. The crypto + * API consumer is responsible for aligning the entries of the + * scatterlist properly and making sure the chunks are correctly + * sized. In case a software fallback was put in place in the + * @cra_init call, this function might need to use the fallback if + * the algorithm doesn't support all of the key sizes. 
In case the
+ * key was stored in transformation context, the key might need to be
+ * re-programmed into the hardware in this function. This function
+ * shall not modify the transformation context, as this function may
+ * be called in parallel with the same transformation object.
+ * @decrypt: Decrypt a scatterlist of blocks. This is the reverse counterpart
+ * to @encrypt and the conditions are exactly the same.
+ * @init: Initialize the cryptographic transformation object. This function
+ * is used to initialize the cryptographic transformation object.
+ * This function is called only once at the instantiation time, right
+ * after the transformation context was allocated. In case the
+ * cryptographic hardware has some special requirements which need to
+ * be handled by software, this function shall check for the precise
+ * requirement of the transformation and put any software fallbacks
+ * in place.
+ * @exit: Deinitialize the cryptographic transformation object. This is a
+ * counterpart to @init, used to remove various changes set in
+ * @init.
+ * @ivsize: IV size applicable for transformation. The consumer must provide an
+ * IV of exactly that size to perform the encrypt or decrypt operation.
+ * @chunksize: Equal to the block size except for stream cipher modes such as
+ * CTR, where the exposed block size is 1 and chunksize is set to the
+ * block size of the underlying cipher.
+ * @walksize: Equal to the chunk size except in cases where the algorithm is
+ * considerably more efficient if it can operate on multiple chunks
+ * in parallel. Should be a multiple of chunksize.
+ * @base: Definition of a generic crypto algorithm.
+ *
+ * All fields except @ivsize are mandatory and must be filled.
+ */
+struct skcipher_alg {
+ int (*setkey)(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen);
+ int (*encrypt)(struct skcipher_request *req);
+ int (*decrypt)(struct skcipher_request *req);
+ int (*init)(struct crypto_skcipher *tfm);
+ void (*exit)(struct crypto_skcipher *tfm);
+
+ unsigned int min_keysize;
+ unsigned int max_keysize;
+ unsigned int ivsize;
+ unsigned int chunksize;
+ unsigned int walksize;
+
+ struct crypto_alg base;
+};
+
+#define MAX_SYNC_SKCIPHER_REQSIZE 384
+/*
+ * This performs a type-check against the "tfm" argument to make sure
+ * all users have the correct skcipher tfm for doing on-stack requests.
+ */
+#define SYNC_SKCIPHER_REQUEST_ON_STACK(name, tfm) \
+ char __##name##_desc[sizeof(struct skcipher_request) + \
+ MAX_SYNC_SKCIPHER_REQSIZE + \
+ (!(sizeof((struct crypto_sync_skcipher *)1 == \
+ (typeof(tfm))1))) \
+ ] CRYPTO_MINALIGN_ATTR; \
+ struct skcipher_request *name = (void *)__##name##_desc
+
+/**
+ * DOC: Symmetric Key Cipher API
+ *
+ * Symmetric key cipher API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_SKCIPHER (listed as type "skcipher" in /proc/crypto).
+ *
+ * Asynchronous cipher operations imply that the function invocation for a
+ * cipher request returns immediately, before the operation has completed.
+ * The cipher request is scheduled as a separate kernel thread and therefore
+ * load-balanced on the different CPUs via the process scheduler. To allow
+ * the kernel crypto API to inform the caller about the completion of a cipher
+ * request, the caller must provide a callback function. That function is
+ * invoked with the cipher handle when the request completes.
+ *
+ * To support the asynchronous operation, more information than just the
+ * cipher handle must be supplied to the kernel crypto API. That additional
+ * information is given by filling in the skcipher_request data structure.
+ *
+ * For the symmetric key cipher API, the state is maintained with the tfm
+ * cipher handle. A single tfm can be used across multiple calls and in
+ * parallel. For asynchronous block cipher calls, context data supplied and
+ * only used by the caller can be referenced in the request data structure in
+ * addition to the IV used for the cipher request. The maintenance of such
+ * state information would be important for a crypto driver implementer to
+ * have, because when calling the callback function upon completion of the
+ * cipher operation, that callback function may need some information about
+ * which operation just finished if it invoked multiple requests in parallel.
+ * This state information is unused by the kernel crypto API.
+ */
+
+static inline struct crypto_skcipher *__crypto_skcipher_cast(
+ struct crypto_tfm *tfm)
+{
+ return container_of(tfm, struct crypto_skcipher, base);
+}
+
+/**
+ * crypto_alloc_skcipher() - allocate symmetric key cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * skcipher cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an skcipher. The returned struct
+ * crypto_skcipher is the cipher handle that is required for any subsequent
+ * API invocation for that skcipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
+ u32 type, u32 mask);
+
+struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(const char *alg_name,
+ u32 type, u32 mask);
+
+static inline struct crypto_tfm *crypto_skcipher_tfm(
+ struct crypto_skcipher *tfm)
+{
+ return &tfm->base;
+}
+
+/**
+ * crypto_free_skcipher() - zeroize and free cipher handle
+ * @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
+ */
+static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
+{
+ crypto_destroy_tfm(tfm, crypto_skcipher_tfm(tfm));
+}
+
+static inline void crypto_free_sync_skcipher(struct crypto_sync_skcipher *tfm)
+{
+ crypto_free_skcipher(&tfm->base);
+}
+
+/**
+ * crypto_has_skcipher() - Search for the availability of an skcipher.
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * skcipher
+ * @type: specifies the type of the skcipher
+ * @mask: specifies the mask for the skcipher
+ *
+ * Return: true when the skcipher is known to the kernel crypto API; false
+ * otherwise
+ */
+int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask);
+
+static inline const char *crypto_skcipher_driver_name(
+ struct crypto_skcipher *tfm)
+{
+ return crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm));
+}
+
+static inline struct skcipher_alg *crypto_skcipher_alg(
+ struct crypto_skcipher *tfm)
+{
+ return container_of(crypto_skcipher_tfm(tfm)->__crt_alg,
+ struct skcipher_alg, base);
+}
+
+static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg)
+{
+ return alg->ivsize;
+}
+
+/**
+ * crypto_skcipher_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the skcipher referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
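+ *
+ * A small sketch, assuming @tfm was allocated for a cipher such as
+ * "cbc(aes)" and the buffer is later handed to skcipher_request_set_crypt():
+ *
+ *	u8 *iv = kzalloc(crypto_skcipher_ivsize(tfm), GFP_KERNEL);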
+ *
+ * Return: IV size in bytes
+ */
+static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm)
+{
+ return crypto_skcipher_alg(tfm)->ivsize;
+}
+
+static inline unsigned int crypto_sync_skcipher_ivsize(
+ struct crypto_sync_skcipher *tfm)
+{
+ return crypto_skcipher_ivsize(&tfm->base);
+}
+
+/**
+ * crypto_skcipher_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the skcipher referenced with the cipher handle is
+ * returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation.
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_skcipher_blocksize(
+ struct crypto_skcipher *tfm)
+{
+ return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm));
+}
+
+static inline unsigned int crypto_skcipher_alg_chunksize(
+ struct skcipher_alg *alg)
+{
+ return alg->chunksize;
+}
+
+/**
+ * crypto_skcipher_chunksize() - obtain chunk size
+ * @tfm: cipher handle
+ *
+ * The block size is set to one for ciphers such as CTR. However,
+ * you still need to provide incremental updates in multiples of
+ * the underlying block size as the IV does not have sub-block
+ * granularity. This is known in this API as the chunk size.
+ *
+ * Return: chunk size in bytes
+ */
+static inline unsigned int crypto_skcipher_chunksize(
+ struct crypto_skcipher *tfm)
+{
+ return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
+}
+
+static inline unsigned int crypto_sync_skcipher_blocksize(
+ struct crypto_sync_skcipher *tfm)
+{
+ return crypto_skcipher_blocksize(&tfm->base);
+}
+
+static inline unsigned int crypto_skcipher_alignmask(
+ struct crypto_skcipher *tfm)
+{
+ return crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm));
+}
+
+static inline u32 crypto_skcipher_get_flags(struct crypto_skcipher *tfm)
+{
+ return crypto_tfm_get_flags(crypto_skcipher_tfm(tfm));
+}
+
+static inline void crypto_skcipher_set_flags(struct crypto_skcipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_set_flags(crypto_skcipher_tfm(tfm), flags);
+}
+
+static inline void crypto_skcipher_clear_flags(struct crypto_skcipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_clear_flags(crypto_skcipher_tfm(tfm), flags);
+}
+
+static inline u32 crypto_sync_skcipher_get_flags(
+ struct crypto_sync_skcipher *tfm)
+{
+ return crypto_skcipher_get_flags(&tfm->base);
+}
+
+static inline void crypto_sync_skcipher_set_flags(
+ struct crypto_sync_skcipher *tfm, u32 flags)
+{
+ crypto_skcipher_set_flags(&tfm->base, flags);
+}
+
+static inline void crypto_sync_skcipher_clear_flags(
+ struct crypto_sync_skcipher *tfm, u32 flags)
+{
+ crypto_skcipher_clear_flags(&tfm->base, flags);
+}
+
+/**
+ * crypto_skcipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the skcipher referenced by the cipher
+ * handle.
+ *
+ * Note, the key length determines the cipher variant that is used. Many block
+ * ciphers support several key sizes, such as AES-128 vs. AES-192 vs. AES-256.
+ * When providing a 16 byte key for an AES cipher handle, AES-128 is performed.
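+ *
+ * As a sketch, selecting AES-256 on the same handle is purely a matter of
+ * key length (assuming @key points to 32 valid bytes):
+ *
+ *	err = crypto_skcipher_setkey(tfm, key, 32);
+ *	if (err)
+ *		return err;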
+ * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_skcipher_setkey(struct crypto_skcipher *tfm, + const u8 *key, unsigned int keylen); + +static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm, + const u8 *key, unsigned int keylen) +{ + return crypto_skcipher_setkey(&tfm->base, key, keylen); +} + +static inline unsigned int crypto_skcipher_min_keysize( + struct crypto_skcipher *tfm) +{ + return crypto_skcipher_alg(tfm)->min_keysize; +} + +static inline unsigned int crypto_skcipher_max_keysize( + struct crypto_skcipher *tfm) +{ + return crypto_skcipher_alg(tfm)->max_keysize; +} + +/** + * crypto_skcipher_reqtfm() - obtain cipher handle from request + * @req: skcipher_request out of which the cipher handle is to be obtained + * + * Return the crypto_skcipher handle when furnishing an skcipher_request + * data structure. + * + * Return: crypto_skcipher handle + */ +static inline struct crypto_skcipher *crypto_skcipher_reqtfm( + struct skcipher_request *req) +{ + return __crypto_skcipher_cast(req->base.tfm); +} + +static inline struct crypto_sync_skcipher *crypto_sync_skcipher_reqtfm( + struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + + return container_of(tfm, struct crypto_sync_skcipher, base); +} + +/** + * crypto_skcipher_encrypt() - encrypt plaintext + * @req: reference to the skcipher_request handle that holds all information + * needed to perform the cipher operation + * + * Encrypt plaintext data using the skcipher_request handle. That data + * structure and how it is filled with data is discussed with the + * skcipher_request_* functions. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ +int crypto_skcipher_encrypt(struct skcipher_request *req); + +/** + * crypto_skcipher_decrypt() - decrypt ciphertext + * @req: reference to the skcipher_request handle that holds all information + * needed to perform the cipher operation + * + * Decrypt ciphertext data using the skcipher_request handle. That data + * structure and how it is filled with data is discussed with the + * skcipher_request_* functions. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ +int crypto_skcipher_decrypt(struct skcipher_request *req); + +/** + * DOC: Symmetric Key Cipher Request Handle + * + * The skcipher_request data structure contains all pointers to data + * required for the symmetric key cipher operation. This includes the cipher + * handle (which can be used by multiple skcipher_request instances), pointer + * to plaintext and ciphertext, asynchronous callback function, etc. It acts + * as a handle to the skcipher_request_* API calls in a similar way as + * skcipher handle to the crypto_skcipher_* API calls. + */ + +/** + * crypto_skcipher_reqsize() - obtain size of the request data structure + * @tfm: cipher handle + * + * Return: number of bytes + */ +static inline unsigned int crypto_skcipher_reqsize(struct crypto_skcipher *tfm) +{ + return tfm->reqsize; +} + +/** + * skcipher_request_set_tfm() - update cipher handle reference in request + * @req: request handle to be modified + * @tfm: cipher handle that shall be added to the request handle + * + * Allow the caller to replace the existing skcipher handle in the request + * data structure with a different one. 
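+ *
+ * Illustrative pairing with an on-stack request, assuming a synchronous tfm
+ * was allocated with crypto_alloc_sync_skcipher():
+ *
+ *	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
+ *
+ *	skcipher_request_set_sync_tfm(req, tfm);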
+ */ +static inline void skcipher_request_set_tfm(struct skcipher_request *req, + struct crypto_skcipher *tfm) +{ + req->base.tfm = crypto_skcipher_tfm(tfm); +} + +static inline void skcipher_request_set_sync_tfm(struct skcipher_request *req, + struct crypto_sync_skcipher *tfm) +{ + skcipher_request_set_tfm(req, &tfm->base); +} + +static inline struct skcipher_request *skcipher_request_cast( + struct crypto_async_request *req) +{ + return container_of(req, struct skcipher_request, base); +} + +/** + * skcipher_request_alloc() - allocate request data structure + * @tfm: cipher handle to be registered with the request + * @gfp: memory allocation flag that is handed to kmalloc by the API call. + * + * Allocate the request data structure that must be used with the skcipher + * encrypt and decrypt API calls. During the allocation, the provided skcipher + * handle is registered in the request data structure. + * + * Return: allocated request handle in case of success, or NULL if out of memory + */ +static inline struct skcipher_request *skcipher_request_alloc( + struct crypto_skcipher *tfm, gfp_t gfp) +{ + struct skcipher_request *req; + + req = kmalloc(sizeof(struct skcipher_request) + + crypto_skcipher_reqsize(tfm), gfp); + + if (likely(req)) + skcipher_request_set_tfm(req, tfm); + + return req; +} + +/** + * skcipher_request_free() - zeroize and free request data structure + * @req: request data structure cipher handle to be freed + */ +static inline void skcipher_request_free(struct skcipher_request *req) +{ + kfree_sensitive(req); +} + +static inline void skcipher_request_zero(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + + memzero_explicit(req, sizeof(*req) + crypto_skcipher_reqsize(tfm)); +} + +/** + * skcipher_request_set_callback() - set asynchronous callback function + * @req: request handle + * @flags: specify zero or an ORing of the flags + * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and + * increase the wait queue beyond the initial maximum size; + * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep + * @compl: callback function pointer to be registered with the request handle + * @data: The data pointer refers to memory that is not used by the kernel + * crypto API, but provided to the callback function for it to use. Here, + * the caller can provide a reference to memory the callback function can + * operate on. As the callback function is invoked asynchronously to the + * related functionality, it may need to access data structures of the + * related functionality which can be referenced using this pointer. The + * callback function can access the memory via the "data" field in the + * crypto_async_request data structure provided to the callback function. + * + * This function allows setting the callback function that is triggered once the + * cipher operation completes. 
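+ *
+ * A common synchronous pattern (a sketch, assuming the crypto_wait_req()
+ * helpers from linux/crypto.h are used) passes crypto_req_done as the
+ * callback and a struct crypto_wait as the data pointer:
+ *
+ *	DECLARE_CRYPTO_WAIT(wait);
+ *
+ *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ *				      CRYPTO_TFM_REQ_MAY_SLEEP,
+ *				      crypto_req_done, &wait);
+ *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);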
+ * + * The callback function is registered with the skcipher_request handle and + * must comply with the following template:: + * + * void callback_function(struct crypto_async_request *req, int error) + */ +static inline void skcipher_request_set_callback(struct skcipher_request *req, + u32 flags, + crypto_completion_t compl, + void *data) +{ + req->base.complete = compl; + req->base.data = data; + req->base.flags = flags; +} + +/** + * skcipher_request_set_crypt() - set data buffers + * @req: request handle + * @src: source scatter / gather list + * @dst: destination scatter / gather list + * @cryptlen: number of bytes to process from @src + * @iv: IV for the cipher operation which must comply with the IV size defined + * by crypto_skcipher_ivsize + * + * This function allows setting of the source data and destination data + * scatter / gather lists. + * + * For encryption, the source is treated as the plaintext and the + * destination is the ciphertext. For a decryption operation, the use is + * reversed - the source is the ciphertext and the destination is the plaintext. + */ +static inline void skcipher_request_set_crypt( + struct skcipher_request *req, + struct scatterlist *src, struct scatterlist *dst, + unsigned int cryptlen, void *iv) +{ + req->src = src; + req->dst = dst; + req->cryptlen = cryptlen; + req->iv = iv; +} + +#endif /* _CRYPTO_SKCIPHER_H */ + diff --git a/include/crypto/sm2.h b/include/crypto/sm2.h new file mode 100644 index 000000000..af452556d --- /dev/null +++ b/include/crypto/sm2.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * sm2.h - SM2 asymmetric public-key algorithm + * as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012 SM2 and + * described at https://tools.ietf.org/html/draft-shen-sm2-ecdsa-02 + * + * Copyright (c) 2020, Alibaba Group. 
+ * Written by Tianjia Zhang <tianjia.zhang@linux.alibaba.com> + */ + +#ifndef _CRYPTO_SM2_H +#define _CRYPTO_SM2_H + +#include <crypto/sm3.h> +#include <crypto/akcipher.h> + +/* The default user id as specified in GM/T 0009-2012 */ +#define SM2_DEFAULT_USERID "1234567812345678" +#define SM2_DEFAULT_USERID_LEN 16 + +extern int sm2_compute_z_digest(struct crypto_akcipher *tfm, + const unsigned char *id, size_t id_len, + unsigned char dgst[SM3_DIGEST_SIZE]); + +#endif /* _CRYPTO_SM2_H */ diff --git a/include/crypto/sm3.h b/include/crypto/sm3.h new file mode 100644 index 000000000..42ea21289 --- /dev/null +++ b/include/crypto/sm3.h @@ -0,0 +1,42 @@ +/* + * Common values for SM3 algorithm + */ + +#ifndef _CRYPTO_SM3_H +#define _CRYPTO_SM3_H + +#include <linux/types.h> + +#define SM3_DIGEST_SIZE 32 +#define SM3_BLOCK_SIZE 64 + +#define SM3_T1 0x79CC4519 +#define SM3_T2 0x7A879D8A + +#define SM3_IVA 0x7380166f +#define SM3_IVB 0x4914b2b9 +#define SM3_IVC 0x172442d7 +#define SM3_IVD 0xda8a0600 +#define SM3_IVE 0xa96f30bc +#define SM3_IVF 0x163138aa +#define SM3_IVG 0xe38dee4d +#define SM3_IVH 0xb0fb0e4e + +extern const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE]; + +struct sm3_state { + u32 state[SM3_DIGEST_SIZE / 4]; + u64 count; + u8 buffer[SM3_BLOCK_SIZE]; +}; + +struct shash_desc; + +extern int crypto_sm3_update(struct shash_desc *desc, const u8 *data, + unsigned int len); + +extern int crypto_sm3_final(struct shash_desc *desc, u8 *out); + +extern int crypto_sm3_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *hash); +#endif diff --git a/include/crypto/sm3_base.h b/include/crypto/sm3_base.h new file mode 100644 index 000000000..1cbf9aa1f --- /dev/null +++ b/include/crypto/sm3_base.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * sm3_base.h - core logic for SM3 implementations + * + * Copyright (C) 2017 ARM Limited or its affiliates. 
+ * Written by Gilad Ben-Yossef <gilad@benyossef.com> + */ + +#ifndef _CRYPTO_SM3_BASE_H +#define _CRYPTO_SM3_BASE_H + +#include <crypto/internal/hash.h> +#include <crypto/sm3.h> +#include <linux/crypto.h> +#include <linux/module.h> +#include <asm/unaligned.h> + +typedef void (sm3_block_fn)(struct sm3_state *sst, u8 const *src, int blocks); + +static inline int sm3_base_init(struct shash_desc *desc) +{ + struct sm3_state *sctx = shash_desc_ctx(desc); + + sctx->state[0] = SM3_IVA; + sctx->state[1] = SM3_IVB; + sctx->state[2] = SM3_IVC; + sctx->state[3] = SM3_IVD; + sctx->state[4] = SM3_IVE; + sctx->state[5] = SM3_IVF; + sctx->state[6] = SM3_IVG; + sctx->state[7] = SM3_IVH; + sctx->count = 0; + + return 0; +} + +static inline int sm3_base_do_update(struct shash_desc *desc, + const u8 *data, + unsigned int len, + sm3_block_fn *block_fn) +{ + struct sm3_state *sctx = shash_desc_ctx(desc); + unsigned int partial = sctx->count % SM3_BLOCK_SIZE; + + sctx->count += len; + + if (unlikely((partial + len) >= SM3_BLOCK_SIZE)) { + int blocks; + + if (partial) { + int p = SM3_BLOCK_SIZE - partial; + + memcpy(sctx->buffer + partial, data, p); + data += p; + len -= p; + + block_fn(sctx, sctx->buffer, 1); + } + + blocks = len / SM3_BLOCK_SIZE; + len %= SM3_BLOCK_SIZE; + + if (blocks) { + block_fn(sctx, data, blocks); + data += blocks * SM3_BLOCK_SIZE; + } + partial = 0; + } + if (len) + memcpy(sctx->buffer + partial, data, len); + + return 0; +} + +static inline int sm3_base_do_finalize(struct shash_desc *desc, + sm3_block_fn *block_fn) +{ + const int bit_offset = SM3_BLOCK_SIZE - sizeof(__be64); + struct sm3_state *sctx = shash_desc_ctx(desc); + __be64 *bits = (__be64 *)(sctx->buffer + bit_offset); + unsigned int partial = sctx->count % SM3_BLOCK_SIZE; + + sctx->buffer[partial++] = 0x80; + if (partial > bit_offset) { + memset(sctx->buffer + partial, 0x0, SM3_BLOCK_SIZE - partial); + partial = 0; + + block_fn(sctx, sctx->buffer, 1); + } + + memset(sctx->buffer + partial, 0x0, bit_offset - partial); + *bits = cpu_to_be64(sctx->count << 3); + block_fn(sctx, sctx->buffer, 1); + + return 0; +} + +static inline int sm3_base_finish(struct shash_desc *desc, u8 *out) +{ + struct sm3_state *sctx = shash_desc_ctx(desc); + __be32 *digest = (__be32 *)out; + int i; + + for (i = 0; i < SM3_DIGEST_SIZE / sizeof(__be32); i++) + put_unaligned_be32(sctx->state[i], digest++); + + *sctx = (struct sm3_state){}; + return 0; +} + +#endif /* _CRYPTO_SM3_BASE_H */ diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h new file mode 100644 index 000000000..7afd730d1 --- /dev/null +++ b/include/crypto/sm4.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Common values for the SM4 algorithm + * Copyright (C) 2018 ARM Limited or its affiliates. 
+ */ + +#ifndef _CRYPTO_SM4_H +#define _CRYPTO_SM4_H + +#include <linux/types.h> +#include <linux/crypto.h> + +#define SM4_KEY_SIZE 16 +#define SM4_BLOCK_SIZE 16 +#define SM4_RKEY_WORDS 32 + +struct crypto_sm4_ctx { + u32 rkey_enc[SM4_RKEY_WORDS]; + u32 rkey_dec[SM4_RKEY_WORDS]; +}; + +int crypto_sm4_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len); +int crypto_sm4_expand_key(struct crypto_sm4_ctx *ctx, const u8 *in_key, + unsigned int key_len); + +void crypto_sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in); +void crypto_sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in); + +#endif diff --git a/include/crypto/streebog.h b/include/crypto/streebog.h new file mode 100644 index 000000000..cae1b4a01 --- /dev/null +++ b/include/crypto/streebog.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0+ OR BSD-2-Clause */ +/* + * Copyright (c) 2013 Alexey Degtyarev <alexey@renatasystems.org> + * Copyright (c) 2018 Vitaly Chikunov <vt@altlinux.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#ifndef _CRYPTO_STREEBOG_H_ +#define _CRYPTO_STREEBOG_H_ + +#include <linux/types.h> + +#define STREEBOG256_DIGEST_SIZE 32 +#define STREEBOG512_DIGEST_SIZE 64 +#define STREEBOG_BLOCK_SIZE 64 + +struct streebog_uint512 { + __le64 qword[8]; +}; + +struct streebog_state { + union { + u8 buffer[STREEBOG_BLOCK_SIZE]; + struct streebog_uint512 m; + }; + struct streebog_uint512 hash; + struct streebog_uint512 h; + struct streebog_uint512 N; + struct streebog_uint512 Sigma; + size_t fillsize; +}; + +#endif /* !_CRYPTO_STREEBOG_H_ */ diff --git a/include/crypto/twofish.h b/include/crypto/twofish.h new file mode 100644 index 000000000..f6b307a58 --- /dev/null +++ b/include/crypto/twofish.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_TWOFISH_H +#define _CRYPTO_TWOFISH_H + +#include <linux/types.h> + +#define TF_MIN_KEY_SIZE 16 +#define TF_MAX_KEY_SIZE 32 +#define TF_BLOCK_SIZE 16 + +struct crypto_tfm; + +/* Structure for an expanded Twofish key. s contains the key-dependent + * S-boxes composed with the MDS matrix; w contains the eight "whitening" + * subkeys, K[0] through K[7]. k holds the remaining, "round" subkeys. Note + * that k[i] corresponds to what the Twofish paper calls K[i+8]. */ +struct twofish_ctx { + u32 s[4][256], w[8], k[32]; +}; + +int __twofish_setkey(struct twofish_ctx *ctx, const u8 *key, + unsigned int key_len); +int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len); + +#endif diff --git a/include/crypto/xts.h b/include/crypto/xts.h new file mode 100644 index 000000000..0f8dba69f --- /dev/null +++ b/include/crypto/xts.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CRYPTO_XTS_H +#define _CRYPTO_XTS_H + +#include <crypto/b128ops.h> +#include <crypto/internal/skcipher.h> +#include <linux/fips.h> + +#define XTS_BLOCK_SIZE 16 + +static inline int xts_check_key(struct crypto_tfm *tfm, + const u8 *key, unsigned int keylen) +{ + /* + * key consists of keys of equal size concatenated, therefore + * the length must be even. 
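+ * For xts(aes), for example, a 32 byte key is two concatenated 16 byte
+ * AES-128 keys (the data key followed by the tweak key) and a 64 byte
+ * key is two AES-256 keys.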
+ */
+ if (keylen % 2)
+ return -EINVAL;
+
+ /* ensure that the AES and tweak key are not identical */
+ if (fips_enabled && !crypto_memneq(key, key + (keylen / 2), keylen / 2))
+ return -EINVAL;
+
+ return 0;
+}
+
+static inline int xts_verify_key(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ /*
+ * key consists of keys of equal size concatenated, therefore
+ * the length must be even.
+ */
+ if (keylen % 2)
+ return -EINVAL;
+
+ /* ensure that the AES and tweak key are not identical */
+ if ((fips_enabled || (crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) &&
+ !crypto_memneq(key, key + (keylen / 2), keylen / 2))
+ return -EINVAL;
+
+ return 0;
+}
+
+#endif /* _CRYPTO_XTS_H */