Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig | 87
-rw-r--r--  crypto/Makefile | 6
-rw-r--r--  crypto/adiantum.c | 80
-rw-r--r--  crypto/aead.c | 6
-rw-r--r--  crypto/ahash.c | 409
-rw-r--r--  crypto/algif_hash.c | 5
-rw-r--r--  crypto/api.c | 2
-rw-r--r--  crypto/arc4.c | 60
-rw-r--r--  crypto/asymmetric_keys/mscode_parser.c | 19
-rw-r--r--  crypto/asymmetric_keys/pkcs7.asn1 | 7
-rw-r--r--  crypto/asymmetric_keys/pkcs7_parser.c | 22
-rw-r--r--  crypto/asymmetric_keys/pkcs8.asn1 | 6
-rw-r--r--  crypto/asymmetric_keys/public_key.c | 8
-rw-r--r--  crypto/asymmetric_keys/restrict.c | 4
-rw-r--r--  crypto/asymmetric_keys/signature.c | 2
-rw-r--r--  crypto/asymmetric_keys/x509.asn1 | 7
-rw-r--r--  crypto/asymmetric_keys/x509_akid.asn1 | 29
-rw-r--r--  crypto/asymmetric_keys/x509_cert_parser.c | 36
-rw-r--r--  crypto/authenc.c | 20
-rw-r--r--  crypto/authencesn.c | 28
-rw-r--r--  crypto/cbc.c | 163
-rw-r--r--  crypto/ccm.c | 29
-rw-r--r--  crypto/chacha20poly1305.c | 11
-rw-r--r--  crypto/cmac.c | 39
-rw-r--r--  crypto/cryptd.c | 14
-rw-r--r--  crypto/crypto_engine.c | 8
-rw-r--r--  crypto/ctr.c | 14
-rw-r--r--  crypto/cts.c | 12
-rw-r--r--  crypto/deflate.c | 61
-rw-r--r--  crypto/drbg.c | 2
-rw-r--r--  crypto/ecb.c | 190
-rw-r--r--  crypto/essiv.c | 20
-rw-r--r--  crypto/gcm.c | 12
-rw-r--r--  crypto/hash.h | 14
-rw-r--r--  crypto/hash_info.c | 6
-rw-r--r--  crypto/hctr2.c | 11
-rw-r--r--  crypto/hmac.c | 56
-rw-r--r--  crypto/jitterentropy-kcapi.c | 17
-rw-r--r--  crypto/jitterentropy.c | 344
-rw-r--r--  crypto/jitterentropy.h | 5
-rw-r--r--  crypto/lrw.c | 12
-rw-r--r--  crypto/lskcipher.c | 634
-rw-r--r--  crypto/rsa-pkcs1pad.c | 27
-rw-r--r--  crypto/rsaprivkey.asn1 | 7
-rw-r--r--  crypto/rsapubkey.asn1 | 7
-rw-r--r--  crypto/shash.c | 379
-rw-r--r--  crypto/skcipher.c | 81
-rw-r--r--  crypto/skcipher.h | 28
-rw-r--r--  crypto/testmgr.c | 57
-rw-r--r--  crypto/testmgr.h | 155
-rw-r--r--  crypto/vmac.c | 1
-rw-r--r--  crypto/xcbc.c | 32
-rw-r--r--  crypto/xts.c | 10
53 files changed, 1978 insertions, 1323 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 650b1b3620..70661f58ee 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -85,6 +85,7 @@ config CRYPTO_SKCIPHER
tristate
select CRYPTO_SKCIPHER2
select CRYPTO_ALGAPI
+ select CRYPTO_ECB
config CRYPTO_SKCIPHER2
tristate
@@ -689,7 +690,7 @@ config CRYPTO_CTS
config CRYPTO_ECB
tristate "ECB (Electronic Codebook)"
- select CRYPTO_SKCIPHER
+ select CRYPTO_SKCIPHER2
select CRYPTO_MANAGER
help
ECB (Electronic Codebook) mode (NIST SP800-38A)
@@ -1296,9 +1297,69 @@ config CRYPTO_JITTERENTROPY
See https://www.chronox.de/jent.html
+if CRYPTO_JITTERENTROPY
+if CRYPTO_FIPS && EXPERT
+
+choice
+ prompt "CPU Jitter RNG Memory Size"
+ default CRYPTO_JITTERENTROPY_MEMSIZE_2
+ help
+ The Jitter RNG measures the execution time of memory accesses.
+ Multiple consecutive memory accesses are performed. If the memory
+ size fits into a cache (e.g. L1), only the memory access timing
+ to that cache is measured. The closer the cache is to the CPU,
+ the fewer variations are measured and thus the less entropy is
+ obtained. Thus, if the memory size fits into the L1 cache, the
+ obtained entropy is less than if the memory size fits within
+ L1 + L2, which in turn is less than if the memory fits into
+ L1 + L2 + L3. By selecting a different memory size, the entropy
+ rate produced by the Jitter RNG can therefore be modified.
+
+ config CRYPTO_JITTERENTROPY_MEMSIZE_2
+ bool "2048 Bytes (default)"
+
+ config CRYPTO_JITTERENTROPY_MEMSIZE_128
+ bool "128 kBytes"
+
+ config CRYPTO_JITTERENTROPY_MEMSIZE_1024
+ bool "1024 kBytes"
+
+ config CRYPTO_JITTERENTROPY_MEMSIZE_8192
+ bool "8192 kBytes"
+endchoice
+
+config CRYPTO_JITTERENTROPY_MEMORY_BLOCKS
+ int
+ default 64 if CRYPTO_JITTERENTROPY_MEMSIZE_2
+ default 512 if CRYPTO_JITTERENTROPY_MEMSIZE_128
+ default 1024 if CRYPTO_JITTERENTROPY_MEMSIZE_1024
+ default 4096 if CRYPTO_JITTERENTROPY_MEMSIZE_8192
+
+config CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE
+ int
+ default 32 if CRYPTO_JITTERENTROPY_MEMSIZE_2
+ default 256 if CRYPTO_JITTERENTROPY_MEMSIZE_128
+ default 1024 if CRYPTO_JITTERENTROPY_MEMSIZE_1024
+ default 2048 if CRYPTO_JITTERENTROPY_MEMSIZE_8192
+
+config CRYPTO_JITTERENTROPY_OSR
+ int "CPU Jitter RNG Oversampling Rate"
+ range 1 15
+ default 1
+ help
+ The Jitter RNG allows the specification of an oversampling rate (OSR).
+ The Jitter RNG operation requires a fixed number of timing
+ measurements to produce one output block of random numbers. The
+ OSR value is multiplied by that number of timing measurements to
+ generate one output block, so the timing measurement is oversampled
+ by the OSR factor. The oversampling allows the Jitter RNG to operate
+ on hardware whose timers deliver a limited amount of entropy (e.g.
+ the timer is coarse) by setting the OSR to a higher value. The
+ trade-off, however, is that the Jitter RNG now requires more time
+ to generate random numbers.
+
config CRYPTO_JITTERENTROPY_TESTINTERFACE
bool "CPU Jitter RNG Test Interface"
- depends on CRYPTO_JITTERENTROPY
help
The test interface allows a privileged process to capture
the raw unconditioned high resolution time stamp noise that
@@ -1316,6 +1377,28 @@ config CRYPTO_JITTERENTROPY_TESTINTERFACE
If unsure, select N.
+endif # if CRYPTO_FIPS && EXPERT
+
+if !(CRYPTO_FIPS && EXPERT)
+
+config CRYPTO_JITTERENTROPY_MEMORY_BLOCKS
+ int
+ default 64
+
+config CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE
+ int
+ default 32
+
+config CRYPTO_JITTERENTROPY_OSR
+ int
+ default 1
+
+config CRYPTO_JITTERENTROPY_TESTINTERFACE
+ bool
+
+endif # if !(CRYPTO_FIPS && EXPERT)
+endif # if CRYPTO_JITTERENTROPY
+
config CRYPTO_KDF800108_CTR
tristate
select CRYPTO_HMAC
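
The memory-size choice above is realised through the two derived symbols CRYPTO_JITTERENTROPY_MEMORY_BLOCKS and CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE: the working set the Jitter RNG walks over is their product. A minimal illustration (not part of the patch) of how the defaults multiply out:

/*
 * Illustration only: how the Kconfig defaults above combine.
 * blocks * blocksize = Jitter RNG working-set size.
 */
#include <stdio.h>

int main(void)
{
        static const struct {
                const char *option;
                unsigned int blocks, blocksize;
        } cfg[] = {
                { "CRYPTO_JITTERENTROPY_MEMSIZE_2 (default)",   64,   32 }, /*    2048 bytes */
                { "CRYPTO_JITTERENTROPY_MEMSIZE_128",           512,  256 }, /*  128 kBytes  */
                { "CRYPTO_JITTERENTROPY_MEMSIZE_1024",         1024, 1024 }, /* 1024 kBytes  */
                { "CRYPTO_JITTERENTROPY_MEMSIZE_8192",         4096, 2048 }, /* 8192 kBytes  */
        };

        for (unsigned int i = 0; i < 4; i++)
                printf("%-45s -> %u bytes\n", cfg[i].option,
                       cfg[i].blocks * cfg[i].blocksize);
        return 0;
}
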
diff --git a/crypto/Makefile b/crypto/Makefile
index 953a7e105e..5ac6876f93 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -16,7 +16,11 @@ obj-$(CONFIG_CRYPTO_ALGAPI2) += crypto_algapi.o
obj-$(CONFIG_CRYPTO_AEAD2) += aead.o
obj-$(CONFIG_CRYPTO_GENIV) += geniv.o
-obj-$(CONFIG_CRYPTO_SKCIPHER2) += skcipher.o
+crypto_skcipher-y += lskcipher.o
+crypto_skcipher-y += skcipher.o
+
+obj-$(CONFIG_CRYPTO_SKCIPHER2) += crypto_skcipher.o
+
obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
obj-$(CONFIG_CRYPTO_ECHAINIV) += echainiv.o
diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index c33ba22a66..60f3883b73 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -245,10 +245,9 @@ static void adiantum_hash_header(struct skcipher_request *req)
/* Hash the left-hand part (the "bulk") of the message using NHPoly1305 */
static int adiantum_hash_message(struct skcipher_request *req,
- struct scatterlist *sgl, le128 *digest)
+ struct scatterlist *sgl, unsigned int nents,
+ le128 *digest)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
struct shash_desc *hash_desc = &rctx->u.hash_desc;
@@ -256,14 +255,11 @@ static int adiantum_hash_message(struct skcipher_request *req,
unsigned int i, n;
int err;
- hash_desc->tfm = tctx->hash;
-
err = crypto_shash_init(hash_desc);
if (err)
return err;
- sg_miter_start(&miter, sgl, sg_nents(sgl),
- SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+ sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG | SG_MITER_ATOMIC);
for (i = 0; i < bulk_len; i += n) {
sg_miter_next(&miter);
n = min_t(unsigned int, miter.length, bulk_len - i);
@@ -285,6 +281,8 @@ static int adiantum_finish(struct skcipher_request *req)
const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
+ struct scatterlist *dst = req->dst;
+ const unsigned int dst_nents = sg_nents(dst);
le128 digest;
int err;
@@ -298,13 +296,32 @@ static int adiantum_finish(struct skcipher_request *req)
* enc: C_R = C_M - H_{K_H}(T, C_L)
* dec: P_R = P_M - H_{K_H}(T, P_L)
*/
- err = adiantum_hash_message(req, req->dst, &digest);
- if (err)
- return err;
- le128_add(&digest, &digest, &rctx->header_hash);
- le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
- scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->dst,
- bulk_len, BLOCKCIPHER_BLOCK_SIZE, 1);
+ rctx->u.hash_desc.tfm = tctx->hash;
+ le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
+ if (dst_nents == 1 && dst->offset + req->cryptlen <= PAGE_SIZE) {
+ /* Fast path for single-page destination */
+ struct page *page = sg_page(dst);
+ void *virt = kmap_local_page(page) + dst->offset;
+
+ err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len,
+ (u8 *)&digest);
+ if (err) {
+ kunmap_local(virt);
+ return err;
+ }
+ le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
+ memcpy(virt + bulk_len, &rctx->rbuf.bignum, sizeof(le128));
+ flush_dcache_page(page);
+ kunmap_local(virt);
+ } else {
+ /* Slow path that works for any destination scatterlist */
+ err = adiantum_hash_message(req, dst, dst_nents, &digest);
+ if (err)
+ return err;
+ le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
+ scatterwalk_map_and_copy(&rctx->rbuf.bignum, dst,
+ bulk_len, sizeof(le128), 1);
+ }
return 0;
}
@@ -324,6 +341,8 @@ static int adiantum_crypt(struct skcipher_request *req, bool enc)
const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
+ struct scatterlist *src = req->src;
+ const unsigned int src_nents = sg_nents(src);
unsigned int stream_len;
le128 digest;
int err;
@@ -339,12 +358,24 @@ static int adiantum_crypt(struct skcipher_request *req, bool enc)
* dec: C_M = C_R + H_{K_H}(T, C_L)
*/
adiantum_hash_header(req);
- err = adiantum_hash_message(req, req->src, &digest);
+ rctx->u.hash_desc.tfm = tctx->hash;
+ if (src_nents == 1 && src->offset + req->cryptlen <= PAGE_SIZE) {
+ /* Fast path for single-page source */
+ void *virt = kmap_local_page(sg_page(src)) + src->offset;
+
+ err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len,
+ (u8 *)&digest);
+ memcpy(&rctx->rbuf.bignum, virt + bulk_len, sizeof(le128));
+ kunmap_local(virt);
+ } else {
+ /* Slow path that works for any source scatterlist */
+ err = adiantum_hash_message(req, src, src_nents, &digest);
+ scatterwalk_map_and_copy(&rctx->rbuf.bignum, src,
+ bulk_len, sizeof(le128), 0);
+ }
if (err)
return err;
- le128_add(&digest, &digest, &rctx->header_hash);
- scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->src,
- bulk_len, BLOCKCIPHER_BLOCK_SIZE, 0);
+ le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
/* If encrypting, encrypt P_M with the block cipher to get C_M */
@@ -468,7 +499,7 @@ static void adiantum_free_instance(struct skcipher_instance *inst)
* Check for a supported set of inner algorithms.
* See the comment at the beginning of this file.
*/
-static bool adiantum_supported_algorithms(struct skcipher_alg *streamcipher_alg,
+static bool adiantum_supported_algorithms(struct skcipher_alg_common *streamcipher_alg,
struct crypto_alg *blockcipher_alg,
struct shash_alg *hash_alg)
{
@@ -494,7 +525,7 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
const char *nhpoly1305_name;
struct skcipher_instance *inst;
struct adiantum_instance_ctx *ictx;
- struct skcipher_alg *streamcipher_alg;
+ struct skcipher_alg_common *streamcipher_alg;
struct crypto_alg *blockcipher_alg;
struct shash_alg *hash_alg;
int err;
@@ -514,7 +545,7 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
- streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn);
+ streamcipher_alg = crypto_spawn_skcipher_alg_common(&ictx->streamcipher_spawn);
/* Block cipher, e.g. "aes" */
err = crypto_grab_cipher(&ictx->blockcipher_spawn,
@@ -561,8 +592,7 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE;
inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx);
- inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask |
- hash_alg->base.cra_alignmask;
+ inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask;
/*
* The block cipher is only invoked once per message, so for long
* messages (e.g. sectors for disk encryption) its performance doesn't
@@ -578,8 +608,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.decrypt = adiantum_decrypt;
inst->alg.init = adiantum_init_tfm;
inst->alg.exit = adiantum_exit_tfm;
- inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(streamcipher_alg);
- inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(streamcipher_alg);
+ inst->alg.min_keysize = streamcipher_alg->min_keysize;
+ inst->alg.max_keysize = streamcipher_alg->max_keysize;
inst->alg.ivsize = TWEAK_SIZE;
inst->free = adiantum_free_instance;
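
The adiantum changes above add a fast path: when the source or destination scatterlist has a single entry that fits within one page, the bulk data is hashed through a direct kmap_local_page() mapping instead of the sg_miter-based walk. A hedged sketch of that pattern follows; the helper names are illustrative and not from the patch, while kmap_local_page(), kunmap_local() and crypto_shash_digest() are the kernel helpers the patch itself calls.

/* Illustration only: the single-page test and linear-mapping hash used by
 * the fast path above. Helper names are hypothetical.
 */
#include <crypto/hash.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

static bool sg_fits_one_page(struct scatterlist *sg, unsigned int len)
{
        /* One entry, and the whole region lies inside its page. */
        return sg_nents(sg) == 1 && sg->offset + len <= PAGE_SIZE;
}

static int hash_single_page(struct shash_desc *desc, struct scatterlist *sg,
                            unsigned int len, u8 *out)
{
        void *virt = kmap_local_page(sg_page(sg)) + sg->offset;
        int err = crypto_shash_digest(desc, virt, len, out);

        kunmap_local(virt);
        return err;
}
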
diff --git a/crypto/aead.c b/crypto/aead.c
index d5ba204ebd..5490663356 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -269,6 +269,12 @@ struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_alloc_aead);
+int crypto_has_aead(const char *alg_name, u32 type, u32 mask)
+{
+ return crypto_type_has_alg(alg_name, &crypto_aead_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_has_aead);
+
static int aead_prepare_alg(struct aead_alg *alg)
{
struct crypto_istat_aead *istat = aead_get_stat(alg);
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 709ef09407..80c3e53547 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -2,8 +2,12 @@
/*
* Asynchronous Cryptographic Hash operations.
*
- * This is the asynchronous version of hash.c with notification of
- * completion via a callback.
+ * This is the implementation of the ahash (asynchronous hash) API. It differs
+ * from shash (synchronous hash) in that ahash supports asynchronous operations,
+ * and it hashes data from scatterlists instead of virtually addressed buffers.
+ *
+ * The ahash API provides access to both ahash and shash algorithms. The shash
+ * API only provides access to shash algorithms.
*
* Copyright (c) 2008 Loc Ho <lho@amcc.com>
*/
@@ -21,33 +25,142 @@
#include "hash.h"
-static const struct crypto_type crypto_ahash_type;
+#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
-struct ahash_request_priv {
- crypto_completion_t complete;
- void *data;
- u8 *result;
- u32 flags;
- void *ubuf[] CRYPTO_MINALIGN_ATTR;
-};
+static inline struct crypto_istat_hash *ahash_get_stat(struct ahash_alg *alg)
+{
+ return hash_get_stat(&alg->halg);
+}
+
+static inline int crypto_ahash_errstat(struct ahash_alg *alg, int err)
+{
+ if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+ return err;
+
+ if (err && err != -EINPROGRESS && err != -EBUSY)
+ atomic64_inc(&ahash_get_stat(alg)->err_cnt);
+
+ return err;
+}
+
+/*
+ * For an ahash tfm that is using an shash algorithm (instead of an ahash
+ * algorithm), this returns the underlying shash tfm.
+ */
+static inline struct crypto_shash *ahash_to_shash(struct crypto_ahash *tfm)
+{
+ return *(struct crypto_shash **)crypto_ahash_ctx(tfm);
+}
+
+static inline struct shash_desc *prepare_shash_desc(struct ahash_request *req,
+ struct crypto_ahash *tfm)
+{
+ struct shash_desc *desc = ahash_request_ctx(req);
+
+ desc->tfm = ahash_to_shash(tfm);
+ return desc;
+}
+
+int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc)
+{
+ struct crypto_hash_walk walk;
+ int nbytes;
+
+ for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
+ nbytes = crypto_hash_walk_done(&walk, nbytes))
+ nbytes = crypto_shash_update(desc, walk.data, nbytes);
+
+ return nbytes;
+}
+EXPORT_SYMBOL_GPL(shash_ahash_update);
+
+int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc)
+{
+ struct crypto_hash_walk walk;
+ int nbytes;
+
+ nbytes = crypto_hash_walk_first(req, &walk);
+ if (!nbytes)
+ return crypto_shash_final(desc, req->result);
+
+ do {
+ nbytes = crypto_hash_walk_last(&walk) ?
+ crypto_shash_finup(desc, walk.data, nbytes,
+ req->result) :
+ crypto_shash_update(desc, walk.data, nbytes);
+ nbytes = crypto_hash_walk_done(&walk, nbytes);
+ } while (nbytes > 0);
+
+ return nbytes;
+}
+EXPORT_SYMBOL_GPL(shash_ahash_finup);
+
+int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
+{
+ unsigned int nbytes = req->nbytes;
+ struct scatterlist *sg;
+ unsigned int offset;
+ int err;
+
+ if (nbytes &&
+ (sg = req->src, offset = sg->offset,
+ nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
+ void *data;
+
+ data = kmap_local_page(sg_page(sg));
+ err = crypto_shash_digest(desc, data + offset, nbytes,
+ req->result);
+ kunmap_local(data);
+ } else
+ err = crypto_shash_init(desc) ?:
+ shash_ahash_finup(req, desc);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(shash_ahash_digest);
+
+static void crypto_exit_ahash_using_shash(struct crypto_tfm *tfm)
+{
+ struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_shash(*ctx);
+}
+
+static int crypto_init_ahash_using_shash(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *calg = tfm->__crt_alg;
+ struct crypto_ahash *crt = __crypto_ahash_cast(tfm);
+ struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
+ struct crypto_shash *shash;
+
+ if (!crypto_mod_get(calg))
+ return -EAGAIN;
+
+ shash = crypto_create_tfm(calg, &crypto_shash_type);
+ if (IS_ERR(shash)) {
+ crypto_mod_put(calg);
+ return PTR_ERR(shash);
+ }
+
+ crt->using_shash = true;
+ *ctx = shash;
+ tfm->exit = crypto_exit_ahash_using_shash;
+
+ crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
+ CRYPTO_TFM_NEED_KEY);
+ crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);
+
+ return 0;
+}
static int hash_walk_next(struct crypto_hash_walk *walk)
{
- unsigned int alignmask = walk->alignmask;
unsigned int offset = walk->offset;
unsigned int nbytes = min(walk->entrylen,
((unsigned int)(PAGE_SIZE)) - offset);
walk->data = kmap_local_page(walk->pg);
walk->data += offset;
-
- if (offset & alignmask) {
- unsigned int unaligned = alignmask + 1 - (offset & alignmask);
-
- if (nbytes > unaligned)
- nbytes = unaligned;
- }
-
walk->entrylen -= nbytes;
return nbytes;
}
@@ -71,23 +184,8 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
- unsigned int alignmask = walk->alignmask;
-
walk->data -= walk->offset;
- if (walk->entrylen && (walk->offset & alignmask) && !err) {
- unsigned int nbytes;
-
- walk->offset = ALIGN(walk->offset, alignmask + 1);
- nbytes = min(walk->entrylen,
- (unsigned int)(PAGE_SIZE - walk->offset));
- if (nbytes) {
- walk->entrylen -= nbytes;
- walk->data += walk->offset;
- return nbytes;
- }
- }
-
kunmap_local(walk->data);
crypto_yield(walk->flags);
@@ -119,7 +217,6 @@ int crypto_hash_walk_first(struct ahash_request *req,
return 0;
}
- walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
walk->sg = req->src;
walk->flags = req->base.flags;
@@ -127,67 +224,64 @@ int crypto_hash_walk_first(struct ahash_request *req,
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
-static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
-{
- unsigned long alignmask = crypto_ahash_alignmask(tfm);
- int ret;
- u8 *buffer, *alignbuffer;
- unsigned long absize;
-
- absize = keylen + alignmask;
- buffer = kmalloc(absize, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
- alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
- memcpy(alignbuffer, key, keylen);
- ret = tfm->setkey(tfm, alignbuffer, keylen);
- kfree_sensitive(buffer);
- return ret;
-}
-
static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
return -ENOSYS;
}
-static void ahash_set_needkey(struct crypto_ahash *tfm)
+static void ahash_set_needkey(struct crypto_ahash *tfm, struct ahash_alg *alg)
{
- const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
-
- if (tfm->setkey != ahash_nosetkey &&
- !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
+ if (alg->setkey != ahash_nosetkey &&
+ !(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
- unsigned long alignmask = crypto_ahash_alignmask(tfm);
- int err;
+ if (likely(tfm->using_shash)) {
+ struct crypto_shash *shash = ahash_to_shash(tfm);
+ int err;
- if ((unsigned long)key & alignmask)
- err = ahash_setkey_unaligned(tfm, key, keylen);
- else
- err = tfm->setkey(tfm, key, keylen);
-
- if (unlikely(err)) {
- ahash_set_needkey(tfm);
- return err;
+ err = crypto_shash_setkey(shash, key, keylen);
+ if (unlikely(err)) {
+ crypto_ahash_set_flags(tfm,
+ crypto_shash_get_flags(shash) &
+ CRYPTO_TFM_NEED_KEY);
+ return err;
+ }
+ } else {
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
+ int err;
+
+ err = alg->setkey(tfm, key, keylen);
+ if (unlikely(err)) {
+ ahash_set_needkey(tfm, alg);
+ return err;
+ }
}
-
crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
+int crypto_ahash_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (likely(tfm->using_shash))
+ return crypto_shash_init(prepare_shash_desc(req, tfm));
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+ return crypto_ahash_alg(tfm)->init(req);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_init);
+
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
bool has_state)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- unsigned long alignmask = crypto_ahash_alignmask(tfm);
unsigned int ds = crypto_ahash_digestsize(tfm);
struct ahash_request *subreq;
unsigned int subreq_size;
@@ -201,7 +295,6 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());
subreq_size += reqsize;
subreq_size += ds;
- subreq_size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
flags = ahash_request_flags(req);
gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
@@ -213,7 +306,6 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
ahash_request_set_callback(subreq, flags, cplt, req);
result = (u8 *)(subreq + 1) + reqsize;
- result = PTR_ALIGN(result, alignmask + 1);
ahash_request_set_crypt(subreq, req->src, result, req->nbytes);
@@ -249,100 +341,78 @@ static void ahash_restore_req(struct ahash_request *req, int err)
kfree_sensitive(subreq);
}
-static void ahash_op_unaligned_done(void *data, int err)
-{
- struct ahash_request *areq = data;
-
- if (err == -EINPROGRESS)
- goto out;
-
- /* First copy req->result into req->priv.result */
- ahash_restore_req(areq, err);
-
-out:
- /* Complete the ORIGINAL request. */
- ahash_request_complete(areq, err);
-}
-
-static int ahash_op_unaligned(struct ahash_request *req,
- int (*op)(struct ahash_request *),
- bool has_state)
-{
- int err;
-
- err = ahash_save_req(req, ahash_op_unaligned_done, has_state);
- if (err)
- return err;
-
- err = op(req->priv);
- if (err == -EINPROGRESS || err == -EBUSY)
- return err;
-
- ahash_restore_req(req, err);
-
- return err;
-}
-
-static int crypto_ahash_op(struct ahash_request *req,
- int (*op)(struct ahash_request *),
- bool has_state)
+int crypto_ahash_update(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- unsigned long alignmask = crypto_ahash_alignmask(tfm);
- int err;
+ struct ahash_alg *alg;
- if ((unsigned long)req->result & alignmask)
- err = ahash_op_unaligned(req, op, has_state);
- else
- err = op(req);
+ if (likely(tfm->using_shash))
+ return shash_ahash_update(req, ahash_request_ctx(req));
- return crypto_hash_errstat(crypto_hash_alg_common(tfm), err);
+ alg = crypto_ahash_alg(tfm);
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ atomic64_add(req->nbytes, &ahash_get_stat(alg)->hash_tlen);
+ return crypto_ahash_errstat(alg, alg->update(req));
}
+EXPORT_SYMBOL_GPL(crypto_ahash_update);
int crypto_ahash_final(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
+ struct ahash_alg *alg;
- if (IS_ENABLED(CONFIG_CRYPTO_STATS))
- atomic64_inc(&hash_get_stat(alg)->hash_cnt);
+ if (likely(tfm->using_shash))
+ return crypto_shash_final(ahash_request_ctx(req), req->result);
- return crypto_ahash_op(req, tfm->final, true);
+ alg = crypto_ahash_alg(tfm);
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ atomic64_inc(&ahash_get_stat(alg)->hash_cnt);
+ return crypto_ahash_errstat(alg, alg->final(req));
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);
int crypto_ahash_finup(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
+ struct ahash_alg *alg;
+
+ if (likely(tfm->using_shash))
+ return shash_ahash_finup(req, ahash_request_ctx(req));
+ alg = crypto_ahash_alg(tfm);
if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
- struct crypto_istat_hash *istat = hash_get_stat(alg);
+ struct crypto_istat_hash *istat = ahash_get_stat(alg);
atomic64_inc(&istat->hash_cnt);
atomic64_add(req->nbytes, &istat->hash_tlen);
}
-
- return crypto_ahash_op(req, tfm->finup, true);
+ return crypto_ahash_errstat(alg, alg->finup(req));
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);
int crypto_ahash_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
+ struct ahash_alg *alg;
+ int err;
+
+ if (likely(tfm->using_shash))
+ return shash_ahash_digest(req, prepare_shash_desc(req, tfm));
+ alg = crypto_ahash_alg(tfm);
if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
- struct crypto_istat_hash *istat = hash_get_stat(alg);
+ struct crypto_istat_hash *istat = ahash_get_stat(alg);
atomic64_inc(&istat->hash_cnt);
atomic64_add(req->nbytes, &istat->hash_tlen);
}
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return crypto_hash_errstat(alg, -ENOKEY);
+ err = -ENOKEY;
+ else
+ err = alg->digest(req);
- return crypto_ahash_op(req, tfm->digest, false);
+ return crypto_ahash_errstat(alg, err);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
@@ -367,7 +437,7 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err)
subreq->base.complete = ahash_def_finup_done2;
- err = crypto_ahash_reqtfm(req)->final(subreq);
+ err = crypto_ahash_alg(crypto_ahash_reqtfm(req))->final(subreq);
if (err == -EINPROGRESS || err == -EBUSY)
return err;
@@ -404,13 +474,35 @@ static int ahash_def_finup(struct ahash_request *req)
if (err)
return err;
- err = tfm->update(req->priv);
+ err = crypto_ahash_alg(tfm)->update(req->priv);
if (err == -EINPROGRESS || err == -EBUSY)
return err;
return ahash_def_finup_finish1(req, err);
}
+int crypto_ahash_export(struct ahash_request *req, void *out)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (likely(tfm->using_shash))
+ return crypto_shash_export(ahash_request_ctx(req), out);
+ return crypto_ahash_alg(tfm)->export(req, out);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_export);
+
+int crypto_ahash_import(struct ahash_request *req, const void *in)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (likely(tfm->using_shash))
+ return crypto_shash_import(prepare_shash_desc(req, tfm), in);
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+ return crypto_ahash_alg(tfm)->import(req, in);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_import);
+
static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
@@ -424,25 +516,12 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
struct ahash_alg *alg = crypto_ahash_alg(hash);
- hash->setkey = ahash_nosetkey;
-
crypto_ahash_set_statesize(hash, alg->halg.statesize);
- if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
- return crypto_init_shash_ops_async(tfm);
+ if (tfm->__crt_alg->cra_type == &crypto_shash_type)
+ return crypto_init_ahash_using_shash(tfm);
- hash->init = alg->init;
- hash->update = alg->update;
- hash->final = alg->final;
- hash->finup = alg->finup ?: ahash_def_finup;
- hash->digest = alg->digest;
- hash->export = alg->export;
- hash->import = alg->import;
-
- if (alg->setkey) {
- hash->setkey = alg->setkey;
- ahash_set_needkey(hash);
- }
+ ahash_set_needkey(hash, alg);
if (alg->exit_tfm)
tfm->exit = crypto_ahash_exit_tfm;
@@ -452,7 +531,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
- if (alg->cra_type != &crypto_ahash_type)
+ if (alg->cra_type == &crypto_shash_type)
return sizeof(struct crypto_shash *);
return crypto_alg_extsize(alg);
@@ -560,19 +639,22 @@ struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
if (IS_ERR(nhash))
return nhash;
- nhash->init = hash->init;
- nhash->update = hash->update;
- nhash->final = hash->final;
- nhash->finup = hash->finup;
- nhash->digest = hash->digest;
- nhash->export = hash->export;
- nhash->import = hash->import;
- nhash->setkey = hash->setkey;
nhash->reqsize = hash->reqsize;
nhash->statesize = hash->statesize;
- if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
- return crypto_clone_shash_ops_async(nhash, hash);
+ if (likely(hash->using_shash)) {
+ struct crypto_shash **nctx = crypto_ahash_ctx(nhash);
+ struct crypto_shash *shash;
+
+ shash = crypto_clone_shash(ahash_to_shash(hash));
+ if (IS_ERR(shash)) {
+ err = PTR_ERR(shash);
+ goto out_free_nhash;
+ }
+ nhash->using_shash = true;
+ *nctx = shash;
+ return nhash;
+ }
err = -ENOSYS;
alg = crypto_ahash_alg(hash);
@@ -606,6 +688,11 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
base->cra_type = &crypto_ahash_type;
base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;
+ if (!alg->finup)
+ alg->finup = ahash_def_finup;
+ if (!alg->setkey)
+ alg->setkey = ahash_nosetkey;
+
return 0;
}
@@ -677,10 +764,10 @@ bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
struct crypto_alg *alg = &halg->base;
- if (alg->cra_type != &crypto_ahash_type)
+ if (alg->cra_type == &crypto_shash_type)
return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));
- return __crypto_ahash_alg(alg)->setkey != NULL;
+ return __crypto_ahash_alg(alg)->setkey != ahash_nosetkey;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);
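
The reworked header comment above notes that the ahash API now fronts both ahash and shash implementations, with the new tfm->using_shash flag routing shash-backed tfms straight into the shash fast paths. A hedged caller-side sketch, assuming the data sits in the kernel's linear mapping (so sg_init_one() may reference it); the wrapper name is illustrative:

/* Illustration only: one SHA-256 digest over a linear buffer through the
 * ahash API. The same calls work whether "sha256" resolves to an ahash or
 * an shash implementation; in the latter case ahash.c routes into shash.
 */
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int sha256_ahash_once(const void *data, unsigned int len, u8 *out)
{
        struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
        struct ahash_request *req;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        int err;

        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                crypto_free_ahash(tfm);
                return -ENOMEM;
        }

        sg_init_one(&sg, data, len);
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                        CRYPTO_TFM_REQ_MAY_SLEEP,
                                   crypto_req_done, &wait);
        ahash_request_set_crypt(req, &sg, out, len);

        err = crypto_wait_req(crypto_ahash_digest(req), &wait);

        ahash_request_free(req);
        crypto_free_ahash(tfm);
        return err;
}
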
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 82c44d4899..e24c829d7a 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -91,13 +91,13 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
if (!(msg->msg_flags & MSG_MORE)) {
err = hash_alloc_result(sk, ctx);
if (err)
- goto unlock_free;
+ goto unlock_free_result;
ahash_request_set_crypt(&ctx->req, NULL,
ctx->result, 0);
err = crypto_wait_req(crypto_ahash_final(&ctx->req),
&ctx->wait);
if (err)
- goto unlock_free;
+ goto unlock_free_result;
}
goto done_more;
}
@@ -170,6 +170,7 @@ unlock:
unlock_free:
af_alg_free_sg(&ctx->sgl);
+unlock_free_result:
hash_free_result(sk, ctx);
ctx->more = false;
goto unlock;
diff --git a/crypto/api.c b/crypto/api.c
index b9cc0c906e..7f402107f0 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -389,7 +389,7 @@ EXPORT_SYMBOL_GPL(crypto_shoot_alg);
struct crypto_tfm *__crypto_alloc_tfmgfp(struct crypto_alg *alg, u32 type,
u32 mask, gfp_t gfp)
{
- struct crypto_tfm *tfm = NULL;
+ struct crypto_tfm *tfm;
unsigned int tfm_size;
int err = -ENOMEM;
diff --git a/crypto/arc4.c b/crypto/arc4.c
index 3254dcc343..eb3590dc92 100644
--- a/crypto/arc4.c
+++ b/crypto/arc4.c
@@ -7,7 +7,6 @@
* Jon Oberheide <jon@oberheide.org>
*/
-#include <crypto/algapi.h>
#include <crypto/arc4.h>
#include <crypto/internal/skcipher.h>
#include <linux/init.h>
@@ -15,33 +14,24 @@
#include <linux/module.h>
#include <linux/sched.h>
-static int crypto_arc4_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
+static int crypto_arc4_setkey(struct crypto_lskcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
- struct arc4_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct arc4_ctx *ctx = crypto_lskcipher_ctx(tfm);
return arc4_setkey(ctx, in_key, key_len);
}
-static int crypto_arc4_crypt(struct skcipher_request *req)
+static int crypto_arc4_crypt(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned nbytes, u8 *iv, bool final)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct arc4_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- int err;
+ struct arc4_ctx *ctx = crypto_lskcipher_ctx(tfm);
- err = skcipher_walk_virt(&walk, req, false);
-
- while (walk.nbytes > 0) {
- arc4_crypt(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- walk.nbytes);
- err = skcipher_walk_done(&walk, 0);
- }
-
- return err;
+ arc4_crypt(ctx, dst, src, nbytes);
+ return 0;
}
-static int crypto_arc4_init(struct crypto_skcipher *tfm)
+static int crypto_arc4_init(struct crypto_lskcipher *tfm)
{
pr_warn_ratelimited("\"%s\" (%ld) uses obsolete ecb(arc4) skcipher\n",
current->comm, (unsigned long)current->pid);
@@ -49,33 +39,29 @@ static int crypto_arc4_init(struct crypto_skcipher *tfm)
return 0;
}
-static struct skcipher_alg arc4_alg = {
- /*
- * For legacy reasons, this is named "ecb(arc4)", not "arc4".
- * Nevertheless it's actually a stream cipher, not a block cipher.
- */
- .base.cra_name = "ecb(arc4)",
- .base.cra_driver_name = "ecb(arc4)-generic",
- .base.cra_priority = 100,
- .base.cra_blocksize = ARC4_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct arc4_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = ARC4_MIN_KEY_SIZE,
- .max_keysize = ARC4_MAX_KEY_SIZE,
- .setkey = crypto_arc4_setkey,
- .encrypt = crypto_arc4_crypt,
- .decrypt = crypto_arc4_crypt,
- .init = crypto_arc4_init,
+static struct lskcipher_alg arc4_alg = {
+ .co.base.cra_name = "arc4",
+ .co.base.cra_driver_name = "arc4-generic",
+ .co.base.cra_priority = 100,
+ .co.base.cra_blocksize = ARC4_BLOCK_SIZE,
+ .co.base.cra_ctxsize = sizeof(struct arc4_ctx),
+ .co.base.cra_module = THIS_MODULE,
+ .co.min_keysize = ARC4_MIN_KEY_SIZE,
+ .co.max_keysize = ARC4_MAX_KEY_SIZE,
+ .setkey = crypto_arc4_setkey,
+ .encrypt = crypto_arc4_crypt,
+ .decrypt = crypto_arc4_crypt,
+ .init = crypto_arc4_init,
};
static int __init arc4_init(void)
{
- return crypto_register_skcipher(&arc4_alg);
+ return crypto_register_lskcipher(&arc4_alg);
}
static void __exit arc4_exit(void)
{
- crypto_unregister_skcipher(&arc4_alg);
+ crypto_unregister_lskcipher(&arc4_alg);
}
subsys_initcall(arc4_init);
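
With this change arc4 registers as a linear skcipher (lskcipher) under the plain name "arc4" instead of "ecb(arc4)", and its hooks operate on virtually addressed buffers rather than scatterlist walks. A hedged sketch of driving it through the crypto_lskcipher_* helpers added alongside this series in <crypto/skcipher.h>; the wrapper name is illustrative:

/* Illustration only: in-place ARC4 keystream application through the
 * lskcipher interface this driver now registers against. ARC4 is a stream
 * cipher, so no IV is needed and NULL is passed.
 */
#include <crypto/skcipher.h>
#include <linux/err.h>

static int arc4_apply(const u8 *key, unsigned int keylen,
                      u8 *buf, unsigned int len)
{
        struct crypto_lskcipher *tfm = crypto_alloc_lskcipher("arc4", 0, 0);
        int err;

        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_lskcipher_setkey(tfm, key, keylen) ?:
              crypto_lskcipher_encrypt(tfm, buf, buf, len, NULL);

        crypto_free_lskcipher(tfm);
        return err;
}
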
diff --git a/crypto/asymmetric_keys/mscode_parser.c b/crypto/asymmetric_keys/mscode_parser.c
index 839591ad21..05402ef896 100644
--- a/crypto/asymmetric_keys/mscode_parser.c
+++ b/crypto/asymmetric_keys/mscode_parser.c
@@ -75,15 +75,6 @@ int mscode_note_digest_algo(void *context, size_t hdrlen,
oid = look_up_OID(value, vlen);
switch (oid) {
- case OID_md4:
- ctx->digest_algo = "md4";
- break;
- case OID_md5:
- ctx->digest_algo = "md5";
- break;
- case OID_sha1:
- ctx->digest_algo = "sha1";
- break;
case OID_sha256:
ctx->digest_algo = "sha256";
break;
@@ -93,8 +84,14 @@ int mscode_note_digest_algo(void *context, size_t hdrlen,
case OID_sha512:
ctx->digest_algo = "sha512";
break;
- case OID_sha224:
- ctx->digest_algo = "sha224";
+ case OID_sha3_256:
+ ctx->digest_algo = "sha3-256";
+ break;
+ case OID_sha3_384:
+ ctx->digest_algo = "sha3-384";
+ break;
+ case OID_sha3_512:
+ ctx->digest_algo = "sha3-512";
break;
case OID__NR:
diff --git a/crypto/asymmetric_keys/pkcs7.asn1 b/crypto/asymmetric_keys/pkcs7.asn1
index 1eca740b81..28e1f4a41c 100644
--- a/crypto/asymmetric_keys/pkcs7.asn1
+++ b/crypto/asymmetric_keys/pkcs7.asn1
@@ -1,3 +1,10 @@
+-- SPDX-License-Identifier: BSD-3-Clause
+--
+-- Copyright (C) 2009 IETF Trust and the persons identified as authors
+-- of the code
+--
+-- https://www.rfc-editor.org/rfc/rfc5652#section-3
+
PKCS7ContentInfo ::= SEQUENCE {
contentType ContentType ({ pkcs7_check_content_type }),
content [0] EXPLICIT SignedData OPTIONAL
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index 277482bb17..5b08c50722 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -227,15 +227,6 @@ int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen,
struct pkcs7_parse_context *ctx = context;
switch (ctx->last_oid) {
- case OID_md4:
- ctx->sinfo->sig->hash_algo = "md4";
- break;
- case OID_md5:
- ctx->sinfo->sig->hash_algo = "md5";
- break;
- case OID_sha1:
- ctx->sinfo->sig->hash_algo = "sha1";
- break;
case OID_sha256:
ctx->sinfo->sig->hash_algo = "sha256";
break;
@@ -257,6 +248,15 @@ int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen,
case OID_gost2012Digest512:
ctx->sinfo->sig->hash_algo = "streebog512";
break;
+ case OID_sha3_256:
+ ctx->sinfo->sig->hash_algo = "sha3-256";
+ break;
+ case OID_sha3_384:
+ ctx->sinfo->sig->hash_algo = "sha3-384";
+ break;
+ case OID_sha3_512:
+ ctx->sinfo->sig->hash_algo = "sha3-512";
+ break;
default:
printk("Unsupported digest algo: %u\n", ctx->last_oid);
return -ENOPKG;
@@ -278,11 +278,13 @@ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen,
ctx->sinfo->sig->pkey_algo = "rsa";
ctx->sinfo->sig->encoding = "pkcs1";
break;
- case OID_id_ecdsa_with_sha1:
case OID_id_ecdsa_with_sha224:
case OID_id_ecdsa_with_sha256:
case OID_id_ecdsa_with_sha384:
case OID_id_ecdsa_with_sha512:
+ case OID_id_ecdsa_with_sha3_256:
+ case OID_id_ecdsa_with_sha3_384:
+ case OID_id_ecdsa_with_sha3_512:
ctx->sinfo->sig->pkey_algo = "ecdsa";
ctx->sinfo->sig->encoding = "x962";
break;
diff --git a/crypto/asymmetric_keys/pkcs8.asn1 b/crypto/asymmetric_keys/pkcs8.asn1
index 702c41a3c7..a2a8af2633 100644
--- a/crypto/asymmetric_keys/pkcs8.asn1
+++ b/crypto/asymmetric_keys/pkcs8.asn1
@@ -1,3 +1,9 @@
+-- SPDX-License-Identifier: BSD-3-Clause
+--
+-- Copyright (C) 2010 IETF Trust and the persons identified as authors
+-- of the code
+--
+-- https://www.rfc-editor.org/rfc/rfc5958#section-2
--
-- This is the unencrypted variant
--
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index 1dcab27986..e5f22691fe 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -115,11 +115,13 @@ software_key_determine_akcipher(const struct public_key *pkey,
*/
if (!hash_algo)
return -EINVAL;
- if (strcmp(hash_algo, "sha1") != 0 &&
- strcmp(hash_algo, "sha224") != 0 &&
+ if (strcmp(hash_algo, "sha224") != 0 &&
strcmp(hash_algo, "sha256") != 0 &&
strcmp(hash_algo, "sha384") != 0 &&
- strcmp(hash_algo, "sha512") != 0)
+ strcmp(hash_algo, "sha512") != 0 &&
+ strcmp(hash_algo, "sha3-256") != 0 &&
+ strcmp(hash_algo, "sha3-384") != 0 &&
+ strcmp(hash_algo, "sha3-512") != 0)
return -EINVAL;
} else if (strcmp(pkey->pkey_algo, "sm2") == 0) {
if (strcmp(encoding, "raw") != 0)
diff --git a/crypto/asymmetric_keys/restrict.c b/crypto/asymmetric_keys/restrict.c
index 6b69ea40da..afcd4d101a 100644
--- a/crypto/asymmetric_keys/restrict.c
+++ b/crypto/asymmetric_keys/restrict.c
@@ -102,6 +102,10 @@ int restrict_link_by_signature(struct key *dest_keyring,
if (use_builtin_keys && !test_bit(KEY_FLAG_BUILTIN, &key->flags))
ret = -ENOKEY;
+ else if (IS_BUILTIN(CONFIG_SECONDARY_TRUSTED_KEYRING_SIGNED_BY_BUILTIN) &&
+ !strcmp(dest_keyring->description, ".secondary_trusted_keys") &&
+ !test_bit(KEY_FLAG_BUILTIN, &key->flags))
+ ret = -ENOKEY;
else
ret = verify_signature(key, sig);
key_put(key);
diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c
index 2deff81f8a..398983be77 100644
--- a/crypto/asymmetric_keys/signature.c
+++ b/crypto/asymmetric_keys/signature.c
@@ -115,7 +115,7 @@ EXPORT_SYMBOL_GPL(decrypt_blob);
* Sign the specified data blob using the private key specified by params->key.
* The signature is wrapped in an encoding if params->encoding is specified
* (eg. "pkcs1"). If the encoding needs to know the digest type, this can be
- * passed through params->hash_algo (eg. "sha1").
+ * passed through params->hash_algo (eg. "sha512").
*
* Returns the length of the data placed in the signature buffer or an error.
*/
diff --git a/crypto/asymmetric_keys/x509.asn1 b/crypto/asymmetric_keys/x509.asn1
index 92d59c32f9..feb9573cac 100644
--- a/crypto/asymmetric_keys/x509.asn1
+++ b/crypto/asymmetric_keys/x509.asn1
@@ -1,3 +1,10 @@
+-- SPDX-License-Identifier: BSD-3-Clause
+--
+-- Copyright (C) 2008 IETF Trust and the persons identified as authors
+-- of the code
+--
+-- https://www.rfc-editor.org/rfc/rfc5280#section-4
+
Certificate ::= SEQUENCE {
tbsCertificate TBSCertificate ({ x509_note_tbs_certificate }),
signatureAlgorithm AlgorithmIdentifier,
diff --git a/crypto/asymmetric_keys/x509_akid.asn1 b/crypto/asymmetric_keys/x509_akid.asn1
index 1a33231a75..0f8355cf19 100644
--- a/crypto/asymmetric_keys/x509_akid.asn1
+++ b/crypto/asymmetric_keys/x509_akid.asn1
@@ -1,3 +1,8 @@
+-- SPDX-License-Identifier: BSD-3-Clause
+--
+-- Copyright (C) 2008 IETF Trust and the persons identified as authors
+-- of the code
+--
-- X.509 AuthorityKeyIdentifier
-- rfc5280 section 4.2.1.1
@@ -14,15 +19,15 @@ CertificateSerialNumber ::= INTEGER ({ x509_akid_note_serial })
GeneralNames ::= SEQUENCE OF GeneralName
GeneralName ::= CHOICE {
- otherName [0] ANY,
- rfc822Name [1] IA5String,
- dNSName [2] IA5String,
+ otherName [0] IMPLICIT OtherName,
+ rfc822Name [1] IMPLICIT IA5String,
+ dNSName [2] IMPLICIT IA5String,
x400Address [3] ANY,
directoryName [4] Name ({ x509_akid_note_name }),
- ediPartyName [5] ANY,
- uniformResourceIdentifier [6] IA5String,
- iPAddress [7] OCTET STRING,
- registeredID [8] OBJECT IDENTIFIER
+ ediPartyName [5] IMPLICIT EDIPartyName,
+ uniformResourceIdentifier [6] IMPLICIT IA5String,
+ iPAddress [7] IMPLICIT OCTET STRING,
+ registeredID [8] IMPLICIT OBJECT IDENTIFIER
}
Name ::= SEQUENCE OF RelativeDistinguishedName
@@ -33,3 +38,13 @@ AttributeValueAssertion ::= SEQUENCE {
attributeType OBJECT IDENTIFIER ({ x509_note_OID }),
attributeValue ANY ({ x509_extract_name_segment })
}
+
+OtherName ::= SEQUENCE {
+ type-id OBJECT IDENTIFIER,
+ value [0] ANY
+ }
+
+EDIPartyName ::= SEQUENCE {
+ nameAssigner [0] ANY OPTIONAL,
+ partyName [1] ANY
+ }
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 0a7049b470..487204d394 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -195,19 +195,9 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag,
pr_debug("PubKey Algo: %u\n", ctx->last_oid);
switch (ctx->last_oid) {
- case OID_md2WithRSAEncryption:
- case OID_md3WithRSAEncryption:
default:
return -ENOPKG; /* Unsupported combination */
- case OID_md4WithRSAEncryption:
- ctx->cert->sig->hash_algo = "md4";
- goto rsa_pkcs1;
-
- case OID_sha1WithRSAEncryption:
- ctx->cert->sig->hash_algo = "sha1";
- goto rsa_pkcs1;
-
case OID_sha256WithRSAEncryption:
ctx->cert->sig->hash_algo = "sha256";
goto rsa_pkcs1;
@@ -224,9 +214,17 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag,
ctx->cert->sig->hash_algo = "sha224";
goto rsa_pkcs1;
- case OID_id_ecdsa_with_sha1:
- ctx->cert->sig->hash_algo = "sha1";
- goto ecdsa;
+ case OID_id_rsassa_pkcs1_v1_5_with_sha3_256:
+ ctx->cert->sig->hash_algo = "sha3-256";
+ goto rsa_pkcs1;
+
+ case OID_id_rsassa_pkcs1_v1_5_with_sha3_384:
+ ctx->cert->sig->hash_algo = "sha3-384";
+ goto rsa_pkcs1;
+
+ case OID_id_rsassa_pkcs1_v1_5_with_sha3_512:
+ ctx->cert->sig->hash_algo = "sha3-512";
+ goto rsa_pkcs1;
case OID_id_ecdsa_with_sha224:
ctx->cert->sig->hash_algo = "sha224";
@@ -244,6 +242,18 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag,
ctx->cert->sig->hash_algo = "sha512";
goto ecdsa;
+ case OID_id_ecdsa_with_sha3_256:
+ ctx->cert->sig->hash_algo = "sha3-256";
+ goto ecdsa;
+
+ case OID_id_ecdsa_with_sha3_384:
+ ctx->cert->sig->hash_algo = "sha3-384";
+ goto ecdsa;
+
+ case OID_id_ecdsa_with_sha3_512:
+ ctx->cert->sig->hash_algo = "sha3-512";
+ goto ecdsa;
+
case OID_gost2012Signature256:
ctx->cert->sig->hash_algo = "streebog256";
goto ecrdsa;
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 3326c7343e..3aaf3ab4e3 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -141,9 +141,6 @@ static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags)
u8 *hash = areq_ctx->tail;
int err;
- hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
- crypto_ahash_alignmask(auth) + 1);
-
ahash_request_set_tfm(ahreq, auth);
ahash_request_set_crypt(ahreq, req->dst, hash,
req->assoclen + req->cryptlen);
@@ -286,9 +283,6 @@ static int crypto_authenc_decrypt(struct aead_request *req)
u8 *hash = areq_ctx->tail;
int err;
- hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
- crypto_ahash_alignmask(auth) + 1);
-
ahash_request_set_tfm(ahreq, auth);
ahash_request_set_crypt(ahreq, req->src, hash,
req->assoclen + req->cryptlen - authsize);
@@ -373,9 +367,9 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
u32 mask;
struct aead_instance *inst;
struct authenc_instance_ctx *ctx;
+ struct skcipher_alg_common *enc;
struct hash_alg_common *auth;
struct crypto_alg *auth_base;
- struct skcipher_alg *enc;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
@@ -398,10 +392,9 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
crypto_attr_alg_name(tb[2]), 0, mask);
if (err)
goto err_free_inst;
- enc = crypto_spawn_skcipher_alg(&ctx->enc);
+ enc = crypto_spawn_skcipher_alg_common(&ctx->enc);
- ctx->reqoff = ALIGN(2 * auth->digestsize + auth_base->cra_alignmask,
- auth_base->cra_alignmask + 1);
+ ctx->reqoff = 2 * auth->digestsize;
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
@@ -418,12 +411,11 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
auth_base->cra_priority;
inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
- inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
- enc->base.cra_alignmask;
+ inst->alg.base.cra_alignmask = enc->base.cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_ctx);
- inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
- inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
+ inst->alg.ivsize = enc->ivsize;
+ inst->alg.chunksize = enc->chunksize;
inst->alg.maxauthsize = auth->digestsize;
inst->alg.init = crypto_authenc_init_tfm;
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 91424e791d..2cc933e2f7 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -87,11 +87,8 @@ static int crypto_authenc_esn_genicv_tail(struct aead_request *req,
unsigned int flags)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
- struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
- struct crypto_ahash *auth = ctx->auth;
- u8 *hash = PTR_ALIGN((u8 *)areq_ctx->tail,
- crypto_ahash_alignmask(auth) + 1);
+ u8 *hash = areq_ctx->tail;
unsigned int authsize = crypto_aead_authsize(authenc_esn);
unsigned int assoclen = req->assoclen;
unsigned int cryptlen = req->cryptlen;
@@ -122,8 +119,7 @@ static int crypto_authenc_esn_genicv(struct aead_request *req,
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct crypto_ahash *auth = ctx->auth;
- u8 *hash = PTR_ALIGN((u8 *)areq_ctx->tail,
- crypto_ahash_alignmask(auth) + 1);
+ u8 *hash = areq_ctx->tail;
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
unsigned int authsize = crypto_aead_authsize(authenc_esn);
unsigned int assoclen = req->assoclen;
@@ -224,8 +220,7 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
struct skcipher_request *skreq = (void *)(areq_ctx->tail +
ctx->reqoff);
struct crypto_ahash *auth = ctx->auth;
- u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail,
- crypto_ahash_alignmask(auth) + 1);
+ u8 *ohash = areq_ctx->tail;
unsigned int cryptlen = req->cryptlen - authsize;
unsigned int assoclen = req->assoclen;
struct scatterlist *dst = req->dst;
@@ -272,8 +267,7 @@ static int crypto_authenc_esn_decrypt(struct aead_request *req)
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
unsigned int authsize = crypto_aead_authsize(authenc_esn);
struct crypto_ahash *auth = ctx->auth;
- u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail,
- crypto_ahash_alignmask(auth) + 1);
+ u8 *ohash = areq_ctx->tail;
unsigned int assoclen = req->assoclen;
unsigned int cryptlen = req->cryptlen;
u8 *ihash = ohash + crypto_ahash_digestsize(auth);
@@ -344,8 +338,7 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
ctx->enc = enc;
ctx->null = null;
- ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth),
- crypto_ahash_alignmask(auth) + 1);
+ ctx->reqoff = 2 * crypto_ahash_digestsize(auth);
crypto_aead_set_reqsize(
tfm,
@@ -390,9 +383,9 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
u32 mask;
struct aead_instance *inst;
struct authenc_esn_instance_ctx *ctx;
+ struct skcipher_alg_common *enc;
struct hash_alg_common *auth;
struct crypto_alg *auth_base;
- struct skcipher_alg *enc;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
@@ -415,7 +408,7 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
crypto_attr_alg_name(tb[2]), 0, mask);
if (err)
goto err_free_inst;
- enc = crypto_spawn_skcipher_alg(&ctx->enc);
+ enc = crypto_spawn_skcipher_alg_common(&ctx->enc);
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
@@ -431,12 +424,11 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
auth_base->cra_priority;
inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
- inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
- enc->base.cra_alignmask;
+ inst->alg.base.cra_alignmask = enc->base.cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx);
- inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
- inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
+ inst->alg.ivsize = enc->ivsize;
+ inst->alg.chunksize = enc->chunksize;
inst->alg.maxauthsize = auth->digestsize;
inst->alg.init = crypto_authenc_esn_init_tfm;
diff --git a/crypto/cbc.c b/crypto/cbc.c
index 6c03e96b94..28345b8d92 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -5,8 +5,6 @@
* Copyright (c) 2006-2016 Herbert Xu <herbert@gondor.apana.org.au>
*/
-#include <crypto/algapi.h>
-#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -14,99 +12,71 @@
#include <linux/log2.h>
#include <linux/module.h>
-static int crypto_cbc_encrypt_segment(struct skcipher_walk *walk,
- struct crypto_skcipher *skcipher)
+static int crypto_cbc_encrypt_segment(struct crypto_lskcipher *tfm,
+ const u8 *src, u8 *dst, unsigned nbytes,
+ u8 *iv)
{
- unsigned int bsize = crypto_skcipher_blocksize(skcipher);
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 *dst = walk->dst.virt.addr;
- struct crypto_cipher *cipher;
- struct crypto_tfm *tfm;
- u8 *iv = walk->iv;
-
- cipher = skcipher_cipher_simple(skcipher);
- tfm = crypto_cipher_tfm(cipher);
- fn = crypto_cipher_alg(cipher)->cia_encrypt;
+ unsigned int bsize = crypto_lskcipher_blocksize(tfm);
- do {
+ for (; nbytes >= bsize; src += bsize, dst += bsize, nbytes -= bsize) {
crypto_xor(iv, src, bsize);
- fn(tfm, dst, iv);
+ crypto_lskcipher_encrypt(tfm, iv, dst, bsize, NULL);
memcpy(iv, dst, bsize);
-
- src += bsize;
- dst += bsize;
- } while ((nbytes -= bsize) >= bsize);
+ }
return nbytes;
}
-static int crypto_cbc_encrypt_inplace(struct skcipher_walk *walk,
- struct crypto_skcipher *skcipher)
+static int crypto_cbc_encrypt_inplace(struct crypto_lskcipher *tfm,
+ u8 *src, unsigned nbytes, u8 *oiv)
{
- unsigned int bsize = crypto_skcipher_blocksize(skcipher);
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- struct crypto_cipher *cipher;
- struct crypto_tfm *tfm;
- u8 *iv = walk->iv;
-
- cipher = skcipher_cipher_simple(skcipher);
- tfm = crypto_cipher_tfm(cipher);
- fn = crypto_cipher_alg(cipher)->cia_encrypt;
+ unsigned int bsize = crypto_lskcipher_blocksize(tfm);
+ u8 *iv = oiv;
+
+ if (nbytes < bsize)
+ goto out;
do {
crypto_xor(src, iv, bsize);
- fn(tfm, src, src);
+ crypto_lskcipher_encrypt(tfm, src, src, bsize, NULL);
iv = src;
src += bsize;
} while ((nbytes -= bsize) >= bsize);
- memcpy(walk->iv, iv, bsize);
+ memcpy(oiv, iv, bsize);
+out:
return nbytes;
}
-static int crypto_cbc_encrypt(struct skcipher_request *req)
+static int crypto_cbc_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *iv, bool final)
{
- struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct skcipher_walk walk;
- int err;
+ struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);
+ struct crypto_lskcipher *cipher = *ctx;
+ int rem;
- err = skcipher_walk_virt(&walk, req, false);
+ if (src == dst)
+ rem = crypto_cbc_encrypt_inplace(cipher, dst, len, iv);
+ else
+ rem = crypto_cbc_encrypt_segment(cipher, src, dst, len, iv);
- while (walk.nbytes) {
- if (walk.src.virt.addr == walk.dst.virt.addr)
- err = crypto_cbc_encrypt_inplace(&walk, skcipher);
- else
- err = crypto_cbc_encrypt_segment(&walk, skcipher);
- err = skcipher_walk_done(&walk, err);
- }
-
- return err;
+ return rem && final ? -EINVAL : rem;
}
-static int crypto_cbc_decrypt_segment(struct skcipher_walk *walk,
- struct crypto_skcipher *skcipher)
+static int crypto_cbc_decrypt_segment(struct crypto_lskcipher *tfm,
+ const u8 *src, u8 *dst, unsigned nbytes,
+ u8 *oiv)
{
- unsigned int bsize = crypto_skcipher_blocksize(skcipher);
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 *dst = walk->dst.virt.addr;
- struct crypto_cipher *cipher;
- struct crypto_tfm *tfm;
- u8 *iv = walk->iv;
-
- cipher = skcipher_cipher_simple(skcipher);
- tfm = crypto_cipher_tfm(cipher);
- fn = crypto_cipher_alg(cipher)->cia_decrypt;
+ unsigned int bsize = crypto_lskcipher_blocksize(tfm);
+ const u8 *iv = oiv;
+
+ if (nbytes < bsize)
+ goto out;
do {
- fn(tfm, dst, src);
+ crypto_lskcipher_decrypt(tfm, src, dst, bsize, NULL);
crypto_xor(dst, iv, bsize);
iv = src;
@@ -114,83 +84,72 @@ static int crypto_cbc_decrypt_segment(struct skcipher_walk *walk,
dst += bsize;
} while ((nbytes -= bsize) >= bsize);
- memcpy(walk->iv, iv, bsize);
+ memcpy(oiv, iv, bsize);
+out:
return nbytes;
}
-static int crypto_cbc_decrypt_inplace(struct skcipher_walk *walk,
- struct crypto_skcipher *skcipher)
+static int crypto_cbc_decrypt_inplace(struct crypto_lskcipher *tfm,
+ u8 *src, unsigned nbytes, u8 *iv)
{
- unsigned int bsize = crypto_skcipher_blocksize(skcipher);
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
+ unsigned int bsize = crypto_lskcipher_blocksize(tfm);
u8 last_iv[MAX_CIPHER_BLOCKSIZE];
- struct crypto_cipher *cipher;
- struct crypto_tfm *tfm;
- cipher = skcipher_cipher_simple(skcipher);
- tfm = crypto_cipher_tfm(cipher);
- fn = crypto_cipher_alg(cipher)->cia_decrypt;
+ if (nbytes < bsize)
+ goto out;
/* Start of the last block. */
src += nbytes - (nbytes & (bsize - 1)) - bsize;
memcpy(last_iv, src, bsize);
for (;;) {
- fn(tfm, src, src);
+ crypto_lskcipher_decrypt(tfm, src, src, bsize, NULL);
if ((nbytes -= bsize) < bsize)
break;
crypto_xor(src, src - bsize, bsize);
src -= bsize;
}
- crypto_xor(src, walk->iv, bsize);
- memcpy(walk->iv, last_iv, bsize);
+ crypto_xor(src, iv, bsize);
+ memcpy(iv, last_iv, bsize);
+out:
return nbytes;
}
-static int crypto_cbc_decrypt(struct skcipher_request *req)
+static int crypto_cbc_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *iv, bool final)
{
- struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct skcipher_walk walk;
- int err;
+ struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);
+ struct crypto_lskcipher *cipher = *ctx;
+ int rem;
- err = skcipher_walk_virt(&walk, req, false);
+ if (src == dst)
+ rem = crypto_cbc_decrypt_inplace(cipher, dst, len, iv);
+ else
+ rem = crypto_cbc_decrypt_segment(cipher, src, dst, len, iv);
- while (walk.nbytes) {
- if (walk.src.virt.addr == walk.dst.virt.addr)
- err = crypto_cbc_decrypt_inplace(&walk, skcipher);
- else
- err = crypto_cbc_decrypt_segment(&walk, skcipher);
- err = skcipher_walk_done(&walk, err);
- }
-
- return err;
+ return rem && final ? -EINVAL : rem;
}
static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
{
- struct skcipher_instance *inst;
- struct crypto_alg *alg;
+ struct lskcipher_instance *inst;
int err;
- inst = skcipher_alloc_instance_simple(tmpl, tb);
+ inst = lskcipher_alloc_instance_simple(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);
- alg = skcipher_ialg_simple(inst);
-
err = -EINVAL;
- if (!is_power_of_2(alg->cra_blocksize))
+ if (!is_power_of_2(inst->alg.co.base.cra_blocksize))
goto out_free_inst;
inst->alg.encrypt = crypto_cbc_encrypt;
inst->alg.decrypt = crypto_cbc_decrypt;
- err = skcipher_register_instance(tmpl, inst);
+ err = lskcipher_register_instance(tmpl, inst);
if (err) {
out_free_inst:
inst->free(inst);
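In the lskcipher model the mode implementation receives a flat src/dst buffer plus a final flag instead of walking a scatterlist; it consumes whole blocks, hands any remainder back to the caller, and only a final call is forbidden from leaving one. A hedged sketch of that return convention, using a toy XOR "mode" (the function name is made up; crypto_lskcipher_blocksize() and crypto_xor_cpy() are the only real helpers assumed here):

#include <crypto/algapi.h>
#include <crypto/skcipher.h>

/*
 * Toy lskcipher hook: XOR each full block with the IV, report leftover
 * bytes, and fail only when a final call would leave a remainder; this is
 * the convention the CBC hooks above rely on.
 */
static int example_xor_mode_crypt(struct crypto_lskcipher *tfm,
				  const u8 *src, u8 *dst, unsigned len,
				  u8 *iv, bool final)
{
	unsigned int bs = crypto_lskcipher_blocksize(tfm);
	unsigned int done;

	for (done = 0; done + bs <= len; done += bs)
		crypto_xor_cpy(dst + done, src + done, iv, bs);

	len -= done;			/* bytes that did not fill a block */
	return len && final ? -EINVAL : len;
}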
diff --git a/crypto/ccm.c b/crypto/ccm.c
index a9453129c5..36f0acec32 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -56,6 +56,7 @@ struct cbcmac_tfm_ctx {
struct cbcmac_desc_ctx {
unsigned int len;
+ u8 dg[];
};
static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx(
@@ -447,10 +448,10 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
const char *ctr_name,
const char *mac_name)
{
+ struct skcipher_alg_common *ctr;
u32 mask;
struct aead_instance *inst;
struct ccm_instance_ctx *ictx;
- struct skcipher_alg *ctr;
struct hash_alg_common *mac;
int err;
@@ -478,13 +479,12 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
ctr_name, 0, mask);
if (err)
goto err_free_inst;
- ctr = crypto_spawn_skcipher_alg(&ictx->ctr);
+ ctr = crypto_spawn_skcipher_alg_common(&ictx->ctr);
/* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
err = -EINVAL;
if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
- crypto_skcipher_alg_ivsize(ctr) != 16 ||
- ctr->base.cra_blocksize != 1)
+ ctr->ivsize != 16 || ctr->base.cra_blocksize != 1)
goto err_free_inst;
/* ctr and cbcmac must use the same underlying block cipher. */
@@ -504,10 +504,9 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
inst->alg.base.cra_priority = (mac->base.cra_priority +
ctr->base.cra_priority) / 2;
inst->alg.base.cra_blocksize = 1;
- inst->alg.base.cra_alignmask = mac->base.cra_alignmask |
- ctr->base.cra_alignmask;
+ inst->alg.base.cra_alignmask = ctr->base.cra_alignmask;
inst->alg.ivsize = 16;
- inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr);
+ inst->alg.chunksize = ctr->chunksize;
inst->alg.maxauthsize = 16;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_ccm_ctx);
inst->alg.init = crypto_ccm_init_tfm;
@@ -786,10 +785,9 @@ static int crypto_cbcmac_digest_init(struct shash_desc *pdesc)
{
struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
int bs = crypto_shash_digestsize(pdesc->tfm);
- u8 *dg = (u8 *)ctx + crypto_shash_descsize(pdesc->tfm) - bs;
ctx->len = 0;
- memset(dg, 0, bs);
+ memset(ctx->dg, 0, bs);
return 0;
}
@@ -802,18 +800,17 @@ static int crypto_cbcmac_digest_update(struct shash_desc *pdesc, const u8 *p,
struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_digestsize(parent);
- u8 *dg = (u8 *)ctx + crypto_shash_descsize(parent) - bs;
while (len > 0) {
unsigned int l = min(len, bs - ctx->len);
- crypto_xor(dg + ctx->len, p, l);
+ crypto_xor(&ctx->dg[ctx->len], p, l);
ctx->len +=l;
len -= l;
p += l;
if (ctx->len == bs) {
- crypto_cipher_encrypt_one(tfm, dg, dg);
+ crypto_cipher_encrypt_one(tfm, ctx->dg, ctx->dg);
ctx->len = 0;
}
}
@@ -828,12 +825,11 @@ static int crypto_cbcmac_digest_final(struct shash_desc *pdesc, u8 *out)
struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_digestsize(parent);
- u8 *dg = (u8 *)ctx + crypto_shash_descsize(parent) - bs;
if (ctx->len)
- crypto_cipher_encrypt_one(tfm, dg, dg);
+ crypto_cipher_encrypt_one(tfm, ctx->dg, ctx->dg);
- memcpy(out, dg, bs);
+ memcpy(out, ctx->dg, bs);
return 0;
}
@@ -890,8 +886,7 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.base.cra_blocksize = 1;
inst->alg.digestsize = alg->cra_blocksize;
- inst->alg.descsize = ALIGN(sizeof(struct cbcmac_desc_ctx),
- alg->cra_alignmask + 1) +
+ inst->alg.descsize = sizeof(struct cbcmac_desc_ctx) +
alg->cra_blocksize;
inst->alg.base.cra_ctxsize = sizeof(struct cbcmac_tfm_ctx);
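With the alignmask gone, the cbcmac descriptor no longer needs PTR_ALIGN() arithmetic; the running digest simply lives in a flexible array behind the length counter, and descsize is the plain sum shown above. A sketch of the resulting layout, assuming a 16-byte block cipher (the struct name is illustrative):

#include <linux/types.h>

/* descsize = sizeof(struct cbcmac_desc_ctx) + cra_blocksize */
struct cbcmac_desc_ctx_example {
	unsigned int len;	/* bytes currently buffered in dg[] */
	u8 dg[16];		/* running CBC-MAC state; a flexible array in the patch */
};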
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index 3a905c5d8f..9e46513308 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -558,7 +558,7 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
u32 mask;
struct aead_instance *inst;
struct chachapoly_instance_ctx *ctx;
- struct skcipher_alg *chacha;
+ struct skcipher_alg_common *chacha;
struct hash_alg_common *poly;
int err;
@@ -579,7 +579,7 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
- chacha = crypto_spawn_skcipher_alg(&ctx->chacha);
+ chacha = crypto_spawn_skcipher_alg_common(&ctx->chacha);
err = crypto_grab_ahash(&ctx->poly, aead_crypto_instance(inst),
crypto_attr_alg_name(tb[2]), 0, mask);
@@ -591,7 +591,7 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
if (poly->digestsize != POLY1305_DIGEST_SIZE)
goto err_free_inst;
/* Need 16-byte IV size, including Initial Block Counter value */
- if (crypto_skcipher_alg_ivsize(chacha) != CHACHA_IV_SIZE)
+ if (chacha->ivsize != CHACHA_IV_SIZE)
goto err_free_inst;
/* Not a stream cipher? */
if (chacha->base.cra_blocksize != 1)
@@ -610,12 +610,11 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
inst->alg.base.cra_priority = (chacha->base.cra_priority +
poly->base.cra_priority) / 2;
inst->alg.base.cra_blocksize = 1;
- inst->alg.base.cra_alignmask = chacha->base.cra_alignmask |
- poly->base.cra_alignmask;
+ inst->alg.base.cra_alignmask = chacha->base.cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) +
ctx->saltlen;
inst->alg.ivsize = ivsize;
- inst->alg.chunksize = crypto_skcipher_alg_chunksize(chacha);
+ inst->alg.chunksize = chacha->chunksize;
inst->alg.maxauthsize = POLY1305_DIGEST_SIZE;
inst->alg.init = chachapoly_init;
inst->alg.exit = chachapoly_exit;
diff --git a/crypto/cmac.c b/crypto/cmac.c
index fce6b0f58e..c7aa3665b0 100644
--- a/crypto/cmac.c
+++ b/crypto/cmac.c
@@ -28,7 +28,7 @@
*/
struct cmac_tfm_ctx {
struct crypto_cipher *child;
- u8 ctx[];
+ __be64 consts[];
};
/*
@@ -44,17 +44,15 @@ struct cmac_tfm_ctx {
*/
struct cmac_desc_ctx {
unsigned int len;
- u8 ctx[];
+ u8 odds[];
};
static int crypto_cmac_digest_setkey(struct crypto_shash *parent,
const u8 *inkey, unsigned int keylen)
{
- unsigned long alignmask = crypto_shash_alignmask(parent);
struct cmac_tfm_ctx *ctx = crypto_shash_ctx(parent);
unsigned int bs = crypto_shash_blocksize(parent);
- __be64 *consts = PTR_ALIGN((void *)ctx->ctx,
- (alignmask | (__alignof__(__be64) - 1)) + 1);
+ __be64 *consts = ctx->consts;
u64 _const[2];
int i, err = 0;
u8 msb_mask, gfmask;
@@ -104,10 +102,9 @@ static int crypto_cmac_digest_setkey(struct crypto_shash *parent,
static int crypto_cmac_digest_init(struct shash_desc *pdesc)
{
- unsigned long alignmask = crypto_shash_alignmask(pdesc->tfm);
struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
int bs = crypto_shash_blocksize(pdesc->tfm);
- u8 *prev = PTR_ALIGN((void *)ctx->ctx, alignmask + 1) + bs;
+ u8 *prev = &ctx->odds[bs];
ctx->len = 0;
memset(prev, 0, bs);
@@ -119,12 +116,11 @@ static int crypto_cmac_digest_update(struct shash_desc *pdesc, const u8 *p,
unsigned int len)
{
struct crypto_shash *parent = pdesc->tfm;
- unsigned long alignmask = crypto_shash_alignmask(parent);
struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent);
struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_blocksize(parent);
- u8 *odds = PTR_ALIGN((void *)ctx->ctx, alignmask + 1);
+ u8 *odds = ctx->odds;
u8 *prev = odds + bs;
/* checking the data can fill the block */
@@ -165,14 +161,11 @@ static int crypto_cmac_digest_update(struct shash_desc *pdesc, const u8 *p,
static int crypto_cmac_digest_final(struct shash_desc *pdesc, u8 *out)
{
struct crypto_shash *parent = pdesc->tfm;
- unsigned long alignmask = crypto_shash_alignmask(parent);
struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent);
struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_blocksize(parent);
- u8 *consts = PTR_ALIGN((void *)tctx->ctx,
- (alignmask | (__alignof__(__be64) - 1)) + 1);
- u8 *odds = PTR_ALIGN((void *)ctx->ctx, alignmask + 1);
+ u8 *odds = ctx->odds;
u8 *prev = odds + bs;
unsigned int offset = 0;
@@ -191,7 +184,7 @@ static int crypto_cmac_digest_final(struct shash_desc *pdesc, u8 *out)
}
crypto_xor(prev, odds, bs);
- crypto_xor(prev, consts + offset, bs);
+ crypto_xor(prev, (const u8 *)tctx->consts + offset, bs);
crypto_cipher_encrypt_one(tfm, out, prev);
@@ -241,7 +234,6 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
struct shash_instance *inst;
struct crypto_cipher_spawn *spawn;
struct crypto_alg *alg;
- unsigned long alignmask;
u32 mask;
int err;
@@ -273,23 +265,14 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
if (err)
goto err_free_inst;
- alignmask = alg->cra_alignmask;
- inst->alg.base.cra_alignmask = alignmask;
inst->alg.base.cra_priority = alg->cra_priority;
inst->alg.base.cra_blocksize = alg->cra_blocksize;
+ inst->alg.base.cra_ctxsize = sizeof(struct cmac_tfm_ctx) +
+ alg->cra_blocksize * 2;
inst->alg.digestsize = alg->cra_blocksize;
- inst->alg.descsize =
- ALIGN(sizeof(struct cmac_desc_ctx), crypto_tfm_ctx_alignment())
- + (alignmask & ~(crypto_tfm_ctx_alignment() - 1))
- + alg->cra_blocksize * 2;
-
- inst->alg.base.cra_ctxsize =
- ALIGN(sizeof(struct cmac_tfm_ctx), crypto_tfm_ctx_alignment())
- + ((alignmask | (__alignof__(__be64) - 1)) &
- ~(crypto_tfm_ctx_alignment() - 1))
- + alg->cra_blocksize * 2;
-
+ inst->alg.descsize = sizeof(struct cmac_desc_ctx) +
+ alg->cra_blocksize * 2;
inst->alg.init = crypto_cmac_digest_init;
inst->alg.update = crypto_cmac_digest_update;
inst->alg.final = crypto_cmac_digest_final;
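CMAC gets the same treatment: both contexts drop PTR_ALIGN() in favour of trailing arrays, so the ctxsize/descsize expressions become plain sums. A sketch of the layout for a 16-byte block cipher (the struct names are illustrative):

#include <linux/crypto.h>

/* cra_ctxsize = sizeof(struct cmac_tfm_ctx) + 2 * bs   (consts = K1 || K2) */
struct cmac_tfm_ctx_example {
	struct crypto_cipher *child;
	__be64 consts[4];	/* 2 * 16 bytes for bs == 16; a flexible array in the patch */
};

/* descsize = sizeof(struct cmac_desc_ctx) + 2 * bs   (odd block + prev block) */
struct cmac_desc_ctx_example {
	unsigned int len;
	u8 odds[32];		/* odds[0..bs-1] buffered data, odds[bs..2*bs-1] chaining value */
};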
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index bbcc368b6a..31d022d47f 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -377,7 +377,7 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl,
{
struct skcipherd_instance_ctx *ctx;
struct skcipher_instance *inst;
- struct skcipher_alg *alg;
+ struct skcipher_alg_common *alg;
u32 type;
u32 mask;
int err;
@@ -396,17 +396,17 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl,
if (err)
goto err_free_inst;
- alg = crypto_spawn_skcipher_alg(&ctx->spawn);
+ alg = crypto_spawn_skcipher_alg_common(&ctx->spawn);
err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
if (err)
goto err_free_inst;
inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
- inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
- inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
- inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
- inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);
+ inst->alg.ivsize = alg->ivsize;
+ inst->alg.chunksize = alg->chunksize;
+ inst->alg.min_keysize = alg->min_keysize;
+ inst->alg.max_keysize = alg->max_keysize;
inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);
@@ -929,7 +929,7 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
return PTR_ERR(algt);
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_SKCIPHER:
+ case CRYPTO_ALG_TYPE_LSKCIPHER:
return cryptd_create_skcipher(tmpl, tb, algt, &queue);
case CRYPTO_ALG_TYPE_HASH:
return cryptd_create_hash(tmpl, tb, algt, &queue);
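Templates that only need the common skcipher parameters now fetch a struct skcipher_alg_common and read its fields directly instead of calling the crypto_skcipher_alg_*() accessors. A condensed sketch of the pattern used here and in the other converted templates (the function name is made up):

#include <crypto/internal/skcipher.h>

static int example_inherit_params(struct skcipher_instance *inst,
				  struct crypto_skcipher_spawn *spawn)
{
	struct skcipher_alg_common *alg = crypto_spawn_skcipher_alg_common(spawn);

	inst->alg.ivsize      = alg->ivsize;
	inst->alg.chunksize   = alg->chunksize;
	inst->alg.min_keysize = alg->min_keysize;
	inst->alg.max_keysize = alg->max_keysize;
	return 0;
}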
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index 108d9d55c5..e60a0eb628 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -552,20 +552,16 @@ EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
/**
* crypto_engine_exit - free the resources of hardware engine when exit
* @engine: the hardware engine need to be freed
- *
- * Return 0 for success.
*/
-int crypto_engine_exit(struct crypto_engine *engine)
+void crypto_engine_exit(struct crypto_engine *engine)
{
int ret;
ret = crypto_engine_stop(engine);
if (ret)
- return ret;
+ return;
kthread_destroy_worker(engine->kworker);
-
- return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
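With crypto_engine_exit() now returning void, drivers simply call it on the teardown path and move on. A minimal sketch of a remove handler after the change (the device structure is made up):

#include <crypto/engine.h>

struct example_dev {
	struct crypto_engine *engine;
};

static void example_drv_remove(struct example_dev *dev)
{
	crypto_engine_exit(dev->engine);	/* previously: ret = crypto_engine_exit(...) */
	/* remaining per-device cleanup */
}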
diff --git a/crypto/ctr.c b/crypto/ctr.c
index 23c698b220..1420496062 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -258,8 +258,8 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
struct skcipher_instance *inst;
- struct skcipher_alg *alg;
struct crypto_skcipher_spawn *spawn;
+ struct skcipher_alg_common *alg;
u32 mask;
int err;
@@ -278,11 +278,11 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl,
if (err)
goto err_free_inst;
- alg = crypto_spawn_skcipher_alg(spawn);
+ alg = crypto_spawn_skcipher_alg_common(spawn);
/* We only support 16-byte blocks. */
err = -EINVAL;
- if (crypto_skcipher_alg_ivsize(alg) != CTR_RFC3686_BLOCK_SIZE)
+ if (alg->ivsize != CTR_RFC3686_BLOCK_SIZE)
goto err_free_inst;
/* Not a stream cipher? */
@@ -303,11 +303,9 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl,
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
inst->alg.ivsize = CTR_RFC3686_IV_SIZE;
- inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
- inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
- CTR_RFC3686_NONCE_SIZE;
- inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
- CTR_RFC3686_NONCE_SIZE;
+ inst->alg.chunksize = alg->chunksize;
+ inst->alg.min_keysize = alg->min_keysize + CTR_RFC3686_NONCE_SIZE;
+ inst->alg.max_keysize = alg->max_keysize + CTR_RFC3686_NONCE_SIZE;
inst->alg.setkey = crypto_rfc3686_setkey;
inst->alg.encrypt = crypto_rfc3686_crypt;
diff --git a/crypto/cts.c b/crypto/cts.c
index 8f604f6554..f5b42156b6 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -324,8 +324,8 @@ static void crypto_cts_free(struct skcipher_instance *inst)
static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_skcipher_spawn *spawn;
+ struct skcipher_alg_common *alg;
struct skcipher_instance *inst;
- struct skcipher_alg *alg;
u32 mask;
int err;
@@ -344,10 +344,10 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
if (err)
goto err_free_inst;
- alg = crypto_spawn_skcipher_alg(spawn);
+ alg = crypto_spawn_skcipher_alg_common(spawn);
err = -EINVAL;
- if (crypto_skcipher_alg_ivsize(alg) != alg->base.cra_blocksize)
+ if (alg->ivsize != alg->base.cra_blocksize)
goto err_free_inst;
if (strncmp(alg->base.cra_name, "cbc(", 4))
@@ -363,9 +363,9 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
inst->alg.ivsize = alg->base.cra_blocksize;
- inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
- inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
- inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);
+ inst->alg.chunksize = alg->chunksize;
+ inst->alg.min_keysize = alg->min_keysize;
+ inst->alg.max_keysize = alg->max_keysize;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_cts_ctx);
diff --git a/crypto/deflate.c b/crypto/deflate.c
index b2a46f6dc9..6e31e0db0e 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -39,24 +39,20 @@ struct deflate_ctx {
struct z_stream_s decomp_stream;
};
-static int deflate_comp_init(struct deflate_ctx *ctx, int format)
+static int deflate_comp_init(struct deflate_ctx *ctx)
{
int ret = 0;
struct z_stream_s *stream = &ctx->comp_stream;
stream->workspace = vzalloc(zlib_deflate_workspacesize(
- MAX_WBITS, MAX_MEM_LEVEL));
+ -DEFLATE_DEF_WINBITS, MAX_MEM_LEVEL));
if (!stream->workspace) {
ret = -ENOMEM;
goto out;
}
- if (format)
- ret = zlib_deflateInit(stream, 3);
- else
- ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED,
- -DEFLATE_DEF_WINBITS,
- DEFLATE_DEF_MEMLEVEL,
- Z_DEFAULT_STRATEGY);
+ ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED,
+ -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL,
+ Z_DEFAULT_STRATEGY);
if (ret != Z_OK) {
ret = -EINVAL;
goto out_free;
@@ -68,7 +64,7 @@ out_free:
goto out;
}
-static int deflate_decomp_init(struct deflate_ctx *ctx, int format)
+static int deflate_decomp_init(struct deflate_ctx *ctx)
{
int ret = 0;
struct z_stream_s *stream = &ctx->decomp_stream;
@@ -78,10 +74,7 @@ static int deflate_decomp_init(struct deflate_ctx *ctx, int format)
ret = -ENOMEM;
goto out;
}
- if (format)
- ret = zlib_inflateInit(stream);
- else
- ret = zlib_inflateInit2(stream, -DEFLATE_DEF_WINBITS);
+ ret = zlib_inflateInit2(stream, -DEFLATE_DEF_WINBITS);
if (ret != Z_OK) {
ret = -EINVAL;
goto out_free;
@@ -105,21 +98,21 @@ static void deflate_decomp_exit(struct deflate_ctx *ctx)
vfree(ctx->decomp_stream.workspace);
}
-static int __deflate_init(void *ctx, int format)
+static int __deflate_init(void *ctx)
{
int ret;
- ret = deflate_comp_init(ctx, format);
+ ret = deflate_comp_init(ctx);
if (ret)
goto out;
- ret = deflate_decomp_init(ctx, format);
+ ret = deflate_decomp_init(ctx);
if (ret)
deflate_comp_exit(ctx);
out:
return ret;
}
-static void *gen_deflate_alloc_ctx(struct crypto_scomp *tfm, int format)
+static void *deflate_alloc_ctx(struct crypto_scomp *tfm)
{
struct deflate_ctx *ctx;
int ret;
@@ -128,7 +121,7 @@ static void *gen_deflate_alloc_ctx(struct crypto_scomp *tfm, int format)
if (!ctx)
return ERR_PTR(-ENOMEM);
- ret = __deflate_init(ctx, format);
+ ret = __deflate_init(ctx);
if (ret) {
kfree(ctx);
return ERR_PTR(ret);
@@ -137,21 +130,11 @@ static void *gen_deflate_alloc_ctx(struct crypto_scomp *tfm, int format)
return ctx;
}
-static void *deflate_alloc_ctx(struct crypto_scomp *tfm)
-{
- return gen_deflate_alloc_ctx(tfm, 0);
-}
-
-static void *zlib_deflate_alloc_ctx(struct crypto_scomp *tfm)
-{
- return gen_deflate_alloc_ctx(tfm, 1);
-}
-
static int deflate_init(struct crypto_tfm *tfm)
{
struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
- return __deflate_init(ctx, 0);
+ return __deflate_init(ctx);
}
static void __deflate_exit(void *ctx)
@@ -286,7 +269,7 @@ static struct crypto_alg alg = {
.coa_decompress = deflate_decompress } }
};
-static struct scomp_alg scomp[] = { {
+static struct scomp_alg scomp = {
.alloc_ctx = deflate_alloc_ctx,
.free_ctx = deflate_free_ctx,
.compress = deflate_scompress,
@@ -296,17 +279,7 @@ static struct scomp_alg scomp[] = { {
.cra_driver_name = "deflate-scomp",
.cra_module = THIS_MODULE,
}
-}, {
- .alloc_ctx = zlib_deflate_alloc_ctx,
- .free_ctx = deflate_free_ctx,
- .compress = deflate_scompress,
- .decompress = deflate_sdecompress,
- .base = {
- .cra_name = "zlib-deflate",
- .cra_driver_name = "zlib-deflate-scomp",
- .cra_module = THIS_MODULE,
- }
-} };
+};
static int __init deflate_mod_init(void)
{
@@ -316,7 +289,7 @@ static int __init deflate_mod_init(void)
if (ret)
return ret;
- ret = crypto_register_scomps(scomp, ARRAY_SIZE(scomp));
+ ret = crypto_register_scomp(&scomp);
if (ret) {
crypto_unregister_alg(&alg);
return ret;
@@ -328,7 +301,7 @@ static int __init deflate_mod_init(void)
static void __exit deflate_mod_fini(void)
{
crypto_unregister_alg(&alg);
- crypto_unregister_scomps(scomp, ARRAY_SIZE(scomp));
+ crypto_unregister_scomp(&scomp);
}
subsys_initcall(deflate_mod_init);
diff --git a/crypto/drbg.c b/crypto/drbg.c
index ff4ebbc68e..e01f8c7769 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1698,7 +1698,7 @@ static int drbg_init_hash_kernel(struct drbg_state *drbg)
sdesc->shash.tfm = tfm;
drbg->priv_data = sdesc;
- return crypto_shash_alignmask(tfm);
+ return 0;
}
static int drbg_fini_hash_kernel(struct drbg_state *drbg)
diff --git a/crypto/ecb.c b/crypto/ecb.c
index 71fbb0543d..cc7625d1a4 100644
--- a/crypto/ecb.c
+++ b/crypto/ecb.c
@@ -5,75 +5,196 @@
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
*/
-#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/slab.h>
-static int crypto_ecb_crypt(struct skcipher_request *req,
- struct crypto_cipher *cipher,
+static int crypto_ecb_crypt(struct crypto_cipher *cipher, const u8 *src,
+ u8 *dst, unsigned nbytes, bool final,
void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
{
const unsigned int bsize = crypto_cipher_blocksize(cipher);
- struct skcipher_walk walk;
- unsigned int nbytes;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk.nbytes) != 0) {
- const u8 *src = walk.src.virt.addr;
- u8 *dst = walk.dst.virt.addr;
+ while (nbytes >= bsize) {
+ fn(crypto_cipher_tfm(cipher), dst, src);
- do {
- fn(crypto_cipher_tfm(cipher), dst, src);
+ src += bsize;
+ dst += bsize;
- src += bsize;
- dst += bsize;
- } while ((nbytes -= bsize) >= bsize);
-
- err = skcipher_walk_done(&walk, nbytes);
+ nbytes -= bsize;
}
- return err;
+ return nbytes && final ? -EINVAL : nbytes;
}
-static int crypto_ecb_encrypt(struct skcipher_request *req)
+static int crypto_ecb_encrypt2(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *iv, bool final)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
+ struct crypto_cipher **ctx = crypto_lskcipher_ctx(tfm);
+ struct crypto_cipher *cipher = *ctx;
- return crypto_ecb_crypt(req, cipher,
+ return crypto_ecb_crypt(cipher, src, dst, len, final,
crypto_cipher_alg(cipher)->cia_encrypt);
}
-static int crypto_ecb_decrypt(struct skcipher_request *req)
+static int crypto_ecb_decrypt2(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *iv, bool final)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
+ struct crypto_cipher **ctx = crypto_lskcipher_ctx(tfm);
+ struct crypto_cipher *cipher = *ctx;
- return crypto_ecb_crypt(req, cipher,
+ return crypto_ecb_crypt(cipher, src, dst, len, final,
crypto_cipher_alg(cipher)->cia_decrypt);
}
-static int crypto_ecb_create(struct crypto_template *tmpl, struct rtattr **tb)
+static int lskcipher_setkey_simple2(struct crypto_lskcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_cipher **ctx = crypto_lskcipher_ctx(tfm);
+ struct crypto_cipher *cipher = *ctx;
+
+ crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
+ crypto_cipher_set_flags(cipher, crypto_lskcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ return crypto_cipher_setkey(cipher, key, keylen);
+}
+
+static int lskcipher_init_tfm_simple2(struct crypto_lskcipher *tfm)
+{
+ struct lskcipher_instance *inst = lskcipher_alg_instance(tfm);
+ struct crypto_cipher **ctx = crypto_lskcipher_ctx(tfm);
+ struct crypto_cipher_spawn *spawn;
+ struct crypto_cipher *cipher;
+
+ spawn = lskcipher_instance_ctx(inst);
+ cipher = crypto_spawn_cipher(spawn);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+ *ctx = cipher;
+ return 0;
+}
+
+static void lskcipher_exit_tfm_simple2(struct crypto_lskcipher *tfm)
+{
+ struct crypto_cipher **ctx = crypto_lskcipher_ctx(tfm);
+
+ crypto_free_cipher(*ctx);
+}
+
+static void lskcipher_free_instance_simple2(struct lskcipher_instance *inst)
+{
+ crypto_drop_cipher(lskcipher_instance_ctx(inst));
+ kfree(inst);
+}
+
+static struct lskcipher_instance *lskcipher_alloc_instance_simple2(
+ struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct crypto_cipher_spawn *spawn;
+ struct lskcipher_instance *inst;
+ struct crypto_alg *cipher_alg;
+ u32 mask;
+ int err;
+
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_LSKCIPHER, &mask);
+ if (err)
+ return ERR_PTR(err);
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return ERR_PTR(-ENOMEM);
+ spawn = lskcipher_instance_ctx(inst);
+
+ err = crypto_grab_cipher(spawn, lskcipher_crypto_instance(inst),
+ crypto_attr_alg_name(tb[1]), 0, mask);
+ if (err)
+ goto err_free_inst;
+ cipher_alg = crypto_spawn_cipher_alg(spawn);
+
+ err = crypto_inst_setname(lskcipher_crypto_instance(inst), tmpl->name,
+ cipher_alg);
+ if (err)
+ goto err_free_inst;
+
+ inst->free = lskcipher_free_instance_simple2;
+
+ /* Default algorithm properties, can be overridden */
+ inst->alg.co.base.cra_blocksize = cipher_alg->cra_blocksize;
+ inst->alg.co.base.cra_alignmask = cipher_alg->cra_alignmask;
+ inst->alg.co.base.cra_priority = cipher_alg->cra_priority;
+ inst->alg.co.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
+ inst->alg.co.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
+ inst->alg.co.ivsize = cipher_alg->cra_blocksize;
+
+ /* Use struct crypto_cipher * by default, can be overridden */
+ inst->alg.co.base.cra_ctxsize = sizeof(struct crypto_cipher *);
+ inst->alg.setkey = lskcipher_setkey_simple2;
+ inst->alg.init = lskcipher_init_tfm_simple2;
+ inst->alg.exit = lskcipher_exit_tfm_simple2;
+
+ return inst;
+
+err_free_inst:
+ lskcipher_free_instance_simple2(inst);
+ return ERR_PTR(err);
+}
+
+static int crypto_ecb_create2(struct crypto_template *tmpl, struct rtattr **tb)
{
- struct skcipher_instance *inst;
+ struct lskcipher_instance *inst;
int err;
- inst = skcipher_alloc_instance_simple(tmpl, tb);
+ inst = lskcipher_alloc_instance_simple2(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);
- inst->alg.ivsize = 0; /* ECB mode doesn't take an IV */
+ /* ECB mode doesn't take an IV */
+ inst->alg.co.ivsize = 0;
+
+ inst->alg.encrypt = crypto_ecb_encrypt2;
+ inst->alg.decrypt = crypto_ecb_decrypt2;
+
+ err = lskcipher_register_instance(tmpl, inst);
+ if (err)
+ inst->free(inst);
+
+ return err;
+}
+
+static int crypto_ecb_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct crypto_lskcipher_spawn *spawn;
+ struct lskcipher_alg *cipher_alg;
+ struct lskcipher_instance *inst;
+ int err;
+
+ inst = lskcipher_alloc_instance_simple(tmpl, tb);
+ if (IS_ERR(inst)) {
+ err = crypto_ecb_create2(tmpl, tb);
+ return err;
+ }
+
+ spawn = lskcipher_instance_ctx(inst);
+ cipher_alg = crypto_lskcipher_spawn_alg(spawn);
+
+ /* ECB mode doesn't take an IV */
+ inst->alg.co.ivsize = 0;
+ if (cipher_alg->co.ivsize)
+ return -EINVAL;
- inst->alg.encrypt = crypto_ecb_encrypt;
- inst->alg.decrypt = crypto_ecb_decrypt;
+ inst->alg.co.base.cra_ctxsize = cipher_alg->co.base.cra_ctxsize;
+ inst->alg.setkey = cipher_alg->setkey;
+ inst->alg.encrypt = cipher_alg->encrypt;
+ inst->alg.decrypt = cipher_alg->decrypt;
+ inst->alg.init = cipher_alg->init;
+ inst->alg.exit = cipher_alg->exit;
- err = skcipher_register_instance(tmpl, inst);
+ err = lskcipher_register_instance(tmpl, inst);
if (err)
inst->free(inst);
@@ -102,3 +223,4 @@ module_exit(crypto_ecb_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ECB block cipher mode of operation");
MODULE_ALIAS_CRYPTO("ecb");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
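After this rework "ecb(<cipher>)" is always instantiated as an lskcipher: if the inner algorithm is itself an lskcipher its hooks are reused as-is, otherwise the *2 helpers wrap a classic crypto_cipher. Either way the user-visible behaviour is the same; a hedged usage sketch, assuming the crypto_alloc_lskcipher()/crypto_lskcipher_encrypt() API added by this series (buffer and key names are illustrative):

#include <crypto/skcipher.h>
#include <linux/err.h>

static int ecb_encrypt_example(u8 *buf, unsigned int len,
			       const u8 *key, unsigned int keylen)
{
	struct crypto_lskcipher *tfm;
	int err;

	tfm = crypto_alloc_lskcipher("ecb(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_lskcipher_setkey(tfm, key, keylen);
	if (!err)
		/* no IV for ECB; a length that is not a block multiple fails */
		err = crypto_lskcipher_encrypt(tfm, buf, buf, len, NULL);

	crypto_free_lskcipher(tfm);
	return err;
}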
diff --git a/crypto/essiv.c b/crypto/essiv.c
index f7d4ef4837..e63fc6442e 100644
--- a/crypto/essiv.c
+++ b/crypto/essiv.c
@@ -442,6 +442,7 @@ out:
static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
{
+ struct skcipher_alg_common *skcipher_alg = NULL;
struct crypto_attr_type *algt;
const char *inner_cipher_name;
const char *shash_name;
@@ -450,7 +451,6 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
struct crypto_instance *inst;
struct crypto_alg *base, *block_base;
struct essiv_instance_ctx *ictx;
- struct skcipher_alg *skcipher_alg = NULL;
struct aead_alg *aead_alg = NULL;
struct crypto_alg *_hash_alg;
struct shash_alg *hash_alg;
@@ -475,7 +475,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
mask = crypto_algt_inherited_mask(algt);
switch (type) {
- case CRYPTO_ALG_TYPE_SKCIPHER:
+ case CRYPTO_ALG_TYPE_LSKCIPHER:
skcipher_inst = kzalloc(sizeof(*skcipher_inst) +
sizeof(*ictx), GFP_KERNEL);
if (!skcipher_inst)
@@ -489,9 +489,10 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
inner_cipher_name, 0, mask);
if (err)
goto out_free_inst;
- skcipher_alg = crypto_spawn_skcipher_alg(&ictx->u.skcipher_spawn);
+ skcipher_alg = crypto_spawn_skcipher_alg_common(
+ &ictx->u.skcipher_spawn);
block_base = &skcipher_alg->base;
- ivsize = crypto_skcipher_alg_ivsize(skcipher_alg);
+ ivsize = skcipher_alg->ivsize;
break;
case CRYPTO_ALG_TYPE_AEAD:
@@ -574,18 +575,17 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
base->cra_alignmask = block_base->cra_alignmask;
base->cra_priority = block_base->cra_priority;
- if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
+ if (type == CRYPTO_ALG_TYPE_LSKCIPHER) {
skcipher_inst->alg.setkey = essiv_skcipher_setkey;
skcipher_inst->alg.encrypt = essiv_skcipher_encrypt;
skcipher_inst->alg.decrypt = essiv_skcipher_decrypt;
skcipher_inst->alg.init = essiv_skcipher_init_tfm;
skcipher_inst->alg.exit = essiv_skcipher_exit_tfm;
- skcipher_inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(skcipher_alg);
- skcipher_inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(skcipher_alg);
+ skcipher_inst->alg.min_keysize = skcipher_alg->min_keysize;
+ skcipher_inst->alg.max_keysize = skcipher_alg->max_keysize;
skcipher_inst->alg.ivsize = ivsize;
- skcipher_inst->alg.chunksize = crypto_skcipher_alg_chunksize(skcipher_alg);
- skcipher_inst->alg.walksize = crypto_skcipher_alg_walksize(skcipher_alg);
+ skcipher_inst->alg.chunksize = skcipher_alg->chunksize;
skcipher_inst->free = essiv_skcipher_free_instance;
@@ -616,7 +616,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
out_free_hash:
crypto_mod_put(_hash_alg);
out_drop_skcipher:
- if (type == CRYPTO_ALG_TYPE_SKCIPHER)
+ if (type == CRYPTO_ALG_TYPE_LSKCIPHER)
crypto_drop_skcipher(&ictx->u.skcipher_spawn);
else
crypto_drop_aead(&ictx->u.aead_spawn);
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 4ba624450c..84f7c23d14 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -576,10 +576,10 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
const char *ctr_name,
const char *ghash_name)
{
+ struct skcipher_alg_common *ctr;
u32 mask;
struct aead_instance *inst;
struct gcm_instance_ctx *ctx;
- struct skcipher_alg *ctr;
struct hash_alg_common *ghash;
int err;
@@ -607,13 +607,12 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
ctr_name, 0, mask);
if (err)
goto err_free_inst;
- ctr = crypto_spawn_skcipher_alg(&ctx->ctr);
+ ctr = crypto_spawn_skcipher_alg_common(&ctx->ctr);
/* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
err = -EINVAL;
if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
- crypto_skcipher_alg_ivsize(ctr) != 16 ||
- ctr->base.cra_blocksize != 1)
+ ctr->ivsize != 16 || ctr->base.cra_blocksize != 1)
goto err_free_inst;
err = -ENAMETOOLONG;
@@ -630,11 +629,10 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
inst->alg.base.cra_priority = (ghash->base.cra_priority +
ctr->base.cra_priority) / 2;
inst->alg.base.cra_blocksize = 1;
- inst->alg.base.cra_alignmask = ghash->base.cra_alignmask |
- ctr->base.cra_alignmask;
+ inst->alg.base.cra_alignmask = ctr->base.cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_gcm_ctx);
inst->alg.ivsize = GCM_AES_IV_SIZE;
- inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr);
+ inst->alg.chunksize = ctr->chunksize;
inst->alg.maxauthsize = 16;
inst->alg.init = crypto_gcm_init_tfm;
inst->alg.exit = crypto_gcm_exit_tfm;
diff --git a/crypto/hash.h b/crypto/hash.h
index 7e6c1a9486..93f6ba0df2 100644
--- a/crypto/hash.h
+++ b/crypto/hash.h
@@ -12,6 +12,16 @@
#include "internal.h"
+static inline struct crypto_istat_hash *hash_get_stat(
+ struct hash_alg_common *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ return &alg->stat;
+#else
+ return NULL;
+#endif
+}
+
static inline int crypto_hash_report_stat(struct sk_buff *skb,
struct crypto_alg *alg,
const char *type)
@@ -31,9 +41,7 @@ static inline int crypto_hash_report_stat(struct sk_buff *skb,
return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash);
}
-int crypto_init_shash_ops_async(struct crypto_tfm *tfm);
-struct crypto_ahash *crypto_clone_shash_ops_async(struct crypto_ahash *nhash,
- struct crypto_ahash *hash);
+extern const struct crypto_type crypto_shash_type;
int hash_prepare_alg(struct hash_alg_common *alg);
diff --git a/crypto/hash_info.c b/crypto/hash_info.c
index a49ff96bde..9a467638c9 100644
--- a/crypto/hash_info.c
+++ b/crypto/hash_info.c
@@ -29,6 +29,9 @@ const char *const hash_algo_name[HASH_ALGO__LAST] = {
[HASH_ALGO_SM3_256] = "sm3",
[HASH_ALGO_STREEBOG_256] = "streebog256",
[HASH_ALGO_STREEBOG_512] = "streebog512",
+ [HASH_ALGO_SHA3_256] = "sha3-256",
+ [HASH_ALGO_SHA3_384] = "sha3-384",
+ [HASH_ALGO_SHA3_512] = "sha3-512",
};
EXPORT_SYMBOL_GPL(hash_algo_name);
@@ -53,5 +56,8 @@ const int hash_digest_size[HASH_ALGO__LAST] = {
[HASH_ALGO_SM3_256] = SM3256_DIGEST_SIZE,
[HASH_ALGO_STREEBOG_256] = STREEBOG256_DIGEST_SIZE,
[HASH_ALGO_STREEBOG_512] = STREEBOG512_DIGEST_SIZE,
+ [HASH_ALGO_SHA3_256] = SHA3_256_DIGEST_SIZE,
+ [HASH_ALGO_SHA3_384] = SHA3_384_DIGEST_SIZE,
+ [HASH_ALGO_SHA3_512] = SHA3_512_DIGEST_SIZE,
};
EXPORT_SYMBOL_GPL(hash_digest_size);
diff --git a/crypto/hctr2.c b/crypto/hctr2.c
index 6f4c1884d0..87e7547ad1 100644
--- a/crypto/hctr2.c
+++ b/crypto/hctr2.c
@@ -406,10 +406,10 @@ static int hctr2_create_common(struct crypto_template *tmpl,
const char *xctr_name,
const char *polyval_name)
{
+ struct skcipher_alg_common *xctr_alg;
u32 mask;
struct skcipher_instance *inst;
struct hctr2_instance_ctx *ictx;
- struct skcipher_alg *xctr_alg;
struct crypto_alg *blockcipher_alg;
struct shash_alg *polyval_alg;
char blockcipher_name[CRYPTO_MAX_ALG_NAME];
@@ -431,7 +431,7 @@ static int hctr2_create_common(struct crypto_template *tmpl,
xctr_name, 0, mask);
if (err)
goto err_free_inst;
- xctr_alg = crypto_spawn_skcipher_alg(&ictx->xctr_spawn);
+ xctr_alg = crypto_spawn_skcipher_alg_common(&ictx->xctr_spawn);
err = -EINVAL;
if (strncmp(xctr_alg->base.cra_name, "xctr(", 5))
@@ -485,8 +485,7 @@ static int hctr2_create_common(struct crypto_template *tmpl,
inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE;
inst->alg.base.cra_ctxsize = sizeof(struct hctr2_tfm_ctx) +
polyval_alg->statesize * 2;
- inst->alg.base.cra_alignmask = xctr_alg->base.cra_alignmask |
- polyval_alg->base.cra_alignmask;
+ inst->alg.base.cra_alignmask = xctr_alg->base.cra_alignmask;
/*
* The hash function is called twice, so it is weighted higher than the
* xctr and blockcipher.
@@ -500,8 +499,8 @@ static int hctr2_create_common(struct crypto_template *tmpl,
inst->alg.decrypt = hctr2_decrypt;
inst->alg.init = hctr2_init_tfm;
inst->alg.exit = hctr2_exit_tfm;
- inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(xctr_alg);
- inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(xctr_alg);
+ inst->alg.min_keysize = xctr_alg->min_keysize;
+ inst->alg.max_keysize = xctr_alg->max_keysize;
inst->alg.ivsize = TWEAK_SIZE;
inst->free = hctr2_free_instance;
diff --git a/crypto/hmac.c b/crypto/hmac.c
index ea93f4c55f..7cec25ff98 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -24,31 +24,20 @@
struct hmac_ctx {
struct crypto_shash *hash;
+ /* Contains 'u8 ipad[statesize];', then 'u8 opad[statesize];' */
+ u8 pads[];
};
-static inline void *align_ptr(void *p, unsigned int align)
-{
- return (void *)ALIGN((unsigned long)p, align);
-}
-
-static inline struct hmac_ctx *hmac_ctx(struct crypto_shash *tfm)
-{
- return align_ptr(crypto_shash_ctx_aligned(tfm) +
- crypto_shash_statesize(tfm) * 2,
- crypto_tfm_ctx_alignment());
-}
-
static int hmac_setkey(struct crypto_shash *parent,
const u8 *inkey, unsigned int keylen)
{
int bs = crypto_shash_blocksize(parent);
int ds = crypto_shash_digestsize(parent);
int ss = crypto_shash_statesize(parent);
- char *ipad = crypto_shash_ctx_aligned(parent);
- char *opad = ipad + ss;
- struct hmac_ctx *ctx = align_ptr(opad + ss,
- crypto_tfm_ctx_alignment());
- struct crypto_shash *hash = ctx->hash;
+ struct hmac_ctx *tctx = crypto_shash_ctx(parent);
+ struct crypto_shash *hash = tctx->hash;
+ u8 *ipad = &tctx->pads[0];
+ u8 *opad = &tctx->pads[ss];
SHASH_DESC_ON_STACK(shash, hash);
unsigned int i;
@@ -94,16 +83,18 @@ static int hmac_export(struct shash_desc *pdesc, void *out)
static int hmac_import(struct shash_desc *pdesc, const void *in)
{
struct shash_desc *desc = shash_desc_ctx(pdesc);
- struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm);
+ const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm);
- desc->tfm = ctx->hash;
+ desc->tfm = tctx->hash;
return crypto_shash_import(desc, in);
}
static int hmac_init(struct shash_desc *pdesc)
{
- return hmac_import(pdesc, crypto_shash_ctx_aligned(pdesc->tfm));
+ const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm);
+
+ return hmac_import(pdesc, &tctx->pads[0]);
}
static int hmac_update(struct shash_desc *pdesc,
@@ -119,7 +110,8 @@ static int hmac_final(struct shash_desc *pdesc, u8 *out)
struct crypto_shash *parent = pdesc->tfm;
int ds = crypto_shash_digestsize(parent);
int ss = crypto_shash_statesize(parent);
- char *opad = crypto_shash_ctx_aligned(parent) + ss;
+ const struct hmac_ctx *tctx = crypto_shash_ctx(parent);
+ const u8 *opad = &tctx->pads[ss];
struct shash_desc *desc = shash_desc_ctx(pdesc);
return crypto_shash_final(desc, out) ?:
@@ -134,7 +126,8 @@ static int hmac_finup(struct shash_desc *pdesc, const u8 *data,
struct crypto_shash *parent = pdesc->tfm;
int ds = crypto_shash_digestsize(parent);
int ss = crypto_shash_statesize(parent);
- char *opad = crypto_shash_ctx_aligned(parent) + ss;
+ const struct hmac_ctx *tctx = crypto_shash_ctx(parent);
+ const u8 *opad = &tctx->pads[ss];
struct shash_desc *desc = shash_desc_ctx(pdesc);
return crypto_shash_finup(desc, data, nbytes, out) ?:
@@ -147,7 +140,7 @@ static int hmac_init_tfm(struct crypto_shash *parent)
struct crypto_shash *hash;
struct shash_instance *inst = shash_alg_instance(parent);
struct crypto_shash_spawn *spawn = shash_instance_ctx(inst);
- struct hmac_ctx *ctx = hmac_ctx(parent);
+ struct hmac_ctx *tctx = crypto_shash_ctx(parent);
hash = crypto_spawn_shash(spawn);
if (IS_ERR(hash))
@@ -156,14 +149,14 @@ static int hmac_init_tfm(struct crypto_shash *parent)
parent->descsize = sizeof(struct shash_desc) +
crypto_shash_descsize(hash);
- ctx->hash = hash;
+ tctx->hash = hash;
return 0;
}
static int hmac_clone_tfm(struct crypto_shash *dst, struct crypto_shash *src)
{
- struct hmac_ctx *sctx = hmac_ctx(src);
- struct hmac_ctx *dctx = hmac_ctx(dst);
+ struct hmac_ctx *sctx = crypto_shash_ctx(src);
+ struct hmac_ctx *dctx = crypto_shash_ctx(dst);
struct crypto_shash *hash;
hash = crypto_clone_shash(sctx->hash);
@@ -176,9 +169,9 @@ static int hmac_clone_tfm(struct crypto_shash *dst, struct crypto_shash *src)
static void hmac_exit_tfm(struct crypto_shash *parent)
{
- struct hmac_ctx *ctx = hmac_ctx(parent);
+ struct hmac_ctx *tctx = crypto_shash_ctx(parent);
- crypto_free_shash(ctx->hash);
+ crypto_free_shash(tctx->hash);
}
static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
@@ -225,15 +218,10 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.base.cra_priority = alg->cra_priority;
inst->alg.base.cra_blocksize = alg->cra_blocksize;
- inst->alg.base.cra_alignmask = alg->cra_alignmask;
+ inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) + (ss * 2);
- ss = ALIGN(ss, alg->cra_alignmask + 1);
inst->alg.digestsize = ds;
inst->alg.statesize = ss;
-
- inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) +
- ALIGN(ss * 2, crypto_tfm_ctx_alignment());
-
inst->alg.init = hmac_init;
inst->alg.update = hmac_update;
inst->alg.final = hmac_final;
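The HMAC transform context likewise becomes a plain struct with the precomputed ipad/opad hash states in a trailing array, indexed by the underlying shash's statesize: hmac_init() imports the state at pads[0], while final()/finup() import the one at pads[ss]. A sketch of the layout (the struct name is illustrative):

#include <crypto/hash.h>

/* cra_ctxsize = sizeof(struct hmac_ctx) + 2 * ss, ss = statesize of the hash */
struct hmac_ctx_example {
	struct crypto_shash *hash;
	u8 pads[];		/* pads[0..ss-1] = ipad state, pads[ss..2*ss-1] = opad state */
};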
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
index 7d1463a156..76edbf8af0 100644
--- a/crypto/jitterentropy-kcapi.c
+++ b/crypto/jitterentropy-kcapi.c
@@ -54,6 +54,17 @@
* Helper function
***************************************************************************/
+void *jent_kvzalloc(unsigned int len)
+{
+ return kvzalloc(len, GFP_KERNEL);
+}
+
+void jent_kvzfree(void *ptr, unsigned int len)
+{
+ memzero_explicit(ptr, len);
+ kvfree(ptr);
+}
+
void *jent_zalloc(unsigned int len)
{
return kzalloc(len, GFP_KERNEL);
@@ -245,7 +256,9 @@ static int jent_kcapi_init(struct crypto_tfm *tfm)
crypto_shash_init(sdesc);
rng->sdesc = sdesc;
- rng->entropy_collector = jent_entropy_collector_alloc(1, 0, sdesc);
+ rng->entropy_collector =
+ jent_entropy_collector_alloc(CONFIG_CRYPTO_JITTERENTROPY_OSR, 0,
+ sdesc);
if (!rng->entropy_collector) {
ret = -ENOMEM;
goto err;
@@ -334,7 +347,7 @@ static int __init jent_mod_init(void)
desc->tfm = tfm;
crypto_shash_init(desc);
- ret = jent_entropy_init(desc);
+ ret = jent_entropy_init(CONFIG_CRYPTO_JITTERENTROPY_OSR, 0, desc, NULL);
shash_desc_zero(desc);
crypto_free_shash(tfm);
if (ret) {
diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c
index fe9c233ec7..26a9048bc8 100644
--- a/crypto/jitterentropy.c
+++ b/crypto/jitterentropy.c
@@ -72,11 +72,13 @@ struct rand_data {
__u64 prev_time; /* SENSITIVE Previous time stamp */
__u64 last_delta; /* SENSITIVE stuck test */
__s64 last_delta2; /* SENSITIVE stuck test */
+
+ unsigned int flags; /* Flags used to initialize */
unsigned int osr; /* Oversample rate */
-#define JENT_MEMORY_BLOCKS 64
-#define JENT_MEMORY_BLOCKSIZE 32
#define JENT_MEMORY_ACCESSLOOPS 128
-#define JENT_MEMORY_SIZE (JENT_MEMORY_BLOCKS*JENT_MEMORY_BLOCKSIZE)
+#define JENT_MEMORY_SIZE \
+ (CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKS * \
+ CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE)
unsigned char *mem; /* Memory access location with size of
* memblocks * memblocksize */
unsigned int memlocation; /* Pointer to byte in *mem */
@@ -88,16 +90,9 @@ struct rand_data {
/* Repetition Count Test */
unsigned int rct_count; /* Number of stuck values */
- /* Intermittent health test failure threshold of 2^-30 */
- /* From an SP800-90B perspective, this RCT cutoff value is equal to 31. */
- /* However, our RCT implementation starts at 1, so we subtract 1 here. */
-#define JENT_RCT_CUTOFF (31 - 1) /* Taken from SP800-90B sec 4.4.1 */
-#define JENT_APT_CUTOFF 325 /* Taken from SP800-90B sec 4.4.2 */
- /* Permanent health test failure threshold of 2^-60 */
- /* From an SP800-90B perspective, this RCT cutoff value is equal to 61. */
- /* However, our RCT implementation starts at 1, so we subtract 1 here. */
-#define JENT_RCT_CUTOFF_PERMANENT (61 - 1)
-#define JENT_APT_CUTOFF_PERMANENT 355
+ /* Adaptive Proportion Test cutoff values */
+ unsigned int apt_cutoff; /* Intermittent health test failure */
+ unsigned int apt_cutoff_permanent; /* Permanent health test failure */
#define JENT_APT_WINDOW_SIZE 512 /* Data window size */
/* LSB of time stamp to process */
#define JENT_APT_LSB 16
@@ -105,6 +100,8 @@ struct rand_data {
unsigned int apt_observations; /* Number of collected observations */
unsigned int apt_count; /* APT counter */
unsigned int apt_base; /* APT base reference */
+ unsigned int health_failure; /* Record health failure */
+
unsigned int apt_base_set:1; /* APT base reference set? */
};
@@ -122,6 +119,16 @@ struct rand_data {
* zero). */
#define JENT_ESTUCK 8 /* Too many stuck results during init. */
#define JENT_EHEALTH 9 /* Health test failed during initialization */
+#define JENT_ERCT 10 /* RCT failed during initialization */
+#define JENT_EHASH 11 /* Hash self test failed */
+#define JENT_EMEM 12 /* Can't allocate memory for initialization */
+
+#define JENT_RCT_FAILURE 1 /* Failure in RCT health test. */
+#define JENT_APT_FAILURE 2 /* Failure in APT health test. */
+#define JENT_PERMANENT_FAILURE_SHIFT 16
+#define JENT_PERMANENT_FAILURE(x) (x << JENT_PERMANENT_FAILURE_SHIFT)
+#define JENT_RCT_FAILURE_PERMANENT JENT_PERMANENT_FAILURE(JENT_RCT_FAILURE)
+#define JENT_APT_FAILURE_PERMANENT JENT_PERMANENT_FAILURE(JENT_APT_FAILURE)
/*
* The output n bits can receive more than n bits of min entropy, of course,
@@ -148,6 +155,48 @@ struct rand_data {
***************************************************************************/
/*
+ * See the SP 800-90B comment #10b for the corrected cutoff for the SP 800-90B
+ * APT.
+ * http://www.untruth.org/~josh/sp80090b/UL%20SP800-90B-final%20comments%20v1.9%2020191212.pdf
+ * In the syntax of R, this is C = 2 + qbinom(1 − 2^(−30), 511, 2^(-1/osr)).
+ * (The original formula wasn't correct because the first symbol must
+ * necessarily have been observed, so there is no chance of observing 0 of these
+ * symbols.)
+ *
+ * For the alpha < 2^-53, R cannot be used as it uses a float data type without
+ * arbitrary precision. A SageMath script is used to calculate those cutoff
+ * values.
+ *
+ * For any value above 14, this yields the maximal allowable value of 512
+ * (by FIPS 140-2 IG 7.19 Resolution # 16, we cannot choose a cutoff value that
+ * renders the test unable to fail).
+ */
+static const unsigned int jent_apt_cutoff_lookup[15] = {
+ 325, 422, 459, 477, 488, 494, 499, 502,
+ 505, 507, 508, 509, 510, 511, 512 };
+static const unsigned int jent_apt_cutoff_permanent_lookup[15] = {
+ 355, 447, 479, 494, 502, 507, 510, 512,
+ 512, 512, 512, 512, 512, 512, 512 };
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+static void jent_apt_init(struct rand_data *ec, unsigned int osr)
+{
+ /*
+ * Establish the apt_cutoff based on the presumed entropy rate of
+ * 1/osr.
+ */
+ if (osr >= ARRAY_SIZE(jent_apt_cutoff_lookup)) {
+ ec->apt_cutoff = jent_apt_cutoff_lookup[
+ ARRAY_SIZE(jent_apt_cutoff_lookup) - 1];
+ ec->apt_cutoff_permanent = jent_apt_cutoff_permanent_lookup[
+ ARRAY_SIZE(jent_apt_cutoff_permanent_lookup) - 1];
+ } else {
+ ec->apt_cutoff = jent_apt_cutoff_lookup[osr - 1];
+ ec->apt_cutoff_permanent =
+ jent_apt_cutoff_permanent_lookup[osr - 1];
+ }
+}
+/*
* Reset the APT counter
*
* @ec [in] Reference to entropy collector
@@ -175,26 +224,22 @@ static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked)
return;
}
- if (delta_masked == ec->apt_base)
+ if (delta_masked == ec->apt_base) {
ec->apt_count++;
+ /* Note, ec->apt_count starts with one. */
+ if (ec->apt_count >= ec->apt_cutoff_permanent)
+ ec->health_failure |= JENT_APT_FAILURE_PERMANENT;
+ else if (ec->apt_count >= ec->apt_cutoff)
+ ec->health_failure |= JENT_APT_FAILURE;
+ }
+
ec->apt_observations++;
if (ec->apt_observations >= JENT_APT_WINDOW_SIZE)
jent_apt_reset(ec, delta_masked);
}
-/* APT health test failure detection */
-static int jent_apt_permanent_failure(struct rand_data *ec)
-{
- return (ec->apt_count >= JENT_APT_CUTOFF_PERMANENT) ? 1 : 0;
-}
-
-static int jent_apt_failure(struct rand_data *ec)
-{
- return (ec->apt_count >= JENT_APT_CUTOFF) ? 1 : 0;
-}
-
/***************************************************************************
* Stuck Test and its use as Repetition Count Test
*
@@ -221,6 +266,30 @@ static void jent_rct_insert(struct rand_data *ec, int stuck)
{
if (stuck) {
ec->rct_count++;
+
+ /*
+ * The cutoff value is based on the following consideration:
+ * alpha = 2^-30 or 2^-60 as recommended in SP800-90B.
+ * In addition, we require an entropy value H of 1/osr as this
+ * is the minimum entropy required to provide full entropy.
+ * Note, we collect (DATA_SIZE_BITS + ENTROPY_SAFETY_FACTOR)*osr
+ * deltas for inserting them into the entropy pool which should
+ * then have (close to) DATA_SIZE_BITS bits of entropy in the
+ * conditioned output.
+ *
+ * Note, ec->rct_count (which equals to value B in the pseudo
+ * code of SP800-90B section 4.4.1) starts with zero. Hence
+ * we need to subtract one from the cutoff value as calculated
+ * following SP800-90B. Thus C = ceil(-log_2(alpha)/H) = 30*osr
+ * or 60*osr.
+ */
+ if ((unsigned int)ec->rct_count >= (60 * ec->osr)) {
+ ec->rct_count = -1;
+ ec->health_failure |= JENT_RCT_FAILURE_PERMANENT;
+ } else if ((unsigned int)ec->rct_count >= (30 * ec->osr)) {
+ ec->rct_count = -1;
+ ec->health_failure |= JENT_RCT_FAILURE;
+ }
} else {
/* Reset RCT */
ec->rct_count = 0;
@@ -275,26 +344,25 @@ static int jent_stuck(struct rand_data *ec, __u64 current_delta)
return 0;
}
-/* RCT health test failure detection */
-static int jent_rct_permanent_failure(struct rand_data *ec)
-{
- return (ec->rct_count >= JENT_RCT_CUTOFF_PERMANENT) ? 1 : 0;
-}
-
-static int jent_rct_failure(struct rand_data *ec)
-{
- return (ec->rct_count >= JENT_RCT_CUTOFF) ? 1 : 0;
-}
-
-/* Report of health test failures */
-static int jent_health_failure(struct rand_data *ec)
+/*
+ * Report any health test failures
+ *
+ * @ec [in] Reference to entropy collector
+ *
+ * @return a bitmask indicating which tests failed
+ * 0 No health test failure
+ * 1 RCT failure
+ * 2 APT failure
+ * 1<<JENT_PERMANENT_FAILURE_SHIFT RCT permanent failure
+ * 2<<JENT_PERMANENT_FAILURE_SHIFT APT permanent failure
+ */
+static unsigned int jent_health_failure(struct rand_data *ec)
{
- return jent_rct_failure(ec) | jent_apt_failure(ec);
-}
+ /* Test is only enabled in FIPS mode */
+ if (!fips_enabled)
+ return 0;
-static int jent_permanent_health_failure(struct rand_data *ec)
-{
- return jent_rct_permanent_failure(ec) | jent_apt_permanent_failure(ec);
+ return ec->health_failure;
}
/***************************************************************************
@@ -448,7 +516,7 @@ static void jent_memaccess(struct rand_data *ec, __u64 loop_cnt)
*
* @return result of stuck test
*/
-static int jent_measure_jitter(struct rand_data *ec)
+static int jent_measure_jitter(struct rand_data *ec, __u64 *ret_current_delta)
{
__u64 time = 0;
__u64 current_delta = 0;
@@ -472,6 +540,10 @@ static int jent_measure_jitter(struct rand_data *ec)
if (jent_condition_data(ec, current_delta, stuck))
stuck = 1;
+ /* return the raw entropy value */
+ if (ret_current_delta)
+ *ret_current_delta = current_delta;
+
return stuck;
}
@@ -489,11 +561,11 @@ static void jent_gen_entropy(struct rand_data *ec)
safety_factor = JENT_ENTROPY_SAFETY_FACTOR;
/* priming of the ->prev_time value */
- jent_measure_jitter(ec);
+ jent_measure_jitter(ec, NULL);
while (!jent_health_failure(ec)) {
/* If a stuck measurement is received, repeat measurement */
- if (jent_measure_jitter(ec))
+ if (jent_measure_jitter(ec, NULL))
continue;
/*
@@ -537,11 +609,12 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data,
return -1;
while (len > 0) {
- unsigned int tocopy;
+ unsigned int tocopy, health_test_result;
jent_gen_entropy(ec);
- if (jent_permanent_health_failure(ec)) {
+ health_test_result = jent_health_failure(ec);
+ if (health_test_result > JENT_PERMANENT_FAILURE_SHIFT) {
/*
* At this point, the Jitter RNG instance is considered
* as a failed instance. There is no rerun of the
@@ -549,13 +622,18 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data,
* is assumed to not further use this instance.
*/
return -3;
- } else if (jent_health_failure(ec)) {
+ } else if (health_test_result) {
/*
* Perform startup health tests and return permanent
* error if it fails.
*/
- if (jent_entropy_init(ec->hash_state))
+ if (jent_entropy_init(0, 0, NULL, ec)) {
+ /* Mark the permanent error */
+ ec->health_failure &=
+ JENT_RCT_FAILURE_PERMANENT |
+ JENT_APT_FAILURE_PERMANENT;
return -3;
+ }
return -2;
}
@@ -592,23 +670,29 @@ struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
/* Allocate memory for adding variations based on memory
* access
*/
- entropy_collector->mem = jent_zalloc(JENT_MEMORY_SIZE);
+ entropy_collector->mem = jent_kvzalloc(JENT_MEMORY_SIZE);
if (!entropy_collector->mem) {
jent_zfree(entropy_collector);
return NULL;
}
- entropy_collector->memblocksize = JENT_MEMORY_BLOCKSIZE;
- entropy_collector->memblocks = JENT_MEMORY_BLOCKS;
+ entropy_collector->memblocksize =
+ CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE;
+ entropy_collector->memblocks =
+ CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKS;
entropy_collector->memaccessloops = JENT_MEMORY_ACCESSLOOPS;
}
/* verify and set the oversampling rate */
if (osr == 0)
- osr = 1; /* minimum sampling rate is 1 */
+ osr = 1; /* H_submitter = 1 / osr */
entropy_collector->osr = osr;
+ entropy_collector->flags = flags;
entropy_collector->hash_state = hash_state;
+ /* Initialize the APT */
+ jent_apt_init(entropy_collector, osr);
+
/* fill the data pad with non-zero values */
jent_gen_entropy(entropy_collector);
@@ -617,25 +701,39 @@ struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
void jent_entropy_collector_free(struct rand_data *entropy_collector)
{
- jent_zfree(entropy_collector->mem);
+ jent_kvzfree(entropy_collector->mem, JENT_MEMORY_SIZE);
entropy_collector->mem = NULL;
jent_zfree(entropy_collector);
}
-int jent_entropy_init(void *hash_state)
+int jent_entropy_init(unsigned int osr, unsigned int flags, void *hash_state,
+ struct rand_data *p_ec)
{
- int i;
- __u64 delta_sum = 0;
- __u64 old_delta = 0;
- unsigned int nonstuck = 0;
- int time_backwards = 0;
- int count_mod = 0;
- int count_stuck = 0;
- struct rand_data ec = { 0 };
-
- /* Required for RCT */
- ec.osr = 1;
- ec.hash_state = hash_state;
+ /*
+ * If caller provides an allocated ec, reuse it which implies that the
+ * health test entropy data is used to further still the available
+ * entropy pool.
+ */
+ struct rand_data *ec = p_ec;
+ int i, time_backwards = 0, ret = 0, ec_free = 0;
+ unsigned int health_test_result;
+
+ if (!ec) {
+ ec = jent_entropy_collector_alloc(osr, flags, hash_state);
+ if (!ec)
+ return JENT_EMEM;
+ ec_free = 1;
+ } else {
+ /* Reset the APT */
+ jent_apt_reset(ec, 0);
+ /* Ensure that a new APT base is obtained */
+ ec->apt_base_set = 0;
+ /* Reset the RCT */
+ ec->rct_count = 0;
+ /* Reset intermittent, leave permanent health test result */
+ ec->health_failure &= (~JENT_RCT_FAILURE);
+ ec->health_failure &= (~JENT_APT_FAILURE);
+ }
/* We could perform statistical tests here, but the problem is
* that we only have a few loop counts to do testing. These
@@ -664,31 +762,28 @@ int jent_entropy_init(void *hash_state)
#define TESTLOOPCOUNT 1024
#define CLEARCACHE 100
for (i = 0; (TESTLOOPCOUNT + CLEARCACHE) > i; i++) {
- __u64 time = 0;
- __u64 time2 = 0;
- __u64 delta = 0;
- unsigned int lowdelta = 0;
- int stuck;
+ __u64 start_time = 0, end_time = 0, delta = 0;
/* Invoke core entropy collection logic */
- jent_get_nstime(&time);
- ec.prev_time = time;
- jent_condition_data(&ec, time, 0);
- jent_get_nstime(&time2);
+ jent_measure_jitter(ec, &delta);
+ end_time = ec->prev_time;
+ start_time = ec->prev_time - delta;
/* test whether timer works */
- if (!time || !time2)
- return JENT_ENOTIME;
- delta = jent_delta(time, time2);
+ if (!start_time || !end_time) {
+ ret = JENT_ENOTIME;
+ goto out;
+ }
+
/*
* test whether timer is fine grained enough to provide
* delta even when called shortly after each other -- this
* implies that we also have a high resolution timer
*/
- if (!delta)
- return JENT_ECOARSETIME;
-
- stuck = jent_stuck(&ec, delta);
+ if (!delta || (end_time == start_time)) {
+ ret = JENT_ECOARSETIME;
+ goto out;
+ }
/*
* up to here we did not modify any variable that will be
@@ -700,49 +795,9 @@ int jent_entropy_init(void *hash_state)
if (i < CLEARCACHE)
continue;
- if (stuck)
- count_stuck++;
- else {
- nonstuck++;
-
- /*
- * Ensure that the APT succeeded.
- *
- * With the check below that count_stuck must be less
- * than 10% of the overall generated raw entropy values
- * it is guaranteed that the APT is invoked at
- * floor((TESTLOOPCOUNT * 0.9) / 64) == 14 times.
- */
- if ((nonstuck % JENT_APT_WINDOW_SIZE) == 0) {
- jent_apt_reset(&ec,
- delta & JENT_APT_WORD_MASK);
- }
- }
-
- /* Validate health test result */
- if (jent_health_failure(&ec))
- return JENT_EHEALTH;
-
/* test whether we have an increasing timer */
- if (!(time2 > time))
+ if (!(end_time > start_time))
time_backwards++;
-
- /* use 32 bit value to ensure compilation on 32 bit arches */
- lowdelta = time2 - time;
- if (!(lowdelta % 100))
- count_mod++;
-
- /*
- * ensure that we have a varying delta timer which is necessary
- * for the calculation of entropy -- perform this check
- * only after the first loop is executed as we need to prime
- * the old_data value
- */
- if (delta > old_delta)
- delta_sum += (delta - old_delta);
- else
- delta_sum += (old_delta - delta);
- old_delta = delta;
}
/*
@@ -752,31 +807,22 @@ int jent_entropy_init(void *hash_state)
* should not fail. The value of 3 should cover the NTP case being
* performed during our test run.
*/
- if (time_backwards > 3)
- return JENT_ENOMONOTONIC;
-
- /*
- * Variations of deltas of time must on average be larger
- * than 1 to ensure the entropy estimation
- * implied with 1 is preserved
- */
- if ((delta_sum) <= 1)
- return JENT_EVARVAR;
+ if (time_backwards > 3) {
+ ret = JENT_ENOMONOTONIC;
+ goto out;
+ }
- /*
- * Ensure that we have variations in the time stamp below 10 for at
- * least 10% of all checks -- on some platforms, the counter increments
- * in multiples of 100, but not always
- */
- if ((TESTLOOPCOUNT/10 * 9) < count_mod)
- return JENT_ECOARSETIME;
+ /* Did we encounter a health test failure? */
+ health_test_result = jent_health_failure(ec);
+ if (health_test_result) {
+ ret = (health_test_result & JENT_RCT_FAILURE) ? JENT_ERCT :
+ JENT_EHEALTH;
+ goto out;
+ }
- /*
- * If we have more than 90% stuck results, then this Jitter RNG is
- * likely to not work well.
- */
- if ((TESTLOOPCOUNT/10 * 9) < count_stuck)
- return JENT_ESTUCK;
+out:
+ if (ec_free)
+ jent_entropy_collector_free(ec);
- return 0;
+ return ret;
}
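
The reworked init path above folds the RCT and APT verdicts into a single health_failure bitmask and maps them onto distinct error codes. A minimal sketch of that mapping, assuming only the JENT_RCT_FAILURE/JENT_APT_FAILURE bits and the JENT_ERCT/JENT_EHEALTH codes visible in this hunk:

/*
 * Hedged sketch, not part of the patch: decode the bitmask returned by
 * jent_health_failure() the same way jent_entropy_init() does above.
 */
static int example_map_health_failure(unsigned int health)
{
	if (!health)
		return 0;

	/* An RCT failure takes precedence over a generic health failure. */
	return (health & JENT_RCT_FAILURE) ? JENT_ERCT : JENT_EHEALTH;
}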
diff --git a/crypto/jitterentropy.h b/crypto/jitterentropy.h
index 4c92176ea2..aa4728675a 100644
--- a/crypto/jitterentropy.h
+++ b/crypto/jitterentropy.h
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+extern void *jent_kvzalloc(unsigned int len);
+extern void jent_kvzfree(void *ptr, unsigned int len);
extern void *jent_zalloc(unsigned int len);
extern void jent_zfree(void *ptr);
extern void jent_get_nstime(__u64 *out);
@@ -9,7 +11,8 @@ extern int jent_hash_time(void *hash_state, __u64 time, u8 *addtl,
int jent_read_random_block(void *hash_state, char *dst, unsigned int dst_len);
struct rand_data;
-extern int jent_entropy_init(void *hash_state);
+extern int jent_entropy_init(unsigned int osr, unsigned int flags,
+ void *hash_state, struct rand_data *p_ec);
extern int jent_read_entropy(struct rand_data *ec, unsigned char *data,
unsigned int len);
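
The widened prototype lets callers choose an oversampling rate and flags, and optionally pass in a pre-allocated collector. A minimal calling sketch; osr = 1 and flags = 0 are illustrative defaults, and passing a NULL collector relies on the internal allocation and ec_free cleanup path shown in jent_entropy_init() above:

/*
 * Hedged sketch, not part of the patch: run the start-up self-test with
 * assumed defaults.  A real caller would propagate its configured osr.
 */
static int example_jent_selftest(void *hash_state)
{
	return jent_entropy_init(1, 0, hash_state, NULL);
}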
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 59260aefed..e216fbf2b7 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -299,8 +299,8 @@ static void lrw_free_instance(struct skcipher_instance *inst)
static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_skcipher_spawn *spawn;
+ struct skcipher_alg_common *alg;
struct skcipher_instance *inst;
- struct skcipher_alg *alg;
const char *cipher_name;
char ecb_name[CRYPTO_MAX_ALG_NAME];
u32 mask;
@@ -336,13 +336,13 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
if (err)
goto err_free_inst;
- alg = crypto_skcipher_spawn_alg(spawn);
+ alg = crypto_spawn_skcipher_alg_common(spawn);
err = -EINVAL;
if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
goto err_free_inst;
- if (crypto_skcipher_alg_ivsize(alg))
+ if (alg->ivsize)
goto err_free_inst;
err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
@@ -382,10 +382,8 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
(__alignof__(be128) - 1);
inst->alg.ivsize = LRW_BLOCK_SIZE;
- inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
- LRW_BLOCK_SIZE;
- inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
- LRW_BLOCK_SIZE;
+ inst->alg.min_keysize = alg->min_keysize + LRW_BLOCK_SIZE;
+ inst->alg.max_keysize = alg->max_keysize + LRW_BLOCK_SIZE;
inst->alg.base.cra_ctxsize = sizeof(struct lrw_tfm_ctx);
diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c
new file mode 100644
index 0000000000..9edc897309
--- /dev/null
+++ b/crypto/lskcipher.c
@@ -0,0 +1,634 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Linear symmetric key cipher operations.
+ *
+ * Generic encrypt/decrypt wrapper for ciphers.
+ *
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#include <linux/cryptouser.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <net/netlink.h>
+#include "skcipher.h"
+
+static inline struct crypto_lskcipher *__crypto_lskcipher_cast(
+ struct crypto_tfm *tfm)
+{
+ return container_of(tfm, struct crypto_lskcipher, base);
+}
+
+static inline struct lskcipher_alg *__crypto_lskcipher_alg(
+ struct crypto_alg *alg)
+{
+ return container_of(alg, struct lskcipher_alg, co.base);
+}
+
+static inline struct crypto_istat_cipher *lskcipher_get_stat(
+ struct lskcipher_alg *alg)
+{
+ return skcipher_get_stat_common(&alg->co);
+}
+
+static inline int crypto_lskcipher_errstat(struct lskcipher_alg *alg, int err)
+{
+ struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);
+
+ if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+ return err;
+
+ if (err)
+ atomic64_inc(&istat->err_cnt);
+
+ return err;
+}
+
+static int lskcipher_setkey_unaligned(struct crypto_lskcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
+ struct lskcipher_alg *cipher = crypto_lskcipher_alg(tfm);
+ u8 *buffer, *alignbuffer;
+ unsigned long absize;
+ int ret;
+
+ absize = keylen + alignmask;
+ buffer = kmalloc(absize, GFP_ATOMIC);
+ if (!buffer)
+ return -ENOMEM;
+
+ alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
+ memcpy(alignbuffer, key, keylen);
+ ret = cipher->setkey(tfm, alignbuffer, keylen);
+ kfree_sensitive(buffer);
+ return ret;
+}
+
+int crypto_lskcipher_setkey(struct crypto_lskcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
+ struct lskcipher_alg *cipher = crypto_lskcipher_alg(tfm);
+
+ if (keylen < cipher->co.min_keysize || keylen > cipher->co.max_keysize)
+ return -EINVAL;
+
+ if ((unsigned long)key & alignmask)
+ return lskcipher_setkey_unaligned(tfm, key, keylen);
+ else
+ return cipher->setkey(tfm, key, keylen);
+}
+EXPORT_SYMBOL_GPL(crypto_lskcipher_setkey);
+
+static int crypto_lskcipher_crypt_unaligned(
+ struct crypto_lskcipher *tfm, const u8 *src, u8 *dst, unsigned len,
+ u8 *iv, int (*crypt)(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *iv, bool final))
+{
+ unsigned ivsize = crypto_lskcipher_ivsize(tfm);
+ unsigned bs = crypto_lskcipher_blocksize(tfm);
+ unsigned cs = crypto_lskcipher_chunksize(tfm);
+ int err;
+ u8 *tiv;
+ u8 *p;
+
+ BUILD_BUG_ON(MAX_CIPHER_BLOCKSIZE > PAGE_SIZE ||
+ MAX_CIPHER_ALIGNMASK >= PAGE_SIZE);
+
+ tiv = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (!tiv)
+ return -ENOMEM;
+
+ memcpy(tiv, iv, ivsize);
+
+ p = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+ err = -ENOMEM;
+ if (!p)
+ goto out;
+
+ while (len >= bs) {
+ unsigned chunk = min((unsigned)PAGE_SIZE, len);
+ int err;
+
+ if (chunk > cs)
+ chunk &= ~(cs - 1);
+
+ memcpy(p, src, chunk);
+ err = crypt(tfm, p, p, chunk, tiv, true);
+ if (err)
+ goto out;
+
+ memcpy(dst, p, chunk);
+ src += chunk;
+ dst += chunk;
+ len -= chunk;
+ }
+
+ err = len ? -EINVAL : 0;
+
+out:
+ memcpy(iv, tiv, ivsize);
+ kfree_sensitive(p);
+ kfree_sensitive(tiv);
+ return err;
+}
+
+static int crypto_lskcipher_crypt(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *iv,
+ int (*crypt)(struct crypto_lskcipher *tfm,
+ const u8 *src, u8 *dst,
+ unsigned len, u8 *iv,
+ bool final))
+{
+ unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
+ struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);
+ int ret;
+
+ if (((unsigned long)src | (unsigned long)dst | (unsigned long)iv) &
+ alignmask) {
+ ret = crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv,
+ crypt);
+ goto out;
+ }
+
+ ret = crypt(tfm, src, dst, len, iv, true);
+
+out:
+ return crypto_lskcipher_errstat(alg, ret);
+}
+
+int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *iv)
+{
+ struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);
+
+ atomic64_inc(&istat->encrypt_cnt);
+ atomic64_add(len, &istat->encrypt_tlen);
+ }
+
+ return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->encrypt);
+}
+EXPORT_SYMBOL_GPL(crypto_lskcipher_encrypt);
+
+int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *iv)
+{
+ struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);
+
+ atomic64_inc(&istat->decrypt_cnt);
+ atomic64_add(len, &istat->decrypt_tlen);
+ }
+
+ return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->decrypt);
+}
+EXPORT_SYMBOL_GPL(crypto_lskcipher_decrypt);
+
+static int crypto_lskcipher_crypt_sg(struct skcipher_request *req,
+ int (*crypt)(struct crypto_lskcipher *tfm,
+ const u8 *src, u8 *dst,
+ unsigned len, u8 *iv,
+ bool final))
+{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher);
+ struct crypto_lskcipher *tfm = *ctx;
+ struct skcipher_walk walk;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ while (walk.nbytes) {
+ err = crypt(tfm, walk.src.virt.addr, walk.dst.virt.addr,
+ walk.nbytes, walk.iv, walk.nbytes == walk.total);
+ err = skcipher_walk_done(&walk, err);
+ }
+
+ return err;
+}
+
+int crypto_lskcipher_encrypt_sg(struct skcipher_request *req)
+{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher);
+ struct lskcipher_alg *alg = crypto_lskcipher_alg(*ctx);
+
+ return crypto_lskcipher_crypt_sg(req, alg->encrypt);
+}
+
+int crypto_lskcipher_decrypt_sg(struct skcipher_request *req)
+{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher);
+ struct lskcipher_alg *alg = crypto_lskcipher_alg(*ctx);
+
+ return crypto_lskcipher_crypt_sg(req, alg->decrypt);
+}
+
+static void crypto_lskcipher_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_lskcipher *skcipher = __crypto_lskcipher_cast(tfm);
+ struct lskcipher_alg *alg = crypto_lskcipher_alg(skcipher);
+
+ alg->exit(skcipher);
+}
+
+static int crypto_lskcipher_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_lskcipher *skcipher = __crypto_lskcipher_cast(tfm);
+ struct lskcipher_alg *alg = crypto_lskcipher_alg(skcipher);
+
+ if (alg->exit)
+ skcipher->base.exit = crypto_lskcipher_exit_tfm;
+
+ if (alg->init)
+ return alg->init(skcipher);
+
+ return 0;
+}
+
+static void crypto_lskcipher_free_instance(struct crypto_instance *inst)
+{
+ struct lskcipher_instance *skcipher =
+ container_of(inst, struct lskcipher_instance, s.base);
+
+ skcipher->free(skcipher);
+}
+
+static void __maybe_unused crypto_lskcipher_show(
+ struct seq_file *m, struct crypto_alg *alg)
+{
+ struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg);
+
+ seq_printf(m, "type : lskcipher\n");
+ seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
+ seq_printf(m, "min keysize : %u\n", skcipher->co.min_keysize);
+ seq_printf(m, "max keysize : %u\n", skcipher->co.max_keysize);
+ seq_printf(m, "ivsize : %u\n", skcipher->co.ivsize);
+ seq_printf(m, "chunksize : %u\n", skcipher->co.chunksize);
+}
+
+static int __maybe_unused crypto_lskcipher_report(
+ struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg);
+ struct crypto_report_blkcipher rblkcipher;
+
+ memset(&rblkcipher, 0, sizeof(rblkcipher));
+
+ strscpy(rblkcipher.type, "lskcipher", sizeof(rblkcipher.type));
+ strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));
+
+ rblkcipher.blocksize = alg->cra_blocksize;
+ rblkcipher.min_keysize = skcipher->co.min_keysize;
+ rblkcipher.max_keysize = skcipher->co.max_keysize;
+ rblkcipher.ivsize = skcipher->co.ivsize;
+
+ return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
+ sizeof(rblkcipher), &rblkcipher);
+}
+
+static int __maybe_unused crypto_lskcipher_report_stat(
+ struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg);
+ struct crypto_istat_cipher *istat;
+ struct crypto_stat_cipher rcipher;
+
+ istat = lskcipher_get_stat(skcipher);
+
+ memset(&rcipher, 0, sizeof(rcipher));
+
+ strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
+
+ rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
+ rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
+ rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
+ rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
+ rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
+}
+
+static const struct crypto_type crypto_lskcipher_type = {
+ .extsize = crypto_alg_extsize,
+ .init_tfm = crypto_lskcipher_init_tfm,
+ .free = crypto_lskcipher_free_instance,
+#ifdef CONFIG_PROC_FS
+ .show = crypto_lskcipher_show,
+#endif
+#if IS_ENABLED(CONFIG_CRYPTO_USER)
+ .report = crypto_lskcipher_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_lskcipher_report_stat,
+#endif
+ .maskclear = ~CRYPTO_ALG_TYPE_MASK,
+ .maskset = CRYPTO_ALG_TYPE_MASK,
+ .type = CRYPTO_ALG_TYPE_LSKCIPHER,
+ .tfmsize = offsetof(struct crypto_lskcipher, base),
+};
+
+static void crypto_lskcipher_exit_tfm_sg(struct crypto_tfm *tfm)
+{
+ struct crypto_lskcipher **ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_lskcipher(*ctx);
+}
+
+int crypto_init_lskcipher_ops_sg(struct crypto_tfm *tfm)
+{
+ struct crypto_lskcipher **ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *calg = tfm->__crt_alg;
+ struct crypto_lskcipher *skcipher;
+
+ if (!crypto_mod_get(calg))
+ return -EAGAIN;
+
+ skcipher = crypto_create_tfm(calg, &crypto_lskcipher_type);
+ if (IS_ERR(skcipher)) {
+ crypto_mod_put(calg);
+ return PTR_ERR(skcipher);
+ }
+
+ *ctx = skcipher;
+ tfm->exit = crypto_lskcipher_exit_tfm_sg;
+
+ return 0;
+}
+
+int crypto_grab_lskcipher(struct crypto_lskcipher_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask)
+{
+ spawn->base.frontend = &crypto_lskcipher_type;
+ return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_grab_lskcipher);
+
+struct crypto_lskcipher *crypto_alloc_lskcipher(const char *alg_name,
+ u32 type, u32 mask)
+{
+ return crypto_alloc_tfm(alg_name, &crypto_lskcipher_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_lskcipher);
+
+static int lskcipher_prepare_alg(struct lskcipher_alg *alg)
+{
+ struct crypto_alg *base = &alg->co.base;
+ int err;
+
+ err = skcipher_prepare_alg_common(&alg->co);
+ if (err)
+ return err;
+
+ if (alg->co.chunksize & (alg->co.chunksize - 1))
+ return -EINVAL;
+
+ base->cra_type = &crypto_lskcipher_type;
+ base->cra_flags |= CRYPTO_ALG_TYPE_LSKCIPHER;
+
+ return 0;
+}
+
+int crypto_register_lskcipher(struct lskcipher_alg *alg)
+{
+ struct crypto_alg *base = &alg->co.base;
+ int err;
+
+ err = lskcipher_prepare_alg(alg);
+ if (err)
+ return err;
+
+ return crypto_register_alg(base);
+}
+EXPORT_SYMBOL_GPL(crypto_register_lskcipher);
+
+void crypto_unregister_lskcipher(struct lskcipher_alg *alg)
+{
+ crypto_unregister_alg(&alg->co.base);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_lskcipher);
+
+int crypto_register_lskciphers(struct lskcipher_alg *algs, int count)
+{
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ ret = crypto_register_lskcipher(&algs[i]);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ for (--i; i >= 0; --i)
+ crypto_unregister_lskcipher(&algs[i]);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_register_lskciphers);
+
+void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count)
+{
+ int i;
+
+ for (i = count - 1; i >= 0; --i)
+ crypto_unregister_lskcipher(&algs[i]);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_lskciphers);
+
+int lskcipher_register_instance(struct crypto_template *tmpl,
+ struct lskcipher_instance *inst)
+{
+ int err;
+
+ if (WARN_ON(!inst->free))
+ return -EINVAL;
+
+ err = lskcipher_prepare_alg(&inst->alg);
+ if (err)
+ return err;
+
+ return crypto_register_instance(tmpl, lskcipher_crypto_instance(inst));
+}
+EXPORT_SYMBOL_GPL(lskcipher_register_instance);
+
+static int lskcipher_setkey_simple(struct crypto_lskcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_lskcipher *cipher = lskcipher_cipher_simple(tfm);
+
+ crypto_lskcipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
+ crypto_lskcipher_set_flags(cipher, crypto_lskcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ return crypto_lskcipher_setkey(cipher, key, keylen);
+}
+
+static int lskcipher_init_tfm_simple(struct crypto_lskcipher *tfm)
+{
+ struct lskcipher_instance *inst = lskcipher_alg_instance(tfm);
+ struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);
+ struct crypto_lskcipher_spawn *spawn;
+ struct crypto_lskcipher *cipher;
+
+ spawn = lskcipher_instance_ctx(inst);
+ cipher = crypto_spawn_lskcipher(spawn);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+ *ctx = cipher;
+ return 0;
+}
+
+static void lskcipher_exit_tfm_simple(struct crypto_lskcipher *tfm)
+{
+ struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);
+
+ crypto_free_lskcipher(*ctx);
+}
+
+static void lskcipher_free_instance_simple(struct lskcipher_instance *inst)
+{
+ crypto_drop_lskcipher(lskcipher_instance_ctx(inst));
+ kfree(inst);
+}
+
+/**
+ * lskcipher_alloc_instance_simple - allocate instance of simple block cipher
+ *
+ * Allocate an lskcipher_instance for a simple block cipher mode of operation,
+ * e.g. cbc or ecb. The instance context will have just a single crypto_spawn,
+ * that for the underlying cipher. The {min,max}_keysize, ivsize, blocksize,
+ * alignmask, and priority are set from the underlying cipher but can be
+ * overridden if needed. The tfm context defaults to
+ * struct crypto_lskcipher *, and default ->setkey(), ->init(), and
+ * ->exit() methods are installed.
+ *
+ * @tmpl: the template being instantiated
+ * @tb: the template parameters
+ *
+ * Return: a pointer to the new instance, or an ERR_PTR(). The caller still
+ * needs to register the instance.
+ */
+struct lskcipher_instance *lskcipher_alloc_instance_simple(
+ struct crypto_template *tmpl, struct rtattr **tb)
+{
+ u32 mask;
+ struct lskcipher_instance *inst;
+ struct crypto_lskcipher_spawn *spawn;
+ char ecb_name[CRYPTO_MAX_ALG_NAME];
+ struct lskcipher_alg *cipher_alg;
+ const char *cipher_name;
+ int err;
+
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_LSKCIPHER, &mask);
+ if (err)
+ return ERR_PTR(err);
+
+ cipher_name = crypto_attr_alg_name(tb[1]);
+ if (IS_ERR(cipher_name))
+ return ERR_CAST(cipher_name);
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return ERR_PTR(-ENOMEM);
+
+ spawn = lskcipher_instance_ctx(inst);
+ err = crypto_grab_lskcipher(spawn,
+ lskcipher_crypto_instance(inst),
+ cipher_name, 0, mask);
+
+ ecb_name[0] = 0;
+ if (err == -ENOENT && !!memcmp(tmpl->name, "ecb", 4)) {
+ err = -ENAMETOOLONG;
+ if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
+ cipher_name) >= CRYPTO_MAX_ALG_NAME)
+ goto err_free_inst;
+
+ err = crypto_grab_lskcipher(spawn,
+ lskcipher_crypto_instance(inst),
+ ecb_name, 0, mask);
+ }
+
+ if (err)
+ goto err_free_inst;
+
+ cipher_alg = crypto_lskcipher_spawn_alg(spawn);
+
+ err = crypto_inst_setname(lskcipher_crypto_instance(inst), tmpl->name,
+ &cipher_alg->co.base);
+ if (err)
+ goto err_free_inst;
+
+ if (ecb_name[0]) {
+ int len;
+
+ err = -EINVAL;
+ len = strscpy(ecb_name, &cipher_alg->co.base.cra_name[4],
+ sizeof(ecb_name));
+ if (len < 2)
+ goto err_free_inst;
+
+ if (ecb_name[len - 1] != ')')
+ goto err_free_inst;
+
+ ecb_name[len - 1] = 0;
+
+ err = -ENAMETOOLONG;
+ if (snprintf(inst->alg.co.base.cra_name, CRYPTO_MAX_ALG_NAME,
+ "%s(%s)", tmpl->name, ecb_name) >=
+ CRYPTO_MAX_ALG_NAME)
+ goto err_free_inst;
+
+ if (strcmp(ecb_name, cipher_name) &&
+ snprintf(inst->alg.co.base.cra_driver_name,
+ CRYPTO_MAX_ALG_NAME,
+ "%s(%s)", tmpl->name, cipher_name) >=
+ CRYPTO_MAX_ALG_NAME)
+ goto err_free_inst;
+ } else {
+ /* Don't allow nesting. */
+ err = -ELOOP;
+ if ((cipher_alg->co.base.cra_flags & CRYPTO_ALG_INSTANCE))
+ goto err_free_inst;
+ }
+
+ err = -EINVAL;
+ if (cipher_alg->co.ivsize)
+ goto err_free_inst;
+
+ inst->free = lskcipher_free_instance_simple;
+
+ /* Default algorithm properties, can be overridden */
+ inst->alg.co.base.cra_blocksize = cipher_alg->co.base.cra_blocksize;
+ inst->alg.co.base.cra_alignmask = cipher_alg->co.base.cra_alignmask;
+ inst->alg.co.base.cra_priority = cipher_alg->co.base.cra_priority;
+ inst->alg.co.min_keysize = cipher_alg->co.min_keysize;
+ inst->alg.co.max_keysize = cipher_alg->co.max_keysize;
+ inst->alg.co.ivsize = cipher_alg->co.base.cra_blocksize;
+
+ /* Use struct crypto_lskcipher * by default, can be overridden */
+ inst->alg.co.base.cra_ctxsize = sizeof(struct crypto_lskcipher *);
+ inst->alg.setkey = lskcipher_setkey_simple;
+ inst->alg.init = lskcipher_init_tfm_simple;
+ inst->alg.exit = lskcipher_exit_tfm_simple;
+
+ return inst;
+
+err_free_inst:
+ lskcipher_free_instance_simple(inst);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(lskcipher_alloc_instance_simple);
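
For orientation, a minimal registration sketch built only from the hooks lskcipher.c uses above (setkey, encrypt/decrypt with the trailing final flag, and the embedded skcipher_alg_common). The algorithm name, key size and the no-op transform are hypothetical, and the usual module/header boilerplate is assumed:

/*
 * Hedged sketch, not part of the patch.  The "cipher" is a placeholder;
 * only the structure layout and the registration call come from the
 * code above.
 */
static int example_lsk_setkey(struct crypto_lskcipher *tfm, const u8 *key,
			      unsigned int keylen)
{
	return 0;	/* a real cipher would expand the key here */
}

static int example_lsk_crypt(struct crypto_lskcipher *tfm, const u8 *src,
			     u8 *dst, unsigned len, u8 *iv, bool final)
{
	memcpy(dst, src, len);	/* placeholder transform */
	return 0;
}

static struct lskcipher_alg example_lskcipher = {
	.setkey		= example_lsk_setkey,
	.encrypt	= example_lsk_crypt,
	.decrypt	= example_lsk_crypt,
	.co = {
		.min_keysize	= 16,
		.max_keysize	= 16,
		.base = {
			.cra_name	 = "example",
			.cra_driver_name = "example-generic",
			.cra_blocksize	 = 16,
			.cra_module	 = THIS_MODULE,
		},
	},
};

/* registered with crypto_register_lskcipher(&example_lskcipher) and
 * torn down with crypto_unregister_lskcipher(&example_lskcipher) */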
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index d2e5e104f8..cd501195f3 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -61,6 +61,24 @@ static const u8 rsa_digest_info_sha512[] = {
0x05, 0x00, 0x04, 0x40
};
+static const u8 rsa_digest_info_sha3_256[] = {
+ 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09,
+ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x08,
+ 0x05, 0x00, 0x04, 0x20
+};
+
+static const u8 rsa_digest_info_sha3_384[] = {
+ 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09,
+ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x09,
+ 0x05, 0x00, 0x04, 0x30
+};
+
+static const u8 rsa_digest_info_sha3_512[] = {
+ 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09,
+ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x0A,
+ 0x05, 0x00, 0x04, 0x40
+};
+
static const struct rsa_asn1_template {
const char *name;
const u8 *data;
@@ -74,8 +92,13 @@ static const struct rsa_asn1_template {
_(sha384),
_(sha512),
_(sha224),
- { NULL }
#undef _
+#define _(X) { "sha3-" #X, rsa_digest_info_sha3_##X, sizeof(rsa_digest_info_sha3_##X) }
+ _(256),
+ _(384),
+ _(512),
+#undef _
+ { NULL }
};
static const struct rsa_asn1_template *rsa_lookup_asn1(const char *name)
@@ -687,3 +710,5 @@ struct crypto_template rsa_pkcs1pad_tmpl = {
.create = pkcs1pad_create,
.module = THIS_MODULE,
};
+
+MODULE_ALIAS_CRYPTO("pkcs1pad");
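
With the SHA-3 DigestInfo prefixes wired into the template table, pkcs1pad instances for SHA-3 digests can now be instantiated by name (the matching null test entries are added to testmgr.c further down). A minimal allocation sketch, assuming <crypto/akcipher.h>:

/*
 * Hedged sketch, not part of the patch: request a pkcs1pad instance
 * bound to RSA and SHA3-256.  The name matches the new template entries
 * above; error handling is left to the call site.
 */
static struct crypto_akcipher *example_alloc_pkcs1pad_sha3(void)
{
	return crypto_alloc_akcipher("pkcs1pad(rsa,sha3-256)", 0, 0);
}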
diff --git a/crypto/rsaprivkey.asn1 b/crypto/rsaprivkey.asn1
index 4ce06758e8..76865124a9 100644
--- a/crypto/rsaprivkey.asn1
+++ b/crypto/rsaprivkey.asn1
@@ -1,3 +1,10 @@
+-- SPDX-License-Identifier: BSD-3-Clause
+--
+-- Copyright (C) 2016 IETF Trust and the persons identified as authors
+-- of the code
+--
+-- https://www.rfc-editor.org/rfc/rfc8017#appendix-A.1.2
+
RsaPrivKey ::= SEQUENCE {
version INTEGER,
n INTEGER ({ rsa_get_n }),
diff --git a/crypto/rsapubkey.asn1 b/crypto/rsapubkey.asn1
index 725498e461..0d32b1ca62 100644
--- a/crypto/rsapubkey.asn1
+++ b/crypto/rsapubkey.asn1
@@ -1,3 +1,10 @@
+-- SPDX-License-Identifier: BSD-3-Clause
+--
+-- Copyright (C) 2016 IETF Trust and the persons identified as authors
+-- of the code
+--
+-- https://www.rfc-editor.org/rfc/rfc8017#appendix-A.1.1
+
RsaPubKey ::= SEQUENCE {
n INTEGER ({ rsa_get_n }),
e INTEGER ({ rsa_get_e })
diff --git a/crypto/shash.c b/crypto/shash.c
index 1fadb6b59b..d5194221c8 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -10,17 +10,12 @@
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <net/netlink.h>
#include "hash.h"
-#define MAX_SHASH_ALIGNMASK 63
-
-static const struct crypto_type crypto_shash_type;
-
static inline struct crypto_istat_hash *shash_get_stat(struct shash_alg *alg)
{
return hash_get_stat(&alg->halg);
@@ -28,7 +23,13 @@ static inline struct crypto_istat_hash *shash_get_stat(struct shash_alg *alg)
static inline int crypto_shash_errstat(struct shash_alg *alg, int err)
{
- return crypto_hash_errstat(&alg->halg, err);
+ if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+ return err;
+
+ if (err && err != -EINPROGRESS && err != -EBUSY)
+ atomic64_inc(&shash_get_stat(alg)->err_cnt);
+
+ return err;
}
int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
@@ -38,27 +39,6 @@ int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
}
EXPORT_SYMBOL_GPL(shash_no_setkey);
-static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct shash_alg *shash = crypto_shash_alg(tfm);
- unsigned long alignmask = crypto_shash_alignmask(tfm);
- unsigned long absize;
- u8 *buffer, *alignbuffer;
- int err;
-
- absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
- buffer = kmalloc(absize, GFP_ATOMIC);
- if (!buffer)
- return -ENOMEM;
-
- alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
- memcpy(alignbuffer, key, keylen);
- err = shash->setkey(tfm, alignbuffer, keylen);
- kfree_sensitive(buffer);
- return err;
-}
-
static void shash_set_needkey(struct crypto_shash *tfm, struct shash_alg *alg)
{
if (crypto_shash_alg_needs_key(alg))
@@ -69,14 +49,9 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{
struct shash_alg *shash = crypto_shash_alg(tfm);
- unsigned long alignmask = crypto_shash_alignmask(tfm);
int err;
- if ((unsigned long)key & alignmask)
- err = shash_setkey_unaligned(tfm, key, keylen);
- else
- err = shash->setkey(tfm, key, keylen);
-
+ err = shash->setkey(tfm, key, keylen);
if (unlikely(err)) {
shash_set_needkey(tfm, shash);
return err;
@@ -87,108 +62,42 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
}
EXPORT_SYMBOL_GPL(crypto_shash_setkey);
-static int shash_update_unaligned(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- struct crypto_shash *tfm = desc->tfm;
- struct shash_alg *shash = crypto_shash_alg(tfm);
- unsigned long alignmask = crypto_shash_alignmask(tfm);
- unsigned int unaligned_len = alignmask + 1 -
- ((unsigned long)data & alignmask);
- /*
- * We cannot count on __aligned() working for large values:
- * https://patchwork.kernel.org/patch/9507697/
- */
- u8 ubuf[MAX_SHASH_ALIGNMASK * 2];
- u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
- int err;
-
- if (WARN_ON(buf + unaligned_len > ubuf + sizeof(ubuf)))
- return -EINVAL;
-
- if (unaligned_len > len)
- unaligned_len = len;
-
- memcpy(buf, data, unaligned_len);
- err = shash->update(desc, buf, unaligned_len);
- memset(buf, 0, unaligned_len);
-
- return err ?:
- shash->update(desc, data + unaligned_len, len - unaligned_len);
-}
-
int crypto_shash_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- struct crypto_shash *tfm = desc->tfm;
- struct shash_alg *shash = crypto_shash_alg(tfm);
- unsigned long alignmask = crypto_shash_alignmask(tfm);
+ struct shash_alg *shash = crypto_shash_alg(desc->tfm);
int err;
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
atomic64_add(len, &shash_get_stat(shash)->hash_tlen);
- if ((unsigned long)data & alignmask)
- err = shash_update_unaligned(desc, data, len);
- else
- err = shash->update(desc, data, len);
+ err = shash->update(desc, data, len);
return crypto_shash_errstat(shash, err);
}
EXPORT_SYMBOL_GPL(crypto_shash_update);
-static int shash_final_unaligned(struct shash_desc *desc, u8 *out)
-{
- struct crypto_shash *tfm = desc->tfm;
- unsigned long alignmask = crypto_shash_alignmask(tfm);
- struct shash_alg *shash = crypto_shash_alg(tfm);
- unsigned int ds = crypto_shash_digestsize(tfm);
- /*
- * We cannot count on __aligned() working for large values:
- * https://patchwork.kernel.org/patch/9507697/
- */
- u8 ubuf[MAX_SHASH_ALIGNMASK + HASH_MAX_DIGESTSIZE];
- u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
- int err;
-
- if (WARN_ON(buf + ds > ubuf + sizeof(ubuf)))
- return -EINVAL;
-
- err = shash->final(desc, buf);
- if (err)
- goto out;
-
- memcpy(out, buf, ds);
-
-out:
- memset(buf, 0, ds);
- return err;
-}
-
int crypto_shash_final(struct shash_desc *desc, u8 *out)
{
- struct crypto_shash *tfm = desc->tfm;
- struct shash_alg *shash = crypto_shash_alg(tfm);
- unsigned long alignmask = crypto_shash_alignmask(tfm);
+ struct shash_alg *shash = crypto_shash_alg(desc->tfm);
int err;
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
atomic64_inc(&shash_get_stat(shash)->hash_cnt);
- if ((unsigned long)out & alignmask)
- err = shash_final_unaligned(desc, out);
- else
- err = shash->final(desc, out);
+ err = shash->final(desc, out);
return crypto_shash_errstat(shash, err);
}
EXPORT_SYMBOL_GPL(crypto_shash_final);
-static int shash_finup_unaligned(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
+static int shash_default_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
{
- return shash_update_unaligned(desc, data, len) ?:
- shash_final_unaligned(desc, out);
+ struct shash_alg *shash = crypto_shash_alg(desc->tfm);
+
+ return shash->update(desc, data, len) ?:
+ shash->final(desc, out);
}
int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
@@ -196,7 +105,6 @@ int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
{
struct crypto_shash *tfm = desc->tfm;
struct shash_alg *shash = crypto_shash_alg(tfm);
- unsigned long alignmask = crypto_shash_alignmask(tfm);
int err;
if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
@@ -206,22 +114,19 @@ int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
atomic64_add(len, &istat->hash_tlen);
}
- if (((unsigned long)data | (unsigned long)out) & alignmask)
- err = shash_finup_unaligned(desc, data, len, out);
- else
- err = shash->finup(desc, data, len, out);
-
+ err = shash->finup(desc, data, len, out);
return crypto_shash_errstat(shash, err);
}
EXPORT_SYMBOL_GPL(crypto_shash_finup);
-static int shash_digest_unaligned(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
+static int shash_default_digest(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
{
- return crypto_shash_init(desc) ?:
- shash_update_unaligned(desc, data, len) ?:
- shash_final_unaligned(desc, out);
+ struct shash_alg *shash = crypto_shash_alg(desc->tfm);
+
+ return shash->init(desc) ?:
+ shash->finup(desc, data, len, out);
}
int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
@@ -229,7 +134,6 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
{
struct crypto_shash *tfm = desc->tfm;
struct shash_alg *shash = crypto_shash_alg(tfm);
- unsigned long alignmask = crypto_shash_alignmask(tfm);
int err;
if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
@@ -241,8 +145,6 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
err = -ENOKEY;
- else if (((unsigned long)data | (unsigned long)out) & alignmask)
- err = shash_digest_unaligned(desc, data, len, out);
else
err = shash->digest(desc, data, len, out);
@@ -266,202 +168,34 @@ int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
}
EXPORT_SYMBOL_GPL(crypto_shash_tfm_digest);
-static int shash_default_export(struct shash_desc *desc, void *out)
-{
- memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm));
- return 0;
-}
-
-static int shash_default_import(struct shash_desc *desc, const void *in)
-{
- memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(desc->tfm));
- return 0;
-}
-
-static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_shash **ctx = crypto_ahash_ctx(tfm);
-
- return crypto_shash_setkey(*ctx, key, keylen);
-}
-
-static int shash_async_init(struct ahash_request *req)
-{
- struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct shash_desc *desc = ahash_request_ctx(req);
-
- desc->tfm = *ctx;
-
- return crypto_shash_init(desc);
-}
-
-int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc)
-{
- struct crypto_hash_walk walk;
- int nbytes;
-
- for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
- nbytes = crypto_hash_walk_done(&walk, nbytes))
- nbytes = crypto_shash_update(desc, walk.data, nbytes);
-
- return nbytes;
-}
-EXPORT_SYMBOL_GPL(shash_ahash_update);
-
-static int shash_async_update(struct ahash_request *req)
-{
- return shash_ahash_update(req, ahash_request_ctx(req));
-}
-
-static int shash_async_final(struct ahash_request *req)
-{
- return crypto_shash_final(ahash_request_ctx(req), req->result);
-}
-
-int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc)
-{
- struct crypto_hash_walk walk;
- int nbytes;
-
- nbytes = crypto_hash_walk_first(req, &walk);
- if (!nbytes)
- return crypto_shash_final(desc, req->result);
-
- do {
- nbytes = crypto_hash_walk_last(&walk) ?
- crypto_shash_finup(desc, walk.data, nbytes,
- req->result) :
- crypto_shash_update(desc, walk.data, nbytes);
- nbytes = crypto_hash_walk_done(&walk, nbytes);
- } while (nbytes > 0);
-
- return nbytes;
-}
-EXPORT_SYMBOL_GPL(shash_ahash_finup);
-
-static int shash_async_finup(struct ahash_request *req)
-{
- struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct shash_desc *desc = ahash_request_ctx(req);
-
- desc->tfm = *ctx;
-
- return shash_ahash_finup(req, desc);
-}
-
-int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
-{
- unsigned int nbytes = req->nbytes;
- struct scatterlist *sg;
- unsigned int offset;
- int err;
-
- if (nbytes &&
- (sg = req->src, offset = sg->offset,
- nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
- void *data;
-
- data = kmap_local_page(sg_page(sg));
- err = crypto_shash_digest(desc, data + offset, nbytes,
- req->result);
- kunmap_local(data);
- } else
- err = crypto_shash_init(desc) ?:
- shash_ahash_finup(req, desc);
-
- return err;
-}
-EXPORT_SYMBOL_GPL(shash_ahash_digest);
-
-static int shash_async_digest(struct ahash_request *req)
-{
- struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct shash_desc *desc = ahash_request_ctx(req);
-
- desc->tfm = *ctx;
-
- return shash_ahash_digest(req, desc);
-}
-
-static int shash_async_export(struct ahash_request *req, void *out)
-{
- return crypto_shash_export(ahash_request_ctx(req), out);
-}
-
-static int shash_async_import(struct ahash_request *req, const void *in)
-{
- struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct shash_desc *desc = ahash_request_ctx(req);
-
- desc->tfm = *ctx;
-
- return crypto_shash_import(desc, in);
-}
-
-static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm)
+int crypto_shash_export(struct shash_desc *desc, void *out)
{
- struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
-
- crypto_free_shash(*ctx);
-}
-
-int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
-{
- struct crypto_alg *calg = tfm->__crt_alg;
- struct shash_alg *alg = __crypto_shash_alg(calg);
- struct crypto_ahash *crt = __crypto_ahash_cast(tfm);
- struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
- struct crypto_shash *shash;
-
- if (!crypto_mod_get(calg))
- return -EAGAIN;
-
- shash = crypto_create_tfm(calg, &crypto_shash_type);
- if (IS_ERR(shash)) {
- crypto_mod_put(calg);
- return PTR_ERR(shash);
- }
-
- *ctx = shash;
- tfm->exit = crypto_exit_shash_ops_async;
-
- crt->init = shash_async_init;
- crt->update = shash_async_update;
- crt->final = shash_async_final;
- crt->finup = shash_async_finup;
- crt->digest = shash_async_digest;
- if (crypto_shash_alg_has_setkey(alg))
- crt->setkey = shash_async_setkey;
-
- crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
- CRYPTO_TFM_NEED_KEY);
-
- crt->export = shash_async_export;
- crt->import = shash_async_import;
+ struct crypto_shash *tfm = desc->tfm;
+ struct shash_alg *shash = crypto_shash_alg(tfm);
- crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);
+ if (shash->export)
+ return shash->export(desc, out);
+ memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(tfm));
return 0;
}
+EXPORT_SYMBOL_GPL(crypto_shash_export);
-struct crypto_ahash *crypto_clone_shash_ops_async(struct crypto_ahash *nhash,
- struct crypto_ahash *hash)
+int crypto_shash_import(struct shash_desc *desc, const void *in)
{
- struct crypto_shash **nctx = crypto_ahash_ctx(nhash);
- struct crypto_shash **ctx = crypto_ahash_ctx(hash);
- struct crypto_shash *shash;
+ struct crypto_shash *tfm = desc->tfm;
+ struct shash_alg *shash = crypto_shash_alg(tfm);
- shash = crypto_clone_shash(*ctx);
- if (IS_ERR(shash)) {
- crypto_free_ahash(nhash);
- return ERR_CAST(shash);
- }
+ if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
- *nctx = shash;
+ if (shash->import)
+ return shash->import(desc, in);
- return nhash;
+ memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(tfm));
+ return 0;
}
+EXPORT_SYMBOL_GPL(crypto_shash_import);
static void crypto_shash_exit_tfm(struct crypto_tfm *tfm)
{
@@ -541,7 +275,7 @@ static int __maybe_unused crypto_shash_report_stat(
return crypto_hash_report_stat(skb, alg, "shash");
}
-static const struct crypto_type crypto_shash_type = {
+const struct crypto_type crypto_shash_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_shash_init_tfm,
.free = crypto_shash_free_instance,
@@ -626,6 +360,10 @@ int hash_prepare_alg(struct hash_alg_common *alg)
if (alg->digestsize > HASH_MAX_DIGESTSIZE)
return -EINVAL;
+ /* alignmask is not useful for hashes, so it is not supported. */
+ if (base->cra_alignmask)
+ return -EINVAL;
+
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
@@ -642,9 +380,6 @@ static int shash_prepare_alg(struct shash_alg *alg)
if (alg->descsize > HASH_MAX_DESCSIZE)
return -EINVAL;
- if (base->cra_alignmask > MAX_SHASH_ALIGNMASK)
- return -EINVAL;
-
if ((alg->export && !alg->import) || (alg->import && !alg->export))
return -EINVAL;
@@ -655,15 +390,23 @@ static int shash_prepare_alg(struct shash_alg *alg)
base->cra_type = &crypto_shash_type;
base->cra_flags |= CRYPTO_ALG_TYPE_SHASH;
+ /*
+ * Handle missing optional functions. For each one we can either
+ * install a default here, or we can leave the pointer as NULL and check
+ * the pointer for NULL in crypto_shash_*(), avoiding an indirect call
+ * when the default behavior is desired. For ->finup and ->digest we
+ * install defaults, since for optimal performance algorithms should
+ * implement these anyway. On the other hand, for ->import and
+ * ->export the common case and best performance comes from the simple
+ * memcpy of the shash_desc_ctx, so when those pointers are NULL we
+ * leave them NULL and provide the memcpy with no indirect call.
+ */
if (!alg->finup)
- alg->finup = shash_finup_unaligned;
+ alg->finup = shash_default_finup;
if (!alg->digest)
- alg->digest = shash_digest_unaligned;
- if (!alg->export) {
- alg->export = shash_default_export;
- alg->import = shash_default_import;
+ alg->digest = shash_default_digest;
+ if (!alg->export)
alg->halg.statesize = alg->descsize;
- }
if (!alg->setkey)
alg->setkey = shash_no_setkey;
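
Because export/import may now be left NULL and default to a plain copy of the descriptor context, saving and resuming a partial hash state stays a byte copy for most algorithms. A minimal caller-side sketch, assuming <crypto/hash.h> and an already-allocated synchronous tfm:

/*
 * Hedged sketch, not part of the patch: export a partial state and
 * resume from it.  With a NULL ->export this hits the memcpy fallback
 * added to crypto_shash_export()/crypto_shash_import() above.
 */
static int example_split_digest(struct crypto_shash *tfm,
				const u8 *a, unsigned int alen,
				const u8 *b, unsigned int blen, u8 *out)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	u8 state[HASH_MAX_STATESIZE];
	int err;

	desc->tfm = tfm;
	err = crypto_shash_init(desc) ?:
	      crypto_shash_update(desc, a, alen) ?:
	      crypto_shash_export(desc, state);
	if (err)
		return err;

	return crypto_shash_import(desc, state) ?:
	       crypto_shash_finup(desc, b, blen, out);
}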
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 7b275716cf..ac8b8c0426 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -24,8 +24,9 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>
+#include "skcipher.h"
-#include "internal.h"
+#define CRYPTO_ALG_TYPE_SKCIPHER_MASK 0x0000000e
enum {
SKCIPHER_WALK_PHYS = 1 << 0,
@@ -43,6 +44,8 @@ struct skcipher_walk_buffer {
u8 buffer[];
};
+static const struct crypto_type crypto_skcipher_type;
+
static int skcipher_walk_next(struct skcipher_walk *walk);
static inline void skcipher_map_src(struct skcipher_walk *walk)
@@ -89,11 +92,7 @@ static inline struct skcipher_alg *__crypto_skcipher_alg(
static inline struct crypto_istat_cipher *skcipher_get_stat(
struct skcipher_alg *alg)
{
-#ifdef CONFIG_CRYPTO_STATS
- return &alg->stat;
-#else
- return NULL;
-#endif
+ return skcipher_get_stat_common(&alg->co);
}
static inline int crypto_skcipher_errstat(struct skcipher_alg *alg, int err)
@@ -468,6 +467,7 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
walk->total = req->cryptlen;
walk->nbytes = 0;
@@ -485,10 +485,14 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
SKCIPHER_WALK_SLEEP : 0;
walk->blocksize = crypto_skcipher_blocksize(tfm);
- walk->stride = crypto_skcipher_walksize(tfm);
walk->ivsize = crypto_skcipher_ivsize(tfm);
walk->alignmask = crypto_skcipher_alignmask(tfm);
+ if (alg->co.base.cra_type != &crypto_skcipher_type)
+ walk->stride = alg->co.chunksize;
+ else
+ walk->stride = alg->walksize;
+
return skcipher_walk_first(walk);
}
@@ -616,6 +620,17 @@ int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned long alignmask = crypto_skcipher_alignmask(tfm);
int err;
+ if (cipher->co.base.cra_type != &crypto_skcipher_type) {
+ struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_lskcipher_clear_flags(*ctx, CRYPTO_TFM_REQ_MASK);
+ crypto_lskcipher_set_flags(*ctx,
+ crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_lskcipher_setkey(*ctx, key, keylen);
+ goto out;
+ }
+
if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
return -EINVAL;
@@ -624,6 +639,7 @@ int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
else
err = cipher->setkey(tfm, key, keylen);
+out:
if (unlikely(err)) {
skcipher_set_needkey(tfm);
return err;
@@ -649,6 +665,8 @@ int crypto_skcipher_encrypt(struct skcipher_request *req)
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
+ else if (alg->co.base.cra_type != &crypto_skcipher_type)
+ ret = crypto_lskcipher_encrypt_sg(req);
else
ret = alg->encrypt(req);
@@ -671,6 +689,8 @@ int crypto_skcipher_decrypt(struct skcipher_request *req)
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
+ else if (alg->co.base.cra_type != &crypto_skcipher_type)
+ ret = crypto_lskcipher_decrypt_sg(req);
else
ret = alg->decrypt(req);
@@ -693,6 +713,9 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
skcipher_set_needkey(skcipher);
+ if (tfm->__crt_alg->cra_type != &crypto_skcipher_type)
+ return crypto_init_lskcipher_ops_sg(tfm);
+
if (alg->exit)
skcipher->base.exit = crypto_skcipher_exit_tfm;
@@ -702,6 +725,14 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
return 0;
}
+static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
+{
+ if (alg->cra_type != &crypto_skcipher_type)
+ return sizeof(struct crypto_lskcipher *);
+
+ return crypto_alg_extsize(alg);
+}
+
static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
struct skcipher_instance *skcipher =
@@ -770,7 +801,7 @@ static int __maybe_unused crypto_skcipher_report_stat(
}
static const struct crypto_type crypto_skcipher_type = {
- .extsize = crypto_alg_extsize,
+ .extsize = crypto_skcipher_extsize,
.init_tfm = crypto_skcipher_init_tfm,
.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
@@ -783,7 +814,7 @@ static const struct crypto_type crypto_skcipher_type = {
.report_stat = crypto_skcipher_report_stat,
#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
- .maskset = CRYPTO_ALG_TYPE_MASK,
+ .maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK,
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.tfmsize = offsetof(struct crypto_skcipher, base),
};
@@ -834,23 +865,18 @@ int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher);
-static int skcipher_prepare_alg(struct skcipher_alg *alg)
+int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
{
- struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
+ struct crypto_istat_cipher *istat = skcipher_get_stat_common(alg);
struct crypto_alg *base = &alg->base;
- if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
- alg->walksize > PAGE_SIZE / 8)
+ if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8)
return -EINVAL;
if (!alg->chunksize)
alg->chunksize = base->cra_blocksize;
- if (!alg->walksize)
- alg->walksize = alg->chunksize;
- base->cra_type = &crypto_skcipher_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
- base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
memset(istat, 0, sizeof(*istat));
@@ -858,6 +884,27 @@ static int skcipher_prepare_alg(struct skcipher_alg *alg)
return 0;
}
+static int skcipher_prepare_alg(struct skcipher_alg *alg)
+{
+ struct crypto_alg *base = &alg->base;
+ int err;
+
+ err = skcipher_prepare_alg_common(&alg->co);
+ if (err)
+ return err;
+
+ if (alg->walksize > PAGE_SIZE / 8)
+ return -EINVAL;
+
+ if (!alg->walksize)
+ alg->walksize = alg->chunksize;
+
+ base->cra_type = &crypto_skcipher_type;
+ base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;
+
+ return 0;
+}
+
int crypto_register_skcipher(struct skcipher_alg *alg)
{
struct crypto_alg *base = &alg->base;
diff --git a/crypto/skcipher.h b/crypto/skcipher.h
new file mode 100644
index 0000000000..16c9484360
--- /dev/null
+++ b/crypto/skcipher.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Cryptographic API.
+ *
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _LOCAL_CRYPTO_SKCIPHER_H
+#define _LOCAL_CRYPTO_SKCIPHER_H
+
+#include <crypto/internal/skcipher.h>
+#include "internal.h"
+
+static inline struct crypto_istat_cipher *skcipher_get_stat_common(
+ struct skcipher_alg_common *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ return &alg->stat;
+#else
+ return NULL;
+#endif
+}
+
+int crypto_lskcipher_encrypt_sg(struct skcipher_request *req);
+int crypto_lskcipher_decrypt_sg(struct skcipher_request *req);
+int crypto_init_lskcipher_ops_sg(struct crypto_tfm *tfm);
+int skcipher_prepare_alg_common(struct skcipher_alg_common *alg);
+
+#endif /* _LOCAL_CRYPTO_SKCIPHER_H */
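
The glue above lets a crypto_skcipher transform be backed transparently by an lskcipher: extsize shrinks to a single pointer, setkey and encrypt/decrypt dispatch to the crypto_lskcipher_*_sg() helpers, and the request-based user API is unchanged. A minimal caller sketch, assuming a synchronous transform, <crypto/skcipher.h> and an already-mapped scatterlist:

/*
 * Hedged sketch, not part of the patch: the skcipher user API is
 * untouched by the lskcipher dispatch.  The algorithm name is
 * illustrative and async completion handling is omitted.
 */
static int example_ecb_encrypt(const u8 *key, unsigned int keylen,
			       struct scatterlist *sg, unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	int err;

	tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	skcipher_request_set_crypt(req, sg, sg, len, NULL);
	err = crypto_skcipher_encrypt(req);
	skcipher_request_free(req);

out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}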
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 216878c8bc..15c7a30112 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -408,17 +408,15 @@ static const struct testvec_config default_hash_testvec_configs[] = {
.finalization_type = FINALIZATION_TYPE_FINAL,
.key_offset = 1,
}, {
- .name = "digest buffer aligned only to alignmask",
+ .name = "digest misaligned buffer",
.src_divs = {
{
.proportion_of_total = 10000,
.offset = 1,
- .offset_relative_to_alignmask = true,
},
},
.finalization_type = FINALIZATION_TYPE_DIGEST,
.key_offset = 1,
- .key_offset_relative_to_alignmask = true,
}, {
.name = "init+update+update+final two even splits",
.src_divs = {
@@ -1275,7 +1273,6 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec,
u8 *hashstate)
{
struct crypto_shash *tfm = desc->tfm;
- const unsigned int alignmask = crypto_shash_alignmask(tfm);
const unsigned int digestsize = crypto_shash_digestsize(tfm);
const unsigned int statesize = crypto_shash_statesize(tfm);
const char *driver = crypto_shash_driver_name(tfm);
@@ -1287,7 +1284,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec,
/* Set the key, if specified */
if (vec->ksize) {
err = do_setkey(crypto_shash_setkey, tfm, vec->key, vec->ksize,
- cfg, alignmask);
+ cfg, 0);
if (err) {
if (err == vec->setkey_error)
return 0;
@@ -1304,7 +1301,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec,
}
/* Build the scatterlist for the source data */
- err = build_hash_sglist(tsgl, vec, cfg, alignmask, divs);
+ err = build_hash_sglist(tsgl, vec, cfg, 0, divs);
if (err) {
pr_err("alg: shash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n",
driver, vec_name, cfg->name);
@@ -1459,7 +1456,6 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec,
u8 *hashstate)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- const unsigned int alignmask = crypto_ahash_alignmask(tfm);
const unsigned int digestsize = crypto_ahash_digestsize(tfm);
const unsigned int statesize = crypto_ahash_statesize(tfm);
const char *driver = crypto_ahash_driver_name(tfm);
@@ -1475,7 +1471,7 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec,
/* Set the key, if specified */
if (vec->ksize) {
err = do_setkey(crypto_ahash_setkey, tfm, vec->key, vec->ksize,
- cfg, alignmask);
+ cfg, 0);
if (err) {
if (err == vec->setkey_error)
return 0;
@@ -1492,7 +1488,7 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec,
}
/* Build the scatterlist for the source data */
- err = build_hash_sglist(tsgl, vec, cfg, alignmask, divs);
+ err = build_hash_sglist(tsgl, vec, cfg, 0, divs);
if (err) {
pr_err("alg: ahash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n",
driver, vec_name, cfg->name);
@@ -4963,7 +4959,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "ecb(arc4)",
- .generic_driver = "ecb(arc4)-generic",
+ .generic_driver = "arc4-generic",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(arc4_tv_template)
@@ -5461,6 +5457,18 @@ static const struct alg_test_desc alg_test_descs[] = {
.akcipher = __VECS(pkcs1pad_rsa_tv_template)
}
}, {
+ .alg = "pkcs1pad(rsa,sha3-256)",
+ .test = alg_test_null,
+ .fips_allowed = 1,
+ }, {
+ .alg = "pkcs1pad(rsa,sha3-384)",
+ .test = alg_test_null,
+ .fips_allowed = 1,
+ }, {
+ .alg = "pkcs1pad(rsa,sha3-512)",
+ .test = alg_test_null,
+ .fips_allowed = 1,
+ }, {
.alg = "pkcs1pad(rsa,sha384)",
.test = alg_test_null,
.fips_allowed = 1,
@@ -5773,16 +5781,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(xxhash64_tv_template)
}
}, {
- .alg = "zlib-deflate",
- .test = alg_test_comp,
- .fips_allowed = 1,
- .suite = {
- .comp = {
- .comp = __VECS(zlib_deflate_comp_tv_template),
- .decomp = __VECS(zlib_deflate_decomp_tv_template)
- }
- }
- }, {
.alg = "zstd",
.test = alg_test_comp,
.fips_allowed = 1,
@@ -5945,6 +5943,25 @@ test_done:
return rc;
notest:
+ if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_LSKCIPHER) {
+ char nalg[CRYPTO_MAX_ALG_NAME];
+
+ if (snprintf(nalg, sizeof(nalg), "ecb(%s)", alg) >=
+ sizeof(nalg))
+ goto notest2;
+
+ i = alg_find_test(nalg);
+ if (i < 0)
+ goto notest2;
+
+ if (fips_enabled && !alg_test_descs[i].fips_allowed)
+ goto non_fips_alg;
+
+ rc = alg_test_skcipher(alg_test_descs + i, driver, type, mask);
+ goto test_done;
+ }
+
+notest2:
printk(KERN_INFO "alg: No test for %s (%s)\n", alg, driver);
if (type & CRYPTO_ALG_FIPS_INTERNAL)
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 5ca7a41250..d7e9839754 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -653,30 +653,6 @@ static const struct akcipher_testvec rsa_tv_template[] = {
static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = {
{
.key =
- "\x04\xf7\x46\xf8\x2f\x15\xf6\x22\x8e\xd7\x57\x4f\xcc\xe7\xbb\xc1"
- "\xd4\x09\x73\xcf\xea\xd0\x15\x07\x3d\xa5\x8a\x8a\x95\x43\xe4\x68"
- "\xea\xc6\x25\xc1\xc1\x01\x25\x4c\x7e\xc3\x3c\xa6\x04\x0a\xe7\x08"
- "\x98",
- .key_len = 49,
- .params =
- "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
- "\xce\x3d\x03\x01\x01",
- .param_len = 21,
- .m =
- "\xcd\xb9\xd2\x1c\xb7\x6f\xcd\x44\xb3\xfd\x63\xea\xa3\x66\x7f\xae"
- "\x63\x85\xe7\x82",
- .m_size = 20,
- .algo = OID_id_ecdsa_with_sha1,
- .c =
- "\x30\x35\x02\x19\x00\xba\xe5\x93\x83\x6e\xb6\x3b\x63\xa0\x27\x91"
- "\xc6\xf6\x7f\xc3\x09\xad\x59\xad\x88\x27\xd6\x92\x6b\x02\x18\x10"
- "\x68\x01\x9d\xba\xce\x83\x08\xef\x95\x52\x7b\xa0\x0f\xe4\x18\x86"
- "\x80\x6f\xa5\x79\x77\xda\xd0",
- .c_size = 55,
- .public_key_vec = true,
- .siggen_sigver_test = true,
- }, {
- .key =
"\x04\xb6\x4b\xb1\xd1\xac\xba\x24\x8f\x65\xb2\x60\x00\x90\xbf\xbd"
"\x78\x05\x73\xe9\x79\x1d\x6f\x7c\x0b\xd2\xc3\x93\xa7\x28\xe1\x75"
"\xf7\xd5\x95\x1d\x28\x10\xc0\x75\x50\x5c\x1a\x4f\x3f\x8f\xa5\xee"
@@ -780,32 +756,6 @@ static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = {
static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = {
{
.key =
- "\x04\xb9\x7b\xbb\xd7\x17\x64\xd2\x7e\xfc\x81\x5d\x87\x06\x83\x41"
- "\x22\xd6\x9a\xaa\x87\x17\xec\x4f\x63\x55\x2f\x94\xba\xdd\x83\xe9"
- "\x34\x4b\xf3\xe9\x91\x13\x50\xb6\xcb\xca\x62\x08\xe7\x3b\x09\xdc"
- "\xc3\x63\x4b\x2d\xb9\x73\x53\xe4\x45\xe6\x7c\xad\xe7\x6b\xb0\xe8"
- "\xaf",
- .key_len = 65,
- .params =
- "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
- "\xce\x3d\x03\x01\x07",
- .param_len = 21,
- .m =
- "\xc2\x2b\x5f\x91\x78\x34\x26\x09\x42\x8d\x6f\x51\xb2\xc5\xaf\x4c"
- "\x0b\xde\x6a\x42",
- .m_size = 20,
- .algo = OID_id_ecdsa_with_sha1,
- .c =
- "\x30\x46\x02\x21\x00\xf9\x25\xce\x9f\x3a\xa6\x35\x81\xcf\xd4\xe7"
- "\xb7\xf0\x82\x56\x41\xf7\xd4\xad\x8d\x94\x5a\x69\x89\xee\xca\x6a"
- "\x52\x0e\x48\x4d\xcc\x02\x21\x00\xd7\xe4\xef\x52\x66\xd3\x5b\x9d"
- "\x8a\xfa\x54\x93\x29\xa7\x70\x86\xf1\x03\x03\xf3\x3b\xe2\x73\xf7"
- "\xfb\x9d\x8b\xde\xd4\x8d\x6f\xad",
- .c_size = 72,
- .public_key_vec = true,
- .siggen_sigver_test = true,
- }, {
- .key =
"\x04\x8b\x6d\xc0\x33\x8e\x2d\x8b\x67\xf5\xeb\xc4\x7f\xa0\xf5\xd9"
"\x7b\x03\xa5\x78\x9a\xb5\xea\x14\xe4\x23\xd0\xaf\xd7\x0e\x2e\xa0"
"\xc9\x8b\xdb\x95\xf8\xb3\xaf\xac\x00\x2c\x2c\x1f\x7a\xfd\x95\x88"
@@ -916,36 +866,6 @@ static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = {
static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = {
{
- .key = /* secp384r1(sha1) */
- "\x04\x89\x25\xf3\x97\x88\xcb\xb0\x78\xc5\x72\x9a\x14\x6e\x7a\xb1"
- "\x5a\xa5\x24\xf1\x95\x06\x9e\x28\xfb\xc4\xb9\xbe\x5a\x0d\xd9\x9f"
- "\xf3\xd1\x4d\x2d\x07\x99\xbd\xda\xa7\x66\xec\xbb\xea\xba\x79\x42"
- "\xc9\x34\x89\x6a\xe7\x0b\xc3\xf2\xfe\x32\x30\xbe\xba\xf9\xdf\x7e"
- "\x4b\x6a\x07\x8e\x26\x66\x3f\x1d\xec\xa2\x57\x91\x51\xdd\x17\x0e"
- "\x0b\x25\xd6\x80\x5c\x3b\xe6\x1a\x98\x48\x91\x45\x7a\x73\xb0\xc3"
- "\xf1",
- .key_len = 97,
- .params =
- "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
- "\x00\x22",
- .param_len = 18,
- .m =
- "\x12\x55\x28\xf0\x77\xd5\xb6\x21\x71\x32\x48\xcd\x28\xa8\x25\x22"
- "\x3a\x69\xc1\x93",
- .m_size = 20,
- .algo = OID_id_ecdsa_with_sha1,
- .c =
- "\x30\x66\x02\x31\x00\xf5\x0f\x24\x4c\x07\x93\x6f\x21\x57\x55\x07"
- "\x20\x43\x30\xde\xa0\x8d\x26\x8e\xae\x63\x3f\xbc\x20\x3a\xc6\xf1"
- "\x32\x3c\xce\x70\x2b\x78\xf1\x4c\x26\xe6\x5b\x86\xcf\xec\x7c\x7e"
- "\xd0\x87\xd7\xd7\x6e\x02\x31\x00\xcd\xbb\x7e\x81\x5d\x8f\x63\xc0"
- "\x5f\x63\xb1\xbe\x5e\x4c\x0e\xa1\xdf\x28\x8c\x1b\xfa\xf9\x95\x88"
- "\x74\xa0\x0f\xbf\xaf\xc3\x36\x76\x4a\xa1\x59\xf1\x1c\xa4\x58\x26"
- "\x79\x12\x2a\xb7\xc5\x15\x92\xc5",
- .c_size = 104,
- .public_key_vec = true,
- .siggen_sigver_test = true,
- }, {
.key = /* secp384r1(sha224) */
"\x04\x69\x6c\xcf\x62\xee\xd0\x0d\xe5\xb5\x2f\x70\x54\xcf\x26\xa0"
"\xd9\x98\x8d\x92\x2a\xab\x9b\x11\xcb\x48\x18\xa1\xa9\x0d\xd5\x18"
@@ -35754,81 +35674,6 @@ static const struct comp_testvec deflate_decomp_tv_template[] = {
},
};
-static const struct comp_testvec zlib_deflate_comp_tv_template[] = {
- {
- .inlen = 70,
- .outlen = 44,
- .input = "Join us now and share the software "
- "Join us now and share the software ",
- .output = "\x78\x5e\xf3\xca\xcf\xcc\x53\x28"
- "\x2d\x56\xc8\xcb\x2f\x57\x48\xcc"
- "\x4b\x51\x28\xce\x48\x2c\x4a\x55"
- "\x28\xc9\x48\x55\x28\xce\x4f\x2b"
- "\x29\x07\x71\xbc\x08\x2b\x01\x00"
- "\x7c\x65\x19\x3d",
- }, {
- .inlen = 191,
- .outlen = 129,
- .input = "This document describes a compression method based on the DEFLATE"
- "compression algorithm. This document defines the application of "
- "the DEFLATE algorithm to the IP Payload Compression Protocol.",
- .output = "\x78\x5e\x5d\xce\x41\x0a\xc3\x30"
- "\x0c\x04\xc0\xaf\xec\x0b\xf2\x87"
- "\xd2\xa6\x50\xe8\xc1\x07\x7f\x40"
- "\xb1\x95\x5a\x60\x5b\xc6\x56\x0f"
- "\xfd\x7d\x93\x1e\x42\xe8\x51\xec"
- "\xee\x20\x9f\x64\x20\x6a\x78\x17"
- "\xae\x86\xc8\x23\x74\x59\x78\x80"
- "\x10\xb4\xb4\xce\x63\x88\x56\x14"
- "\xb6\xa4\x11\x0b\x0d\x8e\xd8\x6e"
- "\x4b\x8c\xdb\x7c\x7f\x5e\xfc\x7c"
- "\xae\x51\x7e\x69\x17\x4b\x65\x02"
- "\xfc\x1f\xbc\x4a\xdd\xd8\x7d\x48"
- "\xad\x65\x09\x64\x3b\xac\xeb\xd9"
- "\xc2\x01\xc0\xf4\x17\x3c\x1c\x1c"
- "\x7d\xb2\x52\xc4\xf5\xf4\x8f\xeb"
- "\x6a\x1a\x34\x4f\x5f\x2e\x32\x45"
- "\x4e",
- },
-};
-
-static const struct comp_testvec zlib_deflate_decomp_tv_template[] = {
- {
- .inlen = 128,
- .outlen = 191,
- .input = "\x78\x9c\x5d\x8d\x31\x0e\xc2\x30"
- "\x10\x04\xbf\xb2\x2f\xc8\x1f\x10"
- "\x04\x09\x89\xc2\x85\x3f\x70\xb1"
- "\x2f\xf8\x24\xdb\x67\xd9\x47\xc1"
- "\xef\x49\x68\x12\x51\xae\x76\x67"
- "\xd6\x27\x19\x88\x1a\xde\x85\xab"
- "\x21\xf2\x08\x5d\x16\x1e\x20\x04"
- "\x2d\xad\xf3\x18\xa2\x15\x85\x2d"
- "\x69\xc4\x42\x83\x23\xb6\x6c\x89"
- "\x71\x9b\xef\xcf\x8b\x9f\xcf\x33"
- "\xca\x2f\xed\x62\xa9\x4c\x80\xff"
- "\x13\xaf\x52\x37\xed\x0e\x52\x6b"
- "\x59\x02\xd9\x4e\xe8\x7a\x76\x1d"
- "\x02\x98\xfe\x8a\x87\x83\xa3\x4f"
- "\x56\x8a\xb8\x9e\x8e\x5c\x57\xd3"
- "\xa0\x79\xfa\x02\x2e\x32\x45\x4e",
- .output = "This document describes a compression method based on the DEFLATE"
- "compression algorithm. This document defines the application of "
- "the DEFLATE algorithm to the IP Payload Compression Protocol.",
- }, {
- .inlen = 44,
- .outlen = 70,
- .input = "\x78\x9c\xf3\xca\xcf\xcc\x53\x28"
- "\x2d\x56\xc8\xcb\x2f\x57\x48\xcc"
- "\x4b\x51\x28\xce\x48\x2c\x4a\x55"
- "\x28\xc9\x48\x55\x28\xce\x4f\x2b"
- "\x29\x07\x71\xbc\x08\x2b\x01\x00"
- "\x7c\x65\x19\x3d",
- .output = "Join us now and share the software "
- "Join us now and share the software ",
- },
-};
-
/*
* LZO test vectors (null-terminated strings).
*/
diff --git a/crypto/vmac.c b/crypto/vmac.c
index 4633b2dda1..0a1d8efa6c 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -649,7 +649,6 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.base.cra_priority = alg->cra_priority;
inst->alg.base.cra_blocksize = alg->cra_blocksize;
- inst->alg.base.cra_alignmask = alg->cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
inst->alg.base.cra_init = vmac_init_tfm;
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index 6074c5c1da..a9e8ee9c19 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -27,7 +27,7 @@ static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101,
*/
struct xcbc_tfm_ctx {
struct crypto_cipher *child;
- u8 ctx[];
+ u8 consts[];
};
/*
@@ -43,7 +43,7 @@ struct xcbc_tfm_ctx {
*/
struct xcbc_desc_ctx {
unsigned int len;
- u8 ctx[];
+ u8 odds[];
};
#define XCBC_BLOCKSIZE 16
@@ -51,9 +51,8 @@ struct xcbc_desc_ctx {
static int crypto_xcbc_digest_setkey(struct crypto_shash *parent,
const u8 *inkey, unsigned int keylen)
{
- unsigned long alignmask = crypto_shash_alignmask(parent);
struct xcbc_tfm_ctx *ctx = crypto_shash_ctx(parent);
- u8 *consts = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
+ u8 *consts = ctx->consts;
int err = 0;
u8 key1[XCBC_BLOCKSIZE];
int bs = sizeof(key1);
@@ -71,10 +70,9 @@ static int crypto_xcbc_digest_setkey(struct crypto_shash *parent,
static int crypto_xcbc_digest_init(struct shash_desc *pdesc)
{
- unsigned long alignmask = crypto_shash_alignmask(pdesc->tfm);
struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
int bs = crypto_shash_blocksize(pdesc->tfm);
- u8 *prev = PTR_ALIGN(&ctx->ctx[0], alignmask + 1) + bs;
+ u8 *prev = &ctx->odds[bs];
ctx->len = 0;
memset(prev, 0, bs);
@@ -86,12 +84,11 @@ static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p,
unsigned int len)
{
struct crypto_shash *parent = pdesc->tfm;
- unsigned long alignmask = crypto_shash_alignmask(parent);
struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent);
struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_blocksize(parent);
- u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
+ u8 *odds = ctx->odds;
u8 *prev = odds + bs;
/* checking the data can fill the block */
@@ -132,13 +129,11 @@ static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p,
static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out)
{
struct crypto_shash *parent = pdesc->tfm;
- unsigned long alignmask = crypto_shash_alignmask(parent);
struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent);
struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_blocksize(parent);
- u8 *consts = PTR_ALIGN(&tctx->ctx[0], alignmask + 1);
- u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
+ u8 *odds = ctx->odds;
u8 *prev = odds + bs;
unsigned int offset = 0;
@@ -157,7 +152,7 @@ static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out)
}
crypto_xor(prev, odds, bs);
- crypto_xor(prev, consts + offset, bs);
+ crypto_xor(prev, &tctx->consts[offset], bs);
crypto_cipher_encrypt_one(tfm, out, prev);
@@ -191,7 +186,6 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
struct shash_instance *inst;
struct crypto_cipher_spawn *spawn;
struct crypto_alg *alg;
- unsigned long alignmask;
u32 mask;
int err;
@@ -218,21 +212,15 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
if (err)
goto err_free_inst;
- alignmask = alg->cra_alignmask | 3;
- inst->alg.base.cra_alignmask = alignmask;
inst->alg.base.cra_priority = alg->cra_priority;
inst->alg.base.cra_blocksize = alg->cra_blocksize;
+ inst->alg.base.cra_ctxsize = sizeof(struct xcbc_tfm_ctx) +
+ alg->cra_blocksize * 2;
inst->alg.digestsize = alg->cra_blocksize;
- inst->alg.descsize = ALIGN(sizeof(struct xcbc_desc_ctx),
- crypto_tfm_ctx_alignment()) +
- (alignmask &
- ~(crypto_tfm_ctx_alignment() - 1)) +
+ inst->alg.descsize = sizeof(struct xcbc_desc_ctx) +
alg->cra_blocksize * 2;
- inst->alg.base.cra_ctxsize = ALIGN(sizeof(struct xcbc_tfm_ctx),
- alignmask + 1) +
- alg->cra_blocksize * 2;
inst->alg.base.cra_init = xcbc_init_tfm;
inst->alg.base.cra_exit = xcbc_exit_tfm;
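The xcbc conversion above replaces the PTR_ALIGN()ed opaque ctx[] buffers with named flexible array members and sizes cra_ctxsize/descsize as the plain struct size plus two block sizes. A minimal userspace sketch of that layout, using hypothetical names rather than the kernel API:

/*
 * Sketch of a context that carries its per-instance constants in a
 * flexible array member sized at allocation time, so the trailing
 * bytes are directly addressable without alignmask arithmetic.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BLOCKSIZE 16

struct xcbc_tfm_ctx_sketch {
	void *child;			/* stand-in for struct crypto_cipher * */
	unsigned char consts[];		/* 2 * blocksize bytes of derived constants */
};

int main(void)
{
	/* Allocate the struct plus the trailing constants, as cra_ctxsize now does. */
	struct xcbc_tfm_ctx_sketch *ctx = malloc(sizeof(*ctx) + 2 * BLOCKSIZE);

	if (!ctx)
		return 1;
	memset(ctx->consts, 0, 2 * BLOCKSIZE);

	/* One derived constant at consts[0], the other at consts[BLOCKSIZE]. */
	printf("consts starts %zu bytes into the allocation\n",
	       (size_t)(ctx->consts - (unsigned char *)ctx));
	free(ctx);
	return 0;
}

The same idea applies to the per-request descriptor: odds[] holds the running block plus the previous ciphertext at odds[blocksize], with descsize covering both.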
diff --git a/crypto/xts.c b/crypto/xts.c
index 038f60dd51..672e1a3f0b 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -339,10 +339,10 @@ static void xts_free_instance(struct skcipher_instance *inst)
static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
{
+ struct skcipher_alg_common *alg;
char name[CRYPTO_MAX_ALG_NAME];
struct skcipher_instance *inst;
struct xts_instance_ctx *ctx;
- struct skcipher_alg *alg;
const char *cipher_name;
u32 mask;
int err;
@@ -377,13 +377,13 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
if (err)
goto err_free_inst;
- alg = crypto_skcipher_spawn_alg(&ctx->spawn);
+ alg = crypto_spawn_skcipher_alg_common(&ctx->spawn);
err = -EINVAL;
if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
goto err_free_inst;
- if (crypto_skcipher_alg_ivsize(alg))
+ if (alg->ivsize)
goto err_free_inst;
err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
@@ -428,8 +428,8 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
(__alignof__(u64) - 1);
inst->alg.ivsize = XTS_BLOCK_SIZE;
- inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
- inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;
+ inst->alg.min_keysize = alg->min_keysize * 2;
+ inst->alg.max_keysize = alg->max_keysize * 2;
inst->alg.base.cra_ctxsize = sizeof(struct xts_tfm_ctx);
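The doubled min_keysize/max_keysize above reflects that xts consumes a key twice the size of the underlying cipher's key and splits it into a data half and a tweak half. A minimal userspace sketch of that split, with hypothetical types rather than the kernel setkey path:

/*
 * Sketch: split a 2*N byte XTS key into the half used for data
 * encryption and the half used to derive the per-block tweak.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct xts_keys {
	unsigned char data_key[32];
	unsigned char tweak_key[32];
	size_t half_len;
};

static int xts_split_key(struct xts_keys *k, const unsigned char *key,
			 size_t keylen)
{
	if (keylen % 2 || keylen / 2 > sizeof(k->data_key))
		return -1;
	k->half_len = keylen / 2;
	memcpy(k->data_key, key, k->half_len);
	memcpy(k->tweak_key, key + k->half_len, k->half_len);
	return 0;
}

int main(void)
{
	unsigned char key[64];		/* e.g. xts(aes) with AES-256: 2 * 32 bytes */
	struct xts_keys k;

	memset(key, 0xaa, sizeof(key));
	if (xts_split_key(&k, key, sizeof(key)))
		return 1;
	printf("data key %zu bytes, tweak key %zu bytes\n",
	       k.half_len, k.half_len);
	return 0;
}

The rest of the xts hunk is purely mechanical: reading ivsize, min_keysize and max_keysize straight from struct skcipher_alg_common instead of going through the per-alg accessor helpers.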