summary | refs | log | tree | commit | diff | stats
path: root/crypto/cbc.c
diff options
context:
space:
mode:
author: Daniel Baumann <daniel.baumann@progress-linux.org> 2024-05-18 17:39:57 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org> 2024-05-18 17:39:57 +0000
commit: dc50eab76b709d68175a358d6e23a5a3890764d3 (patch)
tree: c754d0390db060af0213ff994f0ac310e4cfd6e9 /crypto/cbc.c
parent: Adding debian version 6.6.15-2. (diff)
download: linux-dc50eab76b709d68175a358d6e23a5a3890764d3.tar.xz
linux-dc50eab76b709d68175a358d6e23a5a3890764d3.zip
Merging upstream version 6.7.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'crypto/cbc.c')
-rw-r--r--  crypto/cbc.c | 163
1 files changed, 61 insertions, 102 deletions
diff --git a/crypto/cbc.c b/crypto/cbc.c
index 6c03e96b94..28345b8d92 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -5,8 +5,6 @@
* Copyright (c) 2006-2016 Herbert Xu <herbert@gondor.apana.org.au>
*/
-#include <crypto/algapi.h>
-#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -14,99 +12,71 @@
#include <linux/log2.h>
#include <linux/module.h>
-static int crypto_cbc_encrypt_segment(struct skcipher_walk *walk,
- struct crypto_skcipher *skcipher)
+static int crypto_cbc_encrypt_segment(struct crypto_lskcipher *tfm,
+ const u8 *src, u8 *dst, unsigned nbytes,
+ u8 *iv)
{
- unsigned int bsize = crypto_skcipher_blocksize(skcipher);
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 *dst = walk->dst.virt.addr;
- struct crypto_cipher *cipher;
- struct crypto_tfm *tfm;
- u8 *iv = walk->iv;
-
- cipher = skcipher_cipher_simple(skcipher);
- tfm = crypto_cipher_tfm(cipher);
- fn = crypto_cipher_alg(cipher)->cia_encrypt;
+ unsigned int bsize = crypto_lskcipher_blocksize(tfm);
- do {
+ for (; nbytes >= bsize; src += bsize, dst += bsize, nbytes -= bsize) {
crypto_xor(iv, src, bsize);
- fn(tfm, dst, iv);
+ crypto_lskcipher_encrypt(tfm, iv, dst, bsize, NULL);
memcpy(iv, dst, bsize);
-
- src += bsize;
- dst += bsize;
- } while ((nbytes -= bsize) >= bsize);
+ }
return nbytes;
}
-static int crypto_cbc_encrypt_inplace(struct skcipher_walk *walk,
- struct crypto_skcipher *skcipher)
+static int crypto_cbc_encrypt_inplace(struct crypto_lskcipher *tfm,
+ u8 *src, unsigned nbytes, u8 *oiv)
{
- unsigned int bsize = crypto_skcipher_blocksize(skcipher);
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- struct crypto_cipher *cipher;
- struct crypto_tfm *tfm;
- u8 *iv = walk->iv;
-
- cipher = skcipher_cipher_simple(skcipher);
- tfm = crypto_cipher_tfm(cipher);
- fn = crypto_cipher_alg(cipher)->cia_encrypt;
+ unsigned int bsize = crypto_lskcipher_blocksize(tfm);
+ u8 *iv = oiv;
+
+ if (nbytes < bsize)
+ goto out;
do {
crypto_xor(src, iv, bsize);
- fn(tfm, src, src);
+ crypto_lskcipher_encrypt(tfm, src, src, bsize, NULL);
iv = src;
src += bsize;
} while ((nbytes -= bsize) >= bsize);
- memcpy(walk->iv, iv, bsize);
+ memcpy(oiv, iv, bsize);
+out:
return nbytes;
}
-static int crypto_cbc_encrypt(struct skcipher_request *req)
+static int crypto_cbc_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *iv, bool final)
{
- struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct skcipher_walk walk;
- int err;
+ struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);
+ struct crypto_lskcipher *cipher = *ctx;
+ int rem;
- err = skcipher_walk_virt(&walk, req, false);
+ if (src == dst)
+ rem = crypto_cbc_encrypt_inplace(cipher, dst, len, iv);
+ else
+ rem = crypto_cbc_encrypt_segment(cipher, src, dst, len, iv);
- while (walk.nbytes) {
- if (walk.src.virt.addr == walk.dst.virt.addr)
- err = crypto_cbc_encrypt_inplace(&walk, skcipher);
- else
- err = crypto_cbc_encrypt_segment(&walk, skcipher);
- err = skcipher_walk_done(&walk, err);
- }
-
- return err;
+ return rem && final ? -EINVAL : rem;
}
-static int crypto_cbc_decrypt_segment(struct skcipher_walk *walk,
- struct crypto_skcipher *skcipher)
+static int crypto_cbc_decrypt_segment(struct crypto_lskcipher *tfm,
+ const u8 *src, u8 *dst, unsigned nbytes,
+ u8 *oiv)
{
- unsigned int bsize = crypto_skcipher_blocksize(skcipher);
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
- u8 *dst = walk->dst.virt.addr;
- struct crypto_cipher *cipher;
- struct crypto_tfm *tfm;
- u8 *iv = walk->iv;
-
- cipher = skcipher_cipher_simple(skcipher);
- tfm = crypto_cipher_tfm(cipher);
- fn = crypto_cipher_alg(cipher)->cia_decrypt;
+ unsigned int bsize = crypto_lskcipher_blocksize(tfm);
+ const u8 *iv = oiv;
+
+ if (nbytes < bsize)
+ goto out;
do {
- fn(tfm, dst, src);
+ crypto_lskcipher_decrypt(tfm, src, dst, bsize, NULL);
crypto_xor(dst, iv, bsize);
iv = src;
@@ -114,83 +84,72 @@ static int crypto_cbc_decrypt_segment(struct skcipher_walk *walk,
dst += bsize;
} while ((nbytes -= bsize) >= bsize);
- memcpy(walk->iv, iv, bsize);
+ memcpy(oiv, iv, bsize);
+out:
return nbytes;
}
-static int crypto_cbc_decrypt_inplace(struct skcipher_walk *walk,
- struct crypto_skcipher *skcipher)
+static int crypto_cbc_decrypt_inplace(struct crypto_lskcipher *tfm,
+ u8 *src, unsigned nbytes, u8 *iv)
{
- unsigned int bsize = crypto_skcipher_blocksize(skcipher);
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
- unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
+ unsigned int bsize = crypto_lskcipher_blocksize(tfm);
u8 last_iv[MAX_CIPHER_BLOCKSIZE];
- struct crypto_cipher *cipher;
- struct crypto_tfm *tfm;
- cipher = skcipher_cipher_simple(skcipher);
- tfm = crypto_cipher_tfm(cipher);
- fn = crypto_cipher_alg(cipher)->cia_decrypt;
+ if (nbytes < bsize)
+ goto out;
/* Start of the last block. */
src += nbytes - (nbytes & (bsize - 1)) - bsize;
memcpy(last_iv, src, bsize);
for (;;) {
- fn(tfm, src, src);
+ crypto_lskcipher_decrypt(tfm, src, src, bsize, NULL);
if ((nbytes -= bsize) < bsize)
break;
crypto_xor(src, src - bsize, bsize);
src -= bsize;
}
- crypto_xor(src, walk->iv, bsize);
- memcpy(walk->iv, last_iv, bsize);
+ crypto_xor(src, iv, bsize);
+ memcpy(iv, last_iv, bsize);
+out:
return nbytes;
}
-static int crypto_cbc_decrypt(struct skcipher_request *req)
+static int crypto_cbc_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *iv, bool final)
{
- struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct skcipher_walk walk;
- int err;
+ struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);
+ struct crypto_lskcipher *cipher = *ctx;
+ int rem;
- err = skcipher_walk_virt(&walk, req, false);
+ if (src == dst)
+ rem = crypto_cbc_decrypt_inplace(cipher, dst, len, iv);
+ else
+ rem = crypto_cbc_decrypt_segment(cipher, src, dst, len, iv);
- while (walk.nbytes) {
- if (walk.src.virt.addr == walk.dst.virt.addr)
- err = crypto_cbc_decrypt_inplace(&walk, skcipher);
- else
- err = crypto_cbc_decrypt_segment(&walk, skcipher);
- err = skcipher_walk_done(&walk, err);
- }
-
- return err;
+ return rem && final ? -EINVAL : rem;
}
static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
{
- struct skcipher_instance *inst;
- struct crypto_alg *alg;
+ struct lskcipher_instance *inst;
int err;
- inst = skcipher_alloc_instance_simple(tmpl, tb);
+ inst = lskcipher_alloc_instance_simple(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);
- alg = skcipher_ialg_simple(inst);
-
err = -EINVAL;
- if (!is_power_of_2(alg->cra_blocksize))
+ if (!is_power_of_2(inst->alg.co.base.cra_blocksize))
goto out_free_inst;
inst->alg.encrypt = crypto_cbc_encrypt;
inst->alg.decrypt = crypto_cbc_decrypt;
- err = skcipher_register_instance(tmpl, inst);
+ err = lskcipher_register_instance(tmpl, inst);
if (err) {
out_free_inst:
inst->free(inst);