Diffstat (limited to 'crypto/cfb.c')
-rw-r--r-- | crypto/cfb.c | 360 |
1 files changed, 360 insertions, 0 deletions
diff --git a/crypto/cfb.c b/crypto/cfb.c
new file mode 100644
index 000000000..4abfe32ff
--- /dev/null
+++ b/crypto/cfb.c
@@ -0,0 +1,360 @@
+//SPDX-License-Identifier: GPL-2.0
+/*
+ * CFB: Cipher FeedBack mode
+ *
+ * Copyright (c) 2018 James.Bottomley@HansenPartnership.com
+ *
+ * CFB is a stream cipher mode which is layered on to a block
+ * encryption scheme.  It works very much like a one time pad where
+ * the pad is generated initially from the encrypted IV and then
+ * subsequently from the encrypted previous block of ciphertext.  The
+ * pad is XOR'd into the plain text to get the final ciphertext.
+ *
+ * The scheme of CFB is best described by wikipedia:
+ *
+ * https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#CFB
+ *
+ * Note that since the pad for both encryption and decryption is
+ * generated by an encryption operation, CFB never uses the block
+ * decryption function.
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+struct crypto_cfb_ctx {
+	struct crypto_cipher *child;
+};
+
+static unsigned int crypto_cfb_bsize(struct crypto_skcipher *tfm)
+{
+	struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_cipher *child = ctx->child;
+
+	return crypto_cipher_blocksize(child);
+}
+
+static void crypto_cfb_encrypt_one(struct crypto_skcipher *tfm,
+				   const u8 *src, u8 *dst)
+{
+	struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	crypto_cipher_encrypt_one(ctx->child, dst, src);
+}
+
+/* final encrypt and decrypt is the same */
+static void crypto_cfb_final(struct skcipher_walk *walk,
+			     struct crypto_skcipher *tfm)
+{
+	const unsigned long alignmask = crypto_skcipher_alignmask(tfm);
+	u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
+	u8 *stream = PTR_ALIGN(tmp + 0, alignmask + 1);
+	u8 *src = walk->src.virt.addr;
+	u8 *dst = walk->dst.virt.addr;
+	u8 *iv = walk->iv;
+	unsigned int nbytes = walk->nbytes;
+
+	crypto_cfb_encrypt_one(tfm, iv, stream);
+	crypto_xor_cpy(dst, stream, src, nbytes);
+}
+
+static int crypto_cfb_encrypt_segment(struct skcipher_walk *walk,
+				      struct crypto_skcipher *tfm)
+{
+	const unsigned int bsize = crypto_cfb_bsize(tfm);
+	unsigned int nbytes = walk->nbytes;
+	u8 *src = walk->src.virt.addr;
+	u8 *dst = walk->dst.virt.addr;
+	u8 *iv = walk->iv;
+
+	do {
+		crypto_cfb_encrypt_one(tfm, iv, dst);
+		crypto_xor(dst, src, bsize);
+		iv = dst;
+
+		src += bsize;
+		dst += bsize;
+	} while ((nbytes -= bsize) >= bsize);
+
+	memcpy(walk->iv, iv, bsize);
+
+	return nbytes;
+}
+
+static int crypto_cfb_encrypt_inplace(struct skcipher_walk *walk,
+				      struct crypto_skcipher *tfm)
+{
+	const unsigned int bsize = crypto_cfb_bsize(tfm);
+	unsigned int nbytes = walk->nbytes;
+	u8 *src = walk->src.virt.addr;
+	u8 *iv = walk->iv;
+	u8 tmp[MAX_CIPHER_BLOCKSIZE];
+
+	do {
+		crypto_cfb_encrypt_one(tfm, iv, tmp);
+		crypto_xor(src, tmp, bsize);
+		iv = src;
+
+		src += bsize;
+	} while ((nbytes -= bsize) >= bsize);
+
+	memcpy(walk->iv, iv, bsize);
+
+	return nbytes;
+}
+
+static int crypto_cfb_encrypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct skcipher_walk walk;
+	unsigned int bsize = crypto_cfb_bsize(tfm);
+	int err;
+
+	err = skcipher_walk_virt(&walk, req, false);
+
+	while (walk.nbytes >= bsize) {
+		if (walk.src.virt.addr == walk.dst.virt.addr)
+			err = crypto_cfb_encrypt_inplace(&walk, tfm);
+		else
+			err = crypto_cfb_encrypt_segment(&walk, tfm);
+		err = skcipher_walk_done(&walk, err);
+	}
+
+	if (walk.nbytes) {
+		crypto_cfb_final(&walk, tfm);
+		err = skcipher_walk_done(&walk, 0);
+	}
+
+	return err;
+}
+
+static int crypto_cfb_decrypt_segment(struct skcipher_walk *walk,
+				      struct crypto_skcipher *tfm)
+{
+	const unsigned int bsize = crypto_cfb_bsize(tfm);
+	unsigned int nbytes = walk->nbytes;
+	u8 *src = walk->src.virt.addr;
+	u8 *dst = walk->dst.virt.addr;
+	u8 *iv = walk->iv;
+
+	do {
+		crypto_cfb_encrypt_one(tfm, iv, dst);
+		crypto_xor(dst, src, bsize);
+		iv = src;
+
+		src += bsize;
+		dst += bsize;
+	} while ((nbytes -= bsize) >= bsize);
+
+	memcpy(walk->iv, iv, bsize);
+
+	return nbytes;
+}
+
+static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
+				      struct crypto_skcipher *tfm)
+{
+	const unsigned int bsize = crypto_cfb_bsize(tfm);
+	unsigned int nbytes = walk->nbytes;
+	u8 *src = walk->src.virt.addr;
+	u8 * const iv = walk->iv;
+	u8 tmp[MAX_CIPHER_BLOCKSIZE];
+
+	do {
+		crypto_cfb_encrypt_one(tfm, iv, tmp);
+		memcpy(iv, src, bsize);
+		crypto_xor(src, tmp, bsize);
+		src += bsize;
+	} while ((nbytes -= bsize) >= bsize);
+
+	return nbytes;
+}
+
+static int crypto_cfb_decrypt_blocks(struct skcipher_walk *walk,
+				     struct crypto_skcipher *tfm)
+{
+	if (walk->src.virt.addr == walk->dst.virt.addr)
+		return crypto_cfb_decrypt_inplace(walk, tfm);
+	else
+		return crypto_cfb_decrypt_segment(walk, tfm);
+}
+
+static int crypto_cfb_setkey(struct crypto_skcipher *parent, const u8 *key,
+			     unsigned int keylen)
+{
+	struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(parent);
+	struct crypto_cipher *child = ctx->child;
+	int err;
+
+	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+	crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+				       CRYPTO_TFM_REQ_MASK);
+	err = crypto_cipher_setkey(child, key, keylen);
+	crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
+					  CRYPTO_TFM_RES_MASK);
+	return err;
+}
+
+static int crypto_cfb_decrypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct skcipher_walk walk;
+	const unsigned int bsize = crypto_cfb_bsize(tfm);
+	int err;
+
+	err = skcipher_walk_virt(&walk, req, false);
+
+	while (walk.nbytes >= bsize) {
+		err = crypto_cfb_decrypt_blocks(&walk, tfm);
+		err = skcipher_walk_done(&walk, err);
+	}
+
+	if (walk.nbytes) {
+		crypto_cfb_final(&walk, tfm);
+		err = skcipher_walk_done(&walk, 0);
+	}
+
+	return err;
+}
+
+static int crypto_cfb_init_tfm(struct crypto_skcipher *tfm)
+{
+	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+	struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
+	struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_cipher *cipher;
+
+	cipher = crypto_spawn_cipher(spawn);
+	if (IS_ERR(cipher))
+		return PTR_ERR(cipher);
+
+	ctx->child = cipher;
+	return 0;
+}
+
+static void crypto_cfb_exit_tfm(struct crypto_skcipher *tfm)
+{
+	struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	crypto_free_cipher(ctx->child);
+}
+
+static void crypto_cfb_free(struct skcipher_instance *inst)
+{
+	crypto_drop_skcipher(skcipher_instance_ctx(inst));
+	kfree(inst);
+}
+
+static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+	struct skcipher_instance *inst;
+	struct crypto_attr_type *algt;
+	struct crypto_spawn *spawn;
+	struct crypto_alg *alg;
+	u32 mask;
+	int err;
+
+	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER);
+	if (err)
+		return err;
+
+	inst = kzalloc(sizeof(*inst) +
+		       sizeof(*spawn), GFP_KERNEL);
+	if (!inst)
+		return -ENOMEM;
+
+	algt = crypto_get_attr_type(tb);
+	err = PTR_ERR(algt);
+	if (IS_ERR(algt))
+		goto err_free_inst;
+
+	mask = CRYPTO_ALG_TYPE_MASK |
+		crypto_requires_off(algt->type, algt->mask,
+				    CRYPTO_ALG_NEED_FALLBACK);
+
+	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
+	err = PTR_ERR(alg);
+	if (IS_ERR(alg))
+		goto err_free_inst;
+
+	spawn = skcipher_instance_ctx(inst);
+	err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
+				CRYPTO_ALG_TYPE_MASK);
+	if (err)
+		goto err_put_alg;
+
+	err = crypto_inst_setname(skcipher_crypto_instance(inst), "cfb", alg);
+	if (err)
+		goto err_drop_spawn;
+
+	inst->alg.base.cra_priority = alg->cra_priority;
+	/* we're a stream cipher independent of the crypto cra_blocksize */
+	inst->alg.base.cra_blocksize = 1;
+	inst->alg.base.cra_alignmask = alg->cra_alignmask;
+
+	/*
+	 * To simplify the implementation, configure the skcipher walk to only
+	 * give a partial block at the very end, never earlier.
+	 */
+	inst->alg.chunksize = alg->cra_blocksize;
+
+	inst->alg.ivsize = alg->cra_blocksize;
+	inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
+	inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
+
+	inst->alg.base.cra_ctxsize = sizeof(struct crypto_cfb_ctx);
+
+	inst->alg.init = crypto_cfb_init_tfm;
+	inst->alg.exit = crypto_cfb_exit_tfm;
+
+	inst->alg.setkey = crypto_cfb_setkey;
+	inst->alg.encrypt = crypto_cfb_encrypt;
+	inst->alg.decrypt = crypto_cfb_decrypt;
+
+	inst->free = crypto_cfb_free;
+
+	err = skcipher_register_instance(tmpl, inst);
+	if (err)
+		goto err_drop_spawn;
+	crypto_mod_put(alg);
+
+out:
+	return err;
+
+err_drop_spawn:
+	crypto_drop_spawn(spawn);
+err_put_alg:
+	crypto_mod_put(alg);
+err_free_inst:
+	kfree(inst);
+	goto out;
+}
+
+static struct crypto_template crypto_cfb_tmpl = {
+	.name = "cfb",
+	.create = crypto_cfb_create,
+	.module = THIS_MODULE,
+};
+
+static int __init crypto_cfb_module_init(void)
+{
+	return crypto_register_template(&crypto_cfb_tmpl);
+}
+
+static void __exit crypto_cfb_module_exit(void)
+{
+	crypto_unregister_template(&crypto_cfb_tmpl);
+}
+
+module_init(crypto_cfb_module_init);
+module_exit(crypto_cfb_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CFB block cipher algorithm");
+MODULE_ALIAS_CRYPTO("cfb");
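
The header comment describes the mode informally: the "pad" (keystream block) is always produced by running the block cipher in the encrypt direction, first over the IV and then over the previous ciphertext block, and is XOR'd with the data, so CFB never needs the block decryption function and handles a trailing partial block naturally. Below is a minimal standalone sketch of that recurrence, not part of the patch; block_encrypt() is a hypothetical toy stand-in for the real 16-byte block cipher.

/* Toy illustration of the CFB recurrence; not the kernel code, and
 * block_encrypt() is NOT a real cipher, just a stand-in for E_K().
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BS 16

/* Hypothetical stand-in for the underlying block cipher's encrypt direction. */
static void block_encrypt(const uint8_t key[BS], const uint8_t in[BS],
			  uint8_t out[BS])
{
	for (int i = 0; i < BS; i++)
		out[i] = (uint8_t)(in[i] ^ key[i] ^ (i * 29 + 3));
}

static void cfb_crypt(const uint8_t key[BS], const uint8_t iv[BS],
		      const uint8_t *in, uint8_t *out, size_t len, int enc)
{
	uint8_t feedback[BS], pad[BS];

	memcpy(feedback, iv, BS);
	while (len) {
		size_t n = len < BS ? len : BS;

		/* The pad is always produced by *encrypting* the feedback
		 * block, for both encryption and decryption. */
		block_encrypt(key, feedback, pad);
		for (size_t i = 0; i < n; i++)
			out[i] = in[i] ^ pad[i];

		/* The next feedback block is the ciphertext just handled:
		 * the output when encrypting, the input when decrypting. */
		memcpy(feedback, enc ? out : in, n);

		in += n;
		out += n;
		len -= n;
	}
}

int main(void)
{
	uint8_t key[BS] = { 1 }, iv[BS] = { 2 };
	uint8_t msg[37] = "CFB handles a trailing partial block";
	uint8_t ct[sizeof(msg)], pt[sizeof(msg)];

	cfb_crypt(key, iv, msg, ct, sizeof(msg), 1);
	cfb_crypt(key, iv, ct, pt, sizeof(msg), 0);
	printf("%s\n", memcmp(msg, pt, sizeof(msg)) == 0 ? "round trip OK" : "mismatch");
	return 0;
}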
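
Once the template is registered, other kernel code can instantiate it on top of any registered block cipher by name, e.g. "cfb(aes)". The following is only a hedged sketch of a one-shot synchronous encryption through the skcipher API; cfb_aes_demo(), the all-zero key and the dummy plaintext are placeholders, not part of this patch.

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int cfb_aes_demo(void)
{
	u8 key[16] = { 0 };			/* placeholder all-zero key */
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 *buf = NULL, *iv = NULL;
	int err;

	/* "cfb(aes)" asks the crypto API to wrap the cfb template around aes. */
	tfm = crypto_alloc_skcipher("cfb(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out;

	err = -ENOMEM;
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	buf = kmalloc(64, GFP_KERNEL);		/* data buffer, encrypted in place */
	iv = kmalloc(crypto_skcipher_ivsize(tfm), GFP_KERNEL);
	if (!req || !buf || !iv)
		goto out;

	memset(buf, 0x41, 64);			/* dummy plaintext */
	get_random_bytes(iv, crypto_skcipher_ivsize(tfm));

	sg_init_one(&sg, buf, 64);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, 64, iv);

	/* Wait synchronously even if the selected implementation is async. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out:
	kfree(iv);
	kfree(buf);
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return err;
}

Because the instance sets cra_blocksize to 1 and only uses chunksize internally, callers may pass requests of any length, and only the final partial block takes the crypto_cfb_final() path.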