Diffstat (limited to 'debian/patches-rt/0240-x86-crypto-Reduce-preempt-disabled-regions.patch')
-rw-r--r-- | debian/patches-rt/0240-x86-crypto-Reduce-preempt-disabled-regions.patch | 118 |
1 file changed, 118 insertions, 0 deletions
diff --git a/debian/patches-rt/0240-x86-crypto-Reduce-preempt-disabled-regions.patch b/debian/patches-rt/0240-x86-crypto-Reduce-preempt-disabled-regions.patch
new file mode 100644
index 000000000..b84653cfb
--- /dev/null
+++ b/debian/patches-rt/0240-x86-crypto-Reduce-preempt-disabled-regions.patch
@@ -0,0 +1,118 @@
+From 523f64f1a87e6454e46f5e25ae797b6a7050a6a8 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 14 Nov 2011 18:19:27 +0100
+Subject: [PATCH 240/323] x86: crypto: Reduce preempt disabled regions
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+Restrict the preempt disabled regions to the actual floating point
+operations and enable preemption for the administrative actions.
+
+This is necessary on RT to avoid that kfree and other operations are
+called with preemption disabled.
+
+Reported-and-tested-by: Carsten Emde <cbe@osadl.org>
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/x86/crypto/aesni-intel_glue.c | 22 ++++++++++++----------
+ 1 file changed, 12 insertions(+), 10 deletions(-)
+
+diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
+index be891fdf8d17..29c716ed103f 100644
+--- a/arch/x86/crypto/aesni-intel_glue.c
++++ b/arch/x86/crypto/aesni-intel_glue.c
+@@ -379,14 +379,14 @@ static int ecb_encrypt(struct skcipher_request *req)
+ 
+ 	err = skcipher_walk_virt(&walk, req, true);
+ 
+-	kernel_fpu_begin();
+ 	while ((nbytes = walk.nbytes)) {
++		kernel_fpu_begin();
+ 		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ 			      nbytes & AES_BLOCK_MASK);
++		kernel_fpu_end();
+ 		nbytes &= AES_BLOCK_SIZE - 1;
+ 		err = skcipher_walk_done(&walk, nbytes);
+ 	}
+-	kernel_fpu_end();
+ 
+ 	return err;
+ }
+@@ -401,14 +401,14 @@ static int ecb_decrypt(struct skcipher_request *req)
+ 
+ 	err = skcipher_walk_virt(&walk, req, true);
+ 
+-	kernel_fpu_begin();
+ 	while ((nbytes = walk.nbytes)) {
++		kernel_fpu_begin();
+ 		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ 			      nbytes & AES_BLOCK_MASK);
++		kernel_fpu_end();
+ 		nbytes &= AES_BLOCK_SIZE - 1;
+ 		err = skcipher_walk_done(&walk, nbytes);
+ 	}
+-	kernel_fpu_end();
+ 
+ 	return err;
+ }
+@@ -423,14 +423,14 @@ static int cbc_encrypt(struct skcipher_request *req)
+ 
+ 	err = skcipher_walk_virt(&walk, req, true);
+ 
+-	kernel_fpu_begin();
+ 	while ((nbytes = walk.nbytes)) {
++		kernel_fpu_begin();
+ 		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ 			      nbytes & AES_BLOCK_MASK, walk.iv);
++		kernel_fpu_end();
+ 		nbytes &= AES_BLOCK_SIZE - 1;
+ 		err = skcipher_walk_done(&walk, nbytes);
+ 	}
+-	kernel_fpu_end();
+ 
+ 	return err;
+ }
+@@ -445,14 +445,14 @@ static int cbc_decrypt(struct skcipher_request *req)
+ 
+ 	err = skcipher_walk_virt(&walk, req, true);
+ 
+-	kernel_fpu_begin();
+ 	while ((nbytes = walk.nbytes)) {
++		kernel_fpu_begin();
+ 		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ 			      nbytes & AES_BLOCK_MASK, walk.iv);
++		kernel_fpu_end();
+ 		nbytes &= AES_BLOCK_SIZE - 1;
+ 		err = skcipher_walk_done(&walk, nbytes);
+ 	}
+-	kernel_fpu_end();
+ 
+ 	return err;
+ }
+@@ -500,18 +500,20 @@ static int ctr_crypt(struct skcipher_request *req)
+ 
+ 	err = skcipher_walk_virt(&walk, req, true);
+ 
+-	kernel_fpu_begin();
+ 	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
++		kernel_fpu_begin();
+ 		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ 			          nbytes & AES_BLOCK_MASK, walk.iv);
++		kernel_fpu_end();
+ 		nbytes &= AES_BLOCK_SIZE - 1;
+ 		err = skcipher_walk_done(&walk, nbytes);
+ 	}
+ 	if (walk.nbytes) {
++		kernel_fpu_begin();
+ 		ctr_crypt_final(ctx, &walk);
++		kernel_fpu_end();
+ 		err = skcipher_walk_done(&walk, 0);
+ 	}
+-	kernel_fpu_end();
+ 
+ 	return err;
+ }
+-- 
+2.43.0
+
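
The shape of the change is the same in every hunk: the kernel_fpu_begin()/kernel_fpu_end() pair moves from around the whole skcipher walk loop to around the cipher call inside each iteration, so skcipher_walk_done() (which can free memory) is no longer called with preemption disabled on PREEMPT_RT. Below is a minimal userspace sketch of that restructuring; it is illustrative only, and every function in it is a hypothetical stand-in rather than the real kernel API.

/*
 * Illustrative sketch, not part of the patch above and not kernel code.
 * It models narrowing the preempt-disabled FPU region to the per-chunk
 * cipher work, so the bookkeeping step runs preemptible.
 */
#include <stdio.h>

static void fpu_begin(void) { puts("enter FPU section (preemption off)"); }
static void fpu_end(void)   { puts("leave FPU section (preemption on)"); }

/* stand-in for aesni_ecb_enc() operating on one chunk of the walk */
static void encrypt_chunk(int i) { printf("  encrypt chunk %d\n", i); }

/* stand-in for skcipher_walk_done(), which may free memory */
static void finish_chunk(int i)  { printf("  bookkeeping for chunk %d\n", i); }

int main(void)
{
	for (int i = 0; i < 3; i++) {
		fpu_begin();      /* narrow region: only the cipher work */
		encrypt_chunk(i);
		fpu_end();
		finish_chunk(i);  /* runs with preemption enabled */
	}
	return 0;
}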