author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:38 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:38 +0000
commit     08b74a000942a380fe028845f92cd3a0dee827d5 (patch)
tree       aa78b4e12607c3e1fcce8d5cc42df4330792f118 /debian/patches-rt/0220-x86-crypto-Reduce-preempt-disabled-regions.patch
parent     Adding upstream version 4.19.249. (diff)
Adding debian version 4.19.249-2. (tag: debian/4.19.249-2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0220-x86-crypto-Reduce-preempt-disabled-regions.patch')
-rw-r--r--  debian/patches-rt/0220-x86-crypto-Reduce-preempt-disabled-regions.patch | 118
1 file changed, 118 insertions, 0 deletions
diff --git a/debian/patches-rt/0220-x86-crypto-Reduce-preempt-disabled-regions.patch b/debian/patches-rt/0220-x86-crypto-Reduce-preempt-disabled-regions.patch
new file mode 100644
index 000000000..eeb0ae4e6
--- /dev/null
+++ b/debian/patches-rt/0220-x86-crypto-Reduce-preempt-disabled-regions.patch
@@ -0,0 +1,118 @@
+From 795856da132e0e41e6ba20d6020b7016f23159cf Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 14 Nov 2011 18:19:27 +0100
+Subject: [PATCH 220/347] x86: crypto: Reduce preempt disabled regions
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+Restrict the preempt disabled regions to the actual floating point
+operations and enable preemption for the administrative actions.
+
+This is necessary on RT to avoid that kfree and other operations are
+called with preemption disabled.
+
+Reported-and-tested-by: Carsten Emde <cbe@osadl.org>
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/x86/crypto/aesni-intel_glue.c | 22 ++++++++++++----------
+ 1 file changed, 12 insertions(+), 10 deletions(-)
+
+diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
+index 917f25e4d0a8..58d8c03fc32d 100644
+--- a/arch/x86/crypto/aesni-intel_glue.c
++++ b/arch/x86/crypto/aesni-intel_glue.c
+@@ -434,14 +434,14 @@ static int ecb_encrypt(struct skcipher_request *req)
+ 
+ 	err = skcipher_walk_virt(&walk, req, true);
+ 
+-	kernel_fpu_begin();
+ 	while ((nbytes = walk.nbytes)) {
++		kernel_fpu_begin();
+ 		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ 			      nbytes & AES_BLOCK_MASK);
++		kernel_fpu_end();
+ 		nbytes &= AES_BLOCK_SIZE - 1;
+ 		err = skcipher_walk_done(&walk, nbytes);
+ 	}
+-	kernel_fpu_end();
+ 
+ 	return err;
+ }
+@@ -456,14 +456,14 @@ static int ecb_decrypt(struct skcipher_request *req)
+ 
+ 	err = skcipher_walk_virt(&walk, req, true);
+ 
+-	kernel_fpu_begin();
+ 	while ((nbytes = walk.nbytes)) {
++		kernel_fpu_begin();
+ 		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ 			      nbytes & AES_BLOCK_MASK);
++		kernel_fpu_end();
+ 		nbytes &= AES_BLOCK_SIZE - 1;
+ 		err = skcipher_walk_done(&walk, nbytes);
+ 	}
+-	kernel_fpu_end();
+ 
+ 	return err;
+ }
+@@ -478,14 +478,14 @@ static int cbc_encrypt(struct skcipher_request *req)
+ 
+ 	err = skcipher_walk_virt(&walk, req, true);
+ 
+-	kernel_fpu_begin();
+ 	while ((nbytes = walk.nbytes)) {
++		kernel_fpu_begin();
+ 		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ 			      nbytes & AES_BLOCK_MASK, walk.iv);
++		kernel_fpu_end();
+ 		nbytes &= AES_BLOCK_SIZE - 1;
+ 		err = skcipher_walk_done(&walk, nbytes);
+ 	}
+-	kernel_fpu_end();
+ 
+ 	return err;
+ }
+@@ -500,14 +500,14 @@ static int cbc_decrypt(struct skcipher_request *req)
+ 
+ 	err = skcipher_walk_virt(&walk, req, true);
+ 
+-	kernel_fpu_begin();
+ 	while ((nbytes = walk.nbytes)) {
++		kernel_fpu_begin();
+ 		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ 			      nbytes & AES_BLOCK_MASK, walk.iv);
++		kernel_fpu_end();
+ 		nbytes &= AES_BLOCK_SIZE - 1;
+ 		err = skcipher_walk_done(&walk, nbytes);
+ 	}
+-	kernel_fpu_end();
+ 
+ 	return err;
+ }
+@@ -557,18 +557,20 @@ static int ctr_crypt(struct skcipher_request *req)
+ 
+ 	err = skcipher_walk_virt(&walk, req, true);
+ 
+-	kernel_fpu_begin();
+ 	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
++		kernel_fpu_begin();
+ 		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ 			              nbytes & AES_BLOCK_MASK, walk.iv);
++		kernel_fpu_end();
+ 		nbytes &= AES_BLOCK_SIZE - 1;
+ 		err = skcipher_walk_done(&walk, nbytes);
+ 	}
+ 	if (walk.nbytes) {
++		kernel_fpu_begin();
+ 		ctr_crypt_final(ctx, &walk);
++		kernel_fpu_end();
+ 		err = skcipher_walk_done(&walk, 0);
+ 	}
+-	kernel_fpu_end();
+ 
+ 	return err;
+ }
+-- 
+2.36.1
+
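For readers skimming the patch above: the change it carries moves each kernel_fpu_begin()/kernel_fpu_end() pair (which disables preemption on x86) from around the whole skcipher walk loop to around only the SIMD cipher call in each iteration, so that skcipher_walk_done(), which may allocate or free memory, runs with preemption enabled on RT. The stand-alone C sketch below illustrates just that restructuring; fpu_begin(), fpu_end(), simd_cipher_chunk(), and walk_advance() are hypothetical stubs standing in for the kernel primitives, not real kernel API.

```c
#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-ins for the kernel primitives touched by the patch
 * (kernel_fpu_begin()/kernel_fpu_end() and the skcipher walk helpers).
 * They only print what the real calls would do. */
static void fpu_begin(void) { puts("  fpu_begin  -> preemption disabled"); }
static void fpu_end(void)   { puts("  fpu_end    -> preemption enabled again"); }

static void simd_cipher_chunk(size_t n)
{
	printf("    encrypt %zu bytes with SIMD (needs FPU protection)\n", n);
}

static size_t walk_advance(size_t left)
{
	/* Models skcipher_walk_done(): bookkeeping that may allocate or
	 * free memory, so on RT it must not run with preemption disabled. */
	puts("    walk_advance: bookkeeping, may allocate/free");
	return left > 64 ? left - 64 : 0;
}

/* Old shape: one coarse preempt-disabled region around the whole loop,
 * so the bookkeeping runs with preemption off as well. */
static void crypt_coarse(size_t total)
{
	fpu_begin();
	for (size_t left = total; left; left = walk_advance(left))
		simd_cipher_chunk(left > 64 ? 64 : left);
	fpu_end();
}

/* New shape (what the patch does): protect only the SIMD work in each
 * iteration; walk_advance() runs preemptible. */
static void crypt_fine(size_t total)
{
	for (size_t left = total; left; left = walk_advance(left)) {
		fpu_begin();
		simd_cipher_chunk(left > 64 ? 64 : left);
		fpu_end();
	}
}

int main(void)
{
	puts("coarse-grained region:");
	crypt_coarse(128);
	puts("fine-grained region:");
	crypt_fine(128);
	return 0;
}
```

The trade-off is one extra FPU begin/end transition per processed chunk; the RT series accepts that overhead in exchange for short, bounded preempt-disabled sections.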