| field | value | date |
|---|---|---|
| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 10:06:00 +0000 |
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 10:06:00 +0000 |
| commit | b15a952c52a6825376d3e7f6c1bf5c886c6d8b74 (patch) | |
| tree | 1500f2f8f276908a36d8126cb632c0d6b1276764 /debian/patches-rt/0244-x86-stackprotector-Avoid-random-pool-on-rt.patch | |
| parent | Adding upstream version 5.10.209. (diff) | |
| download | linux-b15a952c52a6825376d3e7f6c1bf5c886c6d8b74.tar.xz, linux-b15a952c52a6825376d3e7f6c1bf5c886c6d8b74.zip | |
Adding debian version 5.10.209-2. (debian/5.10.209-2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0244-x86-stackprotector-Avoid-random-pool-on-rt.patch')
| mode | file | insertions |
|---|---|---|
| -rw-r--r-- | debian/patches-rt/0244-x86-stackprotector-Avoid-random-pool-on-rt.patch | 51 |
1 file changed, 51 insertions, 0 deletions
diff --git a/debian/patches-rt/0244-x86-stackprotector-Avoid-random-pool-on-rt.patch b/debian/patches-rt/0244-x86-stackprotector-Avoid-random-pool-on-rt.patch
new file mode 100644
index 000000000..c8e46ef39
--- /dev/null
+++ b/debian/patches-rt/0244-x86-stackprotector-Avoid-random-pool-on-rt.patch
@@ -0,0 +1,51 @@
+From f7c4220e56be1d8fc2bc339984fb5919ee02cf4f Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 16 Dec 2010 14:25:18 +0100
+Subject: [PATCH 244/323] x86: stackprotector: Avoid random pool on rt
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+CPU bringup calls into the random pool to initialize the stack
+canary. During boot that works nicely even on RT as the might sleep
+checks are disabled. During CPU hotplug the might sleep checks
+trigger. Making the locks in random raw is a major PITA, so avoid the
+call on RT is the only sensible solution. This is basically the same
+randomness which we get during boot where the random pool has no
+entropy and we rely on the TSC randomnness.
+
+Reported-by: Carsten Emde <carsten.emde@osadl.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/x86/include/asm/stackprotector.h | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
+index 7fb482f0f25b..3df0a95c9e13 100644
+--- a/arch/x86/include/asm/stackprotector.h
++++ b/arch/x86/include/asm/stackprotector.h
+@@ -65,7 +65,7 @@
+  */
+ static __always_inline void boot_init_stack_canary(void)
+ {
+-	u64 canary;
++	u64 canary = 0;
+ 	u64 tsc;
+ 
+ #ifdef CONFIG_X86_64
+@@ -76,8 +76,14 @@ static __always_inline void boot_init_stack_canary(void)
+ 	 * of randomness. The TSC only matters for very early init,
+ 	 * there it already has some randomness on most systems. Later
+ 	 * on during the bootup the random pool has true entropy too.
++	 * For preempt-rt we need to weaken the randomness a bit, as
++	 * we can't call into the random generator from atomic context
++	 * due to locking constraints. We just leave canary
++	 * uninitialized and use the TSC based randomness on top of it.
+ 	 */
++#ifndef CONFIG_PREEMPT_RT
+ 	get_random_bytes(&canary, sizeof(canary));
++#endif
+ 	tsc = rdtsc();
+ 	canary += tsc + (tsc << 32UL);
+ 	canary &= CANARY_MASK;
+--
+2.43.0
+
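For context, the pattern the patch applies can be sketched outside the kernel: zero-initialize the canary, skip the random-pool call when an RT-style configuration forbids calling it from this context, and mix in TSC-derived entropy on top. The following is a minimal user-space sketch, not the kernel implementation; `CONFIG_PREEMPT_RT_SKETCH`, `get_random_bytes_sketch()`, the use of `__rdtsc()` from `<x86intrin.h>`, and the assumption that `CANARY_MASK` clears the low byte on 64-bit are illustrative stand-ins for the real kernel symbols.

```c
/*
 * User-space sketch of the boot_init_stack_canary() pattern from the
 * patch above. Stand-in names (not real kernel symbols):
 *   CONFIG_PREEMPT_RT_SKETCH  - plays the role of CONFIG_PREEMPT_RT
 *   get_random_bytes_sketch() - plays the role of get_random_bytes()
 * Build on x86 with: gcc -O2 canary_sketch.c -o canary_sketch
 */
#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>		/* __rdtsc() */

/* Assumed to mirror the kernel's 64-bit CANARY_MASK: low byte zeroed. */
#define CANARY_MASK_SKETCH 0xffffffffffffff00ULL

static uint64_t get_random_bytes_sketch(void)
{
	/* Placeholder entropy source standing in for the random pool. */
	return 0x1234567890abcdefULL;
}

static uint64_t boot_init_stack_canary_sketch(void)
{
	uint64_t canary = 0;	/* explicit zero so the RT path is well defined */
	uint64_t tsc;

#ifndef CONFIG_PREEMPT_RT_SKETCH
	/* Non-RT path: pull real randomness from the pool. */
	canary = get_random_bytes_sketch();
#endif
	/* Both paths mix in the TSC, as the kernel code does. */
	tsc = __rdtsc();
	canary += tsc + (tsc << 32UL);
	canary &= CANARY_MASK_SKETCH;

	return canary;
}

int main(void)
{
	printf("canary: %#llx\n",
	       (unsigned long long)boot_init_stack_canary_sketch());
	return 0;
}
```

The point the sketch mirrors is the second half of the patch: because the RT branch skips the random-pool call entirely, the canary must be explicitly zero-initialized so the subsequent TSC mixing operates on a defined value rather than an uninitialized one.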