diff options
Diffstat (limited to 'debian/patches/bugfix/x86/gds')
21 files changed, 2402 insertions, 0 deletions
diff --git a/debian/patches/bugfix/x86/gds/arm-cpu-switch-to-arch_cpu_finalize_init.patch b/debian/patches/bugfix/x86/gds/arm-cpu-switch-to-arch_cpu_finalize_init.patch new file mode 100644 index 000000000..8b3c04d39 --- /dev/null +++ b/debian/patches/bugfix/x86/gds/arm-cpu-switch-to-arch_cpu_finalize_init.patch @@ -0,0 +1,72 @@ +From 980d7eb01638f535e9ab885449c0bbb4cec0fde6 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner <tglx@linutronix.de> +Date: Wed, 14 Jun 2023 01:39:25 +0200 +Subject: ARM: cpu: Switch to arch_cpu_finalize_init() + +From: Thomas Gleixner <tglx@linutronix.de> + +commit ee31bb0524a2e7c99b03f50249a411cc1eaa411f upstream + +check_bugs() is about to be phased out. Switch over to the new +arch_cpu_finalize_init() implementation. + +No functional change. + +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Link: https://lore.kernel.org/r/20230613224545.078124882@linutronix.de +Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/arm/Kconfig | 1 + + arch/arm/include/asm/bugs.h | 4 ---- + arch/arm/kernel/bugs.c | 3 ++- + 3 files changed, 3 insertions(+), 5 deletions(-) + +--- a/arch/arm/Kconfig ++++ b/arch/arm/Kconfig +@@ -4,6 +4,7 @@ config ARM + default y + select ARCH_CLOCKSOURCE_DATA + select ARCH_DISCARD_MEMBLOCK if !HAVE_ARCH_PFN_VALID && !KEXEC ++ select ARCH_HAS_CPU_FINALIZE_INIT if MMU + select ARCH_HAS_DEBUG_VIRTUAL if MMU + select ARCH_HAS_DEVMEM_IS_ALLOWED + select ARCH_HAS_ELF_RANDOMIZE +--- a/arch/arm/include/asm/bugs.h ++++ b/arch/arm/include/asm/bugs.h +@@ -1,6 +1,4 @@ + /* +- * arch/arm/include/asm/bugs.h +- * + * Copyright (C) 1995-2003 Russell King + * + * This program is free software; you can redistribute it and/or modify +@@ -13,10 +11,8 @@ + extern void check_writebuffer_bugs(void); + + #ifdef CONFIG_MMU +-extern void check_bugs(void); + extern void check_other_bugs(void); + #else +-#define check_bugs() do { } while (0) + #define 
check_other_bugs() do { } while (0) + #endif + +--- a/arch/arm/kernel/bugs.c ++++ b/arch/arm/kernel/bugs.c +@@ -1,5 +1,6 @@ + // SPDX-Identifier: GPL-2.0 + #include <linux/init.h> ++#include <linux/cpu.h> + #include <asm/bugs.h> + #include <asm/proc-fns.h> + +@@ -11,7 +12,7 @@ void check_other_bugs(void) + #endif + } + +-void __init check_bugs(void) ++void __init arch_cpu_finalize_init(void) + { + check_writebuffer_bugs(); + check_other_bugs(); diff --git a/debian/patches/bugfix/x86/gds/documentation-x86-fix-backwards-on-off-logic-about-ymm-support.patch b/debian/patches/bugfix/x86/gds/documentation-x86-fix-backwards-on-off-logic-about-ymm-support.patch new file mode 100644 index 000000000..7bb1f3261 --- /dev/null +++ b/debian/patches/bugfix/x86/gds/documentation-x86-fix-backwards-on-off-logic-about-ymm-support.patch @@ -0,0 +1,31 @@ +From 1b0fc0345f2852ffe54fb9ae0e12e2ee69ad6a20 Mon Sep 17 00:00:00 2001 +From: Dave Hansen <dave.hansen@linux.intel.com> +Date: Tue, 1 Aug 2023 07:31:07 -0700 +Subject: Documentation/x86: Fix backwards on/off logic about YMM support + +From: Dave Hansen <dave.hansen@linux.intel.com> + +commit 1b0fc0345f2852ffe54fb9ae0e12e2ee69ad6a20 upstream + +These options clearly turn *off* XSAVE YMM support. Correct the +typo. + +Reported-by: Ben Hutchings <ben@decadent.org.uk> +Fixes: 553a5c03e90a ("x86/speculation: Add force option to GDS mitigation") +Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + Documentation/admin-guide/hw-vuln/gather_data_sampling.rst | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst ++++ b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst +@@ -63,7 +63,7 @@ GDS can also be mitigated on systems tha + disabling AVX. This can be done by setting gather_data_sampling="force" or + "clearcpuid=avx" on the kernel command-line. 
+ +-If used, these options will disable AVX use by turning on XSAVE YMM support. ++If used, these options will disable AVX use by turning off XSAVE YMM support. + However, the processor will still enumerate AVX support. Userspace that + does not follow proper AVX enumeration to check both AVX *and* XSAVE YMM + support will break. diff --git a/debian/patches/bugfix/x86/gds/ia64-cpu-switch-to-arch_cpu_finalize_init.patch b/debian/patches/bugfix/x86/gds/ia64-cpu-switch-to-arch_cpu_finalize_init.patch new file mode 100644 index 000000000..12aa098f7 --- /dev/null +++ b/debian/patches/bugfix/x86/gds/ia64-cpu-switch-to-arch_cpu_finalize_init.patch @@ -0,0 +1,70 @@ +From dff0dc6635f86b571b1bb61f3f3525b3763c3566 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner <tglx@linutronix.de> +Date: Wed, 14 Jun 2023 01:39:27 +0200 +Subject: ia64/cpu: Switch to arch_cpu_finalize_init() + +From: Thomas Gleixner <tglx@linutronix.de> + +commit 6c38e3005621800263f117fb00d6787a76e16de7 upstream + +check_bugs() is about to be phased out. Switch over to the new +arch_cpu_finalize_init() implementation. + +No functional change. 
+ +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Link: https://lore.kernel.org/r/20230613224545.137045745@linutronix.de +Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/ia64/Kconfig | 1 + + arch/ia64/include/asm/bugs.h | 20 -------------------- + arch/ia64/kernel/setup.c | 3 +-- + 3 files changed, 2 insertions(+), 22 deletions(-) + delete mode 100644 arch/ia64/include/asm/bugs.h + +--- a/arch/ia64/Kconfig ++++ b/arch/ia64/Kconfig +@@ -8,6 +8,7 @@ menu "Processor type and features" + + config IA64 + bool ++ select ARCH_HAS_CPU_FINALIZE_INIT + select ARCH_MIGHT_HAVE_PC_PARPORT + select ARCH_MIGHT_HAVE_PC_SERIO + select PCI if (!IA64_HP_SIM) +--- a/arch/ia64/include/asm/bugs.h ++++ /dev/null +@@ -1,20 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * This is included by init/main.c to check for architecture-dependent bugs. +- * +- * Needs: +- * void check_bugs(void); +- * +- * Based on <asm-alpha/bugs.h>. +- * +- * Modified 1998, 1999, 2003 +- * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co. 
+- */ +-#ifndef _ASM_IA64_BUGS_H +-#define _ASM_IA64_BUGS_H +- +-#include <asm/processor.h> +- +-extern void check_bugs (void); +- +-#endif /* _ASM_IA64_BUGS_H */ +--- a/arch/ia64/kernel/setup.c ++++ b/arch/ia64/kernel/setup.c +@@ -1050,8 +1050,7 @@ cpu_init (void) + platform_cpu_init(); + } + +-void __init +-check_bugs (void) ++void __init arch_cpu_finalize_init(void) + { + ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles, + (unsigned long) __end___mckinley_e9_bundles); diff --git a/debian/patches/bugfix/x86/gds/init-invoke-arch_cpu_finalize_init-earlier.patch b/debian/patches/bugfix/x86/gds/init-invoke-arch_cpu_finalize_init-earlier.patch new file mode 100644 index 000000000..bcb206f87 --- /dev/null +++ b/debian/patches/bugfix/x86/gds/init-invoke-arch_cpu_finalize_init-earlier.patch @@ -0,0 +1,58 @@ +From 4f8644b469a237107a34deb77ca301377c7def7c Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner <tglx@linutronix.de> +Date: Wed, 14 Jun 2023 01:39:39 +0200 +Subject: init: Invoke arch_cpu_finalize_init() earlier + +From: Thomas Gleixner <tglx@linutronix.de> + +commit 9df9d2f0471b4c4702670380b8d8a45b40b23a7d upstream + +X86 is reworking the boot process so that initializations which are not +required during early boot can be moved into the late boot process and out +of the fragile and restricted initial boot phase. + +arch_cpu_finalize_init() is the obvious place to do such initializations, +but arch_cpu_finalize_init() is invoked too late in start_kernel() e.g. for +initializing the FPU completely. fork_init() requires that the FPU is +initialized as the size of task_struct on X86 depends on the size of the +required FPU register buffer. + +Fortunately none of the init calls between calibrate_delay() and +arch_cpu_finalize_init() is relevant for the functionality of +arch_cpu_finalize_init(). + +Invoke it right after calibrate_delay() where everything which is relevant +for arch_cpu_finalize_init() has been set up already. 
+ +No functional change intended. + +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Reviewed-by: Rick Edgecombe <rick.p.edgecombe@intel.com> +Link: https://lore.kernel.org/r/20230613224545.612182854@linutronix.de +Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + init/main.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +--- a/init/main.c ++++ b/init/main.c +@@ -699,6 +699,9 @@ asmlinkage __visible void __init start_k + late_time_init(); + sched_clock_init(); + calibrate_delay(); ++ ++ arch_cpu_finalize_init(); ++ + pid_idr_init(); + anon_vma_init(); + #ifdef CONFIG_X86 +@@ -726,8 +729,6 @@ asmlinkage __visible void __init start_k + delayacct_init(); + + +- arch_cpu_finalize_init(); +- + acpi_subsystem_init(); + arch_post_acpi_subsys_init(); + sfi_init_late(); diff --git a/debian/patches/bugfix/x86/gds/init-provide-arch_cpu_finalize_init.patch b/debian/patches/bugfix/x86/gds/init-provide-arch_cpu_finalize_init.patch new file mode 100644 index 000000000..9e56c09a8 --- /dev/null +++ b/debian/patches/bugfix/x86/gds/init-provide-arch_cpu_finalize_init.patch @@ -0,0 +1,77 @@ +From 11bc27b01a313cc489c807ceddb0a93a6770fd11 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner <tglx@linutronix.de> +Date: Wed, 14 Jun 2023 01:39:22 +0200 +Subject: init: Provide arch_cpu_finalize_init() + +From: Thomas Gleixner <tglx@linutronix.de> + +commit 7725acaa4f0c04fbefb0e0d342635b967bb7d414 upstream + +check_bugs() has become a dumping ground for all sorts of activities to +finalize the CPU initialization before running the rest of the init code. + +Most are empty, a few do actual bug checks, some do alternative patching +and some cobble a CPU advertisement string together.... + +Aside of that the current implementation requires duplicated function +declaration and mostly empty header files for them. + +Provide a new function arch_cpu_finalize_init(). 
Provide a generic +declaration if CONFIG_ARCH_HAS_CPU_FINALIZE_INIT is selected and a stub +inline otherwise. + +This requires a temporary #ifdef in start_kernel() which will be removed +along with check_bugs() once the architectures are converted over. + +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Link: https://lore.kernel.org/r/20230613224544.957805717@linutronix.de +Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/Kconfig | 3 +++ + include/linux/cpu.h | 6 ++++++ + init/main.c | 5 +++++ + 3 files changed, 14 insertions(+) + +--- a/arch/Kconfig ++++ b/arch/Kconfig +@@ -249,6 +249,9 @@ config ARCH_HAS_FORTIFY_SOURCE + config ARCH_HAS_SET_MEMORY + bool + ++config ARCH_HAS_CPU_FINALIZE_INIT ++ bool ++ + # Select if arch init_task must go in the __init_task_data section + config ARCH_TASK_STRUCT_ON_STACK + bool +--- a/include/linux/cpu.h ++++ b/include/linux/cpu.h +@@ -170,6 +170,12 @@ void arch_cpu_idle_enter(void); + void arch_cpu_idle_exit(void); + void arch_cpu_idle_dead(void); + ++#ifdef CONFIG_ARCH_HAS_CPU_FINALIZE_INIT ++void arch_cpu_finalize_init(void); ++#else ++static inline void arch_cpu_finalize_init(void) { } ++#endif ++ + int cpu_report_state(int cpu); + int cpu_check_up_prepare(int cpu); + void cpu_set_state_online(int cpu); +--- a/init/main.c ++++ b/init/main.c +@@ -726,7 +726,12 @@ asmlinkage __visible void __init start_k + taskstats_init_early(); + delayacct_init(); + ++ ++ arch_cpu_finalize_init(); ++ /* Temporary conditional until everything has been converted */ ++#ifndef CONFIG_ARCH_HAS_CPU_FINALIZE_INIT + check_bugs(); ++#endif + + acpi_subsystem_init(); + arch_post_acpi_subsys_init(); diff --git a/debian/patches/bugfix/x86/gds/init-remove-check_bugs-leftovers.patch b/debian/patches/bugfix/x86/gds/init-remove-check_bugs-leftovers.patch new file mode 100644 index 000000000..8698e892d --- /dev/null +++ 
b/debian/patches/bugfix/x86/gds/init-remove-check_bugs-leftovers.patch @@ -0,0 +1,155 @@ +From eb6d42cbbc5b7384bebb9b783970c5b07ac10fc5 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner <tglx@linutronix.de> +Date: Wed, 14 Jun 2023 01:39:38 +0200 +Subject: init: Remove check_bugs() leftovers + +From: Thomas Gleixner <tglx@linutronix.de> + +commit 61235b24b9cb37c13fcad5b9596d59a1afdcec30 upstream + +Everything is converted over to arch_cpu_finalize_init(). Remove the +check_bugs() leftovers including the empty stubs in asm-generic, alpha, +parisc, powerpc and xtensa. + +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Reviewed-by: Richard Henderson <richard.henderson@linaro.org> +Link: https://lore.kernel.org/r/20230613224545.553215951@linutronix.de +Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/alpha/include/asm/bugs.h | 20 -------------------- + arch/parisc/include/asm/bugs.h | 20 -------------------- + arch/powerpc/include/asm/bugs.h | 18 ------------------ + arch/xtensa/include/asm/bugs.h | 18 ------------------ + include/asm-generic/bugs.h | 11 ----------- + init/main.c | 5 ----- + 6 files changed, 92 deletions(-) + delete mode 100644 arch/alpha/include/asm/bugs.h + delete mode 100644 arch/parisc/include/asm/bugs.h + delete mode 100644 arch/powerpc/include/asm/bugs.h + delete mode 100644 arch/xtensa/include/asm/bugs.h + delete mode 100644 include/asm-generic/bugs.h + +--- a/arch/alpha/include/asm/bugs.h ++++ /dev/null +@@ -1,20 +0,0 @@ +-/* +- * include/asm-alpha/bugs.h +- * +- * Copyright (C) 1994 Linus Torvalds +- */ +- +-/* +- * This is included by init/main.c to check for architecture-dependent bugs. +- * +- * Needs: +- * void check_bugs(void); +- */ +- +-/* +- * I don't know of any alpha bugs yet.. 
Nice chip +- */ +- +-static void check_bugs(void) +-{ +-} +--- a/arch/parisc/include/asm/bugs.h ++++ /dev/null +@@ -1,20 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * include/asm-parisc/bugs.h +- * +- * Copyright (C) 1999 Mike Shaver +- */ +- +-/* +- * This is included by init/main.c to check for architecture-dependent bugs. +- * +- * Needs: +- * void check_bugs(void); +- */ +- +-#include <asm/processor.h> +- +-static inline void check_bugs(void) +-{ +-// identify_cpu(&boot_cpu_data); +-} +--- a/arch/powerpc/include/asm/bugs.h ++++ /dev/null +@@ -1,18 +0,0 @@ +-#ifndef _ASM_POWERPC_BUGS_H +-#define _ASM_POWERPC_BUGS_H +- +-/* +- * This program is free software; you can redistribute it and/or +- * modify it under the terms of the GNU General Public License +- * as published by the Free Software Foundation; either version +- * 2 of the License, or (at your option) any later version. +- */ +- +-/* +- * This file is included by 'init/main.c' to check for +- * architecture-dependent bugs. +- */ +- +-static inline void check_bugs(void) { } +- +-#endif /* _ASM_POWERPC_BUGS_H */ +--- a/arch/xtensa/include/asm/bugs.h ++++ /dev/null +@@ -1,18 +0,0 @@ +-/* +- * include/asm-xtensa/bugs.h +- * +- * This is included by init/main.c to check for architecture-dependent bugs. +- * +- * Xtensa processors don't have any bugs. :) +- * +- * This file is subject to the terms and conditions of the GNU General +- * Public License. See the file "COPYING" in the main directory of +- * this archive for more details. +- */ +- +-#ifndef _XTENSA_BUGS_H +-#define _XTENSA_BUGS_H +- +-static void check_bugs(void) { } +- +-#endif /* _XTENSA_BUGS_H */ +--- a/include/asm-generic/bugs.h ++++ /dev/null +@@ -1,11 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-#ifndef __ASM_GENERIC_BUGS_H +-#define __ASM_GENERIC_BUGS_H +-/* +- * This file is included by 'init/main.c' to check for +- * architecture-dependent bugs. 
+- */ +- +-static inline void check_bugs(void) { } +- +-#endif /* __ASM_GENERIC_BUGS_H */ +--- a/init/main.c ++++ b/init/main.c +@@ -94,7 +94,6 @@ + #include <linux/mem_encrypt.h> + + #include <asm/io.h> +-#include <asm/bugs.h> + #include <asm/setup.h> + #include <asm/sections.h> + #include <asm/cacheflush.h> +@@ -728,10 +727,6 @@ asmlinkage __visible void __init start_k + + + arch_cpu_finalize_init(); +- /* Temporary conditional until everything has been converted */ +-#ifndef CONFIG_ARCH_HAS_CPU_FINALIZE_INIT +- check_bugs(); +-#endif + + acpi_subsystem_init(); + arch_post_acpi_subsys_init(); diff --git a/debian/patches/bugfix/x86/gds/init-x86-move-mem_encrypt_init-into-arch_cpu_finalize_init.patch b/debian/patches/bugfix/x86/gds/init-x86-move-mem_encrypt_init-into-arch_cpu_finalize_init.patch new file mode 100644 index 000000000..aede33ff2 --- /dev/null +++ b/debian/patches/bugfix/x86/gds/init-x86-move-mem_encrypt_init-into-arch_cpu_finalize_init.patch @@ -0,0 +1,94 @@ +From 555b9962472818fba44eb42f31cfd1e118d20478 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner <tglx@linutronix.de> +Date: Wed, 14 Jun 2023 01:39:41 +0200 +Subject: init, x86: Move mem_encrypt_init() into arch_cpu_finalize_init() + +From: Thomas Gleixner <tglx@linutronix.de> + +commit 439e17576eb47f26b78c5bbc72e344d4206d2327 upstream + +Invoke the X86ism mem_encrypt_init() from X86 arch_cpu_finalize_init() and +remove the weak fallback from the core code. + +No functional change. 
+ +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Link: https://lore.kernel.org/r/20230613224545.670360645@linutronix.de +Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/x86/include/asm/mem_encrypt.h | 2 ++ + arch/x86/kernel/cpu/common.c | 11 +++++++++++ + init/main.c | 11 ----------- + 3 files changed, 13 insertions(+), 11 deletions(-) + +--- a/arch/x86/include/asm/mem_encrypt.h ++++ b/arch/x86/include/asm/mem_encrypt.h +@@ -80,6 +80,8 @@ early_set_memory_decrypted(unsigned long + static inline int __init + early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; } + ++static inline void mem_encrypt_init(void) { } ++ + #define __bss_decrypted + + #endif /* CONFIG_AMD_MEM_ENCRYPT */ +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -17,6 +17,7 @@ + #include <linux/init.h> + #include <linux/kprobes.h> + #include <linux/kgdb.h> ++#include <linux/mem_encrypt.h> + #include <linux/smp.h> + #include <linux/cpu.h> + #include <linux/io.h> +@@ -2151,4 +2152,14 @@ void __init arch_cpu_finalize_init(void) + } else { + fpu__init_check_bugs(); + } ++ ++ /* ++ * This needs to be called before any devices perform DMA ++ * operations that might use the SWIOTLB bounce buffers. It will ++ * mark the bounce buffers as decrypted so that their usage will ++ * not cause "plain-text" data to be decrypted when accessed. It ++ * must be called after late_time_init() so that Hyper-V x86/x64 ++ * hypercalls work when the SWIOTLB bounce buffers are decrypted. 
++ */ ++ mem_encrypt_init(); + } +--- a/init/main.c ++++ b/init/main.c +@@ -91,7 +91,6 @@ + #include <linux/cache.h> + #include <linux/rodata_test.h> + #include <linux/jump_label.h> +-#include <linux/mem_encrypt.h> + + #include <asm/io.h> + #include <asm/setup.h> +@@ -492,8 +491,6 @@ void __init __weak thread_stack_cache_in + } + #endif + +-void __init __weak mem_encrypt_init(void) { } +- + bool initcall_debug; + core_param(initcall_debug, initcall_debug, bool, 0644); + +@@ -673,14 +670,6 @@ asmlinkage __visible void __init start_k + */ + locking_selftest(); + +- /* +- * This needs to be called before any devices perform DMA +- * operations that might use the SWIOTLB bounce buffers. It will +- * mark the bounce buffers as decrypted so that their usage will +- * not cause "plain-text" data to be decrypted when accessed. +- */ +- mem_encrypt_init(); +- + #ifdef CONFIG_BLK_DEV_INITRD + if (initrd_start && !initrd_below_start_ok && + page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) { diff --git a/debian/patches/bugfix/x86/gds/kvm-add-gds_no-support-to-kvm.patch b/debian/patches/bugfix/x86/gds/kvm-add-gds_no-support-to-kvm.patch new file mode 100644 index 000000000..e16870f6e --- /dev/null +++ b/debian/patches/bugfix/x86/gds/kvm-add-gds_no-support-to-kvm.patch @@ -0,0 +1,69 @@ +From e9a103c76a5ffb605204f25222e6217931ff129b Mon Sep 17 00:00:00 2001 +From: Daniel Sneddon <daniel.sneddon@linux.intel.com> +Date: Wed, 12 Jul 2023 19:43:14 -0700 +Subject: KVM: Add GDS_NO support to KVM + +From: Daniel Sneddon <daniel.sneddon@linux.intel.com> + +commit 81ac7e5d741742d650b4ed6186c4826c1a0631a7 upstream + +Gather Data Sampling (GDS) is a transient execution attack using +gather instructions from the AVX2 and AVX512 extensions. This attack +allows malicious code to infer data that was previously stored in +vector registers. Systems that are not vulnerable to GDS will set the +GDS_NO bit of the IA32_ARCH_CAPABILITIES MSR. 
This is useful for VM +guests that may think they are on vulnerable systems that are, in +fact, not affected. Guests that are running on affected hosts where +the mitigation is enabled are protected as if they were running +on an unaffected system. + +On all hosts that are not affected or that are mitigated, set the +GDS_NO bit. + +Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com> +Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com> +Acked-by: Josh Poimboeuf <jpoimboe@kernel.org> +Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/x86/kernel/cpu/bugs.c | 7 +++++++ + arch/x86/kvm/x86.c | 5 +++++ + 2 files changed, 12 insertions(+) + +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -628,6 +628,13 @@ static const char * const gds_strings[] + [GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", + }; + ++bool gds_ucode_mitigated(void) ++{ ++ return (gds_mitigation == GDS_MITIGATION_FULL || ++ gds_mitigation == GDS_MITIGATION_FULL_LOCKED); ++} ++EXPORT_SYMBOL_GPL(gds_ucode_mitigated); ++ + void update_gds_msr(void) + { + u64 mcu_ctrl_after; +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -217,6 +217,8 @@ struct kvm_stats_debugfs_item debugfs_en + + u64 __read_mostly host_xcr0; + ++extern bool gds_ucode_mitigated(void); ++ + static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt); + + static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu) +@@ -1224,6 +1226,9 @@ u64 kvm_get_arch_capabilities(void) + /* Guests don't need to know "Fill buffer clear control" exists */ + data &= ~ARCH_CAP_FB_CLEAR_CTRL; + ++ if (!boot_cpu_has_bug(X86_BUG_GDS) || gds_ucode_mitigated()) ++ data |= ARCH_CAP_GDS_NO; ++ + return data; + } + diff --git a/debian/patches/bugfix/x86/gds/m68k-cpu-switch-to-arch_cpu_finalize_init.patch b/debian/patches/bugfix/x86/gds/m68k-cpu-switch-to-arch_cpu_finalize_init.patch new file mode 
100644 index 000000000..8cece9900 --- /dev/null +++ b/debian/patches/bugfix/x86/gds/m68k-cpu-switch-to-arch_cpu_finalize_init.patch @@ -0,0 +1,79 @@ +From ca442015ccef31abd0a73cd621c4a4da3a76d20b Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner <tglx@linutronix.de> +Date: Wed, 14 Jun 2023 01:39:30 +0200 +Subject: m68k/cpu: Switch to arch_cpu_finalize_init() + +From: Thomas Gleixner <tglx@linutronix.de> + +commit 9ceecc2589b9d7cef6b321339ed8de484eac4b20 upstream + +check_bugs() is about to be phased out. Switch over to the new +arch_cpu_finalize_init() implementation. + +No functional change. + +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Acked-by: Geert Uytterhoeven <geert@linux-m68k.org> +Link: https://lore.kernel.org/r/20230613224545.254342916@linutronix.de +Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/m68k/Kconfig | 1 + + arch/m68k/include/asm/bugs.h | 21 --------------------- + arch/m68k/kernel/setup_mm.c | 3 ++- + 3 files changed, 3 insertions(+), 22 deletions(-) + delete mode 100644 arch/m68k/include/asm/bugs.h + +--- a/arch/m68k/Kconfig ++++ b/arch/m68k/Kconfig +@@ -2,6 +2,7 @@ + config M68K + bool + default y ++ select ARCH_HAS_CPU_FINALIZE_INIT if MMU + select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA + select ARCH_MIGHT_HAVE_PC_PARPORT if ISA + select ARCH_NO_COHERENT_DMA_MMAP if !MMU +--- a/arch/m68k/include/asm/bugs.h ++++ /dev/null +@@ -1,21 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * include/asm-m68k/bugs.h +- * +- * Copyright (C) 1994 Linus Torvalds +- */ +- +-/* +- * This is included by init/main.c to check for architecture-dependent bugs. 
+- * +- * Needs: +- * void check_bugs(void); +- */ +- +-#ifdef CONFIG_MMU +-extern void check_bugs(void); /* in arch/m68k/kernel/setup.c */ +-#else +-static void check_bugs(void) +-{ +-} +-#endif +--- a/arch/m68k/kernel/setup_mm.c ++++ b/arch/m68k/kernel/setup_mm.c +@@ -10,6 +10,7 @@ + */ + + #include <linux/kernel.h> ++#include <linux/cpu.h> + #include <linux/mm.h> + #include <linux/sched.h> + #include <linux/delay.h> +@@ -526,7 +527,7 @@ static int __init proc_hardware_init(voi + module_init(proc_hardware_init); + #endif + +-void check_bugs(void) ++void __init arch_cpu_finalize_init(void) + { + #if defined(CONFIG_FPU) && !defined(CONFIG_M68KFPU_EMU) + if (m68k_fputype == 0) { diff --git a/debian/patches/bugfix/x86/gds/mips-cpu-switch-to-arch_cpu_finalize_init.patch b/debian/patches/bugfix/x86/gds/mips-cpu-switch-to-arch_cpu_finalize_init.patch new file mode 100644 index 000000000..575e1e0a5 --- /dev/null +++ b/debian/patches/bugfix/x86/gds/mips-cpu-switch-to-arch_cpu_finalize_init.patch @@ -0,0 +1,98 @@ +From 2bedb079d39e87a51a6af0a9606dbd147a9bbfde Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner <tglx@linutronix.de> +Date: Wed, 14 Jun 2023 01:39:32 +0200 +Subject: mips/cpu: Switch to arch_cpu_finalize_init() + +From: Thomas Gleixner <tglx@linutronix.de> + +commit 7f066a22fe353a827a402ee2835e81f045b1574d upstream + +check_bugs() is about to be phased out. Switch over to the new +arch_cpu_finalize_init() implementation. + +No functional change. 
+ +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Link: https://lore.kernel.org/r/20230613224545.312438573@linutronix.de +Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/mips/Kconfig | 1 + + arch/mips/include/asm/bugs.h | 17 ----------------- + arch/mips/kernel/setup.c | 13 +++++++++++++ + 3 files changed, 14 insertions(+), 17 deletions(-) + +--- a/arch/mips/Kconfig ++++ b/arch/mips/Kconfig +@@ -4,6 +4,7 @@ config MIPS + default y + select ARCH_BINFMT_ELF_STATE + select ARCH_CLOCKSOURCE_DATA ++ select ARCH_HAS_CPU_FINALIZE_INIT + select ARCH_DISCARD_MEMBLOCK + select ARCH_HAS_ELF_RANDOMIZE + select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST +--- a/arch/mips/include/asm/bugs.h ++++ b/arch/mips/include/asm/bugs.h +@@ -1,17 +1,11 @@ + /* SPDX-License-Identifier: GPL-2.0 */ + /* +- * This is included by init/main.c to check for architecture-dependent bugs. +- * + * Copyright (C) 2007 Maciej W. Rozycki +- * +- * Needs: +- * void check_bugs(void); + */ + #ifndef _ASM_BUGS_H + #define _ASM_BUGS_H + + #include <linux/bug.h> +-#include <linux/delay.h> + #include <linux/smp.h> + + #include <asm/cpu.h> +@@ -31,17 +25,6 @@ static inline void check_bugs_early(void + #endif + } + +-static inline void check_bugs(void) +-{ +- unsigned int cpu = smp_processor_id(); +- +- cpu_data[cpu].udelay_val = loops_per_jiffy; +- check_bugs32(); +-#ifdef CONFIG_64BIT +- check_bugs64(); +-#endif +-} +- + static inline int r4k_daddiu_bug(void) + { + #ifdef CONFIG_64BIT +--- a/arch/mips/kernel/setup.c ++++ b/arch/mips/kernel/setup.c +@@ -11,6 +11,8 @@ + * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. 
Rozycki + */ + #include <linux/init.h> ++#include <linux/cpu.h> ++#include <linux/delay.h> + #include <linux/ioport.h> + #include <linux/export.h> + #include <linux/screen_info.h> +@@ -1108,3 +1110,14 @@ static int __init setnocoherentio(char * + } + early_param("nocoherentio", setnocoherentio); + #endif ++ ++void __init arch_cpu_finalize_init(void) ++{ ++ unsigned int cpu = smp_processor_id(); ++ ++ cpu_data[cpu].udelay_val = loops_per_jiffy; ++ check_bugs32(); ++ ++ if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64)) ++ check_bugs64(); ++} diff --git a/debian/patches/bugfix/x86/gds/sh-cpu-switch-to-arch_cpu_finalize_init.patch b/debian/patches/bugfix/x86/gds/sh-cpu-switch-to-arch_cpu_finalize_init.patch new file mode 100644 index 000000000..55c9d37fd --- /dev/null +++ b/debian/patches/bugfix/x86/gds/sh-cpu-switch-to-arch_cpu_finalize_init.patch @@ -0,0 +1,207 @@ +From 3ea1c65b457df5417ae78185f0381816b6d0c22c Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner <tglx@linutronix.de> +Date: Wed, 14 Jun 2023 01:39:33 +0200 +Subject: sh/cpu: Switch to arch_cpu_finalize_init() + +From: Thomas Gleixner <tglx@linutronix.de> + +commit 01eb454e9bfe593f320ecbc9aaec60bf87cd453d upstream + +check_bugs() is about to be phased out. Switch over to the new +arch_cpu_finalize_init() implementation. + +No functional change. 
+ +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Link: https://lore.kernel.org/r/20230613224545.371697797@linutronix.de +Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/sh/Kconfig | 1 + arch/sh/include/asm/bugs.h | 78 ---------------------------------------- + arch/sh/include/asm/processor.h | 2 + + arch/sh/kernel/idle.c | 1 + arch/sh/kernel/setup.c | 55 ++++++++++++++++++++++++++++ + 5 files changed, 59 insertions(+), 78 deletions(-) + delete mode 100644 arch/sh/include/asm/bugs.h + +--- a/arch/sh/Kconfig ++++ b/arch/sh/Kconfig +@@ -1,6 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + config SUPERH + def_bool y ++ select ARCH_HAS_CPU_FINALIZE_INIT + select ARCH_HAS_PTE_SPECIAL + select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST + select ARCH_MIGHT_HAVE_PC_PARPORT +--- a/arch/sh/include/asm/bugs.h ++++ /dev/null +@@ -1,78 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-#ifndef __ASM_SH_BUGS_H +-#define __ASM_SH_BUGS_H +- +-/* +- * This is included by init/main.c to check for architecture-dependent bugs. +- * +- * Needs: +- * void check_bugs(void); +- */ +- +-/* +- * I don't know of any Super-H bugs yet. 
+- */ +- +-#include <asm/processor.h> +- +-extern void select_idle_routine(void); +- +-static void __init check_bugs(void) +-{ +- extern unsigned long loops_per_jiffy; +- char *p = &init_utsname()->machine[2]; /* "sh" */ +- +- select_idle_routine(); +- +- current_cpu_data.loops_per_jiffy = loops_per_jiffy; +- +- switch (current_cpu_data.family) { +- case CPU_FAMILY_SH2: +- *p++ = '2'; +- break; +- case CPU_FAMILY_SH2A: +- *p++ = '2'; +- *p++ = 'a'; +- break; +- case CPU_FAMILY_SH3: +- *p++ = '3'; +- break; +- case CPU_FAMILY_SH4: +- *p++ = '4'; +- break; +- case CPU_FAMILY_SH4A: +- *p++ = '4'; +- *p++ = 'a'; +- break; +- case CPU_FAMILY_SH4AL_DSP: +- *p++ = '4'; +- *p++ = 'a'; +- *p++ = 'l'; +- *p++ = '-'; +- *p++ = 'd'; +- *p++ = 's'; +- *p++ = 'p'; +- break; +- case CPU_FAMILY_SH5: +- *p++ = '6'; +- *p++ = '4'; +- break; +- case CPU_FAMILY_UNKNOWN: +- /* +- * Specifically use CPU_FAMILY_UNKNOWN rather than +- * default:, so we're able to have the compiler whine +- * about unhandled enumerations. 
+- */ +- break; +- } +- +- printk("CPU: %s\n", get_cpu_subtype(¤t_cpu_data)); +- +-#ifndef __LITTLE_ENDIAN__ +- /* 'eb' means 'Endian Big' */ +- *p++ = 'e'; +- *p++ = 'b'; +-#endif +- *p = '\0'; +-} +-#endif /* __ASM_SH_BUGS_H */ +--- a/arch/sh/include/asm/processor.h ++++ b/arch/sh/include/asm/processor.h +@@ -173,6 +173,8 @@ extern unsigned int instruction_size(uns + #define instruction_size(insn) (4) + #endif + ++void select_idle_routine(void); ++ + #endif /* __ASSEMBLY__ */ + + #ifdef CONFIG_SUPERH32 +--- a/arch/sh/kernel/idle.c ++++ b/arch/sh/kernel/idle.c +@@ -18,6 +18,7 @@ + #include <linux/smp.h> + #include <linux/atomic.h> + #include <asm/pgalloc.h> ++#include <asm/processor.h> + #include <asm/smp.h> + #include <asm/bl_bit.h> + +--- a/arch/sh/kernel/setup.c ++++ b/arch/sh/kernel/setup.c +@@ -42,6 +42,7 @@ + #include <asm/smp.h> + #include <asm/mmu_context.h> + #include <asm/mmzone.h> ++#include <asm/processor.h> + #include <asm/sparsemem.h> + + /* +@@ -361,3 +362,57 @@ int test_mode_pin(int pin) + { + return sh_mv.mv_mode_pins() & pin; + } ++ ++void __init arch_cpu_finalize_init(void) ++{ ++ char *p = &init_utsname()->machine[2]; /* "sh" */ ++ ++ select_idle_routine(); ++ ++ current_cpu_data.loops_per_jiffy = loops_per_jiffy; ++ ++ switch (current_cpu_data.family) { ++ case CPU_FAMILY_SH2: ++ *p++ = '2'; ++ break; ++ case CPU_FAMILY_SH2A: ++ *p++ = '2'; ++ *p++ = 'a'; ++ break; ++ case CPU_FAMILY_SH3: ++ *p++ = '3'; ++ break; ++ case CPU_FAMILY_SH4: ++ *p++ = '4'; ++ break; ++ case CPU_FAMILY_SH4A: ++ *p++ = '4'; ++ *p++ = 'a'; ++ break; ++ case CPU_FAMILY_SH4AL_DSP: ++ *p++ = '4'; ++ *p++ = 'a'; ++ *p++ = 'l'; ++ *p++ = '-'; ++ *p++ = 'd'; ++ *p++ = 's'; ++ *p++ = 'p'; ++ break; ++ case CPU_FAMILY_UNKNOWN: ++ /* ++ * Specifically use CPU_FAMILY_UNKNOWN rather than ++ * default:, so we're able to have the compiler whine ++ * about unhandled enumerations. 
++ */
++ break;
++ }
++
++ pr_info("CPU: %s\n", get_cpu_subtype(&current_cpu_data));
++
++#ifndef __LITTLE_ENDIAN__
++ /* 'eb' means 'Endian Big' */
++ *p++ = 'e';
++ *p++ = 'b';
++#endif
++ *p = '\0';
++}
diff --git a/debian/patches/bugfix/x86/gds/sparc-cpu-switch-to-arch_cpu_finalize_init.patch b/debian/patches/bugfix/x86/gds/sparc-cpu-switch-to-arch_cpu_finalize_init.patch
new file mode 100644
index 000000000..a1677f487
--- /dev/null
+++ b/debian/patches/bugfix/x86/gds/sparc-cpu-switch-to-arch_cpu_finalize_init.patch
@@ -0,0 +1,70 @@
+From e7a2caf480097e1131b5239e95083c3e8995be07 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 14 Jun 2023 01:39:35 +0200
+Subject: sparc/cpu: Switch to arch_cpu_finalize_init()
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 44ade508e3bfac45ae97864587de29eb1a881ec0 upstream
+
+check_bugs() is about to be phased out. Switch over to the new
+arch_cpu_finalize_init() implementation.
+
+No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Sam Ravnborg <sam@ravnborg.org>
+Link: https://lore.kernel.org/r/20230613224545.431995857@linutronix.de
+Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/Kconfig | 1 +
+ arch/sparc/include/asm/bugs.h | 18 ------------------
+ arch/sparc/kernel/setup_32.c | 7 +++++++
+ 3 files changed, 8 insertions(+), 18 deletions(-)
+ delete mode 100644 arch/sparc/include/asm/bugs.h
+
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -12,6 +12,7 @@ config 64BIT
+ config SPARC
+ bool
+ default y
++ select ARCH_HAS_CPU_FINALIZE_INIT if !SMP
+ select ARCH_MIGHT_HAVE_PC_PARPORT if SPARC64 && PCI
+ select ARCH_MIGHT_HAVE_PC_SERIO
+ select OF
+--- a/arch/sparc/include/asm/bugs.h
++++ /dev/null
+@@ -1,18 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/* include/asm/bugs.h: Sparc probes for various bugs.
+- *
+- * Copyright (C) 1996, 2007 David S. 
Miller (davem@davemloft.net) +- */ +- +-#ifdef CONFIG_SPARC32 +-#include <asm/cpudata.h> +-#endif +- +-extern unsigned long loops_per_jiffy; +- +-static void __init check_bugs(void) +-{ +-#if defined(CONFIG_SPARC32) && !defined(CONFIG_SMP) +- cpu_data(0).udelay_val = loops_per_jiffy; +-#endif +-} +--- a/arch/sparc/kernel/setup_32.c ++++ b/arch/sparc/kernel/setup_32.c +@@ -422,3 +422,10 @@ static int __init topology_init(void) + } + + subsys_initcall(topology_init); ++ ++#if defined(CONFIG_SPARC32) && !defined(CONFIG_SMP) ++void __init arch_cpu_finalize_init(void) ++{ ++ cpu_data(0).udelay_val = loops_per_jiffy; ++} ++#endif diff --git a/debian/patches/bugfix/x86/gds/um-cpu-switch-to-arch_cpu_finalize_init.patch b/debian/patches/bugfix/x86/gds/um-cpu-switch-to-arch_cpu_finalize_init.patch new file mode 100644 index 000000000..372822010 --- /dev/null +++ b/debian/patches/bugfix/x86/gds/um-cpu-switch-to-arch_cpu_finalize_init.patch @@ -0,0 +1,65 @@ +From 760b926637a95305fe8b066e8943ef688607dc0e Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner <tglx@linutronix.de> +Date: Wed, 14 Jun 2023 01:39:36 +0200 +Subject: um/cpu: Switch to arch_cpu_finalize_init() + +From: Thomas Gleixner <tglx@linutronix.de> + +commit 9349b5cd0908f8afe95529fc7a8cbb1417df9b0c upstream + +check_bugs() is about to be phased out. Switch over to the new +arch_cpu_finalize_init() implementation. + +No functional change. 
+ +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Acked-by: Richard Weinberger <richard@nod.at> +Link: https://lore.kernel.org/r/20230613224545.493148694@linutronix.de +Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/um/Kconfig | 1 + + arch/um/include/asm/bugs.h | 7 ------- + arch/um/kernel/um_arch.c | 3 ++- + 3 files changed, 3 insertions(+), 8 deletions(-) + delete mode 100644 arch/um/include/asm/bugs.h + +--- a/arch/um/Kconfig ++++ b/arch/um/Kconfig +@@ -5,6 +5,7 @@ menu "UML-specific options" + config UML + bool + default y ++ select ARCH_HAS_CPU_FINALIZE_INIT + select ARCH_HAS_KCOV + select ARCH_NO_PREEMPT + select HAVE_ARCH_AUDITSYSCALL +--- a/arch/um/include/asm/bugs.h ++++ /dev/null +@@ -1,7 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-#ifndef __UM_BUGS_H +-#define __UM_BUGS_H +- +-void check_bugs(void); +- +-#endif +--- a/arch/um/kernel/um_arch.c ++++ b/arch/um/kernel/um_arch.c +@@ -3,6 +3,7 @@ + * Licensed under the GPL + */ + ++#include <linux/cpu.h> + #include <linux/delay.h> + #include <linux/init.h> + #include <linux/mm.h> +@@ -352,7 +353,7 @@ void __init setup_arch(char **cmdline_p) + setup_hostinfo(host_info, sizeof host_info); + } + +-void __init check_bugs(void) ++void __init arch_cpu_finalize_init(void) + { + arch_check_bugs(); + os_check_bugs(); diff --git a/debian/patches/bugfix/x86/gds/x86-cpu-switch-to-arch_cpu_finalize_init.patch b/debian/patches/bugfix/x86/gds/x86-cpu-switch-to-arch_cpu_finalize_init.patch new file mode 100644 index 000000000..7d9b171a2 --- /dev/null +++ b/debian/patches/bugfix/x86/gds/x86-cpu-switch-to-arch_cpu_finalize_init.patch @@ -0,0 +1,228 @@ +From b3454eb2d26a6cecada04b38e72e255ae702ccdb Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner <tglx@linutronix.de> +Date: Wed, 14 Jun 2023 01:39:24 +0200 +Subject: x86/cpu: Switch to arch_cpu_finalize_init() + +From: Thomas Gleixner <tglx@linutronix.de> + +commit 
7c7077a72674402654f3291354720cd73cdf649e upstream + +check_bugs() is a dumping ground for finalizing the CPU bringup. Only parts of +it has to do with actual CPU bugs. + +Split it apart into arch_cpu_finalize_init() and cpu_select_mitigations(). + +Fixup the bogus 32bit comments while at it. + +No functional change. + +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Reviewed-by: Borislav Petkov (AMD) <bp@alien8.de> +Link: https://lore.kernel.org/r/20230613224545.019583869@linutronix.de +Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/x86/Kconfig | 1 + arch/x86/include/asm/bugs.h | 2 - + arch/x86/kernel/cpu/bugs.c | 51 --------------------------------------- + arch/x86/kernel/cpu/common.c | 55 +++++++++++++++++++++++++++++++++++++++++++ + arch/x86/kernel/cpu/cpu.h | 1 + 5 files changed, 58 insertions(+), 52 deletions(-) + +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -50,6 +50,7 @@ config X86 + select ARCH_CLOCKSOURCE_DATA + select ARCH_DISCARD_MEMBLOCK + select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI ++ select ARCH_HAS_CPU_FINALIZE_INIT + select ARCH_HAS_DEBUG_VIRTUAL + select ARCH_HAS_DEVMEM_IS_ALLOWED + select ARCH_HAS_ELF_RANDOMIZE +--- a/arch/x86/include/asm/bugs.h ++++ b/arch/x86/include/asm/bugs.h +@@ -4,8 +4,6 @@ + + #include <asm/processor.h> + +-extern void check_bugs(void); +- + #if defined(CONFIG_CPU_SUP_INTEL) + void check_mpx_erratum(struct cpuinfo_x86 *c); + #else +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -9,7 +9,6 @@ + * - Andrew D. Balsa (code cleanup). 
+ */ + #include <linux/init.h> +-#include <linux/utsname.h> + #include <linux/cpu.h> + #include <linux/module.h> + #include <linux/nospec.h> +@@ -25,9 +24,7 @@ + #include <asm/msr.h> + #include <asm/vmx.h> + #include <asm/paravirt.h> +-#include <asm/alternative.h> + #include <asm/pgtable.h> +-#include <asm/set_memory.h> + #include <asm/intel-family.h> + #include <asm/e820/api.h> + #include <asm/hypervisor.h> +@@ -115,21 +112,8 @@ EXPORT_SYMBOL_GPL(mds_idle_clear); + DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear); + EXPORT_SYMBOL_GPL(mmio_stale_data_clear); + +-void __init check_bugs(void) ++void __init cpu_select_mitigations(void) + { +- identify_boot_cpu(); +- +- /* +- * identify_boot_cpu() initialized SMT support information, let the +- * core code know. +- */ +- cpu_smt_check_topology(); +- +- if (!IS_ENABLED(CONFIG_SMP)) { +- pr_info("CPU: "); +- print_cpu_info(&boot_cpu_data); +- } +- + /* + * Read the SPEC_CTRL MSR to account for reserved bits which may + * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD +@@ -165,39 +149,6 @@ void __init check_bugs(void) + l1tf_select_mitigation(); + md_clear_select_mitigation(); + srbds_select_mitigation(); +- +- arch_smt_update(); +- +-#ifdef CONFIG_X86_32 +- /* +- * Check whether we are able to run this kernel safely on SMP. +- * +- * - i386 is no longer supported. +- * - In order to run on anything without a TSC, we need to be +- * compiled for a i486. +- */ +- if (boot_cpu_data.x86 < 4) +- panic("Kernel requires i486+ for 'invlpg' and other features"); +- +- init_utsname()->machine[1] = +- '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); +- alternative_instructions(); +- +- fpu__init_check_bugs(); +-#else /* CONFIG_X86_64 */ +- alternative_instructions(); +- +- /* +- * Make sure the first 2MB area is not mapped by huge pages +- * There are typically fixed size MTRRs in there and overlapping +- * MTRRs into large pages causes slow downs. 
+- * +- * Right now we don't do that with gbpages because there seems +- * very little benefit for that case. +- */ +- if (!direct_gbpages) +- set_memory_4k((unsigned long)__va(0), 1); +-#endif + } + + /* +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -13,14 +13,19 @@ + #include <linux/sched/mm.h> + #include <linux/sched/clock.h> + #include <linux/sched/task.h> ++#include <linux/sched/smt.h> + #include <linux/init.h> + #include <linux/kprobes.h> + #include <linux/kgdb.h> + #include <linux/smp.h> ++#include <linux/cpu.h> + #include <linux/io.h> + #include <linux/syscore_ops.h> + + #include <asm/stackprotector.h> ++#include <linux/utsname.h> ++ ++#include <asm/alternative.h> + #include <asm/perf_event.h> + #include <asm/mmu_context.h> + #include <asm/archrandom.h> +@@ -56,6 +61,7 @@ + #ifdef CONFIG_X86_LOCAL_APIC + #include <asm/uv/uv.h> + #endif ++#include <asm/set_memory.h> + + #include "cpu.h" + +@@ -2097,3 +2103,52 @@ void microcode_check(void) + pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n"); + pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n"); + } ++ ++void __init arch_cpu_finalize_init(void) ++{ ++ identify_boot_cpu(); ++ ++ /* ++ * identify_boot_cpu() initialized SMT support information, let the ++ * core code know. ++ */ ++ cpu_smt_check_topology(); ++ ++ if (!IS_ENABLED(CONFIG_SMP)) { ++ pr_info("CPU: "); ++ print_cpu_info(&boot_cpu_data); ++ } ++ ++ cpu_select_mitigations(); ++ ++ arch_smt_update(); ++ ++ if (IS_ENABLED(CONFIG_X86_32)) { ++ /* ++ * Check whether this is a real i386 which is not longer ++ * supported and fixup the utsname. ++ */ ++ if (boot_cpu_data.x86 < 4) ++ panic("Kernel requires i486+ for 'invlpg' and other features"); ++ ++ init_utsname()->machine[1] = ++ '0' + (boot_cpu_data.x86 > 6 ? 
6 : boot_cpu_data.x86); ++ } ++ ++ alternative_instructions(); ++ ++ if (IS_ENABLED(CONFIG_X86_64)) { ++ /* ++ * Make sure the first 2MB area is not mapped by huge pages ++ * There are typically fixed size MTRRs in there and overlapping ++ * MTRRs into large pages causes slow downs. ++ * ++ * Right now we don't do that with gbpages because there seems ++ * very little benefit for that case. ++ */ ++ if (!direct_gbpages) ++ set_memory_4k((unsigned long)__va(0), 1); ++ } else { ++ fpu__init_check_bugs(); ++ } ++} +--- a/arch/x86/kernel/cpu/cpu.h ++++ b/arch/x86/kernel/cpu/cpu.h +@@ -79,6 +79,7 @@ extern void detect_ht(struct cpuinfo_x86 + extern void check_null_seg_clears_base(struct cpuinfo_x86 *c); + + unsigned int aperfmperf_get_khz(int cpu); ++void cpu_select_mitigations(void); + + extern void x86_spec_ctrl_setup_ap(void); + extern void update_srbds_msr(void); diff --git a/debian/patches/bugfix/x86/gds/x86-fpu-mark-init-functions-__init.patch b/debian/patches/bugfix/x86/gds/x86-fpu-mark-init-functions-__init.patch new file mode 100644 index 000000000..b6e116465 --- /dev/null +++ b/debian/patches/bugfix/x86/gds/x86-fpu-mark-init-functions-__init.patch @@ -0,0 +1,39 @@ +From 677d1e9bb0bff552b161e9058f1b6fdfd88ece91 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner <tglx@linutronix.de> +Date: Wed, 14 Jun 2023 01:39:45 +0200 +Subject: x86/fpu: Mark init functions __init + +From: Thomas Gleixner <tglx@linutronix.de> + +commit 1703db2b90c91b2eb2d699519fc505fe431dde0e upstream + +No point in keeping them around. 
+ +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Link: https://lore.kernel.org/r/20230613224545.841685728@linutronix.de +Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/x86/kernel/fpu/init.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/arch/x86/kernel/fpu/init.c ++++ b/arch/x86/kernel/fpu/init.c +@@ -49,7 +49,7 @@ void fpu__init_cpu(void) + fpu__init_cpu_xstate(); + } + +-static bool fpu__probe_without_cpuid(void) ++static bool __init fpu__probe_without_cpuid(void) + { + unsigned long cr0; + u16 fsw, fcw; +@@ -67,7 +67,7 @@ static bool fpu__probe_without_cpuid(voi + return fsw == 0 && (fcw & 0x103f) == 0x003f; + } + +-static void fpu__init_system_early_generic(void) ++static void __init fpu__init_system_early_generic(void) + { + if (!boot_cpu_has(X86_FEATURE_CPUID) && + !test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) { diff --git a/debian/patches/bugfix/x86/gds/x86-fpu-move-fpu-initialization-into-arch_cpu_finalize_init.patch b/debian/patches/bugfix/x86/gds/x86-fpu-move-fpu-initialization-into-arch_cpu_finalize_init.patch new file mode 100644 index 000000000..b0423f814 --- /dev/null +++ b/debian/patches/bugfix/x86/gds/x86-fpu-move-fpu-initialization-into-arch_cpu_finalize_init.patch @@ -0,0 +1,86 @@ +From 8a3b312da29169625141ff9c984a796724240ac1 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner <tglx@linutronix.de> +Date: Wed, 14 Jun 2023 01:39:46 +0200 +Subject: x86/fpu: Move FPU initialization into arch_cpu_finalize_init() + +From: Thomas Gleixner <tglx@linutronix.de> + +commit b81fac906a8f9e682e513ddd95697ec7a20878d4 upstream + +Initializing the FPU during the early boot process is a pointless +exercise. Early boot is convoluted and fragile enough. + +Nothing requires that the FPU is set up early. It has to be initialized +before fork_init() because the task_struct size depends on the FPU register +buffer size. 
+ +Move the initialization to arch_cpu_finalize_init() which is the perfect +place to do so. + +No functional change. + +This allows to remove quite some of the custom early command line parsing, +but that's subject to the next installment. + +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Link: https://lore.kernel.org/r/20230613224545.902376621@linutronix.de +Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/x86/kernel/cpu/common.c | 13 +++++++------ + arch/x86/kernel/smpboot.c | 1 + + 2 files changed, 8 insertions(+), 6 deletions(-) + +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -1278,8 +1278,6 @@ static void __init early_identify_cpu(st + + cpu_set_bug_bits(c); + +- fpu__init_system(); +- + #ifdef CONFIG_X86_32 + /* + * Regardless of whether PCID is enumerated, the SDM says +@@ -1985,8 +1983,6 @@ void cpu_init(void) + clear_all_debug_regs(); + dbg_restore_debug_regs(); + +- fpu__init_cpu(); +- + if (is_uv_system()) + uv_cpu_init(); + +@@ -2050,8 +2046,6 @@ void cpu_init(void) + clear_all_debug_regs(); + dbg_restore_debug_regs(); + +- fpu__init_cpu(); +- + load_fixmap_gdt(cpu); + } + #endif +@@ -2136,6 +2130,13 @@ void __init arch_cpu_finalize_init(void) + '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); + } + ++ /* ++ * Must be before alternatives because it might set or clear ++ * feature bits. 
++ */ ++ fpu__init_system(); ++ fpu__init_cpu(); ++ + alternative_instructions(); + + if (IS_ENABLED(CONFIG_X86_64)) { +--- a/arch/x86/kernel/smpboot.c ++++ b/arch/x86/kernel/smpboot.c +@@ -231,6 +231,7 @@ static void notrace start_secondary(void + #endif + load_current_idt(); + cpu_init(); ++ fpu__init_cpu(); + x86_cpuinit.early_percpu_clock_init(); + preempt_disable(); + smp_callin(); diff --git a/debian/patches/bugfix/x86/gds/x86-fpu-remove-cpuinfo-argument-from-init-functions.patch b/debian/patches/bugfix/x86/gds/x86-fpu-remove-cpuinfo-argument-from-init-functions.patch new file mode 100644 index 000000000..f5eddda28 --- /dev/null +++ b/debian/patches/bugfix/x86/gds/x86-fpu-remove-cpuinfo-argument-from-init-functions.patch @@ -0,0 +1,67 @@ +From a03b110dad183d18d01f0f8e370228c2747133e6 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner <tglx@linutronix.de> +Date: Wed, 14 Jun 2023 01:39:43 +0200 +Subject: x86/fpu: Remove cpuinfo argument from init functions + +From: Thomas Gleixner <tglx@linutronix.de> + +commit 1f34bb2a24643e0087652d81078e4f616562738d upstream + +Nothing in the call chain requires it + +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Link: https://lore.kernel.org/r/20230613224545.783704297@linutronix.de +Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/x86/include/asm/fpu/internal.h | 2 +- + arch/x86/kernel/cpu/common.c | 2 +- + arch/x86/kernel/fpu/init.c | 6 +++--- + 3 files changed, 5 insertions(+), 5 deletions(-) + +--- a/arch/x86/include/asm/fpu/internal.h ++++ b/arch/x86/include/asm/fpu/internal.h +@@ -42,7 +42,7 @@ extern int dump_fpu(struct pt_regs *ptr + extern void fpu__init_cpu(void); + extern void fpu__init_system_xstate(void); + extern void fpu__init_cpu_xstate(void); +-extern void fpu__init_system(struct cpuinfo_x86 *c); ++extern void fpu__init_system(void); + extern void fpu__init_check_bugs(void); + extern void fpu__resume_cpu(void); + 
extern u64 fpu__get_supported_xfeatures_mask(void); +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -1278,7 +1278,7 @@ static void __init early_identify_cpu(st + + cpu_set_bug_bits(c); + +- fpu__init_system(c); ++ fpu__init_system(); + + #ifdef CONFIG_X86_32 + /* +--- a/arch/x86/kernel/fpu/init.c ++++ b/arch/x86/kernel/fpu/init.c +@@ -67,7 +67,7 @@ static bool fpu__probe_without_cpuid(voi + return fsw == 0 && (fcw & 0x103f) == 0x003f; + } + +-static void fpu__init_system_early_generic(struct cpuinfo_x86 *c) ++static void fpu__init_system_early_generic(void) + { + if (!boot_cpu_has(X86_FEATURE_CPUID) && + !test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) { +@@ -297,10 +297,10 @@ static void __init fpu__init_parse_early + * Called on the boot CPU once per system bootup, to set up the initial + * FPU state that is later cloned into all processes: + */ +-void __init fpu__init_system(struct cpuinfo_x86 *c) ++void __init fpu__init_system(void) + { + fpu__init_parse_early_param(); +- fpu__init_system_early_generic(c); ++ fpu__init_system_early_generic(); + + /* + * The FPU has to be operational for some of the diff --git a/debian/patches/bugfix/x86/gds/x86-speculation-add-force-option-to-gds-mitigation.patch b/debian/patches/bugfix/x86/gds/x86-speculation-add-force-option-to-gds-mitigation.patch new file mode 100644 index 000000000..65c4594b1 --- /dev/null +++ b/debian/patches/bugfix/x86/gds/x86-speculation-add-force-option-to-gds-mitigation.patch @@ -0,0 +1,163 @@ +From ead252286b6800873dd961075a36939f15e9b163 Mon Sep 17 00:00:00 2001 +From: Daniel Sneddon <daniel.sneddon@linux.intel.com> +Date: Wed, 12 Jul 2023 19:43:12 -0700 +Subject: x86/speculation: Add force option to GDS mitigation + +From: Daniel Sneddon <daniel.sneddon@linux.intel.com> + +commit 553a5c03e90a6087e88f8ff878335ef0621536fb upstream + +The Gather Data Sampling (GDS) vulnerability allows malicious software +to infer stale data previously stored in vector 
registers. This may +include sensitive data such as cryptographic keys. GDS is mitigated in +microcode, and systems with up-to-date microcode are protected by +default. However, any affected system that is running with older +microcode will still be vulnerable to GDS attacks. + +Since the gather instructions used by the attacker are part of the +AVX2 and AVX512 extensions, disabling these extensions prevents gather +instructions from being executed, thereby mitigating the system from +GDS. Disabling AVX2 is sufficient, but we don't have the granularity +to do this. The XCR0[2] disables AVX, with no option to just disable +AVX2. + +Add a kernel parameter gather_data_sampling=force that will enable the +microcode mitigation if available, otherwise it will disable AVX on +affected systems. + +This option will be ignored if cmdline mitigations=off. + +This is a *big* hammer. It is known to break buggy userspace that +uses incomplete, buggy AVX enumeration. Unfortunately, such userspace +does exist in the wild: + + https://www.mail-archive.com/bug-coreutils@gnu.org/msg33046.html + +[ dhansen: add some more ominous warnings about disabling AVX ] + +Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com> +Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com> +Acked-by: Josh Poimboeuf <jpoimboe@kernel.org> +Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + Documentation/admin-guide/hw-vuln/gather_data_sampling.rst | 18 +++++++++-- + Documentation/admin-guide/kernel-parameters.txt | 8 ++++- + arch/x86/kernel/cpu/bugs.c | 20 ++++++++++++- + 3 files changed, 40 insertions(+), 6 deletions(-) + +--- a/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst ++++ b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst +@@ -60,14 +60,21 @@ bits: + ================================ === ============================ + + GDS can also be mitigated on systems that don't have updated 
microcode by +-disabling AVX. This can be done by setting "clearcpuid=avx" on the kernel +-command-line. ++disabling AVX. This can be done by setting gather_data_sampling="force" or ++"clearcpuid=avx" on the kernel command-line. ++ ++If used, these options will disable AVX use by turning on XSAVE YMM support. ++However, the processor will still enumerate AVX support. Userspace that ++does not follow proper AVX enumeration to check both AVX *and* XSAVE YMM ++support will break. + + Mitigation control on the kernel command line + --------------------------------------------- + The mitigation can be disabled by setting "gather_data_sampling=off" or +-"mitigations=off" on the kernel command line. Not specifying either will +-default to the mitigation being enabled. ++"mitigations=off" on the kernel command line. Not specifying either will default ++to the mitigation being enabled. Specifying "gather_data_sampling=force" will ++use the microcode mitigation when available or disable AVX on affected systems ++where the microcode hasn't been updated to include the mitigation. + + GDS System Information + ------------------------ +@@ -83,6 +90,9 @@ The possible values contained in this fi + Vulnerable Processor vulnerable and mitigation disabled. + Vulnerable: No microcode Processor vulnerable and microcode is missing + mitigation. ++ Mitigation: AVX disabled, ++ no microcode Processor is vulnerable and microcode is missing ++ mitigation. AVX disabled as mitigation. + Mitigation: Microcode Processor is vulnerable and mitigation is in + effect. + Mitigation: Microcode (locked) Processor is vulnerable and mitigation is in +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -1300,7 +1300,13 @@ + + This issue is mitigated by default in updated microcode. + The mitigation may have a performance impact but can be +- disabled. ++ disabled. 
On systems without the microcode mitigation ++ disabling AVX serves as a mitigation. ++ ++ force: Disable AVX to mitigate systems without ++ microcode mitigation. No effect if the microcode ++ mitigation is present. Known to cause crashes in ++ userspace with buggy AVX enumeration. + + off: Disable GDS mitigation. + +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -607,6 +607,7 @@ early_param("srbds", srbds_parse_cmdline + enum gds_mitigations { + GDS_MITIGATION_OFF, + GDS_MITIGATION_UCODE_NEEDED, ++ GDS_MITIGATION_FORCE, + GDS_MITIGATION_FULL, + GDS_MITIGATION_FULL_LOCKED, + GDS_MITIGATION_HYPERVISOR, +@@ -617,6 +618,7 @@ static enum gds_mitigations gds_mitigati + static const char * const gds_strings[] = { + [GDS_MITIGATION_OFF] = "Vulnerable", + [GDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", ++ [GDS_MITIGATION_FORCE] = "Mitigation: AVX disabled, no microcode", + [GDS_MITIGATION_FULL] = "Mitigation: Microcode", + [GDS_MITIGATION_FULL_LOCKED] = "Mitigation: Microcode (locked)", + [GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", +@@ -642,6 +644,7 @@ void update_gds_msr(void) + rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + mcu_ctrl &= ~GDS_MITG_DIS; + break; ++ case GDS_MITIGATION_FORCE: + case GDS_MITIGATION_UCODE_NEEDED: + case GDS_MITIGATION_HYPERVISOR: + return; +@@ -676,10 +679,23 @@ static void __init gds_select_mitigation + + /* No microcode */ + if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) { +- gds_mitigation = GDS_MITIGATION_UCODE_NEEDED; ++ if (gds_mitigation == GDS_MITIGATION_FORCE) { ++ /* ++ * This only needs to be done on the boot CPU so do it ++ * here rather than in update_gds_msr() ++ */ ++ setup_clear_cpu_cap(X86_FEATURE_AVX); ++ pr_warn("Microcode update needed! 
Disabling AVX as mitigation.\n"); ++ } else { ++ gds_mitigation = GDS_MITIGATION_UCODE_NEEDED; ++ } + goto out; + } + ++ /* Microcode has mitigation, use it */ ++ if (gds_mitigation == GDS_MITIGATION_FORCE) ++ gds_mitigation = GDS_MITIGATION_FULL; ++ + rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + if (mcu_ctrl & GDS_MITG_LOCKED) { + if (gds_mitigation == GDS_MITIGATION_OFF) +@@ -710,6 +726,8 @@ static int __init gds_parse_cmdline(char + + if (!strcmp(str, "off")) + gds_mitigation = GDS_MITIGATION_OFF; ++ else if (!strcmp(str, "force")) ++ gds_mitigation = GDS_MITIGATION_FORCE; + + return 0; + } diff --git a/debian/patches/bugfix/x86/gds/x86-speculation-add-gather-data-sampling-mitigation.patch b/debian/patches/bugfix/x86/gds/x86-speculation-add-gather-data-sampling-mitigation.patch new file mode 100644 index 000000000..c426811c6 --- /dev/null +++ b/debian/patches/bugfix/x86/gds/x86-speculation-add-gather-data-sampling-mitigation.patch @@ -0,0 +1,562 @@ +From d63b3f0e819275ee64648eb01330aad3e347d9ba Mon Sep 17 00:00:00 2001 +From: Daniel Sneddon <daniel.sneddon@linux.intel.com> +Date: Wed, 12 Jul 2023 19:43:11 -0700 +Subject: x86/speculation: Add Gather Data Sampling mitigation + +From: Daniel Sneddon <daniel.sneddon@linux.intel.com> + +commit 8974eb588283b7d44a7c91fa09fcbaf380339f3a upstream + +Gather Data Sampling (GDS) is a hardware vulnerability which allows +unprivileged speculative access to data which was previously stored in +vector registers. + +Intel processors that support AVX2 and AVX512 have gather instructions +that fetch non-contiguous data elements from memory. On vulnerable +hardware, when a gather instruction is transiently executed and +encounters a fault, stale data from architectural or internal vector +registers may get transiently stored to the destination vector +register allowing an attacker to infer the stale data using typical +side channel techniques like cache timing attacks. 
+ +This mitigation is different from many earlier ones for two reasons. +First, it is enabled by default and a bit must be set to *DISABLE* it. +This is the opposite of normal mitigation polarity. This means GDS can +be mitigated simply by updating microcode and leaving the new control +bit alone. + +Second, GDS has a "lock" bit. This lock bit is there because the +mitigation affects the hardware security features KeyLocker and SGX. +It needs to be enabled and *STAY* enabled for these features to be +mitigated against GDS. + +The mitigation is enabled in the microcode by default. Disable it by +setting gather_data_sampling=off or by disabling all mitigations with +mitigations=off. The mitigation status can be checked by reading: + + /sys/devices/system/cpu/vulnerabilities/gather_data_sampling + +Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com> +Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com> +Acked-by: Josh Poimboeuf <jpoimboe@kernel.org> +Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + Documentation/ABI/testing/sysfs-devices-system-cpu | 11 - + Documentation/admin-guide/hw-vuln/gather_data_sampling.rst | 99 +++++++++ + Documentation/admin-guide/hw-vuln/index.rst | 1 + Documentation/admin-guide/kernel-parameters.txt | 33 ++- + arch/x86/include/asm/cpufeatures.h | 1 + arch/x86/include/asm/msr-index.h | 11 + + arch/x86/kernel/cpu/bugs.c | 129 +++++++++++++ + arch/x86/kernel/cpu/common.c | 34 ++- + arch/x86/kernel/cpu/cpu.h | 1 + drivers/base/cpu.c | 8 + 10 files changed, 305 insertions(+), 23 deletions(-) + create mode 100644 Documentation/admin-guide/hw-vuln/gather_data_sampling.rst + +--- a/Documentation/ABI/testing/sysfs-devices-system-cpu ++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu +@@ -472,16 +472,17 @@ Description: information about CPUs hete + cpu_capacity: capacity of cpu#. 
+ + What: /sys/devices/system/cpu/vulnerabilities ++ /sys/devices/system/cpu/vulnerabilities/gather_data_sampling ++ /sys/devices/system/cpu/vulnerabilities/itlb_multihit ++ /sys/devices/system/cpu/vulnerabilities/l1tf ++ /sys/devices/system/cpu/vulnerabilities/mds + /sys/devices/system/cpu/vulnerabilities/meltdown ++ /sys/devices/system/cpu/vulnerabilities/mmio_stale_data ++ /sys/devices/system/cpu/vulnerabilities/spec_store_bypass + /sys/devices/system/cpu/vulnerabilities/spectre_v1 + /sys/devices/system/cpu/vulnerabilities/spectre_v2 +- /sys/devices/system/cpu/vulnerabilities/spec_store_bypass +- /sys/devices/system/cpu/vulnerabilities/l1tf +- /sys/devices/system/cpu/vulnerabilities/mds + /sys/devices/system/cpu/vulnerabilities/srbds + /sys/devices/system/cpu/vulnerabilities/tsx_async_abort +- /sys/devices/system/cpu/vulnerabilities/itlb_multihit +- /sys/devices/system/cpu/vulnerabilities/mmio_stale_data + Date: January 2018 + Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org> + Description: Information about CPU vulnerabilities +--- /dev/null ++++ b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst +@@ -0,0 +1,99 @@ ++.. SPDX-License-Identifier: GPL-2.0 ++ ++GDS - Gather Data Sampling ++========================== ++ ++Gather Data Sampling is a hardware vulnerability which allows unprivileged ++speculative access to data which was previously stored in vector registers. ++ ++Problem ++------- ++When a gather instruction performs loads from memory, different data elements ++are merged into the destination vector register. However, when a gather ++instruction that is transiently executed encounters a fault, stale data from ++architectural or internal vector registers may get transiently forwarded to the ++destination vector register instead. This will allow a malicious attacker to ++infer stale data using typical side channel techniques like cache timing ++attacks. GDS is a purely sampling-based attack. 
++ ++The attacker uses gather instructions to infer the stale vector register data. ++The victim does not need to do anything special other than use the vector ++registers. The victim does not need to use gather instructions to be ++vulnerable. ++ ++Because the buffers are shared between Hyper-Threads cross Hyper-Thread attacks ++are possible. ++ ++Attack scenarios ++---------------- ++Without mitigation, GDS can infer stale data across virtually all ++permission boundaries: ++ ++ Non-enclaves can infer SGX enclave data ++ Userspace can infer kernel data ++ Guests can infer data from hosts ++ Guest can infer guest from other guests ++ Users can infer data from other users ++ ++Because of this, it is important to ensure that the mitigation stays enabled in ++lower-privilege contexts like guests and when running outside SGX enclaves. ++ ++The hardware enforces the mitigation for SGX. Likewise, VMMs should ensure ++that guests are not allowed to disable the GDS mitigation. If a host erred and ++allowed this, a guest could theoretically disable GDS mitigation, mount an ++attack, and re-enable it. ++ ++Mitigation mechanism ++-------------------- ++This issue is mitigated in microcode. The microcode defines the following new ++bits: ++ ++ ================================ === ============================ ++ IA32_ARCH_CAPABILITIES[GDS_CTRL] R/O Enumerates GDS vulnerability ++ and mitigation support. ++ IA32_ARCH_CAPABILITIES[GDS_NO] R/O Processor is not vulnerable. ++ IA32_MCU_OPT_CTRL[GDS_MITG_DIS] R/W Disables the mitigation ++ 0 by default. ++ IA32_MCU_OPT_CTRL[GDS_MITG_LOCK] R/W Locks GDS_MITG_DIS=0. Writes ++ to GDS_MITG_DIS are ignored ++ Can't be cleared once set. ++ ================================ === ============================ ++ ++GDS can also be mitigated on systems that don't have updated microcode by ++disabling AVX. This can be done by setting "clearcpuid=avx" on the kernel ++command-line. 
++ ++Mitigation control on the kernel command line ++--------------------------------------------- ++The mitigation can be disabled by setting "gather_data_sampling=off" or ++"mitigations=off" on the kernel command line. Not specifying either will ++default to the mitigation being enabled. ++ ++GDS System Information ++------------------------ ++The kernel provides vulnerability status information through sysfs. For ++GDS this can be accessed by the following sysfs file: ++ ++/sys/devices/system/cpu/vulnerabilities/gather_data_sampling ++ ++The possible values contained in this file are: ++ ++ ============================== ============================================= ++ Not affected Processor not vulnerable. ++ Vulnerable Processor vulnerable and mitigation disabled. ++ Vulnerable: No microcode Processor vulnerable and microcode is missing ++ mitigation. ++ Mitigation: Microcode Processor is vulnerable and mitigation is in ++ effect. ++ Mitigation: Microcode (locked) Processor is vulnerable and mitigation is in ++ effect and cannot be disabled. ++ Unknown: Dependent on ++ hypervisor status Running on a virtual guest processor that is ++ affected but with no way to know if host ++ processor is mitigated or vulnerable. ++ ============================== ============================================= ++ ++GDS Default mitigation ++---------------------- ++The updated microcode will enable the mitigation by default. The kernel's ++default action is to leave the mitigation enabled. 
+--- a/Documentation/admin-guide/hw-vuln/index.rst ++++ b/Documentation/admin-guide/hw-vuln/index.rst +@@ -16,3 +16,4 @@ are configurable at compile, boot or run + multihit.rst + special-register-buffer-data-sampling.rst + processor_mmio_stale_data.rst ++ gather_data_sampling.rst +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -1290,6 +1290,20 @@ + Format: off | on + default: on + ++ gather_data_sampling= ++ [X86,INTEL] Control the Gather Data Sampling (GDS) ++ mitigation. ++ ++ Gather Data Sampling is a hardware vulnerability which ++ allows unprivileged speculative access to data which was ++ previously stored in vector registers. ++ ++ This issue is mitigated by default in updated microcode. ++ The mitigation may have a performance impact but can be ++ disabled. ++ ++ off: Disable GDS mitigation. ++ + gcov_persist= [GCOV] When non-zero (default), profiling data for + kernel modules is saved and remains accessible via + debugfs, even when the module is unloaded/reloaded. +@@ -2555,22 +2569,23 @@ + Disable all optional CPU mitigations. This + improves system performance, but it may also + expose users to several CPU vulnerabilities. 
+- Equivalent to: nopti [X86,PPC] ++ Equivalent to: gather_data_sampling=off [X86] + kpti=0 [ARM64] +- nospectre_v1 [PPC] ++ kvm.nx_huge_pages=off [X86] ++ l1tf=off [X86] ++ mds=off [X86] ++ mmio_stale_data=off [X86] ++ no_entry_flush [PPC] ++ no_uaccess_flush [PPC] + nobp=0 [S390] ++ nopti [X86,PPC] ++ nospectre_v1 [PPC] + nospectre_v1 [X86] + nospectre_v2 [X86,PPC,S390,ARM64] +- spectre_v2_user=off [X86] + spec_store_bypass_disable=off [X86,PPC] ++ spectre_v2_user=off [X86] + ssbd=force-off [ARM64] +- l1tf=off [X86] +- mds=off [X86] + tsx_async_abort=off [X86] +- kvm.nx_huge_pages=off [X86] +- no_entry_flush [PPC] +- no_uaccess_flush [PPC] +- mmio_stale_data=off [X86] + + Exceptions: + This does not have any effect on +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -409,5 +409,6 @@ + #define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */ + #define X86_BUG_RETBLEED X86_BUG(27) /* CPU is affected by RETBleed */ + #define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */ ++#define X86_BUG_GDS X86_BUG(29) /* CPU is affected by Gather Data Sampling */ + + #endif /* _ASM_X86_CPUFEATURES_H */ +--- a/arch/x86/include/asm/msr-index.h ++++ b/arch/x86/include/asm/msr-index.h +@@ -138,6 +138,15 @@ + * Not susceptible to Post-Barrier + * Return Stack Buffer Predictions. + */ ++#define ARCH_CAP_GDS_CTRL BIT(25) /* ++ * CPU is vulnerable to Gather ++ * Data Sampling (GDS) and ++ * has controls for mitigation. ++ */ ++#define ARCH_CAP_GDS_NO BIT(26) /* ++ * CPU is not vulnerable to Gather ++ * Data Sampling (GDS). 
++ */ + + #define MSR_IA32_FLUSH_CMD 0x0000010b + #define L1D_FLUSH BIT(0) /* +@@ -156,6 +165,8 @@ + #define MSR_IA32_MCU_OPT_CTRL 0x00000123 + #define RNGDS_MITG_DIS BIT(0) + #define FB_CLEAR_DIS BIT(3) /* CPU Fill buffer clear disable */ ++#define GDS_MITG_DIS BIT(4) /* Disable GDS mitigation */ ++#define GDS_MITG_LOCKED BIT(5) /* GDS mitigation locked */ + + #define MSR_IA32_SYSENTER_CS 0x00000174 + #define MSR_IA32_SYSENTER_ESP 0x00000175 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -44,6 +44,7 @@ static void __init md_clear_select_mitig + static void __init taa_select_mitigation(void); + static void __init mmio_select_mitigation(void); + static void __init srbds_select_mitigation(void); ++static void __init gds_select_mitigation(void); + + /* The base value of the SPEC_CTRL MSR without task-specific bits set */ + u64 x86_spec_ctrl_base; +@@ -149,6 +150,7 @@ void __init cpu_select_mitigations(void) + l1tf_select_mitigation(); + md_clear_select_mitigation(); + srbds_select_mitigation(); ++ gds_select_mitigation(); + } + + /* +@@ -600,6 +602,120 @@ static int __init srbds_parse_cmdline(ch + early_param("srbds", srbds_parse_cmdline); + + #undef pr_fmt ++#define pr_fmt(fmt) "GDS: " fmt ++ ++enum gds_mitigations { ++ GDS_MITIGATION_OFF, ++ GDS_MITIGATION_UCODE_NEEDED, ++ GDS_MITIGATION_FULL, ++ GDS_MITIGATION_FULL_LOCKED, ++ GDS_MITIGATION_HYPERVISOR, ++}; ++ ++static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL; ++ ++static const char * const gds_strings[] = { ++ [GDS_MITIGATION_OFF] = "Vulnerable", ++ [GDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", ++ [GDS_MITIGATION_FULL] = "Mitigation: Microcode", ++ [GDS_MITIGATION_FULL_LOCKED] = "Mitigation: Microcode (locked)", ++ [GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", ++}; ++ ++void update_gds_msr(void) ++{ ++ u64 mcu_ctrl_after; ++ u64 mcu_ctrl; ++ ++ switch (gds_mitigation) { ++ case GDS_MITIGATION_OFF: ++ 
rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); ++ mcu_ctrl |= GDS_MITG_DIS; ++ break; ++ case GDS_MITIGATION_FULL_LOCKED: ++ /* ++ * The LOCKED state comes from the boot CPU. APs might not have ++ * the same state. Make sure the mitigation is enabled on all ++ * CPUs. ++ */ ++ case GDS_MITIGATION_FULL: ++ rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); ++ mcu_ctrl &= ~GDS_MITG_DIS; ++ break; ++ case GDS_MITIGATION_UCODE_NEEDED: ++ case GDS_MITIGATION_HYPERVISOR: ++ return; ++ }; ++ ++ wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); ++ ++ /* ++ * Check to make sure that the WRMSR value was not ignored. Writes to ++ * GDS_MITG_DIS will be ignored if this processor is locked but the boot ++ * processor was not. ++ */ ++ rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after); ++ WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after); ++} ++ ++static void __init gds_select_mitigation(void) ++{ ++ u64 mcu_ctrl; ++ ++ if (!boot_cpu_has_bug(X86_BUG_GDS)) ++ return; ++ ++ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { ++ gds_mitigation = GDS_MITIGATION_HYPERVISOR; ++ goto out; ++ } ++ ++ if (cpu_mitigations_off()) ++ gds_mitigation = GDS_MITIGATION_OFF; ++ /* Will verify below that mitigation _can_ be disabled */ ++ ++ /* No microcode */ ++ if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) { ++ gds_mitigation = GDS_MITIGATION_UCODE_NEEDED; ++ goto out; ++ } ++ ++ rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); ++ if (mcu_ctrl & GDS_MITG_LOCKED) { ++ if (gds_mitigation == GDS_MITIGATION_OFF) ++ pr_warn("Mitigation locked. Disable failed.\n"); ++ ++ /* ++ * The mitigation is selected from the boot CPU. All other CPUs ++ * _should_ have the same state. If the boot CPU isn't locked ++ * but others are then update_gds_msr() will WARN() of the state ++ * mismatch. If the boot CPU is locked update_gds_msr() will ++ * ensure the other CPUs have the mitigation enabled. 
++ */ ++ gds_mitigation = GDS_MITIGATION_FULL_LOCKED; ++ } ++ ++ update_gds_msr(); ++out: ++ pr_info("%s\n", gds_strings[gds_mitigation]); ++} ++ ++static int __init gds_parse_cmdline(char *str) ++{ ++ if (!str) ++ return -EINVAL; ++ ++ if (!boot_cpu_has_bug(X86_BUG_GDS)) ++ return 0; ++ ++ if (!strcmp(str, "off")) ++ gds_mitigation = GDS_MITIGATION_OFF; ++ ++ return 0; ++} ++early_param("gather_data_sampling", gds_parse_cmdline); ++ ++#undef pr_fmt + #define pr_fmt(fmt) "Spectre V1 : " fmt + + enum spectre_v1_mitigation { +@@ -2147,6 +2263,11 @@ static ssize_t retbleed_show_state(char + return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]); + } + ++static ssize_t gds_show_state(char *buf) ++{ ++ return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]); ++} ++ + static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, + char *buf, unsigned int bug) + { +@@ -2196,6 +2317,9 @@ static ssize_t cpu_show_common(struct de + case X86_BUG_RETBLEED: + return retbleed_show_state(buf); + ++ case X86_BUG_GDS: ++ return gds_show_state(buf); ++ + default: + break; + } +@@ -2260,4 +2384,9 @@ ssize_t cpu_show_retbleed(struct device + { + return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED); + } ++ ++ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf) ++{ ++ return cpu_show_common(dev, attr, buf, X86_BUG_GDS); ++} + #endif +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -1047,6 +1047,8 @@ static const __initconst struct x86_cpu_ + #define MMIO_SBDS BIT(2) + /* CPU is affected by RETbleed, speculating where you would not expect it */ + #define RETBLEED BIT(3) ++/* CPU is affected by GDS */ ++#define GDS BIT(4) + + static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { + VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), +@@ -1059,18 +1061,20 @@ static const struct x86_cpu_id cpu_vuln_ + VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO), + 
VULNBL_INTEL_STEPPINGS(BROADWELL_CORE, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(SKYLAKE_MOBILE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED), +- VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED), ++ VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS), + VULNBL_INTEL_STEPPINGS(SKYLAKE_DESKTOP, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED), +- VULNBL_INTEL_STEPPINGS(KABYLAKE_MOBILE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED), +- VULNBL_INTEL_STEPPINGS(KABYLAKE_DESKTOP,X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED), ++ VULNBL_INTEL_STEPPINGS(KABYLAKE_MOBILE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED | GDS), ++ VULNBL_INTEL_STEPPINGS(KABYLAKE_DESKTOP,X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED | GDS), + VULNBL_INTEL_STEPPINGS(CANNONLAKE_MOBILE,X86_STEPPING_ANY, RETBLEED), +- VULNBL_INTEL_STEPPINGS(ICELAKE_MOBILE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), +- VULNBL_INTEL_STEPPINGS(ICELAKE_XEON_D, X86_STEPPING_ANY, MMIO), +- VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO), +- VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), +- VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), ++ VULNBL_INTEL_STEPPINGS(ICELAKE_MOBILE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), ++ VULNBL_INTEL_STEPPINGS(ICELAKE_XEON_D, X86_STEPPING_ANY, MMIO | GDS), ++ VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS), ++ VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), ++ VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), ++ VULNBL_INTEL_STEPPINGS(TIGERLAKE_L, X86_STEPPING_ANY, GDS), ++ VULNBL_INTEL_STEPPINGS(TIGERLAKE, X86_STEPPING_ANY, GDS), + VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), +- VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED), ++ VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS), + 
VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPING_ANY, MMIO | MMIO_SBDS), + VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_X, X86_STEPPING_ANY, MMIO), + VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS), +@@ -1193,6 +1197,16 @@ static void __init cpu_set_bug_bits(stru + !(ia32_cap & ARCH_CAP_PBRSB_NO)) + setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB); + ++ /* ++ * Check if CPU is vulnerable to GDS. If running in a virtual machine on ++ * an affected processor, the VMM may have disabled the use of GATHER by ++ * disabling AVX2. The only way to do this in HW is to clear XCR0[2], ++ * which means that AVX will be disabled. ++ */ ++ if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) && ++ boot_cpu_has(X86_FEATURE_AVX)) ++ setup_force_cpu_bug(X86_BUG_GDS); ++ + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) + return; + +@@ -1666,6 +1680,8 @@ void identify_secondary_cpu(struct cpuin + validate_apic_and_package_id(c); + x86_spec_ctrl_setup_ap(); + update_srbds_msr(); ++ if (boot_cpu_has_bug(X86_BUG_GDS)) ++ update_gds_msr(); + } + + static __init int setup_noclflush(char *arg) +--- a/arch/x86/kernel/cpu/cpu.h ++++ b/arch/x86/kernel/cpu/cpu.h +@@ -83,6 +83,7 @@ void cpu_select_mitigations(void); + + extern void x86_spec_ctrl_setup_ap(void); + extern void update_srbds_msr(void); ++extern void update_gds_msr(void); + + extern u64 x86_read_arch_cap_msr(void); + +--- a/drivers/base/cpu.c ++++ b/drivers/base/cpu.c +@@ -584,6 +584,12 @@ ssize_t __weak cpu_show_retbleed(struct + return sysfs_emit(buf, "Not affected\n"); + } + ++ssize_t __weak cpu_show_gds(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ return sysfs_emit(buf, "Not affected\n"); ++} ++ + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); + static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); + static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); +@@ -595,6 +601,7 @@ static DEVICE_ATTR(itlb_multihit, 0444, + static 
DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL); + static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL); + static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL); ++static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL); + + static struct attribute *cpu_root_vulnerabilities_attrs[] = { + &dev_attr_meltdown.attr, +@@ -608,6 +615,7 @@ static struct attribute *cpu_root_vulner + &dev_attr_srbds.attr, + &dev_attr_mmio_stale_data.attr, + &dev_attr_retbleed.attr, ++ &dev_attr_gather_data_sampling.attr, + NULL + }; + diff --git a/debian/patches/bugfix/x86/gds/x86-speculation-add-kconfig-option-for-gds.patch b/debian/patches/bugfix/x86/gds/x86-speculation-add-kconfig-option-for-gds.patch new file mode 100644 index 000000000..5d01d4fa9 --- /dev/null +++ b/debian/patches/bugfix/x86/gds/x86-speculation-add-kconfig-option-for-gds.patch @@ -0,0 +1,68 @@ +From dc9710d3e8c3a26fbd764f4bd733814c9464bf31 Mon Sep 17 00:00:00 2001 +From: Daniel Sneddon <daniel.sneddon@linux.intel.com> +Date: Wed, 12 Jul 2023 19:43:13 -0700 +Subject: x86/speculation: Add Kconfig option for GDS + +From: Daniel Sneddon <daniel.sneddon@linux.intel.com> + +commit 53cf5797f114ba2bd86d23a862302119848eff19 upstream + +Gather Data Sampling (GDS) is mitigated in microcode. However, on +systems that haven't received the updated microcode, disabling AVX +can act as a mitigation. Add a Kconfig option that uses the microcode +mitigation if available and disables AVX otherwise. Setting this +option has no effect on systems not affected by GDS. This is the +equivalent of setting gather_data_sampling=force. 
+ +Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com> +Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com> +Acked-by: Josh Poimboeuf <jpoimboe@kernel.org> +Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/x86/Kconfig | 19 +++++++++++++++++++ + arch/x86/kernel/cpu/bugs.c | 4 ++++ + 2 files changed, 23 insertions(+) + +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -2438,6 +2438,25 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK + def_bool y + depends on X86_64 || X86_PAE + ++config GDS_FORCE_MITIGATION ++ bool "Force GDS Mitigation" ++ depends on CPU_SUP_INTEL ++ default n ++ help ++ Gather Data Sampling (GDS) is a hardware vulnerability which allows ++ unprivileged speculative access to data which was previously stored in ++ vector registers. ++ ++ This option is equivalent to setting gather_data_sampling=force on the ++ command line. The microcode mitigation is used if present, otherwise ++ AVX is disabled as a mitigation. On affected systems that are missing ++ the microcode any userspace code that unconditionally uses AVX will ++ break with this option set. ++ ++ Setting this option on systems not vulnerable to GDS has no effect. ++ ++ If in doubt, say N. 
++ + config ARCH_ENABLE_HUGEPAGE_MIGRATION + def_bool y + depends on X86_64 && HUGETLB_PAGE && MIGRATION +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -613,7 +613,11 @@ enum gds_mitigations { + GDS_MITIGATION_HYPERVISOR, + }; + ++#if IS_ENABLED(CONFIG_GDS_FORCE_MITIGATION) ++static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FORCE; ++#else + static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL; ++#endif + + static const char * const gds_strings[] = { + [GDS_MITIGATION_OFF] = "Vulnerable", diff --git a/debian/patches/bugfix/x86/gds/x86-xen-fix-secondary-processors-fpu-initialization.patch b/debian/patches/bugfix/x86/gds/x86-xen-fix-secondary-processors-fpu-initialization.patch new file mode 100644 index 000000000..658710bb5 --- /dev/null +++ b/debian/patches/bugfix/x86/gds/x86-xen-fix-secondary-processors-fpu-initialization.patch @@ -0,0 +1,44 @@ +From fe3e0a13e597c1c8617814bf9b42ab732db5c26e Mon Sep 17 00:00:00 2001 +From: Juergen Gross <jgross@suse.com> +Date: Mon, 3 Jul 2023 15:00:32 +0200 +Subject: x86/xen: Fix secondary processors' FPU initialization + +From: Juergen Gross <jgross@suse.com> + +commit fe3e0a13e597c1c8617814bf9b42ab732db5c26e upstream. + +Moving the call of fpu__init_cpu() from cpu_init() to start_secondary() +broke Xen PV guests, as those don't call start_secondary() for APs. + +Call fpu__init_cpu() in Xen's cpu_bringup(), which is the Xen PV +replacement of start_secondary(). 
+ +Fixes: b81fac906a8f ("x86/fpu: Move FPU initialization into arch_cpu_finalize_init()") +Signed-off-by: Juergen Gross <jgross@suse.com> +Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de> +Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com> +Acked-by: Thomas Gleixner <tglx@linutronix.de> +Link: https://lore.kernel.org/r/20230703130032.22916-1-jgross@suse.com +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/x86/xen/smp_pv.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/arch/x86/xen/smp_pv.c ++++ b/arch/x86/xen/smp_pv.c +@@ -27,6 +27,7 @@ + #include <asm/desc.h> + #include <asm/pgtable.h> + #include <asm/cpu.h> ++#include <asm/fpu/internal.h> + + #include <xen/interface/xen.h> + #include <xen/interface/vcpu.h> +@@ -58,6 +59,7 @@ static void cpu_bringup(void) + int cpu; + + cpu_init(); ++ fpu__init_cpu(); + touch_softlockup_watchdog(); + preempt_disable(); + |