Diffstat (limited to '')
14 files changed, 1427 insertions, 0 deletions
diff --git a/linux-kernel-patches/01-3f1f576a195aa266813cbd4ca70291deb61e0129.patch b/linux-kernel-patches/01-3f1f576a195aa266813cbd4ca70291deb61e0129.patch
new file mode 100644
index 0000000..fb895a4
--- /dev/null
+++ b/linux-kernel-patches/01-3f1f576a195aa266813cbd4ca70291deb61e0129.patch
@@ -0,0 +1,234 @@
+From 3f1f576a195aa266813cbd4ca70291deb61e0129 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Fri, 16 Feb 2018 12:26:38 +0100
+Subject: x86/microcode: Propagate return value from updating functions
+
+... so that callers can know when microcode was updated and act
+accordingly.
+
+Tested-by: Ashok Raj <ashok.raj@intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ashok Raj <ashok.raj@intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/20180216112640.11554-2-bp@alien8.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+---
+ arch/x86/include/asm/microcode.h | 9 +++++++--
+ arch/x86/kernel/cpu/microcode/amd.c | 10 +++++-----
+ arch/x86/kernel/cpu/microcode/core.c | 33 +++++++++++++++++----------------
+ arch/x86/kernel/cpu/microcode/intel.c | 10 +++++-----
+ 4 files changed, 34 insertions(+), 28 deletions(-)
+
+diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
+index 55520cec..7fb1047 100644
+--- a/arch/x86/include/asm/microcode.h
++++ b/arch/x86/include/asm/microcode.h
+@@ -37,7 +37,12 @@ struct cpu_signature {
+
+ struct device;
+
+-enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
++enum ucode_state {
++ UCODE_OK = 0,
++ UCODE_UPDATED,
++ UCODE_NFOUND,
++ UCODE_ERROR,
++};
+
+ struct microcode_ops {
+ enum ucode_state (*request_microcode_user) (int cpu,
+@@ -54,7 +59,7 @@ struct microcode_ops {
+ * are being called.
+ * See also the "Synchronization" section in microcode_core.c.
+ */
+- int (*apply_microcode) (int cpu);
++ enum ucode_state (*apply_microcode) (int cpu);
+ int (*collect_cpu_info) (int cpu, struct cpu_signature *csig);
+ };
+
+diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
+index 330b846..a998e1a 100644
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -498,7 +498,7 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
+ return patch_size;
+ }
+
+-static int apply_microcode_amd(int cpu)
++static enum ucode_state apply_microcode_amd(int cpu)
+ {
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+ struct microcode_amd *mc_amd;
+@@ -512,7 +512,7 @@ static int apply_microcode_amd(int cpu)
+
+ p = find_patch(cpu);
+ if (!p)
+- return 0;
++ return UCODE_NFOUND;
+
+ mc_amd = p->data;
+ uci->mc = p->data;
+@@ -523,13 +523,13 @@ static int apply_microcode_amd(int cpu)
+ if (rev >= mc_amd->hdr.patch_id) {
+ c->microcode = rev;
+ uci->cpu_sig.rev = rev;
+- return 0;
++ return UCODE_OK;
+ }
+
+ if (__apply_microcode_amd(mc_amd)) {
+ pr_err("CPU%d: update failed for patch_level=0x%08x\n",
+ cpu, mc_amd->hdr.patch_id);
+- return -1;
++ return UCODE_ERROR;
+ }
+ pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
+ mc_amd->hdr.patch_id);
+@@ -537,7 +537,7 @@ static int apply_microcode_amd(int cpu)
+ uci->cpu_sig.rev = mc_amd->hdr.patch_id;
+ c->microcode = mc_amd->hdr.patch_id;
+
+- return 0;
++ return UCODE_UPDATED;
+ }
+
+ static int install_equiv_cpu_table(const u8 *buf)
+diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
+index 319dd65..6fdaf7c 100644
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -374,7 +374,7 @@ static int collect_cpu_info(int cpu)
+ }
+
+ struct apply_microcode_ctx {
+- int err;
++ enum ucode_state err;
+ };
+
+ static void apply_microcode_local(void *arg)
+@@ -489,31 +489,29 @@ static void __exit microcode_dev_exit(void)
+ /* fake device for request_firmware */
+ static struct platform_device *microcode_pdev;
+
+-static int reload_for_cpu(int cpu)
++static enum ucode_state reload_for_cpu(int cpu)
+ {
+ struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+ enum ucode_state ustate;
+- int err = 0;
+
+ if (!uci->valid)
+- return err;
++ return UCODE_OK;
+
+ ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
+- if (ustate == UCODE_OK)
+- apply_microcode_on_target(cpu);
+- else
+- if (ustate == UCODE_ERROR)
+- err = -EINVAL;
+- return err;
++ if (ustate != UCODE_OK)
++ return ustate;
++
++ return apply_microcode_on_target(cpu);
+ }
+
+ static ssize_t reload_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+ {
++ enum ucode_state tmp_ret = UCODE_OK;
+ unsigned long val;
++ ssize_t ret = 0;
+ int cpu;
+- ssize_t ret = 0, tmp_ret;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret)
+@@ -526,15 +524,18 @@ static ssize_t reload_store(struct device *dev,
+ mutex_lock(&microcode_mutex);
+ for_each_online_cpu(cpu) {
+ tmp_ret = reload_for_cpu(cpu);
+- if (tmp_ret != 0)
++ if (tmp_ret > UCODE_NFOUND) {
+ pr_warn("Error reloading microcode on CPU %d\n", cpu);
+
+- /* save retval of the first encountered reload error */
+- if (!ret)
+- ret = tmp_ret;
++ /* set retval for the first encountered reload error */
++ if (!ret)
++ ret = -EINVAL;
++ }
+ }
+- if (!ret)
++
++ if (!ret && tmp_ret == UCODE_UPDATED)
+ perf_check_microcode();
++
+ mutex_unlock(&microcode_mutex);
+ put_online_cpus();
+
+diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
+index a15db2b..923054a 100644
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -772,7 +772,7 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
+ return 0;
+ }
+
+-static int apply_microcode_intel(int cpu)
++static enum ucode_state apply_microcode_intel(int cpu)
+ {
+ struct microcode_intel *mc;
+ struct ucode_cpu_info *uci;
+@@ -782,7 +782,7 @@ static int apply_microcode_intel(int cpu)
+
+ /* We should bind the task to the CPU */
+ if (WARN_ON(raw_smp_processor_id() != cpu))
+- return -1;
++ return UCODE_ERROR;
+
+ uci = ucode_cpu_info + cpu;
+ mc = uci->mc;
+@@ -790,7 +790,7 @@ static int apply_microcode_intel(int cpu)
+ /* Look for a newer patch in our cache: */
+ mc = find_patch(uci);
+ if (!mc)
+- return 0;
++ return UCODE_NFOUND;
+ }
+
+ /* write microcode via MSR 0x79 */
+@@ -801,7 +801,7 @@ static int apply_microcode_intel(int cpu)
+ if (rev != mc->hdr.rev) {
+ pr_err("CPU%d update to revision 0x%x failed\n",
+ cpu, mc->hdr.rev);
+- return -1;
++ return UCODE_ERROR;
+ }
+
+ if (rev != prev_rev) {
+@@ -818,7 +818,7 @@ static int apply_microcode_intel(int cpu)
+ uci->cpu_sig.rev = rev;
+ c->microcode = rev;
+
+- return 0;
++ return UCODE_UPDATED;
+ }
+
+ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
+--
+cgit v1.1
+
diff --git a/linux-kernel-patches/02-1008c52c09dcb23d93f8e0ea83a6246265d2cce0.patch b/linux-kernel-patches/02-1008c52c09dcb23d93f8e0ea83a6246265d2cce0.patch
new file mode 100644
index 0000000..df8b5d3
--- /dev/null
+++ b/linux-kernel-patches/02-1008c52c09dcb23d93f8e0ea83a6246265d2cce0.patch
@@ -0,0 +1,90 @@
+From 1008c52c09dcb23d93f8e0ea83a6246265d2cce0 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Fri, 16 Feb 2018 12:26:39 +0100
+Subject: x86/CPU: Add a microcode loader callback
+
+Add a callback function which the microcode loader calls when microcode
+has been updated to a newer revision. Do the callback only when no error
+was encountered during loading.
+
+Tested-by: Ashok Raj <ashok.raj@intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ashok Raj <ashok.raj@intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/20180216112640.11554-3-bp@alien8.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+---
+ arch/x86/include/asm/processor.h | 1 +
+ arch/x86/kernel/cpu/common.c | 10 ++++++++++
+ arch/x86/kernel/cpu/microcode/core.c | 8 ++++++--
+ 3 files changed, 17 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index 1bd9ed8..b0ccd48 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -977,4 +977,5 @@ bool xen_set_default_idle(void);
+
+ void stop_this_cpu(void *dummy);
+ void df_debug(struct pt_regs *regs, long error_code);
++void microcode_check(void);
+ #endif /* _ASM_X86_PROCESSOR_H */
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 824aee0..84f1cd8 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1749,3 +1749,13 @@ static int __init init_cpu_syscore(void)
+ return 0;
+ }
+ core_initcall(init_cpu_syscore);
++
++/*
++ * The microcode loader calls this upon late microcode load to recheck features,
++ * only when microcode has been updated. Caller holds microcode_mutex and CPU
++ * hotplug lock.
++ */
++void microcode_check(void)
++{
++ perf_check_microcode();
++}
+diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
+index 6fdaf7c..aa1b9a4 100644
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -509,6 +509,7 @@ static ssize_t reload_store(struct device *dev,
+ const char *buf, size_t size)
+ {
+ enum ucode_state tmp_ret = UCODE_OK;
++ bool do_callback = false;
+ unsigned long val;
+ ssize_t ret = 0;
+ int cpu;
+@@ -531,10 +532,13 @@ static ssize_t reload_store(struct device *dev,
+ if (!ret)
+ ret = -EINVAL;
+ }
++
++ if (tmp_ret == UCODE_UPDATED)
++ do_callback = true;
+ }
+
+- if (!ret && tmp_ret == UCODE_UPDATED)
+- perf_check_microcode();
++ if (!ret && do_callback)
++ microcode_check();
+
+ mutex_unlock(&microcode_mutex);
+ put_online_cpus();
+--
+cgit v1.1
+
diff --git a/linux-kernel-patches/03-42ca8082e260dcfd8afa2afa6ec1940b9d41724c.patch b/linux-kernel-patches/03-42ca8082e260dcfd8afa2afa6ec1940b9d41724c.patch
new file mode 100644
index 0000000..b4a17ca
--- /dev/null
+++ b/linux-kernel-patches/03-42ca8082e260dcfd8afa2afa6ec1940b9d41724c.patch
@@ -0,0 +1,64 @@
+From 42ca8082e260dcfd8afa2afa6ec1940b9d41724c Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Fri, 16 Feb 2018 12:26:40 +0100
+Subject: x86/CPU: Check CPU feature bits after microcode upgrade
+
+With some microcode upgrades, new CPUID features can become visible on
+the CPU. Check what the kernel has mirrored now and issue a warning
+hinting at possible things the user/admin can do to make use of the
+newly visible features.
+
+Originally-by: Ashok Raj <ashok.raj@intel.com>
+Tested-by: Ashok Raj <ashok.raj@intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ashok Raj <ashok.raj@intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/20180216112640.11554-4-bp@alien8.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+---
+ arch/x86/kernel/cpu/common.c | 20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 84f1cd8..348cf48 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1757,5 +1757,25 @@ core_initcall(init_cpu_syscore);
+ */
+ void microcode_check(void)
+ {
++ struct cpuinfo_x86 info;
++
+ perf_check_microcode();
++
++ /* Reload CPUID max function as it might've changed. */
++ info.cpuid_level = cpuid_eax(0);
++
++ /*
++ * Copy all capability leafs to pick up the synthetic ones so that
++ * memcmp() below doesn't fail on that. The ones coming from CPUID will
++ * get overwritten in get_cpu_cap().
++ */
++ memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability));
++
++ get_cpu_cap(&info);
++
++ if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)))
++ return;
++
++ pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
++ pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
+ }
+--
+cgit v1.1
+
diff --git a/linux-kernel-patches/04-854857f5944c59a881ff607b37ed9ed41d031a3b.patch b/linux-kernel-patches/04-854857f5944c59a881ff607b37ed9ed41d031a3b.patch
new file mode 100644
index 0000000..e618885
--- /dev/null
+++ b/linux-kernel-patches/04-854857f5944c59a881ff607b37ed9ed41d031a3b.patch
@@ -0,0 +1,62 @@
+From 854857f5944c59a881ff607b37ed9ed41d031a3b Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Wed, 28 Feb 2018 11:28:40 +0100
+Subject: x86/microcode: Get rid of struct apply_microcode_ctx
+
+It is a useless remnant from earlier times. Use the ucode_state enum
+directly.
+
+No functional change.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
+Tested-by: Ashok Raj <ashok.raj@intel.com>
+Cc: Arjan Van De Ven <arjan.van.de.ven@intel.com>
+Link: https://lkml.kernel.org/r/20180228102846.13447-2-bp@alien8.de
+---
+ arch/x86/kernel/cpu/microcode/core.c | 19 ++++++++-----------
+ 1 file changed, 8 insertions(+), 11 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
+index aa1b9a4..6337065 100644
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -373,26 +373,23 @@ static int collect_cpu_info(int cpu)
+ return ret;
+ }
+
+-struct apply_microcode_ctx {
+- enum ucode_state err;
+-};
+-
+ static void apply_microcode_local(void *arg)
+ {
+- struct apply_microcode_ctx *ctx = arg;
++ enum ucode_state *err = arg;
+
+- ctx->err = microcode_ops->apply_microcode(smp_processor_id());
++ *err = microcode_ops->apply_microcode(smp_processor_id());
+ }
+
+ static int apply_microcode_on_target(int cpu)
+ {
+- struct apply_microcode_ctx ctx = { .err = 0 };
++ enum ucode_state err;
+ int ret;
+
+- ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1);
+- if (!ret)
+- ret = ctx.err;
+-
++ ret = smp_call_function_single(cpu, apply_microcode_local, &err, 1);
++ if (!ret) {
++ if (err == UCODE_ERROR)
++ ret = 1;
++ }
+ return ret;
+ }
+
+--
+cgit v1.1
+
diff --git a/linux-kernel-patches/05-c182d2b7d0ca48e0d6ff16f7d883161238c447ed.patch b/linux-kernel-patches/05-c182d2b7d0ca48e0d6ff16f7d883161238c447ed.patch
new file mode 100644
index 0000000..0f868bf
--- /dev/null
+++ b/linux-kernel-patches/05-c182d2b7d0ca48e0d6ff16f7d883161238c447ed.patch
@@ -0,0 +1,90 @@
+From c182d2b7d0ca48e0d6ff16f7d883161238c447ed Mon Sep 17 00:00:00 2001
+From: Ashok Raj <ashok.raj@intel.com>
+Date: Wed, 28 Feb 2018 11:28:41 +0100
+Subject: x86/microcode/intel: Check microcode revision before updating sibling
+ threads
+
+After updating microcode on one of the threads of a core, the other
+thread sibling automatically gets the update since the microcode
+resources on a hyperthreaded core are shared between the two threads.
+
+Check the microcode revision on the CPU before performing a microcode
+update and thus save us the WRMSR 0x79 because it is a particularly
+expensive operation.
+
+[ Borislav: Massage changelog and coding style. ]
+
+Signed-off-by: Ashok Raj <ashok.raj@intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
+Tested-by: Ashok Raj <ashok.raj@intel.com>
+Cc: Arjan Van De Ven <arjan.van.de.ven@intel.com>
+Link: http://lkml.kernel.org/r/1519352533-15992-2-git-send-email-ashok.raj@intel.com
+Link: https://lkml.kernel.org/r/20180228102846.13447-3-bp@alien8.de
+---
+ arch/x86/kernel/cpu/microcode/intel.c | 27 ++++++++++++++++++++++++---
+ 1 file changed, 24 insertions(+), 3 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
+index 923054a..87bd6dc 100644
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -589,6 +589,17 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
+ if (!mc)
+ return 0;
+
++ /*
++ * Save us the MSR write below - which is a particular expensive
++ * operation - when the other hyperthread has updated the microcode
++ * already.
++ */
++ rev = intel_get_microcode_revision();
++ if (rev >= mc->hdr.rev) {
++ uci->cpu_sig.rev = rev;
++ return UCODE_OK;
++ }
++
+ /* write microcode via MSR 0x79 */
+ native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
+
+@@ -776,7 +787,7 @@ static enum ucode_state apply_microcode_intel(int cpu)
+ {
+ struct microcode_intel *mc;
+ struct ucode_cpu_info *uci;
+- struct cpuinfo_x86 *c;
++ struct cpuinfo_x86 *c = &cpu_data(cpu);
+ static int prev_rev;
+ u32 rev;
+
+@@ -793,6 +804,18 @@ static enum ucode_state apply_microcode_intel(int cpu)
+ return UCODE_NFOUND;
+ }
+
++ /*
++ * Save us the MSR write below - which is a particular expensive
++ * operation - when the other hyperthread has updated the microcode
++ * already.
++ */
++ rev = intel_get_microcode_revision();
++ if (rev >= mc->hdr.rev) {
++ uci->cpu_sig.rev = rev;
++ c->microcode = rev;
++ return UCODE_OK;
++ }
++
+ /* write microcode via MSR 0x79 */
+ wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
+
+@@ -813,8 +836,6 @@ static enum ucode_state apply_microcode_intel(int cpu)
+ prev_rev = rev;
+ }
+
+- c = &cpu_data(cpu);
+-
+ uci->cpu_sig.rev = rev;
+ c->microcode = rev;
+
+--
+cgit v1.1
+
diff --git a/linux-kernel-patches/06-91df9fdf51492aec9fed6b4cbd33160886740f47.patch b/linux-kernel-patches/06-91df9fdf51492aec9fed6b4cbd33160886740f47.patch
new file mode 100644
index 0000000..ab9c49e
--- /dev/null
+++ b/linux-kernel-patches/06-91df9fdf51492aec9fed6b4cbd33160886740f47.patch
@@ -0,0 +1,58 @@
+From 91df9fdf51492aec9fed6b4cbd33160886740f47 Mon Sep 17 00:00:00 2001
+From: Ashok Raj <ashok.raj@intel.com>
+Date: Wed, 28 Feb 2018 11:28:42 +0100
+Subject: x86/microcode/intel: Writeback and invalidate caches before updating
+ microcode
+
+Updating microcode is less error prone when caches have been flushed and
+depending on what exactly the microcode is updating. For example, some
+of the issues around certain Broadwell parts can be addressed by doing a
+full cache flush.
+
+[ Borislav: Massage it and use native_wbinvd() in both cases. ]
+
+Signed-off-by: Ashok Raj <ashok.raj@intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
+Tested-by: Ashok Raj <ashok.raj@intel.com>
+Cc: Arjan Van De Ven <arjan.van.de.ven@intel.com>
+Link: http://lkml.kernel.org/r/1519352533-15992-3-git-send-email-ashok.raj@intel.com
+Link: https://lkml.kernel.org/r/20180228102846.13447-4-bp@alien8.de
+---
+ arch/x86/kernel/cpu/microcode/intel.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
+index 87bd6dc..e2864bc 100644
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -600,6 +600,12 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
+ return UCODE_OK;
+ }
+
++ /*
++ * Writeback and invalidate caches before updating microcode to avoid
++ * internal issues depending on what the microcode is updating.
++ */
++ native_wbinvd();
++
+ /* write microcode via MSR 0x79 */
+ native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
+
+@@ -816,6 +822,12 @@ static enum ucode_state apply_microcode_intel(int cpu)
+ return UCODE_OK;
+ }
+
++ /*
++ * Writeback and invalidate caches before updating microcode to avoid
++ * internal issues depending on what the microcode is updating.
++ */
++ native_wbinvd();
++
+ /* write microcode via MSR 0x79 */
+ wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
+
+--
+cgit v1.1
+
diff --git a/linux-kernel-patches/07-30ec26da9967d0d785abc24073129a34c3211777.patch b/linux-kernel-patches/07-30ec26da9967d0d785abc24073129a34c3211777.patch
new file mode 100644
index 0000000..d785ad2
--- /dev/null
+++ b/linux-kernel-patches/07-30ec26da9967d0d785abc24073129a34c3211777.patch
@@ -0,0 +1,71 @@
+From 30ec26da9967d0d785abc24073129a34c3211777 Mon Sep 17 00:00:00 2001
+From: Ashok Raj <ashok.raj@intel.com>
+Date: Wed, 28 Feb 2018 11:28:43 +0100
+Subject: x86/microcode: Do not upload microcode if CPUs are offline
+
+Avoid loading microcode if any of the CPUs are offline, and issue a
+warning. Having different microcode revisions on the system at any time
+is outright dangerous.
+
+[ Borislav: Massage changelog. ]
+
+Signed-off-by: Ashok Raj <ashok.raj@intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
+Tested-by: Ashok Raj <ashok.raj@intel.com>
+Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Arjan Van De Ven <arjan.van.de.ven@intel.com>
+Link: http://lkml.kernel.org/r/1519352533-15992-4-git-send-email-ashok.raj@intel.com
+Link: https://lkml.kernel.org/r/20180228102846.13447-5-bp@alien8.de
+---
+ arch/x86/kernel/cpu/microcode/core.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
+index 6337065..fa32cb3 100644
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -486,6 +486,16 @@ static void __exit microcode_dev_exit(void)
+ /* fake device for request_firmware */
+ static struct platform_device *microcode_pdev;
+
++static int check_online_cpus(void)
++{
++ if (num_online_cpus() == num_present_cpus())
++ return 0;
++
++ pr_err("Not all CPUs online, aborting microcode update.\n");
++
++ return -EINVAL;
++}
++
+ static enum ucode_state reload_for_cpu(int cpu)
+ {
+ struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+@@ -519,7 +529,13 @@ static ssize_t reload_store(struct device *dev,
+ return size;
+
+ get_online_cpus();
++
++ ret = check_online_cpus();
++ if (ret)
++ goto put;
++
+ mutex_lock(&microcode_mutex);
++
+ for_each_online_cpu(cpu) {
+ tmp_ret = reload_for_cpu(cpu);
+ if (tmp_ret > UCODE_NFOUND) {
+@@ -538,6 +554,8 @@ static ssize_t reload_store(struct device *dev,
+ microcode_check();
+
+ mutex_unlock(&microcode_mutex);
++
++put:
+ put_online_cpus();
+
+ if (!ret)
+--
+cgit v1.1
+
diff --git a/linux-kernel-patches/08-d8c3b52c00a05036e0a6b315b4b17921a7b67997.patch b/linux-kernel-patches/08-d8c3b52c00a05036e0a6b315b4b17921a7b67997.patch
new file mode 100644
index 0000000..66699ff
--- /dev/null
+++ b/linux-kernel-patches/08-d8c3b52c00a05036e0a6b315b4b17921a7b67997.patch
@@ -0,0 +1,54 @@
+From d8c3b52c00a05036e0a6b315b4b17921a7b67997 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Wed, 28 Feb 2018 11:28:44 +0100
+Subject: x86/microcode/intel: Look into the patch cache first
+
+The cache might contain a newer patch - look in there first.
+
+A follow-on change will make sure newest patches are loaded into the
+cache of microcode patches.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
+Tested-by: Ashok Raj <ashok.raj@intel.com>
+Cc: Arjan Van De Ven <arjan.van.de.ven@intel.com>
+Link: https://lkml.kernel.org/r/20180228102846.13447-6-bp@alien8.de
+---
+ arch/x86/kernel/cpu/microcode/intel.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
+index e2864bc..2aded9d 100644
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -791,9 +791,9 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
+
+ static enum ucode_state apply_microcode_intel(int cpu)
+ {
+- struct microcode_intel *mc;
+- struct ucode_cpu_info *uci;
++ struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
++ struct microcode_intel *mc;
+ static int prev_rev;
+ u32 rev;
+
+@@ -801,11 +801,10 @@ static enum ucode_state apply_microcode_intel(int cpu)
+ if (WARN_ON(raw_smp_processor_id() != cpu))
+ return UCODE_ERROR;
+
+- uci = ucode_cpu_info + cpu;
+- mc = uci->mc;
++ /* Look for a newer patch in our cache: */
++ mc = find_patch(uci);
+ if (!mc) {
+- /* Look for a newer patch in our cache: */
+- mc = find_patch(uci);
++ mc = uci->mc;
+ if (!mc)
+ return UCODE_NFOUND;
+ }
+--
+cgit v1.1
+
diff --git a/linux-kernel-patches/09-cfb52a5a09c8ae3a1dafb44ce549fde5b69e8117.patch b/linux-kernel-patches/09-cfb52a5a09c8ae3a1dafb44ce549fde5b69e8117.patch
new file mode 100644
index 0000000..0541a2b
--- /dev/null
+++ b/linux-kernel-patches/09-cfb52a5a09c8ae3a1dafb44ce549fde5b69e8117.patch
@@ -0,0 +1,67 @@
+From cfb52a5a09c8ae3a1dafb44ce549fde5b69e8117 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Wed, 28 Feb 2018 11:28:45 +0100
+Subject: x86/microcode: Request microcode on the BSP
+
+... so that any newer version can land in the cache and can later be
+fished out by the application functions. Do that before grabbing the
+hotplug lock.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
+Tested-by: Ashok Raj <ashok.raj@intel.com>
+Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Arjan Van De Ven <arjan.van.de.ven@intel.com>
+Link: https://lkml.kernel.org/r/20180228102846.13447-7-bp@alien8.de
+---
+ arch/x86/kernel/cpu/microcode/core.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
+index fa32cb3..5dd157d 100644
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -499,15 +499,10 @@ static int check_online_cpus(void)
+ static enum ucode_state reload_for_cpu(int cpu)
+ {
+ struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+- enum ucode_state ustate;
+
+ if (!uci->valid)
+ return UCODE_OK;
+
+- ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
+- if (ustate != UCODE_OK)
+- return ustate;
+-
+ return apply_microcode_on_target(cpu);
+ }
+
+@@ -515,11 +510,11 @@ static ssize_t reload_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+ {
++ int cpu, bsp = boot_cpu_data.cpu_index;
+ enum ucode_state tmp_ret = UCODE_OK;
+ bool do_callback = false;
+ unsigned long val;
+ ssize_t ret = 0;
+- int cpu;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret)
+@@ -528,6 +523,10 @@ static ssize_t reload_store(struct device *dev,
+ if (val != 1)
+ return size;
+
++ tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
++ if (tmp_ret != UCODE_OK)
++ return size;
++
+ get_online_cpus();
+
+ ret = check_online_cpus();
+--
+cgit v1.1
+
diff --git a/linux-kernel-patches/10-a5321aec6412b20b5ad15db2d6b916c05349dbff.patch b/linux-kernel-patches/10-a5321aec6412b20b5ad15db2d6b916c05349dbff.patch
new file mode 100644
index 0000000..793d62e
--- /dev/null
+++ b/linux-kernel-patches/10-a5321aec6412b20b5ad15db2d6b916c05349dbff.patch
@@ -0,0 +1,213 @@
+From a5321aec6412b20b5ad15db2d6b916c05349dbff Mon Sep 17 00:00:00 2001
+From: Ashok Raj <ashok.raj@intel.com>
+Date: Wed, 28 Feb 2018 11:28:46 +0100
+Subject: x86/microcode: Synchronize late microcode loading
+
+Original idea by Ashok, completely rewritten by Borislav.
+
+Before you read any further: the early loading method is still the
+preferred one and you should always do that. The following patch is
+improving the late loading mechanism for long running jobs and cloud use
+cases.
+
+Gather all cores and serialize the microcode update on them by doing it
+one-by-one to make the late update process as reliable as possible and
+avoid potential issues caused by the microcode update.
+
+[ Borislav: Rewrite completely. ]
+
+Co-developed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Ashok Raj <ashok.raj@intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
+Tested-by: Ashok Raj <ashok.raj@intel.com>
+Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Arjan Van De Ven <arjan.van.de.ven@intel.com>
+Link: https://lkml.kernel.org/r/20180228102846.13447-8-bp@alien8.de
+---
+ arch/x86/kernel/cpu/microcode/core.c | 118 +++++++++++++++++++++++++++--------
+ 1 file changed, 92 insertions(+), 26 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
+index 5dd157d..70ecbc8 100644
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -22,13 +22,16 @@
+ #define pr_fmt(fmt) "microcode: " fmt
+
+ #include <linux/platform_device.h>
++#include <linux/stop_machine.h>
+ #include <linux/syscore_ops.h>
+ #include <linux/miscdevice.h>
+ #include <linux/capability.h>
+ #include <linux/firmware.h>
+ #include <linux/kernel.h>
++#include <linux/delay.h>
+ #include <linux/mutex.h>
+ #include <linux/cpu.h>
++#include <linux/nmi.h>
+ #include <linux/fs.h>
+ #include <linux/mm.h>
+
+@@ -64,6 +67,11 @@ LIST_HEAD(microcode_cache);
+ */
+ static DEFINE_MUTEX(microcode_mutex);
+
++/*
++ * Serialize late loading so that CPUs get updated one-by-one.
++ */
++static DEFINE_SPINLOCK(update_lock);
++
+ struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
+
+ struct cpu_info_ctx {
+@@ -486,6 +494,19 @@ static void __exit microcode_dev_exit(void)
+ /* fake device for request_firmware */
+ static struct platform_device *microcode_pdev;
+
++/*
++ * Late loading dance. Why the heavy-handed stomp_machine effort?
++ *
++ * - HT siblings must be idle and not execute other code while the other sibling
++ * is loading microcode in order to avoid any negative interactions caused by
++ * the loading.
++ *
++ * - In addition, microcode update on the cores must be serialized until this
++ * requirement can be relaxed in the future. Right now, this is conservative
++ * and good.
++ */
++#define SPINUNIT 100 /* 100 nsec */
++
+ static int check_online_cpus(void)
+ {
+ if (num_online_cpus() == num_present_cpus())
+@@ -496,23 +517,85 @@ static int check_online_cpus(void)
+ return -EINVAL;
+ }
+
+-static enum ucode_state reload_for_cpu(int cpu)
++static atomic_t late_cpus;
++
++/*
++ * Returns:
++ * < 0 - on error
++ * 0 - no update done
++ * 1 - microcode was updated
++ */
++static int __reload_late(void *info)
+ {
+- struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
++ unsigned int timeout = NSEC_PER_SEC;
++ int all_cpus = num_online_cpus();
++ int cpu = smp_processor_id();
++ enum ucode_state err;
++ int ret = 0;
+
+- if (!uci->valid)
+- return UCODE_OK;
++ atomic_dec(&late_cpus);
++
++ /*
++ * Wait for all CPUs to arrive. A load will not be attempted unless all
++ * CPUs show up.
++ * */
++ while (atomic_read(&late_cpus)) {
++ if (timeout < SPINUNIT) {
++ pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
++ atomic_read(&late_cpus));
++ return -1;
++ }
++
++ ndelay(SPINUNIT);
++ timeout -= SPINUNIT;
++
++ touch_nmi_watchdog();
++ }
++
++ spin_lock(&update_lock);
++ apply_microcode_local(&err);
++ spin_unlock(&update_lock);
++
++ if (err > UCODE_NFOUND) {
++ pr_warn("Error reloading microcode on CPU %d\n", cpu);
++ ret = -1;
++ } else if (err == UCODE_UPDATED) {
++ ret = 1;
++ }
+
+- return apply_microcode_on_target(cpu);
++ atomic_inc(&late_cpus);
++
++ while (atomic_read(&late_cpus) != all_cpus)
++ cpu_relax();
++
++ return ret;
++}
++
++/*
++ * Reload microcode late on all CPUs. Wait for a sec until they
++ * all gather together.
++ */
++static int microcode_reload_late(void)
++{
++ int ret;
++
++ atomic_set(&late_cpus, num_online_cpus());
++
++ ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
++ if (ret < 0)
++ return ret;
++ else if (ret > 0)
++ microcode_check();
++
++ return ret;
+ }
+
+ static ssize_t reload_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+ {
+- int cpu, bsp = boot_cpu_data.cpu_index;
+ enum ucode_state tmp_ret = UCODE_OK;
+- bool do_callback = false;
++ int bsp = boot_cpu_data.cpu_index;
+ unsigned long val;
+ ssize_t ret = 0;
+
+@@ -534,30 +617,13 @@ static ssize_t reload_store(struct device *dev,
+ goto put;
+
+ mutex_lock(&microcode_mutex);
+-
+- for_each_online_cpu(cpu) {
+- tmp_ret = reload_for_cpu(cpu);
+- if (tmp_ret > UCODE_NFOUND) {
+- pr_warn("Error reloading microcode on CPU %d\n", cpu);
+-
+- /* set retval for the first encountered reload error */
+- if (!ret)
+- ret = -EINVAL;
+- }
+-
+- if (tmp_ret == UCODE_UPDATED)
+- do_callback = true;
+- }
+-
+- if (!ret && do_callback)
+- microcode_check();
+-
++ ret = microcode_reload_late();
+ mutex_unlock(&microcode_mutex);
+
+ put:
+ put_online_cpus();
+
+- if (!ret)
++ if (ret >= 0)
+ ret = size;
+
+ return ret;
+--
+cgit v1.1
+
diff --git a/linux-kernel-patches/11-2613f36ed965d0e5a595a1d931fd3b480e82d6fd.patch b/linux-kernel-patches/11-2613f36ed965d0e5a595a1d931fd3b480e82d6fd.patch
new file mode 100644
index 0000000..c9adfe2
--- /dev/null
+++ b/linux-kernel-patches/11-2613f36ed965d0e5a595a1d931fd3b480e82d6fd.patch
@@ -0,0 +1,154 @@
+From 2613f36ed965d0e5a595a1d931fd3b480e82d6fd Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Wed, 14 Mar 2018 19:36:14 +0100
+Subject: x86/microcode: Attempt late loading only when new microcode is
+ present
+
+Return UCODE_NEW from the scanning functions to denote that new microcode
+was found and only then attempt the expensive synchronization dance.
+
+Reported-by: Emanuel Czirai <xftroxgpx@protonmail.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Emanuel Czirai <xftroxgpx@protonmail.com>
+Tested-by: Ashok Raj <ashok.raj@intel.com>
+Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
+Link: https://lkml.kernel.org/r/20180314183615.17629-1-bp@alien8.de
+---
+ arch/x86/include/asm/microcode.h | 1 +
+ arch/x86/kernel/cpu/microcode/amd.c | 34 +++++++++++++++++++++-------------
+ arch/x86/kernel/cpu/microcode/core.c | 8 +++-----
+ arch/x86/kernel/cpu/microcode/intel.c | 4 +++-
+ 4 files changed, 28 insertions(+), 19 deletions(-)
+
+diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
+index 7fb1047..6cf0e4c 100644
+--- a/arch/x86/include/asm/microcode.h
++++ b/arch/x86/include/asm/microcode.h
+@@ -39,6 +39,7 @@ struct device;
+
+ enum ucode_state {
+ UCODE_OK = 0,
++ UCODE_NEW,
+ UCODE_UPDATED,
+ UCODE_NFOUND,
+ UCODE_ERROR,
+diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
+index a998e1a..4817992 100644
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -339,7 +339,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
+ return -EINVAL;
+
+ ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
+- if (ret != UCODE_OK)
++ if (ret > UCODE_UPDATED)
+ return -EINVAL;
+
+ return 0;
+@@ -683,27 +683,35 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
+ static enum ucode_state
+ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
+ {
++ struct ucode_patch *p;
+ enum ucode_state ret;
+
+ /* free old equiv table */
+ free_equiv_cpu_table();
+
+ ret = __load_microcode_amd(family, data, size);
+-
+- if (ret != UCODE_OK)
++ if (ret != UCODE_OK) {
+ cleanup();
++ return ret;
++ }
+
+-#ifdef CONFIG_X86_32
+- /* save BSP's matching patch for early load */
+- if (save) {
+- struct ucode_patch *p = find_patch(0);
+- if (p) {
+- memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
+- memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
+- PATCH_MAX_SIZE));
+- }
++ p = find_patch(0);
++ if (!p) {
++ return ret;
++ } else {
++ if (boot_cpu_data.microcode == p->patch_id)
++ return ret;
++
++ ret = UCODE_NEW;
+ }
+-#endif
++
++ /* save BSP's matching patch for early load */
++ if (!save)
++ return ret;
++
++ memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
++ memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE));
++
+ return ret;
+ }
+
+diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
+index 70ecbc8..9f0fe5b 100644
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -607,7 +607,7 @@ static ssize_t reload_store(struct device *dev,
+ return size;
+
+ tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
+- if (tmp_ret != UCODE_OK)
++ if (tmp_ret != UCODE_NEW)
+ return size;
+
+ get_online_cpus();
+@@ -691,10 +691,8 @@ static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
+ if (system_state != SYSTEM_RUNNING)
+ return UCODE_NFOUND;
+
+- ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev,
+- refresh_fw);
+-
+- if (ustate == UCODE_OK) {
++ ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, refresh_fw);
++ if (ustate == UCODE_NEW) {
+ pr_debug("CPU%d updated upon init\n", cpu);
+ apply_microcode_on_target(cpu);
+ }
+diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
+index 2aded9d..32b8e57 100644
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -862,6 +862,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
+ unsigned int leftover = size;
+ unsigned int curr_mc_size = 0, new_mc_size = 0;
+ unsigned int csig, cpf;
++ enum ucode_state ret = UCODE_OK;
+
+ while (leftover) {
+ struct microcode_header_intel mc_header;
+@@ -903,6 +904,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
+ new_mc = mc;
+ new_mc_size = mc_size;
+ mc = NULL; /* trigger new vmalloc */
++ ret = UCODE_NEW;
+ }
+
+ ucode_ptr += mc_size;
+@@ -932,7 +934,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
+ pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
+ cpu, new_rev, uci->cpu_sig.rev);
+
+- return UCODE_OK;
++ return ret;
+ }
+
+ static int get_ucode_fw(void *to, const void *from, size_t n)
+--
+cgit v1.1
+
diff --git a/linux-kernel-patches/12-bb8c13d61a629276a162c1d2b1a20a815cbcfbb7.patch b/linux-kernel-patches/12-bb8c13d61a629276a162c1d2b1a20a815cbcfbb7.patch
new file mode 100644
index 0000000..7baa44a
--- /dev/null
+++ b/linux-kernel-patches/12-bb8c13d61a629276a162c1d2b1a20a815cbcfbb7.patch
@@ -0,0 +1,163 @@
+From bb8c13d61a629276a162c1d2b1a20a815cbcfbb7 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Wed, 14 Mar 2018 19:36:15 +0100
+Subject: x86/microcode: Fix CPU synchronization routine
+
+Emanuel reported an issue with a hang during microcode update because my
+dumb idea to use one atomic synchronization variable for both rendezvous
+- before and after update - was simply bollocks:
+
+ microcode: microcode_reload_late: late_cpus: 4
+ microcode: __reload_late: cpu 2 entered
+ microcode: __reload_late: cpu 1 entered
+ microcode: __reload_late: cpu 3 entered
+ microcode: __reload_late: cpu 0 entered
+ microcode: __reload_late: cpu 1 left
+ microcode: Timeout while waiting for CPUs rendezvous, remaining: 1
+
+CPU1 above would finish, leave and the others will still spin waiting for
+it to join.
+
+So do two synchronization atomics instead, which makes the code a lot more
+straightforward.
+
+Also, since the update is serialized and it also takes quite some time per
+microcode engine, increase the exit timeout by the number of CPUs on the
+system.
+
+That's ok because the moment all CPUs are done, that timeout will be cut
+short.
+
+Furthermore, panic when some of the CPUs timeout when returning from a
+microcode update: we can't allow a system with not all cores updated.
+
+Also, as an optimization, do not do the exit sync if microcode wasn't
+updated.
+
+Reported-by: Emanuel Czirai <xftroxgpx@protonmail.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Emanuel Czirai <xftroxgpx@protonmail.com>
+Tested-by: Ashok Raj <ashok.raj@intel.com>
+Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
+Link: https://lkml.kernel.org/r/20180314183615.17629-2-bp@alien8.de
+---
+ arch/x86/kernel/cpu/microcode/core.c | 68 ++++++++++++++++++++++--------------
+ 1 file changed, 41 insertions(+), 27 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
+index 9f0fe5b..10c4fc2 100644
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -517,7 +517,29 @@ static int check_online_cpus(void)
+ return -EINVAL;
+ }
+
+-static atomic_t late_cpus;
++static atomic_t late_cpus_in;
++static atomic_t late_cpus_out;
++
++static int __wait_for_cpus(atomic_t *t, long long timeout)
++{
++ int all_cpus = num_online_cpus();
++
++ atomic_inc(t);
++
++ while (atomic_read(t) < all_cpus) {
++ if (timeout < SPINUNIT) {
++ pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
++ all_cpus - atomic_read(t));
++ return 1;
++ }
++
++ ndelay(SPINUNIT);
++ timeout -= SPINUNIT;
++
++ touch_nmi_watchdog();
++ }
++ return 0;
++}
+
+ /*
+ * Returns:
+@@ -527,30 +549,16 @@ static atomic_t late_cpus;
+ */
+ static int __reload_late(void *info)
+ {
+- unsigned int timeout = NSEC_PER_SEC;
+- int all_cpus = num_online_cpus();
+ int cpu = smp_processor_id();
+ enum ucode_state err;
+ int ret = 0;
+
+- atomic_dec(&late_cpus);
+-
+ /*
+ * Wait for all CPUs to arrive. A load will not be attempted unless all
+ * CPUs show up.
+ * */
+- while (atomic_read(&late_cpus)) {
+- if (timeout < SPINUNIT) {
+- pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
+- atomic_read(&late_cpus));
+- return -1;
+- }
+-
+- ndelay(SPINUNIT);
+- timeout -= SPINUNIT;
+-
+- touch_nmi_watchdog();
+- }
++ if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
++ return -1;
+
+ spin_lock(&update_lock);
+ apply_microcode_local(&err);
+@@ -558,15 +566,22 @@ static int __reload_late(void *info)
+
+ if (err > UCODE_NFOUND) {
+ pr_warn("Error reloading microcode on CPU %d\n", cpu);
+- ret = -1;
+- } else if (err == UCODE_UPDATED) {
++ return -1;
++ /* siblings return UCODE_OK because their engine got updated already */
++ } else if (err == UCODE_UPDATED || err == UCODE_OK) {
+ ret = 1;
++ } else {
++ return ret;
+ }
+
+- atomic_inc(&late_cpus);
+-
+- while (atomic_read(&late_cpus) != all_cpus)
+- cpu_relax();
++ /*
++ * Increase the wait timeout to a safe value here since we're
++ * serializing the microcode update and that could take a while on a
++ * large number of CPUs. And that is fine as the *actual* timeout will
++ * be determined by the last CPU finished updating and thus cut short.
++ */
++ if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC * num_online_cpus()))
++ panic("Timeout during microcode update!\n");
+
+ return ret;
+ }
+@@ -579,12 +594,11 @@ static int microcode_reload_late(void)
+ {
+ int ret;
+
+- atomic_set(&late_cpus, num_online_cpus());
++ atomic_set(&late_cpus_in, 0);
++ atomic_set(&late_cpus_out, 0);
+
+ ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
+- if (ret < 0)
+- return ret;
+- else if (ret > 0)
++ if (ret > 0)
+ microcode_check();
+
+ return ret;
+--
+cgit v1.1
+
diff --git a/linux-kernel-patches/13-09e182d17e8891dd73baba961a0f5a82e9274c97.patch b/linux-kernel-patches/13-09e182d17e8891dd73baba961a0f5a82e9274c97.patch
new file mode 100644
index 0000000..54a90a5
--- /dev/null
+++ b/linux-kernel-patches/13-09e182d17e8891dd73baba961a0f5a82e9274c97.patch
@@ -0,0 +1,59 @@
+From 09e182d17e8891dd73baba961a0f5a82e9274c97 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Sat, 21 Apr 2018 10:19:30 +0200
+Subject: x86/microcode: Do not exit early from __reload_late()
+
+Vitezslav reported a case where the
+
+ "Timeout during microcode update!"
+
+panic would hit. After a deeper look, it turned out that his .config had
+CONFIG_HOTPLUG_CPU disabled which practically made save_mc_for_early() a
+no-op.
+
+When that happened, the discovered microcode patch wasn't saved into the
+cache and the late loading path wouldn't find any.
+
+This, then, lead to early exit from __reload_late() and thus CPUs waiting
+until the timeout is reached, leading to the panic.
+
+In hindsight, that function should have been written so it does not return
+before the post-synchronization. Oh well, I know better now...
+
+Fixes: bb8c13d61a62 ("x86/microcode: Fix CPU synchronization routine")
+Reported-by: Vitezslav Samel <vitezslav@samel.cz>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Vitezslav Samel <vitezslav@samel.cz>
+Tested-by: Ashok Raj <ashok.raj@intel.com>
+Cc: stable@vger.kernel.org
+Link: http://lkml.kernel.org/r/20180418081140.GA2439@pc11.op.pod.cz
+Link: https://lkml.kernel.org/r/20180421081930.15741-2-bp@alien8.de
+---
+ arch/x86/kernel/cpu/microcode/core.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
+index 10c4fc2..77e2013 100644
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -564,14 +564,12 @@ static int __reload_late(void *info)
+ apply_microcode_local(&err);
+ spin_unlock(&update_lock);
+
++ /* siblings return UCODE_OK because their engine got updated already */
+ if (err > UCODE_NFOUND) {
+ pr_warn("Error reloading microcode on CPU %d\n", cpu);
+- return -1;
+- /* siblings return UCODE_OK because their engine got updated already */
++ ret = -1;
+ } else if (err == UCODE_UPDATED || err == UCODE_OK) {
+ ret = 1;
+- } else {
+- return ret;
+ }
+
+ /*
+--
+cgit v1.1
+
diff --git a/linux-kernel-patches/14-84749d83758af6576552046b215b9b7f37f9556b.patch b/linux-kernel-patches/14-84749d83758af6576552046b215b9b7f37f9556b.patch
new file mode 100644
index 0000000..fb1074f
--- /dev/null
+++ b/linux-kernel-patches/14-84749d83758af6576552046b215b9b7f37f9556b.patch
@@ -0,0 +1,48 @@
+From 84749d83758af6576552046b215b9b7f37f9556b Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Sat, 21 Apr 2018 10:19:29 +0200
+Subject: x86/microcode/intel: Save microcode patch unconditionally
+
+save_mc_for_early() was a no-op on !CONFIG_HOTPLUG_CPU but the
+generic_load_microcode() path saves the microcode patches it has found into
+the cache of patches which is used for late loading too. Regardless of
+whether CPU hotplug is used or not.
+
+Make the saving unconditional so that late loading can find the proper
+patch.
+
+Reported-by: Vitezslav Samel <vitezslav@samel.cz>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Vitezslav Samel <vitezslav@samel.cz>
+Tested-by: Ashok Raj <ashok.raj@intel.com>
+Cc: stable@vger.kernel.org
+Link: http://lkml.kernel.org/r/20180418081140.GA2439@pc11.op.pod.cz
+Link: https://lkml.kernel.org/r/20180421081930.15741-1-bp@alien8.de
+---
+ arch/x86/kernel/cpu/microcode/intel.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
+index 32b8e57..1c2cfa0 100644
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -485,7 +485,6 @@ static void show_saved_mc(void)
+ */
+ static void save_mc_for_early(u8 *mc, unsigned int size)
+ {
+-#ifdef CONFIG_HOTPLUG_CPU
+ /* Synchronization during CPU hotplug. */
+ static DEFINE_MUTEX(x86_cpu_microcode_mutex);
+
+@@ -495,7 +494,6 @@ static void save_mc_for_early(u8 *mc, unsigned int size)
+ show_saved_mc();
+
+ mutex_unlock(&x86_cpu_microcode_mutex);
+-#endif
+ }
+
+ static bool load_builtin_intel_microcode(struct cpio_data *cp)
+--
+cgit v1.1
+