Diffstat (limited to 'debian/patches-rt/0020-arm64-KVM-compute_layout-before-altenates-are-applie.patch')
-rw-r--r--  debian/patches-rt/0020-arm64-KVM-compute_layout-before-altenates-are-applie.patch | 83
1 file changed, 83 insertions, 0 deletions
diff --git a/debian/patches-rt/0020-arm64-KVM-compute_layout-before-altenates-are-applie.patch b/debian/patches-rt/0020-arm64-KVM-compute_layout-before-altenates-are-applie.patch
new file mode 100644
index 000000000..9ebe84c33
--- /dev/null
+++ b/debian/patches-rt/0020-arm64-KVM-compute_layout-before-altenates-are-applie.patch
@@ -0,0 +1,83 @@
+From 67b8d4ae9c2cf65bccbb75f2099bca2da5e78de5 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 26 Jul 2018 09:13:42 +0200
+Subject: [PATCH 020/347] arm64: KVM: compute_layout before altenates are
+ applied
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+compute_layout() is invoked as part of an alternative fixup under
+stop_machine() and calls get_random_long(), which takes a sleeping lock.
+
+Invoke compute_layout() before the alternatives are applied.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm64/include/asm/alternative.h | 6 ++++++
+ arch/arm64/kernel/alternative.c | 1 +
+ arch/arm64/kvm/va_layout.c | 7 +------
+ 3 files changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
+index 4fbbcdda70d7..99b215602a1a 100644
+--- a/arch/arm64/include/asm/alternative.h
++++ b/arch/arm64/include/asm/alternative.h
+@@ -35,6 +35,12 @@ void apply_alternatives_module(void *start, size_t length);
+ static inline void apply_alternatives_module(void *start, size_t length) { }
+ #endif
+
++#ifdef CONFIG_KVM_ARM_HOST
++void kvm_compute_layout(void);
++#else
++static inline void kvm_compute_layout(void) { }
++#endif
++
+ #define ALTINSTR_ENTRY(feature) \
+ " .word 661b - .\n" /* label */ \
+ " .word 663f - .\n" /* new instruction */ \
+diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
+index 3747c8d87bdb..75ccb5b279a2 100644
+--- a/arch/arm64/kernel/alternative.c
++++ b/arch/arm64/kernel/alternative.c
+@@ -212,6 +212,7 @@ static int __apply_alternatives_multi_stop(void *unused)
+ void __init apply_alternatives_all(void)
+ {
+ /* better not try code patching on a live SMP system */
++ kvm_compute_layout();
+ stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
+ }
+
+diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
+index c712a7376bc1..792da0e125de 100644
+--- a/arch/arm64/kvm/va_layout.c
++++ b/arch/arm64/kvm/va_layout.c
+@@ -33,7 +33,7 @@ static u8 tag_lsb;
+ static u64 tag_val;
+ static u64 va_mask;
+
+-static void compute_layout(void)
++__init void kvm_compute_layout(void)
+ {
+ phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
+ u64 hyp_va_msb;
+@@ -121,8 +121,6 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
+
+ BUG_ON(nr_inst != 5);
+
+- if (!has_vhe() && !va_mask)
+- compute_layout();
+
+ for (i = 0; i < nr_inst; i++) {
+ u32 rd, rn, insn, oinsn;
+@@ -167,9 +165,6 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
+ return;
+ }
+
+- if (!va_mask)
+- compute_layout();
+-
+ /*
+ * Compute HYP VA by using the same computation as kern_hyp_va()
+ */
+--
+2.36.1
+
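The ordering the patch enforces can be illustrated with a small userspace C sketch. All names below are hypothetical stand-ins for the kernel primitives (get_random_long(), kvm_compute_layout(), the stop_machine() callback), not the actual implementation: work that may sleep must run before the atomic code-patching section, because inside stop_machine() every CPU runs with interrupts off and taking a sleeping lock is a bug, fatal on PREEMPT_RT.

#include <stdio.h>
#include <stdbool.h>

/*
 * Minimal sketch of the ordering fixed by this patch.
 * Hypothetical stand-ins only; not kernel code.
 */

static bool in_atomic_context;	/* models "inside stop_machine()" */
static long va_mask;

/* Models get_random_long(), which may take a sleeping lock. */
static long get_random_long_sim(void)
{
	if (in_atomic_context) {
		/* On PREEMPT_RT this is exactly the bug being fixed. */
		fprintf(stderr, "BUG: sleeping call from atomic context\n");
		return 0;
	}
	return 42;	/* stand-in for a random value */
}

/* Models kvm_compute_layout(): the setup work that may sleep. */
static void kvm_compute_layout_sim(void)
{
	va_mask = get_random_long_sim();
}

/* Models __apply_alternatives_multi_stop(): runs atomically. */
static void apply_alternatives_sim(void)
{
	/* The old code computed the layout lazily in here -- too late. */
}

int main(void)
{
	/* New ordering: sleepable setup first... */
	kvm_compute_layout_sim();

	/* ...then the atomic code-patching section. */
	in_atomic_context = true;
	apply_alternatives_sim();
	in_atomic_context = false;

	printf("va_mask = %ld (computed before patching)\n", va_mask);
	return 0;
}

The same pattern holds generally on PREEMPT_RT: anything reachable from a stop_machine() callback must be non-sleeping, so sleepable initialisation is hoisted out in front of the call, which is why the patch drops the lazy compute_layout() calls and invokes kvm_compute_layout() once from apply_alternatives_all().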