From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 11 Dec 2018 21:53:43 +0100
Subject: [PATCH 087/353] x86/mm/pat: disable preemption in
 __split_large_page() after spin_lock()
Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=93515e9f085077bbdb2a1a2aebd39cad36bfa04e
Commit "x86/mm/pat: Disable preemption around __flush_tlb_all()" added a
warning if __flush_tlb_all() is invoked in preemptible context. On !RT
the warning does not trigger because a spin lock is acquired which
disables preemption. On RT the spin lock does not disable preemption and
so the warning is seen.
Disable preemption to avoid the warning __flush_tlb_all().
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
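For illustration, a minimal sketch of the locking pattern this patch
establishes (not part of the change itself; example_lock and
example_flush_under_lock are hypothetical stand-ins for pgd_lock and
__split_large_page()). On !RT, spin_lock() already disables preemption,
so the extra preempt_disable() merely bumps the preempt count. On RT,
spinlock_t is a sleeping lock that leaves preemption enabled, so the
explicit preempt_disable() is what satisfies the preemptible() check in
__flush_tlb_all():

	#include <linux/preempt.h>
	#include <linux/spinlock.h>
	#include <asm/tlbflush.h>

	/* Hypothetical stand-in for pgd_lock. */
	static DEFINE_SPINLOCK(example_lock);

	static void example_flush_under_lock(void)
	{
		spin_lock(&example_lock);
		preempt_disable();	/* required on RT, harmless on !RT */

		/* ... modify page tables under the lock ... */

		__flush_tlb_all();	/* must run with preemption off */

		preempt_enable();
		spin_unlock(&example_lock);
	}

Every early-return path must pair preempt_enable() with spin_unlock(),
which is why the patch touches all three exit paths of
__split_large_page().
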
 arch/x86/mm/pageattr.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 101f3ad0d6ad..0b0396261ca1 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -687,12 +687,18 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
 	pgprot_t ref_prot;
 
 	spin_lock(&pgd_lock);
+	/*
+	 * Keep preemption disabled after __flush_tlb_all() which expects not
+	 * to be preempted during the flush of the local TLB.
+	 */
+	preempt_disable();
 	/*
 	 * Check for races, another CPU might have split this page
 	 * up for us already:
 	 */
 	tmp = _lookup_address_cpa(cpa, address, &level);
 	if (tmp != kpte) {
+		preempt_enable();
 		spin_unlock(&pgd_lock);
 		return 1;
 	}
@@ -726,6 +732,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
 		break;
 
 	default:
+		preempt_enable();
 		spin_unlock(&pgd_lock);
 		return 1;
 	}
@@ -764,6 +771,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
 	 * going on.
 	 */
 	__flush_tlb_all();
+	preempt_enable();
 	spin_unlock(&pgd_lock);
 	return 0;
 }
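
For reference, the preemptible-context check referenced above lives in
__flush_tlb_all() in arch/x86/include/asm/tlbflush.h; a paraphrased
sketch follows (comments mine, exact upstream wording may differ):

	static inline void __flush_tlb_all(void)
	{
		/* Catch callers that flush the local TLB while preemptible. */
		VM_WARN_ON_ONCE(preemptible());

		if (boot_cpu_has(X86_FEATURE_PGE))
			__flush_tlb_global();
		else
			__flush_tlb();
	}

With preemption disabled across the flush, the task cannot migrate to
another CPU mid-flush and leave the original CPU's TLB stale.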