path: root/debian/patches-rt/0205-slub-Enable-irqs-for-__GFP_WAIT.patch
From c0f57e0b6278391f1fec71ccb1e25fe46deade2b Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 9 Jan 2013 12:08:15 +0100
Subject: [PATCH 205/323] slub: Enable irqs for __GFP_WAIT
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz

SYSTEM_RUNNING might be too late for enabling interrupts. Allocations
with GFP_WAIT can happen before that. So use this as an indicator.

[bigeasy: Add warning on RT for allocations in atomic context.
  Don't enable interrupts on allocations during SYSTEM_SUSPEND. This is done
  during suspend by ACPI, noticed by Liwei Song <liwei.song@windriver.com>
]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 mm/slub.c | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)
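
The hunks below gate local_irq_enable()/local_irq_disable() in
allocate_slab() on a combined condition.  The helper sketched here is
hypothetical (it is not part of the patch) and only spells that condition
out: IS_ENABLED() stands in for the #ifdef used in the actual hunk, and the
PREEMPT_RT window stops short of SYSTEM_SUSPEND because ACPI allocates with
interrupts off during suspend.

#include <linux/gfp.h>
#include <linux/kernel.h>

/*
 * Hypothetical helper, equivalent to the "enableirqs" logic this patch
 * adds to allocate_slab(): interrupts may be enabled when the allocation
 * is allowed to block, or, on PREEMPT_RT, once the system has left early
 * boot and is not suspending.
 */
static bool slab_may_enable_irqs(gfp_t flags)
{
	if (gfpflags_allow_blocking(flags))
		return true;

	if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
	    system_state > SYSTEM_BOOTING && system_state < SYSTEM_SUSPEND)
		return true;

	return false;
}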

diff --git a/mm/slub.c b/mm/slub.c
index ac2ddf6a4220..00f1d1206dbc 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1750,10 +1750,18 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	void *start, *p, *next;
 	int idx;
 	bool shuffle;
+	bool enableirqs = false;
 
 	flags &= gfp_allowed_mask;
 
 	if (gfpflags_allow_blocking(flags))
+		enableirqs = true;
+
+#ifdef CONFIG_PREEMPT_RT
+	if (system_state > SYSTEM_BOOTING && system_state < SYSTEM_SUSPEND)
+		enableirqs = true;
+#endif
+	if (enableirqs)
 		local_irq_enable();
 
 	flags |= s->allocflags;
@@ -1812,7 +1820,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	page->frozen = 1;
 
 out:
-	if (gfpflags_allow_blocking(flags))
+	if (enableirqs)
 		local_irq_disable();
 	if (!page)
 		return NULL;
@@ -2870,6 +2878,10 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 	unsigned long tid;
 	struct obj_cgroup *objcg = NULL;
 
+	if (IS_ENABLED(CONFIG_PREEMPT_RT) && IS_ENABLED(CONFIG_DEBUG_ATOMIC_SLEEP))
+		WARN_ON_ONCE(!preemptible() &&
+			     (system_state > SYSTEM_BOOTING && system_state < SYSTEM_SUSPEND));
+
 	s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
 	if (!s)
 		return NULL;
@@ -3338,6 +3350,10 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	int i;
 	struct obj_cgroup *objcg = NULL;
 
+	if (IS_ENABLED(CONFIG_PREEMPT_RT) && IS_ENABLED(CONFIG_DEBUG_ATOMIC_SLEEP))
+		WARN_ON_ONCE(!preemptible() &&
+			     (system_state > SYSTEM_BOOTING && system_state < SYSTEM_SUSPEND));
+
 	/* memcg and kmem_cache debug support */
 	s = slab_pre_alloc_hook(s, &objcg, size, flags);
 	if (unlikely(!s))
-- 
2.43.0