From 7a4ec53aa79ebc416eb752e204669e0d4dc53a06 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 18 Aug 2020 10:30:00 +0200
Subject: [PATCH 207/323] mm: memcontrol: Provide a local_lock for per-CPU
memcg_stock
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz

Interrupts are disabled to ensure CPU-local access to the per-CPU
variable `memcg_stock'.
As the code inside the interrupt-disabled section acquires regular
spinlocks, which are converted to 'sleeping' spinlocks on a PREEMPT_RT
kernel, this conflicts with the RT semantics.

Convert it to a local_lock, which allows RT kernels to substitute it with
a real per-CPU lock. On non-RT kernels this maps to local_irq_save() as
before, but also provides lockdep coverage of the critical region.
No functional change.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
mm/memcontrol.c | 31 ++++++++++++++++++-------------
1 file changed, 18 insertions(+), 13 deletions(-)
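
[Editorial note, not part of the patch] For readers unfamiliar with the API,
below is a minimal, self-contained sketch of the local_lock pattern the patch
adopts. The struct, field and function names (pcp_example, count,
pcp_example_inc) are made up for illustration; only the local_lock/per-CPU
calls mirror the real kernel API.

#include <linux/local_lock.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU cache protected by an embedded local_lock_t. */
struct pcp_example {
	local_lock_t lock;
	unsigned int count;
};

static DEFINE_PER_CPU(struct pcp_example, pcp_example) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void pcp_example_inc(void)
{
	struct pcp_example *p;
	unsigned long flags;

	/*
	 * On !PREEMPT_RT this maps to local_irq_save(), as before.
	 * On PREEMPT_RT it acquires a per-CPU spinlock_t instead, so
	 * sleeping locks taken inside the section remain legal, and
	 * lockdep now tracks the critical region.
	 */
	local_lock_irqsave(&pcp_example.lock, flags);
	p = this_cpu_ptr(&pcp_example);
	p->count++;
	local_unlock_irqrestore(&pcp_example.lock, flags);
}
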
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 49566afaef1c..d2a47428831b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2202,6 +2202,7 @@ void unlock_page_memcg(struct page *page)
EXPORT_SYMBOL(unlock_page_memcg);
struct memcg_stock_pcp {
+ local_lock_t lock;
struct mem_cgroup *cached; /* this never be root cgroup */
unsigned int nr_pages;
@@ -2253,7 +2254,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
if (nr_pages > MEMCG_CHARGE_BATCH)
return ret;
- local_irq_save(flags);
+ local_lock_irqsave(&memcg_stock.lock, flags);
stock = this_cpu_ptr(&memcg_stock);
if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
@@ -2261,7 +2262,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
ret = true;
}
- local_irq_restore(flags);
+ local_unlock_irqrestore(&memcg_stock.lock, flags);
return ret;
}
@@ -2296,14 +2297,14 @@ static void drain_local_stock(struct work_struct *dummy)
* The only protection from memory hotplug vs. drain_stock races is
* that we always operate on local CPU stock here with IRQ disabled
*/
- local_irq_save(flags);
+ local_lock_irqsave(&memcg_stock.lock, flags);
stock = this_cpu_ptr(&memcg_stock);
drain_obj_stock(stock);
drain_stock(stock);
clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
- local_irq_restore(flags);
+ local_unlock_irqrestore(&memcg_stock.lock, flags);
}
/*
@@ -2315,7 +2316,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
struct memcg_stock_pcp *stock;
unsigned long flags;
- local_irq_save(flags);
+ local_lock_irqsave(&memcg_stock.lock, flags);
stock = this_cpu_ptr(&memcg_stock);
if (stock->cached != memcg) { /* reset if necessary */
@@ -2328,7 +2329,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
if (stock->nr_pages > MEMCG_CHARGE_BATCH)
drain_stock(stock);
- local_irq_restore(flags);
+ local_unlock_irqrestore(&memcg_stock.lock, flags);
}
/*
@@ -3137,7 +3138,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
unsigned long flags;
bool ret = false;
- local_irq_save(flags);
+ local_lock_irqsave(&memcg_stock.lock, flags);
stock = this_cpu_ptr(&memcg_stock);
if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
@@ -3145,7 +3146,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
ret = true;
}
- local_irq_restore(flags);
+ local_unlock_irqrestore(&memcg_stock.lock, flags);
return ret;
}
@@ -3212,7 +3213,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
struct memcg_stock_pcp *stock;
unsigned long flags;
- local_irq_save(flags);
+ local_lock_irqsave(&memcg_stock.lock, flags);
stock = this_cpu_ptr(&memcg_stock);
if (stock->cached_objcg != objcg) { /* reset if necessary */
@@ -3226,7 +3227,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
if (stock->nr_bytes > PAGE_SIZE)
drain_obj_stock(stock);
- local_irq_restore(flags);
+ local_unlock_irqrestore(&memcg_stock.lock, flags);
}
int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
@@ -7161,9 +7162,13 @@ static int __init mem_cgroup_init(void)
cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
memcg_hotplug_cpu_dead);
- for_each_possible_cpu(cpu)
- INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
- drain_local_stock);
+ for_each_possible_cpu(cpu) {
+ struct memcg_stock_pcp *stock;
+
+ stock = per_cpu_ptr(&memcg_stock, cpu);
+ INIT_WORK(&stock->work, drain_local_stock);
+ local_lock_init(&stock->lock);
+ }
for_each_node(node) {
struct mem_cgroup_tree_per_node *rtpn;
--
2.43.0
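
[Editorial note, not part of the patch] The final hunk turns the init loop
into a block so the embedded lock can be initialized at boot. A minimal
sketch of that run-time initialization pattern, with hypothetical names
(pcp_cache, pcp_cache_drain, pcp_cache_init):

#include <linux/init.h>
#include <linux/local_lock.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

struct pcp_cache {
	local_lock_t lock;
	struct work_struct work;
};

static DEFINE_PER_CPU(struct pcp_cache, pcp_cache);

static void pcp_cache_drain(struct work_struct *work)
{
	/* Drain the CPU-local cache; body omitted in this sketch. */
}

static int __init pcp_cache_init(void)
{
	int cpu;

	/*
	 * Mirrors the loop added to mem_cgroup_init(): set up the drain
	 * work item and initialize the embedded local_lock on every
	 * possible CPU before the cache is first used.
	 */
	for_each_possible_cpu(cpu) {
		struct pcp_cache *pc = per_cpu_ptr(&pcp_cache, cpu);

		INIT_WORK(&pc->work, pcp_cache_drain);
		local_lock_init(&pc->lock);
	}
	return 0;
}
subsys_initcall(pcp_cache_init);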