author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:06:00 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:06:00 +0000
commit     b15a952c52a6825376d3e7f6c1bf5c886c6d8b74 (patch)
tree       1500f2f8f276908a36d8126cb632c0d6b1276764 /debian/patches-rt/0207-mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch
parent     Adding upstream version 5.10.209. (diff)
download   linux-b15a952c52a6825376d3e7f6c1bf5c886c6d8b74.tar.xz
           linux-b15a952c52a6825376d3e7f6c1bf5c886c6d8b74.zip
Adding debian version 5.10.209-2. (debian/5.10.209-2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0207-mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch')
-rw-r--r--  debian/patches-rt/0207-mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch  144
1 file changed, 144 insertions, 0 deletions
diff --git a/debian/patches-rt/0207-mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch b/debian/patches-rt/0207-mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch
new file mode 100644
index 000000000..73fbdfd8d
--- /dev/null
+++ b/debian/patches-rt/0207-mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch
@@ -0,0 +1,144 @@
+From 7a4ec53aa79ebc416eb752e204669e0d4dc53a06 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 18 Aug 2020 10:30:00 +0200
+Subject: [PATCH 207/323] mm: memcontrol: Provide a local_lock for per-CPU
+ memcg_stock
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+The interrupts are disabled to ensure CPU-local access to the per-CPU
+variable `memcg_stock'.
+As the code inside the interrupt disabled section acquires regular
+spinlocks, which are converted to 'sleeping' spinlocks on a PREEMPT_RT
+kernel, this conflicts with the RT semantics.
+
+Convert it to a local_lock which allows RT kernels to substitute them with
+a real per CPU lock. On non RT kernels this maps to local_irq_save() as
+before, but provides also lockdep coverage of the critical region.
+No functional change.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/memcontrol.c | 31 ++++++++++++++++++-------------
+ 1 file changed, 18 insertions(+), 13 deletions(-)
+
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 49566afaef1c..d2a47428831b 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -2202,6 +2202,7 @@ void unlock_page_memcg(struct page *page)
+ EXPORT_SYMBOL(unlock_page_memcg);
+ 
+ struct memcg_stock_pcp {
++	local_lock_t lock;
+ 	struct mem_cgroup *cached; /* this never be root cgroup */
+ 	unsigned int nr_pages;
+ 
+@@ -2253,7 +2254,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+ 	if (nr_pages > MEMCG_CHARGE_BATCH)
+ 		return ret;
+ 
+-	local_irq_save(flags);
++	local_lock_irqsave(&memcg_stock.lock, flags);
+ 
+ 	stock = this_cpu_ptr(&memcg_stock);
+ 	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
+@@ -2261,7 +2262,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+ 		ret = true;
+ 	}
+ 
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(&memcg_stock.lock, flags);
+ 
+ 	return ret;
+ }
+@@ -2296,14 +2297,14 @@ static void drain_local_stock(struct work_struct *dummy)
+ 	 * The only protection from memory hotplug vs. drain_stock races is
+ 	 * that we always operate on local CPU stock here with IRQ disabled
+ 	 */
+-	local_irq_save(flags);
++	local_lock_irqsave(&memcg_stock.lock, flags);
+ 
+ 	stock = this_cpu_ptr(&memcg_stock);
+ 	drain_obj_stock(stock);
+ 	drain_stock(stock);
+ 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
+ 
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(&memcg_stock.lock, flags);
+ }
+ 
+ /*
+@@ -2315,7 +2316,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+ 	struct memcg_stock_pcp *stock;
+ 	unsigned long flags;
+ 
+-	local_irq_save(flags);
++	local_lock_irqsave(&memcg_stock.lock, flags);
+ 
+ 	stock = this_cpu_ptr(&memcg_stock);
+ 	if (stock->cached != memcg) { /* reset if necessary */
+@@ -2328,7 +2329,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+ 	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
+ 		drain_stock(stock);
+ 
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(&memcg_stock.lock, flags);
+ }
+ 
+ /*
+@@ -3137,7 +3138,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
+ 	unsigned long flags;
+ 	bool ret = false;
+ 
+-	local_irq_save(flags);
++	local_lock_irqsave(&memcg_stock.lock, flags);
+ 
+ 	stock = this_cpu_ptr(&memcg_stock);
+ 	if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
+@@ -3145,7 +3146,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
+ 		ret = true;
+ 	}
+ 
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(&memcg_stock.lock, flags);
+ 
+ 	return ret;
+ }
+@@ -3212,7 +3213,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
+ 	struct memcg_stock_pcp *stock;
+ 	unsigned long flags;
+ 
+-	local_irq_save(flags);
++	local_lock_irqsave(&memcg_stock.lock, flags);
+ 
+ 	stock = this_cpu_ptr(&memcg_stock);
+ 	if (stock->cached_objcg != objcg) { /* reset if necessary */
+@@ -3226,7 +3227,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
+ 	if (stock->nr_bytes > PAGE_SIZE)
+ 		drain_obj_stock(stock);
+ 
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(&memcg_stock.lock, flags);
+ }
+ 
+ int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
+@@ -7161,9 +7162,13 @@ static int __init mem_cgroup_init(void)
+ 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
+ 				  memcg_hotplug_cpu_dead);
+ 
+-	for_each_possible_cpu(cpu)
+-		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
+-			  drain_local_stock);
++	for_each_possible_cpu(cpu) {
++		struct memcg_stock_pcp *stock;
++
++		stock = per_cpu_ptr(&memcg_stock, cpu);
++		INIT_WORK(&stock->work, drain_local_stock);
++		local_lock_init(&stock->lock);
++	}
+ 
+ 	for_each_node(node) {
+ 		struct mem_cgroup_tree_per_node *rtpn;
+-- 
+2.43.0
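
For orientation, the conversion the patch performs is easy to see in isolation. The sketch below is illustrative only: demo_stock and demo_consume() are hypothetical names, not from the patch, while local_lock_t, INIT_LOCAL_LOCK(), local_lock_irqsave() and local_unlock_irqrestore() are the real API from <linux/local_lock.h> (documented in Documentation/locking/locktypes.rst). On a !PREEMPT_RT kernel the lock/unlock pair behaves like local_irq_save()/local_irq_restore() but adds lockdep coverage; on PREEMPT_RT it becomes a per-CPU sleeping lock, which is why the converted code stays valid under RT semantics.

/*
 * Illustrative sketch, not from the patch: a per-CPU structure embeds a
 * local_lock_t, and every access to its fields goes through the
 * lock/unlock pair instead of bare local_irq_save()/restore().
 */
#include <linux/local_lock.h>
#include <linux/percpu.h>

struct demo_stock {
	local_lock_t lock;	/* protects the per-CPU fields below */
	unsigned int nr_pages;
};

static DEFINE_PER_CPU(struct demo_stock, demo_stock) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

/* Try to take @nr_pages from this CPU's cached stock. */
static bool demo_consume(unsigned int nr_pages)
{
	struct demo_stock *stock;
	unsigned long flags;
	bool ret = false;

	local_lock_irqsave(&demo_stock.lock, flags);	/* CPU-local critical section */
	stock = this_cpu_ptr(&demo_stock);
	if (stock->nr_pages >= nr_pages) {
		stock->nr_pages -= nr_pages;
		ret = true;
	}
	local_unlock_irqrestore(&demo_stock.lock, flags);

	return ret;
}

The patch initializes its lock at runtime with local_lock_init() inside mem_cgroup_init(); the static INIT_LOCAL_LOCK() initializer used in this sketch is the equivalent compile-time form.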