Diffstat (limited to 'debian/patches-rt/0085-mm-memcontrol-Replace-local_irq_disable-with-local-l.patch')
-rw-r--r-- debian/patches-rt/0085-mm-memcontrol-Replace-local_irq_disable-with-local-l.patch | 124
1 file changed, 124 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0085-mm-memcontrol-Replace-local_irq_disable-with-local-l.patch b/debian/patches-rt/0085-mm-memcontrol-Replace-local_irq_disable-with-local-l.patch
new file mode 100644
index 000000000..8da70c668
--- /dev/null
+++ b/debian/patches-rt/0085-mm-memcontrol-Replace-local_irq_disable-with-local-l.patch
@@ -0,0 +1,124 @@
+From d5d11c92081bb9edd9eb11cd752a325f8822a0a5 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 28 Jan 2015 17:14:16 +0100
+Subject: [PATCH 085/347] mm/memcontrol: Replace local_irq_disable with local
+ locks
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+There are a few local_irq_disable() sections which then take sleeping
+locks. This patch converts them to local locks.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/memcontrol.c | 24 ++++++++++++++++--------
+ 1 file changed, 16 insertions(+), 8 deletions(-)
+
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 281d74f86892..239fec6eac9f 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -69,6 +69,7 @@
+ #include <net/sock.h>
+ #include <net/ip.h>
+ #include "slab.h"
++#include <linux/locallock.h>
+
+ #include <linux/uaccess.h>
+
+@@ -94,6 +95,8 @@ int do_swap_account __read_mostly;
+ #define do_swap_account 0
+ #endif
+
++static DEFINE_LOCAL_IRQ_LOCK(event_lock);
++
+ /* Whether legacy memory+swap accounting is active */
+ static bool do_memsw_account(void)
+ {
+@@ -4933,12 +4936,12 @@ static int mem_cgroup_move_account(struct page *page,
+
+ ret = 0;
+
+- local_irq_disable();
++ local_lock_irq(event_lock);
+ mem_cgroup_charge_statistics(to, page, compound, nr_pages);
+ memcg_check_events(to, page);
+ mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
+ memcg_check_events(from, page);
+- local_irq_enable();
++ local_unlock_irq(event_lock);
+ out_unlock:
+ unlock_page(page);
+ out:
+@@ -6057,10 +6060,10 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
+
+ commit_charge(page, memcg, lrucare);
+
+- local_irq_disable();
++ local_lock_irq(event_lock);
+ mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
+ memcg_check_events(memcg, page);
+- local_irq_enable();
++ local_unlock_irq(event_lock);
+
+ if (do_memsw_account() && PageSwapCache(page)) {
+ swp_entry_t entry = { .val = page_private(page) };
+@@ -6129,7 +6132,7 @@ static void uncharge_batch(const struct uncharge_gather *ug)
+ memcg_oom_recover(ug->memcg);
+ }
+
+- local_irq_save(flags);
++ local_lock_irqsave(event_lock, flags);
+ __mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
+ __mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
+ __mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
+@@ -6137,7 +6140,7 @@ static void uncharge_batch(const struct uncharge_gather *ug)
+ __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
+ __this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages);
+ memcg_check_events(ug->memcg, ug->dummy_page);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(event_lock, flags);
+
+ if (!mem_cgroup_is_root(ug->memcg))
+ css_put_many(&ug->memcg->css, nr_pages);
+@@ -6300,10 +6303,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
+
+ commit_charge(newpage, memcg, false);
+
+- local_irq_save(flags);
++ local_lock_irqsave(event_lock, flags);
+ mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
+ memcg_check_events(memcg, newpage);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(event_lock, flags);
+ }
+
+ DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
+@@ -6485,6 +6488,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+ struct mem_cgroup *memcg, *swap_memcg;
+ unsigned int nr_entries;
+ unsigned short oldid;
++ unsigned long flags;
+
+ VM_BUG_ON_PAGE(PageLRU(page), page);
+ VM_BUG_ON_PAGE(page_count(page), page);
+@@ -6530,13 +6534,17 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+ * important here to have the interrupts disabled because it is the
+ * only synchronisation we have for updating the per-CPU variables.
+ */
++ local_lock_irqsave(event_lock, flags);
++#ifndef CONFIG_PREEMPT_RT_BASE
+ VM_BUG_ON(!irqs_disabled());
++#endif
+ mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page),
+ -nr_entries);
+ memcg_check_events(memcg, page);
+
+ if (!mem_cgroup_is_root(memcg))
+ css_put_many(&memcg->css, nr_entries);
++ local_unlock_irqrestore(event_lock, flags);
+ }
+
+ /**
+--
+2.36.1
+
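For context, here is a minimal sketch of the conversion pattern the patch above applies. It assumes a 4.19-rt tree, where <linux/locallock.h> provides DEFINE_LOCAL_IRQ_LOCK() and the local_lock_*() helpers; that header does not exist in mainline kernels. The lock, counter, and function names below are hypothetical, chosen only for illustration, and the snippet is not a standalone buildable module.

#include <linux/locallock.h>   /* RT patch set only; not in mainline */
#include <linux/percpu.h>

/* Hypothetical per-CPU state, standing in for memcg's event counters. */
static DEFINE_LOCAL_IRQ_LOCK(example_lock);
static DEFINE_PER_CPU(unsigned long, example_events);

static void example_count_event(void)
{
	/* On !RT this maps to local_irq_disable()/local_irq_enable(),
	 * preserving the old behaviour.  On RT it takes a per-CPU
	 * sleeping lock instead, so the critical section stays
	 * preemptible and may itself acquire other sleeping locks. */
	local_lock_irq(example_lock);
	__this_cpu_inc(example_events);
	local_unlock_irq(example_lock);
}

static void example_count_event_any_context(void)
{
	unsigned long flags;

	/* The irqsave variant, matching the local_irq_save()/restore()
	 * call sites converted in uncharge_batch() and
	 * mem_cgroup_migrate() above. */
	local_lock_irqsave(example_lock, flags);
	__this_cpu_inc(example_events);
	local_unlock_irqrestore(example_lock, flags);
}

This mapping also explains the #ifndef CONFIG_PREEMPT_RT_BASE guard the patch places around VM_BUG_ON(!irqs_disabled()) in mem_cgroup_swapout(): on RT, local_lock_irq() leaves hardware interrupts enabled and protects the per-CPU updates with the per-CPU lock instead, so irqs_disabled() is legitimately false inside the critical section.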