author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:38 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:38 +0000
commit     08b74a000942a380fe028845f92cd3a0dee827d5 (patch)
tree       aa78b4e12607c3e1fcce8d5cc42df4330792f118 /debian/patches-rt/0078-mm-perform-lru_add_drain_all-remotely.patch
parent     Adding upstream version 4.19.249. (diff)
download   linux-08b74a000942a380fe028845f92cd3a0dee827d5.tar.xz
           linux-08b74a000942a380fe028845f92cd3a0dee827d5.zip
Adding debian version 4.19.249-2. (debian/4.19.249-2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0078-mm-perform-lru_add_drain_all-remotely.patch')
-rw-r--r--  debian/patches-rt/0078-mm-perform-lru_add_drain_all-remotely.patch  |  109
1 file changed, 109 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0078-mm-perform-lru_add_drain_all-remotely.patch b/debian/patches-rt/0078-mm-perform-lru_add_drain_all-remotely.patch
new file mode 100644
index 000000000..0983e2795
--- /dev/null
+++ b/debian/patches-rt/0078-mm-perform-lru_add_drain_all-remotely.patch
@@ -0,0 +1,109 @@
+From 9336f220ffe2a225210342f5977ec2636ffc8717 Mon Sep 17 00:00:00 2001
+From: Luiz Capitulino <lcapitulino@redhat.com>
+Date: Fri, 27 May 2016 15:03:28 +0200
+Subject: [PATCH 078/347] mm: perform lru_add_drain_all() remotely
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+lru_add_drain_all() works by scheduling lru_add_drain_cpu() to run
+on all CPUs that have non-empty LRU pagevecs and then waiting for
+the scheduled work to complete. However, workqueue threads may never
+have the chance to run on a CPU that's running a SCHED_FIFO task.
+This causes lru_add_drain_all() to block forever.
+
+This commit solves this problem by changing lru_add_drain_all()
+to drain the LRU pagevecs of remote CPUs. This is done by grabbing
+swapvec_lock and calling lru_add_drain_cpu().
+
+PS: This is based on an idea and initial implementation by
+ Rik van Riel.
+
+Signed-off-by: Rik van Riel <riel@redhat.com>
+Signed-off-by: Luiz Capitulino <lcapitulino@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/swap.c | 36 ++++++++++++++++++++++++++++++------
+ 1 file changed, 30 insertions(+), 6 deletions(-)
+
+diff --git a/mm/swap.c b/mm/swap.c
+index 92f994b962f0..3885645a45ce 100644
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -585,9 +585,15 @@ void lru_add_drain_cpu(int cpu)
+ unsigned long flags;
+
+ /* No harm done if a racing interrupt already did this */
++#ifdef CONFIG_PREEMPT_RT_BASE
++ local_lock_irqsave_on(rotate_lock, flags, cpu);
++ pagevec_move_tail(pvec);
++ local_unlock_irqrestore_on(rotate_lock, flags, cpu);
++#else
+ local_lock_irqsave(rotate_lock, flags);
+ pagevec_move_tail(pvec);
+ local_unlock_irqrestore(rotate_lock, flags);
++#endif
+ }
+
+ pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
+@@ -657,6 +663,16 @@ void lru_add_drain(void)
+
+ #ifdef CONFIG_SMP
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
++{
++ local_lock_on(swapvec_lock, cpu);
++ lru_add_drain_cpu(cpu);
++ local_unlock_on(swapvec_lock, cpu);
++}
++
++#else
++
+ static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
+
+ static void lru_add_drain_per_cpu(struct work_struct *dummy)
+@@ -664,6 +680,16 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
+ lru_add_drain();
+ }
+
++static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
++{
++ struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
++
++ INIT_WORK(work, lru_add_drain_per_cpu);
++ queue_work_on(cpu, mm_percpu_wq, work);
++ cpumask_set_cpu(cpu, has_work);
++}
++#endif
++
+ /*
+ * Doesn't need any cpu hotplug locking because we do rely on per-cpu
+ * kworkers being shut down before our page_alloc_cpu_dead callback is
+@@ -688,21 +714,19 @@ void lru_add_drain_all(void)
+ cpumask_clear(&has_work);
+
+ for_each_online_cpu(cpu) {
+- struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
+
+ if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
+ pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
+ pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
+ pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
+- need_activate_page_drain(cpu)) {
+- INIT_WORK(work, lru_add_drain_per_cpu);
+- queue_work_on(cpu, mm_percpu_wq, work);
+- cpumask_set_cpu(cpu, &has_work);
+- }
++ need_activate_page_drain(cpu))
++ remote_lru_add_drain(cpu, &has_work);
+ }
+
++#ifndef CONFIG_PREEMPT_RT_BASE
+ for_each_cpu(cpu, &has_work)
+ flush_work(&per_cpu(lru_add_drain_work, cpu));
++#endif
+
+ mutex_unlock(&lock);
+ }
+--
+2.36.1
+
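
For readers skimming the patch above: the core idea is that on PREEMPT_RT, lru_add_drain_all() no longer queues work on each busy CPU and waits for a kworker there (which a SCHED_FIFO task could starve forever); instead, the calling CPU takes the target CPU's per-CPU lock (swapvec_lock) and drains that CPU's pagevec itself. The following is a minimal userspace sketch of that remote-drain pattern under stated assumptions: it is an analogy, not kernel code, and the names cpu_pagevec, remote_drain, and drain_all are hypothetical, as is the pthread-based locking standing in for the per-CPU local locks.

/*
 * Userspace analogy of the PREEMPT_RT path added by this patch:
 * drain another "CPU"'s pagevec directly under its per-CPU lock
 * instead of waiting for a worker to be scheduled on that CPU.
 * All identifiers here are illustrative, not taken from the kernel.
 */
#include <pthread.h>
#include <stdio.h>

#define NR_CPUS   4
#define PVEC_SIZE 14

struct cpu_pagevec {
	pthread_mutex_t lock;     /* stands in for that CPU's swapvec_lock */
	int pages[PVEC_SIZE];
	int count;
};

static struct cpu_pagevec pvecs[NR_CPUS];

/* Drain one CPU's pagevec from the current context ("remotely"). */
static void remote_drain(int cpu)
{
	struct cpu_pagevec *pvec = &pvecs[cpu];

	pthread_mutex_lock(&pvec->lock);
	if (pvec->count) {
		printf("cpu %d: flushing %d cached pages\n", cpu, pvec->count);
		pvec->count = 0;  /* "move pages to the LRU lists" */
	}
	pthread_mutex_unlock(&pvec->lock);
}

/* Analogue of lru_add_drain_all(): visit every CPU with pending pages. */
static void drain_all(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (pvecs[cpu].count)
			remote_drain(cpu);
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		pthread_mutex_init(&pvecs[cpu].lock, NULL);
		pvecs[cpu].count = (cpu % 2) ? 3 : 0;  /* fake pending pages */
	}

	drain_all();
	return 0;
}

The trade-off the patch makes is the same one sketched here: nothing needs to run on a CPU that may be monopolised by a real-time task, but the per-CPU state must now be protected by a lock that is safe to take from another CPU, which is why the kernel patch uses the *_on(lock, ..., cpu) variants for rotate_lock and swapvec_lock on the RT configuration.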