Diffstat (limited to 'debian/patches-rt/0005-sched-core-Wait-for-tasks-being-pushed-away-on-hotpl.patch')
-rw-r--r--  debian/patches-rt/0005-sched-core-Wait-for-tasks-being-pushed-away-on-hotpl.patch | 124
1 file changed, 124 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0005-sched-core-Wait-for-tasks-being-pushed-away-on-hotpl.patch b/debian/patches-rt/0005-sched-core-Wait-for-tasks-being-pushed-away-on-hotpl.patch
new file mode 100644
index 000000000..abd6b3875
--- /dev/null
+++ b/debian/patches-rt/0005-sched-core-Wait-for-tasks-being-pushed-away-on-hotpl.patch
@@ -0,0 +1,124 @@
+From 94b59ab31222fc252603987e1ee316264426a015 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 23 Oct 2020 12:12:02 +0200
+Subject: [PATCH 005/323] sched/core: Wait for tasks being pushed away on
+ hotplug
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+RT kernels need to ensure that all tasks which are not per-CPU kthreads
+have left the outgoing CPU, to guarantee that no tasks are force-migrated
+within a migrate-disabled section.
+
+There is also some desire to (ab)use fine-grained CPU hotplug control to
+clear a CPU from the active state in order to force-migrate tasks which
+are not per-CPU kthreads away, for power-control purposes.
+
+Add a mechanism which waits until all tasks that should leave the CPU
+after the CPU active flag is cleared have moved to a different online CPU.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
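+Note (not part of the upstream commit): the mechanism is built on the
+rcuwait primitive from <linux/rcuwait.h>. For readers unfamiliar with
+it, a minimal self-contained sketch of the wait/wake pattern follows.
+The demo_* names are invented for illustration and exist nowhere in
+the tree:
+
+	#include <linux/kthread.h>
+	#include <linux/rcuwait.h>
+	#include <linux/sched.h>
+
+	static struct rcuwait demo_wait;
+	static bool demo_done;
+
+	static int demo_waiter(void *unused)
+	{
+		/*
+		 * Sleep until demo_done is true. The condition is
+		 * checked before sleeping and re-evaluated after every
+		 * wakeup, so a wakeup racing with going to sleep and
+		 * spurious wakeups are both safe.
+		 */
+		rcuwait_wait_event(&demo_wait, READ_ONCE(demo_done),
+				   TASK_UNINTERRUPTIBLE);
+		return 0;
+	}
+
+	static int demo_waker(void *unused)
+	{
+		WRITE_ONCE(demo_done, true);
+		/* Wakes the waiter if one is registered, else a no-op. */
+		rcuwait_wake_up(&demo_wait);
+		return 0;
+	}
+
+	static void demo_start(void)	/* called from some init path */
+	{
+		rcuwait_init(&demo_wait);
+		kthread_run(demo_waiter, NULL, "demo_waiter");
+		kthread_run(demo_waker, NULL, "demo_waker");
+	}
+
+The patch below instantiates this exact pattern: the rcuwait lives in
+the runqueue (rq->hotplug_wait), the waiter is the hotplug control
+thread in balance_hotplug_wait(), and the waker is the outgoing CPU's
+idle task in balance_push().
+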
+ kernel/sched/core.c | 40 +++++++++++++++++++++++++++++++++++++++-
+ kernel/sched/sched.h | 4 ++++
+ 2 files changed, 43 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 32c3acef5781..b902755615d7 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -6896,8 +6896,21 @@ static void balance_push(struct rq *rq)
+ * Both the cpu-hotplug and stop task are in this case and are
+ * required to complete the hotplug process.
+ */
+- if (is_per_cpu_kthread(push_task))
++ if (is_per_cpu_kthread(push_task)) {
++ /*
++ * If this is the idle task on the outgoing CPU try to wake
++ * up the hotplug control thread which might wait for the
++ * last task to vanish. The rcuwait_active() check is
++ * accurate here because the waiter is pinned on this CPU
++ * and obviously can't be running in parallel.
++ */
++ if (!rq->nr_running && rcuwait_active(&rq->hotplug_wait)) {
++ raw_spin_unlock(&rq->lock);
++ rcuwait_wake_up(&rq->hotplug_wait);
++ raw_spin_lock(&rq->lock);
++ }
+ return;
++ }
+
+ get_task_struct(push_task);
+ /*
+@@ -6928,6 +6941,20 @@ static void balance_push_set(int cpu, bool on)
+ rq_unlock_irqrestore(rq, &rf);
+ }
+
++/*
++ * Invoked from a CPU's hotplug control thread after the CPU has been marked
++ * inactive. All tasks which are not per CPU kernel threads are either
++ * pushed off this CPU now via balance_push() or placed on a different CPU
++ * during wakeup. Wait until the CPU is quiescent.
++ */
++static void balance_hotplug_wait(void)
++{
++ struct rq *rq = this_rq();
++
++ rcuwait_wait_event(&rq->hotplug_wait, rq->nr_running == 1,
++ TASK_UNINTERRUPTIBLE);
++}
++
+ #else
+
+ static inline void balance_push(struct rq *rq)
+@@ -6938,6 +6965,10 @@ static inline void balance_push_set(int cpu, bool on)
+ {
+ }
+
++static inline void balance_hotplug_wait(void)
++{
++}
++
+ #endif /* CONFIG_HOTPLUG_CPU */
+
+ void set_rq_online(struct rq *rq)
+@@ -7094,6 +7125,10 @@ int sched_cpu_deactivate(unsigned int cpu)
+ return ret;
+ }
+ sched_domains_numa_masks_clear(cpu);
++
++ /* Wait for all non-per-CPU kernel threads to vanish. */
++ balance_hotplug_wait();
++
+ return 0;
+ }
+
+@@ -7334,6 +7369,9 @@ void __init sched_init(void)
+
+ rq_csd_init(rq, &rq->nohz_csd, nohz_csd_func);
+ #endif
++#ifdef CONFIG_HOTPLUG_CPU
++ rcuwait_init(&rq->hotplug_wait);
++#endif
+ #endif /* CONFIG_SMP */
+ hrtick_rq_init(rq);
+ atomic_set(&rq->nr_iowait, 0);
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 81dc4212423a..a72464d370cd 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1007,6 +1007,10 @@ struct rq {
+
+ /* This is used to determine avg_idle's max value */
+ u64 max_idle_balance_cost;
++
++#ifdef CONFIG_HOTPLUG_CPU
++ struct rcuwait hotplug_wait;
++#endif
+ #endif /* CONFIG_SMP */
+
+ #ifdef CONFIG_IRQ_TIME_ACCOUNTING
+--
+2.43.0
+
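A closing note, again not part of the upstream commit: the two wait/wake
conditions look inconsistent at first glance, but both describe the same
state. The waiter sleeps from the hotplug control thread, which is still
runnable when the condition is evaluated and therefore counts toward
rq->nr_running, so "everything pushable has left" reads as:

	rcuwait_wait_event(&rq->hotplug_wait, rq->nr_running == 1,
			   TASK_UNINTERRUPTIBLE);

The wakeup runs from balance_push() in the context of the idle task,
which is never accounted in nr_running, so the same state reads as an
empty runqueue there:

	if (!rq->nr_running && rcuwait_active(&rq->hotplug_wait)) {
		raw_spin_unlock(&rq->lock);
		rcuwait_wake_up(&rq->hotplug_wait);
		raw_spin_lock(&rq->lock);
	}

The lock dance around rcuwait_wake_up() is needed because waking the
waiter, which is pinned to this CPU, takes this runqueue's lock again.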