author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:38 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:38 +0000
commit     08b74a000942a380fe028845f92cd3a0dee827d5 (patch)
tree       aa78b4e12607c3e1fcce8d5cc42df4330792f118 /debian/patches-rt/0309-sched-migrate_enable-Use-stop_one_cpu_nowait.patch
parent     Adding upstream version 4.19.249. (diff)
Adding debian version 4.19.249-2. (debian/4.19.249-2, debian)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0309-sched-migrate_enable-Use-stop_one_cpu_nowait.patch')
-rw-r--r--  debian/patches-rt/0309-sched-migrate_enable-Use-stop_one_cpu_nowait.patch  |  121
1 file changed, 121 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0309-sched-migrate_enable-Use-stop_one_cpu_nowait.patch b/debian/patches-rt/0309-sched-migrate_enable-Use-stop_one_cpu_nowait.patch
new file mode 100644
index 000000000..21108241b
--- /dev/null
+++ b/debian/patches-rt/0309-sched-migrate_enable-Use-stop_one_cpu_nowait.patch
@@ -0,0 +1,121 @@
+From 778702cffc4e8e3f002716b84944ddf23692639f Mon Sep 17 00:00:00 2001
+From: Scott Wood <swood@redhat.com>
+Date: Sat, 12 Oct 2019 01:52:14 -0500
+Subject: [PATCH 309/347] sched: migrate_enable: Use stop_one_cpu_nowait()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+[ Upstream commit 6b39a1fa8c53cae08dc03afdae193b7d3a78a173 ]
+
+migrate_enable() can be called with current->state != TASK_RUNNING.
+Avoid clobbering the existing state by using stop_one_cpu_nowait().
+Since we're stopping the current cpu, we know that we won't get
+past __schedule() until migration_cpu_stop() has run (at least up to
+the point of migrating us to another cpu).
+
+Signed-off-by: Scott Wood <swood@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+---
+ include/linux/stop_machine.h | 2 ++
+ kernel/sched/core.c | 23 +++++++++++++----------
+ kernel/stop_machine.c | 7 +++++--
+ 3 files changed, 20 insertions(+), 12 deletions(-)
+
+diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
+index ccdaa8fd5657..150e886ed83d 100644
+--- a/include/linux/stop_machine.h
++++ b/include/linux/stop_machine.h
+@@ -26,6 +26,8 @@ struct cpu_stop_work {
+ cpu_stop_fn_t fn;
+ void *arg;
+ struct cpu_stop_done *done;
++ /* Did not run due to disabled stopper; for nowait debug checks */
++ bool disabled;
+ };
+
+ int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index de6514e13e0c..dff9d26a8625 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -990,6 +990,7 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
+ struct migration_arg {
+ struct task_struct *task;
+ int dest_cpu;
++ bool done;
+ };
+
+ /*
+@@ -1025,6 +1026,11 @@ static int migration_cpu_stop(void *data)
+ struct task_struct *p = arg->task;
+ struct rq *rq = this_rq();
+ struct rq_flags rf;
++ int dest_cpu = arg->dest_cpu;
++
++ /* We don't look at arg after this point. */
++ smp_mb();
++ arg->done = true;
+
+ /*
+ * The original target CPU might have gone down and we might
+@@ -1047,9 +1053,9 @@ static int migration_cpu_stop(void *data)
+ */
+ if (task_rq(p) == rq) {
+ if (task_on_rq_queued(p))
+- rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
++ rq = __migrate_task(rq, &rf, p, dest_cpu);
+ else
+- p->wake_cpu = arg->dest_cpu;
++ p->wake_cpu = dest_cpu;
+ }
+ rq_unlock(rq, &rf);
+ raw_spin_unlock(&p->pi_lock);
+@@ -7316,6 +7322,7 @@ void migrate_enable(void)
+ WARN_ON(smp_processor_id() != cpu);
+ if (!is_cpu_allowed(p, cpu)) {
+ struct migration_arg arg = { p };
++ struct cpu_stop_work work;
+ struct rq_flags rf;
+
+ rq = task_rq_lock(p, &rf);
+@@ -7323,15 +7330,11 @@ void migrate_enable(void)
+ arg.dest_cpu = select_fallback_rq(cpu, p);
+ task_rq_unlock(rq, p, &rf);
+
+- preempt_lazy_enable();
+- preempt_enable();
+-
+- sleeping_lock_inc();
+- stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
+- sleeping_lock_dec();
++ stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
++ &arg, &work);
+ tlb_migrate_finish(p->mm);
+-
+- return;
++ __schedule(true);
++ WARN_ON_ONCE(!arg.done && !work.disabled);
+ }
+
+ out:
+diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
+index 067cb83f37ea..2d15c0d50625 100644
+--- a/kernel/stop_machine.c
++++ b/kernel/stop_machine.c
+@@ -86,8 +86,11 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
+ enabled = stopper->enabled;
+ if (enabled)
+ __cpu_stop_queue_work(stopper, work, &wakeq);
+- else if (work->done)
+- cpu_stop_signal_done(work->done);
++ else {
++ work->disabled = true;
++ if (work->done)
++ cpu_stop_signal_done(work->done);
++ }
+ raw_spin_unlock_irqrestore(&stopper->lock, flags);
+
+ wake_up_q(&wakeq);
+--
+2.36.1
+