path: root/debian/patches-rt/0155-sched-migrate_disable-fallback-to-preempt_disable-in.patch
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:38 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:38 +0000
commit     08b74a000942a380fe028845f92cd3a0dee827d5 (patch)
tree       aa78b4e12607c3e1fcce8d5cc42df4330792f118 /debian/patches-rt/0155-sched-migrate_disable-fallback-to-preempt_disable-in.patch
parent     Adding upstream version 4.19.249. (diff)
download   linux-08b74a000942a380fe028845f92cd3a0dee827d5.tar.xz
           linux-08b74a000942a380fe028845f92cd3a0dee827d5.zip
Adding debian version 4.19.249-2. (debian/4.19.249-2, debian)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0155-sched-migrate_disable-fallback-to-preempt_disable-in.patch')
-rw-r--r--  debian/patches-rt/0155-sched-migrate_disable-fallback-to-preempt_disable-in.patch  204
1 file changed, 204 insertions, 0 deletions
diff --git a/debian/patches-rt/0155-sched-migrate_disable-fallback-to-preempt_disable-in.patch b/debian/patches-rt/0155-sched-migrate_disable-fallback-to-preempt_disable-in.patch
new file mode 100644
index 000000000..c471ea20d
--- /dev/null
+++ b/debian/patches-rt/0155-sched-migrate_disable-fallback-to-preempt_disable-in.patch
@@ -0,0 +1,204 @@
+From e85e95b0e59b7d493402419e3a63a06978a06452 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 5 Jul 2018 14:44:51 +0200
+Subject: [PATCH 155/347] sched/migrate_disable: fallback to preempt_disable()
+ instead of barrier()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+On SMP + !RT migrate_disable() is still around. It is not part of spin_lock()
+anymore, so it has almost no users. However, the futex code has a workaround for
+the !in_atomic() part of migrate_disable() which fails because the matching
+migrate_disable() is no longer part of spin_lock().
+
+On !SMP + !RT migrate_disable() is reduced to barrier(). This is not optimal
+because there are a few spots where a "preempt_disable()" statement was replaced
+with "migrate_disable()".
+
+We also used the migrate_disable counter to figure out whether a sleeping lock
+is acquired, so that RCU does not complain about schedule() being called during
+rcu_read_lock() while a sleeping lock is held. This has changed: we no longer
+use it and instead keep a separate sleeping_lock counter for the RCU purpose.
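+
+Roughly sketched, and only as an illustration of the idea (the counter name is
+taken from the description above, the helper names are made up; the real code
+lives in the separate sleeping-lock patches): the sleeping-lock slow path bumps
+a per-task counter, and the RCU/scheduler check treats a non-zero counter as
+"sleeping lock held" instead of inspecting the migrate_disable counter:
+
+	#include <linux/sched.h>
+
+	/* sketch only: per-task counter consulted around schedule() in RCU read sections */
+	static inline void demo_sleeping_lock_inc(void)
+	{
+		current->sleeping_lock++;
+	}
+
+	static inline void demo_sleeping_lock_dec(void)
+	{
+		current->sleeping_lock--;
+	}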
+
+This means we can now:
+- for SMP + RT_BASE
+  keep the full migration machinery; nothing changes here
+
+- for !SMP + RT_BASE
+  the migration counting is no longer required. It used to ensure that the task
+  is not migrated to another CPU and that this CPU remains online. !SMP ensures
+  that already.
+  Move it to CONFIG_SCHED_DEBUG so the counting is done for debugging purposes
+  only.
+
+- for all other cases including !RT
+  fall back to preempt_disable(). The only remaining users of migrate_disable()
+  are those which were converted from preempt_disable() and the futex
+  workaround, which is already in the preempt_disable() section due to the
+  spin_lock that is held. A usage sketch of the resulting behaviour follows
+  right after this list.
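+
+For illustration only (the authoritative definitions are in the
+include/linux/preempt.h hunk further down), a caller of the API sees roughly
+the following behaviour after this change:
+
+	/*
+	 * Sketch of the caller-visible contract, not a copy of the headers:
+	 *
+	 *   SMP && PREEMPT_RT_BASE:   real migration disabling; the task stays
+	 *                             on this CPU but remains preemptible
+	 *   !SMP && PREEMPT_RT_BASE:  no migration is possible anyway; only a
+	 *                             SCHED_DEBUG counter plus barrier()
+	 *   all other configurations: preempt_disable()/preempt_enable()
+	 */
+	migrate_disable();
+	/* per-CPU or CPU-local state can be used here; the task will not change CPUs */
+	migrate_enable();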
+
+Cc: stable-rt@vger.kernel.org
+Reported-by: joe.korty@concurrent-rt.com
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/preempt.h | 6 +++---
+ include/linux/sched.h | 4 ++--
+ kernel/sched/core.c | 23 +++++++++++------------
+ kernel/sched/debug.c | 2 +-
+ 4 files changed, 17 insertions(+), 18 deletions(-)
+
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 9eafc34898b4..ed8413e7140f 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -204,7 +204,7 @@ do { \
+
+ #define preemptible() (preempt_count() == 0 && !irqs_disabled())
+
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+
+ extern void migrate_disable(void);
+ extern void migrate_enable(void);
+@@ -221,8 +221,8 @@ static inline int __migrate_disabled(struct task_struct *p)
+ }
+
+ #else
+-#define migrate_disable() barrier()
+-#define migrate_enable() barrier()
++#define migrate_disable() preempt_disable()
++#define migrate_enable() preempt_enable()
+ static inline int __migrate_disabled(struct task_struct *p)
+ {
+ return 0;
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 4f8fcba23c1d..ceb3bdfb6bc4 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -667,7 +667,7 @@ struct task_struct {
+ int nr_cpus_allowed;
+ const cpumask_t *cpus_ptr;
+ cpumask_t cpus_mask;
+-#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+ int migrate_disable;
+ int migrate_disable_update;
+ # ifdef CONFIG_SCHED_DEBUG
+@@ -675,8 +675,8 @@ struct task_struct {
+ # endif
+
+ #elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+- int migrate_disable;
+ # ifdef CONFIG_SCHED_DEBUG
++ int migrate_disable;
+ int migrate_disable_atomic;
+ # endif
+ #endif
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 555dea10764e..0c44c16244a1 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1031,7 +1031,7 @@ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_ma
+ p->nr_cpus_allowed = cpumask_weight(new_mask);
+ }
+
+-#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+ int __migrate_disabled(struct task_struct *p)
+ {
+ return p->migrate_disable;
+@@ -1071,7 +1071,7 @@ static void __do_set_cpus_allowed_tail(struct task_struct *p,
+
+ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+ {
+-#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+ if (__migrate_disabled(p)) {
+ lockdep_assert_held(&p->pi_lock);
+
+@@ -1145,7 +1145,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
+ if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
+ goto out;
+
+-#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+ if (__migrate_disabled(p)) {
+ p->migrate_disable_update = 1;
+ goto out;
+@@ -7204,7 +7204,7 @@ const u32 sched_prio_to_wmult[40] = {
+
+ #undef CREATE_TRACE_POINTS
+
+-#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+
+ static inline void
+ update_nr_migratory(struct task_struct *p, long delta)
+@@ -7352,45 +7352,44 @@ EXPORT_SYMBOL(migrate_enable);
+ #elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+ void migrate_disable(void)
+ {
++#ifdef CONFIG_SCHED_DEBUG
+ struct task_struct *p = current;
+
+ if (in_atomic() || irqs_disabled()) {
+-#ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic++;
+-#endif
+ return;
+ }
+-#ifdef CONFIG_SCHED_DEBUG
++
+ if (unlikely(p->migrate_disable_atomic)) {
+ tracing_off();
+ WARN_ON_ONCE(1);
+ }
+-#endif
+
+ p->migrate_disable++;
++#endif
++ barrier();
+ }
+ EXPORT_SYMBOL(migrate_disable);
+
+ void migrate_enable(void)
+ {
++#ifdef CONFIG_SCHED_DEBUG
+ struct task_struct *p = current;
+
+ if (in_atomic() || irqs_disabled()) {
+-#ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic--;
+-#endif
+ return;
+ }
+
+-#ifdef CONFIG_SCHED_DEBUG
+ if (unlikely(p->migrate_disable_atomic)) {
+ tracing_off();
+ WARN_ON_ONCE(1);
+ }
+-#endif
+
+ WARN_ON_ONCE(p->migrate_disable <= 0);
+ p->migrate_disable--;
++#endif
++ barrier();
+ }
+ EXPORT_SYMBOL(migrate_enable);
+ #endif
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index dc3f3e6fa0bd..169b98c12da7 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -988,7 +988,7 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
+ P(dl.runtime);
+ P(dl.deadline);
+ }
+-#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+ P(migrate_disable);
+ #endif
+ P(nr_cpus_allowed);
+--
+2.36.1
+