path: root/debian/patches-rt/0154-rtmutex-annotate-sleeping-lock-context.patch
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:38 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:38 +0000
commit     08b74a000942a380fe028845f92cd3a0dee827d5 (patch)
tree       aa78b4e12607c3e1fcce8d5cc42df4330792f118 /debian/patches-rt/0154-rtmutex-annotate-sleeping-lock-context.patch
parent     Adding upstream version 4.19.249. (diff)
download   linux-08b74a000942a380fe028845f92cd3a0dee827d5.tar.xz
           linux-08b74a000942a380fe028845f92cd3a0dee827d5.zip
Adding debian version 4.19.249-2. (tag: debian/4.19.249-2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0154-rtmutex-annotate-sleeping-lock-context.patch')
-rw-r--r--  debian/patches-rt/0154-rtmutex-annotate-sleeping-lock-context.patch  308
1 file changed, 308 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0154-rtmutex-annotate-sleeping-lock-context.patch b/debian/patches-rt/0154-rtmutex-annotate-sleeping-lock-context.patch
new file mode 100644
index 000000000..916d8d802
--- /dev/null
+++ b/debian/patches-rt/0154-rtmutex-annotate-sleeping-lock-context.patch
@@ -0,0 +1,308 @@
+From ac520c8f628b0eeff2af71b77289199262510960 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 21 Sep 2017 14:25:13 +0200
+Subject: [PATCH 154/347] rtmutex: annotate sleeping lock context
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+The RCU code complains about schedule() within a rcu_read_lock() section.
+The valid scenario on -RT is if a sleeping lock is held. In order to suppress
+the warning, the migrate_disable counter was used to identify an
+invocation of schedule() due to lock contention.
+
+Grygorii Strashko reported that during CPU hotplug we might see the
+warning via
+ rt_spin_lock() -> migrate_disable() -> pin_current_cpu() -> __read_rt_lock()
+
+because the counter is not yet set.
+It is also possible to trigger the warning from cpu_chill()
+(seen on a kblockd_mod_delayed_work_on() caller).
+
+To address this RCU warning, I annotate the sleeping lock context. The
+counter is incremented before migrate_disable() so the warning Grygorii
+reported should not trigger anymore. Additionally, I use that counter in
+cpu_chill() to avoid the RCU warning from there.
+
+Reported-by: Grygorii Strashko <grygorii.strashko@ti.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/preempt.h | 9 ++++++++
+ include/linux/sched.h | 26 ++++++++++++++++++++++
+ kernel/locking/rtmutex.c | 12 ++++++++--
+ kernel/locking/rwlock-rt.c | 18 +++++++++++----
+ kernel/rcu/tree_plugin.h | 6 ++++-
+ kernel/sched/core.c | 45 ++++++++++++++++++++++++++++++++++++++
+ 6 files changed, 109 insertions(+), 7 deletions(-)
+
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 27c3176d88d2..9eafc34898b4 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -211,6 +211,15 @@ extern void migrate_enable(void);
+
+ int __migrate_disabled(struct task_struct *p);
+
++#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
++
++extern void migrate_disable(void);
++extern void migrate_enable(void);
++static inline int __migrate_disabled(struct task_struct *p)
++{
++ return 0;
++}
++
+ #else
+ #define migrate_disable() barrier()
+ #define migrate_enable() barrier()
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 5374ade34b87..4f8fcba23c1d 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -673,6 +673,15 @@ struct task_struct {
+ # ifdef CONFIG_SCHED_DEBUG
+ int migrate_disable_atomic;
+ # endif
++
++#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
++ int migrate_disable;
++# ifdef CONFIG_SCHED_DEBUG
++ int migrate_disable_atomic;
++# endif
++#endif
++#ifdef CONFIG_PREEMPT_RT_FULL
++ int sleeping_lock;
+ #endif
+
+ #ifdef CONFIG_PREEMPT_RCU
+@@ -1810,6 +1819,23 @@ static __always_inline bool need_resched(void)
+ return unlikely(tif_need_resched());
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static inline void sleeping_lock_inc(void)
++{
++ current->sleeping_lock++;
++}
++
++static inline void sleeping_lock_dec(void)
++{
++ current->sleeping_lock--;
++}
++
++#else
++
++static inline void sleeping_lock_inc(void) { }
++static inline void sleeping_lock_dec(void) { }
++#endif
++
+ /*
+ * Wrappers for p->thread_info->cpu access. No-op on UP.
+ */
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 11bcae2184c4..ded2296f848a 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1141,6 +1141,7 @@ void __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
+
+ void __lockfunc rt_spin_lock(spinlock_t *lock)
+ {
++ sleeping_lock_inc();
+ migrate_disable();
+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+@@ -1155,6 +1156,7 @@ void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
+ {
++ sleeping_lock_inc();
+ migrate_disable();
+ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+@@ -1168,6 +1170,7 @@ void __lockfunc rt_spin_unlock(spinlock_t *lock)
+ spin_release(&lock->dep_map, 1, _RET_IP_);
+ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
+ migrate_enable();
++ sleeping_lock_dec();
+ }
+ EXPORT_SYMBOL(rt_spin_unlock);
+
+@@ -1193,12 +1196,15 @@ int __lockfunc rt_spin_trylock(spinlock_t *lock)
+ {
+ int ret;
+
++ sleeping_lock_inc();
+ migrate_disable();
+ ret = __rt_mutex_trylock(&lock->lock);
+- if (ret)
++ if (ret) {
+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+- else
++ } else {
+ migrate_enable();
++ sleeping_lock_dec();
++ }
+ return ret;
+ }
+ EXPORT_SYMBOL(rt_spin_trylock);
+@@ -1210,6 +1216,7 @@ int __lockfunc rt_spin_trylock_bh(spinlock_t *lock)
+ local_bh_disable();
+ ret = __rt_mutex_trylock(&lock->lock);
+ if (ret) {
++ sleeping_lock_inc();
+ migrate_disable();
+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ } else
+@@ -1225,6 +1232,7 @@ int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
+ *flags = 0;
+ ret = __rt_mutex_trylock(&lock->lock);
+ if (ret) {
++ sleeping_lock_inc();
+ migrate_disable();
+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ }
+diff --git a/kernel/locking/rwlock-rt.c b/kernel/locking/rwlock-rt.c
+index 8f90afe111ce..c3b91205161c 100644
+--- a/kernel/locking/rwlock-rt.c
++++ b/kernel/locking/rwlock-rt.c
+@@ -305,12 +305,15 @@ int __lockfunc rt_read_trylock(rwlock_t *rwlock)
+ {
+ int ret;
+
++ sleeping_lock_inc();
+ migrate_disable();
+ ret = do_read_rt_trylock(rwlock);
+- if (ret)
++ if (ret) {
+ rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
+- else
++ } else {
+ migrate_enable();
++ sleeping_lock_dec();
++ }
+ return ret;
+ }
+ EXPORT_SYMBOL(rt_read_trylock);
+@@ -319,18 +322,22 @@ int __lockfunc rt_write_trylock(rwlock_t *rwlock)
+ {
+ int ret;
+
++ sleeping_lock_inc();
+ migrate_disable();
+ ret = do_write_rt_trylock(rwlock);
+- if (ret)
++ if (ret) {
+ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
+- else
++ } else {
+ migrate_enable();
++ sleeping_lock_dec();
++ }
+ return ret;
+ }
+ EXPORT_SYMBOL(rt_write_trylock);
+
+ void __lockfunc rt_read_lock(rwlock_t *rwlock)
+ {
++ sleeping_lock_inc();
+ migrate_disable();
+ rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
+ do_read_rt_lock(rwlock);
+@@ -339,6 +346,7 @@ EXPORT_SYMBOL(rt_read_lock);
+
+ void __lockfunc rt_write_lock(rwlock_t *rwlock)
+ {
++ sleeping_lock_inc();
+ migrate_disable();
+ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+ do_write_rt_lock(rwlock);
+@@ -350,6 +358,7 @@ void __lockfunc rt_read_unlock(rwlock_t *rwlock)
+ rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
+ do_read_rt_unlock(rwlock);
+ migrate_enable();
++ sleeping_lock_dec();
+ }
+ EXPORT_SYMBOL(rt_read_unlock);
+
+@@ -358,6 +367,7 @@ void __lockfunc rt_write_unlock(rwlock_t *rwlock)
+ rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
+ do_write_rt_unlock(rwlock);
+ migrate_enable();
++ sleeping_lock_dec();
+ }
+ EXPORT_SYMBOL(rt_write_unlock);
+
+diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
+index 5f6de49dc78e..35f3552b7463 100644
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
+@@ -337,9 +337,13 @@ static void rcu_preempt_note_context_switch(bool preempt)
+ struct task_struct *t = current;
+ struct rcu_data *rdp;
+ struct rcu_node *rnp;
++ int sleeping_l = 0;
+
+ lockdep_assert_irqs_disabled();
+- WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0);
++#if defined(CONFIG_PREEMPT_RT_FULL)
++ sleeping_l = t->sleeping_lock;
++#endif
++ WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0 && !sleeping_l);
+ if (t->rcu_read_lock_nesting > 0 &&
+ !t->rcu_read_unlock_special.b.blocked) {
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 9494e2007118..555dea10764e 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -7348,4 +7348,49 @@ void migrate_enable(void)
+ preempt_enable();
+ }
+ EXPORT_SYMBOL(migrate_enable);
++
++#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
++void migrate_disable(void)
++{
++ struct task_struct *p = current;
++
++ if (in_atomic() || irqs_disabled()) {
++#ifdef CONFIG_SCHED_DEBUG
++ p->migrate_disable_atomic++;
++#endif
++ return;
++ }
++#ifdef CONFIG_SCHED_DEBUG
++ if (unlikely(p->migrate_disable_atomic)) {
++ tracing_off();
++ WARN_ON_ONCE(1);
++ }
++#endif
++
++ p->migrate_disable++;
++}
++EXPORT_SYMBOL(migrate_disable);
++
++void migrate_enable(void)
++{
++ struct task_struct *p = current;
++
++ if (in_atomic() || irqs_disabled()) {
++#ifdef CONFIG_SCHED_DEBUG
++ p->migrate_disable_atomic--;
++#endif
++ return;
++ }
++
++#ifdef CONFIG_SCHED_DEBUG
++ if (unlikely(p->migrate_disable_atomic)) {
++ tracing_off();
++ WARN_ON_ONCE(1);
++ }
++#endif
++
++ WARN_ON_ONCE(p->migrate_disable <= 0);
++ p->migrate_disable--;
++}
++EXPORT_SYMBOL(migrate_enable);
+ #endif
+--
+2.36.1
+
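
To make the ordering in the hunks above easier to follow, here is a minimal standalone C sketch (userspace, not kernel code) of the pattern the patch establishes: the per-task sleeping_lock counter is incremented before migrate_disable() on the acquire side and decremented after migrate_enable() on release, and RCU's context-switch check treats a non-zero counter as a schedule() caused by sleeping-lock contention. The *_sketch helpers and the thread-local stand-ins for task_struct fields are hypothetical names used only for illustration.

#include <stdio.h>

/* Thread-local stand-ins for the task_struct fields touched by the patch. */
static _Thread_local int sleeping_lock;    /* mirrors task_struct::sleeping_lock */
static _Thread_local int migrate_disabled; /* mirrors the migrate_disable counter */

static void sleeping_lock_inc(void) { sleeping_lock++; }
static void sleeping_lock_dec(void) { sleeping_lock--; }
static void migrate_disable(void)   { migrate_disabled++; }
static void migrate_enable(void)    { migrate_disabled--; }

/* Acquire side: the counter is bumped *before* migrate_disable(), so a
 * schedule() reached from inside migrate_disable() (e.g. via
 * pin_current_cpu() during CPU hotplug) already sees it set. */
static void rt_spin_lock_sketch(void)
{
	sleeping_lock_inc();
	migrate_disable();
	/* ... real code would acquire the rtmutex here, possibly sleeping ... */
}

/* Release side: strict reverse order of the acquire side. */
static void rt_spin_unlock_sketch(void)
{
	/* ... real code would release the rtmutex here ... */
	migrate_enable();
	sleeping_lock_dec();
}

/* Models the check the patch adds to rcu_preempt_note_context_switch():
 * warn about a non-preemption schedule() inside an RCU read-side critical
 * section, unless it is due to contention on a sleeping lock. */
static void rcu_note_context_switch_sketch(int preempt, int rcu_nesting)
{
	if (!preempt && rcu_nesting > 0 && !sleeping_lock)
		fprintf(stderr, "WARN: voluntary schedule() inside RCU read-side section\n");
}

int main(void)
{
	rt_spin_lock_sketch();
	rcu_note_context_switch_sketch(0, 1); /* silent: counter is non-zero */
	rt_spin_unlock_sketch();
	rcu_note_context_switch_sketch(0, 1); /* would warn: counter back to zero */
	return 0;
}

The strict inc-before-disable / dec-after-enable nesting mirrors what the patch enforces: because the counter is already set when migrate_disable() runs, even a schedule() triggered from inside migrate_disable() itself is correctly classified as lock contention rather than flagged by RCU.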