Diffstat (limited to 'debian/patches-rt/0006-locking-rtmutex-Add-a-lockdep-assert-to-catch-potent.patch')
-rw-r--r--  debian/patches-rt/0006-locking-rtmutex-Add-a-lockdep-assert-to-catch-potent.patch  57
 1 file changed, 57 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0006-locking-rtmutex-Add-a-lockdep-assert-to-catch-potent.patch b/debian/patches-rt/0006-locking-rtmutex-Add-a-lockdep-assert-to-catch-potent.patch
new file mode 100644
index 000000000..7d94cdb9d
--- /dev/null
+++ b/debian/patches-rt/0006-locking-rtmutex-Add-a-lockdep-assert-to-catch-potent.patch
@@ -0,0 +1,57 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 8 Sep 2023 18:22:53 +0200
+Subject: [PATCH 6/7] locking/rtmutex: Add a lockdep assert to catch potential
+ nested blocking
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+There used to be a BUG_ON(current->pi_blocked_on) in the lock acquisition
+functions, but that vanished in one of the rtmutex overhauls.
+
+Bring it back in the form of a lockdep assert to catch code paths which
+take rtmutex-based locks with current::pi_blocked_on != NULL.
+
+Reported-by: Crystal Wood <swood@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: "Peter Zijlstra (Intel)" <peterz@infradead.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20230908162254.999499-7-bigeasy@linutronix.de
+---
+ kernel/locking/rtmutex.c | 2 ++
+ kernel/locking/rwbase_rt.c | 2 ++
+ kernel/locking/spinlock_rt.c | 2 ++
+ 3 files changed, 6 insertions(+)
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1784,6 +1784,8 @@ static int __sched rt_mutex_slowlock(str
+ static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
+ unsigned int state)
+ {
++ lockdep_assert(!current->pi_blocked_on);
++
+ if (likely(rt_mutex_try_acquire(lock)))
+ return 0;
+
+--- a/kernel/locking/rwbase_rt.c
++++ b/kernel/locking/rwbase_rt.c
+@@ -133,6 +133,8 @@ static int __sched __rwbase_read_lock(st
+ static __always_inline int rwbase_read_lock(struct rwbase_rt *rwb,
+ unsigned int state)
+ {
++ lockdep_assert(!current->pi_blocked_on);
++
+ if (rwbase_read_trylock(rwb))
+ return 0;
+
+--- a/kernel/locking/spinlock_rt.c
++++ b/kernel/locking/spinlock_rt.c
+@@ -37,6 +37,8 @@
+
+ static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
+ {
++ lockdep_assert(!current->pi_blocked_on);
++
+ if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
+ rtlock_slowlock(rtm);
+ }
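
For readers outside the RT tree: current->pi_blocked_on is set by the rtmutex
slow path while a task sits on a lock's wait tree, so a non-NULL value on entry
to another lock operation means the task would block on a second sleeping lock
while still blocked on the first, a state the priority-inheritance machinery
cannot express. The sketch below is a minimal userspace model of that
invariant, not kernel code; struct model_task, model_rt_mutex_lock() and the
other names are invented for illustration.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct model_rt_mutex {
	int owned;
};

struct model_task {
	/* Non-NULL while the task is blocked waiting for a lock. */
	struct model_rt_mutex *pi_blocked_on;
};

/* Stand-in for the kernel's "current" task pointer. */
static _Thread_local struct model_task current_task;

static void model_rt_mutex_lock(struct model_rt_mutex *lock)
{
	/*
	 * The check this patch adds: a task must not start acquiring a
	 * sleeping lock while it is already blocked on another one.
	 */
	assert(current_task.pi_blocked_on == NULL);

	/* Slow path: mark the task blocked while it waits for the lock... */
	current_task.pi_blocked_on = lock;
	lock->owned = 1;		/* stand-in for the real wait/acquire */
	/* ...and clear the marker once the lock is owned. */
	current_task.pi_blocked_on = NULL;
}

int main(void)
{
	struct model_rt_mutex a = { 0 }, b = { 0 };

	model_rt_mutex_lock(&a);	/* ok: not blocked on anything */
	model_rt_mutex_lock(&b);	/* ok: first acquisition completed */
	printf("pi_blocked_on invariant held\n");
	return 0;
}

Two details of the real patch worth noting. First, the assert sits ahead of the
trylock fast path in all three hunks, so the invariant is checked even for
uncontended acquisitions. Second, unlike the old BUG_ON(), lockdep_assert()
only generates a check in lockdep-enabled debug builds (it expands to a
WARN_ON()-style check there and compiles away otherwise), so production
kernels pay nothing for it.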