Diffstat (limited to 'debian/patches-rt/0303-locking-rtmutex-Clean-pi_blocked_on-in-the-error-cas.patch')
-rw-r--r-- | debian/patches-rt/0303-locking-rtmutex-Clean-pi_blocked_on-in-the-error-cas.patch | 98 |
1 file changed, 98 insertions, 0 deletions
diff --git a/debian/patches-rt/0303-locking-rtmutex-Clean-pi_blocked_on-in-the-error-cas.patch b/debian/patches-rt/0303-locking-rtmutex-Clean-pi_blocked_on-in-the-error-cas.patch
new file mode 100644
index 000000000..8ec5a16d0
--- /dev/null
+++ b/debian/patches-rt/0303-locking-rtmutex-Clean-pi_blocked_on-in-the-error-cas.patch
@@ -0,0 +1,98 @@
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 30 Sep 2019 18:15:44 +0200
+Subject: [PATCH 303/342] locking/rtmutex: Clean ->pi_blocked_on in the error
+ case
+Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=aa4169c26b331ce5542238710c17b2bfe9cab704
+
+[ Upstream commit 0be4ea6e3ce693101be0fbd55a0cc7ce238ab2eb ]
+
+The function rt_mutex_wait_proxy_lock() cleans ->pi_blocked_on in case
+of failure (timeout, signal). The same cleanup is required in
+__rt_mutex_start_proxy_lock().
+In both the cases the tasks was interrupted by a signal or timeout while
+acquiring the lock and after the interruption it longer blocks on the
+lock.
+
+Fixes: 1a1fb985f2e2b ("futex: Handle early deadlock return correctly")
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+---
+ kernel/locking/rtmutex.c | 43 +++++++++++++++++++++++++------------------
+ 1 file changed, 25 insertions(+), 18 deletions(-)
+
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index ded2296f848a..dc95a81ae3a6 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -2319,6 +2319,26 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock)
+         rt_mutex_set_owner(lock, NULL);
+ }
+ 
++static void fixup_rt_mutex_blocked(struct rt_mutex *lock)
++{
++        struct task_struct *tsk = current;
++        /*
++         * RT has a problem here when the wait got interrupted by a timeout
++         * or a signal. task->pi_blocked_on is still set. The task must
++         * acquire the hash bucket lock when returning from this function.
++         *
++         * If the hash bucket lock is contended then the
++         * BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in
++         * task_blocks_on_rt_mutex() will trigger. This can be avoided by
++         * clearing task->pi_blocked_on which removes the task from the
++         * boosting chain of the rtmutex. That's correct because the task
++         * is not longer blocked on it.
++         */
++        raw_spin_lock(&tsk->pi_lock);
++        tsk->pi_blocked_on = NULL;
++        raw_spin_unlock(&tsk->pi_lock);
++}
++
+ /**
+  * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
+  * @lock:               the rt_mutex to take
+@@ -2391,6 +2411,9 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+                 ret = 0;
+         }
+ 
++        if (ret)
++                fixup_rt_mutex_blocked(lock);
++
+         debug_rt_mutex_print_deadlock(waiter);
+ 
+         return ret;
+@@ -2471,7 +2494,6 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
+                                struct hrtimer_sleeper *to,
+                                struct rt_mutex_waiter *waiter)
+ {
+-        struct task_struct *tsk = current;
+         int ret;
+ 
+         raw_spin_lock_irq(&lock->wait_lock);
+@@ -2483,23 +2505,8 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
+          * have to fix that up.
+          */
+         fixup_rt_mutex_waiters(lock);
+-        /*
+-         * RT has a problem here when the wait got interrupted by a timeout
+-         * or a signal. task->pi_blocked_on is still set. The task must
+-         * acquire the hash bucket lock when returning from this function.
+-         *
+-         * If the hash bucket lock is contended then the
+-         * BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in
+-         * task_blocks_on_rt_mutex() will trigger. This can be avoided by
+-         * clearing task->pi_blocked_on which removes the task from the
+-         * boosting chain of the rtmutex. That's correct because the task
+-         * is not longer blocked on it.
+-         */
+-        if (ret) {
+-                raw_spin_lock(&tsk->pi_lock);
+-                tsk->pi_blocked_on = NULL;
+-                raw_spin_unlock(&tsk->pi_lock);
+-        }
++        if (ret)
++                fixup_rt_mutex_blocked(lock);
+ 
+         raw_spin_unlock_irq(&lock->wait_lock);
+ 
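The patch above factors the ->pi_blocked_on cleanup into fixup_rt_mutex_blocked() and calls it from both error paths (start-proxy and wait-proxy). Below is a minimal, self-contained C sketch of that pattern for illustration only: the names (mock_task, mock_waiter, start_proxy_lock, fixup_blocked) are hypothetical stand-ins, not kernel APIs, and pthread locking replaces raw_spin_lock()/raw_spin_unlock(). It only models the idea that an interrupted or failed lock attempt must clear the task's blocked-on record under its pi_lock before returning.

/*
 * Sketch of the cleanup pattern introduced by this patch, under the
 * assumptions stated above. Not kernel code.
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct mock_waiter { int unused; };

struct mock_task {
        pthread_mutex_t pi_lock;              /* stands in for task->pi_lock */
        struct mock_waiter *pi_blocked_on;    /* stands in for task->pi_blocked_on */
};

/* Analogue of fixup_rt_mutex_blocked(): clear the blocked-on record under pi_lock. */
static void fixup_blocked(struct mock_task *tsk)
{
        pthread_mutex_lock(&tsk->pi_lock);
        tsk->pi_blocked_on = NULL;
        pthread_mutex_unlock(&tsk->pi_lock);
}

/*
 * Analogue of __rt_mutex_start_proxy_lock(): the task is recorded as blocked
 * on a waiter, and if the acquisition fails that record must be cleared
 * before returning, which is the cleanup this patch adds.
 */
static int start_proxy_lock(struct mock_task *tsk, struct mock_waiter *w,
                            int simulate_error)
{
        tsk->pi_blocked_on = w;               /* set while queueing as a waiter */
        int ret = simulate_error ? -35 : 0;   /* e.g. -EDEADLK from deadlock detection */

        if (ret)
                fixup_blocked(tsk);           /* without this, pi_blocked_on stays stale */
        return ret;
}

int main(void)
{
        struct mock_task tsk = { PTHREAD_MUTEX_INITIALIZER, NULL };
        struct mock_waiter w;

        int ret = start_proxy_lock(&tsk, &w, 1 /* force the error path */);
        printf("ret=%d, pi_blocked_on=%p (expected NULL)\n",
               ret, (void *)tsk.pi_blocked_on);
        return 0;
}

In the real code the stale pi_blocked_on matters because the returning task still has to take the futex hash bucket lock; if that lock is contended, the BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in task_blocks_on_rt_mutex() would fire, which is why both error paths now funnel through the same helper.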