author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:28:00 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:28:00 +0000
commit     3565071f226432336a54d0193d729fa4508a3394 (patch)
tree       4cde13f078f84c0a7785d234fd52edce7c90546a /debian/patches-rt/0005-locking-rtmutex-Use-rt_mutex-specific-scheduler-help.patch
parent     Adding upstream version 6.6.15. (diff)
Adding debian version 6.6.15-2. (tag: debian/6.6.15-2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0005-locking-rtmutex-Use-rt_mutex-specific-scheduler-help.patch')
-rw-r--r--  debian/patches-rt/0005-locking-rtmutex-Use-rt_mutex-specific-scheduler-help.patch  177
1 file changed, 177 insertions, 0 deletions
diff --git a/debian/patches-rt/0005-locking-rtmutex-Use-rt_mutex-specific-scheduler-help.patch b/debian/patches-rt/0005-locking-rtmutex-Use-rt_mutex-specific-scheduler-help.patch
new file mode 100644
index 0000000000..d84497e5fd
--- /dev/null
+++ b/debian/patches-rt/0005-locking-rtmutex-Use-rt_mutex-specific-scheduler-help.patch
@@ -0,0 +1,177 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 8 Sep 2023 18:22:52 +0200
+Subject: [PATCH 5/7] locking/rtmutex: Use rt_mutex specific scheduler helpers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Have rt_mutex use the rt_mutex specific scheduler helpers to avoid
+recursion vs rtlock on the PI state.
+
+[[ peterz: adapted to new names ]]
+
+Reported-by: Crystal Wood <swood@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20230908162254.999499-6-bigeasy@linutronix.de
+---
+ kernel/futex/pi.c            | 11 +++++++++++
+ kernel/locking/rtmutex.c     | 14 ++++++++++++--
+ kernel/locking/rwbase_rt.c   |  6 ++++++
+ kernel/locking/rwsem.c       |  8 +++++++-
+ kernel/locking/spinlock_rt.c |  4 ++++
+ 5 files changed, 40 insertions(+), 3 deletions(-)
+
+--- a/kernel/futex/pi.c
++++ b/kernel/futex/pi.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0-or-later
+
+ #include <linux/slab.h>
++#include <linux/sched/rt.h>
+ #include <linux/sched/task.h>
+
+ #include "futex.h"
+@@ -1002,6 +1003,12 @@ int futex_lock_pi(u32 __user *uaddr, uns
+ 		goto no_block;
+ 	}
+
++	/*
++	 * Must be done before we enqueue the waiter, here is unfortunately
++	 * under the hb lock, but that *should* work because it does nothing.
++	 */
++	rt_mutex_pre_schedule();
++
+ 	rt_mutex_init_waiter(&rt_waiter);
+
+ 	/*
+@@ -1052,6 +1059,10 @@ int futex_lock_pi(u32 __user *uaddr, uns
+ 	if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
+ 		ret = 0;
+
++	/*
++	 * Waiter is unqueued.
++	 */
++	rt_mutex_post_schedule();
+ no_block:
+ 	/*
+ 	 * Fixup the pi_state owner and possibly acquire the lock if we
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1632,7 +1632,7 @@ static int __sched rt_mutex_slowlock_blo
+ 		raw_spin_unlock_irq(&lock->wait_lock);
+
+ 		if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
+-			schedule();
++			rt_mutex_schedule();
+
+ 		raw_spin_lock_irq(&lock->wait_lock);
+ 		set_current_state(state);
+@@ -1661,7 +1661,7 @@ static void __sched rt_mutex_handle_dead
+ 	WARN(1, "rtmutex deadlock detected\n");
+ 	while (1) {
+ 		set_current_state(TASK_INTERRUPTIBLE);
+-		schedule();
++		rt_mutex_schedule();
+ 	}
+ }
+
+@@ -1757,6 +1757,15 @@ static int __sched rt_mutex_slowlock(str
+ 	int ret;
+
+ 	/*
++	 * Do all pre-schedule work here, before we queue a waiter and invoke
++	 * PI -- any such work that trips on rtlock (PREEMPT_RT spinlock) would
++	 * otherwise recurse back into task_blocks_on_rt_mutex() through
++	 * rtlock_slowlock() and will then enqueue a second waiter for this
++	 * same task and things get really confusing real fast.
++	 */
++	rt_mutex_pre_schedule();
++
++	/*
+ 	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
+ 	 * be called in early boot if the cmpxchg() fast path is disabled
+ 	 * (debug, no architecture support). In this case we will acquire the
+@@ -1767,6 +1776,7 @@ static int __sched rt_mutex_slowlock(str
+ 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
+ 	ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state);
+ 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
++	rt_mutex_post_schedule();
+
+ 	return ret;
+ }
+--- a/kernel/locking/rwbase_rt.c
++++ b/kernel/locking/rwbase_rt.c
+@@ -71,6 +71,7 @@ static int __sched __rwbase_read_lock(st
+ 	struct rt_mutex_base *rtm = &rwb->rtmutex;
+ 	int ret;
+
++	rwbase_pre_schedule();
+ 	raw_spin_lock_irq(&rtm->wait_lock);
+
+ 	/*
+@@ -125,6 +126,7 @@ static int __sched __rwbase_read_lock(st
+ 	rwbase_rtmutex_unlock(rtm);
+
+ 	trace_contention_end(rwb, ret);
++	rwbase_post_schedule();
+ 	return ret;
+ }
+
+@@ -237,6 +239,8 @@ static int __sched rwbase_write_lock(str
+ 	/* Force readers into slow path */
+ 	atomic_sub(READER_BIAS, &rwb->readers);
+
++	rwbase_pre_schedule();
++
+ 	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
+ 	if (__rwbase_write_trylock(rwb))
+ 		goto out_unlock;
+@@ -248,6 +252,7 @@ static int __sched rwbase_write_lock(str
+ 		if (rwbase_signal_pending_state(state, current)) {
+ 			rwbase_restore_current_state();
+ 			__rwbase_write_unlock(rwb, 0, flags);
++			rwbase_post_schedule();
+ 			trace_contention_end(rwb, -EINTR);
+ 			return -EINTR;
+ 		}
+@@ -266,6 +271,7 @@ static int __sched rwbase_write_lock(str
+
+ out_unlock:
+ 	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
++	rwbase_post_schedule();
+ 	return 0;
+ }
+
+--- a/kernel/locking/rwsem.c
++++ b/kernel/locking/rwsem.c
+@@ -1427,8 +1427,14 @@ static inline void __downgrade_write(str
+ #define rwbase_signal_pending_state(state, current)	\
+ 	signal_pending_state(state, current)
+
++#define rwbase_pre_schedule()				\
++	rt_mutex_pre_schedule()
++
+ #define rwbase_schedule()				\
+-	schedule()
++	rt_mutex_schedule()
++
++#define rwbase_post_schedule()				\
++	rt_mutex_post_schedule()
+
+ #include "rwbase_rt.c"
+
+--- a/kernel/locking/spinlock_rt.c
++++ b/kernel/locking/spinlock_rt.c
+@@ -184,9 +184,13 @@ static __always_inline int rwbase_rtmut
+
+ #define rwbase_signal_pending_state(state, current)	(0)
+
++#define rwbase_pre_schedule()
++
+ #define rwbase_schedule()				\
+ 	schedule_rtlock()
+
++#define rwbase_post_schedule()
++
+ #include "rwbase_rt.c"
+ /*
+  * The common functions which get wrapped into the rwlock API.
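The substance of the patch is an ordering discipline: rt_mutex_pre_schedule() flushes any scheduler bookkeeping that may itself block on an rtlock *before* the task enqueues itself as a waiter, rt_mutex_schedule() then blocks with nothing left to flush, and rt_mutex_post_schedule() re-arms the deferred work only once the waiter is unqueued again. Below is a minimal user-space sketch of that discipline; every demo_* name and flush_pending_io() are invented stand-ins for illustration, not the kernel helpers themselves.

/*
 * Toy model of the pre/post schedule bracketing.  The invariant the
 * patch enforces: work that may take an rtlock never runs while the
 * task is already queued as a waiter, so it cannot enqueue a second
 * waiter for the same task.
 */
#include <stdbool.h>
#include <stdio.h>

static bool waiter_enqueued;   /* models "current sits on a wait list" */

/* Stands in for deferred work that may itself block on an rtlock. */
static void flush_pending_io(void)
{
        if (waiter_enqueued)
                fprintf(stderr, "BUG: rtlock work with a waiter already queued\n");
}

static void demo_pre_schedule(void)
{
        flush_pending_io();     /* do all rtlock-taking work up front */
}

static void demo_schedule(void)
{
        /* Plain block; nothing left that could recurse into an enqueue. */
}

static void demo_post_schedule(void)
{
        /* Re-arm deferred work now that we are no longer a waiter. */
}

static void demo_slowlock(void)
{
        demo_pre_schedule();    /* before the waiter is queued */
        waiter_enqueued = true;
        demo_schedule();
        waiter_enqueued = false;
        demo_post_schedule();   /* waiter is unqueued again */
}

int main(void)
{
        demo_slowlock();
        puts("slowlock completed without recursive waiter enqueue");
        return 0;
}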
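The rwsem.c and spinlock_rt.c hunks also show why the helpers surface as rwbase_* macros: each file binds the macros and then textually #includes the shared slow path rwbase_rt.c, so one template body compiles into two flavours. The RT-rwsem flavour maps the hooks onto the rt_mutex helpers, while the rtlock flavour leaves pre/post empty because schedule_rtlock() needs no bracketing. A self-contained toy of that inclusion trick follows, with DEFINE_RWBASE_LOCK as an invented stand-in for the real #include "rwbase_rt.c":

#include <stdio.h>

/* Shared "template" body: its rwbase_* calls are expanded with
 * whatever macro bindings are in effect at the point of use. */
#define DEFINE_RWBASE_LOCK(name)                        \
static void name(void)                                  \
{                                                       \
        rwbase_pre_schedule();                          \
        rwbase_schedule();                              \
        rwbase_post_schedule();                         \
}

/* rwsem flavour: full bracketing, as the patch adds in rwsem.c. */
#define rwbase_pre_schedule()   puts("rt_mutex_pre_schedule()")
#define rwbase_schedule()       puts("rt_mutex_schedule()")
#define rwbase_post_schedule()  puts("rt_mutex_post_schedule()")
DEFINE_RWBASE_LOCK(rwsem_flavour_lock)
#undef rwbase_pre_schedule
#undef rwbase_schedule
#undef rwbase_post_schedule

/* rtlock flavour: pre/post expand to nothing, as in spinlock_rt.c. */
#define rwbase_pre_schedule()
#define rwbase_schedule()       puts("schedule_rtlock()")
#define rwbase_post_schedule()
DEFINE_RWBASE_LOCK(rtlock_flavour_lock)

int main(void)
{
        rwsem_flavour_lock();   /* prints the three rt_mutex_* hooks */
        rtlock_flavour_lock();  /* prints only schedule_rtlock() */
        return 0;
}

Compiled and run, the first call prints all three rt_mutex_* hook names and the second only schedule_rtlock(), mirroring how the same rwbase_rt.c body picks up different bindings in each includer.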