author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-06 01:02:38 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-06 01:02:38 +0000 |
commit | 08b74a000942a380fe028845f92cd3a0dee827d5 (patch) | |
tree | aa78b4e12607c3e1fcce8d5cc42df4330792f118 /debian/patches-rt/0140-rtmutex-Provide-rt_mutex_slowlock_locked.patch | |
parent | Adding upstream version 4.19.249. (diff) | |
download | linux-08b74a000942a380fe028845f92cd3a0dee827d5.tar.xz linux-08b74a000942a380fe028845f92cd3a0dee827d5.zip | |
Adding debian version 4.19.249-2. (debian/4.19.249-2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0140-rtmutex-Provide-rt_mutex_slowlock_locked.patch')
-rw-r--r-- | debian/patches-rt/0140-rtmutex-Provide-rt_mutex_slowlock_locked.patch | 145 |
1 file changed, 145 insertions, 0 deletions
diff --git a/debian/patches-rt/0140-rtmutex-Provide-rt_mutex_slowlock_locked.patch b/debian/patches-rt/0140-rtmutex-Provide-rt_mutex_slowlock_locked.patch
new file mode 100644
index 000000000..d63649047
--- /dev/null
+++ b/debian/patches-rt/0140-rtmutex-Provide-rt_mutex_slowlock_locked.patch
@@ -0,0 +1,145 @@
+From 2263182a5f18a0abbe7ba2d582f42308580bbd31 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 12 Oct 2017 16:14:22 +0200
+Subject: [PATCH 140/347] rtmutex: Provide rt_mutex_slowlock_locked()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+This is the inner-part of rt_mutex_slowlock(), required for rwsem-rt.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/locking/rtmutex.c        | 67 +++++++++++++++++++--------------
+ kernel/locking/rtmutex_common.h |  7 ++++
+ 2 files changed, 45 insertions(+), 29 deletions(-)
+
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 333c4b7aa8e8..e9255b28de8f 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1244,35 +1244,16 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
+ 	}
+ }
+ 
+-/*
+- * Slow path lock function:
+- */
+-static int __sched
+-rt_mutex_slowlock(struct rt_mutex *lock, int state,
+-		  struct hrtimer_sleeper *timeout,
+-		  enum rtmutex_chainwalk chwalk)
++int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
++				     struct hrtimer_sleeper *timeout,
++				     enum rtmutex_chainwalk chwalk,
++				     struct rt_mutex_waiter *waiter)
+ {
+-	struct rt_mutex_waiter waiter;
+-	unsigned long flags;
+-	int ret = 0;
+-
+-	rt_mutex_init_waiter(&waiter);
+-
+-	/*
+-	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
+-	 * be called in early boot if the cmpxchg() fast path is disabled
+-	 * (debug, no architecture support). In this case we will acquire the
+-	 * rtmutex with lock->wait_lock held. But we cannot unconditionally
+-	 * enable interrupts in that early boot case. So we need to use the
+-	 * irqsave/restore variants.
+-	 */
+-	raw_spin_lock_irqsave(&lock->wait_lock, flags);
++	int ret;
+ 
+ 	/* Try to acquire the lock again: */
+-	if (try_to_take_rt_mutex(lock, current, NULL)) {
+-		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
++	if (try_to_take_rt_mutex(lock, current, NULL))
+ 		return 0;
+-	}
+ 
+ 	set_current_state(state);
+ 
+@@ -1280,16 +1261,16 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ 	if (unlikely(timeout))
+ 		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
+ 
+-	ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
++	ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk);
+ 
+ 	if (likely(!ret))
+ 		/* sleep on the mutex */
+-		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
++		ret = __rt_mutex_slowlock(lock, state, timeout, waiter);
+ 
+ 	if (unlikely(ret)) {
+ 		__set_current_state(TASK_RUNNING);
+-		remove_waiter(lock, &waiter);
+-		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
++		remove_waiter(lock, waiter);
++		rt_mutex_handle_deadlock(ret, chwalk, waiter);
+ 	}
+ 
+ 	/*
+@@ -1297,6 +1278,34 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ 	 * unconditionally. We might have to fix that up.
+ 	 */
+ 	fixup_rt_mutex_waiters(lock);
++	return ret;
++}
++
++/*
++ * Slow path lock function:
++ */
++static int __sched
++rt_mutex_slowlock(struct rt_mutex *lock, int state,
++		  struct hrtimer_sleeper *timeout,
++		  enum rtmutex_chainwalk chwalk)
++{
++	struct rt_mutex_waiter waiter;
++	unsigned long flags;
++	int ret = 0;
++
++	rt_mutex_init_waiter(&waiter);
++
++	/*
++	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
++	 * be called in early boot if the cmpxchg() fast path is disabled
++	 * (debug, no architecture support). In this case we will acquire the
++	 * rtmutex with lock->wait_lock held. But we cannot unconditionally
++	 * enable interrupts in that early boot case. So we need to use the
++	 * irqsave/restore variants.
++	 */
++	raw_spin_lock_irqsave(&lock->wait_lock, flags);
++
++	ret = rt_mutex_slowlock_locked(lock, state, timeout, chwalk, &waiter);
+ 
+ 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+ 
+diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
+index 202e2b0d1251..66deb94765fb 100644
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
+@@ -15,6 +15,7 @@
+ 
+ #include <linux/rtmutex.h>
+ #include <linux/sched/wake_q.h>
++#include <linux/sched/debug.h>
+ 
+ /*
+  * This is the control structure for tasks blocked on a rt_mutex,
+@@ -158,6 +159,12 @@ extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
+ 				    struct wake_q_head *wqh);
+ 
+ extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
++/* RW semaphore special interface */
++
++int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
++				     struct hrtimer_sleeper *timeout,
++				     enum rtmutex_chainwalk chwalk,
++				     struct rt_mutex_waiter *waiter);
+ 
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+ # include "rtmutex-debug.h"
+-- 
+2.36.1
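The split above makes the locking protocol visible at the call site: the caller owns the stack-allocated waiter, takes lock->wait_lock with the irqsave variant, and rt_mutex_slowlock_locked() both enters and returns with wait_lock held. Below is a minimal C sketch of that calling convention, mirroring the reworked rt_mutex_slowlock() in the hunk above; the function name example_rt_lock() and the TASK_UNINTERRUPTIBLE and RT_MUTEX_MIN_CHAINWALK arguments are illustrative assumptions, not code from this patch:

```c
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include "rtmutex_common.h"	/* rt_mutex_slowlock_locked(), waiter, chainwalk */

/* Hypothetical caller of the new inner-part API, e.g. an rwsem-rt slow path. */
static int example_rt_lock(struct rt_mutex *lock)
{
	struct rt_mutex_waiter waiter;	/* owned by the caller, not the slow path */
	unsigned long flags;
	int ret;

	rt_mutex_init_waiter(&waiter);

	/*
	 * irqsave variant for the same reason given in rt_mutex_slowlock():
	 * with the cmpxchg() fast path disabled this can run in early boot,
	 * where interrupts must not be enabled unconditionally.
	 */
	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	/* Sleeps on the mutex; wait_lock is held on entry and on return. */
	ret = rt_mutex_slowlock_locked(lock, TASK_UNINTERRUPTIBLE,
				       NULL /* no timeout */,
				       RT_MUTEX_MIN_CHAINWALK, &waiter);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	return ret;
}
```

Exporting the inner part under the "RW semaphore special interface" banner exists for exactly this shape: a caller can do its own bookkeeping on either side of the call while wait_lock stays held, rather than duplicating the block/sleep/cleanup sequence.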