From b15a952c52a6825376d3e7f6c1bf5c886c6d8b74 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sat, 27 Apr 2024 12:06:00 +0200
Subject: Adding debian version 5.10.209-2.

Signed-off-by: Daniel Baumann
---
 ...0175-locking-rtmutex-wire-up-RT-s-locking.patch | 348 +++++++++++++++++++++
 1 file changed, 348 insertions(+)
 create mode 100644 debian/patches-rt/0175-locking-rtmutex-wire-up-RT-s-locking.patch

diff --git a/debian/patches-rt/0175-locking-rtmutex-wire-up-RT-s-locking.patch b/debian/patches-rt/0175-locking-rtmutex-wire-up-RT-s-locking.patch
new file mode 100644
index 000000000..4a55d2f15
--- /dev/null
+++ b/debian/patches-rt/0175-locking-rtmutex-wire-up-RT-s-locking.patch
@@ -0,0 +1,348 @@
+From 48720bc07f81cfe49fe8e2a7afa1f18acd9227dd Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Thu, 12 Oct 2017 17:31:14 +0200
+Subject: [PATCH 175/323] locking/rtmutex: wire up RT's locking
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+Signed-off-by: Thomas Gleixner
+Signed-off-by: Sebastian Andrzej Siewior
+---
+ include/linux/mutex.h | 26 ++++++++++++++++----------
+ include/linux/rwsem.h | 12 ++++++++++++
+ include/linux/spinlock.h | 12 +++++++++++-
+ include/linux/spinlock_api_smp.h | 4 +++-
+ include/linux/spinlock_types.h | 11 ++++++++---
+ include/linux/spinlock_types_up.h | 2 +-
+ kernel/Kconfig.preempt | 1 +
+ kernel/locking/Makefile | 10 +++++++---
+ kernel/locking/rwsem.c | 6 ++++++
+ kernel/locking/spinlock.c | 7 +++++++
+ kernel/locking/spinlock_debug.c | 5 +++++
+ 11 files changed, 77 insertions(+), 19 deletions(-)
+
+diff --git a/include/linux/mutex.h b/include/linux/mutex.h
+index 4d671fba3cab..e45774a337d2 100644
+--- a/include/linux/mutex.h
++++ b/include/linux/mutex.h
+@@ -22,6 +22,20 @@
+ 
+ struct ww_acquire_ctx;
+ 
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
++ , .dep_map = { \
++ .name = #lockname, \
++ .wait_type_inner = LD_WAIT_SLEEP, \
++ }
++#else
++# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
++#endif
++
++#ifdef CONFIG_PREEMPT_RT
++# include 
++#else
++
+ /*
+ * Simple, straightforward mutexes with strict semantics:
+ *
+@@ -119,16 +133,6 @@ do { \
+ __mutex_init((mutex), #mutex, &__key); \
+ } while (0)
+ 
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+- , .dep_map = { \
+- .name = #lockname, \
+- .wait_type_inner = LD_WAIT_SLEEP, \
+- }
+-#else
+-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+-#endif
+-
+ #define __MUTEX_INITIALIZER(lockname) \
+ { .owner = ATOMIC_LONG_INIT(0) \
+ , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
+@@ -224,4 +228,6 @@ enum mutex_trylock_recursive_enum {
+ extern /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
+ mutex_trylock_recursive(struct mutex *lock);
+ 
++#endif /* !PREEMPT_RT */
++
+ #endif /* __LINUX_MUTEX_H */
+diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
+index 4c715be48717..9323af8a9244 100644
+--- a/include/linux/rwsem.h
++++ b/include/linux/rwsem.h
+@@ -16,6 +16,11 @@
+ #include 
+ #include 
+ #include 
++
++#ifdef CONFIG_PREEMPT_RT
++#include 
++#else /* PREEMPT_RT */
++
+ #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+ #include 
+ #endif
+@@ -119,6 +124,13 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem)
+ return !list_empty(&sem->wait_list);
+ }
+ 
++#endif /* !PREEMPT_RT */
++
++/*
++ * The functions below are the same for all rwsem implementations including
++ * the RT specific variant.
++ */
++
+ /*
+ * lock for reading
+ */
+diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
+index 79897841a2cc..c3c70291b46c 100644
+--- a/include/linux/spinlock.h
++++ b/include/linux/spinlock.h
+@@ -309,7 +309,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
+ })
+ 
+ /* Include rwlock functions */
+-#include 
++#ifdef CONFIG_PREEMPT_RT
++# include 
++#else
++# include 
++#endif
+ 
+ /*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+@@ -320,6 +324,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
+ # include 
+ #endif
+ 
++#ifdef CONFIG_PREEMPT_RT
++# include 
++#else /* PREEMPT_RT */
++
+ /*
+ * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
+ */
+@@ -454,6 +462,8 @@ static __always_inline int spin_is_contended(spinlock_t *lock)
+ 
+ #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
+ 
++#endif /* !PREEMPT_RT */
++
+ /*
+ * Pull the atomic_t declaration:
+ * (asm-mips/atomic.h needs above definitions)
+diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
+index 19a9be9d97ee..da38149f2843 100644
+--- a/include/linux/spinlock_api_smp.h
++++ b/include/linux/spinlock_api_smp.h
+@@ -187,6 +187,8 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
+ return 0;
+ }
+ 
+-#include 
++#ifndef CONFIG_PREEMPT_RT
++# include 
++#endif
+ 
+ #endif /* __LINUX_SPINLOCK_API_SMP_H */
+diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
+index 5c8664d57fb8..8d896d3e1a01 100644
+--- a/include/linux/spinlock_types.h
++++ b/include/linux/spinlock_types.h
+@@ -11,8 +11,13 @@
+ 
+ #include 
+ 
+-#include 
+-
+-#include 
++#ifndef CONFIG_PREEMPT_RT
++# include 
++# include 
++#else
++# include 
++# include 
++# include 
++#endif
+ 
+ #endif /* __LINUX_SPINLOCK_TYPES_H */
+diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
+index c09b6407ae1b..d9b371fa13e0 100644
+--- a/include/linux/spinlock_types_up.h
++++ b/include/linux/spinlock_types_up.h
+@@ -1,7 +1,7 @@
+ #ifndef __LINUX_SPINLOCK_TYPES_UP_H
+ #define __LINUX_SPINLOCK_TYPES_UP_H
+ 
+-#ifndef __LINUX_SPINLOCK_TYPES_H
++#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__LINUX_RT_MUTEX_H)
+ # error "please don't include this file directly"
+ #endif
+ 
+diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
+index bf82259cff96..cbe3aa495519 100644
+--- a/kernel/Kconfig.preempt
++++ b/kernel/Kconfig.preempt
+@@ -59,6 +59,7 @@ config PREEMPT_RT
+ bool "Fully Preemptible Kernel (Real-Time)"
+ depends on EXPERT && ARCH_SUPPORTS_RT
+ select PREEMPTION
++ select RT_MUTEXES
+ help
+ This option turns the kernel into a real-time kernel by replacing
+ various locking primitives (spinlocks, rwlocks, etc.) with
+diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
+index 6d11cfb9b41f..c7fbf737e16e 100644
+--- a/kernel/locking/Makefile
++++ b/kernel/locking/Makefile
+@@ -3,7 +3,7 @@
+ # and is generally not a function of system call inputs.
+ KCOV_INSTRUMENT := n
+ 
+-obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o
++obj-y += semaphore.o rwsem.o percpu-rwsem.o
+ 
+ # Avoid recursion lockdep -> KCSAN -> ... -> lockdep.
+ KCSAN_SANITIZE_lockdep.o := n
+@@ -15,19 +15,23 @@ CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE)
+ endif
+ 
+-obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
+ obj-$(CONFIG_LOCKDEP) += lockdep.o
+ ifeq ($(CONFIG_PROC_FS),y)
+ obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
+ endif
+ obj-$(CONFIG_SMP) += spinlock.o
+-obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
+ obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
+ obj-$(CONFIG_QUEUED_SPINLOCKS) += qspinlock.o
+ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
+ obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
+ obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
+ obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
++ifneq ($(CONFIG_PREEMPT_RT),y)
++obj-y += mutex.o
++obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
++obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
++endif
++obj-$(CONFIG_PREEMPT_RT) += mutex-rt.o rwsem-rt.o rwlock-rt.o
+ obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
+ obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
+ obj-$(CONFIG_WW_MUTEX_SELFTEST) += test-ww_mutex.o
+diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
+index cc5cc889b5b7..f7c909ef1261 100644
+--- a/kernel/locking/rwsem.c
++++ b/kernel/locking/rwsem.c
+@@ -28,6 +28,7 @@
+ #include 
+ #include 
+ 
++#ifndef CONFIG_PREEMPT_RT
+ #include "lock_events.h"
+ 
+ /*
+@@ -1494,6 +1495,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
+ if (tmp & RWSEM_FLAG_WAITERS)
+ rwsem_downgrade_wake(sem);
+ }
++#endif
+ 
+ /*
+ * lock for reading
+@@ -1657,7 +1659,9 @@ void down_read_non_owner(struct rw_semaphore *sem)
+ {
+ might_sleep();
+ __down_read(sem);
++#ifndef CONFIG_PREEMPT_RT
+ __rwsem_set_reader_owned(sem, NULL);
++#endif
+ }
+ EXPORT_SYMBOL(down_read_non_owner);
+ 
+@@ -1686,7 +1690,9 @@ EXPORT_SYMBOL(down_write_killable_nested);
+ 
+ void up_read_non_owner(struct rw_semaphore *sem)
+ {
++#ifndef CONFIG_PREEMPT_RT
+ DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
++#endif
+ __up_read(sem);
+ }
+ EXPORT_SYMBOL(up_read_non_owner);
+diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
+index 0ff08380f531..45445a2f1799 100644
+--- a/kernel/locking/spinlock.c
++++ b/kernel/locking/spinlock.c
+@@ -124,8 +124,11 @@ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
+ * __[spin|read|write]_lock_bh()
+ */
+ BUILD_LOCK_OPS(spin, raw_spinlock);
++
++#ifndef CONFIG_PREEMPT_RT
+ BUILD_LOCK_OPS(read, rwlock);
+ BUILD_LOCK_OPS(write, rwlock);
++#endif
+ 
+ #endif
+ 
+@@ -209,6 +212,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
+ EXPORT_SYMBOL(_raw_spin_unlock_bh);
+ #endif
+ 
++#ifndef CONFIG_PREEMPT_RT
++
+ #ifndef CONFIG_INLINE_READ_TRYLOCK
+ int __lockfunc _raw_read_trylock(rwlock_t *lock)
+ {
+@@ -353,6 +358,8 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
+ EXPORT_SYMBOL(_raw_write_unlock_bh);
+ #endif
+ 
++#endif /* !PREEMPT_RT */
++
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ 
+ void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
+diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
+index b9d93087ee66..72e306e0e8a3 100644
+--- a/kernel/locking/spinlock_debug.c
++++ b/kernel/locking/spinlock_debug.c
+@@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+ 
+ EXPORT_SYMBOL(__raw_spin_lock_init);
+ 
++#ifndef CONFIG_PREEMPT_RT
+ void __rwlock_init(rwlock_t *lock, const char *name,
+ struct lock_class_key *key)
+ {
+@@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
+ }
+ 
+ EXPORT_SYMBOL(__rwlock_init);
++#endif
+ 
+ static void spin_dump(raw_spinlock_t *lock, const char *msg)
+ {
+@@ -139,6 +141,7 @@ void do_raw_spin_unlock(raw_spinlock_t *lock)
+ arch_spin_unlock(&lock->raw_lock);
+ }
+ 
++#ifndef CONFIG_PREEMPT_RT
+ static void rwlock_bug(rwlock_t *lock, const char *msg)
+ {
+ if (!debug_locks_off())
+@@ -228,3 +231,5 @@ void do_raw_write_unlock(rwlock_t *lock)
+ debug_write_unlock(lock);
+ arch_write_unlock(&lock->raw_lock);
+ }
++
++#endif
+-- 
+2.43.0
+