From d334127aee6c64a7cdf5e5d3420666bcfdeb8ba6 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 12 Oct 2017 17:31:14 +0200
Subject: [PATCH 147/347] rtmutex: wire up RT's locking
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 include/linux/mutex.h            | 20 +++++++++++++-------
 include/linux/rwsem.h            | 11 +++++++++++
 include/linux/spinlock.h         | 12 +++++++++++-
 include/linux/spinlock_api_smp.h |  4 +++-
 include/linux/spinlock_types.h   | 11 ++++++++---
 kernel/locking/Makefile          |  9 ++++++++-
 kernel/locking/spinlock.c        |  7 +++++++
 kernel/locking/spinlock_debug.c  |  5 +++++
 8 files changed, 66 insertions(+), 13 deletions(-)

diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 8f7cdf83f359..6aa217c6e3ca 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -22,6 +22,17 @@
 
 struct ww_acquire_ctx;
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+		, .dep_map = { .name = #lockname }
+#else
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+#endif
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+# include <linux/mutex_rt.h>
+#else
+
 /*
  * Simple, straightforward mutexes with strict semantics:
  *
@@ -118,13 +129,6 @@ do { \
 	__mutex_init((mutex), #mutex, &__key);		\
 } while (0)
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
-		, .dep_map = { .name = #lockname }
-#else
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
-#endif
-
 #define __MUTEX_INITIALIZER(lockname) \
 		{ .owner = ATOMIC_LONG_INIT(0) \
 		, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
@@ -229,4 +233,6 @@ mutex_trylock_recursive(struct mutex *lock)
 	return mutex_trylock(lock);
 }
 
+#endif /* !PREEMPT_RT_FULL */
+
 #endif /* __LINUX_MUTEX_H */
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index ab93b6eae696..b1e32373f44f 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -20,6 +20,10 @@
 #include <linux/osq_lock.h>
 #endif
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+#include <linux/rwsem_rt.h>
+#else /* PREEMPT_RT_FULL */
+
 struct rw_semaphore;
 
 #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
@@ -114,6 +118,13 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem)
 	return !list_empty(&sem->wait_list);
 }
 
+#endif /* !PREEMPT_RT_FULL */
+
+/*
+ * The functions below are the same for all rwsem implementations including
+ * the RT specific variant.
+ */
+
 /*
  * lock for reading
  */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index e089157dcf97..5f5ad0630a26 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -298,7 +298,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 })
 
 /* Include rwlock functions */
-#include <linux/rwlock.h>
+#ifdef CONFIG_PREEMPT_RT_FULL
+# include <linux/rwlock_rt.h>
+#else
+# include <linux/rwlock.h>
+#endif
 
 /*
  * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
@@ -309,6 +313,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 # include <linux/spinlock_api_up.h>
 #endif
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+# include <linux/spinlock_rt.h>
+#else /* PREEMPT_RT_FULL */
+
 /*
  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
  */
@@ -429,6 +437,8 @@ static __always_inline int spin_is_contended(spinlock_t *lock)
 
 #define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
 
+#endif /* !PREEMPT_RT_FULL */
+
 /*
  * Pull the atomic_t declaration:
  * (asm-mips/atomic.h needs above definitions)
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 42dfab89e740..29d99ae5a8ab 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -187,6 +187,8 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
 	return 0;
 }
 
-#include <linux/rwlock_api_smp.h>
+#ifndef CONFIG_PREEMPT_RT_FULL
+# include <linux/rwlock_api_smp.h>
+#endif
 
 #endif /* __LINUX_SPINLOCK_API_SMP_H */
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 5c8664d57fb8..10bac715ea96 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -11,8 +11,13 @@
 
 #include <linux/spinlock_types_raw.h>
 
-#include <linux/spinlock_types_nort.h>
-
-#include <linux/rwlock_types.h>
+#ifndef CONFIG_PREEMPT_RT_FULL
+# include <linux/spinlock_types_nort.h>
+# include <linux/rwlock_types.h>
+#else
+# include <linux/rtmutex.h>
+# include <linux/spinlock_types_rt.h>
+# include <linux/rwlock_types_rt.h>
+#endif
 
 #endif /* __LINUX_SPINLOCK_TYPES_H */
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index 392c7f23af76..c0bf04b6b965 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -3,7 +3,7 @@
 # and is generally not a function of system call inputs.
 KCOV_INSTRUMENT		:= n
 
-obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o
+obj-y += semaphore.o percpu-rwsem.o
 
 ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE)
@@ -12,7 +12,11 @@ CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE)
 endif
 
+ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
+obj-y += mutex.o
 obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
+endif
+obj-y += rwsem.o
 obj-$(CONFIG_LOCKDEP) += lockdep.o
 ifeq ($(CONFIG_PROC_FS),y)
 obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
@@ -25,8 +29,11 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
+ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
 obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
+endif
+obj-$(CONFIG_PREEMPT_RT_FULL) += mutex-rt.o rwsem-rt.o rwlock-rt.o
 obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
 obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
 obj-$(CONFIG_WW_MUTEX_SELFTEST) += test-ww_mutex.o
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
index 936f3d14dd6b..e89b70f474af 100644
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -117,8 +117,11 @@ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)		\
  *         __[spin|read|write]_lock_bh()
  */
 BUILD_LOCK_OPS(spin, raw_spinlock);
+
+#ifndef CONFIG_PREEMPT_RT_FULL
 BUILD_LOCK_OPS(read, rwlock);
 BUILD_LOCK_OPS(write, rwlock);
+#endif
 
 #endif
 
@@ -202,6 +205,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
 EXPORT_SYMBOL(_raw_spin_unlock_bh);
 #endif
 
+#ifndef CONFIG_PREEMPT_RT_FULL
+
 #ifndef CONFIG_INLINE_READ_TRYLOCK
 int __lockfunc _raw_read_trylock(rwlock_t *lock)
 {
@@ -346,6 +351,8 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
 EXPORT_SYMBOL(_raw_write_unlock_bh);
 #endif
 
+#endif /* !PREEMPT_RT_FULL */
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
index 03595c29c566..d63df281b495 100644
--- a/kernel/locking/spinlock_debug.c
+++ b/kernel/locking/spinlock_debug.c
@@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
 
 EXPORT_SYMBOL(__raw_spin_lock_init);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 void __rwlock_init(rwlock_t *lock, const char *name,
 		   struct lock_class_key *key)
 {
@@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
 }
 
 EXPORT_SYMBOL(__rwlock_init);
+#endif
 
 static void spin_dump(raw_spinlock_t *lock, const char *msg)
 {
@@ -135,6 +137,7 @@ void do_raw_spin_unlock(raw_spinlock_t *lock)
 	arch_spin_unlock(&lock->raw_lock);
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 static void rwlock_bug(rwlock_t *lock, const char *msg)
 {
 	if (!debug_locks_off())
@@ -224,3 +227,5 @@ void do_raw_write_unlock(rwlock_t *lock)
 	debug_write_unlock(lock);
 	arch_write_unlock(&lock->raw_lock);
 }
+
+#endif
-- 
2.36.1
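
For context, a minimal sketch of the caller-visible effect of the wiring above. It is not part of the patch and the demo_* names are hypothetical; it only illustrates that code keeps using the ordinary lock APIs, which on a CONFIG_PREEMPT_RT_FULL build resolve to the rt_mutex-based implementations pulled in by the new #ifdef blocks, while raw_spinlock_t stays a true spinning lock.

/*
 * Illustrative only -- not part of the patch above. The demo_* identifiers
 * are made up. The calls below are identical on both configurations; only
 * the implementation behind them differs. With CONFIG_PREEMPT_RT_FULL=y,
 * spinlock_t and struct mutex become sleeping, rt_mutex-based locks
 * (built from mutex-rt.o / rwsem-rt.o / rwlock-rt.o per the Makefile hunk),
 * while raw_spinlock_t keeps the original spinning behaviour.
 */
#include <linux/spinlock.h>
#include <linux/mutex.h>

static DEFINE_SPINLOCK(demo_lock);		/* sleeping lock on RT_FULL */
static DEFINE_RAW_SPINLOCK(demo_raw_lock);	/* always a real spinlock */
static DEFINE_MUTEX(demo_mutex);		/* rt_mutex based on RT_FULL */

static void demo_locking(void)
{
	spin_lock(&demo_lock);
	spin_unlock(&demo_lock);

	raw_spin_lock(&demo_raw_lock);
	raw_spin_unlock(&demo_raw_lock);

	mutex_lock(&demo_mutex);
	mutex_unlock(&demo_mutex);
}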