| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-06 01:02:38 +0000 |
|---|---|---|
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-06 01:02:38 +0000 |
| commit | 08b74a000942a380fe028845f92cd3a0dee827d5 (patch) | |
| tree | aa78b4e12607c3e1fcce8d5cc42df4330792f118 /debian/patches-rt/0069-list_bl-fixup-bogus-lockdep-warning.patch | |
| parent | Adding upstream version 4.19.249. (diff) | |
| download | linux-debian/4.19.249-2.tar.xz, linux-debian/4.19.249-2.zip | |
Adding debian version 4.19.249-2.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0069-list_bl-fixup-bogus-lockdep-warning.patch')
| -rw-r--r-- | debian/patches-rt/0069-list_bl-fixup-bogus-lockdep-warning.patch | 104 |

1 file changed, 104 insertions, 0 deletions
diff --git a/debian/patches-rt/0069-list_bl-fixup-bogus-lockdep-warning.patch b/debian/patches-rt/0069-list_bl-fixup-bogus-lockdep-warning.patch
new file mode 100644
index 000000000..9fe3abb77
--- /dev/null
+++ b/debian/patches-rt/0069-list_bl-fixup-bogus-lockdep-warning.patch
@@ -0,0 +1,104 @@
+From 6702144cc338594e14d79ce9dd255f50d21840f9 Mon Sep 17 00:00:00 2001
+From: Josh Cartwright <joshc@ni.com>
+Date: Thu, 31 Mar 2016 00:04:25 -0500
+Subject: [PATCH 069/347] list_bl: fixup bogus lockdep warning
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+At first glance, the use of 'static inline' seems appropriate for
+INIT_HLIST_BL_HEAD().
+
+However, when a 'static inline' function invocation is inlined by gcc,
+all callers share any static local data declared within that inline
+function.
+
+This presents a problem for how lockdep classes are setup. raw_spinlocks, for
+example, when CONFIG_DEBUG_SPINLOCK,
+
+	# define raw_spin_lock_init(lock)			\
+	do {							\
+		static struct lock_class_key __key;		\
+								\
+		__raw_spin_lock_init((lock), #lock, &__key);	\
+	} while (0)
+
+When this macro is expanded into a 'static inline' caller, like
+INIT_HLIST_BL_HEAD():
+
+	static inline INIT_HLIST_BL_HEAD(struct hlist_bl_head *h)
+	{
+		h->first = NULL;
+		raw_spin_lock_init(&h->lock);
+	}
+
+...the static local lock_class_key object is made a function static.
+
+For compilation units which initialize invoke INIT_HLIST_BL_HEAD() more
+than once, then, all of the invocations share this same static local
+object.
+
+This can lead to some very confusing lockdep splats (example below).
+Solve this problem by forcing the INIT_HLIST_BL_HEAD() to be a macro,
+which prevents the lockdep class object sharing.
+
+  =============================================
+  [ INFO: possible recursive locking detected ]
+  4.4.4-rt11 #4 Not tainted
+  ---------------------------------------------
+  kswapd0/59 is trying to acquire lock:
+   (&h->lock#2){+.+.-.}, at: mb_cache_shrink_scan
+
+  but task is already holding lock:
+   (&h->lock#2){+.+.-.}, at: mb_cache_shrink_scan
+
+  other info that might help us debug this:
+   Possible unsafe locking scenario:
+
+         CPU0
+         ----
+    lock(&h->lock#2);
+    lock(&h->lock#2);
+
+   *** DEADLOCK ***
+
+   May be due to missing lock nesting notation
+
+  2 locks held by kswapd0/59:
+   #0: (shrinker_rwsem){+.+...}, at: rt_down_read_trylock
+   #1: (&h->lock#2){+.+.-.}, at: mb_cache_shrink_scan
+
+Reported-by: Luis Claudio R. Goncalves <lclaudio@uudg.org>
+Tested-by: Luis Claudio R. Goncalves <lclaudio@uudg.org>
+Signed-off-by: Josh Cartwright <joshc@ni.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/list_bl.h | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
+index 69b659259bac..0b5de7d9ffcf 100644
+--- a/include/linux/list_bl.h
++++ b/include/linux/list_bl.h
+@@ -43,13 +43,15 @@ struct hlist_bl_node {
+ 	struct hlist_bl_node *next, **pprev;
+ };
+
+-static inline void INIT_HLIST_BL_HEAD(struct hlist_bl_head *h)
+-{
+-	h->first = NULL;
+ #ifdef CONFIG_PREEMPT_RT_BASE
+-	raw_spin_lock_init(&h->lock);
++#define INIT_HLIST_BL_HEAD(h) \
++do { \
++	(h)->first = NULL; \
++	raw_spin_lock_init(&(h)->lock); \
++} while (0)
++#else
++#define INIT_HLIST_BL_HEAD(h) (h)->first = NULL
+ #endif
+-}
+
+ static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
+ {
+--
+2.36.1
+
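For context on the mechanism the patch description walks through, here is a minimal user-space sketch (not part of the patch, and not kernel code) of why a static local inside a 'static inline' function defeats per-call-site lockdep keys while a macro does not. The names fake_lock_init and FAKE_INIT_MACRO are hypothetical stand-ins for the hidden `static struct lock_class_key __key` inside raw_spin_lock_init() and for the macro form of INIT_HLIST_BL_HEAD() that the patch introduces.

```c
/*
 * Minimal user-space sketch (not kernel code, not part of the patch):
 * a static local inside a 'static inline' function is one object shared
 * by every caller in the compilation unit, while a static local inside
 * a macro body is a distinct object at every expansion site.
 */
#include <stdio.h>

/* Stand-in for INIT_HLIST_BL_HEAD() as a 'static inline' function:
 * the static key (think 'static struct lock_class_key __key') is shared. */
static inline void fake_lock_init(const char *name)
{
	static int key;	/* one object for all call sites */
	printf("inline: %s -> key at %p\n", name, (void *)&key);
}

/* Stand-in for the macro form the patch switches to:
 * each expansion gets its own static key, hence its own lock class. */
#define FAKE_INIT_MACRO(name)						\
do {									\
	static int key;	/* one object per expansion site */		\
	printf("macro:  %s -> key at %p\n", (name), (void *)&key);	\
} while (0)

int main(void)
{
	fake_lock_init("head_a");	/* prints the same address ... */
	fake_lock_init("head_b");	/* ... as this one: one shared class */

	FAKE_INIT_MACRO("head_a");	/* prints a distinct address ... */
	FAKE_INIT_MACRO("head_b");	/* ... from this one: one class each */
	return 0;
}
```

Run as a plain C program, the first two calls report the same key address (one lockdep class for every hlist_bl_head in the compilation unit, which is what produces the bogus recursive-locking splat quoted above), while the two macro expansions report distinct addresses, i.e. one class per initialization site, which is the behavior the patch restores.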