author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:31 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:31 +0000
commit    f7bf6055eb1e7ff603f0907b18ece35c72b05302 (patch)
tree      13ac5b481056384a3ccc7979b86a42d208fcacdc /debian/patches-rt/0032-lockdep-Mark-emergency-sections-in-lockdep-splats.patch
parent    Merging upstream version 6.8.9. (diff)
Adding debian version 6.8.9-1. (tag: debian/6.8.9-1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0032-lockdep-Mark-emergency-sections-in-lockdep-splats.patch')
-rw-r--r--  debian/patches-rt/0032-lockdep-Mark-emergency-sections-in-lockdep-splats.patch  487
1 file changed, 487 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0032-lockdep-Mark-emergency-sections-in-lockdep-splats.patch b/debian/patches-rt/0032-lockdep-Mark-emergency-sections-in-lockdep-splats.patch
new file mode 100644
index 0000000000..64cf39b672
--- /dev/null
+++ b/debian/patches-rt/0032-lockdep-Mark-emergency-sections-in-lockdep-splats.patch
@@ -0,0 +1,487 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 18 Sep 2023 20:27:41 +0000
+Subject: [PATCH 32/48] lockdep: Mark emergency sections in lockdep splats
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+
+Mark emergency sections wherever multiple lines of lock debugging
+output are generated. In an emergency section, the CPU will not
+perform console output for the printk() calls. Instead, a flush of
+the console output is triggered when exiting the emergency section.
+This allows the full message block to be stored in the ringbuffer as
+quickly as possible.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/locking/lockdep.c | 91 +++++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 88 insertions(+), 3 deletions(-)
+
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -56,6 +56,7 @@
+ #include <linux/kprobes.h>
+ #include <linux/lockdep.h>
+ #include <linux/context_tracking.h>
++#include <linux/console.h>
+
+ #include <asm/sections.h>
+
+@@ -574,8 +575,10 @@ static struct lock_trace *save_trace(voi
+ if (!debug_locks_off_graph_unlock())
+ return NULL;
+
++ nbcon_cpu_emergency_enter();
+ print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
+ dump_stack();
++ nbcon_cpu_emergency_exit();
+
+ return NULL;
+ }
+@@ -782,6 +785,8 @@ static void lockdep_print_held_locks(str
+ {
+ int i, depth = READ_ONCE(p->lockdep_depth);
+
++ nbcon_cpu_emergency_enter();
++
+ if (!depth)
+ printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
+ else
+@@ -792,11 +797,13 @@ static void lockdep_print_held_locks(str
+ * and it's not the current task.
+ */
+ if (p != current && task_is_running(p))
+- return;
++ goto out;
+ for (i = 0; i < depth; i++) {
+ printk(" #%d: ", i);
+ print_lock(p->held_locks + i);
+ }
++out:
++ nbcon_cpu_emergency_exit();
+ }
+
+ static void print_kernel_ident(void)
+@@ -888,11 +895,13 @@ look_up_lock_class(const struct lockdep_
+ if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
+ instrumentation_begin();
+ debug_locks_off();
++ nbcon_cpu_emergency_enter();
+ printk(KERN_ERR
+ "BUG: looking up invalid subclass: %u\n", subclass);
+ printk(KERN_ERR
+ "turning off the locking correctness validator.\n");
+ dump_stack();
++ nbcon_cpu_emergency_exit();
+ instrumentation_end();
+ return NULL;
+ }
+@@ -969,11 +978,13 @@ static bool assign_lock_key(struct lockd
+ else {
+ /* Debug-check: all keys must be persistent! */
+ debug_locks_off();
++ nbcon_cpu_emergency_enter();
+ pr_err("INFO: trying to register non-static key.\n");
+ pr_err("The code is fine but needs lockdep annotation, or maybe\n");
+ pr_err("you didn't initialize this object before use?\n");
+ pr_err("turning off the locking correctness validator.\n");
+ dump_stack();
++ nbcon_cpu_emergency_exit();
+ return false;
+ }
+
+@@ -1317,8 +1328,10 @@ register_lock_class(struct lockdep_map *
+ return NULL;
+ }
+
++ nbcon_cpu_emergency_enter();
+ print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
+ dump_stack();
++ nbcon_cpu_emergency_exit();
+ return NULL;
+ }
+ nr_lock_classes++;
+@@ -1350,11 +1363,13 @@ register_lock_class(struct lockdep_map *
+ if (verbose(class)) {
+ graph_unlock();
+
++ nbcon_cpu_emergency_enter();
+ printk("\nnew class %px: %s", class->key, class->name);
+ if (class->name_version > 1)
+ printk(KERN_CONT "#%d", class->name_version);
+ printk(KERN_CONT "\n");
+ dump_stack();
++ nbcon_cpu_emergency_exit();
+
+ if (!graph_lock()) {
+ return NULL;
+@@ -1393,8 +1408,10 @@ static struct lock_list *alloc_list_entr
+ if (!debug_locks_off_graph_unlock())
+ return NULL;
+
++ nbcon_cpu_emergency_enter();
+ print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
+ dump_stack();
++ nbcon_cpu_emergency_exit();
+ return NULL;
+ }
+ nr_list_entries++;
+@@ -2040,6 +2057,8 @@ static noinline void print_circular_bug(
+
+ depth = get_lock_depth(target);
+
++ nbcon_cpu_emergency_enter();
++
+ print_circular_bug_header(target, depth, check_src, check_tgt);
+
+ parent = get_lock_parent(target);
+@@ -2058,6 +2077,8 @@ static noinline void print_circular_bug(
+
+ printk("\nstack backtrace:\n");
+ dump_stack();
++
++ nbcon_cpu_emergency_exit();
+ }
+
+ static noinline void print_bfs_bug(int ret)
+@@ -2570,6 +2591,8 @@ print_bad_irq_dependency(struct task_str
+ if (!debug_locks_off_graph_unlock() || debug_locks_silent)
+ return;
+
++ nbcon_cpu_emergency_enter();
++
+ pr_warn("\n");
+ pr_warn("=====================================================\n");
+ pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
+@@ -2619,11 +2642,13 @@ print_bad_irq_dependency(struct task_str
+ pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
+ next_root->trace = save_trace();
+ if (!next_root->trace)
+- return;
++ goto out;
+ print_shortest_lock_dependencies(forwards_entry, next_root);
+
+ pr_warn("\nstack backtrace:\n");
+ dump_stack();
++out:
++ nbcon_cpu_emergency_exit();
+ }
+
+ static const char *state_names[] = {
+@@ -2988,6 +3013,8 @@ print_deadlock_bug(struct task_struct *c
+ if (!debug_locks_off_graph_unlock() || debug_locks_silent)
+ return;
+
++ nbcon_cpu_emergency_enter();
++
+ pr_warn("\n");
+ pr_warn("============================================\n");
+ pr_warn("WARNING: possible recursive locking detected\n");
+@@ -3010,6 +3037,8 @@ print_deadlock_bug(struct task_struct *c
+
+ pr_warn("\nstack backtrace:\n");
+ dump_stack();
++
++ nbcon_cpu_emergency_exit();
+ }
+
+ /*
+@@ -3607,6 +3636,8 @@ static void print_collision(struct task_
+ struct held_lock *hlock_next,
+ struct lock_chain *chain)
+ {
++ nbcon_cpu_emergency_enter();
++
+ pr_warn("\n");
+ pr_warn("============================\n");
+ pr_warn("WARNING: chain_key collision\n");
+@@ -3623,6 +3654,8 @@ static void print_collision(struct task_
+
+ pr_warn("\nstack backtrace:\n");
+ dump_stack();
++
++ nbcon_cpu_emergency_exit();
+ }
+ #endif
+
+@@ -3713,8 +3746,10 @@ static inline int add_chain_cache(struct
+ if (!debug_locks_off_graph_unlock())
+ return 0;
+
++ nbcon_cpu_emergency_enter();
+ print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
+ dump_stack();
++ nbcon_cpu_emergency_exit();
+ return 0;
+ }
+ chain->chain_key = chain_key;
+@@ -3731,8 +3766,10 @@ static inline int add_chain_cache(struct
+ if (!debug_locks_off_graph_unlock())
+ return 0;
+
++ nbcon_cpu_emergency_enter();
+ print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
+ dump_stack();
++ nbcon_cpu_emergency_exit();
+ return 0;
+ }
+
+@@ -3971,6 +4008,8 @@ print_usage_bug(struct task_struct *curr
+ if (!debug_locks_off() || debug_locks_silent)
+ return;
+
++ nbcon_cpu_emergency_enter();
++
+ pr_warn("\n");
+ pr_warn("================================\n");
+ pr_warn("WARNING: inconsistent lock state\n");
+@@ -3999,6 +4038,8 @@ print_usage_bug(struct task_struct *curr
+
+ pr_warn("\nstack backtrace:\n");
+ dump_stack();
++
++ nbcon_cpu_emergency_exit();
+ }
+
+ /*
+@@ -4033,6 +4074,8 @@ print_irq_inversion_bug(struct task_stru
+ if (!debug_locks_off_graph_unlock() || debug_locks_silent)
+ return;
+
++ nbcon_cpu_emergency_enter();
++
+ pr_warn("\n");
+ pr_warn("========================================================\n");
+ pr_warn("WARNING: possible irq lock inversion dependency detected\n");
+@@ -4073,11 +4116,13 @@ print_irq_inversion_bug(struct task_stru
+ pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
+ root->trace = save_trace();
+ if (!root->trace)
+- return;
++ goto out;
+ print_shortest_lock_dependencies(other, root);
+
+ pr_warn("\nstack backtrace:\n");
+ dump_stack();
++out:
++ nbcon_cpu_emergency_exit();
+ }
+
+ /*
+@@ -4154,6 +4199,8 @@ void print_irqtrace_events(struct task_s
+ {
+ const struct irqtrace_events *trace = &curr->irqtrace;
+
++ nbcon_cpu_emergency_enter();
++
+ printk("irq event stamp: %u\n", trace->irq_events);
+ printk("hardirqs last enabled at (%u): [<%px>] %pS\n",
+ trace->hardirq_enable_event, (void *)trace->hardirq_enable_ip,
+@@ -4167,6 +4214,8 @@ void print_irqtrace_events(struct task_s
+ printk("softirqs last disabled at (%u): [<%px>] %pS\n",
+ trace->softirq_disable_event, (void *)trace->softirq_disable_ip,
+ (void *)trace->softirq_disable_ip);
++
++ nbcon_cpu_emergency_exit();
+ }
+
+ static int HARDIRQ_verbose(struct lock_class *class)
+@@ -4687,10 +4736,12 @@ static int mark_lock(struct task_struct
+ * We must printk outside of the graph_lock:
+ */
+ if (ret == 2) {
++ nbcon_cpu_emergency_enter();
+ printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
+ print_lock(this);
+ print_irqtrace_events(curr);
+ dump_stack();
++ nbcon_cpu_emergency_exit();
+ }
+
+ return ret;
+@@ -4731,6 +4782,8 @@ print_lock_invalid_wait_context(struct t
+ if (debug_locks_silent)
+ return 0;
+
++ nbcon_cpu_emergency_enter();
++
+ pr_warn("\n");
+ pr_warn("=============================\n");
+ pr_warn("[ BUG: Invalid wait context ]\n");
+@@ -4750,6 +4803,8 @@ print_lock_invalid_wait_context(struct t
+ pr_warn("stack backtrace:\n");
+ dump_stack();
+
++ nbcon_cpu_emergency_exit();
++
+ return 0;
+ }
+
+@@ -4954,6 +5009,8 @@ print_lock_nested_lock_not_held(struct t
+ if (debug_locks_silent)
+ return;
+
++ nbcon_cpu_emergency_enter();
++
+ pr_warn("\n");
+ pr_warn("==================================\n");
+ pr_warn("WARNING: Nested lock was not taken\n");
+@@ -4974,6 +5031,8 @@ print_lock_nested_lock_not_held(struct t
+
+ pr_warn("\nstack backtrace:\n");
+ dump_stack();
++
++ nbcon_cpu_emergency_exit();
+ }
+
+ static int __lock_is_held(const struct lockdep_map *lock, int read);
+@@ -5019,11 +5078,13 @@ static int __lock_acquire(struct lockdep
+ debug_class_ops_inc(class);
+
+ if (very_verbose(class)) {
++ nbcon_cpu_emergency_enter();
+ printk("\nacquire class [%px] %s", class->key, class->name);
+ if (class->name_version > 1)
+ printk(KERN_CONT "#%d", class->name_version);
+ printk(KERN_CONT "\n");
+ dump_stack();
++ nbcon_cpu_emergency_exit();
+ }
+
+ /*
+@@ -5150,6 +5211,7 @@ static int __lock_acquire(struct lockdep
+ #endif
+ if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
+ debug_locks_off();
++ nbcon_cpu_emergency_enter();
+ print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
+ printk(KERN_DEBUG "depth: %i max: %lu!\n",
+ curr->lockdep_depth, MAX_LOCK_DEPTH);
+@@ -5157,6 +5219,7 @@ static int __lock_acquire(struct lockdep
+ lockdep_print_held_locks(current);
+ debug_show_all_locks();
+ dump_stack();
++ nbcon_cpu_emergency_exit();
+
+ return 0;
+ }
+@@ -5176,6 +5239,8 @@ static void print_unlock_imbalance_bug(s
+ if (debug_locks_silent)
+ return;
+
++ nbcon_cpu_emergency_enter();
++
+ pr_warn("\n");
+ pr_warn("=====================================\n");
+ pr_warn("WARNING: bad unlock balance detected!\n");
+@@ -5192,6 +5257,8 @@ static void print_unlock_imbalance_bug(s
+
+ pr_warn("\nstack backtrace:\n");
+ dump_stack();
++
++ nbcon_cpu_emergency_exit();
+ }
+
+ static noinstr int match_held_lock(const struct held_lock *hlock,
+@@ -5895,6 +5962,8 @@ static void print_lock_contention_bug(st
+ if (debug_locks_silent)
+ return;
+
++ nbcon_cpu_emergency_enter();
++
+ pr_warn("\n");
+ pr_warn("=================================\n");
+ pr_warn("WARNING: bad contention detected!\n");
+@@ -5911,6 +5980,8 @@ static void print_lock_contention_bug(st
+
+ pr_warn("\nstack backtrace:\n");
+ dump_stack();
++
++ nbcon_cpu_emergency_exit();
+ }
+
+ static void
+@@ -6524,6 +6595,8 @@ print_freed_lock_bug(struct task_struct
+ if (debug_locks_silent)
+ return;
+
++ nbcon_cpu_emergency_enter();
++
+ pr_warn("\n");
+ pr_warn("=========================\n");
+ pr_warn("WARNING: held lock freed!\n");
+@@ -6536,6 +6609,8 @@ print_freed_lock_bug(struct task_struct
+
+ pr_warn("\nstack backtrace:\n");
+ dump_stack();
++
++ nbcon_cpu_emergency_exit();
+ }
+
+ static inline int not_in_range(const void* mem_from, unsigned long mem_len,
+@@ -6582,6 +6657,8 @@ static void print_held_locks_bug(void)
+ if (debug_locks_silent)
+ return;
+
++ nbcon_cpu_emergency_enter();
++
+ pr_warn("\n");
+ pr_warn("====================================\n");
+ pr_warn("WARNING: %s/%d still has locks held!\n",
+@@ -6591,6 +6668,8 @@ static void print_held_locks_bug(void)
+ lockdep_print_held_locks(current);
+ pr_warn("\nstack backtrace:\n");
+ dump_stack();
++
++ nbcon_cpu_emergency_exit();
+ }
+
+ void debug_check_no_locks_held(void)
+@@ -6609,6 +6688,7 @@ void debug_show_all_locks(void)
+ pr_warn("INFO: lockdep is turned off.\n");
+ return;
+ }
++ nbcon_cpu_emergency_enter();
+ pr_warn("\nShowing all locks held in the system:\n");
+
+ rcu_read_lock();
+@@ -6623,6 +6703,7 @@ void debug_show_all_locks(void)
+
+ pr_warn("\n");
+ pr_warn("=============================================\n\n");
++ nbcon_cpu_emergency_exit();
+ }
+ EXPORT_SYMBOL_GPL(debug_show_all_locks);
+ #endif
+@@ -6648,6 +6729,7 @@ asmlinkage __visible void lockdep_sys_ex
+ if (unlikely(curr->lockdep_depth)) {
+ if (!debug_locks_off())
+ return;
++ nbcon_cpu_emergency_enter();
+ pr_warn("\n");
+ pr_warn("================================================\n");
+ pr_warn("WARNING: lock held when returning to user space!\n");
+@@ -6656,6 +6738,7 @@ asmlinkage __visible void lockdep_sys_ex
+ pr_warn("%s/%d is leaving the kernel with locks still held!\n",
+ curr->comm, curr->pid);
+ lockdep_print_held_locks(curr);
++ nbcon_cpu_emergency_exit();
+ }
+
+ /*
+@@ -6672,6 +6755,7 @@ void lockdep_rcu_suspicious(const char *
+ bool rcu = warn_rcu_enter();
+
+ /* Note: the following can be executed concurrently, so be careful. */
++ nbcon_cpu_emergency_enter();
+ pr_warn("\n");
+ pr_warn("=============================\n");
+ pr_warn("WARNING: suspicious RCU usage\n");
+@@ -6710,6 +6794,7 @@ void lockdep_rcu_suspicious(const char *
+ lockdep_print_held_locks(curr);
+ pr_warn("\nstack backtrace:\n");
+ dump_stack();
++ nbcon_cpu_emergency_exit();
+ warn_rcu_exit(rcu);
+ }
+ EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
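
Every hunk above applies the same pattern: bracket a multi-line diagnostic with nbcon_cpu_emergency_enter()/nbcon_cpu_emergency_exit() so the whole splat is stored in the printk ringbuffer before any console flushing begins. Below is a minimal sketch of that pattern, assuming the nbcon API from <linux/console.h> introduced by this series; my_report() is a hypothetical diagnostic for illustration, not a function from the patch.

#include <linux/console.h>
#include <linux/printk.h>

static void my_report(void)
{
	/*
	 * Defer per-line console flushing for this CPU: inside the
	 * emergency section, printk() only fills the ringbuffer.
	 */
	nbcon_cpu_emergency_enter();

	pr_warn("WARNING: example multi-line splat\n");
	pr_warn("second line of the same report\n");
	dump_stack();

	/* Leaving the section triggers the deferred flush of the whole block. */
	nbcon_cpu_emergency_exit();
}

Note how hunks with early returns inside a section (e.g. lockdep_print_held_locks(), print_bad_irq_dependency(), print_irq_inversion_bug()) convert "return" into "goto out" so that nbcon_cpu_emergency_exit() is reached on every path.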