| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 10:06:00 +0000 |
|---|---|---|
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 10:06:00 +0000 |
| commit | b15a952c52a6825376d3e7f6c1bf5c886c6d8b74 (patch) | |
| tree | 1500f2f8f276908a36d8126cb632c0d6b1276764 /debian/patches-rt/0097-printk-remove-safe-buffers.patch | |
| parent | Adding upstream version 5.10.209. (diff) | |
| download | linux-b15a952c52a6825376d3e7f6c1bf5c886c6d8b74.tar.xz, linux-b15a952c52a6825376d3e7f6c1bf5c886c6d8b74.zip | |
Adding debian version 5.10.209-2. (tag: debian/5.10.209-2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0097-printk-remove-safe-buffers.patch')
| -rw-r--r-- | debian/patches-rt/0097-printk-remove-safe-buffers.patch | 877 |

1 file changed, 877 insertions, 0 deletions
diff --git a/debian/patches-rt/0097-printk-remove-safe-buffers.patch b/debian/patches-rt/0097-printk-remove-safe-buffers.patch
new file mode 100644
index 000000000..df3318972
--- /dev/null
+++ b/debian/patches-rt/0097-printk-remove-safe-buffers.patch
@@ -0,0 +1,877 @@
+From c0ee407b7db3b7ac5c24337a9d365a35450c233b Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 30 Nov 2020 01:42:00 +0106
+Subject: [PATCH 097/323] printk: remove safe buffers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+With @logbuf_lock removed, the high level printk functions for
+storing messages are lockless. Messages can be stored from any
+context, so there is no need for the NMI and safe buffers anymore.
+
+Remove the NMI and safe buffers. In NMI or safe contexts, store
+the message immediately but still use irq_work to defer the console
+printing.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/powerpc/kernel/traps.c    |   1 -
+ arch/powerpc/kernel/watchdog.c |   5 -
+ include/linux/printk.h         |  10 -
+ kernel/kexec_core.c            |   1 -
+ kernel/panic.c                 |   3 -
+ kernel/printk/internal.h       |   2 -
+ kernel/printk/printk.c         |  85 ++-------
+ kernel/printk/printk_safe.c    | 329 +--------------------------------
+ lib/nmi_backtrace.c            |   6 -
+ 9 files changed, 17 insertions(+), 425 deletions(-)
+
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 5e5a2448ae79..d39a4a6b4ddf 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -170,7 +170,6 @@ extern void panic_flush_kmsg_start(void)
+ 
+ extern void panic_flush_kmsg_end(void)
+ {
+-	printk_safe_flush_on_panic();
+ 	kmsg_dump(KMSG_DUMP_PANIC);
+ 	bust_spinlocks(0);
+ 	debug_locks_off();
+diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
+index 75b2a6c4db5a..db40e20d0c54 100644
+--- a/arch/powerpc/kernel/watchdog.c
++++ b/arch/powerpc/kernel/watchdog.c
+@@ -185,11 +185,6 @@ static void watchdog_smp_panic(int cpu, u64 tb)
+ 
+ 	wd_smp_unlock(&flags);
+ 
+-	printk_safe_flush();
+-	/*
+-	 * printk_safe_flush() seems to require another print
+-	 * before anything actually goes out to console.
+-	 */
+ 	if (sysctl_hardlockup_all_cpu_backtrace)
+ 		trigger_allbutself_cpu_backtrace();
+ 
+diff --git a/include/linux/printk.h b/include/linux/printk.h
+index 344f6da3d4c3..c6bb48f0134c 100644
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -207,8 +207,6 @@ __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
+ void dump_stack_print_info(const char *log_lvl);
+ void show_regs_print_info(const char *log_lvl);
+ extern asmlinkage void dump_stack(void) __cold;
+-extern void printk_safe_flush(void);
+-extern void printk_safe_flush_on_panic(void);
+ #else
+ static inline __printf(1, 0)
+ int vprintk(const char *s, va_list args)
+@@ -272,14 +270,6 @@ static inline void show_regs_print_info(const char *log_lvl)
+ static inline void dump_stack(void)
+ {
+ }
+-
+-static inline void printk_safe_flush(void)
+-{
+-}
+-
+-static inline void printk_safe_flush_on_panic(void)
+-{
+-}
+ #endif
+ 
+ extern int kptr_restrict;
+diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
+index 3a37fc62dc95..78a1fd9c3c8a 100644
+--- a/kernel/kexec_core.c
++++ b/kernel/kexec_core.c
+@@ -978,7 +978,6 @@ void crash_kexec(struct pt_regs *regs)
+ 	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
+ 	if (old_cpu == PANIC_CPU_INVALID) {
+ 		/* This is the 1st CPU which comes here, so go ahead. */
+-		printk_safe_flush_on_panic();
+ 		__crash_kexec(regs);
+ 
+ 		/*
+diff --git a/kernel/panic.c b/kernel/panic.c
+index bc39e2b27d31..7965f1e31224 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -324,7 +324,6 @@ void panic(const char *fmt, ...)
+ 	 * Bypass the panic_cpu check and call __crash_kexec directly.
+ 	 */
+ 	if (!_crash_kexec_post_notifiers) {
+-		printk_safe_flush_on_panic();
+ 		__crash_kexec(NULL);
+ 
+ 		/*
+@@ -348,8 +347,6 @@ void panic(const char *fmt, ...)
+ 	 */
+ 	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
+ 
+-	/* Call flush even twice. It tries harder with a single online CPU */
+-	printk_safe_flush_on_panic();
+ 	kmsg_dump(KMSG_DUMP_PANIC);
+ 
+ 	/*
+diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
+index e7acc2888c8e..e108b2ece8c7 100644
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -23,7 +23,6 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args);
+ void __printk_safe_enter(void);
+ void __printk_safe_exit(void);
+ 
+-void printk_safe_init(void);
+ bool printk_percpu_data_ready(void);
+ 
+ #define printk_safe_enter_irqsave(flags)	\
+@@ -67,6 +66,5 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args) { return 0; }
+ #define printk_safe_enter_irq() local_irq_disable()
+ #define printk_safe_exit_irq() local_irq_enable()
+ 
+-static inline void printk_safe_init(void) { }
+ static inline bool printk_percpu_data_ready(void) { return false; }
+ #endif /* CONFIG_PRINTK */
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 31a2b7a116a7..90a4cf4c23a4 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -735,27 +735,22 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
+ 	if (ret)
+ 		return ret;
+ 
+-	printk_safe_enter_irq();
+ 	if (!prb_read_valid(prb, atomic64_read(&user->seq), r)) {
+ 		if (file->f_flags & O_NONBLOCK) {
+ 			ret = -EAGAIN;
+-			printk_safe_exit_irq();
+ 			goto out;
+ 		}
+ 
+-		printk_safe_exit_irq();
+ 		ret = wait_event_interruptible(log_wait,
+ 				prb_read_valid(prb, atomic64_read(&user->seq), r));
+ 		if (ret)
+ 			goto out;
+-		printk_safe_enter_irq();
+ 	}
+ 
+ 	if (r->info->seq != atomic64_read(&user->seq)) {
+ 		/* our last seen message is gone, return error and reset */
+ 		atomic64_set(&user->seq, r->info->seq);
+ 		ret = -EPIPE;
+-		printk_safe_exit_irq();
+ 		goto out;
+ 	}
+ 
+@@ -765,7 +760,6 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
+ 			&r->info->dev_info);
+ 
+ 	atomic64_set(&user->seq, r->info->seq + 1);
+-	printk_safe_exit_irq();
+ 
+ 	if (len > count) {
+ 		ret = -EINVAL;
+@@ -800,7 +794,6 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
+ 	if (offset)
+ 		return -ESPIPE;
+ 
+-	printk_safe_enter_irq();
+ 	switch (whence) {
+ 	case SEEK_SET:
+ 		/* the first record */
+@@ -821,7 +814,6 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
+ 	default:
+ 		ret = -EINVAL;
+ 	}
+-	printk_safe_exit_irq();
+ 	return ret;
+ }
+ 
+@@ -836,7 +828,6 @@ static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
+ 
+ 	poll_wait(file, &log_wait, wait);
+ 
+-	printk_safe_enter_irq();
+ 	if (prb_read_valid_info(prb, atomic64_read(&user->seq), &info, NULL)) {
+ 		/* return error when data has vanished underneath us */
+ 		if (info.seq != atomic64_read(&user->seq))
+@@ -844,7 +835,6 @@ static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
+ 		else
+ 			ret = EPOLLIN|EPOLLRDNORM;
+ 	}
+-	printk_safe_exit_irq();
+ 
+ 	return ret;
+ }
+@@ -877,9 +867,7 @@ static int devkmsg_open(struct inode *inode, struct file *file)
+ 	prb_rec_init_rd(&user->record, &user->info,
+ 			&user->text_buf[0], sizeof(user->text_buf));
+ 
+-	printk_safe_enter_irq();
+ 	atomic64_set(&user->seq, prb_first_valid_seq(prb));
+-	printk_safe_exit_irq();
+ 
+ 	file->private_data = user;
+ 	return 0;
+@@ -1045,9 +1033,6 @@ static inline void log_buf_add_cpu(void) {}
+ 
+ static void __init set_percpu_data_ready(void)
+ {
+-	printk_safe_init();
+-	/* Make sure we set this flag only after printk_safe() init is done */
+-	barrier();
+ 	__printk_percpu_data_ready = true;
+ }
+ 
+@@ -1145,8 +1130,6 @@ void __init setup_log_buf(int early)
+ 		 new_descs, ilog2(new_descs_count),
+ 		 new_infos);
+ 
+-	printk_safe_enter_irqsave(flags);
+-
+ 	log_buf_len = new_log_buf_len;
+ 	log_buf = new_log_buf;
+ 	new_log_buf_len = 0;
+@@ -1162,8 +1145,6 @@ void __init setup_log_buf(int early)
+ 	 */
+ 	prb = &printk_rb_dynamic;
+ 
+-	printk_safe_exit_irqrestore(flags);
+-
+ 	if (seq != prb_next_seq(&printk_rb_static)) {
+ 		pr_err("dropped %llu messages\n",
+ 		       prb_next_seq(&printk_rb_static) - seq);
+@@ -1501,11 +1482,9 @@ static int syslog_print(char __user *buf, int size)
+ 		size_t n;
+ 		size_t skip;
+ 
+-		printk_safe_enter_irq();
+-		raw_spin_lock(&syslog_lock);
++		raw_spin_lock_irq(&syslog_lock);
+ 		if (!prb_read_valid(prb, syslog_seq, &r)) {
+-			raw_spin_unlock(&syslog_lock);
+-			printk_safe_exit_irq();
++			raw_spin_unlock_irq(&syslog_lock);
+ 			break;
+ 		}
+ 		if (r.info->seq != syslog_seq) {
+@@ -1534,8 +1513,7 @@ static int syslog_print(char __user *buf, int size)
+ 			syslog_partial += n;
+ 		} else
+ 			n = 0;
+-		raw_spin_unlock(&syslog_lock);
+-		printk_safe_exit_irq();
++		raw_spin_unlock_irq(&syslog_lock);
+ 
+ 		if (!n)
+ 			break;
+@@ -1569,7 +1547,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ 		return -ENOMEM;
+ 
+ 	time = printk_time;
+-	printk_safe_enter_irq();
+ 	/*
+ 	 * Find first record that fits, including all following records,
+ 	 * into the user-provided buffer for this dump.
+@@ -1590,23 +1567,20 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ 			break;
+ 		}
+ 
+-		printk_safe_exit_irq();
+ 		if (copy_to_user(buf + len, text, textlen))
+ 			len = -EFAULT;
+ 		else
+ 			len += textlen;
+-		printk_safe_enter_irq();
+ 
+ 		if (len < 0)
+ 			break;
+ 	}
+ 
+ 	if (clear) {
+-		raw_spin_lock(&syslog_lock);
++		raw_spin_lock_irq(&syslog_lock);
+ 		latched_seq_write(&clear_seq, seq);
+-		raw_spin_unlock(&syslog_lock);
++		raw_spin_unlock_irq(&syslog_lock);
+ 	}
+-	printk_safe_exit_irq();
+ 
+ 	kfree(text);
+ 	return len;
+@@ -1614,11 +1588,9 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ 
+ static void syslog_clear(void)
+ {
+-	printk_safe_enter_irq();
+-	raw_spin_lock(&syslog_lock);
++	raw_spin_lock_irq(&syslog_lock);
+ 	latched_seq_write(&clear_seq, prb_next_seq(prb));
+-	raw_spin_unlock(&syslog_lock);
+-	printk_safe_exit_irq();
++	raw_spin_unlock_irq(&syslog_lock);
+ }
+ 
+ /* Return a consistent copy of @syslog_seq. */
+@@ -1706,12 +1678,10 @@ int do_syslog(int type, char __user *buf, int len, int source)
+ 		break;
+ 	/* Number of chars in the log buffer */
+ 	case SYSLOG_ACTION_SIZE_UNREAD:
+-		printk_safe_enter_irq();
+-		raw_spin_lock(&syslog_lock);
++		raw_spin_lock_irq(&syslog_lock);
+ 		if (!prb_read_valid_info(prb, syslog_seq, &info, NULL)) {
+ 			/* No unread messages. */
+-			raw_spin_unlock(&syslog_lock);
+-			printk_safe_exit_irq();
++			raw_spin_unlock_irq(&syslog_lock);
+ 			return 0;
+ 		}
+ 		if (info.seq != syslog_seq) {
+@@ -1739,8 +1709,7 @@ int do_syslog(int type, char __user *buf, int len, int source)
+ 			}
+ 			error -= syslog_partial;
+ 		}
+-		raw_spin_unlock(&syslog_lock);
+-		printk_safe_exit_irq();
++		raw_spin_unlock_irq(&syslog_lock);
+ 		break;
+ 	/* Size of the log buffer */
+ 	case SYSLOG_ACTION_SIZE_BUFFER:
+@@ -2210,7 +2179,6 @@ asmlinkage int vprintk_emit(int facility, int level,
+ {
+ 	int printed_len;
+ 	bool in_sched = false;
+-	unsigned long flags;
+ 
+ 	/* Suppress unimportant messages after panic happens */
+ 	if (unlikely(suppress_printk))
+@@ -2224,9 +2192,7 @@ asmlinkage int vprintk_emit(int facility, int level,
+ 	boot_delay_msec(level);
+ 	printk_delay();
+ 
+-	printk_safe_enter_irqsave(flags);
+ 	printed_len = vprintk_store(facility, level, dev_info, fmt, args);
+-	printk_safe_exit_irqrestore(flags);
+ 
+ 	/* If called from the scheduler, we can not call up(). */
+ 	if (!in_sched) {
+@@ -2618,7 +2584,6 @@ void console_unlock(void)
+ {
+ 	static char ext_text[CONSOLE_EXT_LOG_MAX];
+ 	static char text[CONSOLE_LOG_MAX];
+-	unsigned long flags;
+ 	bool do_cond_resched, retry;
+ 	struct printk_info info;
+ 	struct printk_record r;
+@@ -2663,7 +2628,6 @@ void console_unlock(void)
+ 		size_t ext_len = 0;
+ 		size_t len;
+ 
+-		printk_safe_enter_irqsave(flags);
+ skip:
+ 		if (!prb_read_valid(prb, console_seq, &r))
+ 			break;
+@@ -2720,12 +2684,8 @@ void console_unlock(void)
+ 		call_console_drivers(ext_text, ext_len, text, len);
+ 		start_critical_timings();
+ 
+-		if (console_lock_spinning_disable_and_check()) {
+-			printk_safe_exit_irqrestore(flags);
++		if (console_lock_spinning_disable_and_check())
+ 			return;
+-		}
+-
+-		printk_safe_exit_irqrestore(flags);
+ 
+ 		if (do_cond_resched)
+ 			cond_resched();
+@@ -2742,8 +2702,6 @@ void console_unlock(void)
+ 	 * flush, no worries.
+ 	 */
+ 	retry = prb_read_valid(prb, console_seq, NULL);
+-	printk_safe_exit_irqrestore(flags);
+-
+ 	if (retry && console_trylock())
+ 		goto again;
+ }
+@@ -2805,13 +2763,8 @@ void console_flush_on_panic(enum con_flush_mode mode)
+ 	console_trylock();
+ 	console_may_schedule = 0;
+ 
+-	if (mode == CONSOLE_REPLAY_ALL) {
+-		unsigned long flags;
+-
+-		printk_safe_enter_irqsave(flags);
++	if (mode == CONSOLE_REPLAY_ALL)
+ 		console_seq = prb_first_valid_seq(prb);
+-		printk_safe_exit_irqrestore(flags);
+-	}
+ 	console_unlock();
+ }
+ 
+@@ -3469,11 +3422,9 @@ bool kmsg_dump_get_line(struct kmsg_dumper_iter *iter, bool syslog,
+ 	struct printk_info info;
+ 	unsigned int line_count;
+ 	struct printk_record r;
+-	unsigned long flags;
+ 	size_t l = 0;
+ 	bool ret = false;
+ 
+-	printk_safe_enter_irqsave(flags);
+ 	prb_rec_init_rd(&r, &info, line, size);
+ 
+ 	if (!iter->active)
+@@ -3497,7 +3448,6 @@ bool kmsg_dump_get_line(struct kmsg_dumper_iter *iter, bool syslog,
+ 	iter->cur_seq = r.info->seq + 1;
+ 	ret = true;
+ out:
+-	printk_safe_exit_irqrestore(flags);
+ 	if (len)
+ 		*len = l;
+ 	return ret;
+@@ -3528,7 +3478,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper_iter *iter, bool syslog,
+ {
+ 	struct printk_info info;
+ 	struct printk_record r;
+-	unsigned long flags;
+ 	u64 seq;
+ 	u64 next_seq;
+ 	size_t len = 0;
+@@ -3538,7 +3487,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper_iter *iter, bool syslog,
+ 	if (!iter->active || !buf || !size)
+ 		goto out;
+ 
+-	printk_safe_enter_irqsave(flags);
+ 	if (prb_read_valid_info(prb, iter->cur_seq, &info, NULL)) {
+ 		if (info.seq != iter->cur_seq) {
+ 			/* messages are gone, move to first available one */
+@@ -3547,10 +3495,8 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper_iter *iter, bool syslog,
+ 	}
+ 
+ 	/* last entry */
+-	if (iter->cur_seq >= iter->next_seq) {
+-		printk_safe_exit_irqrestore(flags);
++	if (iter->cur_seq >= iter->next_seq)
+ 		goto out;
+-	}
+ 
+ 	/*
+ 	 * Find first record that fits, including all following records,
+@@ -3582,7 +3528,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper_iter *iter, bool syslog,
+ 
+ 	iter->next_seq = next_seq;
+ 	ret = true;
+-	printk_safe_exit_irqrestore(flags);
+ out:
+ 	if (len_out)
+ 		*len_out = len;
+@@ -3600,12 +3545,8 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
+  */
+ void kmsg_dump_rewind(struct kmsg_dumper_iter *iter)
+ {
+-	unsigned long flags;
+-
+-	printk_safe_enter_irqsave(flags);
+ 	iter->cur_seq = latched_seq_read_nolock(&clear_seq);
+ 	iter->next_seq = prb_next_seq(prb);
+-	printk_safe_exit_irqrestore(flags);
+ }
+ EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
+ 
+diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
+index 7df8a88d4115..c23b127a6545 100644
+--- a/kernel/printk/printk_safe.c
++++ b/kernel/printk/printk_safe.c
+@@ -15,282 +15,9 @@
+ 
+ #include "internal.h"
+ 
+-/*
+- * In NMI and safe mode, printk() avoids taking locks. Instead,
+- * it uses an alternative implementation that temporary stores
+- * the strings into a per-CPU buffer. The content of the buffer
+- * is later flushed into the main ring buffer via IRQ work.
+- *
+- * The alternative implementation is chosen transparently
+- * by examining current printk() context mask stored in @printk_context
+- * per-CPU variable.
+- *
+- * The implementation allows to flush the strings also from another CPU.
+- * There are situations when we want to make sure that all buffers
+- * were handled or when IRQs are blocked.
+- */
+-
+-#define SAFE_LOG_BUF_LEN	((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) -	\
+-				sizeof(atomic_t) -			\
+-				sizeof(atomic_t) -			\
+-				sizeof(struct irq_work))
+-
+-struct printk_safe_seq_buf {
+-	atomic_t	len;	/* length of written data */
+-	atomic_t	message_lost;
+-	struct irq_work	work;	/* IRQ work that flushes the buffer */
+-	unsigned char	buffer[SAFE_LOG_BUF_LEN];
+-};
+-
+-static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
+ static DEFINE_PER_CPU(int, printk_context);
+ 
+-static DEFINE_RAW_SPINLOCK(safe_read_lock);
+-
+-#ifdef CONFIG_PRINTK_NMI
+-static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
+-#endif
+-
+-/* Get flushed in a more safe context. */
+-static void queue_flush_work(struct printk_safe_seq_buf *s)
+-{
+-	if (printk_percpu_data_ready())
+-		irq_work_queue(&s->work);
+-}
+-
+-/*
+- * Add a message to per-CPU context-dependent buffer. NMI and printk-safe
+- * have dedicated buffers, because otherwise printk-safe preempted by
+- * NMI-printk would have overwritten the NMI messages.
+- *
+- * The messages are flushed from irq work (or from panic()), possibly,
+- * from other CPU, concurrently with printk_safe_log_store(). Should this
+- * happen, printk_safe_log_store() will notice the buffer->len mismatch
+- * and repeat the write.
+- */
+-static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
+-						const char *fmt, va_list args)
+-{
+-	int add;
+-	size_t len;
+-	va_list ap;
+-
+-again:
+-	len = atomic_read(&s->len);
+-
+-	/* The trailing '\0' is not counted into len. */
+-	if (len >= sizeof(s->buffer) - 1) {
+-		atomic_inc(&s->message_lost);
+-		queue_flush_work(s);
+-		return 0;
+-	}
+-
+-	/*
+-	 * Make sure that all old data have been read before the buffer
+-	 * was reset. This is not needed when we just append data.
+-	 */
+-	if (!len)
+-		smp_rmb();
+-
+-	va_copy(ap, args);
+-	add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, ap);
+-	va_end(ap);
+-	if (!add)
+-		return 0;
+-
+-	/*
+-	 * Do it once again if the buffer has been flushed in the meantime.
+-	 * Note that atomic_cmpxchg() is an implicit memory barrier that
+-	 * makes sure that the data were written before updating s->len.
+-	 */
+-	if (atomic_cmpxchg(&s->len, len, len + add) != len)
+-		goto again;
+-
+-	queue_flush_work(s);
+-	return add;
+-}
+-
+-static inline void printk_safe_flush_line(const char *text, int len)
+-{
+-	/*
+-	 * Avoid any console drivers calls from here, because we may be
+-	 * in NMI or printk_safe context (when in panic). The messages
+-	 * must go only into the ring buffer at this stage. Consoles will
+-	 * get explicitly called later when a crashdump is not generated.
+-	 */
+-	printk_deferred("%.*s", len, text);
+-}
+-
+-/* printk part of the temporary buffer line by line */
+-static int printk_safe_flush_buffer(const char *start, size_t len)
+-{
+-	const char *c, *end;
+-	bool header;
+-
+-	c = start;
+-	end = start + len;
+-	header = true;
+-
+-	/* Print line by line. */
+-	while (c < end) {
+-		if (*c == '\n') {
+-			printk_safe_flush_line(start, c - start + 1);
+-			start = ++c;
+-			header = true;
+-			continue;
+-		}
+-
+-		/* Handle continuous lines or missing new line. */
+-		if ((c + 1 < end) && printk_get_level(c)) {
+-			if (header) {
+-				c = printk_skip_level(c);
+-				continue;
+-			}
+-
+-			printk_safe_flush_line(start, c - start);
+-			start = c++;
+-			header = true;
+-			continue;
+-		}
+-
+-		header = false;
+-		c++;
+-	}
+-
+-	/* Check if there was a partial line. Ignore pure header. */
+-	if (start < end && !header) {
+-		static const char newline[] = KERN_CONT "\n";
+-
+-		printk_safe_flush_line(start, end - start);
+-		printk_safe_flush_line(newline, strlen(newline));
+-	}
+-
+-	return len;
+-}
+-
+-static void report_message_lost(struct printk_safe_seq_buf *s)
+-{
+-	int lost = atomic_xchg(&s->message_lost, 0);
+-
+-	if (lost)
+-		printk_deferred("Lost %d message(s)!\n", lost);
+-}
+-
+-/*
+- * Flush data from the associated per-CPU buffer. The function
+- * can be called either via IRQ work or independently.
+- */
+-static void __printk_safe_flush(struct irq_work *work)
+-{
+-	struct printk_safe_seq_buf *s =
+-		container_of(work, struct printk_safe_seq_buf, work);
+-	unsigned long flags;
+-	size_t len;
+-	int i;
+-
+-	/*
+-	 * The lock has two functions. First, one reader has to flush all
+-	 * available message to make the lockless synchronization with
+-	 * writers easier. Second, we do not want to mix messages from
+-	 * different CPUs. This is especially important when printing
+-	 * a backtrace.
+-	 */
+-	raw_spin_lock_irqsave(&safe_read_lock, flags);
+-
+-	i = 0;
+-more:
+-	len = atomic_read(&s->len);
+-
+-	/*
+-	 * This is just a paranoid check that nobody has manipulated
+-	 * the buffer an unexpected way. If we printed something then
+-	 * @len must only increase. Also it should never overflow the
+-	 * buffer size.
+-	 */
+-	if ((i && i >= len) || len > sizeof(s->buffer)) {
+-		const char *msg = "printk_safe_flush: internal error\n";
+-
+-		printk_safe_flush_line(msg, strlen(msg));
+-		len = 0;
+-	}
+-
+-	if (!len)
+-		goto out; /* Someone else has already flushed the buffer. */
+-
+-	/* Make sure that data has been written up to the @len */
+-	smp_rmb();
+-	i += printk_safe_flush_buffer(s->buffer + i, len - i);
+-
+-	/*
+-	 * Check that nothing has got added in the meantime and truncate
+-	 * the buffer. Note that atomic_cmpxchg() is an implicit memory
+-	 * barrier that makes sure that the data were copied before
+-	 * updating s->len.
+-	 */
+-	if (atomic_cmpxchg(&s->len, len, 0) != len)
+-		goto more;
+-
+-out:
+-	report_message_lost(s);
+-	raw_spin_unlock_irqrestore(&safe_read_lock, flags);
+-}
+-
+-/**
+- * printk_safe_flush - flush all per-cpu nmi buffers.
+- *
+- * The buffers are flushed automatically via IRQ work. This function
+- * is useful only when someone wants to be sure that all buffers have
+- * been flushed at some point.
+- */
+-void printk_safe_flush(void)
+-{
+-	int cpu;
+-
+-	for_each_possible_cpu(cpu) {
+-#ifdef CONFIG_PRINTK_NMI
+-		__printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work);
+-#endif
+-		__printk_safe_flush(&per_cpu(safe_print_seq, cpu).work);
+-	}
+-}
+-
+-/**
+- * printk_safe_flush_on_panic - flush all per-cpu nmi buffers when the system
+- *	goes down.
+- *
+- * Similar to printk_safe_flush() but it can be called even in NMI context when
+- * the system goes down. It does the best effort to get NMI messages into
+- * the main ring buffer.
+- *
+- * Note that it could try harder when there is only one CPU online.
+- */
+-void printk_safe_flush_on_panic(void)
+-{
+-	if (raw_spin_is_locked(&safe_read_lock)) {
+-		if (num_online_cpus() > 1)
+-			return;
+-
+-		debug_locks_off();
+-		raw_spin_lock_init(&safe_read_lock);
+-	}
+-
+-	printk_safe_flush();
+-}
+-
+ #ifdef CONFIG_PRINTK_NMI
+-/*
+- * Safe printk() for NMI context. It uses a per-CPU buffer to
+- * store the message. NMIs are not nested, so there is always only
+- * one writer running. But the buffer might get flushed from another
+- * CPU, so we need to be careful.
+- */
+-static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
+-{
+-	struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
+-
+-	return printk_safe_log_store(s, fmt, args);
+-}
+-
+ void noinstr printk_nmi_enter(void)
+ {
+ 	this_cpu_add(printk_context, PRINTK_NMI_CONTEXT_OFFSET);
+@@ -305,9 +32,6 @@ void noinstr printk_nmi_exit(void)
+  * Marks a code that might produce many messages in NMI context
+  * and the risk of losing them is more critical than eventual
+  * reordering.
+- *
+- * It has effect only when called in NMI context. Then printk()
+- * will store the messages into the main logbuf directly.
+  */
+ void printk_nmi_direct_enter(void)
+ {
+@@ -320,27 +44,8 @@ void printk_nmi_direct_exit(void)
+ 	this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK);
+ }
+ 
+-#else
+-
+-static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
+-{
+-	return 0;
+-}
+-
+ #endif /* CONFIG_PRINTK_NMI */
+ 
+-/*
+- * Lock-less printk(), to avoid deadlocks should the printk() recurse
+- * into itself. It uses a per-CPU buffer to store the message, just like
+- * NMI.
+- */
+-static __printf(1, 0) int vprintk_safe(const char *fmt, va_list args)
+-{
+-	struct printk_safe_seq_buf *s = this_cpu_ptr(&safe_print_seq);
+-
+-	return printk_safe_log_store(s, fmt, args);
+-}
+-
+ /* Can be preempted by NMI. */
+ void __printk_safe_enter(void)
+ {
+@@ -365,8 +70,10 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
+ 	 * Use the main logbuf even in NMI. But avoid calling console
+ 	 * drivers that might have their own locks.
+ 	 */
+-	if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK)) {
+-		unsigned long flags;
++	if (this_cpu_read(printk_context) &
++	    (PRINTK_NMI_DIRECT_CONTEXT_MASK |
++	     PRINTK_NMI_CONTEXT_MASK |
++	     PRINTK_SAFE_CONTEXT_MASK)) {
+ 		int len;
+ 
+ 		printk_safe_enter_irqsave(flags);
+@@ -376,34 +83,6 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
+ 		return len;
+ 	}
+ 
+-	/* Use extra buffer in NMI. */
+-	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
+-		return vprintk_nmi(fmt, args);
+-
+-	/* Use extra buffer to prevent a recursion deadlock in safe mode. */
+-	if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
+-		return vprintk_safe(fmt, args);
+-
+ 	/* No obstacles. */
+ 	return vprintk_default(fmt, args);
+ }
+-
+-void __init printk_safe_init(void)
+-{
+-	int cpu;
+-
+-	for_each_possible_cpu(cpu) {
+-		struct printk_safe_seq_buf *s;
+-
+-		s = &per_cpu(safe_print_seq, cpu);
+-		init_irq_work(&s->work, __printk_safe_flush);
+-
+-#ifdef CONFIG_PRINTK_NMI
+-		s = &per_cpu(nmi_print_seq, cpu);
+-		init_irq_work(&s->work, __printk_safe_flush);
+-#endif
+-	}
+-
+-	/* Flush pending messages that did not have scheduled IRQ works. */
+-	printk_safe_flush();
+-}
+diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
+index 8abe1870dba4..b09a490f5f70 100644
+--- a/lib/nmi_backtrace.c
++++ b/lib/nmi_backtrace.c
+@@ -75,12 +75,6 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
+ 		touch_softlockup_watchdog();
+ 	}
+ 
+-	/*
+-	 * Force flush any remote buffers that might be stuck in IRQ context
+-	 * and therefore could not run their irq_work.
+-	 */
+-	printk_safe_flush();
+-
+ 	clear_bit_unlock(0, &backtrace_flag);
+ 	put_cpu();
+ }
+-- 
+2.43.0
+
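The commit message above notes that, after this patch, messages are stored into the ring buffer immediately from any context, while console printing is still deferred via irq_work. For readers unfamiliar with that mechanism, the sketch below shows the general irq_work pattern in isolation. It is a hypothetical, minimal example rather than code from this patch, and the demo_* names are invented for illustration:

```c
#include <linux/irq_work.h>
#include <linux/printk.h>

/* Deferred part: runs later from IRQ context raised by a self-IPI. */
static void demo_irq_work_fn(struct irq_work *work)
{
	pr_info("deferred portion executed\n");
}

static DEFINE_IRQ_WORK(demo_irq_work, demo_irq_work_fn);

/*
 * Immediate part: callable from NMI or other restricted contexts.
 * irq_work_queue() only marks the work pending and arms a self-IPI,
 * so it does not take sleeping locks and is safe here.
 */
void demo_emit_from_restricted_context(void)
{
	irq_work_queue(&demo_irq_work);
}
```

This split into a lockless immediate store plus an irq_work-driven follow-up is what allows the patch to delete the per-CPU staging buffers: the ring buffer itself is now safe to write from NMI context, so only the console wakeup still needs deferring.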