From 511721a2f6160a32b221debb19e94129cc3f5ce9 Mon Sep 17 00:00:00 2001
From: John Ogness
Date: Tue, 26 Jan 2021 17:43:19 +0106
Subject: [PATCH 092/323] printk: remove logbuf_lock
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.215-rt107.tar.xz

Since the ringbuffer is lockless, there is no need for it to be
protected by @logbuf_lock. Remove @logbuf_lock.

This means that printk_nmi_direct and printk_safe_flush_on_panic()
no longer need to acquire any lock to run.

@console_seq, @exclusive_console_stop_seq, @console_dropped are
protected by @console_lock.

Signed-off-by: John Ogness
Signed-off-by: Sebastian Andrzej Siewior
---
 kernel/printk/internal.h    |   4 +-
 kernel/printk/printk.c      | 118 ++++++++++++------------------------
 kernel/printk/printk_safe.c |  29 +++------
 3 files changed, 48 insertions(+), 103 deletions(-)

diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
index 3a8fd491758cf..e7acc2888c8e1 100644
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -12,8 +12,6 @@
 
 #define PRINTK_NMI_CONTEXT_OFFSET	0x010000000
 
-extern raw_spinlock_t logbuf_lock;
-
 __printf(4, 0)
 int vprintk_store(int facility, int level,
 		  const struct dev_printk_info *dev_info,
@@ -59,7 +57,7 @@ void defer_console_output(void);
 __printf(1, 0) int vprintk_func(const char *fmt, va_list args) { return 0; }
 
 /*
- * In !PRINTK builds we still export logbuf_lock spin_lock, console_sem
+ * In !PRINTK builds we still export console_sem
  * semaphore and some of console functions (console_unlock()/etc.), so
  * printk-safe must preserve the existing local IRQ guarantees.
  */
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index cc5f7e0fb1573..9fde03f1e29bf 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -357,41 +357,6 @@ enum log_flags {
 	LOG_CONT	= 8,	/* text is a fragment of a continuation line */
 };
 
-/*
- * The logbuf_lock protects kmsg buffer, indices, counters. This can be taken
- * within the scheduler's rq lock. It must be released before calling
- * console_unlock() or anything else that might wake up a process.
- */
-DEFINE_RAW_SPINLOCK(logbuf_lock);
-
-/*
- * Helper macros to lock/unlock logbuf_lock and switch between
- * printk-safe/unsafe modes.
- */
-#define logbuf_lock_irq() \
-	do { \
-		printk_safe_enter_irq(); \
-		raw_spin_lock(&logbuf_lock); \
-	} while (0)
-
-#define logbuf_unlock_irq() \
-	do { \
-		raw_spin_unlock(&logbuf_lock); \
-		printk_safe_exit_irq(); \
-	} while (0)
-
-#define logbuf_lock_irqsave(flags) \
-	do { \
-		printk_safe_enter_irqsave(flags); \
-		raw_spin_lock(&logbuf_lock); \
-	} while (0)
-
-#define logbuf_unlock_irqrestore(flags) \
-	do { \
-		raw_spin_unlock(&logbuf_lock); \
-		printk_safe_exit_irqrestore(flags); \
-	} while (0)
-
 /* syslog_lock protects syslog_* variables and write access to clear_seq. */
 static DEFINE_RAW_SPINLOCK(syslog_lock);
 
@@ -403,6 +368,7 @@ static u64 syslog_seq;
 static size_t syslog_partial;
 static bool syslog_time;
 
+/* All 3 protected by @console_sem. */
 /* the next printk record to write to the console */
 static u64 console_seq;
 static u64 exclusive_console_stop_seq;
@@ -769,27 +735,27 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
 	if (ret)
 		return ret;
 
-	logbuf_lock_irq();
+	printk_safe_enter_irq();
 	if (!prb_read_valid(prb, atomic64_read(&user->seq), r)) {
 		if (file->f_flags & O_NONBLOCK) {
 			ret = -EAGAIN;
-			logbuf_unlock_irq();
+			printk_safe_exit_irq();
 			goto out;
 		}
 
-		logbuf_unlock_irq();
+		printk_safe_exit_irq();
 		ret = wait_event_interruptible(log_wait,
 				prb_read_valid(prb, atomic64_read(&user->seq), r));
 		if (ret)
 			goto out;
-		logbuf_lock_irq();
+		printk_safe_enter_irq();
 	}
 
 	if (r->info->seq != atomic64_read(&user->seq)) {
 		/* our last seen message is gone, return error and reset */
 		atomic64_set(&user->seq, r->info->seq);
 		ret = -EPIPE;
-		logbuf_unlock_irq();
+		printk_safe_exit_irq();
 		goto out;
 	}
 
@@ -799,7 +765,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
 				  &r->info->dev_info);
 
 	atomic64_set(&user->seq, r->info->seq + 1);
-	logbuf_unlock_irq();
+	printk_safe_exit_irq();
 
 	if (len > count) {
 		ret = -EINVAL;
@@ -834,7 +800,7 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
 	if (offset)
 		return -ESPIPE;
 
-	logbuf_lock_irq();
+	printk_safe_enter_irq();
 	switch (whence) {
 	case SEEK_SET:
 		/* the first record */
@@ -855,7 +821,7 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
 	default:
 		ret = -EINVAL;
 	}
-	logbuf_unlock_irq();
+	printk_safe_exit_irq();
 	return ret;
 }
 
@@ -870,15 +836,15 @@ static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
 
 	poll_wait(file, &log_wait, wait);
 
-	logbuf_lock_irq();
-	if (prb_read_valid_info(prb, user->seq, &info, NULL)) {
+	printk_safe_enter_irq();
+	if (prb_read_valid_info(prb, atomic64_read(&user->seq), &info, NULL)) {
 		/* return error when data has vanished underneath us */
 		if (info.seq != atomic64_read(&user->seq))
 			ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
 		else
 			ret = EPOLLIN|EPOLLRDNORM;
 	}
-	logbuf_unlock_irq();
+	printk_safe_exit_irq();
 
 	return ret;
 }
@@ -911,9 +877,9 @@ static int devkmsg_open(struct inode *inode, struct file *file)
 	prb_rec_init_rd(&user->record, &user->info,
 			&user->text_buf[0], sizeof(user->text_buf));
 
-	logbuf_lock_irq();
+	printk_safe_enter_irq();
 	atomic64_set(&user->seq, prb_first_valid_seq(prb));
-	logbuf_unlock_irq();
+	printk_safe_exit_irq();
 
 	file->private_data = user;
 	return 0;
@@ -1535,11 +1501,11 @@ static int syslog_print(char __user *buf, int size)
 		size_t n;
 		size_t skip;
 
-		logbuf_lock_irq();
+		printk_safe_enter_irq();
 		raw_spin_lock(&syslog_lock);
 		if (!prb_read_valid(prb, syslog_seq, &r)) {
 			raw_spin_unlock(&syslog_lock);
-			logbuf_unlock_irq();
+			printk_safe_exit_irq();
 			break;
 		}
 		if (r.info->seq != syslog_seq) {
@@ -1569,7 +1535,7 @@ static int syslog_print(char __user *buf, int size)
 		} else
 			n = 0;
 		raw_spin_unlock(&syslog_lock);
-		logbuf_unlock_irq();
+		printk_safe_exit_irq();
 
 		if (!n)
 			break;
@@ -1603,7 +1569,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 		return -ENOMEM;
 
 	time = printk_time;
-	logbuf_lock_irq();
+	printk_safe_enter_irq();
 	/*
	 * Find first record that fits, including all following records,
	 * into the user-provided buffer for this dump.
@@ -1624,12 +1590,12 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 			break;
 		}
 
-		logbuf_unlock_irq();
+		printk_safe_exit_irq();
 		if (copy_to_user(buf + len, text, textlen))
 			len = -EFAULT;
 		else
 			len += textlen;
-		logbuf_lock_irq();
+		printk_safe_enter_irq();
 
 		if (len < 0)
 			break;
@@ -1640,7 +1606,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 		latched_seq_write(&clear_seq, seq);
 		raw_spin_unlock(&syslog_lock);
 	}
-	logbuf_unlock_irq();
+	printk_safe_exit_irq();
 
 	kfree(text);
 	return len;
@@ -1648,11 +1614,11 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 
 static void syslog_clear(void)
 {
-	logbuf_lock_irq();
+	printk_safe_enter_irq();
 	raw_spin_lock(&syslog_lock);
 	latched_seq_write(&clear_seq, prb_next_seq(prb));
 	raw_spin_unlock(&syslog_lock);
-	logbuf_unlock_irq();
+	printk_safe_exit_irq();
 }
 
 /* Return a consistent copy of @syslog_seq. */
@@ -1740,12 +1706,12 @@ int do_syslog(int type, char __user *buf, int len, int source)
 		break;
 	/* Number of chars in the log buffer */
 	case SYSLOG_ACTION_SIZE_UNREAD:
-		logbuf_lock_irq();
+		printk_safe_enter_irq();
 		raw_spin_lock(&syslog_lock);
 		if (!prb_read_valid_info(prb, syslog_seq, &info, NULL)) {
 			/* No unread messages. */
 			raw_spin_unlock(&syslog_lock);
-			logbuf_unlock_irq();
+			printk_safe_exit_irq();
 			return 0;
 		}
 		if (info.seq != syslog_seq) {
@@ -1774,7 +1740,7 @@ int do_syslog(int type, char __user *buf, int len, int source)
 			error -= syslog_partial;
 		}
 		raw_spin_unlock(&syslog_lock);
-		logbuf_unlock_irq();
+		printk_safe_exit_irq();
 		break;
 	/* Size of the log buffer */
 	case SYSLOG_ACTION_SIZE_BUFFER:
@@ -2636,7 +2602,6 @@ void console_unlock(void)
 		size_t len;
 
 		printk_safe_enter_irqsave(flags);
-		raw_spin_lock(&logbuf_lock);
 skip:
 		if (!prb_read_valid(prb, console_seq, &r))
 			break;
@@ -2680,7 +2645,6 @@ void console_unlock(void)
 				console_msg_format & MSG_FORMAT_SYSLOG,
 				printk_time);
 		console_seq++;
-		raw_spin_unlock(&logbuf_lock);
 
 		/*
		 * While actively printing out messages, if another printk()
@@ -2707,8 +2671,6 @@ void console_unlock(void)
 
 	console_locked = 0;
 
-	raw_spin_unlock(&logbuf_lock);
-
 	up_console_sem();
 
 	/*
@@ -2717,9 +2679,7 @@ void console_unlock(void)
	 * there's a new owner and the console_unlock() from them will do the
	 * flush, no worries.
	 */
-	raw_spin_lock(&logbuf_lock);
 	retry = prb_read_valid(prb, console_seq, NULL);
-	raw_spin_unlock(&logbuf_lock);
 	printk_safe_exit_irqrestore(flags);
 
 	if (retry && console_trylock())
@@ -2786,9 +2746,9 @@ void console_flush_on_panic(enum con_flush_mode mode)
 	if (mode == CONSOLE_REPLAY_ALL) {
 		unsigned long flags;
 
-		logbuf_lock_irqsave(flags);
+		printk_safe_enter_irqsave(flags);
 		console_seq = prb_first_valid_seq(prb);
-		logbuf_unlock_irqrestore(flags);
+		printk_safe_exit_irqrestore(flags);
 	}
 	console_unlock();
 }
@@ -3040,7 +3000,7 @@ void register_console(struct console *newcon)
		 * console_unlock(); will print out the buffered messages
		 * for us.
		 */
-		logbuf_lock_irqsave(flags);
+		printk_safe_enter_irqsave(flags);
 		/*
		 * We're about to replay the log buffer. Only do this to the
		 * just-registered console to avoid excessive message spam to
@@ -3058,7 +3018,7 @@ void register_console(struct console *newcon)
 		console_seq = syslog_seq;
 		raw_spin_unlock(&syslog_lock);
 
-		logbuf_unlock_irqrestore(flags);
+		printk_safe_exit_irqrestore(flags);
 	}
 	console_unlock();
 	console_sysfs_notify();
@@ -3444,10 +3404,10 @@ void kmsg_dump(enum kmsg_dump_reason reason)
 	/* initialize iterator with data about the stored records */
 	iter.active = true;
 
-	logbuf_lock_irqsave(flags);
+	printk_safe_enter_irqsave(flags);
 	iter.cur_seq = latched_seq_read_nolock(&clear_seq);
 	iter.next_seq = prb_next_seq(prb);
-	logbuf_unlock_irqrestore(flags);
+	printk_safe_exit_irqrestore(flags);
 
 	/* invoke dumper which will iterate over records */
 	dumper->dump(dumper, reason, &iter);
@@ -3534,9 +3494,9 @@ bool kmsg_dump_get_line(struct kmsg_dumper_iter *iter, bool syslog,
 	unsigned long flags;
 	bool ret;
 
-	logbuf_lock_irqsave(flags);
+	printk_safe_enter_irqsave(flags);
 	ret = kmsg_dump_get_line_nolock(iter, syslog, line, size, len);
-	logbuf_unlock_irqrestore(flags);
+	printk_safe_exit_irqrestore(flags);
 
 	return ret;
 }
@@ -3576,7 +3536,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper_iter *iter, bool syslog,
 	if (!iter->active || !buf || !size)
 		goto out;
 
-	logbuf_lock_irqsave(flags);
+	printk_safe_enter_irqsave(flags);
 	if (prb_read_valid_info(prb, iter->cur_seq, &info, NULL)) {
 		if (info.seq != iter->cur_seq) {
 			/* messages are gone, move to first available one */
@@ -3586,7 +3546,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper_iter *iter, bool syslog,
 
 	/* last entry */
 	if (iter->cur_seq >= iter->next_seq) {
-		logbuf_unlock_irqrestore(flags);
+		printk_safe_exit_irqrestore(flags);
 		goto out;
 	}
 
@@ -3620,7 +3580,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper_iter *iter, bool syslog,
 	iter->next_seq = next_seq;
 	ret = true;
 
-	logbuf_unlock_irqrestore(flags);
+	printk_safe_exit_irqrestore(flags);
 out:
 	if (len_out)
 		*len_out = len;
@@ -3656,9 +3616,9 @@ void kmsg_dump_rewind(struct kmsg_dumper_iter *iter)
 {
 	unsigned long flags;
 
-	logbuf_lock_irqsave(flags);
+	printk_safe_enter_irqsave(flags);
 	kmsg_dump_rewind_nolock(iter);
-	logbuf_unlock_irqrestore(flags);
+	printk_safe_exit_irqrestore(flags);
 }
 EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
 
diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
index 2e9e3ed7d63ef..7df8a88d4115e 100644
--- a/kernel/printk/printk_safe.c
+++ b/kernel/printk/printk_safe.c
@@ -16,7 +16,7 @@
 #include "internal.h"
 
 /*
- * printk() could not take logbuf_lock in NMI context. Instead,
+ * In NMI and safe mode, printk() avoids taking locks. Instead,
  * it uses an alternative implementation that temporary stores
  * the strings into a per-CPU buffer. The content of the buffer
  * is later flushed into the main ring buffer via IRQ work.
@@ -266,18 +266,6 @@ void printk_safe_flush(void)
  */
 void printk_safe_flush_on_panic(void)
 {
-	/*
-	 * Make sure that we could access the main ring buffer.
-	 * Do not risk a double release when more CPUs are up.
-	 */
-	if (raw_spin_is_locked(&logbuf_lock)) {
-		if (num_online_cpus() > 1)
-			return;
-
-		debug_locks_off();
-		raw_spin_lock_init(&logbuf_lock);
-	}
-
 	if (raw_spin_is_locked(&safe_read_lock)) {
 		if (num_online_cpus() > 1)
 			return;
@@ -319,9 +307,7 @@ void noinstr printk_nmi_exit(void)
  * reordering.
  *
  * It has effect only when called in NMI context. Then printk()
- * will try to store the messages into the main logbuf directly
- * and use the per-CPU buffers only as a fallback when the lock
- * is not available.
+ * will store the messages into the main logbuf directly.
  */
 void printk_nmi_direct_enter(void)
 {
@@ -376,20 +362,21 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
 #endif
 
 	/*
-	 * Try to use the main logbuf even in NMI. But avoid calling console
+	 * Use the main logbuf even in NMI. But avoid calling console
	 * drivers that might have their own locks.
	 */
-	if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK) &&
-	    raw_spin_trylock(&logbuf_lock)) {
+	if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK)) {
+		unsigned long flags;
 		int len;
 
+		printk_safe_enter_irqsave(flags);
 		len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
-		raw_spin_unlock(&logbuf_lock);
+		printk_safe_exit_irqrestore(flags);
 		defer_console_output();
 		return len;
 	}
 
-	/* Use extra buffer in NMI when logbuf_lock is taken or in safe mode. */
+	/* Use extra buffer in NMI. */
 	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
 		return vprintk_nmi(fmt, args);
 
-- 
2.44.0
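
For reference, the net effect on code that used to take @logbuf_lock around a
ringbuffer read can be summed up with the sketch below. This is an
illustrative sketch only, not part of the patch: the function
example_read_next_record() and its bookkeeping are made up for the example,
while prb, prb_read_valid(), struct printk_record and the
printk_safe_enter_irq()/printk_safe_exit_irq() pair are the identifiers used
in kernel/printk/printk.c above.

/*
 * Illustrative sketch, assuming the 5.10-rt printk internals patched above.
 * Reads the next valid record at or after *seq and advances *seq past it.
 */
static bool example_read_next_record(u64 *seq, struct printk_record *r)
{
	bool found;

	/*
	 * No @logbuf_lock any more: the ringbuffer read itself is lockless.
	 * Only the printk-safe section remains (IRQs off, nested printk()
	 * redirected to the per-CPU safe buffer), no spinlock is taken.
	 */
	printk_safe_enter_irq();
	found = prb_read_valid(prb, *seq, r);
	if (found)
		*seq = r->info->seq + 1;	/* continue after this record */
	printk_safe_exit_irq();

	return found;
}

The per-CPU NMI buffers are still used for plain NMI context; only the
NMI-direct path stores straight into prb, as the vprintk_func() hunk above
shows.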