From: John Ogness
Date: Fri, 22 Sep 2023 17:35:04 +0000
Subject: [PATCH 46/48] printk: Add kthread for all legacy consoles
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz

The write() callback of legacy consoles makes use of spinlocks. This is
not permitted with PREEMPT_RT in atomic contexts.

For PREEMPT_RT, create a new kthread to handle printing of all the
legacy consoles (and nbcon consoles if boot consoles are registered).

Since the consoles print in task context when running from the kthread,
the legacy printing of nbcon consoles can use the device_lock(),
write_thread(), device_unlock() callbacks.

Introduce the macro force_printkthreads() to query if the forced
threading of legacy consoles is in effect.

These changes only affect CONFIG_PREEMPT_RT.

Signed-off-by: John Ogness
Signed-off-by: Sebastian Andrzej Siewior
---
 kernel/printk/internal.h |  20 +++
 kernel/printk/nbcon.c    |  50 +++++----
 kernel/printk/printk.c   | 252 +++++++++++++++++++++++++++++++++++++----------
 3 files changed, 251 insertions(+), 71 deletions(-)

--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -21,6 +21,12 @@ int devkmsg_sysctl_set_loglvl(struct ctl
 		(con->flags & CON_BOOT) ? "boot" : "",		\
 		con->name, con->index, ##__VA_ARGS__)
 
+#ifdef CONFIG_PREEMPT_RT
+# define force_printkthreads()	(true)
+#else
+# define force_printkthreads()	(false)
+#endif
+
 #ifdef CONFIG_PRINTK
 
 #ifdef CONFIG_PRINTK_CALLER
@@ -90,9 +96,10 @@ void nbcon_free(struct console *con);
 enum nbcon_prio nbcon_get_default_prio(void);
 void nbcon_atomic_flush_pending(void);
 bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
-				   int cookie);
+				   int cookie, bool use_atomic);
 void nbcon_kthread_create(struct console *con);
 void nbcon_wake_threads(void);
+void nbcon_legacy_kthread_create(void);
 
 /*
  * Check if the given console is currently capable and allowed to print
@@ -179,7 +186,7 @@ static inline void nbcon_free(struct con
 static inline enum nbcon_prio nbcon_get_default_prio(void) { return NBCON_PRIO_NONE; }
 static inline void nbcon_atomic_flush_pending(void) { }
 static inline bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
-						 int cookie) { return false; }
+						 int cookie, bool use_atomic) { return false; }
 static inline bool console_is_usable(struct console *con, short flags,
 				     bool use_atomic) { return false; }
 
@@ -187,6 +194,15 @@ static inline bool console_is_usable(str
 #endif /* CONFIG_PRINTK */
 
 extern bool have_boot_console;
+extern bool have_legacy_console;
+
+/*
+ * Specifies if the console lock/unlock dance is needed for console
+ * printing. If @have_boot_console is true, the nbcon consoles will
+ * be printed serially along with the legacy consoles because nbcon
+ * consoles cannot print simultaneously with boot consoles.
+ */
+#define printing_via_unlock (have_legacy_console || have_boot_console)
 
 extern struct printk_buffers printk_shared_pbufs;
--- a/kernel/printk/nbcon.c
+++ b/kernel/printk/nbcon.c
@@ -1202,9 +1202,10 @@ static __ref unsigned int *nbcon_get_cpu
 }
 
 /**
- * nbcon_atomic_emit_one - Print one record for an nbcon console using the
- *				write_atomic() callback
+ * nbcon_emit_one - Print one record for an nbcon console using the
+ *			specified callback
  * @wctxt:	An initialized write context struct to use for this context
+ * @use_atomic:	True if the write_atomic callback is to be used
 *
 * Return:	False if it is known there are no more records to print,
 *		otherwise true.
@@ -1212,7 +1213,7 @@ static __ref unsigned int *nbcon_get_cpu
  * This is an internal helper to handle the locking of the console before
  * calling nbcon_emit_next_record().
  */
-static bool nbcon_atomic_emit_one(struct nbcon_write_context *wctxt)
+static bool nbcon_emit_one(struct nbcon_write_context *wctxt, bool use_atomic)
 {
 	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
@@ -1224,7 +1225,7 @@ static bool nbcon_atomic_emit_one(struct
 	 * handed over or taken over. In both cases the context is no
 	 * longer valid.
 	 */
-	if (!nbcon_emit_next_record(wctxt, true))
+	if (!nbcon_emit_next_record(wctxt, use_atomic))
 		return true;
 
 	nbcon_context_release(ctxt);
@@ -1263,6 +1264,7 @@ enum nbcon_prio nbcon_get_default_prio(v
  *		both the console_lock and the SRCU read lock. Otherwise it
  *		is set to false.
  * @cookie:	The cookie from the SRCU read lock.
+ * @use_atomic:	True if the write_atomic callback is to be used
  *
  * Context:	Any context except NMI.
  * Return:	False if the given console has no next record to print,
@@ -1273,7 +1275,7 @@ enum nbcon_prio nbcon_get_default_prio(v
  * Essentially it is the nbcon version of console_emit_next_record().
  */
 bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
-				   int cookie)
+				   int cookie, bool use_atomic)
 {
 	struct nbcon_write_context wctxt = { };
 	struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
@@ -1282,19 +1284,29 @@ bool nbcon_legacy_emit_next_record(struc
 
 	*handover = false;
 
-	/* Use the same procedure as console_emit_next_record(). */
-	printk_safe_enter_irqsave(flags);
-	console_lock_spinning_enable();
-	stop_critical_timings();
-
-	ctxt->console	= con;
-	ctxt->prio	= nbcon_get_default_prio();
-
-	progress = nbcon_atomic_emit_one(&wctxt);
-
-	start_critical_timings();
-	*handover = console_lock_spinning_disable_and_check(cookie);
-	printk_safe_exit_irqrestore(flags);
+	ctxt->console = con;
+
+	if (use_atomic) {
+		/* Use the same procedure as console_emit_next_record(). */
+		printk_safe_enter_irqsave(flags);
+		console_lock_spinning_enable();
+		stop_critical_timings();
+
+		ctxt->prio = nbcon_get_default_prio();
+		progress = nbcon_emit_one(&wctxt, use_atomic);
+
+		start_critical_timings();
+		*handover = console_lock_spinning_disable_and_check(cookie);
+		printk_safe_exit_irqrestore(flags);
+	} else {
+		con->device_lock(con, &flags);
+		cant_migrate();
+
+		ctxt->prio = nbcon_get_default_prio();
+		progress = nbcon_emit_one(&wctxt, use_atomic);
+
+		con->device_unlock(con, flags);
+	}
 
 	return progress;
 }
@@ -1536,6 +1548,8 @@ static int __init printk_setup_threads(v
 	printk_threads_enabled = true;
 	for_each_console(con)
 		nbcon_kthread_create(con);
+	if (force_printkthreads() && printing_via_unlock)
+		nbcon_legacy_kthread_create();
 	console_list_unlock();
 	return 0;
 }
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -468,7 +468,7 @@ static DEFINE_MUTEX(syslog_lock);
  * present, it is necessary to perform the console lock/unlock dance
  * whenever console flushing should occur.
  */
-static bool have_legacy_console;
+bool have_legacy_console;
 
 /*
  * Specifies if an nbcon console is registered. If nbcon consoles are present,
@@ -485,16 +485,11 @@ static bool have_nbcon_console;
  */
 bool have_boot_console;
 
-/*
- * Specifies if the console lock/unlock dance is needed for console
- * printing. If @have_boot_console is true, the nbcon consoles will
- * be printed serially along with the legacy consoles because nbcon
- * consoles cannot print simultaneously with boot consoles.
- */
-#define printing_via_unlock (have_legacy_console || have_boot_console)
-
 #ifdef CONFIG_PRINTK
 DECLARE_WAIT_QUEUE_HEAD(log_wait);
+
+static DECLARE_WAIT_QUEUE_HEAD(legacy_wait);
+
 /* All 3 protected by @syslog_lock. */
 /* the next printk record to read by syslog(READ) or /proc/kmsg */
 static u64 syslog_seq;
@@ -2358,7 +2353,8 @@ asmlinkage int vprintk_emit(int facility
 			   const struct dev_printk_info *dev_info,
 			   const char *fmt, va_list args)
 {
-	bool do_trylock_unlock = printing_via_unlock;
+	bool do_trylock_unlock = printing_via_unlock &&
+				 !force_printkthreads();
 	int printed_len;
 
 	/* Suppress unimportant messages after panic happens */
@@ -2481,6 +2477,14 @@ EXPORT_SYMBOL(_printk);
 static bool pr_flush(int timeout_ms, bool reset_on_progress);
 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
 
+static struct task_struct *nbcon_legacy_kthread;
+
+static inline void wake_up_legacy_kthread(void)
+{
+	if (nbcon_legacy_kthread)
+		wake_up_interruptible(&legacy_wait);
+}
+
 #else /* CONFIG_PRINTK */
 
 #define printk_time		false
@@ -2494,6 +2498,8 @@ static u64 syslog_seq;
 
 static bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; }
 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
+static inline void nbcon_legacy_kthread_create(void) { }
+static inline void wake_up_legacy_kthread(void) { }
 
 #endif /* CONFIG_PRINTK */
 
 #ifdef CONFIG_EARLY_PRINTK
@@ -2739,6 +2745,8 @@ void resume_console(void)
 	}
 	console_srcu_read_unlock(cookie);
 
+	wake_up_legacy_kthread();
+
 	pr_flush(1000, true);
 }
 
@@ -2753,7 +2761,8 @@ void resume_console(void)
  */
 static int console_cpu_notify(unsigned int cpu)
 {
-	if (!cpuhp_tasks_frozen && printing_via_unlock) {
+	if (!cpuhp_tasks_frozen && printing_via_unlock &&
+	    !force_printkthreads()) {
 		/* If trylock fails, someone else is doing the printing */
 		if (console_trylock())
 			console_unlock();
@@ -3013,31 +3022,43 @@ static bool console_emit_next_record(str
 		con->dropped = 0;
 	}
 
-	/*
-	 * While actively printing out messages, if another printk()
-	 * were to occur on another CPU, it may wait for this one to
-	 * finish. This task can not be preempted if there is a
-	 * waiter waiting to take over.
-	 *
-	 * Interrupts are disabled because the hand over to a waiter
-	 * must not be interrupted until the hand over is completed
-	 * (@console_waiter is cleared).
-	 */
-	printk_safe_enter_irqsave(flags);
-	console_lock_spinning_enable();
+	/* Write everything out to the hardware. */
+
+	if (force_printkthreads()) {
+		/*
+		 * With forced threading this function is either in a thread
+		 * or panic context. So there is no need for concern about
+		 * printk reentrance or handovers.
+		 */
 
-	/* Do not trace print latency. */
-	stop_critical_timings();
+		con->write(con, outbuf, pmsg.outbuf_len);
+		con->seq = pmsg.seq + 1;
+	} else {
+		/*
+		 * While actively printing out messages, if another printk()
+		 * were to occur on another CPU, it may wait for this one to
+		 * finish. This task can not be preempted if there is a
+		 * waiter waiting to take over.
+		 *
+		 * Interrupts are disabled because the hand over to a waiter
+		 * must not be interrupted until the hand over is completed
+		 * (@console_waiter is cleared).
+		 */
+		printk_safe_enter_irqsave(flags);
+		console_lock_spinning_enable();
 
-	/* Write everything out to the hardware. */
-	con->write(con, outbuf, pmsg.outbuf_len);
+		/* Do not trace print latency. */
+		stop_critical_timings();
 
-	start_critical_timings();
+		con->write(con, outbuf, pmsg.outbuf_len);
 
-	con->seq = pmsg.seq + 1;
+		start_critical_timings();
 
-	*handover = console_lock_spinning_disable_and_check(cookie);
-	printk_safe_exit_irqrestore(flags);
+		con->seq = pmsg.seq + 1;
+
+		*handover = console_lock_spinning_disable_and_check(cookie);
+		printk_safe_exit_irqrestore(flags);
+	}
 skip:
 	return true;
 }
@@ -3101,12 +3122,13 @@ static bool console_flush_all(bool do_co
 			if ((flags & CON_NBCON) && con->kthread)
 				continue;
 
-			if (!console_is_usable(con, flags, true))
+			if (!console_is_usable(con, flags, !do_cond_resched))
 				continue;
 			any_usable = true;
 
 			if (flags & CON_NBCON) {
-				progress = nbcon_legacy_emit_next_record(con, handover, cookie);
+				progress = nbcon_legacy_emit_next_record(con, handover, cookie,
+									 !do_cond_resched);
 				printk_seq = nbcon_seq_read(con);
 			} else {
 				progress = console_emit_next_record(con, handover, cookie);
@@ -3145,19 +3167,7 @@ static bool console_flush_all(bool do_co
 	return false;
 }
 
-/**
- * console_unlock - unblock the console subsystem from printing
- *
- * Releases the console_lock which the caller holds to block printing of
- * the console subsystem.
- *
- * While the console_lock was held, console output may have been buffered
- * by printk(). If this is the case, console_unlock(); emits
- * the output prior to releasing the lock.
- *
- * console_unlock(); may be called from any context.
- */
-void console_unlock(void)
+static void console_flush_and_unlock(void)
 {
 	bool do_cond_resched;
 	bool handover;
@@ -3201,6 +3211,32 @@ void console_unlock(void)
 	 */
 	} while (prb_read_valid(prb, next_seq, NULL) && console_trylock());
 }
+
+/**
+ * console_unlock - unblock the console subsystem from printing
+ *
+ * Releases the console_lock which the caller holds to block printing of
+ * the console subsystem.
+ *
+ * While the console_lock was held, console output may have been buffered
+ * by printk(). If this is the case, console_unlock(); emits
+ * the output prior to releasing the lock.
+ *
+ * console_unlock(); may be called from any context.
+ */
+void console_unlock(void)
+{
+	/*
+	 * Forced threading relies on kthread and atomic consoles for
+	 * printing. It never attempts to print from console_unlock().
+	 */
+	if (force_printkthreads()) {
+		__console_unlock();
+		return;
+	}
+
+	console_flush_and_unlock();
+}
 EXPORT_SYMBOL(console_unlock);
 
 /**
@@ -3410,11 +3446,106 @@ void console_start(struct console *conso
 
 	if (flags & CON_NBCON)
 		nbcon_kthread_wake(console);
+	else
+		wake_up_legacy_kthread();
 
 	__pr_flush(console, 1000, true);
 }
 EXPORT_SYMBOL(console_start);
 
+#ifdef CONFIG_PRINTK
+static bool printer_should_wake(void)
+{
+	bool available = false;
+	struct console *con;
+	int cookie;
+
+	if (kthread_should_stop())
+		return true;
+
+	cookie = console_srcu_read_lock();
+	for_each_console_srcu(con) {
+		short flags = console_srcu_read_flags(con);
+		u64 printk_seq;
+
+		/*
+		 * The legacy printer thread is only for legacy consoles,
+		 * unless the nbcon console has no kthread printer.
+		 */
+		if ((flags & CON_NBCON) && con->kthread)
+			continue;
+
+		if (!console_is_usable(con, flags, true))
+			continue;
+
+		if (flags & CON_NBCON) {
+			printk_seq = nbcon_seq_read(con);
+		} else {
+			/*
+			 * It is safe to read @seq because only this
+			 * thread context updates @seq.
+			 */
+ */ + printk_seq = con->seq; + } + + if (prb_read_valid(prb, printk_seq, NULL)) { + available = true; + break; + } + } + console_srcu_read_unlock(cookie); + + return available; +} + +static int nbcon_legacy_kthread_func(void *unused) +{ + int error; + + for (;;) { + error = wait_event_interruptible(legacy_wait, printer_should_wake()); + + if (kthread_should_stop()) + break; + + if (error) + continue; + + console_lock(); + console_flush_and_unlock(); + } + + return 0; +} + +void nbcon_legacy_kthread_create(void) +{ + struct task_struct *kt; + + lockdep_assert_held(&console_mutex); + + if (!force_printkthreads()) + return; + + if (!printk_threads_enabled || nbcon_legacy_kthread) + return; + + kt = kthread_run(nbcon_legacy_kthread_func, NULL, "pr/legacy"); + if (IS_ERR(kt)) { + pr_err("unable to start legacy printing thread\n"); + return; + } + + nbcon_legacy_kthread = kt; + + /* + * It is important that console printing threads are scheduled + * shortly after a printk call and with generous runtime budgets. + */ + sched_set_normal(nbcon_legacy_kthread, -20); +} +#endif /* CONFIG_PRINTK */ + static int __read_mostly keep_bootcon; static int __init keep_bootcon_setup(char *str) @@ -3691,6 +3822,7 @@ void register_console(struct console *ne newcon->seq = 0; } else { have_legacy_console = true; + nbcon_legacy_kthread_create(); } if (newcon->flags & CON_BOOT) @@ -3846,6 +3978,13 @@ static int unregister_console_locked(str nbcon_kthread_create(c); } +#ifdef CONFIG_PRINTK + if (!printing_via_unlock && nbcon_legacy_kthread) { + kthread_stop(nbcon_legacy_kthread); + nbcon_legacy_kthread = NULL; + } +#endif + return res; } @@ -4004,8 +4143,12 @@ static bool __pr_flush(struct console *c seq = prb_next_reserve_seq(prb); - /* Flush the consoles so that records up to @seq are printed. */ - if (printing_via_unlock) { + /* + * Flush the consoles so that records up to @seq are printed. + * Otherwise this function will just wait for the threaded printers + * to print up to @seq. + */ + if (printing_via_unlock && !force_printkthreads()) { console_lock(); console_unlock(); } @@ -4119,9 +4262,16 @@ static void wake_up_klogd_work_func(stru int pending = this_cpu_xchg(printk_pending, 0); if (pending & PRINTK_PENDING_OUTPUT) { - /* If trylock fails, someone else is doing the printing */ - if (console_trylock()) - console_unlock(); + if (force_printkthreads()) { + wake_up_legacy_kthread(); + } else { + /* + * If trylock fails, some other context + * will do the printing. + */ + if (console_trylock()) + console_unlock(); + } } if (pending & PRINTK_PENDING_WAKEUP)