author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-18 18:50:40 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-18 18:50:59 +0000
commit     ba6b167af6ee5e63ca79ad22e7719644aed12b2c
tree       62272cbe2fb256ecb90fa6e2cbfa509541954d28
parent     Merging upstream version 6.8.9.
Merging debian version 6.8.9-1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat:
 debian/patches-rt/0046-printk-Add-kthread-for-all-legacy-consoles.patch (renamed from debian/patches-rt/0048-printk-Add-kthread-for-all-legacy-consoles.patch) | 278
 1 file changed, 211 insertions(+), 67 deletions(-)
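The refreshed patch below replaces the bare IS_ENABLED(CONFIG_PREEMPT_RT) checks with a force_printkthreads() macro and routes legacy console printing through a dedicated kthread when it is true. As a minimal, standalone sketch of that gating pattern (not the actual kernel code): the macro and variable names mirror the hunks below, while the two helper functions here are stand-ins invented purely for illustration.

```c
/*
 * Sketch only: force_printkthreads(), printing_via_unlock,
 * have_legacy_console and have_boot_console are taken from the patch;
 * the helpers and main() are hypothetical scaffolding.
 */
#include <stdbool.h>
#include <stdio.h>

#ifdef CONFIG_PREEMPT_RT
# define force_printkthreads()	(true)
#else
# define force_printkthreads()	(false)
#endif

static bool have_legacy_console = true;
static bool have_boot_console;

/* Legacy/boot consoles still require the console lock/unlock dance. */
#define printing_via_unlock (have_legacy_console || have_boot_console)

static void nbcon_legacy_kthread_create(void)
{
	/* On PREEMPT_RT, printing is deferred to a dedicated kthread. */
	puts("spawn legacy printing kthread");
}

static void console_flush_from_caller(void)
{
	/* Otherwise the emitting context tries to flush the consoles itself. */
	puts("print directly from the calling context");
}

int main(void)
{
	if (force_printkthreads() && printing_via_unlock)
		nbcon_legacy_kthread_create();
	else if (printing_via_unlock)
		console_flush_from_caller();
	return 0;
}
```

Compiled with -DCONFIG_PREEMPT_RT the sketch takes the kthread path, otherwise the direct-flush path, which is the same decision vprintk_emit() and console_cpu_notify() make in the hunks that follow.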
diff --git a/debian/patches-rt/0048-printk-Add-kthread-for-all-legacy-consoles.patch b/debian/patches-rt/0046-printk-Add-kthread-for-all-legacy-consoles.patch
index 73a0c337be..e29281db8f 100644
--- a/debian/patches-rt/0048-printk-Add-kthread-for-all-legacy-consoles.patch
+++ b/debian/patches-rt/0046-printk-Add-kthread-for-all-legacy-consoles.patch
@@ -1,82 +1,210 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Fri, 22 Sep 2023 17:35:04 +0000
-Subject: [PATCH 48/50] printk: Add kthread for all legacy consoles
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.7/older/patches-6.7-rt6.tar.xz
+Subject: [PATCH 46/48] printk: Add kthread for all legacy consoles
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-The write callback of legacy consoles make use of spinlocks.
+The write callback of legacy consoles makes use of spinlocks.
This is not permitted with PREEMPT_RT in atomic contexts.
-Create a new kthread to handle printing of all the legacy
-consoles (and nbcon consoles if boot consoles are registered).
+For PREEMPT_RT, create a new kthread to handle printing of all
+the legacy consoles (and nbcon consoles if boot consoles are
+registered).
-Since the consoles are printing in a task context, it is no
-longer appropriate to support the legacy handover mechanism.
+Since, if running from the kthread, the consoles are printing
+in a task context, the legacy nbcon printing can use the
+device_lock(), write_thread(), device_unlock() callbacks for
+printing.
-These changes exist only for CONFIG_PREEMPT_RT.
+Introduce the macro force_printkthreads() to query if the
+forced threading of legacy consoles is in effect.
+
+These changes only affect CONFIG_PREEMPT_RT.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/printk/internal.h | 1
- kernel/printk/nbcon.c | 18 ++-
- kernel/printk/printk.c | 237 +++++++++++++++++++++++++++++++++++++++--------
- 3 files changed, 210 insertions(+), 46 deletions(-)
+ kernel/printk/internal.h | 20 +++
+ kernel/printk/nbcon.c | 50 +++++----
+ kernel/printk/printk.c | 252 +++++++++++++++++++++++++++++++++++++----------
+ 3 files changed, 251 insertions(+), 71 deletions(-)
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
-@@ -95,6 +95,7 @@ void nbcon_atomic_flush_all(void);
- bool nbcon_atomic_emit_next_record(struct console *con, bool *handover, int cookie);
+@@ -21,6 +21,12 @@ int devkmsg_sysctl_set_loglvl(struct ctl
+ (con->flags & CON_BOOT) ? "boot" : "", \
+ con->name, con->index, ##__VA_ARGS__)
+
++#ifdef CONFIG_PREEMPT_RT
++# define force_printkthreads() (true)
++#else
++# define force_printkthreads() (false)
++#endif
++
+ #ifdef CONFIG_PRINTK
+
+ #ifdef CONFIG_PRINTK_CALLER
+@@ -90,9 +96,10 @@ void nbcon_free(struct console *con);
+ enum nbcon_prio nbcon_get_default_prio(void);
+ void nbcon_atomic_flush_pending(void);
+ bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
+- int cookie);
++ int cookie, bool use_atomic);
void nbcon_kthread_create(struct console *con);
void nbcon_wake_threads(void);
+void nbcon_legacy_kthread_create(void);
/*
* Check if the given console is currently capable and allowed to print
+@@ -179,7 +186,7 @@ static inline void nbcon_free(struct con
+ static inline enum nbcon_prio nbcon_get_default_prio(void) { return NBCON_PRIO_NONE; }
+ static inline void nbcon_atomic_flush_pending(void) { }
+ static inline bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
+- int cookie) { return false; }
++ int cookie, bool use_atomic) { return false; }
+
+ static inline bool console_is_usable(struct console *con, short flags,
+ bool use_atomic) { return false; }
+@@ -187,6 +194,15 @@ static inline bool console_is_usable(str
+ #endif /* CONFIG_PRINTK */
+
+ extern bool have_boot_console;
++extern bool have_legacy_console;
++
++/*
++ * Specifies if the console lock/unlock dance is needed for console
++ * printing. If @have_boot_console is true, the nbcon consoles will
++ * be printed serially along with the legacy consoles because nbcon
++ * consoles cannot print simultaneously with boot consoles.
++ */
++#define printing_via_unlock (have_legacy_console || have_boot_console)
+
+ extern struct printk_buffers printk_shared_pbufs;
+
--- a/kernel/printk/nbcon.c
+++ b/kernel/printk/nbcon.c
-@@ -1247,9 +1247,11 @@ bool nbcon_atomic_emit_next_record(struc
+@@ -1202,9 +1202,10 @@ static __ref unsigned int *nbcon_get_cpu
+ }
+
+ /**
+- * nbcon_atomic_emit_one - Print one record for an nbcon console using the
+- * write_atomic() callback
++ * nbcon_emit_one - Print one record for an nbcon console using the
++ * specified callback
+ * @wctxt: An initialized write context struct to use for this context
++ * @use_atomic: True if the write_atomic callback is to be used
+ *
+ * Return: False if it is known there are no more records to print,
+ * otherwise true.
+@@ -1212,7 +1213,7 @@ static __ref unsigned int *nbcon_get_cpu
+ * This is an internal helper to handle the locking of the console before
+ * calling nbcon_emit_next_record().
+ */
+-static bool nbcon_atomic_emit_one(struct nbcon_write_context *wctxt)
++static bool nbcon_emit_one(struct nbcon_write_context *wctxt, bool use_atomic)
+ {
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+
+@@ -1224,7 +1225,7 @@ static bool nbcon_atomic_emit_one(struct
+ * handed over or taken over. In both cases the context is no
+ * longer valid.
+ */
+- if (!nbcon_emit_next_record(wctxt, true))
++ if (!nbcon_emit_next_record(wctxt, use_atomic))
+ return true;
+
+ nbcon_context_release(ctxt);
+@@ -1263,6 +1264,7 @@ enum nbcon_prio nbcon_get_default_prio(v
+ * both the console_lock and the SRCU read lock. Otherwise it
+ * is set to false.
+ * @cookie: The cookie from the SRCU read lock.
++ * @use_atomic: True if the write_atomic callback is to be used
+ *
+ * Context: Any context except NMI.
+ * Return: False if the given console has no next record to print,
+@@ -1273,7 +1275,7 @@ enum nbcon_prio nbcon_get_default_prio(v
+ * Essentially it is the nbcon version of console_emit_next_record().
+ */
+ bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
+- int cookie)
++ int cookie, bool use_atomic)
+ {
+ struct nbcon_write_context wctxt = { };
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
+@@ -1282,19 +1284,29 @@ bool nbcon_legacy_emit_next_record(struc
+
*handover = false;
- /* Use the same locking order as console_emit_next_record(). */
+- /* Use the same procedure as console_emit_next_record(). */
- printk_safe_enter_irqsave(flags);
- console_lock_spinning_enable();
- stop_critical_timings();
-+ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
-+ printk_safe_enter_irqsave(flags);
-+ console_lock_spinning_enable();
-+ stop_critical_timings();
-+ }
-
- con->driver_enter(con, &driver_flags);
- cant_migrate();
-@@ -1261,9 +1263,11 @@ bool nbcon_atomic_emit_next_record(struc
-
- con->driver_exit(con, driver_flags);
-
+-
+- ctxt->console = con;
+- ctxt->prio = nbcon_get_default_prio();
+-
+- progress = nbcon_atomic_emit_one(&wctxt);
+-
- start_critical_timings();
- *handover = console_lock_spinning_disable_and_check(cookie);
- printk_safe_exit_irqrestore(flags);
-+ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
++ ctxt->console = con;
++
++ if (use_atomic) {
++ /* Use the same procedure as console_emit_next_record(). */
++ printk_safe_enter_irqsave(flags);
++ console_lock_spinning_enable();
++ stop_critical_timings();
++
++ ctxt->prio = nbcon_get_default_prio();
++ progress = nbcon_emit_one(&wctxt, use_atomic);
++
+ start_critical_timings();
+ *handover = console_lock_spinning_disable_and_check(cookie);
+ printk_safe_exit_irqrestore(flags);
++ } else {
++ con->device_lock(con, &flags);
++ cant_migrate();
++
++ ctxt->prio = nbcon_get_default_prio();
++ progress = nbcon_emit_one(&wctxt, use_atomic);
++
++ con->device_unlock(con, flags);
+ }
return progress;
}
-@@ -1469,6 +1473,8 @@ static int __init printk_setup_threads(v
+@@ -1536,6 +1548,8 @@ static int __init printk_setup_threads(v
printk_threads_enabled = true;
for_each_console(con)
nbcon_kthread_create(con);
-+ if (IS_ENABLED(CONFIG_PREEMPT_RT) && printing_via_unlock)
++ if (force_printkthreads() && printing_via_unlock)
+ nbcon_legacy_kthread_create();
console_list_unlock();
return 0;
}
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -487,6 +487,9 @@ bool have_boot_console;
+@@ -468,7 +468,7 @@ static DEFINE_MUTEX(syslog_lock);
+ * present, it is necessary to perform the console lock/unlock dance
+ * whenever console flushing should occur.
+ */
+-static bool have_legacy_console;
++bool have_legacy_console;
+ /*
+ * Specifies if an nbcon console is registered. If nbcon consoles are present,
+@@ -485,16 +485,11 @@ static bool have_nbcon_console;
+ */
+ bool have_boot_console;
+
+-/*
+- * Specifies if the console lock/unlock dance is needed for console
+- * printing. If @have_boot_console is true, the nbcon consoles will
+- * be printed serially along with the legacy consoles because nbcon
+- * consoles cannot print simultaneously with boot consoles.
+- */
+-#define printing_via_unlock (have_legacy_console || have_boot_console)
+-
#ifdef CONFIG_PRINTK
DECLARE_WAIT_QUEUE_HEAD(log_wait);
+
@@ -85,17 +213,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* All 3 protected by @syslog_lock. */
/* the next printk record to read by syslog(READ) or /proc/kmsg */
static u64 syslog_seq;
-@@ -2344,7 +2347,8 @@ asmlinkage int vprintk_emit(int facility
+@@ -2358,7 +2353,8 @@ asmlinkage int vprintk_emit(int facility
const struct dev_printk_info *dev_info,
const char *fmt, va_list args)
{
- bool do_trylock_unlock = printing_via_unlock;
+ bool do_trylock_unlock = printing_via_unlock &&
-+ !IS_ENABLED(CONFIG_PREEMPT_RT);
++ !force_printkthreads();
int printed_len;
/* Suppress unimportant messages after panic happens */
-@@ -2472,6 +2476,14 @@ EXPORT_SYMBOL(_printk);
+@@ -2481,6 +2477,14 @@ EXPORT_SYMBOL(_printk);
static bool pr_flush(int timeout_ms, bool reset_on_progress);
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
@@ -110,7 +238,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#else /* CONFIG_PRINTK */
#define printk_time false
-@@ -2485,6 +2497,8 @@ static u64 syslog_seq;
+@@ -2494,6 +2498,8 @@ static u64 syslog_seq;
static bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; }
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
@@ -119,7 +247,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* CONFIG_PRINTK */
#ifdef CONFIG_EARLY_PRINTK
-@@ -2730,6 +2744,8 @@ void resume_console(void)
+@@ -2739,6 +2745,8 @@ void resume_console(void)
}
console_srcu_read_unlock(cookie);
@@ -128,17 +256,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_flush(1000, true);
}
-@@ -2744,7 +2760,8 @@ void resume_console(void)
+@@ -2753,7 +2761,8 @@ void resume_console(void)
*/
static int console_cpu_notify(unsigned int cpu)
{
- if (!cpuhp_tasks_frozen && printing_via_unlock) {
+ if (!cpuhp_tasks_frozen && printing_via_unlock &&
-+ !IS_ENABLED(CONFIG_PREEMPT_RT)) {
++ !force_printkthreads()) {
/* If trylock fails, someone else is doing the printing */
if (console_trylock())
console_unlock();
-@@ -2969,31 +2986,43 @@ static bool console_emit_next_record(str
+@@ -3013,31 +3022,43 @@ static bool console_emit_next_record(str
con->dropped = 0;
}
@@ -155,18 +283,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- printk_safe_enter_irqsave(flags);
- console_lock_spinning_enable();
+ /* Write everything out to the hardware. */
-
-- /* Do not trace print latency. */
-- stop_critical_timings();
-+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
++
++ if (force_printkthreads()) {
+ /*
-+ * On PREEMPT_RT this function is either in a thread or
-+ * panic context. So there is no need for concern about
++ * With forced threading this function is either in a thread
++ * or panic context. So there is no need for concern about
+ * printk reentrance or handovers.
+ */
-- /* Write everything out to the hardware. */
-- con->write(con, outbuf, pmsg.outbuf_len);
+- /* Do not trace print latency. */
+- stop_critical_timings();
+ con->write(con, outbuf, pmsg.outbuf_len);
+ con->seq = pmsg.seq + 1;
+ } else {
@@ -183,17 +309,19 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ printk_safe_enter_irqsave(flags);
+ console_lock_spinning_enable();
-- start_critical_timings();
+- /* Write everything out to the hardware. */
+- con->write(con, outbuf, pmsg.outbuf_len);
+ /* Do not trace print latency. */
+ stop_critical_timings();
-- con->seq = pmsg.seq + 1;
+- start_critical_timings();
+ con->write(con, outbuf, pmsg.outbuf_len);
+- con->seq = pmsg.seq + 1;
++ start_critical_timings();
+
- *handover = console_lock_spinning_disable_and_check(cookie);
- printk_safe_exit_irqrestore(flags);
-+ start_critical_timings();
-+
+ con->seq = pmsg.seq + 1;
+
+ *handover = console_lock_spinning_disable_and_check(cookie);
@@ -202,7 +330,23 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
skip:
return true;
}
-@@ -3103,19 +3132,7 @@ static bool console_flush_all(bool do_co
+@@ -3101,12 +3122,13 @@ static bool console_flush_all(bool do_co
+ if ((flags & CON_NBCON) && con->kthread)
+ continue;
+
+- if (!console_is_usable(con, flags, true))
++ if (!console_is_usable(con, flags, !do_cond_resched))
+ continue;
+ any_usable = true;
+
+ if (flags & CON_NBCON) {
+- progress = nbcon_legacy_emit_next_record(con, handover, cookie);
++ progress = nbcon_legacy_emit_next_record(con, handover, cookie,
++ !do_cond_resched);
+ printk_seq = nbcon_seq_read(con);
+ } else {
+ progress = console_emit_next_record(con, handover, cookie);
+@@ -3145,19 +3167,7 @@ static bool console_flush_all(bool do_co
return false;
}
@@ -223,7 +367,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
bool do_cond_resched;
bool handover;
-@@ -3159,6 +3176,32 @@ void console_unlock(void)
+@@ -3201,6 +3211,32 @@ void console_unlock(void)
*/
} while (prb_read_valid(prb, next_seq, NULL) && console_trylock());
}
@@ -243,10 +387,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+void console_unlock(void)
+{
+ /*
-+ * PREEMPT_RT relies on kthread and atomic consoles for printing.
-+ * It never attempts to print from console_unlock().
++ * Forced threading relies on kthread and atomic consoles for
++ * printing. It never attempts to print from console_unlock().
+ */
-+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
++ if (force_printkthreads()) {
+ __console_unlock();
+ return;
+ }
@@ -256,7 +400,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
EXPORT_SYMBOL(console_unlock);
/**
-@@ -3368,11 +3411,106 @@ void console_start(struct console *conso
+@@ -3410,11 +3446,106 @@ void console_start(struct console *conso
if (flags & CON_NBCON)
nbcon_kthread_wake(console);
@@ -338,7 +482,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+ lockdep_assert_held(&console_mutex);
+
-+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ if (!force_printkthreads())
+ return;
+
+ if (!printk_threads_enabled || nbcon_legacy_kthread)
@@ -363,15 +507,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static int __read_mostly keep_bootcon;
static int __init keep_bootcon_setup(char *str)
-@@ -3639,6 +3777,7 @@ void register_console(struct console *ne
- nbcon_init(newcon);
+@@ -3691,6 +3822,7 @@ void register_console(struct console *ne
+ newcon->seq = 0;
} else {
have_legacy_console = true;
+ nbcon_legacy_kthread_create();
}
if (newcon->flags & CON_BOOT)
-@@ -3777,6 +3916,13 @@ static int unregister_console_locked(str
+@@ -3846,6 +3978,13 @@ static int unregister_console_locked(str
nbcon_kthread_create(c);
}
@@ -385,7 +529,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return res;
}
-@@ -3936,8 +4082,12 @@ static bool __pr_flush(struct console *c
+@@ -4004,8 +4143,12 @@ static bool __pr_flush(struct console *c
seq = prb_next_reserve_seq(prb);
@@ -396,19 +540,19 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ * Otherwise this function will just wait for the threaded printers
+ * to print up to @seq.
+ */
-+ if (printing_via_unlock && !IS_ENABLED(CONFIG_PREEMPT_RT)) {
++ if (printing_via_unlock && !force_printkthreads()) {
console_lock();
console_unlock();
}
-@@ -4045,9 +4195,16 @@ static void wake_up_klogd_work_func(stru
+@@ -4119,9 +4262,16 @@ static void wake_up_klogd_work_func(stru
int pending = this_cpu_xchg(printk_pending, 0);
if (pending & PRINTK_PENDING_OUTPUT) {
- /* If trylock fails, someone else is doing the printing */
- if (console_trylock())
- console_unlock();
-+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
-+ wake_up_interruptible(&legacy_wait);
++ if (force_printkthreads()) {
++ wake_up_legacy_kthread();
+ } else {
+ /*
+ * If trylock fails, some other context