Diffstat (limited to 'debian/patches-rt/0020-printk-nbcon-Provide-function-to-flush-using-write_a.patch')
-rw-r--r--  debian/patches-rt/0020-printk-nbcon-Provide-function-to-flush-using-write_a.patch  |  193
1 file changed, 193 insertions, 0 deletions
diff --git a/debian/patches-rt/0020-printk-nbcon-Provide-function-to-flush-using-write_a.patch b/debian/patches-rt/0020-printk-nbcon-Provide-function-to-flush-using-write_a.patch
new file mode 100644
index 0000000000..26c68ed229
--- /dev/null
+++ b/debian/patches-rt/0020-printk-nbcon-Provide-function-to-flush-using-write_a.patch
@@ -0,0 +1,193 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 12 Sep 2023 12:00:08 +0000
+Subject: [PATCH 20/48] printk: nbcon: Provide function to flush using
+ write_atomic()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+
+Provide nbcon_atomic_flush_pending() to perform flushing of all
+registered nbcon consoles using their write_atomic() callback.
+
+Unlike console_flush_all(), nbcon_atomic_flush_pending() will
+only flush up through the newest record at the time of the
+call. This prevents a CPU from printing without bound when other
+CPUs are adding records.
+
+Also unlike console_flush_all(), nbcon_atomic_flush_pending()
+will fully flush one console before flushing the next. This
+helps to guarantee that a block of pending records (such as
+a stack trace in an emergency situation) can be printed
+atomically at once before releasing console ownership.
+
+nbcon_atomic_flush_pending() is safe in any context because it
+uses write_atomic() and acquires with unsafe_takeover disabled.
+
+Use it in console_flush_on_panic() before flushing legacy
+consoles. The legacy write() callbacks are not fully safe when
+oops_in_progress is set.
+
+Co-developed-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Thomas Gleixner (Intel) <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/internal.h | 2
+ kernel/printk/nbcon.c | 104 ++++++++++++++++++++++++++++++++++++++++++++++-
+ kernel/printk/printk.c | 2
+ 3 files changed, 106 insertions(+), 2 deletions(-)
+
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -84,6 +84,7 @@ void nbcon_seq_force(struct console *con
+ bool nbcon_alloc(struct console *con);
+ void nbcon_init(struct console *con);
+ void nbcon_free(struct console *con);
++void nbcon_atomic_flush_pending(void);
+
+ /*
+ * Check if the given console is currently capable and allowed to print
+@@ -138,6 +139,7 @@ static inline void nbcon_seq_force(struc
+ static inline bool nbcon_alloc(struct console *con) { return false; }
+ static inline void nbcon_init(struct console *con) { }
+ static inline void nbcon_free(struct console *con) { }
++static inline void nbcon_atomic_flush_pending(void) { }
+
+ static inline bool console_is_usable(struct console *con, short flags) { return false; }
+
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -548,7 +548,6 @@ static struct printk_buffers panic_nbcon
+ * in an unsafe state. Otherwise, on success the caller may assume
+ * the console is not in an unsafe state.
+ */
+-__maybe_unused
+ static bool nbcon_context_try_acquire(struct nbcon_context *ctxt)
+ {
+ unsigned int cpu = smp_processor_id();
+@@ -850,7 +849,6 @@ EXPORT_SYMBOL_GPL(nbcon_exit_unsafe);
+ * When true is returned, @wctxt->ctxt.backlog indicates whether there are
+ * still records pending in the ringbuffer,
+ */
+-__maybe_unused
+ static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt)
+ {
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+@@ -938,6 +936,108 @@ static bool nbcon_emit_next_record(struc
+ }
+
+ /**
++ * __nbcon_atomic_flush_pending_con - Flush specified nbcon console using its
++ * write_atomic() callback
++ * @con: The nbcon console to flush
++ * @stop_seq: Flush up until this record
++ *
++ * Return: True if taken over while printing. Otherwise false.
++ *
++ * If flushing up to @stop_seq was not successful, it only makes sense for the
++ * caller to try again when true was returned. When false is returned, either
++ * there are no more records available to read or this context is not allowed
++ * to acquire the console.
++ */
++static bool __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
++{
++ struct nbcon_write_context wctxt = { };
++ struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
++
++ ctxt->console = con;
++ ctxt->spinwait_max_us = 2000;
++ ctxt->prio = NBCON_PRIO_NORMAL;
++
++ if (!nbcon_context_try_acquire(ctxt))
++ return false;
++
++ while (nbcon_seq_read(con) < stop_seq) {
++ /*
++ * nbcon_emit_next_record() returns false when the console was
++ * handed over or taken over. In both cases the context is no
++ * longer valid.
++ */
++ if (!nbcon_emit_next_record(&wctxt))
++ return true;
++
++ if (!ctxt->backlog)
++ break;
++ }
++
++ nbcon_context_release(ctxt);
++
++ return false;
++}
++
++/**
++ * __nbcon_atomic_flush_pending - Flush all nbcon consoles using their
++ * write_atomic() callback
++ * @stop_seq: Flush up until this record
++ */
++static void __nbcon_atomic_flush_pending(u64 stop_seq)
++{
++ struct console *con;
++ bool should_retry;
++ int cookie;
++
++ do {
++ should_retry = false;
++
++ cookie = console_srcu_read_lock();
++ for_each_console_srcu(con) {
++ short flags = console_srcu_read_flags(con);
++ unsigned long irq_flags;
++
++ if (!(flags & CON_NBCON))
++ continue;
++
++ if (!console_is_usable(con, flags))
++ continue;
++
++ if (nbcon_seq_read(con) >= stop_seq)
++ continue;
++
++ /*
++ * Atomic flushing does not use console driver
++ * synchronization (i.e. it does not hold the port
++ * lock for uart consoles). Therefore IRQs must be
++ * disabled to avoid being interrupted and then
++ * calling into a driver that will deadlock trying
++ * to acquire console ownership.
++ */
++ local_irq_save(irq_flags);
++
++ should_retry |= __nbcon_atomic_flush_pending_con(con, stop_seq);
++
++ local_irq_restore(irq_flags);
++ }
++ console_srcu_read_unlock(cookie);
++ } while (should_retry);
++}
++
++/**
++ * nbcon_atomic_flush_pending - Flush all nbcon consoles using their
++ * write_atomic() callback
++ *
++ * Flush the backlog up through the currently newest record. Any new
++ * records added while flushing will not be flushed. This is to avoid
++ * one CPU printing unbounded because other CPUs continue to add records.
++ */
++void nbcon_atomic_flush_pending(void)
++{
++ __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb));
++}
++
++/**
+ * nbcon_alloc - Allocate buffers needed by the nbcon console
+ * @con: Console to allocate buffers for
+ *
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -3177,6 +3177,8 @@ void console_flush_on_panic(enum con_flu
+ console_srcu_read_unlock(cookie);
+ }
+
++ nbcon_atomic_flush_pending();
++
+ console_flush_all(false, &next_seq, &handover);
+ }
+
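The sketch below is not part of the patch; it only illustrates the ordering that the
final hunk establishes in console_flush_on_panic(): the nbcon consoles are flushed
first through their write_atomic() callbacks, bounded to the records that already
exist at the time of the call, and only afterwards is the legacy best-effort flush
attempted. All identifiers come from the diff above; the local declarations are
repeated here just to keep the fragment self-contained.

    /* Sketch: the flush ordering added to console_flush_on_panic(). */
    u64 next_seq;
    bool handover;

    /*
     * Safe in panic context: uses write_atomic() and acquires console
     * ownership with unsafe_takeover disabled.
     */
    nbcon_atomic_flush_pending();

    /*
     * Legacy consoles still go through the existing write() path, whose
     * callbacks are not fully safe while oops_in_progress is set.
     */
    console_flush_all(false, &next_seq, &handover);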