diff --git a/debian/patches-rt/0018-printk-nbcon-Provide-function-to-flush-using-write_a.patch b/debian/patches-rt/0018-printk-nbcon-Provide-function-to-flush-using-write_a.patch
new file mode 100644
index 0000000000..b42d8623bb
--- /dev/null
+++ b/debian/patches-rt/0018-printk-nbcon-Provide-function-to-flush-using-write_a.patch
@@ -0,0 +1,260 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 12 Sep 2023 12:00:08 +0000
+Subject: [PATCH 18/46] printk: nbcon: Provide function to flush using
+ write_atomic()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+
+Provide nbcon_atomic_flush_pending() to perform flushing of all
+registered nbcon consoles using their write_atomic() callback.
+
+Unlike console_flush_all(), nbcon_atomic_flush_pending() will
+only flush up through the newest record at the time of the
+call. This prevents a CPU from printing unbounded when other
+CPUs are adding records. If new records are added while
+flushing, it is expected that the dedicated printer threads
+will print those records. If the printer thread is not
+available (which is always the case at this point in the
+rework), nbcon_atomic_flush_pending() _will_ flush all records
+in the ringbuffer.
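+
+As a minimal sketch (using only the helpers introduced below), the
+bounded flush amounts to snapshotting the newest sequence number
+before printing:
+
+	/*
+	 * Snapshot the newest reserved record; anything added later
+	 * is deliberately left for the printer threads.
+	 */
+	u64 stop_seq = prb_next_reserve_seq(prb);
+
+	__nbcon_atomic_flush_pending(stop_seq);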
+
+Unlike console_flush_all(), nbcon_atomic_flush_pending() will
+fully flush one console before flushing the next. This helps to
+guarantee that a block of pending records (such as a stack
+trace in an emergency situation) can be printed atomically at
+once before releasing console ownership.
+
+nbcon_atomic_flush_pending() is safe in any context because it
+uses write_atomic() and acquires with unsafe_takeover disabled.
+
+Use it in console_flush_on_panic() before flushing legacy
+consoles. The legacy write() callbacks are not fully safe when
+oops_in_progress is set.
+
+Also use it in nbcon_driver_release() to flush records added
+while the driver had the console locked to perform non-printing
+operations.
+
+Co-developed-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Thomas Gleixner (Intel) <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/internal.h | 2
+ kernel/printk/nbcon.c | 162 ++++++++++++++++++++++++++++++++++++++++++++++-
+ kernel/printk/printk.c | 2
+ 3 files changed, 163 insertions(+), 3 deletions(-)
+
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -85,6 +85,7 @@ bool nbcon_alloc(struct console *con);
+ void nbcon_init(struct console *con, u64 init_seq);
+ void nbcon_free(struct console *con);
+ enum nbcon_prio nbcon_get_default_prio(void);
++void nbcon_atomic_flush_pending(void);
+
+ /*
+ * Check if the given console is currently capable and allowed to print
+@@ -140,6 +141,7 @@ static inline bool nbcon_alloc(struct co
+ static inline void nbcon_init(struct console *con, u64 init_seq) { }
+ static inline void nbcon_free(struct console *con) { }
+ static inline enum nbcon_prio nbcon_get_default_prio(void) { return NBCON_PRIO_NONE; }
++static inline void nbcon_atomic_flush_pending(void) { }
+
+ static inline bool console_is_usable(struct console *con, short flags) { return false; }
+
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -850,7 +850,6 @@ EXPORT_SYMBOL_GPL(nbcon_exit_unsafe);
+ * When true is returned, @wctxt->ctxt.backlog indicates whether there are
+ * still records pending in the ringbuffer.
+ */
+-__maybe_unused
+ static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt)
+ {
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+@@ -953,6 +952,148 @@ enum nbcon_prio nbcon_get_default_prio(v
+ return NBCON_PRIO_NORMAL;
+ }
+
++/*
++ * __nbcon_atomic_flush_pending_con - Flush specified nbcon console using its
++ * write_atomic() callback
++ * @con: The nbcon console to flush
++ * @stop_seq: Flush up until this record
++ *
++ * Return: 0 if @con was flushed up to @stop_seq. Otherwise, error code on
++ * failure.
++ *
++ * Errors:
++ *
++ * -EPERM: Unable to acquire console ownership.
++ *
++ * -EAGAIN: Another context took over ownership while printing.
++ *
++ * -ENOENT: A record before @stop_seq is not available.
++ *
++ * If flushing up to @stop_seq was not successful, it only makes sense for the
++ * caller to try again when -EAGAIN was returned. When -EPERM is returned,
++ * this context is not allowed to acquire the console. When -ENOENT is
++ * returned, it cannot be expected that the unfinalized record will become
++ * available.
++ */
++static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
++{
++ struct nbcon_write_context wctxt = { };
++ struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
++ int err = 0;
++
++ ctxt->console = con;
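++ /* Allow spin-waiting up to 2ms for a handover of ownership. */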
++ ctxt->spinwait_max_us = 2000;
++ ctxt->prio = nbcon_get_default_prio();
++
++ if (!nbcon_context_try_acquire(ctxt))
++ return -EPERM;
++
++ while (nbcon_seq_read(con) < stop_seq) {
++ /*
++ * nbcon_emit_next_record() returns false when the console was
++ * handed over or taken over. In both cases the context is no
++ * longer valid.
++ */
++ if (!nbcon_emit_next_record(&wctxt))
++ return -EAGAIN;
++
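++ /*
++ * No records are left to print. If stop_seq was not reached,
++ * a record before it is not (yet) available.
++ */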
++ if (!ctxt->backlog) {
++ if (nbcon_seq_read(con) < stop_seq)
++ err = -ENOENT;
++ break;
++ }
++ }
++
++ nbcon_context_release(ctxt);
++ return err;
++}
++
++/**
++ * nbcon_atomic_flush_pending_con - Flush specified nbcon console using its
++ * write_atomic() callback
++ * @con: The nbcon console to flush
++ * @stop_seq: Flush up until this record
++ *
++ * This will stop flushing before @stop_seq if another context has ownership.
++ * That context is then responsible for the flushing. Likewise, if new records
++ * are added while this context is flushing and there is no other context
++ * to handle the printing, this context must also flush those records.
++ */
++static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
++{
++ unsigned long flags;
++ int err;
++
++again:
++ /*
++ * Atomic flushing does not use console driver synchronization (i.e.
++ * it does not hold the port lock for uart consoles). Therefore IRQs
++ * must be disabled to avoid being interrupted and then calling into
++ * a driver that will deadlock trying to acquire console ownership.
++ */
++ local_irq_save(flags);
++
++ err = __nbcon_atomic_flush_pending_con(con, stop_seq);
++
++ local_irq_restore(flags);
++
++ /*
++ * If flushing was successful but more records are available, this
++ * context must flush those remaining records because there is no
++ * other context that will do it.
++ */
++ if (!err && prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
++ stop_seq = prb_next_reserve_seq(prb);
++ goto again;
++ }
++
++ /*
++ * If there was a new owner, that context is responsible for
++ * completing the flush.
++ */
++}
++
++/**
++ * __nbcon_atomic_flush_pending - Flush all nbcon consoles using their
++ * write_atomic() callback
++ * @stop_seq: Flush up until this record
++ */
++static void __nbcon_atomic_flush_pending(u64 stop_seq)
++{
++ struct console *con;
++ int cookie;
++
++ cookie = console_srcu_read_lock();
++ for_each_console_srcu(con) {
++ short flags = console_srcu_read_flags(con);
++
++ if (!(flags & CON_NBCON))
++ continue;
++
++ if (!console_is_usable(con, flags))
++ continue;
++
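++ /* Skip consoles that are already past @stop_seq. */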
++ if (nbcon_seq_read(con) >= stop_seq)
++ continue;
++
++ nbcon_atomic_flush_pending_con(con, stop_seq);
++ }
++ console_srcu_read_unlock(cookie);
++}
++
++/**
++ * nbcon_atomic_flush_pending - Flush all nbcon consoles using their
++ * write_atomic() callback
++ *
++ * Flush the backlog up through the currently newest record. Any new
++ * records added while flushing will not be flushed. This is to avoid
++ * one CPU printing unbounded because other CPUs continue to add records.
++ */
++void nbcon_atomic_flush_pending(void)
++{
++ __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb));
++}
++
+ /**
+ * nbcon_alloc - Allocate buffers needed by the nbcon console
+ * @con: Console to allocate buffers for
+@@ -1064,8 +1205,23 @@ EXPORT_SYMBOL_GPL(nbcon_driver_try_acqui
+ void nbcon_driver_release(struct console *con)
+ {
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(con, nbcon_driver_ctxt);
++ int cookie;
+
+- if (nbcon_context_exit_unsafe(ctxt))
+- nbcon_context_release(ctxt);
++ if (!nbcon_context_exit_unsafe(ctxt))
++ return;
++
++ nbcon_context_release(ctxt);
++
++ /*
++ * This context must flush any new records added while the console
++ * was locked. The console_srcu_read_lock must be taken to ensure
++ * the console is usable throughout flushing.
++ */
++ cookie = console_srcu_read_lock();
++ if (console_is_usable(con, console_srcu_read_flags(con)) &&
++ prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
++ __nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb));
++ }
++ console_srcu_read_unlock(cookie);
+ }
+ EXPORT_SYMBOL_GPL(nbcon_driver_release);
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -3172,6 +3172,8 @@ void console_flush_on_panic(enum con_flu
+ console_srcu_read_unlock(cookie);
+ }
+
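++ /*
++ * Flush the nbcon consoles via their write_atomic() callbacks
++ * first; the legacy write() callbacks used below are not fully
++ * safe when oops_in_progress is set.
++ */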
++ nbcon_atomic_flush_pending();
++
+ console_flush_all(false, &next_seq, &handover);
+ }
+