summaryrefslogtreecommitdiffstats
path: root/debian/patches-rt
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-11 08:28:00 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-11 08:28:00 +0000
commit3565071f226432336a54d0193d729fa4508a3394 (patch)
tree4cde13f078f84c0a7785d234fd52edce7c90546a /debian/patches-rt
parentAdding upstream version 6.6.15. (diff)
downloadlinux-3565071f226432336a54d0193d729fa4508a3394.tar.xz
linux-3565071f226432336a54d0193d729fa4508a3394.zip
Adding debian version 6.6.15-2.debian/6.6.15-2
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r--debian/patches-rt/0001-ARM-vfp-Provide-vfp_lock-for-VFP-locking.patch75
-rw-r--r--debian/patches-rt/0001-arm-Disable-jump-label-on-PREEMPT_RT.patch36
-rw-r--r--debian/patches-rt/0001-drm-amd-display-Remove-migrate_en-dis-from-dc_fpu_be.patch86
-rw-r--r--debian/patches-rt/0001-sched-Constrain-locks-in-sched_submit_work.patch49
-rw-r--r--debian/patches-rt/0001-sched-core-Provide-a-method-to-check-if-a-task-is-PI.patch54
-rw-r--r--debian/patches-rt/0001-signal-Add-proper-comment-about-the-preempt-disable-.patch47
-rw-r--r--debian/patches-rt/0002-ARM-vfp-Use-vfp_lock-in-vfp_sync_hwstate.patch44
-rw-r--r--debian/patches-rt/0002-drm-amd-display-Simplify-the-per-CPU-usage.patch127
-rw-r--r--debian/patches-rt/0002-locking-rtmutex-Avoid-unconditional-slowpath-for-DEB.patch79
-rw-r--r--debian/patches-rt/0002-serial-core-Use-lock-wrappers.patch93
-rw-r--r--debian/patches-rt/0002-signal-Don-t-disable-preemption-in-ptrace_stop-on-PR.patch48
-rw-r--r--debian/patches-rt/0002-softirq-Add-function-to-preempt-serving-softirqs.patch60
-rw-r--r--debian/patches-rt/0003-ARM-vfp-Use-vfp_lock-in-vfp_support_entry.patch48
-rw-r--r--debian/patches-rt/0003-drm-amd-display-Add-a-warning-if-the-FPU-is-used-out.patch26
-rw-r--r--debian/patches-rt/0003-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch56
-rw-r--r--debian/patches-rt/0003-sched-Extract-__schedule_loop.patch58
-rw-r--r--debian/patches-rt/0003-serial-21285-Use-port-lock-wrappers.patch75
-rw-r--r--debian/patches-rt/0003-time-Allow-to-preempt-after-a-callback.patch47
-rw-r--r--debian/patches-rt/0004-ARM-vfp-Move-sending-signals-outside-of-vfp_lock-ed-.patch121
-rw-r--r--debian/patches-rt/0004-drm-amd-display-Move-the-memory-allocation-out-of-dc.patch87
-rw-r--r--debian/patches-rt/0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch85
-rw-r--r--debian/patches-rt/0004-sched-Provide-rt_mutex-specific-scheduler-helpers.patch128
-rw-r--r--debian/patches-rt/0004-serial-8250_aspeed_vuart-Use-port-lock-wrappers.patch61
-rw-r--r--debian/patches-rt/0005-drm-amd-display-Move-the-memory-allocation-out-of-dc.patch121
-rw-r--r--debian/patches-rt/0005-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch30
-rw-r--r--debian/patches-rt/0005-locking-rtmutex-Use-rt_mutex-specific-scheduler-help.patch177
-rw-r--r--debian/patches-rt/0005-serial-8250_bcm7271-Use-port-lock-wrappers.patch151
-rw-r--r--debian/patches-rt/0006-drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch45
-rw-r--r--debian/patches-rt/0006-locking-rtmutex-Add-a-lockdep-assert-to-catch-potent.patch57
-rw-r--r--debian/patches-rt/0006-serial-8250-Use-port-lock-wrappers.patch465
-rw-r--r--debian/patches-rt/0007-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch29
-rw-r--r--debian/patches-rt/0007-futex-pi-Fix-recursive-rt_mutex-waiter-state.patch198
-rw-r--r--debian/patches-rt/0007-serial-8250_dma-Use-port-lock-wrappers.patch80
-rw-r--r--debian/patches-rt/0008-drm-i915-gt-Queue-and-wait-for-the-irq_work-item.patch42
-rw-r--r--debian/patches-rt/0008-serial-8250_dw-Use-port-lock-wrappers.patch69
-rw-r--r--debian/patches-rt/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch89
-rw-r--r--debian/patches-rt/0009-serial-8250_exar-Use-port-lock-wrappers.patch52
-rw-r--r--debian/patches-rt/0010-drm-i915-Drop-the-irqs_disabled-check.patch39
-rw-r--r--debian/patches-rt/0010-serial-8250_fsl-Use-port-lock-wrappers.patch63
-rw-r--r--debian/patches-rt/0011-serial-8250_mtk-Use-port-lock-wrappers.patch77
-rw-r--r--debian/patches-rt/0012-serial-8250_omap-Use-port-lock-wrappers.patch236
-rw-r--r--debian/patches-rt/0013-serial-8250_pci1xxxx-Use-port-lock-wrappers.patch66
-rw-r--r--debian/patches-rt/0014-serial-altera_jtaguart-Use-port-lock-wrappers.patch133
-rw-r--r--debian/patches-rt/0015-serial-altera_uart-Use-port-lock-wrappers.patch116
-rw-r--r--debian/patches-rt/0016-serial-amba-pl010-Use-port-lock-wrappers.patch112
-rw-r--r--debian/patches-rt/0017-serial-amba-pl011-Use-port-lock-wrappers.patch327
-rw-r--r--debian/patches-rt/0018-serial-apb-Use-port-lock-wrappers.patch76
-rw-r--r--debian/patches-rt/0019-serial-ar933x-Use-port-lock-wrappers.patch144
-rw-r--r--debian/patches-rt/0020-serial-arc_uart-Use-port-lock-wrappers.patch97
-rw-r--r--debian/patches-rt/0021-serial-atmel-Use-port-lock-wrappers.patch119
-rw-r--r--debian/patches-rt/0022-serial-bcm63xx-uart-Use-port-lock-wrappers.patch128
-rw-r--r--debian/patches-rt/0023-serial-cpm_uart-Use-port-lock-wrappers.patch70
-rw-r--r--debian/patches-rt/0024-serial-digicolor-Use-port-lock-wrappers.patch113
-rw-r--r--debian/patches-rt/0025-serial-dz-Use-port-lock-wrappers.patch161
-rw-r--r--debian/patches-rt/0026-serial-linflexuart-Use-port-lock-wrappers.patch143
-rw-r--r--debian/patches-rt/0027-serial-fsl_lpuart-Use-port-lock-wrappers.patch389
-rw-r--r--debian/patches-rt/0028-serial-icom-Use-port-lock-wrappers.patch151
-rw-r--r--debian/patches-rt/0029-serial-imx-Use-port-lock-wrappers.patch354
-rw-r--r--debian/patches-rt/0030-serial-ip22zilog-Use-port-lock-wrappers.patch185
-rw-r--r--debian/patches-rt/0031-serial-jsm-Use-port-lock-wrappers.patch124
-rw-r--r--debian/patches-rt/0032-serial-liteuart-Use-port-lock-wrappers.patch110
-rw-r--r--debian/patches-rt/0033-serial-lpc32xx_hs-Use-port-lock-wrappers.patch148
-rw-r--r--debian/patches-rt/0034-serial-ma35d1-Use-port-lock-wrappers.patch117
-rw-r--r--debian/patches-rt/0035-serial-mcf-Use-port-lock-wrappers.patch127
-rw-r--r--debian/patches-rt/0036-serial-men_z135_uart-Use-port-lock-wrappers.patch76
-rw-r--r--debian/patches-rt/0037-serial-meson-Use-port-lock-wrappers.patch168
-rw-r--r--debian/patches-rt/0038-serial-milbeaut_usio-Use-port-lock-wrappers.patch101
-rw-r--r--debian/patches-rt/0039-serial-mpc52xx-Use-port-lock-wrappers.patch89
-rw-r--r--debian/patches-rt/0040-serial-mps2-uart-Use-port-lock-wrappers.patch103
-rw-r--r--debian/patches-rt/0041-serial-msm-Use-port-lock-wrappers.patch185
-rw-r--r--debian/patches-rt/0042-serial-mvebu-uart-Use-port-lock-wrappers.patch108
-rw-r--r--debian/patches-rt/0043-serial-omap-Use-port-lock-wrappers.patch180
-rw-r--r--debian/patches-rt/0044-serial-owl-Use-port-lock-wrappers.patch147
-rw-r--r--debian/patches-rt/0045-serial-pch-Use-port-lock-wrappers.patch80
-rw-r--r--debian/patches-rt/0046-serial-pic32-Use-port-lock-wrappers.patch118
-rw-r--r--debian/patches-rt/0047-serial-pmac_zilog-Use-port-lock-wrappers.patch232
-rw-r--r--debian/patches-rt/0048-serial-pxa-Use-port-lock-wrappers.patch150
-rw-r--r--debian/patches-rt/0049-serial-qcom-geni-Use-port-lock-wrappers.patch71
-rw-r--r--debian/patches-rt/0050-serial-rda-Use-port-lock-wrappers.patch177
-rw-r--r--debian/patches-rt/0051-serial-rp2-Use-port-lock-wrappers.patch114
-rw-r--r--debian/patches-rt/0052-serial-sa1100-Use-port-lock-wrappers.patch117
-rw-r--r--debian/patches-rt/0053-serial-samsung_tty-Use-port-lock-wrappers.patch245
-rw-r--r--debian/patches-rt/0054-serial-sb1250-duart-Use-port-lock-wrappers.patch85
-rw-r--r--debian/patches-rt/0056-serial-tegra-Use-port-lock-wrappers.patch176
-rw-r--r--debian/patches-rt/0057-serial-core-Use-port-lock-wrappers.patch365
-rw-r--r--debian/patches-rt/0058-serial-mctrl_gpio-Use-port-lock-wrappers.patch58
-rw-r--r--debian/patches-rt/0059-serial-txx9-Use-port-lock-wrappers.patch134
-rw-r--r--debian/patches-rt/0060-serial-sh-sci-Use-port-lock-wrappers.patch302
-rw-r--r--debian/patches-rt/0061-serial-sifive-Use-port-lock-wrappers.patch102
-rw-r--r--debian/patches-rt/0062-serial-sprd-Use-port-lock-wrappers.patch162
-rw-r--r--debian/patches-rt/0063-serial-st-asc-Use-port-lock-wrappers.patch110
-rw-r--r--debian/patches-rt/0064-serial-stm32-Use-port-lock-wrappers.patch184
-rw-r--r--debian/patches-rt/0065-serial-sunhv-Use-port-lock-wrappers.patch149
-rw-r--r--debian/patches-rt/0066-serial-sunplus-uart-Use-port-lock-wrappers.patch146
-rw-r--r--debian/patches-rt/0067-serial-sunsab-Use-port-lock-wrappers.patch176
-rw-r--r--debian/patches-rt/0068-serial-sunsu-Use-port-lock-wrappers.patch219
-rw-r--r--debian/patches-rt/0069-serial-sunzilog-Use-port-lock-wrappers.patch211
-rw-r--r--debian/patches-rt/0070-serial-timbuart-Use-port-lock-wrappers.patch71
-rw-r--r--debian/patches-rt/0071-serial-uartlite-Use-port-lock-wrappers.patch105
-rw-r--r--debian/patches-rt/0072-serial-ucc_uart-Use-port-lock-wrappers.patch59
-rw-r--r--debian/patches-rt/0073-serial-vt8500-Use-port-lock-wrappers.patch76
-rw-r--r--debian/patches-rt/0074-serial-xilinx_uartps-Use-port-lock-wrappers.patch276
-rw-r--r--debian/patches-rt/0075-printk-Add-non-BKL-nbcon-console-basic-infrastructur.patch261
-rw-r--r--debian/patches-rt/0076-printk-nbcon-Add-acquire-release-logic.patch705
-rw-r--r--debian/patches-rt/0077-printk-Make-static-printk-buffers-available-to-nbcon.patch65
-rw-r--r--debian/patches-rt/0078-printk-nbcon-Add-buffer-management.patch311
-rw-r--r--debian/patches-rt/0079-printk-nbcon-Add-ownership-state-functions.patch179
-rw-r--r--debian/patches-rt/0080-printk-nbcon-Add-sequence-handling.patch311
-rw-r--r--debian/patches-rt/0081-printk-nbcon-Add-emit-function-and-callback-function.patch262
-rw-r--r--debian/patches-rt/0082-printk-nbcon-Allow-drivers-to-mark-unsafe-regions-an.patch136
-rw-r--r--debian/patches-rt/0083-printk-fix-illegal-pbufs-access-for-CONFIG_PRINTK.patch134
-rw-r--r--debian/patches-rt/0084-printk-Reduce-pr_flush-pooling-time.patch102
-rw-r--r--debian/patches-rt/0085-printk-nbcon-Relocate-32bit-seq-macros.patch141
-rw-r--r--debian/patches-rt/0086-printk-Adjust-mapping-for-32bit-seq-macros.patch71
-rw-r--r--debian/patches-rt/0087-printk-Use-prb_first_seq-as-base-for-32bit-seq-macro.patch71
-rw-r--r--debian/patches-rt/0088-printk-ringbuffer-Do-not-skip-non-finalized-records-.patch304
-rw-r--r--debian/patches-rt/0089-printk-ringbuffer-Clarify-special-lpos-values.patch92
-rw-r--r--debian/patches-rt/0090-printk-For-suppress_panic_printk-check-for-other-CPU.patch34
-rw-r--r--debian/patches-rt/0091-printk-Add-this_cpu_in_panic.patch88
-rw-r--r--debian/patches-rt/0092-printk-ringbuffer-Cleanup-reader-terminology.patch67
-rw-r--r--debian/patches-rt/0093-printk-Wait-for-all-reserved-records-with-pr_flush.patch170
-rw-r--r--debian/patches-rt/0094-printk-ringbuffer-Skip-non-finalized-records-in-pani.patch68
-rw-r--r--debian/patches-rt/0095-printk-ringbuffer-Consider-committed-as-finalized-in.patch62
-rw-r--r--debian/patches-rt/0096-printk-Disable-passing-console-lock-owner-completely.patch107
-rw-r--r--debian/patches-rt/0097-printk-Avoid-non-panic-CPUs-writing-to-ringbuffer.patch78
-rw-r--r--debian/patches-rt/0098-panic-Flush-kernel-log-buffer-at-the-end.patch38
-rw-r--r--debian/patches-rt/0099-printk-Consider-nbcon-boot-consoles-on-seq-init.patch51
-rw-r--r--debian/patches-rt/0100-printk-Add-sparse-notation-to-console_srcu-locking.patch36
-rw-r--r--debian/patches-rt/0101-printk-nbcon-Ensure-ownership-release-on-failed-emit.patch58
-rw-r--r--debian/patches-rt/0102-printk-Check-printk_deferred_enter-_exit-usage.patch58
-rw-r--r--debian/patches-rt/0103-printk-nbcon-Implement-processing-in-port-lock-wrapp.patch244
-rw-r--r--debian/patches-rt/0104-printk-nbcon-Add-driver_enter-driver_exit-console-ca.patch42
-rw-r--r--debian/patches-rt/0105-printk-Make-console_is_usable-available-to-nbcon.patch103
-rw-r--r--debian/patches-rt/0106-printk-Let-console_is_usable-handle-nbcon.patch43
-rw-r--r--debian/patches-rt/0107-printk-Add-flags-argument-for-console_is_usable.patch67
-rw-r--r--debian/patches-rt/0108-printk-nbcon-Provide-function-to-flush-using-write_a.patch186
-rw-r--r--debian/patches-rt/0109-printk-Track-registered-boot-consoles.patch78
-rw-r--r--debian/patches-rt/0110-printk-nbcon-Use-nbcon-consoles-in-console_flush_all.patch173
-rw-r--r--debian/patches-rt/0111-printk-nbcon-Assign-priority-based-on-CPU-state.patch113
-rw-r--r--debian/patches-rt/0112-printk-nbcon-Add-unsafe-flushing-on-panic.patch96
-rw-r--r--debian/patches-rt/0113-printk-Avoid-console_lock-dance-if-no-legacy-or-boot.patch211
-rw-r--r--debian/patches-rt/0114-printk-Track-nbcon-consoles.patch67
-rw-r--r--debian/patches-rt/0115-printk-Coordinate-direct-printing-in-panic.patch134
-rw-r--r--debian/patches-rt/0116-printk-nbcon-Implement-emergency-sections.patch232
-rw-r--r--debian/patches-rt/0117-panic-Mark-emergency-section-in-warn.patch38
-rw-r--r--debian/patches-rt/0118-panic-Mark-emergency-section-in-oops.patch39
-rw-r--r--debian/patches-rt/0119-rcu-Mark-emergency-section-in-rcu-stalls.patch45
-rw-r--r--debian/patches-rt/0120-lockdep-Mark-emergency-section-in-lockdep-splats.patch45
-rw-r--r--debian/patches-rt/0121-printk-nbcon-Introduce-printing-kthreads.patch441
-rw-r--r--debian/patches-rt/0122-printk-Atomic-print-in-printk-context-on-shutdown.patch41
-rw-r--r--debian/patches-rt/0123-printk-nbcon-Add-context-to-console_is_usable.patch111
-rw-r--r--debian/patches-rt/0124-printk-nbcon-Add-printer-thread-wakeups.patch164
-rw-r--r--debian/patches-rt/0125-printk-nbcon-Stop-threads-on-shutdown-reboot.patch60
-rw-r--r--debian/patches-rt/0126-printk-nbcon-Start-printing-threads.patch135
-rw-r--r--debian/patches-rt/0127-proc-Add-nbcon-support-for-proc-consoles.patch53
-rw-r--r--debian/patches-rt/0128-tty-sysfs-Add-nbcon-support-for-active.patch33
-rw-r--r--debian/patches-rt/0129-printk-nbcon-Provide-function-to-reacquire-ownership.patch92
-rw-r--r--debian/patches-rt/0130-serial-core-Provide-low-level-functions-to-port-lock.patch43
-rw-r--r--debian/patches-rt/0131-serial-8250-Switch-to-nbcon-console.patch335
-rw-r--r--debian/patches-rt/0132-printk-Add-kthread-for-all-legacy-consoles.patch422
-rw-r--r--debian/patches-rt/0133-serial-8250-revert-drop-lockdep-annotation-from-seri.patch29
-rw-r--r--debian/patches-rt/0134-printk-Avoid-false-positive-lockdep-report-for-legac.patch64
-rw-r--r--debian/patches-rt/ARM64__Allow_to_enable_RT.patch27
-rw-r--r--debian/patches-rt/ARM__Allow_to_enable_RT.patch35
-rw-r--r--debian/patches-rt/ARM__enable_irq_in_translation_section_permission_fault_handlers.patch91
-rw-r--r--debian/patches-rt/Add_localversion_for_-RT_release.patch19
-rw-r--r--debian/patches-rt/POWERPC__Allow_to_enable_RT.patch35
-rw-r--r--debian/patches-rt/PREEMPT_AUTO.patch779
-rw-r--r--debian/patches-rt/RISC-V-Probe-misaligned-access-speed-in-parallel.patch197
-rw-r--r--debian/patches-rt/Revert-drm-i915-Depend-on-PREEMPT_RT.patch23
-rw-r--r--debian/patches-rt/drm-i915-Do-not-disable-preemption-for-resets.patch99
-rw-r--r--debian/patches-rt/drm-i915-guc-Consider-also-RCU-depth-in-busy-loop.patch29
-rw-r--r--debian/patches-rt/net-Avoid-the-IPI-to-free-the.patch119
-rw-r--r--debian/patches-rt/powerpc-pseries-Select-the-generic-memory-allocator.patch27
-rw-r--r--debian/patches-rt/powerpc__traps__Use_PREEMPT_RT.patch38
-rw-r--r--debian/patches-rt/powerpc_kvm__Disable_in-kernel_MPIC_emulation_for_PREEMPT_RT.patch43
-rw-r--r--debian/patches-rt/powerpc_pseries_iommu__Use_a_locallock_instead_local_irq_save.patch115
-rw-r--r--debian/patches-rt/powerpc_stackprotector__work_around_stack-guard_init_from_atomic.patch37
-rw-r--r--debian/patches-rt/preempt-Put-preempt_enable-within-an-instrumentation.patch47
-rw-r--r--debian/patches-rt/rcutorture-Also-force-sched-priority-to-timersd-on-b.patch71
-rw-r--r--debian/patches-rt/riscv-add-PREEMPT_AUTO-support.patch44
-rw-r--r--debian/patches-rt/riscv-allow-to-enable-RT.patch23
-rw-r--r--debian/patches-rt/sched-rt-Don-t-try-push-tasks-if-there-are-none.patch58
-rw-r--r--debian/patches-rt/series255
-rw-r--r--debian/patches-rt/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch222
-rw-r--r--debian/patches-rt/softirq-Wake-ktimers-thread-also-in-softirq.patch44
-rw-r--r--debian/patches-rt/sysfs__Add__sys_kernel_realtime_entry.patch53
-rw-r--r--debian/patches-rt/tick-Fix-timer-storm-since-introduction-of-timersd.patch106
-rw-r--r--debian/patches-rt/tty_serial_omap__Make_the_locking_RT_aware.patch47
-rw-r--r--debian/patches-rt/tty_serial_pl011__Make_the_locking_work_on_RT.patch46
-rw-r--r--debian/patches-rt/x86__Allow_to_enable_RT.patch27
-rw-r--r--debian/patches-rt/x86__Enable_RT_also_on_32bit.patch33
-rw-r--r--debian/patches-rt/zram-Replace-bit-spinlocks-with-spinlock_t-for-PREEM.patch94
193 files changed, 23669 insertions, 0 deletions
diff --git a/debian/patches-rt/0001-ARM-vfp-Provide-vfp_lock-for-VFP-locking.patch b/debian/patches-rt/0001-ARM-vfp-Provide-vfp_lock-for-VFP-locking.patch
new file mode 100644
index 0000000000..03eb551db3
--- /dev/null
+++ b/debian/patches-rt/0001-ARM-vfp-Provide-vfp_lock-for-VFP-locking.patch
@@ -0,0 +1,75 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 19 May 2023 16:57:29 +0200
+Subject: [PATCH 1/4] ARM: vfp: Provide vfp_lock() for VFP locking.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+kernel_neon_begin() uses local_bh_disable() to ensure exclusive access
+to the VFP unit. This is broken on PREEMPT_RT because a BH disabled
+section remains preemptible on PREEMPT_RT.
+
+Introduce vfp_lock() which uses local_bh_disable() and preempt_disable()
+on PREEMPT_RT. Since softirqs are processed always in thread context,
+disabling preemption is enough to ensure that the current context won't
+get interrupted by something that is using the VFP. Use it in
+kernel_neon_begin().
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm/vfp/vfpmodule.c | 32 ++++++++++++++++++++++++++++++--
+ 1 file changed, 30 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -56,6 +56,34 @@ extern unsigned int VFP_arch_feroceon __
+ union vfp_state *vfp_current_hw_state[NR_CPUS];
+
+ /*
++ * Claim ownership of the VFP unit.
++ *
++ * The caller may change VFP registers until vfp_unlock() is called.
++ *
++ * local_bh_disable() is used to disable preemption and to disable VFP
++ * processing in softirq context. On PREEMPT_RT kernels local_bh_disable() is
++ * not sufficient because it only serializes soft interrupt related sections
++ * via a local lock, but stays preemptible. Disabling preemption is the right
++ * choice here as bottom half processing is always in thread context on RT
++ * kernels so it implicitly prevents bottom half processing as well.
++ */
++static void vfp_lock(void)
++{
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ local_bh_disable();
++ else
++ preempt_disable();
++}
++
++static void vfp_unlock(void)
++{
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ local_bh_enable();
++ else
++ preempt_enable();
++}
++
++/*
+ * Is 'thread's most up to date state stored in this CPUs hardware?
+ * Must be called from non-preemptible context.
+ */
+@@ -819,7 +847,7 @@ void kernel_neon_begin(void)
+ unsigned int cpu;
+ u32 fpexc;
+
+- local_bh_disable();
++ vfp_lock();
+
+ /*
+ * Kernel mode NEON is only allowed outside of hardirq context with
+@@ -850,7 +878,7 @@ void kernel_neon_end(void)
+ {
+ /* Disable the NEON/VFP unit. */
+ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+- local_bh_enable();
++ vfp_unlock();
+ }
+ EXPORT_SYMBOL(kernel_neon_end);
+
diff --git a/debian/patches-rt/0001-arm-Disable-jump-label-on-PREEMPT_RT.patch b/debian/patches-rt/0001-arm-Disable-jump-label-on-PREEMPT_RT.patch
new file mode 100644
index 0000000000..5c8c179aef
--- /dev/null
+++ b/debian/patches-rt/0001-arm-Disable-jump-label-on-PREEMPT_RT.patch
@@ -0,0 +1,36 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 8 Jul 2015 17:14:48 +0200
+Subject: [PATCH 1/2] arm: Disable jump-label on PREEMPT_RT.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+jump-labels are used to efficiently switch between two possible code
+paths. To achieve this, stop_machine() is used to keep the CPU in a
+known state while the opcode is modified. The usage of stop_machine()
+here leads to large latency spikes which can be observed on PREEMPT_RT.
+
+Jump labels may change the target during runtime and are not restricted
+to debug or "configuration/ setup" part of a PREEMPT_RT system where
+high latencies could be defined as acceptable.
+
+Disable jump-label support on a PREEMPT_RT system.
+
+[bigeasy: Patch description.]
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lkml.kernel.org/r/20220613182447.112191-2-bigeasy@linutronix.de
+---
+ arch/arm/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -73,7 +73,7 @@ config ARM
+ select HAS_IOPORT
+ select HAVE_ARCH_AUDITSYSCALL if AEABI && !OABI_COMPAT
+ select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
+- select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
++ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && !PREEMPT_RT
+ select HAVE_ARCH_KFENCE if MMU && !XIP_KERNEL
+ select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
+ select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
diff --git a/debian/patches-rt/0001-drm-amd-display-Remove-migrate_en-dis-from-dc_fpu_be.patch b/debian/patches-rt/0001-drm-amd-display-Remove-migrate_en-dis-from-dc_fpu_be.patch
new file mode 100644
index 0000000000..b78f988da5
--- /dev/null
+++ b/debian/patches-rt/0001-drm-amd-display-Remove-migrate_en-dis-from-dc_fpu_be.patch
@@ -0,0 +1,86 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 21 Sep 2023 16:15:12 +0200
+Subject: [PATCH 1/5] drm/amd/display: Remove migrate_en/dis from
+ dc_fpu_begin().
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+This is a revert of the commit mentioned below while it is not wrong, as
+in the kernel will explode, having migrate_disable() here it is
+complete waste of resources.
+
+Additionally commit message is plain wrong the review tag does not make
+it any better. The migrate_disable() interface has a fat comment
+describing it and it includes the word "undesired" in the headline which
+should tickle people to read it before using it.
+Initially I assumed it is worded too harsh but now I beg to differ.
+
+The reviewer of the original commit, even not understanding what
+migrate_disable() does should ask the following:
+
+- migrate_disable() is added only to the CONFIG_X86 block and it claims
+ to protect fpu_recursion_depth. Why are the other the architectures
+ excluded?
+
+- migrate_disable() is added after fpu_recursion_depth was modified.
+ Shouldn't it be added before the modification or referencing takes
+ place?
+
+Moving on.
+Disabling preemption DOES prevent CPU migration. A task, that can not be
+pushed away from the CPU by the scheduler (due to disabled preemption)
+can not be pushed or migrated to another CPU.
+
+Disabling migration DOES NOT ensure consistency of per-CPU variables. It
+only ensures that the task acts always on the same per-CPU variable. The
+task remains preemptible meaning multiple tasks can access the same
+per-CPU variable. This in turn leads to inconsistency for the statement
+
+ *pcpu -= 1;
+
+with two tasks on one CPU and a preemption point during the RMW
+operation:
+
+ Task A Task B
+ read pcpu to reg # 0
+ inc reg # 0 -> 1
+ read pcpu to reg # 0
+ inc reg # 0 -> 1
+ write reg to pcpu # 1
+ write reg to pcpu # 1
+
+At the end pcpu reads 1 but should read 2 instead. Boom.
+
+get_cpu_ptr() already contains a preempt_disable() statement. That means
+that the per-CPU variable can only be referenced by a single task which
+is currently running. The only inconsistency that can occur if the
+variable is additionally accessed from an interrupt.
+
+Remove migrate_disable/enable() from dc_fpu_begin/end().
+
+Cc: Tianci Yin <tianci.yin@amd.com>
+Cc: Aurabindo Pillai <aurabindo.pillai@amd.com>
+Fixes: 0c316556d1249 ("drm/amd/display: Disable migration to ensure consistency of per-CPU variable")
+Link: https://lore.kernel.org/r/20230921141516.520471-2-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
+@@ -91,7 +91,6 @@ void dc_fpu_begin(const char *function_n
+
+ if (*pcpu == 1) {
+ #if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
+- migrate_disable();
+ kernel_fpu_begin();
+ #elif defined(CONFIG_PPC64)
+ if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
+@@ -132,7 +131,6 @@ void dc_fpu_end(const char *function_nam
+ if (*pcpu <= 0) {
+ #if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
+ kernel_fpu_end();
+- migrate_enable();
+ #elif defined(CONFIG_PPC64)
+ if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
+ disable_kernel_vsx();
diff --git a/debian/patches-rt/0001-sched-Constrain-locks-in-sched_submit_work.patch b/debian/patches-rt/0001-sched-Constrain-locks-in-sched_submit_work.patch
new file mode 100644
index 0000000000..73828f0efd
--- /dev/null
+++ b/debian/patches-rt/0001-sched-Constrain-locks-in-sched_submit_work.patch
@@ -0,0 +1,49 @@
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 8 Sep 2023 18:22:48 +0200
+Subject: [PATCH 1/7] sched: Constrain locks in sched_submit_work()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Even though sched_submit_work() is ran from preemptible context,
+it is discouraged to have it use blocking locks due to the recursion
+potential.
+
+Enforce this.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20230908162254.999499-2-bigeasy@linutronix.de
+---
+ kernel/sched/core.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -6721,11 +6721,18 @@ void __noreturn do_task_dead(void)
+
+ static inline void sched_submit_work(struct task_struct *tsk)
+ {
++ static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
+ unsigned int task_flags;
+
+ if (task_is_running(tsk))
+ return;
+
++ /*
++ * Establish LD_WAIT_CONFIG context to ensure none of the code called
++ * will use a blocking primitive -- which would lead to recursion.
++ */
++ lock_map_acquire_try(&sched_map);
++
+ task_flags = tsk->flags;
+ /*
+ * If a worker goes to sleep, notify and ask workqueue whether it
+@@ -6750,6 +6757,8 @@ static inline void sched_submit_work(str
+ * make sure to submit it to avoid deadlocks.
+ */
+ blk_flush_plug(tsk->plug, true);
++
++ lock_map_release(&sched_map);
+ }
+
+ static void sched_update_worker(struct task_struct *tsk)
diff --git a/debian/patches-rt/0001-sched-core-Provide-a-method-to-check-if-a-task-is-PI.patch b/debian/patches-rt/0001-sched-core-Provide-a-method-to-check-if-a-task-is-PI.patch
new file mode 100644
index 0000000000..35892558e6
--- /dev/null
+++ b/debian/patches-rt/0001-sched-core-Provide-a-method-to-check-if-a-task-is-PI.patch
@@ -0,0 +1,54 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 4 Aug 2023 13:30:37 +0200
+Subject: [PATCH 1/3] sched/core: Provide a method to check if a task is
+ PI-boosted.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Provide a method to check if a task inherited the priority from another
+task. This happens if a task owns a lock which is requested by a task
+with higher priority. This can be used as a hint to add a preemption
+point to the critical section.
+
+Provide a function which reports true if the task is PI-boosted.
+
+Link: https://lore.kernel.org/r/20230804113039.419794-2-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/sched.h | 1 +
+ kernel/sched/core.c | 15 +++++++++++++++
+ 2 files changed, 16 insertions(+)
+
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1905,6 +1905,7 @@ static inline int dl_task_check_affinity
+ }
+ #endif
+
++extern bool task_is_pi_boosted(const struct task_struct *p);
+ extern int yield_to(struct task_struct *p, bool preempt);
+ extern void set_user_nice(struct task_struct *p, long nice);
+ extern int task_prio(const struct task_struct *p);
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -8923,6 +8923,21 @@ static inline void preempt_dynamic_init(
+
+ #endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
+
++/*
++ * task_is_pi_boosted - Check if task has been PI boosted.
++ * @p: Task to check.
++ *
++ * Return true if task is subject to priority inheritance.
++ */
++bool task_is_pi_boosted(const struct task_struct *p)
++{
++ int prio = p->prio;
++
++ if (!rt_prio(prio))
++ return false;
++ return prio != p->normal_prio;
++}
++
+ /**
+ * yield - yield the current processor to other threads.
+ *
diff --git a/debian/patches-rt/0001-signal-Add-proper-comment-about-the-preempt-disable-.patch b/debian/patches-rt/0001-signal-Add-proper-comment-about-the-preempt-disable-.patch
new file mode 100644
index 0000000000..e00261d790
--- /dev/null
+++ b/debian/patches-rt/0001-signal-Add-proper-comment-about-the-preempt-disable-.patch
@@ -0,0 +1,47 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 3 Aug 2023 12:09:31 +0200
+Subject: [PATCH 1/2] signal: Add proper comment about the preempt-disable in
+ ptrace_stop().
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Commit 53da1d9456fe7 ("fix ptrace slowness") added a preempt-disable section
+between read_unlock() and the following schedule() invocation without
+explaining why it is needed.
+
+Replace the comment with an explanation why this is needed. Clarify that
+it is not needed for correctness, but for performance reasons.
+
+Acked-by: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20230803100932.325870-2-bigeasy@linutronix.de
+---
+ kernel/signal.c | 17 ++++++++++++++---
+ 1 file changed, 14 insertions(+), 3 deletions(-)
+
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -2329,10 +2329,21 @@ static int ptrace_stop(int exit_code, in
+ do_notify_parent_cldstop(current, false, why);
+
+ /*
+- * Don't want to allow preemption here, because
+- * sys_ptrace() needs this task to be inactive.
++ * The previous do_notify_parent_cldstop() invocation woke ptracer.
++ * On a PREEMPTION kernel this can result in a preemption requirement
++ * which will be fulfilled after read_unlock() and the ptracer will be
++ * put on the CPU.
++ * The ptracer is in wait_task_inactive(, __TASK_TRACED) waiting for
++ * this task to wait in schedule(). If this task gets preempted then it
++ * remains enqueued on the runqueue. The ptracer will observe this and
++ * then sleep for a delay of one HZ tick. In the meantime this task
++ * gets scheduled, enters schedule() and will wait for the ptracer.
+ *
+- * XXX: implement read_unlock_no_resched().
++ * This preemption point is not bad from correctness point of view but
++ * extends the runtime by one HZ tick time due to the ptracer's sleep.
++ * The preempt-disable section ensures that there will be no preemption
++ * between unlock and schedule() and so improving the performance since
++ * the ptracer has no reason to sleep.
+ */
+ preempt_disable();
+ read_unlock(&tasklist_lock);
diff --git a/debian/patches-rt/0002-ARM-vfp-Use-vfp_lock-in-vfp_sync_hwstate.patch b/debian/patches-rt/0002-ARM-vfp-Use-vfp_lock-in-vfp_sync_hwstate.patch
new file mode 100644
index 0000000000..a0d93547cd
--- /dev/null
+++ b/debian/patches-rt/0002-ARM-vfp-Use-vfp_lock-in-vfp_sync_hwstate.patch
@@ -0,0 +1,44 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 19 May 2023 16:57:30 +0200
+Subject: [PATCH 2/4] ARM: vfp: Use vfp_lock() in vfp_sync_hwstate().
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+vfp_sync_hwstate() uses preempt_disable() followed by local_bh_disable()
+to ensure that it won't get interrupted while checking the VFP state.
+This harms PREEMPT_RT because softirq handling can get preempted and
+local_bh_disable() synchronizes the related section with a sleeping lock
+which does not work with disabled preemption.
+
+Use the vfp_lock() to synchronize the access.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm/vfp/vfpmodule.c | 9 +++------
+ 1 file changed, 3 insertions(+), 6 deletions(-)
+
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -540,11 +540,9 @@ static inline void vfp_pm_init(void) { }
+ */
+ void vfp_sync_hwstate(struct thread_info *thread)
+ {
+- unsigned int cpu = get_cpu();
++ vfp_lock();
+
+- local_bh_disable();
+-
+- if (vfp_state_in_hw(cpu, thread)) {
++ if (vfp_state_in_hw(raw_smp_processor_id(), thread)) {
+ u32 fpexc = fmrx(FPEXC);
+
+ /*
+@@ -555,8 +553,7 @@ void vfp_sync_hwstate(struct thread_info
+ fmxr(FPEXC, fpexc);
+ }
+
+- local_bh_enable();
+- put_cpu();
++ vfp_unlock();
+ }
+
+ /* Ensure that the thread reloads the hardware VFP state on the next use. */
diff --git a/debian/patches-rt/0002-drm-amd-display-Simplify-the-per-CPU-usage.patch b/debian/patches-rt/0002-drm-amd-display-Simplify-the-per-CPU-usage.patch
new file mode 100644
index 0000000000..fd18d53e2d
--- /dev/null
+++ b/debian/patches-rt/0002-drm-amd-display-Simplify-the-per-CPU-usage.patch
@@ -0,0 +1,127 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 21 Sep 2023 16:15:13 +0200
+Subject: [PATCH 2/5] drm/amd/display: Simplify the per-CPU usage.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+The fpu_recursion_depth counter is used to ensure that dc_fpu_begin()
+can be invoked multiple times while the FPU-disable function itself is
+only invoked once. Also the counterpart (dc_fpu_end()) is balanced
+properly.
+
+Instead of using the get_cpu_ptr() dance around the inc it is simpler to
+increment the per-CPU variable directly. Also the per-CPU variable has
+to be incremented and decremented on the same CPU. This is ensured by
+the inner-part which disables preemption. This is kind of not obvious,
+works and the preempt-counter is touched a few times for no reason.
+
+Disable preemption before incrementing fpu_recursion_depth for the first
+time. Keep preemption disabled until dc_fpu_end() where the counter is
+decremented making it obvious that the preemption has to stay disabled
+while the counter is non-zero.
+Use simple inc/dec functions.
+Remove the nested preempt_disable/enable functions which are now not
+needed.
+
+Link: https://lore.kernel.org/r/20230921141516.520471-3-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c | 50 ++++++++++---------------
+ 1 file changed, 20 insertions(+), 30 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
+@@ -60,11 +60,9 @@ static DEFINE_PER_CPU(int, fpu_recursion
+ */
+ inline void dc_assert_fp_enabled(void)
+ {
+- int *pcpu, depth = 0;
++ int depth;
+
+- pcpu = get_cpu_ptr(&fpu_recursion_depth);
+- depth = *pcpu;
+- put_cpu_ptr(&fpu_recursion_depth);
++ depth = __this_cpu_read(fpu_recursion_depth);
+
+ ASSERT(depth >= 1);
+ }
+@@ -84,32 +82,27 @@ inline void dc_assert_fp_enabled(void)
+ */
+ void dc_fpu_begin(const char *function_name, const int line)
+ {
+- int *pcpu;
++ int depth;
+
+- pcpu = get_cpu_ptr(&fpu_recursion_depth);
+- *pcpu += 1;
++ preempt_disable();
++ depth = __this_cpu_inc_return(fpu_recursion_depth);
+
+- if (*pcpu == 1) {
++ if (depth == 1) {
+ #if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
+ kernel_fpu_begin();
+ #elif defined(CONFIG_PPC64)
+- if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
+- preempt_disable();
++ if (cpu_has_feature(CPU_FTR_VSX_COMP))
+ enable_kernel_vsx();
+- } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) {
+- preempt_disable();
++ else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP))
+ enable_kernel_altivec();
+- } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) {
+- preempt_disable();
++ else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE))
+ enable_kernel_fp();
+- }
+ #elif defined(CONFIG_ARM64)
+ kernel_neon_begin();
+ #endif
+ }
+
+- TRACE_DCN_FPU(true, function_name, line, *pcpu);
+- put_cpu_ptr(&fpu_recursion_depth);
++ TRACE_DCN_FPU(true, function_name, line, depth);
+ }
+
+ /**
+@@ -124,29 +117,26 @@ void dc_fpu_begin(const char *function_n
+ */
+ void dc_fpu_end(const char *function_name, const int line)
+ {
+- int *pcpu;
++ int depth;
+
+- pcpu = get_cpu_ptr(&fpu_recursion_depth);
+- *pcpu -= 1;
+- if (*pcpu <= 0) {
++ depth = __this_cpu_dec_return(fpu_recursion_depth);
++ if (depth == 0) {
+ #if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
+ kernel_fpu_end();
+ #elif defined(CONFIG_PPC64)
+- if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
++ if (cpu_has_feature(CPU_FTR_VSX_COMP))
+ disable_kernel_vsx();
+- preempt_enable();
+- } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) {
++ else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP))
+ disable_kernel_altivec();
+- preempt_enable();
+- } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) {
++ else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE))
+ disable_kernel_fp();
+- preempt_enable();
+- }
+ #elif defined(CONFIG_ARM64)
+ kernel_neon_end();
+ #endif
++ } else {
++ WARN_ON_ONCE(depth < 0);
+ }
+
+- TRACE_DCN_FPU(false, function_name, line, *pcpu);
+- put_cpu_ptr(&fpu_recursion_depth);
++ TRACE_DCN_FPU(false, function_name, line, depth);
++ preempt_enable();
+ }
diff --git a/debian/patches-rt/0002-locking-rtmutex-Avoid-unconditional-slowpath-for-DEB.patch b/debian/patches-rt/0002-locking-rtmutex-Avoid-unconditional-slowpath-for-DEB.patch
new file mode 100644
index 0000000000..561d5891b1
--- /dev/null
+++ b/debian/patches-rt/0002-locking-rtmutex-Avoid-unconditional-slowpath-for-DEB.patch
@@ -0,0 +1,79 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 8 Sep 2023 18:22:49 +0200
+Subject: [PATCH 2/7] locking/rtmutex: Avoid unconditional slowpath for
+ DEBUG_RT_MUTEXES
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+With DEBUG_RT_MUTEXES enabled the fast-path rt_mutex_cmpxchg_acquire()
+always fails and all lock operations take the slow path.
+
+Provide a new helper inline rt_mutex_try_acquire() which maps to
+rt_mutex_cmpxchg_acquire() in the non-debug case. For the debug case
+it invokes rt_mutex_slowtrylock() which can acquire a non-contended
+rtmutex under full debug coverage.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20230908162254.999499-3-bigeasy@linutronix.de
+---
+ kernel/locking/rtmutex.c | 21 ++++++++++++++++++++-
+ kernel/locking/ww_rt_mutex.c | 2 +-
+ 2 files changed, 21 insertions(+), 2 deletions(-)
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -218,6 +218,11 @@ static __always_inline bool rt_mutex_cmp
+ return try_cmpxchg_acquire(&lock->owner, &old, new);
+ }
+
++static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock)
++{
++ return rt_mutex_cmpxchg_acquire(lock, NULL, current);
++}
++
+ static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
+ struct task_struct *old,
+ struct task_struct *new)
+@@ -297,6 +302,20 @@ static __always_inline bool rt_mutex_cmp
+
+ }
+
++static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock);
++
++static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock)
++{
++ /*
++ * With debug enabled rt_mutex_cmpxchg trylock() will always fail.
++ *
++ * Avoid unconditionally taking the slow path by using
++ * rt_mutex_slowtrylock() which is covered by the debug code and can
++ * acquire a non-contended rtmutex.
++ */
++ return rt_mutex_slowtrylock(lock);
++}
++
+ static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
+ struct task_struct *old,
+ struct task_struct *new)
+@@ -1755,7 +1774,7 @@ static int __sched rt_mutex_slowlock(str
+ static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
+ unsigned int state)
+ {
+- if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
++ if (likely(rt_mutex_try_acquire(lock)))
+ return 0;
+
+ return rt_mutex_slowlock(lock, NULL, state);
+--- a/kernel/locking/ww_rt_mutex.c
++++ b/kernel/locking/ww_rt_mutex.c
+@@ -62,7 +62,7 @@ static int __sched
+ }
+ mutex_acquire_nest(&rtm->dep_map, 0, 0, nest_lock, ip);
+
+- if (likely(rt_mutex_cmpxchg_acquire(&rtm->rtmutex, NULL, current))) {
++ if (likely(rt_mutex_try_acquire(&rtm->rtmutex))) {
+ if (ww_ctx)
+ ww_mutex_set_context_fastpath(lock, ww_ctx);
+ return 0;
diff --git a/debian/patches-rt/0002-serial-core-Use-lock-wrappers.patch b/debian/patches-rt/0002-serial-core-Use-lock-wrappers.patch
new file mode 100644
index 0000000000..ad41a3dc78
--- /dev/null
+++ b/debian/patches-rt/0002-serial-core-Use-lock-wrappers.patch
@@ -0,0 +1,93 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:19 +0206
+Subject: [PATCH 002/134] serial: core: Use lock wrappers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-3-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/serial_core.h | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/include/linux/serial_core.h
++++ b/include/linux/serial_core.h
+@@ -1035,14 +1035,14 @@ static inline void uart_unlock_and_check
+ u8 sysrq_ch;
+
+ if (!port->has_sysrq) {
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+ return;
+ }
+
+ sysrq_ch = port->sysrq_ch;
+ port->sysrq_ch = 0;
+
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+
+ if (sysrq_ch)
+ handle_sysrq(sysrq_ch);
+@@ -1054,14 +1054,14 @@ static inline void uart_unlock_and_check
+ u8 sysrq_ch;
+
+ if (!port->has_sysrq) {
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ return;
+ }
+
+ sysrq_ch = port->sysrq_ch;
+ port->sysrq_ch = 0;
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ if (sysrq_ch)
+ handle_sysrq(sysrq_ch);
+@@ -1077,12 +1077,12 @@ static inline int uart_prepare_sysrq_cha
+ }
+ static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
+ {
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+ }
+ static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
+ unsigned long flags)
+ {
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+ #endif /* CONFIG_MAGIC_SYSRQ_SERIAL */
+
diff --git a/debian/patches-rt/0002-signal-Don-t-disable-preemption-in-ptrace_stop-on-PR.patch b/debian/patches-rt/0002-signal-Don-t-disable-preemption-in-ptrace_stop-on-PR.patch
new file mode 100644
index 0000000000..01e89d97ab
--- /dev/null
+++ b/debian/patches-rt/0002-signal-Don-t-disable-preemption-in-ptrace_stop-on-PR.patch
@@ -0,0 +1,48 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 3 Aug 2023 12:09:32 +0200
+Subject: [PATCH 2/2] signal: Don't disable preemption in ptrace_stop() on
+ PREEMPT_RT.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+On PREEMPT_RT keeping preemption disabled during the invocation of
+cgroup_enter_frozen() is a problem because the function acquires css_set_lock
+which is a sleeping lock on PREEMPT_RT and must not be acquired with disabled
+preemption.
+The preempt-disabled section is only for performance optimisation
+reasons and can be avoided.
+
+Extend the comment and don't disable preemption before scheduling on
+PREEMPT_RT.
+
+Acked-by: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20230803100932.325870-3-bigeasy@linutronix.de
+---
+ kernel/signal.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -2344,11 +2344,20 @@ static int ptrace_stop(int exit_code, in
+ * The preempt-disable section ensures that there will be no preemption
+ * between unlock and schedule() and so improving the performance since
+ * the ptracer has no reason to sleep.
++ *
++ * On PREEMPT_RT locking tasklist_lock does not disable preemption.
++ * Therefore the task can be preempted (after
++ * do_notify_parent_cldstop()) before unlocking tasklist_lock so there
++ * is no benefit in doing this. The optimisation is harmful on
++ * PREEMPT_RT because the spinlock_t (in cgroup_enter_frozen()) must not
++ * be acquired with disabled preemption.
+ */
+- preempt_disable();
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ preempt_disable();
+ read_unlock(&tasklist_lock);
+ cgroup_enter_frozen();
+- preempt_enable_no_resched();
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ preempt_enable_no_resched();
+ schedule();
+ cgroup_leave_frozen(true);
+
diff --git a/debian/patches-rt/0002-softirq-Add-function-to-preempt-serving-softirqs.patch b/debian/patches-rt/0002-softirq-Add-function-to-preempt-serving-softirqs.patch
new file mode 100644
index 0000000000..f8cddfa446
--- /dev/null
+++ b/debian/patches-rt/0002-softirq-Add-function-to-preempt-serving-softirqs.patch
@@ -0,0 +1,60 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 4 Aug 2023 13:30:38 +0200
+Subject: [PATCH 2/3] softirq: Add function to preempt serving softirqs.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Add a functionality for the softirq handler to preempt its current work
+if needed. The softirq core has no particular state. It reads and resets
+the pending softirq bits and then processes one after the other.
+It can already be preempted while it invokes a certain softirq handler.
+
+By enabling the BH the softirq core releases the per-CPU bh lock which
+serializes all softirq handlers. It is safe to do as long as the code
+does not expect any serialisation in between. A typical scenario would
+be after the invocation of a callback where no state needs to be preserved
+before the next callback is invoked.
+
+Add functionality to preempt the serving softirqs.
+
+Link: https://lore.kernel.org/r/20230804113039.419794-3-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/bottom_half.h | 2 ++
+ kernel/softirq.c | 13 +++++++++++++
+ 2 files changed, 15 insertions(+)
+
+--- a/include/linux/bottom_half.h
++++ b/include/linux/bottom_half.h
+@@ -35,8 +35,10 @@ static inline void local_bh_enable(void)
+
+ #ifdef CONFIG_PREEMPT_RT
+ extern bool local_bh_blocked(void);
++extern void softirq_preempt(void);
+ #else
+ static inline bool local_bh_blocked(void) { return false; }
++static inline void softirq_preempt(void) { }
+ #endif
+
+ #endif /* _LINUX_BH_H */
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -247,6 +247,19 @@ void __local_bh_enable_ip(unsigned long
+ }
+ EXPORT_SYMBOL(__local_bh_enable_ip);
+
++void softirq_preempt(void)
++{
++ if (WARN_ON_ONCE(!preemptible()))
++ return;
++
++ if (WARN_ON_ONCE(__this_cpu_read(softirq_ctrl.cnt) != SOFTIRQ_OFFSET))
++ return;
++
++ __local_bh_enable(SOFTIRQ_OFFSET, true);
++ /* preemption point */
++ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
++}
++
+ /*
+ * Invoked from ksoftirqd_run() outside of the interrupt disabled section
+ * to acquire the per CPU local lock for reentrancy protection.
diff --git a/debian/patches-rt/0003-ARM-vfp-Use-vfp_lock-in-vfp_support_entry.patch b/debian/patches-rt/0003-ARM-vfp-Use-vfp_lock-in-vfp_support_entry.patch
new file mode 100644
index 0000000000..c02278e5a8
--- /dev/null
+++ b/debian/patches-rt/0003-ARM-vfp-Use-vfp_lock-in-vfp_support_entry.patch
@@ -0,0 +1,48 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 28 Jun 2023 09:36:10 +0200
+Subject: [PATCH 3/4] ARM: vfp: Use vfp_lock() in vfp_support_entry().
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+vfp_entry() is invoked from exception handler and is fully preemptible.
+It uses local_bh_disable() to remain uninterrupted while checking the
+VFP state.
+This is not working on PREEMPT_RT because local_bh_disable()
+synchronizes the relevant section but the context remains fully
+preemptible.
+
+Use vfp_lock() for uninterrupted access.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm/vfp/vfpmodule.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -708,7 +708,7 @@ static int vfp_support_entry(struct pt_r
+ if (!user_mode(regs))
+ return vfp_kmode_exception(regs, trigger);
+
+- local_bh_disable();
++ vfp_lock();
+ fpexc = fmrx(FPEXC);
+
+ /*
+@@ -787,7 +787,7 @@ static int vfp_support_entry(struct pt_r
+ if (!(fpscr & FPSCR_IXE)) {
+ if (!(fpscr & FPSCR_LENGTH_MASK)) {
+ pr_debug("not VFP\n");
+- local_bh_enable();
++ vfp_unlock();
+ return -ENOEXEC;
+ }
+ fpexc |= FPEXC_DEX;
+@@ -797,7 +797,7 @@ bounce: regs->ARM_pc += 4;
+ VFP_bounce(trigger, fpexc, regs);
+ }
+
+- local_bh_enable();
++ vfp_unlock();
+ return 0;
+ }
+
diff --git a/debian/patches-rt/0003-drm-amd-display-Add-a-warning-if-the-FPU-is-used-out.patch b/debian/patches-rt/0003-drm-amd-display-Add-a-warning-if-the-FPU-is-used-out.patch
new file mode 100644
index 0000000000..9e160d5a28
--- /dev/null
+++ b/debian/patches-rt/0003-drm-amd-display-Add-a-warning-if-the-FPU-is-used-out.patch
@@ -0,0 +1,26 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 21 Sep 2023 16:15:14 +0200
+Subject: [PATCH 3/5] drm/amd/display: Add a warning if the FPU is used outside
+ from task context.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Add a warning if the FPU is used from any context other than task
+context. This is only precaution since the code is not able to be used
+from softirq while the API allows it on x86 for instance.
+
+Link: https://lore.kernel.org/r/20230921141516.520471-4-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
+@@ -84,6 +84,7 @@ void dc_fpu_begin(const char *function_n
+ {
+ int depth;
+
++ WARN_ON_ONCE(!in_task());
+ preempt_disable();
+ depth = __this_cpu_inc_return(fpu_recursion_depth);
+
diff --git a/debian/patches-rt/0003-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch b/debian/patches-rt/0003-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch
new file mode 100644
index 0000000000..f2752d0901
--- /dev/null
+++ b/debian/patches-rt/0003-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch
@@ -0,0 +1,56 @@
+From: Mike Galbraith <umgwanakikbuti@gmail.com>
+Date: Sat, 27 Feb 2016 08:09:11 +0100
+Subject: [PATCH 03/10] drm/i915: Use preempt_disable/enable_rt() where
+ recommended
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Mario Kleiner suggest in commit
+ ad3543ede630f ("drm/intel: Push get_scanout_position() timestamping into kms driver.")
+
+spots where preemption should be disabled on PREEMPT_RT. The
+difference is that on PREEMPT_RT the intel_uncore::lock disables neither
+preemption nor interrupts and so region remains preemptible.
+
+The area covers only register reads and writes. The part that worries me
+is:
+- __intel_get_crtc_scanline() the worst case is 100us if no match is
+ found.
+
+- intel_crtc_scanlines_since_frame_timestamp() not sure how long this
+ may take in the worst case.
+
+It was in the RT queue for a while and nobody complained.
+Disable preemption on PREEMPT_RT during timestamping.
+
+[bigeasy: patch description.]
+
+Cc: Mario Kleiner <mario.kleiner.de@gmail.com>
+Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/i915/display/intel_vblank.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/i915/display/intel_vblank.c
++++ b/drivers/gpu/drm/i915/display/intel_vblank.c
+@@ -294,7 +294,8 @@ static bool i915_get_crtc_scanoutpos(str
+ */
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+- /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ preempt_disable();
+
+ /* Get optional system timestamp before query. */
+ if (stime)
+@@ -358,7 +359,8 @@ static bool i915_get_crtc_scanoutpos(str
+ if (etime)
+ *etime = ktime_get();
+
+- /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ preempt_enable();
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
diff --git a/debian/patches-rt/0003-sched-Extract-__schedule_loop.patch b/debian/patches-rt/0003-sched-Extract-__schedule_loop.patch
new file mode 100644
index 0000000000..a236df5321
--- /dev/null
+++ b/debian/patches-rt/0003-sched-Extract-__schedule_loop.patch
@@ -0,0 +1,58 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 8 Sep 2023 18:22:50 +0200
+Subject: [PATCH 3/7] sched: Extract __schedule_loop()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+There are currently two implementations of this basic __schedule()
+loop, and there is soon to be a third.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20230908162254.999499-4-bigeasy@linutronix.de
+---
+ kernel/sched/core.c | 21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -6771,16 +6771,21 @@ static void sched_update_worker(struct t
+ }
+ }
+
+-asmlinkage __visible void __sched schedule(void)
++static __always_inline void __schedule_loop(unsigned int sched_mode)
+ {
+- struct task_struct *tsk = current;
+-
+- sched_submit_work(tsk);
+ do {
+ preempt_disable();
+- __schedule(SM_NONE);
++ __schedule(sched_mode);
+ sched_preempt_enable_no_resched();
+ } while (need_resched());
++}
++
++asmlinkage __visible void __sched schedule(void)
++{
++ struct task_struct *tsk = current;
++
++ sched_submit_work(tsk);
++ __schedule_loop(SM_NONE);
+ sched_update_worker(tsk);
+ }
+ EXPORT_SYMBOL(schedule);
+@@ -6844,11 +6849,7 @@ void __sched schedule_preempt_disabled(v
+ #ifdef CONFIG_PREEMPT_RT
+ void __sched notrace schedule_rtlock(void)
+ {
+- do {
+- preempt_disable();
+- __schedule(SM_RTLOCK_WAIT);
+- sched_preempt_enable_no_resched();
+- } while (need_resched());
++ __schedule_loop(SM_RTLOCK_WAIT);
+ }
+ NOKPROBE_SYMBOL(schedule_rtlock);
+ #endif
diff --git a/debian/patches-rt/0003-serial-21285-Use-port-lock-wrappers.patch b/debian/patches-rt/0003-serial-21285-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..e025de601c
--- /dev/null
+++ b/debian/patches-rt/0003-serial-21285-Use-port-lock-wrappers.patch
@@ -0,0 +1,75 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:20 +0206
+Subject: [PATCH 003/134] serial: 21285: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-4-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/21285.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/tty/serial/21285.c
++++ b/drivers/tty/serial/21285.c
+@@ -185,14 +185,14 @@ static void serial21285_break_ctl(struct
+ unsigned long flags;
+ unsigned int h_lcr;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ h_lcr = *CSR_H_UBRLCR;
+ if (break_state)
+ h_lcr |= H_UBRLCR_BREAK;
+ else
+ h_lcr &= ~H_UBRLCR_BREAK;
+ *CSR_H_UBRLCR = h_lcr;
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static int serial21285_startup(struct uart_port *port)
+@@ -272,7 +272,7 @@ serial21285_set_termios(struct uart_port
+ if (port->fifosize)
+ h_lcr |= H_UBRLCR_FIFO;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /*
+ * Update the per-port timeout.
+@@ -309,7 +309,7 @@ serial21285_set_termios(struct uart_port
+ *CSR_H_UBRLCR = h_lcr;
+ *CSR_UARTCON = 1;
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static const char *serial21285_type(struct uart_port *port)
diff --git a/debian/patches-rt/0003-time-Allow-to-preempt-after-a-callback.patch b/debian/patches-rt/0003-time-Allow-to-preempt-after-a-callback.patch
new file mode 100644
index 0000000000..7ede7d6ae5
--- /dev/null
+++ b/debian/patches-rt/0003-time-Allow-to-preempt-after-a-callback.patch
@@ -0,0 +1,47 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 4 Aug 2023 13:30:39 +0200
+Subject: [PATCH 3/3] time: Allow to preempt after a callback.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+The TIMER_SOFTIRQ handler invokes timer callbacks of the expired timers.
+Before each invocation the timer_base::lock is dropped. The only lock
+that is still held is the timer_base::expiry_lock and the per-CPU
+bh-lock as part of local_bh_disable(). The former is released as part
+of lock up prevention if the timer is preempted by the caller which is
+waiting for its completion.
+
+Both locks are already released as part of timer_sync_wait_running().
+This can be extended by also releasing the bh-lock. The timer core does
+not rely on any state that is serialized by the bh-lock. The timer
+callback expects the bh-state to be serialized by the lock but there is
+no need to keep state synchronized while invoking multiple callbacks.
+
+Preempt handling softirqs and release all locks after a timer invocation
+if the current task has inherited priority.
+
+Link: https://lore.kernel.org/r/20230804113039.419794-4-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/timer.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1470,9 +1470,16 @@ static inline void timer_base_unlock_exp
+ */
+ static void timer_sync_wait_running(struct timer_base *base)
+ {
+- if (atomic_read(&base->timer_waiters)) {
++ bool need_preempt;
++
++ need_preempt = task_is_pi_boosted(current);
++ if (need_preempt || atomic_read(&base->timer_waiters)) {
+ raw_spin_unlock_irq(&base->lock);
+ spin_unlock(&base->expiry_lock);
++
++ if (need_preempt)
++ softirq_preempt();
++
+ spin_lock(&base->expiry_lock);
+ raw_spin_lock_irq(&base->lock);
+ }
diff --git a/debian/patches-rt/0004-ARM-vfp-Move-sending-signals-outside-of-vfp_lock-ed-.patch b/debian/patches-rt/0004-ARM-vfp-Move-sending-signals-outside-of-vfp_lock-ed-.patch
new file mode 100644
index 0000000000..f7e006261e
--- /dev/null
+++ b/debian/patches-rt/0004-ARM-vfp-Move-sending-signals-outside-of-vfp_lock-ed-.patch
@@ -0,0 +1,121 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 28 Jun 2023 09:39:33 +0200
+Subject: [PATCH 4/4] ARM: vfp: Move sending signals outside of vfp_lock()ed
+ section.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+VFP_bounce() is invoked from within vfp_support_entry() and may send a
+signal. Sending a signal uses spinlock_t which becomes a sleeping lock
+on PREEMPT_RT and must not be acquired within a preempt-disabled
+section.
+
+Move the vfp_raise_sigfpe() block outside of the vfp_lock() section.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm/vfp/vfpmodule.c | 29 ++++++++++++++++++-----------
+ 1 file changed, 18 insertions(+), 11 deletions(-)
+
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -268,7 +268,7 @@ static void vfp_panic(char *reason, u32
+ /*
+ * Process bitmask of exception conditions.
+ */
+-static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
++static int vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr)
+ {
+ int si_code = 0;
+
+@@ -276,8 +276,7 @@ static void vfp_raise_exceptions(u32 exc
+
+ if (exceptions == VFP_EXCEPTION_ERROR) {
+ vfp_panic("unhandled bounce", inst);
+- vfp_raise_sigfpe(FPE_FLTINV, regs);
+- return;
++ return FPE_FLTINV;
+ }
+
+ /*
+@@ -305,8 +304,7 @@ static void vfp_raise_exceptions(u32 exc
+ RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
+ RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);
+
+- if (si_code)
+- vfp_raise_sigfpe(si_code, regs);
++ return si_code;
+ }
+
+ /*
+@@ -352,6 +350,8 @@ static u32 vfp_emulate_instruction(u32 i
+ static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
+ {
+ u32 fpscr, orig_fpscr, fpsid, exceptions;
++ int si_code2 = 0;
++ int si_code = 0;
+
+ pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);
+
+@@ -397,8 +397,8 @@ static void VFP_bounce(u32 trigger, u32
+ * unallocated VFP instruction but with FPSCR.IXE set and not
+ * on VFP subarch 1.
+ */
+- vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
+- return;
++ si_code = vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr);
++ goto exit;
+ }
+
+ /*
+@@ -422,14 +422,14 @@ static void VFP_bounce(u32 trigger, u32
+ */
+ exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
+ if (exceptions)
+- vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
++ si_code2 = vfp_raise_exceptions(exceptions, trigger, orig_fpscr);
+
+ /*
+ * If there isn't a second FP instruction, exit now. Note that
+ * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
+ */
+ if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
+- return;
++ goto exit;
+
+ /*
+ * The barrier() here prevents fpinst2 being read
+@@ -441,7 +441,13 @@ static void VFP_bounce(u32 trigger, u32
+ emulate:
+ exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
+ if (exceptions)
+- vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
++ si_code = vfp_raise_exceptions(exceptions, trigger, orig_fpscr);
++exit:
++ vfp_unlock();
++ if (si_code2)
++ vfp_raise_sigfpe(si_code2, regs);
++ if (si_code)
++ vfp_raise_sigfpe(si_code, regs);
+ }
+
+ static void vfp_enable(void *unused)
+@@ -773,6 +779,7 @@ static int vfp_support_entry(struct pt_r
+ * replay the instruction that trapped.
+ */
+ fmxr(FPEXC, fpexc);
++ vfp_unlock();
+ } else {
+ /* Check for synchronous or asynchronous exceptions */
+ if (!(fpexc & (FPEXC_EX | FPEXC_DEX))) {
+@@ -794,10 +801,10 @@ static int vfp_support_entry(struct pt_r
+ }
+ }
+ bounce: regs->ARM_pc += 4;
++ /* VFP_bounce() will invoke vfp_unlock() */
+ VFP_bounce(trigger, fpexc, regs);
+ }
+
+- vfp_unlock();
+ return 0;
+ }
+
diff --git a/debian/patches-rt/0004-drm-amd-display-Move-the-memory-allocation-out-of-dc.patch b/debian/patches-rt/0004-drm-amd-display-Move-the-memory-allocation-out-of-dc.patch
new file mode 100644
index 0000000000..a1c046b36c
--- /dev/null
+++ b/debian/patches-rt/0004-drm-amd-display-Move-the-memory-allocation-out-of-dc.patch
@@ -0,0 +1,87 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 21 Sep 2023 16:15:15 +0200
+Subject: [PATCH 4/5] drm/amd/display: Move the memory allocation out of
+ dcn21_validate_bandwidth_fp().
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+dcn21_validate_bandwidth_fp() is invoked while FPU access has been
+enabled. FPU access requires disabling preemption even on PREEMPT_RT.
+It is not possible to allocate memory with disabled preemption even with
+GFP_ATOMIC on PREEMPT_RT.
+
+Move the memory allocation before FPU access is enabled.
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=217928
+Link: https://lore.kernel.org/r/20230921141516.520471-5-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 10 +++++++++-
+ drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 7 ++-----
+ drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h | 5 ++---
+ 3 files changed, 13 insertions(+), 9 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+@@ -953,9 +953,17 @@ static bool dcn21_validate_bandwidth(str
+ bool fast_validate)
+ {
+ bool voltage_supported;
++ display_e2e_pipe_params_st *pipes;
++
++ pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
++ if (!pipes)
++ return false;
++
+ DC_FP_START();
+- voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate);
++ voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate, pipes);
+ DC_FP_END();
++
++ kfree(pipes);
+ return voltage_supported;
+ }
+
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+@@ -2203,9 +2203,8 @@ static void dcn21_calculate_wm(struct dc
+ &context->bw_ctx.dml, pipes, pipe_cnt);
+ }
+
+-bool dcn21_validate_bandwidth_fp(struct dc *dc,
+- struct dc_state *context,
+- bool fast_validate)
++bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
++ bool fast_validate, display_e2e_pipe_params_st *pipes)
+ {
+ bool out = false;
+
+@@ -2214,7 +2213,6 @@ bool dcn21_validate_bandwidth_fp(struct
+ int vlevel = 0;
+ int pipe_split_from[MAX_PIPES];
+ int pipe_cnt = 0;
+- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ BW_VAL_TRACE_COUNT();
+@@ -2254,7 +2252,6 @@ bool dcn21_validate_bandwidth_fp(struct
+ out = false;
+
+ validate_out:
+- kfree(pipes);
+
+ BW_VAL_TRACE_FINISH();
+
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
+@@ -77,9 +77,8 @@ int dcn21_populate_dml_pipes_from_contex
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate);
+-bool dcn21_validate_bandwidth_fp(struct dc *dc,
+- struct dc_state *context,
+- bool fast_validate);
++bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, bool
++ fast_validate, display_e2e_pipe_params_st *pipes);
+ void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
+
+ void dcn21_clk_mgr_set_bw_params_wm_table(struct clk_bw_params *bw_params);
diff --git a/debian/patches-rt/0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch b/debian/patches-rt/0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch
new file mode 100644
index 0000000000..9c386e5188
--- /dev/null
+++ b/debian/patches-rt/0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch
@@ -0,0 +1,85 @@
+From: Mike Galbraith <umgwanakikbuti@gmail.com>
+Date: Sat, 27 Feb 2016 09:01:42 +0100
+Subject: [PATCH 04/10] drm/i915: Don't disable interrupts on PREEMPT_RT during
+ atomic updates
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Commit
+ 8d7849db3eab7 ("drm/i915: Make sprite updates atomic")
+
+started disabling interrupts across atomic updates. This breaks on PREEMPT_RT
+because within this section the code attempts to acquire spinlock_t locks which
+are sleeping locks on PREEMPT_RT.
+
+According to the comment the interrupts are disabled to avoid random delays and
+not required for protection or synchronisation.
+If this needs to happen with disabled interrupts on PREEMPT_RT, and the
+whole section is restricted to register access then all sleeping locks
+need to be acquired before interrupts are disabled and some function
+may be moved after enabling interrupts again.
+This includes:
+- prepare_to_wait() + finish_wait() due to its wake queue.
+- drm_crtc_vblank_put() -> vblank_disable_fn() drm_device::vbl_lock.
+- skl_pfit_enable(), intel_update_plane(), vlv_atomic_update_fifo() and
+ maybe others due to intel_uncore::lock
+- drm_crtc_arm_vblank_event() due to drm_device::event_lock and
+ drm_device::vblank_time_lock.
+
+Don't disable interrupts on PREEMPT_RT during atomic updates.
+
+[bigeasy: drop local locks, commit message]
+
+Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/i915/display/intel_crtc.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+--- a/drivers/gpu/drm/i915/display/intel_crtc.c
++++ b/drivers/gpu/drm/i915/display/intel_crtc.c
+@@ -534,7 +534,8 @@ void intel_pipe_update_start(struct inte
+ */
+ intel_psr_wait_for_idle_locked(new_crtc_state);
+
+- local_irq_disable();
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ local_irq_disable();
+
+ crtc->debug.min_vbl = min;
+ crtc->debug.max_vbl = max;
+@@ -559,11 +560,13 @@ void intel_pipe_update_start(struct inte
+ break;
+ }
+
+- local_irq_enable();
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ local_irq_enable();
+
+ timeout = schedule_timeout(timeout);
+
+- local_irq_disable();
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ local_irq_disable();
+ }
+
+ finish_wait(wq, &wait);
+@@ -596,7 +599,8 @@ void intel_pipe_update_start(struct inte
+ return;
+
+ irq_disable:
+- local_irq_disable();
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ local_irq_disable();
+ }
+
+ #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
+@@ -706,7 +710,8 @@ void intel_pipe_update_end(struct intel_
+ intel_crtc_update_active_timings(new_crtc_state,
+ new_crtc_state->vrr.enable);
+
+- local_irq_enable();
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ local_irq_enable();
+
+ if (intel_vgpu_active(dev_priv))
+ return;
diff --git a/debian/patches-rt/0004-sched-Provide-rt_mutex-specific-scheduler-helpers.patch b/debian/patches-rt/0004-sched-Provide-rt_mutex-specific-scheduler-helpers.patch
new file mode 100644
index 0000000000..f61a67ea3f
--- /dev/null
+++ b/debian/patches-rt/0004-sched-Provide-rt_mutex-specific-scheduler-helpers.patch
@@ -0,0 +1,128 @@
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 8 Sep 2023 18:22:51 +0200
+Subject: [PATCH 4/7] sched: Provide rt_mutex specific scheduler helpers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+With PREEMPT_RT there is a rt_mutex recursion problem where
+sched_submit_work() can use an rtlock (aka spinlock_t). More
+specifically what happens is:
+
+ mutex_lock() /* really rt_mutex */
+ ...
+ __rt_mutex_slowlock_locked()
+ task_blocks_on_rt_mutex()
+ // enqueue current task as waiter
+ // do PI chain walk
+ rt_mutex_slowlock_block()
+ schedule()
+ sched_submit_work()
+ ...
+ spin_lock() /* really rtlock */
+ ...
+ __rt_mutex_slowlock_locked()
+ task_blocks_on_rt_mutex()
+ // enqueue current task as waiter *AGAIN*
+ // *CONFUSION*
+
+Fix this by making rt_mutex do the sched_submit_work() early, before
+it enqueues itself as a waiter -- before it even knows *if* it will
+wait.
+
+[[ basically Thomas' patch but with different naming and a few asserts
+ added ]]
+
+Originally-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20230908162254.999499-5-bigeasy@linutronix.de
+---
+ include/linux/sched.h | 3 +++
+ include/linux/sched/rt.h | 4 ++++
+ kernel/sched/core.c | 36 ++++++++++++++++++++++++++++++++----
+ 3 files changed, 39 insertions(+), 4 deletions(-)
+
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -911,6 +911,9 @@ struct task_struct {
+ * ->sched_remote_wakeup gets used, so it can be in this word.
+ */
+ unsigned sched_remote_wakeup:1;
++#ifdef CONFIG_RT_MUTEXES
++ unsigned sched_rt_mutex:1;
++#endif
+
+ /* Bit to tell LSMs we're in execve(): */
+ unsigned in_execve:1;
+--- a/include/linux/sched/rt.h
++++ b/include/linux/sched/rt.h
+@@ -30,6 +30,10 @@ static inline bool task_is_realtime(stru
+ }
+
+ #ifdef CONFIG_RT_MUTEXES
++extern void rt_mutex_pre_schedule(void);
++extern void rt_mutex_schedule(void);
++extern void rt_mutex_post_schedule(void);
++
+ /*
+ * Must hold either p->pi_lock or task_rq(p)->lock.
+ */
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -6724,9 +6724,6 @@ static inline void sched_submit_work(str
+ static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
+ unsigned int task_flags;
+
+- if (task_is_running(tsk))
+- return;
+-
+ /*
+ * Establish LD_WAIT_CONFIG context to ensure none of the code called
+ * will use a blocking primitive -- which would lead to recursion.
+@@ -6784,7 +6781,12 @@ asmlinkage __visible void __sched schedu
+ {
+ struct task_struct *tsk = current;
+
+- sched_submit_work(tsk);
++#ifdef CONFIG_RT_MUTEXES
++ lockdep_assert(!tsk->sched_rt_mutex);
++#endif
++
++ if (!task_is_running(tsk))
++ sched_submit_work(tsk);
+ __schedule_loop(SM_NONE);
+ sched_update_worker(tsk);
+ }
+@@ -7045,6 +7047,32 @@ static void __setscheduler_prio(struct t
+
+ #ifdef CONFIG_RT_MUTEXES
+
++/*
++ * Would be more useful with typeof()/auto_type but they don't mix with
++ * bit-fields. Since it's a local thing, use int. Keep the generic sounding
++ * name such that if someone were to implement this function we get to compare
++ * notes.
++ */
++#define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })
++
++void rt_mutex_pre_schedule(void)
++{
++ lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
++ sched_submit_work(current);
++}
++
++void rt_mutex_schedule(void)
++{
++ lockdep_assert(current->sched_rt_mutex);
++ __schedule_loop(SM_NONE);
++}
++
++void rt_mutex_post_schedule(void)
++{
++ sched_update_worker(current);
++ lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
++}
++
+ static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
+ {
+ if (pi_task)
diff --git a/debian/patches-rt/0004-serial-8250_aspeed_vuart-Use-port-lock-wrappers.patch b/debian/patches-rt/0004-serial-8250_aspeed_vuart-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..eb65ac58fc
--- /dev/null
+++ b/debian/patches-rt/0004-serial-8250_aspeed_vuart-Use-port-lock-wrappers.patch
@@ -0,0 +1,61 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:21 +0206
+Subject: [PATCH 004/134] serial: 8250_aspeed_vuart: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-5-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/8250/8250_aspeed_vuart.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
++++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
+@@ -288,9 +288,9 @@ static void aspeed_vuart_set_throttle(st
+ struct uart_8250_port *up = up_to_u8250p(port);
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ __aspeed_vuart_set_throttle(up, throttle);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void aspeed_vuart_throttle(struct uart_port *port)
+@@ -340,7 +340,7 @@ static int aspeed_vuart_handle_irq(struc
+ if (iir & UART_IIR_NO_INT)
+ return 0;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ lsr = serial_port_in(port, UART_LSR);
+
diff --git a/debian/patches-rt/0005-drm-amd-display-Move-the-memory-allocation-out-of-dc.patch b/debian/patches-rt/0005-drm-amd-display-Move-the-memory-allocation-out-of-dc.patch
new file mode 100644
index 0000000000..1a2babc614
--- /dev/null
+++ b/debian/patches-rt/0005-drm-amd-display-Move-the-memory-allocation-out-of-dc.patch
@@ -0,0 +1,121 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 21 Sep 2023 16:15:16 +0200
+Subject: [PATCH 5/5] drm/amd/display: Move the memory allocation out of
+ dcn20_validate_bandwidth_fp().
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+dcn20_validate_bandwidth_fp() is invoked while FPU access has been
+enabled. FPU access requires disabling preemption even on PREEMPT_RT.
+It is not possible to allocate memory with disabled preemption even with
+GFP_ATOMIC on PREEMPT_RT.
+
+Move the memory allocation before FPU access is enabled.
+To preserve previous "clean" state of "pipes" add a memset() before the
+second invocation of dcn20_validate_bandwidth_internal() where the
+variable is used.
+
+Link: https://lore.kernel.org/r/20230921141516.520471-6-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 10 +++++++++-
+ drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 16 +++++++---------
+ drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h | 5 ++---
+ 3 files changed, 18 insertions(+), 13 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+@@ -2141,9 +2141,17 @@ bool dcn20_validate_bandwidth(struct dc
+ bool fast_validate)
+ {
+ bool voltage_supported;
++ display_e2e_pipe_params_st *pipes;
++
++ pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
++ if (!pipes)
++ return false;
++
+ DC_FP_START();
+- voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate);
++ voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate, pipes);
+ DC_FP_END();
++
++ kfree(pipes);
+ return voltage_supported;
+ }
+
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+@@ -1910,7 +1910,7 @@ void dcn20_patch_bounding_box(struct dc
+ }
+
+ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *context,
+- bool fast_validate)
++ bool fast_validate, display_e2e_pipe_params_st *pipes)
+ {
+ bool out = false;
+
+@@ -1919,7 +1919,6 @@ static bool dcn20_validate_bandwidth_int
+ int vlevel = 0;
+ int pipe_split_from[MAX_PIPES];
+ int pipe_cnt = 0;
+- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ BW_VAL_TRACE_COUNT();
+@@ -1954,16 +1953,14 @@ static bool dcn20_validate_bandwidth_int
+ out = false;
+
+ validate_out:
+- kfree(pipes);
+
+ BW_VAL_TRACE_FINISH();
+
+ return out;
+ }
+
+-bool dcn20_validate_bandwidth_fp(struct dc *dc,
+- struct dc_state *context,
+- bool fast_validate)
++bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
++ bool fast_validate, display_e2e_pipe_params_st *pipes)
+ {
+ bool voltage_supported = false;
+ bool full_pstate_supported = false;
+@@ -1982,11 +1979,11 @@ bool dcn20_validate_bandwidth_fp(struct
+ ASSERT(context != dc->current_state);
+
+ if (fast_validate) {
+- return dcn20_validate_bandwidth_internal(dc, context, true);
++ return dcn20_validate_bandwidth_internal(dc, context, true, pipes);
+ }
+
+ // Best case, we support full UCLK switch latency
+- voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
++ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes);
+ full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
+
+ if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 ||
+@@ -1998,7 +1995,8 @@ bool dcn20_validate_bandwidth_fp(struct
+ // Fallback: Try to only support G6 temperature read latency
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;
+
+- voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
++ memset(pipes, 0, dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st));
++ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes);
+ dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
+
+ if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
+@@ -61,9 +61,8 @@ void dcn20_update_bounding_box(struct dc
+ unsigned int num_states);
+ void dcn20_patch_bounding_box(struct dc *dc,
+ struct _vcs_dpi_soc_bounding_box_st *bb);
+-bool dcn20_validate_bandwidth_fp(struct dc *dc,
+- struct dc_state *context,
+- bool fast_validate);
++bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
++ bool fast_validate, display_e2e_pipe_params_st *pipes);
+ void dcn20_fpu_set_wm_ranges(int i,
+ struct pp_smu_wm_range_sets *ranges,
+ struct _vcs_dpi_soc_bounding_box_st *loaded_bb);
diff --git a/debian/patches-rt/0005-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch b/debian/patches-rt/0005-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch
new file mode 100644
index 0000000000..4af9325678
--- /dev/null
+++ b/debian/patches-rt/0005-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch
@@ -0,0 +1,30 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 25 Oct 2021 15:05:18 +0200
+Subject: [PATCH 05/10] drm/i915: Don't check for atomic context on PREEMPT_RT
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+The !in_atomic() check in _wait_for_atomic() triggers on PREEMPT_RT
+because the uncore::lock is a spinlock_t and does not disable
+preemption or interrupts.
+
+Changing the uncore::lock to a raw_spinlock_t doubles the worst case
+latency on an otherwise idle testbox during testing. Therefore I'm
+currently unsure about changing this.
+
+Link: https://lore.kernel.org/all/20211006164628.s2mtsdd2jdbfyf7g@linutronix.de/
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/i915/i915_utils.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/i915_utils.h
++++ b/drivers/gpu/drm/i915/i915_utils.h
+@@ -288,7 +288,7 @@ wait_remaining_ms_from_jiffies(unsigned
+ #define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
+
+ /* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
+-#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
++#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT)
+ # define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
+ #else
+ # define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
diff --git a/debian/patches-rt/0005-locking-rtmutex-Use-rt_mutex-specific-scheduler-help.patch b/debian/patches-rt/0005-locking-rtmutex-Use-rt_mutex-specific-scheduler-help.patch
new file mode 100644
index 0000000000..d84497e5fd
--- /dev/null
+++ b/debian/patches-rt/0005-locking-rtmutex-Use-rt_mutex-specific-scheduler-help.patch
@@ -0,0 +1,177 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 8 Sep 2023 18:22:52 +0200
+Subject: [PATCH 5/7] locking/rtmutex: Use rt_mutex specific scheduler helpers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Have rt_mutex use the rt_mutex specific scheduler helpers to avoid
+recursion vs rtlock on the PI state.
+
+[[ peterz: adapted to new names ]]
+
+Reported-by: Crystal Wood <swood@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20230908162254.999499-6-bigeasy@linutronix.de
+---
+ kernel/futex/pi.c | 11 +++++++++++
+ kernel/locking/rtmutex.c | 14 ++++++++++++--
+ kernel/locking/rwbase_rt.c | 6 ++++++
+ kernel/locking/rwsem.c | 8 +++++++-
+ kernel/locking/spinlock_rt.c | 4 ++++
+ 5 files changed, 40 insertions(+), 3 deletions(-)
+
+--- a/kernel/futex/pi.c
++++ b/kernel/futex/pi.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0-or-later
+
+ #include <linux/slab.h>
++#include <linux/sched/rt.h>
+ #include <linux/sched/task.h>
+
+ #include "futex.h"
+@@ -1002,6 +1003,12 @@ int futex_lock_pi(u32 __user *uaddr, uns
+ goto no_block;
+ }
+
++ /*
++ * Must be done before we enqueue the waiter, here is unfortunately
++ * under the hb lock, but that *should* work because it does nothing.
++ */
++ rt_mutex_pre_schedule();
++
+ rt_mutex_init_waiter(&rt_waiter);
+
+ /*
+@@ -1052,6 +1059,10 @@ int futex_lock_pi(u32 __user *uaddr, uns
+ if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
+ ret = 0;
+
++ /*
++ * Waiter is unqueued.
++ */
++ rt_mutex_post_schedule();
+ no_block:
+ /*
+ * Fixup the pi_state owner and possibly acquire the lock if we
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1632,7 +1632,7 @@ static int __sched rt_mutex_slowlock_blo
+ raw_spin_unlock_irq(&lock->wait_lock);
+
+ if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
+- schedule();
++ rt_mutex_schedule();
+
+ raw_spin_lock_irq(&lock->wait_lock);
+ set_current_state(state);
+@@ -1661,7 +1661,7 @@ static void __sched rt_mutex_handle_dead
+ WARN(1, "rtmutex deadlock detected\n");
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule();
++ rt_mutex_schedule();
+ }
+ }
+
+@@ -1757,6 +1757,15 @@ static int __sched rt_mutex_slowlock(str
+ int ret;
+
+ /*
++ * Do all pre-schedule work here, before we queue a waiter and invoke
++ * PI -- any such work that trips on rtlock (PREEMPT_RT spinlock) would
++ * otherwise recurse back into task_blocks_on_rt_mutex() through
++ * rtlock_slowlock() and will then enqueue a second waiter for this
++ * same task and things get really confusing real fast.
++ */
++ rt_mutex_pre_schedule();
++
++ /*
+ * Technically we could use raw_spin_[un]lock_irq() here, but this can
+ * be called in early boot if the cmpxchg() fast path is disabled
+ * (debug, no architecture support). In this case we will acquire the
+@@ -1767,6 +1776,7 @@ static int __sched rt_mutex_slowlock(str
+ raw_spin_lock_irqsave(&lock->wait_lock, flags);
+ ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state);
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
++ rt_mutex_post_schedule();
+
+ return ret;
+ }
+--- a/kernel/locking/rwbase_rt.c
++++ b/kernel/locking/rwbase_rt.c
+@@ -71,6 +71,7 @@ static int __sched __rwbase_read_lock(st
+ struct rt_mutex_base *rtm = &rwb->rtmutex;
+ int ret;
+
++ rwbase_pre_schedule();
+ raw_spin_lock_irq(&rtm->wait_lock);
+
+ /*
+@@ -125,6 +126,7 @@ static int __sched __rwbase_read_lock(st
+ rwbase_rtmutex_unlock(rtm);
+
+ trace_contention_end(rwb, ret);
++ rwbase_post_schedule();
+ return ret;
+ }
+
+@@ -237,6 +239,8 @@ static int __sched rwbase_write_lock(str
+ /* Force readers into slow path */
+ atomic_sub(READER_BIAS, &rwb->readers);
+
++ rwbase_pre_schedule();
++
+ raw_spin_lock_irqsave(&rtm->wait_lock, flags);
+ if (__rwbase_write_trylock(rwb))
+ goto out_unlock;
+@@ -248,6 +252,7 @@ static int __sched rwbase_write_lock(str
+ if (rwbase_signal_pending_state(state, current)) {
+ rwbase_restore_current_state();
+ __rwbase_write_unlock(rwb, 0, flags);
++ rwbase_post_schedule();
+ trace_contention_end(rwb, -EINTR);
+ return -EINTR;
+ }
+@@ -266,6 +271,7 @@ static int __sched rwbase_write_lock(str
+
+ out_unlock:
+ raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
++ rwbase_post_schedule();
+ return 0;
+ }
+
+--- a/kernel/locking/rwsem.c
++++ b/kernel/locking/rwsem.c
+@@ -1427,8 +1427,14 @@ static inline void __downgrade_write(str
+ #define rwbase_signal_pending_state(state, current) \
+ signal_pending_state(state, current)
+
++#define rwbase_pre_schedule() \
++ rt_mutex_pre_schedule()
++
+ #define rwbase_schedule() \
+- schedule()
++ rt_mutex_schedule()
++
++#define rwbase_post_schedule() \
++ rt_mutex_post_schedule()
+
+ #include "rwbase_rt.c"
+
+--- a/kernel/locking/spinlock_rt.c
++++ b/kernel/locking/spinlock_rt.c
+@@ -184,9 +184,13 @@ static __always_inline int rwbase_rtmut
+
+ #define rwbase_signal_pending_state(state, current) (0)
+
++#define rwbase_pre_schedule()
++
+ #define rwbase_schedule() \
+ schedule_rtlock()
+
++#define rwbase_post_schedule()
++
+ #include "rwbase_rt.c"
+ /*
+ * The common functions which get wrapped into the rwlock API.
diff --git a/debian/patches-rt/0005-serial-8250_bcm7271-Use-port-lock-wrappers.patch b/debian/patches-rt/0005-serial-8250_bcm7271-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..55ae035b80
--- /dev/null
+++ b/debian/patches-rt/0005-serial-8250_bcm7271-Use-port-lock-wrappers.patch
@@ -0,0 +1,151 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:22 +0206
+Subject: [PATCH 005/134] serial: 8250_bcm7271: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-6-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/8250/8250_bcm7271.c | 28 ++++++++++++++--------------
+ 1 file changed, 14 insertions(+), 14 deletions(-)
+
+--- a/drivers/tty/serial/8250/8250_bcm7271.c
++++ b/drivers/tty/serial/8250/8250_bcm7271.c
+@@ -567,7 +567,7 @@ static irqreturn_t brcmuart_isr(int irq,
+ if (interrupts == 0)
+ return IRQ_NONE;
+
+- spin_lock_irqsave(&up->lock, flags);
++ uart_port_lock_irqsave(up, &flags);
+
+ /* Clear all interrupts */
+ udma_writel(priv, REGS_DMA_ISR, UDMA_INTR_CLEAR, interrupts);
+@@ -581,7 +581,7 @@ static irqreturn_t brcmuart_isr(int irq,
+ if ((rval | tval) == 0)
+ dev_warn(dev, "Spurious interrupt: 0x%x\n", interrupts);
+
+- spin_unlock_irqrestore(&up->lock, flags);
++ uart_port_unlock_irqrestore(up, flags);
+ return IRQ_HANDLED;
+ }
+
+@@ -608,10 +608,10 @@ static int brcmuart_startup(struct uart_
+ *
+ * Synchronize UART_IER access against the console.
+ */
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+ up->ier &= ~UART_IER_RDI;
+ serial_port_out(port, UART_IER, up->ier);
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+
+ priv->tx_running = false;
+ priv->dma.rx_dma = NULL;
+@@ -629,7 +629,7 @@ static void brcmuart_shutdown(struct uar
+ struct brcmuart_priv *priv = up->port.private_data;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ priv->shutdown = true;
+ if (priv->dma_enabled) {
+ stop_rx_dma(up);
+@@ -645,7 +645,7 @@ static void brcmuart_shutdown(struct uar
+ */
+ up->dma = NULL;
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ serial8250_do_shutdown(port);
+ }
+
+@@ -788,7 +788,7 @@ static int brcmuart_handle_irq(struct ua
+ * interrupt but there is no data ready.
+ */
+ if (((iir & UART_IIR_ID) == UART_IIR_RX_TIMEOUT) && !(priv->shutdown)) {
+- spin_lock_irqsave(&p->lock, flags);
++ uart_port_lock_irqsave(p, &flags);
+ status = serial_port_in(p, UART_LSR);
+ if ((status & UART_LSR_DR) == 0) {
+
+@@ -813,7 +813,7 @@ static int brcmuart_handle_irq(struct ua
+
+ handled = 1;
+ }
+- spin_unlock_irqrestore(&p->lock, flags);
++ uart_port_unlock_irqrestore(p, flags);
+ if (handled)
+ return 1;
+ }
+@@ -831,7 +831,7 @@ static enum hrtimer_restart brcmuart_hrt
+ if (priv->shutdown)
+ return HRTIMER_NORESTART;
+
+- spin_lock_irqsave(&p->lock, flags);
++ uart_port_lock_irqsave(p, &flags);
+ status = serial_port_in(p, UART_LSR);
+
+ /*
+@@ -855,7 +855,7 @@ static enum hrtimer_restart brcmuart_hrt
+ status |= UART_MCR_RTS;
+ serial_port_out(p, UART_MCR, status);
+ }
+- spin_unlock_irqrestore(&p->lock, flags);
++ uart_port_unlock_irqrestore(p, flags);
+ return HRTIMER_NORESTART;
+ }
+
+@@ -1154,10 +1154,10 @@ static int __maybe_unused brcmuart_suspe
+ * This will prevent resume from enabling RTS before the
+ * baud rate has been restored.
+ */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ priv->saved_mctrl = port->mctrl;
+ port->mctrl &= ~TIOCM_RTS;
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ serial8250_suspend_port(priv->line);
+ clk_disable_unprepare(priv->baud_mux_clk);
+@@ -1196,10 +1196,10 @@ static int __maybe_unused brcmuart_resum
+
+ if (priv->saved_mctrl & TIOCM_RTS) {
+ /* Restore RTS */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ port->mctrl |= TIOCM_RTS;
+ port->ops->set_mctrl(port, port->mctrl);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ return 0;
diff --git a/debian/patches-rt/0006-drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch b/debian/patches-rt/0006-drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch
new file mode 100644
index 0000000000..f29f29bdfe
--- /dev/null
+++ b/debian/patches-rt/0006-drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch
@@ -0,0 +1,45 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 6 Dec 2018 09:52:20 +0100
+Subject: [PATCH 06/10] drm/i915: Disable tracing points on PREEMPT_RT
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Luca Abeni reported this:
+| BUG: scheduling while atomic: kworker/u8:2/15203/0x00000003
+| CPU: 1 PID: 15203 Comm: kworker/u8:2 Not tainted 4.19.1-rt3 #10
+| Call Trace:
+| rt_spin_lock+0x3f/0x50
+| gen6_read32+0x45/0x1d0 [i915]
+| g4x_get_vblank_counter+0x36/0x40 [i915]
+| trace_event_raw_event_i915_pipe_update_start+0x7d/0xf0 [i915]
+
+The tracing events use trace_i915_pipe_update_start() among other events
+use functions acquire spinlock_t locks which are transformed into
+sleeping locks on PREEMPT_RT. A few trace points use
+intel_get_crtc_scanline(), others use ->get_vblank_counter() wich also
+might acquire a sleeping locks on PREEMPT_RT.
+At the time the arguments are evaluated within trace point, preemption
+is disabled and so the locks must not be acquired on PREEMPT_RT.
+
+Based on this I don't see any other way than disable trace points on
+PREMPT_RT.
+
+Reported-by: Luca Abeni <lucabe72@gmail.com>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/i915/i915_trace.h | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/gpu/drm/i915/i915_trace.h
++++ b/drivers/gpu/drm/i915/i915_trace.h
+@@ -6,6 +6,10 @@
+ #if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+ #define _I915_TRACE_H_
+
++#ifdef CONFIG_PREEMPT_RT
++#define NOTRACE
++#endif
++
+ #include <linux/stringify.h>
+ #include <linux/types.h>
+ #include <linux/tracepoint.h>
diff --git a/debian/patches-rt/0006-locking-rtmutex-Add-a-lockdep-assert-to-catch-potent.patch b/debian/patches-rt/0006-locking-rtmutex-Add-a-lockdep-assert-to-catch-potent.patch
new file mode 100644
index 0000000000..7d94cdb9de
--- /dev/null
+++ b/debian/patches-rt/0006-locking-rtmutex-Add-a-lockdep-assert-to-catch-potent.patch
@@ -0,0 +1,57 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 8 Sep 2023 18:22:53 +0200
+Subject: [PATCH 6/7] locking/rtmutex: Add a lockdep assert to catch potential
+ nested blocking
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+There used to be a BUG_ON(current->pi_blocked_on) in the lock acquisition
+functions, but that vanished in one of the rtmutex overhauls.
+
+Bring it back in form of a lockdep assert to catch code paths which take
+rtmutex based locks with current::pi_blocked_on != NULL.
+
+Reported-by: Crystal Wood <swood@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: "Peter Zijlstra (Intel)" <peterz@infradead.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20230908162254.999499-7-bigeasy@linutronix.de
+---
+ kernel/locking/rtmutex.c | 2 ++
+ kernel/locking/rwbase_rt.c | 2 ++
+ kernel/locking/spinlock_rt.c | 2 ++
+ 3 files changed, 6 insertions(+)
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1784,6 +1784,8 @@ static int __sched rt_mutex_slowlock(str
+ static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
+ unsigned int state)
+ {
++ lockdep_assert(!current->pi_blocked_on);
++
+ if (likely(rt_mutex_try_acquire(lock)))
+ return 0;
+
+--- a/kernel/locking/rwbase_rt.c
++++ b/kernel/locking/rwbase_rt.c
+@@ -133,6 +133,8 @@ static int __sched __rwbase_read_lock(st
+ static __always_inline int rwbase_read_lock(struct rwbase_rt *rwb,
+ unsigned int state)
+ {
++ lockdep_assert(!current->pi_blocked_on);
++
+ if (rwbase_read_trylock(rwb))
+ return 0;
+
+--- a/kernel/locking/spinlock_rt.c
++++ b/kernel/locking/spinlock_rt.c
+@@ -37,6 +37,8 @@
+
+ static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
+ {
++ lockdep_assert(!current->pi_blocked_on);
++
+ if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
+ rtlock_slowlock(rtm);
+ }
diff --git a/debian/patches-rt/0006-serial-8250-Use-port-lock-wrappers.patch b/debian/patches-rt/0006-serial-8250-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..d6ee818bf9
--- /dev/null
+++ b/debian/patches-rt/0006-serial-8250-Use-port-lock-wrappers.patch
@@ -0,0 +1,465 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:23 +0206
+Subject: [PATCH 006/134] serial: 8250: Use port lock wrappers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-7-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/8250/8250_core.c | 12 ++--
+ drivers/tty/serial/8250/8250_port.c | 100 ++++++++++++++++++------------------
+ 2 files changed, 56 insertions(+), 56 deletions(-)
+
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -259,7 +259,7 @@ static void serial8250_backup_timeout(st
+ unsigned int iir, ier = 0, lsr;
+ unsigned long flags;
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ /*
+ * Must disable interrupts or else we risk racing with the interrupt
+@@ -292,7 +292,7 @@ static void serial8250_backup_timeout(st
+ if (up->port.irq)
+ serial_out(up, UART_IER, ier);
+
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+
+ /* Standard timer interval plus 0.2s to keep the port running */
+ mod_timer(&up->timer,
+@@ -992,11 +992,11 @@ static void serial_8250_overrun_backoff_
+ struct uart_port *port = &up->port;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ up->ier |= UART_IER_RLSI | UART_IER_RDI;
+ up->port.read_status_mask |= UART_LSR_DR;
+ serial_out(up, UART_IER, up->ier);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ /**
+@@ -1194,9 +1194,9 @@ void serial8250_unregister_port(int line
+ if (uart->em485) {
+ unsigned long flags;
+
+- spin_lock_irqsave(&uart->port.lock, flags);
++ uart_port_lock_irqsave(&uart->port, &flags);
+ serial8250_em485_destroy(uart);
+- spin_unlock_irqrestore(&uart->port.lock, flags);
++ uart_port_unlock_irqrestore(&uart->port, flags);
+ }
+
+ uart_remove_one_port(&serial8250_reg, &uart->port);
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -689,7 +689,7 @@ static void serial8250_set_sleep(struct
+
+ if (p->capabilities & UART_CAP_SLEEP) {
+ /* Synchronize UART_IER access against the console. */
+- spin_lock_irq(&p->port.lock);
++ uart_port_lock_irq(&p->port);
+ if (p->capabilities & UART_CAP_EFR) {
+ lcr = serial_in(p, UART_LCR);
+ efr = serial_in(p, UART_EFR);
+@@ -703,7 +703,7 @@ static void serial8250_set_sleep(struct
+ serial_out(p, UART_EFR, efr);
+ serial_out(p, UART_LCR, lcr);
+ }
+- spin_unlock_irq(&p->port.lock);
++ uart_port_unlock_irq(&p->port);
+ }
+
+ serial8250_rpm_put(p);
+@@ -746,9 +746,9 @@ static void enable_rsa(struct uart_8250_
+ {
+ if (up->port.type == PORT_RSA) {
+ if (up->port.uartclk != SERIAL_RSA_BAUD_BASE * 16) {
+- spin_lock_irq(&up->port.lock);
++ uart_port_lock_irq(&up->port);
+ __enable_rsa(up);
+- spin_unlock_irq(&up->port.lock);
++ uart_port_unlock_irq(&up->port);
+ }
+ if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16)
+ serial_out(up, UART_RSA_FRR, 0);
+@@ -768,7 +768,7 @@ static void disable_rsa(struct uart_8250
+
+ if (up->port.type == PORT_RSA &&
+ up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) {
+- spin_lock_irq(&up->port.lock);
++ uart_port_lock_irq(&up->port);
+
+ mode = serial_in(up, UART_RSA_MSR);
+ result = !(mode & UART_RSA_MSR_FIFO);
+@@ -781,7 +781,7 @@ static void disable_rsa(struct uart_8250
+
+ if (result)
+ up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16;
+- spin_unlock_irq(&up->port.lock);
++ uart_port_unlock_irq(&up->port);
+ }
+ }
+ #endif /* CONFIG_SERIAL_8250_RSA */
+@@ -1172,7 +1172,7 @@ static void autoconfig(struct uart_8250_
+ *
+ * Synchronize UART_IER access against the console.
+ */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ up->capabilities = 0;
+ up->bugs = 0;
+@@ -1211,7 +1211,7 @@ static void autoconfig(struct uart_8250_
+ /*
+ * We failed; there's nothing here
+ */
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ DEBUG_AUTOCONF("IER test failed (%02x, %02x) ",
+ scratch2, scratch3);
+ goto out;
+@@ -1235,7 +1235,7 @@ static void autoconfig(struct uart_8250_
+ status1 = serial_in(up, UART_MSR) & UART_MSR_STATUS_BITS;
+ serial8250_out_MCR(up, save_mcr);
+ if (status1 != (UART_MSR_DCD | UART_MSR_CTS)) {
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ DEBUG_AUTOCONF("LOOP test failed (%02x) ",
+ status1);
+ goto out;
+@@ -1304,7 +1304,7 @@ static void autoconfig(struct uart_8250_
+ serial8250_clear_IER(up);
+
+ out_unlock:
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ /*
+ * Check if the device is a Fintek F81216A
+@@ -1344,9 +1344,9 @@ static void autoconfig_irq(struct uart_8
+ probe_irq_off(probe_irq_on());
+ save_mcr = serial8250_in_MCR(up);
+ /* Synchronize UART_IER access against the console. */
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+ save_ier = serial_in(up, UART_IER);
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+ serial8250_out_MCR(up, UART_MCR_OUT1 | UART_MCR_OUT2);
+
+ irqs = probe_irq_on();
+@@ -1359,9 +1359,9 @@ static void autoconfig_irq(struct uart_8
+ UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
+ }
+ /* Synchronize UART_IER access against the console. */
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+ serial_out(up, UART_IER, UART_IER_ALL_INTR);
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+ serial_in(up, UART_LSR);
+ serial_in(up, UART_RX);
+ serial_in(up, UART_IIR);
+@@ -1372,9 +1372,9 @@ static void autoconfig_irq(struct uart_8
+
+ serial8250_out_MCR(up, save_mcr);
+ /* Synchronize UART_IER access against the console. */
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+ serial_out(up, UART_IER, save_ier);
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+
+ if (port->flags & UPF_FOURPORT)
+ outb_p(save_ICP, ICP);
+@@ -1442,13 +1442,13 @@ static enum hrtimer_restart serial8250_e
+ unsigned long flags;
+
+ serial8250_rpm_get(p);
+- spin_lock_irqsave(&p->port.lock, flags);
++ uart_port_lock_irqsave(&p->port, &flags);
+ if (em485->active_timer == &em485->stop_tx_timer) {
+ p->rs485_stop_tx(p);
+ em485->active_timer = NULL;
+ em485->tx_stopped = true;
+ }
+- spin_unlock_irqrestore(&p->port.lock, flags);
++ uart_port_unlock_irqrestore(&p->port, flags);
+ serial8250_rpm_put(p);
+
+ return HRTIMER_NORESTART;
+@@ -1630,12 +1630,12 @@ static enum hrtimer_restart serial8250_e
+ struct uart_8250_port *p = em485->port;
+ unsigned long flags;
+
+- spin_lock_irqsave(&p->port.lock, flags);
++ uart_port_lock_irqsave(&p->port, &flags);
+ if (em485->active_timer == &em485->start_tx_timer) {
+ __start_tx(&p->port);
+ em485->active_timer = NULL;
+ }
+- spin_unlock_irqrestore(&p->port.lock, flags);
++ uart_port_unlock_irqrestore(&p->port, flags);
+
+ return HRTIMER_NORESTART;
+ }
+@@ -1918,7 +1918,7 @@ int serial8250_handle_irq(struct uart_po
+ if (iir & UART_IIR_NO_INT)
+ return 0;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ status = serial_lsr_in(up);
+
+@@ -1988,9 +1988,9 @@ static int serial8250_tx_threshold_handl
+ if ((iir & UART_IIR_ID) == UART_IIR_THRI) {
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ serial8250_tx_chars(up);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ iir = serial_port_in(port, UART_IIR);
+@@ -2005,10 +2005,10 @@ static unsigned int serial8250_tx_empty(
+
+ serial8250_rpm_get(up);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ if (!serial8250_tx_dma_running(up) && uart_lsr_tx_empty(serial_lsr_in(up)))
+ result = TIOCSER_TEMT;
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ serial8250_rpm_put(up);
+
+@@ -2070,13 +2070,13 @@ static void serial8250_break_ctl(struct
+ unsigned long flags;
+
+ serial8250_rpm_get(up);
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ if (break_state == -1)
+ up->lcr |= UART_LCR_SBC;
+ else
+ up->lcr &= ~UART_LCR_SBC;
+ serial_port_out(port, UART_LCR, up->lcr);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ serial8250_rpm_put(up);
+ }
+
+@@ -2211,7 +2211,7 @@ int serial8250_do_startup(struct uart_po
+ *
+ * Synchronize UART_IER access against the console.
+ */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ up->acr = 0;
+ serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
+ serial_port_out(port, UART_EFR, UART_EFR_ECB);
+@@ -2221,7 +2221,7 @@ int serial8250_do_startup(struct uart_po
+ serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
+ serial_port_out(port, UART_EFR, UART_EFR_ECB);
+ serial_port_out(port, UART_LCR, 0);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ if (port->type == PORT_DA830) {
+@@ -2230,10 +2230,10 @@ int serial8250_do_startup(struct uart_po
+ *
+ * Synchronize UART_IER access against the console.
+ */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ serial_port_out(port, UART_IER, 0);
+ serial_port_out(port, UART_DA830_PWREMU_MGMT, 0);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ mdelay(10);
+
+ /* Enable Tx, Rx and free run mode */
+@@ -2347,7 +2347,7 @@ int serial8250_do_startup(struct uart_po
+ *
+ * Synchronize UART_IER access against the console.
+ */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ wait_for_xmitr(up, UART_LSR_THRE);
+ serial_port_out_sync(port, UART_IER, UART_IER_THRI);
+@@ -2359,7 +2359,7 @@ int serial8250_do_startup(struct uart_po
+ iir = serial_port_in(port, UART_IIR);
+ serial_port_out(port, UART_IER, 0);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ if (port->irqflags & IRQF_SHARED)
+ enable_irq(port->irq);
+@@ -2382,7 +2382,7 @@ int serial8250_do_startup(struct uart_po
+ */
+ serial_port_out(port, UART_LCR, UART_LCR_WLEN8);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ if (up->port.flags & UPF_FOURPORT) {
+ if (!up->port.irq)
+ up->port.mctrl |= TIOCM_OUT1;
+@@ -2428,7 +2428,7 @@ int serial8250_do_startup(struct uart_po
+ }
+
+ dont_test_tx_en:
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ /*
+ * Clear the interrupt registers again for luck, and clear the
+@@ -2499,17 +2499,17 @@ void serial8250_do_shutdown(struct uart_
+ *
+ * Synchronize UART_IER access against the console.
+ */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ up->ier = 0;
+ serial_port_out(port, UART_IER, 0);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ synchronize_irq(port->irq);
+
+ if (up->dma)
+ serial8250_release_dma(up);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ if (port->flags & UPF_FOURPORT) {
+ /* reset interrupts on the AST Fourport board */
+ inb((port->iobase & 0xfe0) | 0x1f);
+@@ -2518,7 +2518,7 @@ void serial8250_do_shutdown(struct uart_
+ port->mctrl &= ~TIOCM_OUT2;
+
+ serial8250_set_mctrl(port, port->mctrl);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ /*
+ * Disable break condition and FIFOs
+@@ -2754,14 +2754,14 @@ void serial8250_update_uartclk(struct ua
+ quot = serial8250_get_divisor(port, baud, &frac);
+
+ serial8250_rpm_get(up);
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ serial8250_set_divisor(port, baud, quot, frac);
+ serial_port_out(port, UART_LCR, up->lcr);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ serial8250_rpm_put(up);
+
+ out_unlock:
+@@ -2798,7 +2798,7 @@ serial8250_do_set_termios(struct uart_po
+ * Synchronize UART_IER access against the console.
+ */
+ serial8250_rpm_get(up);
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ up->lcr = cval; /* Save computed LCR */
+
+@@ -2901,7 +2901,7 @@ serial8250_do_set_termios(struct uart_po
+ serial_port_out(port, UART_FCR, up->fcr); /* set fcr */
+ }
+ serial8250_set_mctrl(port, port->mctrl);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ serial8250_rpm_put(up);
+
+ /* Don't rewrite B0 */
+@@ -2924,15 +2924,15 @@ void serial8250_do_set_ldisc(struct uart
+ {
+ if (termios->c_line == N_PPS) {
+ port->flags |= UPF_HARDPPS_CD;
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+ serial8250_enable_ms(port);
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+ } else {
+ port->flags &= ~UPF_HARDPPS_CD;
+ if (!UART_ENABLE_MS(port, termios->c_cflag)) {
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+ serial8250_disable_ms(port);
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+ }
+ }
+ }
+@@ -3406,9 +3406,9 @@ void serial8250_console_write(struct uar
+ touch_nmi_watchdog();
+
+ if (oops_in_progress)
+- locked = spin_trylock_irqsave(&port->lock, flags);
++ locked = uart_port_trylock_irqsave(port, &flags);
+ else
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /*
+ * First save the IER then disable the interrupts
+@@ -3478,7 +3478,7 @@ void serial8250_console_write(struct uar
+ serial8250_modem_status(up);
+
+ if (locked)
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static unsigned int probe_baud(struct uart_port *port)
diff --git a/debian/patches-rt/0007-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch b/debian/patches-rt/0007-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch
new file mode 100644
index 0000000000..d784f90a94
--- /dev/null
+++ b/debian/patches-rt/0007-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch
@@ -0,0 +1,29 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 19 Dec 2018 10:47:02 +0100
+Subject: [PATCH 07/10] drm/i915: skip DRM_I915_LOW_LEVEL_TRACEPOINTS with
+ NOTRACE
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+The order of the header files is important. If this header file is
+included after tracepoint.h was included then the NOTRACE here becomes a
+nop. Currently this happens for two .c files which use the tracepoitns
+behind DRM_I915_LOW_LEVEL_TRACEPOINTS.
+
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ drivers/gpu/drm/i915/i915_trace.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/i915_trace.h
++++ b/drivers/gpu/drm/i915/i915_trace.h
+@@ -326,7 +326,7 @@ DEFINE_EVENT(i915_request, i915_request_
+ TP_ARGS(rq)
+ );
+
+-#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
++#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS) && !defined(NOTRACE)
+ DEFINE_EVENT(i915_request, i915_request_guc_submit,
+ TP_PROTO(struct i915_request *rq),
+ TP_ARGS(rq)
diff --git a/debian/patches-rt/0007-futex-pi-Fix-recursive-rt_mutex-waiter-state.patch b/debian/patches-rt/0007-futex-pi-Fix-recursive-rt_mutex-waiter-state.patch
new file mode 100644
index 0000000000..76a9b60581
--- /dev/null
+++ b/debian/patches-rt/0007-futex-pi-Fix-recursive-rt_mutex-waiter-state.patch
@@ -0,0 +1,198 @@
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 15 Sep 2023 17:19:44 +0200
+Subject: [PATCH 7/7] futex/pi: Fix recursive rt_mutex waiter state
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Some new assertions pointed out that the existing code has nested rt_mutex wait
+state in the futex code.
+
+Specifically, the futex_lock_pi() cancel case uses spin_lock() while there
+still is a rt_waiter enqueued for this task, resulting in a state where there
+are two waiters for the same task (and task_struct::pi_blocked_on gets
+scrambled).
+
+The reason to take hb->lock at this point is to avoid the wake_futex_pi()
+EAGAIN case.
+
+This happens when futex_top_waiter() and rt_mutex_top_waiter() state becomes
+inconsistent. The current rules are such that this inconsistency will not be
+observed.
+
+Notably the case that needs to be avoided is where futex_lock_pi() and
+futex_unlock_pi() interleave such that unlock will fail to observe a new
+waiter.
+
+*However* the case at hand is where a waiter is leaving, in this case the race
+means a waiter that is going away is not observed -- which is harmless,
+provided this race is explicitly handled.
+
+This is a somewhat dangerous proposition because the converse race is not
+observing a new waiter, which must absolutely not happen. But since the race is
+valid this cannot be asserted.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lkml.kernel.org/r/20230915151943.GD6743@noisy.programming.kicks-ass.net
+---
+ kernel/futex/pi.c | 76 ++++++++++++++++++++++++++++++-------------------
+ kernel/futex/requeue.c | 6 ++-
+ 2 files changed, 52 insertions(+), 30 deletions(-)
+
+--- a/kernel/futex/pi.c
++++ b/kernel/futex/pi.c
+@@ -611,29 +611,16 @@ int futex_lock_pi_atomic(u32 __user *uad
+ /*
+ * Caller must hold a reference on @pi_state.
+ */
+-static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
++static int wake_futex_pi(u32 __user *uaddr, u32 uval,
++ struct futex_pi_state *pi_state,
++ struct rt_mutex_waiter *top_waiter)
+ {
+- struct rt_mutex_waiter *top_waiter;
+ struct task_struct *new_owner;
+ bool postunlock = false;
+ DEFINE_RT_WAKE_Q(wqh);
+ u32 curval, newval;
+ int ret = 0;
+
+- top_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex);
+- if (WARN_ON_ONCE(!top_waiter)) {
+- /*
+- * As per the comment in futex_unlock_pi() this should not happen.
+- *
+- * When this happens, give up our locks and try again, giving
+- * the futex_lock_pi() instance time to complete, either by
+- * waiting on the rtmutex or removing itself from the futex
+- * queue.
+- */
+- ret = -EAGAIN;
+- goto out_unlock;
+- }
+-
+ new_owner = top_waiter->task;
+
+ /*
+@@ -1046,20 +1033,34 @@ int futex_lock_pi(u32 __user *uaddr, uns
+ ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
+
+ cleanup:
+- spin_lock(q.lock_ptr);
+ /*
+ * If we failed to acquire the lock (deadlock/signal/timeout), we must
+- * first acquire the hb->lock before removing the lock from the
+- * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
+- * lists consistent.
++ * must unwind the above, however we canont lock hb->lock because
++ * rt_mutex already has a waiter enqueued and hb->lock can itself try
++ * and enqueue an rt_waiter through rtlock.
++ *
++ * Doing the cleanup without holding hb->lock can cause inconsistent
++ * state between hb and pi_state, but only in the direction of not
++ * seeing a waiter that is leaving.
++ *
++ * See futex_unlock_pi(), it deals with this inconsistency.
++ *
++ * There be dragons here, since we must deal with the inconsistency on
++ * the way out (here), it is impossible to detect/warn about the race
++ * the other way around (missing an incoming waiter).
+ *
+- * In particular; it is important that futex_unlock_pi() can not
+- * observe this inconsistency.
++ * What could possibly go wrong...
+ */
+ if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
+ ret = 0;
+
+ /*
++ * Now that the rt_waiter has been dequeued, it is safe to use
++ * spinlock/rtlock (which might enqueue its own rt_waiter) and fix up
++ * the
++ */
++ spin_lock(q.lock_ptr);
++ /*
+ * Waiter is unqueued.
+ */
+ rt_mutex_post_schedule();
+@@ -1143,6 +1144,7 @@ int futex_unlock_pi(u32 __user *uaddr, u
+ top_waiter = futex_top_waiter(hb, &key);
+ if (top_waiter) {
+ struct futex_pi_state *pi_state = top_waiter->pi_state;
++ struct rt_mutex_waiter *rt_waiter;
+
+ ret = -EINVAL;
+ if (!pi_state)
+@@ -1155,22 +1157,39 @@ int futex_unlock_pi(u32 __user *uaddr, u
+ if (pi_state->owner != current)
+ goto out_unlock;
+
+- get_pi_state(pi_state);
+ /*
+ * By taking wait_lock while still holding hb->lock, we ensure
+- * there is no point where we hold neither; and therefore
+- * wake_futex_p() must observe a state consistent with what we
+- * observed.
++ * there is no point where we hold neither; and thereby
++ * wake_futex_pi() must observe any new waiters.
++ *
++ * Since the cleanup: case in futex_lock_pi() removes the
++ * rt_waiter without holding hb->lock, it is possible for
++ * wake_futex_pi() to not find a waiter while the above does,
++ * in this case the waiter is on the way out and it can be
++ * ignored.
+ *
+ * In particular; this forces __rt_mutex_start_proxy() to
+ * complete such that we're guaranteed to observe the
+- * rt_waiter. Also see the WARN in wake_futex_pi().
++ * rt_waiter.
+ */
+ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
++
++ /*
++ * Futex vs rt_mutex waiter state -- if there are no rt_mutex
++ * waiters even though futex thinks there are, then the waiter
++ * is leaving and the uncontended path is safe to take.
++ */
++ rt_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex);
++ if (!rt_waiter) {
++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
++ goto do_uncontended;
++ }
++
++ get_pi_state(pi_state);
+ spin_unlock(&hb->lock);
+
+ /* drops pi_state->pi_mutex.wait_lock */
+- ret = wake_futex_pi(uaddr, uval, pi_state);
++ ret = wake_futex_pi(uaddr, uval, pi_state, rt_waiter);
+
+ put_pi_state(pi_state);
+
+@@ -1198,6 +1217,7 @@ int futex_unlock_pi(u32 __user *uaddr, u
+ return ret;
+ }
+
++do_uncontended:
+ /*
+ * We have no kernel internal state, i.e. no waiters in the
+ * kernel. Waiters which are about to queue themselves are stuck
+--- a/kernel/futex/requeue.c
++++ b/kernel/futex/requeue.c
+@@ -850,11 +850,13 @@ int futex_wait_requeue_pi(u32 __user *ua
+ pi_mutex = &q.pi_state->pi_mutex;
+ ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
+
+- /* Current is not longer pi_blocked_on */
+- spin_lock(q.lock_ptr);
++ /*
++ * See futex_unlock_pi()'s cleanup: comment.
++ */
+ if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
+ ret = 0;
+
++ spin_lock(q.lock_ptr);
+ debug_rt_mutex_free_waiter(&rt_waiter);
+ /*
+ * Fixup the pi_state owner and possibly acquire the lock if we
diff --git a/debian/patches-rt/0007-serial-8250_dma-Use-port-lock-wrappers.patch b/debian/patches-rt/0007-serial-8250_dma-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..bc8686e892
--- /dev/null
+++ b/debian/patches-rt/0007-serial-8250_dma-Use-port-lock-wrappers.patch
@@ -0,0 +1,80 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:24 +0206
+Subject: [PATCH 007/134] serial: 8250_dma: Use port lock wrappers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-8-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/8250/8250_dma.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/tty/serial/8250/8250_dma.c
++++ b/drivers/tty/serial/8250/8250_dma.c
+@@ -22,7 +22,7 @@ static void __dma_tx_complete(void *para
+ dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+
+- spin_lock_irqsave(&p->port.lock, flags);
++ uart_port_lock_irqsave(&p->port, &flags);
+
+ dma->tx_running = 0;
+
+@@ -35,7 +35,7 @@ static void __dma_tx_complete(void *para
+ if (ret || !dma->tx_running)
+ serial8250_set_THRI(p);
+
+- spin_unlock_irqrestore(&p->port.lock, flags);
++ uart_port_unlock_irqrestore(&p->port, flags);
+ }
+
+ static void __dma_rx_complete(struct uart_8250_port *p)
+@@ -70,7 +70,7 @@ static void dma_rx_complete(void *param)
+ struct uart_8250_dma *dma = p->dma;
+ unsigned long flags;
+
+- spin_lock_irqsave(&p->port.lock, flags);
++ uart_port_lock_irqsave(&p->port, &flags);
+ if (dma->rx_running)
+ __dma_rx_complete(p);
+
+@@ -80,7 +80,7 @@ static void dma_rx_complete(void *param)
+ */
+ if (!dma->rx_running && (serial_lsr_in(p) & UART_LSR_DR))
+ p->dma->rx_dma(p);
+- spin_unlock_irqrestore(&p->port.lock, flags);
++ uart_port_unlock_irqrestore(&p->port, flags);
+ }
+
+ int serial8250_tx_dma(struct uart_8250_port *p)
diff --git a/debian/patches-rt/0008-drm-i915-gt-Queue-and-wait-for-the-irq_work-item.patch b/debian/patches-rt/0008-drm-i915-gt-Queue-and-wait-for-the-irq_work-item.patch
new file mode 100644
index 0000000000..0322ae1544
--- /dev/null
+++ b/debian/patches-rt/0008-drm-i915-gt-Queue-and-wait-for-the-irq_work-item.patch
@@ -0,0 +1,42 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 8 Sep 2021 17:18:00 +0200
+Subject: [PATCH 08/10] drm/i915/gt: Queue and wait for the irq_work item.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Disabling interrupts and invoking the irq_work function directly breaks
+on PREEMPT_RT.
+PREEMPT_RT does not invoke all irq_work from hardirq context because
+some of the user have spinlock_t locking in the callback function.
+These locks are then turned into a sleeping locks which can not be
+acquired with disabled interrupts.
+
+Using irq_work_queue() has the benefit that the irqwork will be invoked
+in the regular context. In general there is "no" delay between enqueuing
+the callback and its invocation because the interrupt is raised right
+away on architectures which support it (which includes x86).
+
+Use irq_work_queue() + irq_work_sync() instead invoking the callback
+directly.
+
+Reported-by: Clark Williams <williams@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+---
+ drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
++++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+@@ -312,10 +312,9 @@ void __intel_breadcrumbs_park(struct int
+ /* Kick the work once more to drain the signalers, and disarm the irq */
+ irq_work_sync(&b->irq_work);
+ while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) {
+- local_irq_disable();
+- signal_irq_work(&b->irq_work);
+- local_irq_enable();
++ irq_work_queue(&b->irq_work);
+ cond_resched();
++ irq_work_sync(&b->irq_work);
+ }
+ }
+
diff --git a/debian/patches-rt/0008-serial-8250_dw-Use-port-lock-wrappers.patch b/debian/patches-rt/0008-serial-8250_dw-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..d5394fc7a3
--- /dev/null
+++ b/debian/patches-rt/0008-serial-8250_dw-Use-port-lock-wrappers.patch
@@ -0,0 +1,69 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:25 +0206
+Subject: [PATCH 008/134] serial: 8250_dw: Use port lock wrappers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-9-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/8250/8250_dw.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/tty/serial/8250/8250_dw.c
++++ b/drivers/tty/serial/8250/8250_dw.c
+@@ -263,20 +263,20 @@ static int dw8250_handle_irq(struct uart
+ * so we limit the workaround only to non-DMA mode.
+ */
+ if (!up->dma && rx_timeout) {
+- spin_lock_irqsave(&p->lock, flags);
++ uart_port_lock_irqsave(p, &flags);
+ status = serial_lsr_in(up);
+
+ if (!(status & (UART_LSR_DR | UART_LSR_BI)))
+ (void) p->serial_in(p, UART_RX);
+
+- spin_unlock_irqrestore(&p->lock, flags);
++ uart_port_unlock_irqrestore(p, flags);
+ }
+
+ /* Manually stop the Rx DMA transfer when acting as flow controller */
+ if (quirks & DW_UART_QUIRK_IS_DMA_FC && up->dma && up->dma->rx_running && rx_timeout) {
+- spin_lock_irqsave(&p->lock, flags);
++ uart_port_lock_irqsave(p, &flags);
+ status = serial_lsr_in(up);
+- spin_unlock_irqrestore(&p->lock, flags);
++ uart_port_unlock_irqrestore(p, flags);
+
+ if (status & (UART_LSR_DR | UART_LSR_BI)) {
+ dw8250_writel_ext(p, RZN1_UART_RDMACR, 0);
diff --git a/debian/patches-rt/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch b/debian/patches-rt/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch
new file mode 100644
index 0000000000..3245e145f0
--- /dev/null
+++ b/debian/patches-rt/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch
@@ -0,0 +1,89 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 8 Sep 2021 19:03:41 +0200
+Subject: [PATCH 09/10] drm/i915/gt: Use spin_lock_irq() instead of
+ local_irq_disable() + spin_lock()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+execlists_dequeue() is invoked from a function which uses
+local_irq_disable() to disable interrupts so the spin_lock() behaves
+like spin_lock_irq().
+This breaks PREEMPT_RT because local_irq_disable() + spin_lock() is not
+the same as spin_lock_irq().
+
+execlists_dequeue_irq() and execlists_dequeue() has each one caller
+only. If intel_engine_cs::active::lock is acquired and released with the
+_irq suffix then it behaves almost as if execlists_dequeue() would be
+invoked with disabled interrupts. The difference is the last part of the
+function which is then invoked with enabled interrupts.
+I can't tell if this makes a difference. From looking at it, it might
+work to move the last unlock at the end of the function as I didn't find
+anything that would acquire the lock again.
+
+Reported-by: Clark Williams <williams@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+---
+ drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 17 +++++------------
+ 1 file changed, 5 insertions(+), 12 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
++++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+@@ -1303,7 +1303,7 @@ static void execlists_dequeue(struct int
+ * and context switches) submission.
+ */
+
+- spin_lock(&sched_engine->lock);
++ spin_lock_irq(&sched_engine->lock);
+
+ /*
+ * If the queue is higher priority than the last
+@@ -1403,7 +1403,7 @@ static void execlists_dequeue(struct int
+ * Even if ELSP[1] is occupied and not worthy
+ * of timeslices, our queue might be.
+ */
+- spin_unlock(&sched_engine->lock);
++ spin_unlock_irq(&sched_engine->lock);
+ return;
+ }
+ }
+@@ -1429,7 +1429,7 @@ static void execlists_dequeue(struct int
+
+ if (last && !can_merge_rq(last, rq)) {
+ spin_unlock(&ve->base.sched_engine->lock);
+- spin_unlock(&engine->sched_engine->lock);
++ spin_unlock_irq(&engine->sched_engine->lock);
+ return; /* leave this for another sibling */
+ }
+
+@@ -1591,7 +1591,7 @@ static void execlists_dequeue(struct int
+ */
+ sched_engine->queue_priority_hint = queue_prio(sched_engine);
+ i915_sched_engine_reset_on_empty(sched_engine);
+- spin_unlock(&sched_engine->lock);
++ spin_unlock_irq(&sched_engine->lock);
+
+ /*
+ * We can skip poking the HW if we ended up with exactly the same set
+@@ -1617,13 +1617,6 @@ static void execlists_dequeue(struct int
+ }
+ }
+
+-static void execlists_dequeue_irq(struct intel_engine_cs *engine)
+-{
+- local_irq_disable(); /* Suspend interrupts across request submission */
+- execlists_dequeue(engine);
+- local_irq_enable(); /* flush irq_work (e.g. breadcrumb enabling) */
+-}
+-
+ static void clear_ports(struct i915_request **ports, int count)
+ {
+ memset_p((void **)ports, NULL, count);
+@@ -2478,7 +2471,7 @@ static void execlists_submission_tasklet
+ }
+
+ if (!engine->execlists.pending[0]) {
+- execlists_dequeue_irq(engine);
++ execlists_dequeue(engine);
+ start_timeslice(engine);
+ }
+
diff --git a/debian/patches-rt/0009-serial-8250_exar-Use-port-lock-wrappers.patch b/debian/patches-rt/0009-serial-8250_exar-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..80c37fba6c
--- /dev/null
+++ b/debian/patches-rt/0009-serial-8250_exar-Use-port-lock-wrappers.patch
@@ -0,0 +1,52 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:26 +0206
+Subject: [PATCH 009/134] serial: 8250_exar: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-10-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/8250/8250_exar.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/tty/serial/8250/8250_exar.c
++++ b/drivers/tty/serial/8250/8250_exar.c
+@@ -201,9 +201,9 @@ static int xr17v35x_startup(struct uart_
+ *
+ * Synchronize UART_IER access against the console.
+ */
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+ serial_port_out(port, UART_IER, 0);
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+
+ return serial8250_do_startup(port);
+ }
diff --git a/debian/patches-rt/0010-drm-i915-Drop-the-irqs_disabled-check.patch b/debian/patches-rt/0010-drm-i915-Drop-the-irqs_disabled-check.patch
new file mode 100644
index 0000000000..83a516ce8b
--- /dev/null
+++ b/debian/patches-rt/0010-drm-i915-Drop-the-irqs_disabled-check.patch
@@ -0,0 +1,39 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 1 Oct 2021 20:01:03 +0200
+Subject: [PATCH 10/10] drm/i915: Drop the irqs_disabled() check
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+The !irqs_disabled() check triggers on PREEMPT_RT even with
+i915_sched_engine::lock acquired. The reason is the lock is transformed
+into a sleeping lock on PREEMPT_RT and does not disable interrupts.
+
+There is no need to check for disabled interrupts. The lockdep
+annotation below already check if the lock has been acquired by the
+caller and will yell if the interrupts are not disabled.
+
+Remove the !irqs_disabled() check.
+
+Reported-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/i915/i915_request.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_request.c
++++ b/drivers/gpu/drm/i915/i915_request.c
+@@ -609,7 +609,6 @@ bool __i915_request_submit(struct i915_r
+
+ RQ_TRACE(request, "\n");
+
+- GEM_BUG_ON(!irqs_disabled());
+ lockdep_assert_held(&engine->sched_engine->lock);
+
+ /*
+@@ -718,7 +717,6 @@ void __i915_request_unsubmit(struct i915
+ */
+ RQ_TRACE(request, "\n");
+
+- GEM_BUG_ON(!irqs_disabled());
+ lockdep_assert_held(&engine->sched_engine->lock);
+
+ /*
diff --git a/debian/patches-rt/0010-serial-8250_fsl-Use-port-lock-wrappers.patch b/debian/patches-rt/0010-serial-8250_fsl-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..abe7c81a98
--- /dev/null
+++ b/debian/patches-rt/0010-serial-8250_fsl-Use-port-lock-wrappers.patch
@@ -0,0 +1,63 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:27 +0206
+Subject: [PATCH 010/134] serial: 8250_fsl: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-11-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/8250/8250_fsl.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/tty/serial/8250/8250_fsl.c
++++ b/drivers/tty/serial/8250/8250_fsl.c
+@@ -30,11 +30,11 @@ int fsl8250_handle_irq(struct uart_port
+ unsigned int iir;
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ iir = port->serial_in(port, UART_IIR);
+ if (iir & UART_IIR_NO_INT) {
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ return 0;
+ }
+
+@@ -54,7 +54,7 @@ int fsl8250_handle_irq(struct uart_port
+ if (unlikely(up->lsr_saved_flags & UART_LSR_BI)) {
+ up->lsr_saved_flags &= ~UART_LSR_BI;
+ port->serial_in(port, UART_RX);
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ return 1;
+ }
+
diff --git a/debian/patches-rt/0011-serial-8250_mtk-Use-port-lock-wrappers.patch b/debian/patches-rt/0011-serial-8250_mtk-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..a668182042
--- /dev/null
+++ b/debian/patches-rt/0011-serial-8250_mtk-Use-port-lock-wrappers.patch
@@ -0,0 +1,77 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:28 +0206
+Subject: [PATCH 011/134] serial: 8250_mtk: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Chen-Yu Tsai <wenst@chromium.org>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-12-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/8250/8250_mtk.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/tty/serial/8250/8250_mtk.c
++++ b/drivers/tty/serial/8250/8250_mtk.c
+@@ -102,7 +102,7 @@ static void mtk8250_dma_rx_complete(void
+ if (data->rx_status == DMA_RX_SHUTDOWN)
+ return;
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
+ total = dma->rx_size - state.residue;
+@@ -128,7 +128,7 @@ static void mtk8250_dma_rx_complete(void
+
+ mtk8250_rx_dma(up);
+
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ }
+
+ static void mtk8250_rx_dma(struct uart_8250_port *up)
+@@ -368,7 +368,7 @@ mtk8250_set_termios(struct uart_port *po
+ * Ok, we're now changing the port state. Do it with
+ * interrupts disabled.
+ */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /*
+ * Update the per-port timeout.
+@@ -416,7 +416,7 @@ mtk8250_set_termios(struct uart_port *po
+ if (uart_console(port))
+ up->port.cons->cflag = termios->c_cflag;
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ /* Don't rewrite B0 */
+ if (tty_termios_baud_rate(termios))
+ tty_termios_encode_baud_rate(termios, baud, baud);
diff --git a/debian/patches-rt/0012-serial-8250_omap-Use-port-lock-wrappers.patch b/debian/patches-rt/0012-serial-8250_omap-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..10ebd70ea0
--- /dev/null
+++ b/debian/patches-rt/0012-serial-8250_omap-Use-port-lock-wrappers.patch
@@ -0,0 +1,236 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:29 +0206
+Subject: [PATCH 012/134] serial: 8250_omap: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-13-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/8250/8250_omap.c | 52 ++++++++++++++++++------------------
+ 1 file changed, 26 insertions(+), 26 deletions(-)
+
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -401,7 +401,7 @@ static void omap_8250_set_termios(struct
+ * interrupts disabled.
+ */
+ pm_runtime_get_sync(port->dev);
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+
+ /*
+ * Update the per-port timeout.
+@@ -504,7 +504,7 @@ static void omap_8250_set_termios(struct
+ }
+ omap8250_restore_regs(up);
+
+- spin_unlock_irq(&up->port.lock);
++ uart_port_unlock_irq(&up->port);
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
+
+@@ -529,7 +529,7 @@ static void omap_8250_pm(struct uart_por
+ pm_runtime_get_sync(port->dev);
+
+ /* Synchronize UART_IER access against the console. */
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ efr = serial_in(up, UART_EFR);
+@@ -541,7 +541,7 @@ static void omap_8250_pm(struct uart_por
+ serial_out(up, UART_EFR, efr);
+ serial_out(up, UART_LCR, 0);
+
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
+@@ -660,7 +660,7 @@ static irqreturn_t omap8250_irq(int irq,
+ unsigned long delay;
+
+ /* Synchronize UART_IER access against the console. */
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+ up->ier = port->serial_in(port, UART_IER);
+ if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) {
+ port->ops->stop_rx(port);
+@@ -670,7 +670,7 @@ static irqreturn_t omap8250_irq(int irq,
+ */
+ cancel_delayed_work(&up->overrun_backoff);
+ }
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+
+ delay = msecs_to_jiffies(up->overrun_backoff_time_ms);
+ schedule_delayed_work(&up->overrun_backoff, delay);
+@@ -717,10 +717,10 @@ static int omap_8250_startup(struct uart
+ }
+
+ /* Synchronize UART_IER access against the console. */
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+ up->ier = UART_IER_RLSI | UART_IER_RDI;
+ serial_out(up, UART_IER, up->ier);
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+
+ #ifdef CONFIG_PM
+ up->capabilities |= UART_CAP_RPM;
+@@ -733,9 +733,9 @@ static int omap_8250_startup(struct uart
+ serial_out(up, UART_OMAP_WER, priv->wer);
+
+ if (up->dma && !(priv->habit & UART_HAS_EFR2)) {
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+ up->dma->rx_dma(up);
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+ }
+
+ enable_irq(up->port.irq);
+@@ -761,10 +761,10 @@ static void omap_8250_shutdown(struct ua
+ serial_out(up, UART_OMAP_EFR2, 0x0);
+
+ /* Synchronize UART_IER access against the console. */
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+ up->ier = 0;
+ serial_out(up, UART_IER, 0);
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+ disable_irq_nosync(up->port.irq);
+ dev_pm_clear_wake_irq(port->dev);
+
+@@ -789,10 +789,10 @@ static void omap_8250_throttle(struct ua
+
+ pm_runtime_get_sync(port->dev);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ port->ops->stop_rx(port);
+ priv->throttled = true;
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
+@@ -807,14 +807,14 @@ static void omap_8250_unthrottle(struct
+ pm_runtime_get_sync(port->dev);
+
+ /* Synchronize UART_IER access against the console. */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ priv->throttled = false;
+ if (up->dma)
+ up->dma->rx_dma(up);
+ up->ier |= UART_IER_RLSI | UART_IER_RDI;
+ port->read_status_mask |= UART_LSR_DR;
+ serial_out(up, UART_IER, up->ier);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
+@@ -958,7 +958,7 @@ static void __dma_rx_complete(void *para
+ unsigned long flags;
+
+ /* Synchronize UART_IER access against the console. */
+- spin_lock_irqsave(&p->port.lock, flags);
++ uart_port_lock_irqsave(&p->port, &flags);
+
+ /*
+ * If the tx status is not DMA_COMPLETE, then this is a delayed
+@@ -967,7 +967,7 @@ static void __dma_rx_complete(void *para
+ */
+ if (dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state) !=
+ DMA_COMPLETE) {
+- spin_unlock_irqrestore(&p->port.lock, flags);
++ uart_port_unlock_irqrestore(&p->port, flags);
+ return;
+ }
+ __dma_rx_do_complete(p);
+@@ -978,7 +978,7 @@ static void __dma_rx_complete(void *para
+ omap_8250_rx_dma(p);
+ }
+
+- spin_unlock_irqrestore(&p->port.lock, flags);
++ uart_port_unlock_irqrestore(&p->port, flags);
+ }
+
+ static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
+@@ -1083,7 +1083,7 @@ static void omap_8250_dma_tx_complete(vo
+ dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+
+- spin_lock_irqsave(&p->port.lock, flags);
++ uart_port_lock_irqsave(&p->port, &flags);
+
+ dma->tx_running = 0;
+
+@@ -1112,7 +1112,7 @@ static void omap_8250_dma_tx_complete(vo
+ serial8250_set_THRI(p);
+ }
+
+- spin_unlock_irqrestore(&p->port.lock, flags);
++ uart_port_unlock_irqrestore(&p->port, flags);
+ }
+
+ static int omap_8250_tx_dma(struct uart_8250_port *p)
+@@ -1278,7 +1278,7 @@ static int omap_8250_dma_handle_irq(stru
+ return IRQ_HANDLED;
+ }
+
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+
+ status = serial_port_in(port, UART_LSR);
+
+@@ -1756,15 +1756,15 @@ static int omap8250_runtime_resume(struc
+ up = serial8250_get_port(priv->line);
+
+ if (up && omap8250_lost_context(up)) {
+- spin_lock_irq(&up->port.lock);
++ uart_port_lock_irq(&up->port);
+ omap8250_restore_regs(up);
+- spin_unlock_irq(&up->port.lock);
++ uart_port_unlock_irq(&up->port);
+ }
+
+ if (up && up->dma && up->dma->rxchan && !(priv->habit & UART_HAS_EFR2)) {
+- spin_lock_irq(&up->port.lock);
++ uart_port_lock_irq(&up->port);
+ omap_8250_rx_dma(up);
+- spin_unlock_irq(&up->port.lock);
++ uart_port_unlock_irq(&up->port);
+ }
+
+ priv->latency = priv->calc_latency;
diff --git a/debian/patches-rt/0013-serial-8250_pci1xxxx-Use-port-lock-wrappers.patch b/debian/patches-rt/0013-serial-8250_pci1xxxx-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..2916387692
--- /dev/null
+++ b/debian/patches-rt/0013-serial-8250_pci1xxxx-Use-port-lock-wrappers.patch
@@ -0,0 +1,66 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:30 +0206
+Subject: [PATCH 013/134] serial: 8250_pci1xxxx: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-14-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/8250/8250_pci1xxxx.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/tty/serial/8250/8250_pci1xxxx.c
++++ b/drivers/tty/serial/8250/8250_pci1xxxx.c
+@@ -225,10 +225,10 @@ static bool pci1xxxx_port_suspend(int li
+ if (port->suspended == 0 && port->dev) {
+ wakeup_mask = readb(up->port.membase + UART_WAKE_MASK_REG);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ port->mctrl &= ~TIOCM_OUT2;
+ port->ops->set_mctrl(port, port->mctrl);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ ret = (wakeup_mask & UART_WAKE_SRCS) != UART_WAKE_SRCS;
+ }
+@@ -251,10 +251,10 @@ static void pci1xxxx_port_resume(int lin
+ writeb(UART_WAKE_SRCS, port->membase + UART_WAKE_REG);
+
+ if (port->suspended == 0) {
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ port->mctrl |= TIOCM_OUT2;
+ port->ops->set_mctrl(port, port->mctrl);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+ mutex_unlock(&tport->mutex);
+ }
diff --git a/debian/patches-rt/0014-serial-altera_jtaguart-Use-port-lock-wrappers.patch b/debian/patches-rt/0014-serial-altera_jtaguart-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..46dbf785d0
--- /dev/null
+++ b/debian/patches-rt/0014-serial-altera_jtaguart-Use-port-lock-wrappers.patch
@@ -0,0 +1,133 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:31 +0206
+Subject: [PATCH 014/134] serial: altera_jtaguart: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-15-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/altera_jtaguart.c | 28 ++++++++++++++--------------
+ 1 file changed, 14 insertions(+), 14 deletions(-)
+
+--- a/drivers/tty/serial/altera_jtaguart.c
++++ b/drivers/tty/serial/altera_jtaguart.c
+@@ -147,14 +147,14 @@ static irqreturn_t altera_jtaguart_inter
+ isr = (readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) >>
+ ALTERA_JTAGUART_CONTROL_RI_OFF) & port->read_status_mask;
+
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+
+ if (isr & ALTERA_JTAGUART_CONTROL_RE_MSK)
+ altera_jtaguart_rx_chars(port);
+ if (isr & ALTERA_JTAGUART_CONTROL_WE_MSK)
+ altera_jtaguart_tx_chars(port);
+
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+
+ return IRQ_RETVAL(isr);
+ }
+@@ -180,14 +180,14 @@ static int altera_jtaguart_startup(struc
+ return ret;
+ }
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* Enable RX interrupts now */
+ port->read_status_mask = ALTERA_JTAGUART_CONTROL_RE_MSK;
+ writel(port->read_status_mask,
+ port->membase + ALTERA_JTAGUART_CONTROL_REG);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return 0;
+ }
+@@ -196,14 +196,14 @@ static void altera_jtaguart_shutdown(str
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* Disable all interrupts now */
+ port->read_status_mask = 0;
+ writel(port->read_status_mask,
+ port->membase + ALTERA_JTAGUART_CONTROL_REG);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ free_irq(port->irq, port);
+ }
+@@ -264,33 +264,33 @@ static void altera_jtaguart_console_putc
+ unsigned long flags;
+ u32 status;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ while (!altera_jtaguart_tx_space(port, &status)) {
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ if ((status & ALTERA_JTAGUART_CONTROL_AC_MSK) == 0) {
+ return; /* no connection activity */
+ }
+
+ cpu_relax();
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ }
+ writel(c, port->membase + ALTERA_JTAGUART_DATA_REG);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+ #else
+ static void altera_jtaguart_console_putc(struct uart_port *port, unsigned char c)
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ while (!altera_jtaguart_tx_space(port, NULL)) {
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ cpu_relax();
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ }
+ writel(c, port->membase + ALTERA_JTAGUART_DATA_REG);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+ #endif
+
diff --git a/debian/patches-rt/0015-serial-altera_uart-Use-port-lock-wrappers.patch b/debian/patches-rt/0015-serial-altera_uart-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..95e2c031f5
--- /dev/null
+++ b/debian/patches-rt/0015-serial-altera_uart-Use-port-lock-wrappers.patch
@@ -0,0 +1,116 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:32 +0206
+Subject: [PATCH 015/134] serial: altera_uart: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-16-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/altera_uart.c | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+--- a/drivers/tty/serial/altera_uart.c
++++ b/drivers/tty/serial/altera_uart.c
+@@ -164,13 +164,13 @@ static void altera_uart_break_ctl(struct
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ if (break_state == -1)
+ pp->imr |= ALTERA_UART_CONTROL_TRBK_MSK;
+ else
+ pp->imr &= ~ALTERA_UART_CONTROL_TRBK_MSK;
+ altera_uart_update_ctrl_reg(pp);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void altera_uart_set_termios(struct uart_port *port,
+@@ -187,10 +187,10 @@ static void altera_uart_set_termios(stru
+ tty_termios_copy_hw(termios, old);
+ tty_termios_encode_baud_rate(termios, baud, baud);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ uart_update_timeout(port, termios->c_cflag, baud);
+ altera_uart_writel(port, baudclk, ALTERA_UART_DIVISOR_REG);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ /*
+ * FIXME: port->read_status_mask and port->ignore_status_mask
+@@ -264,12 +264,12 @@ static irqreturn_t altera_uart_interrupt
+
+ isr = altera_uart_readl(port, ALTERA_UART_STATUS_REG) & pp->imr;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ if (isr & ALTERA_UART_STATUS_RRDY_MSK)
+ altera_uart_rx_chars(port);
+ if (isr & ALTERA_UART_STATUS_TRDY_MSK)
+ altera_uart_tx_chars(port);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return IRQ_RETVAL(isr);
+ }
+@@ -313,13 +313,13 @@ static int altera_uart_startup(struct ua
+ }
+ }
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* Enable RX interrupts now */
+ pp->imr = ALTERA_UART_CONTROL_RRDY_MSK;
+ altera_uart_update_ctrl_reg(pp);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return 0;
+ }
+@@ -329,13 +329,13 @@ static void altera_uart_shutdown(struct
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* Disable all interrupts now */
+ pp->imr = 0;
+ altera_uart_update_ctrl_reg(pp);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ if (port->irq)
+ free_irq(port->irq, port);
diff --git a/debian/patches-rt/0016-serial-amba-pl010-Use-port-lock-wrappers.patch b/debian/patches-rt/0016-serial-amba-pl010-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..f3ba735bfc
--- /dev/null
+++ b/debian/patches-rt/0016-serial-amba-pl010-Use-port-lock-wrappers.patch
@@ -0,0 +1,112 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:33 +0206
+Subject: [PATCH 016/134] serial: amba-pl010: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-17-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/amba-pl010.c | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+--- a/drivers/tty/serial/amba-pl010.c
++++ b/drivers/tty/serial/amba-pl010.c
+@@ -207,7 +207,7 @@ static irqreturn_t pl010_int(int irq, vo
+ unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
+ int handled = 0;
+
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+
+ status = readb(port->membase + UART010_IIR);
+ if (status) {
+@@ -228,7 +228,7 @@ static irqreturn_t pl010_int(int irq, vo
+ handled = 1;
+ }
+
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+
+ return IRQ_RETVAL(handled);
+ }
+@@ -270,14 +270,14 @@ static void pl010_break_ctl(struct uart_
+ unsigned long flags;
+ unsigned int lcr_h;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ lcr_h = readb(port->membase + UART010_LCRH);
+ if (break_state == -1)
+ lcr_h |= UART01x_LCRH_BRK;
+ else
+ lcr_h &= ~UART01x_LCRH_BRK;
+ writel(lcr_h, port->membase + UART010_LCRH);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static int pl010_startup(struct uart_port *port)
+@@ -385,7 +385,7 @@ pl010_set_termios(struct uart_port *port
+ if (port->fifosize > 1)
+ lcr_h |= UART01x_LCRH_FEN;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /*
+ * Update the per-port timeout.
+@@ -438,22 +438,22 @@ pl010_set_termios(struct uart_port *port
+ writel(lcr_h, port->membase + UART010_LCRH);
+ writel(old_cr, port->membase + UART010_CR);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void pl010_set_ldisc(struct uart_port *port, struct ktermios *termios)
+ {
+ if (termios->c_line == N_PPS) {
+ port->flags |= UPF_HARDPPS_CD;
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+ pl010_enable_ms(port);
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+ } else {
+ port->flags &= ~UPF_HARDPPS_CD;
+ if (!UART_ENABLE_MS(port, termios->c_cflag)) {
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+ pl010_disable_ms(port);
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+ }
+ }
+ }
diff --git a/debian/patches-rt/0017-serial-amba-pl011-Use-port-lock-wrappers.patch b/debian/patches-rt/0017-serial-amba-pl011-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..2a5e3a85ea
--- /dev/null
+++ b/debian/patches-rt/0017-serial-amba-pl011-Use-port-lock-wrappers.patch
@@ -0,0 +1,327 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:34 +0206
+Subject: [PATCH 017/134] serial: amba-pl011: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-18-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/amba-pl011.c | 72 ++++++++++++++++++++--------------------
+ 1 file changed, 36 insertions(+), 36 deletions(-)
+
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -347,9 +347,9 @@ static int pl011_fifo_to_tty(struct uart
+ flag = TTY_FRAME;
+ }
+
+- spin_unlock(&uap->port.lock);
++ uart_port_unlock(&uap->port);
+ sysrq = uart_handle_sysrq_char(&uap->port, ch & 255);
+- spin_lock(&uap->port.lock);
++ uart_port_lock(&uap->port);
+
+ if (!sysrq)
+ uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
+@@ -544,7 +544,7 @@ static void pl011_dma_tx_callback(void *
+ unsigned long flags;
+ u16 dmacr;
+
+- spin_lock_irqsave(&uap->port.lock, flags);
++ uart_port_lock_irqsave(&uap->port, &flags);
+ if (uap->dmatx.queued)
+ dma_unmap_single(dmatx->chan->device->dev, dmatx->dma,
+ dmatx->len, DMA_TO_DEVICE);
+@@ -565,7 +565,7 @@ static void pl011_dma_tx_callback(void *
+ if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
+ uart_circ_empty(&uap->port.state->xmit)) {
+ uap->dmatx.queued = false;
+- spin_unlock_irqrestore(&uap->port.lock, flags);
++ uart_port_unlock_irqrestore(&uap->port, flags);
+ return;
+ }
+
+@@ -576,7 +576,7 @@ static void pl011_dma_tx_callback(void *
+ */
+ pl011_start_tx_pio(uap);
+
+- spin_unlock_irqrestore(&uap->port.lock, flags);
++ uart_port_unlock_irqrestore(&uap->port, flags);
+ }
+
+ /*
+@@ -1004,7 +1004,7 @@ static void pl011_dma_rx_callback(void *
+ * routine to flush out the secondary DMA buffer while
+ * we immediately trigger the next DMA job.
+ */
+- spin_lock_irq(&uap->port.lock);
++ uart_port_lock_irq(&uap->port);
+ /*
+ * Rx data can be taken by the UART interrupts during
+ * the DMA irq handler. So we check the residue here.
+@@ -1020,7 +1020,7 @@ static void pl011_dma_rx_callback(void *
+ ret = pl011_dma_rx_trigger_dma(uap);
+
+ pl011_dma_rx_chars(uap, pending, lastbuf, false);
+- spin_unlock_irq(&uap->port.lock);
++ uart_port_unlock_irq(&uap->port);
+ /*
+ * Do this check after we picked the DMA chars so we don't
+ * get some IRQ immediately from RX.
+@@ -1086,11 +1086,11 @@ static void pl011_dma_rx_poll(struct tim
+ if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
+ > uap->dmarx.poll_timeout) {
+
+- spin_lock_irqsave(&uap->port.lock, flags);
++ uart_port_lock_irqsave(&uap->port, &flags);
+ pl011_dma_rx_stop(uap);
+ uap->im |= UART011_RXIM;
+ pl011_write(uap->im, uap, REG_IMSC);
+- spin_unlock_irqrestore(&uap->port.lock, flags);
++ uart_port_unlock_irqrestore(&uap->port, flags);
+
+ uap->dmarx.running = false;
+ dmaengine_terminate_all(rxchan);
+@@ -1186,10 +1186,10 @@ static void pl011_dma_shutdown(struct ua
+ while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy)
+ cpu_relax();
+
+- spin_lock_irq(&uap->port.lock);
++ uart_port_lock_irq(&uap->port);
+ uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
+ pl011_write(uap->dmacr, uap, REG_DMACR);
+- spin_unlock_irq(&uap->port.lock);
++ uart_port_unlock_irq(&uap->port);
+
+ if (uap->using_tx_dma) {
+ /* In theory, this should already be done by pl011_dma_flush_buffer */
+@@ -1370,9 +1370,9 @@ static void pl011_throttle_rx(struct uar
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ pl011_stop_rx(port);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void pl011_enable_ms(struct uart_port *port)
+@@ -1390,7 +1390,7 @@ static void pl011_rx_chars(struct uart_a
+ {
+ pl011_fifo_to_tty(uap);
+
+- spin_unlock(&uap->port.lock);
++ uart_port_unlock(&uap->port);
+ tty_flip_buffer_push(&uap->port.state->port);
+ /*
+ * If we were temporarily out of DMA mode for a while,
+@@ -1415,7 +1415,7 @@ static void pl011_rx_chars(struct uart_a
+ #endif
+ }
+ }
+- spin_lock(&uap->port.lock);
++ uart_port_lock(&uap->port);
+ }
+
+ static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
+@@ -1551,7 +1551,7 @@ static irqreturn_t pl011_int(int irq, vo
+ unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
+ int handled = 0;
+
+- spin_lock_irqsave(&uap->port.lock, flags);
++ uart_port_lock_irqsave(&uap->port, &flags);
+ status = pl011_read(uap, REG_RIS) & uap->im;
+ if (status) {
+ do {
+@@ -1581,7 +1581,7 @@ static irqreturn_t pl011_int(int irq, vo
+ handled = 1;
+ }
+
+- spin_unlock_irqrestore(&uap->port.lock, flags);
++ uart_port_unlock_irqrestore(&uap->port, flags);
+
+ return IRQ_RETVAL(handled);
+ }
+@@ -1653,14 +1653,14 @@ static void pl011_break_ctl(struct uart_
+ unsigned long flags;
+ unsigned int lcr_h;
+
+- spin_lock_irqsave(&uap->port.lock, flags);
++ uart_port_lock_irqsave(&uap->port, &flags);
+ lcr_h = pl011_read(uap, REG_LCRH_TX);
+ if (break_state == -1)
+ lcr_h |= UART01x_LCRH_BRK;
+ else
+ lcr_h &= ~UART01x_LCRH_BRK;
+ pl011_write(lcr_h, uap, REG_LCRH_TX);
+- spin_unlock_irqrestore(&uap->port.lock, flags);
++ uart_port_unlock_irqrestore(&uap->port, flags);
+ }
+
+ #ifdef CONFIG_CONSOLE_POLL
+@@ -1799,7 +1799,7 @@ static void pl011_enable_interrupts(stru
+ unsigned long flags;
+ unsigned int i;
+
+- spin_lock_irqsave(&uap->port.lock, flags);
++ uart_port_lock_irqsave(&uap->port, &flags);
+
+ /* Clear out any spuriously appearing RX interrupts */
+ pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
+@@ -1821,7 +1821,7 @@ static void pl011_enable_interrupts(stru
+ if (!pl011_dma_rx_running(uap))
+ uap->im |= UART011_RXIM;
+ pl011_write(uap->im, uap, REG_IMSC);
+- spin_unlock_irqrestore(&uap->port.lock, flags);
++ uart_port_unlock_irqrestore(&uap->port, flags);
+ }
+
+ static void pl011_unthrottle_rx(struct uart_port *port)
+@@ -1829,7 +1829,7 @@ static void pl011_unthrottle_rx(struct u
+ struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port);
+ unsigned long flags;
+
+- spin_lock_irqsave(&uap->port.lock, flags);
++ uart_port_lock_irqsave(&uap->port, &flags);
+
+ uap->im = UART011_RTIM;
+ if (!pl011_dma_rx_running(uap))
+@@ -1837,7 +1837,7 @@ static void pl011_unthrottle_rx(struct u
+
+ pl011_write(uap->im, uap, REG_IMSC);
+
+- spin_unlock_irqrestore(&uap->port.lock, flags);
++ uart_port_unlock_irqrestore(&uap->port, flags);
+ }
+
+ static int pl011_startup(struct uart_port *port)
+@@ -1857,7 +1857,7 @@ static int pl011_startup(struct uart_por
+
+ pl011_write(uap->vendor->ifls, uap, REG_IFLS);
+
+- spin_lock_irq(&uap->port.lock);
++ uart_port_lock_irq(&uap->port);
+
+ cr = pl011_read(uap, REG_CR);
+ cr &= UART011_CR_RTS | UART011_CR_DTR;
+@@ -1868,7 +1868,7 @@ static int pl011_startup(struct uart_por
+
+ pl011_write(cr, uap, REG_CR);
+
+- spin_unlock_irq(&uap->port.lock);
++ uart_port_unlock_irq(&uap->port);
+
+ /*
+ * initialise the old status of the modem signals
+@@ -1929,12 +1929,12 @@ static void pl011_disable_uart(struct ua
+ unsigned int cr;
+
+ uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
+- spin_lock_irq(&uap->port.lock);
++ uart_port_lock_irq(&uap->port);
+ cr = pl011_read(uap, REG_CR);
+ cr &= UART011_CR_RTS | UART011_CR_DTR;
+ cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
+ pl011_write(cr, uap, REG_CR);
+- spin_unlock_irq(&uap->port.lock);
++ uart_port_unlock_irq(&uap->port);
+
+ /*
+ * disable break condition and fifos
+@@ -1946,14 +1946,14 @@ static void pl011_disable_uart(struct ua
+
+ static void pl011_disable_interrupts(struct uart_amba_port *uap)
+ {
+- spin_lock_irq(&uap->port.lock);
++ uart_port_lock_irq(&uap->port);
+
+ /* mask all interrupts and clear all pending ones */
+ uap->im = 0;
+ pl011_write(uap->im, uap, REG_IMSC);
+ pl011_write(0xffff, uap, REG_ICR);
+
+- spin_unlock_irq(&uap->port.lock);
++ uart_port_unlock_irq(&uap->port);
+ }
+
+ static void pl011_shutdown(struct uart_port *port)
+@@ -2098,7 +2098,7 @@ pl011_set_termios(struct uart_port *port
+
+ bits = tty_get_frame_size(termios->c_cflag);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /*
+ * Update the per-port timeout.
+@@ -2172,7 +2172,7 @@ pl011_set_termios(struct uart_port *port
+ old_cr |= UART011_CR_RXE;
+ pl011_write(old_cr, uap, REG_CR);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void
+@@ -2190,10 +2190,10 @@ sbsa_uart_set_termios(struct uart_port *
+ termios->c_cflag &= ~(CMSPAR | CRTSCTS);
+ termios->c_cflag |= CS8 | CLOCAL;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ uart_update_timeout(port, CS8, uap->fixed_baud);
+ pl011_setup_status_masks(port, termios);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static const char *pl011_type(struct uart_port *port)
+@@ -2332,9 +2332,9 @@ pl011_console_write(struct console *co,
+ if (uap->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+- locked = spin_trylock(&uap->port.lock);
++ locked = uart_port_trylock(&uap->port);
+ else
+- spin_lock(&uap->port.lock);
++ uart_port_lock(&uap->port);
+
+ /*
+ * First save the CR then disable the interrupts
+@@ -2360,7 +2360,7 @@ pl011_console_write(struct console *co,
+ pl011_write(old_cr, uap, REG_CR);
+
+ if (locked)
+- spin_unlock(&uap->port.lock);
++ uart_port_unlock(&uap->port);
+ local_irq_restore(flags);
+
+ clk_disable(uap->clk);
diff --git a/debian/patches-rt/0018-serial-apb-Use-port-lock-wrappers.patch b/debian/patches-rt/0018-serial-apb-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..4dcf54a895
--- /dev/null
+++ b/debian/patches-rt/0018-serial-apb-Use-port-lock-wrappers.patch
@@ -0,0 +1,76 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:35 +0206
+Subject: [PATCH 018/134] serial: apb: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-19-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/apbuart.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/tty/serial/apbuart.c
++++ b/drivers/tty/serial/apbuart.c
+@@ -133,7 +133,7 @@ static irqreturn_t apbuart_int(int irq,
+ struct uart_port *port = dev_id;
+ unsigned int status;
+
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+
+ status = UART_GET_STATUS(port);
+ if (status & UART_STATUS_DR)
+@@ -141,7 +141,7 @@ static irqreturn_t apbuart_int(int irq,
+ if (status & UART_STATUS_THE)
+ apbuart_tx_chars(port);
+
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+
+ return IRQ_HANDLED;
+ }
+@@ -228,7 +228,7 @@ static void apbuart_set_termios(struct u
+ if (termios->c_cflag & CRTSCTS)
+ cr |= UART_CTRL_FL;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* Update the per-port timeout. */
+ uart_update_timeout(port, termios->c_cflag, baud);
+@@ -251,7 +251,7 @@ static void apbuart_set_termios(struct u
+ UART_PUT_SCAL(port, quot);
+ UART_PUT_CTRL(port, cr);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static const char *apbuart_type(struct uart_port *port)
diff --git a/debian/patches-rt/0019-serial-ar933x-Use-port-lock-wrappers.patch b/debian/patches-rt/0019-serial-ar933x-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..61a5d3b1a6
--- /dev/null
+++ b/debian/patches-rt/0019-serial-ar933x-Use-port-lock-wrappers.patch
@@ -0,0 +1,144 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:36 +0206
+Subject: [PATCH 019/134] serial: ar933x: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-20-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/ar933x_uart.c | 26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+--- a/drivers/tty/serial/ar933x_uart.c
++++ b/drivers/tty/serial/ar933x_uart.c
+@@ -133,9 +133,9 @@ static unsigned int ar933x_uart_tx_empty
+ unsigned long flags;
+ unsigned int rdata;
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+ rdata = ar933x_uart_read(up, AR933X_UART_DATA_REG);
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+
+ return (rdata & AR933X_UART_DATA_TX_CSR) ? 0 : TIOCSER_TEMT;
+ }
+@@ -220,14 +220,14 @@ static void ar933x_uart_break_ctl(struct
+ container_of(port, struct ar933x_uart_port, port);
+ unsigned long flags;
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+ if (break_state == -1)
+ ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
+ AR933X_UART_CS_TX_BREAK);
+ else
+ ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG,
+ AR933X_UART_CS_TX_BREAK);
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ }
+
+ /*
+@@ -318,7 +318,7 @@ static void ar933x_uart_set_termios(stru
+ * Ok, we're now changing the port state. Do it with
+ * interrupts disabled.
+ */
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ /* disable the UART */
+ ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG,
+@@ -352,7 +352,7 @@ static void ar933x_uart_set_termios(stru
+ AR933X_UART_CS_IF_MODE_M << AR933X_UART_CS_IF_MODE_S,
+ AR933X_UART_CS_IF_MODE_DCE << AR933X_UART_CS_IF_MODE_S);
+
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+
+ if (tty_termios_baud_rate(new))
+ tty_termios_encode_baud_rate(new, baud, baud);
+@@ -450,7 +450,7 @@ static irqreturn_t ar933x_uart_interrupt
+ if ((status & AR933X_UART_CS_HOST_INT) == 0)
+ return IRQ_NONE;
+
+- spin_lock(&up->port.lock);
++ uart_port_lock(&up->port);
+
+ status = ar933x_uart_read(up, AR933X_UART_INT_REG);
+ status &= ar933x_uart_read(up, AR933X_UART_INT_EN_REG);
+@@ -468,7 +468,7 @@ static irqreturn_t ar933x_uart_interrupt
+ ar933x_uart_tx_chars(up);
+ }
+
+- spin_unlock(&up->port.lock);
++ uart_port_unlock(&up->port);
+
+ return IRQ_HANDLED;
+ }
+@@ -485,7 +485,7 @@ static int ar933x_uart_startup(struct ua
+ if (ret)
+ return ret;
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ /* Enable HOST interrupts */
+ ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
+@@ -498,7 +498,7 @@ static int ar933x_uart_startup(struct ua
+ /* Enable RX interrupts */
+ ar933x_uart_start_rx_interrupt(up);
+
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+
+ return 0;
+ }
+@@ -632,9 +632,9 @@ static void ar933x_uart_console_write(st
+ if (up->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+- locked = spin_trylock(&up->port.lock);
++ locked = uart_port_trylock(&up->port);
+ else
+- spin_lock(&up->port.lock);
++ uart_port_lock(&up->port);
+
+ /*
+ * First save the IER then disable the interrupts
+@@ -654,7 +654,7 @@ static void ar933x_uart_console_write(st
+ ar933x_uart_write(up, AR933X_UART_INT_REG, AR933X_UART_INT_ALLINTS);
+
+ if (locked)
+- spin_unlock(&up->port.lock);
++ uart_port_unlock(&up->port);
+
+ local_irq_restore(flags);
+ }
diff --git a/debian/patches-rt/0020-serial-arc_uart-Use-port-lock-wrappers.patch b/debian/patches-rt/0020-serial-arc_uart-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..bc7be2c4d3
--- /dev/null
+++ b/debian/patches-rt/0020-serial-arc_uart-Use-port-lock-wrappers.patch
@@ -0,0 +1,97 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:37 +0206
+Subject: [PATCH 020/134] serial: arc_uart: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-21-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/arc_uart.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/drivers/tty/serial/arc_uart.c
++++ b/drivers/tty/serial/arc_uart.c
+@@ -279,9 +279,9 @@ static irqreturn_t arc_serial_isr(int ir
+ if (status & RXIENB) {
+
+ /* already in ISR, no need of xx_irqsave */
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+ arc_serial_rx_chars(port, status);
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+ }
+
+ if ((status & TXIENB) && (status & TXEMPTY)) {
+@@ -291,12 +291,12 @@ static irqreturn_t arc_serial_isr(int ir
+ */
+ UART_TX_IRQ_DISABLE(port);
+
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+
+ if (!uart_tx_stopped(port))
+ arc_serial_tx_chars(port);
+
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+ }
+
+ return IRQ_HANDLED;
+@@ -366,7 +366,7 @@ arc_serial_set_termios(struct uart_port
+ uartl = hw_val & 0xFF;
+ uarth = (hw_val >> 8) & 0xFF;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ UART_ALL_IRQ_DISABLE(port);
+
+@@ -391,7 +391,7 @@ arc_serial_set_termios(struct uart_port
+
+ uart_update_timeout(port, new->c_cflag, baud);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static const char *arc_serial_type(struct uart_port *port)
+@@ -521,9 +521,9 @@ static void arc_serial_console_write(str
+ struct uart_port *port = &arc_uart_ports[co->index].port;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ uart_console_write(port, s, count, arc_serial_console_putchar);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static struct console arc_console = {
diff --git a/debian/patches-rt/0021-serial-atmel-Use-port-lock-wrappers.patch b/debian/patches-rt/0021-serial-atmel-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..ab70954a10
--- /dev/null
+++ b/debian/patches-rt/0021-serial-atmel-Use-port-lock-wrappers.patch
@@ -0,0 +1,119 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:38 +0206
+Subject: [PATCH 021/134] serial: atmel: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-22-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/atmel_serial.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -861,7 +861,7 @@ static void atmel_complete_tx_dma(void *
+ struct dma_chan *chan = atmel_port->chan_tx;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ if (chan)
+ dmaengine_terminate_all(chan);
+@@ -893,7 +893,7 @@ static void atmel_complete_tx_dma(void *
+ atmel_port->tx_done_mask);
+ }
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void atmel_release_tx_dma(struct uart_port *port)
+@@ -1711,9 +1711,9 @@ static void atmel_tasklet_rx_func(struct
+ struct uart_port *port = &atmel_port->uart;
+
+ /* The interrupt handler does not take the lock */
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+ atmel_port->schedule_rx(port);
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+ }
+
+ static void atmel_tasklet_tx_func(struct tasklet_struct *t)
+@@ -1723,9 +1723,9 @@ static void atmel_tasklet_tx_func(struct
+ struct uart_port *port = &atmel_port->uart;
+
+ /* The interrupt handler does not take the lock */
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+ atmel_port->schedule_tx(port);
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+ }
+
+ static void atmel_init_property(struct atmel_uart_port *atmel_port,
+@@ -2175,7 +2175,7 @@ static void atmel_set_termios(struct uar
+ } else
+ mode |= ATMEL_US_PAR_NONE;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ port->read_status_mask = ATMEL_US_OVRE;
+ if (termios->c_iflag & INPCK)
+@@ -2377,22 +2377,22 @@ static void atmel_set_termios(struct uar
+ else
+ atmel_disable_ms(port);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios)
+ {
+ if (termios->c_line == N_PPS) {
+ port->flags |= UPF_HARDPPS_CD;
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+ atmel_enable_ms(port);
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+ } else {
+ port->flags &= ~UPF_HARDPPS_CD;
+ if (!UART_ENABLE_MS(port, termios->c_cflag)) {
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+ atmel_disable_ms(port);
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+ }
+ }
+ }
diff --git a/debian/patches-rt/0022-serial-bcm63xx-uart-Use-port-lock-wrappers.patch b/debian/patches-rt/0022-serial-bcm63xx-uart-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..90efec4080
--- /dev/null
+++ b/debian/patches-rt/0022-serial-bcm63xx-uart-Use-port-lock-wrappers.patch
@@ -0,0 +1,128 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:39 +0206
+Subject: [PATCH 022/134] serial: bcm63xx-uart: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-23-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/bcm63xx_uart.c | 22 +++++++++++-----------
+ 1 file changed, 11 insertions(+), 11 deletions(-)
+
+--- a/drivers/tty/serial/bcm63xx_uart.c
++++ b/drivers/tty/serial/bcm63xx_uart.c
+@@ -201,7 +201,7 @@ static void bcm_uart_break_ctl(struct ua
+ unsigned long flags;
+ unsigned int val;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ val = bcm_uart_readl(port, UART_CTL_REG);
+ if (ctl)
+@@ -210,7 +210,7 @@ static void bcm_uart_break_ctl(struct ua
+ val &= ~UART_CTL_XMITBRK_MASK;
+ bcm_uart_writel(port, val, UART_CTL_REG);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ /*
+@@ -332,7 +332,7 @@ static irqreturn_t bcm_uart_interrupt(in
+ unsigned int irqstat;
+
+ port = dev_id;
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+
+ irqstat = bcm_uart_readl(port, UART_IR_REG);
+ if (irqstat & UART_RX_INT_STAT)
+@@ -353,7 +353,7 @@ static irqreturn_t bcm_uart_interrupt(in
+ estat & UART_EXTINP_DCD_MASK);
+ }
+
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+ return IRQ_HANDLED;
+ }
+
+@@ -451,9 +451,9 @@ static void bcm_uart_shutdown(struct uar
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ bcm_uart_writel(port, 0, UART_IR_REG);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ bcm_uart_disable(port);
+ bcm_uart_flush(port);
+@@ -470,7 +470,7 @@ static void bcm_uart_set_termios(struct
+ unsigned long flags;
+ int tries;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* Drain the hot tub fully before we power it off for the winter. */
+ for (tries = 3; !bcm_uart_tx_empty(port) && tries; tries--)
+@@ -546,7 +546,7 @@ static void bcm_uart_set_termios(struct
+
+ uart_update_timeout(port, new->c_cflag, baud);
+ bcm_uart_enable(port);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ /*
+@@ -712,9 +712,9 @@ static void bcm_console_write(struct con
+ /* bcm_uart_interrupt() already took the lock */
+ locked = 0;
+ } else if (oops_in_progress) {
+- locked = spin_trylock(&port->lock);
++ locked = uart_port_trylock(port);
+ } else {
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+ locked = 1;
+ }
+
+@@ -725,7 +725,7 @@ static void bcm_console_write(struct con
+ wait_for_xmitr(port);
+
+ if (locked)
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+ local_irq_restore(flags);
+ }
+
diff --git a/debian/patches-rt/0023-serial-cpm_uart-Use-port-lock-wrappers.patch b/debian/patches-rt/0023-serial-cpm_uart-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..c3400eac99
--- /dev/null
+++ b/debian/patches-rt/0023-serial-cpm_uart-Use-port-lock-wrappers.patch
@@ -0,0 +1,70 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:40 +0206
+Subject: [PATCH 023/134] serial: cpm_uart: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-24-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/cpm_uart.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/tty/serial/cpm_uart.c
++++ b/drivers/tty/serial/cpm_uart.c
+@@ -569,7 +569,7 @@ static void cpm_uart_set_termios(struct
+ if ((termios->c_cflag & CREAD) == 0)
+ port->read_status_mask &= ~BD_SC_EMPTY;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ if (IS_SMC(pinfo)) {
+ unsigned int bits = tty_get_frame_size(termios->c_cflag);
+@@ -609,7 +609,7 @@ static void cpm_uart_set_termios(struct
+ clk_set_rate(pinfo->clk, baud);
+ else
+ cpm_setbrg(pinfo->brg - 1, baud);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static const char *cpm_uart_type(struct uart_port *port)
+@@ -1386,9 +1386,9 @@ static void cpm_uart_console_write(struc
+ cpm_uart_early_write(pinfo, s, count, true);
+ local_irq_restore(flags);
+ } else {
+- spin_lock_irqsave(&pinfo->port.lock, flags);
++ uart_port_lock_irqsave(&pinfo->port, &flags);
+ cpm_uart_early_write(pinfo, s, count, true);
+- spin_unlock_irqrestore(&pinfo->port.lock, flags);
++ uart_port_unlock_irqrestore(&pinfo->port, flags);
+ }
+ }
+
diff --git a/debian/patches-rt/0024-serial-digicolor-Use-port-lock-wrappers.patch b/debian/patches-rt/0024-serial-digicolor-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..2a3aa5c9a5
--- /dev/null
+++ b/debian/patches-rt/0024-serial-digicolor-Use-port-lock-wrappers.patch
@@ -0,0 +1,113 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:41 +0206
+Subject: [PATCH 024/134] serial: digicolor: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Baruch Siach <baruch@tkos.co.il>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-25-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/digicolor-usart.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+--- a/drivers/tty/serial/digicolor-usart.c
++++ b/drivers/tty/serial/digicolor-usart.c
+@@ -133,7 +133,7 @@ static void digicolor_uart_rx(struct uar
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ while (1) {
+ u8 status, ch, ch_flag;
+@@ -172,7 +172,7 @@ static void digicolor_uart_rx(struct uar
+ ch_flag);
+ }
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ tty_flip_buffer_push(&port->state->port);
+ }
+@@ -185,7 +185,7 @@ static void digicolor_uart_tx(struct uar
+ if (digicolor_uart_tx_full(port))
+ return;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ if (port->x_char) {
+ writeb_relaxed(port->x_char, port->membase + UA_EMI_REC);
+@@ -211,7 +211,7 @@ static void digicolor_uart_tx(struct uar
+ uart_write_wakeup(port);
+
+ out:
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static irqreturn_t digicolor_uart_int(int irq, void *dev_id)
+@@ -333,7 +333,7 @@ static void digicolor_uart_set_termios(s
+ port->ignore_status_mask |= UA_STATUS_OVERRUN_ERR
+ | UA_STATUS_PARITY_ERR | UA_STATUS_FRAME_ERR;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+@@ -341,7 +341,7 @@ static void digicolor_uart_set_termios(s
+ writeb_relaxed(divisor & 0xff, port->membase + UA_HBAUD_LO);
+ writeb_relaxed(divisor >> 8, port->membase + UA_HBAUD_HI);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static const char *digicolor_uart_type(struct uart_port *port)
+@@ -398,14 +398,14 @@ static void digicolor_uart_console_write
+ int locked = 1;
+
+ if (oops_in_progress)
+- locked = spin_trylock_irqsave(&port->lock, flags);
++ locked = uart_port_trylock_irqsave(port, &flags);
+ else
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ uart_console_write(port, c, n, digicolor_uart_console_putchar);
+
+ if (locked)
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ /* Wait for transmitter to become empty */
+ do {
diff --git a/debian/patches-rt/0025-serial-dz-Use-port-lock-wrappers.patch b/debian/patches-rt/0025-serial-dz-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..7b3f11604c
--- /dev/null
+++ b/debian/patches-rt/0025-serial-dz-Use-port-lock-wrappers.patch
@@ -0,0 +1,161 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:42 +0206
+Subject: [PATCH 025/134] serial: dz: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-26-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/dz.c | 32 ++++++++++++++++----------------
+ 1 file changed, 16 insertions(+), 16 deletions(-)
+
+--- a/drivers/tty/serial/dz.c
++++ b/drivers/tty/serial/dz.c
+@@ -268,9 +268,9 @@ static inline void dz_transmit_chars(str
+ }
+ /* If nothing to do or stopped or hardware stopped. */
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&dport->port)) {
+- spin_lock(&dport->port.lock);
++ uart_port_lock(&dport->port);
+ dz_stop_tx(&dport->port);
+- spin_unlock(&dport->port.lock);
++ uart_port_unlock(&dport->port);
+ return;
+ }
+
+@@ -287,9 +287,9 @@ static inline void dz_transmit_chars(str
+
+ /* Are we are done. */
+ if (uart_circ_empty(xmit)) {
+- spin_lock(&dport->port.lock);
++ uart_port_lock(&dport->port);
+ dz_stop_tx(&dport->port);
+- spin_unlock(&dport->port.lock);
++ uart_port_unlock(&dport->port);
+ }
+ }
+
+@@ -415,14 +415,14 @@ static int dz_startup(struct uart_port *
+ return ret;
+ }
+
+- spin_lock_irqsave(&dport->port.lock, flags);
++ uart_port_lock_irqsave(&dport->port, &flags);
+
+ /* Enable interrupts. */
+ tmp = dz_in(dport, DZ_CSR);
+ tmp |= DZ_RIE | DZ_TIE;
+ dz_out(dport, DZ_CSR, tmp);
+
+- spin_unlock_irqrestore(&dport->port.lock, flags);
++ uart_port_unlock_irqrestore(&dport->port, flags);
+
+ return 0;
+ }
+@@ -443,9 +443,9 @@ static void dz_shutdown(struct uart_port
+ int irq_guard;
+ u16 tmp;
+
+- spin_lock_irqsave(&dport->port.lock, flags);
++ uart_port_lock_irqsave(&dport->port, &flags);
+ dz_stop_tx(&dport->port);
+- spin_unlock_irqrestore(&dport->port.lock, flags);
++ uart_port_unlock_irqrestore(&dport->port, flags);
+
+ irq_guard = atomic_add_return(-1, &mux->irq_guard);
+ if (!irq_guard) {
+@@ -491,14 +491,14 @@ static void dz_break_ctl(struct uart_por
+ unsigned long flags;
+ unsigned short tmp, mask = 1 << dport->port.line;
+
+- spin_lock_irqsave(&uport->lock, flags);
++ uart_port_lock_irqsave(uport, &flags);
+ tmp = dz_in(dport, DZ_TCR);
+ if (break_state)
+ tmp |= mask;
+ else
+ tmp &= ~mask;
+ dz_out(dport, DZ_TCR, tmp);
+- spin_unlock_irqrestore(&uport->lock, flags);
++ uart_port_unlock_irqrestore(uport, flags);
+ }
+
+ static int dz_encode_baud_rate(unsigned int baud)
+@@ -608,7 +608,7 @@ static void dz_set_termios(struct uart_p
+ if (termios->c_cflag & CREAD)
+ cflag |= DZ_RXENAB;
+
+- spin_lock_irqsave(&dport->port.lock, flags);
++ uart_port_lock_irqsave(&dport->port, &flags);
+
+ uart_update_timeout(uport, termios->c_cflag, baud);
+
+@@ -631,7 +631,7 @@ static void dz_set_termios(struct uart_p
+ if (termios->c_iflag & IGNBRK)
+ dport->port.ignore_status_mask |= DZ_BREAK;
+
+- spin_unlock_irqrestore(&dport->port.lock, flags);
++ uart_port_unlock_irqrestore(&dport->port, flags);
+ }
+
+ /*
+@@ -645,12 +645,12 @@ static void dz_pm(struct uart_port *upor
+ struct dz_port *dport = to_dport(uport);
+ unsigned long flags;
+
+- spin_lock_irqsave(&dport->port.lock, flags);
++ uart_port_lock_irqsave(&dport->port, &flags);
+ if (state < 3)
+ dz_start_tx(&dport->port);
+ else
+ dz_stop_tx(&dport->port);
+- spin_unlock_irqrestore(&dport->port.lock, flags);
++ uart_port_unlock_irqrestore(&dport->port, flags);
+ }
+
+
+@@ -811,7 +811,7 @@ static void dz_console_putchar(struct ua
+ unsigned short csr, tcr, trdy, mask;
+ int loops = 10000;
+
+- spin_lock_irqsave(&dport->port.lock, flags);
++ uart_port_lock_irqsave(&dport->port, &flags);
+ csr = dz_in(dport, DZ_CSR);
+ dz_out(dport, DZ_CSR, csr & ~DZ_TIE);
+ tcr = dz_in(dport, DZ_TCR);
+@@ -819,7 +819,7 @@ static void dz_console_putchar(struct ua
+ mask = tcr;
+ dz_out(dport, DZ_TCR, mask);
+ iob();
+- spin_unlock_irqrestore(&dport->port.lock, flags);
++ uart_port_unlock_irqrestore(&dport->port, flags);
+
+ do {
+ trdy = dz_in(dport, DZ_CSR);
diff --git a/debian/patches-rt/0026-serial-linflexuart-Use-port-lock-wrappers.patch b/debian/patches-rt/0026-serial-linflexuart-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..e2a3628ae5
--- /dev/null
+++ b/debian/patches-rt/0026-serial-linflexuart-Use-port-lock-wrappers.patch
@@ -0,0 +1,143 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:43 +0206
+Subject: [PATCH 026/134] serial: linflexuart: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-27-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/fsl_linflexuart.c | 26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+--- a/drivers/tty/serial/fsl_linflexuart.c
++++ b/drivers/tty/serial/fsl_linflexuart.c
+@@ -203,7 +203,7 @@ static irqreturn_t linflex_txint(int irq
+ struct circ_buf *xmit = &sport->state->xmit;
+ unsigned long flags;
+
+- spin_lock_irqsave(&sport->lock, flags);
++ uart_port_lock_irqsave(sport, &flags);
+
+ if (sport->x_char) {
+ linflex_put_char(sport, sport->x_char);
+@@ -217,7 +217,7 @@ static irqreturn_t linflex_txint(int irq
+
+ linflex_transmit_buffer(sport);
+ out:
+- spin_unlock_irqrestore(&sport->lock, flags);
++ uart_port_unlock_irqrestore(sport, flags);
+ return IRQ_HANDLED;
+ }
+
+@@ -230,7 +230,7 @@ static irqreturn_t linflex_rxint(int irq
+ unsigned char rx;
+ bool brk;
+
+- spin_lock_irqsave(&sport->lock, flags);
++ uart_port_lock_irqsave(sport, &flags);
+
+ status = readl(sport->membase + UARTSR);
+ while (status & LINFLEXD_UARTSR_RMB) {
+@@ -266,7 +266,7 @@ static irqreturn_t linflex_rxint(int irq
+ }
+ }
+
+- spin_unlock_irqrestore(&sport->lock, flags);
++ uart_port_unlock_irqrestore(sport, flags);
+
+ tty_flip_buffer_push(port);
+
+@@ -369,11 +369,11 @@ static int linflex_startup(struct uart_p
+ int ret = 0;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ linflex_setup_watermark(port);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ ret = devm_request_irq(port->dev, port->irq, linflex_int, 0,
+ DRIVER_NAME, port);
+@@ -386,14 +386,14 @@ static void linflex_shutdown(struct uart
+ unsigned long ier;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* disable interrupts */
+ ier = readl(port->membase + LINIER);
+ ier &= ~(LINFLEXD_LINIER_DRIE | LINFLEXD_LINIER_DTIE);
+ writel(ier, port->membase + LINIER);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ devm_free_irq(port->dev, port->irq, port);
+ }
+@@ -474,7 +474,7 @@ linflex_set_termios(struct uart_port *po
+ cr &= ~LINFLEXD_UARTCR_PCE;
+ }
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ port->read_status_mask = 0;
+
+@@ -507,7 +507,7 @@ linflex_set_termios(struct uart_port *po
+
+ writel(cr1, port->membase + LINCR1);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static const char *linflex_type(struct uart_port *port)
+@@ -646,14 +646,14 @@ linflex_console_write(struct console *co
+ if (sport->sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+- locked = spin_trylock_irqsave(&sport->lock, flags);
++ locked = uart_port_trylock_irqsave(sport, &flags);
+ else
+- spin_lock_irqsave(&sport->lock, flags);
++ uart_port_lock_irqsave(sport, &flags);
+
+ linflex_string_write(sport, s, count);
+
+ if (locked)
+- spin_unlock_irqrestore(&sport->lock, flags);
++ uart_port_unlock_irqrestore(sport, flags);
+ }
+
+ /*
diff --git a/debian/patches-rt/0027-serial-fsl_lpuart-Use-port-lock-wrappers.patch b/debian/patches-rt/0027-serial-fsl_lpuart-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..f1475a5ffd
--- /dev/null
+++ b/debian/patches-rt/0027-serial-fsl_lpuart-Use-port-lock-wrappers.patch
@@ -0,0 +1,389 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:44 +0206
+Subject: [PATCH 027/134] serial: fsl_lpuart: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-28-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/fsl_lpuart.c | 88 ++++++++++++++++++++--------------------
+ 1 file changed, 44 insertions(+), 44 deletions(-)
+
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -532,9 +532,9 @@ static void lpuart_dma_tx_complete(void
+ struct dma_chan *chan = sport->dma_tx_chan;
+ unsigned long flags;
+
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+ if (!sport->dma_tx_in_progress) {
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+ return;
+ }
+
+@@ -543,7 +543,7 @@ static void lpuart_dma_tx_complete(void
+
+ uart_xmit_advance(&sport->port, sport->dma_tx_bytes);
+ sport->dma_tx_in_progress = false;
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&sport->port);
+@@ -553,12 +553,12 @@ static void lpuart_dma_tx_complete(void
+ return;
+ }
+
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+
+ if (!lpuart_stopped_or_empty(&sport->port))
+ lpuart_dma_tx(sport);
+
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+ }
+
+ static dma_addr_t lpuart_dma_datareg_addr(struct lpuart_port *sport)
+@@ -651,7 +651,7 @@ static int lpuart_poll_init(struct uart_
+
+ sport->port.fifosize = 0;
+
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+ /* Disable Rx & Tx */
+ writeb(0, sport->port.membase + UARTCR2);
+
+@@ -675,7 +675,7 @@ static int lpuart_poll_init(struct uart_
+
+ /* Enable Rx and Tx */
+ writeb(UARTCR2_RE | UARTCR2_TE, sport->port.membase + UARTCR2);
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+
+ return 0;
+ }
+@@ -703,7 +703,7 @@ static int lpuart32_poll_init(struct uar
+
+ sport->port.fifosize = 0;
+
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+
+ /* Disable Rx & Tx */
+ lpuart32_write(&sport->port, 0, UARTCTRL);
+@@ -724,7 +724,7 @@ static int lpuart32_poll_init(struct uar
+
+ /* Enable Rx and Tx */
+ lpuart32_write(&sport->port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL);
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+
+ return 0;
+ }
+@@ -879,9 +879,9 @@ static unsigned int lpuart32_tx_empty(st
+
+ static void lpuart_txint(struct lpuart_port *sport)
+ {
+- spin_lock(&sport->port.lock);
++ uart_port_lock(&sport->port);
+ lpuart_transmit_buffer(sport);
+- spin_unlock(&sport->port.lock);
++ uart_port_unlock(&sport->port);
+ }
+
+ static void lpuart_rxint(struct lpuart_port *sport)
+@@ -890,7 +890,7 @@ static void lpuart_rxint(struct lpuart_p
+ struct tty_port *port = &sport->port.state->port;
+ unsigned char rx, sr;
+
+- spin_lock(&sport->port.lock);
++ uart_port_lock(&sport->port);
+
+ while (!(readb(sport->port.membase + UARTSFIFO) & UARTSFIFO_RXEMPT)) {
+ flg = TTY_NORMAL;
+@@ -956,9 +956,9 @@ static void lpuart_rxint(struct lpuart_p
+
+ static void lpuart32_txint(struct lpuart_port *sport)
+ {
+- spin_lock(&sport->port.lock);
++ uart_port_lock(&sport->port);
+ lpuart32_transmit_buffer(sport);
+- spin_unlock(&sport->port.lock);
++ uart_port_unlock(&sport->port);
+ }
+
+ static void lpuart32_rxint(struct lpuart_port *sport)
+@@ -968,7 +968,7 @@ static void lpuart32_rxint(struct lpuart
+ unsigned long rx, sr;
+ bool is_break;
+
+- spin_lock(&sport->port.lock);
++ uart_port_lock(&sport->port);
+
+ while (!(lpuart32_read(&sport->port, UARTFIFO) & UARTFIFO_RXEMPT)) {
+ flg = TTY_NORMAL;
+@@ -1170,12 +1170,12 @@ static void lpuart_copy_rx_to_tty(struct
+
+ async_tx_ack(sport->dma_rx_desc);
+
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+
+ dmastat = dmaengine_tx_status(chan, sport->dma_rx_cookie, &state);
+ if (dmastat == DMA_ERROR) {
+ dev_err(sport->port.dev, "Rx DMA transfer failed!\n");
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+ return;
+ }
+
+@@ -1244,7 +1244,7 @@ static void lpuart_copy_rx_to_tty(struct
+ dma_sync_sg_for_device(chan->device->dev, &sport->rx_sgl, 1,
+ DMA_FROM_DEVICE);
+
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+
+ tty_flip_buffer_push(port);
+ if (!sport->dma_idle_int)
+@@ -1335,9 +1335,9 @@ static void lpuart_timer_func(struct tim
+ mod_timer(&sport->lpuart_timer,
+ jiffies + sport->dma_rx_timeout);
+
+- if (spin_trylock_irqsave(&sport->port.lock, flags)) {
++ if (uart_port_trylock_irqsave(&sport->port, &flags)) {
+ sport->last_residue = state.residue;
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+ }
+ }
+
+@@ -1802,14 +1802,14 @@ static void lpuart_hw_setup(struct lpuar
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+
+ lpuart_setup_watermark_enable(sport);
+
+ lpuart_rx_dma_startup(sport);
+ lpuart_tx_dma_startup(sport);
+
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+ }
+
+ static int lpuart_startup(struct uart_port *port)
+@@ -1859,7 +1859,7 @@ static void lpuart32_hw_setup(struct lpu
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+
+ lpuart32_hw_disable(sport);
+
+@@ -1869,7 +1869,7 @@ static void lpuart32_hw_setup(struct lpu
+ lpuart32_setup_watermark_enable(sport);
+ lpuart32_configure(sport);
+
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+ }
+
+ static int lpuart32_startup(struct uart_port *port)
+@@ -1932,7 +1932,7 @@ static void lpuart_shutdown(struct uart_
+ unsigned char temp;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* disable Rx/Tx and interrupts */
+ temp = readb(port->membase + UARTCR2);
+@@ -1940,7 +1940,7 @@ static void lpuart_shutdown(struct uart_
+ UARTCR2_TIE | UARTCR2_TCIE | UARTCR2_RIE);
+ writeb(temp, port->membase + UARTCR2);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ lpuart_dma_shutdown(sport);
+ }
+@@ -1952,7 +1952,7 @@ static void lpuart32_shutdown(struct uar
+ unsigned long temp;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* clear status */
+ temp = lpuart32_read(&sport->port, UARTSTAT);
+@@ -1969,7 +1969,7 @@ static void lpuart32_shutdown(struct uar
+ UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_RIE | UARTCTRL_SBK);
+ lpuart32_write(port, temp, UARTCTRL);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ lpuart_dma_shutdown(sport);
+ }
+@@ -2069,7 +2069,7 @@ lpuart_set_termios(struct uart_port *por
+ if (old && sport->lpuart_dma_rx_use)
+ lpuart_dma_rx_free(&sport->port);
+
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+
+ sport->port.read_status_mask = 0;
+ if (termios->c_iflag & INPCK)
+@@ -2124,7 +2124,7 @@ lpuart_set_termios(struct uart_port *por
+ sport->lpuart_dma_rx_use = false;
+ }
+
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+ }
+
+ static void __lpuart32_serial_setbrg(struct uart_port *port,
+@@ -2304,7 +2304,7 @@ lpuart32_set_termios(struct uart_port *p
+ if (old && sport->lpuart_dma_rx_use)
+ lpuart_dma_rx_free(&sport->port);
+
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+
+ sport->port.read_status_mask = 0;
+ if (termios->c_iflag & INPCK)
+@@ -2359,7 +2359,7 @@ lpuart32_set_termios(struct uart_port *p
+ sport->lpuart_dma_rx_use = false;
+ }
+
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+ }
+
+ static const char *lpuart_type(struct uart_port *port)
+@@ -2477,9 +2477,9 @@ lpuart_console_write(struct console *co,
+ int locked = 1;
+
+ if (oops_in_progress)
+- locked = spin_trylock_irqsave(&sport->port.lock, flags);
++ locked = uart_port_trylock_irqsave(&sport->port, &flags);
+ else
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+
+ /* first save CR2 and then disable interrupts */
+ cr2 = old_cr2 = readb(sport->port.membase + UARTCR2);
+@@ -2495,7 +2495,7 @@ lpuart_console_write(struct console *co,
+ writeb(old_cr2, sport->port.membase + UARTCR2);
+
+ if (locked)
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+ }
+
+ static void
+@@ -2507,9 +2507,9 @@ lpuart32_console_write(struct console *c
+ int locked = 1;
+
+ if (oops_in_progress)
+- locked = spin_trylock_irqsave(&sport->port.lock, flags);
++ locked = uart_port_trylock_irqsave(&sport->port, &flags);
+ else
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+
+ /* first save CR2 and then disable interrupts */
+ cr = old_cr = lpuart32_read(&sport->port, UARTCTRL);
+@@ -2525,7 +2525,7 @@ lpuart32_console_write(struct console *c
+ lpuart32_write(&sport->port, old_cr, UARTCTRL);
+
+ if (locked)
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+ }
+
+ /*
+@@ -3089,7 +3089,7 @@ static int lpuart_suspend(struct device
+ uart_suspend_port(&lpuart_reg, &sport->port);
+
+ if (lpuart_uport_is_active(sport)) {
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+ if (lpuart_is_32(sport)) {
+ /* disable Rx/Tx and interrupts */
+ temp = lpuart32_read(&sport->port, UARTCTRL);
+@@ -3101,7 +3101,7 @@ static int lpuart_suspend(struct device
+ temp &= ~(UARTCR2_TE | UARTCR2_TIE | UARTCR2_TCIE);
+ writeb(temp, sport->port.membase + UARTCR2);
+ }
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+
+ if (sport->lpuart_dma_rx_use) {
+ /*
+@@ -3114,7 +3114,7 @@ static int lpuart_suspend(struct device
+ lpuart_dma_rx_free(&sport->port);
+
+ /* Disable Rx DMA to use UART port as wakeup source */
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+ if (lpuart_is_32(sport)) {
+ temp = lpuart32_read(&sport->port, UARTBAUD);
+ lpuart32_write(&sport->port, temp & ~UARTBAUD_RDMAE,
+@@ -3123,11 +3123,11 @@ static int lpuart_suspend(struct device
+ writeb(readb(sport->port.membase + UARTCR5) &
+ ~UARTCR5_RDMAS, sport->port.membase + UARTCR5);
+ }
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+ }
+
+ if (sport->lpuart_dma_tx_use) {
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+ if (lpuart_is_32(sport)) {
+ temp = lpuart32_read(&sport->port, UARTBAUD);
+ temp &= ~UARTBAUD_TDMAE;
+@@ -3137,7 +3137,7 @@ static int lpuart_suspend(struct device
+ temp &= ~UARTCR5_TDMAS;
+ writeb(temp, sport->port.membase + UARTCR5);
+ }
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+ sport->dma_tx_in_progress = false;
+ dmaengine_terminate_sync(sport->dma_tx_chan);
+ }
diff --git a/debian/patches-rt/0028-serial-icom-Use-port-lock-wrappers.patch b/debian/patches-rt/0028-serial-icom-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..da765879ce
--- /dev/null
+++ b/debian/patches-rt/0028-serial-icom-Use-port-lock-wrappers.patch
@@ -0,0 +1,151 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:45 +0206
+Subject: [PATCH 028/134] serial: icom: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-29-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/icom.c | 26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+--- a/drivers/tty/serial/icom.c
++++ b/drivers/tty/serial/icom.c
+@@ -929,7 +929,7 @@ static inline void check_modem_status(st
+ char delta_status;
+ unsigned char status;
+
+- spin_lock(&icom_port->uart_port.lock);
++ uart_port_lock(&icom_port->uart_port);
+
+ /*modem input register */
+ status = readb(&icom_port->dram->isr);
+@@ -951,7 +951,7 @@ static inline void check_modem_status(st
+ port.delta_msr_wait);
+ old_status = status;
+ }
+- spin_unlock(&icom_port->uart_port.lock);
++ uart_port_unlock(&icom_port->uart_port);
+ }
+
+ static void xmit_interrupt(u16 port_int_reg, struct icom_port *icom_port)
+@@ -1093,7 +1093,7 @@ static void process_interrupt(u16 port_i
+ struct icom_port *icom_port)
+ {
+
+- spin_lock(&icom_port->uart_port.lock);
++ uart_port_lock(&icom_port->uart_port);
+ trace(icom_port, "INTERRUPT", port_int_reg);
+
+ if (port_int_reg & (INT_XMIT_COMPLETED | INT_XMIT_DISABLED))
+@@ -1102,7 +1102,7 @@ static void process_interrupt(u16 port_i
+ if (port_int_reg & INT_RCV_COMPLETED)
+ recv_interrupt(port_int_reg, icom_port);
+
+- spin_unlock(&icom_port->uart_port.lock);
++ uart_port_unlock(&icom_port->uart_port);
+ }
+
+ static irqreturn_t icom_interrupt(int irq, void *dev_id)
+@@ -1186,14 +1186,14 @@ static unsigned int icom_tx_empty(struct
+ int ret;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ if (le16_to_cpu(icom_port->statStg->xmit[0].flags) &
+ SA_FLAGS_READY_TO_XMIT)
+ ret = TIOCSER_TEMT;
+ else
+ ret = 0;
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ return ret;
+ }
+
+@@ -1276,7 +1276,7 @@ static void icom_send_xchar(struct uart_
+
+ /* wait .1 sec to send char */
+ for (index = 0; index < 10; index++) {
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ xdata = readb(&icom_port->dram->xchar);
+ if (xdata == 0x00) {
+ trace(icom_port, "QUICK_WRITE", 0);
+@@ -1284,10 +1284,10 @@ static void icom_send_xchar(struct uart_
+
+ /* flush write operation */
+ xdata = readb(&icom_port->dram->xchar);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ break;
+ }
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ msleep(10);
+ }
+ }
+@@ -1307,7 +1307,7 @@ static void icom_break(struct uart_port
+ unsigned char cmdReg;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ trace(icom_port, "BREAK", 0);
+ cmdReg = readb(&icom_port->dram->CmdReg);
+ if (break_state == -1) {
+@@ -1315,7 +1315,7 @@ static void icom_break(struct uart_port
+ } else {
+ writeb(cmdReg & ~CMD_SND_BREAK, &icom_port->dram->CmdReg);
+ }
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static int icom_open(struct uart_port *port)
+@@ -1365,7 +1365,7 @@ static void icom_set_termios(struct uart
+ unsigned long offset;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ trace(icom_port, "CHANGE_SPEED", 0);
+
+ cflag = termios->c_cflag;
+@@ -1516,7 +1516,7 @@ static void icom_set_termios(struct uart
+ trace(icom_port, "XR_ENAB", 0);
+ writeb(CMD_XMIT_RCV_ENABLE, &icom_port->dram->CmdReg);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static const char *icom_type(struct uart_port *port)
diff --git a/debian/patches-rt/0029-serial-imx-Use-port-lock-wrappers.patch b/debian/patches-rt/0029-serial-imx-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..24f582ae87
--- /dev/null
+++ b/debian/patches-rt/0029-serial-imx-Use-port-lock-wrappers.patch
@@ -0,0 +1,354 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:46 +0206
+Subject: [PATCH 029/134] serial: imx: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-30-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/imx.c | 84 +++++++++++++++++++++++------------------------
+ 1 file changed, 42 insertions(+), 42 deletions(-)
+
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -575,7 +575,7 @@ static void imx_uart_dma_tx_callback(voi
+ unsigned long flags;
+ u32 ucr1;
+
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+
+ dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
+
+@@ -600,7 +600,7 @@ static void imx_uart_dma_tx_callback(voi
+ imx_uart_writel(sport, ucr4, UCR4);
+ }
+
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+ }
+
+ /* called with port.lock taken and irqs off */
+@@ -766,11 +766,11 @@ static irqreturn_t imx_uart_rtsint(int i
+ struct imx_port *sport = dev_id;
+ irqreturn_t ret;
+
+- spin_lock(&sport->port.lock);
++ uart_port_lock(&sport->port);
+
+ ret = __imx_uart_rtsint(irq, dev_id);
+
+- spin_unlock(&sport->port.lock);
++ uart_port_unlock(&sport->port);
+
+ return ret;
+ }
+@@ -779,9 +779,9 @@ static irqreturn_t imx_uart_txint(int ir
+ {
+ struct imx_port *sport = dev_id;
+
+- spin_lock(&sport->port.lock);
++ uart_port_lock(&sport->port);
+ imx_uart_transmit_buffer(sport);
+- spin_unlock(&sport->port.lock);
++ uart_port_unlock(&sport->port);
+ return IRQ_HANDLED;
+ }
+
+@@ -895,11 +895,11 @@ static irqreturn_t imx_uart_rxint(int ir
+ struct imx_port *sport = dev_id;
+ irqreturn_t ret;
+
+- spin_lock(&sport->port.lock);
++ uart_port_lock(&sport->port);
+
+ ret = __imx_uart_rxint(irq, dev_id);
+
+- spin_unlock(&sport->port.lock);
++ uart_port_unlock(&sport->port);
+
+ return ret;
+ }
+@@ -962,7 +962,7 @@ static irqreturn_t imx_uart_int(int irq,
+ unsigned int usr1, usr2, ucr1, ucr2, ucr3, ucr4;
+ irqreturn_t ret = IRQ_NONE;
+
+- spin_lock(&sport->port.lock);
++ uart_port_lock(&sport->port);
+
+ usr1 = imx_uart_readl(sport, USR1);
+ usr2 = imx_uart_readl(sport, USR2);
+@@ -1032,7 +1032,7 @@ static irqreturn_t imx_uart_int(int irq,
+ ret = IRQ_HANDLED;
+ }
+
+- spin_unlock(&sport->port.lock);
++ uart_port_unlock(&sport->port);
+
+ return ret;
+ }
+@@ -1115,7 +1115,7 @@ static void imx_uart_break_ctl(struct ua
+ unsigned long flags;
+ u32 ucr1;
+
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+
+ ucr1 = imx_uart_readl(sport, UCR1) & ~UCR1_SNDBRK;
+
+@@ -1124,7 +1124,7 @@ static void imx_uart_break_ctl(struct ua
+
+ imx_uart_writel(sport, ucr1, UCR1);
+
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+ }
+
+ /*
+@@ -1137,9 +1137,9 @@ static void imx_uart_timeout(struct time
+ unsigned long flags;
+
+ if (sport->port.state) {
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+ imx_uart_mctrl_check(sport);
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+
+ mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
+ }
+@@ -1169,9 +1169,9 @@ static void imx_uart_dma_rx_callback(voi
+ status = dmaengine_tx_status(chan, sport->rx_cookie, &state);
+
+ if (status == DMA_ERROR) {
+- spin_lock(&sport->port.lock);
++ uart_port_lock(&sport->port);
+ imx_uart_clear_rx_errors(sport);
+- spin_unlock(&sport->port.lock);
++ uart_port_unlock(&sport->port);
+ return;
+ }
+
+@@ -1200,9 +1200,9 @@ static void imx_uart_dma_rx_callback(voi
+ r_bytes = rx_ring->head - rx_ring->tail;
+
+ /* If we received something, check for 0xff flood */
+- spin_lock(&sport->port.lock);
++ uart_port_lock(&sport->port);
+ imx_uart_check_flood(sport, imx_uart_readl(sport, USR2));
+- spin_unlock(&sport->port.lock);
++ uart_port_unlock(&sport->port);
+
+ if (!(sport->port.ignore_status_mask & URXD_DUMMY_READ)) {
+
+@@ -1460,7 +1460,7 @@ static int imx_uart_startup(struct uart_
+ if (!uart_console(port) && imx_uart_dma_init(sport) == 0)
+ dma_is_inited = 1;
+
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+
+ /* Reset fifo's and state machines */
+ imx_uart_soft_reset(sport);
+@@ -1533,7 +1533,7 @@ static int imx_uart_startup(struct uart_
+
+ imx_uart_disable_loopback_rs485(sport);
+
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+
+ return 0;
+ }
+@@ -1558,21 +1558,21 @@ static void imx_uart_shutdown(struct uar
+ sport->dma_is_rxing = 0;
+ }
+
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+ imx_uart_stop_tx(port);
+ imx_uart_stop_rx(port);
+ imx_uart_disable_dma(sport);
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+ imx_uart_dma_exit(sport);
+ }
+
+ mctrl_gpio_disable_ms(sport->gpios);
+
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+ ucr2 = imx_uart_readl(sport, UCR2);
+ ucr2 &= ~(UCR2_TXEN | UCR2_ATEN);
+ imx_uart_writel(sport, ucr2, UCR2);
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+
+ /*
+ * Stop our timer.
+@@ -1583,7 +1583,7 @@ static void imx_uart_shutdown(struct uar
+ * Disable all interrupts, port and break condition.
+ */
+
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+
+ ucr1 = imx_uart_readl(sport, UCR1);
+ ucr1 &= ~(UCR1_TRDYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_RXDMAEN |
+@@ -1605,7 +1605,7 @@ static void imx_uart_shutdown(struct uar
+ ucr4 &= ~UCR4_TCEN;
+ imx_uart_writel(sport, ucr4, UCR4);
+
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+
+ clk_disable_unprepare(sport->clk_per);
+ clk_disable_unprepare(sport->clk_ipg);
+@@ -1668,7 +1668,7 @@ imx_uart_set_termios(struct uart_port *p
+ baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
+ quot = uart_get_divisor(port, baud);
+
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+
+ /*
+ * Read current UCR2 and save it for future use, then clear all the bits
+@@ -1796,7 +1796,7 @@ imx_uart_set_termios(struct uart_port *p
+ if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
+ imx_uart_enable_ms(&sport->port);
+
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+ }
+
+ static const char *imx_uart_type(struct uart_port *port)
+@@ -1858,7 +1858,7 @@ static int imx_uart_poll_init(struct uar
+
+ imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
+
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+
+ /*
+ * Be careful about the order of enabling bits here. First enable the
+@@ -1886,7 +1886,7 @@ static int imx_uart_poll_init(struct uar
+ imx_uart_writel(sport, ucr1 | UCR1_RRDYEN, UCR1);
+ imx_uart_writel(sport, ucr2 | UCR2_ATEN, UCR2);
+
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+
+ return 0;
+ }
+@@ -2005,9 +2005,9 @@ imx_uart_console_write(struct console *c
+ if (sport->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+- locked = spin_trylock_irqsave(&sport->port.lock, flags);
++ locked = uart_port_trylock_irqsave(&sport->port, &flags);
+ else
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+
+ /*
+ * First, save UCR1/2/3 and then disable interrupts
+@@ -2035,7 +2035,7 @@ imx_uart_console_write(struct console *c
+ imx_uart_ucrs_restore(sport, &old_ucr);
+
+ if (locked)
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+ }
+
+ /*
+@@ -2193,10 +2193,10 @@ static enum hrtimer_restart imx_trigger_
+ struct imx_port *sport = container_of(t, struct imx_port, trigger_start_tx);
+ unsigned long flags;
+
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+ if (sport->tx_state == WAIT_AFTER_RTS)
+ imx_uart_start_tx(&sport->port);
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+
+ return HRTIMER_NORESTART;
+ }
+@@ -2206,10 +2206,10 @@ static enum hrtimer_restart imx_trigger_
+ struct imx_port *sport = container_of(t, struct imx_port, trigger_stop_tx);
+ unsigned long flags;
+
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+ if (sport->tx_state == WAIT_AFTER_SEND)
+ imx_uart_stop_tx(&sport->port);
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+
+ return HRTIMER_NORESTART;
+ }
+@@ -2482,9 +2482,9 @@ static void imx_uart_restore_context(str
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+ if (!sport->context_saved) {
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+ return;
+ }
+
+@@ -2499,7 +2499,7 @@ static void imx_uart_restore_context(str
+ imx_uart_writel(sport, sport->saved_reg[2], UCR3);
+ imx_uart_writel(sport, sport->saved_reg[3], UCR4);
+ sport->context_saved = false;
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+ }
+
+ static void imx_uart_save_context(struct imx_port *sport)
+@@ -2507,7 +2507,7 @@ static void imx_uart_save_context(struct
+ unsigned long flags;
+
+ /* Save necessary regs */
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+ sport->saved_reg[0] = imx_uart_readl(sport, UCR1);
+ sport->saved_reg[1] = imx_uart_readl(sport, UCR2);
+ sport->saved_reg[2] = imx_uart_readl(sport, UCR3);
+@@ -2519,7 +2519,7 @@ static void imx_uart_save_context(struct
+ sport->saved_reg[8] = imx_uart_readl(sport, UBMR);
+ sport->saved_reg[9] = imx_uart_readl(sport, IMX21_UTS);
+ sport->context_saved = true;
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+ }
+
+ static void imx_uart_enable_wakeup(struct imx_port *sport, bool on)
diff --git a/debian/patches-rt/0030-serial-ip22zilog-Use-port-lock-wrappers.patch b/debian/patches-rt/0030-serial-ip22zilog-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..c1307a246b
--- /dev/null
+++ b/debian/patches-rt/0030-serial-ip22zilog-Use-port-lock-wrappers.patch
@@ -0,0 +1,185 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:47 +0206
+Subject: [PATCH 030/134] serial: ip22zilog: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-31-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/ip22zilog.c | 36 ++++++++++++++++++------------------
+ 1 file changed, 18 insertions(+), 18 deletions(-)
+
+--- a/drivers/tty/serial/ip22zilog.c
++++ b/drivers/tty/serial/ip22zilog.c
+@@ -432,7 +432,7 @@ static irqreturn_t ip22zilog_interrupt(i
+ unsigned char r3;
+ bool push = false;
+
+- spin_lock(&up->port.lock);
++ uart_port_lock(&up->port);
+ r3 = read_zsreg(channel, R3);
+
+ /* Channel A */
+@@ -448,7 +448,7 @@ static irqreturn_t ip22zilog_interrupt(i
+ if (r3 & CHATxIP)
+ ip22zilog_transmit_chars(up, channel);
+ }
+- spin_unlock(&up->port.lock);
++ uart_port_unlock(&up->port);
+
+ if (push)
+ tty_flip_buffer_push(&up->port.state->port);
+@@ -458,7 +458,7 @@ static irqreturn_t ip22zilog_interrupt(i
+ channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
+ push = false;
+
+- spin_lock(&up->port.lock);
++ uart_port_lock(&up->port);
+ if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
+ writeb(RES_H_IUS, &channel->control);
+ ZSDELAY();
+@@ -471,7 +471,7 @@ static irqreturn_t ip22zilog_interrupt(i
+ if (r3 & CHBTxIP)
+ ip22zilog_transmit_chars(up, channel);
+ }
+- spin_unlock(&up->port.lock);
++ uart_port_unlock(&up->port);
+
+ if (push)
+ tty_flip_buffer_push(&up->port.state->port);
+@@ -504,11 +504,11 @@ static unsigned int ip22zilog_tx_empty(s
+ unsigned char status;
+ unsigned int ret;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ status = ip22zilog_read_channel_status(port);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ if (status & Tx_BUF_EMP)
+ ret = TIOCSER_TEMT;
+@@ -664,7 +664,7 @@ static void ip22zilog_break_ctl(struct u
+ else
+ clear_bits |= SND_BRK;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ new_reg = (up->curregs[R5] | set_bits) & ~clear_bits;
+ if (new_reg != up->curregs[R5]) {
+@@ -674,7 +674,7 @@ static void ip22zilog_break_ctl(struct u
+ write_zsreg(channel, R5, up->curregs[R5]);
+ }
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void __ip22zilog_reset(struct uart_ip22zilog_port *up)
+@@ -735,9 +735,9 @@ static int ip22zilog_startup(struct uart
+ if (ZS_IS_CONS(up))
+ return 0;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ __ip22zilog_startup(up);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ return 0;
+ }
+
+@@ -775,7 +775,7 @@ static void ip22zilog_shutdown(struct ua
+ if (ZS_IS_CONS(up))
+ return;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ channel = ZILOG_CHANNEL_FROM_PORT(port);
+
+@@ -788,7 +788,7 @@ static void ip22zilog_shutdown(struct ua
+ up->curregs[R5] &= ~SND_BRK;
+ ip22zilog_maybe_update_regs(up, channel);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ /* Shared by TTY driver and serial console setup. The port lock is held
+@@ -880,7 +880,7 @@ ip22zilog_set_termios(struct uart_port *
+
+ baud = uart_get_baud_rate(port, termios, old, 1200, 76800);
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
+
+@@ -894,7 +894,7 @@ ip22zilog_set_termios(struct uart_port *
+ ip22zilog_maybe_update_regs(up, ZILOG_CHANNEL_FROM_PORT(port));
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ }
+
+ static const char *ip22zilog_type(struct uart_port *port)
+@@ -1016,10 +1016,10 @@ ip22zilog_console_write(struct console *
+ struct uart_ip22zilog_port *up = &ip22zilog_port_table[con->index];
+ unsigned long flags;
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+ uart_console_write(&up->port, s, count, ip22zilog_put_char);
+ udelay(2);
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ }
+
+ static int __init ip22zilog_console_setup(struct console *con, char *options)
+@@ -1034,13 +1034,13 @@ static int __init ip22zilog_console_setu
+
+ printk(KERN_INFO "Console: ttyS%d (IP22-Zilog)\n", con->index);
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ up->curregs[R15] |= BRKIE;
+
+ __ip22zilog_startup(up);
+
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
diff --git a/debian/patches-rt/0031-serial-jsm-Use-port-lock-wrappers.patch b/debian/patches-rt/0031-serial-jsm-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..4326b333a7
--- /dev/null
+++ b/debian/patches-rt/0031-serial-jsm-Use-port-lock-wrappers.patch
@@ -0,0 +1,124 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:48 +0206
+Subject: [PATCH 031/134] serial: jsm: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-32-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/jsm/jsm_neo.c | 4 ++--
+ drivers/tty/serial/jsm/jsm_tty.c | 16 ++++++++--------
+ 2 files changed, 10 insertions(+), 10 deletions(-)
+
+--- a/drivers/tty/serial/jsm/jsm_neo.c
++++ b/drivers/tty/serial/jsm/jsm_neo.c
+@@ -816,9 +816,9 @@ static void neo_parse_isr(struct jsm_boa
+ /* Parse any modem signal changes */
+ jsm_dbg(INTR, &ch->ch_bd->pci_dev,
+ "MOD_STAT: sending to parse_modem_sigs\n");
+- spin_lock_irqsave(&ch->uart_port.lock, lock_flags);
++ uart_port_lock_irqsave(&ch->uart_port, &lock_flags);
+ neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
+- spin_unlock_irqrestore(&ch->uart_port.lock, lock_flags);
++ uart_port_unlock_irqrestore(&ch->uart_port, lock_flags);
+ }
+ }
+
+--- a/drivers/tty/serial/jsm/jsm_tty.c
++++ b/drivers/tty/serial/jsm/jsm_tty.c
+@@ -152,14 +152,14 @@ static void jsm_tty_send_xchar(struct ua
+ container_of(port, struct jsm_channel, uart_port);
+ struct ktermios *termios;
+
+- spin_lock_irqsave(&port->lock, lock_flags);
++ uart_port_lock_irqsave(port, &lock_flags);
+ termios = &port->state->port.tty->termios;
+ if (ch == termios->c_cc[VSTART])
+ channel->ch_bd->bd_ops->send_start_character(channel);
+
+ if (ch == termios->c_cc[VSTOP])
+ channel->ch_bd->bd_ops->send_stop_character(channel);
+- spin_unlock_irqrestore(&port->lock, lock_flags);
++ uart_port_unlock_irqrestore(port, lock_flags);
+ }
+
+ static void jsm_tty_stop_rx(struct uart_port *port)
+@@ -176,13 +176,13 @@ static void jsm_tty_break(struct uart_po
+ struct jsm_channel *channel =
+ container_of(port, struct jsm_channel, uart_port);
+
+- spin_lock_irqsave(&port->lock, lock_flags);
++ uart_port_lock_irqsave(port, &lock_flags);
+ if (break_state == -1)
+ channel->ch_bd->bd_ops->send_break(channel);
+ else
+ channel->ch_bd->bd_ops->clear_break(channel);
+
+- spin_unlock_irqrestore(&port->lock, lock_flags);
++ uart_port_unlock_irqrestore(port, lock_flags);
+ }
+
+ static int jsm_tty_open(struct uart_port *port)
+@@ -241,7 +241,7 @@ static int jsm_tty_open(struct uart_port
+ channel->ch_cached_lsr = 0;
+ channel->ch_stops_sent = 0;
+
+- spin_lock_irqsave(&port->lock, lock_flags);
++ uart_port_lock_irqsave(port, &lock_flags);
+ termios = &port->state->port.tty->termios;
+ channel->ch_c_cflag = termios->c_cflag;
+ channel->ch_c_iflag = termios->c_iflag;
+@@ -261,7 +261,7 @@ static int jsm_tty_open(struct uart_port
+ jsm_carrier(channel);
+
+ channel->ch_open_count++;
+- spin_unlock_irqrestore(&port->lock, lock_flags);
++ uart_port_unlock_irqrestore(port, lock_flags);
+
+ jsm_dbg(OPEN, &channel->ch_bd->pci_dev, "finish\n");
+ return 0;
+@@ -307,7 +307,7 @@ static void jsm_tty_set_termios(struct u
+ struct jsm_channel *channel =
+ container_of(port, struct jsm_channel, uart_port);
+
+- spin_lock_irqsave(&port->lock, lock_flags);
++ uart_port_lock_irqsave(port, &lock_flags);
+ channel->ch_c_cflag = termios->c_cflag;
+ channel->ch_c_iflag = termios->c_iflag;
+ channel->ch_c_oflag = termios->c_oflag;
+@@ -317,7 +317,7 @@ static void jsm_tty_set_termios(struct u
+
+ channel->ch_bd->bd_ops->param(channel);
+ jsm_carrier(channel);
+- spin_unlock_irqrestore(&port->lock, lock_flags);
++ uart_port_unlock_irqrestore(port, lock_flags);
+ }
+
+ static const char *jsm_tty_type(struct uart_port *port)
diff --git a/debian/patches-rt/0032-serial-liteuart-Use-port-lock-wrappers.patch b/debian/patches-rt/0032-serial-liteuart-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..ae679c098c
--- /dev/null
+++ b/debian/patches-rt/0032-serial-liteuart-Use-port-lock-wrappers.patch
@@ -0,0 +1,110 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:49 +0206
+Subject: [PATCH 032/134] serial: liteuart: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Gabriel Somlo <gsomlo@gmail.com>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-33-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/liteuart.c | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+--- a/drivers/tty/serial/liteuart.c
++++ b/drivers/tty/serial/liteuart.c
+@@ -139,13 +139,13 @@ static irqreturn_t liteuart_interrupt(in
+ * if polling, the context would be "in_serving_softirq", so use
+ * irq[save|restore] spin_lock variants to cover all possibilities
+ */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ isr = litex_read8(port->membase + OFF_EV_PENDING) & uart->irq_reg;
+ if (isr & EV_RX)
+ liteuart_rx_chars(port);
+ if (isr & EV_TX)
+ liteuart_tx_chars(port);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return IRQ_RETVAL(isr);
+ }
+@@ -195,10 +195,10 @@ static int liteuart_startup(struct uart_
+ }
+ }
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ /* only enabling rx irqs during startup */
+ liteuart_update_irq_reg(port, true, EV_RX);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ if (!port->irq) {
+ timer_setup(&uart->timer, liteuart_timer, 0);
+@@ -213,9 +213,9 @@ static void liteuart_shutdown(struct uar
+ struct liteuart_port *uart = to_liteuart_port(port);
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ liteuart_update_irq_reg(port, false, EV_RX | EV_TX);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ if (port->irq)
+ free_irq(port->irq, port);
+@@ -229,13 +229,13 @@ static void liteuart_set_termios(struct
+ unsigned int baud;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* update baudrate */
+ baud = uart_get_baud_rate(port, new, old, 0, 460800);
+ uart_update_timeout(port, new->c_cflag, baud);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static const char *liteuart_type(struct uart_port *port)
+@@ -382,9 +382,9 @@ static void liteuart_console_write(struc
+ uart = (struct liteuart_port *)xa_load(&liteuart_array, co->index);
+ port = &uart->port;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ uart_console_write(port, s, count, liteuart_putchar);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static int liteuart_console_setup(struct console *co, char *options)
diff --git a/debian/patches-rt/0033-serial-lpc32xx_hs-Use-port-lock-wrappers.patch b/debian/patches-rt/0033-serial-lpc32xx_hs-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..06518e8460
--- /dev/null
+++ b/debian/patches-rt/0033-serial-lpc32xx_hs-Use-port-lock-wrappers.patch
@@ -0,0 +1,148 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:50 +0206
+Subject: [PATCH 033/134] serial: lpc32xx_hs: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-34-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/lpc32xx_hs.c | 26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+--- a/drivers/tty/serial/lpc32xx_hs.c
++++ b/drivers/tty/serial/lpc32xx_hs.c
+@@ -140,15 +140,15 @@ static void lpc32xx_hsuart_console_write
+ if (up->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+- locked = spin_trylock(&up->port.lock);
++ locked = uart_port_trylock(&up->port);
+ else
+- spin_lock(&up->port.lock);
++ uart_port_lock(&up->port);
+
+ uart_console_write(&up->port, s, count, lpc32xx_hsuart_console_putchar);
+ wait_for_xmit_empty(&up->port);
+
+ if (locked)
+- spin_unlock(&up->port.lock);
++ uart_port_unlock(&up->port);
+ local_irq_restore(flags);
+ }
+
+@@ -298,7 +298,7 @@ static irqreturn_t serial_lpc32xx_interr
+ struct tty_port *tport = &port->state->port;
+ u32 status;
+
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+
+ /* Read UART status and clear latched interrupts */
+ status = readl(LPC32XX_HSUART_IIR(port->membase));
+@@ -333,7 +333,7 @@ static irqreturn_t serial_lpc32xx_interr
+ __serial_lpc32xx_tx(port);
+ }
+
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+
+ return IRQ_HANDLED;
+ }
+@@ -404,14 +404,14 @@ static void serial_lpc32xx_break_ctl(str
+ unsigned long flags;
+ u32 tmp;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ tmp = readl(LPC32XX_HSUART_CTRL(port->membase));
+ if (break_state != 0)
+ tmp |= LPC32XX_HSU_BREAK;
+ else
+ tmp &= ~LPC32XX_HSU_BREAK;
+ writel(tmp, LPC32XX_HSUART_CTRL(port->membase));
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ /* port->lock is not held. */
+@@ -421,7 +421,7 @@ static int serial_lpc32xx_startup(struct
+ unsigned long flags;
+ u32 tmp;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ __serial_uart_flush(port);
+
+@@ -441,7 +441,7 @@ static int serial_lpc32xx_startup(struct
+
+ lpc32xx_loopback_set(port->mapbase, 0); /* get out of loopback mode */
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ retval = request_irq(port->irq, serial_lpc32xx_interrupt,
+ 0, MODNAME, port);
+@@ -458,7 +458,7 @@ static void serial_lpc32xx_shutdown(stru
+ u32 tmp;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ tmp = LPC32XX_HSU_TX_TL8B | LPC32XX_HSU_RX_TL32B |
+ LPC32XX_HSU_OFFSET(20) | LPC32XX_HSU_TMO_INACT_4B;
+@@ -466,7 +466,7 @@ static void serial_lpc32xx_shutdown(stru
+
+ lpc32xx_loopback_set(port->mapbase, 1); /* go to loopback mode */
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ free_irq(port->irq, port);
+ }
+@@ -491,7 +491,7 @@ static void serial_lpc32xx_set_termios(s
+
+ quot = __serial_get_clock_div(port->uartclk, baud);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* Ignore characters? */
+ tmp = readl(LPC32XX_HSUART_CTRL(port->membase));
+@@ -505,7 +505,7 @@ static void serial_lpc32xx_set_termios(s
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ /* Don't rewrite B0 */
+ if (tty_termios_baud_rate(termios))
diff --git a/debian/patches-rt/0034-serial-ma35d1-Use-port-lock-wrappers.patch b/debian/patches-rt/0034-serial-ma35d1-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..995bad9aad
--- /dev/null
+++ b/debian/patches-rt/0034-serial-ma35d1-Use-port-lock-wrappers.patch
@@ -0,0 +1,117 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:51 +0206
+Subject: [PATCH 034/134] serial: ma35d1: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-35-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/ma35d1_serial.c | 22 +++++++++++-----------
+ 1 file changed, 11 insertions(+), 11 deletions(-)
+
+--- a/drivers/tty/serial/ma35d1_serial.c
++++ b/drivers/tty/serial/ma35d1_serial.c
+@@ -269,16 +269,16 @@ static void receive_chars(struct uart_ma
+ if (uart_handle_sysrq_char(&up->port, ch))
+ continue;
+
+- spin_lock(&up->port.lock);
++ uart_port_lock(&up->port);
+ uart_insert_char(&up->port, fsr, MA35_FSR_RX_OVER_IF, ch, flag);
+- spin_unlock(&up->port.lock);
++ uart_port_unlock(&up->port);
+
+ fsr = serial_in(up, MA35_FSR_REG);
+ } while (!(fsr & MA35_FSR_RX_EMPTY) && (max_count-- > 0));
+
+- spin_lock(&up->port.lock);
++ uart_port_lock(&up->port);
+ tty_flip_buffer_push(&up->port.state->port);
+- spin_unlock(&up->port.lock);
++ uart_port_unlock(&up->port);
+ }
+
+ static irqreturn_t ma35d1serial_interrupt(int irq, void *dev_id)
+@@ -364,14 +364,14 @@ static void ma35d1serial_break_ctl(struc
+ unsigned long flags;
+ u32 lcr;
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+ lcr = serial_in(up, MA35_LCR_REG);
+ if (break_state != 0)
+ lcr |= MA35_LCR_BREAK;
+ else
+ lcr &= ~MA35_LCR_BREAK;
+ serial_out(up, MA35_LCR_REG, lcr);
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ }
+
+ static int ma35d1serial_startup(struct uart_port *port)
+@@ -441,7 +441,7 @@ static void ma35d1serial_set_termios(str
+ * Ok, we're now changing the port state. Do it with
+ * interrupts disabled.
+ */
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ up->port.read_status_mask = MA35_FSR_RX_OVER_IF;
+ if (termios->c_iflag & INPCK)
+@@ -475,7 +475,7 @@ static void ma35d1serial_set_termios(str
+
+ serial_out(up, MA35_LCR_REG, lcr);
+
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ }
+
+ static const char *ma35d1serial_type(struct uart_port *port)
+@@ -560,9 +560,9 @@ static void ma35d1serial_console_write(s
+ if (up->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+- locked = spin_trylock_irqsave(&up->port.lock, flags);
++ locked = uart_port_trylock_irqsave(&up->port, &flags);
+ else
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ /*
+ * First save the IER then disable the interrupts
+@@ -576,7 +576,7 @@ static void ma35d1serial_console_write(s
+ serial_out(up, MA35_IER_REG, ier);
+
+ if (locked)
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ }
+
+ static int __init ma35d1serial_console_setup(struct console *co, char *options)
diff --git a/debian/patches-rt/0035-serial-mcf-Use-port-lock-wrappers.patch b/debian/patches-rt/0035-serial-mcf-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..966618a3e4
--- /dev/null
+++ b/debian/patches-rt/0035-serial-mcf-Use-port-lock-wrappers.patch
@@ -0,0 +1,127 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:52 +0206
+Subject: [PATCH 035/134] serial: mcf: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-36-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/mcf.c | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+--- a/drivers/tty/serial/mcf.c
++++ b/drivers/tty/serial/mcf.c
+@@ -135,12 +135,12 @@ static void mcf_break_ctl(struct uart_po
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ if (break_state == -1)
+ writeb(MCFUART_UCR_CMDBREAKSTART, port->membase + MCFUART_UCR);
+ else
+ writeb(MCFUART_UCR_CMDBREAKSTOP, port->membase + MCFUART_UCR);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ /****************************************************************************/
+@@ -150,7 +150,7 @@ static int mcf_startup(struct uart_port
+ struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* Reset UART, get it into known state... */
+ writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
+@@ -164,7 +164,7 @@ static int mcf_startup(struct uart_port
+ pp->imr = MCFUART_UIR_RXREADY;
+ writeb(pp->imr, port->membase + MCFUART_UIMR);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return 0;
+ }
+@@ -176,7 +176,7 @@ static void mcf_shutdown(struct uart_por
+ struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* Disable all interrupts now */
+ pp->imr = 0;
+@@ -186,7 +186,7 @@ static void mcf_shutdown(struct uart_por
+ writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
+ writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ /****************************************************************************/
+@@ -252,7 +252,7 @@ static void mcf_set_termios(struct uart_
+ mr2 |= MCFUART_MR2_TXCTS;
+ }
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ if (port->rs485.flags & SER_RS485_ENABLED) {
+ dev_dbg(port->dev, "Setting UART to RS485\n");
+ mr2 |= MCFUART_MR2_TXRTS;
+@@ -273,7 +273,7 @@ static void mcf_set_termios(struct uart_
+ port->membase + MCFUART_UCSR);
+ writeb(MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE,
+ port->membase + MCFUART_UCR);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ /****************************************************************************/
+@@ -350,7 +350,7 @@ static irqreturn_t mcf_interrupt(int irq
+
+ isr = readb(port->membase + MCFUART_UISR) & pp->imr;
+
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+ if (isr & MCFUART_UIR_RXREADY) {
+ mcf_rx_chars(pp);
+ ret = IRQ_HANDLED;
+@@ -359,7 +359,7 @@ static irqreturn_t mcf_interrupt(int irq
+ mcf_tx_chars(pp);
+ ret = IRQ_HANDLED;
+ }
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+
+ return ret;
+ }
diff --git a/debian/patches-rt/0036-serial-men_z135_uart-Use-port-lock-wrappers.patch b/debian/patches-rt/0036-serial-men_z135_uart-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..11019689b6
--- /dev/null
+++ b/debian/patches-rt/0036-serial-men_z135_uart-Use-port-lock-wrappers.patch
@@ -0,0 +1,76 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:53 +0206
+Subject: [PATCH 036/134] serial: men_z135_uart: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-37-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/men_z135_uart.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/tty/serial/men_z135_uart.c
++++ b/drivers/tty/serial/men_z135_uart.c
+@@ -392,7 +392,7 @@ static irqreturn_t men_z135_intr(int irq
+ if (!irq_id)
+ goto out;
+
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+ /* It's save to write to IIR[7:6] RXC[9:8] */
+ iowrite8(irq_id, port->membase + MEN_Z135_STAT_REG);
+
+@@ -418,7 +418,7 @@ static irqreturn_t men_z135_intr(int irq
+ handled = true;
+ }
+
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+ out:
+ return IRQ_RETVAL(handled);
+ }
+@@ -708,7 +708,7 @@ static void men_z135_set_termios(struct
+
+ baud = uart_get_baud_rate(port, termios, old, 0, uart_freq / 16);
+
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+ if (tty_termios_baud_rate(termios))
+ tty_termios_encode_baud_rate(termios, baud, baud);
+
+@@ -716,7 +716,7 @@ static void men_z135_set_termios(struct
+ iowrite32(bd_reg, port->membase + MEN_Z135_BAUD_REG);
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+ }
+
+ static const char *men_z135_type(struct uart_port *port)
diff --git a/debian/patches-rt/0037-serial-meson-Use-port-lock-wrappers.patch b/debian/patches-rt/0037-serial-meson-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..d2efb460bc
--- /dev/null
+++ b/debian/patches-rt/0037-serial-meson-Use-port-lock-wrappers.patch
@@ -0,0 +1,168 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:54 +0206
+Subject: [PATCH 037/134] serial: meson: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Neil Armstrong <neil.armstrong@linaro.org>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-38-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/meson_uart.c | 30 +++++++++++++++---------------
+ 1 file changed, 15 insertions(+), 15 deletions(-)
+
+--- a/drivers/tty/serial/meson_uart.c
++++ b/drivers/tty/serial/meson_uart.c
+@@ -129,14 +129,14 @@ static void meson_uart_shutdown(struct u
+
+ free_irq(port->irq, port);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ val = readl(port->membase + AML_UART_CONTROL);
+ val &= ~AML_UART_RX_EN;
+ val &= ~(AML_UART_RX_INT_EN | AML_UART_TX_INT_EN);
+ writel(val, port->membase + AML_UART_CONTROL);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void meson_uart_start_tx(struct uart_port *port)
+@@ -238,7 +238,7 @@ static irqreturn_t meson_uart_interrupt(
+ {
+ struct uart_port *port = (struct uart_port *)dev_id;
+
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+
+ if (!(readl(port->membase + AML_UART_STATUS) & AML_UART_RX_EMPTY))
+ meson_receive_chars(port);
+@@ -248,7 +248,7 @@ static irqreturn_t meson_uart_interrupt(
+ meson_uart_start_tx(port);
+ }
+
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+
+ return IRQ_HANDLED;
+ }
+@@ -284,7 +284,7 @@ static int meson_uart_startup(struct uar
+ u32 val;
+ int ret = 0;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ val = readl(port->membase + AML_UART_CONTROL);
+ val |= AML_UART_CLEAR_ERR;
+@@ -301,7 +301,7 @@ static int meson_uart_startup(struct uar
+ val = (AML_UART_RECV_IRQ(1) | AML_UART_XMIT_IRQ(port->fifosize / 2));
+ writel(val, port->membase + AML_UART_MISC);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ ret = request_irq(port->irq, meson_uart_interrupt, 0,
+ port->name, port);
+@@ -341,7 +341,7 @@ static void meson_uart_set_termios(struc
+ unsigned long flags;
+ u32 val;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ cflags = termios->c_cflag;
+ iflags = termios->c_iflag;
+@@ -405,7 +405,7 @@ static void meson_uart_set_termios(struc
+ AML_UART_FRAME_ERR;
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static int meson_uart_verify_port(struct uart_port *port,
+@@ -464,14 +464,14 @@ static int meson_uart_poll_get_char(stru
+ u32 c;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ if (readl(port->membase + AML_UART_STATUS) & AML_UART_RX_EMPTY)
+ c = NO_POLL_CHAR;
+ else
+ c = readl(port->membase + AML_UART_RFIFO);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return c;
+ }
+@@ -482,7 +482,7 @@ static void meson_uart_poll_put_char(str
+ u32 reg;
+ int ret;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* Wait until FIFO is empty or timeout */
+ ret = readl_poll_timeout_atomic(port->membase + AML_UART_STATUS, reg,
+@@ -506,7 +506,7 @@ static void meson_uart_poll_put_char(str
+ dev_err(port->dev, "Timeout waiting for UART TX EMPTY\n");
+
+ out:
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ #endif /* CONFIG_CONSOLE_POLL */
+@@ -563,9 +563,9 @@ static void meson_serial_port_write(stru
+ if (port->sysrq) {
+ locked = 0;
+ } else if (oops_in_progress) {
+- locked = spin_trylock(&port->lock);
++ locked = uart_port_trylock(port);
+ } else {
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+ locked = 1;
+ }
+
+@@ -577,7 +577,7 @@ static void meson_serial_port_write(stru
+ writel(val, port->membase + AML_UART_CONTROL);
+
+ if (locked)
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+ local_irq_restore(flags);
+ }
+
diff --git a/debian/patches-rt/0038-serial-milbeaut_usio-Use-port-lock-wrappers.patch b/debian/patches-rt/0038-serial-milbeaut_usio-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..47b1b7f3cc
--- /dev/null
+++ b/debian/patches-rt/0038-serial-milbeaut_usio-Use-port-lock-wrappers.patch
@@ -0,0 +1,101 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:55 +0206
+Subject: [PATCH 038/134] serial: milbeaut_usio: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-39-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/milbeaut_usio.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/drivers/tty/serial/milbeaut_usio.c
++++ b/drivers/tty/serial/milbeaut_usio.c
+@@ -207,9 +207,9 @@ static irqreturn_t mlb_usio_rx_irq(int i
+ {
+ struct uart_port *port = dev_id;
+
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+ mlb_usio_rx_chars(port);
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+
+ return IRQ_HANDLED;
+ }
+@@ -218,10 +218,10 @@ static irqreturn_t mlb_usio_tx_irq(int i
+ {
+ struct uart_port *port = dev_id;
+
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+ if (readb(port->membase + MLB_USIO_REG_SSR) & MLB_USIO_SSR_TBI)
+ mlb_usio_tx_chars(port);
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+
+ return IRQ_HANDLED;
+ }
+@@ -267,7 +267,7 @@ static int mlb_usio_startup(struct uart_
+ escr = readb(port->membase + MLB_USIO_REG_ESCR);
+ if (of_property_read_bool(port->dev->of_node, "auto-flow-control"))
+ escr |= MLB_USIO_ESCR_FLWEN;
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ writeb(0, port->membase + MLB_USIO_REG_SCR);
+ writeb(escr, port->membase + MLB_USIO_REG_ESCR);
+ writeb(MLB_USIO_SCR_UPCL, port->membase + MLB_USIO_REG_SCR);
+@@ -282,7 +282,7 @@ static int mlb_usio_startup(struct uart_
+
+ writeb(MLB_USIO_SCR_TXE | MLB_USIO_SCR_RIE | MLB_USIO_SCR_TBIE |
+ MLB_USIO_SCR_RXE, port->membase + MLB_USIO_REG_SCR);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return 0;
+ }
+@@ -337,7 +337,7 @@ static void mlb_usio_set_termios(struct
+ else
+ quot = 0;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ uart_update_timeout(port, termios->c_cflag, baud);
+ port->read_status_mask = MLB_USIO_SSR_ORE | MLB_USIO_SSR_RDRF |
+ MLB_USIO_SSR_TDRE;
+@@ -367,7 +367,7 @@ static void mlb_usio_set_termios(struct
+ writew(BIT(12), port->membase + MLB_USIO_REG_FBYTE);
+ writeb(MLB_USIO_SCR_RIE | MLB_USIO_SCR_RXE | MLB_USIO_SCR_TBIE |
+ MLB_USIO_SCR_TXE, port->membase + MLB_USIO_REG_SCR);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static const char *mlb_usio_type(struct uart_port *port)
diff --git a/debian/patches-rt/0039-serial-mpc52xx-Use-port-lock-wrappers.patch b/debian/patches-rt/0039-serial-mpc52xx-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..ee7014a925
--- /dev/null
+++ b/debian/patches-rt/0039-serial-mpc52xx-Use-port-lock-wrappers.patch
@@ -0,0 +1,89 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:56 +0206
+Subject: [PATCH 039/134] serial: mpc52xx: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-40-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/mpc52xx_uart.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/tty/serial/mpc52xx_uart.c
++++ b/drivers/tty/serial/mpc52xx_uart.c
+@@ -1096,14 +1096,14 @@ static void
+ mpc52xx_uart_break_ctl(struct uart_port *port, int ctl)
+ {
+ unsigned long flags;
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ if (ctl == -1)
+ psc_ops->command(port, MPC52xx_PSC_START_BRK);
+ else
+ psc_ops->command(port, MPC52xx_PSC_STOP_BRK);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static int
+@@ -1214,7 +1214,7 @@ mpc52xx_uart_set_termios(struct uart_por
+ }
+
+ /* Get the lock */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* Do our best to flush TX & RX, so we don't lose anything */
+ /* But we don't wait indefinitely ! */
+@@ -1250,7 +1250,7 @@ mpc52xx_uart_set_termios(struct uart_por
+ psc_ops->command(port, MPC52xx_PSC_RX_ENABLE);
+
+ /* We're all set, release the lock */
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static const char *
+@@ -1477,11 +1477,11 @@ mpc52xx_uart_int(int irq, void *dev_id)
+ struct uart_port *port = dev_id;
+ irqreturn_t ret;
+
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+
+ ret = psc_ops->handle_irq(port);
+
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+
+ return ret;
+ }
diff --git a/debian/patches-rt/0040-serial-mps2-uart-Use-port-lock-wrappers.patch b/debian/patches-rt/0040-serial-mps2-uart-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..a626bdb031
--- /dev/null
+++ b/debian/patches-rt/0040-serial-mps2-uart-Use-port-lock-wrappers.patch
@@ -0,0 +1,103 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:57 +0206
+Subject: [PATCH 040/134] serial: mps2-uart: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-41-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/mps2-uart.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/drivers/tty/serial/mps2-uart.c
++++ b/drivers/tty/serial/mps2-uart.c
+@@ -188,12 +188,12 @@ static irqreturn_t mps2_uart_rxirq(int i
+ if (unlikely(!(irqflag & UARTn_INT_RX)))
+ return IRQ_NONE;
+
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+
+ mps2_uart_write8(port, UARTn_INT_RX, UARTn_INT);
+ mps2_uart_rx_chars(port);
+
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+
+ return IRQ_HANDLED;
+ }
+@@ -206,12 +206,12 @@ static irqreturn_t mps2_uart_txirq(int i
+ if (unlikely(!(irqflag & UARTn_INT_TX)))
+ return IRQ_NONE;
+
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+
+ mps2_uart_write8(port, UARTn_INT_TX, UARTn_INT);
+ mps2_uart_tx_chars(port);
+
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+
+ return IRQ_HANDLED;
+ }
+@@ -222,7 +222,7 @@ static irqreturn_t mps2_uart_oerrirq(int
+ struct uart_port *port = data;
+ u8 irqflag = mps2_uart_read8(port, UARTn_INT);
+
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+
+ if (irqflag & UARTn_INT_RX_OVERRUN) {
+ struct tty_port *tport = &port->state->port;
+@@ -244,7 +244,7 @@ static irqreturn_t mps2_uart_oerrirq(int
+ handled = IRQ_HANDLED;
+ }
+
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+
+ return handled;
+ }
+@@ -356,12 +356,12 @@ mps2_uart_set_termios(struct uart_port *
+
+ bauddiv = DIV_ROUND_CLOSEST(port->uartclk, baud);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+ mps2_uart_write32(port, bauddiv, UARTn_BAUDDIV);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ if (tty_termios_baud_rate(termios))
+ tty_termios_encode_baud_rate(termios, baud, baud);
diff --git a/debian/patches-rt/0041-serial-msm-Use-port-lock-wrappers.patch b/debian/patches-rt/0041-serial-msm-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..89b8453303
--- /dev/null
+++ b/debian/patches-rt/0041-serial-msm-Use-port-lock-wrappers.patch
@@ -0,0 +1,185 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:58 +0206
+Subject: [PATCH 041/134] serial: msm: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Bjorn Andersson <quic_bjorande@quicinc.com>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-42-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/msm_serial.c | 38 +++++++++++++++++++-------------------
+ 1 file changed, 19 insertions(+), 19 deletions(-)
+
+--- a/drivers/tty/serial/msm_serial.c
++++ b/drivers/tty/serial/msm_serial.c
+@@ -444,7 +444,7 @@ static void msm_complete_tx_dma(void *ar
+ unsigned int count;
+ u32 val;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* Already stopped */
+ if (!dma->count)
+@@ -476,7 +476,7 @@ static void msm_complete_tx_dma(void *ar
+
+ msm_handle_tx(port);
+ done:
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static int msm_handle_tx_dma(struct msm_port *msm_port, unsigned int count)
+@@ -549,7 +549,7 @@ static void msm_complete_rx_dma(void *ar
+ unsigned long flags;
+ u32 val;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* Already stopped */
+ if (!dma->count)
+@@ -587,16 +587,16 @@ static void msm_complete_rx_dma(void *ar
+ if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
+ flag = TTY_NORMAL;
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ sysrq = uart_handle_sysrq_char(port, dma->virt[i]);
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ if (!sysrq)
+ tty_insert_flip_char(tport, dma->virt[i], flag);
+ }
+
+ msm_start_rx_dma(msm_port);
+ done:
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ if (count)
+ tty_flip_buffer_push(tport);
+@@ -762,9 +762,9 @@ static void msm_handle_rx_dm(struct uart
+ if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
+ flag = TTY_NORMAL;
+
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+ sysrq = uart_handle_sysrq_char(port, buf[i]);
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+ if (!sysrq)
+ tty_insert_flip_char(tport, buf[i], flag);
+ }
+@@ -824,9 +824,9 @@ static void msm_handle_rx(struct uart_po
+ else if (sr & MSM_UART_SR_PAR_FRAME_ERR)
+ flag = TTY_FRAME;
+
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+ sysrq = uart_handle_sysrq_char(port, c);
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+ if (!sysrq)
+ tty_insert_flip_char(tport, c, flag);
+ }
+@@ -951,7 +951,7 @@ static irqreturn_t msm_uart_irq(int irq,
+ unsigned int misr;
+ u32 val;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ misr = msm_read(port, MSM_UART_MISR);
+ msm_write(port, 0, MSM_UART_IMR); /* disable interrupt */
+
+@@ -983,7 +983,7 @@ static irqreturn_t msm_uart_irq(int irq,
+ msm_handle_delta_cts(port);
+
+ msm_write(port, msm_port->imr, MSM_UART_IMR); /* restore interrupt */
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return IRQ_HANDLED;
+ }
+@@ -1128,13 +1128,13 @@ static int msm_set_baud_rate(struct uart
+ unsigned long flags, rate;
+
+ flags = *saved_flags;
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ entry = msm_find_best_baud(port, baud, &rate);
+ clk_set_rate(msm_port->clk, rate);
+ baud = rate / 16 / entry->divisor;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ *saved_flags = flags;
+ port->uartclk = rate;
+
+@@ -1266,7 +1266,7 @@ static void msm_set_termios(struct uart_
+ unsigned long flags;
+ unsigned int baud, mr;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ if (dma->chan) /* Terminate if any */
+ msm_stop_dma(port, dma);
+@@ -1338,7 +1338,7 @@ static void msm_set_termios(struct uart_
+ /* Try to use DMA */
+ msm_start_rx_dma(msm_port);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static const char *msm_type(struct uart_port *port)
+@@ -1620,9 +1620,9 @@ static void __msm_console_write(struct u
+ if (port->sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+- locked = spin_trylock(&port->lock);
++ locked = uart_port_trylock(port);
+ else
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+
+ if (is_uartdm)
+ msm_reset_dm_count(port, count);
+@@ -1661,7 +1661,7 @@ static void __msm_console_write(struct u
+ }
+
+ if (locked)
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+
+ local_irq_restore(flags);
+ }
diff --git a/debian/patches-rt/0042-serial-mvebu-uart-Use-port-lock-wrappers.patch b/debian/patches-rt/0042-serial-mvebu-uart-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..28b2c3b46e
--- /dev/null
+++ b/debian/patches-rt/0042-serial-mvebu-uart-Use-port-lock-wrappers.patch
@@ -0,0 +1,108 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:43:59 +0206
+Subject: [PATCH 042/134] serial: mvebu-uart: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-43-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/mvebu-uart.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+--- a/drivers/tty/serial/mvebu-uart.c
++++ b/drivers/tty/serial/mvebu-uart.c
+@@ -187,9 +187,9 @@ static unsigned int mvebu_uart_tx_empty(
+ unsigned long flags;
+ unsigned int st;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ st = readl(port->membase + UART_STAT);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return (st & STAT_TX_EMP) ? TIOCSER_TEMT : 0;
+ }
+@@ -249,14 +249,14 @@ static void mvebu_uart_break_ctl(struct
+ unsigned int ctl;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ ctl = readl(port->membase + UART_CTRL(port));
+ if (brk == -1)
+ ctl |= CTRL_SND_BRK_SEQ;
+ else
+ ctl &= ~CTRL_SND_BRK_SEQ;
+ writel(ctl, port->membase + UART_CTRL(port));
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
+@@ -540,7 +540,7 @@ static void mvebu_uart_set_termios(struc
+ unsigned long flags;
+ unsigned int baud, min_baud, max_baud;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ port->read_status_mask = STAT_RX_RDY(port) | STAT_OVR_ERR |
+ STAT_TX_RDY(port) | STAT_TX_FIFO_FUL;
+@@ -589,7 +589,7 @@ static void mvebu_uart_set_termios(struc
+ uart_update_timeout(port, termios->c_cflag, baud);
+ }
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static const char *mvebu_uart_type(struct uart_port *port)
+@@ -735,9 +735,9 @@ static void mvebu_uart_console_write(str
+ int locked = 1;
+
+ if (oops_in_progress)
+- locked = spin_trylock_irqsave(&port->lock, flags);
++ locked = uart_port_trylock_irqsave(port, &flags);
+ else
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ ier = readl(port->membase + UART_CTRL(port)) & CTRL_BRK_INT;
+ intr = readl(port->membase + UART_INTR(port)) &
+@@ -758,7 +758,7 @@ static void mvebu_uart_console_write(str
+ }
+
+ if (locked)
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static int mvebu_uart_console_setup(struct console *co, char *options)
diff --git a/debian/patches-rt/0043-serial-omap-Use-port-lock-wrappers.patch b/debian/patches-rt/0043-serial-omap-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..a79d6253c4
--- /dev/null
+++ b/debian/patches-rt/0043-serial-omap-Use-port-lock-wrappers.patch
@@ -0,0 +1,180 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:00 +0206
+Subject: [PATCH 043/134] serial: omap: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-44-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/omap-serial.c | 38 +++++++++++++++++++-------------------
+ 1 file changed, 19 insertions(+), 19 deletions(-)
+
+--- a/drivers/tty/serial/omap-serial.c
++++ b/drivers/tty/serial/omap-serial.c
+@@ -390,10 +390,10 @@ static void serial_omap_throttle(struct
+ struct uart_omap_port *up = to_uart_omap_port(port);
+ unsigned long flags;
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+ up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
+ serial_out(up, UART_IER, up->ier);
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ }
+
+ static void serial_omap_unthrottle(struct uart_port *port)
+@@ -401,10 +401,10 @@ static void serial_omap_unthrottle(struc
+ struct uart_omap_port *up = to_uart_omap_port(port);
+ unsigned long flags;
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+ up->ier |= UART_IER_RLSI | UART_IER_RDI;
+ serial_out(up, UART_IER, up->ier);
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ }
+
+ static unsigned int check_modem_status(struct uart_omap_port *up)
+@@ -527,7 +527,7 @@ static irqreturn_t serial_omap_irq(int i
+ irqreturn_t ret = IRQ_NONE;
+ int max_count = 256;
+
+- spin_lock(&up->port.lock);
++ uart_port_lock(&up->port);
+
+ do {
+ iir = serial_in(up, UART_IIR);
+@@ -563,7 +563,7 @@ static irqreturn_t serial_omap_irq(int i
+ }
+ } while (max_count--);
+
+- spin_unlock(&up->port.lock);
++ uart_port_unlock(&up->port);
+
+ tty_flip_buffer_push(&up->port.state->port);
+
+@@ -579,9 +579,9 @@ static unsigned int serial_omap_tx_empty
+ unsigned int ret = 0;
+
+ dev_dbg(up->port.dev, "serial_omap_tx_empty+%d\n", up->port.line);
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+ ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+
+ return ret;
+ }
+@@ -647,13 +647,13 @@ static void serial_omap_break_ctl(struct
+ unsigned long flags;
+
+ dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->port.line);
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+ if (break_state == -1)
+ up->lcr |= UART_LCR_SBC;
+ else
+ up->lcr &= ~UART_LCR_SBC;
+ serial_out(up, UART_LCR, up->lcr);
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ }
+
+ static int serial_omap_startup(struct uart_port *port)
+@@ -701,13 +701,13 @@ static int serial_omap_startup(struct ua
+ * Now, initialize the UART
+ */
+ serial_out(up, UART_LCR, UART_LCR_WLEN8);
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+ /*
+ * Most PC uarts need OUT2 raised to enable interrupts.
+ */
+ up->port.mctrl |= TIOCM_OUT2;
+ serial_omap_set_mctrl(&up->port, up->port.mctrl);
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+
+ up->msr_saved_flags = 0;
+ /*
+@@ -742,10 +742,10 @@ static void serial_omap_shutdown(struct
+ up->ier = 0;
+ serial_out(up, UART_IER, 0);
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+ up->port.mctrl &= ~TIOCM_OUT2;
+ serial_omap_set_mctrl(&up->port, up->port.mctrl);
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+
+ /*
+ * Disable break condition and FIFOs
+@@ -815,7 +815,7 @@ serial_omap_set_termios(struct uart_port
+ * Ok, we're now changing the port state. Do it with
+ * interrupts disabled.
+ */
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ /*
+ * Update the per-port timeout.
+@@ -1013,7 +1013,7 @@ serial_omap_set_termios(struct uart_port
+
+ serial_omap_set_mctrl(&up->port, up->port.mctrl);
+
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->port.line);
+ }
+
+@@ -1216,9 +1216,9 @@ serial_omap_console_write(struct console
+ if (up->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+- locked = spin_trylock(&up->port.lock);
++ locked = uart_port_trylock(&up->port);
+ else
+- spin_lock(&up->port.lock);
++ uart_port_lock(&up->port);
+
+ /*
+ * First save the IER then disable the interrupts
+@@ -1245,7 +1245,7 @@ serial_omap_console_write(struct console
+ check_modem_status(up);
+
+ if (locked)
+- spin_unlock(&up->port.lock);
++ uart_port_unlock(&up->port);
+ local_irq_restore(flags);
+ }
+
diff --git a/debian/patches-rt/0044-serial-owl-Use-port-lock-wrappers.patch b/debian/patches-rt/0044-serial-owl-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..fd8dc75013
--- /dev/null
+++ b/debian/patches-rt/0044-serial-owl-Use-port-lock-wrappers.patch
@@ -0,0 +1,147 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:01 +0206
+Subject: [PATCH 044/134] serial: owl: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-45-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/owl-uart.c | 26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+--- a/drivers/tty/serial/owl-uart.c
++++ b/drivers/tty/serial/owl-uart.c
+@@ -125,12 +125,12 @@ static unsigned int owl_uart_tx_empty(st
+ u32 val;
+ unsigned int ret;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ val = owl_uart_read(port, OWL_UART_STAT);
+ ret = (val & OWL_UART_STAT_TFES) ? TIOCSER_TEMT : 0;
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return ret;
+ }
+@@ -232,7 +232,7 @@ static irqreturn_t owl_uart_irq(int irq,
+ unsigned long flags;
+ u32 stat;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ stat = owl_uart_read(port, OWL_UART_STAT);
+
+@@ -246,7 +246,7 @@ static irqreturn_t owl_uart_irq(int irq,
+ stat |= OWL_UART_STAT_RIP | OWL_UART_STAT_TIP;
+ owl_uart_write(port, stat, OWL_UART_STAT);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return IRQ_HANDLED;
+ }
+@@ -256,14 +256,14 @@ static void owl_uart_shutdown(struct uar
+ u32 val;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ val = owl_uart_read(port, OWL_UART_CTL);
+ val &= ~(OWL_UART_CTL_TXIE | OWL_UART_CTL_RXIE
+ | OWL_UART_CTL_TXDE | OWL_UART_CTL_RXDE | OWL_UART_CTL_EN);
+ owl_uart_write(port, val, OWL_UART_CTL);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ free_irq(port->irq, port);
+ }
+@@ -279,7 +279,7 @@ static int owl_uart_startup(struct uart_
+ if (ret)
+ return ret;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ val = owl_uart_read(port, OWL_UART_STAT);
+ val |= OWL_UART_STAT_RIP | OWL_UART_STAT_TIP
+@@ -291,7 +291,7 @@ static int owl_uart_startup(struct uart_
+ val |= OWL_UART_CTL_EN;
+ owl_uart_write(port, val, OWL_UART_CTL);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return 0;
+ }
+@@ -311,7 +311,7 @@ static void owl_uart_set_termios(struct
+ u32 ctl;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ ctl = owl_uart_read(port, OWL_UART_CTL);
+
+@@ -371,7 +371,7 @@ static void owl_uart_set_termios(struct
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void owl_uart_release_port(struct uart_port *port)
+@@ -515,9 +515,9 @@ static void owl_uart_port_write(struct u
+ if (port->sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+- locked = spin_trylock(&port->lock);
++ locked = uart_port_trylock(port);
+ else {
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+ locked = 1;
+ }
+
+@@ -541,7 +541,7 @@ static void owl_uart_port_write(struct u
+ owl_uart_write(port, old_ctl, OWL_UART_CTL);
+
+ if (locked)
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+
+ local_irq_restore(flags);
+ }
diff --git a/debian/patches-rt/0045-serial-pch-Use-port-lock-wrappers.patch b/debian/patches-rt/0045-serial-pch-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..6c4781cd09
--- /dev/null
+++ b/debian/patches-rt/0045-serial-pch-Use-port-lock-wrappers.patch
@@ -0,0 +1,80 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:02 +0206
+Subject: [PATCH 045/134] serial: pch: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-46-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/pch_uart.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/tty/serial/pch_uart.c
++++ b/drivers/tty/serial/pch_uart.c
+@@ -1347,7 +1347,7 @@ static void pch_uart_set_termios(struct
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
+
+ spin_lock_irqsave(&priv->lock, flags);
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+ rtn = pch_uart_hal_set_line(priv, baud, parity, bits, stb);
+@@ -1360,7 +1360,7 @@ static void pch_uart_set_termios(struct
+ tty_termios_encode_baud_rate(termios, baud, baud);
+
+ out:
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+@@ -1581,10 +1581,10 @@ pch_console_write(struct console *co, co
+ port_locked = 0;
+ } else if (oops_in_progress) {
+ priv_locked = spin_trylock(&priv->lock);
+- port_locked = spin_trylock(&priv->port.lock);
++ port_locked = uart_port_trylock(&priv->port);
+ } else {
+ spin_lock(&priv->lock);
+- spin_lock(&priv->port.lock);
++ uart_port_lock(&priv->port);
+ }
+
+ /*
+@@ -1604,7 +1604,7 @@ pch_console_write(struct console *co, co
+ iowrite8(ier, priv->membase + UART_IER);
+
+ if (port_locked)
+- spin_unlock(&priv->port.lock);
++ uart_port_unlock(&priv->port);
+ if (priv_locked)
+ spin_unlock(&priv->lock);
+ local_irq_restore(flags);
diff --git a/debian/patches-rt/0046-serial-pic32-Use-port-lock-wrappers.patch b/debian/patches-rt/0046-serial-pic32-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..7cb8da7d86
--- /dev/null
+++ b/debian/patches-rt/0046-serial-pic32-Use-port-lock-wrappers.patch
@@ -0,0 +1,118 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:03 +0206
+Subject: [PATCH 046/134] serial: pic32: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-47-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/pic32_uart.c | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+--- a/drivers/tty/serial/pic32_uart.c
++++ b/drivers/tty/serial/pic32_uart.c
+@@ -243,7 +243,7 @@ static void pic32_uart_break_ctl(struct
+ struct pic32_sport *sport = to_pic32_sport(port);
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ if (ctl)
+ pic32_uart_writel(sport, PIC32_SET(PIC32_UART_STA),
+@@ -252,7 +252,7 @@ static void pic32_uart_break_ctl(struct
+ pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_STA),
+ PIC32_UART_STA_UTXBRK);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ /* get port type in string format */
+@@ -274,7 +274,7 @@ static void pic32_uart_do_rx(struct uart
+ */
+ max_count = PIC32_UART_RX_FIFO_DEPTH;
+
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+
+ tty = &port->state->port;
+
+@@ -331,7 +331,7 @@ static void pic32_uart_do_rx(struct uart
+
+ } while (--max_count);
+
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+
+ tty_flip_buffer_push(tty);
+ }
+@@ -410,9 +410,9 @@ static irqreturn_t pic32_uart_tx_interru
+ struct uart_port *port = dev_id;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ pic32_uart_do_tx(port);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return IRQ_HANDLED;
+ }
+@@ -580,9 +580,9 @@ static void pic32_uart_shutdown(struct u
+ unsigned long flags;
+
+ /* disable uart */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ pic32_uart_dsbl_and_mask(port);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ clk_disable_unprepare(sport->clk);
+
+ /* free all 3 interrupts for this UART */
+@@ -604,7 +604,7 @@ static void pic32_uart_set_termios(struc
+ unsigned int quot;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* disable uart and mask all interrupts while changing speed */
+ pic32_uart_dsbl_and_mask(port);
+@@ -672,7 +672,7 @@ static void pic32_uart_set_termios(struc
+ /* enable uart */
+ pic32_uart_en_and_unmask(port);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ /* serial core request to claim uart iomem */
diff --git a/debian/patches-rt/0047-serial-pmac_zilog-Use-port-lock-wrappers.patch b/debian/patches-rt/0047-serial-pmac_zilog-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..e4a81157e1
--- /dev/null
+++ b/debian/patches-rt/0047-serial-pmac_zilog-Use-port-lock-wrappers.patch
@@ -0,0 +1,232 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:04 +0206
+Subject: [PATCH 047/134] serial: pmac_zilog: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-48-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/pmac_zilog.c | 52 ++++++++++++++++++++--------------------
+ 1 file changed, 26 insertions(+), 26 deletions(-)
+
+--- a/drivers/tty/serial/pmac_zilog.c
++++ b/drivers/tty/serial/pmac_zilog.c
+@@ -246,9 +246,9 @@ static bool pmz_receive_chars(struct uar
+ #endif /* USE_CTRL_O_SYSRQ */
+ if (uap->port.sysrq) {
+ int swallow;
+- spin_unlock(&uap->port.lock);
++ uart_port_unlock(&uap->port);
+ swallow = uart_handle_sysrq_char(&uap->port, ch);
+- spin_lock(&uap->port.lock);
++ uart_port_lock(&uap->port);
+ if (swallow)
+ goto next_char;
+ }
+@@ -435,7 +435,7 @@ static irqreturn_t pmz_interrupt(int irq
+ uap_a = pmz_get_port_A(uap);
+ uap_b = uap_a->mate;
+
+- spin_lock(&uap_a->port.lock);
++ uart_port_lock(&uap_a->port);
+ r3 = read_zsreg(uap_a, R3);
+
+ /* Channel A */
+@@ -456,14 +456,14 @@ static irqreturn_t pmz_interrupt(int irq
+ rc = IRQ_HANDLED;
+ }
+ skip_a:
+- spin_unlock(&uap_a->port.lock);
++ uart_port_unlock(&uap_a->port);
+ if (push)
+ tty_flip_buffer_push(&uap->port.state->port);
+
+ if (!uap_b)
+ goto out;
+
+- spin_lock(&uap_b->port.lock);
++ uart_port_lock(&uap_b->port);
+ push = false;
+ if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
+ if (!ZS_IS_OPEN(uap_b)) {
+@@ -481,7 +481,7 @@ static irqreturn_t pmz_interrupt(int irq
+ rc = IRQ_HANDLED;
+ }
+ skip_b:
+- spin_unlock(&uap_b->port.lock);
++ uart_port_unlock(&uap_b->port);
+ if (push)
+ tty_flip_buffer_push(&uap->port.state->port);
+
+@@ -497,9 +497,9 @@ static inline u8 pmz_peek_status(struct
+ unsigned long flags;
+ u8 status;
+
+- spin_lock_irqsave(&uap->port.lock, flags);
++ uart_port_lock_irqsave(&uap->port, &flags);
+ status = read_zsreg(uap, R0);
+- spin_unlock_irqrestore(&uap->port.lock, flags);
++ uart_port_unlock_irqrestore(&uap->port, flags);
+
+ return status;
+ }
+@@ -685,7 +685,7 @@ static void pmz_break_ctl(struct uart_po
+ else
+ clear_bits |= SND_BRK;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ new_reg = (uap->curregs[R5] | set_bits) & ~clear_bits;
+ if (new_reg != uap->curregs[R5]) {
+@@ -693,7 +693,7 @@ static void pmz_break_ctl(struct uart_po
+ write_zsreg(uap, R5, uap->curregs[R5]);
+ }
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ #ifdef CONFIG_PPC_PMAC
+@@ -865,18 +865,18 @@ static void pmz_irda_reset(struct uart_p
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&uap->port.lock, flags);
++ uart_port_lock_irqsave(&uap->port, &flags);
+ uap->curregs[R5] |= DTR;
+ write_zsreg(uap, R5, uap->curregs[R5]);
+ zssync(uap);
+- spin_unlock_irqrestore(&uap->port.lock, flags);
++ uart_port_unlock_irqrestore(&uap->port, flags);
+ msleep(110);
+
+- spin_lock_irqsave(&uap->port.lock, flags);
++ uart_port_lock_irqsave(&uap->port, &flags);
+ uap->curregs[R5] &= ~DTR;
+ write_zsreg(uap, R5, uap->curregs[R5]);
+ zssync(uap);
+- spin_unlock_irqrestore(&uap->port.lock, flags);
++ uart_port_unlock_irqrestore(&uap->port, flags);
+ msleep(10);
+ }
+
+@@ -896,9 +896,9 @@ static int pmz_startup(struct uart_port
+ * initialize the chip
+ */
+ if (!ZS_IS_CONS(uap)) {
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ pwr_delay = __pmz_startup(uap);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+ sprintf(uap->irq_name, PMACZILOG_NAME"%d", uap->port.line);
+ if (request_irq(uap->port.irq, pmz_interrupt, IRQF_SHARED,
+@@ -921,9 +921,9 @@ static int pmz_startup(struct uart_port
+ pmz_irda_reset(uap);
+
+ /* Enable interrupt requests for the channel */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ pmz_interrupt_control(uap, 1);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return 0;
+ }
+@@ -933,7 +933,7 @@ static void pmz_shutdown(struct uart_por
+ struct uart_pmac_port *uap = to_pmz(port);
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* Disable interrupt requests for the channel */
+ pmz_interrupt_control(uap, 0);
+@@ -948,19 +948,19 @@ static void pmz_shutdown(struct uart_por
+ pmz_maybe_update_regs(uap);
+ }
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ /* Release interrupt handler */
+ free_irq(uap->port.irq, uap);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ uap->flags &= ~PMACZILOG_FLAG_IS_OPEN;
+
+ if (!ZS_IS_CONS(uap))
+ pmz_set_scc_power(uap, 0); /* Shut the chip down */
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ /* Shared by TTY driver and serial console setup. The port lock is held
+@@ -1247,7 +1247,7 @@ static void pmz_set_termios(struct uart_
+ struct uart_pmac_port *uap = to_pmz(port);
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* Disable IRQs on the port */
+ pmz_interrupt_control(uap, 0);
+@@ -1259,7 +1259,7 @@ static void pmz_set_termios(struct uart_
+ if (ZS_IS_OPEN(uap))
+ pmz_interrupt_control(uap, 1);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static const char *pmz_type(struct uart_port *port)
+@@ -1896,7 +1896,7 @@ static void pmz_console_write(struct con
+ struct uart_pmac_port *uap = &pmz_ports[con->index];
+ unsigned long flags;
+
+- spin_lock_irqsave(&uap->port.lock, flags);
++ uart_port_lock_irqsave(&uap->port, &flags);
+
+ /* Turn of interrupts and enable the transmitter. */
+ write_zsreg(uap, R1, uap->curregs[1] & ~TxINT_ENAB);
+@@ -1908,7 +1908,7 @@ static void pmz_console_write(struct con
+ write_zsreg(uap, R1, uap->curregs[1]);
+ /* Don't disable the transmitter. */
+
+- spin_unlock_irqrestore(&uap->port.lock, flags);
++ uart_port_unlock_irqrestore(&uap->port, flags);
+ }
+
+ /*
diff --git a/debian/patches-rt/0048-serial-pxa-Use-port-lock-wrappers.patch b/debian/patches-rt/0048-serial-pxa-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..b0df211b6f
--- /dev/null
+++ b/debian/patches-rt/0048-serial-pxa-Use-port-lock-wrappers.patch
@@ -0,0 +1,150 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:05 +0206
+Subject: [PATCH 048/134] serial: pxa: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-49-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/pxa.c | 30 +++++++++++++++---------------
+ 1 file changed, 15 insertions(+), 15 deletions(-)
+
+--- a/drivers/tty/serial/pxa.c
++++ b/drivers/tty/serial/pxa.c
+@@ -225,14 +225,14 @@ static inline irqreturn_t serial_pxa_irq
+ iir = serial_in(up, UART_IIR);
+ if (iir & UART_IIR_NO_INT)
+ return IRQ_NONE;
+- spin_lock(&up->port.lock);
++ uart_port_lock(&up->port);
+ lsr = serial_in(up, UART_LSR);
+ if (lsr & UART_LSR_DR)
+ receive_chars(up, &lsr);
+ check_modem_status(up);
+ if (lsr & UART_LSR_THRE)
+ transmit_chars(up);
+- spin_unlock(&up->port.lock);
++ uart_port_unlock(&up->port);
+ return IRQ_HANDLED;
+ }
+
+@@ -242,9 +242,9 @@ static unsigned int serial_pxa_tx_empty(
+ unsigned long flags;
+ unsigned int ret;
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+ ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+
+ return ret;
+ }
+@@ -295,13 +295,13 @@ static void serial_pxa_break_ctl(struct
+ struct uart_pxa_port *up = (struct uart_pxa_port *)port;
+ unsigned long flags;
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+ if (break_state == -1)
+ up->lcr |= UART_LCR_SBC;
+ else
+ up->lcr &= ~UART_LCR_SBC;
+ serial_out(up, UART_LCR, up->lcr);
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ }
+
+ static int serial_pxa_startup(struct uart_port *port)
+@@ -346,10 +346,10 @@ static int serial_pxa_startup(struct uar
+ */
+ serial_out(up, UART_LCR, UART_LCR_WLEN8);
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+ up->port.mctrl |= TIOCM_OUT2;
+ serial_pxa_set_mctrl(&up->port, up->port.mctrl);
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+
+ /*
+ * Finally, enable interrupts. Note: Modem status interrupts
+@@ -383,10 +383,10 @@ static void serial_pxa_shutdown(struct u
+ up->ier = 0;
+ serial_out(up, UART_IER, 0);
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+ up->port.mctrl &= ~TIOCM_OUT2;
+ serial_pxa_set_mctrl(&up->port, up->port.mctrl);
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+
+ /*
+ * Disable break condition and FIFOs
+@@ -434,7 +434,7 @@ serial_pxa_set_termios(struct uart_port
+ * Ok, we're now changing the port state. Do it with
+ * interrupts disabled.
+ */
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ /*
+ * Ensure the port will be enabled.
+@@ -504,7 +504,7 @@ serial_pxa_set_termios(struct uart_port
+ up->lcr = cval; /* Save LCR */
+ serial_pxa_set_mctrl(&up->port, up->port.mctrl);
+ serial_out(up, UART_FCR, fcr);
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ }
+
+ static void
+@@ -608,9 +608,9 @@ serial_pxa_console_write(struct console
+ if (up->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+- locked = spin_trylock(&up->port.lock);
++ locked = uart_port_trylock(&up->port);
+ else
+- spin_lock(&up->port.lock);
++ uart_port_lock(&up->port);
+
+ /*
+ * First save the IER then disable the interrupts
+@@ -628,7 +628,7 @@ serial_pxa_console_write(struct console
+ serial_out(up, UART_IER, ier);
+
+ if (locked)
+- spin_unlock(&up->port.lock);
++ uart_port_unlock(&up->port);
+ local_irq_restore(flags);
+ clk_disable(up->clk);
+
diff --git a/debian/patches-rt/0049-serial-qcom-geni-Use-port-lock-wrappers.patch b/debian/patches-rt/0049-serial-qcom-geni-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..4f1bc6a1cb
--- /dev/null
+++ b/debian/patches-rt/0049-serial-qcom-geni-Use-port-lock-wrappers.patch
@@ -0,0 +1,71 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:06 +0206
+Subject: [PATCH 049/134] serial: qcom-geni: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Bjorn Andersson <quic_bjorande@quicinc.com>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-50-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/qcom_geni_serial.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/tty/serial/qcom_geni_serial.c
++++ b/drivers/tty/serial/qcom_geni_serial.c
+@@ -482,9 +482,9 @@ static void qcom_geni_serial_console_wri
+
+ uport = &port->uport;
+ if (oops_in_progress)
+- locked = spin_trylock_irqsave(&uport->lock, flags);
++ locked = uart_port_trylock_irqsave(uport, &flags);
+ else
+- spin_lock_irqsave(&uport->lock, flags);
++ uart_port_lock_irqsave(uport, &flags);
+
+ geni_status = readl(uport->membase + SE_GENI_STATUS);
+
+@@ -520,7 +520,7 @@ static void qcom_geni_serial_console_wri
+ qcom_geni_serial_setup_tx(uport, port->tx_remaining);
+
+ if (locked)
+- spin_unlock_irqrestore(&uport->lock, flags);
++ uart_port_unlock_irqrestore(uport, flags);
+ }
+
+ static void handle_rx_console(struct uart_port *uport, u32 bytes, bool drop)
+@@ -970,7 +970,7 @@ static irqreturn_t qcom_geni_serial_isr(
+ if (uport->suspended)
+ return IRQ_NONE;
+
+- spin_lock(&uport->lock);
++ uart_port_lock(uport);
+
+ m_irq_status = readl(uport->membase + SE_GENI_M_IRQ_STATUS);
+ s_irq_status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
diff --git a/debian/patches-rt/0050-serial-rda-Use-port-lock-wrappers.patch b/debian/patches-rt/0050-serial-rda-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..6d51972bcd
--- /dev/null
+++ b/debian/patches-rt/0050-serial-rda-Use-port-lock-wrappers.patch
@@ -0,0 +1,177 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:07 +0206
+Subject: [PATCH 050/134] serial: rda: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-51-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/rda-uart.c | 34 +++++++++++++++++-----------------
+ 1 file changed, 17 insertions(+), 17 deletions(-)
+
+--- a/drivers/tty/serial/rda-uart.c
++++ b/drivers/tty/serial/rda-uart.c
+@@ -139,12 +139,12 @@ static unsigned int rda_uart_tx_empty(st
+ unsigned int ret;
+ u32 val;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ val = rda_uart_read(port, RDA_UART_STATUS);
+ ret = (val & RDA_UART_TX_FIFO_MASK) ? TIOCSER_TEMT : 0;
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return ret;
+ }
+@@ -246,7 +246,7 @@ static void rda_uart_set_termios(struct
+ unsigned int baud;
+ u32 irq_mask;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ baud = uart_get_baud_rate(port, termios, old, 9600, port->uartclk / 4);
+ rda_uart_change_baudrate(rda_port, baud);
+@@ -325,7 +325,7 @@ static void rda_uart_set_termios(struct
+ /* update the per-port timeout */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void rda_uart_send_chars(struct uart_port *port)
+@@ -408,7 +408,7 @@ static irqreturn_t rda_interrupt(int irq
+ unsigned long flags;
+ u32 val, irq_mask;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* Clear IRQ cause */
+ val = rda_uart_read(port, RDA_UART_IRQ_CAUSE);
+@@ -425,7 +425,7 @@ static irqreturn_t rda_interrupt(int irq
+ rda_uart_send_chars(port);
+ }
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return IRQ_HANDLED;
+ }
+@@ -436,16 +436,16 @@ static int rda_uart_startup(struct uart_
+ int ret;
+ u32 val;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ rda_uart_write(port, 0, RDA_UART_IRQ_MASK);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ ret = request_irq(port->irq, rda_interrupt, IRQF_NO_SUSPEND,
+ "rda-uart", port);
+ if (ret)
+ return ret;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ val = rda_uart_read(port, RDA_UART_CTRL);
+ val |= RDA_UART_ENABLE;
+@@ -456,7 +456,7 @@ static int rda_uart_startup(struct uart_
+ val |= (RDA_UART_RX_DATA_AVAILABLE | RDA_UART_RX_TIMEOUT);
+ rda_uart_write(port, val, RDA_UART_IRQ_MASK);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return 0;
+ }
+@@ -466,7 +466,7 @@ static void rda_uart_shutdown(struct uar
+ unsigned long flags;
+ u32 val;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ rda_uart_stop_tx(port);
+ rda_uart_stop_rx(port);
+@@ -475,7 +475,7 @@ static void rda_uart_shutdown(struct uar
+ val &= ~RDA_UART_ENABLE;
+ rda_uart_write(port, val, RDA_UART_CTRL);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static const char *rda_uart_type(struct uart_port *port)
+@@ -515,7 +515,7 @@ static void rda_uart_config_port(struct
+ rda_uart_request_port(port);
+ }
+
+- spin_lock_irqsave(&port->lock, irq_flags);
++ uart_port_lock_irqsave(port, &irq_flags);
+
+ /* Clear mask, so no surprise interrupts. */
+ rda_uart_write(port, 0, RDA_UART_IRQ_MASK);
+@@ -523,7 +523,7 @@ static void rda_uart_config_port(struct
+ /* Clear status register */
+ rda_uart_write(port, 0, RDA_UART_STATUS);
+
+- spin_unlock_irqrestore(&port->lock, irq_flags);
++ uart_port_unlock_irqrestore(port, irq_flags);
+ }
+
+ static void rda_uart_release_port(struct uart_port *port)
+@@ -597,9 +597,9 @@ static void rda_uart_port_write(struct u
+ if (port->sysrq) {
+ locked = 0;
+ } else if (oops_in_progress) {
+- locked = spin_trylock(&port->lock);
++ locked = uart_port_trylock(port);
+ } else {
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+ locked = 1;
+ }
+
+@@ -615,7 +615,7 @@ static void rda_uart_port_write(struct u
+ rda_uart_write(port, old_irq_mask, RDA_UART_IRQ_MASK);
+
+ if (locked)
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+
+ local_irq_restore(flags);
+ }
diff --git a/debian/patches-rt/0051-serial-rp2-Use-port-lock-wrappers.patch b/debian/patches-rt/0051-serial-rp2-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..e2009ff4c6
--- /dev/null
+++ b/debian/patches-rt/0051-serial-rp2-Use-port-lock-wrappers.patch
@@ -0,0 +1,114 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:08 +0206
+Subject: [PATCH 051/134] serial: rp2: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-52-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/rp2.c | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+--- a/drivers/tty/serial/rp2.c
++++ b/drivers/tty/serial/rp2.c
+@@ -276,9 +276,9 @@ static unsigned int rp2_uart_tx_empty(st
+ * But the TXEMPTY bit doesn't seem to work unless the TX IRQ is
+ * enabled.
+ */
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+ tx_fifo_bytes = readw(up->base + RP2_TX_FIFO_COUNT);
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+
+ return tx_fifo_bytes ? 0 : TIOCSER_TEMT;
+ }
+@@ -323,10 +323,10 @@ static void rp2_uart_break_ctl(struct ua
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ rp2_rmw(port_to_up(port), RP2_TXRX_CTL, RP2_TXRX_CTL_BREAK_m,
+ break_state ? RP2_TXRX_CTL_BREAK_m : 0);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void rp2_uart_enable_ms(struct uart_port *port)
+@@ -383,7 +383,7 @@ static void rp2_uart_set_termios(struct
+ if (tty_termios_baud_rate(new))
+ tty_termios_encode_baud_rate(new, baud, baud);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* ignore all characters if CREAD is not set */
+ port->ignore_status_mask = (new->c_cflag & CREAD) ? 0 : RP2_DUMMY_READ;
+@@ -391,7 +391,7 @@ static void rp2_uart_set_termios(struct
+ __rp2_uart_set_termios(up, new->c_cflag, new->c_iflag, baud_div);
+ uart_update_timeout(port, new->c_cflag, baud);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void rp2_rx_chars(struct rp2_uart_port *up)
+@@ -440,7 +440,7 @@ static void rp2_ch_interrupt(struct rp2_
+ {
+ u32 status;
+
+- spin_lock(&up->port.lock);
++ uart_port_lock(&up->port);
+
+ /*
+ * The IRQ status bits are clear-on-write. Other status bits in
+@@ -456,7 +456,7 @@ static void rp2_ch_interrupt(struct rp2_
+ if (status & RP2_CHAN_STAT_MS_CHANGED_MASK)
+ wake_up_interruptible(&up->port.state->port.delta_msr_wait);
+
+- spin_unlock(&up->port.lock);
++ uart_port_unlock(&up->port);
+ }
+
+ static int rp2_asic_interrupt(struct rp2_card *card, unsigned int asic_id)
+@@ -516,10 +516,10 @@ static void rp2_uart_shutdown(struct uar
+
+ rp2_uart_break_ctl(port, 0);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ rp2_mask_ch_irq(up, up->idx, 0);
+ rp2_rmw(up, RP2_CHAN_STAT, 0, 0);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static const char *rp2_uart_type(struct uart_port *port)
diff --git a/debian/patches-rt/0052-serial-sa1100-Use-port-lock-wrappers.patch b/debian/patches-rt/0052-serial-sa1100-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..2f8073a485
--- /dev/null
+++ b/debian/patches-rt/0052-serial-sa1100-Use-port-lock-wrappers.patch
@@ -0,0 +1,117 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:09 +0206
+Subject: [PATCH 052/134] serial: sa1100: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-53-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/sa1100.c | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+--- a/drivers/tty/serial/sa1100.c
++++ b/drivers/tty/serial/sa1100.c
+@@ -115,9 +115,9 @@ static void sa1100_timeout(struct timer_
+ unsigned long flags;
+
+ if (sport->port.state) {
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+ sa1100_mctrl_check(sport);
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+
+ mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
+ }
+@@ -247,7 +247,7 @@ static irqreturn_t sa1100_int(int irq, v
+ struct sa1100_port *sport = dev_id;
+ unsigned int status, pass_counter = 0;
+
+- spin_lock(&sport->port.lock);
++ uart_port_lock(&sport->port);
+ status = UART_GET_UTSR0(sport);
+ status &= SM_TO_UTSR0(sport->port.read_status_mask) | ~UTSR0_TFS;
+ do {
+@@ -276,7 +276,7 @@ static irqreturn_t sa1100_int(int irq, v
+ status &= SM_TO_UTSR0(sport->port.read_status_mask) |
+ ~UTSR0_TFS;
+ } while (status & (UTSR0_TFS | UTSR0_RFS | UTSR0_RID));
+- spin_unlock(&sport->port.lock);
++ uart_port_unlock(&sport->port);
+
+ return IRQ_HANDLED;
+ }
+@@ -321,14 +321,14 @@ static void sa1100_break_ctl(struct uart
+ unsigned long flags;
+ unsigned int utcr3;
+
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+ utcr3 = UART_GET_UTCR3(sport);
+ if (break_state == -1)
+ utcr3 |= UTCR3_BRK;
+ else
+ utcr3 &= ~UTCR3_BRK;
+ UART_PUT_UTCR3(sport, utcr3);
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+ }
+
+ static int sa1100_startup(struct uart_port *port)
+@@ -354,9 +354,9 @@ static int sa1100_startup(struct uart_po
+ /*
+ * Enable modem status interrupts
+ */
+- spin_lock_irq(&sport->port.lock);
++ uart_port_lock_irq(&sport->port);
+ sa1100_enable_ms(&sport->port);
+- spin_unlock_irq(&sport->port.lock);
++ uart_port_unlock_irq(&sport->port);
+
+ return 0;
+ }
+@@ -423,7 +423,7 @@ sa1100_set_termios(struct uart_port *por
+
+ del_timer_sync(&sport->timer);
+
+- spin_lock_irqsave(&sport->port.lock, flags);
++ uart_port_lock_irqsave(&sport->port, &flags);
+
+ sport->port.read_status_mask &= UTSR0_TO_SM(UTSR0_TFS);
+ sport->port.read_status_mask |= UTSR1_TO_SM(UTSR1_ROR);
+@@ -485,7 +485,7 @@ sa1100_set_termios(struct uart_port *por
+ if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
+ sa1100_enable_ms(&sport->port);
+
+- spin_unlock_irqrestore(&sport->port.lock, flags);
++ uart_port_unlock_irqrestore(&sport->port, flags);
+ }
+
+ static const char *sa1100_type(struct uart_port *port)
diff --git a/debian/patches-rt/0053-serial-samsung_tty-Use-port-lock-wrappers.patch b/debian/patches-rt/0053-serial-samsung_tty-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..74dc3c0145
--- /dev/null
+++ b/debian/patches-rt/0053-serial-samsung_tty-Use-port-lock-wrappers.patch
@@ -0,0 +1,245 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:10 +0206
+Subject: [PATCH 053/134] serial: samsung_tty: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-54-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/samsung_tty.c | 50 +++++++++++++++++++--------------------
+ 1 file changed, 25 insertions(+), 25 deletions(-)
+
+--- a/drivers/tty/serial/samsung_tty.c
++++ b/drivers/tty/serial/samsung_tty.c
+@@ -248,7 +248,7 @@ static void s3c24xx_serial_rx_enable(str
+ unsigned int ucon, ufcon;
+ int count = 10000;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ while (--count && !s3c24xx_serial_txempty_nofifo(port))
+ udelay(100);
+@@ -262,7 +262,7 @@ static void s3c24xx_serial_rx_enable(str
+ wr_regl(port, S3C2410_UCON, ucon);
+
+ ourport->rx_enabled = 1;
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void s3c24xx_serial_rx_disable(struct uart_port *port)
+@@ -271,14 +271,14 @@ static void s3c24xx_serial_rx_disable(st
+ unsigned long flags;
+ unsigned int ucon;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ ucon = rd_regl(port, S3C2410_UCON);
+ ucon &= ~S3C2410_UCON_RXIRQMODE;
+ wr_regl(port, S3C2410_UCON, ucon);
+
+ ourport->rx_enabled = 0;
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void s3c24xx_serial_stop_tx(struct uart_port *port)
+@@ -344,7 +344,7 @@ static void s3c24xx_serial_tx_dma_comple
+ dma->tx_transfer_addr, dma->tx_size,
+ DMA_TO_DEVICE);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ uart_xmit_advance(port, count);
+ ourport->tx_in_progress = 0;
+@@ -353,7 +353,7 @@ static void s3c24xx_serial_tx_dma_comple
+ uart_write_wakeup(port);
+
+ s3c24xx_serial_start_next_tx(ourport);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void enable_tx_dma(struct s3c24xx_uart_port *ourport)
+@@ -619,7 +619,7 @@ static void s3c24xx_serial_rx_dma_comple
+ received = dma->rx_bytes_requested - state.residue;
+ async_tx_ack(dma->rx_desc);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ if (received)
+ s3c24xx_uart_copy_rx_to_tty(ourport, t, received);
+@@ -631,7 +631,7 @@ static void s3c24xx_serial_rx_dma_comple
+
+ s3c64xx_start_rx_dma(ourport);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void s3c64xx_start_rx_dma(struct s3c24xx_uart_port *ourport)
+@@ -722,7 +722,7 @@ static irqreturn_t s3c24xx_serial_rx_cha
+ utrstat = rd_regl(port, S3C2410_UTRSTAT);
+ rd_regl(port, S3C2410_UFSTAT);
+
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+
+ if (!(utrstat & S3C2410_UTRSTAT_TIMEOUT)) {
+ s3c64xx_start_rx_dma(ourport);
+@@ -751,7 +751,7 @@ static irqreturn_t s3c24xx_serial_rx_cha
+ wr_regl(port, S3C2410_UTRSTAT, S3C2410_UTRSTAT_TIMEOUT);
+
+ finish:
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+
+ return IRQ_HANDLED;
+ }
+@@ -849,9 +849,9 @@ static irqreturn_t s3c24xx_serial_rx_cha
+ struct s3c24xx_uart_port *ourport = dev_id;
+ struct uart_port *port = &ourport->port;
+
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+ s3c24xx_serial_rx_drain_fifo(ourport);
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+
+ return IRQ_HANDLED;
+ }
+@@ -932,11 +932,11 @@ static irqreturn_t s3c24xx_serial_tx_irq
+ struct s3c24xx_uart_port *ourport = id;
+ struct uart_port *port = &ourport->port;
+
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+
+ s3c24xx_serial_tx_chars(ourport);
+
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+ return IRQ_HANDLED;
+ }
+
+@@ -1033,7 +1033,7 @@ static void s3c24xx_serial_break_ctl(str
+ unsigned long flags;
+ unsigned int ucon;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ ucon = rd_regl(port, S3C2410_UCON);
+
+@@ -1044,7 +1044,7 @@ static void s3c24xx_serial_break_ctl(str
+
+ wr_regl(port, S3C2410_UCON, ucon);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
+@@ -1303,7 +1303,7 @@ static int s3c64xx_serial_startup(struct
+ ourport->rx_enabled = 1;
+ ourport->tx_enabled = 0;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ ufcon = rd_regl(port, S3C2410_UFCON);
+ ufcon |= S3C2410_UFCON_RESETRX | S5PV210_UFCON_RXTRIG8;
+@@ -1313,7 +1313,7 @@ static int s3c64xx_serial_startup(struct
+
+ enable_rx_pio(ourport);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ /* Enable Rx Interrupt */
+ s3c24xx_clear_bit(port, S3C64XX_UINTM_RXD, S3C64XX_UINTM);
+@@ -1341,7 +1341,7 @@ static int apple_s5l_serial_startup(stru
+ ourport->rx_enabled = 1;
+ ourport->tx_enabled = 0;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ ufcon = rd_regl(port, S3C2410_UFCON);
+ ufcon |= S3C2410_UFCON_RESETRX | S5PV210_UFCON_RXTRIG8;
+@@ -1351,7 +1351,7 @@ static int apple_s5l_serial_startup(stru
+
+ enable_rx_pio(ourport);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ /* Enable Rx Interrupt */
+ s3c24xx_set_bit(port, APPLE_S5L_UCON_RXTHRESH_ENA, S3C2410_UCON);
+@@ -1626,7 +1626,7 @@ static void s3c24xx_serial_set_termios(s
+ ulcon |= S3C2410_LCON_PNONE;
+ }
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ dev_dbg(port->dev,
+ "setting ulcon to %08x, brddiv to %d, udivslot %08x\n",
+@@ -1684,7 +1684,7 @@ static void s3c24xx_serial_set_termios(s
+ if ((termios->c_cflag & CREAD) == 0)
+ port->ignore_status_mask |= RXSTAT_DUMMY_READ;
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static const char *s3c24xx_serial_type(struct uart_port *port)
+@@ -2376,14 +2376,14 @@ s3c24xx_serial_console_write(struct cons
+ if (cons_uart->sysrq)
+ locked = false;
+ else if (oops_in_progress)
+- locked = spin_trylock_irqsave(&cons_uart->lock, flags);
++ locked = uart_port_trylock_irqsave(cons_uart, &flags);
+ else
+- spin_lock_irqsave(&cons_uart->lock, flags);
++ uart_port_lock_irqsave(cons_uart, &flags);
+
+ uart_console_write(cons_uart, s, count, s3c24xx_serial_console_putchar);
+
+ if (locked)
+- spin_unlock_irqrestore(&cons_uart->lock, flags);
++ uart_port_unlock_irqrestore(cons_uart, flags);
+ }
+
+ /* Shouldn't be __init, as it can be instantiated from other module */
diff --git a/debian/patches-rt/0054-serial-sb1250-duart-Use-port-lock-wrappers.patch b/debian/patches-rt/0054-serial-sb1250-duart-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..e5aeb528fe
--- /dev/null
+++ b/debian/patches-rt/0054-serial-sb1250-duart-Use-port-lock-wrappers.patch
@@ -0,0 +1,85 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:11 +0206
+Subject: [PATCH 054/134] serial: sb1250-duart: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-55-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/sb1250-duart.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/tty/serial/sb1250-duart.c
++++ b/drivers/tty/serial/sb1250-duart.c
+@@ -610,7 +610,7 @@ static void sbd_set_termios(struct uart_
+ else
+ aux &= ~M_DUART_CTS_CHNG_ENA;
+
+- spin_lock(&uport->lock);
++ uart_port_lock(uport);
+
+ if (sport->tx_stopped)
+ command |= M_DUART_TX_DIS;
+@@ -632,7 +632,7 @@ static void sbd_set_termios(struct uart_
+
+ write_sbdchn(sport, R_DUART_CMD, command);
+
+- spin_unlock(&uport->lock);
++ uart_port_unlock(uport);
+ }
+
+
+@@ -839,22 +839,22 @@ static void sbd_console_write(struct con
+ unsigned int mask;
+
+ /* Disable transmit interrupts and enable the transmitter. */
+- spin_lock_irqsave(&uport->lock, flags);
++ uart_port_lock_irqsave(uport, &flags);
+ mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2));
+ write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2),
+ mask & ~M_DUART_IMR_TX);
+ write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_EN);
+- spin_unlock_irqrestore(&uport->lock, flags);
++ uart_port_unlock_irqrestore(uport, flags);
+
+ uart_console_write(&sport->port, s, count, sbd_console_putchar);
+
+ /* Restore transmit interrupts and the transmitter enable. */
+- spin_lock_irqsave(&uport->lock, flags);
++ uart_port_lock_irqsave(uport, &flags);
+ sbd_line_drain(sport);
+ if (sport->tx_stopped)
+ write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS);
+ write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask);
+- spin_unlock_irqrestore(&uport->lock, flags);
++ uart_port_unlock_irqrestore(uport, flags);
+ }
+
+ static int __init sbd_console_setup(struct console *co, char *options)
diff --git a/debian/patches-rt/0056-serial-tegra-Use-port-lock-wrappers.patch b/debian/patches-rt/0056-serial-tegra-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..f53b521dc0
--- /dev/null
+++ b/debian/patches-rt/0056-serial-tegra-Use-port-lock-wrappers.patch
@@ -0,0 +1,176 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:13 +0206
+Subject: [PATCH 056/134] serial: tegra: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-57-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/serial-tegra.c | 32 ++++++++++++++++----------------
+ 1 file changed, 16 insertions(+), 16 deletions(-)
+
+--- a/drivers/tty/serial/serial-tegra.c
++++ b/drivers/tty/serial/serial-tegra.c
+@@ -411,7 +411,7 @@ static int tegra_set_baudrate(struct teg
+ divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
+ }
+
+- spin_lock_irqsave(&tup->uport.lock, flags);
++ uart_port_lock_irqsave(&tup->uport, &flags);
+ lcr = tup->lcr_shadow;
+ lcr |= UART_LCR_DLAB;
+ tegra_uart_write(tup, lcr, UART_LCR);
+@@ -424,7 +424,7 @@ static int tegra_set_baudrate(struct teg
+
+ /* Dummy read to ensure the write is posted */
+ tegra_uart_read(tup, UART_SCR);
+- spin_unlock_irqrestore(&tup->uport.lock, flags);
++ uart_port_unlock_irqrestore(&tup->uport, flags);
+
+ tup->current_baud = baud;
+
+@@ -522,13 +522,13 @@ static void tegra_uart_tx_dma_complete(v
+ dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
+ count = tup->tx_bytes_requested - state.residue;
+ async_tx_ack(tup->tx_dma_desc);
+- spin_lock_irqsave(&tup->uport.lock, flags);
++ uart_port_lock_irqsave(&tup->uport, &flags);
+ uart_xmit_advance(&tup->uport, count);
+ tup->tx_in_progress = 0;
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&tup->uport);
+ tegra_uart_start_next_tx(tup);
+- spin_unlock_irqrestore(&tup->uport.lock, flags);
++ uart_port_unlock_irqrestore(&tup->uport, flags);
+ }
+
+ static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
+@@ -598,13 +598,13 @@ static unsigned int tegra_uart_tx_empty(
+ unsigned int ret = 0;
+ unsigned long flags;
+
+- spin_lock_irqsave(&u->lock, flags);
++ uart_port_lock_irqsave(u, &flags);
+ if (!tup->tx_in_progress) {
+ unsigned long lsr = tegra_uart_read(tup, UART_LSR);
+ if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
+ ret = TIOCSER_TEMT;
+ }
+- spin_unlock_irqrestore(&u->lock, flags);
++ uart_port_unlock_irqrestore(u, flags);
+ return ret;
+ }
+
+@@ -727,7 +727,7 @@ static void tegra_uart_rx_dma_complete(v
+ struct dma_tx_state state;
+ enum dma_status status;
+
+- spin_lock_irqsave(&u->lock, flags);
++ uart_port_lock_irqsave(u, &flags);
+
+ status = dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
+
+@@ -749,7 +749,7 @@ static void tegra_uart_rx_dma_complete(v
+ set_rts(tup, true);
+
+ done:
+- spin_unlock_irqrestore(&u->lock, flags);
++ uart_port_unlock_irqrestore(u, flags);
+ }
+
+ static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup)
+@@ -836,7 +836,7 @@ static irqreturn_t tegra_uart_isr(int ir
+ bool is_rx_int = false;
+ unsigned long flags;
+
+- spin_lock_irqsave(&u->lock, flags);
++ uart_port_lock_irqsave(u, &flags);
+ while (1) {
+ iir = tegra_uart_read(tup, UART_IIR);
+ if (iir & UART_IIR_NO_INT) {
+@@ -852,7 +852,7 @@ static irqreturn_t tegra_uart_isr(int ir
+ } else if (is_rx_start) {
+ tegra_uart_start_rx_dma(tup);
+ }
+- spin_unlock_irqrestore(&u->lock, flags);
++ uart_port_unlock_irqrestore(u, flags);
+ return IRQ_HANDLED;
+ }
+
+@@ -969,11 +969,11 @@ static void tegra_uart_hw_deinit(struct
+ }
+ }
+
+- spin_lock_irqsave(&tup->uport.lock, flags);
++ uart_port_lock_irqsave(&tup->uport, &flags);
+ /* Reset the Rx and Tx FIFOs */
+ tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
+ tup->current_baud = 0;
+- spin_unlock_irqrestore(&tup->uport.lock, flags);
++ uart_port_unlock_irqrestore(&tup->uport, flags);
+
+ tup->rx_in_progress = 0;
+ tup->tx_in_progress = 0;
+@@ -1292,7 +1292,7 @@ static void tegra_uart_set_termios(struc
+ int ret;
+
+ max_divider *= 16;
+- spin_lock_irqsave(&u->lock, flags);
++ uart_port_lock_irqsave(u, &flags);
+
+ /* Changing configuration, it is safe to stop any rx now */
+ if (tup->rts_active)
+@@ -1341,7 +1341,7 @@ static void tegra_uart_set_termios(struc
+ baud = uart_get_baud_rate(u, termios, oldtermios,
+ parent_clk_rate/max_divider,
+ parent_clk_rate/16);
+- spin_unlock_irqrestore(&u->lock, flags);
++ uart_port_unlock_irqrestore(u, flags);
+ ret = tegra_set_baudrate(tup, baud);
+ if (ret < 0) {
+ dev_err(tup->uport.dev, "Failed to set baud rate\n");
+@@ -1349,7 +1349,7 @@ static void tegra_uart_set_termios(struc
+ }
+ if (tty_termios_baud_rate(termios))
+ tty_termios_encode_baud_rate(termios, baud, baud);
+- spin_lock_irqsave(&u->lock, flags);
++ uart_port_lock_irqsave(u, &flags);
+
+ /* Flow control */
+ if (termios->c_cflag & CRTSCTS) {
+@@ -1382,7 +1382,7 @@ static void tegra_uart_set_termios(struc
+ if (termios->c_iflag & IGNBRK)
+ tup->uport.ignore_status_mask |= UART_LSR_BI;
+
+- spin_unlock_irqrestore(&u->lock, flags);
++ uart_port_unlock_irqrestore(u, flags);
+ }
+
+ static const char *tegra_uart_type(struct uart_port *u)
diff --git a/debian/patches-rt/0057-serial-core-Use-port-lock-wrappers.patch b/debian/patches-rt/0057-serial-core-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..33c2703827
--- /dev/null
+++ b/debian/patches-rt/0057-serial-core-Use-port-lock-wrappers.patch
@@ -0,0 +1,365 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:14 +0206
+Subject: [PATCH 057/134] serial: core: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-58-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/serial_core.c | 92 +++++++++++++++++++--------------------
+ drivers/tty/serial/serial_port.c | 4 -
+ 2 files changed, 48 insertions(+), 48 deletions(-)
+
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -79,7 +79,7 @@
+ ({ \
+ struct uart_port *__uport = uart_port_ref(state); \
+ if (__uport) \
+- spin_lock_irqsave(&__uport->lock, flags); \
++ uart_port_lock_irqsave(__uport, &flags); \
+ __uport; \
+ })
+
+@@ -87,7 +87,7 @@
+ ({ \
+ struct uart_port *__uport = uport; \
+ if (__uport) { \
+- spin_unlock_irqrestore(&__uport->lock, flags); \
++ uart_port_unlock_irqrestore(__uport, flags); \
+ uart_port_deref(__uport); \
+ } \
+ })
+@@ -179,12 +179,12 @@
+ unsigned long flags;
+ unsigned int old;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ old = port->mctrl;
+ port->mctrl = (old & ~clear) | set;
+ if (old != port->mctrl && !(port->rs485.flags & SER_RS485_ENABLED))
+ port->ops->set_mctrl(port, port->mctrl);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ #define uart_set_mctrl(port, set) uart_update_mctrl(port, set, 0)
+@@ -219,7 +219,7 @@
+ /*
+ * Set modem status enables based on termios cflag
+ */
+- spin_lock_irq(&uport->lock);
++ uart_port_lock_irq(uport);
+ if (termios->c_cflag & CRTSCTS)
+ uport->status |= UPSTAT_CTS_ENABLE;
+ else
+@@ -240,7 +240,7 @@
+ else
+ __uart_start(state);
+ }
+- spin_unlock_irq(&uport->lock);
++ uart_port_unlock_irq(uport);
+ }
+
+ /*
+@@ -702,11 +702,11 @@
+ if (port->ops->send_xchar)
+ port->ops->send_xchar(port, ch);
+ else {
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ port->x_char = ch;
+ if (ch)
+ port->ops->start_tx(port);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+ uart_port_deref(port);
+ }
+@@ -1085,9 +1085,9 @@
+
+ if (!tty_io_error(tty)) {
+ result = uport->mctrl;
+- spin_lock_irq(&uport->lock);
++ uart_port_lock_irq(uport);
+ result |= uport->ops->get_mctrl(uport);
+- spin_unlock_irq(&uport->lock);
++ uart_port_unlock_irq(uport);
+ }
+ out:
+ mutex_unlock(&port->mutex);
+@@ -1223,16 +1223,16 @@
+ uport = uart_port_ref(state);
+ if (!uport)
+ return -EIO;
+- spin_lock_irq(&uport->lock);
++ uart_port_lock_irq(uport);
+ memcpy(&cprev, &uport->icount, sizeof(struct uart_icount));
+ uart_enable_ms(uport);
+- spin_unlock_irq(&uport->lock);
++ uart_port_unlock_irq(uport);
+
+ add_wait_queue(&port->delta_msr_wait, &wait);
+ for (;;) {
+- spin_lock_irq(&uport->lock);
++ uart_port_lock_irq(uport);
+ memcpy(&cnow, &uport->icount, sizeof(struct uart_icount));
+- spin_unlock_irq(&uport->lock);
++ uart_port_unlock_irq(uport);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+@@ -1277,9 +1277,9 @@
+ uport = uart_port_ref(state);
+ if (!uport)
+ return -EIO;
+- spin_lock_irq(&uport->lock);
++ uart_port_lock_irq(uport);
+ memcpy(&cnow, &uport->icount, sizeof(struct uart_icount));
+- spin_unlock_irq(&uport->lock);
++ uart_port_unlock_irq(uport);
+ uart_port_deref(uport);
+
+ icount->cts = cnow.cts;
+@@ -1432,9 +1432,9 @@
+ uart_set_rs485_termination(port, rs485);
+ uart_set_rs485_rx_during_tx(port, rs485);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ ret = port->rs485_config(port, NULL, rs485);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ if (ret) {
+ memset(rs485, 0, sizeof(*rs485));
+ /* unset GPIOs */
+@@ -1451,9 +1451,9 @@
+ unsigned long flags;
+ struct serial_rs485 aux;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ aux = port->rs485;
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ if (copy_to_user(rs485, &aux, sizeof(aux)))
+ return -EFAULT;
+@@ -1481,7 +1481,7 @@
+ uart_set_rs485_termination(port, &rs485);
+ uart_set_rs485_rx_during_tx(port, &rs485);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ ret = port->rs485_config(port, &tty->termios, &rs485);
+ if (!ret) {
+ port->rs485 = rs485;
+@@ -1490,7 +1490,7 @@
+ if (!(rs485.flags & SER_RS485_ENABLED))
+ port->ops->set_mctrl(port, port->mctrl);
+ }
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ if (ret) {
+ /* restore old GPIO settings */
+ gpiod_set_value_cansleep(port->rs485_term_gpio,
+@@ -1515,9 +1515,9 @@
+ if (!port->iso7816_config)
+ return -ENOTTY;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ aux = port->iso7816;
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ if (copy_to_user(iso7816, &aux, sizeof(aux)))
+ return -EFAULT;
+@@ -1546,9 +1546,9 @@
+ if (iso7816.reserved[i])
+ return -EINVAL;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ ret = port->iso7816_config(port, &iso7816);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ if (ret)
+ return ret;
+
+@@ -1765,9 +1765,9 @@
+ if (WARN(!uport, "detached port still initialized!\n"))
+ return;
+
+- spin_lock_irq(&uport->lock);
++ uart_port_lock_irq(uport);
+ uport->ops->stop_rx(uport);
+- spin_unlock_irq(&uport->lock);
++ uart_port_unlock_irq(uport);
+
+ uart_port_shutdown(port);
+
+@@ -1781,10 +1781,10 @@
+ /*
+ * Free the transmit buffer.
+ */
+- spin_lock_irq(&uport->lock);
++ uart_port_lock_irq(uport);
+ buf = state->xmit.buf;
+ state->xmit.buf = NULL;
+- spin_unlock_irq(&uport->lock);
++ uart_port_unlock_irq(uport);
+
+ free_page((unsigned long)buf);
+
+@@ -1927,10 +1927,10 @@
+ */
+ if (WARN_ON(!uport))
+ return true;
+- spin_lock_irq(&uport->lock);
++ uart_port_lock_irq(uport);
+ uart_enable_ms(uport);
+ mctrl = uport->ops->get_mctrl(uport);
+- spin_unlock_irq(&uport->lock);
++ uart_port_unlock_irq(uport);
+ uart_port_deref(uport);
+
+ return mctrl & TIOCM_CAR;
+@@ -2047,9 +2047,9 @@
+ pm_state = state->pm_state;
+ if (pm_state != UART_PM_STATE_ON)
+ uart_change_pm(state, UART_PM_STATE_ON);
+- spin_lock_irq(&uport->lock);
++ uart_port_lock_irq(uport);
+ status = uport->ops->get_mctrl(uport);
+- spin_unlock_irq(&uport->lock);
++ uart_port_unlock_irq(uport);
+ if (pm_state != UART_PM_STATE_ON)
+ uart_change_pm(state, pm_state);
+
+@@ -2388,9 +2388,9 @@
+ */
+ if (!console_suspend_enabled && uart_console(uport)) {
+ if (uport->ops->start_rx) {
+- spin_lock_irq(&uport->lock);
++ uart_port_lock_irq(uport);
+ uport->ops->stop_rx(uport);
+- spin_unlock_irq(&uport->lock);
++ uart_port_unlock_irq(uport);
+ }
+ goto unlock;
+ }
+@@ -2405,7 +2405,7 @@
+ tty_port_set_suspended(port, true);
+ tty_port_set_initialized(port, false);
+
+- spin_lock_irq(&uport->lock);
++ uart_port_lock_irq(uport);
+ ops->stop_tx(uport);
+ if (!(uport->rs485.flags & SER_RS485_ENABLED))
+ ops->set_mctrl(uport, 0);
+@@ -2413,7 +2413,7 @@
+ mctrl = uport->mctrl;
+ uport->mctrl = 0;
+ ops->stop_rx(uport);
+- spin_unlock_irq(&uport->lock);
++ uart_port_unlock_irq(uport);
+
+ /*
+ * Wait for the transmitter to empty.
+@@ -2485,9 +2485,9 @@
+ uart_change_pm(state, UART_PM_STATE_ON);
+ uport->ops->set_termios(uport, &termios, NULL);
+ if (!console_suspend_enabled && uport->ops->start_rx) {
+- spin_lock_irq(&uport->lock);
++ uart_port_lock_irq(uport);
+ uport->ops->start_rx(uport);
+- spin_unlock_irq(&uport->lock);
++ uart_port_unlock_irq(uport);
+ }
+ if (console_suspend_enabled)
+ console_start(uport->cons);
+@@ -2498,10 +2498,10 @@
+ int ret;
+
+ uart_change_pm(state, UART_PM_STATE_ON);
+- spin_lock_irq(&uport->lock);
++ uart_port_lock_irq(uport);
+ if (!(uport->rs485.flags & SER_RS485_ENABLED))
+ ops->set_mctrl(uport, 0);
+- spin_unlock_irq(&uport->lock);
++ uart_port_unlock_irq(uport);
+ if (console_suspend_enabled || !uart_console(uport)) {
+ /* Protected by port mutex for now */
+ struct tty_struct *tty = port->tty;
+@@ -2511,11 +2511,11 @@
+ if (tty)
+ uart_change_line_settings(tty, state, NULL);
+ uart_rs485_config(uport);
+- spin_lock_irq(&uport->lock);
++ uart_port_lock_irq(uport);
+ if (!(uport->rs485.flags & SER_RS485_ENABLED))
+ ops->set_mctrl(uport, uport->mctrl);
+ ops->start_tx(uport);
+- spin_unlock_irq(&uport->lock);
++ uart_port_unlock_irq(uport);
+ tty_port_set_initialized(port, true);
+ } else {
+ /*
+@@ -2618,11 +2618,11 @@
+ * keep the DTR setting that is set in uart_set_options()
+ * We probably don't need a spinlock around this, but
+ */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ port->mctrl &= TIOCM_DTR;
+ if (!(port->rs485.flags & SER_RS485_ENABLED))
+ port->ops->set_mctrl(port, port->mctrl);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ uart_rs485_config(port);
+
+--- a/drivers/tty/serial/serial_port.c
++++ b/drivers/tty/serial/serial_port.c
+@@ -35,10 +35,10 @@
+ goto out;
+
+ /* Flush any pending TX for the port */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ if (__serial_port_busy(port))
+ port->ops->start_tx(port);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ out:
+ pm_runtime_mark_last_busy(dev);
diff --git a/debian/patches-rt/0058-serial-mctrl_gpio-Use-port-lock-wrappers.patch b/debian/patches-rt/0058-serial-mctrl_gpio-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..46719cb077
--- /dev/null
+++ b/debian/patches-rt/0058-serial-mctrl_gpio-Use-port-lock-wrappers.patch
@@ -0,0 +1,58 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:15 +0206
+Subject: [PATCH 058/134] serial: mctrl_gpio: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-59-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/serial_mctrl_gpio.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/tty/serial/serial_mctrl_gpio.c
++++ b/drivers/tty/serial/serial_mctrl_gpio.c
+@@ -184,7 +184,7 @@ static irqreturn_t mctrl_gpio_irq_handle
+
+ mctrl_gpio_get(gpios, &mctrl);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ mctrl_diff = mctrl ^ gpios->mctrl_prev;
+ gpios->mctrl_prev = mctrl;
+@@ -205,7 +205,7 @@ static irqreturn_t mctrl_gpio_irq_handle
+ wake_up_interruptible(&port->state->port.delta_msr_wait);
+ }
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return IRQ_HANDLED;
+ }
diff --git a/debian/patches-rt/0059-serial-txx9-Use-port-lock-wrappers.patch b/debian/patches-rt/0059-serial-txx9-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..bbe48684ba
--- /dev/null
+++ b/debian/patches-rt/0059-serial-txx9-Use-port-lock-wrappers.patch
@@ -0,0 +1,134 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:16 +0206
+Subject: [PATCH 059/134] serial: txx9: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-60-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/serial_txx9.c | 26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+--- a/drivers/tty/serial/serial_txx9.c
++++ b/drivers/tty/serial/serial_txx9.c
+@@ -335,13 +335,13 @@ static irqreturn_t serial_txx9_interrupt
+ unsigned int status;
+
+ while (1) {
+- spin_lock(&up->lock);
++ uart_port_lock(up);
+ status = sio_in(up, TXX9_SIDISR);
+ if (!(sio_in(up, TXX9_SIDICR) & TXX9_SIDICR_TIE))
+ status &= ~TXX9_SIDISR_TDIS;
+ if (!(status & (TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS |
+ TXX9_SIDISR_TOUT))) {
+- spin_unlock(&up->lock);
++ uart_port_unlock(up);
+ break;
+ }
+
+@@ -353,7 +353,7 @@ static irqreturn_t serial_txx9_interrupt
+ sio_mask(up, TXX9_SIDISR,
+ TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS |
+ TXX9_SIDISR_TOUT);
+- spin_unlock(&up->lock);
++ uart_port_unlock(up);
+
+ if (pass_counter++ > PASS_LIMIT)
+ break;
+@@ -367,9 +367,9 @@ static unsigned int serial_txx9_tx_empty
+ unsigned long flags;
+ unsigned int ret;
+
+- spin_lock_irqsave(&up->lock, flags);
++ uart_port_lock_irqsave(up, &flags);
+ ret = (sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS) ? TIOCSER_TEMT : 0;
+- spin_unlock_irqrestore(&up->lock, flags);
++ uart_port_unlock_irqrestore(up, flags);
+
+ return ret;
+ }
+@@ -399,12 +399,12 @@ static void serial_txx9_break_ctl(struct
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&up->lock, flags);
++ uart_port_lock_irqsave(up, &flags);
+ if (break_state == -1)
+ sio_set(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK);
+ else
+ sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK);
+- spin_unlock_irqrestore(&up->lock, flags);
++ uart_port_unlock_irqrestore(up, flags);
+ }
+
+ #if defined(CONFIG_SERIAL_TXX9_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
+@@ -517,9 +517,9 @@ static int serial_txx9_startup(struct ua
+ /*
+ * Now, initialize the UART
+ */
+- spin_lock_irqsave(&up->lock, flags);
++ uart_port_lock_irqsave(up, &flags);
+ serial_txx9_set_mctrl(up, up->mctrl);
+- spin_unlock_irqrestore(&up->lock, flags);
++ uart_port_unlock_irqrestore(up, flags);
+
+ /* Enable RX/TX */
+ sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RSDE | TXX9_SIFLCR_TSDE);
+@@ -541,9 +541,9 @@ static void serial_txx9_shutdown(struct
+ */
+ sio_out(up, TXX9_SIDICR, 0); /* disable all intrs */
+
+- spin_lock_irqsave(&up->lock, flags);
++ uart_port_lock_irqsave(up, &flags);
+ serial_txx9_set_mctrl(up, up->mctrl);
+- spin_unlock_irqrestore(&up->lock, flags);
++ uart_port_unlock_irqrestore(up, flags);
+
+ /*
+ * Disable break condition
+@@ -625,7 +625,7 @@ serial_txx9_set_termios(struct uart_port
+ * Ok, we're now changing the port state. Do it with
+ * interrupts disabled.
+ */
+- spin_lock_irqsave(&up->lock, flags);
++ uart_port_lock_irqsave(up, &flags);
+
+ /*
+ * Update the per-port timeout.
+@@ -676,7 +676,7 @@ serial_txx9_set_termios(struct uart_port
+ sio_out(up, TXX9_SIFCR, fcr);
+
+ serial_txx9_set_mctrl(up, up->mctrl);
+- spin_unlock_irqrestore(&up->lock, flags);
++ uart_port_unlock_irqrestore(up, flags);
+ }
+
+ static void
diff --git a/debian/patches-rt/0060-serial-sh-sci-Use-port-lock-wrappers.patch b/debian/patches-rt/0060-serial-sh-sci-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..6d039c6658
--- /dev/null
+++ b/debian/patches-rt/0060-serial-sh-sci-Use-port-lock-wrappers.patch
@@ -0,0 +1,302 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:17 +0206
+Subject: [PATCH 060/134] serial: sh-sci: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-61-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/sh-sci.c | 68 ++++++++++++++++++++++----------------------
+ 1 file changed, 34 insertions(+), 34 deletions(-)
+
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -1205,7 +1205,7 @@ static void sci_dma_tx_complete(void *ar
+
+ dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ uart_xmit_advance(port, s->tx_dma_len);
+
+@@ -1229,7 +1229,7 @@ static void sci_dma_tx_complete(void *ar
+ }
+ }
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ /* Locking: called with port lock held */
+@@ -1320,7 +1320,7 @@ static void sci_dma_rx_complete(void *ar
+ dev_dbg(port->dev, "%s(%d) active cookie %d\n", __func__, port->line,
+ s->active_rx);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ active = sci_dma_rx_find_active(s);
+ if (active >= 0)
+@@ -1347,20 +1347,20 @@ static void sci_dma_rx_complete(void *ar
+
+ dma_async_issue_pending(chan);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ dev_dbg(port->dev, "%s: cookie %d #%d, new active cookie %d\n",
+ __func__, s->cookie_rx[active], active, s->active_rx);
+ return;
+
+ fail:
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
+ /* Switch to PIO */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ dmaengine_terminate_async(chan);
+ sci_dma_rx_chan_invalidate(s);
+ sci_dma_rx_reenable_irq(s);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void sci_dma_tx_release(struct sci_port *s)
+@@ -1409,13 +1409,13 @@ static int sci_dma_rx_submit(struct sci_
+ fail:
+ /* Switch to PIO */
+ if (!port_lock_held)
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ if (i)
+ dmaengine_terminate_async(chan);
+ sci_dma_rx_chan_invalidate(s);
+ sci_start_rx(port);
+ if (!port_lock_held)
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ return -EAGAIN;
+ }
+
+@@ -1437,14 +1437,14 @@ static void sci_dma_tx_work_fn(struct wo
+ * transmit till the end, and then the rest. Take the port lock to get a
+ * consistent xmit buffer state.
+ */
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+ head = xmit->head;
+ tail = xmit->tail;
+ buf = s->tx_dma_addr + tail;
+ s->tx_dma_len = CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE);
+ if (!s->tx_dma_len) {
+ /* Transmit buffer has been flushed */
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+ return;
+ }
+
+@@ -1452,7 +1452,7 @@ static void sci_dma_tx_work_fn(struct wo
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+ dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n");
+ goto switch_to_pio;
+ }
+@@ -1464,12 +1464,12 @@ static void sci_dma_tx_work_fn(struct wo
+ desc->callback_param = s;
+ s->cookie_tx = dmaengine_submit(desc);
+ if (dma_submit_error(s->cookie_tx)) {
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+ dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
+ goto switch_to_pio;
+ }
+
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+ dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n",
+ __func__, xmit->buf, tail, head, s->cookie_tx);
+
+@@ -1477,10 +1477,10 @@ static void sci_dma_tx_work_fn(struct wo
+ return;
+
+ switch_to_pio:
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ s->chan_tx = NULL;
+ sci_start_tx(port);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ return;
+ }
+
+@@ -1497,17 +1497,17 @@ static enum hrtimer_restart sci_dma_rx_t
+
+ dev_dbg(port->dev, "DMA Rx timed out\n");
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ active = sci_dma_rx_find_active(s);
+ if (active < 0) {
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ return HRTIMER_NORESTART;
+ }
+
+ status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
+ if (status == DMA_COMPLETE) {
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ dev_dbg(port->dev, "Cookie %d #%d has already completed\n",
+ s->active_rx, active);
+
+@@ -1525,7 +1525,7 @@ static enum hrtimer_restart sci_dma_rx_t
+ */
+ status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
+ if (status == DMA_COMPLETE) {
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ dev_dbg(port->dev, "Transaction complete after DMA engine was stopped");
+ return HRTIMER_NORESTART;
+ }
+@@ -1546,7 +1546,7 @@ static enum hrtimer_restart sci_dma_rx_t
+
+ sci_dma_rx_reenable_irq(s);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return HRTIMER_NORESTART;
+ }
+@@ -1770,9 +1770,9 @@ static irqreturn_t sci_tx_interrupt(int
+ struct uart_port *port = ptr;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ sci_transmit_chars(port);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return IRQ_HANDLED;
+ }
+@@ -1786,11 +1786,11 @@ static irqreturn_t sci_tx_end_interrupt(
+ if (port->type != PORT_SCI)
+ return sci_tx_interrupt(irq, ptr);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ ctrl = serial_port_in(port, SCSCR);
+ ctrl &= ~(SCSCR_TE | SCSCR_TEIE);
+ serial_port_out(port, SCSCR, ctrl);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return IRQ_HANDLED;
+ }
+@@ -2187,7 +2187,7 @@ static void sci_break_ctl(struct uart_po
+ return;
+ }
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ scsptr = serial_port_in(port, SCSPTR);
+ scscr = serial_port_in(port, SCSCR);
+
+@@ -2201,7 +2201,7 @@ static void sci_break_ctl(struct uart_po
+
+ serial_port_out(port, SCSPTR, scsptr);
+ serial_port_out(port, SCSCR, scscr);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static int sci_startup(struct uart_port *port)
+@@ -2233,7 +2233,7 @@ static void sci_shutdown(struct uart_por
+ s->autorts = false;
+ mctrl_gpio_disable_ms(to_sci_port(port)->gpios);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ sci_stop_rx(port);
+ sci_stop_tx(port);
+ /*
+@@ -2243,7 +2243,7 @@ static void sci_shutdown(struct uart_por
+ scr = serial_port_in(port, SCSCR);
+ serial_port_out(port, SCSCR, scr &
+ (SCSCR_CKE1 | SCSCR_CKE0 | s->hscif_tot));
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ #ifdef CONFIG_SERIAL_SH_SCI_DMA
+ if (s->chan_rx_saved) {
+@@ -2545,7 +2545,7 @@ static void sci_set_termios(struct uart_
+ serial_port_out(port, SCCKS, sccks);
+ }
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ sci_reset(port);
+
+@@ -2667,7 +2667,7 @@ static void sci_set_termios(struct uart_
+ if ((termios->c_cflag & CREAD) != 0)
+ sci_start_rx(port);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ sci_port_disable(s);
+
+@@ -3052,9 +3052,9 @@ static void serial_console_write(struct
+ if (port->sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+- locked = spin_trylock_irqsave(&port->lock, flags);
++ locked = uart_port_trylock_irqsave(port, &flags);
+ else
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* first save SCSCR then disable interrupts, keep clock source */
+ ctrl = serial_port_in(port, SCSCR);
+@@ -3074,7 +3074,7 @@ static void serial_console_write(struct
+ serial_port_out(port, SCSCR, ctrl);
+
+ if (locked)
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static int serial_console_setup(struct console *co, char *options)
diff --git a/debian/patches-rt/0061-serial-sifive-Use-port-lock-wrappers.patch b/debian/patches-rt/0061-serial-sifive-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..a2babdbea9
--- /dev/null
+++ b/debian/patches-rt/0061-serial-sifive-Use-port-lock-wrappers.patch
@@ -0,0 +1,102 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:18 +0206
+Subject: [PATCH 061/134] serial: sifive: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-62-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/sifive.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/drivers/tty/serial/sifive.c
++++ b/drivers/tty/serial/sifive.c
+@@ -521,11 +521,11 @@ static irqreturn_t sifive_serial_irq(int
+ struct sifive_serial_port *ssp = dev_id;
+ u32 ip;
+
+- spin_lock(&ssp->port.lock);
++ uart_port_lock(&ssp->port);
+
+ ip = __ssp_readl(ssp, SIFIVE_SERIAL_IP_OFFS);
+ if (!ip) {
+- spin_unlock(&ssp->port.lock);
++ uart_port_unlock(&ssp->port);
+ return IRQ_NONE;
+ }
+
+@@ -534,7 +534,7 @@ static irqreturn_t sifive_serial_irq(int
+ if (ip & SIFIVE_SERIAL_IP_TXWM_MASK)
+ __ssp_transmit_chars(ssp);
+
+- spin_unlock(&ssp->port.lock);
++ uart_port_unlock(&ssp->port);
+
+ return IRQ_HANDLED;
+ }
+@@ -653,7 +653,7 @@ static void sifive_serial_set_termios(st
+ ssp->port.uartclk / 16);
+ __ssp_update_baud_rate(ssp, rate);
+
+- spin_lock_irqsave(&ssp->port.lock, flags);
++ uart_port_lock_irqsave(&ssp->port, &flags);
+
+ /* Update the per-port timeout */
+ uart_update_timeout(port, termios->c_cflag, rate);
+@@ -670,7 +670,7 @@ static void sifive_serial_set_termios(st
+ if (v != old_v)
+ __ssp_writel(v, SIFIVE_SERIAL_RXCTRL_OFFS, ssp);
+
+- spin_unlock_irqrestore(&ssp->port.lock, flags);
++ uart_port_unlock_irqrestore(&ssp->port, flags);
+ }
+
+ static void sifive_serial_release_port(struct uart_port *port)
+@@ -795,9 +795,9 @@ static void sifive_serial_console_write(
+ if (ssp->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+- locked = spin_trylock(&ssp->port.lock);
++ locked = uart_port_trylock(&ssp->port);
+ else
+- spin_lock(&ssp->port.lock);
++ uart_port_lock(&ssp->port);
+
+ ier = __ssp_readl(ssp, SIFIVE_SERIAL_IE_OFFS);
+ __ssp_writel(0, SIFIVE_SERIAL_IE_OFFS, ssp);
+@@ -807,7 +807,7 @@ static void sifive_serial_console_write(
+ __ssp_writel(ier, SIFIVE_SERIAL_IE_OFFS, ssp);
+
+ if (locked)
+- spin_unlock(&ssp->port.lock);
++ uart_port_unlock(&ssp->port);
+ local_irq_restore(flags);
+ }
+
diff --git a/debian/patches-rt/0062-serial-sprd-Use-port-lock-wrappers.patch b/debian/patches-rt/0062-serial-sprd-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..5be78db092
--- /dev/null
+++ b/debian/patches-rt/0062-serial-sprd-Use-port-lock-wrappers.patch
@@ -0,0 +1,162 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:19 +0206
+Subject: [PATCH 062/134] serial: sprd: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-63-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/sprd_serial.c | 30 +++++++++++++++---------------
+ 1 file changed, 15 insertions(+), 15 deletions(-)
+
+--- a/drivers/tty/serial/sprd_serial.c
++++ b/drivers/tty/serial/sprd_serial.c
+@@ -247,7 +247,7 @@ static void sprd_complete_tx_dma(void *d
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ dma_unmap_single(port->dev, sp->tx_dma.phys_addr,
+ sp->tx_dma.trans_len, DMA_TO_DEVICE);
+
+@@ -260,7 +260,7 @@ static void sprd_complete_tx_dma(void *d
+ sprd_tx_dma_config(port))
+ sp->tx_dma.trans_len = 0;
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static int sprd_uart_dma_submit(struct uart_port *port,
+@@ -429,13 +429,13 @@ static void sprd_complete_rx_dma(void *d
+ enum dma_status status;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ status = dmaengine_tx_status(sp->rx_dma.chn,
+ sp->rx_dma.cookie, &state);
+ if (status != DMA_COMPLETE) {
+ sprd_stop_rx(port);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ return;
+ }
+
+@@ -449,7 +449,7 @@ static void sprd_complete_rx_dma(void *d
+ if (sprd_start_dma_rx(port))
+ sprd_stop_rx(port);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static int sprd_start_dma_rx(struct uart_port *port)
+@@ -638,12 +638,12 @@ static irqreturn_t sprd_handle_irq(int i
+ struct uart_port *port = dev_id;
+ unsigned int ims;
+
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+
+ ims = serial_in(port, SPRD_IMSR);
+
+ if (!ims) {
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+ return IRQ_NONE;
+ }
+
+@@ -660,7 +660,7 @@ static irqreturn_t sprd_handle_irq(int i
+ if (ims & SPRD_IMSR_TX_FIFO_EMPTY)
+ sprd_tx(port);
+
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+
+ return IRQ_HANDLED;
+ }
+@@ -727,13 +727,13 @@ static int sprd_startup(struct uart_port
+ serial_out(port, SPRD_CTL1, fc);
+
+ /* enable interrupt */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ ien = serial_in(port, SPRD_IEN);
+ ien |= SPRD_IEN_BREAK_DETECT | SPRD_IEN_TIMEOUT;
+ if (!sp->rx_dma.enable)
+ ien |= SPRD_IEN_RX_FULL;
+ serial_out(port, SPRD_IEN, ien);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return 0;
+ }
+@@ -793,7 +793,7 @@ static void sprd_set_termios(struct uart
+ lcr |= SPRD_LCR_EVEN_PAR;
+ }
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* update the per-port timeout */
+ uart_update_timeout(port, termios->c_cflag, baud);
+@@ -837,7 +837,7 @@ static void sprd_set_termios(struct uart
+ fc |= RX_TOUT_THLD_DEF | RX_HFC_THLD_DEF;
+ serial_out(port, SPRD_CTL1, fc);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ /* Don't rewrite B0 */
+ if (tty_termios_baud_rate(termios))
+@@ -974,9 +974,9 @@ static void sprd_console_write(struct co
+ if (port->sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+- locked = spin_trylock_irqsave(&port->lock, flags);
++ locked = uart_port_trylock_irqsave(port, &flags);
+ else
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ uart_console_write(port, s, count, sprd_console_putchar);
+
+@@ -984,7 +984,7 @@ static void sprd_console_write(struct co
+ wait_for_xmitr(port);
+
+ if (locked)
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static int sprd_console_setup(struct console *co, char *options)
diff --git a/debian/patches-rt/0063-serial-st-asc-Use-port-lock-wrappers.patch b/debian/patches-rt/0063-serial-st-asc-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..99f4358e61
--- /dev/null
+++ b/debian/patches-rt/0063-serial-st-asc-Use-port-lock-wrappers.patch
@@ -0,0 +1,110 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:20 +0206
+Subject: [PATCH 063/134] serial: st-asc: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-64-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/st-asc.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+--- a/drivers/tty/serial/st-asc.c
++++ b/drivers/tty/serial/st-asc.c
+@@ -319,7 +319,7 @@ static irqreturn_t asc_interrupt(int irq
+ struct uart_port *port = ptr;
+ u32 status;
+
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+
+ status = asc_in(port, ASC_STA);
+
+@@ -334,7 +334,7 @@ static irqreturn_t asc_interrupt(int irq
+ asc_transmit_chars(port);
+ }
+
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+
+ return IRQ_HANDLED;
+ }
+@@ -452,10 +452,10 @@ static void asc_pm(struct uart_port *por
+ * we can come to turning it off. Note this is not called with
+ * the port spinlock held.
+ */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ ctl = asc_in(port, ASC_CTL) & ~ASC_CTL_RUN;
+ asc_out(port, ASC_CTL, ctl);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ clk_disable_unprepare(ascport->clk);
+ break;
+ }
+@@ -480,7 +480,7 @@ static void asc_set_termios(struct uart_
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
+ cflag = termios->c_cflag;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* read control register */
+ ctrl_val = asc_in(port, ASC_CTL);
+@@ -594,7 +594,7 @@ static void asc_set_termios(struct uart_
+ /* write final value and enable port */
+ asc_out(port, ASC_CTL, (ctrl_val | ASC_CTL_RUN));
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static const char *asc_type(struct uart_port *port)
+@@ -849,9 +849,9 @@ static void asc_console_write(struct con
+ if (port->sysrq)
+ locked = 0; /* asc_interrupt has already claimed the lock */
+ else if (oops_in_progress)
+- locked = spin_trylock_irqsave(&port->lock, flags);
++ locked = uart_port_trylock_irqsave(port, &flags);
+ else
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /*
+ * Disable interrupts so we don't get the IRQ line bouncing
+@@ -869,7 +869,7 @@ static void asc_console_write(struct con
+ asc_out(port, ASC_INTEN, intenable);
+
+ if (locked)
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static int asc_console_setup(struct console *co, char *options)
diff --git a/debian/patches-rt/0064-serial-stm32-Use-port-lock-wrappers.patch b/debian/patches-rt/0064-serial-stm32-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..cc261d43ee
--- /dev/null
+++ b/debian/patches-rt/0064-serial-stm32-Use-port-lock-wrappers.patch
@@ -0,0 +1,184 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:21 +0206
+Subject: [PATCH 064/134] serial: stm32: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-65-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/stm32-usart.c | 38 +++++++++++++++++++-------------------
+ 1 file changed, 19 insertions(+), 19 deletions(-)
+
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -537,7 +537,7 @@ static void stm32_usart_rx_dma_complete(
+ unsigned int size;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ size = stm32_usart_receive_chars(port, false);
+ uart_unlock_and_check_sysrq_irqrestore(port, flags);
+ if (size)
+@@ -643,9 +643,9 @@ static void stm32_usart_tx_dma_complete(
+ stm32_usart_tx_dma_terminate(stm32port);
+
+ /* Let's see if we have pending data to send */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ stm32_usart_transmit_chars(port);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
+@@ -889,7 +889,7 @@ static irqreturn_t stm32_usart_interrupt
+ if (!stm32_port->throttled) {
+ if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_started(stm32_port)) ||
+ ((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_started(stm32_port))) {
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+ size = stm32_usart_receive_chars(port, false);
+ uart_unlock_and_check_sysrq(port);
+ if (size)
+@@ -898,14 +898,14 @@ static irqreturn_t stm32_usart_interrupt
+ }
+
+ if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+ stm32_usart_transmit_chars(port);
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+ }
+
+ /* Receiver timeout irq for DMA RX */
+ if (stm32_usart_rx_dma_started(stm32_port) && !stm32_port->throttled) {
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+ size = stm32_usart_receive_chars(port, false);
+ uart_unlock_and_check_sysrq(port);
+ if (size)
+@@ -993,7 +993,7 @@ static void stm32_usart_throttle(struct
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /*
+ * Pause DMA transfer, so the RX data gets queued into the FIFO.
+@@ -1006,7 +1006,7 @@ static void stm32_usart_throttle(struct
+ stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
+
+ stm32_port->throttled = true;
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ /* Unthrottle the remote, the input buffer can now accept data. */
+@@ -1016,7 +1016,7 @@ static void stm32_usart_unthrottle(struc
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
+ if (stm32_port->cr3_irq)
+ stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);
+@@ -1030,7 +1030,7 @@ static void stm32_usart_unthrottle(struc
+ if (stm32_port->rx_ch)
+ stm32_usart_rx_dma_start_or_resume(port);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ /* Receive stop */
+@@ -1158,7 +1158,7 @@ static void stm32_usart_set_termios(stru
+
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 8);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
+ isr,
+@@ -1349,7 +1349,7 @@ static void stm32_usart_set_termios(stru
+ writel_relaxed(cr1, port->membase + ofs->cr1);
+
+ stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ /* Handle modem control interrupts */
+ if (UART_ENABLE_MS(port, termios->c_cflag))
+@@ -1399,9 +1399,9 @@ static void stm32_usart_pm(struct uart_p
+ pm_runtime_get_sync(port->dev);
+ break;
+ case UART_PM_STATE_OFF:
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ pm_runtime_put_sync(port->dev);
+ break;
+ }
+@@ -1884,9 +1884,9 @@ static void stm32_usart_console_write(st
+ int locked = 1;
+
+ if (oops_in_progress)
+- locked = spin_trylock_irqsave(&port->lock, flags);
++ locked = uart_port_trylock_irqsave(port, &flags);
+ else
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* Save and disable interrupts, enable the transmitter */
+ old_cr1 = readl_relaxed(port->membase + ofs->cr1);
+@@ -1900,7 +1900,7 @@ static void stm32_usart_console_write(st
+ writel_relaxed(old_cr1, port->membase + ofs->cr1);
+
+ if (locked)
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static int stm32_usart_console_setup(struct console *co, char *options)
+@@ -2035,7 +2035,7 @@ static int __maybe_unused stm32_usart_se
+ * low-power mode.
+ */
+ if (stm32_port->rx_ch) {
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ /* Poll data from DMA RX buffer if any */
+ if (!stm32_usart_rx_dma_pause(stm32_port))
+ size += stm32_usart_receive_chars(port, true);
diff --git a/debian/patches-rt/0065-serial-sunhv-Use-port-lock-wrappers.patch b/debian/patches-rt/0065-serial-sunhv-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..beacd2be79
--- /dev/null
+++ b/debian/patches-rt/0065-serial-sunhv-Use-port-lock-wrappers.patch
@@ -0,0 +1,149 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:22 +0206
+Subject: [PATCH 065/134] serial: sunhv: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-66-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/sunhv.c | 28 ++++++++++++++--------------
+ 1 file changed, 14 insertions(+), 14 deletions(-)
+
+--- a/drivers/tty/serial/sunhv.c
++++ b/drivers/tty/serial/sunhv.c
+@@ -217,10 +217,10 @@ static irqreturn_t sunhv_interrupt(int i
+ struct tty_port *tport;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ tport = receive_chars(port);
+ transmit_chars(port);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ if (tport)
+ tty_flip_buffer_push(tport);
+@@ -271,7 +271,7 @@ static void sunhv_send_xchar(struct uart
+ if (ch == __DISABLED_CHAR)
+ return;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ while (limit-- > 0) {
+ long status = sun4v_con_putchar(ch);
+@@ -280,7 +280,7 @@ static void sunhv_send_xchar(struct uart
+ udelay(1);
+ }
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ /* port->lock held by caller. */
+@@ -295,7 +295,7 @@ static void sunhv_break_ctl(struct uart_
+ unsigned long flags;
+ int limit = 10000;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ while (limit-- > 0) {
+ long status = sun4v_con_putchar(CON_BREAK);
+@@ -304,7 +304,7 @@ static void sunhv_break_ctl(struct uart_
+ udelay(1);
+ }
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+ }
+
+@@ -328,7 +328,7 @@ static void sunhv_set_termios(struct uar
+ unsigned int iflag, cflag;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ iflag = termios->c_iflag;
+ cflag = termios->c_cflag;
+@@ -343,7 +343,7 @@ static void sunhv_set_termios(struct uar
+ uart_update_timeout(port, cflag,
+ (port->uartclk / (16 * quot)));
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static const char *sunhv_type(struct uart_port *port)
+@@ -437,9 +437,9 @@ static void sunhv_console_write_paged(st
+ int locked = 1;
+
+ if (port->sysrq || oops_in_progress)
+- locked = spin_trylock_irqsave(&port->lock, flags);
++ locked = uart_port_trylock_irqsave(port, &flags);
+ else
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ while (n > 0) {
+ unsigned long ra = __pa(con_write_page);
+@@ -470,7 +470,7 @@ static void sunhv_console_write_paged(st
+ }
+
+ if (locked)
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static inline void sunhv_console_putchar(struct uart_port *port, char c)
+@@ -492,9 +492,9 @@ static void sunhv_console_write_bychar(s
+ int i, locked = 1;
+
+ if (port->sysrq || oops_in_progress)
+- locked = spin_trylock_irqsave(&port->lock, flags);
++ locked = uart_port_trylock_irqsave(port, &flags);
+ else
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ for (i = 0; i < n; i++) {
+ if (*s == '\n')
+@@ -503,7 +503,7 @@ static void sunhv_console_write_bychar(s
+ }
+
+ if (locked)
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static struct console sunhv_console = {
diff --git a/debian/patches-rt/0066-serial-sunplus-uart-Use-port-lock-wrappers.patch b/debian/patches-rt/0066-serial-sunplus-uart-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..10ae6c477c
--- /dev/null
+++ b/debian/patches-rt/0066-serial-sunplus-uart-Use-port-lock-wrappers.patch
@@ -0,0 +1,146 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:23 +0206
+Subject: [PATCH 066/134] serial: sunplus-uart: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-67-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/sunplus-uart.c | 26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+--- a/drivers/tty/serial/sunplus-uart.c
++++ b/drivers/tty/serial/sunplus-uart.c
+@@ -184,7 +184,7 @@ static void sunplus_break_ctl(struct uar
+ unsigned long flags;
+ unsigned int lcr;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ lcr = readl(port->membase + SUP_UART_LCR);
+
+@@ -195,7 +195,7 @@ static void sunplus_break_ctl(struct uar
+
+ writel(lcr, port->membase + SUP_UART_LCR);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void transmit_chars(struct uart_port *port)
+@@ -277,7 +277,7 @@ static irqreturn_t sunplus_uart_irq(int
+ struct uart_port *port = args;
+ unsigned int isc;
+
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+
+ isc = readl(port->membase + SUP_UART_ISC);
+
+@@ -287,7 +287,7 @@ static irqreturn_t sunplus_uart_irq(int
+ if (isc & SUP_UART_ISC_TX)
+ transmit_chars(port);
+
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+
+ return IRQ_HANDLED;
+ }
+@@ -302,14 +302,14 @@ static int sunplus_startup(struct uart_p
+ if (ret)
+ return ret;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ /* isc define Bit[7:4] int setting, Bit[3:0] int status
+ * isc register will clean Bit[3:0] int status after read
+ * only do a write to Bit[7:4] int setting
+ */
+ isc |= SUP_UART_ISC_RXM;
+ writel(isc, port->membase + SUP_UART_ISC);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return 0;
+ }
+@@ -318,13 +318,13 @@ static void sunplus_shutdown(struct uart
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ /* isc define Bit[7:4] int setting, Bit[3:0] int status
+ * isc register will clean Bit[3:0] int status after read
+ * only do a write to Bit[7:4] int setting
+ */
+ writel(0, port->membase + SUP_UART_ISC); /* disable all interrupt */
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ free_irq(port->irq, port);
+ }
+@@ -372,7 +372,7 @@ static void sunplus_set_termios(struct u
+ lcr |= UART_LCR_EPAR;
+ }
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+@@ -407,7 +407,7 @@ static void sunplus_set_termios(struct u
+ writel(div_l, port->membase + SUP_UART_DIV_L);
+ writel(lcr, port->membase + SUP_UART_LCR);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void sunplus_set_ldisc(struct uart_port *port, struct ktermios *termios)
+@@ -517,15 +517,15 @@ static void sunplus_console_write(struct
+ if (sunplus_console_ports[co->index]->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+- locked = spin_trylock(&sunplus_console_ports[co->index]->port.lock);
++ locked = uart_port_trylock(&sunplus_console_ports[co->index]->port);
+ else
+- spin_lock(&sunplus_console_ports[co->index]->port.lock);
++ uart_port_lock(&sunplus_console_ports[co->index]->port);
+
+ uart_console_write(&sunplus_console_ports[co->index]->port, s, count,
+ sunplus_uart_console_putchar);
+
+ if (locked)
+- spin_unlock(&sunplus_console_ports[co->index]->port.lock);
++ uart_port_unlock(&sunplus_console_ports[co->index]->port);
+
+ local_irq_restore(flags);
+ }
diff --git a/debian/patches-rt/0067-serial-sunsab-Use-port-lock-wrappers.patch b/debian/patches-rt/0067-serial-sunsab-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..b96d5080cd
--- /dev/null
+++ b/debian/patches-rt/0067-serial-sunsab-Use-port-lock-wrappers.patch
@@ -0,0 +1,176 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:24 +0206
+Subject: [PATCH 067/134] serial: sunsab: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-68-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/sunsab.c | 34 +++++++++++++++++-----------------
+ 1 file changed, 17 insertions(+), 17 deletions(-)
+
+--- a/drivers/tty/serial/sunsab.c
++++ b/drivers/tty/serial/sunsab.c
+@@ -310,7 +310,7 @@ static irqreturn_t sunsab_interrupt(int
+ unsigned long flags;
+ unsigned char gis;
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ status.stat = 0;
+ gis = readb(&up->regs->r.gis) >> up->gis_shift;
+@@ -331,7 +331,7 @@ static irqreturn_t sunsab_interrupt(int
+ transmit_chars(up, &status);
+ }
+
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+
+ if (port)
+ tty_flip_buffer_push(port);
+@@ -473,12 +473,12 @@ static void sunsab_send_xchar(struct uar
+ if (ch == __DISABLED_CHAR)
+ return;
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ sunsab_tec_wait(up);
+ writeb(ch, &up->regs->w.tic);
+
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ }
+
+ /* port->lock held by caller. */
+@@ -499,7 +499,7 @@ static void sunsab_break_ctl(struct uart
+ unsigned long flags;
+ unsigned char val;
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ val = up->cached_dafo;
+ if (break_state)
+@@ -512,7 +512,7 @@ static void sunsab_break_ctl(struct uart
+ if (test_bit(SAB82532_XPR, &up->irqflags))
+ sunsab_tx_idle(up);
+
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ }
+
+ /* port->lock is not held. */
+@@ -527,7 +527,7 @@ static int sunsab_startup(struct uart_po
+ if (err)
+ return err;
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ /*
+ * Wait for any commands or immediate characters
+@@ -582,7 +582,7 @@ static int sunsab_startup(struct uart_po
+ set_bit(SAB82532_ALLS, &up->irqflags);
+ set_bit(SAB82532_XPR, &up->irqflags);
+
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+
+ return 0;
+ }
+@@ -594,7 +594,7 @@ static void sunsab_shutdown(struct uart_
+ container_of(port, struct uart_sunsab_port, port);
+ unsigned long flags;
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ /* Disable Interrupts */
+ up->interrupt_mask0 = 0xff;
+@@ -628,7 +628,7 @@ static void sunsab_shutdown(struct uart_
+ writeb(tmp, &up->regs->rw.ccr0);
+ #endif
+
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ free_irq(up->port.irq, up);
+ }
+
+@@ -779,9 +779,9 @@ static void sunsab_set_termios(struct ua
+ unsigned int baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
+ unsigned int quot = uart_get_divisor(port, baud);
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+ sunsab_convert_to_sab(up, termios->c_cflag, termios->c_iflag, baud, quot);
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ }
+
+ static const char *sunsab_type(struct uart_port *port)
+@@ -857,15 +857,15 @@ static void sunsab_console_write(struct
+ int locked = 1;
+
+ if (up->port.sysrq || oops_in_progress)
+- locked = spin_trylock_irqsave(&up->port.lock, flags);
++ locked = uart_port_trylock_irqsave(&up->port, &flags);
+ else
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ uart_console_write(&up->port, s, n, sunsab_console_putchar);
+ sunsab_tec_wait(up);
+
+ if (locked)
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ }
+
+ static int sunsab_console_setup(struct console *con, char *options)
+@@ -914,7 +914,7 @@ static int sunsab_console_setup(struct c
+ */
+ sunsab_startup(&up->port);
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ /*
+ * Finally, enable interrupts
+@@ -932,7 +932,7 @@ static int sunsab_console_setup(struct c
+ sunsab_convert_to_sab(up, con->cflag, 0, baud, quot);
+ sunsab_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS);
+
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+
+ return 0;
+ }
diff --git a/debian/patches-rt/0068-serial-sunsu-Use-port-lock-wrappers.patch b/debian/patches-rt/0068-serial-sunsu-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..77945a1edb
--- /dev/null
+++ b/debian/patches-rt/0068-serial-sunsu-Use-port-lock-wrappers.patch
@@ -0,0 +1,219 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:25 +0206
+Subject: [PATCH 068/134] serial: sunsu: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-69-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/sunsu.c | 46 ++++++++++++++++++++++-----------------------
+ 1 file changed, 23 insertions(+), 23 deletions(-)
+
+--- a/drivers/tty/serial/sunsu.c
++++ b/drivers/tty/serial/sunsu.c
+@@ -212,9 +212,9 @@ static void enable_rsa(struct uart_sunsu
+ {
+ if (up->port.type == PORT_RSA) {
+ if (up->port.uartclk != SERIAL_RSA_BAUD_BASE * 16) {
+- spin_lock_irq(&up->port.lock);
++ uart_port_lock_irq(&up->port);
+ __enable_rsa(up);
+- spin_unlock_irq(&up->port.lock);
++ uart_port_unlock_irq(&up->port);
+ }
+ if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16)
+ serial_outp(up, UART_RSA_FRR, 0);
+@@ -234,7 +234,7 @@ static void disable_rsa(struct uart_suns
+
+ if (up->port.type == PORT_RSA &&
+ up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) {
+- spin_lock_irq(&up->port.lock);
++ uart_port_lock_irq(&up->port);
+
+ mode = serial_inp(up, UART_RSA_MSR);
+ result = !(mode & UART_RSA_MSR_FIFO);
+@@ -247,7 +247,7 @@ static void disable_rsa(struct uart_suns
+
+ if (result)
+ up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16;
+- spin_unlock_irq(&up->port.lock);
++ uart_port_unlock_irq(&up->port);
+ }
+ }
+ #endif /* CONFIG_SERIAL_8250_RSA */
+@@ -311,10 +311,10 @@ static void sunsu_enable_ms(struct uart_
+ container_of(port, struct uart_sunsu_port, port);
+ unsigned long flags;
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+ up->ier |= UART_IER_MSI;
+ serial_out(up, UART_IER, up->ier);
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ }
+
+ static void
+@@ -456,7 +456,7 @@ static irqreturn_t sunsu_serial_interrup
+ unsigned long flags;
+ unsigned char status;
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ do {
+ status = serial_inp(up, UART_LSR);
+@@ -470,7 +470,7 @@ static irqreturn_t sunsu_serial_interrup
+
+ } while (!(serial_in(up, UART_IIR) & UART_IIR_NO_INT));
+
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+
+ return IRQ_HANDLED;
+ }
+@@ -545,9 +545,9 @@ static unsigned int sunsu_tx_empty(struc
+ unsigned long flags;
+ unsigned int ret;
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+ ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+
+ return ret;
+ }
+@@ -599,13 +599,13 @@ static void sunsu_break_ctl(struct uart_
+ container_of(port, struct uart_sunsu_port, port);
+ unsigned long flags;
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+ if (break_state == -1)
+ up->lcr |= UART_LCR_SBC;
+ else
+ up->lcr &= ~UART_LCR_SBC;
+ serial_out(up, UART_LCR, up->lcr);
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ }
+
+ static int sunsu_startup(struct uart_port *port)
+@@ -683,12 +683,12 @@ static int sunsu_startup(struct uart_por
+ */
+ serial_outp(up, UART_LCR, UART_LCR_WLEN8);
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ up->port.mctrl |= TIOCM_OUT2;
+
+ sunsu_set_mctrl(&up->port, up->port.mctrl);
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+
+ /*
+ * Finally, enable interrupts. Note: Modem status interrupts
+@@ -731,7 +731,7 @@ static void sunsu_shutdown(struct uart_p
+ up->ier = 0;
+ serial_outp(up, UART_IER, 0);
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+ if (up->port.flags & UPF_FOURPORT) {
+ /* reset interrupts on the AST Fourport board */
+ inb((up->port.iobase & 0xfe0) | 0x1f);
+@@ -740,7 +740,7 @@ static void sunsu_shutdown(struct uart_p
+ up->port.mctrl &= ~TIOCM_OUT2;
+
+ sunsu_set_mctrl(&up->port, up->port.mctrl);
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+
+ /*
+ * Disable break condition and FIFOs
+@@ -826,7 +826,7 @@ sunsu_change_speed(struct uart_port *por
+ * Ok, we're now changing the port state. Do it with
+ * interrupts disabled.
+ */
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ /*
+ * Update the per-port timeout.
+@@ -891,7 +891,7 @@ sunsu_change_speed(struct uart_port *por
+
+ up->cflag = cflag;
+
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ }
+
+ static void
+@@ -1038,7 +1038,7 @@ static void sunsu_autoconfig(struct uart
+ up->type_probed = PORT_UNKNOWN;
+ up->port.iotype = UPIO_MEM;
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ if (!(up->port.flags & UPF_BUGGY_UART)) {
+ /*
+@@ -1173,7 +1173,7 @@ static void sunsu_autoconfig(struct uart
+ serial_outp(up, UART_IER, 0);
+
+ out:
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ }
+
+ static struct uart_driver sunsu_reg = {
+@@ -1298,9 +1298,9 @@ static void sunsu_console_write(struct c
+ int locked = 1;
+
+ if (up->port.sysrq || oops_in_progress)
+- locked = spin_trylock_irqsave(&up->port.lock, flags);
++ locked = uart_port_trylock_irqsave(&up->port, &flags);
+ else
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ /*
+ * First save the UER then disable the interrupts
+@@ -1318,7 +1318,7 @@ static void sunsu_console_write(struct c
+ serial_out(up, UART_IER, ier);
+
+ if (locked)
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ }
+
+ /*
diff --git a/debian/patches-rt/0069-serial-sunzilog-Use-port-lock-wrappers.patch b/debian/patches-rt/0069-serial-sunzilog-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..dca4327a46
--- /dev/null
+++ b/debian/patches-rt/0069-serial-sunzilog-Use-port-lock-wrappers.patch
@@ -0,0 +1,211 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:26 +0206
+Subject: [PATCH 069/134] serial: sunzilog: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-70-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/sunzilog.c | 42 +++++++++++++++++++++---------------------
+ 1 file changed, 21 insertions(+), 21 deletions(-)
+
+--- a/drivers/tty/serial/sunzilog.c
++++ b/drivers/tty/serial/sunzilog.c
+@@ -531,7 +531,7 @@ static irqreturn_t sunzilog_interrupt(in
+ struct tty_port *port;
+ unsigned char r3;
+
+- spin_lock(&up->port.lock);
++ uart_port_lock(&up->port);
+ r3 = read_zsreg(channel, R3);
+
+ /* Channel A */
+@@ -548,7 +548,7 @@ static irqreturn_t sunzilog_interrupt(in
+ if (r3 & CHATxIP)
+ sunzilog_transmit_chars(up, channel);
+ }
+- spin_unlock(&up->port.lock);
++ uart_port_unlock(&up->port);
+
+ if (port)
+ tty_flip_buffer_push(port);
+@@ -557,7 +557,7 @@ static irqreturn_t sunzilog_interrupt(in
+ up = up->next;
+ channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
+
+- spin_lock(&up->port.lock);
++ uart_port_lock(&up->port);
+ port = NULL;
+ if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
+ writeb(RES_H_IUS, &channel->control);
+@@ -571,7 +571,7 @@ static irqreturn_t sunzilog_interrupt(in
+ if (r3 & CHBTxIP)
+ sunzilog_transmit_chars(up, channel);
+ }
+- spin_unlock(&up->port.lock);
++ uart_port_unlock(&up->port);
+
+ if (port)
+ tty_flip_buffer_push(port);
+@@ -604,11 +604,11 @@ static unsigned int sunzilog_tx_empty(st
+ unsigned char status;
+ unsigned int ret;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ status = sunzilog_read_channel_status(port);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ if (status & Tx_BUF_EMP)
+ ret = TIOCSER_TEMT;
+@@ -764,7 +764,7 @@ static void sunzilog_break_ctl(struct ua
+ else
+ clear_bits |= SND_BRK;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ new_reg = (up->curregs[R5] | set_bits) & ~clear_bits;
+ if (new_reg != up->curregs[R5]) {
+@@ -774,7 +774,7 @@ static void sunzilog_break_ctl(struct ua
+ write_zsreg(channel, R5, up->curregs[R5]);
+ }
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void __sunzilog_startup(struct uart_sunzilog_port *up)
+@@ -800,9 +800,9 @@ static int sunzilog_startup(struct uart_
+ if (ZS_IS_CONS(up))
+ return 0;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ __sunzilog_startup(up);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ return 0;
+ }
+
+@@ -840,7 +840,7 @@ static void sunzilog_shutdown(struct uar
+ if (ZS_IS_CONS(up))
+ return;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ channel = ZILOG_CHANNEL_FROM_PORT(port);
+
+@@ -853,7 +853,7 @@ static void sunzilog_shutdown(struct uar
+ up->curregs[R5] &= ~SND_BRK;
+ sunzilog_maybe_update_regs(up, channel);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ /* Shared by TTY driver and serial console setup. The port lock is held
+@@ -945,7 +945,7 @@ sunzilog_set_termios(struct uart_port *p
+
+ baud = uart_get_baud_rate(port, termios, old, 1200, 76800);
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
+
+@@ -962,7 +962,7 @@ sunzilog_set_termios(struct uart_port *p
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ }
+
+ static const char *sunzilog_type(struct uart_port *port)
+@@ -1201,15 +1201,15 @@ sunzilog_console_write(struct console *c
+ int locked = 1;
+
+ if (up->port.sysrq || oops_in_progress)
+- locked = spin_trylock_irqsave(&up->port.lock, flags);
++ locked = uart_port_trylock_irqsave(&up->port, &flags);
+ else
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ uart_console_write(&up->port, s, count, sunzilog_putchar);
+ udelay(2);
+
+ if (locked)
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ }
+
+ static int __init sunzilog_console_setup(struct console *con, char *options)
+@@ -1244,7 +1244,7 @@ static int __init sunzilog_console_setup
+
+ brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ up->curregs[R15] |= BRKIE;
+ sunzilog_convert_to_zs(up, con->cflag, 0, brg);
+@@ -1252,7 +1252,7 @@ static int __init sunzilog_console_setup
+ sunzilog_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS);
+ __sunzilog_startup(up);
+
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+
+ return 0;
+ }
+@@ -1333,7 +1333,7 @@ static void sunzilog_init_hw(struct uart
+
+ channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+ if (ZS_IS_CHANNEL_A(up)) {
+ write_zsreg(channel, R9, FHWRES);
+ ZSDELAY_LONG();
+@@ -1383,7 +1383,7 @@ static void sunzilog_init_hw(struct uart
+ write_zsreg(channel, R9, up->curregs[R9]);
+ }
+
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+
+ #ifdef CONFIG_SERIO
+ if (up->flags & (SUNZILOG_FLAG_CONS_KEYB |
diff --git a/debian/patches-rt/0070-serial-timbuart-Use-port-lock-wrappers.patch b/debian/patches-rt/0070-serial-timbuart-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..2d5a3d95e3
--- /dev/null
+++ b/debian/patches-rt/0070-serial-timbuart-Use-port-lock-wrappers.patch
@@ -0,0 +1,71 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:27 +0206
+Subject: [PATCH 070/134] serial: timbuart: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-71-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/timbuart.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/tty/serial/timbuart.c
++++ b/drivers/tty/serial/timbuart.c
+@@ -174,7 +174,7 @@ static void timbuart_tasklet(struct task
+ struct timbuart_port *uart = from_tasklet(uart, t, tasklet);
+ u32 isr, ier = 0;
+
+- spin_lock(&uart->port.lock);
++ uart_port_lock(&uart->port);
+
+ isr = ioread32(uart->port.membase + TIMBUART_ISR);
+ dev_dbg(uart->port.dev, "%s ISR: %x\n", __func__, isr);
+@@ -189,7 +189,7 @@ static void timbuart_tasklet(struct task
+
+ iowrite32(ier, uart->port.membase + TIMBUART_IER);
+
+- spin_unlock(&uart->port.lock);
++ uart_port_unlock(&uart->port);
+ dev_dbg(uart->port.dev, "%s leaving\n", __func__);
+ }
+
+@@ -295,10 +295,10 @@ static void timbuart_set_termios(struct
+ tty_termios_copy_hw(termios, old);
+ tty_termios_encode_baud_rate(termios, baud, baud);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ iowrite8((u8)bindex, port->membase + TIMBUART_BAUDRATE);
+ uart_update_timeout(port, termios->c_cflag, baud);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static const char *timbuart_type(struct uart_port *port)
diff --git a/debian/patches-rt/0071-serial-uartlite-Use-port-lock-wrappers.patch b/debian/patches-rt/0071-serial-uartlite-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..13c64d7eed
--- /dev/null
+++ b/debian/patches-rt/0071-serial-uartlite-Use-port-lock-wrappers.patch
@@ -0,0 +1,105 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:28 +0206
+Subject: [PATCH 071/134] serial: uartlite: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-72-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/uartlite.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+--- a/drivers/tty/serial/uartlite.c
++++ b/drivers/tty/serial/uartlite.c
+@@ -216,11 +216,11 @@ static irqreturn_t ulite_isr(int irq, vo
+ unsigned long flags;
+
+ do {
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ stat = uart_in32(ULITE_STATUS, port);
+ busy = ulite_receive(port, stat);
+ busy |= ulite_transmit(port, stat);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ n++;
+ } while (busy);
+
+@@ -238,9 +238,9 @@ static unsigned int ulite_tx_empty(struc
+ unsigned long flags;
+ unsigned int ret;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ ret = uart_in32(ULITE_STATUS, port);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return ret & ULITE_STATUS_TXEMPTY ? TIOCSER_TEMT : 0;
+ }
+@@ -323,7 +323,7 @@ static void ulite_set_termios(struct uar
+ termios->c_cflag |= pdata->cflags & (PARENB | PARODD | CSIZE);
+ tty_termios_encode_baud_rate(termios, pdata->baud, pdata->baud);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ port->read_status_mask = ULITE_STATUS_RXVALID | ULITE_STATUS_OVERRUN
+ | ULITE_STATUS_TXFULL;
+@@ -346,7 +346,7 @@ static void ulite_set_termios(struct uar
+ /* update timeout */
+ uart_update_timeout(port, termios->c_cflag, pdata->baud);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static const char *ulite_type(struct uart_port *port)
+@@ -495,9 +495,9 @@ static void ulite_console_write(struct c
+ int locked = 1;
+
+ if (oops_in_progress) {
+- locked = spin_trylock_irqsave(&port->lock, flags);
++ locked = uart_port_trylock_irqsave(port, &flags);
+ } else
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* save and disable interrupt */
+ ier = uart_in32(ULITE_STATUS, port) & ULITE_STATUS_IE;
+@@ -512,7 +512,7 @@ static void ulite_console_write(struct c
+ uart_out32(ULITE_CONTROL_IE, ULITE_CONTROL, port);
+
+ if (locked)
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static int ulite_console_setup(struct console *co, char *options)
diff --git a/debian/patches-rt/0072-serial-ucc_uart-Use-port-lock-wrappers.patch b/debian/patches-rt/0072-serial-ucc_uart-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..30681434a7
--- /dev/null
+++ b/debian/patches-rt/0072-serial-ucc_uart-Use-port-lock-wrappers.patch
@@ -0,0 +1,59 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:29 +0206
+Subject: [PATCH 072/134] serial: ucc_uart: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Timur Tabi <timur@kernel.org>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-73-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/ucc_uart.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/tty/serial/ucc_uart.c
++++ b/drivers/tty/serial/ucc_uart.c
+@@ -931,7 +931,7 @@ static void qe_uart_set_termios(struct u
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
+
+ /* Do we really need a spinlock here? */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* Update the per-port timeout. */
+ uart_update_timeout(port, termios->c_cflag, baud);
+@@ -949,7 +949,7 @@ static void qe_uart_set_termios(struct u
+ qe_setbrg(qe_port->us_info.tx_clock, baud, 16);
+ }
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ /*
diff --git a/debian/patches-rt/0073-serial-vt8500-Use-port-lock-wrappers.patch b/debian/patches-rt/0073-serial-vt8500-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..7209ed0661
--- /dev/null
+++ b/debian/patches-rt/0073-serial-vt8500-Use-port-lock-wrappers.patch
@@ -0,0 +1,76 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:30 +0206
+Subject: [PATCH 073/134] serial: vt8500: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-74-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/vt8500_serial.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/tty/serial/vt8500_serial.c
++++ b/drivers/tty/serial/vt8500_serial.c
+@@ -227,7 +227,7 @@ static irqreturn_t vt8500_irq(int irq, v
+ struct uart_port *port = dev_id;
+ unsigned long isr;
+
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+ isr = vt8500_read(port, VT8500_URISR);
+
+ /* Acknowledge active status bits */
+@@ -240,7 +240,7 @@ static irqreturn_t vt8500_irq(int irq, v
+ if (isr & TCTS)
+ handle_delta_cts(port);
+
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+
+ return IRQ_HANDLED;
+ }
+@@ -342,7 +342,7 @@ static void vt8500_set_termios(struct ua
+ unsigned int baud, lcr;
+ unsigned int loops = 1000;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* calculate and set baud rate */
+ baud = uart_get_baud_rate(port, termios, old, 900, 921600);
+@@ -410,7 +410,7 @@ static void vt8500_set_termios(struct ua
+ vt8500_write(&vt8500_port->uart, 0x881, VT8500_URFCR);
+ vt8500_write(&vt8500_port->uart, vt8500_port->ier, VT8500_URIER);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static const char *vt8500_type(struct uart_port *port)
diff --git a/debian/patches-rt/0074-serial-xilinx_uartps-Use-port-lock-wrappers.patch b/debian/patches-rt/0074-serial-xilinx_uartps-Use-port-lock-wrappers.patch
new file mode 100644
index 0000000000..307ee6df75
--- /dev/null
+++ b/debian/patches-rt/0074-serial-xilinx_uartps-Use-port-lock-wrappers.patch
@@ -0,0 +1,276 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Sep 2023 20:44:31 +0206
+Subject: [PATCH 074/134] serial: xilinx_uartps: Use port lock wrappers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20230914183831.587273-75-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/xilinx_uartps.c | 56 ++++++++++++++++++-------------------
+ 1 file changed, 28 insertions(+), 28 deletions(-)
+
+--- a/drivers/tty/serial/xilinx_uartps.c
++++ b/drivers/tty/serial/xilinx_uartps.c
+@@ -346,7 +346,7 @@ static irqreturn_t cdns_uart_isr(int irq
+ struct uart_port *port = (struct uart_port *)dev_id;
+ unsigned int isrstatus;
+
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+
+ /* Read the interrupt status register to determine which
+ * interrupt(s) is/are active and clear them.
+@@ -369,7 +369,7 @@ static irqreturn_t cdns_uart_isr(int irq
+ !(readl(port->membase + CDNS_UART_CR) & CDNS_UART_CR_RX_DIS))
+ cdns_uart_handle_rx(dev_id, isrstatus);
+
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+ return IRQ_HANDLED;
+ }
+
+@@ -506,14 +506,14 @@ static int cdns_uart_clk_notifier_cb(str
+ return NOTIFY_BAD;
+ }
+
+- spin_lock_irqsave(&cdns_uart->port->lock, flags);
++ uart_port_lock_irqsave(cdns_uart->port, &flags);
+
+ /* Disable the TX and RX to set baud rate */
+ ctrl_reg = readl(port->membase + CDNS_UART_CR);
+ ctrl_reg |= CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS;
+ writel(ctrl_reg, port->membase + CDNS_UART_CR);
+
+- spin_unlock_irqrestore(&cdns_uart->port->lock, flags);
++ uart_port_unlock_irqrestore(cdns_uart->port, flags);
+
+ return NOTIFY_OK;
+ }
+@@ -523,7 +523,7 @@ static int cdns_uart_clk_notifier_cb(str
+ * frequency.
+ */
+
+- spin_lock_irqsave(&cdns_uart->port->lock, flags);
++ uart_port_lock_irqsave(cdns_uart->port, &flags);
+
+ locked = 1;
+ port->uartclk = ndata->new_rate;
+@@ -533,7 +533,7 @@ static int cdns_uart_clk_notifier_cb(str
+ fallthrough;
+ case ABORT_RATE_CHANGE:
+ if (!locked)
+- spin_lock_irqsave(&cdns_uart->port->lock, flags);
++ uart_port_lock_irqsave(cdns_uart->port, &flags);
+
+ /* Set TX/RX Reset */
+ ctrl_reg = readl(port->membase + CDNS_UART_CR);
+@@ -555,7 +555,7 @@ static int cdns_uart_clk_notifier_cb(str
+ ctrl_reg |= CDNS_UART_CR_TX_EN | CDNS_UART_CR_RX_EN;
+ writel(ctrl_reg, port->membase + CDNS_UART_CR);
+
+- spin_unlock_irqrestore(&cdns_uart->port->lock, flags);
++ uart_port_unlock_irqrestore(cdns_uart->port, flags);
+
+ return NOTIFY_OK;
+ default:
+@@ -652,7 +652,7 @@ static void cdns_uart_break_ctl(struct u
+ unsigned int status;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ status = readl(port->membase + CDNS_UART_CR);
+
+@@ -664,7 +664,7 @@ static void cdns_uart_break_ctl(struct u
+ writel(CDNS_UART_CR_STOPBRK | status,
+ port->membase + CDNS_UART_CR);
+ }
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ /**
+@@ -683,7 +683,7 @@ static void cdns_uart_set_termios(struct
+ unsigned long flags;
+ unsigned int ctrl_reg, mode_reg;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* Disable the TX and RX to set baud rate */
+ ctrl_reg = readl(port->membase + CDNS_UART_CR);
+@@ -794,7 +794,7 @@ static void cdns_uart_set_termios(struct
+ cval &= ~CDNS_UART_MODEMCR_FCM;
+ writel(cval, port->membase + CDNS_UART_MODEMCR);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ /**
+@@ -813,7 +813,7 @@ static int cdns_uart_startup(struct uart
+
+ is_brk_support = cdns_uart->quirks & CDNS_UART_RXBS_SUPPORT;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* Disable the TX and RX */
+ writel(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS,
+@@ -861,7 +861,7 @@ static int cdns_uart_startup(struct uart
+ writel(readl(port->membase + CDNS_UART_ISR),
+ port->membase + CDNS_UART_ISR);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ ret = request_irq(port->irq, cdns_uart_isr, 0, CDNS_UART_NAME, port);
+ if (ret) {
+@@ -889,7 +889,7 @@ static void cdns_uart_shutdown(struct ua
+ int status;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* Disable interrupts */
+ status = readl(port->membase + CDNS_UART_IMR);
+@@ -900,7 +900,7 @@ static void cdns_uart_shutdown(struct ua
+ writel(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS,
+ port->membase + CDNS_UART_CR);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ free_irq(port->irq, port);
+ }
+@@ -1050,7 +1050,7 @@ static int cdns_uart_poll_get_char(struc
+ int c;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* Check if FIFO is empty */
+ if (readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_RXEMPTY)
+@@ -1058,7 +1058,7 @@ static int cdns_uart_poll_get_char(struc
+ else /* Read a character */
+ c = (unsigned char) readl(port->membase + CDNS_UART_FIFO);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return c;
+ }
+@@ -1067,7 +1067,7 @@ static void cdns_uart_poll_put_char(stru
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* Wait until FIFO is empty */
+ while (!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXEMPTY))
+@@ -1080,7 +1080,7 @@ static void cdns_uart_poll_put_char(stru
+ while (!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXEMPTY))
+ cpu_relax();
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+ #endif
+
+@@ -1232,9 +1232,9 @@ static void cdns_uart_console_write(stru
+ if (port->sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+- locked = spin_trylock_irqsave(&port->lock, flags);
++ locked = uart_port_trylock_irqsave(port, &flags);
+ else
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* save and disable interrupt */
+ imr = readl(port->membase + CDNS_UART_IMR);
+@@ -1257,7 +1257,7 @@ static void cdns_uart_console_write(stru
+ writel(imr, port->membase + CDNS_UART_IER);
+
+ if (locked)
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ /**
+@@ -1325,7 +1325,7 @@ static int cdns_uart_suspend(struct devi
+ if (console_suspend_enabled && uart_console(port) && may_wake) {
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ /* Empty the receive FIFO 1st before making changes */
+ while (!(readl(port->membase + CDNS_UART_SR) &
+ CDNS_UART_SR_RXEMPTY))
+@@ -1334,7 +1334,7 @@ static int cdns_uart_suspend(struct devi
+ writel(1, port->membase + CDNS_UART_RXWM);
+ /* disable RX timeout interrups */
+ writel(CDNS_UART_IXR_TOUT, port->membase + CDNS_UART_IDR);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ /*
+@@ -1372,7 +1372,7 @@ static int cdns_uart_resume(struct devic
+ return ret;
+ }
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* Set TX/RX Reset */
+ ctrl_reg = readl(port->membase + CDNS_UART_CR);
+@@ -1392,14 +1392,14 @@ static int cdns_uart_resume(struct devic
+
+ clk_disable(cdns_uart->uartclk);
+ clk_disable(cdns_uart->pclk);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ } else {
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ /* restore original rx trigger level */
+ writel(rx_trigger_level, port->membase + CDNS_UART_RXWM);
+ /* enable RX timeout interrupt */
+ writel(CDNS_UART_IXR_TOUT, port->membase + CDNS_UART_IER);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ return uart_resume_port(cdns_uart->cdns_uart_driver, port);
diff --git a/debian/patches-rt/0075-printk-Add-non-BKL-nbcon-console-basic-infrastructur.patch b/debian/patches-rt/0075-printk-Add-non-BKL-nbcon-console-basic-infrastructur.patch
new file mode 100644
index 0000000000..00f7850c36
--- /dev/null
+++ b/debian/patches-rt/0075-printk-Add-non-BKL-nbcon-console-basic-infrastructur.patch
@@ -0,0 +1,261 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 16 Sep 2023 21:26:00 +0206
+Subject: [PATCH 075/134] printk: Add non-BKL (nbcon) console basic
+ infrastructure
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+The current console/printk subsystem is protected by a Big Kernel Lock,
+(aka console_lock) which has ill defined semantics and is more or less
+stateless. This puts severe limitations on the console subsystem and
+makes forced takeover and output in emergency and panic situations a
+fragile endeavour that is based on try and pray.
+
+The goal of non-BKL (nbcon) consoles is to break out of the console lock
+jail and to provide a new infrastructure that avoids the pitfalls and
+also allows console drivers to be gradually converted over.
+
+The proposed infrastructure aims for the following properties:
+
+ - Per console locking instead of global locking
+ - Per console state that allows to make informed decisions
+ - Stateful handover and takeover
+
+As a first step, state is added to struct console. The per console state
+is an atomic_t using a 32bit bit field.
+
+Reserve state bits, which will be populated later in the series. Wire
+it up into the console register/unregister functionality.
+
+It was decided to use a bitfield because using a plain u32 with
+mask/shift operations resulted in uncomprehensible code.
+
+Co-developed-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Thomas Gleixner (Intel) <tglx@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20230916192007.608398-2-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/console.h | 31 ++++++++++++++++++++
+ kernel/printk/Makefile | 2 -
+ kernel/printk/internal.h | 8 +++++
+ kernel/printk/nbcon.c | 70 +++++++++++++++++++++++++++++++++++++++++++++++
+ kernel/printk/printk.c | 13 ++++++--
+ 5 files changed, 120 insertions(+), 4 deletions(-)
+ create mode 100644 kernel/printk/nbcon.c
+
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -156,6 +156,8 @@ static inline int con_debug_leave(void)
+ * /dev/kmesg which requires a larger output buffer.
+ * @CON_SUSPENDED: Indicates if a console is suspended. If true, the
+ * printing callbacks must not be called.
++ * @CON_NBCON: Console can operate outside of the legacy style console_lock
++ * constraints.
+ */
+ enum cons_flags {
+ CON_PRINTBUFFER = BIT(0),
+@@ -166,9 +168,33 @@ enum cons_flags {
+ CON_BRL = BIT(5),
+ CON_EXTENDED = BIT(6),
+ CON_SUSPENDED = BIT(7),
++ CON_NBCON = BIT(8),
+ };
+
+ /**
++ * struct nbcon_state - console state for nbcon consoles
++ * @atom: Compound of the state fields for atomic operations
++ *
++ * To be used for reading and preparing of the value stored in the nbcon
++ * state variable @console::nbcon_state.
++ */
++struct nbcon_state {
++ union {
++ unsigned int atom;
++ struct {
++ };
++ };
++};
++
++/*
++ * The nbcon_state struct is used to easily create and interpret values that
++ * are stored in the @console::nbcon_state variable. Ensure this struct stays
++ * within the size boundaries of the atomic variable's underlying type in
++ * order to avoid any accidental truncation.
++ */
++static_assert(sizeof(struct nbcon_state) <= sizeof(int));
++
++/**
+ * struct console - The console descriptor structure
+ * @name: The name of the console driver
+ * @write: Write callback to output messages (Optional)
+@@ -187,6 +213,8 @@ enum cons_flags {
+ * @dropped: Number of unreported dropped ringbuffer records
+ * @data: Driver private data
+ * @node: hlist node for the console list
++ *
++ * @nbcon_state: State for nbcon consoles
+ */
+ struct console {
+ char name[16];
+@@ -206,6 +234,9 @@ struct console {
+ unsigned long dropped;
+ void *data;
+ struct hlist_node node;
++
++ /* nbcon console specific members */
++ atomic_t __private nbcon_state;
+ };
+
+ #ifdef CONFIG_LOCKDEP
+--- a/kernel/printk/Makefile
++++ b/kernel/printk/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ obj-y = printk.o
+-obj-$(CONFIG_PRINTK) += printk_safe.o
++obj-$(CONFIG_PRINTK) += printk_safe.o nbcon.o
+ obj-$(CONFIG_A11Y_BRAILLE_CONSOLE) += braille.o
+ obj-$(CONFIG_PRINTK_INDEX) += index.o
+
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -3,6 +3,7 @@
+ * internal.h - printk internal definitions
+ */
+ #include <linux/percpu.h>
++#include <linux/console.h>
+
+ #if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
+ void __init printk_sysctl_init(void);
+@@ -61,6 +62,10 @@ void defer_console_output(void);
+
+ u16 printk_parse_prefix(const char *text, int *level,
+ enum printk_info_flags *flags);
++
++void nbcon_init(struct console *con);
++void nbcon_cleanup(struct console *con);
++
+ #else
+
+ #define PRINTK_PREFIX_MAX 0
+@@ -76,6 +81,9 @@ u16 printk_parse_prefix(const char *text
+ #define printk_safe_exit_irqrestore(flags) local_irq_restore(flags)
+
+ static inline bool printk_percpu_data_ready(void) { return false; }
++static inline void nbcon_init(struct console *con) { }
++static inline void nbcon_cleanup(struct console *con) { }
++
+ #endif /* CONFIG_PRINTK */
+
+ /**
+--- /dev/null
++++ b/kernel/printk/nbcon.c
+@@ -0,0 +1,70 @@
++// SPDX-License-Identifier: GPL-2.0-only
++// Copyright (C) 2022 Linutronix GmbH, John Ogness
++// Copyright (C) 2022 Intel, Thomas Gleixner
++
++#include <linux/kernel.h>
++#include <linux/console.h>
++#include "internal.h"
++/*
++ * Printk console printing implementation for consoles which does not depend
++ * on the legacy style console_lock mechanism.
++ */
++
++/**
++ * nbcon_state_set - Helper function to set the console state
++ * @con: Console to update
++ * @new: The new state to write
++ *
++ * Only to be used when the console is not yet or no longer visible in the
++ * system. Otherwise use nbcon_state_try_cmpxchg().
++ */
++static inline void nbcon_state_set(struct console *con, struct nbcon_state *new)
++{
++ atomic_set(&ACCESS_PRIVATE(con, nbcon_state), new->atom);
++}
++
++/**
++ * nbcon_state_read - Helper function to read the console state
++ * @con: Console to read
++ * @state: The state to store the result
++ */
++static inline void nbcon_state_read(struct console *con, struct nbcon_state *state)
++{
++ state->atom = atomic_read(&ACCESS_PRIVATE(con, nbcon_state));
++}
++
++/**
++ * nbcon_state_try_cmpxchg() - Helper function for atomic_try_cmpxchg() on console state
++ * @con: Console to update
++ * @cur: Old/expected state
++ * @new: New state
++ *
++ * Return: True on success. False on fail and @cur is updated.
++ */
++static inline bool nbcon_state_try_cmpxchg(struct console *con, struct nbcon_state *cur,
++ struct nbcon_state *new)
++{
++ return atomic_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_state), &cur->atom, new->atom);
++}
++
++/**
++ * nbcon_init - Initialize the nbcon console specific data
++ * @con: Console to initialize
++ */
++void nbcon_init(struct console *con)
++{
++ struct nbcon_state state = { };
++
++ nbcon_state_set(con, &state);
++}
++
++/**
++ * nbcon_cleanup - Cleanup the nbcon console specific data
++ * @con: Console to cleanup
++ */
++void nbcon_cleanup(struct console *con)
++{
++ struct nbcon_state state = { };
++
++ nbcon_state_set(con, &state);
++}
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -3326,9 +3326,10 @@ static void try_enable_default_console(s
+ newcon->flags |= CON_CONSDEV;
+ }
+
+-#define con_printk(lvl, con, fmt, ...) \
+- printk(lvl pr_fmt("%sconsole [%s%d] " fmt), \
+- (con->flags & CON_BOOT) ? "boot" : "", \
++#define con_printk(lvl, con, fmt, ...) \
++ printk(lvl pr_fmt("%s%sconsole [%s%d] " fmt), \
++ (con->flags & CON_NBCON) ? "" : "legacy ", \
++ (con->flags & CON_BOOT) ? "boot" : "", \
+ con->name, con->index, ##__VA_ARGS__)
+
+ static void console_init_seq(struct console *newcon, bool bootcon_registered)
+@@ -3488,6 +3489,9 @@ void register_console(struct console *ne
+ newcon->dropped = 0;
+ console_init_seq(newcon, bootcon_registered);
+
++ if (newcon->flags & CON_NBCON)
++ nbcon_init(newcon);
++
+ /*
+ * Put this console in the list - keep the
+ * preferred driver at the head of the list.
+@@ -3579,6 +3583,9 @@ static int unregister_console_locked(str
+ */
+ synchronize_srcu(&console_srcu);
+
++ if (console->flags & CON_NBCON)
++ nbcon_cleanup(console);
++
+ console_sysfs_notify();
+
+ if (console->exit)
diff --git a/debian/patches-rt/0076-printk-nbcon-Add-acquire-release-logic.patch b/debian/patches-rt/0076-printk-nbcon-Add-acquire-release-logic.patch
new file mode 100644
index 0000000000..2718379038
--- /dev/null
+++ b/debian/patches-rt/0076-printk-nbcon-Add-acquire-release-logic.patch
@@ -0,0 +1,705 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 16 Sep 2023 21:26:01 +0206
+Subject: [PATCH 076/134] printk: nbcon: Add acquire/release logic
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Add per console acquire/release functionality.
+
+The state of the console is maintained in the "nbcon_state" atomic
+variable.
+
+The console is locked when:
+
+ - The 'prio' field contains the priority of the context that owns the
+ console. Only higher priority contexts are allowed to take over the
+ lock. A value of 0 (NBCON_PRIO_NONE) means the console is not locked.
+
+ - The 'cpu' field denotes on which CPU the console is locked. It is used
+ to prevent busy waiting on the same CPU. Also it informs the lock owner
+ that it has lost the lock in a more complex scenario when the lock was
+ taken over by a higher priority context, released, and taken on another
+ CPU with the same priority as the interrupted owner.
+
+The acquire mechanism uses a few more fields:
+
+ - The 'req_prio' field is used by the handover approach to make the
+ current owner aware that there is a context with a higher priority
+ waiting for the friendly handover.
+
+ - The 'unsafe' field allows to take over the console in a safe way in the
+ middle of emitting a message. The field is set only when accessing some
+ shared resources or when the console device is manipulated. It can be
+ cleared, for example, after emitting one character when the console
+ device is in a consistent state.
+
+ - The 'unsafe_takeover' field is set when a hostile takeover took the
+ console in an unsafe state. The console will stay in the unsafe state
+ until re-initialized.
+
+The acquire mechanism uses three approaches:
+
+ 1) Direct acquire when the console is not owned or is owned by a lower
+ priority context and is in a safe state.
+
+ 2) Friendly handover mechanism uses a request/grant handshake. It is used
+ when the current owner has lower priority and the console is in an
+ unsafe state.
+
+ The requesting context:
+
+ a) Sets its priority into the 'req_prio' field.
+
+ b) Waits (with a timeout) for the owning context to unlock the
+ console.
+
+ c) Takes the lock and clears the 'req_prio' field.
+
+ The owning context:
+
+ a) Observes the 'req_prio' field set on exit from the unsafe
+ console state.
+
+ b) Gives up console ownership by clearing the 'prio' field.
+
+ 3) Unsafe hostile takeover allows to take over the lock even when the
+ console is an unsafe state. It is used only in panic() by the final
+ attempt to flush consoles in a try and hope mode.
+
+ Note that separate record buffers are used in panic(). As a result,
+ the messages can be read and formatted without any risk even after
+ using the hostile takeover in unsafe state.
+
+The release function simply clears the 'prio' field.
+
+All operations on @console::nbcon_state are atomic cmpxchg based to
+handle concurrency.
+
+The acquire/release functions implement only minimal policies:
+
+ - Preference for higher priority contexts.
+ - Protection of the panic CPU.
+
+All other policy decisions must be made at the call sites:
+
+ - What is marked as an unsafe section.
+ - Whether to spin-wait if there is already an owner and the console is
+ in an unsafe state.
+ - Whether to attempt an unsafe hostile takeover.
+
+The design allows to implement the well known:
+
+ acquire()
+ output_one_printk_record()
+ release()
+
+The output of one printk record might be interrupted with a higher priority
+context. The new owner is supposed to reprint the entire interrupted record
+from scratch.
+
+Co-developed-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Thomas Gleixner (Intel) <tglx@linutronix.de>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20230916192007.608398-3-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/console.h | 56 +++++
+ kernel/printk/nbcon.c | 497 ++++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 553 insertions(+)
+
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -175,13 +175,29 @@ enum cons_flags {
+ * struct nbcon_state - console state for nbcon consoles
+ * @atom: Compound of the state fields for atomic operations
+ *
++ * @req_prio: The priority of a handover request
++ * @prio: The priority of the current owner
++ * @unsafe: Console is busy in a non takeover region
++ * @unsafe_takeover: A hostile takeover in an unsafe state happened in the
++ * past. The console cannot be safe until re-initialized.
++ * @cpu: The CPU on which the owner runs
++ *
+ * To be used for reading and preparing of the value stored in the nbcon
+ * state variable @console::nbcon_state.
++ *
++ * The @prio and @req_prio fields are particularly important to allow
++ * spin-waiting to timeout and give up without the risk of a waiter being
++ * assigned the lock after giving up.
+ */
+ struct nbcon_state {
+ union {
+ unsigned int atom;
+ struct {
++ unsigned int prio : 2;
++ unsigned int req_prio : 2;
++ unsigned int unsafe : 1;
++ unsigned int unsafe_takeover : 1;
++ unsigned int cpu : 24;
+ };
+ };
+ };
+@@ -195,6 +211,46 @@ struct nbcon_state {
+ static_assert(sizeof(struct nbcon_state) <= sizeof(int));
+
+ /**
++ * nbcon_prio - console owner priority for nbcon consoles
++ * @NBCON_PRIO_NONE: Unused
++ * @NBCON_PRIO_NORMAL: Normal (non-emergency) usage
++ * @NBCON_PRIO_EMERGENCY: Emergency output (WARN/OOPS...)
++ * @NBCON_PRIO_PANIC: Panic output
++ * @NBCON_PRIO_MAX: The number of priority levels
++ *
++ * A higher priority context can takeover the console when it is
++ * in the safe state. The final attempt to flush consoles in panic()
++ * can be allowed to do so even in an unsafe state (Hope and pray).
++ */
++enum nbcon_prio {
++ NBCON_PRIO_NONE = 0,
++ NBCON_PRIO_NORMAL,
++ NBCON_PRIO_EMERGENCY,
++ NBCON_PRIO_PANIC,
++ NBCON_PRIO_MAX,
++};
++
++struct console;
++
++/**
++ * struct nbcon_context - Context for console acquire/release
++ * @console: The associated console
++ * @spinwait_max_us: Limit for spin-wait acquire
++ * @prio: Priority of the context
++ * @allow_unsafe_takeover: Allow performing takeover even if unsafe. Can
++ * be used only with NBCON_PRIO_PANIC @prio. It
++ * might cause a system freeze when the console
++ * is used later.
++ */
++struct nbcon_context {
++ /* members set by caller */
++ struct console *console;
++ unsigned int spinwait_max_us;
++ enum nbcon_prio prio;
++ unsigned int allow_unsafe_takeover : 1;
++};
++
++/**
+ * struct console - The console descriptor structure
+ * @name: The name of the console driver
+ * @write: Write callback to output messages (Optional)
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -4,10 +4,98 @@
+
+ #include <linux/kernel.h>
+ #include <linux/console.h>
++#include <linux/delay.h>
+ #include "internal.h"
+ /*
+ * Printk console printing implementation for consoles which does not depend
+ * on the legacy style console_lock mechanism.
++ *
++ * The state of the console is maintained in the "nbcon_state" atomic
++ * variable.
++ *
++ * The console is locked when:
++ *
++ * - The 'prio' field contains the priority of the context that owns the
++ * console. Only higher priority contexts are allowed to take over the
++ * lock. A value of 0 (NBCON_PRIO_NONE) means the console is not locked.
++ *
++ * - The 'cpu' field denotes on which CPU the console is locked. It is used
++ * to prevent busy waiting on the same CPU. Also it informs the lock owner
++ * that it has lost the lock in a more complex scenario when the lock was
++ * taken over by a higher priority context, released, and taken on another
++ * CPU with the same priority as the interrupted owner.
++ *
++ * The acquire mechanism uses a few more fields:
++ *
++ * - The 'req_prio' field is used by the handover approach to make the
++ * current owner aware that there is a context with a higher priority
++ * waiting for the friendly handover.
++ *
++ * - The 'unsafe' field allows to take over the console in a safe way in the
++ * middle of emitting a message. The field is set only when accessing some
++ * shared resources or when the console device is manipulated. It can be
++ * cleared, for example, after emitting one character when the console
++ * device is in a consistent state.
++ *
++ * - The 'unsafe_takeover' field is set when a hostile takeover took the
++ * console in an unsafe state. The console will stay in the unsafe state
++ * until re-initialized.
++ *
++ * The acquire mechanism uses three approaches:
++ *
++ * 1) Direct acquire when the console is not owned or is owned by a lower
++ * priority context and is in a safe state.
++ *
++ * 2) Friendly handover mechanism uses a request/grant handshake. It is used
++ * when the current owner has lower priority and the console is in an
++ * unsafe state.
++ *
++ * The requesting context:
++ *
++ * a) Sets its priority into the 'req_prio' field.
++ *
++ * b) Waits (with a timeout) for the owning context to unlock the
++ * console.
++ *
++ * c) Takes the lock and clears the 'req_prio' field.
++ *
++ * The owning context:
++ *
++ * a) Observes the 'req_prio' field set on exit from the unsafe
++ * console state.
++ *
++ * b) Gives up console ownership by clearing the 'prio' field.
++ *
++ * 3) Unsafe hostile takeover allows to take over the lock even when the
++ * console is an unsafe state. It is used only in panic() by the final
++ * attempt to flush consoles in a try and hope mode.
++ *
++ * The release function simply clears the 'prio' field.
++ *
++ * All operations on @console::nbcon_state are atomic cmpxchg based to
++ * handle concurrency.
++ *
++ * The acquire/release functions implement only minimal policies:
++ *
++ * - Preference for higher priority contexts.
++ * - Protection of the panic CPU.
++ *
++ * All other policy decisions must be made at the call sites:
++ *
++ * - What is marked as an unsafe section.
++ * - Whether to spin-wait if there is already an owner and the console is
++ * in an unsafe state.
++ * - Whether to attempt an unsafe hostile takeover.
++ *
++ * The design allows to implement the well known:
++ *
++ * acquire()
++ * output_one_printk_record()
++ * release()
++ *
++ * The output of one printk record might be interrupted with a higher priority
++ * context. The new owner is supposed to reprint the entire interrupted record
++ * from scratch.
+ */
+
+ /**
+@@ -48,6 +136,415 @@ static inline bool nbcon_state_try_cmpxc
+ }
+
+ /**
++ * nbcon_context_try_acquire_direct - Try to acquire directly
++ * @ctxt: The context of the caller
++ * @cur: The current console state
++ *
++ * Acquire the console when it is released. Also acquire the console when
++ * the current owner has a lower priority and the console is in a safe state.
++ *
++ * Return: 0 on success. Otherwise, an error code on failure. Also @cur
++ * is updated to the latest state when failed to modify it.
++ *
++ * Errors:
++ *
++ * -EPERM: A panic is in progress and this is not the panic CPU.
++ * Or the current owner or waiter has the same or higher
++ * priority. No acquire method can be successful in
++ * this case.
++ *
++ * -EBUSY: The current owner has a lower priority but the console
++ * in an unsafe state. The caller should try using
++ * the handover acquire method.
++ */
++static int nbcon_context_try_acquire_direct(struct nbcon_context *ctxt,
++ struct nbcon_state *cur)
++{
++ unsigned int cpu = smp_processor_id();
++ struct console *con = ctxt->console;
++ struct nbcon_state new;
++
++ do {
++ if (other_cpu_in_panic())
++ return -EPERM;
++
++ if (ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio)
++ return -EPERM;
++
++ if (cur->unsafe)
++ return -EBUSY;
++
++ /*
++ * The console should never be safe for a direct acquire
++ * if an unsafe hostile takeover has ever happened.
++ */
++ WARN_ON_ONCE(cur->unsafe_takeover);
++
++ new.atom = cur->atom;
++ new.prio = ctxt->prio;
++ new.req_prio = NBCON_PRIO_NONE;
++ new.unsafe = cur->unsafe_takeover;
++ new.cpu = cpu;
++
++ } while (!nbcon_state_try_cmpxchg(con, cur, &new));
++
++ return 0;
++}
++
++static bool nbcon_waiter_matches(struct nbcon_state *cur, int expected_prio)
++{
++ /*
++ * The request context is well defined by the @req_prio because:
++ *
++ * - Only a context with a higher priority can take over the request.
++ * - There are only three priorities.
++ * - Only one CPU is allowed to request PANIC priority.
++ * - Lower priorities are ignored during panic() until reboot.
++ *
++ * As a result, the following scenario is *not* possible:
++ *
++ * 1. Another context with a higher priority directly takes ownership.
++ * 2. The higher priority context releases the ownership.
++ * 3. A lower priority context takes the ownership.
++ * 4. Another context with the same priority as this context
++ * creates a request and starts waiting.
++ */
++
++ return (cur->req_prio == expected_prio);
++}
++
++/**
++ * nbcon_context_try_acquire_requested - Try to acquire after having
++ * requested a handover
++ * @ctxt: The context of the caller
++ * @cur: The current console state
++ *
++ * This is a helper function for nbcon_context_try_acquire_handover().
++ * It is called when the console is in an unsafe state. The current
++ * owner will release the console on exit from the unsafe region.
++ *
++ * Return: 0 on success and @cur is updated to the new console state.
++ * Otherwise an error code on failure.
++ *
++ * Errors:
++ *
++ * -EPERM: A panic is in progress and this is not the panic CPU
++ * or this context is no longer the waiter.
++ *
++ * -EBUSY: The console is still locked. The caller should
++ * continue waiting.
++ *
++ * Note: The caller must still remove the request when an error has occurred
++ * except when this context is no longer the waiter.
++ */
++static int nbcon_context_try_acquire_requested(struct nbcon_context *ctxt,
++ struct nbcon_state *cur)
++{
++ unsigned int cpu = smp_processor_id();
++ struct console *con = ctxt->console;
++ struct nbcon_state new;
++
++ /* Note that the caller must still remove the request! */
++ if (other_cpu_in_panic())
++ return -EPERM;
++
++ /*
++ * Note that the waiter will also change if there was an unsafe
++ * hostile takeover.
++ */
++ if (!nbcon_waiter_matches(cur, ctxt->prio))
++ return -EPERM;
++
++ /* If still locked, caller should continue waiting. */
++ if (cur->prio != NBCON_PRIO_NONE)
++ return -EBUSY;
++
++ /*
++ * The previous owner should have never released ownership
++ * in an unsafe region.
++ */
++ WARN_ON_ONCE(cur->unsafe);
++
++ new.atom = cur->atom;
++ new.prio = ctxt->prio;
++ new.req_prio = NBCON_PRIO_NONE;
++ new.unsafe = cur->unsafe_takeover;
++ new.cpu = cpu;
++
++ if (!nbcon_state_try_cmpxchg(con, cur, &new)) {
++ /*
++ * The acquire could fail only when it has been taken
++ * over by a higher priority context.
++ */
++ WARN_ON_ONCE(nbcon_waiter_matches(cur, ctxt->prio));
++ return -EPERM;
++ }
++
++ /* Handover success. This context now owns the console. */
++ return 0;
++}
++
++/**
++ * nbcon_context_try_acquire_handover - Try to acquire via handover
++ * @ctxt: The context of the caller
++ * @cur: The current console state
++ *
++ * The function must be called only when the context has higher priority
++ * than the current owner and the console is in an unsafe state.
++ * It is the case when nbcon_context_try_acquire_direct() returns -EBUSY.
++ *
++ * The function sets "req_prio" field to make the current owner aware of
++ * the request. Then it waits until the current owner releases the console,
++ * or an even higher context takes over the request, or timeout expires.
++ *
++ * The current owner checks the "req_prio" field on exit from the unsafe
++ * region and releases the console. It does not touch the "req_prio" field
++ * so that the console stays reserved for the waiter.
++ *
++ * Return: 0 on success. Otherwise, an error code on failure. Also @cur
++ * is updated to the latest state when failed to modify it.
++ *
++ * Errors:
++ *
++ * -EPERM: A panic is in progress and this is not the panic CPU.
++ * Or a higher priority context has taken over the
++ * console or the handover request.
++ *
++ * -EBUSY: The current owner is on the same CPU so that the hand
++ * shake could not work. Or the current owner is not
++ * willing to wait (zero timeout). Or the console does
++ * not enter the safe state before timeout passed. The
++ * caller might still use the unsafe hostile takeover
++ * when allowed.
++ *
++ * -EAGAIN: @cur has changed when creating the handover request.
++ * The caller should retry with direct acquire.
++ */
++static int nbcon_context_try_acquire_handover(struct nbcon_context *ctxt,
++ struct nbcon_state *cur)
++{
++ unsigned int cpu = smp_processor_id();
++ struct console *con = ctxt->console;
++ struct nbcon_state new;
++ int timeout;
++ int request_err = -EBUSY;
++
++ /*
++ * Check that the handover is called when the direct acquire failed
++ * with -EBUSY.
++ */
++ WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio);
++ WARN_ON_ONCE(!cur->unsafe);
++
++ /* Handover is not possible on the same CPU. */
++ if (cur->cpu == cpu)
++ return -EBUSY;
++
++ /*
++ * Console stays unsafe after an unsafe takeover until re-initialized.
++ * Waiting is not going to help in this case.
++ */
++ if (cur->unsafe_takeover)
++ return -EBUSY;
++
++ /* Is the caller willing to wait? */
++ if (ctxt->spinwait_max_us == 0)
++ return -EBUSY;
++
++ /*
++ * Setup a request for the handover. The caller should try to acquire
++ * the console directly when the current state has been modified.
++ */
++ new.atom = cur->atom;
++ new.req_prio = ctxt->prio;
++ if (!nbcon_state_try_cmpxchg(con, cur, &new))
++ return -EAGAIN;
++
++ cur->atom = new.atom;
++
++ /* Wait until there is no owner and then acquire the console. */
++ for (timeout = ctxt->spinwait_max_us; timeout >= 0; timeout--) {
++ /* On successful acquire, this request is cleared. */
++ request_err = nbcon_context_try_acquire_requested(ctxt, cur);
++ if (!request_err)
++ return 0;
++
++ /*
++ * If the acquire should be aborted, it must be ensured
++ * that the request is removed before returning to caller.
++ */
++ if (request_err == -EPERM)
++ break;
++
++ udelay(1);
++
++ /* Re-read the state because some time has passed. */
++ nbcon_state_read(con, cur);
++ }
++
++ /* Timed out or aborted. Carefully remove handover request. */
++ do {
++ /*
++ * No need to remove request if there is a new waiter. This
++ * can only happen if a higher priority context has taken over
++ * the console or the handover request.
++ */
++ if (!nbcon_waiter_matches(cur, ctxt->prio))
++ return -EPERM;
++
++ /* Unset request for handover. */
++ new.atom = cur->atom;
++ new.req_prio = NBCON_PRIO_NONE;
++ if (nbcon_state_try_cmpxchg(con, cur, &new)) {
++ /*
++ * Request successfully unset. Report failure of
++ * acquiring via handover.
++ */
++ cur->atom = new.atom;
++ return request_err;
++ }
++
++ /*
++ * Unable to remove request. Try to acquire in case
++ * the owner has released the lock.
++ */
++ } while (nbcon_context_try_acquire_requested(ctxt, cur));
++
++ /* Lucky timing. The acquire succeeded while removing the request. */
++ return 0;
++}
++
++/**
++ * nbcon_context_try_acquire_hostile - Acquire via unsafe hostile takeover
++ * @ctxt: The context of the caller
++ * @cur: The current console state
++ *
++ * Acquire the console even in the unsafe state.
++ *
++ * It can be permitted by setting the 'allow_unsafe_takeover' field only
++ * by the final attempt to flush messages in panic().
++ *
++ * Return: 0 on success. -EPERM when not allowed by the context.
++ */
++static int nbcon_context_try_acquire_hostile(struct nbcon_context *ctxt,
++ struct nbcon_state *cur)
++{
++ unsigned int cpu = smp_processor_id();
++ struct console *con = ctxt->console;
++ struct nbcon_state new;
++
++ if (!ctxt->allow_unsafe_takeover)
++ return -EPERM;
++
++ /* Ensure caller is allowed to perform unsafe hostile takeovers. */
++ if (WARN_ON_ONCE(ctxt->prio != NBCON_PRIO_PANIC))
++ return -EPERM;
++
++ /*
++ * Check that try_acquire_direct() and try_acquire_handover() returned
++ * -EBUSY in the right situation.
++ */
++ WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio);
++ WARN_ON_ONCE(cur->unsafe != true);
++
++ do {
++ new.atom = cur->atom;
++ new.cpu = cpu;
++ new.prio = ctxt->prio;
++ new.unsafe |= cur->unsafe_takeover;
++ new.unsafe_takeover |= cur->unsafe;
++
++ } while (!nbcon_state_try_cmpxchg(con, cur, &new));
++
++ return 0;
++}
++
++/**
++ * nbcon_context_try_acquire - Try to acquire nbcon console
++ * @ctxt: The context of the caller
++ *
++ * Return: True if the console was acquired. False otherwise.
++ *
++ * If the caller allowed an unsafe hostile takeover, on success the
++ * caller should check the current console state to see if it is
++ * in an unsafe state. Otherwise, on success the caller may assume
++ * the console is not in an unsafe state.
++ */
++__maybe_unused
++static bool nbcon_context_try_acquire(struct nbcon_context *ctxt)
++{
++ struct console *con = ctxt->console;
++ struct nbcon_state cur;
++ int err;
++
++ nbcon_state_read(con, &cur);
++try_again:
++ err = nbcon_context_try_acquire_direct(ctxt, &cur);
++ if (err != -EBUSY)
++ goto out;
++
++ err = nbcon_context_try_acquire_handover(ctxt, &cur);
++ if (err == -EAGAIN)
++ goto try_again;
++ if (err != -EBUSY)
++ goto out;
++
++ err = nbcon_context_try_acquire_hostile(ctxt, &cur);
++out:
++ return !err;
++}
++
++static bool nbcon_owner_matches(struct nbcon_state *cur, int expected_cpu,
++ int expected_prio)
++{
++ /*
++ * Since consoles can only be acquired by higher priorities,
++ * owning contexts are uniquely identified by @prio. However,
++ * since contexts can unexpectedly lose ownership, it is
++ * possible that later another owner appears with the same
++ * priority. For this reason @cpu is also needed.
++ */
++
++ if (cur->prio != expected_prio)
++ return false;
++
++ if (cur->cpu != expected_cpu)
++ return false;
++
++ return true;
++}
++
++/**
++ * nbcon_context_release - Release the console
++ * @ctxt: The nbcon context from nbcon_context_try_acquire()
++ */
++__maybe_unused
++static void nbcon_context_release(struct nbcon_context *ctxt)
++{
++ unsigned int cpu = smp_processor_id();
++ struct console *con = ctxt->console;
++ struct nbcon_state cur;
++ struct nbcon_state new;
++
++ nbcon_state_read(con, &cur);
++
++ do {
++ if (!nbcon_owner_matches(&cur, cpu, ctxt->prio))
++ return;
++
++ new.atom = cur.atom;
++ new.prio = NBCON_PRIO_NONE;
++
++ /*
++ * If @unsafe_takeover is set, it is kept set so that
++ * the state remains permanently unsafe.
++ */
++ new.unsafe |= cur.unsafe_takeover;
++
++ } while (!nbcon_state_try_cmpxchg(con, &cur, &new));
++}
++
++/**
+ * nbcon_init - Initialize the nbcon console specific data
+ * @con: Console to initialize
+ */
diff --git a/debian/patches-rt/0077-printk-Make-static-printk-buffers-available-to-nbcon.patch b/debian/patches-rt/0077-printk-Make-static-printk-buffers-available-to-nbcon.patch
new file mode 100644
index 0000000000..f3fbb6a822
--- /dev/null
+++ b/debian/patches-rt/0077-printk-Make-static-printk-buffers-available-to-nbcon.patch
@@ -0,0 +1,65 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Sat, 16 Sep 2023 21:26:02 +0206
+Subject: [PATCH 077/134] printk: Make static printk buffers available to nbcon
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+The nbcon boot consoles also need printk buffers that are available
+very early. Since the nbcon boot consoles will also be serialized
+by the console_lock, they can use the same static printk buffers
+that the legacy consoles are using.
+
+Make the legacy static printk buffers available outside of printk.c
+so they can be used by nbcon.c.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20230916192007.608398-4-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/internal.h | 2 ++
+ kernel/printk/printk.c | 13 +++++++++----
+ 2 files changed, 11 insertions(+), 4 deletions(-)
+
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -86,6 +86,8 @@ static inline void nbcon_cleanup(struct
+
+ #endif /* CONFIG_PRINTK */
+
++extern struct printk_buffers printk_shared_pbufs;
++
+ /**
+ * struct printk_buffers - Buffers to read/format/output printk messages.
+ * @outbuf: After formatting, contains text to output.
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2847,6 +2847,13 @@ static bool printk_get_next_message(stru
+ }
+
+ /*
++ * Used as the printk buffers for non-panic, serialized console printing.
++ * This is for legacy (!CON_NBCON) as well as all boot (CON_BOOT) consoles.
++ * Its usage requires the console_lock held.
++ */
++struct printk_buffers printk_shared_pbufs;
++
++/*
+ * Print one record for the given console. The record printed is whatever
+ * record is the next available record for the given console.
+ *
+@@ -2863,12 +2870,10 @@ static bool printk_get_next_message(stru
+ */
+ static bool console_emit_next_record(struct console *con, bool *handover, int cookie)
+ {
+- static struct printk_buffers pbufs;
+-
+ bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED;
+- char *outbuf = &pbufs.outbuf[0];
++ char *outbuf = &printk_shared_pbufs.outbuf[0];
+ struct printk_message pmsg = {
+- .pbufs = &pbufs,
++ .pbufs = &printk_shared_pbufs,
+ };
+ unsigned long flags;
+
diff --git a/debian/patches-rt/0078-printk-nbcon-Add-buffer-management.patch b/debian/patches-rt/0078-printk-nbcon-Add-buffer-management.patch
new file mode 100644
index 0000000000..ea96abd7ed
--- /dev/null
+++ b/debian/patches-rt/0078-printk-nbcon-Add-buffer-management.patch
@@ -0,0 +1,311 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 16 Sep 2023 21:26:03 +0206
+Subject: [PATCH 078/134] printk: nbcon: Add buffer management
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+In case of hostile takeovers it must be ensured that the previous
+owner cannot scribble over the output buffer of the emergency/panic
+context. This is achieved by:
+
+ - Adding a global output buffer instance for the panic context.
+ This is the only situation where hostile takeovers can occur and
+ there is always at most 1 panic context.
+
+ - Allocating an output buffer per non-boot console upon console
+ registration. This buffer is used by the console owner when not
+ in panic context. (For boot consoles, the existing shared global
+ legacy output buffer is used instead. Boot console printing will
+ be synchronized with legacy console printing.)
+
+ - Choosing the appropriate buffer is handled in the acquire/release
+ functions.
+
+Co-developed-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Thomas Gleixner (Intel) <tglx@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20230916192007.608398-5-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/console.h | 7 ++++
+ kernel/printk/internal.h | 12 ++++++-
+ kernel/printk/nbcon.c | 73 +++++++++++++++++++++++++++++++++++++++++++----
+ kernel/printk/printk.c | 22 +++++++++-----
+ 4 files changed, 99 insertions(+), 15 deletions(-)
+
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -231,6 +231,7 @@ enum nbcon_prio {
+ };
+
+ struct console;
++struct printk_buffers;
+
+ /**
+ * struct nbcon_context - Context for console acquire/release
+@@ -241,6 +242,7 @@ struct console;
+ * be used only with NBCON_PRIO_PANIC @prio. It
+ * might cause a system freeze when the console
+ * is used later.
++ * @pbufs: Pointer to the text buffer for this context
+ */
+ struct nbcon_context {
+ /* members set by caller */
+@@ -248,6 +250,9 @@ struct nbcon_context {
+ unsigned int spinwait_max_us;
+ enum nbcon_prio prio;
+ unsigned int allow_unsafe_takeover : 1;
++
++ /* members set by acquire */
++ struct printk_buffers *pbufs;
+ };
+
+ /**
+@@ -271,6 +276,7 @@ struct nbcon_context {
+ * @node: hlist node for the console list
+ *
+ * @nbcon_state: State for nbcon consoles
++ * @pbufs: Pointer to nbcon private buffer
+ */
+ struct console {
+ char name[16];
+@@ -293,6 +299,7 @@ struct console {
+
+ /* nbcon console specific members */
+ atomic_t __private nbcon_state;
++ struct printk_buffers *pbufs;
+ };
+
+ #ifdef CONFIG_LOCKDEP
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -13,6 +13,12 @@ int devkmsg_sysctl_set_loglvl(struct ctl
+ #define printk_sysctl_init() do { } while (0)
+ #endif
+
++#define con_printk(lvl, con, fmt, ...) \
++ printk(lvl pr_fmt("%s%sconsole [%s%d] " fmt), \
++ (con->flags & CON_NBCON) ? "" : "legacy ", \
++ (con->flags & CON_BOOT) ? "boot" : "", \
++ con->name, con->index, ##__VA_ARGS__)
++
+ #ifdef CONFIG_PRINTK
+
+ #ifdef CONFIG_PRINTK_CALLER
+@@ -63,8 +69,9 @@ void defer_console_output(void);
+ u16 printk_parse_prefix(const char *text, int *level,
+ enum printk_info_flags *flags);
+
++bool nbcon_alloc(struct console *con);
+ void nbcon_init(struct console *con);
+-void nbcon_cleanup(struct console *con);
++void nbcon_free(struct console *con);
+
+ #else
+
+@@ -81,8 +88,9 @@ void nbcon_cleanup(struct console *con);
+ #define printk_safe_exit_irqrestore(flags) local_irq_restore(flags)
+
+ static inline bool printk_percpu_data_ready(void) { return false; }
++static inline bool nbcon_alloc(struct console *con) { return false; }
+ static inline void nbcon_init(struct console *con) { }
+-static inline void nbcon_cleanup(struct console *con) { }
++static inline void nbcon_free(struct console *con) { }
+
+ #endif /* CONFIG_PRINTK */
+
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -5,6 +5,7 @@
+ #include <linux/kernel.h>
+ #include <linux/console.h>
+ #include <linux/delay.h>
++#include <linux/slab.h>
+ #include "internal.h"
+ /*
+ * Printk console printing implementation for consoles which does not depend
+@@ -70,6 +71,10 @@
+ * console is an unsafe state. It is used only in panic() by the final
+ * attempt to flush consoles in a try and hope mode.
+ *
++ * Note that separate record buffers are used in panic(). As a result,
++ * the messages can be read and formatted without any risk even after
++ * using the hostile takeover in unsafe state.
++ *
+ * The release function simply clears the 'prio' field.
+ *
+ * All operations on @console::nbcon_state are atomic cmpxchg based to
+@@ -459,6 +464,8 @@ static int nbcon_context_try_acquire_hos
+ return 0;
+ }
+
++static struct printk_buffers panic_nbcon_pbufs;
++
+ /**
+ * nbcon_context_try_acquire - Try to acquire nbcon console
+ * @ctxt: The context of the caller
+@@ -473,6 +480,7 @@ static int nbcon_context_try_acquire_hos
+ __maybe_unused
+ static bool nbcon_context_try_acquire(struct nbcon_context *ctxt)
+ {
++ unsigned int cpu = smp_processor_id();
+ struct console *con = ctxt->console;
+ struct nbcon_state cur;
+ int err;
+@@ -491,7 +499,18 @@ static bool nbcon_context_try_acquire(st
+
+ err = nbcon_context_try_acquire_hostile(ctxt, &cur);
+ out:
+- return !err;
++ if (err)
++ return false;
++
++ /* Acquire succeeded. */
++
++ /* Assign the appropriate buffer for this context. */
++ if (atomic_read(&panic_cpu) == cpu)
++ ctxt->pbufs = &panic_nbcon_pbufs;
++ else
++ ctxt->pbufs = con->pbufs;
++
++ return true;
+ }
+
+ static bool nbcon_owner_matches(struct nbcon_state *cur, int expected_cpu,
+@@ -530,7 +549,7 @@ static void nbcon_context_release(struct
+
+ do {
+ if (!nbcon_owner_matches(&cur, cpu, ctxt->prio))
+- return;
++ break;
+
+ new.atom = cur.atom;
+ new.prio = NBCON_PRIO_NONE;
+@@ -542,26 +561,70 @@ static void nbcon_context_release(struct
+ new.unsafe |= cur.unsafe_takeover;
+
+ } while (!nbcon_state_try_cmpxchg(con, &cur, &new));
++
++ ctxt->pbufs = NULL;
++}
++
++/**
++ * nbcon_alloc - Allocate buffers needed by the nbcon console
++ * @con: Console to allocate buffers for
++ *
++ * Return: True on success. False otherwise and the console cannot
++ * be used.
++ *
++ * This is not part of nbcon_init() because buffer allocation must
++ * be performed earlier in the console registration process.
++ */
++bool nbcon_alloc(struct console *con)
++{
++ if (con->flags & CON_BOOT) {
++ /*
++ * Boot console printing is synchronized with legacy console
++ * printing, so boot consoles can share the same global printk
++ * buffers.
++ */
++ con->pbufs = &printk_shared_pbufs;
++ } else {
++ con->pbufs = kmalloc(sizeof(*con->pbufs), GFP_KERNEL);
++ if (!con->pbufs) {
++ con_printk(KERN_ERR, con, "failed to allocate printing buffer\n");
++ return false;
++ }
++ }
++
++ return true;
+ }
+
+ /**
+ * nbcon_init - Initialize the nbcon console specific data
+ * @con: Console to initialize
++ *
++ * nbcon_alloc() *must* be called and succeed before this function
++ * is called.
+ */
+ void nbcon_init(struct console *con)
+ {
+ struct nbcon_state state = { };
+
++ /* nbcon_alloc() must have been called and successful! */
++ BUG_ON(!con->pbufs);
++
+ nbcon_state_set(con, &state);
+ }
+
+ /**
+- * nbcon_cleanup - Cleanup the nbcon console specific data
+- * @con: Console to cleanup
++ * nbcon_free - Free and cleanup the nbcon console specific data
++ * @con: Console to free/cleanup nbcon data
+ */
+-void nbcon_cleanup(struct console *con)
++void nbcon_free(struct console *con)
+ {
+ struct nbcon_state state = { };
+
+ nbcon_state_set(con, &state);
++
++ /* Boot consoles share global printk buffers. */
++ if (!(con->flags & CON_BOOT))
++ kfree(con->pbufs);
++
++ con->pbufs = NULL;
+ }
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -3331,12 +3331,6 @@ static void try_enable_default_console(s
+ newcon->flags |= CON_CONSDEV;
+ }
+
+-#define con_printk(lvl, con, fmt, ...) \
+- printk(lvl pr_fmt("%s%sconsole [%s%d] " fmt), \
+- (con->flags & CON_NBCON) ? "" : "legacy ", \
+- (con->flags & CON_BOOT) ? "boot" : "", \
+- con->name, con->index, ##__VA_ARGS__)
+-
+ static void console_init_seq(struct console *newcon, bool bootcon_registered)
+ {
+ struct console *con;
+@@ -3450,6 +3444,15 @@ void register_console(struct console *ne
+ goto unlock;
+ }
+
++ if (newcon->flags & CON_NBCON) {
++ /*
++ * Ensure the nbcon console buffers can be allocated
++ * before modifying any global data.
++ */
++ if (!nbcon_alloc(newcon))
++ goto unlock;
++ }
++
+ /*
+ * See if we want to enable this console driver by default.
+ *
+@@ -3477,8 +3480,11 @@ void register_console(struct console *ne
+ err = try_enable_preferred_console(newcon, false);
+
+ /* printk() messages are not printed to the Braille console. */
+- if (err || newcon->flags & CON_BRL)
++ if (err || newcon->flags & CON_BRL) {
++ if (newcon->flags & CON_NBCON)
++ nbcon_free(newcon);
+ goto unlock;
++ }
+
+ /*
+ * If we have a bootconsole, and are switching to a real console,
+@@ -3589,7 +3595,7 @@ static int unregister_console_locked(str
+ synchronize_srcu(&console_srcu);
+
+ if (console->flags & CON_NBCON)
+- nbcon_cleanup(console);
++ nbcon_free(console);
+
+ console_sysfs_notify();
+
diff --git a/debian/patches-rt/0079-printk-nbcon-Add-ownership-state-functions.patch b/debian/patches-rt/0079-printk-nbcon-Add-ownership-state-functions.patch
new file mode 100644
index 0000000000..e26a5f332d
--- /dev/null
+++ b/debian/patches-rt/0079-printk-nbcon-Add-ownership-state-functions.patch
@@ -0,0 +1,179 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 16 Sep 2023 21:26:04 +0206
+Subject: [PATCH 079/134] printk: nbcon: Add ownership state functions
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Provide functions that are related to the safe handover mechanism
+and allow console drivers to dynamically specify unsafe regions:
+
+ - nbcon_context_can_proceed()
+
+ Invoked by a console owner to check whether a handover request
+ is pending or whether the console has been taken over by another
+ context. If a handover request is pending, this function will
+ also perform the handover, thus cancelling its own ownership.
+
+ - nbcon_context_enter_unsafe()/nbcon_context_exit_unsafe()
+
+ Invoked by a console owner to denote that the driver is about
+ to enter or leave a critical region where a take over is unsafe.
+ The exit variant is the point where the current owner releases
+ the lock for a higher priority context which asked for the
+ friendly handover.
+
+ The unsafe state is stored in the console state and allows a
+ new context to make informed decisions whether to attempt a
+ takeover of such a console. The unsafe state is also available
+ to the driver so that it can make informed decisions about the
+ required actions and possibly take a special emergency path.
+
+Co-developed-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Thomas Gleixner (Intel) <tglx@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20230916192007.608398-6-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/nbcon.c | 123 +++++++++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 122 insertions(+), 1 deletion(-)
+
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -537,7 +537,6 @@ static bool nbcon_owner_matches(struct n
+ * nbcon_context_release - Release the console
+ * @ctxt: The nbcon context from nbcon_context_try_acquire()
+ */
+-__maybe_unused
+ static void nbcon_context_release(struct nbcon_context *ctxt)
+ {
+ unsigned int cpu = smp_processor_id();
+@@ -566,6 +565,128 @@ static void nbcon_context_release(struct
+ }
+
+ /**
++ * nbcon_context_can_proceed - Check whether ownership can proceed
++ * @ctxt: The nbcon context from nbcon_context_try_acquire()
++ * @cur: The current console state
++ *
++ * Return: True if this context still owns the console. False if
++ * ownership was handed over or taken.
++ *
++ * Must be invoked when entering the unsafe state to make sure that it still
++ * owns the lock. Also must be invoked when exiting the unsafe context
++ * to eventually free the lock for a higher priority context which asked
++ * for the friendly handover.
++ *
++ * It can be called inside an unsafe section when the console is just
++ * temporary in safe state instead of exiting and entering the unsafe
++ * state.
++ *
++ * Also it can be called in the safe context before doing an expensive
++ * safe operation. It does not make sense to do the operation when
++ * a higher priority context took the lock.
++ *
++ * When this function returns false then the calling context no longer owns
++ * the console and is no longer allowed to go forward. In this case it must
++ * back out immediately and carefully. The buffer content is also no longer
++ * trusted since it no longer belongs to the calling context.
++ */
++static bool nbcon_context_can_proceed(struct nbcon_context *ctxt, struct nbcon_state *cur)
++{
++ unsigned int cpu = smp_processor_id();
++
++ /* Make sure this context still owns the console. */
++ if (!nbcon_owner_matches(cur, cpu, ctxt->prio))
++ return false;
++
++ /* The console owner can proceed if there is no waiter. */
++ if (cur->req_prio == NBCON_PRIO_NONE)
++ return true;
++
++ /*
++ * A console owner within an unsafe region is always allowed to
++ * proceed, even if there are waiters. It can perform a handover
++ * when exiting the unsafe region. Otherwise the waiter will
++ * need to perform an unsafe hostile takeover.
++ */
++ if (cur->unsafe)
++ return true;
++
++ /* Waiters always have higher priorities than owners. */
++ WARN_ON_ONCE(cur->req_prio <= cur->prio);
++
++ /*
++ * Having a safe point for take over and eventually a few
++ * duplicated characters or a full line is way better than a
++ * hostile takeover. Post processing can take care of the garbage.
++ * Release and hand over.
++ */
++ nbcon_context_release(ctxt);
++
++ /*
++ * It is not clear whether the waiter really took over ownership. The
++ * outermost callsite must make the final decision whether console
++ * ownership is needed for it to proceed. If yes, it must reacquire
++ * ownership (possibly hostile) before carefully proceeding.
++ *
++ * The calling context no longer owns the console so go back all the
++ * way instead of trying to implement reacquire heuristics in tons of
++ * places.
++ */
++ return false;
++}
++
++#define nbcon_context_enter_unsafe(c) __nbcon_context_update_unsafe(c, true)
++#define nbcon_context_exit_unsafe(c) __nbcon_context_update_unsafe(c, false)
++
++/**
++ * __nbcon_context_update_unsafe - Update the unsafe bit in @con->nbcon_state
++ * @ctxt: The nbcon context from nbcon_context_try_acquire()
++ * @unsafe: The new value for the unsafe bit
++ *
++ * Return: True if the unsafe state was updated and this context still
++ * owns the console. Otherwise false if ownership was handed
++ * over or taken.
++ *
++ * This function allows console owners to modify the unsafe status of the
++ * console.
++ *
++ * When this function returns false then the calling context no longer owns
++ * the console and is no longer allowed to go forward. In this case it must
++ * back out immediately and carefully. The buffer content is also no longer
++ * trusted since it no longer belongs to the calling context.
++ *
++ * Internal helper to avoid duplicated code.
++ */
++__maybe_unused
++static bool __nbcon_context_update_unsafe(struct nbcon_context *ctxt, bool unsafe)
++{
++ struct console *con = ctxt->console;
++ struct nbcon_state cur;
++ struct nbcon_state new;
++
++ nbcon_state_read(con, &cur);
++
++ do {
++ /*
++ * The unsafe bit must not be cleared if an
++ * unsafe hostile takeover has occurred.
++ */
++ if (!unsafe && cur.unsafe_takeover)
++ goto out;
++
++ if (!nbcon_context_can_proceed(ctxt, &cur))
++ return false;
++
++ new.atom = cur.atom;
++ new.unsafe = unsafe;
++ } while (!nbcon_state_try_cmpxchg(con, &cur, &new));
++
++ cur.atom = new.atom;
++out:
++ return nbcon_context_can_proceed(ctxt, &cur);
++}
++
++/**
+ * nbcon_alloc - Allocate buffers needed by the nbcon console
+ * @con: Console to allocate buffers for
+ *
diff --git a/debian/patches-rt/0080-printk-nbcon-Add-sequence-handling.patch b/debian/patches-rt/0080-printk-nbcon-Add-sequence-handling.patch
new file mode 100644
index 0000000000..3988d6124a
--- /dev/null
+++ b/debian/patches-rt/0080-printk-nbcon-Add-sequence-handling.patch
@@ -0,0 +1,311 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 16 Sep 2023 21:26:05 +0206
+Subject: [PATCH 080/134] printk: nbcon: Add sequence handling
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Add an atomic_long_t field @nbcon_seq to the console struct to
+store the sequence number for nbcon consoles. For nbcon consoles
+this will be used instead of the non-atomic @seq field. The new
+field allows for safe atomic sequence number updates without
+requiring any locking.
+
+On 64bit systems the new field stores the full sequence number.
+On 32bit systems the new field stores the lower 32 bits of the
+sequence number, which are expanded to 64bit as needed by
+folding the values based on the sequence numbers available in
+the ringbuffer.
+
+For 32bit systems, having a 32bit representation in the console
+is sufficient. If a console ever gets more than 2^31 records
+behind the ringbuffer then this is the least of the problems.
+
+Co-developed-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Thomas Gleixner (Intel) <tglx@linutronix.de>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20230916192007.608398-7-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/console.h | 4 +
+ kernel/printk/internal.h | 7 +++
+ kernel/printk/nbcon.c | 101 +++++++++++++++++++++++++++++++++++++++++++++++
+ kernel/printk/printk.c | 31 +++++++++++---
+ 4 files changed, 136 insertions(+), 7 deletions(-)
+
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -243,6 +243,7 @@ struct printk_buffers;
+ * might cause a system freeze when the console
+ * is used later.
+ * @pbufs: Pointer to the text buffer for this context
++ * @seq: The sequence number to print for this context
+ */
+ struct nbcon_context {
+ /* members set by caller */
+@@ -253,6 +254,7 @@ struct nbcon_context {
+
+ /* members set by acquire */
+ struct printk_buffers *pbufs;
++ u64 seq;
+ };
+
+ /**
+@@ -276,6 +278,7 @@ struct nbcon_context {
+ * @node: hlist node for the console list
+ *
+ * @nbcon_state: State for nbcon consoles
++ * @nbcon_seq: Sequence number of the next record for nbcon to print
+ * @pbufs: Pointer to nbcon private buffer
+ */
+ struct console {
+@@ -299,6 +302,7 @@ struct console {
+
+ /* nbcon console specific members */
+ atomic_t __private nbcon_state;
++ atomic_long_t __private nbcon_seq;
+ struct printk_buffers *pbufs;
+ };
+
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -4,6 +4,7 @@
+ */
+ #include <linux/percpu.h>
+ #include <linux/console.h>
++#include "printk_ringbuffer.h"
+
+ #if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
+ void __init printk_sysctl_init(void);
+@@ -42,6 +43,8 @@ enum printk_info_flags {
+ LOG_CONT = 8, /* text is a fragment of a continuation line */
+ };
+
++extern struct printk_ringbuffer *prb;
++
+ __printf(4, 0)
+ int vprintk_store(int facility, int level,
+ const struct dev_printk_info *dev_info,
+@@ -69,6 +72,8 @@ void defer_console_output(void);
+ u16 printk_parse_prefix(const char *text, int *level,
+ enum printk_info_flags *flags);
+
++u64 nbcon_seq_read(struct console *con);
++void nbcon_seq_force(struct console *con, u64 seq);
+ bool nbcon_alloc(struct console *con);
+ void nbcon_init(struct console *con);
+ void nbcon_free(struct console *con);
+@@ -88,6 +93,8 @@ void nbcon_free(struct console *con);
+ #define printk_safe_exit_irqrestore(flags) local_irq_restore(flags)
+
+ static inline bool printk_percpu_data_ready(void) { return false; }
++static inline u64 nbcon_seq_read(struct console *con) { return 0; }
++static inline void nbcon_seq_force(struct console *con, u64 seq) { }
+ static inline bool nbcon_alloc(struct console *con) { return false; }
+ static inline void nbcon_init(struct console *con) { }
+ static inline void nbcon_free(struct console *con) { }
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -140,6 +140,101 @@ static inline bool nbcon_state_try_cmpxc
+ return atomic_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_state), &cur->atom, new->atom);
+ }
+
++#ifdef CONFIG_64BIT
++
++#define __seq_to_nbcon_seq(seq) (seq)
++#define __nbcon_seq_to_seq(seq) (seq)
++
++#else /* CONFIG_64BIT */
++
++#define __seq_to_nbcon_seq(seq) ((u32)seq)
++
++static inline u64 __nbcon_seq_to_seq(u32 nbcon_seq)
++{
++ u64 seq;
++ u64 rb_next_seq;
++
++ /*
++ * The provided sequence is only the lower 32 bits of the ringbuffer
++ * sequence. It needs to be expanded to 64bit. Get the next sequence
++ * number from the ringbuffer and fold it.
++ *
++ * Having a 32bit representation in the console is sufficient.
++ * If a console ever gets more than 2^31 records behind
++ * the ringbuffer then this is the least of the problems.
++ *
++ * Also the access to the ring buffer is always safe.
++ */
++ rb_next_seq = prb_next_seq(prb);
++ seq = rb_next_seq - ((u32)rb_next_seq - nbcon_seq);
++
++ return seq;
++}
++
++#endif /* CONFIG_64BIT */
++
++/**
++ * nbcon_seq_read - Read the current console sequence
++ * @con: Console to read the sequence of
++ *
++ * Return: Sequence number of the next record to print on @con.
++ */
++u64 nbcon_seq_read(struct console *con)
++{
++ unsigned long nbcon_seq = atomic_long_read(&ACCESS_PRIVATE(con, nbcon_seq));
++
++ return __nbcon_seq_to_seq(nbcon_seq);
++}
++
++/**
++ * nbcon_seq_force - Force console sequence to a specific value
++ * @con: Console to work on
++ * @seq: Sequence number value to set
++ *
++ * Only to be used during init (before registration) or in extreme situations
++ * (such as panic with CONSOLE_REPLAY_ALL).
++ */
++void nbcon_seq_force(struct console *con, u64 seq)
++{
++ /*
++ * If the specified record no longer exists, the oldest available record
++ * is chosen. This is especially important on 32bit systems because only
++ * the lower 32 bits of the sequence number are stored. The upper 32 bits
++ * are derived from the sequence numbers available in the ringbuffer.
++ */
++ u64 valid_seq = max_t(u64, seq, prb_first_valid_seq(prb));
++
++ atomic_long_set(&ACCESS_PRIVATE(con, nbcon_seq), __seq_to_nbcon_seq(valid_seq));
++
++ /* Clear con->seq since nbcon consoles use con->nbcon_seq instead. */
++ con->seq = 0;
++}
++
++/**
++ * nbcon_seq_try_update - Try to update the console sequence number
++ * @ctxt: Pointer to an acquire context that contains
++ * all information about the acquire mode
++ * @new_seq: The new sequence number to set
++ *
++ * @ctxt->seq is updated to the new value of @con::nbcon_seq (expanded to
++ * the 64bit value). This could be a different value than @new_seq if
++ * nbcon_seq_force() was used or the current context no longer owns the
++ * console. In the later case, it will stop printing anyway.
++ */
++__maybe_unused
++static void nbcon_seq_try_update(struct nbcon_context *ctxt, u64 new_seq)
++{
++ unsigned long nbcon_seq = __seq_to_nbcon_seq(ctxt->seq);
++ struct console *con = ctxt->console;
++
++ if (atomic_long_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_seq), &nbcon_seq,
++ __seq_to_nbcon_seq(new_seq))) {
++ ctxt->seq = new_seq;
++ } else {
++ ctxt->seq = nbcon_seq_read(con);
++ }
++}
++
+ /**
+ * nbcon_context_try_acquire_direct - Try to acquire directly
+ * @ctxt: The context of the caller
+@@ -510,6 +605,9 @@ static bool nbcon_context_try_acquire(st
+ else
+ ctxt->pbufs = con->pbufs;
+
++ /* Set the record sequence for this context to print. */
++ ctxt->seq = nbcon_seq_read(ctxt->console);
++
+ return true;
+ }
+
+@@ -722,6 +820,8 @@ bool nbcon_alloc(struct console *con)
+ *
+ * nbcon_alloc() *must* be called and succeed before this function
+ * is called.
++ *
++ * This function expects that the legacy @con->seq has been set.
+ */
+ void nbcon_init(struct console *con)
+ {
+@@ -730,6 +830,7 @@ void nbcon_init(struct console *con)
+ /* nbcon_alloc() must have been called and successful! */
+ BUG_ON(!con->pbufs);
+
++ nbcon_seq_force(con, con->seq);
+ nbcon_state_set(con, &state);
+ }
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -494,7 +494,7 @@ static u32 log_buf_len = __LOG_BUF_LEN;
+
+ static struct printk_ringbuffer printk_rb_dynamic;
+
+-static struct printk_ringbuffer *prb = &printk_rb_static;
++struct printk_ringbuffer *prb = &printk_rb_static;
+
+ /*
+ * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before
+@@ -3168,6 +3168,7 @@ void console_flush_on_panic(enum con_flu
+
+ if (mode == CONSOLE_REPLAY_ALL) {
+ struct console *c;
++ short flags;
+ int cookie;
+ u64 seq;
+
+@@ -3175,11 +3176,17 @@ void console_flush_on_panic(enum con_flu
+
+ cookie = console_srcu_read_lock();
+ for_each_console_srcu(c) {
+- /*
+- * This is an unsynchronized assignment, but the
+- * kernel is in "hope and pray" mode anyway.
+- */
+- c->seq = seq;
++ flags = console_srcu_read_flags(c);
++
++ if (flags & CON_NBCON) {
++ nbcon_seq_force(c, seq);
++ } else {
++ /*
++ * This is an unsynchronized assignment. On
++ * panic legacy consoles are only best effort.
++ */
++ c->seq = seq;
++ }
+ }
+ console_srcu_read_unlock(cookie);
+ }
+@@ -3750,6 +3757,7 @@ static bool __pr_flush(struct console *c
+ struct console *c;
+ u64 last_diff = 0;
+ u64 printk_seq;
++ short flags;
+ int cookie;
+ u64 diff;
+ u64 seq;
+@@ -3777,6 +3785,9 @@ static bool __pr_flush(struct console *c
+ for_each_console_srcu(c) {
+ if (con && con != c)
+ continue;
++
++ flags = console_srcu_read_flags(c);
++
+ /*
+ * If consoles are not usable, it cannot be expected
+ * that they make forward progress, so only increment
+@@ -3784,7 +3795,13 @@ static bool __pr_flush(struct console *c
+ */
+ if (!console_is_usable(c))
+ continue;
+- printk_seq = c->seq;
++
++ if (flags & CON_NBCON) {
++ printk_seq = nbcon_seq_read(c);
++ } else {
++ printk_seq = c->seq;
++ }
++
+ if (printk_seq < seq)
+ diff += seq - printk_seq;
+ }
diff --git a/debian/patches-rt/0081-printk-nbcon-Add-emit-function-and-callback-function.patch b/debian/patches-rt/0081-printk-nbcon-Add-emit-function-and-callback-function.patch
new file mode 100644
index 0000000000..ab32a05b19
--- /dev/null
+++ b/debian/patches-rt/0081-printk-nbcon-Add-emit-function-and-callback-function.patch
@@ -0,0 +1,262 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 16 Sep 2023 21:26:06 +0206
+Subject: [PATCH 081/134] printk: nbcon: Add emit function and callback
+ function for atomic printing
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Implement an emit function for nbcon consoles to output printk
+messages. It utilizes the lockless printk_get_next_message() and
+console_prepend_dropped() functions to retrieve/build the output
+message. The emit function includes the required safety points to
+check for handover/takeover and calls a new write_atomic callback
+of the console driver to output the message. It also includes
+proper handling for updating the nbcon console sequence number.
+
+A new nbcon_write_context struct is introduced. This is provided
+to the write_atomic callback and includes only the information
+necessary for performing atomic writes.
+
+Co-developed-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Thomas Gleixner (Intel) <tglx@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20230916192007.608398-8-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/console.h | 21 +++++++++
+ kernel/printk/internal.h | 6 ++
+ kernel/printk/nbcon.c | 106 ++++++++++++++++++++++++++++++++++++++++++++++-
+ kernel/printk/printk.c | 9 +--
+ 4 files changed, 134 insertions(+), 8 deletions(-)
+
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -242,6 +242,7 @@ struct printk_buffers;
+ * be used only with NBCON_PRIO_PANIC @prio. It
+ * might cause a system freeze when the console
+ * is used later.
++ * @backlog: Ringbuffer has pending records
+ * @pbufs: Pointer to the text buffer for this context
+ * @seq: The sequence number to print for this context
+ */
+@@ -252,12 +253,29 @@ struct nbcon_context {
+ enum nbcon_prio prio;
+ unsigned int allow_unsafe_takeover : 1;
+
++ /* members set by emit */
++ unsigned int backlog : 1;
++
+ /* members set by acquire */
+ struct printk_buffers *pbufs;
+ u64 seq;
+ };
+
+ /**
++ * struct nbcon_write_context - Context handed to the nbcon write callbacks
++ * @ctxt: The core console context
++ * @outbuf: Pointer to the text buffer for output
++ * @len: Length to write
++ * @unsafe_takeover: If a hostile takeover in an unsafe state has occurred
++ */
++struct nbcon_write_context {
++ struct nbcon_context __private ctxt;
++ char *outbuf;
++ unsigned int len;
++ bool unsafe_takeover;
++};
++
++/**
+ * struct console - The console descriptor structure
+ * @name: The name of the console driver
+ * @write: Write callback to output messages (Optional)
+@@ -277,6 +295,7 @@ struct nbcon_context {
+ * @data: Driver private data
+ * @node: hlist node for the console list
+ *
++ * @write_atomic: Write callback for atomic context
+ * @nbcon_state: State for nbcon consoles
+ * @nbcon_seq: Sequence number of the next record for nbcon to print
+ * @pbufs: Pointer to nbcon private buffer
+@@ -301,6 +320,8 @@ struct console {
+ struct hlist_node node;
+
+ /* nbcon console specific members */
++ bool (*write_atomic)(struct console *con,
++ struct nbcon_write_context *wctxt);
+ atomic_t __private nbcon_state;
+ atomic_long_t __private nbcon_seq;
+ struct printk_buffers *pbufs;
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -130,3 +130,9 @@ struct printk_message {
+ };
+
+ bool other_cpu_in_panic(void);
++bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
++ bool is_extended, bool may_supress);
++
++#ifdef CONFIG_PRINTK
++void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped);
++#endif
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -221,7 +221,6 @@ void nbcon_seq_force(struct console *con
+ * nbcon_seq_force() was used or the current context no longer owns the
+ * console. In the later case, it will stop printing anyway.
+ */
+-__maybe_unused
+ static void nbcon_seq_try_update(struct nbcon_context *ctxt, u64 new_seq)
+ {
+ unsigned long nbcon_seq = __seq_to_nbcon_seq(ctxt->seq);
+@@ -755,7 +754,6 @@ static bool nbcon_context_can_proceed(st
+ *
+ * Internal helper to avoid duplicated code.
+ */
+-__maybe_unused
+ static bool __nbcon_context_update_unsafe(struct nbcon_context *ctxt, bool unsafe)
+ {
+ struct console *con = ctxt->console;
+@@ -785,6 +783,110 @@ static bool __nbcon_context_update_unsaf
+ }
+
+ /**
++ * nbcon_emit_next_record - Emit a record in the acquired context
++ * @wctxt: The write context that will be handed to the write function
++ *
++ * Return: True if this context still owns the console. False if
++ * ownership was handed over or taken.
++ *
++ * When this function returns false then the calling context no longer owns
++ * the console and is no longer allowed to go forward. In this case it must
++ * back out immediately and carefully. The buffer content is also no longer
++ * trusted since it no longer belongs to the calling context. If the caller
++ * wants to do more it must reacquire the console first.
++ *
++ * When true is returned, @wctxt->ctxt.backlog indicates whether there are
++ * still records pending in the ringbuffer,
++ */
++__maybe_unused
++static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt)
++{
++ struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
++ struct console *con = ctxt->console;
++ bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED;
++ struct printk_message pmsg = {
++ .pbufs = ctxt->pbufs,
++ };
++ unsigned long con_dropped;
++ struct nbcon_state cur;
++ unsigned long dropped;
++ bool done;
++
++ /*
++ * The printk buffers are filled within an unsafe section. This
++ * prevents NBCON_PRIO_NORMAL and NBCON_PRIO_EMERGENCY from
++ * clobbering each other.
++ */
++
++ if (!nbcon_context_enter_unsafe(ctxt))
++ return false;
++
++ ctxt->backlog = printk_get_next_message(&pmsg, ctxt->seq, is_extended, true);
++ if (!ctxt->backlog)
++ return nbcon_context_exit_unsafe(ctxt);
++
++ /*
++ * @con->dropped is not protected in case of an unsafe hostile
++ * takeover. In that situation the update can be racy so
++ * annotate it accordingly.
++ */
++ con_dropped = data_race(READ_ONCE(con->dropped));
++
++ dropped = con_dropped + pmsg.dropped;
++ if (dropped && !is_extended)
++ console_prepend_dropped(&pmsg, dropped);
++
++ if (!nbcon_context_exit_unsafe(ctxt))
++ return false;
++
++ /* For skipped records just update seq/dropped in @con. */
++ if (pmsg.outbuf_len == 0)
++ goto update_con;
++
++ /* Initialize the write context for driver callbacks. */
++ wctxt->outbuf = &pmsg.pbufs->outbuf[0];
++ wctxt->len = pmsg.outbuf_len;
++ nbcon_state_read(con, &cur);
++ wctxt->unsafe_takeover = cur.unsafe_takeover;
++
++ if (con->write_atomic) {
++ done = con->write_atomic(con, wctxt);
++ } else {
++ nbcon_context_release(ctxt);
++ WARN_ON_ONCE(1);
++ done = false;
++ }
++
++ /* If not done, the emit was aborted. */
++ if (!done)
++ return false;
++
++ /*
++ * Since any dropped message was successfully output, reset the
++ * dropped count for the console.
++ */
++ dropped = 0;
++update_con:
++ /*
++ * The dropped count and the sequence number are updated within an
++ * unsafe section. This limits update races to the panic context and
++ * allows the panic context to win.
++ */
++
++ if (!nbcon_context_enter_unsafe(ctxt))
++ return false;
++
++ if (dropped != con_dropped) {
++ /* Counterpart to the READ_ONCE() above. */
++ WRITE_ONCE(con->dropped, dropped);
++ }
++
++ nbcon_seq_try_update(ctxt, pmsg.seq + 1);
++
++ return nbcon_context_exit_unsafe(ctxt);
++}
++
++/**
+ * nbcon_alloc - Allocate buffers needed by the nbcon console
+ * @con: Console to allocate buffers for
+ *
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -698,9 +698,6 @@ static ssize_t msg_print_ext_body(char *
+ return len;
+ }
+
+-static bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
+- bool is_extended, bool may_supress);
+-
+ /* /dev/kmsg - userspace message inject/listen interface */
+ struct devkmsg_user {
+ atomic64_t seq;
+@@ -2733,7 +2730,7 @@ static void __console_unlock(void)
+ * If @pmsg->pbufs->outbuf is modified, @pmsg->outbuf_len is updated.
+ */
+ #ifdef CONFIG_PRINTK
+-static void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped)
++void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped)
+ {
+ struct printk_buffers *pbufs = pmsg->pbufs;
+ const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
+@@ -2787,8 +2784,8 @@ static void console_prepend_dropped(stru
+ * of @pmsg are valid. (See the documentation of struct printk_message
+ * for information about the @pmsg fields.)
+ */
+-static bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
+- bool is_extended, bool may_suppress)
++bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
++ bool is_extended, bool may_suppress)
+ {
+ static int panic_console_dropped;
+
diff --git a/debian/patches-rt/0082-printk-nbcon-Allow-drivers-to-mark-unsafe-regions-an.patch b/debian/patches-rt/0082-printk-nbcon-Allow-drivers-to-mark-unsafe-regions-an.patch
new file mode 100644
index 0000000000..16a07a839e
--- /dev/null
+++ b/debian/patches-rt/0082-printk-nbcon-Allow-drivers-to-mark-unsafe-regions-an.patch
@@ -0,0 +1,136 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 16 Sep 2023 21:26:07 +0206
+Subject: [PATCH 082/134] printk: nbcon: Allow drivers to mark unsafe regions
+ and check state
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+For the write_atomic callback, the console driver may have unsafe
+regions that need to be appropriately marked. Provide functions
+that accept the nbcon_write_context struct to allow for the driver
+to enter and exit unsafe regions.
+
+Also provide a function for drivers to check if they are still the
+owner of the console.
+
+Co-developed-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Thomas Gleixner (Intel) <tglx@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20230916192007.608398-9-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/console.h | 10 ++++++
+ kernel/printk/nbcon.c | 75 ++++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 85 insertions(+)
+
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -451,6 +451,16 @@ static inline bool console_is_registered
+ lockdep_assert_console_list_lock_held(); \
+ hlist_for_each_entry(con, &console_list, node)
+
++#ifdef CONFIG_PRINTK
++extern bool nbcon_can_proceed(struct nbcon_write_context *wctxt);
++extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt);
++extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt);
++#else
++static inline bool nbcon_can_proceed(struct nbcon_write_context *wctxt) { return false; }
++static inline bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) { return false; }
++static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; }
++#endif
++
+ extern int console_set_on_cmdline;
+ extern struct console *early_console;
+
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -732,6 +732,41 @@ static bool nbcon_context_can_proceed(st
+ return false;
+ }
+
++/**
++ * nbcon_can_proceed - Check whether ownership can proceed
++ * @wctxt: The write context that was handed to the write function
++ *
++ * Return: True if this context still owns the console. False if
++ * ownership was handed over or taken.
++ *
++ * It is used in nbcon_enter_unsafe() to make sure that it still owns the
++ * lock. Also it is used in nbcon_exit_unsafe() to eventually free the lock
++ * for a higher priority context which asked for the friendly handover.
++ *
++ * It can be called inside an unsafe section when the console is just
++ * temporary in safe state instead of exiting and entering the unsafe state.
++ *
++ * Also it can be called in the safe context before doing an expensive safe
++ * operation. It does not make sense to do the operation when a higher
++ * priority context took the lock.
++ *
++ * When this function returns false then the calling context no longer owns
++ * the console and is no longer allowed to go forward. In this case it must
++ * back out immediately and carefully. The buffer content is also no longer
++ * trusted since it no longer belongs to the calling context.
++ */
++bool nbcon_can_proceed(struct nbcon_write_context *wctxt)
++{
++ struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
++ struct console *con = ctxt->console;
++ struct nbcon_state cur;
++
++ nbcon_state_read(con, &cur);
++
++ return nbcon_context_can_proceed(ctxt, &cur);
++}
++EXPORT_SYMBOL_GPL(nbcon_can_proceed);
++
+ #define nbcon_context_enter_unsafe(c) __nbcon_context_update_unsafe(c, true)
+ #define nbcon_context_exit_unsafe(c) __nbcon_context_update_unsafe(c, false)
+
+@@ -783,6 +818,46 @@ static bool __nbcon_context_update_unsaf
+ }
+
+ /**
++ * nbcon_enter_unsafe - Enter an unsafe region in the driver
++ * @wctxt: The write context that was handed to the write function
++ *
++ * Return: True if this context still owns the console. False if
++ * ownership was handed over or taken.
++ *
++ * When this function returns false then the calling context no longer owns
++ * the console and is no longer allowed to go forward. In this case it must
++ * back out immediately and carefully. The buffer content is also no longer
++ * trusted since it no longer belongs to the calling context.
++ */
++bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt)
++{
++ struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
++
++ return nbcon_context_enter_unsafe(ctxt);
++}
++EXPORT_SYMBOL_GPL(nbcon_enter_unsafe);
++
++/**
++ * nbcon_exit_unsafe - Exit an unsafe region in the driver
++ * @wctxt: The write context that was handed to the write function
++ *
++ * Return: True if this context still owns the console. False if
++ * ownership was handed over or taken.
++ *
++ * When this function returns false then the calling context no longer owns
++ * the console and is no longer allowed to go forward. In this case it must
++ * back out immediately and carefully. The buffer content is also no longer
++ * trusted since it no longer belongs to the calling context.
++ */
++bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt)
++{
++ struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
++
++ return nbcon_context_exit_unsafe(ctxt);
++}
++EXPORT_SYMBOL_GPL(nbcon_exit_unsafe);
++
++/**
+ * nbcon_emit_next_record - Emit a record in the acquired context
+ * @wctxt: The write context that will be handed to the write function
+ *
diff --git a/debian/patches-rt/0083-printk-fix-illegal-pbufs-access-for-CONFIG_PRINTK.patch b/debian/patches-rt/0083-printk-fix-illegal-pbufs-access-for-CONFIG_PRINTK.patch
new file mode 100644
index 0000000000..80de1c716d
--- /dev/null
+++ b/debian/patches-rt/0083-printk-fix-illegal-pbufs-access-for-CONFIG_PRINTK.patch
@@ -0,0 +1,134 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Wed, 20 Sep 2023 17:58:38 +0206
+Subject: [PATCH 083/134] printk: fix illegal pbufs access for !CONFIG_PRINTK
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+When CONFIG_PRINTK is not set, PRINTK_MESSAGE_MAX is 0. This
+leads to a zero-sized array @outbuf in @printk_shared_pbufs. In
+console_flush_all() a pointer to the first element of the array
+is assigned with:
+
+ char *outbuf = &printk_shared_pbufs.outbuf[0];
+
+For !CONFIG_PRINTK this leads to a compiler warning:
+
+ warning: array subscript 0 is outside array bounds of
+ 'char[0]' [-Warray-bounds]
+
+This is not really dangerous because printk_get_next_message()
+always returns false for !CONFIG_PRINTK, which leads to @outbuf
+never being used. However, it makes no sense to even compile
+these functions for !CONFIG_PRINTK.
+
+Extend the existing '#ifdef CONFIG_PRINTK' block to contain
+the formatting and emitting functions since these have no
+purpose in !CONFIG_PRINTK. This also allows removing several
+more !CONFIG_PRINTK dummies as well as moving
+@suppress_panic_printk into a CONFIG_PRINTK block.
+
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202309201724.M9BMAQIh-lkp@intel.com/
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20230920155238.670439-1-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 44 ++++++++++++++++++--------------------------
+ 1 file changed, 18 insertions(+), 26 deletions(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -102,12 +102,6 @@ DEFINE_STATIC_SRCU(console_srcu);
+ */
+ int __read_mostly suppress_printk;
+
+-/*
+- * During panic, heavy printk by other CPUs can delay the
+- * panic and risk deadlock on console resources.
+- */
+-static int __read_mostly suppress_panic_printk;
+-
+ #ifdef CONFIG_LOCKDEP
+ static struct lockdep_map console_lock_dep_map = {
+ .name = "console_lock"
+@@ -445,6 +439,12 @@ static int console_msg_format = MSG_FORM
+ static DEFINE_MUTEX(syslog_lock);
+
+ #ifdef CONFIG_PRINTK
++/*
++ * During panic, heavy printk by other CPUs can delay the
++ * panic and risk deadlock on console resources.
++ */
++static int __read_mostly suppress_panic_printk;
++
+ DECLARE_WAIT_QUEUE_HEAD(log_wait);
+ /* All 3 protected by @syslog_lock. */
+ /* the next printk record to read by syslog(READ) or /proc/kmsg */
+@@ -2346,22 +2346,6 @@ static bool __pr_flush(struct console *c
+
+ static u64 syslog_seq;
+
+-static size_t record_print_text(const struct printk_record *r,
+- bool syslog, bool time)
+-{
+- return 0;
+-}
+-static ssize_t info_print_ext_header(char *buf, size_t size,
+- struct printk_info *info)
+-{
+- return 0;
+-}
+-static ssize_t msg_print_ext_body(char *buf, size_t size,
+- char *text, size_t text_len,
+- struct dev_printk_info *dev_info) { return 0; }
+-static void console_lock_spinning_enable(void) { }
+-static int console_lock_spinning_disable_and_check(int cookie) { return 0; }
+-static bool suppress_message_printing(int level) { return false; }
+ static bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; }
+ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
+
+@@ -2715,6 +2699,8 @@ static void __console_unlock(void)
+ up_console_sem();
+ }
+
++#ifdef CONFIG_PRINTK
++
+ /*
+ * Prepend the message in @pmsg->pbufs->outbuf with a "dropped message". This
+ * is achieved by shifting the existing message over and inserting the dropped
+@@ -2729,7 +2715,6 @@ static void __console_unlock(void)
+ *
+ * If @pmsg->pbufs->outbuf is modified, @pmsg->outbuf_len is updated.
+ */
+-#ifdef CONFIG_PRINTK
+ void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped)
+ {
+ struct printk_buffers *pbufs = pmsg->pbufs;
+@@ -2761,9 +2746,6 @@ void console_prepend_dropped(struct prin
+ memcpy(outbuf, scratchbuf, len);
+ pmsg->outbuf_len += len;
+ }
+-#else
+-#define console_prepend_dropped(pmsg, dropped)
+-#endif /* CONFIG_PRINTK */
+
+ /*
+ * Read and format the specified record (or a later record if the specified
+@@ -2921,6 +2903,16 @@ static bool console_emit_next_record(str
+ return true;
+ }
+
++#else
++
++static bool console_emit_next_record(struct console *con, bool *handover, int cookie)
++{
++ *handover = false;
++ return false;
++}
++
++#endif /* CONFIG_PRINTK */
++
+ /*
+ * Print out all remaining records to all consoles.
+ *
diff --git a/debian/patches-rt/0084-printk-Reduce-pr_flush-pooling-time.patch b/debian/patches-rt/0084-printk-Reduce-pr_flush-pooling-time.patch
new file mode 100644
index 0000000000..09d1d343f4
--- /dev/null
+++ b/debian/patches-rt/0084-printk-Reduce-pr_flush-pooling-time.patch
@@ -0,0 +1,102 @@
+From: Petr Mladek <pmladek@suse.com>
+Date: Fri, 6 Oct 2023 10:21:51 +0200
+Subject: [PATCH 084/134] printk: Reduce pr_flush() pooling time
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+pr_flush() does not guarantee that all messages would really get flushed
+to the console. The best it could do is to wait with a given timeout.[*]
+
+The current interval 100ms for checking the progress might seem too
+long in some situations. For example, such delays are not appreciated
+during suspend and resume especially when the consoles have been flushed
+"long" time before the check.
+
+On the other hand, the sleeping wait might be useful in other situations.
+Especially, it would allow flushing the messages using printk kthreads
+on the same CPU[*].
+
+Use msleep(1) as a compromise.
+
+Also measure the time using jiffies. msleep() does not guarantee
+precise wakeup after the given delay. It might be much longer,
+especially for times < 20s. See Documentation/timers/timers-howto.rst
+for more details.
+
+Note that msecs_to_jiffies() already translates a negative value into
+an infinite timeout.
+
+[*] console_unlock() does not guarantee flushing the consoles since
+ the commit dbdda842fe96f893 ("printk: Add console owner and waiter
+ logic to load balance console writes").
+
+ It would be possible to guarantee it another way. For example,
+ the spinning might be enabled only when the console_lock has been
+ taken via console_trylock().
+
+ But the load balancing is helpful. And more importantly, the flush
+ with a timeout has been added as a preparation step for introducing
+ printk kthreads.
+
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Reviewed-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20231006082151.6969-3-pmladek@suse.com
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -3742,7 +3742,8 @@ late_initcall(printk_late_init);
+ /* If @con is specified, only wait for that console. Otherwise wait for all. */
+ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress)
+ {
+- int remaining = timeout_ms;
++ unsigned long timeout_jiffies = msecs_to_jiffies(timeout_ms);
++ unsigned long remaining_jiffies = timeout_jiffies;
+ struct console *c;
+ u64 last_diff = 0;
+ u64 printk_seq;
+@@ -3760,6 +3761,9 @@ static bool __pr_flush(struct console *c
+ console_unlock();
+
+ for (;;) {
++ unsigned long begin_jiffies;
++ unsigned long slept_jiffies;
++
+ diff = 0;
+
+ /*
+@@ -3797,24 +3801,20 @@ static bool __pr_flush(struct console *c
+ console_srcu_read_unlock(cookie);
+
+ if (diff != last_diff && reset_on_progress)
+- remaining = timeout_ms;
++ remaining_jiffies = timeout_jiffies;
+
+ console_unlock();
+
+ /* Note: @diff is 0 if there are no usable consoles. */
+- if (diff == 0 || remaining == 0)
++ if (diff == 0 || remaining_jiffies == 0)
+ break;
+
+- if (remaining < 0) {
+- /* no timeout limit */
+- msleep(100);
+- } else if (remaining < 100) {
+- msleep(remaining);
+- remaining = 0;
+- } else {
+- msleep(100);
+- remaining -= 100;
+- }
++ /* msleep(1) might sleep much longer. Check time by jiffies. */
++ begin_jiffies = jiffies;
++ msleep(1);
++ slept_jiffies = jiffies - begin_jiffies;
++
++ remaining_jiffies -= min(slept_jiffies, remaining_jiffies);
+
+ last_diff = diff;
+ }
diff --git a/debian/patches-rt/0085-printk-nbcon-Relocate-32bit-seq-macros.patch b/debian/patches-rt/0085-printk-nbcon-Relocate-32bit-seq-macros.patch
new file mode 100644
index 0000000000..6d1cf3686b
--- /dev/null
+++ b/debian/patches-rt/0085-printk-nbcon-Relocate-32bit-seq-macros.patch
@@ -0,0 +1,141 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Wed, 6 Dec 2023 12:01:56 +0000
+Subject: [PATCH 085/134] printk: nbcon: Relocate 32bit seq macros
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+The macros __seq_to_nbcon_seq() and __nbcon_seq_to_seq() are
+used to provide support for atomic handling of sequence numbers
+on 32bit systems. Until now this was only used by nbcon.c,
+which is why they were located in nbcon.c and include nbcon in
+the name.
+
+In a follow-up commit this functionality is also needed by
+printk_ringbuffer. Rather than duplicating the functionality,
+relocate the macros to printk_ringbuffer.h.
+
+Also, since the macros will be no longer nbcon-specific, rename
+them to __u64seq_to_ulseq() and __ulseq_to_u64seq().
+
+This does not result in any functional change.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/nbcon.c | 41 +++-----------------------------------
+ kernel/printk/printk_ringbuffer.h | 33 ++++++++++++++++++++++++++++++
+ 2 files changed, 37 insertions(+), 37 deletions(-)
+
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -140,39 +140,6 @@ static inline bool nbcon_state_try_cmpxc
+ return atomic_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_state), &cur->atom, new->atom);
+ }
+
+-#ifdef CONFIG_64BIT
+-
+-#define __seq_to_nbcon_seq(seq) (seq)
+-#define __nbcon_seq_to_seq(seq) (seq)
+-
+-#else /* CONFIG_64BIT */
+-
+-#define __seq_to_nbcon_seq(seq) ((u32)seq)
+-
+-static inline u64 __nbcon_seq_to_seq(u32 nbcon_seq)
+-{
+- u64 seq;
+- u64 rb_next_seq;
+-
+- /*
+- * The provided sequence is only the lower 32 bits of the ringbuffer
+- * sequence. It needs to be expanded to 64bit. Get the next sequence
+- * number from the ringbuffer and fold it.
+- *
+- * Having a 32bit representation in the console is sufficient.
+- * If a console ever gets more than 2^31 records behind
+- * the ringbuffer then this is the least of the problems.
+- *
+- * Also the access to the ring buffer is always safe.
+- */
+- rb_next_seq = prb_next_seq(prb);
+- seq = rb_next_seq - ((u32)rb_next_seq - nbcon_seq);
+-
+- return seq;
+-}
+-
+-#endif /* CONFIG_64BIT */
+-
+ /**
+ * nbcon_seq_read - Read the current console sequence
+ * @con: Console to read the sequence of
+@@ -183,7 +150,7 @@ u64 nbcon_seq_read(struct console *con)
+ {
+ unsigned long nbcon_seq = atomic_long_read(&ACCESS_PRIVATE(con, nbcon_seq));
+
+- return __nbcon_seq_to_seq(nbcon_seq);
++ return __ulseq_to_u64seq(prb, nbcon_seq);
+ }
+
+ /**
+@@ -204,7 +171,7 @@ void nbcon_seq_force(struct console *con
+ */
+ u64 valid_seq = max_t(u64, seq, prb_first_valid_seq(prb));
+
+- atomic_long_set(&ACCESS_PRIVATE(con, nbcon_seq), __seq_to_nbcon_seq(valid_seq));
++ atomic_long_set(&ACCESS_PRIVATE(con, nbcon_seq), __u64seq_to_ulseq(valid_seq));
+
+ /* Clear con->seq since nbcon consoles use con->nbcon_seq instead. */
+ con->seq = 0;
+@@ -223,11 +190,11 @@ void nbcon_seq_force(struct console *con
+ */
+ static void nbcon_seq_try_update(struct nbcon_context *ctxt, u64 new_seq)
+ {
+- unsigned long nbcon_seq = __seq_to_nbcon_seq(ctxt->seq);
++ unsigned long nbcon_seq = __u64seq_to_ulseq(ctxt->seq);
+ struct console *con = ctxt->console;
+
+ if (atomic_long_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_seq), &nbcon_seq,
+- __seq_to_nbcon_seq(new_seq))) {
++ __u64seq_to_ulseq(new_seq))) {
+ ctxt->seq = new_seq;
+ } else {
+ ctxt->seq = nbcon_seq_read(con);
+--- a/kernel/printk/printk_ringbuffer.h
++++ b/kernel/printk/printk_ringbuffer.h
+@@ -381,4 +381,37 @@ bool prb_read_valid_info(struct printk_r
+ u64 prb_first_valid_seq(struct printk_ringbuffer *rb);
+ u64 prb_next_seq(struct printk_ringbuffer *rb);
+
++#ifdef CONFIG_64BIT
++
++#define __u64seq_to_ulseq(u64seq) (u64seq)
++#define __ulseq_to_u64seq(rb, ulseq) (ulseq)
++
++#else /* CONFIG_64BIT */
++
++#define __u64seq_to_ulseq(u64seq) ((u32)u64seq)
++
++static inline u64 __ulseq_to_u64seq(struct printk_ringbuffer *rb, u32 ulseq)
++{
++ u64 seq;
++ u64 rb_next_seq;
++
++ /*
++ * The provided sequence is only the lower 32 bits of the ringbuffer
++ * sequence. It needs to be expanded to 64bit. Get the next sequence
++ * number from the ringbuffer and fold it.
++ *
++ * Having a 32bit representation in the console is sufficient.
++ * If a console ever gets more than 2^31 records behind
++ * the ringbuffer then this is the least of the problems.
++ *
++ * Also the access to the ring buffer is always safe.
++ */
++ rb_next_seq = prb_next_seq(rb);
++ seq = rb_next_seq - ((u32)rb_next_seq - ulseq);
++
++ return seq;
++}
++
++#endif /* CONFIG_64BIT */
++
+ #endif /* _KERNEL_PRINTK_RINGBUFFER_H */
diff --git a/debian/patches-rt/0086-printk-Adjust-mapping-for-32bit-seq-macros.patch b/debian/patches-rt/0086-printk-Adjust-mapping-for-32bit-seq-macros.patch
new file mode 100644
index 0000000000..d6ff341f46
--- /dev/null
+++ b/debian/patches-rt/0086-printk-Adjust-mapping-for-32bit-seq-macros.patch
@@ -0,0 +1,71 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 7 Dec 2023 14:15:15 +0000
+Subject: [PATCH 086/134] printk: Adjust mapping for 32bit seq macros
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Note: This change only applies to 32bit architectures. On 64bit
+ architectures the macros are NOPs.
+
+__ulseq_to_u64seq() computes the upper 32 bits of the passed
+argument value (@ulseq). The upper bits are derived from a base
+value (@rb_next_seq) in a way that assumes @ulseq represents a
+64bit number that is less than or equal to @rb_next_seq.
+
+Until now this mapping has been correct for all call sites. However,
+in a follow-up commit, values of @ulseq will be passed in that are
+higher than the base value. This requires a change to how the 32bit
+value is mapped to a 64bit sequence number.
+
+Rather than mapping @ulseq such that the base value is the end of a
+32bit block, map @ulseq such that the base value is in the middle of
+a 32bit block. This allows supporting 31 bits before and after the
+base value, which is deemed acceptable for the console sequence
+number during runtime.
+
+Here is an example to illustrate the previous and new mappings.
+
+For a base value (@rb_next_seq) of 2 2000 0000...
+
+Before this change the range of possible return values was:
+
+1 2000 0001 to 2 2000 0000
+
+__ulseq_to_u64seq(1fff ffff) => 2 1fff ffff
+__ulseq_to_u64seq(2000 0000) => 2 2000 0000
+__ulseq_to_u64seq(2000 0001) => 1 2000 0001
+__ulseq_to_u64seq(9fff ffff) => 1 9fff ffff
+__ulseq_to_u64seq(a000 0000) => 1 a000 0000
+__ulseq_to_u64seq(a000 0001) => 1 a000 0001
+
+After this change the range of possible return values are:
+1 a000 0001 to 2 a000 0000
+
+__ulseq_to_u64seq(1fff ffff) => 2 1fff ffff
+__ulseq_to_u64seq(2000 0000) => 2 2000 0000
+__ulseq_to_u64seq(2000 0001) => 2 2000 0001
+__ulseq_to_u64seq(9fff ffff) => 2 9fff ffff
+__ulseq_to_u64seq(a000 0000) => 2 a000 0000
+__ulseq_to_u64seq(a000 0001) => 1 a000 0001
+
+[ john.ogness: Rewrite commit message. ]
+
+Reported-by: Francesco Dolcini <francesco@dolcini.it>
+Reported-by: kernel test robot <oliver.sang@intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk_ringbuffer.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/printk/printk_ringbuffer.h
++++ b/kernel/printk/printk_ringbuffer.h
+@@ -407,7 +407,7 @@ static inline u64 __ulseq_to_u64seq(stru
+ * Also the access to the ring buffer is always safe.
+ */
+ rb_next_seq = prb_next_seq(rb);
+- seq = rb_next_seq - ((u32)rb_next_seq - ulseq);
++ seq = rb_next_seq - (s32)((u32)rb_next_seq - ulseq);
+
+ return seq;
+ }
diff --git a/debian/patches-rt/0087-printk-Use-prb_first_seq-as-base-for-32bit-seq-macro.patch b/debian/patches-rt/0087-printk-Use-prb_first_seq-as-base-for-32bit-seq-macro.patch
new file mode 100644
index 0000000000..2ed6d52b51
--- /dev/null
+++ b/debian/patches-rt/0087-printk-Use-prb_first_seq-as-base-for-32bit-seq-macro.patch
@@ -0,0 +1,71 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Wed, 22 Nov 2023 16:13:37 +0000
+Subject: [PATCH 087/134] printk: Use prb_first_seq() as base for 32bit seq
+ macros
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Note: This change only applies to 32bit architectures. On 64bit
+ architectures the macros are NOPs.
+
+Currently prb_next_seq() is used as the base for the 32bit seq
+macros __u64seq_to_ulseq() and __ulseq_to_u64seq(). However, in
+a follow-up commit, prb_next_seq() will need to make use of the
+32bit seq macros.
+
+Use prb_first_seq() as the base for the 32bit seq macros instead
+because it is guaranteed to return 64bit sequence numbers without
+relying on any 32bit seq macros.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk_ringbuffer.c | 2 +-
+ kernel/printk/printk_ringbuffer.h | 8 ++++----
+ 2 files changed, 5 insertions(+), 5 deletions(-)
+
+--- a/kernel/printk/printk_ringbuffer.c
++++ b/kernel/printk/printk_ringbuffer.c
+@@ -1832,7 +1832,7 @@ static int prb_read(struct printk_ringbu
+ }
+
+ /* Get the sequence number of the tail descriptor. */
+-static u64 prb_first_seq(struct printk_ringbuffer *rb)
++u64 prb_first_seq(struct printk_ringbuffer *rb)
+ {
+ struct prb_desc_ring *desc_ring = &rb->desc_ring;
+ enum desc_state d_state;
+--- a/kernel/printk/printk_ringbuffer.h
++++ b/kernel/printk/printk_ringbuffer.h
+@@ -378,6 +378,7 @@ bool prb_read_valid(struct printk_ringbu
+ bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq,
+ struct printk_info *info, unsigned int *line_count);
+
++u64 prb_first_seq(struct printk_ringbuffer *rb);
+ u64 prb_first_valid_seq(struct printk_ringbuffer *rb);
+ u64 prb_next_seq(struct printk_ringbuffer *rb);
+
+@@ -392,12 +393,12 @@ u64 prb_next_seq(struct printk_ringbuffe
+
+ static inline u64 __ulseq_to_u64seq(struct printk_ringbuffer *rb, u32 ulseq)
+ {
++ u64 rb_first_seq = prb_first_seq(rb);
+ u64 seq;
+- u64 rb_next_seq;
+
+ /*
+ * The provided sequence is only the lower 32 bits of the ringbuffer
+- * sequence. It needs to be expanded to 64bit. Get the next sequence
++ * sequence. It needs to be expanded to 64bit. Get the first sequence
+ * number from the ringbuffer and fold it.
+ *
+ * Having a 32bit representation in the console is sufficient.
+@@ -406,8 +407,7 @@ static inline u64 __ulseq_to_u64seq(stru
+ *
+ * Also the access to the ring buffer is always safe.
+ */
+- rb_next_seq = prb_next_seq(rb);
+- seq = rb_next_seq - (s32)((u32)rb_next_seq - ulseq);
++ seq = rb_first_seq - (s32)((u32)rb_first_seq - ulseq);
+
+ return seq;
+ }
diff --git a/debian/patches-rt/0088-printk-ringbuffer-Do-not-skip-non-finalized-records-.patch b/debian/patches-rt/0088-printk-ringbuffer-Do-not-skip-non-finalized-records-.patch
new file mode 100644
index 0000000000..917f777f1c
--- /dev/null
+++ b/debian/patches-rt/0088-printk-ringbuffer-Do-not-skip-non-finalized-records-.patch
@@ -0,0 +1,304 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Thu, 19 Oct 2023 10:32:05 +0000
+Subject: [PATCH 088/134] printk: ringbuffer: Do not skip non-finalized records
+ with prb_next_seq()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Commit f244b4dc53e5 ("printk: ringbuffer: Improve
+prb_next_seq() performance") introduced an optimization for
+prb_next_seq() by using best-effort to track recently finalized
+records. However, the order of finalization does not
+necessarily match the order of the records. The optimization
+changed prb_next_seq() to return inconsistent results, possibly
+yielding sequence numbers that are not available to readers
+because they are preceded by non-finalized records or they are
+not yet visible to the reader CPU.
+
+Rather than simply best-effort tracking recently finalized
+records, force the committing writer to read records and
+increment the last "contiguous block" of finalized records. In
+order to do this, the sequence number instead of ID must be
+stored because ID's cannot be directly compared.
+
+A new memory barrier pair is introduced to guarantee that a
+reader can always read the records up until the sequence number
+returned by prb_next_seq() (unless the records have since
+been overwritten in the ringbuffer).
+
+This restores the original functionality of prb_next_seq()
+while also keeping the optimization.
+
+For 32bit systems, only the lower 32 bits of the sequence
+number are stored. When reading the value, it is expanded to
+the full 64bit sequence number using the 32bit seq macros,
+which fold in the value returned by prb_first_seq().
+
+Fixes: f244b4dc53e5 ("printk: ringbuffer: Improve prb_next_seq() performance")
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk_ringbuffer.c | 164 ++++++++++++++++++++++++++++----------
+ kernel/printk/printk_ringbuffer.h | 4
+ 2 files changed, 127 insertions(+), 41 deletions(-)
+
+--- a/kernel/printk/printk_ringbuffer.c
++++ b/kernel/printk/printk_ringbuffer.c
+@@ -6,6 +6,7 @@
+ #include <linux/errno.h>
+ #include <linux/bug.h>
+ #include "printk_ringbuffer.h"
++#include "internal.h"
+
+ /**
+ * DOC: printk_ringbuffer overview
+@@ -303,6 +304,9 @@
+ *
+ * desc_push_tail:B / desc_reserve:D
+ * set descriptor reusable (state), then push descriptor tail (id)
++ *
++ * desc_update_last_finalized:A / desc_last_finalized_seq:A
++ * store finalized record, then set new highest finalized sequence number
+ */
+
+ #define DATA_SIZE(data_ring) _DATA_SIZE((data_ring)->size_bits)
+@@ -1442,19 +1446,117 @@ bool prb_reserve_in_last(struct prb_rese
+ }
+
+ /*
++ * @last_finalized_seq value guarantees that all records up to and including
++ * this sequence number are finalized and can be read. The only exception are
++ * too old records which have already been overwritten.
++ *
++ * It is also guaranteed that @last_finalized_seq only increases.
++ *
++ * Be aware that finalized records following non-finalized records are not
++ * reported because they are not yet available to the reader. For example,
++ * a new record stored via printk() will not be available to a printer if
++ * it follows a record that has not been finalized yet. However, once that
++ * non-finalized record becomes finalized, @last_finalized_seq will be
++ * appropriately updated and the full set of finalized records will be
++ * available to the printer. And since each printk() caller will either
++ * directly print or trigger deferred printing of all available unprinted
++ * records, all printk() messages will get printed.
++ */
++static u64 desc_last_finalized_seq(struct printk_ringbuffer *rb)
++{
++ struct prb_desc_ring *desc_ring = &rb->desc_ring;
++ unsigned long ulseq;
++
++ /*
++ * Guarantee the sequence number is loaded before loading the
++ * associated record in order to guarantee that the record can be
++ * seen by this CPU. This pairs with desc_update_last_finalized:A.
++ */
++ ulseq = atomic_long_read_acquire(&desc_ring->last_finalized_seq
++ ); /* LMM(desc_last_finalized_seq:A) */
++
++ return __ulseq_to_u64seq(rb, ulseq);
++}
++
++static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq,
++ struct printk_record *r, unsigned int *line_count);
++
++/*
++ * Check if there are records directly following @last_finalized_seq that are
++ * finalized. If so, update @last_finalized_seq to the latest of these
++ * records. It is not allowed to skip over records that are not yet finalized.
++ */
++static void desc_update_last_finalized(struct printk_ringbuffer *rb)
++{
++ struct prb_desc_ring *desc_ring = &rb->desc_ring;
++ u64 old_seq = desc_last_finalized_seq(rb);
++ unsigned long oldval;
++ unsigned long newval;
++ u64 finalized_seq;
++ u64 try_seq;
++
++try_again:
++ finalized_seq = old_seq;
++ try_seq = finalized_seq + 1;
++
++ /* Try to find later finalized records. */
++ while (_prb_read_valid(rb, &try_seq, NULL, NULL)) {
++ finalized_seq = try_seq;
++ try_seq++;
++ }
++
++ /* No update needed if no later finalized record was found. */
++ if (finalized_seq == old_seq)
++ return;
++
++ oldval = __u64seq_to_ulseq(old_seq);
++ newval = __u64seq_to_ulseq(finalized_seq);
++
++ /*
++ * Set the sequence number of a later finalized record that has been
++ * seen.
++ *
++ * Guarantee the record data is visible to other CPUs before storing
++ * its sequence number. This pairs with desc_last_finalized_seq:A.
++ *
++ * Memory barrier involvement:
++ *
++ * If desc_last_finalized_seq:A reads from
++ * desc_update_last_finalized:A, then desc_read:A reads from
++ * _prb_commit:B.
++ *
++ * Relies on:
++ *
++ * RELEASE from _prb_commit:B to desc_update_last_finalized:A
++ * matching
++ * ACQUIRE from desc_last_finalized_seq:A to desc_read:A
++ *
++ * Note: _prb_commit:B and desc_update_last_finalized:A can be
++ * different CPUs. However, the desc_update_last_finalized:A
++ * CPU (which performs the release) must have previously seen
++ * _prb_commit:B.
++ */
++ if (!atomic_long_try_cmpxchg_release(&desc_ring->last_finalized_seq,
++ &oldval, newval)) { /* LMM(desc_update_last_finalized:A) */
++ old_seq = __ulseq_to_u64seq(rb, oldval);
++ goto try_again;
++ }
++}
++
++/*
+ * Attempt to finalize a specified descriptor. If this fails, the descriptor
+ * is either already final or it will finalize itself when the writer commits.
+ */
+-static void desc_make_final(struct prb_desc_ring *desc_ring, unsigned long id)
++static void desc_make_final(struct printk_ringbuffer *rb, unsigned long id)
+ {
++ struct prb_desc_ring *desc_ring = &rb->desc_ring;
+ unsigned long prev_state_val = DESC_SV(id, desc_committed);
+ struct prb_desc *d = to_desc(desc_ring, id);
+
+- atomic_long_cmpxchg_relaxed(&d->state_var, prev_state_val,
+- DESC_SV(id, desc_finalized)); /* LMM(desc_make_final:A) */
+-
+- /* Best effort to remember the last finalized @id. */
+- atomic_long_set(&desc_ring->last_finalized_id, id);
++ if (atomic_long_try_cmpxchg_relaxed(&d->state_var, &prev_state_val,
++ DESC_SV(id, desc_finalized))) { /* LMM(desc_make_final:A) */
++ desc_update_last_finalized(rb);
++ }
+ }
+
+ /**
+@@ -1550,7 +1652,7 @@ bool prb_reserve(struct prb_reserved_ent
+ * readers. (For seq==0 there is no previous descriptor.)
+ */
+ if (info->seq > 0)
+- desc_make_final(desc_ring, DESC_ID(id - 1));
++ desc_make_final(rb, DESC_ID(id - 1));
+
+ r->text_buf = data_alloc(rb, r->text_buf_size, &d->text_blk_lpos, id);
+ /* If text data allocation fails, a data-less record is committed. */
+@@ -1643,7 +1745,7 @@ void prb_commit(struct prb_reserved_entr
+ */
+ head_id = atomic_long_read(&desc_ring->head_id); /* LMM(prb_commit:A) */
+ if (head_id != e->id)
+- desc_make_final(desc_ring, e->id);
++ desc_make_final(e->rb, e->id);
+ }
+
+ /**
+@@ -1663,12 +1765,9 @@ void prb_commit(struct prb_reserved_entr
+ */
+ void prb_final_commit(struct prb_reserved_entry *e)
+ {
+- struct prb_desc_ring *desc_ring = &e->rb->desc_ring;
+-
+ _prb_commit(e, desc_finalized);
+
+- /* Best effort to remember the last finalized @id. */
+- atomic_long_set(&desc_ring->last_finalized_id, e->id);
++ desc_update_last_finalized(e->rb);
+ }
+
+ /*
+@@ -2008,7 +2107,9 @@ u64 prb_first_valid_seq(struct printk_ri
+ * newest sequence number available to readers will be.
+ *
+ * This provides readers a sequence number to jump to if all currently
+- * available records should be skipped.
++ * available records should be skipped. It is guaranteed that all records
++ * previous to the returned value have been finalized and are (or were)
++ * available to the reader.
+ *
+ * Context: Any context.
+ * Return: The sequence number of the next newest (not yet available) record
+@@ -2016,34 +2117,19 @@ u64 prb_first_valid_seq(struct printk_ri
+ */
+ u64 prb_next_seq(struct printk_ringbuffer *rb)
+ {
+- struct prb_desc_ring *desc_ring = &rb->desc_ring;
+- enum desc_state d_state;
+- unsigned long id;
+ u64 seq;
+
+- /* Check if the cached @id still points to a valid @seq. */
+- id = atomic_long_read(&desc_ring->last_finalized_id);
+- d_state = desc_read(desc_ring, id, NULL, &seq, NULL);
++ seq = desc_last_finalized_seq(rb);
+
+- if (d_state == desc_finalized || d_state == desc_reusable) {
+- /*
+- * Begin searching after the last finalized record.
+- *
+- * On 0, the search must begin at 0 because of hack#2
+- * of the bootstrapping phase it is not known if a
+- * record at index 0 exists.
+- */
+- if (seq != 0)
+- seq++;
+- } else {
+- /*
+- * The information about the last finalized sequence number
+- * has gone. It should happen only when there is a flood of
+- * new messages and the ringbuffer is rapidly recycled.
+- * Give up and start from the beginning.
+- */
+- seq = 0;
+- }
++ /*
++ * Begin searching after the last finalized record.
++ *
++ * On 0, the search must begin at 0 because of hack#2
++ * of the bootstrapping phase it is not known if a
++ * record at index 0 exists.
++ */
++ if (seq != 0)
++ seq++;
+
+ /*
+ * The information about the last finalized @seq might be inaccurate.
+@@ -2085,7 +2171,7 @@ void prb_init(struct printk_ringbuffer *
+ rb->desc_ring.infos = infos;
+ atomic_long_set(&rb->desc_ring.head_id, DESC0_ID(descbits));
+ atomic_long_set(&rb->desc_ring.tail_id, DESC0_ID(descbits));
+- atomic_long_set(&rb->desc_ring.last_finalized_id, DESC0_ID(descbits));
++ atomic_long_set(&rb->desc_ring.last_finalized_seq, 0);
+
+ rb->text_data_ring.size_bits = textbits;
+ rb->text_data_ring.data = text_buf;
+--- a/kernel/printk/printk_ringbuffer.h
++++ b/kernel/printk/printk_ringbuffer.h
+@@ -75,7 +75,7 @@ struct prb_desc_ring {
+ struct printk_info *infos;
+ atomic_long_t head_id;
+ atomic_long_t tail_id;
+- atomic_long_t last_finalized_id;
++ atomic_long_t last_finalized_seq;
+ };
+
+ /*
+@@ -259,7 +259,7 @@ static struct printk_ringbuffer name = {
+ .infos = &_##name##_infos[0], \
+ .head_id = ATOMIC_INIT(DESC0_ID(descbits)), \
+ .tail_id = ATOMIC_INIT(DESC0_ID(descbits)), \
+- .last_finalized_id = ATOMIC_INIT(DESC0_ID(descbits)), \
++ .last_finalized_seq = ATOMIC_INIT(0), \
+ }, \
+ .text_data_ring = { \
+ .size_bits = (avgtextbits) + (descbits), \
diff --git a/debian/patches-rt/0089-printk-ringbuffer-Clarify-special-lpos-values.patch b/debian/patches-rt/0089-printk-ringbuffer-Clarify-special-lpos-values.patch
new file mode 100644
index 0000000000..397f61984c
--- /dev/null
+++ b/debian/patches-rt/0089-printk-ringbuffer-Clarify-special-lpos-values.patch
@@ -0,0 +1,92 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 23 Oct 2023 11:11:05 +0000
+Subject: [PATCH 089/134] printk: ringbuffer: Clarify special lpos values
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+For empty line records, no data blocks are created. Instead,
+these valid records are identified by special logical position
+values (in fields of @prb_desc.text_blk_lpos).
+
+Currently the macro NO_LPOS is used for empty line records.
+This name is confusing because it does not imply _why_ there is
+no data block.
+
+Rename NO_LPOS to EMPTY_LINE_LPOS so that it is clear why there
+is no data block.
+
+Also add comments explaining the use of EMPTY_LINE_LPOS as well
+as clarification to the values used to represent data-less
+blocks.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk_ringbuffer.c | 20 ++++++++++++++++----
+ kernel/printk/printk_ringbuffer.h | 16 +++++++++++++++-
+ 2 files changed, 31 insertions(+), 5 deletions(-)
+
+--- a/kernel/printk/printk_ringbuffer.c
++++ b/kernel/printk/printk_ringbuffer.c
+@@ -1034,9 +1034,13 @@ static char *data_alloc(struct printk_ri
+ unsigned long next_lpos;
+
+ if (size == 0) {
+- /* Specify a data-less block. */
+- blk_lpos->begin = NO_LPOS;
+- blk_lpos->next = NO_LPOS;
++ /*
++ * Data blocks are not created for empty lines. Instead, the
++ * reader will recognize these special lpos values and handle
++ * it appropriately.
++ */
++ blk_lpos->begin = EMPTY_LINE_LPOS;
++ blk_lpos->next = EMPTY_LINE_LPOS;
+ return NULL;
+ }
+
+@@ -1214,10 +1218,18 @@ static const char *get_data(struct prb_d
+
+ /* Data-less data block description. */
+ if (BLK_DATALESS(blk_lpos)) {
+- if (blk_lpos->begin == NO_LPOS && blk_lpos->next == NO_LPOS) {
++ /*
++ * Records that are just empty lines are also valid, even
++ * though they do not have a data block. For such records
++ * explicitly return empty string data to signify success.
++ */
++ if (blk_lpos->begin == EMPTY_LINE_LPOS &&
++ blk_lpos->next == EMPTY_LINE_LPOS) {
+ *data_size = 0;
+ return "";
+ }
++
++ /* Data lost, invalid, or otherwise unavailable. */
+ return NULL;
+ }
+
+--- a/kernel/printk/printk_ringbuffer.h
++++ b/kernel/printk/printk_ringbuffer.h
+@@ -127,8 +127,22 @@ enum desc_state {
+ #define DESC_SV(id, state) (((unsigned long)state << DESC_FLAGS_SHIFT) | id)
+ #define DESC_ID_MASK (~DESC_FLAGS_MASK)
+ #define DESC_ID(sv) ((sv) & DESC_ID_MASK)
++
++/*
++ * Special data block logical position values (for fields of
++ * @prb_desc.text_blk_lpos).
++ *
++ * - Bit0 is used to identify if the record has no data block. (Implemented in
++ * the LPOS_DATALESS() macro.)
++ *
++ * - Bit1 specifies the reason for not having a data block.
++ *
++ * These special values could never be real lpos values because of the
++ * meta data and alignment padding of data blocks. (See to_blk_size() for
++ * details.)
++ */
+ #define FAILED_LPOS 0x1
+-#define NO_LPOS 0x3
++#define EMPTY_LINE_LPOS 0x3
+
+ #define FAILED_BLK_LPOS \
+ { \
diff --git a/debian/patches-rt/0090-printk-For-suppress_panic_printk-check-for-other-CPU.patch b/debian/patches-rt/0090-printk-For-suppress_panic_printk-check-for-other-CPU.patch
new file mode 100644
index 0000000000..1fb32b1ace
--- /dev/null
+++ b/debian/patches-rt/0090-printk-For-suppress_panic_printk-check-for-other-CPU.patch
@@ -0,0 +1,34 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Fri, 13 Oct 2023 20:13:02 +0000
+Subject: [PATCH 090/134] printk: For @suppress_panic_printk check for other
+ CPU in panic
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Currently @suppress_panic_printk is checked along with
+non-matching @panic_cpu and current CPU. This works
+because @suppress_panic_printk is only set when
+panic_in_progress() is true.
+
+Rather than relying on the @suppress_panic_printk semantics,
+use the concise helper function other_cpu_in_progress(). The
+helper function exists to avoid open coding such tests.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2271,8 +2271,7 @@ asmlinkage int vprintk_emit(int facility
+ if (unlikely(suppress_printk))
+ return 0;
+
+- if (unlikely(suppress_panic_printk) &&
+- atomic_read(&panic_cpu) != raw_smp_processor_id())
++ if (unlikely(suppress_panic_printk) && other_cpu_in_panic())
+ return 0;
+
+ if (level == LOGLEVEL_SCHED) {
diff --git a/debian/patches-rt/0091-printk-Add-this_cpu_in_panic.patch b/debian/patches-rt/0091-printk-Add-this_cpu_in_panic.patch
new file mode 100644
index 0000000000..0925911259
--- /dev/null
+++ b/debian/patches-rt/0091-printk-Add-this_cpu_in_panic.patch
@@ -0,0 +1,88 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Fri, 13 Oct 2023 14:30:49 +0000
+Subject: [PATCH 091/134] printk: Add this_cpu_in_panic()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+There is already panic_in_progress() and other_cpu_in_panic(),
+but checking if the current CPU is the panic CPU must still be
+open coded.
+
+Add this_cpu_in_panic() to complete the set.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/internal.h | 1 +
+ kernel/printk/printk.c | 43 +++++++++++++++++++++++--------------------
+ 2 files changed, 24 insertions(+), 20 deletions(-)
+
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -130,6 +130,7 @@ struct printk_message {
+ };
+
+ bool other_cpu_in_panic(void);
++bool this_cpu_in_panic(void);
+ bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
+ bool is_extended, bool may_supress);
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -347,6 +347,29 @@ static bool panic_in_progress(void)
+ return unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID);
+ }
+
++/* Return true if a panic is in progress on the current CPU. */
++bool this_cpu_in_panic(void)
++{
++ /*
++ * We can use raw_smp_processor_id() here because it is impossible for
++ * the task to be migrated to the panic_cpu, or away from it. If
++ * panic_cpu has already been set, and we're not currently executing on
++ * that CPU, then we never will be.
++ */
++ return unlikely(atomic_read(&panic_cpu) == raw_smp_processor_id());
++}
++
++/*
++ * Return true if a panic is in progress on a remote CPU.
++ *
++ * On true, the local CPU should immediately release any printing resources
++ * that may be needed by the panic CPU.
++ */
++bool other_cpu_in_panic(void)
++{
++ return (panic_in_progress() && !this_cpu_in_panic());
++}
++
+ /*
+ * This is used for debugging the mess that is the VT code by
+ * keeping track if we have the console semaphore held. It's
+@@ -2593,26 +2616,6 @@ static int console_cpu_notify(unsigned i
+ return 0;
+ }
+
+-/*
+- * Return true if a panic is in progress on a remote CPU.
+- *
+- * On true, the local CPU should immediately release any printing resources
+- * that may be needed by the panic CPU.
+- */
+-bool other_cpu_in_panic(void)
+-{
+- if (!panic_in_progress())
+- return false;
+-
+- /*
+- * We can use raw_smp_processor_id() here because it is impossible for
+- * the task to be migrated to the panic_cpu, or away from it. If
+- * panic_cpu has already been set, and we're not currently executing on
+- * that CPU, then we never will be.
+- */
+- return atomic_read(&panic_cpu) != raw_smp_processor_id();
+-}
+-
+ /**
+ * console_lock - block the console subsystem from printing
+ *
diff --git a/debian/patches-rt/0092-printk-ringbuffer-Cleanup-reader-terminology.patch b/debian/patches-rt/0092-printk-ringbuffer-Cleanup-reader-terminology.patch
new file mode 100644
index 0000000000..31c6312c46
--- /dev/null
+++ b/debian/patches-rt/0092-printk-ringbuffer-Cleanup-reader-terminology.patch
@@ -0,0 +1,67 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 6 Nov 2023 15:01:58 +0000
+Subject: [PATCH 092/134] printk: ringbuffer: Cleanup reader terminology
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+With the lockless ringbuffer, it is allowed that multiple
+CPUs/contexts write simultaneously into the buffer. This creates
+an ambiguity as some writers will finalize sooner.
+
+The documentation for the prb_read functions is not clear as it
+refers to "not yet written" and "no data available". Clarify the
+return values and language to be in terms of the reader: records
+available for reading.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk_ringbuffer.c | 16 +++++++++-------
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+--- a/kernel/printk/printk_ringbuffer.c
++++ b/kernel/printk/printk_ringbuffer.c
+@@ -1987,11 +1987,13 @@ u64 prb_first_seq(struct printk_ringbuff
+ }
+
+ /*
+- * Non-blocking read of a record. Updates @seq to the last finalized record
+- * (which may have no data available).
++ * Non-blocking read of a record.
+ *
+- * See the description of prb_read_valid() and prb_read_valid_info()
+- * for details.
++ * On success @seq is updated to the record that was read and (if provided)
++ * @r and @line_count will contain the read/calculated data.
++ *
++ * On failure @seq is updated to a record that is not yet available to the
++ * reader, but it will be the next record available to the reader.
+ */
+ static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq,
+ struct printk_record *r, unsigned int *line_count)
+@@ -2010,7 +2012,7 @@ static bool _prb_read_valid(struct print
+ *seq = tail_seq;
+
+ } else if (err == -ENOENT) {
+- /* Record exists, but no data available. Skip. */
++ /* Record exists, but the data was lost. Skip. */
+ (*seq)++;
+
+ } else {
+@@ -2043,7 +2045,7 @@ static bool _prb_read_valid(struct print
+ * On success, the reader must check r->info.seq to see which record was
+ * actually read. This allows the reader to detect dropped records.
+ *
+- * Failure means @seq refers to a not yet written record.
++ * Failure means @seq refers to a record not yet available to the reader.
+ */
+ bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq,
+ struct printk_record *r)
+@@ -2073,7 +2075,7 @@ bool prb_read_valid(struct printk_ringbu
+ * On success, the reader must check info->seq to see which record meta data
+ * was actually read. This allows the reader to detect dropped records.
+ *
+- * Failure means @seq refers to a not yet written record.
++ * Failure means @seq refers to a record not yet available to the reader.
+ */
+ bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq,
+ struct printk_info *info, unsigned int *line_count)
diff --git a/debian/patches-rt/0093-printk-Wait-for-all-reserved-records-with-pr_flush.patch b/debian/patches-rt/0093-printk-Wait-for-all-reserved-records-with-pr_flush.patch
new file mode 100644
index 0000000000..661aea2105
--- /dev/null
+++ b/debian/patches-rt/0093-printk-Wait-for-all-reserved-records-with-pr_flush.patch
@@ -0,0 +1,170 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 6 Nov 2023 14:59:55 +0000
+Subject: [PATCH 093/134] printk: Wait for all reserved records with pr_flush()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Currently pr_flush() will only wait for records that were
+available to readers at the time of the call (using
+prb_next_seq()). But there may be more records (non-finalized)
+that have following finalized records. pr_flush() should wait
+for these to print as well. Particularly because any trailing
+finalized records may be the messages that the calling context
+wants to ensure are printed.
+
+Add a new ringbuffer function prb_next_reserve_seq() to return
+the sequence number following the most recently reserved record.
+This guarantees that pr_flush() will wait until all current
+printk() messages (completed or in progress) have been printed.
+
+Fixes: 3b604ca81202 ("printk: add pr_flush()")
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 2
+ kernel/printk/printk_ringbuffer.c | 113 ++++++++++++++++++++++++++++++++++++++
+ kernel/printk/printk_ringbuffer.h | 1
+ 3 files changed, 115 insertions(+), 1 deletion(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -3756,7 +3756,7 @@ static bool __pr_flush(struct console *c
+
+ might_sleep();
+
+- seq = prb_next_seq(prb);
++ seq = prb_next_reserve_seq(prb);
+
+ /* Flush the consoles so that records up to @seq are printed. */
+ console_lock();
+--- a/kernel/printk/printk_ringbuffer.c
++++ b/kernel/printk/printk_ringbuffer.c
+@@ -1986,6 +1986,119 @@ u64 prb_first_seq(struct printk_ringbuff
+ return seq;
+ }
+
++/**
++ * prb_next_reserve_seq() - Get the sequence number after the most recently
++ * reserved record.
++ *
++ * @rb: The ringbuffer to get the sequence number from.
++ *
++ * This is the public function available to readers to see what sequence
++ * number will be assigned to the next reserved record.
++ *
++ * Note that depending on the situation, this value can be equal to or
++ * higher than the sequence number returned by prb_next_seq().
++ *
++ * Context: Any context.
++ * Return: The sequence number that will be assigned to the next record
++ * reserved.
++ */
++u64 prb_next_reserve_seq(struct printk_ringbuffer *rb)
++{
++ struct prb_desc_ring *desc_ring = &rb->desc_ring;
++ unsigned long last_finalized_id;
++ atomic_long_t *state_var;
++ u64 last_finalized_seq;
++ unsigned long head_id;
++ struct prb_desc desc;
++ unsigned long diff;
++ struct prb_desc *d;
++ int err;
++
++ /*
++ * It may not be possible to read a sequence number for @head_id.
++ * So the ID of @last_finailzed_seq is used to calculate what the
++ * sequence number of @head_id will be.
++ */
++
++try_again:
++ last_finalized_seq = desc_last_finalized_seq(rb);
++
++ /*
++ * @head_id is loaded after @last_finalized_seq to ensure that it is
++ * at or beyond @last_finalized_seq.
++ *
++ * Memory barrier involvement:
++ *
++ * If desc_last_finalized_seq:A reads from
++ * desc_update_last_finalized:A, then
++ * prb_next_reserve_seq:A reads from desc_reserve:D.
++ *
++ * Relies on:
++ *
++ * RELEASE from desc_reserve:D to desc_update_last_finalized:A
++ * matching
++ * ACQUIRE from desc_last_finalized_seq:A to prb_next_reserve_seq:A
++ *
++ * Note: desc_reserve:D and desc_update_last_finalized:A can be
++ * different CPUs. However, the desc_update_last_finalized:A CPU
++ * (which performs the release) must have previously seen
++ * desc_read:C, which implies desc_reserve:D can be seen.
++ */
++ head_id = atomic_long_read(&desc_ring->head_id); /* LMM(prb_next_reserve_seq:A) */
++
++ d = to_desc(desc_ring, last_finalized_seq);
++ state_var = &d->state_var;
++
++ /* Extract the ID, used to specify the descriptor to read. */
++ last_finalized_id = DESC_ID(atomic_long_read(state_var));
++
++ /* Ensure @last_finalized_id is correct. */
++ err = desc_read_finalized_seq(desc_ring, last_finalized_id, last_finalized_seq, &desc);
++
++ if (err == -EINVAL) {
++ if (last_finalized_seq == 0) {
++ /*
++ * @last_finalized_seq still contains its initial
++ * value. Probably no record has been finalized yet.
++ * This means the ringbuffer is not yet full and the
++ * @head_id value can be used directly (subtracting
++ * off the id value corresponding to seq=0).
++ */
++
++ /*
++ * Because of hack#2 of the bootstrapping phase, the
++ * @head_id initial value must be handled separately.
++ */
++ if (head_id == DESC0_ID(desc_ring->count_bits))
++ return 0;
++
++ /*
++ * The @head_id is initialized such that the first
++ * increment will yield the first record (seq=0).
++ * Therefore use the initial value +1 as the base to
++ * subtract from @head_id.
++ */
++ last_finalized_id = DESC0_ID(desc_ring->count_bits) + 1;
++ } else {
++ /* Record must have been overwritten. Try again. */
++ goto try_again;
++ }
++ }
++
++ /*
++ * @diff is the number of records beyond the last record available
++ * to readers.
++ */
++ diff = head_id - last_finalized_id;
++
++ /*
++ * @head_id points to the most recently reserved record, but this
++ * function returns the sequence number that will be assigned to the
++ * next (not yet reserved) record. Thus +1 is needed.
++ */
++ return (last_finalized_seq + diff + 1);
++}
++
+ /*
+ * Non-blocking read of a record.
+ *
+--- a/kernel/printk/printk_ringbuffer.h
++++ b/kernel/printk/printk_ringbuffer.h
+@@ -395,6 +395,7 @@ bool prb_read_valid_info(struct printk_r
+ u64 prb_first_seq(struct printk_ringbuffer *rb);
+ u64 prb_first_valid_seq(struct printk_ringbuffer *rb);
+ u64 prb_next_seq(struct printk_ringbuffer *rb);
++u64 prb_next_reserve_seq(struct printk_ringbuffer *rb);
+
+ #ifdef CONFIG_64BIT
+
diff --git a/debian/patches-rt/0094-printk-ringbuffer-Skip-non-finalized-records-in-pani.patch b/debian/patches-rt/0094-printk-ringbuffer-Skip-non-finalized-records-in-pani.patch
new file mode 100644
index 0000000000..56c675cea9
--- /dev/null
+++ b/debian/patches-rt/0094-printk-ringbuffer-Skip-non-finalized-records-in-pani.patch
@@ -0,0 +1,68 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Fri, 13 Oct 2023 10:23:11 +0000
+Subject: [PATCH 094/134] printk: ringbuffer: Skip non-finalized records in
+ panic
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Normally a reader will stop once reaching a non-finalized
+record. However, when a panic happens, writers from other CPUs
+(or an interrupted context on the panic CPU) may have been
+writing a record and were unable to finalize it. The panic CPU
+will reserve/commit/finalize its panic records, but these will
+be located after the non-finalized records. This results in
+panic() not flushing the panic messages.
+
+Extend _prb_read_valid() to skip over non-finalized records if
+on the panic CPU.
+
+Fixes: 896fbe20b4e2 ("printk: use the lockless ringbuffer")
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk_ringbuffer.c | 28 ++++++++++++++++++++++++++--
+ 1 file changed, 26 insertions(+), 2 deletions(-)
+
+--- a/kernel/printk/printk_ringbuffer.c
++++ b/kernel/printk/printk_ringbuffer.c
+@@ -2107,6 +2107,10 @@ u64 prb_next_reserve_seq(struct printk_r
+ *
+ * On failure @seq is updated to a record that is not yet available to the
+ * reader, but it will be the next record available to the reader.
++ *
++ * Note: When the current CPU is in panic, this function will skip over any
++ * non-existent/non-finalized records in order to allow the panic CPU
++ * to print any and all records that have been finalized.
+ */
+ static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq,
+ struct printk_record *r, unsigned int *line_count)
+@@ -2129,8 +2133,28 @@ static bool _prb_read_valid(struct print
+ (*seq)++;
+
+ } else {
+- /* Non-existent/non-finalized record. Must stop. */
+- return false;
++ /*
++ * Non-existent/non-finalized record. Must stop.
++ *
++ * For panic situations it cannot be expected that
++ * non-finalized records will become finalized. But
++ * there may be other finalized records beyond that
++ * need to be printed for a panic situation. If this
++ * is the panic CPU, skip this
++ * non-existent/non-finalized record unless it is
++ * at or beyond the head, in which case it is not
++ * possible to continue.
++ *
++ * Note that new messages printed on panic CPU are
++ * finalized when we are here. The only exception
++ * might be the last message without trailing newline.
++ * But it would have the sequence number returned
++ * by "prb_next_reserve_seq() - 1".
++ */
++ if (this_cpu_in_panic() && ((*seq + 1) < prb_next_reserve_seq(rb)))
++ (*seq)++;
++ else
++ return false;
+ }
+ }
+
diff --git a/debian/patches-rt/0095-printk-ringbuffer-Consider-committed-as-finalized-in.patch b/debian/patches-rt/0095-printk-ringbuffer-Consider-committed-as-finalized-in.patch
new file mode 100644
index 0000000000..932ba13e7d
--- /dev/null
+++ b/debian/patches-rt/0095-printk-ringbuffer-Consider-committed-as-finalized-in.patch
@@ -0,0 +1,62 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 20 Nov 2023 12:46:35 +0100
+Subject: [PATCH 095/134] printk: ringbuffer: Consider committed as finalized
+ in panic
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+A descriptor in the committed state means the record does not yet
+exist for the reader. However, for the panic CPU, committed
+records should be handled as finalized records since they contain
+message data in a consistent state and may contain additional
+hints as to the cause of the panic.
+
+Add an exception for records in the commit state to not be
+considered non-existing when reading from the panic CPU.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk_ringbuffer.c | 17 ++++++++++++++---
+ 1 file changed, 14 insertions(+), 3 deletions(-)
+
+--- a/kernel/printk/printk_ringbuffer.c
++++ b/kernel/printk/printk_ringbuffer.c
+@@ -1857,6 +1857,8 @@ static bool copy_data(struct prb_data_ri
+ * descriptor. However, it also verifies that the record is finalized and has
+ * the sequence number @seq. On success, 0 is returned.
+ *
++ * For the panic CPU, committed descriptors are also considered finalized.
++ *
+ * Error return values:
+ * -EINVAL: A finalized record with sequence number @seq does not exist.
+ * -ENOENT: A finalized record with sequence number @seq exists, but its data
+@@ -1875,17 +1877,26 @@ static int desc_read_finalized_seq(struc
+
+ /*
+ * An unexpected @id (desc_miss) or @seq mismatch means the record
+- * does not exist. A descriptor in the reserved or committed state
+- * means the record does not yet exist for the reader.
++ * does not exist. A descriptor in the reserved state means the
++ * record does not yet exist for the reader.
+ */
+ if (d_state == desc_miss ||
+ d_state == desc_reserved ||
+- d_state == desc_committed ||
+ s != seq) {
+ return -EINVAL;
+ }
+
+ /*
++ * A descriptor in the committed state means the record does not yet
++ * exist for the reader. However, for the panic CPU, committed
++ * records are also handled as finalized records since they contain
++ * message data in a consistent state and may contain additional
++ * hints as to the cause of the panic.
++ */
++ if (d_state == desc_committed && !this_cpu_in_panic())
++ return -EINVAL;
++
++ /*
+ * A descriptor in the reusable state may no longer have its data
+ * available; report it as existing but with lost data. Or the record
+ * may actually be a record with lost data.
diff --git a/debian/patches-rt/0096-printk-Disable-passing-console-lock-owner-completely.patch b/debian/patches-rt/0096-printk-Disable-passing-console-lock-owner-completely.patch
new file mode 100644
index 0000000000..d5dcdcdccc
--- /dev/null
+++ b/debian/patches-rt/0096-printk-Disable-passing-console-lock-owner-completely.patch
@@ -0,0 +1,107 @@
+From: Petr Mladek <pmladek@suse.com>
+Date: Fri, 13 Oct 2023 14:12:05 +0000
+Subject: [PATCH 096/134] printk: Disable passing console lock owner completely
+ during panic()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+The commit d51507098ff91 ("printk: disable optimistic spin
+during panic") added checks to avoid becoming a console waiter
+if a panic is in progress.
+
+However, the transition to panic can occur while there is
+already a waiter. The current owner should not pass the lock to
+the waiter because it might get stopped or blocked anytime.
+
+Also the panic context might pass the console lock owner to an
+already stopped waiter by mistake. It might happen when
+console_flush_on_panic() ignores the current lock owner, for
+example:
+
+CPU0 CPU1
+---- ----
+console_lock_spinning_enable()
+ console_trylock_spinning()
+ [CPU1 now console waiter]
+NMI: panic()
+ panic_other_cpus_shutdown()
+ [stopped as console waiter]
+ console_flush_on_panic()
+ console_lock_spinning_enable()
+ [print 1 record]
+ console_lock_spinning_disable_and_check()
+ [handover to stopped CPU1]
+
+This results in panic() not flushing the panic messages.
+
+Fix these problems by disabling all spinning operations
+completely during panic().
+
+Another advantage is that it prevents possible deadlocks caused
+by "console_owner_lock". The panic() context does not need to
+take it any longer. The lockless checks are safe because the
+functions become NOPs when they see the panic in progress. All
+operations manipulating the state are still synchronized by the
+lock even when non-panic CPUs would notice the panic
+synchronously.
+
+The current owner might stay spinning. But non-panic() CPUs
+would get stopped anyway and the panic context will never start
+spinning.
+
+Fixes: dbdda842fe96 ("printk: Add console owner and waiter logic to load balance console writes")
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 29 +++++++++++++++++++++++++++++
+ 1 file changed, 29 insertions(+)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1870,10 +1870,23 @@ static bool console_waiter;
+ */
+ static void console_lock_spinning_enable(void)
+ {
++ /*
++ * Do not use spinning in panic(). The panic CPU wants to keep the lock.
++ * Non-panic CPUs abandon the flush anyway.
++ *
++ * Just keep the lockdep annotation. The panic-CPU should avoid
++ * taking console_owner_lock because it might cause a deadlock.
++ * This looks like the easiest way how to prevent false lockdep
++ * reports without handling races a lockless way.
++ */
++ if (panic_in_progress())
++ goto lockdep;
++
+ raw_spin_lock(&console_owner_lock);
+ console_owner = current;
+ raw_spin_unlock(&console_owner_lock);
+
++lockdep:
+ /* The waiter may spin on us after setting console_owner */
+ spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
+ }
+@@ -1898,6 +1911,22 @@ static int console_lock_spinning_disable
+ {
+ int waiter;
+
++ /*
++ * Ignore spinning waiters during panic() because they might get stopped
++ * or blocked at any time,
++ *
++ * It is safe because nobody is allowed to start spinning during panic
++ * in the first place. If there has been a waiter then non panic CPUs
++ * might stay spinning. They would get stopped anyway. The panic context
++ * will never start spinning and an interrupted spin on panic CPU will
++ * never continue.
++ */
++ if (panic_in_progress()) {
++ /* Keep lockdep happy. */
++ spin_release(&console_owner_dep_map, _THIS_IP_);
++ return 0;
++ }
++
+ raw_spin_lock(&console_owner_lock);
+ waiter = READ_ONCE(console_waiter);
+ console_owner = NULL;
diff --git a/debian/patches-rt/0097-printk-Avoid-non-panic-CPUs-writing-to-ringbuffer.patch b/debian/patches-rt/0097-printk-Avoid-non-panic-CPUs-writing-to-ringbuffer.patch
new file mode 100644
index 0000000000..2f5a4ba17d
--- /dev/null
+++ b/debian/patches-rt/0097-printk-Avoid-non-panic-CPUs-writing-to-ringbuffer.patch
@@ -0,0 +1,78 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Fri, 20 Oct 2023 09:37:05 +0000
+Subject: [PATCH 097/134] printk: Avoid non-panic CPUs writing to ringbuffer
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Commit 13fb0f74d702 ("printk: Avoid livelock with heavy printk
+during panic") introduced a mechanism to silence non-panic CPUs
+if too many messages are being dropped. Aside from trying to
+workaround the livelock bugs of legacy consoles, it was also
+intended to avoid losing panic messages. However, if non-panic
+CPUs are writing to the ringbuffer, then reacting to dropped
+messages is too late.
+
+To avoid losing panic CPU messages, silence non-panic CPUs
+immediately on panic.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 26 ++++++--------------------
+ 1 file changed, 6 insertions(+), 20 deletions(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -462,12 +462,6 @@ static int console_msg_format = MSG_FORM
+ static DEFINE_MUTEX(syslog_lock);
+
+ #ifdef CONFIG_PRINTK
+-/*
+- * During panic, heavy printk by other CPUs can delay the
+- * panic and risk deadlock on console resources.
+- */
+-static int __read_mostly suppress_panic_printk;
+-
+ DECLARE_WAIT_QUEUE_HEAD(log_wait);
+ /* All 3 protected by @syslog_lock. */
+ /* the next printk record to read by syslog(READ) or /proc/kmsg */
+@@ -2323,7 +2317,12 @@ asmlinkage int vprintk_emit(int facility
+ if (unlikely(suppress_printk))
+ return 0;
+
+- if (unlikely(suppress_panic_printk) && other_cpu_in_panic())
++ /*
++ * The messages on the panic CPU are the most important. If
++ * non-panic CPUs are generating any messages, they will be
++ * silently dropped.
++ */
++ if (other_cpu_in_panic())
+ return 0;
+
+ if (level == LOGLEVEL_SCHED) {
+@@ -2800,8 +2799,6 @@ void console_prepend_dropped(struct prin
+ bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
+ bool is_extended, bool may_suppress)
+ {
+- static int panic_console_dropped;
+-
+ struct printk_buffers *pbufs = pmsg->pbufs;
+ const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
+ const size_t outbuf_sz = sizeof(pbufs->outbuf);
+@@ -2829,17 +2826,6 @@ bool printk_get_next_message(struct prin
+ pmsg->seq = r.info->seq;
+ pmsg->dropped = r.info->seq - seq;
+
+- /*
+- * Check for dropped messages in panic here so that printk
+- * suppression can occur as early as possible if necessary.
+- */
+- if (pmsg->dropped &&
+- panic_in_progress() &&
+- panic_console_dropped++ > 10) {
+- suppress_panic_printk = 1;
+- pr_warn_once("Too many dropped messages. Suppress messages on non-panic CPUs to prevent livelock.\n");
+- }
+-
+ /* Skip record that has level above the console loglevel. */
+ if (may_suppress && suppress_message_printing(r.info->level))
+ goto out;
diff --git a/debian/patches-rt/0098-panic-Flush-kernel-log-buffer-at-the-end.patch b/debian/patches-rt/0098-panic-Flush-kernel-log-buffer-at-the-end.patch
new file mode 100644
index 0000000000..ec3cbc5dd2
--- /dev/null
+++ b/debian/patches-rt/0098-panic-Flush-kernel-log-buffer-at-the-end.patch
@@ -0,0 +1,38 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Thu, 14 Dec 2023 20:48:23 +0000
+Subject: [PATCH 098/134] panic: Flush kernel log buffer at the end
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+If the kernel crashes in a context where printk() calls always
+defer printing (such as in NMI or inside a printk_safe section)
+then the final panic messages will be deferred to irq_work. But
+if irq_work is not available, the messages will not get printed
+unless explicitly flushed. The result is that the final
+"end Kernel panic" banner does not get printed.
+
+Add one final flush after the last printk() call to make sure
+the final panic messages make it out as well.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/panic.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -442,6 +442,14 @@ void panic(const char *fmt, ...)
+
+ /* Do not scroll important messages printed above */
+ suppress_printk = 1;
++
++ /*
++ * The final messages may not have been printed if in a context that
++ * defers printing (such as NMI) and irq_work is not available.
++ * Explicitly flush the kernel log buffer one last time.
++ */
++ console_flush_on_panic(CONSOLE_FLUSH_PENDING);
++
+ local_irq_enable();
+ for (i = 0; ; i += PANIC_TIMER_STEP) {
+ touch_softlockup_watchdog();
diff --git a/debian/patches-rt/0099-printk-Consider-nbcon-boot-consoles-on-seq-init.patch b/debian/patches-rt/0099-printk-Consider-nbcon-boot-consoles-on-seq-init.patch
new file mode 100644
index 0000000000..b4e61c7689
--- /dev/null
+++ b/debian/patches-rt/0099-printk-Consider-nbcon-boot-consoles-on-seq-init.patch
@@ -0,0 +1,51 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Wed, 22 Nov 2023 11:23:43 +0000
+Subject: [PATCH 099/134] printk: Consider nbcon boot consoles on seq init
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+If a non-boot console is registering and boot consoles exist, the
+consoles are flushed before being unregistered. This allows the
+non-boot console to continue where the boot console left off.
+
+If for whatever reason flushing fails, the lowest seq found from
+any of the enabled boot consoles is used. Until now con->seq was
+checked. However, if it is an nbcon boot console, the function
+nbcon_seq_read() must be used to read seq because con->seq is
+always 0.
+
+Check if it is an nbcon boot console and if so call
+nbcon_seq_read() to read seq.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -3392,11 +3392,20 @@ static void console_init_seq(struct cons
+
+ newcon->seq = prb_next_seq(prb);
+ for_each_console(con) {
+- if ((con->flags & CON_BOOT) &&
+- (con->flags & CON_ENABLED) &&
+- con->seq < newcon->seq) {
+- newcon->seq = con->seq;
++ u64 seq;
++
++ if (!((con->flags & CON_BOOT) &&
++ (con->flags & CON_ENABLED))) {
++ continue;
+ }
++
++ if (con->flags & CON_NBCON)
++ seq = nbcon_seq_read(con);
++ else
++ seq = con->seq;
++
++ if (seq < newcon->seq)
++ newcon->seq = seq;
+ }
+ }
+
diff --git a/debian/patches-rt/0100-printk-Add-sparse-notation-to-console_srcu-locking.patch b/debian/patches-rt/0100-printk-Add-sparse-notation-to-console_srcu-locking.patch
new file mode 100644
index 0000000000..8a1473c93d
--- /dev/null
+++ b/debian/patches-rt/0100-printk-Add-sparse-notation-to-console_srcu-locking.patch
@@ -0,0 +1,36 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 9 Oct 2023 13:55:19 +0000
+Subject: [PATCH 100/134] printk: Add sparse notation to console_srcu locking
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+kernel/printk/printk.c:284:5: sparse: sparse: context imbalance in
+'console_srcu_read_lock' - wrong count at exit
+include/linux/srcu.h:301:9: sparse: sparse: context imbalance in
+'console_srcu_read_unlock' - unexpected unlock
+
+Reported-by: kernel test robot <lkp@intel.com>
+Fixes: 6c4afa79147e ("printk: Prepare for SRCU console list protection")
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -282,6 +282,7 @@ EXPORT_SYMBOL(console_list_unlock);
+ * Return: A cookie to pass to console_srcu_read_unlock().
+ */
+ int console_srcu_read_lock(void)
++ __acquires(&console_srcu)
+ {
+ return srcu_read_lock_nmisafe(&console_srcu);
+ }
+@@ -295,6 +296,7 @@ EXPORT_SYMBOL(console_srcu_read_lock);
+ * Counterpart to console_srcu_read_lock()
+ */
+ void console_srcu_read_unlock(int cookie)
++ __releases(&console_srcu)
+ {
+ srcu_read_unlock_nmisafe(&console_srcu, cookie);
+ }
diff --git a/debian/patches-rt/0101-printk-nbcon-Ensure-ownership-release-on-failed-emit.patch b/debian/patches-rt/0101-printk-nbcon-Ensure-ownership-release-on-failed-emit.patch
new file mode 100644
index 0000000000..d96f2dc2b9
--- /dev/null
+++ b/debian/patches-rt/0101-printk-nbcon-Ensure-ownership-release-on-failed-emit.patch
@@ -0,0 +1,58 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Fri, 20 Oct 2023 09:52:59 +0000
+Subject: [PATCH 101/134] printk: nbcon: Ensure ownership release on failed
+ emit
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Until now it was assumed that ownership has been lost when the
+write_atomic() callback fails. nbcon_emit_next_record() only
+returns false when ownership has been lost.
+
+Ensure ownership has been lost before reporting failure by
+explicitly attempting a release. If the current context is not
+the owner, the release has no effect.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/nbcon.c | 19 ++++++++++---------
+ 1 file changed, 10 insertions(+), 9 deletions(-)
+
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -852,7 +852,7 @@ static bool nbcon_emit_next_record(struc
+ unsigned long con_dropped;
+ struct nbcon_state cur;
+ unsigned long dropped;
+- bool done;
++ bool done = false;
+
+ /*
+ * The printk buffers are filled within an unsafe section. This
+@@ -891,17 +891,18 @@ static bool nbcon_emit_next_record(struc
+ nbcon_state_read(con, &cur);
+ wctxt->unsafe_takeover = cur.unsafe_takeover;
+
+- if (con->write_atomic) {
++ if (con->write_atomic)
+ done = con->write_atomic(con, wctxt);
+- } else {
+- nbcon_context_release(ctxt);
+- WARN_ON_ONCE(1);
+- done = false;
+- }
+
+- /* If not done, the emit was aborted. */
+- if (!done)
++ if (!done) {
++ /*
++ * The emit was aborted, probably due to a loss of ownership.
++ * Ensure ownership was lost or released before reporting the
++ * loss.
++ */
++ nbcon_context_release(ctxt);
+ return false;
++ }
+
+ /*
+ * Since any dropped message was successfully output, reset the
diff --git a/debian/patches-rt/0102-printk-Check-printk_deferred_enter-_exit-usage.patch b/debian/patches-rt/0102-printk-Check-printk_deferred_enter-_exit-usage.patch
new file mode 100644
index 0000000000..2e1d736ea7
--- /dev/null
+++ b/debian/patches-rt/0102-printk-Check-printk_deferred_enter-_exit-usage.patch
@@ -0,0 +1,58 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 22 Sep 2023 14:58:18 +0000
+Subject: [PATCH 102/134] printk: Check printk_deferred_enter()/_exit() usage
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Add validation that printk_deferred_enter()/_exit() are called in
+non-migration contexts.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/printk.h | 7 +++++--
+ kernel/printk/printk_safe.c | 12 ++++++++++++
+ 2 files changed, 17 insertions(+), 2 deletions(-)
+
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -159,13 +159,16 @@ int _printk(const char *fmt, ...);
+
+ extern void __printk_safe_enter(void);
+ extern void __printk_safe_exit(void);
++extern void __printk_deferred_enter(void);
++extern void __printk_deferred_exit(void);
++
+ /*
+ * The printk_deferred_enter/exit macros are available only as a hack for
+ * some code paths that need to defer all printk console printing. Interrupts
+ * must be disabled for the deferred duration.
+ */
+-#define printk_deferred_enter __printk_safe_enter
+-#define printk_deferred_exit __printk_safe_exit
++#define printk_deferred_enter() __printk_deferred_enter()
++#define printk_deferred_exit() __printk_deferred_exit()
+
+ /*
+ * Please don't use printk_ratelimit(), because it shares ratelimiting state
+--- a/kernel/printk/printk_safe.c
++++ b/kernel/printk/printk_safe.c
+@@ -26,6 +26,18 @@ void __printk_safe_exit(void)
+ this_cpu_dec(printk_context);
+ }
+
++void __printk_deferred_enter(void)
++{
++ cant_migrate();
++ this_cpu_inc(printk_context);
++}
++
++void __printk_deferred_exit(void)
++{
++ cant_migrate();
++ this_cpu_dec(printk_context);
++}
++
+ asmlinkage int vprintk(const char *fmt, va_list args)
+ {
+ #ifdef CONFIG_KGDB_KDB
diff --git a/debian/patches-rt/0103-printk-nbcon-Implement-processing-in-port-lock-wrapp.patch b/debian/patches-rt/0103-printk-nbcon-Implement-processing-in-port-lock-wrapp.patch
new file mode 100644
index 0000000000..cee70cb44e
--- /dev/null
+++ b/debian/patches-rt/0103-printk-nbcon-Implement-processing-in-port-lock-wrapp.patch
@@ -0,0 +1,244 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Wed, 13 Sep 2023 08:35:23 +0000
+Subject: [PATCH 103/134] printk: nbcon: Implement processing in port->lock
+ wrapper
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Currently the port->lock wrappers uart_port_lock(),
+uart_port_unlock() (and their variants) only lock/unlock
+the spin_lock.
+
+If the port is an nbcon console, the wrappers must also
+acquire/release the console and mark the region as unsafe. This
+allows general port->lock synchronization to be synchronized
+with the nbcon console ownership.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/console.h | 2 +
+ include/linux/printk.h | 13 +++++++
+ include/linux/serial_core.h | 18 +++++++++-
+ kernel/printk/nbcon.c | 77 ++++++++++++++++++++++++++++++++++++++++++++
+ 4 files changed, 108 insertions(+), 2 deletions(-)
+
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -299,6 +299,7 @@ struct nbcon_write_context {
+ * @nbcon_state: State for nbcon consoles
+ * @nbcon_seq: Sequence number of the next record for nbcon to print
+ * @pbufs: Pointer to nbcon private buffer
++ * @locked_port: True, if the port lock is locked by nbcon
+ */
+ struct console {
+ char name[16];
+@@ -325,6 +326,7 @@ struct console {
+ atomic_t __private nbcon_state;
+ atomic_long_t __private nbcon_seq;
+ struct printk_buffers *pbufs;
++ bool locked_port;
+ };
+
+ #ifdef CONFIG_LOCKDEP
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -9,6 +9,8 @@
+ #include <linux/ratelimit_types.h>
+ #include <linux/once_lite.h>
+
++struct uart_port;
++
+ extern const char linux_banner[];
+ extern const char linux_proc_banner[];
+
+@@ -195,6 +197,8 @@ void show_regs_print_info(const char *lo
+ extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold;
+ extern asmlinkage void dump_stack(void) __cold;
+ void printk_trigger_flush(void);
++extern void nbcon_acquire(struct uart_port *up);
++extern void nbcon_release(struct uart_port *up);
+ #else
+ static inline __printf(1, 0)
+ int vprintk(const char *s, va_list args)
+@@ -274,6 +278,15 @@ static inline void dump_stack(void)
+ static inline void printk_trigger_flush(void)
+ {
+ }
++
++static inline void nbcon_acquire(struct uart_port *up)
++{
++}
++
++static inline void nbcon_release(struct uart_port *up)
++{
++}
++
+ #endif
+
+ #ifdef CONFIG_SMP
+--- a/include/linux/serial_core.h
++++ b/include/linux/serial_core.h
+@@ -595,6 +595,7 @@ struct uart_port {
+ static inline void uart_port_lock(struct uart_port *up)
+ {
+ spin_lock(&up->lock);
++ nbcon_acquire(up);
+ }
+
+ /**
+@@ -604,6 +605,7 @@ static inline void uart_port_lock(struct
+ static inline void uart_port_lock_irq(struct uart_port *up)
+ {
+ spin_lock_irq(&up->lock);
++ nbcon_acquire(up);
+ }
+
+ /**
+@@ -614,6 +616,7 @@ static inline void uart_port_lock_irq(st
+ static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
+ {
+ spin_lock_irqsave(&up->lock, *flags);
++ nbcon_acquire(up);
+ }
+
+ /**
+@@ -624,7 +627,11 @@ static inline void uart_port_lock_irqsav
+ */
+ static inline bool uart_port_trylock(struct uart_port *up)
+ {
+- return spin_trylock(&up->lock);
++ if (!spin_trylock(&up->lock))
++ return false;
++
++ nbcon_acquire(up);
++ return true;
+ }
+
+ /**
+@@ -636,7 +643,11 @@ static inline bool uart_port_trylock(str
+ */
+ static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags)
+ {
+- return spin_trylock_irqsave(&up->lock, *flags);
++ if (!spin_trylock_irqsave(&up->lock, *flags))
++ return false;
++
++ nbcon_acquire(up);
++ return true;
+ }
+
+ /**
+@@ -645,6 +656,7 @@ static inline bool uart_port_trylock_irq
+ */
+ static inline void uart_port_unlock(struct uart_port *up)
+ {
++ nbcon_release(up);
+ spin_unlock(&up->lock);
+ }
+
+@@ -654,6 +666,7 @@ static inline void uart_port_unlock(stru
+ */
+ static inline void uart_port_unlock_irq(struct uart_port *up)
+ {
++ nbcon_release(up);
+ spin_unlock_irq(&up->lock);
+ }
+
+@@ -664,6 +677,7 @@ static inline void uart_port_unlock_irq(
+ */
+ static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
+ {
++ nbcon_release(up);
+ spin_unlock_irqrestore(&up->lock, flags);
+ }
+
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -6,6 +6,7 @@
+ #include <linux/console.h>
+ #include <linux/delay.h>
+ #include <linux/slab.h>
++#include <linux/serial_core.h>
+ #include "internal.h"
+ /*
+ * Printk console printing implementation for consoles which does not depend
+@@ -995,3 +996,79 @@ void nbcon_free(struct console *con)
+
+ con->pbufs = NULL;
+ }
++
++static inline bool uart_is_nbcon(struct uart_port *up)
++{
++ int cookie;
++ bool ret;
++
++ if (!uart_console(up))
++ return false;
++
++ cookie = console_srcu_read_lock();
++ ret = (console_srcu_read_flags(up->cons) & CON_NBCON);
++ console_srcu_read_unlock(cookie);
++ return ret;
++}
++
++/**
++ * nbcon_acquire - The second half of the port locking wrapper
++ * @up: The uart port whose @lock was locked
++ *
++ * The uart_port_lock() wrappers will first lock the spin_lock @up->lock.
++ * Then this function is called to implement nbcon-specific processing.
++ *
++ * If @up is an nbcon console, this console will be acquired and marked as
++ * unsafe. Otherwise this function does nothing.
++ */
++void nbcon_acquire(struct uart_port *up)
++{
++ struct console *con = up->cons;
++ struct nbcon_context ctxt;
++
++ if (!uart_is_nbcon(up))
++ return;
++
++ WARN_ON_ONCE(con->locked_port);
++
++ do {
++ do {
++ memset(&ctxt, 0, sizeof(ctxt));
++ ctxt.console = con;
++ ctxt.prio = NBCON_PRIO_NORMAL;
++ } while (!nbcon_context_try_acquire(&ctxt));
++
++ } while (!nbcon_context_enter_unsafe(&ctxt));
++
++ con->locked_port = true;
++}
++EXPORT_SYMBOL_GPL(nbcon_acquire);
++
++/**
++ * nbcon_release - The first half of the port unlocking wrapper
++ * @up: The uart port whose @lock is about to be unlocked
++ *
++ * The uart_port_unlock() wrappers will first call this function to implement
++ * nbcon-specific processing. Then afterwards the uart_port_unlock() wrappers
++ * will unlock the spin_lock @up->lock.
++ *
++ * If @up is an nbcon console, the console will be marked as safe and
++ * released. Otherwise this function does nothing.
++ */
++void nbcon_release(struct uart_port *up)
++{
++ struct console *con = up->cons;
++ struct nbcon_context ctxt = {
++ .console = con,
++ .prio = NBCON_PRIO_NORMAL,
++ };
++
++ if (!con->locked_port)
++ return;
++
++ if (nbcon_context_exit_unsafe(&ctxt))
++ nbcon_context_release(&ctxt);
++
++ con->locked_port = false;
++}
++EXPORT_SYMBOL_GPL(nbcon_release);
diff --git a/debian/patches-rt/0104-printk-nbcon-Add-driver_enter-driver_exit-console-ca.patch b/debian/patches-rt/0104-printk-nbcon-Add-driver_enter-driver_exit-console-ca.patch
new file mode 100644
index 0000000000..d314cb53d4
--- /dev/null
+++ b/debian/patches-rt/0104-printk-nbcon-Add-driver_enter-driver_exit-console-ca.patch
@@ -0,0 +1,42 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Fri, 8 Dec 2023 15:54:27 +0000
+Subject: [PATCH 104/134] printk: nbcon: Add driver_enter/driver_exit console
+ callbacks
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Console drivers need some mechanism to synchronize between "normal
+driver activity" and console printing. For uart serial drivers it
+is the port lock. Other types of console drivers (network,
+graphics, USB) will need something as well.
+
+Provide 2 new mandatory nbcon console callbacks (driver_enter and
+driver_exit) to allow the consoles drivers to implement the
+appropriate synchronization. The callbacks are also expected to
+disable/enable migration.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/console.h | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -296,6 +296,8 @@ struct nbcon_write_context {
+ * @node: hlist node for the console list
+ *
+ * @write_atomic: Write callback for atomic context
++ * @driver_enter: Callback to begin synchronization with driver code
++ * @driver_exit: Callback to finish synchronization with driver code
+ * @nbcon_state: State for nbcon consoles
+ * @nbcon_seq: Sequence number of the next record for nbcon to print
+ * @pbufs: Pointer to nbcon private buffer
+@@ -323,6 +325,8 @@ struct console {
+ /* nbcon console specific members */
+ bool (*write_atomic)(struct console *con,
+ struct nbcon_write_context *wctxt);
++ void (*driver_enter)(struct console *con, unsigned long *flags);
++ void (*driver_exit)(struct console *con, unsigned long flags);
+ atomic_t __private nbcon_state;
+ atomic_long_t __private nbcon_seq;
+ struct printk_buffers *pbufs;
diff --git a/debian/patches-rt/0105-printk-Make-console_is_usable-available-to-nbcon.patch b/debian/patches-rt/0105-printk-Make-console_is_usable-available-to-nbcon.patch
new file mode 100644
index 0000000000..a0bf51492a
--- /dev/null
+++ b/debian/patches-rt/0105-printk-Make-console_is_usable-available-to-nbcon.patch
@@ -0,0 +1,103 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Tue, 12 Sep 2023 13:25:41 +0000
+Subject: [PATCH 105/134] printk: Make console_is_usable() available to nbcon
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Move console_is_usable() as-is into internal.h so that it can
+be used by nbcon printing functions as well.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/internal.h | 32 ++++++++++++++++++++++++++++++++
+ kernel/printk/printk.c | 30 ------------------------------
+ 2 files changed, 32 insertions(+), 30 deletions(-)
+
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -78,6 +78,36 @@ bool nbcon_alloc(struct console *con);
+ void nbcon_init(struct console *con);
+ void nbcon_free(struct console *con);
+
++/*
++ * Check if the given console is currently capable and allowed to print
++ * records.
++ *
++ * Requires the console_srcu_read_lock.
++ */
++static inline bool console_is_usable(struct console *con)
++{
++ short flags = console_srcu_read_flags(con);
++
++ if (!(flags & CON_ENABLED))
++ return false;
++
++ if ((flags & CON_SUSPENDED))
++ return false;
++
++ if (!con->write)
++ return false;
++
++ /*
++ * Console drivers may assume that per-cpu resources have been
++ * allocated. So unless they're explicitly marked as being able to
++ * cope (CON_ANYTIME) don't call them until this CPU is officially up.
++ */
++ if (!cpu_online(raw_smp_processor_id()) && !(flags & CON_ANYTIME))
++ return false;
++
++ return true;
++}
++
+ #else
+
+ #define PRINTK_PREFIX_MAX 0
+@@ -99,6 +129,8 @@ static inline bool nbcon_alloc(struct co
+ static inline void nbcon_init(struct console *con) { }
+ static inline void nbcon_free(struct console *con) { }
+
++static inline bool console_is_usable(struct console *con) { return false; }
++
+ #endif /* CONFIG_PRINTK */
+
+ extern struct printk_buffers printk_shared_pbufs;
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2695,36 +2695,6 @@ int is_console_locked(void)
+ }
+ EXPORT_SYMBOL(is_console_locked);
+
+-/*
+- * Check if the given console is currently capable and allowed to print
+- * records.
+- *
+- * Requires the console_srcu_read_lock.
+- */
+-static inline bool console_is_usable(struct console *con)
+-{
+- short flags = console_srcu_read_flags(con);
+-
+- if (!(flags & CON_ENABLED))
+- return false;
+-
+- if ((flags & CON_SUSPENDED))
+- return false;
+-
+- if (!con->write)
+- return false;
+-
+- /*
+- * Console drivers may assume that per-cpu resources have been
+- * allocated. So unless they're explicitly marked as being able to
+- * cope (CON_ANYTIME) don't call them until this CPU is officially up.
+- */
+- if (!cpu_online(raw_smp_processor_id()) && !(flags & CON_ANYTIME))
+- return false;
+-
+- return true;
+-}
+-
+ static void __console_unlock(void)
+ {
+ console_locked = 0;
diff --git a/debian/patches-rt/0106-printk-Let-console_is_usable-handle-nbcon.patch b/debian/patches-rt/0106-printk-Let-console_is_usable-handle-nbcon.patch
new file mode 100644
index 0000000000..62a776cf2f
--- /dev/null
+++ b/debian/patches-rt/0106-printk-Let-console_is_usable-handle-nbcon.patch
@@ -0,0 +1,43 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Tue, 12 Sep 2023 13:53:21 +0000
+Subject: [PATCH 106/134] printk: Let console_is_usable() handle nbcon
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+The nbcon consoles use a different printing callback. For nbcon
+consoles, check for the write_atomic() callback instead of
+write().
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/internal.h | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -80,6 +80,8 @@ void nbcon_free(struct console *con);
+
+ /*
+ * Check if the given console is currently capable and allowed to print
++ * records. Note that this function does not consider the current context,
++ * which can also play a role in deciding if @con can be used to print
+ * records.
+ *
+ * Requires the console_srcu_read_lock.
+@@ -94,8 +96,13 @@ static inline bool console_is_usable(str
+ if ((flags & CON_SUSPENDED))
+ return false;
+
+- if (!con->write)
+- return false;
++ if (flags & CON_NBCON) {
++ if (!con->write_atomic)
++ return false;
++ } else {
++ if (!con->write)
++ return false;
++ }
+
+ /*
+ * Console drivers may assume that per-cpu resources have been
diff --git a/debian/patches-rt/0107-printk-Add-flags-argument-for-console_is_usable.patch b/debian/patches-rt/0107-printk-Add-flags-argument-for-console_is_usable.patch
new file mode 100644
index 0000000000..e8c4b4a739
--- /dev/null
+++ b/debian/patches-rt/0107-printk-Add-flags-argument-for-console_is_usable.patch
@@ -0,0 +1,67 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Tue, 12 Sep 2023 13:45:33 +0000
+Subject: [PATCH 107/134] printk: Add @flags argument for console_is_usable()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+The caller of console_is_usable() usually needs @console->flags
+for its own checks. Rather than having console_is_usable() read
+its own copy, make the caller pass in the @flags. This also
+ensures that the caller saw the same @flags value.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/internal.h | 8 ++------
+ kernel/printk/printk.c | 5 +++--
+ 2 files changed, 5 insertions(+), 8 deletions(-)
+
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -83,13 +83,9 @@ void nbcon_free(struct console *con);
+ * records. Note that this function does not consider the current context,
+ * which can also play a role in deciding if @con can be used to print
+ * records.
+- *
+- * Requires the console_srcu_read_lock.
+ */
+-static inline bool console_is_usable(struct console *con)
++static inline bool console_is_usable(struct console *con, short flags)
+ {
+- short flags = console_srcu_read_flags(con);
+-
+ if (!(flags & CON_ENABLED))
+ return false;
+
+@@ -136,7 +132,7 @@ static inline bool nbcon_alloc(struct co
+ static inline void nbcon_init(struct console *con) { }
+ static inline void nbcon_free(struct console *con) { }
+
+-static inline bool console_is_usable(struct console *con) { return false; }
++static inline bool console_is_usable(struct console *con, short flags) { return false; }
+
+ #endif /* CONFIG_PRINTK */
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2940,9 +2940,10 @@ static bool console_flush_all(bool do_co
+
+ cookie = console_srcu_read_lock();
+ for_each_console_srcu(con) {
++ short flags = console_srcu_read_flags(con);
+ bool progress;
+
+- if (!console_is_usable(con))
++ if (!console_is_usable(con, flags))
+ continue;
+ any_usable = true;
+
+@@ -3784,7 +3785,7 @@ static bool __pr_flush(struct console *c
+ * that they make forward progress, so only increment
+ * @diff for usable consoles.
+ */
+- if (!console_is_usable(c))
++ if (!console_is_usable(c, flags))
+ continue;
+
+ if (flags & CON_NBCON) {
diff --git a/debian/patches-rt/0108-printk-nbcon-Provide-function-to-flush-using-write_a.patch b/debian/patches-rt/0108-printk-nbcon-Provide-function-to-flush-using-write_a.patch
new file mode 100644
index 0000000000..14933eef67
--- /dev/null
+++ b/debian/patches-rt/0108-printk-nbcon-Provide-function-to-flush-using-write_a.patch
@@ -0,0 +1,186 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 12 Sep 2023 12:00:08 +0000
+Subject: [PATCH 108/134] printk: nbcon: Provide function to flush using
+ write_atomic()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Provide nbcon_atomic_flush_all() to perform flushing of all
+registered nbcon consoles using their write_atomic() callback.
+Like with legacy consoles, the nbcon consoles are flushed one
+record per console. This allows all nbcon consoles to print
+lines pseudo-simultaneously, rather than one console waiting
+for the full ringbuffer to dump to another console before
+printing anything.
+
+Unlike console_flush_all(), nbcon_atomic_flush_all() will only
+flush up through the newest record at the time of the call.
+This prevents a CPU from printing unbounded when other CPUs are
+adding records.
+
+Perform nbcon console atomic flushing in
+console_flush_on_panic(). This function is not only used in
+panic() but also other locations where there may be stored
+messages that need to be flushed.
+
+Co-developed-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Thomas Gleixner (Intel) <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/internal.h | 2
+ kernel/printk/nbcon.c | 100 ++++++++++++++++++++++++++++++++++++++++++++++-
+ kernel/printk/printk.c | 2
+ 3 files changed, 102 insertions(+), 2 deletions(-)
+
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -77,6 +77,7 @@ void nbcon_seq_force(struct console *con
+ bool nbcon_alloc(struct console *con);
+ void nbcon_init(struct console *con);
+ void nbcon_free(struct console *con);
++void nbcon_atomic_flush_all(void);
+
+ /*
+ * Check if the given console is currently capable and allowed to print
+@@ -131,6 +132,7 @@ static inline void nbcon_seq_force(struc
+ static inline bool nbcon_alloc(struct console *con) { return false; }
+ static inline void nbcon_init(struct console *con) { }
+ static inline void nbcon_free(struct console *con) { }
++static inline void nbcon_atomic_flush_all(void) { }
+
+ static inline bool console_is_usable(struct console *con, short flags) { return false; }
+
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -539,7 +539,6 @@ static struct printk_buffers panic_nbcon
+ * in an unsafe state. Otherwise, on success the caller may assume
+ * the console is not in an unsafe state.
+ */
+-__maybe_unused
+ static bool nbcon_context_try_acquire(struct nbcon_context *ctxt)
+ {
+ unsigned int cpu = smp_processor_id();
+@@ -841,7 +840,6 @@ EXPORT_SYMBOL_GPL(nbcon_exit_unsafe);
+ * When true is returned, @wctxt->ctxt.backlog indicates whether there are
+ * still records pending in the ringbuffer,
+ */
+-__maybe_unused
+ static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt)
+ {
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+@@ -931,6 +929,104 @@ static bool nbcon_emit_next_record(struc
+ }
+
+ /**
++ * nbcon_atomic_emit_one - Print one record for an nbcon console using the
++ * write_atomic() callback
++ * @wctxt: An initialized write context struct to use
++ * for this context
++ *
++ * Return: False if the given console could not print a record or there
++ * are no more records to print, otherwise true.
++ *
++ * This is an internal helper to handle the locking of the console before
++ * calling nbcon_emit_next_record().
++ */
++static bool nbcon_atomic_emit_one(struct nbcon_write_context *wctxt)
++{
++ struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
++
++ if (!nbcon_context_try_acquire(ctxt))
++ return false;
++
++ /*
++ * nbcon_emit_next_record() returns false when the console was
++ * handed over or taken over. In both cases the context is no
++ * longer valid.
++ */
++ if (!nbcon_emit_next_record(wctxt))
++ return false;
++
++ nbcon_context_release(ctxt);
++
++ return ctxt->backlog;
++}
++
++/**
++ * __nbcon_atomic_flush_all - Flush all nbcon consoles using their
++ * write_atomic() callback
++ * @stop_seq: Flush up until this record
++ */
++static void __nbcon_atomic_flush_all(u64 stop_seq)
++{
++ struct nbcon_write_context wctxt = { };
++ struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
++ struct console *con;
++ bool any_progress;
++ int cookie;
++
++ do {
++ any_progress = false;
++
++ cookie = console_srcu_read_lock();
++ for_each_console_srcu(con) {
++ short flags = console_srcu_read_flags(con);
++ unsigned long irq_flags;
++
++ if (!(flags & CON_NBCON))
++ continue;
++
++ if (!console_is_usable(con, flags))
++ continue;
++
++ if (nbcon_seq_read(con) >= stop_seq)
++ continue;
++
++ memset(ctxt, 0, sizeof(*ctxt));
++ ctxt->console = con;
++ ctxt->spinwait_max_us = 2000;
++ ctxt->prio = NBCON_PRIO_NORMAL;
++
++ /*
++ * Atomic flushing does not use console driver
++ * synchronization (i.e. it does not hold the port
++ * lock for uart consoles). Therefore IRQs must be
++ * disabled to avoid being interrupted and then
+	 * calling into a driver that will deadlock trying to
++ * acquire console ownership.
++ */
++ local_irq_save(irq_flags);
++
++ any_progress |= nbcon_atomic_emit_one(&wctxt);
++
++ local_irq_restore(irq_flags);
++ }
++ console_srcu_read_unlock(cookie);
++ } while (any_progress);
++}
++
++/**
++ * nbcon_atomic_flush_all - Flush all nbcon consoles using their
++ * write_atomic() callback
++ *
++ * Flush the backlog up through the currently newest record. Any new
++ * records added while flushing will not be flushed. This is to avoid
++ * one CPU printing unbounded because other CPUs continue to add records.
++ */
++void nbcon_atomic_flush_all(void)
++{
++ __nbcon_atomic_flush_all(prb_next_reserve_seq(prb));
++}
++
++/**
+ * nbcon_alloc - Allocate buffers needed by the nbcon console
+ * @con: Console to allocate buffers for
+ *
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -3170,6 +3170,8 @@ void console_flush_on_panic(enum con_flu
+ console_srcu_read_unlock(cookie);
+ }
+
++ nbcon_atomic_flush_all();
++
+ console_flush_all(false, &next_seq, &handover);
+ }
+
diff --git a/debian/patches-rt/0109-printk-Track-registered-boot-consoles.patch b/debian/patches-rt/0109-printk-Track-registered-boot-consoles.patch
new file mode 100644
index 0000000000..51b866f20f
--- /dev/null
+++ b/debian/patches-rt/0109-printk-Track-registered-boot-consoles.patch
@@ -0,0 +1,78 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Tue, 24 Oct 2023 14:13:14 +0000
+Subject: [PATCH 109/134] printk: Track registered boot consoles
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Unfortunately it is not known if a boot console and a regular
+(legacy or nbcon) console use the same hardware. For this reason
+they must not be allowed to print simultaneously.
+
+For legacy consoles this is not an issue because they are
+already synchronized with the boot consoles using the console
+lock. However nbcon consoles can be triggered separately.
+
+Add a global flag @have_boot_console to identify if any boot
+consoles are registered. This will be used in follow-up commits
+to ensure that boot consoles and nbcon consoles cannot print
+simultaneously.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 24 ++++++++++++++++++++++++
+ 1 file changed, 24 insertions(+)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -463,6 +463,14 @@ static int console_msg_format = MSG_FORM
+ /* syslog_lock protects syslog_* variables and write access to clear_seq. */
+ static DEFINE_MUTEX(syslog_lock);
+
++/*
++ * Specifies if a boot console is registered. If boot consoles are present,
++ * nbcon consoles cannot print simultaneously and must be synchronized by
++ * the console lock. This is because boot consoles and nbcon consoles may
++ * have mapped the same hardware.
++ */
++bool have_boot_console;
++
+ #ifdef CONFIG_PRINTK
+ DECLARE_WAIT_QUEUE_HEAD(log_wait);
+ /* All 3 protected by @syslog_lock. */
+@@ -3500,6 +3508,9 @@ void register_console(struct console *ne
+ if (newcon->flags & CON_NBCON)
+ nbcon_init(newcon);
+
++ if (newcon->flags & CON_BOOT)
++ have_boot_console = true;
++
+ /*
+ * Put this console in the list - keep the
+ * preferred driver at the head of the list.
+@@ -3552,6 +3563,8 @@ EXPORT_SYMBOL(register_console);
+ /* Must be called under console_list_lock(). */
+ static int unregister_console_locked(struct console *console)
+ {
++ bool found_boot_con = false;
++ struct console *c;
+ int res;
+
+ lockdep_assert_console_list_lock_held();
+@@ -3599,6 +3612,17 @@ static int unregister_console_locked(str
+ if (console->exit)
+ res = console->exit(console);
+
++ /*
++ * With this console gone, the global flags tracking registered
++ * console types may have changed. Update them.
++ */
++ for_each_console(c) {
++ if (c->flags & CON_BOOT)
++ found_boot_con = true;
++ }
++ if (!found_boot_con)
++ have_boot_console = false;
++
+ return res;
+ }
+
diff --git a/debian/patches-rt/0110-printk-nbcon-Use-nbcon-consoles-in-console_flush_all.patch b/debian/patches-rt/0110-printk-nbcon-Use-nbcon-consoles-in-console_flush_all.patch
new file mode 100644
index 0000000000..b46ecd7655
--- /dev/null
+++ b/debian/patches-rt/0110-printk-nbcon-Use-nbcon-consoles-in-console_flush_all.patch
@@ -0,0 +1,173 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Tue, 19 Sep 2023 14:33:27 +0000
+Subject: [PATCH 110/134] printk: nbcon: Use nbcon consoles in
+ console_flush_all()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Allow nbcon consoles to print messages in the printk() caller
+context by integrating them into console_flush_all(). The
+write_atomic() callback is used for printing.
+
+Provide nbcon_console_emit_next_record(), which acts as the
+nbcon variant of console_emit_next_record(). Call this variant
+within console_flush_all() for nbcon consoles. Since nbcon
+consoles use their own @nbcon_seq variable to track the next
+record to print, this also must be appropriately handled.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/internal.h | 5 ++++
+ kernel/printk/nbcon.c | 50 +++++++++++++++++++++++++++++++++++++++++++++++
+ kernel/printk/printk.c | 19 +++++++++++++----
+ 3 files changed, 69 insertions(+), 5 deletions(-)
+
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -71,6 +71,8 @@ void defer_console_output(void);
+
+ u16 printk_parse_prefix(const char *text, int *level,
+ enum printk_info_flags *flags);
++void console_lock_spinning_enable(void);
++int console_lock_spinning_disable_and_check(int cookie);
+
+ u64 nbcon_seq_read(struct console *con);
+ void nbcon_seq_force(struct console *con, u64 seq);
+@@ -78,6 +80,7 @@ bool nbcon_alloc(struct console *con);
+ void nbcon_init(struct console *con);
+ void nbcon_free(struct console *con);
+ void nbcon_atomic_flush_all(void);
++bool nbcon_atomic_emit_next_record(struct console *con, bool *handover, int cookie);
+
+ /*
+ * Check if the given console is currently capable and allowed to print
+@@ -133,6 +136,8 @@ static inline bool nbcon_alloc(struct co
+ static inline void nbcon_init(struct console *con) { }
+ static inline void nbcon_free(struct console *con) { }
+ static inline void nbcon_atomic_flush_all(void) { }
++static inline bool nbcon_atomic_emit_next_record(struct console *con, bool *handover,
++ int cookie) { return false; }
+
+ static inline bool console_is_usable(struct console *con, short flags) { return false; }
+
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -532,6 +532,7 @@ static struct printk_buffers panic_nbcon
+ * nbcon_context_try_acquire - Try to acquire nbcon console
+ * @ctxt: The context of the caller
+ *
++ * Context: Any context which could not be migrated to another CPU.
+ * Return: True if the console was acquired. False otherwise.
+ *
+ * If the caller allowed an unsafe hostile takeover, on success the
+@@ -961,6 +962,55 @@ static bool nbcon_atomic_emit_one(struct
+ }
+
+ /**
++ * nbcon_atomic_emit_next_record - Print one record for an nbcon console
++ * using the write_atomic() callback
++ * @con: The console to print on
++ * @handover: Will be set to true if a printk waiter has taken over the
++ * console_lock, in which case the caller is no longer holding
++ * both the console_lock and the SRCU read lock. Otherwise it
++ * is set to false.
++ * @cookie: The cookie from the SRCU read lock.
++ *
++ * Context: Any context which could not be migrated to another CPU.
++ * Return: True if a record could be printed, otherwise false.
++ *
++ * This function is meant to be called by console_flush_all() to print records
++ * on nbcon consoles using the write_atomic() callback. Essentially it is the
++ * nbcon version of console_emit_next_record().
++ */
++bool nbcon_atomic_emit_next_record(struct console *con, bool *handover, int cookie)
++{
++ struct nbcon_write_context wctxt = { };
++ struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
++ unsigned long driver_flags;
++ bool progress = false;
++ unsigned long flags;
++
++ *handover = false;
++
++ /* Use the same locking order as console_emit_next_record(). */
++ printk_safe_enter_irqsave(flags);
++ console_lock_spinning_enable();
++ stop_critical_timings();
++
++ con->driver_enter(con, &driver_flags);
++ cant_migrate();
++
++ ctxt->console = con;
++ ctxt->prio = NBCON_PRIO_NORMAL;
++
++ progress = nbcon_atomic_emit_one(&wctxt);
++
++ con->driver_exit(con, driver_flags);
++
++ start_critical_timings();
++ *handover = console_lock_spinning_disable_and_check(cookie);
++ printk_safe_exit_irqrestore(flags);
++
++ return progress;
++}
++
++/**
+ * __nbcon_atomic_flush_all - Flush all nbcon consoles using their
+ * write_atomic() callback
+ * @stop_seq: Flush up until this record
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1872,7 +1872,7 @@ static bool console_waiter;
+ * there may be a waiter spinning (like a spinlock). Also it must be
+ * ready to hand over the lock at the end of the section.
+ */
+-static void console_lock_spinning_enable(void)
++void console_lock_spinning_enable(void)
+ {
+ /*
+ * Do not use spinning in panic(). The panic CPU wants to keep the lock.
+@@ -1911,7 +1911,7 @@ static void console_lock_spinning_enable
+ *
+ * Return: 1 if the lock rights were passed, 0 otherwise.
+ */
+-static int console_lock_spinning_disable_and_check(int cookie)
++int console_lock_spinning_disable_and_check(int cookie)
+ {
+ int waiter;
+
+@@ -2949,13 +2949,22 @@ static bool console_flush_all(bool do_co
+ cookie = console_srcu_read_lock();
+ for_each_console_srcu(con) {
+ short flags = console_srcu_read_flags(con);
++ u64 printk_seq;
+ bool progress;
+
+ if (!console_is_usable(con, flags))
+ continue;
+ any_usable = true;
+
+- progress = console_emit_next_record(con, handover, cookie);
++ if (flags & CON_NBCON) {
++ progress = nbcon_atomic_emit_next_record(con, handover, cookie);
++
++ printk_seq = nbcon_seq_read(con);
++ } else {
++ progress = console_emit_next_record(con, handover, cookie);
++
++ printk_seq = con->seq;
++ }
+
+ /*
+ * If a handover has occurred, the SRCU read lock
+@@ -2965,8 +2974,8 @@ static bool console_flush_all(bool do_co
+ return false;
+
+ /* Track the next of the highest seq flushed. */
+- if (con->seq > *next_seq)
+- *next_seq = con->seq;
++ if (printk_seq > *next_seq)
++ *next_seq = printk_seq;
+
+ if (!progress)
+ continue;
diff --git a/debian/patches-rt/0111-printk-nbcon-Assign-priority-based-on-CPU-state.patch b/debian/patches-rt/0111-printk-nbcon-Assign-priority-based-on-CPU-state.patch
new file mode 100644
index 0000000000..7e826c3e49
--- /dev/null
+++ b/debian/patches-rt/0111-printk-nbcon-Assign-priority-based-on-CPU-state.patch
@@ -0,0 +1,113 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Thu, 14 Dec 2023 14:38:42 +0000
+Subject: [PATCH 111/134] printk: nbcon: Assign priority based on CPU state
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Use the current state of the CPU to determine which priority to
+assign to the printing context.
+
+Note: The uart_port wrapper, which is responsible for non-console-
+ printing activities, will always use NORMAL priority.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/internal.h | 2 ++
+ kernel/printk/nbcon.c | 30 ++++++++++++++++++++++++++++--
+ 2 files changed, 30 insertions(+), 2 deletions(-)
+
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -79,6 +79,7 @@ void nbcon_seq_force(struct console *con
+ bool nbcon_alloc(struct console *con);
+ void nbcon_init(struct console *con);
+ void nbcon_free(struct console *con);
++enum nbcon_prio nbcon_get_default_prio(void);
+ void nbcon_atomic_flush_all(void);
+ bool nbcon_atomic_emit_next_record(struct console *con, bool *handover, int cookie);
+
+@@ -135,6 +136,7 @@ static inline void nbcon_seq_force(struc
+ static inline bool nbcon_alloc(struct console *con) { return false; }
+ static inline void nbcon_init(struct console *con) { }
+ static inline void nbcon_free(struct console *con) { }
++static inline enum nbcon_prio nbcon_get_default_prio(void) { return NBCON_PRIO_NONE; }
+ static inline void nbcon_atomic_flush_all(void) { }
+ static inline bool nbcon_atomic_emit_next_record(struct console *con, bool *handover,
+ int cookie) { return false; }
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -962,6 +962,22 @@ static bool nbcon_atomic_emit_one(struct
+ }
+
+ /**
++ * nbcon_get_default_prio - The appropriate nbcon priority to use for nbcon
++ * printing on the current CPU
++ *
++ * Context: Any context which could not be migrated to another CPU.
++ * Return: The nbcon_prio to use for acquiring an nbcon console in this
++ * context for printing.
++ */
++enum nbcon_prio nbcon_get_default_prio(void)
++{
++ if (this_cpu_in_panic())
++ return NBCON_PRIO_PANIC;
++
++ return NBCON_PRIO_NORMAL;
++}
++
++/**
+ * nbcon_atomic_emit_next_record - Print one record for an nbcon console
+ * using the write_atomic() callback
+ * @con: The console to print on
+@@ -997,7 +1013,7 @@ bool nbcon_atomic_emit_next_record(struc
+ cant_migrate();
+
+ ctxt->console = con;
+- ctxt->prio = NBCON_PRIO_NORMAL;
++ ctxt->prio = nbcon_get_default_prio();
+
+ progress = nbcon_atomic_emit_one(&wctxt);
+
+@@ -1043,7 +1059,6 @@ static void __nbcon_atomic_flush_all(u64
+ memset(ctxt, 0, sizeof(*ctxt));
+ ctxt->console = con;
+ ctxt->spinwait_max_us = 2000;
+- ctxt->prio = NBCON_PRIO_NORMAL;
+
+ /*
+ * Atomic flushing does not use console driver
+@@ -1052,9 +1067,14 @@ static void __nbcon_atomic_flush_all(u64
+ * disabled to avoid being interrupted and then
+ * calling into a driver that will deadlock trying
+ * acquire console ownership.
++ *
++ * This also disables migration in order to get the
++ * current CPU priority.
+ */
+ local_irq_save(irq_flags);
+
++ ctxt->prio = nbcon_get_default_prio();
++
+ any_progress |= nbcon_atomic_emit_one(&wctxt);
+
+ local_irq_restore(irq_flags);
+@@ -1166,6 +1186,9 @@ static inline bool uart_is_nbcon(struct
+ *
+ * If @up is an nbcon console, this console will be acquired and marked as
+ * unsafe. Otherwise this function does nothing.
++ *
++ * nbcon consoles acquired via the port lock wrapper always use priority
++ * NBCON_PRIO_NORMAL.
+ */
+ void nbcon_acquire(struct uart_port *up)
+ {
+@@ -1200,6 +1223,9 @@ EXPORT_SYMBOL_GPL(nbcon_acquire);
+ *
+ * If @up is an nbcon console, the console will be marked as safe and
+ * released. Otherwise this function does nothing.
++ *
++ * nbcon consoles acquired via the port lock wrapper always use priority
++ * NBCON_PRIO_NORMAL.
+ */
+ void nbcon_release(struct uart_port *up)
+ {
diff --git a/debian/patches-rt/0112-printk-nbcon-Add-unsafe-flushing-on-panic.patch b/debian/patches-rt/0112-printk-nbcon-Add-unsafe-flushing-on-panic.patch
new file mode 100644
index 0000000000..cc689de08e
--- /dev/null
+++ b/debian/patches-rt/0112-printk-nbcon-Add-unsafe-flushing-on-panic.patch
@@ -0,0 +1,96 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Fri, 20 Oct 2023 10:03:42 +0000
+Subject: [PATCH 112/134] printk: nbcon: Add unsafe flushing on panic
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Add nbcon_atomic_flush_unsafe() to flush all nbcon consoles
+using the write_atomic() callback and allowing unsafe hostile
+takeovers. Call this at the end of panic() as a final attempt
+to flush any pending messages.
+
+Note that legacy consoles use unsafe methods for flushing
+from the beginning of panic (see bust_spinlocks()). Therefore,
+systems using both legacy and nbcon consoles may still fail to
+see panic messages due to unsafe legacy console usage.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/printk.h | 5 +++++
+ kernel/panic.c | 1 +
+ kernel/printk/nbcon.c | 18 ++++++++++++++++--
+ 3 files changed, 22 insertions(+), 2 deletions(-)
+
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -199,6 +199,7 @@ extern asmlinkage void dump_stack(void)
+ void printk_trigger_flush(void);
+ extern void nbcon_acquire(struct uart_port *up);
+ extern void nbcon_release(struct uart_port *up);
++void nbcon_atomic_flush_unsafe(void);
+ #else
+ static inline __printf(1, 0)
+ int vprintk(const char *s, va_list args)
+@@ -287,6 +288,10 @@ static inline void nbcon_release(struct
+ {
+ }
+
++static inline void nbcon_atomic_flush_unsafe(void)
++{
++}
++
+ #endif
+
+ #ifdef CONFIG_SMP
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -449,6 +449,7 @@ void panic(const char *fmt, ...)
+ * Explicitly flush the kernel log buffer one last time.
+ */
+ console_flush_on_panic(CONSOLE_FLUSH_PENDING);
++ nbcon_atomic_flush_unsafe();
+
+ local_irq_enable();
+ for (i = 0; ; i += PANIC_TIMER_STEP) {
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -1030,8 +1030,9 @@ bool nbcon_atomic_emit_next_record(struc
+ * __nbcon_atomic_flush_all - Flush all nbcon consoles using their
+ * write_atomic() callback
+ * @stop_seq: Flush up until this record
++ * @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
+ */
+-static void __nbcon_atomic_flush_all(u64 stop_seq)
++static void __nbcon_atomic_flush_all(u64 stop_seq, bool allow_unsafe_takeover)
+ {
+ struct nbcon_write_context wctxt = { };
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
+@@ -1059,6 +1060,7 @@ static void __nbcon_atomic_flush_all(u64
+ memset(ctxt, 0, sizeof(*ctxt));
+ ctxt->console = con;
+ ctxt->spinwait_max_us = 2000;
++ ctxt->allow_unsafe_takeover = allow_unsafe_takeover;
+
+ /*
+ * Atomic flushing does not use console driver
+@@ -1093,7 +1095,19 @@ static void __nbcon_atomic_flush_all(u64
+ */
+ void nbcon_atomic_flush_all(void)
+ {
+- __nbcon_atomic_flush_all(prb_next_reserve_seq(prb));
++ __nbcon_atomic_flush_all(prb_next_reserve_seq(prb), false);
++}
++
++/**
++ * nbcon_atomic_flush_unsafe - Flush all nbcon consoles using their
++ * write_atomic() callback and allowing unsafe hostile takeovers
++ *
++ * Flush the backlog up through the currently newest record. Unsafe hostile
++ * takeovers will be performed, if necessary.
++ */
++void nbcon_atomic_flush_unsafe(void)
++{
++ __nbcon_atomic_flush_all(prb_next_reserve_seq(prb), true);
+ }
+
+ /**
diff --git a/debian/patches-rt/0113-printk-Avoid-console_lock-dance-if-no-legacy-or-boot.patch b/debian/patches-rt/0113-printk-Avoid-console_lock-dance-if-no-legacy-or-boot.patch
new file mode 100644
index 0000000000..a733638677
--- /dev/null
+++ b/debian/patches-rt/0113-printk-Avoid-console_lock-dance-if-no-legacy-or-boot.patch
@@ -0,0 +1,211 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Tue, 26 Sep 2023 12:44:07 +0000
+Subject: [PATCH 113/134] printk: Avoid console_lock dance if no legacy or boot
+ consoles
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Currently the console lock is used to attempt legacy-type
+printing even if there are no legacy or boot consoles registered.
+If no such consoles are registered, the console lock does not
+need to be taken.
+
+Also, if boot consoles are registered, nbcon consoles must
+perform their atomic printing under the console lock in order
+to be synchronized with boot consoles.
+
+Add tracking of legacy console registration and use it with
+boot console tracking to avoid unnecessary code paths, i.e.
+do not use the console lock if there are no boot consoles
+and no legacy consoles.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/internal.h | 12 +++++++++
+ kernel/printk/printk.c | 59 +++++++++++++++++++++++++++++++++++------------
+ 2 files changed, 56 insertions(+), 15 deletions(-)
+
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -44,6 +44,16 @@ enum printk_info_flags {
+ };
+
+ extern struct printk_ringbuffer *prb;
++extern bool have_legacy_console;
++extern bool have_boot_console;
++
++/*
++ * Specifies if the console lock/unlock dance is needed for console
++ * printing. If @have_boot_console is true, the nbcon consoles will
++ * be printed serially along with the legacy consoles because nbcon
++ * consoles cannot print simultaneously with boot consoles.
++ */
++#define printing_via_unlock (have_legacy_console || have_boot_console)
+
+ __printf(4, 0)
+ int vprintk_store(int facility, int level,
+@@ -122,6 +132,8 @@ static inline bool console_is_usable(str
+ #define PRINTK_MESSAGE_MAX 0
+ #define PRINTKRB_RECORD_MAX 0
+
++#define printing_via_unlock (false)
++
+ /*
+ * In !PRINTK builds we still export console_sem
+ * semaphore and some of console functions (console_unlock()/etc.), so
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -464,6 +464,13 @@ static int console_msg_format = MSG_FORM
+ static DEFINE_MUTEX(syslog_lock);
+
+ /*
++ * Specifies if a legacy console is registered. If legacy consoles are
++ * present, it is necessary to perform the console_lock/console_unlock dance
++ * whenever console flushing should occur.
++ */
++bool have_legacy_console;
++
++/*
+ * Specifies if a boot console is registered. If boot consoles are present,
+ * nbcon consoles cannot print simultaneously and must be synchronized by
+ * the console lock. This is because boot consoles and nbcon consoles may
+@@ -2345,7 +2352,7 @@ asmlinkage int vprintk_emit(int facility
+ printed_len = vprintk_store(facility, level, dev_info, fmt, args);
+
+ /* If called from the scheduler, we can not call up(). */
+- if (!in_sched) {
++ if (!in_sched && printing_via_unlock) {
+ /*
+ * The caller may be holding system-critical or
+ * timing-sensitive locks. Disable preemption during
+@@ -2646,7 +2653,7 @@ void resume_console(void)
+ */
+ static int console_cpu_notify(unsigned int cpu)
+ {
+- if (!cpuhp_tasks_frozen) {
++ if (!cpuhp_tasks_frozen && printing_via_unlock) {
+ /* If trylock fails, someone else is doing the printing */
+ if (console_trylock())
+ console_unlock();
+@@ -3189,7 +3196,8 @@ void console_flush_on_panic(enum con_flu
+
+ nbcon_atomic_flush_all();
+
+- console_flush_all(false, &next_seq, &handover);
++ if (printing_via_unlock)
++ console_flush_all(false, &next_seq, &handover);
+ }
+
+ /*
+@@ -3514,8 +3522,11 @@ void register_console(struct console *ne
+ newcon->dropped = 0;
+ console_init_seq(newcon, bootcon_registered);
+
+- if (newcon->flags & CON_NBCON)
++ if (newcon->flags & CON_NBCON) {
+ nbcon_init(newcon);
++ } else {
++ have_legacy_console = true;
++ }
+
+ if (newcon->flags & CON_BOOT)
+ have_boot_console = true;
+@@ -3572,6 +3583,7 @@ EXPORT_SYMBOL(register_console);
+ /* Must be called under console_list_lock(). */
+ static int unregister_console_locked(struct console *console)
+ {
++ bool found_legacy_con = false;
+ bool found_boot_con = false;
+ struct console *c;
+ int res;
+@@ -3628,9 +3640,13 @@ static int unregister_console_locked(str
+ for_each_console(c) {
+ if (c->flags & CON_BOOT)
+ found_boot_con = true;
++ if (!(c->flags & CON_NBCON))
++ found_legacy_con = true;
+ }
+ if (!found_boot_con)
+ have_boot_console = false;
++ if (!found_legacy_con)
++ have_legacy_console = false;
+
+ return res;
+ }
+@@ -3782,6 +3798,7 @@ static bool __pr_flush(struct console *c
+ u64 last_diff = 0;
+ u64 printk_seq;
+ short flags;
++ bool locked;
+ int cookie;
+ u64 diff;
+ u64 seq;
+@@ -3791,22 +3808,28 @@ static bool __pr_flush(struct console *c
+ seq = prb_next_reserve_seq(prb);
+
+ /* Flush the consoles so that records up to @seq are printed. */
+- console_lock();
+- console_unlock();
++ if (printing_via_unlock) {
++ console_lock();
++ console_unlock();
++ }
+
+ for (;;) {
+ unsigned long begin_jiffies;
+ unsigned long slept_jiffies;
+
++ locked = false;
+ diff = 0;
+
+- /*
+- * Hold the console_lock to guarantee safe access to
+- * console->seq. Releasing console_lock flushes more
+- * records in case @seq is still not printed on all
+- * usable consoles.
+- */
+- console_lock();
++ if (printing_via_unlock) {
++ /*
++ * Hold the console_lock to guarantee safe access to
++ * console->seq. Releasing console_lock flushes more
++ * records in case @seq is still not printed on all
++ * usable consoles.
++ */
++ console_lock();
++ locked = true;
++ }
+
+ cookie = console_srcu_read_lock();
+ for_each_console_srcu(c) {
+@@ -3826,6 +3849,7 @@ static bool __pr_flush(struct console *c
+ if (flags & CON_NBCON) {
+ printk_seq = nbcon_seq_read(c);
+ } else {
++ WARN_ON_ONCE(!locked);
+ printk_seq = c->seq;
+ }
+
+@@ -3837,7 +3861,8 @@ static bool __pr_flush(struct console *c
+ if (diff != last_diff && reset_on_progress)
+ remaining_jiffies = timeout_jiffies;
+
+- console_unlock();
++ if (locked)
++ console_unlock();
+
+ /* Note: @diff is 0 if there are no usable consoles. */
+ if (diff == 0 || remaining_jiffies == 0)
+@@ -3959,7 +3984,11 @@ void defer_console_output(void)
+ * New messages may have been added directly to the ringbuffer
+ * using vprintk_store(), so wake any waiters as well.
+ */
+- __wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT);
++ int val = PRINTK_PENDING_WAKEUP;
++
++ if (printing_via_unlock)
++ val |= PRINTK_PENDING_OUTPUT;
++ __wake_up_klogd(val);
+ }
+
+ void printk_trigger_flush(void)
diff --git a/debian/patches-rt/0114-printk-Track-nbcon-consoles.patch b/debian/patches-rt/0114-printk-Track-nbcon-consoles.patch
new file mode 100644
index 0000000000..fb85c5bbb5
--- /dev/null
+++ b/debian/patches-rt/0114-printk-Track-nbcon-consoles.patch
@@ -0,0 +1,67 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 11 Dec 2023 09:36:52 +0000
+Subject: [PATCH 114/134] printk: Track nbcon consoles
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Add a global flag @have_nbcon_console to identify if any nbcon
+consoles are registered. This will be used in follow-up commits
+to preserve legacy behavior when no nbcon consoles are registered.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 16 +++++++++++++++-
+ 1 file changed, 15 insertions(+), 1 deletion(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -471,6 +471,13 @@ static DEFINE_MUTEX(syslog_lock);
+ bool have_legacy_console;
+
+ /*
++ * Specifies if an nbcon console is registered. If nbcon consoles are present,
++ * synchronous printing of legacy consoles will not occur during panic until
++ * the backtrace has been stored to the ringbuffer.
++ */
++bool have_nbcon_console;
++
++/*
+ * Specifies if a boot console is registered. If boot consoles are present,
+ * nbcon consoles cannot print simultaneously and must be synchronized by
+ * the console lock. This is because boot consoles and nbcon consoles may
+@@ -3523,6 +3530,7 @@ void register_console(struct console *ne
+ console_init_seq(newcon, bootcon_registered);
+
+ if (newcon->flags & CON_NBCON) {
++ have_nbcon_console = true;
+ nbcon_init(newcon);
+ } else {
+ have_legacy_console = true;
+@@ -3584,6 +3592,7 @@ EXPORT_SYMBOL(register_console);
+ static int unregister_console_locked(struct console *console)
+ {
+ bool found_legacy_con = false;
++ bool found_nbcon_con = false;
+ bool found_boot_con = false;
+ struct console *c;
+ int res;
+@@ -3640,13 +3649,18 @@ static int unregister_console_locked(str
+ for_each_console(c) {
+ if (c->flags & CON_BOOT)
+ found_boot_con = true;
+- if (!(c->flags & CON_NBCON))
++
++ if (c->flags & CON_NBCON)
++ found_nbcon_con = true;
++ else
+ found_legacy_con = true;
+ }
+ if (!found_boot_con)
+ have_boot_console = false;
+ if (!found_legacy_con)
+ have_legacy_console = false;
++ if (!found_nbcon_con)
++ have_nbcon_console = false;
+
+ return res;
+ }
diff --git a/debian/patches-rt/0115-printk-Coordinate-direct-printing-in-panic.patch b/debian/patches-rt/0115-printk-Coordinate-direct-printing-in-panic.patch
new file mode 100644
index 0000000000..972df76688
--- /dev/null
+++ b/debian/patches-rt/0115-printk-Coordinate-direct-printing-in-panic.patch
@@ -0,0 +1,134 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Wed, 22 Nov 2023 11:56:58 +0000
+Subject: [PATCH 115/134] printk: Coordinate direct printing in panic
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Perform printing by nbcon consoles on the panic CPU from the
+printk() caller context in order to get panic messages printed
+as soon as possible.
+
+If legacy and nbcon consoles are registered, the legacy consoles
+will no longer perform direct printing on the panic CPU until
+after the backtrace has been stored. This will give the safe
+nbcon consoles a chance to print the panic messages before
+allowing the unsafe legacy consoles to print.
+
+If no nbcon consoles are registered, there is no change in
+behavior (i.e. legacy consoles will always attempt to print
+from the printk() caller context).
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/printk.h | 2 +
+ kernel/panic.c | 2 +
+ kernel/printk/printk.c | 53 ++++++++++++++++++++++++++++++++++++++++++-------
+ 3 files changed, 50 insertions(+), 7 deletions(-)
+
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -782,3 +782,5 @@ static inline void print_hex_dump_debug(
+ print_hex_dump_debug(prefix_str, prefix_type, 16, 1, buf, len, true)
+
+ #endif
++
++void printk_legacy_allow_panic_sync(void);
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -366,6 +366,8 @@ void panic(const char *fmt, ...)
+ */
+ atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
+
++ printk_legacy_allow_panic_sync();
++
+ panic_print_sys_info(false);
+
+ kmsg_dump(KMSG_DUMP_PANIC);
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2330,12 +2330,23 @@ int vprintk_store(int facility, int leve
+ return ret;
+ }
+
++static bool legacy_allow_panic_sync;
++
++/*
++ * This acts as a one-way switch to allow legacy consoles to print from
++ * the printk() caller context on a panic CPU.
++ */
++void printk_legacy_allow_panic_sync(void)
++{
++ legacy_allow_panic_sync = true;
++}
++
+ asmlinkage int vprintk_emit(int facility, int level,
+ const struct dev_printk_info *dev_info,
+ const char *fmt, va_list args)
+ {
++ bool do_trylock_unlock = printing_via_unlock;
+ int printed_len;
+- bool in_sched = false;
+
+ /* Suppress unimportant messages after panic happens */
+ if (unlikely(suppress_printk))
+@@ -2351,15 +2362,43 @@ asmlinkage int vprintk_emit(int facility
+
+ if (level == LOGLEVEL_SCHED) {
+ level = LOGLEVEL_DEFAULT;
+- in_sched = true;
++ /* If called from the scheduler, we can not call up(). */
++ do_trylock_unlock = false;
+ }
+
+ printk_delay(level);
+
+ printed_len = vprintk_store(facility, level, dev_info, fmt, args);
+
+- /* If called from the scheduler, we can not call up(). */
+- if (!in_sched && printing_via_unlock) {
++ if (!have_boot_console && have_nbcon_console) {
++ bool is_panic_context = this_cpu_in_panic();
++
++ /*
++ * In panic, the legacy consoles are not allowed to print from
++ * the printk calling context unless explicitly allowed. This
++ * gives the safe nbcon consoles a chance to print out all the
++ * panic messages first. This restriction only applies if
++ * there are nbcon consoles registered.
++ */
++ if (is_panic_context)
++ do_trylock_unlock &= legacy_allow_panic_sync;
++
++ /*
++ * There are situations where nbcon atomic printing should
++ * happen in the printk() caller context:
++ *
++ * - When this CPU is in panic.
++ *
++ * Note that if boot consoles are registered, the
++ * console_lock/console_unlock dance must be relied upon
++ * instead because nbcon consoles cannot print simultaneously
++ * with boot consoles.
++ */
++ if (is_panic_context)
++ nbcon_atomic_flush_all();
++ }
++
++ if (do_trylock_unlock) {
+ /*
+ * The caller may be holding system-critical or
+ * timing-sensitive locks. Disable preemption during
+@@ -2379,10 +2418,10 @@ asmlinkage int vprintk_emit(int facility
+ preempt_enable();
+ }
+
+- if (in_sched)
+- defer_console_output();
+- else
++ if (do_trylock_unlock)
+ wake_up_klogd();
++ else
++ defer_console_output();
+
+ return printed_len;
+ }
diff --git a/debian/patches-rt/0116-printk-nbcon-Implement-emergency-sections.patch b/debian/patches-rt/0116-printk-nbcon-Implement-emergency-sections.patch
new file mode 100644
index 0000000000..11175cacb0
--- /dev/null
+++ b/debian/patches-rt/0116-printk-nbcon-Implement-emergency-sections.patch
@@ -0,0 +1,232 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 11 Sep 2023 15:21:57 +0000
+Subject: [PATCH 116/134] printk: nbcon: Implement emergency sections
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+In emergency situations (something has gone wrong but the
+system continues to operate), usually important information
+(such as a backtrace) is generated via printk(). Each
+individual printk record has little meaning. It is the
+collection of printk messages that is most often needed by
+developers and users.
+
+In order to help ensure that the collection of printk messages
+in an emergency situation are all stored to the ringbuffer as
+quickly as possible, disable console output for that CPU while
+it is in the emergency situation. When exiting the emergency
+situation, trigger the consoles to be flushed.
+
+Add per-CPU emergency nesting tracking because an emergency
+can arise while in an emergency situation.
+
+Add functions to mark the beginning and end of emergency
+sections where the urgent messages are generated.
+
+Do not print if the current CPU is in an emergency state.
+
+Trigger console flushing when exiting all emergency nesting.
+
+Note that the emergency state is not system-wide. While one CPU
+is in an emergency state, another CPU may continue to print
+console messages.
+
+Co-developed-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Thomas Gleixner (Intel) <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/console.h | 4 ++
+ include/linux/printk.h | 7 ++--
+ kernel/printk/nbcon.c | 81 ++++++++++++++++++++++++++++++++++++++++++++++++
+ kernel/printk/printk.c | 25 +++++++++++---
+ 4 files changed, 109 insertions(+), 8 deletions(-)
+
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -458,10 +458,14 @@ static inline bool console_is_registered
+ hlist_for_each_entry(con, &console_list, node)
+
+ #ifdef CONFIG_PRINTK
++extern void nbcon_cpu_emergency_enter(void);
++extern void nbcon_cpu_emergency_exit(void);
+ extern bool nbcon_can_proceed(struct nbcon_write_context *wctxt);
+ extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt);
+ extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt);
+ #else
++static inline void nbcon_cpu_emergency_enter(void) { }
++static inline void nbcon_cpu_emergency_exit(void) { }
+ static inline bool nbcon_can_proceed(struct nbcon_write_context *wctxt) { return false; }
+ static inline bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) { return false; }
+ static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; }
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -197,6 +197,7 @@ void show_regs_print_info(const char *lo
+ extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold;
+ extern asmlinkage void dump_stack(void) __cold;
+ void printk_trigger_flush(void);
++void printk_legacy_allow_panic_sync(void);
+ extern void nbcon_acquire(struct uart_port *up);
+ extern void nbcon_release(struct uart_port *up);
+ void nbcon_atomic_flush_unsafe(void);
+@@ -280,6 +281,10 @@ static inline void printk_trigger_flush(
+ {
+ }
+
++static inline void printk_legacy_allow_panic_sync(void)
++{
++}
++
+ static inline void nbcon_acquire(struct uart_port *up)
+ {
+ }
+@@ -782,5 +787,3 @@ static inline void print_hex_dump_debug(
+ print_hex_dump_debug(prefix_str, prefix_type, 16, 1, buf, len, true)
+
+ #endif
+-
+-void printk_legacy_allow_panic_sync(void);
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -929,6 +929,29 @@ static bool nbcon_emit_next_record(struc
+ return nbcon_context_exit_unsafe(ctxt);
+ }
+
++/* Track the nbcon emergency nesting per CPU. */
++static DEFINE_PER_CPU(unsigned int, nbcon_pcpu_emergency_nesting);
++static unsigned int early_nbcon_pcpu_emergency_nesting __initdata;
++
++/**
++ * nbcon_get_cpu_emergency_nesting - Get the per CPU emergency nesting pointer
++ *
++ * Return: Either a pointer to the per CPU emergency nesting counter of
++ * the current CPU or to the init data during early boot.
++ */
++static __ref unsigned int *nbcon_get_cpu_emergency_nesting(void)
++{
++ /*
++ * The value of __printk_percpu_data_ready gets set in normal
++ * context and before SMP initialization. As a result it could
++ * never change while inside an nbcon emergency section.
++ */
++ if (!printk_percpu_data_ready())
++ return &early_nbcon_pcpu_emergency_nesting;
++
++ return this_cpu_ptr(&nbcon_pcpu_emergency_nesting);
++}
++
+ /**
+ * nbcon_atomic_emit_one - Print one record for an nbcon console using the
+ * write_atomic() callback
+@@ -971,9 +994,15 @@ static bool nbcon_atomic_emit_one(struct
+ */
+ enum nbcon_prio nbcon_get_default_prio(void)
+ {
++ unsigned int *cpu_emergency_nesting;
++
+ if (this_cpu_in_panic())
+ return NBCON_PRIO_PANIC;
+
++ cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
++ if (*cpu_emergency_nesting)
++ return NBCON_PRIO_EMERGENCY;
++
+ return NBCON_PRIO_NORMAL;
+ }
+
+@@ -1111,6 +1140,58 @@ void nbcon_atomic_flush_unsafe(void)
+ }
+
+ /**
++ * nbcon_cpu_emergency_enter - Enter an emergency section where printk()
++ * messages for that CPU are only stored
++ *
++ * Upon exiting the emergency section, all stored messages are flushed.
++ *
++ * Context: Any context. Disables preemption.
++ *
++ * When within an emergency section, no printing occurs on that CPU. This
++ * is to allow all emergency messages to be dumped into the ringbuffer before
++ * flushing the ringbuffer. The actual printing occurs when exiting the
++ * outermost emergency section.
++ */
++void nbcon_cpu_emergency_enter(void)
++{
++ unsigned int *cpu_emergency_nesting;
++
++ preempt_disable();
++
++ cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
++ (*cpu_emergency_nesting)++;
++}
++
++/**
++ * nbcon_cpu_emergency_exit - Exit an emergency section and flush the
++ * stored messages
++ *
++ * Flushing only occurs when exiting all nesting for the CPU.
++ *
++ * Context: Any context. Enables preemption.
++ */
++void nbcon_cpu_emergency_exit(void)
++{
++ unsigned int *cpu_emergency_nesting;
++ bool do_trigger_flush = false;
++
++ cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
++
++ WARN_ON_ONCE(*cpu_emergency_nesting == 0);
++
++ if (*cpu_emergency_nesting == 1)
++ do_trigger_flush = true;
++
++ /* Undo the nesting count of nbcon_cpu_emergency_enter(). */
++ (*cpu_emergency_nesting)--;
++
++ preempt_enable();
++
++ if (do_trigger_flush)
++ printk_trigger_flush();
++}
++
++/**
+ * nbcon_alloc - Allocate buffers needed by the nbcon console
+ * @con: Console to allocate buffers for
+ *
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2405,16 +2405,29 @@ asmlinkage int vprintk_emit(int facility
+ * printing of all remaining records to all consoles so that
+ * this context can return as soon as possible. Hopefully
+ * another printk() caller will take over the printing.
++ *
++ * Also, nbcon_get_default_prio() requires migration disabled.
+ */
+ preempt_disable();
++
+ /*
+- * Try to acquire and then immediately release the console
+- * semaphore. The release will print out buffers. With the
+- * spinning variant, this context tries to take over the
+- * printing from another printing context.
++ * Do not emit for EMERGENCY priority. The console will be
++ * explicitly flushed when exiting the emergency section.
+ */
+- if (console_trylock_spinning())
+- console_unlock();
++ if (nbcon_get_default_prio() == NBCON_PRIO_EMERGENCY) {
++ do_trylock_unlock = false;
++ } else {
++ /*
++ * Try to acquire and then immediately release the
++ * console semaphore. The release will print out
++ * buffers. With the spinning variant, this context
++ * tries to take over the printing from another
++ * printing context.
++ */
++ if (console_trylock_spinning())
++ console_unlock();
++ }
++
+ preempt_enable();
+ }
+
diff --git a/debian/patches-rt/0117-panic-Mark-emergency-section-in-warn.patch b/debian/patches-rt/0117-panic-Mark-emergency-section-in-warn.patch
new file mode 100644
index 0000000000..84201d25fe
--- /dev/null
+++ b/debian/patches-rt/0117-panic-Mark-emergency-section-in-warn.patch
@@ -0,0 +1,38 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 11 Sep 2023 15:53:04 +0000
+Subject: [PATCH 117/134] panic: Mark emergency section in warn
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Mark the full contents of __warn() as an emergency section. In
+this section, the CPU will not perform console output for the
+printk() calls. Instead, a flushing of the console output will
+be triggered when exiting the emergency section.
+
+Co-developed-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Thomas Gleixner (Intel) <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/panic.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -663,6 +663,8 @@ struct warn_args {
+ void __warn(const char *file, int line, void *caller, unsigned taint,
+ struct pt_regs *regs, struct warn_args *args)
+ {
++ nbcon_cpu_emergency_enter();
++
+ disable_trace_on_warning();
+
+ if (file)
+@@ -693,6 +695,8 @@ void __warn(const char *file, int line,
+
+ /* Just a warning, don't kill lockdep. */
+ add_taint(taint, LOCKDEP_STILL_OK);
++
++ nbcon_cpu_emergency_exit();
+ }
+
+ #ifdef CONFIG_BUG
diff --git a/debian/patches-rt/0118-panic-Mark-emergency-section-in-oops.patch b/debian/patches-rt/0118-panic-Mark-emergency-section-in-oops.patch
new file mode 100644
index 0000000000..6e52fd3a69
--- /dev/null
+++ b/debian/patches-rt/0118-panic-Mark-emergency-section-in-oops.patch
@@ -0,0 +1,39 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Tue, 19 Sep 2023 17:07:34 +0000
+Subject: [PATCH 118/134] panic: Mark emergency section in oops
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Mark an emergency section beginning with oops_enter() until the
+end of oops_exit(). In this section, the CPU will not perform
+console output for the printk() calls. Instead, a flushing of the
+console output will be triggered when exiting the emergency section.
+
+The very end of oops_exit() performs a kmsg_dump(). This is not
+included in the emergency section because it is another
+flushing mechanism that should occur after the consoles have
+been triggered to flush.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/panic.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -630,6 +630,7 @@ bool oops_may_print(void)
+ */
+ void oops_enter(void)
+ {
++ nbcon_cpu_emergency_enter();
+ tracing_off();
+ /* can't trust the integrity of the kernel anymore: */
+ debug_locks_off();
+@@ -652,6 +653,7 @@ void oops_exit(void)
+ {
+ do_oops_enter_exit();
+ print_oops_end_marker();
++ nbcon_cpu_emergency_exit();
+ kmsg_dump(KMSG_DUMP_OOPS);
+ }
+
diff --git a/debian/patches-rt/0119-rcu-Mark-emergency-section-in-rcu-stalls.patch b/debian/patches-rt/0119-rcu-Mark-emergency-section-in-rcu-stalls.patch
new file mode 100644
index 0000000000..d1d6efced4
--- /dev/null
+++ b/debian/patches-rt/0119-rcu-Mark-emergency-section-in-rcu-stalls.patch
@@ -0,0 +1,45 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 11 Sep 2023 15:53:39 +0000
+Subject: [PATCH 119/134] rcu: Mark emergency section in rcu stalls
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Mark an emergency section within print_other_cpu_stall(), where
+RCU stall information is printed. In this section, the CPU will
+not perform console output for the printk() calls. Instead, a
+flushing of the console output will be triggered when exiting the
+emergency section.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/rcu/tree_stall.h | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/kernel/rcu/tree_stall.h
++++ b/kernel/rcu/tree_stall.h
+@@ -8,6 +8,7 @@
+ */
+
+ #include <linux/kvm_para.h>
++#include <linux/console.h>
+
+ //////////////////////////////////////////////////////////////////////////////
+ //
+@@ -603,6 +604,8 @@ static void print_other_cpu_stall(unsign
+ if (rcu_stall_is_suppressed())
+ return;
+
++ nbcon_cpu_emergency_enter();
++
+ /*
+ * OK, time to rat on our buddy...
+ * See Documentation/RCU/stallwarn.rst for info on how to debug
+@@ -657,6 +660,8 @@ static void print_other_cpu_stall(unsign
+ panic_on_rcu_stall();
+
+ rcu_force_quiescent_state(); /* Kick them all. */
++
++ nbcon_cpu_emergency_exit();
+ }
+
+ static void print_cpu_stall(unsigned long gps)
diff --git a/debian/patches-rt/0120-lockdep-Mark-emergency-section-in-lockdep-splats.patch b/debian/patches-rt/0120-lockdep-Mark-emergency-section-in-lockdep-splats.patch
new file mode 100644
index 0000000000..248d17afc2
--- /dev/null
+++ b/debian/patches-rt/0120-lockdep-Mark-emergency-section-in-lockdep-splats.patch
@@ -0,0 +1,45 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 18 Sep 2023 20:27:41 +0000
+Subject: [PATCH 120/134] lockdep: Mark emergency section in lockdep splats
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Mark an emergency section within print_usage_bug(), where
+lockdep bugs are printed. In this section, the CPU will not
+perform console output for the printk() calls. Instead, a
+flushing of the console output will be triggered when exiting
+the emergency section.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/locking/lockdep.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -56,6 +56,7 @@
+ #include <linux/kprobes.h>
+ #include <linux/lockdep.h>
+ #include <linux/context_tracking.h>
++#include <linux/console.h>
+
+ #include <asm/sections.h>
+
+@@ -3971,6 +3972,8 @@ print_usage_bug(struct task_struct *curr
+ if (!debug_locks_off() || debug_locks_silent)
+ return;
+
++ nbcon_cpu_emergency_enter();
++
+ pr_warn("\n");
+ pr_warn("================================\n");
+ pr_warn("WARNING: inconsistent lock state\n");
+@@ -3999,6 +4002,8 @@ print_usage_bug(struct task_struct *curr
+
+ pr_warn("\nstack backtrace:\n");
+ dump_stack();
++
++ nbcon_cpu_emergency_exit();
+ }
+
+ /*
diff --git a/debian/patches-rt/0121-printk-nbcon-Introduce-printing-kthreads.patch b/debian/patches-rt/0121-printk-nbcon-Introduce-printing-kthreads.patch
new file mode 100644
index 0000000000..7b158ddb74
--- /dev/null
+++ b/debian/patches-rt/0121-printk-nbcon-Introduce-printing-kthreads.patch
@@ -0,0 +1,441 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 22 Sep 2023 14:12:21 +0000
+Subject: [PATCH 121/134] printk: nbcon: Introduce printing kthreads
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Provide the main implementation for running a printer kthread
+per nbcon console that is takeover/handover aware.
+
+The main print function nbcon_emit_next_record() will generate
+a warning if a task other than the dedicated printer thread
+tries to print using write_thread().
+
+Co-developed-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Thomas Gleixner (Intel) <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/console.h | 8 ++
+ kernel/printk/internal.h | 25 ++++++
+ kernel/printk/nbcon.c | 188 ++++++++++++++++++++++++++++++++++++++++++++++-
+ kernel/printk/printk.c | 31 +++++++
+ 4 files changed, 249 insertions(+), 3 deletions(-)
+
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -17,6 +17,7 @@
+ #include <linux/atomic.h>
+ #include <linux/bits.h>
+ #include <linux/rculist.h>
++#include <linux/rcuwait.h>
+ #include <linux/types.h>
+
+ struct vc_data;
+@@ -296,12 +297,15 @@ struct nbcon_write_context {
+ * @node: hlist node for the console list
+ *
+ * @write_atomic: Write callback for atomic context
++ * @write_thread: Write callback for non-atomic context
+ * @driver_enter: Callback to begin synchronization with driver code
+ * @driver_exit: Callback to finish synchronization with driver code
+ * @nbcon_state: State for nbcon consoles
+ * @nbcon_seq: Sequence number of the next record for nbcon to print
+ * @pbufs: Pointer to nbcon private buffer
+ * @locked_port: True, if the port lock is locked by nbcon
++ * @kthread: Printer kthread for this console
++ * @rcuwait: RCU-safe wait object for @kthread waking
+ */
+ struct console {
+ char name[16];
+@@ -325,12 +329,16 @@ struct console {
+ /* nbcon console specific members */
+ bool (*write_atomic)(struct console *con,
+ struct nbcon_write_context *wctxt);
++ bool (*write_thread)(struct console *con,
++ struct nbcon_write_context *wctxt);
+ void (*driver_enter)(struct console *con, unsigned long *flags);
+ void (*driver_exit)(struct console *con, unsigned long flags);
+ atomic_t __private nbcon_state;
+ atomic_long_t __private nbcon_seq;
+ struct printk_buffers *pbufs;
+ bool locked_port;
++ struct task_struct *kthread;
++ struct rcuwait rcuwait;
+ };
+
+ #ifdef CONFIG_LOCKDEP
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -92,6 +92,7 @@ void nbcon_free(struct console *con);
+ enum nbcon_prio nbcon_get_default_prio(void);
+ void nbcon_atomic_flush_all(void);
+ bool nbcon_atomic_emit_next_record(struct console *con, bool *handover, int cookie);
++void nbcon_kthread_create(struct console *con);
+
+ /*
+ * Check if the given console is currently capable and allowed to print
+@@ -110,6 +111,8 @@ static inline bool console_is_usable(str
+ if (flags & CON_NBCON) {
+ if (!con->write_atomic)
+ return false;
++ if (!con->write_thread || !con->kthread)
++ return false;
+ } else {
+ if (!con->write)
+ return false;
+@@ -126,12 +129,34 @@ static inline bool console_is_usable(str
+ return true;
+ }
+
++/**
++ * nbcon_kthread_wake - Wake up a printk thread
++ * @con: Console to operate on
++ */
++static inline void nbcon_kthread_wake(struct console *con)
++{
++ /*
++ * Guarantee any new records can be seen by tasks preparing to wait
++ * before this context checks if the rcuwait is empty.
++ *
++ * The full memory barrier in rcuwait_wake_up() pairs with the full
++ * memory barrier within set_current_state() of
++ * ___rcuwait_wait_event(), which is called after prepare_to_rcuwait()
++ * adds the waiter but before it has checked the wait condition.
++ *
++ * This pairs with nbcon_kthread_func:A.
++ */
++ rcuwait_wake_up(&con->rcuwait); /* LMM(nbcon_kthread_wake:A) */
++}
++
+ #else
+
+ #define PRINTK_PREFIX_MAX 0
+ #define PRINTK_MESSAGE_MAX 0
+ #define PRINTKRB_RECORD_MAX 0
+
++static inline void nbcon_kthread_wake(struct console *con) { }
++static inline void nbcon_kthread_create(struct console *con) { }
+ #define printing_via_unlock (false)
+
+ /*
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -5,8 +5,10 @@
+ #include <linux/kernel.h>
+ #include <linux/console.h>
+ #include <linux/delay.h>
++#include <linux/kthread.h>
+ #include <linux/slab.h>
+ #include <linux/serial_core.h>
++#include "printk_ringbuffer.h"
+ #include "internal.h"
+ /*
+ * Printk console printing implementation for consoles which does not depend
+@@ -828,6 +830,7 @@ EXPORT_SYMBOL_GPL(nbcon_exit_unsafe);
+ /**
+ * nbcon_emit_next_record - Emit a record in the acquired context
+ * @wctxt: The write context that will be handed to the write function
++ * @use_atomic: True if the write_atomic callback is to be used
+ *
+ * Return: True if this context still owns the console. False if
+ * ownership was handed over or taken.
+@@ -841,7 +844,7 @@ EXPORT_SYMBOL_GPL(nbcon_exit_unsafe);
+ * When true is returned, @wctxt->ctxt.backlog indicates whether there are
+ * still records pending in the ringbuffer,
+ */
+-static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt)
++static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt, bool use_atomic)
+ {
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+ struct console *con = ctxt->console;
+@@ -891,9 +894,17 @@ static bool nbcon_emit_next_record(struc
+ nbcon_state_read(con, &cur);
+ wctxt->unsafe_takeover = cur.unsafe_takeover;
+
+- if (con->write_atomic)
++ if (use_atomic &&
++ con->write_atomic) {
+ done = con->write_atomic(con, wctxt);
+
++ } else if (!use_atomic &&
++ con->write_thread &&
++ con->kthread) {
++ WARN_ON_ONCE(con->kthread != current);
++ done = con->write_thread(con, wctxt);
++ }
++
+ if (!done) {
+ /*
+ * The emit was aborted, probably due to a loss of ownership.
+@@ -929,6 +940,118 @@ static bool nbcon_emit_next_record(struc
+ return nbcon_context_exit_unsafe(ctxt);
+ }
+
++/**
++ * nbcon_kthread_should_wakeup - Check whether a printer thread should wakeup
++ * @con: Console to operate on
++ * @ctxt: The acquire context that contains the state
++ * at console_acquire()
++ *
++ * Return: True if the thread should shutdown or if the console is
++ * allowed to print and a record is available. False otherwise.
++ *
++ * After the thread wakes up, it must first check if it should shutdown before
++ * attempting any printing.
++ */
++static bool nbcon_kthread_should_wakeup(struct console *con, struct nbcon_context *ctxt)
++{
++ bool is_usable;
++ short flags;
++ int cookie;
++
++ if (kthread_should_stop())
++ return true;
++
++ cookie = console_srcu_read_lock();
++ flags = console_srcu_read_flags(con);
++ is_usable = console_is_usable(con, flags);
++ console_srcu_read_unlock(cookie);
++
++ if (!is_usable)
++ return false;
++
++ /* Bring the sequence in @ctxt up to date */
++ ctxt->seq = nbcon_seq_read(con);
++
++ return prb_read_valid(prb, ctxt->seq, NULL);
++}
++
++/**
++ * nbcon_kthread_func - The printer thread function
++ * @__console: Console to operate on
++ */
++static int nbcon_kthread_func(void *__console)
++{
++ struct console *con = __console;
++ struct nbcon_write_context wctxt = {
++ .ctxt.console = con,
++ .ctxt.prio = NBCON_PRIO_NORMAL,
++ };
++ struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
++ unsigned long flags;
++ short con_flags;
++ bool backlog;
++ int cookie;
++ int ret;
++
++wait_for_event:
++ /*
++ * Guarantee this task is visible on the rcuwait before
++ * checking the wake condition.
++ *
++ * The full memory barrier within set_current_state() of
++ * ___rcuwait_wait_event() pairs with the full memory
++ * barrier within rcuwait_has_sleeper().
++ *
++ * This pairs with rcuwait_has_sleeper:A and nbcon_kthread_wake:A.
++ */
++ ret = rcuwait_wait_event(&con->rcuwait,
++ nbcon_kthread_should_wakeup(con, ctxt),
++ TASK_INTERRUPTIBLE); /* LMM(nbcon_kthread_func:A) */
++
++ if (kthread_should_stop())
++ return 0;
++
++ /* Wait was interrupted by a spurious signal, go back to sleep. */
++ if (ret)
++ goto wait_for_event;
++
++ do {
++ backlog = false;
++
++ cookie = console_srcu_read_lock();
++
++ con_flags = console_srcu_read_flags(con);
++
++ if (console_is_usable(con, con_flags)) {
++ con->driver_enter(con, &flags);
++
++ /*
++ * Ensure this stays on the CPU to make handover and
++ * takeover possible.
++ */
++ cant_migrate();
++
++ if (nbcon_context_try_acquire(ctxt)) {
++ /*
++ * If the emit fails, this context is no
++ * longer the owner.
++ */
++ if (nbcon_emit_next_record(&wctxt, false)) {
++ nbcon_context_release(ctxt);
++ backlog = ctxt->backlog;
++ }
++ }
++
++ con->driver_exit(con, flags);
++ }
++
++ console_srcu_read_unlock(cookie);
++
++ } while (backlog);
++
++ goto wait_for_event;
++}
++
+ /* Track the nbcon emergency nesting per CPU. */
+ static DEFINE_PER_CPU(unsigned int, nbcon_pcpu_emergency_nesting);
+ static unsigned int early_nbcon_pcpu_emergency_nesting __initdata;
+@@ -976,7 +1099,7 @@ static bool nbcon_atomic_emit_one(struct
+ * handed over or taken over. In both cases the context is no
+ * longer valid.
+ */
+- if (!nbcon_emit_next_record(wctxt))
++ if (!nbcon_emit_next_record(wctxt, true))
+ return false;
+
+ nbcon_context_release(ctxt);
+@@ -1192,6 +1315,63 @@ void nbcon_cpu_emergency_exit(void)
+ }
+
+ /**
++ * nbcon_kthread_stop - Stop a printer thread
++ * @con: Console to operate on
++ */
++static void nbcon_kthread_stop(struct console *con)
++{
++ lockdep_assert_console_list_lock_held();
++
++ if (!con->kthread)
++ return;
++
++ kthread_stop(con->kthread);
++ con->kthread = NULL;
++}
++
++/**
++ * nbcon_kthread_create - Create a printer thread
++ * @con: Console to operate on
++ *
++ * If it fails, let the console proceed. The atomic part might
++ * be usable and useful.
++ */
++void nbcon_kthread_create(struct console *con)
++{
++ struct task_struct *kt;
++
++ lockdep_assert_console_list_lock_held();
++
++ if (!(con->flags & CON_NBCON) || !con->write_thread)
++ return;
++
++ if (con->kthread)
++ return;
++
++ /*
++ * Printer threads cannot be started as long as any boot console is
++ * registered because there is no way to synchronize the hardware
++ * registers between boot console code and regular console code.
++ */
++ if (have_boot_console)
++ return;
++
++ kt = kthread_run(nbcon_kthread_func, con, "pr/%s%d", con->name, con->index);
++ if (IS_ERR(kt)) {
++ con_printk(KERN_ERR, con, "failed to start printing thread\n");
++ return;
++ }
++
++ con->kthread = kt;
++
++ /*
++ * It is important that console printing threads are scheduled
++ * shortly after a printk call and with generous runtime budgets.
++ */
++ sched_set_normal(con->kthread, -20);
++}
++
++/**
+ * nbcon_alloc - Allocate buffers needed by the nbcon console
+ * @con: Console to allocate buffers for
+ *
+@@ -1237,6 +1417,7 @@ void nbcon_init(struct console *con)
+ /* nbcon_alloc() must have been called and successful! */
+ BUG_ON(!con->pbufs);
+
++ rcuwait_init(&con->rcuwait);
+ nbcon_seq_force(con, con->seq);
+ nbcon_state_set(con, &state);
+ }
+@@ -1249,6 +1430,7 @@ void nbcon_free(struct console *con)
+ {
+ struct nbcon_state state = { };
+
++ nbcon_kthread_stop(con);
+ nbcon_state_set(con, &state);
+
+ /* Boot consoles share global printk buffers. */
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2682,6 +2682,8 @@ void suspend_console(void)
+ void resume_console(void)
+ {
+ struct console *con;
++ short flags;
++ int cookie;
+
+ if (!console_suspend_enabled)
+ return;
+@@ -2698,6 +2700,14 @@ void resume_console(void)
+ */
+ synchronize_srcu(&console_srcu);
+
++ cookie = console_srcu_read_lock();
++ for_each_console_srcu(con) {
++ flags = console_srcu_read_flags(con);
++ if (flags & CON_NBCON)
++ nbcon_kthread_wake(con);
++ }
++ console_srcu_read_unlock(cookie);
++
+ pr_flush(1000, true);
+ }
+
+@@ -3018,6 +3028,13 @@ static bool console_flush_all(bool do_co
+ u64 printk_seq;
+ bool progress;
+
++ /*
++ * console_flush_all() is only for legacy consoles,
++ * unless the nbcon console has no kthread printer.
++ */
++ if ((flags & CON_NBCON) && con->kthread)
++ continue;
++
+ if (!console_is_usable(con, flags))
+ continue;
+ any_usable = true;
+@@ -3313,9 +3330,23 @@ EXPORT_SYMBOL(console_stop);
+
+ void console_start(struct console *console)
+ {
++ short flags;
++
+ console_list_lock();
+ console_srcu_write_flags(console, console->flags | CON_ENABLED);
++ flags = console->flags;
+ console_list_unlock();
++
++ /*
++ * Ensure that all SRCU list walks have completed. The related
++ * printing context must be able to see it is enabled so that
++ * it is guaranteed to wake up and resume printing.
++ */
++ synchronize_srcu(&console_srcu);
++
++ if (flags & CON_NBCON)
++ nbcon_kthread_wake(console);
++
+ __pr_flush(console, 1000, true);
+ }
+ EXPORT_SYMBOL(console_start);
diff --git a/debian/patches-rt/0122-printk-Atomic-print-in-printk-context-on-shutdown.patch b/debian/patches-rt/0122-printk-Atomic-print-in-printk-context-on-shutdown.patch
new file mode 100644
index 0000000000..35c88289b1
--- /dev/null
+++ b/debian/patches-rt/0122-printk-Atomic-print-in-printk-context-on-shutdown.patch
@@ -0,0 +1,41 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 23 Oct 2023 17:43:48 +0000
+Subject: [PATCH 122/134] printk: Atomic print in printk context on shutdown
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+For nbcon consoles, normally the printing is handled by the
+dedicated console printing threads. However, on shutdown the
+printing threads may not get a chance to print the final
+messages.
+
+When shutting down or rebooting (system_state > SYSTEM_RUNNING),
+perform atomic printing from the printk() caller context.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2389,13 +2389,18 @@ asmlinkage int vprintk_emit(int facility
+ *
+ * - When this CPU is in panic.
+ *
++ * - During shutdown, since the printing threads may not get
++ * a chance to print the final messages.
++ *
+ * Note that if boot consoles are registered, the
+ * console_lock/console_unlock dance must be relied upon
+ * instead because nbcon consoles cannot print simultaneously
+ * with boot consoles.
+ */
+- if (is_panic_context)
++ if (is_panic_context ||
++ (system_state > SYSTEM_RUNNING)) {
+ nbcon_atomic_flush_all();
++ }
+ }
+
+ if (do_trylock_unlock) {
diff --git a/debian/patches-rt/0123-printk-nbcon-Add-context-to-console_is_usable.patch b/debian/patches-rt/0123-printk-nbcon-Add-context-to-console_is_usable.patch
new file mode 100644
index 0000000000..26a494c165
--- /dev/null
+++ b/debian/patches-rt/0123-printk-nbcon-Add-context-to-console_is_usable.patch
@@ -0,0 +1,111 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Tue, 26 Sep 2023 14:43:30 +0000
+Subject: [PATCH 123/134] printk: nbcon: Add context to console_is_usable()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+The nbcon consoles have two callbacks to be used for different
+contexts. In order to determine if an nbcon console is usable,
+console_is_usable() needs to know if it is a context that will
+use the write_atomic() callback or the write_thread() callback.
+
+Add an extra parameter @use_atomic to specify this.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/internal.h | 16 ++++++++++------
+ kernel/printk/nbcon.c | 6 +++---
+ kernel/printk/printk.c | 6 ++++--
+ 3 files changed, 17 insertions(+), 11 deletions(-)
+
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -100,7 +100,7 @@ void nbcon_kthread_create(struct console
+ * which can also play a role in deciding if @con can be used to print
+ * records.
+ */
+-static inline bool console_is_usable(struct console *con, short flags)
++static inline bool console_is_usable(struct console *con, short flags, bool use_atomic)
+ {
+ if (!(flags & CON_ENABLED))
+ return false;
+@@ -109,10 +109,13 @@ static inline bool console_is_usable(str
+ return false;
+
+ if (flags & CON_NBCON) {
+- if (!con->write_atomic)
+- return false;
+- if (!con->write_thread || !con->kthread)
+- return false;
++ if (use_atomic) {
++ if (!con->write_atomic)
++ return false;
++ } else {
++ if (!con->write_thread || !con->kthread)
++ return false;
++ }
+ } else {
+ if (!con->write)
+ return false;
+@@ -178,7 +181,8 @@ static inline void nbcon_atomic_flush_al
+ static inline bool nbcon_atomic_emit_next_record(struct console *con, bool *handover,
+ int cookie) { return false; }
+
+-static inline bool console_is_usable(struct console *con, short flags) { return false; }
++static inline bool console_is_usable(struct console *con, short flags,
++ bool use_atomic) { return false; }
+
+ #endif /* CONFIG_PRINTK */
+
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -963,7 +963,7 @@ static bool nbcon_kthread_should_wakeup(
+
+ cookie = console_srcu_read_lock();
+ flags = console_srcu_read_flags(con);
+- is_usable = console_is_usable(con, flags);
++ is_usable = console_is_usable(con, flags, false);
+ console_srcu_read_unlock(cookie);
+
+ if (!is_usable)
+@@ -1022,7 +1022,7 @@ static int nbcon_kthread_func(void *__co
+
+ con_flags = console_srcu_read_flags(con);
+
+- if (console_is_usable(con, con_flags)) {
++ if (console_is_usable(con, con_flags, false)) {
+ con->driver_enter(con, &flags);
+
+ /*
+@@ -1203,7 +1203,7 @@ static void __nbcon_atomic_flush_all(u64
+ if (!(flags & CON_NBCON))
+ continue;
+
+- if (!console_is_usable(con, flags))
++ if (!console_is_usable(con, flags, true))
+ continue;
+
+ if (nbcon_seq_read(con) >= stop_seq)
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -3040,7 +3040,7 @@ static bool console_flush_all(bool do_co
+ if ((flags & CON_NBCON) && con->kthread)
+ continue;
+
+- if (!console_is_usable(con, flags))
++ if (!console_is_usable(con, flags, true))
+ continue;
+ any_usable = true;
+
+@@ -3945,8 +3945,10 @@ static bool __pr_flush(struct console *c
+ * that they make forward progress, so only increment
+ * @diff for usable consoles.
+ */
+- if (!console_is_usable(c, flags))
++ if (!console_is_usable(c, flags, true) &&
++ !console_is_usable(c, flags, false)) {
+ continue;
++ }
+
+ if (flags & CON_NBCON) {
+ printk_seq = nbcon_seq_read(c);
diff --git a/debian/patches-rt/0124-printk-nbcon-Add-printer-thread-wakeups.patch b/debian/patches-rt/0124-printk-nbcon-Add-printer-thread-wakeups.patch
new file mode 100644
index 0000000000..18e02593f8
--- /dev/null
+++ b/debian/patches-rt/0124-printk-nbcon-Add-printer-thread-wakeups.patch
@@ -0,0 +1,164 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 26 Sep 2023 13:03:52 +0000
+Subject: [PATCH 124/134] printk: nbcon: Add printer thread wakeups
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Add a function to wakeup the printer threads. Use the new function
+when:
+
+ - records are added to the printk ringbuffer
+ - consoles are resumed
+ - triggered via printk_trigger_flush()
+
+The actual waking is performed via irq_work so that the wakeup can
+be triggered from any context.
+
+Co-developed-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Thomas Gleixner (Intel) <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/console.h | 3 ++
+ kernel/printk/internal.h | 1
+ kernel/printk/nbcon.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++
+ kernel/printk/printk.c | 7 +++++
+ 4 files changed, 67 insertions(+)
+
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -16,6 +16,7 @@
+
+ #include <linux/atomic.h>
+ #include <linux/bits.h>
++#include <linux/irq_work.h>
+ #include <linux/rculist.h>
+ #include <linux/rcuwait.h>
+ #include <linux/types.h>
+@@ -306,6 +307,7 @@ struct nbcon_write_context {
+ * @locked_port: True, if the port lock is locked by nbcon
+ * @kthread: Printer kthread for this console
+ * @rcuwait: RCU-safe wait object for @kthread waking
++ * @irq_work: Defer @kthread waking to IRQ work context
+ */
+ struct console {
+ char name[16];
+@@ -339,6 +341,7 @@ struct console {
+ bool locked_port;
+ struct task_struct *kthread;
+ struct rcuwait rcuwait;
++ struct irq_work irq_work;
+ };
+
+ #ifdef CONFIG_LOCKDEP
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -93,6 +93,7 @@ enum nbcon_prio nbcon_get_default_prio(v
+ void nbcon_atomic_flush_all(void);
+ bool nbcon_atomic_emit_next_record(struct console *con, bool *handover, int cookie);
+ void nbcon_kthread_create(struct console *con);
++void nbcon_wake_threads(void);
+
+ /*
+ * Check if the given console is currently capable and allowed to print
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -1052,6 +1052,61 @@ static int nbcon_kthread_func(void *__co
+ goto wait_for_event;
+ }
+
++/**
++ * nbcon_irq_work - irq work to wake printk thread
++ * @irq_work: The irq work to operate on
++ */
++static void nbcon_irq_work(struct irq_work *irq_work)
++{
++ struct console *con = container_of(irq_work, struct console, irq_work);
++
++ nbcon_kthread_wake(con);
++}
++
++static inline bool rcuwait_has_sleeper(struct rcuwait *w)
++{
++ bool has_sleeper;
++
++ rcu_read_lock();
++ /*
++ * Guarantee any new records can be seen by tasks preparing to wait
++ * before this context checks if the rcuwait is empty.
++ *
++ * This full memory barrier pairs with the full memory barrier within
++ * set_current_state() of ___rcuwait_wait_event(), which is called
++ * after prepare_to_rcuwait() adds the waiter but before it has
++ * checked the wait condition.
++ *
++ * This pairs with nbcon_kthread_func:A.
++ */
++ smp_mb(); /* LMM(rcuwait_has_sleeper:A) */
++ has_sleeper = !!rcu_dereference(w->task);
++ rcu_read_unlock();
++
++ return has_sleeper;
++}
++
++/**
++ * nbcon_wake_threads - Wake up printing threads using irq_work
++ */
++void nbcon_wake_threads(void)
++{
++ struct console *con;
++ int cookie;
++
++ cookie = console_srcu_read_lock();
++ for_each_console_srcu(con) {
++ /*
++ * Only schedule irq_work if the printing thread is
++ * actively waiting. If not waiting, the thread will
++ * notice by itself that it has work to do.
++ */
++ if (con->kthread && rcuwait_has_sleeper(&con->rcuwait))
++ irq_work_queue(&con->irq_work);
++ }
++ console_srcu_read_unlock(cookie);
++}
++
+ /* Track the nbcon emergency nesting per CPU. */
+ static DEFINE_PER_CPU(unsigned int, nbcon_pcpu_emergency_nesting);
+ static unsigned int early_nbcon_pcpu_emergency_nesting __initdata;
+@@ -1418,6 +1473,7 @@ void nbcon_init(struct console *con)
+ BUG_ON(!con->pbufs);
+
+ rcuwait_init(&con->rcuwait);
++ init_irq_work(&con->irq_work, nbcon_irq_work);
+ nbcon_seq_force(con, con->seq);
+ nbcon_state_set(con, &state);
+ }
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2403,6 +2403,8 @@ asmlinkage int vprintk_emit(int facility
+ }
+ }
+
++ nbcon_wake_threads();
++
+ if (do_trylock_unlock) {
+ /*
+ * The caller may be holding system-critical or
+@@ -2705,6 +2707,10 @@ void resume_console(void)
+ */
+ synchronize_srcu(&console_srcu);
+
++ /*
++ * Since this runs in task context, wake the threaded printers
++ * directly rather than scheduling irq_work to do it.
++ */
+ cookie = console_srcu_read_lock();
+ for_each_console_srcu(con) {
+ flags = console_srcu_read_flags(con);
+@@ -4097,6 +4103,7 @@ void defer_console_output(void)
+
+ void printk_trigger_flush(void)
+ {
++ nbcon_wake_threads();
+ defer_console_output();
+ }
+
diff --git a/debian/patches-rt/0125-printk-nbcon-Stop-threads-on-shutdown-reboot.patch b/debian/patches-rt/0125-printk-nbcon-Stop-threads-on-shutdown-reboot.patch
new file mode 100644
index 0000000000..55ec76c5c0
--- /dev/null
+++ b/debian/patches-rt/0125-printk-nbcon-Stop-threads-on-shutdown-reboot.patch
@@ -0,0 +1,60 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Tue, 26 Sep 2023 13:04:15 +0000
+Subject: [PATCH 125/134] printk: nbcon: Stop threads on shutdown/reboot
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Register a syscore_ops shutdown function to stop all threaded
+printers on shutdown/reboot. This allows printk to transition back
+to atomic printing in order to provide a robust mechanism for
+outputting the final messages.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/nbcon.c | 31 +++++++++++++++++++++++++++++++
+ 1 file changed, 31 insertions(+)
+
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -8,6 +8,7 @@
+ #include <linux/kthread.h>
+ #include <linux/slab.h>
+ #include <linux/serial_core.h>
++#include <linux/syscore_ops.h>
+ #include "printk_ringbuffer.h"
+ #include "internal.h"
+ /*
+@@ -1577,3 +1578,33 @@ void nbcon_release(struct uart_port *up)
+ con->locked_port = false;
+ }
+ EXPORT_SYMBOL_GPL(nbcon_release);
++
++/**
++ * printk_kthread_shutdown - shutdown all threaded printers
++ *
++ * On system shutdown all threaded printers are stopped. This allows printk
++ * to transition back to atomic printing, thus providing a robust mechanism
++ * for the final shutdown/reboot messages to be output.
++ */
++static void printk_kthread_shutdown(void)
++{
++ struct console *con;
++
++ console_list_lock();
++ for_each_console(con) {
++ if (con->flags & CON_NBCON)
++ nbcon_kthread_stop(con);
++ }
++ console_list_unlock();
++}
++
++static struct syscore_ops printk_syscore_ops = {
++ .shutdown = printk_kthread_shutdown,
++};
++
++static int __init printk_init_ops(void)
++{
++ register_syscore_ops(&printk_syscore_ops);
++ return 0;
++}
++device_initcall(printk_init_ops);
diff --git a/debian/patches-rt/0126-printk-nbcon-Start-printing-threads.patch b/debian/patches-rt/0126-printk-nbcon-Start-printing-threads.patch
new file mode 100644
index 0000000000..ec41221636
--- /dev/null
+++ b/debian/patches-rt/0126-printk-nbcon-Start-printing-threads.patch
@@ -0,0 +1,135 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Tue, 5 Dec 2023 14:09:31 +0000
+Subject: [PATCH 126/134] printk: nbcon: Start printing threads
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+If there are no boot consoles, the printing threads are started
+in early_initcall.
+
+If there are boot consoles, the printing threads are started
+after the last boot console has unregistered. The printing
+threads do not need to be concerned about boot consoles because
+boot consoles cannot register once a non-boot console has
+registered.
+
+Until a printing thread of a console has started, that console
+will print using atomic_write() in the printk() caller context.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/internal.h | 2 ++
+ kernel/printk/nbcon.c | 18 +++++++++++++++++-
+ kernel/printk/printk.c | 14 ++++++++++++++
+ 3 files changed, 33 insertions(+), 1 deletion(-)
+
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -44,6 +44,7 @@ enum printk_info_flags {
+ };
+
+ extern struct printk_ringbuffer *prb;
++extern bool printk_threads_enabled;
+ extern bool have_legacy_console;
+ extern bool have_boot_console;
+
+@@ -161,6 +162,7 @@ static inline void nbcon_kthread_wake(st
+
+ static inline void nbcon_kthread_wake(struct console *con) { }
+ static inline void nbcon_kthread_create(struct console *con) { }
++#define printk_threads_enabled (false)
+ #define printing_via_unlock (false)
+
+ /*
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -205,6 +205,8 @@ static void nbcon_seq_try_update(struct
+ }
+ }
+
++bool printk_threads_enabled __ro_after_init;
++
+ /**
+ * nbcon_context_try_acquire_direct - Try to acquire directly
+ * @ctxt: The context of the caller
+@@ -1401,7 +1403,7 @@ void nbcon_kthread_create(struct console
+ if (!(con->flags & CON_NBCON) || !con->write_thread)
+ return;
+
+- if (con->kthread)
++ if (!printk_threads_enabled || con->kthread)
+ return;
+
+ /*
+@@ -1427,6 +1429,19 @@ void nbcon_kthread_create(struct console
+ sched_set_normal(con->kthread, -20);
+ }
+
++static int __init printk_setup_threads(void)
++{
++ struct console *con;
++
++ console_list_lock();
++ printk_threads_enabled = true;
++ for_each_console(con)
++ nbcon_kthread_create(con);
++ console_list_unlock();
++ return 0;
++}
++early_initcall(printk_setup_threads);
++
+ /**
+ * nbcon_alloc - Allocate buffers needed by the nbcon console
+ * @con: Console to allocate buffers for
+@@ -1477,6 +1492,7 @@ void nbcon_init(struct console *con)
+ init_irq_work(&con->irq_work, nbcon_irq_work);
+ nbcon_seq_force(con, con->seq);
+ nbcon_state_set(con, &state);
++ nbcon_kthread_create(con);
+ }
+
+ /**
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2389,6 +2389,9 @@ asmlinkage int vprintk_emit(int facility
+ *
+ * - When this CPU is in panic.
+ *
++ * - When booting, before the printing threads have been
++ * started.
++ *
+ * - During shutdown, since the printing threads may not get
+ * a chance to print the final messages.
+ *
+@@ -2398,6 +2401,7 @@ asmlinkage int vprintk_emit(int facility
+ * with boot consoles.
+ */
+ if (is_panic_context ||
++ !printk_threads_enabled ||
+ (system_state > SYSTEM_RUNNING)) {
+ nbcon_atomic_flush_all();
+ }
+@@ -3685,6 +3689,7 @@ EXPORT_SYMBOL(register_console);
+ /* Must be called under console_list_lock(). */
+ static int unregister_console_locked(struct console *console)
+ {
++ bool is_boot_con = (console->flags & CON_BOOT);
+ bool found_legacy_con = false;
+ bool found_nbcon_con = false;
+ bool found_boot_con = false;
+@@ -3756,6 +3761,15 @@ static int unregister_console_locked(str
+ if (!found_nbcon_con)
+ have_nbcon_console = false;
+
++ /*
++ * When the last boot console unregisters, start up the
++ * printing threads.
++ */
++ if (is_boot_con && !have_boot_console) {
++ for_each_console(c)
++ nbcon_kthread_create(c);
++ }
++
+ return res;
+ }
+
diff --git a/debian/patches-rt/0127-proc-Add-nbcon-support-for-proc-consoles.patch b/debian/patches-rt/0127-proc-Add-nbcon-support-for-proc-consoles.patch
new file mode 100644
index 0000000000..615ecb02a3
--- /dev/null
+++ b/debian/patches-rt/0127-proc-Add-nbcon-support-for-proc-consoles.patch
@@ -0,0 +1,53 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Tue, 26 Sep 2023 13:31:00 +0000
+Subject: [PATCH 127/134] proc: Add nbcon support for /proc/consoles
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Update /proc/consoles output to show 'W' if an nbcon write
+callback is implemented (write_atomic or write_thread).
+
+Also update /proc/consoles output to show 'N' if it is an
+nbcon console.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ fs/proc/consoles.c | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+--- a/fs/proc/consoles.c
++++ b/fs/proc/consoles.c
+@@ -21,12 +21,14 @@ static int show_console_dev(struct seq_f
+ { CON_ENABLED, 'E' },
+ { CON_CONSDEV, 'C' },
+ { CON_BOOT, 'B' },
++ { CON_NBCON, 'N' },
+ { CON_PRINTBUFFER, 'p' },
+ { CON_BRL, 'b' },
+ { CON_ANYTIME, 'a' },
+ };
+ char flags[ARRAY_SIZE(con_flags) + 1];
+ struct console *con = v;
++ char con_write = '-';
+ unsigned int a;
+ dev_t dev = 0;
+
+@@ -57,9 +59,15 @@ static int show_console_dev(struct seq_f
+ seq_setwidth(m, 21 - 1);
+ seq_printf(m, "%s%d", con->name, con->index);
+ seq_pad(m, ' ');
+- seq_printf(m, "%c%c%c (%s)", con->read ? 'R' : '-',
+- con->write ? 'W' : '-', con->unblank ? 'U' : '-',
+- flags);
++ if (con->flags & CON_NBCON) {
++ if (con->write_atomic || con->write_thread)
++ con_write = 'W';
++ } else {
++ if (con->write)
++ con_write = 'W';
++ }
++ seq_printf(m, "%c%c%c (%s)", con->read ? 'R' : '-', con_write,
++ con->unblank ? 'U' : '-', flags);
+ if (dev)
+ seq_printf(m, " %4d:%d", MAJOR(dev), MINOR(dev));
+
diff --git a/debian/patches-rt/0128-tty-sysfs-Add-nbcon-support-for-active.patch b/debian/patches-rt/0128-tty-sysfs-Add-nbcon-support-for-active.patch
new file mode 100644
index 0000000000..f8b285f432
--- /dev/null
+++ b/debian/patches-rt/0128-tty-sysfs-Add-nbcon-support-for-active.patch
@@ -0,0 +1,33 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Fri, 22 Sep 2023 14:31:09 +0000
+Subject: [PATCH 128/134] tty: sysfs: Add nbcon support for 'active'
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Allow the 'active' attribute to list nbcon consoles.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/tty_io.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -3540,8 +3540,15 @@ static ssize_t show_cons_active(struct d
+ for_each_console(c) {
+ if (!c->device)
+ continue;
+- if (!c->write)
+- continue;
++ if (c->flags & CON_NBCON) {
++ if (!c->write_atomic &&
++ !(c->write_thread && c->kthread)) {
++ continue;
++ }
++ } else {
++ if (!c->write)
++ continue;
++ }
+ if ((c->flags & CON_ENABLED) == 0)
+ continue;
+ cs[i++] = c;
diff --git a/debian/patches-rt/0129-printk-nbcon-Provide-function-to-reacquire-ownership.patch b/debian/patches-rt/0129-printk-nbcon-Provide-function-to-reacquire-ownership.patch
new file mode 100644
index 0000000000..e878a4b1b7
--- /dev/null
+++ b/debian/patches-rt/0129-printk-nbcon-Provide-function-to-reacquire-ownership.patch
@@ -0,0 +1,92 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Fri, 20 Oct 2023 10:01:58 +0000
+Subject: [PATCH 129/134] printk: nbcon: Provide function to reacquire
+ ownership
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Contexts may become nbcon owners for various reasons, not just
+for printing. Indeed, the port->lock wrapper takes ownership
+for anything relating to the hardware.
+
+Since ownership can be lost at any time due to handover or
+takeover, a context _should_ be prepared to back out
+immediately and carefully. However, there are many scenarios
+where the context _must_ reacquire ownership in order to
+finalize or revert hardware changes.
+
+One such example is when interrupts are disabled by a context.
+No other context will automagically re-enable the interrupts.
+For this case, the disabling context _must_ reacquire nbcon
+ownership so that it can re-enable the interrupts.
+
+Provide nbcon_reacquire() for exactly this purpose.
+
+Note that for printing contexts, after a successful reacquire
+the context will have no output buffer because that has been
+lost. nbcon_reacquire() cannot be used to resume printing.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/console.h | 2 ++
+ kernel/printk/nbcon.c | 32 ++++++++++++++++++++++++++++++++
+ 2 files changed, 34 insertions(+)
+
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -474,12 +474,14 @@ extern void nbcon_cpu_emergency_exit(voi
+ extern bool nbcon_can_proceed(struct nbcon_write_context *wctxt);
+ extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt);
+ extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt);
++extern void nbcon_reacquire(struct nbcon_write_context *wctxt);
+ #else
+ static inline void nbcon_cpu_emergency_enter(void) { }
+ static inline void nbcon_cpu_emergency_exit(void) { }
+ static inline bool nbcon_can_proceed(struct nbcon_write_context *wctxt) { return false; }
+ static inline bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) { return false; }
+ static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; }
++static inline void nbcon_reacquire(struct nbcon_write_context *wctxt) { }
+ #endif
+
+ extern int console_set_on_cmdline;
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -831,6 +831,38 @@ bool nbcon_exit_unsafe(struct nbcon_writ
+ EXPORT_SYMBOL_GPL(nbcon_exit_unsafe);
+
+ /**
++ * nbcon_reacquire - Reacquire a console after losing ownership
++ * @wctxt: The write context that was handed to the write function
++ *
++ * Since ownership can be lost at any time due to handover or takeover, a
++ * printing context _should_ be prepared to back out immediately and
++ * carefully. However, there are many scenarios where the context _must_
++ * reacquire ownership in order to finalize or revert hardware changes.
++ *
++ * This function allows a context to reacquire ownership using the same
++ * priority as its previous ownership.
++ *
++ * Note that for printing contexts, after a successful reacquire the
++ * context will have no output buffer because that has been lost. This
++ * function cannot be used to resume printing.
++ */
++void nbcon_reacquire(struct nbcon_write_context *wctxt)
++{
++ struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
++ struct console *con = ctxt->console;
++ struct nbcon_state cur;
++
++ while (!nbcon_context_try_acquire(ctxt))
++ cpu_relax();
++
++ wctxt->outbuf = NULL;
++ wctxt->len = 0;
++ nbcon_state_read(con, &cur);
++ wctxt->unsafe_takeover = cur.unsafe_takeover;
++}
++EXPORT_SYMBOL_GPL(nbcon_reacquire);
++
++/**
+ * nbcon_emit_next_record - Emit a record in the acquired context
+ * @wctxt: The write context that will be handed to the write function
+ * @use_atomic: True if the write_atomic callback is to be used
diff --git a/debian/patches-rt/0130-serial-core-Provide-low-level-functions-to-port-lock.patch b/debian/patches-rt/0130-serial-core-Provide-low-level-functions-to-port-lock.patch
new file mode 100644
index 0000000000..e8e506fe52
--- /dev/null
+++ b/debian/patches-rt/0130-serial-core-Provide-low-level-functions-to-port-lock.patch
@@ -0,0 +1,43 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 11 Dec 2023 09:19:18 +0000
+Subject: [PATCH 130/134] serial: core: Provide low-level functions to port
+ lock
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+The nbcon console's driver_enter() and driver_exit() callbacks need
+to lock the port lock in order to synchronize against other hardware
+activity (such as adjusting baud rates). However, they cannot use
+the uart_port_lock() wrappers because the printk subsystem will
+perform nbcon locking after calling the driver_enter() callback.
+
+Provide low-level variants __uart_port_lock_irqsave() and
+__uart_port_unlock_irqrestore() for this purpose. These are only
+to be used by the driver_enter()/driver_exit() callbacks.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/serial_core.h | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/include/linux/serial_core.h
++++ b/include/linux/serial_core.h
+@@ -681,6 +681,18 @@ static inline void uart_port_unlock_irqr
+ spin_unlock_irqrestore(&up->lock, flags);
+ }
+
++/* Only for use in the console->driver_enter() callback. */
++static inline void __uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
++{
++ spin_lock_irqsave(&up->lock, *flags);
++}
++
++/* Only for use in the console->driver_exit() callback. */
++static inline void __uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
++{
++ spin_unlock_irqrestore(&up->lock, flags);
++}
++
+ static inline int serial_port_in(struct uart_port *up, int offset)
+ {
+ return up->serial_in(up, offset);
diff --git a/debian/patches-rt/0131-serial-8250-Switch-to-nbcon-console.patch b/debian/patches-rt/0131-serial-8250-Switch-to-nbcon-console.patch
new file mode 100644
index 0000000000..7ba577d9c4
--- /dev/null
+++ b/debian/patches-rt/0131-serial-8250-Switch-to-nbcon-console.patch
@@ -0,0 +1,335 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Wed, 13 Sep 2023 15:30:36 +0000
+Subject: [PATCH 131/134] serial: 8250: Switch to nbcon console
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Implement the necessary callbacks to switch the 8250 console driver
+to perform as an nbcon console.
+
+Add implementations for the nbcon consoles (write_atomic, write_thread,
+driver_enter, driver_exit) and add CON_NBCON to the initial flags.
+
+The legacy code is kept in order to easily switch back to legacy mode
+by defining CONFIG_SERIAL_8250_LEGACY_CONSOLE.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/8250/8250_core.c | 42 +++++++++
+ drivers/tty/serial/8250/8250_port.c | 156 +++++++++++++++++++++++++++++++++++-
+ include/linux/serial_8250.h | 6 +
+ 3 files changed, 201 insertions(+), 3 deletions(-)
+
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -592,6 +592,7 @@ serial8250_register_ports(struct uart_dr
+
+ #ifdef CONFIG_SERIAL_8250_CONSOLE
+
++#ifdef CONFIG_SERIAL_8250_LEGACY_CONSOLE
+ static void univ8250_console_write(struct console *co, const char *s,
+ unsigned int count)
+ {
+@@ -599,6 +600,37 @@ static void univ8250_console_write(struc
+
+ serial8250_console_write(up, s, count);
+ }
++#else
++static bool univ8250_console_write_atomic(struct console *co,
++ struct nbcon_write_context *wctxt)
++{
++ struct uart_8250_port *up = &serial8250_ports[co->index];
++
++ return serial8250_console_write_atomic(up, wctxt);
++}
++
++static bool univ8250_console_write_thread(struct console *co,
++ struct nbcon_write_context *wctxt)
++{
++ struct uart_8250_port *up = &serial8250_ports[co->index];
++
++ return serial8250_console_write_thread(up, wctxt);
++}
++
++static void univ8250_console_driver_enter(struct console *con, unsigned long *flags)
++{
++ struct uart_port *up = &serial8250_ports[con->index].port;
++
++ __uart_port_lock_irqsave(up, flags);
++}
++
++static void univ8250_console_driver_exit(struct console *con, unsigned long flags)
++{
++ struct uart_port *up = &serial8250_ports[con->index].port;
++
++ __uart_port_unlock_irqrestore(up, flags);
++}
++#endif /* CONFIG_SERIAL_8250_LEGACY_CONSOLE */
+
+ static int univ8250_console_setup(struct console *co, char *options)
+ {
+@@ -698,12 +730,20 @@ static int univ8250_console_match(struct
+
+ static struct console univ8250_console = {
+ .name = "ttyS",
++#ifdef CONFIG_SERIAL_8250_LEGACY_CONSOLE
+ .write = univ8250_console_write,
++ .flags = CON_PRINTBUFFER | CON_ANYTIME,
++#else
++ .write_atomic = univ8250_console_write_atomic,
++ .write_thread = univ8250_console_write_thread,
++ .driver_enter = univ8250_console_driver_enter,
++ .driver_exit = univ8250_console_driver_exit,
++ .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_NBCON,
++#endif
+ .device = uart_console_device,
+ .setup = univ8250_console_setup,
+ .exit = univ8250_console_exit,
+ .match = univ8250_console_match,
+- .flags = CON_PRINTBUFFER | CON_ANYTIME,
+ .index = -1,
+ .data = &serial8250_reg,
+ };
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -557,6 +557,11 @@ static int serial8250_em485_init(struct
+ if (!p->em485)
+ return -ENOMEM;
+
++#ifndef CONFIG_SERIAL_8250_LEGACY_CONSOLE
++ if (uart_console(&p->port))
++ dev_warn(p->port.dev, "no atomic printing for rs485 consoles\n");
++#endif
++
+ hrtimer_init(&p->em485->stop_tx_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ hrtimer_init(&p->em485->start_tx_timer, CLOCK_MONOTONIC,
+@@ -709,7 +714,11 @@ static void serial8250_set_sleep(struct
+ serial8250_rpm_put(p);
+ }
+
+-static void serial8250_clear_IER(struct uart_8250_port *up)
++/*
++ * Only to be used by write_atomic() and the legacy write(), which do not
++ * require port lock.
++ */
++static void __serial8250_clear_IER(struct uart_8250_port *up)
+ {
+ if (up->capabilities & UART_CAP_UUE)
+ serial_out(up, UART_IER, UART_IER_UUE);
+@@ -717,6 +726,11 @@ static void serial8250_clear_IER(struct
+ serial_out(up, UART_IER, 0);
+ }
+
++static inline void serial8250_clear_IER(struct uart_8250_port *up)
++{
++ __serial8250_clear_IER(up);
++}
++
+ #ifdef CONFIG_SERIAL_8250_RSA
+ /*
+ * Attempts to turn on the RSA FIFO. Returns zero on failure.
+@@ -3328,6 +3342,11 @@ static void serial8250_console_putchar(s
+
+ wait_for_xmitr(up, UART_LSR_THRE);
+ serial_port_out(port, UART_TX, ch);
++
++ if (ch == '\n')
++ up->console_newline_needed = false;
++ else
++ up->console_newline_needed = true;
+ }
+
+ /*
+@@ -3356,6 +3375,7 @@ static void serial8250_console_restore(s
+ serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS);
+ }
+
++#ifdef CONFIG_SERIAL_8250_LEGACY_CONSOLE
+ /*
+ * Print a string to the serial port using the device FIFO
+ *
+@@ -3414,7 +3434,7 @@ void serial8250_console_write(struct uar
+ * First save the IER then disable the interrupts
+ */
+ ier = serial_port_in(port, UART_IER);
+- serial8250_clear_IER(up);
++ __serial8250_clear_IER(up);
+
+ /* check scratch reg to see if port powered off during system sleep */
+ if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
+@@ -3480,6 +3500,135 @@ void serial8250_console_write(struct uar
+ if (locked)
+ uart_port_unlock_irqrestore(port, flags);
+ }
++#else
++bool serial8250_console_write_thread(struct uart_8250_port *up,
++ struct nbcon_write_context *wctxt)
++{
++ struct uart_8250_em485 *em485 = up->em485;
++ struct uart_port *port = &up->port;
++ bool done = false;
++ unsigned int ier;
++
++ touch_nmi_watchdog();
++
++ if (!nbcon_enter_unsafe(wctxt))
++ return false;
++
++ /* First save IER then disable the interrupts. */
++ ier = serial_port_in(port, UART_IER);
++ serial8250_clear_IER(up);
++
++ /* Check scratch reg if port powered off during system sleep. */
++ if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
++ serial8250_console_restore(up);
++ up->canary = 0;
++ }
++
++ if (em485) {
++ if (em485->tx_stopped)
++ up->rs485_start_tx(up);
++ mdelay(port->rs485.delay_rts_before_send);
++ }
++
++ if (nbcon_exit_unsafe(wctxt)) {
++ int len = READ_ONCE(wctxt->len);
++ int i;
++
++ /*
++ * Write out the message. Toggle unsafe for each byte in order
++ * to give another (higher priority) context the opportunity
++ * for a friendly takeover. If such a takeover occurs, this
++ * context must reacquire ownership in order to perform final
++ * actions (such as re-enabling the interrupts).
++ *
++ * IMPORTANT: wctxt->outbuf and wctxt->len are no longer valid
++ * after a reacquire so writing the message must be
++ * aborted.
++ */
++ for (i = 0; i < len; i++) {
++ if (!nbcon_enter_unsafe(wctxt)) {
++ nbcon_reacquire(wctxt);
++ break;
++ }
++
++ uart_console_write(port, wctxt->outbuf + i, 1, serial8250_console_putchar);
++
++ if (!nbcon_exit_unsafe(wctxt)) {
++ nbcon_reacquire(wctxt);
++ break;
++ }
++ }
++ done = (i == len);
++ } else {
++ nbcon_reacquire(wctxt);
++ }
++
++ while (!nbcon_enter_unsafe(wctxt))
++ nbcon_reacquire(wctxt);
++
++ /* Finally, wait for transmitter to become empty and restore IER. */
++ wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
++ if (em485) {
++ mdelay(port->rs485.delay_rts_after_send);
++ if (em485->tx_stopped)
++ up->rs485_stop_tx(up);
++ }
++ serial_port_out(port, UART_IER, ier);
++
++ /*
++ * The receive handling will happen properly because the receive ready
++ * bit will still be set; it is not cleared on read. However, modem
++ * control will not, we must call it if we have saved something in the
++ * saved flags while processing with interrupts off.
++ */
++ if (up->msr_saved_flags)
++ serial8250_modem_status(up);
++
++ /* Success if no handover/takeover and message fully printed. */
++ return (nbcon_exit_unsafe(wctxt) && done);
++}
++
++bool serial8250_console_write_atomic(struct uart_8250_port *up,
++ struct nbcon_write_context *wctxt)
++{
++ struct uart_port *port = &up->port;
++ unsigned int ier;
++
++ /* Atomic console not supported for rs485 mode. */
++ if (up->em485)
++ return false;
++
++ touch_nmi_watchdog();
++
++ if (!nbcon_enter_unsafe(wctxt))
++ return false;
++
++ /*
++ * First save IER then disable the interrupts. The special variant to
++ * clear IER is used because atomic printing may occur without holding
++ * the port lock.
++ */
++ ier = serial_port_in(port, UART_IER);
++ __serial8250_clear_IER(up);
++
++ /* Check scratch reg if port powered off during system sleep. */
++ if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
++ serial8250_console_restore(up);
++ up->canary = 0;
++ }
++
++ if (up->console_newline_needed)
++ uart_console_write(port, "\n", 1, serial8250_console_putchar);
++ uart_console_write(port, wctxt->outbuf, wctxt->len, serial8250_console_putchar);
++
++ /* Finally, wait for transmitter to become empty and restore IER. */
++ wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
++ serial_port_out(port, UART_IER, ier);
++
++ /* Success if no handover/takeover. */
++ return nbcon_exit_unsafe(wctxt);
++}
++#endif /* CONFIG_SERIAL_8250_LEGACY_CONSOLE */
+
+ static unsigned int probe_baud(struct uart_port *port)
+ {
+@@ -3498,6 +3647,7 @@ static unsigned int probe_baud(struct ua
+
+ int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
+ {
++ struct uart_8250_port *up = up_to_u8250p(port);
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+@@ -3507,6 +3657,8 @@ int serial8250_console_setup(struct uart
+ if (!port->iobase && !port->membase)
+ return -ENODEV;
+
++ up->console_newline_needed = false;
++
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ else if (probe)
+--- a/include/linux/serial_8250.h
++++ b/include/linux/serial_8250.h
+@@ -153,6 +153,8 @@ struct uart_8250_port {
+ #define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
+ unsigned char msr_saved_flags;
+
++ bool console_newline_needed;
++
+ struct uart_8250_dma *dma;
+ const struct uart_8250_ops *ops;
+
+@@ -204,6 +206,10 @@ void serial8250_init_port(struct uart_82
+ void serial8250_set_defaults(struct uart_8250_port *up);
+ void serial8250_console_write(struct uart_8250_port *up, const char *s,
+ unsigned int count);
++bool serial8250_console_write_atomic(struct uart_8250_port *up,
++ struct nbcon_write_context *wctxt);
++bool serial8250_console_write_thread(struct uart_8250_port *up,
++ struct nbcon_write_context *wctxt);
+ int serial8250_console_setup(struct uart_port *port, char *options, bool probe);
+ int serial8250_console_exit(struct uart_port *port);
+
diff --git a/debian/patches-rt/0132-printk-Add-kthread-for-all-legacy-consoles.patch b/debian/patches-rt/0132-printk-Add-kthread-for-all-legacy-consoles.patch
new file mode 100644
index 0000000000..7c46be7133
--- /dev/null
+++ b/debian/patches-rt/0132-printk-Add-kthread-for-all-legacy-consoles.patch
@@ -0,0 +1,422 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Fri, 22 Sep 2023 17:35:04 +0000
+Subject: [PATCH 132/134] printk: Add kthread for all legacy consoles
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+The write callback of legacy consoles make use of spinlocks.
+This is not permitted with PREEMPT_RT in atomic contexts.
+
+Create a new kthread to handle printing of all the legacy
+consoles (and nbcon consoles if boot consoles are registered).
+
+Since the consoles are printing in a task context, it is no
+longer appropriate to support the legacy handover mechanism.
+
+These changes exist only for CONFIG_PREEMPT_RT.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/internal.h | 1
+ kernel/printk/nbcon.c | 18 ++-
+ kernel/printk/printk.c | 237 +++++++++++++++++++++++++++++++++++++++--------
+ 3 files changed, 210 insertions(+), 46 deletions(-)
+
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -95,6 +95,7 @@ void nbcon_atomic_flush_all(void);
+ bool nbcon_atomic_emit_next_record(struct console *con, bool *handover, int cookie);
+ void nbcon_kthread_create(struct console *con);
+ void nbcon_wake_threads(void);
++void nbcon_legacy_kthread_create(void);
+
+ /*
+ * Check if the given console is currently capable and allowed to print
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -1247,9 +1247,11 @@ bool nbcon_atomic_emit_next_record(struc
+ *handover = false;
+
+ /* Use the same locking order as console_emit_next_record(). */
+- printk_safe_enter_irqsave(flags);
+- console_lock_spinning_enable();
+- stop_critical_timings();
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
++ printk_safe_enter_irqsave(flags);
++ console_lock_spinning_enable();
++ stop_critical_timings();
++ }
+
+ con->driver_enter(con, &driver_flags);
+ cant_migrate();
+@@ -1261,9 +1263,11 @@ bool nbcon_atomic_emit_next_record(struc
+
+ con->driver_exit(con, driver_flags);
+
+- start_critical_timings();
+- *handover = console_lock_spinning_disable_and_check(cookie);
+- printk_safe_exit_irqrestore(flags);
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
++ start_critical_timings();
++ *handover = console_lock_spinning_disable_and_check(cookie);
++ printk_safe_exit_irqrestore(flags);
++ }
+
+ return progress;
+ }
+@@ -1469,6 +1473,8 @@ static int __init printk_setup_threads(v
+ printk_threads_enabled = true;
+ for_each_console(con)
+ nbcon_kthread_create(con);
++ if (IS_ENABLED(CONFIG_PREEMPT_RT) && printing_via_unlock)
++ nbcon_legacy_kthread_create();
+ console_list_unlock();
+ return 0;
+ }
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -487,6 +487,9 @@ bool have_boot_console;
+
+ #ifdef CONFIG_PRINTK
+ DECLARE_WAIT_QUEUE_HEAD(log_wait);
++
++static DECLARE_WAIT_QUEUE_HEAD(legacy_wait);
++
+ /* All 3 protected by @syslog_lock. */
+ /* the next printk record to read by syslog(READ) or /proc/kmsg */
+ static u64 syslog_seq;
+@@ -2345,7 +2348,8 @@ asmlinkage int vprintk_emit(int facility
+ const struct dev_printk_info *dev_info,
+ const char *fmt, va_list args)
+ {
+- bool do_trylock_unlock = printing_via_unlock;
++ bool do_trylock_unlock = printing_via_unlock &&
++ !IS_ENABLED(CONFIG_PREEMPT_RT);
+ int printed_len;
+
+ /* Suppress unimportant messages after panic happens */
+@@ -2473,6 +2477,14 @@ EXPORT_SYMBOL(_printk);
+ static bool pr_flush(int timeout_ms, bool reset_on_progress);
+ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
+
++static struct task_struct *nbcon_legacy_kthread;
++
++static inline void wake_up_legacy_kthread(void)
++{
++ if (nbcon_legacy_kthread)
++ wake_up_interruptible(&legacy_wait);
++}
++
+ #else /* CONFIG_PRINTK */
+
+ #define printk_time false
+@@ -2486,6 +2498,8 @@ static u64 syslog_seq;
+ static bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; }
+ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
+
++static inline void nbcon_legacy_kthread_create(void) { }
++static inline void wake_up_legacy_kthread(void) { }
+ #endif /* CONFIG_PRINTK */
+
+ #ifdef CONFIG_EARLY_PRINTK
+@@ -2723,6 +2737,8 @@ void resume_console(void)
+ }
+ console_srcu_read_unlock(cookie);
+
++ wake_up_legacy_kthread();
++
+ pr_flush(1000, true);
+ }
+
+@@ -2737,7 +2753,8 @@ void resume_console(void)
+ */
+ static int console_cpu_notify(unsigned int cpu)
+ {
+- if (!cpuhp_tasks_frozen && printing_via_unlock) {
++ if (!cpuhp_tasks_frozen && printing_via_unlock &&
++ !IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ /* If trylock fails, someone else is doing the printing */
+ if (console_trylock())
+ console_unlock();
+@@ -2962,31 +2979,43 @@ static bool console_emit_next_record(str
+ con->dropped = 0;
+ }
+
+- /*
+- * While actively printing out messages, if another printk()
+- * were to occur on another CPU, it may wait for this one to
+- * finish. This task can not be preempted if there is a
+- * waiter waiting to take over.
+- *
+- * Interrupts are disabled because the hand over to a waiter
+- * must not be interrupted until the hand over is completed
+- * (@console_waiter is cleared).
+- */
+- printk_safe_enter_irqsave(flags);
+- console_lock_spinning_enable();
++ /* Write everything out to the hardware. */
+
+- /* Do not trace print latency. */
+- stop_critical_timings();
++ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
++ /*
++ * On PREEMPT_RT this function is either in a thread or
++ * panic context. So there is no need for concern about
++ * printk reentrance or handovers.
++ */
+
+- /* Write everything out to the hardware. */
+- con->write(con, outbuf, pmsg.outbuf_len);
++ con->write(con, outbuf, pmsg.outbuf_len);
++ con->seq = pmsg.seq + 1;
++ } else {
++ /*
++ * While actively printing out messages, if another printk()
++ * were to occur on another CPU, it may wait for this one to
++ * finish. This task can not be preempted if there is a
++ * waiter waiting to take over.
++ *
++ * Interrupts are disabled because the hand over to a waiter
++ * must not be interrupted until the hand over is completed
++ * (@console_waiter is cleared).
++ */
++ printk_safe_enter_irqsave(flags);
++ console_lock_spinning_enable();
+
+- start_critical_timings();
++ /* Do not trace print latency. */
++ stop_critical_timings();
+
+- con->seq = pmsg.seq + 1;
++ con->write(con, outbuf, pmsg.outbuf_len);
+
+- *handover = console_lock_spinning_disable_and_check(cookie);
+- printk_safe_exit_irqrestore(flags);
++ start_critical_timings();
++
++ con->seq = pmsg.seq + 1;
++
++ *handover = console_lock_spinning_disable_and_check(cookie);
++ printk_safe_exit_irqrestore(flags);
++ }
+ skip:
+ return true;
+ }
+@@ -3096,19 +3125,7 @@ static bool console_flush_all(bool do_co
+ return false;
+ }
+
+-/**
+- * console_unlock - unblock the console subsystem from printing
+- *
+- * Releases the console_lock which the caller holds to block printing of
+- * the console subsystem.
+- *
+- * While the console_lock was held, console output may have been buffered
+- * by printk(). If this is the case, console_unlock(); emits
+- * the output prior to releasing the lock.
+- *
+- * console_unlock(); may be called from any context.
+- */
+-void console_unlock(void)
++static void console_flush_and_unlock(void)
+ {
+ bool do_cond_resched;
+ bool handover;
+@@ -3152,6 +3169,32 @@ void console_unlock(void)
+ */
+ } while (prb_read_valid(prb, next_seq, NULL) && console_trylock());
+ }
++
++/**
++ * console_unlock - unblock the console subsystem from printing
++ *
++ * Releases the console_lock which the caller holds to block printing of
++ * the console subsystem.
++ *
++ * While the console_lock was held, console output may have been buffered
++ * by printk(). If this is the case, console_unlock(); emits
++ * the output prior to releasing the lock.
++ *
++ * console_unlock(); may be called from any context.
++ */
++void console_unlock(void)
++{
++ /*
++ * PREEMPT_RT relies on kthread and atomic consoles for printing.
++ * It never attempts to print from console_unlock().
++ */
++ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
++ __console_unlock();
++ return;
++ }
++
++ console_flush_and_unlock();
++}
+ EXPORT_SYMBOL(console_unlock);
+
+ /**
+@@ -3361,11 +3404,106 @@ void console_start(struct console *conso
+
+ if (flags & CON_NBCON)
+ nbcon_kthread_wake(console);
++ else
++ wake_up_legacy_kthread();
+
+ __pr_flush(console, 1000, true);
+ }
+ EXPORT_SYMBOL(console_start);
+
++#ifdef CONFIG_PRINTK
++static bool printer_should_wake(void)
++{
++ bool available = false;
++ struct console *con;
++ int cookie;
++
++ if (kthread_should_stop())
++ return true;
++
++ cookie = console_srcu_read_lock();
++ for_each_console_srcu(con) {
++ short flags = console_srcu_read_flags(con);
++ u64 printk_seq;
++
++ /*
++ * The legacy printer thread is only for legacy consoles,
++ * unless the nbcon console has no kthread printer.
++ */
++ if ((flags & CON_NBCON) && con->kthread)
++ continue;
++
++ if (!console_is_usable(con, flags, true))
++ continue;
++
++ if (flags & CON_NBCON) {
++ printk_seq = nbcon_seq_read(con);
++ } else {
++ /*
++ * It is safe to read @seq because only this
++ * thread context updates @seq.
++ */
++ printk_seq = con->seq;
++ }
++
++ if (prb_read_valid(prb, printk_seq, NULL)) {
++ available = true;
++ break;
++ }
++ }
++ console_srcu_read_unlock(cookie);
++
++ return available;
++}
++
++static int nbcon_legacy_kthread_func(void *unused)
++{
++ int error;
++
++ for (;;) {
++ error = wait_event_interruptible(legacy_wait, printer_should_wake());
++
++ if (kthread_should_stop())
++ break;
++
++ if (error)
++ continue;
++
++ console_lock();
++ console_flush_and_unlock();
++ }
++
++ return 0;
++}
++
++void nbcon_legacy_kthread_create(void)
++{
++ struct task_struct *kt;
++
++ lockdep_assert_held(&console_mutex);
++
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ return;
++
++ if (!printk_threads_enabled || nbcon_legacy_kthread)
++ return;
++
++ kt = kthread_run(nbcon_legacy_kthread_func, NULL, "pr/legacy");
++ if (IS_ERR(kt)) {
++ pr_err("unable to start legacy printing thread\n");
++ return;
++ }
++
++ nbcon_legacy_kthread = kt;
++
++ /*
++ * It is important that console printing threads are scheduled
++ * shortly after a printk call and with generous runtime budgets.
++ */
++ sched_set_normal(nbcon_legacy_kthread, -20);
++}
++#endif /* CONFIG_PRINTK */
++
+ static int __read_mostly keep_bootcon;
+
+ static int __init keep_bootcon_setup(char *str)
+@@ -3632,6 +3770,7 @@ void register_console(struct console *ne
+ nbcon_init(newcon);
+ } else {
+ have_legacy_console = true;
++ nbcon_legacy_kthread_create();
+ }
+
+ if (newcon->flags & CON_BOOT)
+@@ -3770,6 +3909,13 @@ static int unregister_console_locked(str
+ nbcon_kthread_create(c);
+ }
+
++#ifdef CONFIG_PRINTK
++ if (!printing_via_unlock && nbcon_legacy_kthread) {
++ kthread_stop(nbcon_legacy_kthread);
++ nbcon_legacy_kthread = NULL;
++ }
++#endif
++
+ return res;
+ }
+
+@@ -3929,8 +4075,12 @@ static bool __pr_flush(struct console *c
+
+ seq = prb_next_reserve_seq(prb);
+
+- /* Flush the consoles so that records up to @seq are printed. */
+- if (printing_via_unlock) {
++ /*
++ * Flush the consoles so that records up to @seq are printed.
++ * Otherwise this function will just wait for the threaded printers
++ * to print up to @seq.
++ */
++ if (printing_via_unlock && !IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ console_lock();
+ console_unlock();
+ }
+@@ -4038,9 +4188,16 @@ static void wake_up_klogd_work_func(stru
+ int pending = this_cpu_xchg(printk_pending, 0);
+
+ if (pending & PRINTK_PENDING_OUTPUT) {
+- /* If trylock fails, someone else is doing the printing */
+- if (console_trylock())
+- console_unlock();
++ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
++ wake_up_interruptible(&legacy_wait);
++ } else {
++ /*
++ * If trylock fails, some other context
++ * will do the printing.
++ */
++ if (console_trylock())
++ console_unlock();
++ }
+ }
+
+ if (pending & PRINTK_PENDING_WAKEUP)
diff --git a/debian/patches-rt/0133-serial-8250-revert-drop-lockdep-annotation-from-seri.patch b/debian/patches-rt/0133-serial-8250-revert-drop-lockdep-annotation-from-seri.patch
new file mode 100644
index 0000000000..c9eba0a738
--- /dev/null
+++ b/debian/patches-rt/0133-serial-8250-revert-drop-lockdep-annotation-from-seri.patch
@@ -0,0 +1,29 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 2 Oct 2023 15:30:43 +0000
+Subject: [PATCH 133/134] serial: 8250: revert "drop lockdep annotation from
+ serial8250_clear_IER()"
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+The 8250 driver no longer depends on @oops_in_progress and
+will no longer violate the port->lock locking constraints.
+
+This reverts commit 3d9e6f556e235ddcdc9f73600fdd46fe1736b090.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/8250/8250_port.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -728,6 +728,9 @@ static void __serial8250_clear_IER(struc
+
+ static inline void serial8250_clear_IER(struct uart_8250_port *up)
+ {
++ /* Port locked to synchronize UART_IER access against the console. */
++ lockdep_assert_held_once(&up->port.lock);
++
+ __serial8250_clear_IER(up);
+ }
+
diff --git a/debian/patches-rt/0134-printk-Avoid-false-positive-lockdep-report-for-legac.patch b/debian/patches-rt/0134-printk-Avoid-false-positive-lockdep-report-for-legac.patch
new file mode 100644
index 0000000000..225e8db694
--- /dev/null
+++ b/debian/patches-rt/0134-printk-Avoid-false-positive-lockdep-report-for-legac.patch
@@ -0,0 +1,64 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 11 Dec 2023 09:34:16 +0000
+Subject: [PATCH 134/134] printk: Avoid false positive lockdep report for
+ legacy driver.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+printk may invoke the legacy console driver from atomic context. This leads to
+a lockdep splat because the console driver will acquire a sleeping lock and the
+caller may also hold a spinning lock. This is noticed by lockdep on !PREEMPT_RT
+configurations because it will also lead to a problem on PREEMPT_RT.
+
+On PREEMPT_RT the atomic path is always avoided and the console driver is
+always invoked from a dedicated thread. Thus the lockdep splat is a false
+positive.
+
+Override the lock-context before invoking the console driver.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2817,6 +2817,8 @@ static void __console_unlock(void)
+ up_console_sem();
+ }
+
++static DEFINE_WAIT_OVERRIDE_MAP(printk_legacy_map, LD_WAIT_SLEEP);
++
+ #ifdef CONFIG_PRINTK
+
+ /*
+@@ -2985,7 +2987,7 @@ static bool console_emit_next_record(str
+ /*
+ * On PREEMPT_RT this function is either in a thread or
+ * panic context. So there is no need for concern about
+- * printk reentrance or handovers.
++ * printk reentrance, handovers, or lockdep complaints.
+ */
+
+ con->write(con, outbuf, pmsg.outbuf_len);
+@@ -3007,7 +3009,9 @@ static bool console_emit_next_record(str
+ /* Do not trace print latency. */
+ stop_critical_timings();
+
++ lock_map_acquire_try(&printk_legacy_map);
+ con->write(con, outbuf, pmsg.outbuf_len);
++ lock_map_release(&printk_legacy_map);
+
+ start_critical_timings();
+
+@@ -3084,7 +3088,10 @@ static bool console_flush_all(bool do_co
+ any_usable = true;
+
+ if (flags & CON_NBCON) {
++
++ lock_map_acquire_try(&printk_legacy_map);
+ progress = nbcon_atomic_emit_next_record(con, handover, cookie);
++ lock_map_release(&printk_legacy_map);
+
+ printk_seq = nbcon_seq_read(con);
+ } else {
diff --git a/debian/patches-rt/ARM64__Allow_to_enable_RT.patch b/debian/patches-rt/ARM64__Allow_to_enable_RT.patch
new file mode 100644
index 0000000000..3657ef8700
--- /dev/null
+++ b/debian/patches-rt/ARM64__Allow_to_enable_RT.patch
@@ -0,0 +1,27 @@
+Subject: ARM64: Allow to enable RT
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri Oct 11 13:14:35 2019 +0200
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+Allow to select RT.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+
+---
+ arch/arm64/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+---
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -97,6 +97,7 @@ config ARM64
+ select ARCH_SUPPORTS_NUMA_BALANCING
+ select ARCH_SUPPORTS_PAGE_TABLE_CHECK
+ select ARCH_SUPPORTS_PER_VMA_LOCK
++ select ARCH_SUPPORTS_RT
+ select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+ select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
+ select ARCH_WANT_DEFAULT_BPF_JIT
diff --git a/debian/patches-rt/ARM__Allow_to_enable_RT.patch b/debian/patches-rt/ARM__Allow_to_enable_RT.patch
new file mode 100644
index 0000000000..902eca18c3
--- /dev/null
+++ b/debian/patches-rt/ARM__Allow_to_enable_RT.patch
@@ -0,0 +1,35 @@
+Subject: ARM: Allow to enable RT
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri Oct 11 13:14:29 2019 +0200
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+Allow to select RT.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+
+---
+ arch/arm/Kconfig | 2 ++
+ 1 file changed, 2 insertions(+)
+---
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -34,6 +34,7 @@ config ARM
+ select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7
+ select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_SUPPORTS_HUGETLBFS if ARM_LPAE
++ select ARCH_SUPPORTS_RT if HAVE_POSIX_CPU_TIMERS_TASK_WORK
+ select ARCH_USE_BUILTIN_BSWAP
+ select ARCH_USE_CMPXCHG_LOCKREF
+ select ARCH_USE_MEMTEST
+@@ -118,6 +119,7 @@ config ARM
+ select HAVE_PERF_EVENTS
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
++ select HAVE_POSIX_CPU_TIMERS_TASK_WORK if !KVM
+ select MMU_GATHER_RCU_TABLE_FREE if SMP && ARM_LPAE
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RSEQ
diff --git a/debian/patches-rt/ARM__enable_irq_in_translation_section_permission_fault_handlers.patch b/debian/patches-rt/ARM__enable_irq_in_translation_section_permission_fault_handlers.patch
new file mode 100644
index 0000000000..ce42fd3a27
--- /dev/null
+++ b/debian/patches-rt/ARM__enable_irq_in_translation_section_permission_fault_handlers.patch
@@ -0,0 +1,91 @@
+Subject: ARM: enable irq in translation/section permission fault handlers
+From: Yadi.hu <yadi.hu@windriver.com>
+Date: Wed Dec 10 10:32:09 2014 +0800
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+From: Yadi.hu <yadi.hu@windriver.com>
+
+Probably happens on all ARM, with
+CONFIG_PREEMPT_RT
+CONFIG_DEBUG_ATOMIC_SLEEP
+
+This simple program....
+
+int main() {
+ *((char*)0xc0001000) = 0;
+};
+
+[ 512.742724] BUG: sleeping function called from invalid context at kernel/rtmutex.c:658
+[ 512.743000] in_atomic(): 0, irqs_disabled(): 128, pid: 994, name: a
+[ 512.743217] INFO: lockdep is turned off.
+[ 512.743360] irq event stamp: 0
+[ 512.743482] hardirqs last enabled at (0): [< (null)>] (null)
+[ 512.743714] hardirqs last disabled at (0): [<c0426370>] copy_process+0x3b0/0x11c0
+[ 512.744013] softirqs last enabled at (0): [<c0426370>] copy_process+0x3b0/0x11c0
+[ 512.744303] softirqs last disabled at (0): [< (null)>] (null)
+[ 512.744631] [<c041872c>] (unwind_backtrace+0x0/0x104)
+[ 512.745001] [<c09af0c4>] (dump_stack+0x20/0x24)
+[ 512.745355] [<c0462490>] (__might_sleep+0x1dc/0x1e0)
+[ 512.745717] [<c09b6770>] (rt_spin_lock+0x34/0x6c)
+[ 512.746073] [<c0441bf0>] (do_force_sig_info+0x34/0xf0)
+[ 512.746457] [<c0442668>] (force_sig_info+0x18/0x1c)
+[ 512.746829] [<c041d880>] (__do_user_fault+0x9c/0xd8)
+[ 512.747185] [<c041d938>] (do_bad_area+0x7c/0x94)
+[ 512.747536] [<c041d990>] (do_sect_fault+0x40/0x48)
+[ 512.747898] [<c040841c>] (do_DataAbort+0x40/0xa0)
+[ 512.748181] Exception stack(0xecaa1fb0 to 0xecaa1ff8)
+
+Oxc0000000 belongs to kernel address space, user task can not be
+allowed to access it. For above condition, correct result is that
+test case should receive a “segment fault” and exits but not stacks.
+
+the root cause is commit 02fe2845d6a8 ("avoid enabling interrupts in
+prefetch/data abort handlers"),it deletes irq enable block in Data
+abort assemble code and move them into page/breakpiont/alignment fault
+handlers instead. But author does not enable irq in translation/section
+permission fault handlers. ARM disables irq when it enters exception/
+interrupt mode, if kernel doesn't enable irq, it would be still disabled
+during translation/section permission fault.
+
+We see the above splat because do_force_sig_info is still called with
+IRQs off, and that code eventually does a:
+
+ spin_lock_irqsave(&t->sighand->siglock, flags);
+
+As this is architecture independent code, and we've not seen any other
+need for other arch to have the siglock converted to raw lock, we can
+conclude that we should enable irq for ARM translation/section
+permission exception.
+
+
+Signed-off-by: Yadi.hu <yadi.hu@windriver.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+
+---
+ arch/arm/mm/fault.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+---
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -404,6 +404,9 @@ do_translation_fault(unsigned long addr,
+ if (addr < TASK_SIZE)
+ return do_page_fault(addr, fsr, regs);
+
++ if (interrupts_enabled(regs))
++ local_irq_enable();
++
+ if (user_mode(regs))
+ goto bad_area;
+
+@@ -474,6 +477,9 @@ do_translation_fault(unsigned long addr,
+ static int
+ do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ {
++ if (interrupts_enabled(regs))
++ local_irq_enable();
++
+ do_bad_area(addr, fsr, regs);
+ return 0;
+ }
diff --git a/debian/patches-rt/Add_localversion_for_-RT_release.patch b/debian/patches-rt/Add_localversion_for_-RT_release.patch
new file mode 100644
index 0000000000..55db77eff5
--- /dev/null
+++ b/debian/patches-rt/Add_localversion_for_-RT_release.patch
@@ -0,0 +1,19 @@
+Subject: Add localversion for -RT release
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri Jul 8 20:25:16 2011 +0200
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+
+---
+ localversion-rt | 1 +
+ 1 file changed, 1 insertion(+)
+ create mode 100644 localversion-rt
+---
+--- /dev/null
++++ b/localversion-rt
+@@ -0,0 +1 @@
++-rt18
diff --git a/debian/patches-rt/POWERPC__Allow_to_enable_RT.patch b/debian/patches-rt/POWERPC__Allow_to_enable_RT.patch
new file mode 100644
index 0000000000..797e2084b4
--- /dev/null
+++ b/debian/patches-rt/POWERPC__Allow_to_enable_RT.patch
@@ -0,0 +1,35 @@
+Subject: POWERPC: Allow to enable RT
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri Oct 11 13:14:41 2019 +0200
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+Allow to select RT.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+
+---
+ arch/powerpc/Kconfig | 2 ++
+ 1 file changed, 2 insertions(+)
+---
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -166,6 +166,7 @@ config PPC
+ select ARCH_STACKWALK
+ select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC_BOOK3S || PPC_8xx || 40x
++ select ARCH_SUPPORTS_RT if HAVE_POSIX_CPU_TIMERS_TASK_WORK
+ select ARCH_USE_BUILTIN_BSWAP
+ select ARCH_USE_CMPXCHG_LOCKREF if PPC64
+ select ARCH_USE_MEMTEST
+@@ -268,6 +269,7 @@ config PPC
+ select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RELIABLE_STACKTRACE
++ select HAVE_POSIX_CPU_TIMERS_TASK_WORK if !KVM
+ select HAVE_RSEQ
+ select HAVE_SETUP_PER_CPU_AREA if PPC64
+ select HAVE_SOFTIRQ_ON_OWN_STACK
diff --git a/debian/patches-rt/PREEMPT_AUTO.patch b/debian/patches-rt/PREEMPT_AUTO.patch
new file mode 100644
index 0000000000..859dadc20c
--- /dev/null
+++ b/debian/patches-rt/PREEMPT_AUTO.patch
@@ -0,0 +1,779 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 23 Sep 2023 03:11:05 +0200
+Subject: [PATCH] sched: define TIF_ALLOW_RESCHED
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+On Fri, Sep 22 2023 at 00:55, Thomas Gleixner wrote:
+> On Thu, Sep 21 2023 at 09:00, Linus Torvalds wrote:
+>> That said - I think as a proof of concept and "look, with this we get
+>> the expected scheduling event counts", that patch is perfect. I think
+>> you more than proved the concept.
+>
+> There is certainly quite some analyis work to do to make this a one to
+> one replacement.
+>
+> With a handful of benchmarks the PoC (tweaked with some obvious fixes)
+> is pretty much on par with the current mainline variants (NONE/FULL),
+> but the memtier benchmark makes a massive dent.
+>
+> It sports a whopping 10% regression with the LAZY mode versus the mainline
+> NONE model. Non-LAZY and FULL behave unsurprisingly in the same way.
+>
+> That benchmark is really sensitive to the preemption model. With current
+> mainline (DYNAMIC_PREEMPT enabled) the preempt=FULL model has ~20%
+> performance drop versus preempt=NONE.
+
+That 20% was a tired pilot error. The real number is in the 5% ballpark.
+
+> I have no clue what's going on there yet, but that shows that there is
+> obviously quite some work ahead to get this sorted.
+
+It took some head scratching to figure that out. The initial fix broke
+the handling of the hog issue, i.e. the problem that Ankur tried to
+solve, but I hacked up a "solution" for that too.
+
+With that the memtier benchmark is roughly back to the mainline numbers,
+but my throughput benchmark know how is pretty close to zero, so that
+should be looked at by people who actually understand these things.
+
+Likewise the hog prevention is just at the PoC level and clearly beyond
+my knowledge of scheduler details: It unconditionally forces a
+reschedule when the looping task is not responding to a lazy reschedule
+request before the next tick. IOW it forces a reschedule on the second
+tick, which is obviously different from the cond_resched()/might_sleep()
+behaviour.
+
+The changes vs. the original PoC aside of the bug and thinko fixes:
+
+ 1) A hack to utilize the TRACE_FLAG_IRQS_NOSUPPORT flag to trace the
+ lazy preempt bit as the trace_entry::flags field is full already.
+
+ That obviously breaks the tracer ABI, but if we go there then
+ this needs to be fixed. Steven?
+
+ 2) debugfs file to validate that loops can be force preempted w/o
+ cond_resched()
+
+ The usage is:
+
+ # taskset -c 1 bash
+ # echo 1 > /sys/kernel/debug/sched/hog &
+ # echo 1 > /sys/kernel/debug/sched/hog &
+ # echo 1 > /sys/kernel/debug/sched/hog &
+
+ top shows ~33% CPU for each of the hogs and tracing confirms that
+ the crude hack in the scheduler tick works:
+
+ bash-4559 [001] dlh2. 2253.331202: resched_curr <-__update_curr
+ bash-4560 [001] dlh2. 2253.340199: resched_curr <-__update_curr
+ bash-4561 [001] dlh2. 2253.346199: resched_curr <-__update_curr
+ bash-4559 [001] dlh2. 2253.353199: resched_curr <-__update_curr
+ bash-4561 [001] dlh2. 2253.358199: resched_curr <-__update_curr
+ bash-4560 [001] dlh2. 2253.370202: resched_curr <-__update_curr
+ bash-4559 [001] dlh2. 2253.378198: resched_curr <-__update_curr
+ bash-4561 [001] dlh2. 2253.389199: resched_curr <-__update_curr
+
+ The 'l' instead of the usual 'N' reflects that the lazy resched
+ bit is set. That makes __update_curr() invoke resched_curr()
+ instead of the lazy variant. resched_curr() sets TIF_NEED_RESCHED
+ and folds it into preempt_count so that preemption happens at the
+ next possible point, i.e. either in return from interrupt or at
+ the next preempt_enable().
+
+That's as much as I wanted to demonstrate and I'm not going to spend
+more cycles on it as I have already too many other things on flight and
+the resulting scheduler woes are clearly outside of my expertice.
+
+Though definitely I'm putting a permanent NAK in place for any attempts
+to duct tape the preempt=NONE model any further by sprinkling more
+cond*() and whatever warts around.
+
+Thanks,
+
+ tglx
+
+[tglx: s@CONFIG_PREEMPT_AUTO@CONFIG_PREEMPT_BUILD_AUTO@ ]
+
+Link: https://lore.kernel.org/all/87jzshhexi.ffs@tglx/
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/Kconfig | 1
+ arch/x86/include/asm/thread_info.h | 6 ++--
+ drivers/acpi/processor_idle.c | 2 -
+ include/linux/entry-common.h | 2 -
+ include/linux/entry-kvm.h | 2 -
+ include/linux/sched.h | 12 +++++---
+ include/linux/sched/idle.h | 8 ++---
+ include/linux/thread_info.h | 24 +++++++++++++++++
+ include/linux/trace_events.h | 8 ++---
+ kernel/Kconfig.preempt | 17 +++++++++++-
+ kernel/entry/common.c | 4 +-
+ kernel/entry/kvm.c | 2 -
+ kernel/sched/core.c | 50 +++++++++++++++++++++++++------------
+ kernel/sched/debug.c | 19 ++++++++++++++
+ kernel/sched/fair.c | 46 ++++++++++++++++++++++------------
+ kernel/sched/features.h | 2 +
+ kernel/sched/idle.c | 3 --
+ kernel/sched/sched.h | 1
+ kernel/trace/trace.c | 2 +
+ kernel/trace/trace_output.c | 16 ++++++++++-
+ 20 files changed, 171 insertions(+), 56 deletions(-)
+
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -272,6 +272,7 @@ config X86
+ select HAVE_STATIC_CALL
+ select HAVE_STATIC_CALL_INLINE if HAVE_OBJTOOL
+ select HAVE_PREEMPT_DYNAMIC_CALL
++ select HAVE_PREEMPT_AUTO
+ select HAVE_RSEQ
+ select HAVE_RUST if X86_64
+ select HAVE_SYSCALL_TRACEPOINTS
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -81,8 +81,9 @@ struct thread_info {
+ #define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
+ #define TIF_SIGPENDING 2 /* signal pending */
+ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+-#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
+-#define TIF_SSBD 5 /* Speculative store bypass disable */
++#define TIF_ARCH_RESCHED_LAZY 4 /* Lazy rescheduling */
++#define TIF_SINGLESTEP 5 /* reenable singlestep on user return*/
++#define TIF_SSBD 6 /* Speculative store bypass disable */
+ #define TIF_SPEC_IB 9 /* Indirect branch speculation mitigation */
+ #define TIF_SPEC_L1D_FLUSH 10 /* Flush L1D on mm switches (processes) */
+ #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
+@@ -104,6 +105,7 @@ struct thread_info {
+ #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
++#define _TIF_ARCH_RESCHED_LAZY (1 << TIF_ARCH_RESCHED_LAZY)
+ #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
+ #define _TIF_SSBD (1 << TIF_SSBD)
+ #define _TIF_SPEC_IB (1 << TIF_SPEC_IB)
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -108,7 +108,7 @@ static const struct dmi_system_id proces
+ */
+ static void __cpuidle acpi_safe_halt(void)
+ {
+- if (!tif_need_resched()) {
++ if (!need_resched()) {
+ raw_safe_halt();
+ raw_local_irq_disable();
+ }
+--- a/include/linux/entry-common.h
++++ b/include/linux/entry-common.h
+@@ -60,7 +60,7 @@
+ #define EXIT_TO_USER_MODE_WORK \
+ (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+ _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \
+- ARCH_EXIT_TO_USER_MODE_WORK)
++ _TIF_NEED_RESCHED_LAZY | ARCH_EXIT_TO_USER_MODE_WORK)
+
+ /**
+ * arch_enter_from_user_mode - Architecture specific sanity check for user mode regs
+--- a/include/linux/entry-kvm.h
++++ b/include/linux/entry-kvm.h
+@@ -18,7 +18,7 @@
+
+ #define XFER_TO_GUEST_MODE_WORK \
+ (_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL | \
+- _TIF_NOTIFY_RESUME | ARCH_XFER_TO_GUEST_MODE_WORK)
++ _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED_LAZY | ARCH_XFER_TO_GUEST_MODE_WORK)
+
+ struct kvm_vcpu;
+
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -2050,17 +2050,17 @@ static inline void update_tsk_thread_fla
+ update_ti_thread_flag(task_thread_info(tsk), flag, value);
+ }
+
+-static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
++static inline bool test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
+ {
+ return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
+ }
+
+-static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
++static inline bool test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
+ {
+ return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
+ }
+
+-static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
++static inline bool test_tsk_thread_flag(struct task_struct *tsk, int flag)
+ {
+ return test_ti_thread_flag(task_thread_info(tsk), flag);
+ }
+@@ -2073,9 +2073,11 @@ static inline void set_tsk_need_resched(
+ static inline void clear_tsk_need_resched(struct task_struct *tsk)
+ {
+ clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
++ if (IS_ENABLED(CONFIG_PREEMPT_BUILD_AUTO))
++ clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED_LAZY);
+ }
+
+-static inline int test_tsk_need_resched(struct task_struct *tsk)
++static inline bool test_tsk_need_resched(struct task_struct *tsk)
+ {
+ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
+ }
+@@ -2256,7 +2258,7 @@ static inline int rwlock_needbreak(rwloc
+
+ static __always_inline bool need_resched(void)
+ {
+- return unlikely(tif_need_resched());
++ return unlikely(tif_need_resched_lazy() || tif_need_resched());
+ }
+
+ /*
+--- a/include/linux/sched/idle.h
++++ b/include/linux/sched/idle.h
+@@ -63,7 +63,7 @@ static __always_inline bool __must_check
+ */
+ smp_mb__after_atomic();
+
+- return unlikely(tif_need_resched());
++ return unlikely(need_resched());
+ }
+
+ static __always_inline bool __must_check current_clr_polling_and_test(void)
+@@ -76,7 +76,7 @@ static __always_inline bool __must_check
+ */
+ smp_mb__after_atomic();
+
+- return unlikely(tif_need_resched());
++ return unlikely(need_resched());
+ }
+
+ #else
+@@ -85,11 +85,11 @@ static inline void __current_clr_polling
+
+ static inline bool __must_check current_set_polling_and_test(void)
+ {
+- return unlikely(tif_need_resched());
++ return unlikely(need_resched());
+ }
+ static inline bool __must_check current_clr_polling_and_test(void)
+ {
+- return unlikely(tif_need_resched());
++ return unlikely(need_resched());
+ }
+ #endif
+
+--- a/include/linux/thread_info.h
++++ b/include/linux/thread_info.h
+@@ -59,6 +59,16 @@ enum syscall_work_bit {
+
+ #include <asm/thread_info.h>
+
++#ifdef CONFIG_PREEMPT_BUILD_AUTO
++# define TIF_NEED_RESCHED_LAZY TIF_ARCH_RESCHED_LAZY
++# define _TIF_NEED_RESCHED_LAZY _TIF_ARCH_RESCHED_LAZY
++# define TIF_NEED_RESCHED_LAZY_OFFSET (TIF_NEED_RESCHED_LAZY - TIF_NEED_RESCHED)
++#else
++# define TIF_NEED_RESCHED_LAZY TIF_NEED_RESCHED
++# define _TIF_NEED_RESCHED_LAZY _TIF_NEED_RESCHED
++# define TIF_NEED_RESCHED_LAZY_OFFSET 0
++#endif
++
+ #ifdef __KERNEL__
+
+ #ifndef arch_set_restart_data
+@@ -185,6 +195,13 @@ static __always_inline bool tif_need_res
+ (unsigned long *)(&current_thread_info()->flags));
+ }
+
++static __always_inline bool tif_need_resched_lazy(void)
++{
++ return IS_ENABLED(CONFIG_PREEMPT_BUILD_AUTO) &&
++ arch_test_bit(TIF_NEED_RESCHED_LAZY,
++ (unsigned long *)(&current_thread_info()->flags));
++}
++
+ #else
+
+ static __always_inline bool tif_need_resched(void)
+@@ -193,6 +210,13 @@ static __always_inline bool tif_need_res
+ (unsigned long *)(&current_thread_info()->flags));
+ }
+
++static __always_inline bool tif_need_resched_lazy(void)
++{
++ return IS_ENABLED(CONFIG_PREEMPT_BUILD_AUTO) &&
++ test_bit(TIF_NEED_RESCHED_LAZY,
++ (unsigned long *)(&current_thread_info()->flags));
++}
++
+ #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
+
+ #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -178,8 +178,8 @@ unsigned int tracing_gen_ctx_irq_test(un
+
+ enum trace_flag_type {
+ TRACE_FLAG_IRQS_OFF = 0x01,
+- TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
+- TRACE_FLAG_NEED_RESCHED = 0x04,
++ TRACE_FLAG_NEED_RESCHED = 0x02,
++ TRACE_FLAG_NEED_RESCHED_LAZY = 0x04,
+ TRACE_FLAG_HARDIRQ = 0x08,
+ TRACE_FLAG_SOFTIRQ = 0x10,
+ TRACE_FLAG_PREEMPT_RESCHED = 0x20,
+@@ -205,11 +205,11 @@ static inline unsigned int tracing_gen_c
+
+ static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
+ {
+- return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
++ return tracing_gen_ctx_irq_test(0);
+ }
+ static inline unsigned int tracing_gen_ctx(void)
+ {
+- return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
++ return tracing_gen_ctx_irq_test(0);
+ }
+ #endif
+
+--- a/kernel/Kconfig.preempt
++++ b/kernel/Kconfig.preempt
+@@ -11,6 +11,13 @@ config PREEMPT_BUILD
+ select PREEMPTION
+ select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
+
++config PREEMPT_BUILD_AUTO
++ bool
++ select PREEMPT_BUILD
++
++config HAVE_PREEMPT_AUTO
++ bool
++
+ choice
+ prompt "Preemption Model"
+ default PREEMPT_NONE
+@@ -67,9 +74,17 @@ config PREEMPT
+ embedded system with latency requirements in the milliseconds
+ range.
+
++config PREEMPT_AUTO
++ bool "Automagic preemption mode with runtime tweaking support"
++ depends on HAVE_PREEMPT_AUTO
++ select PREEMPT_BUILD_AUTO
++ help
++ Add some sensible blurb here
++
+ config PREEMPT_RT
+ bool "Fully Preemptible Kernel (Real-Time)"
+ depends on EXPERT && ARCH_SUPPORTS_RT
++ select PREEMPT_BUILD_AUTO if HAVE_PREEMPT_AUTO
+ select PREEMPTION
+ help
+ This option turns the kernel into a real-time kernel by replacing
+@@ -95,7 +110,7 @@ config PREEMPTION
+
+ config PREEMPT_DYNAMIC
+ bool "Preemption behaviour defined on boot"
+- depends on HAVE_PREEMPT_DYNAMIC && !PREEMPT_RT
++ depends on HAVE_PREEMPT_DYNAMIC && !PREEMPT_RT && !PREEMPT_AUTO
+ select JUMP_LABEL if HAVE_PREEMPT_DYNAMIC_KEY
+ select PREEMPT_BUILD
+ default y if HAVE_PREEMPT_DYNAMIC_CALL
+--- a/kernel/entry/common.c
++++ b/kernel/entry/common.c
+@@ -155,7 +155,7 @@ static unsigned long exit_to_user_mode_l
+
+ local_irq_enable_exit_to_user(ti_work);
+
+- if (ti_work & _TIF_NEED_RESCHED)
++ if (ti_work & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY))
+ schedule();
+
+ if (ti_work & _TIF_UPROBE)
+@@ -385,7 +385,7 @@ void raw_irqentry_exit_cond_resched(void
+ rcu_irq_exit_check_preempt();
+ if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
+ WARN_ON_ONCE(!on_thread_stack());
+- if (need_resched())
++ if (test_tsk_need_resched(current))
+ preempt_schedule_irq();
+ }
+ }
+--- a/kernel/entry/kvm.c
++++ b/kernel/entry/kvm.c
+@@ -13,7 +13,7 @@ static int xfer_to_guest_mode_work(struc
+ return -EINTR;
+ }
+
+- if (ti_work & _TIF_NEED_RESCHED)
++ if (ti_work & (_TIF_NEED_RESCHED | TIF_NEED_RESCHED_LAZY))
+ schedule();
+
+ if (ti_work & _TIF_NOTIFY_RESUME)
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -898,14 +898,15 @@ static inline void hrtick_rq_init(struct
+
+ #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
+ /*
+- * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
++ * Atomically set TIF_NEED_RESCHED[_LAZY] and test for TIF_POLLING_NRFLAG,
+ * this avoids any races wrt polling state changes and thereby avoids
+ * spurious IPIs.
+ */
+-static inline bool set_nr_and_not_polling(struct task_struct *p)
++static inline bool set_nr_and_not_polling(struct task_struct *p, int tif_bit)
+ {
+ struct thread_info *ti = task_thread_info(p);
+- return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
++
++ return !(fetch_or(&ti->flags, 1 << tif_bit) & _TIF_POLLING_NRFLAG);
+ }
+
+ /*
+@@ -922,7 +923,7 @@ static bool set_nr_if_polling(struct tas
+ for (;;) {
+ if (!(val & _TIF_POLLING_NRFLAG))
+ return false;
+- if (val & _TIF_NEED_RESCHED)
++ if (val & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY))
+ return true;
+ if (try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED))
+ break;
+@@ -931,9 +932,9 @@ static bool set_nr_if_polling(struct tas
+ }
+
+ #else
+-static inline bool set_nr_and_not_polling(struct task_struct *p)
++static inline bool set_nr_and_not_polling(struct task_struct *p, int tif_bit)
+ {
+- set_tsk_need_resched(p);
++ set_tsk_thread_flag(p, tif_bit);
+ return true;
+ }
+
+@@ -1038,28 +1039,47 @@ void wake_up_q(struct wake_q_head *head)
+ * might also involve a cross-CPU call to trigger the scheduler on
+ * the target CPU.
+ */
+-void resched_curr(struct rq *rq)
++static void __resched_curr(struct rq *rq, int lazy)
+ {
++ int cpu, tif_bit = TIF_NEED_RESCHED + lazy;
+ struct task_struct *curr = rq->curr;
+- int cpu;
+
+ lockdep_assert_rq_held(rq);
+
+- if (test_tsk_need_resched(curr))
++ if (unlikely(test_tsk_thread_flag(curr, tif_bit)))
+ return;
+
+ cpu = cpu_of(rq);
+
+ if (cpu == smp_processor_id()) {
+- set_tsk_need_resched(curr);
+- set_preempt_need_resched();
++ set_tsk_thread_flag(curr, tif_bit);
++ if (!lazy)
++ set_preempt_need_resched();
+ return;
+ }
+
+- if (set_nr_and_not_polling(curr))
+- smp_send_reschedule(cpu);
+- else
++ if (set_nr_and_not_polling(curr, tif_bit)) {
++ if (!lazy)
++ smp_send_reschedule(cpu);
++ } else {
+ trace_sched_wake_idle_without_ipi(cpu);
++ }
++}
++
++void resched_curr(struct rq *rq)
++{
++ __resched_curr(rq, 0);
++}
++
++void resched_curr_lazy(struct rq *rq)
++{
++ int lazy = IS_ENABLED(CONFIG_PREEMPT_BUILD_AUTO) && !sched_feat(FORCE_NEED_RESCHED) ?
++ TIF_NEED_RESCHED_LAZY_OFFSET : 0;
++
++ if (lazy && unlikely(test_tsk_thread_flag(rq->curr, TIF_NEED_RESCHED)))
++ return;
++
++ __resched_curr(rq, lazy);
+ }
+
+ void resched_cpu(int cpu)
+@@ -1132,7 +1152,7 @@ static void wake_up_idle_cpu(int cpu)
+ if (cpu == smp_processor_id())
+ return;
+
+- if (set_nr_and_not_polling(rq->idle))
++ if (set_nr_and_not_polling(rq->idle, TIF_NEED_RESCHED))
+ smp_send_reschedule(cpu);
+ else
+ trace_sched_wake_idle_without_ipi(cpu);
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -333,6 +333,23 @@ static const struct file_operations sche
+ .release = seq_release,
+ };
+
++static ssize_t sched_hog_write(struct file *filp, const char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ unsigned long end = jiffies + 60 * HZ;
++
++ for (; time_before(jiffies, end) && !signal_pending(current);)
++ cpu_relax();
++
++ return cnt;
++}
++
++static const struct file_operations sched_hog_fops = {
++ .write = sched_hog_write,
++ .open = simple_open,
++ .llseek = default_llseek,
++};
++
+ static struct dentry *debugfs_sched;
+
+ static __init int sched_init_debug(void)
+@@ -374,6 +391,8 @@ static __init int sched_init_debug(void)
+
+ debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
+
++ debugfs_create_file("hog", 0200, debugfs_sched, NULL, &sched_hog_fops);
++
+ return 0;
+ }
+ late_initcall(sched_init_debug);
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -1016,8 +1016,10 @@ static void clear_buddies(struct cfs_rq
+ * XXX: strictly: vd_i += N*r_i/w_i such that: vd_i > ve_i
+ * this is probably good enough.
+ */
+-static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
++static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se, bool tick)
+ {
++ struct rq *rq = rq_of(cfs_rq);
++
+ if ((s64)(se->vruntime - se->deadline) < 0)
+ return;
+
+@@ -1036,10 +1038,19 @@ static void update_deadline(struct cfs_r
+ /*
+ * The task has consumed its request, reschedule.
+ */
+- if (cfs_rq->nr_running > 1) {
+- resched_curr(rq_of(cfs_rq));
+- clear_buddies(cfs_rq, se);
++ if (cfs_rq->nr_running < 2)
++ return;
++
++ if (!IS_ENABLED(CONFIG_PREEMPT_BUILD_AUTO) || sched_feat(FORCE_NEED_RESCHED)) {
++ resched_curr(rq);
++ } else {
++ /* Did the task ignore the lazy reschedule request? */
++ if (tick && test_tsk_thread_flag(rq->curr, TIF_NEED_RESCHED_LAZY))
++ resched_curr(rq);
++ else
++ resched_curr_lazy(rq);
+ }
++ clear_buddies(cfs_rq, se);
+ }
+
+ #include "pelt.h"
+@@ -1147,7 +1158,7 @@ static void update_tg_load_avg(struct cf
+ /*
+ * Update the current task's runtime statistics.
+ */
+-static void update_curr(struct cfs_rq *cfs_rq)
++static void __update_curr(struct cfs_rq *cfs_rq, bool tick)
+ {
+ struct sched_entity *curr = cfs_rq->curr;
+ u64 now = rq_clock_task(rq_of(cfs_rq));
+@@ -1174,7 +1185,7 @@ static void update_curr(struct cfs_rq *c
+ schedstat_add(cfs_rq->exec_clock, delta_exec);
+
+ curr->vruntime += calc_delta_fair(delta_exec, curr);
+- update_deadline(cfs_rq, curr);
++ update_deadline(cfs_rq, curr, tick);
+ update_min_vruntime(cfs_rq);
+
+ if (entity_is_task(curr)) {
+@@ -1188,6 +1199,11 @@ static void update_curr(struct cfs_rq *c
+ account_cfs_rq_runtime(cfs_rq, delta_exec);
+ }
+
++static inline void update_curr(struct cfs_rq *cfs_rq)
++{
++ __update_curr(cfs_rq, false);
++}
++
+ static void update_curr_fair(struct rq *rq)
+ {
+ update_curr(cfs_rq_of(&rq->curr->se));
+@@ -5398,7 +5414,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+ /*
+ * Update run-time statistics of the 'current'.
+ */
+- update_curr(cfs_rq);
++ __update_curr(cfs_rq, true);
+
+ /*
+ * Ensure that runnable average is periodically updated.
+@@ -5412,7 +5428,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+ * validating it and just reschedule.
+ */
+ if (queued) {
+- resched_curr(rq_of(cfs_rq));
++ resched_curr_lazy(rq_of(cfs_rq));
+ return;
+ }
+ /*
+@@ -5558,7 +5574,7 @@ static void __account_cfs_rq_runtime(str
+ * hierarchy can be throttled
+ */
+ if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
+- resched_curr(rq_of(cfs_rq));
++ resched_curr_lazy(rq_of(cfs_rq));
+ }
+
+ static __always_inline
+@@ -5818,7 +5834,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cf
+
+ /* Determine whether we need to wake up potentially idle CPU: */
+ if (rq->curr == rq->idle && rq->cfs.nr_running)
+- resched_curr(rq);
++ resched_curr_lazy(rq);
+ }
+
+ #ifdef CONFIG_SMP
+@@ -6523,7 +6539,7 @@ static void hrtick_start_fair(struct rq
+
+ if (delta < 0) {
+ if (task_current(rq, p))
+- resched_curr(rq);
++ resched_curr_lazy(rq);
+ return;
+ }
+ hrtick_start(rq, delta);
+@@ -8175,7 +8191,7 @@ static void check_preempt_wakeup(struct
+ * prevents us from potentially nominating it as a false LAST_BUDDY
+ * below.
+ */
+- if (test_tsk_need_resched(curr))
++ if (need_resched())
+ return;
+
+ /* Idle tasks are by definition preempted by non-idle tasks. */
+@@ -8217,7 +8233,7 @@ static void check_preempt_wakeup(struct
+ return;
+
+ preempt:
+- resched_curr(rq);
++ resched_curr_lazy(rq);
+ }
+
+ #ifdef CONFIG_SMP
+@@ -12374,7 +12390,7 @@ static inline void task_tick_core(struct
+ */
+ if (rq->core->core_forceidle_count && rq->cfs.nr_running == 1 &&
+ __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
+- resched_curr(rq);
++ resched_curr_lazy(rq);
+ }
+
+ /*
+@@ -12539,7 +12555,7 @@ prio_changed_fair(struct rq *rq, struct
+ */
+ if (task_current(rq, p)) {
+ if (p->prio > oldprio)
+- resched_curr(rq);
++ resched_curr_lazy(rq);
+ } else
+ check_preempt_curr(rq, p, 0);
+ }
+--- a/kernel/sched/features.h
++++ b/kernel/sched/features.h
+@@ -89,3 +89,5 @@ SCHED_FEAT(UTIL_EST_FASTUP, true)
+ SCHED_FEAT(LATENCY_WARN, false)
+
+ SCHED_FEAT(HZ_BW, true)
++
++SCHED_FEAT(FORCE_NEED_RESCHED, false)
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -57,8 +57,7 @@ static noinline int __cpuidle cpu_idle_p
+ ct_cpuidle_enter();
+
+ raw_local_irq_enable();
+- while (!tif_need_resched() &&
+- (cpu_idle_force_poll || tick_check_broadcast_expired()))
++ while (!need_resched() && (cpu_idle_force_poll || tick_check_broadcast_expired()))
+ cpu_relax();
+ raw_local_irq_disable();
+
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -2435,6 +2435,7 @@ extern void init_sched_fair_class(void);
+ extern void reweight_task(struct task_struct *p, int prio);
+
+ extern void resched_curr(struct rq *rq);
++extern void resched_curr_lazy(struct rq *rq);
+ extern void resched_cpu(int cpu);
+
+ extern struct rt_bandwidth def_rt_bandwidth;
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2722,6 +2722,8 @@ unsigned int tracing_gen_ctx_irq_test(un
+
+ if (tif_need_resched())
+ trace_flags |= TRACE_FLAG_NEED_RESCHED;
++ if (tif_need_resched_lazy())
++ trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY;
+ if (test_preempt_need_resched())
+ trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
+ return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
+--- a/kernel/trace/trace_output.c
++++ b/kernel/trace/trace_output.c
+@@ -460,17 +460,29 @@ int trace_print_lat_fmt(struct trace_seq
+ (entry->flags & TRACE_FLAG_IRQS_OFF && bh_off) ? 'D' :
+ (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
+ bh_off ? 'b' :
+- (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :
++ !IS_ENABLED(CONFIG_TRACE_IRQFLAGS_SUPPORT) ? 'X' :
+ '.';
+
+- switch (entry->flags & (TRACE_FLAG_NEED_RESCHED |
++ switch (entry->flags & (TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY |
+ TRACE_FLAG_PREEMPT_RESCHED)) {
++ case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY | TRACE_FLAG_PREEMPT_RESCHED:
++ need_resched = 'B';
++ break;
+ case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
+ need_resched = 'N';
+ break;
++ case TRACE_FLAG_NEED_RESCHED_LAZY | TRACE_FLAG_PREEMPT_RESCHED:
++ need_resched = 'L';
++ break;
++ case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY:
++ need_resched = 'b';
++ break;
+ case TRACE_FLAG_NEED_RESCHED:
+ need_resched = 'n';
+ break;
++ case TRACE_FLAG_NEED_RESCHED_LAZY:
++ need_resched = 'l';
++ break;
+ case TRACE_FLAG_PREEMPT_RESCHED:
+ need_resched = 'p';
+ break;
diff --git a/debian/patches-rt/RISC-V-Probe-misaligned-access-speed-in-parallel.patch b/debian/patches-rt/RISC-V-Probe-misaligned-access-speed-in-parallel.patch
new file mode 100644
index 0000000000..78e57b07a5
--- /dev/null
+++ b/debian/patches-rt/RISC-V-Probe-misaligned-access-speed-in-parallel.patch
@@ -0,0 +1,197 @@
+From: Evan Green <evan@rivosinc.com>
+Date: Mon, 6 Nov 2023 14:58:55 -0800
+Subject: [PATCH] RISC-V: Probe misaligned access speed in parallel
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Probing for misaligned access speed takes about 0.06 seconds. On a
+system with 64 cores, doing this in smp_callin() means it's done
+serially, extending boot time by 3.8 seconds. That's a lot of boot time.
+
+Instead of measuring each CPU serially, let's do the measurements on
+all CPUs in parallel. If we disable preemption on all CPUs, the
+jiffies stop ticking, so we can do this in stages of 1) everybody
+except core 0, then 2) core 0. The allocations are all done outside of
+on_each_cpu() to avoid calling alloc_pages() with interrupts disabled.
+
+For hotplugged CPUs that come in after the boot time measurement,
+register CPU hotplug callbacks, and do the measurement there. Interrupts
+are enabled in those callbacks, so they're fine to do alloc_pages() in.
+
+[bigeasy: merge the individual patches into the final step.]
+
+Reported-by: Jisheng Zhang <jszhang@kernel.org>
+Closes: https://lore.kernel.org/all/mhng-9359993d-6872-4134-83ce-c97debe1cf9a@palmer-ri-x1c9/T/#mae9b8f40016f9df428829d33360144dc5026bcbf
+Fixes: 584ea6564bca ("RISC-V: Probe for unaligned access speed")
+Signed-off-by: Evan Green <evan@rivosinc.com>
+Link: https://lore.kernel.org/r/20231106225855.3121724-1-evan@rivosinc.com
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/riscv/include/asm/cpufeature.h | 2
+ arch/riscv/kernel/cpufeature.c | 90 ++++++++++++++++++++++++++++++------
+ arch/riscv/kernel/smpboot.c | 1
+ 3 files changed, 76 insertions(+), 17 deletions(-)
+
+--- a/arch/riscv/include/asm/cpufeature.h
++++ b/arch/riscv/include/asm/cpufeature.h
+@@ -30,6 +30,4 @@ DECLARE_PER_CPU(long, misaligned_access_
+ /* Per-cpu ISA extensions. */
+ extern struct riscv_isainfo hart_isa[NR_CPUS];
+
+-void check_unaligned_access(int cpu);
+-
+ #endif
+--- a/arch/riscv/kernel/cpufeature.c
++++ b/arch/riscv/kernel/cpufeature.c
+@@ -8,6 +8,7 @@
+
+ #include <linux/acpi.h>
+ #include <linux/bitmap.h>
++#include <linux/cpuhotplug.h>
+ #include <linux/ctype.h>
+ #include <linux/log2.h>
+ #include <linux/memory.h>
+@@ -29,6 +30,7 @@
+
+ #define MISALIGNED_ACCESS_JIFFIES_LG2 1
+ #define MISALIGNED_BUFFER_SIZE 0x4000
++#define MISALIGNED_BUFFER_ORDER get_order(MISALIGNED_BUFFER_SIZE)
+ #define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80)
+
+ unsigned long elf_hwcap __read_mostly;
+@@ -556,24 +558,19 @@ unsigned long riscv_get_elf_hwcap(void)
+ return hwcap;
+ }
+
+-void check_unaligned_access(int cpu)
++static int check_unaligned_access(void *param)
+ {
++ int cpu = smp_processor_id();
+ u64 start_cycles, end_cycles;
+ u64 word_cycles;
+ u64 byte_cycles;
+ int ratio;
+ unsigned long start_jiffies, now;
+- struct page *page;
++ struct page *page = param;
+ void *dst;
+ void *src;
+ long speed = RISCV_HWPROBE_MISALIGNED_SLOW;
+
+- page = alloc_pages(GFP_NOWAIT, get_order(MISALIGNED_BUFFER_SIZE));
+- if (!page) {
+- pr_warn("Can't alloc pages to measure memcpy performance");
+- return;
+- }
+-
+ /* Make an unaligned destination buffer. */
+ dst = (void *)((unsigned long)page_address(page) | 0x1);
+ /* Unalign src as well, but differently (off by 1 + 2 = 3). */
+@@ -626,7 +623,7 @@ void check_unaligned_access(int cpu)
+ pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned access speed\n",
+ cpu);
+
+- goto out;
++ return 0;
+ }
+
+ if (word_cycles < byte_cycles)
+@@ -640,18 +637,83 @@ void check_unaligned_access(int cpu)
+ (speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");
+
+ per_cpu(misaligned_access_speed, cpu) = speed;
++ return 0;
++}
+
+-out:
+- __free_pages(page, get_order(MISALIGNED_BUFFER_SIZE));
++static void check_unaligned_access_nonboot_cpu(void *param)
++{
++ unsigned int cpu = smp_processor_id();
++ struct page **pages = param;
++
++ if (smp_processor_id() != 0)
++ check_unaligned_access(pages[cpu]);
++}
++
++static int riscv_online_cpu(unsigned int cpu)
++{
++ static struct page *buf;
++
++ /* We are already set since the last check */
++ if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
++ return 0;
++
++ buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
++ if (!buf) {
++ pr_warn("Allocation failure, not measuring misaligned performance\n");
++ return -ENOMEM;
++ }
++
++ check_unaligned_access(buf);
++ __free_pages(buf, MISALIGNED_BUFFER_ORDER);
++ return 0;
+ }
+
+-static int check_unaligned_access_boot_cpu(void)
++/* Measure unaligned access on all CPUs present at boot in parallel. */
++static int check_unaligned_access_all_cpus(void)
+ {
+- check_unaligned_access(0);
++ unsigned int cpu;
++ unsigned int cpu_count = num_possible_cpus();
++ struct page **bufs = kzalloc(cpu_count * sizeof(struct page *),
++ GFP_KERNEL);
++
++ if (!bufs) {
++ pr_warn("Allocation failure, not measuring misaligned performance\n");
++ return 0;
++ }
++
++ /*
++ * Allocate separate buffers for each CPU so there's no fighting over
++ * cache lines.
++ */
++ for_each_cpu(cpu, cpu_online_mask) {
++ bufs[cpu] = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
++ if (!bufs[cpu]) {
++ pr_warn("Allocation failure, not measuring misaligned performance\n");
++ goto out;
++ }
++ }
++
++ /* Check everybody except 0, who stays behind to tend jiffies. */
++ on_each_cpu(check_unaligned_access_nonboot_cpu, bufs, 1);
++
++ /* Check core 0. */
++ smp_call_on_cpu(0, check_unaligned_access, bufs[0], true);
++
++ /* Setup hotplug callback for any new CPUs that come online. */
++ cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
++ riscv_online_cpu, NULL);
++
++out:
++ for_each_cpu(cpu, cpu_online_mask) {
++ if (bufs[cpu])
++ __free_pages(bufs[cpu], MISALIGNED_BUFFER_ORDER);
++ }
++
++ kfree(bufs);
+ return 0;
+ }
+
+-arch_initcall(check_unaligned_access_boot_cpu);
++arch_initcall(check_unaligned_access_all_cpus);
+
+ #ifdef CONFIG_RISCV_ALTERNATIVE
+ /*
+--- a/arch/riscv/kernel/smpboot.c
++++ b/arch/riscv/kernel/smpboot.c
+@@ -246,7 +246,6 @@ asmlinkage __visible void smp_callin(voi
+
+ numa_add_cpu(curr_cpuid);
+ set_cpu_online(curr_cpuid, 1);
+- check_unaligned_access(curr_cpuid);
+
+ if (has_vector()) {
+ if (riscv_v_setup_vsize())
diff --git a/debian/patches-rt/Revert-drm-i915-Depend-on-PREEMPT_RT.patch b/debian/patches-rt/Revert-drm-i915-Depend-on-PREEMPT_RT.patch
new file mode 100644
index 0000000000..dc62e007e8
--- /dev/null
+++ b/debian/patches-rt/Revert-drm-i915-Depend-on-PREEMPT_RT.patch
@@ -0,0 +1,23 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 21 Feb 2022 17:59:14 +0100
+Subject: [PATCH] Revert "drm/i915: Depend on !PREEMPT_RT."
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Once the known issues are addressed, it should be safe to enable the
+driver.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/i915/Kconfig | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/Kconfig
++++ b/drivers/gpu/drm/i915/Kconfig
+@@ -3,7 +3,6 @@ config DRM_I915
+ tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
+ depends on DRM
+ depends on X86 && PCI
+- depends on !PREEMPT_RT
+ select INTEL_GTT if X86
+ select INTERVAL_TREE
+ # we need shmfs for the swappable backing store, and in particular
diff --git a/debian/patches-rt/drm-i915-Do-not-disable-preemption-for-resets.patch b/debian/patches-rt/drm-i915-Do-not-disable-preemption-for-resets.patch
new file mode 100644
index 0000000000..5869667030
--- /dev/null
+++ b/debian/patches-rt/drm-i915-Do-not-disable-preemption-for-resets.patch
@@ -0,0 +1,99 @@
+From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Date: Wed, 5 Jul 2023 10:30:25 +0100
+Subject: [PATCH] drm/i915: Do not disable preemption for resets
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Commit ade8a0f59844 ("drm/i915: Make all GPU resets atomic") added a
+preempt disable section over the hardware reset callback to prepare the
+driver for being able to reset from atomic contexts.
+
+In retrospect I can see that the work item at a time was about removing
+the struct mutex from the reset path. Code base also briefly entertained
+the idea of doing the reset under stop_machine in order to serialize
+userspace mmap and temporary glitch in the fence registers (see
+eb8d0f5af4ec ("drm/i915: Remove GPU reset dependence on struct_mutex"),
+but that never materialized and was soon removed in 2caffbf11762
+("drm/i915: Revoke mmaps and prevent access to fence registers across
+reset") and replaced with a SRCU based solution.
+
+As such, as far as I can see, today we still have a requirement that
+resets must not sleep (invoked from submission tasklets), but no need to
+support invoking them from a truly atomic context.
+
+Given that the preemption section is problematic on RT kernels, since the
+uncore lock becomes a sleeping lock and so is invalid in such section,
+lets try and remove it. Potential downside is that our short waits on GPU
+to complete the reset may get extended if CPU scheduling interferes, but
+in practice that probably isn't a deal breaker.
+
+In terms of mechanics, since the preemption disabled block is being
+removed we just need to replace a few of the wait_for_atomic macros into
+busy looping versions which will work (and not complain) when called from
+non-atomic sections.
+
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Cc: Chris Wilson <chris.p.wilson@intel.com>
+Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
+Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20230705093025.3689748-1-tvrtko.ursulin@linux.intel.com
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/i915/gt/intel_reset.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gt/intel_reset.c
++++ b/drivers/gpu/drm/i915/gt/intel_reset.c
+@@ -164,13 +164,13 @@ static int i915_do_reset(struct intel_gt
+ /* Assert reset for at least 20 usec, and wait for acknowledgement. */
+ pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+ udelay(50);
+- err = wait_for_atomic(i915_in_reset(pdev), 50);
++ err = _wait_for_atomic(i915_in_reset(pdev), 50, 0);
+
+ /* Clear the reset request. */
+ pci_write_config_byte(pdev, I915_GDRST, 0);
+ udelay(50);
+ if (!err)
+- err = wait_for_atomic(!i915_in_reset(pdev), 50);
++ err = _wait_for_atomic(!i915_in_reset(pdev), 50, 0);
+
+ return err;
+ }
+@@ -190,7 +190,7 @@ static int g33_do_reset(struct intel_gt
+ struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
+
+ pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+- return wait_for_atomic(g4x_reset_complete(pdev), 50);
++ return _wait_for_atomic(g4x_reset_complete(pdev), 50, 0);
+ }
+
+ static int g4x_do_reset(struct intel_gt *gt,
+@@ -207,7 +207,7 @@ static int g4x_do_reset(struct intel_gt
+
+ pci_write_config_byte(pdev, I915_GDRST,
+ GRDOM_MEDIA | GRDOM_RESET_ENABLE);
+- ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
++ ret = _wait_for_atomic(g4x_reset_complete(pdev), 50, 0);
+ if (ret) {
+ GT_TRACE(gt, "Wait for media reset failed\n");
+ goto out;
+@@ -215,7 +215,7 @@ static int g4x_do_reset(struct intel_gt
+
+ pci_write_config_byte(pdev, I915_GDRST,
+ GRDOM_RENDER | GRDOM_RESET_ENABLE);
+- ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
++ ret = _wait_for_atomic(g4x_reset_complete(pdev), 50, 0);
+ if (ret) {
+ GT_TRACE(gt, "Wait for render reset failed\n");
+ goto out;
+@@ -785,9 +785,7 @@ int __intel_gt_reset(struct intel_gt *gt
+ reset_mask = wa_14015076503_start(gt, engine_mask, !retry);
+
+ GT_TRACE(gt, "engine_mask=%x\n", reset_mask);
+- preempt_disable();
+ ret = reset(gt, reset_mask, retry);
+- preempt_enable();
+
+ wa_14015076503_end(gt, reset_mask);
+ }
diff --git a/debian/patches-rt/drm-i915-guc-Consider-also-RCU-depth-in-busy-loop.patch b/debian/patches-rt/drm-i915-guc-Consider-also-RCU-depth-in-busy-loop.patch
new file mode 100644
index 0000000000..0f030b42f2
--- /dev/null
+++ b/debian/patches-rt/drm-i915-guc-Consider-also-RCU-depth-in-busy-loop.patch
@@ -0,0 +1,29 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 3 Oct 2023 21:37:21 +0200
+Subject: [PATCH] drm/i915/guc: Consider also RCU depth in busy loop.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+intel_guc_send_busy_loop() looks at in_atomic() and irqs_disabled() to
+decide if it should busy-spin while waiting or if it may sleep.
+Both checks will report false on PREEMPT_RT if sleeping spinlocks are
+acquired leading to RCU splats while the function sleeps.
+
+Check also if RCU has been disabled.
+
+Reported-by: "John B. Wyatt IV" <jwyatt@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/i915/gt/uc/intel_guc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+@@ -317,7 +317,7 @@ static inline int intel_guc_send_busy_lo
+ {
+ int err;
+ unsigned int sleep_period_ms = 1;
+- bool not_atomic = !in_atomic() && !irqs_disabled();
++ bool not_atomic = !in_atomic() && !irqs_disabled() && !rcu_preempt_depth();
+
+ /*
+ * FIXME: Have caller pass in if we are in an atomic context to avoid
diff --git a/debian/patches-rt/net-Avoid-the-IPI-to-free-the.patch b/debian/patches-rt/net-Avoid-the-IPI-to-free-the.patch
new file mode 100644
index 0000000000..6005e7a2e6
--- /dev/null
+++ b/debian/patches-rt/net-Avoid-the-IPI-to-free-the.patch
@@ -0,0 +1,119 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 15 Aug 2022 17:29:50 +0200
+Subject: [PATCH] net: Avoid the IPI to free the
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+skb_attempt_defer_free() collects skbs, which were allocated on a
+remote CPU, on a per-CPU list. These skbs are either freed on that
+remote CPU once the CPU enters NET_RX or a remote IPI function is
+invoked to raise the NET_RX softirq if a threshold of pending skbs has
+been exceeded.
+This remote IPI can cause the wakeup of ksoftirqd on PREEMPT_RT if the
+remote CPU was idle. This is undesired because once the ksoftirqd
+is running it will acquire all pending softirqs and they will not be
+executed as part of the threaded interrupt until ksoftirqd goes idle
+again.
+
+To avoid all this, schedule the deferred clean up from a worker.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/netdevice.h | 4 ++++
+ net/core/dev.c | 39 ++++++++++++++++++++++++++++++---------
+ net/core/skbuff.c | 7 ++++++-
+ 3 files changed, 40 insertions(+), 10 deletions(-)
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -3258,7 +3258,11 @@ struct softnet_data {
+ int defer_count;
+ int defer_ipi_scheduled;
+ struct sk_buff *defer_list;
++#ifndef CONFIG_PREEMPT_RT
+ call_single_data_t defer_csd;
++#else
++ struct work_struct defer_work;
++#endif
+ };
+
+ static inline void input_queue_head_incr(struct softnet_data *sd)
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4702,15 +4702,6 @@ static void rps_trigger_softirq(void *da
+
+ #endif /* CONFIG_RPS */
+
+-/* Called from hardirq (IPI) context */
+-static void trigger_rx_softirq(void *data)
+-{
+- struct softnet_data *sd = data;
+-
+- __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+- smp_store_release(&sd->defer_ipi_scheduled, 0);
+-}
+-
+ /*
+ * After we queued a packet into sd->input_pkt_queue,
+ * we need to make sure this queue is serviced soon.
+@@ -6679,6 +6670,32 @@ static void skb_defer_free_flush(struct
+ }
+ }
+
++#ifndef CONFIG_PREEMPT_RT
++
++/* Called from hardirq (IPI) context */
++static void trigger_rx_softirq(void *data)
++{
++ struct softnet_data *sd = data;
++
++ __raise_softirq_irqoff(NET_RX_SOFTIRQ);
++ smp_store_release(&sd->defer_ipi_scheduled, 0);
++}
++
++#else
++
++static void trigger_rx_softirq(struct work_struct *defer_work)
++{
++ struct softnet_data *sd;
++
++ sd = container_of(defer_work, struct softnet_data, defer_work);
++ smp_store_release(&sd->defer_ipi_scheduled, 0);
++ local_bh_disable();
++ skb_defer_free_flush(sd);
++ local_bh_enable();
++}
++
++#endif
++
+ static int napi_threaded_poll(void *data)
+ {
+ struct napi_struct *napi = data;
+@@ -11603,7 +11620,11 @@ static int __init net_dev_init(void)
+ INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
+ sd->cpu = i;
+ #endif
++#ifndef CONFIG_PREEMPT_RT
+ INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
++#else
++ INIT_WORK(&sd->defer_work, trigger_rx_softirq);
++#endif
+ spin_lock_init(&sd->defer_lock);
+
+ init_gro_hash(&sd->backlog);
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -6841,8 +6841,13 @@ nodefer: __kfree_skb(skb);
+ /* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
+ * if we are unlucky enough (this seems very unlikely).
+ */
+- if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1))
++ if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
++#ifndef CONFIG_PREEMPT_RT
+ smp_call_function_single_async(cpu, &sd->defer_csd);
++#else
++ schedule_work_on(cpu, &sd->defer_work);
++#endif
++ }
+ }
+
+ static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,
diff --git a/debian/patches-rt/powerpc-pseries-Select-the-generic-memory-allocator.patch b/debian/patches-rt/powerpc-pseries-Select-the-generic-memory-allocator.patch
new file mode 100644
index 0000000000..6eb1f4c871
--- /dev/null
+++ b/debian/patches-rt/powerpc-pseries-Select-the-generic-memory-allocator.patch
@@ -0,0 +1,27 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 9 Mar 2023 09:13:52 +0100
+Subject: [PATCH] powerpc/pseries: Select the generic memory allocator.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+The RTAS work area allocator is using the generic memory allocator and
+as such it must select it.
+
+Select the generic memory allocator on pseries.
+
+Fixes: 43033bc62d349 ("powerpc/pseries: add RTAS work area allocator")
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/20230309135110.uAxhqRFk@linutronix.de
+---
+ arch/powerpc/platforms/pseries/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/powerpc/platforms/pseries/Kconfig
++++ b/arch/powerpc/platforms/pseries/Kconfig
+@@ -2,6 +2,7 @@
+ config PPC_PSERIES
+ depends on PPC64 && PPC_BOOK3S
+ bool "IBM pSeries & new (POWER5-based) iSeries"
++ select GENERIC_ALLOCATOR
+ select HAVE_PCSPKR_PLATFORM
+ select MPIC
+ select OF_DYNAMIC
diff --git a/debian/patches-rt/powerpc__traps__Use_PREEMPT_RT.patch b/debian/patches-rt/powerpc__traps__Use_PREEMPT_RT.patch
new file mode 100644
index 0000000000..de8c14fd76
--- /dev/null
+++ b/debian/patches-rt/powerpc__traps__Use_PREEMPT_RT.patch
@@ -0,0 +1,38 @@
+Subject: powerpc: traps: Use PREEMPT_RT
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri Jul 26 11:30:49 2019 +0200
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+Add PREEMPT_RT to the backtrace if enabled.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+
+---
+ arch/powerpc/kernel/traps.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+---
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -261,12 +261,17 @@ static char *get_mmu_str(void)
+
+ static int __die(const char *str, struct pt_regs *regs, long err)
+ {
++ const char *pr = "";
++
+ printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
+
++ if (IS_ENABLED(CONFIG_PREEMPTION))
++ pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";
++
+ printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n",
+ IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE",
+ PAGE_SIZE / 1024, get_mmu_str(),
+- IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
++ pr,
+ IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
+ IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "",
+ debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
diff --git a/debian/patches-rt/powerpc_kvm__Disable_in-kernel_MPIC_emulation_for_PREEMPT_RT.patch b/debian/patches-rt/powerpc_kvm__Disable_in-kernel_MPIC_emulation_for_PREEMPT_RT.patch
new file mode 100644
index 0000000000..f53f527b3a
--- /dev/null
+++ b/debian/patches-rt/powerpc_kvm__Disable_in-kernel_MPIC_emulation_for_PREEMPT_RT.patch
@@ -0,0 +1,43 @@
+Subject: powerpc/kvm: Disable in-kernel MPIC emulation for PREEMPT_RT
+From: Bogdan Purcareata <bogdan.purcareata@freescale.com>
+Date: Fri Apr 24 15:53:13 2015 +0000
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+From: Bogdan Purcareata <bogdan.purcareata@freescale.com>
+
+While converting the openpic emulation code to use a raw_spinlock_t enables
+guests to run on RT, there's still a performance issue. For interrupts sent in
+directed delivery mode with a multiple CPU mask, the emulated openpic will loop
+through all of the VCPUs, and for each VCPU, it calls IRQ_check, which will loop
+through all the pending interrupts for that VCPU. This is done while holding the
+raw_lock, meaning that in all this time the interrupts and preemption are
+disabled on the host Linux. A malicious user app can max out both these numbers
+and cause a DoS.
+
+This temporary fix is sent for two reasons. First is so that users who want to
+use the in-kernel MPIC emulation are aware of the potential latencies, thus
+making sure that the hardware MPIC and their usage scenario does not involve
+interrupts sent in directed delivery mode, and the number of possible pending
+interrupts is kept small. Secondly, this should incentivize the development of a
+proper openpic emulation that would be better suited for RT.
+
+Acked-by: Scott Wood <scottwood@freescale.com>
+Signed-off-by: Bogdan Purcareata <bogdan.purcareata@freescale.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+
+---
+ arch/powerpc/kvm/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+---
+--- a/arch/powerpc/kvm/Kconfig
++++ b/arch/powerpc/kvm/Kconfig
+@@ -224,6 +224,7 @@ config KVM_E500MC
+ config KVM_MPIC
+ bool "KVM in-kernel MPIC emulation"
+ depends on KVM && PPC_E500
++ depends on !PREEMPT_RT
+ select HAVE_KVM_IRQCHIP
+ select HAVE_KVM_IRQFD
+ select HAVE_KVM_IRQ_ROUTING
diff --git a/debian/patches-rt/powerpc_pseries_iommu__Use_a_locallock_instead_local_irq_save.patch b/debian/patches-rt/powerpc_pseries_iommu__Use_a_locallock_instead_local_irq_save.patch
new file mode 100644
index 0000000000..f20adf29e0
--- /dev/null
+++ b/debian/patches-rt/powerpc_pseries_iommu__Use_a_locallock_instead_local_irq_save.patch
@@ -0,0 +1,115 @@
+Subject: powerpc/pseries/iommu: Use a locallock instead local_irq_save()
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue Mar 26 18:31:54 2019 +0100
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+The locallock protects the per-CPU variable tce_page. The function
+attempts to allocate memory while tce_page is protected (by disabling
+interrupts).
+
+Use a local_lock instead of local_irq_save()/local_irq_disable().
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+
+---
+ arch/powerpc/platforms/pseries/iommu.c | 31 ++++++++++++++++++++-----------
+ 1 file changed, 20 insertions(+), 11 deletions(-)
+---
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -25,6 +25,7 @@
+ #include <linux/of_address.h>
+ #include <linux/iommu.h>
+ #include <linux/rculist.h>
++#include <linux/local_lock.h>
+ #include <asm/io.h>
+ #include <asm/prom.h>
+ #include <asm/rtas.h>
+@@ -206,7 +207,13 @@ static int tce_build_pSeriesLP(unsigned
+ return ret;
+ }
+
+-static DEFINE_PER_CPU(__be64 *, tce_page);
++struct tce_page {
++ __be64 * page;
++ local_lock_t lock;
++};
++static DEFINE_PER_CPU(struct tce_page, tce_page) = {
++ .lock = INIT_LOCAL_LOCK(lock),
++};
+
+ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
+ long npages, unsigned long uaddr,
+@@ -229,9 +236,10 @@ static int tce_buildmulti_pSeriesLP(stru
+ direction, attrs);
+ }
+
+- local_irq_save(flags); /* to protect tcep and the page behind it */
++ /* to protect tcep and the page behind it */
++ local_lock_irqsave(&tce_page.lock, flags);
+
+- tcep = __this_cpu_read(tce_page);
++ tcep = __this_cpu_read(tce_page.page);
+
+ /* This is safe to do since interrupts are off when we're called
+ * from iommu_alloc{,_sg}()
+@@ -240,12 +248,12 @@ static int tce_buildmulti_pSeriesLP(stru
+ tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
+ /* If allocation fails, fall back to the loop implementation */
+ if (!tcep) {
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&tce_page.lock, flags);
+ return tce_build_pSeriesLP(tbl->it_index, tcenum,
+ tceshift,
+ npages, uaddr, direction, attrs);
+ }
+- __this_cpu_write(tce_page, tcep);
++ __this_cpu_write(tce_page.page, tcep);
+ }
+
+ rpn = __pa(uaddr) >> tceshift;
+@@ -275,7 +283,7 @@ static int tce_buildmulti_pSeriesLP(stru
+ tcenum += limit;
+ } while (npages > 0 && !rc);
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&tce_page.lock, flags);
+
+ if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
+ ret = (int)rc;
+@@ -459,16 +467,17 @@ static int tce_setrange_multi_pSeriesLP(
+ DMA_BIDIRECTIONAL, 0);
+ }
+
+- local_irq_disable(); /* to protect tcep and the page behind it */
+- tcep = __this_cpu_read(tce_page);
++ /* to protect tcep and the page behind it */
++ local_lock_irq(&tce_page.lock);
++ tcep = __this_cpu_read(tce_page.page);
+
+ if (!tcep) {
+ tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
+ if (!tcep) {
+- local_irq_enable();
++ local_unlock_irq(&tce_page.lock);
+ return -ENOMEM;
+ }
+- __this_cpu_write(tce_page, tcep);
++ __this_cpu_write(tce_page.page, tcep);
+ }
+
+ proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;
+@@ -511,7 +520,7 @@ static int tce_setrange_multi_pSeriesLP(
+
+ /* error cleanup: caller will clear whole range */
+
+- local_irq_enable();
++ local_unlock_irq(&tce_page.lock);
+ return rc;
+ }
+
diff --git a/debian/patches-rt/powerpc_stackprotector__work_around_stack-guard_init_from_atomic.patch b/debian/patches-rt/powerpc_stackprotector__work_around_stack-guard_init_from_atomic.patch
new file mode 100644
index 0000000000..c8994ca9d6
--- /dev/null
+++ b/debian/patches-rt/powerpc_stackprotector__work_around_stack-guard_init_from_atomic.patch
@@ -0,0 +1,37 @@
+Subject: powerpc/stackprotector: work around stack-guard init from atomic
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue Mar 26 18:31:29 2019 +0100
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+This is invoked from the secondary CPU in atomic context. On x86 we use
+tsc instead. On Power we XOR it against mftb() so let's use the stack address
+as the initial value.
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+
+---
+ arch/powerpc/include/asm/stackprotector.h | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+---
+--- a/arch/powerpc/include/asm/stackprotector.h
++++ b/arch/powerpc/include/asm/stackprotector.h
+@@ -19,8 +19,13 @@
+ */
+ static __always_inline void boot_init_stack_canary(void)
+ {
+- unsigned long canary = get_random_canary();
++ unsigned long canary;
+
++#ifndef CONFIG_PREEMPT_RT
++ canary = get_random_canary();
++#else
++ canary = ((unsigned long)&canary) & CANARY_MASK;
++#endif
+ current->stack_canary = canary;
+ #ifdef CONFIG_PPC64
+ get_paca()->canary = canary;
diff --git a/debian/patches-rt/preempt-Put-preempt_enable-within-an-instrumentation.patch b/debian/patches-rt/preempt-Put-preempt_enable-within-an-instrumentation.patch
new file mode 100644
index 0000000000..beddd4726f
--- /dev/null
+++ b/debian/patches-rt/preempt-Put-preempt_enable-within-an-instrumentation.patch
@@ -0,0 +1,47 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 8 Mar 2023 16:29:38 +0100
+Subject: [PATCH] preempt: Put preempt_enable() within an instrumentation*()
+ section.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Callers of preempt_enable() can be within an noinstr section leading to:
+| vmlinux.o: warning: objtool: native_sched_clock+0x97: call to preempt_schedule_notrace_thunk() leaves .noinstr.text section
+| vmlinux.o: warning: objtool: kvm_clock_read+0x22: call to preempt_schedule_notrace_thunk() leaves .noinstr.text section
+| vmlinux.o: warning: objtool: local_clock+0xb4: call to preempt_schedule_notrace_thunk() leaves .noinstr.text section
+| vmlinux.o: warning: objtool: enter_from_user_mode+0xea: call to preempt_schedule_thunk() leaves .noinstr.text section
+| vmlinux.o: warning: objtool: syscall_enter_from_user_mode+0x140: call to preempt_schedule_thunk() leaves .noinstr.text section
+| vmlinux.o: warning: objtool: syscall_enter_from_user_mode_prepare+0xf2: call to preempt_schedule_thunk() leaves .noinstr.text section
+| vmlinux.o: warning: objtool: irqentry_enter_from_user_mode+0xea: call to preempt_schedule_thunk() leaves .noinstr.text section
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20230309072724.3F6zRkvw@linutronix.de
+---
+ include/linux/preempt.h | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -230,15 +230,21 @@ do { \
+ #define preempt_enable() \
+ do { \
+ barrier(); \
+- if (unlikely(preempt_count_dec_and_test())) \
++ if (unlikely(preempt_count_dec_and_test())) { \
++ instrumentation_begin(); \
+ __preempt_schedule(); \
++ instrumentation_end(); \
++ } \
+ } while (0)
+
+ #define preempt_enable_notrace() \
+ do { \
+ barrier(); \
+- if (unlikely(__preempt_count_dec_and_test())) \
++ if (unlikely(__preempt_count_dec_and_test())) { \
++ instrumentation_begin(); \
+ __preempt_schedule_notrace(); \
++ instrumentation_end(); \
++ } \
+ } while (0)
+
+ #define preempt_check_resched() \
diff --git a/debian/patches-rt/rcutorture-Also-force-sched-priority-to-timersd-on-b.patch b/debian/patches-rt/rcutorture-Also-force-sched-priority-to-timersd-on-b.patch
new file mode 100644
index 0000000000..a75971478b
--- /dev/null
+++ b/debian/patches-rt/rcutorture-Also-force-sched-priority-to-timersd-on-b.patch
@@ -0,0 +1,71 @@
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Tue, 5 Apr 2022 03:07:51 +0200
+Subject: [PATCH] rcutorture: Also force sched priority to timersd on
+ boosting test.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+ksoftirqd is statically boosted to the priority level right above the
+one of rcu_torture_boost() so that timers, which torture readers rely on,
+get a chance to run while rcu_torture_boost() is polling.
+
+However timers processing got split from ksoftirqd into their own kthread
+(timersd) that isn't boosted. It has the same SCHED_FIFO low prio as
+rcu_torture_boost() and therefore timers can't preempt it and may
+starve.
+
+The issue can be triggered in practice on v5.17.1-rt17 using:
+
+ ./kvm.sh --allcpus --configs TREE04 --duration 10m --kconfig "CONFIG_EXPERT=y CONFIG_PREEMPT_RT=y"
+
+Fix this with statically boosting timersd just like is done with
+ksoftirqd in commit
+ ea6d962e80b61 ("rcutorture: Judge RCU priority boosting on grace periods, not callbacks")
+
+Suggested-by: Mel Gorman <mgorman@suse.de>
+Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Link: https://lkml.kernel.org/r/20220405010752.1347437-1-frederic@kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/interrupt.h | 1 +
+ kernel/rcu/rcutorture.c | 6 ++++++
+ kernel/softirq.c | 2 +-
+ 3 files changed, 8 insertions(+), 1 deletion(-)
+
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -610,6 +610,7 @@ extern void raise_softirq_irqoff(unsigne
+ extern void raise_softirq(unsigned int nr);
+
+ #ifdef CONFIG_PREEMPT_RT
++DECLARE_PER_CPU(struct task_struct *, timersd);
+ extern void raise_timer_softirq(void);
+ extern void raise_hrtimer_softirq(void);
+
+--- a/kernel/rcu/rcutorture.c
++++ b/kernel/rcu/rcutorture.c
+@@ -2408,6 +2408,12 @@ static int rcutorture_booster_init(unsig
+ WARN_ON_ONCE(!t);
+ sp.sched_priority = 2;
+ sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
++#ifdef CONFIG_PREEMPT_RT
++ t = per_cpu(timersd, cpu);
++ WARN_ON_ONCE(!t);
++ sp.sched_priority = 2;
++ sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
++#endif
+ }
+
+ /* Don't allow time recalculation while creating a new task. */
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -620,7 +620,7 @@ static inline void tick_irq_exit(void)
+ }
+
+ #ifdef CONFIG_PREEMPT_RT
+-static DEFINE_PER_CPU(struct task_struct *, timersd);
++DEFINE_PER_CPU(struct task_struct *, timersd);
+ static DEFINE_PER_CPU(unsigned long, pending_timer_softirq);
+
+ static unsigned int local_pending_timers(void)
diff --git a/debian/patches-rt/riscv-add-PREEMPT_AUTO-support.patch b/debian/patches-rt/riscv-add-PREEMPT_AUTO-support.patch
new file mode 100644
index 0000000000..60c3bf3aa1
--- /dev/null
+++ b/debian/patches-rt/riscv-add-PREEMPT_AUTO-support.patch
@@ -0,0 +1,44 @@
+From: Jisheng Zhang <jszhang@kernel.org>
+Date: Tue, 31 Oct 2023 22:35:20 +0800
+Subject: [PATCH] riscv: add PREEMPT_AUTO support
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+riscv has switched to GENERIC_ENTRY, so adding PREEMPT_AUTO is as simple
+as adding TIF_ARCH_RESCHED_LAZY related definitions and enabling
+HAVE_PREEMPT_AUTO.
+
+Signed-off-by: Jisheng Zhang <jszhang@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/riscv/Kconfig | 1 +
+ arch/riscv/include/asm/thread_info.h | 2 ++
+ 2 files changed, 3 insertions(+)
+
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -135,6 +135,7 @@ config RISCV
+ select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_POSIX_CPU_TIMERS_TASK_WORK
+ select HAVE_PREEMPT_DYNAMIC_KEY if !XIP_KERNEL
++ select HAVE_PREEMPT_AUTO
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RETHOOK if !XIP_KERNEL
+ select HAVE_RSEQ
+--- a/arch/riscv/include/asm/thread_info.h
++++ b/arch/riscv/include/asm/thread_info.h
+@@ -82,6 +82,7 @@ int arch_dup_task_struct(struct task_str
+ * - pending work-to-be-done flags are in lowest half-word
+ * - other flags in upper half-word(s)
+ */
++#define TIF_ARCH_RESCHED_LAZY 0 /* Lazy rescheduling */
+ #define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
+ #define TIF_SIGPENDING 2 /* signal pending */
+ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+@@ -96,6 +97,7 @@ int arch_dup_task_struct(struct task_str
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+ #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
+ #define _TIF_UPROBE (1 << TIF_UPROBE)
++#define _TIF_ARCH_RESCHED_LAZY (1 << TIF_ARCH_RESCHED_LAZY)
+
+ #define _TIF_WORK_MASK \
+ (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \
diff --git a/debian/patches-rt/riscv-allow-to-enable-RT.patch b/debian/patches-rt/riscv-allow-to-enable-RT.patch
new file mode 100644
index 0000000000..66fcfc8e7f
--- /dev/null
+++ b/debian/patches-rt/riscv-allow-to-enable-RT.patch
@@ -0,0 +1,23 @@
+From: Jisheng Zhang <jszhang@kernel.org>
+Date: Tue, 31 Oct 2023 22:35:21 +0800
+Subject: [PATCH] riscv: allow to enable RT
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+Now, it's ready to enable RT on riscv.
+
+Signed-off-by: Jisheng Zhang <jszhang@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/riscv/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -48,6 +48,7 @@ config RISCV
+ select ARCH_SUPPORTS_HUGETLBFS if MMU
+ select ARCH_SUPPORTS_PAGE_TABLE_CHECK if MMU
+ select ARCH_SUPPORTS_PER_VMA_LOCK if MMU
++ select ARCH_SUPPORTS_RT
+ select ARCH_USE_MEMTEST
+ select ARCH_USE_QUEUED_RWLOCKS
+ select ARCH_USES_CFI_TRAPS if CFI_CLANG
diff --git a/debian/patches-rt/sched-rt-Don-t-try-push-tasks-if-there-are-none.patch b/debian/patches-rt/sched-rt-Don-t-try-push-tasks-if-there-are-none.patch
new file mode 100644
index 0000000000..01e19e8fcc
--- /dev/null
+++ b/debian/patches-rt/sched-rt-Don-t-try-push-tasks-if-there-are-none.patch
@@ -0,0 +1,58 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 1 Aug 2023 17:26:48 +0200
+Subject: [PATCH] sched/rt: Don't try push tasks if there are none.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+I have a RT task X at a high priority and cyclictest on each CPU with
+lower priority than X's. If X is active and each CPU wakes their own
+cyclictest thread then it ends in a longer rto_push storm.
+A random CPU determines via balance_rt() that the CPU on which X is
+running needs to push tasks. X has the highest priority, cyclictest is
+next in line so there is nothing that can be done since the task with
+the higher priority is not touched.
+
+tell_cpu_to_push() increments rto_loop_next and schedules
+rto_push_irq_work_func() on X's CPU. The other CPUs also increment the
+loop counter and do the same. Once rto_push_irq_work_func() is active it
+does nothing because it has _no_ pushable tasks on its runqueue. Then
+checks rto_next_cpu() and decides to queue irq_work on the local CPU
+because another CPU requested a push by incrementing the counter.
+
+I have traces where ~30 CPUs request this ~3 times each before it
+finally ends. This greatly increases X's runtime while X isn't making
+much progress.
+
+Teach rto_next_cpu() to only return CPUs which also have tasks on their
+runqueue which can be pushed away. This does not reduce the
+tell_cpu_to_push() invocations (rto_loop_next counter increments) but
+reduces the amount of issued rto_push_irq_work_func() if nothing can be
+done. As the result the overloaded CPU is blocked less often.
+
+There are still cases where the "same job" is repeated several times
+(for instance the current CPU needs to resched but didn't yet because
+the irq-work is repeated a few times and so the old task remains on the
+CPU) but the majority of requests end in tell_cpu_to_push() before an IPI
+is issued.
+
+Reviewed-by: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Link: https://lore.kernel.org/r/20230801152648._y603AS_@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/sched/rt.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -2249,8 +2249,11 @@ static int rto_next_cpu(struct root_doma
+
+ rd->rto_cpu = cpu;
+
+- if (cpu < nr_cpu_ids)
++ if (cpu < nr_cpu_ids) {
++ if (!has_pushable_tasks(cpu_rq(cpu)))
++ continue;
+ return cpu;
++ }
+
+ rd->rto_cpu = -1;
+
diff --git a/debian/patches-rt/series b/debian/patches-rt/series
new file mode 100644
index 0000000000..9269d7464f
--- /dev/null
+++ b/debian/patches-rt/series
@@ -0,0 +1,255 @@
+# Applied upstream
+
+###########################################################################
+# Posted and applied
+###########################################################################
+
+# signal_x86__Delay_calling_signals_in_atomic.patch
+
+###########################################################################
+# Posted
+###########################################################################
+0001-sched-Constrain-locks-in-sched_submit_work.patch
+0002-locking-rtmutex-Avoid-unconditional-slowpath-for-DEB.patch
+0003-sched-Extract-__schedule_loop.patch
+0004-sched-Provide-rt_mutex-specific-scheduler-helpers.patch
+0005-locking-rtmutex-Use-rt_mutex-specific-scheduler-help.patch
+0006-locking-rtmutex-Add-a-lockdep-assert-to-catch-potent.patch
+0007-futex-pi-Fix-recursive-rt_mutex-waiter-state.patch
+
+# Hacks to get ptrace to work.
+0001-signal-Add-proper-comment-about-the-preempt-disable-.patch
+0002-signal-Don-t-disable-preemption-in-ptrace_stop-on-PR.patch
+
+# DRM AMD GPU
+0001-drm-amd-display-Remove-migrate_en-dis-from-dc_fpu_be.patch
+0002-drm-amd-display-Simplify-the-per-CPU-usage.patch
+0003-drm-amd-display-Add-a-warning-if-the-FPU-is-used-out.patch
+0004-drm-amd-display-Move-the-memory-allocation-out-of-dc.patch
+0005-drm-amd-display-Move-the-memory-allocation-out-of-dc.patch
+
+###########################################################################
+# Post
+###########################################################################
+net-Avoid-the-IPI-to-free-the.patch
+
+###########################################################################
+# X86:
+###########################################################################
+x86__Allow_to_enable_RT.patch
+x86__Enable_RT_also_on_32bit.patch
+
+###########################################################################
+# For later, not essential
+###########################################################################
+# Posted
+sched-rt-Don-t-try-push-tasks-if-there-are-none.patch
+
+# Needs discussion first.
+softirq-Use-a-dedicated-thread-for-timer-wakeups.patch
+rcutorture-Also-force-sched-priority-to-timersd-on-b.patch
+tick-Fix-timer-storm-since-introduction-of-timersd.patch
+softirq-Wake-ktimers-thread-also-in-softirq.patch
+zram-Replace-bit-spinlocks-with-spinlock_t-for-PREEM.patch
+preempt-Put-preempt_enable-within-an-instrumentation.patch
+
+# Sched
+0001-sched-core-Provide-a-method-to-check-if-a-task-is-PI.patch
+0002-softirq-Add-function-to-preempt-serving-softirqs.patch
+0003-time-Allow-to-preempt-after-a-callback.patch
+
+###########################################################################
+# John's printk queue
+###########################################################################
+0002-serial-core-Use-lock-wrappers.patch
+0003-serial-21285-Use-port-lock-wrappers.patch
+0004-serial-8250_aspeed_vuart-Use-port-lock-wrappers.patch
+0005-serial-8250_bcm7271-Use-port-lock-wrappers.patch
+0006-serial-8250-Use-port-lock-wrappers.patch
+0007-serial-8250_dma-Use-port-lock-wrappers.patch
+0008-serial-8250_dw-Use-port-lock-wrappers.patch
+0009-serial-8250_exar-Use-port-lock-wrappers.patch
+0010-serial-8250_fsl-Use-port-lock-wrappers.patch
+0011-serial-8250_mtk-Use-port-lock-wrappers.patch
+0012-serial-8250_omap-Use-port-lock-wrappers.patch
+0013-serial-8250_pci1xxxx-Use-port-lock-wrappers.patch
+0014-serial-altera_jtaguart-Use-port-lock-wrappers.patch
+0015-serial-altera_uart-Use-port-lock-wrappers.patch
+0016-serial-amba-pl010-Use-port-lock-wrappers.patch
+0017-serial-amba-pl011-Use-port-lock-wrappers.patch
+0018-serial-apb-Use-port-lock-wrappers.patch
+0019-serial-ar933x-Use-port-lock-wrappers.patch
+0020-serial-arc_uart-Use-port-lock-wrappers.patch
+0021-serial-atmel-Use-port-lock-wrappers.patch
+0022-serial-bcm63xx-uart-Use-port-lock-wrappers.patch
+0023-serial-cpm_uart-Use-port-lock-wrappers.patch
+0024-serial-digicolor-Use-port-lock-wrappers.patch
+0025-serial-dz-Use-port-lock-wrappers.patch
+0026-serial-linflexuart-Use-port-lock-wrappers.patch
+0027-serial-fsl_lpuart-Use-port-lock-wrappers.patch
+0028-serial-icom-Use-port-lock-wrappers.patch
+0029-serial-imx-Use-port-lock-wrappers.patch
+0030-serial-ip22zilog-Use-port-lock-wrappers.patch
+0031-serial-jsm-Use-port-lock-wrappers.patch
+0032-serial-liteuart-Use-port-lock-wrappers.patch
+0033-serial-lpc32xx_hs-Use-port-lock-wrappers.patch
+0034-serial-ma35d1-Use-port-lock-wrappers.patch
+0035-serial-mcf-Use-port-lock-wrappers.patch
+0036-serial-men_z135_uart-Use-port-lock-wrappers.patch
+0037-serial-meson-Use-port-lock-wrappers.patch
+0038-serial-milbeaut_usio-Use-port-lock-wrappers.patch
+0039-serial-mpc52xx-Use-port-lock-wrappers.patch
+0040-serial-mps2-uart-Use-port-lock-wrappers.patch
+0041-serial-msm-Use-port-lock-wrappers.patch
+0042-serial-mvebu-uart-Use-port-lock-wrappers.patch
+0043-serial-omap-Use-port-lock-wrappers.patch
+0044-serial-owl-Use-port-lock-wrappers.patch
+0045-serial-pch-Use-port-lock-wrappers.patch
+0046-serial-pic32-Use-port-lock-wrappers.patch
+0047-serial-pmac_zilog-Use-port-lock-wrappers.patch
+0048-serial-pxa-Use-port-lock-wrappers.patch
+0049-serial-qcom-geni-Use-port-lock-wrappers.patch
+0050-serial-rda-Use-port-lock-wrappers.patch
+0051-serial-rp2-Use-port-lock-wrappers.patch
+0052-serial-sa1100-Use-port-lock-wrappers.patch
+0053-serial-samsung_tty-Use-port-lock-wrappers.patch
+0054-serial-sb1250-duart-Use-port-lock-wrappers.patch
+0056-serial-tegra-Use-port-lock-wrappers.patch
+0057-serial-core-Use-port-lock-wrappers.patch
+0058-serial-mctrl_gpio-Use-port-lock-wrappers.patch
+0059-serial-txx9-Use-port-lock-wrappers.patch
+0060-serial-sh-sci-Use-port-lock-wrappers.patch
+0061-serial-sifive-Use-port-lock-wrappers.patch
+0062-serial-sprd-Use-port-lock-wrappers.patch
+0063-serial-st-asc-Use-port-lock-wrappers.patch
+0064-serial-stm32-Use-port-lock-wrappers.patch
+0065-serial-sunhv-Use-port-lock-wrappers.patch
+0066-serial-sunplus-uart-Use-port-lock-wrappers.patch
+0067-serial-sunsab-Use-port-lock-wrappers.patch
+0068-serial-sunsu-Use-port-lock-wrappers.patch
+0069-serial-sunzilog-Use-port-lock-wrappers.patch
+0070-serial-timbuart-Use-port-lock-wrappers.patch
+0071-serial-uartlite-Use-port-lock-wrappers.patch
+0072-serial-ucc_uart-Use-port-lock-wrappers.patch
+0073-serial-vt8500-Use-port-lock-wrappers.patch
+0074-serial-xilinx_uartps-Use-port-lock-wrappers.patch
+0075-printk-Add-non-BKL-nbcon-console-basic-infrastructur.patch
+0076-printk-nbcon-Add-acquire-release-logic.patch
+0077-printk-Make-static-printk-buffers-available-to-nbcon.patch
+0078-printk-nbcon-Add-buffer-management.patch
+0079-printk-nbcon-Add-ownership-state-functions.patch
+0080-printk-nbcon-Add-sequence-handling.patch
+0081-printk-nbcon-Add-emit-function-and-callback-function.patch
+0082-printk-nbcon-Allow-drivers-to-mark-unsafe-regions-an.patch
+0083-printk-fix-illegal-pbufs-access-for-CONFIG_PRINTK.patch
+0084-printk-Reduce-pr_flush-pooling-time.patch
+0085-printk-nbcon-Relocate-32bit-seq-macros.patch
+0086-printk-Adjust-mapping-for-32bit-seq-macros.patch
+0087-printk-Use-prb_first_seq-as-base-for-32bit-seq-macro.patch
+0088-printk-ringbuffer-Do-not-skip-non-finalized-records-.patch
+0089-printk-ringbuffer-Clarify-special-lpos-values.patch
+0090-printk-For-suppress_panic_printk-check-for-other-CPU.patch
+0091-printk-Add-this_cpu_in_panic.patch
+0092-printk-ringbuffer-Cleanup-reader-terminology.patch
+0093-printk-Wait-for-all-reserved-records-with-pr_flush.patch
+0094-printk-ringbuffer-Skip-non-finalized-records-in-pani.patch
+0095-printk-ringbuffer-Consider-committed-as-finalized-in.patch
+0096-printk-Disable-passing-console-lock-owner-completely.patch
+0097-printk-Avoid-non-panic-CPUs-writing-to-ringbuffer.patch
+0098-panic-Flush-kernel-log-buffer-at-the-end.patch
+0099-printk-Consider-nbcon-boot-consoles-on-seq-init.patch
+0100-printk-Add-sparse-notation-to-console_srcu-locking.patch
+0101-printk-nbcon-Ensure-ownership-release-on-failed-emit.patch
+0102-printk-Check-printk_deferred_enter-_exit-usage.patch
+0103-printk-nbcon-Implement-processing-in-port-lock-wrapp.patch
+0104-printk-nbcon-Add-driver_enter-driver_exit-console-ca.patch
+0105-printk-Make-console_is_usable-available-to-nbcon.patch
+0106-printk-Let-console_is_usable-handle-nbcon.patch
+0107-printk-Add-flags-argument-for-console_is_usable.patch
+0108-printk-nbcon-Provide-function-to-flush-using-write_a.patch
+0109-printk-Track-registered-boot-consoles.patch
+0110-printk-nbcon-Use-nbcon-consoles-in-console_flush_all.patch
+0111-printk-nbcon-Assign-priority-based-on-CPU-state.patch
+0112-printk-nbcon-Add-unsafe-flushing-on-panic.patch
+0113-printk-Avoid-console_lock-dance-if-no-legacy-or-boot.patch
+0114-printk-Track-nbcon-consoles.patch
+0115-printk-Coordinate-direct-printing-in-panic.patch
+0116-printk-nbcon-Implement-emergency-sections.patch
+0117-panic-Mark-emergency-section-in-warn.patch
+0118-panic-Mark-emergency-section-in-oops.patch
+0119-rcu-Mark-emergency-section-in-rcu-stalls.patch
+0120-lockdep-Mark-emergency-section-in-lockdep-splats.patch
+0121-printk-nbcon-Introduce-printing-kthreads.patch
+0122-printk-Atomic-print-in-printk-context-on-shutdown.patch
+0123-printk-nbcon-Add-context-to-console_is_usable.patch
+0124-printk-nbcon-Add-printer-thread-wakeups.patch
+0125-printk-nbcon-Stop-threads-on-shutdown-reboot.patch
+0126-printk-nbcon-Start-printing-threads.patch
+0127-proc-Add-nbcon-support-for-proc-consoles.patch
+0128-tty-sysfs-Add-nbcon-support-for-active.patch
+0129-printk-nbcon-Provide-function-to-reacquire-ownership.patch
+0130-serial-core-Provide-low-level-functions-to-port-lock.patch
+0131-serial-8250-Switch-to-nbcon-console.patch
+0132-printk-Add-kthread-for-all-legacy-consoles.patch
+0133-serial-8250-revert-drop-lockdep-annotation-from-seri.patch
+0134-printk-Avoid-false-positive-lockdep-report-for-legac.patch
+
+###########################################################################
+# DRM:
+###########################################################################
+0003-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch
+0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch
+0005-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch
+0006-drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch
+0007-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch
+0008-drm-i915-gt-Queue-and-wait-for-the-irq_work-item.patch
+0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch
+0010-drm-i915-Drop-the-irqs_disabled-check.patch
+drm-i915-Do-not-disable-preemption-for-resets.patch
+drm-i915-guc-Consider-also-RCU-depth-in-busy-loop.patch
+Revert-drm-i915-Depend-on-PREEMPT_RT.patch
+
+###########################################################################
+# Lazy preemption
+###########################################################################
+PREEMPT_AUTO.patch
+
+###########################################################################
+# ARM/ARM64
+###########################################################################
+0001-arm-Disable-jump-label-on-PREEMPT_RT.patch
+ARM__enable_irq_in_translation_section_permission_fault_handlers.patch
+# arm64-signal-Use-ARCH_RT_DELAYS_SIGNAL_SEND.patch
+tty_serial_omap__Make_the_locking_RT_aware.patch
+tty_serial_pl011__Make_the_locking_work_on_RT.patch
+0001-ARM-vfp-Provide-vfp_lock-for-VFP-locking.patch
+0002-ARM-vfp-Use-vfp_lock-in-vfp_sync_hwstate.patch
+0003-ARM-vfp-Use-vfp_lock-in-vfp_support_entry.patch
+0004-ARM-vfp-Move-sending-signals-outside-of-vfp_lock-ed-.patch
+ARM__Allow_to_enable_RT.patch
+ARM64__Allow_to_enable_RT.patch
+
+###########################################################################
+# POWERPC
+###########################################################################
+powerpc__traps__Use_PREEMPT_RT.patch
+powerpc_pseries_iommu__Use_a_locallock_instead_local_irq_save.patch
+powerpc-pseries-Select-the-generic-memory-allocator.patch
+powerpc_kvm__Disable_in-kernel_MPIC_emulation_for_PREEMPT_RT.patch
+powerpc_stackprotector__work_around_stack-guard_init_from_atomic.patch
+POWERPC__Allow_to_enable_RT.patch
+
+###########################################################################
+# RISC-V
+###########################################################################
+#RISC-V-Probe-misaligned-access-speed-in-parallel.patch
+riscv-add-PREEMPT_AUTO-support.patch
+riscv-allow-to-enable-RT.patch
+
+# Sysfs file vs uname() -v
+sysfs__Add__sys_kernel_realtime_entry.patch
+
+###########################################################################
+# RT release version
+###########################################################################
+Add_localversion_for_-RT_release.patch
diff --git a/debian/patches-rt/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch b/debian/patches-rt/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch
new file mode 100644
index 0000000000..949ff6da70
--- /dev/null
+++ b/debian/patches-rt/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch
@@ -0,0 +1,222 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 1 Dec 2021 17:41:09 +0100
+Subject: [PATCH] softirq: Use a dedicated thread for timer wakeups.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+A timer/hrtimer softirq is raised in-IRQ context. With threaded
+interrupts enabled or on PREEMPT_RT this leads to waking the ksoftirqd
+for the processing of the softirq.
+Once the ksoftirqd is marked as pending (or is running) it will collect
+all raised softirqs. This in turn means that a softirq which would have
+been processed at the end of the threaded interrupt, which runs at an
+elevated priority, is now moved to ksoftirqd which runs at SCHED_OTHER
+priority and competes with every regular task for CPU resources.
+This introduces long delays on heavy loaded systems and is not desired
+especially if the system is not overloaded by the softirqs.
+
+Split the TIMER_SOFTIRQ and HRTIMER_SOFTIRQ processing into a dedicated
+timers thread and let it run at the lowest SCHED_FIFO priority.
+RT tasks are woken up from hardirq context so only timer_list timers
+and hrtimers for "regular" tasks are processed here. The higher priority
+ensures that wakeups are performed before scheduling SCHED_OTHER tasks.
+
+Using a dedicated variable to store the pending softirq bits values
+ensures that the timers are not accidentally picked up by ksoftirqd and
+other threaded interrupts.
+It shouldn't be picked up by ksoftirqd since it runs at lower priority.
+However if the timer bits are ORed while a threaded interrupt is
+running, then the timer softirq would be performed at higher priority.
+The new timer thread will block on the softirq lock before it starts
+softirq work. This "race window" isn't closed because while timer thread
+is performing the softirq it can get PI-boosted via the softirq lock by
+a random force-threaded thread.
+The timer thread can pick up pending softirqs from ksoftirqd but only
+if the softirq load is high. It is not desired that the picked up
+softirqs are processed at SCHED_FIFO priority under high softirq load
+but this can already happen by a PI-boost by a force-threaded interrupt.
+
+Reported-by: kernel test robot <lkp@intel.com> [ static timer_threads ]
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/interrupt.h | 16 ++++++++
+ kernel/softirq.c | 92 ++++++++++++++++++++++++++++++++++++++++++++--
+ kernel/time/hrtimer.c | 4 +-
+ kernel/time/timer.c | 2 -
+ 4 files changed, 108 insertions(+), 6 deletions(-)
+
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -609,6 +609,22 @@ extern void __raise_softirq_irqoff(unsig
+ extern void raise_softirq_irqoff(unsigned int nr);
+ extern void raise_softirq(unsigned int nr);
+
++#ifdef CONFIG_PREEMPT_RT
++extern void raise_timer_softirq(void);
++extern void raise_hrtimer_softirq(void);
++
++#else
++static inline void raise_timer_softirq(void)
++{
++ raise_softirq(TIMER_SOFTIRQ);
++}
++
++static inline void raise_hrtimer_softirq(void)
++{
++ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++}
++#endif
++
+ DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+
+ static inline struct task_struct *this_cpu_ksoftirqd(void)
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -619,6 +619,29 @@ static inline void tick_irq_exit(void)
+ #endif
+ }
+
++#ifdef CONFIG_PREEMPT_RT
++static DEFINE_PER_CPU(struct task_struct *, timersd);
++static DEFINE_PER_CPU(unsigned long, pending_timer_softirq);
++
++static unsigned int local_pending_timers(void)
++{
++ return __this_cpu_read(pending_timer_softirq);
++}
++
++static void wake_timersd(void)
++{
++ struct task_struct *tsk = __this_cpu_read(timersd);
++
++ if (tsk)
++ wake_up_process(tsk);
++}
++
++#else
++
++static inline void wake_timersd(void) { }
++
++#endif
++
+ static inline void __irq_exit_rcu(void)
+ {
+ #ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
+@@ -628,8 +651,13 @@ static inline void __irq_exit_rcu(void)
+ #endif
+ account_hardirq_exit(current);
+ preempt_count_sub(HARDIRQ_OFFSET);
+- if (!in_interrupt() && local_softirq_pending())
+- invoke_softirq();
++ if (!in_interrupt()) {
++ if (local_softirq_pending())
++ invoke_softirq();
++
++ if (IS_ENABLED(CONFIG_PREEMPT_RT) && local_pending_timers())
++ wake_timersd();
++ }
+
+ tick_irq_exit();
+ }
+@@ -963,12 +991,70 @@ static struct smp_hotplug_thread softirq
+ .thread_comm = "ksoftirqd/%u",
+ };
+
++#ifdef CONFIG_PREEMPT_RT
++static void timersd_setup(unsigned int cpu)
++{
++ sched_set_fifo_low(current);
++}
++
++static int timersd_should_run(unsigned int cpu)
++{
++ return local_pending_timers();
++}
++
++static void run_timersd(unsigned int cpu)
++{
++ unsigned int timer_si;
++
++ ksoftirqd_run_begin();
++
++ timer_si = local_pending_timers();
++ __this_cpu_write(pending_timer_softirq, 0);
++ or_softirq_pending(timer_si);
++
++ __do_softirq();
++
++ ksoftirqd_run_end();
++}
++
++static void raise_ktimers_thread(unsigned int nr)
++{
++ trace_softirq_raise(nr);
++ __this_cpu_or(pending_timer_softirq, 1 << nr);
++}
++
++void raise_hrtimer_softirq(void)
++{
++ raise_ktimers_thread(HRTIMER_SOFTIRQ);
++}
++
++void raise_timer_softirq(void)
++{
++ unsigned long flags;
++
++ local_irq_save(flags);
++ raise_ktimers_thread(TIMER_SOFTIRQ);
++ wake_timersd();
++ local_irq_restore(flags);
++}
++
++static struct smp_hotplug_thread timer_threads = {
++ .store = &timersd,
++ .setup = timersd_setup,
++ .thread_should_run = timersd_should_run,
++ .thread_fn = run_timersd,
++ .thread_comm = "ktimers/%u",
++};
++#endif
++
+ static __init int spawn_ksoftirqd(void)
+ {
+ cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
+ takeover_tasklets);
+ BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
+-
++#ifdef CONFIG_PREEMPT_RT
++ BUG_ON(smpboot_register_percpu_thread(&timer_threads));
++#endif
+ return 0;
+ }
+ early_initcall(spawn_ksoftirqd);
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -1808,7 +1808,7 @@ void hrtimer_interrupt(struct clock_even
+ if (!ktime_before(now, cpu_base->softirq_expires_next)) {
+ cpu_base->softirq_expires_next = KTIME_MAX;
+ cpu_base->softirq_activated = 1;
+- raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++ raise_hrtimer_softirq();
+ }
+
+ __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
+@@ -1921,7 +1921,7 @@ void hrtimer_run_queues(void)
+ if (!ktime_before(now, cpu_base->softirq_expires_next)) {
+ cpu_base->softirq_expires_next = KTIME_MAX;
+ cpu_base->softirq_activated = 1;
+- raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++ raise_hrtimer_softirq();
+ }
+
+ __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -2054,7 +2054,7 @@ static void run_local_timers(void)
+ if (time_before(jiffies, base->next_expiry))
+ return;
+ }
+- raise_softirq(TIMER_SOFTIRQ);
++ raise_timer_softirq();
+ }
+
+ /*
diff --git a/debian/patches-rt/softirq-Wake-ktimers-thread-also-in-softirq.patch b/debian/patches-rt/softirq-Wake-ktimers-thread-also-in-softirq.patch
new file mode 100644
index 0000000000..21609fe207
--- /dev/null
+++ b/debian/patches-rt/softirq-Wake-ktimers-thread-also-in-softirq.patch
@@ -0,0 +1,44 @@
+From: Junxiao Chang <junxiao.chang@intel.com>
+Date: Mon, 20 Feb 2023 09:12:20 +0100
+Subject: [PATCH] softirq: Wake ktimers thread also in softirq.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+If the hrtimer is raised while a softirq is processed then it does not
+wake the corresponding ktimers thread. This is due to the optimisation in the
+irq-exit path which is also used to wake the ktimers thread. For the other
+softirqs, this is okay because the additional softirq bits will be handled by
+the currently running softirq handler.
+The timer related softirq bits are added to a different variable and rely on
+the ktimers thread.
+As a consequence the wake up of ktimersd is delayed until the next timer tick.
+
+Always wake the ktimers thread if a timer related softirq is pending.
+
+Reported-by: Peh, Hock Zhang <hock.zhang.peh@intel.com>
+Signed-off-by: Junxiao Chang <junxiao.chang@intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/softirq.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -646,13 +646,12 @@ static inline void __irq_exit_rcu(void)
+ #endif
+ account_hardirq_exit(current);
+ preempt_count_sub(HARDIRQ_OFFSET);
+- if (!in_interrupt()) {
+- if (local_softirq_pending())
+- invoke_softirq();
++ if (!in_interrupt() && local_softirq_pending())
++ invoke_softirq();
+
+- if (IS_ENABLED(CONFIG_PREEMPT_RT) && local_pending_timers())
+- wake_timersd();
+- }
++ if (IS_ENABLED(CONFIG_PREEMPT_RT) && local_pending_timers() &&
++ !(in_nmi() | in_hardirq()))
++ wake_timersd();
+
+ tick_irq_exit();
+ }
diff --git a/debian/patches-rt/sysfs__Add__sys_kernel_realtime_entry.patch b/debian/patches-rt/sysfs__Add__sys_kernel_realtime_entry.patch
new file mode 100644
index 0000000000..2e6928846b
--- /dev/null
+++ b/debian/patches-rt/sysfs__Add__sys_kernel_realtime_entry.patch
@@ -0,0 +1,53 @@
+Subject: sysfs: Add /sys/kernel/realtime entry
+From: Clark Williams <williams@redhat.com>
+Date: Sat Jul 30 21:55:53 2011 -0500
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+From: Clark Williams <williams@redhat.com>
+
+Add a /sys/kernel entry to indicate that the kernel is a
+realtime kernel.
+
+Clark says that he needs this for udev rules, udev needs to evaluate
+if its a PREEMPT_RT kernel a few thousand times and parsing uname
+output is too slow or so.
+
+Are there better solutions? Should it exist and return 0 on !-rt?
+
+Signed-off-by: Clark Williams <williams@redhat.com>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+
+---
+ kernel/ksysfs.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+---
+--- a/kernel/ksysfs.c
++++ b/kernel/ksysfs.c
+@@ -179,6 +179,15 @@ KERNEL_ATTR_RO(crash_elfcorehdr_size);
+
+ #endif /* CONFIG_CRASH_CORE */
+
++#if defined(CONFIG_PREEMPT_RT)
++static ssize_t realtime_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ return sprintf(buf, "%d\n", 1);
++}
++KERNEL_ATTR_RO(realtime);
++#endif
++
+ /* whether file capabilities are enabled */
+ static ssize_t fscaps_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+@@ -275,6 +284,9 @@ static struct attribute * kernel_attrs[]
+ &rcu_expedited_attr.attr,
+ &rcu_normal_attr.attr,
+ #endif
++#ifdef CONFIG_PREEMPT_RT
++ &realtime_attr.attr,
++#endif
+ NULL
+ };
+
diff --git a/debian/patches-rt/tick-Fix-timer-storm-since-introduction-of-timersd.patch b/debian/patches-rt/tick-Fix-timer-storm-since-introduction-of-timersd.patch
new file mode 100644
index 0000000000..874c7b3b3d
--- /dev/null
+++ b/debian/patches-rt/tick-Fix-timer-storm-since-introduction-of-timersd.patch
@@ -0,0 +1,106 @@
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Tue, 5 Apr 2022 03:07:52 +0200
+Subject: [PATCH] tick: Fix timer storm since introduction of timersd
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+If timers are pending while the tick is reprogrammed on nohz_mode, the
+next expiry is not armed to fire now, it is delayed one jiffy forward
+instead so as not to raise an inextinguishable timer storm with such
+scenario:
+
+1) IRQ triggers and queue a timer
+2) ksoftirqd() is woken up
+3) IRQ tail: timer is reprogrammed to fire now
+4) IRQ exit
+5) TIMER interrupt
+6) goto 3)
+
+...all that until we finally reach ksoftirqd.
+
+Unfortunately we are checking the wrong softirq vector bitmask since
+timersd kthread has split from ksoftirqd. Timers now have their own
+vector state field that must be checked separately. As a result, the
+old timer storm is back. This shows up early on boot with extremely long
+initcalls:
+
+ [ 333.004807] initcall dquot_init+0x0/0x111 returned 0 after 323822879 usecs
+
+and the cause is uncovered with the right trace events showing just
+10 microseconds between ticks (~100 000 Hz):
+
+|swapper/-1 1dn.h111 60818582us : hrtimer_expire_entry: hrtimer=00000000e0ef0f6b function=tick_sched_timer now=60415486608
+|swapper/-1 1dn.h111 60818592us : hrtimer_expire_entry: hrtimer=00000000e0ef0f6b function=tick_sched_timer now=60415496082
+|swapper/-1 1dn.h111 60818601us : hrtimer_expire_entry: hrtimer=00000000e0ef0f6b function=tick_sched_timer now=60415505550
+
+Fix this by checking the right timer vector state from the nohz code.
+
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lkml.kernel.org/r/20220405010752.1347437-2-frederic@kernel.org
+---
+ include/linux/interrupt.h | 12 ++++++++++++
+ kernel/softirq.c | 7 +------
+ kernel/time/tick-sched.c | 2 +-
+ 3 files changed, 14 insertions(+), 7 deletions(-)
+
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -611,9 +611,16 @@ extern void raise_softirq(unsigned int n
+
+ #ifdef CONFIG_PREEMPT_RT
+ DECLARE_PER_CPU(struct task_struct *, timersd);
++DECLARE_PER_CPU(unsigned long, pending_timer_softirq);
++
+ extern void raise_timer_softirq(void);
+ extern void raise_hrtimer_softirq(void);
+
++static inline unsigned int local_pending_timers(void)
++{
++ return __this_cpu_read(pending_timer_softirq);
++}
++
+ #else
+ static inline void raise_timer_softirq(void)
+ {
+@@ -624,6 +631,11 @@ static inline void raise_hrtimer_softirq
+ {
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+ }
++
++static inline unsigned int local_pending_timers(void)
++{
++ return local_softirq_pending();
++}
+ #endif
+
+ DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -621,12 +621,7 @@ static inline void tick_irq_exit(void)
+
+ #ifdef CONFIG_PREEMPT_RT
+ DEFINE_PER_CPU(struct task_struct *, timersd);
+-static DEFINE_PER_CPU(unsigned long, pending_timer_softirq);
+-
+-static unsigned int local_pending_timers(void)
+-{
+- return __this_cpu_read(pending_timer_softirq);
+-}
++DEFINE_PER_CPU(unsigned long, pending_timer_softirq);
+
+ static void wake_timersd(void)
+ {
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -795,7 +795,7 @@ static void tick_nohz_restart(struct tic
+
+ static inline bool local_timer_softirq_pending(void)
+ {
+- return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
++ return local_pending_timers() & BIT(TIMER_SOFTIRQ);
+ }
+
+ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
diff --git a/debian/patches-rt/tty_serial_omap__Make_the_locking_RT_aware.patch b/debian/patches-rt/tty_serial_omap__Make_the_locking_RT_aware.patch
new file mode 100644
index 0000000000..89d7447401
--- /dev/null
+++ b/debian/patches-rt/tty_serial_omap__Make_the_locking_RT_aware.patch
@@ -0,0 +1,47 @@
+Subject: tty/serial/omap: Make the locking RT aware
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu Jul 28 13:32:57 2011 +0200
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+The lock is a sleeping lock and local_irq_save() is not the
+optimisation we are looking for. Redo it to make it work on -RT and
+non-RT.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+
+---
+ drivers/tty/serial/omap-serial.c | 12 ++++--------
+ 1 file changed, 4 insertions(+), 8 deletions(-)
+---
+--- a/drivers/tty/serial/omap-serial.c
++++ b/drivers/tty/serial/omap-serial.c
+@@ -1212,13 +1212,10 @@ serial_omap_console_write(struct console
+ unsigned int ier;
+ int locked = 1;
+
+- local_irq_save(flags);
+- if (up->port.sysrq)
+- locked = 0;
+- else if (oops_in_progress)
+- locked = uart_port_trylock(&up->port);
++ if (up->port.sysrq || oops_in_progress)
++ locked = uart_port_trylock_irqsave(&up->port, &flags);
+ else
+- uart_port_lock(&up->port);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ /*
+ * First save the IER then disable the interrupts
+@@ -1245,8 +1242,7 @@ serial_omap_console_write(struct console
+ check_modem_status(up);
+
+ if (locked)
+- uart_port_unlock(&up->port);
+- local_irq_restore(flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ }
+
+ static int __init
diff --git a/debian/patches-rt/tty_serial_pl011__Make_the_locking_work_on_RT.patch b/debian/patches-rt/tty_serial_pl011__Make_the_locking_work_on_RT.patch
new file mode 100644
index 0000000000..32ff14fba0
--- /dev/null
+++ b/debian/patches-rt/tty_serial_pl011__Make_the_locking_work_on_RT.patch
@@ -0,0 +1,46 @@
+Subject: tty/serial/pl011: Make the locking work on RT
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue Jan 8 21:36:51 2013 +0100
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+The lock is a sleeping lock and local_irq_save() is not the optimisation
+we are looking for. Redo it to make it work on -RT and non-RT.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+
+---
+ drivers/tty/serial/amba-pl011.c | 12 ++++--------
+ 1 file changed, 4 insertions(+), 8 deletions(-)
+---
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -2332,13 +2332,10 @@ pl011_console_write(struct console *co,
+
+ clk_enable(uap->clk);
+
+- local_irq_save(flags);
+- if (uap->port.sysrq)
+- locked = 0;
+- else if (oops_in_progress)
+- locked = uart_port_trylock(&uap->port);
++ if (uap->port.sysrq || oops_in_progress)
++ locked = uart_port_trylock_irqsave(&uap->port, &flags);
+ else
+- uart_port_lock(&uap->port);
++ uart_port_lock_irqsave(&uap->port, &flags);
+
+ /*
+ * First save the CR then disable the interrupts
+@@ -2364,8 +2361,7 @@ pl011_console_write(struct console *co,
+ pl011_write(old_cr, uap, REG_CR);
+
+ if (locked)
+- uart_port_unlock(&uap->port);
+- local_irq_restore(flags);
++ uart_port_unlock_irqrestore(&uap->port, flags);
+
+ clk_disable(uap->clk);
+ }
diff --git a/debian/patches-rt/x86__Allow_to_enable_RT.patch b/debian/patches-rt/x86__Allow_to_enable_RT.patch
new file mode 100644
index 0000000000..3fa433084a
--- /dev/null
+++ b/debian/patches-rt/x86__Allow_to_enable_RT.patch
@@ -0,0 +1,27 @@
+Subject: x86: Allow to enable RT
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed Aug 7 18:15:38 2019 +0200
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+Allow to select RT.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+
+---
+ arch/x86/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+---
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -28,6 +28,7 @@ config X86_64
+ select ARCH_HAS_GIGANTIC_PAGE
+ select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
+ select ARCH_SUPPORTS_PER_VMA_LOCK
++ select ARCH_SUPPORTS_RT
+ select ARCH_USE_CMPXCHG_LOCKREF
+ select HAVE_ARCH_SOFT_DIRTY
+ select MODULES_USE_ELF_RELA
diff --git a/debian/patches-rt/x86__Enable_RT_also_on_32bit.patch b/debian/patches-rt/x86__Enable_RT_also_on_32bit.patch
new file mode 100644
index 0000000000..3092b41acb
--- /dev/null
+++ b/debian/patches-rt/x86__Enable_RT_also_on_32bit.patch
@@ -0,0 +1,33 @@
+Subject: x86: Enable RT also on 32bit
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu Nov 7 17:49:20 2019 +0100
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+
+---
+ arch/x86/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+---
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -28,7 +28,6 @@ config X86_64
+ select ARCH_HAS_GIGANTIC_PAGE
+ select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
+ select ARCH_SUPPORTS_PER_VMA_LOCK
+- select ARCH_SUPPORTS_RT
+ select ARCH_USE_CMPXCHG_LOCKREF
+ select HAVE_ARCH_SOFT_DIRTY
+ select MODULES_USE_ELF_RELA
+@@ -118,6 +117,7 @@ config X86
+ select ARCH_USES_CFI_TRAPS if X86_64 && CFI_CLANG
+ select ARCH_SUPPORTS_LTO_CLANG
+ select ARCH_SUPPORTS_LTO_CLANG_THIN
++ select ARCH_SUPPORTS_RT
+ select ARCH_USE_BUILTIN_BSWAP
+ select ARCH_USE_MEMTEST
+ select ARCH_USE_QUEUED_RWLOCKS
diff --git a/debian/patches-rt/zram-Replace-bit-spinlocks-with-spinlock_t-for-PREEM.patch b/debian/patches-rt/zram-Replace-bit-spinlocks-with-spinlock_t-for-PREEM.patch
new file mode 100644
index 0000000000..0ccb2d87cc
--- /dev/null
+++ b/debian/patches-rt/zram-Replace-bit-spinlocks-with-spinlock_t-for-PREEM.patch
@@ -0,0 +1,94 @@
+From: Mike Galbraith <umgwanakikbuti@gmail.com>
+Date: Thu, 31 Mar 2016 04:08:28 +0200
+Subject: [PATCH] zram: Replace bit spinlocks with spinlock_t for PREEMPT_RT.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+The bit spinlock disables preemption. The spinlock_t lock becomes a sleeping
+lock on PREEMPT_RT and it can not be acquired in this context. In this locked
+section, zs_free() acquires a zs_pool::lock, and there is access to
+zram::wb_limit_lock.
+
+Use a spinlock_t on PREEMPT_RT for locking and set/ clear ZRAM_LOCK bit after
+the lock has been acquired/ dropped.
+
+Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lkml.kernel.org/r/YqIbMuHCPiQk+Ac2@linutronix.de
+Link: https://lore.kernel.org/20230323161830.jFbWCosd@linutronix.de
+---
+ drivers/block/zram/zram_drv.c | 37 +++++++++++++++++++++++++++++++++++++
+ drivers/block/zram/zram_drv.h | 3 +++
+ 2 files changed, 40 insertions(+)
+
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -57,6 +57,41 @@ static void zram_free_page(struct zram *
+ static int zram_read_page(struct zram *zram, struct page *page, u32 index,
+ struct bio *parent);
+
++#ifdef CONFIG_PREEMPT_RT
++static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages)
++{
++ size_t index;
++
++ for (index = 0; index < num_pages; index++)
++ spin_lock_init(&zram->table[index].lock);
++}
++
++static int zram_slot_trylock(struct zram *zram, u32 index)
++{
++ int ret;
++
++ ret = spin_trylock(&zram->table[index].lock);
++ if (ret)
++ __set_bit(ZRAM_LOCK, &zram->table[index].flags);
++ return ret;
++}
++
++static void zram_slot_lock(struct zram *zram, u32 index)
++{
++ spin_lock(&zram->table[index].lock);
++ __set_bit(ZRAM_LOCK, &zram->table[index].flags);
++}
++
++static void zram_slot_unlock(struct zram *zram, u32 index)
++{
++ __clear_bit(ZRAM_LOCK, &zram->table[index].flags);
++ spin_unlock(&zram->table[index].lock);
++}
++
++#else
++
++static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages) { }
++
+ static int zram_slot_trylock(struct zram *zram, u32 index)
+ {
+ return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
+@@ -71,6 +106,7 @@ static void zram_slot_unlock(struct zram
+ {
+ bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
+ }
++#endif
+
+ static inline bool init_done(struct zram *zram)
+ {
+@@ -1245,6 +1281,7 @@ static bool zram_meta_alloc(struct zram
+
+ if (!huge_class_size)
+ huge_class_size = zs_huge_class_size(zram->mem_pool);
++ zram_meta_init_table_locks(zram, num_pages);
+ return true;
+ }
+
+--- a/drivers/block/zram/zram_drv.h
++++ b/drivers/block/zram/zram_drv.h
+@@ -69,6 +69,9 @@ struct zram_table_entry {
+ unsigned long element;
+ };
+ unsigned long flags;
++#ifdef CONFIG_PREEMPT_RT
++ spinlock_t lock;
++#endif
+ #ifdef CONFIG_ZRAM_MEMORY_TRACKING
+ ktime_t ac_time;
+ #endif