summaryrefslogtreecommitdiffstats
path: root/debian/patches-rt
diff options
context:
space:
mode:
Diffstat (limited to 'debian/patches-rt')
-rw-r--r--debian/patches-rt/0001-ARM-vfp-Provide-vfp_lock-for-VFP-locking.patch2
-rw-r--r--debian/patches-rt/0001-arm-Disable-jump-label-on-PREEMPT_RT.patch4
-rw-r--r--debian/patches-rt/0001-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch (renamed from debian/patches-rt/0003-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch)12
-rw-r--r--debian/patches-rt/0001-locking-local_lock-Introduce-guard-definition-for-lo.patch34
-rw-r--r--debian/patches-rt/0001-net-Remove-conditional-threaded-NAPI-wakeup-based-on.patch75
-rw-r--r--debian/patches-rt/0001-net-Remove-task_struct-bpf_net_context-init-on-fork.patch32
-rw-r--r--debian/patches-rt/0001-net-tcp-dccp-prepare-for-tw_timer-un-pinning.patch188
-rw-r--r--debian/patches-rt/0001-perf-Move-irq_work_queue-where-the-event-is-prepared.patch31
-rw-r--r--debian/patches-rt/0001-printk-Add-notation-to-console_srcu-locking.patch (renamed from debian/patches-rt/0006-printk-Add-notation-to-console_srcu-locking.patch)5
-rw-r--r--debian/patches-rt/0001-printk-ringbuffer-Clarify-special-lpos-values.patch95
-rw-r--r--debian/patches-rt/0001-sched-core-Provide-a-method-to-check-if-a-task-is-PI.patch6
-rw-r--r--debian/patches-rt/0001-serial-amba-pl011-Use-uart_prepare_sysrq_char.patch92
-rw-r--r--debian/patches-rt/0001-zram-Replace-bit-spinlocks-with-a-spinlock_t.patch (renamed from debian/patches-rt/zram-Replace-bit-spinlocks-with-spinlock_t-for-PREEM.patch)64
-rw-r--r--debian/patches-rt/0002-ARM-vfp-Use-vfp_lock-in-vfp_sync_hwstate.patch2
-rw-r--r--debian/patches-rt/0002-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch (renamed from debian/patches-rt/0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch)84
-rw-r--r--debian/patches-rt/0002-locking-local_lock-Add-local-nested-BH-locking-infra.patch141
-rw-r--r--debian/patches-rt/0002-net-Allow-to-use-SMP-threads-for-backlog-NAPI.patch330
-rw-r--r--debian/patches-rt/0002-net-Optimize-xdp_do_flush-with-bpf_net_context-infos.patch291
-rw-r--r--debian/patches-rt/0002-net-tcp-un-pin-the-tw_timer.patch29
-rw-r--r--debian/patches-rt/0002-perf-Enqueue-SIGTRAP-always-via-task_work.patch160
-rw-r--r--debian/patches-rt/0002-printk-Properly-deal-with-nbcon-consoles-on-seq-init.patch190
-rw-r--r--debian/patches-rt/0002-serial-ar933x-Use-uart_prepare_sysrq_char.patch68
-rw-r--r--debian/patches-rt/0002-softirq-Add-function-to-preempt-serving-softirqs.patch4
-rw-r--r--debian/patches-rt/0002-task_work-Add-TWA_NMI_CURRENT-as-an-additional-notif.patch102
-rw-r--r--debian/patches-rt/0002-zram-Remove-ZRAM_LOCK.patch66
-rw-r--r--debian/patches-rt/0003-ARM-vfp-Use-vfp_lock-in-vfp_support_entry.patch2
-rw-r--r--debian/patches-rt/0003-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch (renamed from debian/patches-rt/0005-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch)25
-rw-r--r--debian/patches-rt/0003-net-Move-flush-list-retrieval-to-where-it-is-used.patch88
-rw-r--r--debian/patches-rt/0003-net-Use-__napi_alloc_frag_align-instead-of-open-codi.patch42
-rw-r--r--debian/patches-rt/0003-net-Use-backlog-NAPI-to-clean-up-the-defer_list.patch121
-rw-r--r--debian/patches-rt/0003-perf-Enqueue-SIGTRAP-always-via-task_work.patch120
-rw-r--r--debian/patches-rt/0003-perf-Remove-perf_swevent_get_recursion_context-from-.patch55
-rw-r--r--debian/patches-rt/0003-printk-Avoid-non-panic-CPUs-writing-to-ringbuffer.patch86
-rw-r--r--debian/patches-rt/0003-printk-nbcon-Remove-return-value-for-write_atomic.patch (renamed from debian/patches-rt/0008-printk-nbcon-Remove-return-value-for-write_atomic.patch)7
-rw-r--r--debian/patches-rt/0003-serial-bcm63xx-Use-uart_prepare_sysrq_char.patch78
-rw-r--r--debian/patches-rt/0003-tcp-move-inet_twsk_schedule-helper-out-of-header.patch45
-rw-r--r--debian/patches-rt/0003-time-Allow-to-preempt-after-a-callback.patch4
-rw-r--r--debian/patches-rt/0003-zram-Shrink-zram_table_entry-flags.patch48
-rw-r--r--debian/patches-rt/0004-ARM-vfp-Move-sending-signals-outside-of-vfp_lock-ed-.patch2
-rw-r--r--debian/patches-rt/0004-drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch (renamed from debian/patches-rt/0006-drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch)27
-rw-r--r--debian/patches-rt/0004-net-Rename-rps_lock-to-backlog_lock.patch164
-rw-r--r--debian/patches-rt/0004-net-Use-nested-BH-locking-for-napi_alloc_cache.patch131
-rw-r--r--debian/patches-rt/0004-perf-Shrink-the-size-of-the-recursion-counter.patch66
-rw-r--r--debian/patches-rt/0004-printk-Check-printk_deferred_enter-_exit-usage.patch (renamed from debian/patches-rt/0009-printk-Check-printk_deferred_enter-_exit-usage.patch)4
-rw-r--r--debian/patches-rt/0004-serial-meson-Use-uart_prepare_sysrq_char.patch77
-rw-r--r--debian/patches-rt/0005-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch (renamed from debian/patches-rt/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch)4
-rw-r--r--debian/patches-rt/0005-net-tcp_sigpool-Use-nested-BH-locking-for-sigpool_sc.patch75
-rw-r--r--debian/patches-rt/0005-perf-Move-swevent_htable-recursion-into-task_struct.patch121
-rw-r--r--debian/patches-rt/0005-printk-nbcon-Add-detailed-doc-for-write_atomic.patch (renamed from debian/patches-rt/0010-printk-nbcon-Add-detailed-doc-for-write_atomic.patch)42
-rw-r--r--debian/patches-rt/0005-serial-msm-Use-uart_prepare_sysrq_char.patch116
-rw-r--r--debian/patches-rt/0006-drm-i915-Drop-the-irqs_disabled-check.patch (renamed from debian/patches-rt/0010-drm-i915-Drop-the-irqs_disabled-check.patch)9
-rw-r--r--debian/patches-rt/0006-net-ipv4-Use-nested-BH-locking-for-ipv4_tcp_sk.patch94
-rw-r--r--debian/patches-rt/0006-perf-Don-t-disable-preemption-in-perf_pending_task.patch60
-rw-r--r--debian/patches-rt/0006-printk-nbcon-Add-callbacks-to-synchronize-with-drive.patch (renamed from debian/patches-rt/0011-printk-nbcon-Add-callbacks-to-synchronize-with-drive.patch)47
-rw-r--r--debian/patches-rt/0006-serial-omap-Use-uart_prepare_sysrq_char.patch66
-rw-r--r--debian/patches-rt/0007-drm-i915-guc-Consider-also-RCU-depth-in-busy-loop.patch (renamed from debian/patches-rt/drm-i915-guc-Consider-also-RCU-depth-in-busy-loop.patch)5
-rw-r--r--debian/patches-rt/0007-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch29
-rw-r--r--debian/patches-rt/0007-netfilter-br_netfilter-Use-nested-BH-locking-for-brn.patch98
-rw-r--r--debian/patches-rt/0007-perf-Split-__perf_pending_irq-out-of-perf_pending_ir.patch (renamed from debian/patches-rt/0004-perf-Split-__perf_pending_irq-out-of-perf_pending_ir.patch)44
-rw-r--r--debian/patches-rt/0007-printk-Properly-deal-with-nbcon-consoles-on-seq-init.patch116
-rw-r--r--debian/patches-rt/0007-printk-nbcon-Use-driver-synchronization-while-un-reg.patch116
-rw-r--r--debian/patches-rt/0007-serial-pxa-Use-uart_prepare_sysrq_char.patch68
-rw-r--r--debian/patches-rt/0008-Revert-drm-i915-Depend-on-PREEMPT_RT.patch (renamed from debian/patches-rt/Revert-drm-i915-Depend-on-PREEMPT_RT.patch)5
-rw-r--r--debian/patches-rt/0008-drm-i915-gt-Queue-and-wait-for-the-irq_work-item.patch42
-rw-r--r--debian/patches-rt/0008-net-softnet_data-Make-xmit-per-task.patch227
-rw-r--r--debian/patches-rt/0008-serial-core-Provide-low-level-functions-to-lock-port.patch (renamed from debian/patches-rt/0013-serial-core-Provide-low-level-functions-to-lock-port.patch)8
-rw-r--r--debian/patches-rt/0008-serial-sunplus-Use-uart_prepare_sysrq_char.patch67
-rw-r--r--debian/patches-rt/0009-dev-Remove-PREEMPT_RT-ifdefs-from-backlog_lock.patch60
-rw-r--r--debian/patches-rt/0009-serial-core-Introduce-wrapper-to-set-uart_port-cons.patch122
-rw-r--r--debian/patches-rt/0009-serial-lpc32xx_hs-Use-uart_prepare_sysrq_char-to-han.patch68
-rw-r--r--debian/patches-rt/0010-console-Improve-console_srcu_read_flags-comments.patch73
-rw-r--r--debian/patches-rt/0010-dev-Use-nested-BH-locking-for-softnet_data.process_q.patch94
-rw-r--r--debian/patches-rt/0010-serial-owl-Use-uart_prepare_sysrq_char-to-handle-sys.patch104
-rw-r--r--debian/patches-rt/0011-lwt-Don-t-disable-migration-prio-invoking-BPF.patch40
-rw-r--r--debian/patches-rt/0011-nbcon-Add-API-to-acquire-context-for-non-printing-op.patch180
-rw-r--r--debian/patches-rt/0011-serial-rda-Use-uart_prepare_sysrq_char-to-handle-sys.patch91
-rw-r--r--debian/patches-rt/0012-printk-nbcon-Use-driver-synchronization-while-regist.patch68
-rw-r--r--debian/patches-rt/0012-seg6-Use-nested-BH-locking-for-seg6_bpf_srh_states.patch152
-rw-r--r--debian/patches-rt/0012-serial-core-Implement-processing-in-port-lock-wrappe.patch182
-rw-r--r--debian/patches-rt/0012-serial-sifive-Use-uart_prepare_sysrq_char-to-handle-.patch71
-rw-r--r--debian/patches-rt/0013-net-Use-nested-BH-locking-for-bpf_scratchpad.patch70
-rw-r--r--debian/patches-rt/0013-printk-nbcon-Do-not-rely-on-proxy-headers.patch (renamed from debian/patches-rt/0015-printk-nbcon-Do-not-rely-on-proxy-headers.patch)13
-rw-r--r--debian/patches-rt/0013-serial-pch-Invoke-handle_rx_to-directly.patch39
-rw-r--r--debian/patches-rt/0014-net-Reference-bpf_redirect_info-via-task_struct-on-P.patch657
-rw-r--r--debian/patches-rt/0014-printk-Make-console_is_usable-available-to-nbcon.patch (renamed from debian/patches-rt/0017-printk-Make-console_is_usable-available-to-nbcon.patch)10
-rw-r--r--debian/patches-rt/0014-printk-nbcon-Implement-processing-in-port-lock-wrapp.patch478
-rw-r--r--debian/patches-rt/0014-serial-pch-Make-push_rx-return-void.patch56
-rw-r--r--debian/patches-rt/0015-net-Move-per-CPU-flush-lists-to-bpf_net_context-on-P.patch270
-rw-r--r--debian/patches-rt/0015-printk-Let-console_is_usable-handle-nbcon.patch (renamed from debian/patches-rt/0018-printk-Let-console_is_usable-handle-nbcon.patch)4
-rw-r--r--debian/patches-rt/0015-serial-pch-Don-t-disable-interrupts-while-acquiring-.patch42
-rw-r--r--debian/patches-rt/0016-printk-Add-flags-argument-for-console_is_usable.patch (renamed from debian/patches-rt/0019-printk-Add-flags-argument-for-console_is_usable.patch)10
-rw-r--r--debian/patches-rt/0016-printk-nbcon-Fix-kerneldoc-for-enums.patch36
-rw-r--r--debian/patches-rt/0016-serial-pch-Don-t-initialize-uart_port-s-spin_lock.patch28
-rw-r--r--debian/patches-rt/0017-printk-nbcon-Add-helper-to-assign-priority-based-on-.patch67
-rw-r--r--debian/patches-rt/0017-serial-pch-Remove-eg20t_port-lock.patch128
-rw-r--r--debian/patches-rt/0018-printk-nbcon-Provide-function-to-flush-using-write_a.patch268
-rw-r--r--debian/patches-rt/0018-serial-pch-Use-uart_prepare_sysrq_char.patch79
-rw-r--r--debian/patches-rt/0019-printk-Track-registered-boot-consoles.patch (renamed from debian/patches-rt/0021-printk-Track-registered-boot-consoles.patch)15
-rw-r--r--debian/patches-rt/0020-printk-nbcon-Provide-function-to-flush-using-write_a.patch193
-rw-r--r--debian/patches-rt/0020-printk-nbcon-Use-nbcon-consoles-in-console_flush_all.patch (renamed from debian/patches-rt/0022-printk-nbcon-Use-nbcon-consoles-in-console_flush_all.patch)76
-rw-r--r--debian/patches-rt/0021-printk-nbcon-Add-unsafe-flushing-on-panic.patch159
-rw-r--r--debian/patches-rt/0022-printk-Avoid-console_lock-dance-if-no-legacy-or-boot.patch (renamed from debian/patches-rt/0025-printk-Avoid-console_lock-dance-if-no-legacy-or-boot.patch)44
-rw-r--r--debian/patches-rt/0023-printk-Track-nbcon-consoles.patch (renamed from debian/patches-rt/0026-printk-Track-nbcon-consoles.patch)23
-rw-r--r--debian/patches-rt/0023-printk-nbcon-Assign-priority-based-on-CPU-state.patch84
-rw-r--r--debian/patches-rt/0024-printk-Coordinate-direct-printing-in-panic.patch (renamed from debian/patches-rt/0027-printk-Coordinate-direct-printing-in-panic.patch)27
-rw-r--r--debian/patches-rt/0024-printk-nbcon-Add-unsafe-flushing-on-panic.patch125
-rw-r--r--debian/patches-rt/0025-printk-nbcon-Implement-emergency-sections.patch (renamed from debian/patches-rt/0028-printk-nbcon-Implement-emergency-sections.patch)170
-rw-r--r--debian/patches-rt/0026-panic-Mark-emergency-section-in-warn.patch (renamed from debian/patches-rt/0029-panic-Mark-emergency-section-in-warn.patch)9
-rw-r--r--debian/patches-rt/0027-panic-Mark-emergency-section-in-oops.patch (renamed from debian/patches-rt/0030-panic-Mark-emergency-section-in-oops.patch)9
-rw-r--r--debian/patches-rt/0028-rcu-Mark-emergency-sections-in-rcu-stalls.patch (renamed from debian/patches-rt/0031-rcu-Mark-emergency-sections-in-rcu-stalls.patch)52
-rw-r--r--debian/patches-rt/0029-lockdep-Mark-emergency-sections-in-lockdep-splats.patch (renamed from debian/patches-rt/0032-lockdep-Mark-emergency-sections-in-lockdep-splats.patch)143
-rw-r--r--debian/patches-rt/0030-printk-Rename-console_replay_all-and-update-context.patch77
-rw-r--r--debian/patches-rt/0031-printk-nbcon-Introduce-printing-kthreads.patch (renamed from debian/patches-rt/0033-printk-nbcon-Introduce-printing-kthreads.patch)141
-rw-r--r--debian/patches-rt/0032-printk-Atomic-print-in-printk-context-on-shutdown.patch62
-rw-r--r--debian/patches-rt/0033-printk-nbcon-Fix-nbcon_cpu_emergency_flush-when-pree.patch46
-rw-r--r--debian/patches-rt/0034-printk-Atomic-print-in-printk-context-on-shutdown.patch40
-rw-r--r--debian/patches-rt/0034-printk-nbcon-Add-context-to-console_is_usable.patch (renamed from debian/patches-rt/0035-printk-nbcon-Add-context-to-console_is_usable.patch)47
-rw-r--r--debian/patches-rt/0035-printk-nbcon-Add-printer-thread-wakeups.patch (renamed from debian/patches-rt/0036-printk-nbcon-Add-printer-thread-wakeups.patch)45
-rw-r--r--debian/patches-rt/0036-printk-nbcon-Stop-threads-on-shutdown-reboot.patch (renamed from debian/patches-rt/0037-printk-nbcon-Stop-threads-on-shutdown-reboot.patch)16
-rw-r--r--debian/patches-rt/0037-printk-nbcon-Start-printing-threads.patch (renamed from debian/patches-rt/0038-printk-nbcon-Start-printing-threads.patch)24
-rw-r--r--debian/patches-rt/0038-printk-Provide-helper-for-message-prepending.patch (renamed from debian/patches-rt/0039-printk-Provide-helper-for-message-prepending.patch)8
-rw-r--r--debian/patches-rt/0039-printk-nbcon-Show-replay-message-on-takeover.patch (renamed from debian/patches-rt/0040-printk-nbcon-Show-replay-message-on-takeover.patch)31
-rw-r--r--debian/patches-rt/0040-printk-Add-kthread-for-all-legacy-consoles.patch (renamed from debian/patches-rt/0046-printk-Add-kthread-for-all-legacy-consoles.patch)178
-rw-r--r--debian/patches-rt/0041-proc-consoles-Add-notation-to-c_start-c_stop.patch34
-rw-r--r--debian/patches-rt/0042-proc-Add-nbcon-support-for-proc-consoles.patch (renamed from debian/patches-rt/0041-proc-Add-nbcon-support-for-proc-consoles.patch)4
-rw-r--r--debian/patches-rt/0043-tty-sysfs-Add-nbcon-support-for-active.patch (renamed from debian/patches-rt/0042-tty-sysfs-Add-nbcon-support-for-active.patch)4
-rw-r--r--debian/patches-rt/0044-printk-Provide-threadprintk-boot-argument.patch (renamed from debian/patches-rt/0047-printk-Provide-threadprintk-boot-argument.patch)10
-rw-r--r--debian/patches-rt/0045-printk-Avoid-false-positive-lockdep-report-for-legac.patch (renamed from debian/patches-rt/0048-printk-Avoid-false-positive-lockdep-report-for-legac.patch)10
-rw-r--r--debian/patches-rt/0046-printk-nbcon-Add-function-for-printers-to-reacquire-.patch (renamed from debian/patches-rt/0043-printk-nbcon-Provide-function-to-reacquire-ownership.patch)74
-rw-r--r--debian/patches-rt/0047-serial-8250-Switch-to-nbcon-console.patch (renamed from debian/patches-rt/0044-serial-8250-Switch-to-nbcon-console.patch)39
-rw-r--r--debian/patches-rt/0048-serial-8250-Revert-drop-lockdep-annotation-from-seri.patch (renamed from debian/patches-rt/0045-serial-8250-Revert-drop-lockdep-annotation-from-seri.patch)6
-rw-r--r--debian/patches-rt/ARM64__Allow_to_enable_RT.patch2
-rw-r--r--debian/patches-rt/ARM__Allow_to_enable_RT.patch8
-rw-r--r--debian/patches-rt/ARM__enable_irq_in_translation_section_permission_fault_handlers.patch6
-rw-r--r--debian/patches-rt/Add_localversion_for_-RT_release.patch4
-rw-r--r--debian/patches-rt/POWERPC__Allow_to_enable_RT.patch6
-rw-r--r--debian/patches-rt/PREEMPT_AUTO.patch98
-rw-r--r--debian/patches-rt/arm-Disable-FAST_GUP-on-PREEMPT_RT-if-HIGHPTE-is-als.patch8
-rw-r--r--debian/patches-rt/bpf-Remove-tst_run-from-lwt_seg6local_prog_ops.patch40
-rw-r--r--debian/patches-rt/powerpc-pseries-Select-the-generic-memory-allocator.patch2
-rw-r--r--debian/patches-rt/powerpc__traps__Use_PREEMPT_RT.patch2
-rw-r--r--debian/patches-rt/powerpc_kvm__Disable_in-kernel_MPIC_emulation_for_PREEMPT_RT.patch4
-rw-r--r--debian/patches-rt/powerpc_pseries_iommu__Use_a_locallock_instead_local_irq_save.patch2
-rw-r--r--debian/patches-rt/powerpc_stackprotector__work_around_stack-guard_init_from_atomic.patch2
-rw-r--r--debian/patches-rt/prinkt-nbcon-Add-a-scheduling-point-to-nbcon_kthread.patch34
-rw-r--r--debian/patches-rt/rcutorture-Also-force-sched-priority-to-timersd-on-b.patch8
-rw-r--r--debian/patches-rt/riscv-add-PREEMPT_AUTO-support.patch4
-rw-r--r--debian/patches-rt/riscv-allow-to-enable-RT.patch8
-rw-r--r--debian/patches-rt/sched-rt-Don-t-try-push-tasks-if-there-are-none.patch4
-rw-r--r--debian/patches-rt/series210
-rw-r--r--debian/patches-rt/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch26
-rw-r--r--debian/patches-rt/softirq-Wake-ktimers-thread-also-in-softirq.patch4
-rw-r--r--debian/patches-rt/sysfs__Add__sys_kernel_realtime_entry.patch8
-rw-r--r--debian/patches-rt/task_work-make-TWA_NMI_CURRENT-handling-conditional-.patch68
-rw-r--r--debian/patches-rt/tick-Fix-timer-storm-since-introduction-of-timersd.patch12
-rw-r--r--debian/patches-rt/tun-Add-missing-bpf_net_ctx_clear-in-do_xdp_generic.patch34
-rw-r--r--debian/patches-rt/tun-Assign-missing-bpf_net_context.patch114
-rw-r--r--debian/patches-rt/x86__Allow_to_enable_RT.patch2
-rw-r--r--debian/patches-rt/x86__Enable_RT_also_on_32bit.patch4
159 files changed, 6490 insertions, 4577 deletions
diff --git a/debian/patches-rt/0001-ARM-vfp-Provide-vfp_lock-for-VFP-locking.patch b/debian/patches-rt/0001-ARM-vfp-Provide-vfp_lock-for-VFP-locking.patch
index b2ee39b051..126b521923 100644
--- a/debian/patches-rt/0001-ARM-vfp-Provide-vfp_lock-for-VFP-locking.patch
+++ b/debian/patches-rt/0001-ARM-vfp-Provide-vfp_lock-for-VFP-locking.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 19 May 2023 16:57:29 +0200
Subject: [PATCH 1/4] ARM: vfp: Provide vfp_lock() for VFP locking.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
kernel_neon_begin() uses local_bh_disable() to ensure exclusive access
to the VFP unit. This is broken on PREEMPT_RT because a BH disabled
diff --git a/debian/patches-rt/0001-arm-Disable-jump-label-on-PREEMPT_RT.patch b/debian/patches-rt/0001-arm-Disable-jump-label-on-PREEMPT_RT.patch
index 7fbdfa5062..c427bf0803 100644
--- a/debian/patches-rt/0001-arm-Disable-jump-label-on-PREEMPT_RT.patch
+++ b/debian/patches-rt/0001-arm-Disable-jump-label-on-PREEMPT_RT.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 8 Jul 2015 17:14:48 +0200
Subject: [PATCH 1/2] arm: Disable jump-label on PREEMPT_RT.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
jump-labels are used to efficiently switch between two possible code
paths. To achieve this, stop_machine() is used to keep the CPU in a
@@ -25,7 +25,7 @@ Link: https://lkml.kernel.org/r/20220613182447.112191-2-bigeasy@linutronix.de
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -75,7 +75,7 @@ config ARM
+@@ -77,7 +77,7 @@ config ARM
select HAS_IOPORT
select HAVE_ARCH_AUDITSYSCALL if AEABI && !OABI_COMPAT
select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
diff --git a/debian/patches-rt/0003-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch b/debian/patches-rt/0001-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch
index 86275329f1..7c4ce85fa4 100644
--- a/debian/patches-rt/0003-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch
+++ b/debian/patches-rt/0001-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch
@@ -1,8 +1,8 @@
From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Sat, 27 Feb 2016 08:09:11 +0100
-Subject: [PATCH 03/10] drm/i915: Use preempt_disable/enable_rt() where
+Subject: [PATCH 1/8] drm/i915: Use preempt_disable/enable_rt() where
recommended
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Mario Kleiner suggest in commit
ad3543ede630f ("drm/intel: Push get_scanout_position() timestamping into kms driver.")
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
-@@ -275,6 +275,26 @@ int intel_crtc_scanline_to_hw(struct int
+@@ -276,6 +276,26 @@ int intel_crtc_scanline_to_hw(struct int
* all register accesses to the same cacheline to be serialized,
* otherwise they may hang.
*/
@@ -61,7 +61,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void intel_vblank_section_enter(struct drm_i915_private *i915)
__acquires(i915->uncore.lock)
{
-@@ -332,10 +352,10 @@ static bool i915_get_crtc_scanoutpos(str
+@@ -333,10 +353,10 @@ static bool i915_get_crtc_scanoutpos(str
* timing critical raw register reads, potentially with
* preemption disabled, so the following code must not block.
*/
@@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Get optional system timestamp before query. */
if (stime)
-@@ -399,10 +419,10 @@ static bool i915_get_crtc_scanoutpos(str
+@@ -400,10 +420,10 @@ static bool i915_get_crtc_scanoutpos(str
if (etime)
*etime = ktime_get();
@@ -89,7 +89,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* While in vblank, position will be negative
-@@ -440,13 +460,11 @@ int intel_get_crtc_scanline(struct intel
+@@ -441,13 +461,11 @@ int intel_get_crtc_scanline(struct intel
unsigned long irqflags;
int position;
diff --git a/debian/patches-rt/0001-locking-local_lock-Introduce-guard-definition-for-lo.patch b/debian/patches-rt/0001-locking-local_lock-Introduce-guard-definition-for-lo.patch
new file mode 100644
index 0000000000..9f0d4bff7c
--- /dev/null
+++ b/debian/patches-rt/0001-locking-local_lock-Introduce-guard-definition-for-lo.patch
@@ -0,0 +1,34 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 23 Oct 2023 16:14:09 +0200
+Subject: [PATCH 01/15] locking/local_lock: Introduce guard definition for
+ local_lock.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+Introduce lock guard definition for local_lock_t. There are no users
+yet.
+
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/local_lock.h | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/include/linux/local_lock.h
++++ b/include/linux/local_lock.h
+@@ -51,4 +51,15 @@
+ #define local_unlock_irqrestore(lock, flags) \
+ __local_unlock_irqrestore(lock, flags)
+
++DEFINE_GUARD(local_lock, local_lock_t __percpu*,
++ local_lock(_T),
++ local_unlock(_T))
++DEFINE_GUARD(local_lock_irq, local_lock_t __percpu*,
++ local_lock_irq(_T),
++ local_unlock_irq(_T))
++DEFINE_LOCK_GUARD_1(local_lock_irqsave, local_lock_t __percpu,
++ local_lock_irqsave(_T->lock, _T->flags),
++ local_unlock_irqrestore(_T->lock, _T->flags),
++ unsigned long flags)
++
+ #endif
diff --git a/debian/patches-rt/0001-net-Remove-conditional-threaded-NAPI-wakeup-based-on.patch b/debian/patches-rt/0001-net-Remove-conditional-threaded-NAPI-wakeup-based-on.patch
deleted file mode 100644
index ae507f979e..0000000000
--- a/debian/patches-rt/0001-net-Remove-conditional-threaded-NAPI-wakeup-based-on.patch
+++ /dev/null
@@ -1,75 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Sat, 9 Mar 2024 10:05:09 +0100
-Subject: [PATCH 1/4] net: Remove conditional threaded-NAPI wakeup based on
- task state.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-A NAPI thread is scheduled by first setting NAPI_STATE_SCHED bit. If
-successful (the bit was not yet set) then the NAPI_STATE_SCHED_THREADED
-is set but only if thread's state is not TASK_INTERRUPTIBLE (is
-TASK_RUNNING) followed by task wakeup.
-
-If the task is idle (TASK_INTERRUPTIBLE) then the
-NAPI_STATE_SCHED_THREADED bit is not set. The thread is no relying on
-the bit but always leaving the wait-loop after returning from schedule()
-because there must have been a wakeup.
-
-The smpboot-threads implementation for per-CPU threads requires an
-explicit condition and does not support "if we get out of schedule()
-then there must be something to do".
-
-Removing this optimisation simplifies the following integration.
-
-Set NAPI_STATE_SCHED_THREADED unconditionally on wakeup and rely on it
-in the wait path by removing the `woken' condition.
-
-Acked-by: Jakub Kicinski <kuba@kernel.org>
-Link: https://lore.kernel.org/r/20240309090824.2956805-2-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- net/core/dev.c | 14 ++------------
- 1 file changed, 2 insertions(+), 12 deletions(-)
-
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -4452,13 +4452,7 @@ static inline void ____napi_schedule(str
- */
- thread = READ_ONCE(napi->thread);
- if (thread) {
-- /* Avoid doing set_bit() if the thread is in
-- * INTERRUPTIBLE state, cause napi_thread_wait()
-- * makes sure to proceed with napi polling
-- * if the thread is explicitly woken from here.
-- */
-- if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE)
-- set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
-+ set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
- wake_up_process(thread);
- return;
- }
-@@ -6654,8 +6648,6 @@ static int napi_poll(struct napi_struct
-
- static int napi_thread_wait(struct napi_struct *napi)
- {
-- bool woken = false;
--
- set_current_state(TASK_INTERRUPTIBLE);
-
- while (!kthread_should_stop()) {
-@@ -6664,15 +6656,13 @@ static int napi_thread_wait(struct napi_
- * Testing SCHED bit is not enough because SCHED bit might be
- * set by some other busy poll thread or by napi_disable().
- */
-- if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) {
-+ if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state)) {
- WARN_ON(!list_empty(&napi->poll_list));
- __set_current_state(TASK_RUNNING);
- return 0;
- }
-
- schedule();
-- /* woken being true indicates this thread owns this napi. */
-- woken = true;
- set_current_state(TASK_INTERRUPTIBLE);
- }
- __set_current_state(TASK_RUNNING);
diff --git a/debian/patches-rt/0001-net-Remove-task_struct-bpf_net_context-init-on-fork.patch b/debian/patches-rt/0001-net-Remove-task_struct-bpf_net_context-init-on-fork.patch
new file mode 100644
index 0000000000..2309345d52
--- /dev/null
+++ b/debian/patches-rt/0001-net-Remove-task_struct-bpf_net_context-init-on-fork.patch
@@ -0,0 +1,32 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 28 Jun 2024 12:18:54 +0200
+Subject: [PATCH 1/3] net: Remove task_struct::bpf_net_context init on fork.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+There is no clone() invocation within a bpf_net_ctx_…() block. Therefore
+the task_struct::bpf_net_context has always to be NULL and an explicit
+initialisation is not required.
+
+Remove the NULL assignment in the clone() path.
+
+Suggested-by: Jakub Kicinski <kuba@kernel.org>
+Reviewed-by: Jakub Kicinski <kuba@kernel.org>
+Link: https://lore.kernel.org/r/20240628103020.1766241-2-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/fork.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -2355,7 +2355,6 @@ static void rv_task_fork(struct task_str
+ RCU_INIT_POINTER(p->bpf_storage, NULL);
+ p->bpf_ctx = NULL;
+ #endif
+- p->bpf_net_context = NULL;
+
+ /* Perform scheduler related setup. Assign this task to a CPU. */
+ retval = sched_fork(clone_flags, p);
diff --git a/debian/patches-rt/0001-net-tcp-dccp-prepare-for-tw_timer-un-pinning.patch b/debian/patches-rt/0001-net-tcp-dccp-prepare-for-tw_timer-un-pinning.patch
new file mode 100644
index 0000000000..7fd7164b95
--- /dev/null
+++ b/debian/patches-rt/0001-net-tcp-dccp-prepare-for-tw_timer-un-pinning.patch
@@ -0,0 +1,188 @@
+From: Valentin Schneider <vschneid@redhat.com>
+Date: Tue, 4 Jun 2024 16:08:47 +0200
+Subject: [PATCH 1/3] net: tcp/dccp: prepare for tw_timer un-pinning
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+The TCP timewait timer is proving to be problematic for setups where
+scheduler CPU isolation is achieved at runtime via cpusets (as opposed to
+statically via isolcpus=domains).
+
+What happens there is a CPU goes through tcp_time_wait(), arming the
+time_wait timer, then gets isolated. TCP_TIMEWAIT_LEN later, the timer
+fires, causing interference for the now-isolated CPU. This is conceptually
+similar to the issue described in commit e02b93124855 ("workqueue: Unbind
+kworkers before sending them to exit()")
+
+Move inet_twsk_schedule() to within inet_twsk_hashdance(), with the ehash
+lock held. Expand the lock's critical section from inet_twsk_kill() to
+inet_twsk_deschedule_put(), serializing the scheduling vs descheduling of
+the timer. IOW, this prevents the following race:
+
+ tcp_time_wait()
+ inet_twsk_hashdance()
+ inet_twsk_deschedule_put()
+ del_timer_sync()
+ inet_twsk_schedule()
+
+Thanks to Paolo Abeni for suggesting to leverage the ehash lock.
+
+This also restores a comment from commit ec94c2696f0b ("tcp/dccp: avoid
+one atomic operation for timewait hashdance") as inet_twsk_hashdance() had
+a "Step 1" and "Step 3" comment, but the "Step 2" had gone missing.
+
+inet_twsk_deschedule_put() now acquires the ehash spinlock to synchronize
+with inet_twsk_hashdance_schedule().
+
+To ease possible regression search, actual un-pin is done in next patch.
+
+Link: https://lore.kernel.org/all/ZPhpfMjSiHVjQkTk@localhost.localdomain/
+Signed-off-by: Valentin Schneider <vschneid@redhat.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20240604140903.31939-2-fw@strlen.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/net/inet_timewait_sock.h | 6 +++-
+ net/dccp/minisocks.c | 3 --
+ net/ipv4/inet_timewait_sock.c | 52 +++++++++++++++++++++++++++++++++------
+ net/ipv4/tcp_minisocks.c | 3 --
+ 4 files changed, 51 insertions(+), 13 deletions(-)
+
+--- a/include/net/inet_timewait_sock.h
++++ b/include/net/inet_timewait_sock.h
+@@ -93,8 +93,10 @@ struct inet_timewait_sock *inet_twsk_all
+ struct inet_timewait_death_row *dr,
+ const int state);
+
+-void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
+- struct inet_hashinfo *hashinfo);
++void inet_twsk_hashdance_schedule(struct inet_timewait_sock *tw,
++ struct sock *sk,
++ struct inet_hashinfo *hashinfo,
++ int timeo);
+
+ void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
+ bool rearm);
+--- a/net/dccp/minisocks.c
++++ b/net/dccp/minisocks.c
+@@ -59,11 +59,10 @@ void dccp_time_wait(struct sock *sk, int
+ * we complete the initialization.
+ */
+ local_bh_disable();
+- inet_twsk_schedule(tw, timeo);
+ /* Linkage updates.
+ * Note that access to tw after this point is illegal.
+ */
+- inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
++ inet_twsk_hashdance_schedule(tw, sk, &dccp_hashinfo, timeo);
+ local_bh_enable();
+ } else {
+ /* Sorry, if we're out of memory, just CLOSE this
+--- a/net/ipv4/inet_timewait_sock.c
++++ b/net/ipv4/inet_timewait_sock.c
+@@ -96,9 +96,13 @@ static void inet_twsk_add_node_rcu(struc
+ * Enter the time wait state. This is called with locally disabled BH.
+ * Essentially we whip up a timewait bucket, copy the relevant info into it
+ * from the SK, and mess with hash chains and list linkage.
++ *
++ * The caller must not access @tw anymore after this function returns.
+ */
+-void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
+- struct inet_hashinfo *hashinfo)
++void inet_twsk_hashdance_schedule(struct inet_timewait_sock *tw,
++ struct sock *sk,
++ struct inet_hashinfo *hashinfo,
++ int timeo)
+ {
+ const struct inet_sock *inet = inet_sk(sk);
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+@@ -129,26 +133,33 @@ void inet_twsk_hashdance(struct inet_tim
+
+ spin_lock(lock);
+
++ /* Step 2: Hash TW into tcp ehash chain */
+ inet_twsk_add_node_rcu(tw, &ehead->chain);
+
+ /* Step 3: Remove SK from hash chain */
+ if (__sk_nulls_del_node_init_rcu(sk))
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+
+- spin_unlock(lock);
+
++ /* Ensure above writes are committed into memory before updating the
++ * refcount.
++ * Provides ordering vs later refcount_inc().
++ */
++ smp_wmb();
+ /* tw_refcnt is set to 3 because we have :
+ * - one reference for bhash chain.
+ * - one reference for ehash chain.
+ * - one reference for timer.
+- * We can use atomic_set() because prior spin_lock()/spin_unlock()
+- * committed into memory all tw fields.
+ * Also note that after this point, we lost our implicit reference
+ * so we are not allowed to use tw anymore.
+ */
+ refcount_set(&tw->tw_refcnt, 3);
++
++ inet_twsk_schedule(tw, timeo);
++
++ spin_unlock(lock);
+ }
+-EXPORT_SYMBOL_GPL(inet_twsk_hashdance);
++EXPORT_SYMBOL_GPL(inet_twsk_hashdance_schedule);
+
+ static void tw_timer_handler(struct timer_list *t)
+ {
+@@ -217,7 +228,34 @@ EXPORT_SYMBOL_GPL(inet_twsk_alloc);
+ */
+ void inet_twsk_deschedule_put(struct inet_timewait_sock *tw)
+ {
+- if (del_timer_sync(&tw->tw_timer))
++ struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
++ spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
++
++ /* inet_twsk_purge() walks over all sockets, including tw ones,
++ * and removes them via inet_twsk_deschedule_put() after a
++ * refcount_inc_not_zero().
++ *
++ * inet_twsk_hashdance_schedule() must (re)init the refcount before
++ * arming the timer, i.e. inet_twsk_purge can obtain a reference to
++ * a twsk that did not yet schedule the timer.
++ *
++ * The ehash lock synchronizes these two:
++ * After acquiring the lock, the timer is always scheduled (else
++ * timer_shutdown returns false), because hashdance_schedule releases
++ * the ehash lock only after completing the timer initialization.
++ *
++ * Without grabbing the ehash lock, we get:
++ * 1) cpu x sets twsk refcount to 3
++ * 2) cpu y bumps refcount to 4
++ * 3) cpu y calls inet_twsk_deschedule_put() and shuts timer down
++ * 4) cpu x tries to start timer, but mod_timer is a noop post-shutdown
++ * -> timer refcount is never decremented.
++ */
++ spin_lock(lock);
++ /* Makes sure hashdance_schedule() has completed */
++ spin_unlock(lock);
++
++ if (timer_shutdown_sync(&tw->tw_timer))
+ inet_twsk_kill(tw);
+ inet_twsk_put(tw);
+ }
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -344,11 +344,10 @@ void tcp_time_wait(struct sock *sk, int
+ * we complete the initialization.
+ */
+ local_bh_disable();
+- inet_twsk_schedule(tw, timeo);
+ /* Linkage updates.
+ * Note that access to tw after this point is illegal.
+ */
+- inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo);
++ inet_twsk_hashdance_schedule(tw, sk, net->ipv4.tcp_death_row.hashinfo, timeo);
+ local_bh_enable();
+ } else {
+ /* Sorry, if we're out of memory, just CLOSE this
diff --git a/debian/patches-rt/0001-perf-Move-irq_work_queue-where-the-event-is-prepared.patch b/debian/patches-rt/0001-perf-Move-irq_work_queue-where-the-event-is-prepared.patch
index 08a2950a4a..8b06ae678c 100644
--- a/debian/patches-rt/0001-perf-Move-irq_work_queue-where-the-event-is-prepared.patch
+++ b/debian/patches-rt/0001-perf-Move-irq_work_queue-where-the-event-is-prepared.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 12 Mar 2024 19:01:49 +0100
-Subject: [PATCH 1/4] perf: Move irq_work_queue() where the event is prepared.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Date: Thu, 4 Jul 2024 19:03:35 +0200
+Subject: [PATCH 1/7] perf: Move irq_work_queue() where the event is prepared.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Only if perf_event::pending_sigtrap is zero, the irq_work accounted by
increminging perf_event::nr_pending. The member perf_event::pending_addr
@@ -15,27 +15,34 @@ irq_work is scheduled once.
Tested-by: Marco Elver <elver@google.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-Reported-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-Link: https://lore.kernel.org/r/20240312180814.3373778-2-bigeasy@linutronix.de
+Link: https://lore.kernel.org/r/20240704170424.1466941-2-bigeasy@linutronix.de
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/events/core.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
+ kernel/events/core.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
-@@ -9595,6 +9595,7 @@ static int __perf_event_overflow(struct
+@@ -9738,6 +9738,11 @@ static int __perf_event_overflow(struct
if (!event->pending_sigtrap) {
event->pending_sigtrap = pending_id;
local_inc(&event->ctx->nr_pending);
++
++ event->pending_addr = 0;
++ if (valid_sample && (data->sample_flags & PERF_SAMPLE_ADDR))
++ event->pending_addr = data->addr;
+ irq_work_queue(&event->pending_irq);
} else if (event->attr.exclude_kernel && valid_sample) {
/*
* Should not be able to return to user space without
-@@ -9614,7 +9615,6 @@ static int __perf_event_overflow(struct
- event->pending_addr = 0;
- if (valid_sample && (data->sample_flags & PERF_SAMPLE_ADDR))
- event->pending_addr = data->addr;
+@@ -9753,11 +9758,6 @@ static int __perf_event_overflow(struct
+ */
+ WARN_ON_ONCE(event->pending_sigtrap != pending_id);
+ }
+-
+- event->pending_addr = 0;
+- if (valid_sample && (data->sample_flags & PERF_SAMPLE_ADDR))
+- event->pending_addr = data->addr;
- irq_work_queue(&event->pending_irq);
}
diff --git a/debian/patches-rt/0006-printk-Add-notation-to-console_srcu-locking.patch b/debian/patches-rt/0001-printk-Add-notation-to-console_srcu-locking.patch
index 077a2d2a1a..f104135985 100644
--- a/debian/patches-rt/0006-printk-Add-notation-to-console_srcu-locking.patch
+++ b/debian/patches-rt/0001-printk-Add-notation-to-console_srcu-locking.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Mon, 9 Oct 2023 13:55:19 +0000
-Subject: [PATCH 06/48] printk: Add notation to console_srcu locking
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 01/48] printk: Add notation to console_srcu locking
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
kernel/printk/printk.c:284:5: sparse: sparse: context imbalance in
'console_srcu_read_lock' - wrong count at exit
@@ -10,6 +10,7 @@ include/linux/srcu.h:301:9: sparse: sparse: context imbalance in
Fixes: 6c4afa79147e ("printk: Prepare for SRCU console list protection")
Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/printk/printk.c | 2 ++
diff --git a/debian/patches-rt/0001-printk-ringbuffer-Clarify-special-lpos-values.patch b/debian/patches-rt/0001-printk-ringbuffer-Clarify-special-lpos-values.patch
deleted file mode 100644
index e4ea76a5c3..0000000000
--- a/debian/patches-rt/0001-printk-ringbuffer-Clarify-special-lpos-values.patch
+++ /dev/null
@@ -1,95 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Wed, 7 Feb 2024 14:46:54 +0106
-Subject: [PATCH 01/48] printk: ringbuffer: Clarify special lpos values
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-For empty line records, no data blocks are created. Instead,
-these valid records are identified by special logical position
-values (in fields of @prb_desc.text_blk_lpos).
-
-Currently the macro NO_LPOS is used for empty line records.
-This name is confusing because it does not imply _why_ there is
-no data block.
-
-Rename NO_LPOS to EMPTY_LINE_LPOS so that it is clear why there
-is no data block.
-
-Also add comments explaining the use of EMPTY_LINE_LPOS as well
-as clarification to the values used to represent data-less
-blocks.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Reviewed-by: Petr Mladek <pmladek@suse.com>
-Link: https://lore.kernel.org/r/20240207134103.1357162-6-john.ogness@linutronix.de
-Signed-off-by: Petr Mladek <pmladek@suse.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/printk/printk_ringbuffer.c | 20 ++++++++++++++++----
- kernel/printk/printk_ringbuffer.h | 16 +++++++++++++++-
- 2 files changed, 31 insertions(+), 5 deletions(-)
-
---- a/kernel/printk/printk_ringbuffer.c
-+++ b/kernel/printk/printk_ringbuffer.c
-@@ -1034,9 +1034,13 @@ static char *data_alloc(struct printk_ri
- unsigned long next_lpos;
-
- if (size == 0) {
-- /* Specify a data-less block. */
-- blk_lpos->begin = NO_LPOS;
-- blk_lpos->next = NO_LPOS;
-+ /*
-+ * Data blocks are not created for empty lines. Instead, the
-+ * reader will recognize these special lpos values and handle
-+ * it appropriately.
-+ */
-+ blk_lpos->begin = EMPTY_LINE_LPOS;
-+ blk_lpos->next = EMPTY_LINE_LPOS;
- return NULL;
- }
-
-@@ -1214,10 +1218,18 @@ static const char *get_data(struct prb_d
-
- /* Data-less data block description. */
- if (BLK_DATALESS(blk_lpos)) {
-- if (blk_lpos->begin == NO_LPOS && blk_lpos->next == NO_LPOS) {
-+ /*
-+ * Records that are just empty lines are also valid, even
-+ * though they do not have a data block. For such records
-+ * explicitly return empty string data to signify success.
-+ */
-+ if (blk_lpos->begin == EMPTY_LINE_LPOS &&
-+ blk_lpos->next == EMPTY_LINE_LPOS) {
- *data_size = 0;
- return "";
- }
-+
-+ /* Data lost, invalid, or otherwise unavailable. */
- return NULL;
- }
-
---- a/kernel/printk/printk_ringbuffer.h
-+++ b/kernel/printk/printk_ringbuffer.h
-@@ -127,8 +127,22 @@ enum desc_state {
- #define DESC_SV(id, state) (((unsigned long)state << DESC_FLAGS_SHIFT) | id)
- #define DESC_ID_MASK (~DESC_FLAGS_MASK)
- #define DESC_ID(sv) ((sv) & DESC_ID_MASK)
-+
-+/*
-+ * Special data block logical position values (for fields of
-+ * @prb_desc.text_blk_lpos).
-+ *
-+ * - Bit0 is used to identify if the record has no data block. (Implemented in
-+ * the LPOS_DATALESS() macro.)
-+ *
-+ * - Bit1 specifies the reason for not having a data block.
-+ *
-+ * These special values could never be real lpos values because of the
-+ * meta data and alignment padding of data blocks. (See to_blk_size() for
-+ * details.)
-+ */
- #define FAILED_LPOS 0x1
--#define NO_LPOS 0x3
-+#define EMPTY_LINE_LPOS 0x3
-
- #define FAILED_BLK_LPOS \
- { \
diff --git a/debian/patches-rt/0001-sched-core-Provide-a-method-to-check-if-a-task-is-PI.patch b/debian/patches-rt/0001-sched-core-Provide-a-method-to-check-if-a-task-is-PI.patch
index 366fb021df..d7a8f51a72 100644
--- a/debian/patches-rt/0001-sched-core-Provide-a-method-to-check-if-a-task-is-PI.patch
+++ b/debian/patches-rt/0001-sched-core-Provide-a-method-to-check-if-a-task-is-PI.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 4 Aug 2023 13:30:37 +0200
Subject: [PATCH 1/3] sched/core: Provide a method to check if a task is
PI-boosted.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Provide a method to check if a task inherited the priority from another
task. This happens if a task owns a lock which is requested by a task
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1791,6 +1791,7 @@ static inline int dl_task_check_affinity
+@@ -1806,6 +1806,7 @@ static inline int dl_task_check_affinity
}
#endif
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern int task_prio(const struct task_struct *p);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -8890,6 +8890,21 @@ static inline void preempt_dynamic_init(
+@@ -8910,6 +8910,21 @@ static inline void preempt_dynamic_init(
#endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
diff --git a/debian/patches-rt/0001-serial-amba-pl011-Use-uart_prepare_sysrq_char.patch b/debian/patches-rt/0001-serial-amba-pl011-Use-uart_prepare_sysrq_char.patch
deleted file mode 100644
index 9acb1fcaa1..0000000000
--- a/debian/patches-rt/0001-serial-amba-pl011-Use-uart_prepare_sysrq_char.patch
+++ /dev/null
@@ -1,92 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 1 Mar 2024 22:45:14 +0100
-Subject: [PATCH 01/18] serial: amba-pl011: Use uart_prepare_sysrq_char().
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-The port lock is a spinlock_t which is becomes a sleeping lock on PREEMPT_RT.
-The driver splits the locking function into two parts: local_irq_save() and
-uart_port_lock() and this breaks PREEMPT_RT.
-
-Delay handling sysrq until port lock is dropped.
-Remove the special case in the console write routine an always use the
-complete locking function.
-
-Cc: Russell King <linux@armlinux.org.uk>
-Link: https://lore.kernel.org/r/20240301215246.891055-2-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/tty/serial/amba-pl011.c | 24 ++++++++----------------
- 1 file changed, 8 insertions(+), 16 deletions(-)
-
---- a/drivers/tty/serial/amba-pl011.c
-+++ b/drivers/tty/serial/amba-pl011.c
-@@ -348,10 +348,7 @@ static int pl011_fifo_to_tty(struct uart
- flag = TTY_FRAME;
- }
-
-- uart_port_unlock(&uap->port);
-- sysrq = uart_handle_sysrq_char(&uap->port, ch & 255);
-- uart_port_lock(&uap->port);
--
-+ sysrq = uart_prepare_sysrq_char(&uap->port, ch & 255);
- if (!sysrq)
- uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
- }
-@@ -1017,7 +1014,7 @@ static void pl011_dma_rx_callback(void *
- ret = pl011_dma_rx_trigger_dma(uap);
-
- pl011_dma_rx_chars(uap, pending, lastbuf, false);
-- uart_port_unlock_irq(&uap->port);
-+ uart_unlock_and_check_sysrq(&uap->port);
- /*
- * Do this check after we picked the DMA chars so we don't
- * get some IRQ immediately from RX.
-@@ -1540,11 +1537,10 @@ static void check_apply_cts_event_workar
- static irqreturn_t pl011_int(int irq, void *dev_id)
- {
- struct uart_amba_port *uap = dev_id;
-- unsigned long flags;
- unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
- int handled = 0;
-
-- uart_port_lock_irqsave(&uap->port, &flags);
-+ uart_port_lock(&uap->port);
- status = pl011_read(uap, REG_RIS) & uap->im;
- if (status) {
- do {
-@@ -1573,7 +1569,7 @@ static irqreturn_t pl011_int(int irq, vo
- handled = 1;
- }
-
-- uart_port_unlock_irqrestore(&uap->port, flags);
-+ uart_unlock_and_check_sysrq(&uap->port);
-
- return IRQ_RETVAL(handled);
- }
-@@ -2322,13 +2318,10 @@ pl011_console_write(struct console *co,
-
- clk_enable(uap->clk);
-
-- local_irq_save(flags);
-- if (uap->port.sysrq)
-- locked = 0;
-- else if (oops_in_progress)
-- locked = uart_port_trylock(&uap->port);
-+ if (oops_in_progress)
-+ locked = uart_port_trylock_irqsave(&uap->port, &flags);
- else
-- uart_port_lock(&uap->port);
-+ uart_port_lock_irqsave(&uap->port, &flags);
-
- /*
- * First save the CR then disable the interrupts
-@@ -2354,8 +2347,7 @@ pl011_console_write(struct console *co,
- pl011_write(old_cr, uap, REG_CR);
-
- if (locked)
-- uart_port_unlock(&uap->port);
-- local_irq_restore(flags);
-+ uart_port_unlock_irqrestore(&uap->port, flags);
-
- clk_disable(uap->clk);
- }
diff --git a/debian/patches-rt/zram-Replace-bit-spinlocks-with-spinlock_t-for-PREEM.patch b/debian/patches-rt/0001-zram-Replace-bit-spinlocks-with-a-spinlock_t.patch
index 497f896e71..81f4f3c5dd 100644
--- a/debian/patches-rt/zram-Replace-bit-spinlocks-with-spinlock_t-for-PREEM.patch
+++ b/debian/patches-rt/0001-zram-Replace-bit-spinlocks-with-a-spinlock_t.patch
@@ -1,32 +1,35 @@
From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Thu, 31 Mar 2016 04:08:28 +0200
-Subject: [PATCH] zram: Replace bit spinlocks with spinlock_t for PREEMPT_RT.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 1/3] zram: Replace bit spinlocks with a spinlock_t.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
The bit spinlock disables preemption. The spinlock_t lock becomes a sleeping
lock on PREEMPT_RT and it can not be acquired in this context. In this locked
section, zs_free() acquires a zs_pool::lock, and there is access to
zram::wb_limit_lock.
-Use a spinlock_t on PREEMPT_RT for locking and set/ clear ZRAM_LOCK bit after
-the lock has been acquired/ dropped.
+Add a spinlock_t for locking. Keep the set/ clear ZRAM_LOCK bit after
+the lock has been acquired/ dropped. The size of struct zram_table_entry
+increases by 4 bytes due to lock and additional 4 bytes padding with
+CONFIG_ZRAM_TRACK_ENTRY_ACTIME enabled.
Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
+Link: https://lore.kernel.org/r/20240620153556.777272-2-bigeasy@linutronix.de
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://lkml.kernel.org/r/YqIbMuHCPiQk+Ac2@linutronix.de
Link: https://lore.kernel.org/20230323161830.jFbWCosd@linutronix.de
+Link: https://lore.kernel.org/20240619150814.BRAvaziM@linutronix.de
---
- drivers/block/zram/zram_drv.c | 37 +++++++++++++++++++++++++++++++++++++
- drivers/block/zram/zram_drv.h | 3 +++
- 2 files changed, 40 insertions(+)
+ drivers/block/zram/zram_drv.c | 22 +++++++++++++++++++---
+ drivers/block/zram/zram_drv.h | 1 +
+ 2 files changed, 20 insertions(+), 3 deletions(-)
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
-@@ -57,6 +57,41 @@ static void zram_free_page(struct zram *
+@@ -57,19 +57,34 @@ static void zram_free_page(struct zram *
static int zram_read_page(struct zram *zram, struct page *page, u32 index,
struct bio *parent);
-+#ifdef CONFIG_PREEMPT_RT
+static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages)
+{
+ size_t index;
@@ -35,44 +38,33 @@ Link: https://lore.kernel.org/20230323161830.jFbWCosd@linutronix.de
+ spin_lock_init(&zram->table[index].lock);
+}
+
-+static int zram_slot_trylock(struct zram *zram, u32 index)
-+{
+ static int zram_slot_trylock(struct zram *zram, u32 index)
+ {
+- return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
+ int ret;
+
+ ret = spin_trylock(&zram->table[index].lock);
+ if (ret)
+ __set_bit(ZRAM_LOCK, &zram->table[index].flags);
+ return ret;
-+}
-+
-+static void zram_slot_lock(struct zram *zram, u32 index)
-+{
+ }
+
+ static void zram_slot_lock(struct zram *zram, u32 index)
+ {
+- bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);
+ spin_lock(&zram->table[index].lock);
+ __set_bit(ZRAM_LOCK, &zram->table[index].flags);
-+}
-+
-+static void zram_slot_unlock(struct zram *zram, u32 index)
-+{
+ }
+
+ static void zram_slot_unlock(struct zram *zram, u32 index)
+ {
+- bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
+ __clear_bit(ZRAM_LOCK, &zram->table[index].flags);
+ spin_unlock(&zram->table[index].lock);
-+}
-+
-+#else
-+
-+static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages) { }
-+
- static int zram_slot_trylock(struct zram *zram, u32 index)
- {
- return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
-@@ -71,6 +106,7 @@ static void zram_slot_unlock(struct zram
- {
- bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
}
-+#endif
static inline bool init_done(struct zram *zram)
- {
-@@ -1241,6 +1277,7 @@ static bool zram_meta_alloc(struct zram
+@@ -1226,6 +1241,7 @@ static bool zram_meta_alloc(struct zram
if (!huge_class_size)
huge_class_size = zs_huge_class_size(zram->mem_pool);
@@ -82,13 +74,11 @@ Link: https://lore.kernel.org/20230323161830.jFbWCosd@linutronix.de
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
-@@ -69,6 +69,9 @@ struct zram_table_entry {
+@@ -69,6 +69,7 @@ struct zram_table_entry {
unsigned long element;
};
unsigned long flags;
-+#ifdef CONFIG_PREEMPT_RT
+ spinlock_t lock;
-+#endif
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
ktime_t ac_time;
#endif
diff --git a/debian/patches-rt/0002-ARM-vfp-Use-vfp_lock-in-vfp_sync_hwstate.patch b/debian/patches-rt/0002-ARM-vfp-Use-vfp_lock-in-vfp_sync_hwstate.patch
index 8cff78066f..bf3f19ee85 100644
--- a/debian/patches-rt/0002-ARM-vfp-Use-vfp_lock-in-vfp_sync_hwstate.patch
+++ b/debian/patches-rt/0002-ARM-vfp-Use-vfp_lock-in-vfp_sync_hwstate.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 19 May 2023 16:57:30 +0200
Subject: [PATCH 2/4] ARM: vfp: Use vfp_lock() in vfp_sync_hwstate().
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
vfp_sync_hwstate() uses preempt_disable() followed by local_bh_disable()
to ensure that it won't get interrupted while checking the VFP state.
diff --git a/debian/patches-rt/0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch b/debian/patches-rt/0002-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch
index 3d26b9d6cc..c4f8ef9737 100644
--- a/debian/patches-rt/0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch
+++ b/debian/patches-rt/0002-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch
@@ -1,8 +1,8 @@
From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Sat, 27 Feb 2016 09:01:42 +0100
-Subject: [PATCH 04/10] drm/i915: Don't disable interrupts on PREEMPT_RT during
+Subject: [PATCH 2/8] drm/i915: Don't disable interrupts on PREEMPT_RT during
atomic updates
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Commit
8d7849db3eab7 ("drm/i915: Make sprite updates atomic")
@@ -32,12 +32,14 @@ Don't disable interrupts on PREEMPT_RT during atomic updates.
Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- drivers/gpu/drm/i915/display/intel_crtc.c | 15 ++++++++++-----
- 1 file changed, 10 insertions(+), 5 deletions(-)
+ drivers/gpu/drm/i915/display/intel_crtc.c | 9 ++++++---
+ drivers/gpu/drm/i915/display/intel_cursor.c | 9 ++++++---
+ drivers/gpu/drm/i915/display/intel_vblank.c | 6 ++++--
+ 3 files changed, 16 insertions(+), 8 deletions(-)
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
-@@ -580,7 +580,8 @@ void intel_pipe_update_start(struct inte
+@@ -521,7 +521,8 @@ void intel_pipe_update_start(struct inte
*/
intel_psr_wait_for_idle_locked(new_crtc_state);
@@ -45,25 +47,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_irq_disable();
- crtc->debug.min_vbl = min;
- crtc->debug.max_vbl = max;
-@@ -605,11 +606,13 @@ void intel_pipe_update_start(struct inte
- break;
- }
-
-- local_irq_enable();
-+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
-+ local_irq_enable();
-
- timeout = schedule_timeout(timeout);
-
-- local_irq_disable();
-+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
-+ local_irq_disable();
- }
-
- finish_wait(wq, &wait);
-@@ -642,7 +645,8 @@ void intel_pipe_update_start(struct inte
+ crtc->debug.min_vbl = evade.min;
+ crtc->debug.max_vbl = evade.max;
+@@ -539,7 +540,8 @@ void intel_pipe_update_start(struct inte
return;
irq_disable:
@@ -73,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
-@@ -744,7 +748,8 @@ void intel_pipe_update_end(struct intel_
+@@ -668,7 +670,8 @@ void intel_pipe_update_end(struct intel_
*/
intel_vrr_send_push(new_crtc_state);
@@ -83,3 +69,51 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (intel_vgpu_active(dev_priv))
goto out;
+--- a/drivers/gpu/drm/i915/display/intel_cursor.c
++++ b/drivers/gpu/drm/i915/display/intel_cursor.c
+@@ -895,13 +895,15 @@ intel_legacy_cursor_update(struct drm_pl
+ */
+ intel_psr_wait_for_idle_locked(crtc_state);
+
+- local_irq_disable();
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ local_irq_disable();
+
+ intel_vblank_evade(&evade);
+
+ drm_crtc_vblank_put(&crtc->base);
+ } else {
+- local_irq_disable();
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ local_irq_disable();
+ }
+
+ if (new_plane_state->uapi.visible) {
+@@ -911,7 +913,8 @@ intel_legacy_cursor_update(struct drm_pl
+ intel_plane_disable_arm(plane, crtc_state);
+ }
+
+- local_irq_enable();
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ local_irq_enable();
+
+ intel_psr_unlock(crtc_state);
+
+--- a/drivers/gpu/drm/i915/display/intel_vblank.c
++++ b/drivers/gpu/drm/i915/display/intel_vblank.c
+@@ -705,11 +705,13 @@ int intel_vblank_evade(struct intel_vbla
+ break;
+ }
+
+- local_irq_enable();
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ local_irq_enable();
+
+ timeout = schedule_timeout(timeout);
+
+- local_irq_disable();
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ local_irq_disable();
+ }
+
+ finish_wait(wq, &wait);
diff --git a/debian/patches-rt/0002-locking-local_lock-Add-local-nested-BH-locking-infra.patch b/debian/patches-rt/0002-locking-local_lock-Add-local-nested-BH-locking-infra.patch
new file mode 100644
index 0000000000..a840e3e47c
--- /dev/null
+++ b/debian/patches-rt/0002-locking-local_lock-Add-local-nested-BH-locking-infra.patch
@@ -0,0 +1,141 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 18 Aug 2023 15:17:44 +0200
+Subject: [PATCH 02/15] locking/local_lock: Add local nested BH locking
+ infrastructure.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+Add local_lock_nested_bh() locking. It is based on local_lock_t and the
+naming follows the preempt_disable_nested() example.
+
+For !PREEMPT_RT + !LOCKDEP it is a per-CPU annotation for locking
+assumptions based on local_bh_disable(). The macro is optimized away
+during compilation.
+For !PREEMPT_RT + LOCKDEP the local_lock_nested_bh() is reduced to
+the usual lock-acquire plus lockdep_assert_in_softirq() - ensuring that
+BH is disabled.
+
+For PREEMPT_RT local_lock_nested_bh() acquires the specified per-CPU
+lock. It does not disable CPU migration because it relies on
+local_bh_disable() disabling CPU migration.
+With LOCKDEP it performans the usual lockdep checks as with !PREEMPT_RT.
+Due to include hell the softirq check has been moved spinlock.c.
+
+The intention is to use this locking in places where locking of a per-CPU
+variable relies on BH being disabled. Instead of treating disabled
+bottom halves as a big per-CPU lock, PREEMPT_RT can use this to reduce
+the locking scope to what actually needs protecting.
+A side effect is that it also documents the protection scope of the
+per-CPU variables.
+
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/local_lock.h | 10 ++++++++++
+ include/linux/local_lock_internal.h | 31 +++++++++++++++++++++++++++++++
+ include/linux/lockdep.h | 3 +++
+ kernel/locking/spinlock.c | 8 ++++++++
+ 4 files changed, 52 insertions(+)
+
+--- a/include/linux/local_lock.h
++++ b/include/linux/local_lock.h
+@@ -62,4 +62,14 @@ DEFINE_LOCK_GUARD_1(local_lock_irqsave,
+ local_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
++#define local_lock_nested_bh(_lock) \
++ __local_lock_nested_bh(_lock)
++
++#define local_unlock_nested_bh(_lock) \
++ __local_unlock_nested_bh(_lock)
++
++DEFINE_GUARD(local_lock_nested_bh, local_lock_t __percpu*,
++ local_lock_nested_bh(_T),
++ local_unlock_nested_bh(_T))
++
+ #endif
+--- a/include/linux/local_lock_internal.h
++++ b/include/linux/local_lock_internal.h
+@@ -62,6 +62,17 @@ do { \
+ local_lock_debug_init(lock); \
+ } while (0)
+
++#define __spinlock_nested_bh_init(lock) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
++ lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \
++ 0, LD_WAIT_CONFIG, LD_WAIT_INV, \
++ LD_LOCK_NORMAL); \
++ local_lock_debug_init(lock); \
++} while (0)
++
+ #define __local_lock(lock) \
+ do { \
+ preempt_disable(); \
+@@ -98,6 +109,15 @@ do { \
+ local_irq_restore(flags); \
+ } while (0)
+
++#define __local_lock_nested_bh(lock) \
++ do { \
++ lockdep_assert_in_softirq(); \
++ local_lock_acquire(this_cpu_ptr(lock)); \
++ } while (0)
++
++#define __local_unlock_nested_bh(lock) \
++ local_lock_release(this_cpu_ptr(lock))
++
+ #else /* !CONFIG_PREEMPT_RT */
+
+ /*
+@@ -138,4 +158,15 @@ typedef spinlock_t local_lock_t;
+
+ #define __local_unlock_irqrestore(lock, flags) __local_unlock(lock)
+
++#define __local_lock_nested_bh(lock) \
++do { \
++ lockdep_assert_in_softirq_func(); \
++ spin_lock(this_cpu_ptr(lock)); \
++} while (0)
++
++#define __local_unlock_nested_bh(lock) \
++do { \
++ spin_unlock(this_cpu_ptr((lock))); \
++} while (0)
++
+ #endif /* CONFIG_PREEMPT_RT */
+--- a/include/linux/lockdep.h
++++ b/include/linux/lockdep.h
+@@ -600,6 +600,8 @@ do { \
+ (!in_softirq() || in_irq() || in_nmi())); \
+ } while (0)
+
++extern void lockdep_assert_in_softirq_func(void);
++
+ #else
+ # define might_lock(lock) do { } while (0)
+ # define might_lock_read(lock) do { } while (0)
+@@ -613,6 +615,7 @@ do { \
+ # define lockdep_assert_preemption_enabled() do { } while (0)
+ # define lockdep_assert_preemption_disabled() do { } while (0)
+ # define lockdep_assert_in_softirq() do { } while (0)
++# define lockdep_assert_in_softirq_func() do { } while (0)
+ #endif
+
+ #ifdef CONFIG_PROVE_RAW_LOCK_NESTING
+--- a/kernel/locking/spinlock.c
++++ b/kernel/locking/spinlock.c
+@@ -413,3 +413,11 @@ notrace int in_lock_functions(unsigned l
+ && addr < (unsigned long)__lock_text_end;
+ }
+ EXPORT_SYMBOL(in_lock_functions);
++
++#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_PREEMPT_RT)
++void notrace lockdep_assert_in_softirq_func(void)
++{
++ lockdep_assert_in_softirq();
++}
++EXPORT_SYMBOL(lockdep_assert_in_softirq_func);
++#endif
diff --git a/debian/patches-rt/0002-net-Allow-to-use-SMP-threads-for-backlog-NAPI.patch b/debian/patches-rt/0002-net-Allow-to-use-SMP-threads-for-backlog-NAPI.patch
deleted file mode 100644
index acdfbddf5a..0000000000
--- a/debian/patches-rt/0002-net-Allow-to-use-SMP-threads-for-backlog-NAPI.patch
+++ /dev/null
@@ -1,330 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Sat, 9 Mar 2024 10:05:10 +0100
-Subject: [PATCH 2/4] net: Allow to use SMP threads for backlog NAPI.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-Backlog NAPI is a per-CPU NAPI struct only (with no device behind it)
-used by drivers which don't do NAPI them self, RPS and parts of the
-stack which need to avoid recursive deadlocks while processing a packet.
-
-The non-NAPI driver use the CPU local backlog NAPI. If RPS is enabled
-then a flow for the skb is computed and based on the flow the skb can be
-enqueued on a remote CPU. Scheduling/ raising the softirq (for backlog's
-NAPI) on the remote CPU isn't trivial because the softirq is only
-scheduled on the local CPU and performed after the hardirq is done.
-In order to schedule a softirq on the remote CPU, an IPI is sent to the
-remote CPU which schedules the backlog-NAPI on the then local CPU.
-
-On PREEMPT_RT interrupts are force-threaded. The soft interrupts are
-raised within the interrupt thread and processed after the interrupt
-handler completed still within the context of the interrupt thread. The
-softirq is handled in the context where it originated.
-
-With force-threaded interrupts enabled, ksoftirqd is woken up if a
-softirq is raised from hardirq context. This is the case if it is raised
-from an IPI. Additionally there is a warning on PREEMPT_RT if the
-softirq is raised from the idle thread.
-This was done for two reasons:
-- With threaded interrupts the processing should happen in thread
- context (where it originated) and ksoftirqd is the only thread for
- this context if raised from hardirq. Using the currently running task
- instead would "punish" a random task.
-- Once ksoftirqd is active it consumes all further softirqs until it
- stops running. This changed recently and is no longer the case.
-
-Instead of keeping the backlog NAPI in ksoftirqd (in force-threaded/
-PREEMPT_RT setups) I am proposing NAPI-threads for backlog.
-The "proper" setup with threaded-NAPI is not doable because the threads
-are not pinned to an individual CPU and can be modified by the user.
-Additionally a dummy network device would have to be assigned. Also
-CPU-hotplug has to be considered if additional CPUs show up.
-All this can be probably done/ solved but the smpboot-threads already
-provide this infrastructure.
-
-Sending UDP packets over loopback expects that the packet is processed
-within the call. Delaying it by handing it over to the thread hurts
-performance. It is not beneficial to the outcome if the context switch
-happens immediately after enqueue or after a while to process a few
-packets in a batch.
-There is no need to always use the thread if the backlog NAPI is
-requested on the local CPU. This restores the loopback throuput. The
-performance drops mostly to the same value after enabling RPS on the
-loopback comparing the IPI and the tread result.
-
-Create NAPI-threads for backlog if request during boot. The thread runs
-the inner loop from napi_threaded_poll(), the wait part is different. It
-checks for NAPI_STATE_SCHED (the backlog NAPI can not be disabled).
-
-The NAPI threads for backlog are optional, it has to be enabled via the boot
-argument "thread_backlog_napi". It is mandatory for PREEMPT_RT to avoid the
-wakeup of ksoftirqd from the IPI.
-
-Acked-by: Jakub Kicinski <kuba@kernel.org>
-Link: https://lore.kernel.org/r/20240309090824.2956805-3-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- net/core/dev.c | 152 +++++++++++++++++++++++++++++++++++++++++++--------------
- 1 file changed, 115 insertions(+), 37 deletions(-)
-
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -78,6 +78,7 @@
- #include <linux/slab.h>
- #include <linux/sched.h>
- #include <linux/sched/mm.h>
-+#include <linux/smpboot.h>
- #include <linux/mutex.h>
- #include <linux/rwsem.h>
- #include <linux/string.h>
-@@ -216,6 +217,31 @@ static inline struct hlist_head *dev_ind
- return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
- }
-
-+#ifndef CONFIG_PREEMPT_RT
-+
-+static DEFINE_STATIC_KEY_FALSE(use_backlog_threads_key);
-+
-+static int __init setup_backlog_napi_threads(char *arg)
-+{
-+ static_branch_enable(&use_backlog_threads_key);
-+ return 0;
-+}
-+early_param("thread_backlog_napi", setup_backlog_napi_threads);
-+
-+static bool use_backlog_threads(void)
-+{
-+ return static_branch_unlikely(&use_backlog_threads_key);
-+}
-+
-+#else
-+
-+static bool use_backlog_threads(void)
-+{
-+ return true;
-+}
-+
-+#endif
-+
- static inline void rps_lock_irqsave(struct softnet_data *sd,
- unsigned long *flags)
- {
-@@ -4420,6 +4446,7 @@ EXPORT_SYMBOL(__dev_direct_xmit);
- /*************************************************************************
- * Receiver routines
- *************************************************************************/
-+static DEFINE_PER_CPU(struct task_struct *, backlog_napi);
-
- int netdev_max_backlog __read_mostly = 1000;
- EXPORT_SYMBOL(netdev_max_backlog);
-@@ -4452,12 +4479,16 @@ static inline void ____napi_schedule(str
- */
- thread = READ_ONCE(napi->thread);
- if (thread) {
-+ if (use_backlog_threads() && thread == raw_cpu_read(backlog_napi))
-+ goto use_local_napi;
-+
- set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
- wake_up_process(thread);
- return;
- }
- }
-
-+use_local_napi:
- list_add_tail(&napi->poll_list, &sd->poll_list);
- WRITE_ONCE(napi->list_owner, smp_processor_id());
- /* If not called from net_rx_action()
-@@ -4703,6 +4734,11 @@ static void napi_schedule_rps(struct sof
-
- #ifdef CONFIG_RPS
- if (sd != mysd) {
-+ if (use_backlog_threads()) {
-+ __napi_schedule_irqoff(&sd->backlog);
-+ return;
-+ }
-+
- sd->rps_ipi_next = mysd->rps_ipi_list;
- mysd->rps_ipi_list = sd;
-
-@@ -5926,7 +5962,7 @@ static void net_rps_action_and_irq_enabl
- #ifdef CONFIG_RPS
- struct softnet_data *remsd = sd->rps_ipi_list;
-
-- if (remsd) {
-+ if (!use_backlog_threads() && remsd) {
- sd->rps_ipi_list = NULL;
-
- local_irq_enable();
-@@ -5941,7 +5977,7 @@ static void net_rps_action_and_irq_enabl
- static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
- {
- #ifdef CONFIG_RPS
-- return sd->rps_ipi_list != NULL;
-+ return !use_backlog_threads() && sd->rps_ipi_list;
- #else
- return false;
- #endif
-@@ -5985,7 +6021,7 @@ static int process_backlog(struct napi_s
- * We can use a plain write instead of clear_bit(),
- * and we dont need an smp_mb() memory barrier.
- */
-- napi->state = 0;
-+ napi->state &= NAPIF_STATE_THREADED;
- again = false;
- } else {
- skb_queue_splice_tail_init(&sd->input_pkt_queue,
-@@ -6691,43 +6727,48 @@ static void skb_defer_free_flush(struct
- }
- }
-
--static int napi_threaded_poll(void *data)
-+static void napi_threaded_poll_loop(struct napi_struct *napi)
- {
-- struct napi_struct *napi = data;
- struct softnet_data *sd;
-- void *have;
-+ unsigned long last_qs = jiffies;
-
-- while (!napi_thread_wait(napi)) {
-- unsigned long last_qs = jiffies;
-+ for (;;) {
-+ bool repoll = false;
-+ void *have;
-
-- for (;;) {
-- bool repoll = false;
-+ local_bh_disable();
-+ sd = this_cpu_ptr(&softnet_data);
-+ sd->in_napi_threaded_poll = true;
-
-- local_bh_disable();
-- sd = this_cpu_ptr(&softnet_data);
-- sd->in_napi_threaded_poll = true;
--
-- have = netpoll_poll_lock(napi);
-- __napi_poll(napi, &repoll);
-- netpoll_poll_unlock(have);
--
-- sd->in_napi_threaded_poll = false;
-- barrier();
--
-- if (sd_has_rps_ipi_waiting(sd)) {
-- local_irq_disable();
-- net_rps_action_and_irq_enable(sd);
-- }
-- skb_defer_free_flush(sd);
-- local_bh_enable();
-+ have = netpoll_poll_lock(napi);
-+ __napi_poll(napi, &repoll);
-+ netpoll_poll_unlock(have);
-+
-+ sd->in_napi_threaded_poll = false;
-+ barrier();
-+
-+ if (sd_has_rps_ipi_waiting(sd)) {
-+ local_irq_disable();
-+ net_rps_action_and_irq_enable(sd);
-+ }
-+ skb_defer_free_flush(sd);
-+ local_bh_enable();
-
-- if (!repoll)
-- break;
-+ if (!repoll)
-+ break;
-
-- rcu_softirq_qs_periodic(last_qs);
-- cond_resched();
-- }
-+ rcu_softirq_qs_periodic(last_qs);
-+ cond_resched();
- }
-+}
-+
-+static int napi_threaded_poll(void *data)
-+{
-+ struct napi_struct *napi = data;
-+
-+ while (!napi_thread_wait(napi))
-+ napi_threaded_poll_loop(napi);
-+
- return 0;
- }
-
-@@ -11326,7 +11367,7 @@ static int dev_cpu_dead(unsigned int old
-
- list_del_init(&napi->poll_list);
- if (napi->poll == process_backlog)
-- napi->state = 0;
-+ napi->state &= NAPIF_STATE_THREADED;
- else
- ____napi_schedule(sd, napi);
- }
-@@ -11334,12 +11375,14 @@ static int dev_cpu_dead(unsigned int old
- raise_softirq_irqoff(NET_TX_SOFTIRQ);
- local_irq_enable();
-
-+ if (!use_backlog_threads()) {
- #ifdef CONFIG_RPS
-- remsd = oldsd->rps_ipi_list;
-- oldsd->rps_ipi_list = NULL;
-+ remsd = oldsd->rps_ipi_list;
-+ oldsd->rps_ipi_list = NULL;
- #endif
-- /* send out pending IPI's on offline CPU */
-- net_rps_send_ipi(remsd);
-+ /* send out pending IPI's on offline CPU */
-+ net_rps_send_ipi(remsd);
-+ }
-
- /* Process offline CPU's input_pkt_queue */
- while ((skb = __skb_dequeue(&oldsd->process_queue))) {
-@@ -11659,6 +11702,38 @@ static void __init net_dev_struct_check(
- *
- */
-
-+static int backlog_napi_should_run(unsigned int cpu)
-+{
-+ struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
-+ struct napi_struct *napi = &sd->backlog;
-+
-+ return test_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
-+}
-+
-+static void run_backlog_napi(unsigned int cpu)
-+{
-+ struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
-+
-+ napi_threaded_poll_loop(&sd->backlog);
-+}
-+
-+static void backlog_napi_setup(unsigned int cpu)
-+{
-+ struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
-+ struct napi_struct *napi = &sd->backlog;
-+
-+ napi->thread = this_cpu_read(backlog_napi);
-+ set_bit(NAPI_STATE_THREADED, &napi->state);
-+}
-+
-+static struct smp_hotplug_thread backlog_threads = {
-+ .store = &backlog_napi,
-+ .thread_should_run = backlog_napi_should_run,
-+ .thread_fn = run_backlog_napi,
-+ .thread_comm = "backlog_napi/%u",
-+ .setup = backlog_napi_setup,
-+};
-+
- /*
- * This is called single threaded during boot, so no need
- * to take the rtnl semaphore.
-@@ -11711,7 +11786,10 @@ static int __init net_dev_init(void)
- init_gro_hash(&sd->backlog);
- sd->backlog.poll = process_backlog;
- sd->backlog.weight = weight_p;
-+ INIT_LIST_HEAD(&sd->backlog.poll_list);
- }
-+ if (use_backlog_threads())
-+ smpboot_register_percpu_thread(&backlog_threads);
-
- dev_boot_phase = 0;
-
diff --git a/debian/patches-rt/0002-net-Optimize-xdp_do_flush-with-bpf_net_context-infos.patch b/debian/patches-rt/0002-net-Optimize-xdp_do_flush-with-bpf_net_context-infos.patch
new file mode 100644
index 0000000000..07738df76a
--- /dev/null
+++ b/debian/patches-rt/0002-net-Optimize-xdp_do_flush-with-bpf_net_context-infos.patch
@@ -0,0 +1,291 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 28 Jun 2024 12:18:55 +0200
+Subject: [PATCH 2/3] net: Optimize xdp_do_flush() with bpf_net_context infos.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+Every NIC driver utilizing XDP should invoke xdp_do_flush() after
+processing all packages. With the introduction of the bpf_net_context
+logic the flush lists (for dev, CPU-map and xsk) are lazy initialized
+only if used. However xdp_do_flush() tries to flush all three of them so
+all three lists are always initialized and the likely empty lists are
+"iterated".
+Without the usage of XDP but with CONFIG_DEBUG_NET the lists are also
+initialized due to xdp_do_check_flushed().
+
+Jakub suggest to utilize the hints in bpf_net_context and avoid invoking
+the flush function. This will also avoiding initializing the lists which
+are otherwise unused.
+
+Introduce bpf_net_ctx_get_all_used_flush_lists() to return the
+individual list if not-empty. Use the logic in xdp_do_flush() and
+xdp_do_check_flushed(). Remove the not needed .*_check_flush().
+
+Suggested-by: Jakub Kicinski <kuba@kernel.org>
+Reviewed-by: Jakub Kicinski <kuba@kernel.org>
+Link: https://lore.kernel.org/r/20240628103020.1766241-3-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/bpf.h | 10 ++++------
+ include/linux/filter.h | 27 +++++++++++++++++++++++++++
+ include/net/xdp_sock.h | 14 ++------------
+ kernel/bpf/cpumap.c | 13 +------------
+ kernel/bpf/devmap.c | 13 +------------
+ net/core/filter.c | 33 +++++++++++++++++++++++++--------
+ net/xdp/xsk.c | 13 +------------
+ 7 files changed, 61 insertions(+), 62 deletions(-)
+
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -2492,7 +2492,7 @@ struct sk_buff;
+ struct bpf_dtab_netdev;
+ struct bpf_cpu_map_entry;
+
+-void __dev_flush(void);
++void __dev_flush(struct list_head *flush_list);
+ int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
+ struct net_device *dev_rx);
+ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
+@@ -2505,7 +2505,7 @@ int dev_map_redirect_multi(struct net_de
+ struct bpf_prog *xdp_prog, struct bpf_map *map,
+ bool exclude_ingress);
+
+-void __cpu_map_flush(void);
++void __cpu_map_flush(struct list_head *flush_list);
+ int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
+ struct net_device *dev_rx);
+ int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
+@@ -2642,8 +2642,6 @@ void bpf_dynptr_init(struct bpf_dynptr_k
+ void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
+ void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr);
+
+-bool dev_check_flush(void);
+-bool cpu_map_check_flush(void);
+ #else /* !CONFIG_BPF_SYSCALL */
+ static inline struct bpf_prog *bpf_prog_get(u32 ufd)
+ {
+@@ -2731,7 +2729,7 @@ static inline struct bpf_token *bpf_toke
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+-static inline void __dev_flush(void)
++static inline void __dev_flush(struct list_head *flush_list)
+ {
+ }
+
+@@ -2777,7 +2775,7 @@ int dev_map_redirect_multi(struct net_de
+ return 0;
+ }
+
+-static inline void __cpu_map_flush(void)
++static inline void __cpu_map_flush(struct list_head *flush_list)
+ {
+ }
+
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -829,6 +829,33 @@ static inline struct list_head *bpf_net_
+ return &bpf_net_ctx->xskmap_map_flush_list;
+ }
+
++static inline void bpf_net_ctx_get_all_used_flush_lists(struct list_head **lh_map,
++ struct list_head **lh_dev,
++ struct list_head **lh_xsk)
++{
++ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
++ u32 kern_flags = bpf_net_ctx->ri.kern_flags;
++ struct list_head *lh;
++
++ *lh_map = *lh_dev = *lh_xsk = NULL;
++
++ if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
++ return;
++
++ lh = &bpf_net_ctx->dev_map_flush_list;
++ if (kern_flags & BPF_RI_F_DEV_MAP_INIT && !list_empty(lh))
++ *lh_dev = lh;
++
++ lh = &bpf_net_ctx->cpu_map_flush_list;
++ if (kern_flags & BPF_RI_F_CPU_MAP_INIT && !list_empty(lh))
++ *lh_map = lh;
++
++ lh = &bpf_net_ctx->xskmap_map_flush_list;
++ if (IS_ENABLED(CONFIG_XDP_SOCKETS) &&
++ kern_flags & BPF_RI_F_XSK_MAP_INIT && !list_empty(lh))
++ *lh_xsk = lh;
++}
++
+ /* Compute the linear packet data range [data, data_end) which
+ * will be accessed by various program types (cls_bpf, act_bpf,
+ * lwt, ...). Subsystems allowing direct data access must (!)
+--- a/include/net/xdp_sock.h
++++ b/include/net/xdp_sock.h
+@@ -121,7 +121,7 @@ struct xsk_tx_metadata_ops {
+
+ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
+ int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
+-void __xsk_map_flush(void);
++void __xsk_map_flush(struct list_head *flush_list);
+
+ /**
+ * xsk_tx_metadata_to_compl - Save enough relevant metadata information
+@@ -206,7 +206,7 @@ static inline int __xsk_map_redirect(str
+ return -EOPNOTSUPP;
+ }
+
+-static inline void __xsk_map_flush(void)
++static inline void __xsk_map_flush(struct list_head *flush_list)
+ {
+ }
+
+@@ -228,14 +228,4 @@ static inline void xsk_tx_metadata_compl
+ }
+
+ #endif /* CONFIG_XDP_SOCKETS */
+-
+-#if defined(CONFIG_XDP_SOCKETS) && defined(CONFIG_DEBUG_NET)
+-bool xsk_map_check_flush(void);
+-#else
+-static inline bool xsk_map_check_flush(void)
+-{
+- return false;
+-}
+-#endif
+-
+ #endif /* _LINUX_XDP_SOCK_H */
+--- a/kernel/bpf/cpumap.c
++++ b/kernel/bpf/cpumap.c
+@@ -757,9 +757,8 @@ int cpu_map_generic_redirect(struct bpf_
+ return ret;
+ }
+
+-void __cpu_map_flush(void)
++void __cpu_map_flush(struct list_head *flush_list)
+ {
+- struct list_head *flush_list = bpf_net_ctx_get_cpu_map_flush_list();
+ struct xdp_bulk_queue *bq, *tmp;
+
+ list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
+@@ -769,13 +768,3 @@ void __cpu_map_flush(void)
+ wake_up_process(bq->obj->kthread);
+ }
+ }
+-
+-#ifdef CONFIG_DEBUG_NET
+-bool cpu_map_check_flush(void)
+-{
+- if (list_empty(bpf_net_ctx_get_cpu_map_flush_list()))
+- return false;
+- __cpu_map_flush();
+- return true;
+-}
+-#endif
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -412,9 +412,8 @@ static void bq_xmit_all(struct xdp_dev_b
+ * driver before returning from its napi->poll() routine. See the comment above
+ * xdp_do_flush() in filter.c.
+ */
+-void __dev_flush(void)
++void __dev_flush(struct list_head *flush_list)
+ {
+- struct list_head *flush_list = bpf_net_ctx_get_dev_flush_list();
+ struct xdp_dev_bulk_queue *bq, *tmp;
+
+ list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
+@@ -425,16 +424,6 @@ void __dev_flush(void)
+ }
+ }
+
+-#ifdef CONFIG_DEBUG_NET
+-bool dev_check_flush(void)
+-{
+- if (list_empty(bpf_net_ctx_get_dev_flush_list()))
+- return false;
+- __dev_flush();
+- return true;
+-}
+-#endif
+-
+ /* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
+ * by local_bh_disable() (from XDP calls inside NAPI). The
+ * rcu_read_lock_bh_held() below makes lockdep accept both.
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -4277,22 +4277,39 @@ static const struct bpf_func_proto bpf_x
+ */
+ void xdp_do_flush(void)
+ {
+- __dev_flush();
+- __cpu_map_flush();
+- __xsk_map_flush();
++ struct list_head *lh_map, *lh_dev, *lh_xsk;
++
++ bpf_net_ctx_get_all_used_flush_lists(&lh_map, &lh_dev, &lh_xsk);
++ if (lh_dev)
++ __dev_flush(lh_dev);
++ if (lh_map)
++ __cpu_map_flush(lh_map);
++ if (lh_xsk)
++ __xsk_map_flush(lh_xsk);
+ }
+ EXPORT_SYMBOL_GPL(xdp_do_flush);
+
+ #if defined(CONFIG_DEBUG_NET) && defined(CONFIG_BPF_SYSCALL)
+ void xdp_do_check_flushed(struct napi_struct *napi)
+ {
+- bool ret;
++ struct list_head *lh_map, *lh_dev, *lh_xsk;
++ bool missed = false;
+
+- ret = dev_check_flush();
+- ret |= cpu_map_check_flush();
+- ret |= xsk_map_check_flush();
++ bpf_net_ctx_get_all_used_flush_lists(&lh_map, &lh_dev, &lh_xsk);
++ if (lh_dev) {
++ __dev_flush(lh_dev);
++ missed = true;
++ }
++ if (lh_map) {
++ __cpu_map_flush(lh_map);
++ missed = true;
++ }
++ if (lh_xsk) {
++ __xsk_map_flush(lh_xsk);
++ missed = true;
++ }
+
+- WARN_ONCE(ret, "Missing xdp_do_flush() invocation after NAPI by %ps\n",
++ WARN_ONCE(missed, "Missing xdp_do_flush() invocation after NAPI by %ps\n",
+ napi->poll);
+ }
+ #endif
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -383,9 +383,8 @@ int __xsk_map_redirect(struct xdp_sock *
+ return 0;
+ }
+
+-void __xsk_map_flush(void)
++void __xsk_map_flush(struct list_head *flush_list)
+ {
+- struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();
+ struct xdp_sock *xs, *tmp;
+
+ list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
+@@ -394,16 +393,6 @@ void __xsk_map_flush(void)
+ }
+ }
+
+-#ifdef CONFIG_DEBUG_NET
+-bool xsk_map_check_flush(void)
+-{
+- if (list_empty(bpf_net_ctx_get_xskmap_flush_list()))
+- return false;
+- __xsk_map_flush();
+- return true;
+-}
+-#endif
+-
+ void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
+ {
+ xskq_prod_submit_n(pool->cq, nb_entries);
diff --git a/debian/patches-rt/0002-net-tcp-un-pin-the-tw_timer.patch b/debian/patches-rt/0002-net-tcp-un-pin-the-tw_timer.patch
new file mode 100644
index 0000000000..d19d45bf09
--- /dev/null
+++ b/debian/patches-rt/0002-net-tcp-un-pin-the-tw_timer.patch
@@ -0,0 +1,29 @@
+From: Florian Westphal <fw@strlen.de>
+Date: Tue, 4 Jun 2024 16:08:48 +0200
+Subject: [PATCH 2/3] net: tcp: un-pin the tw_timer
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+After previous patch, even if timer fires immediately on another CPU,
+context that schedules the timer now holds the ehash spinlock, so timer
+cannot reap tw socket until ehash lock is released.
+
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20240604140903.31939-3-fw@strlen.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/ipv4/inet_timewait_sock.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv4/inet_timewait_sock.c
++++ b/net/ipv4/inet_timewait_sock.c
+@@ -203,7 +203,7 @@ struct inet_timewait_sock *inet_twsk_all
+ tw->tw_prot = sk->sk_prot_creator;
+ atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
+ twsk_net_set(tw, sock_net(sk));
+- timer_setup(&tw->tw_timer, tw_timer_handler, TIMER_PINNED);
++ timer_setup(&tw->tw_timer, tw_timer_handler, 0);
+ /*
+ * Because we use RCU lookups, we should not set tw_refcnt
+ * to a non null value before everything is setup for this
diff --git a/debian/patches-rt/0002-perf-Enqueue-SIGTRAP-always-via-task_work.patch b/debian/patches-rt/0002-perf-Enqueue-SIGTRAP-always-via-task_work.patch
deleted file mode 100644
index e6e684ad51..0000000000
--- a/debian/patches-rt/0002-perf-Enqueue-SIGTRAP-always-via-task_work.patch
+++ /dev/null
@@ -1,160 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 12 Mar 2024 19:01:50 +0100
-Subject: [PATCH 2/4] perf: Enqueue SIGTRAP always via task_work.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-A signal is delivered by raising irq_work() which works from any context
-including NMI. irq_work() can be delayed if the architecture does not
-provide an interrupt vector. In order not to lose a signal, the signal
-is injected via task_work during event_sched_out().
-
-Instead going via irq_work, the signal could be added directly via
-task_work. The signal is sent to current and can be enqueued on its
-return path to userland instead of triggering irq_work. A dummy IRQ is
-required in the NMI case to ensure the task_work is handled before
-returning to user land. For this irq_work is used. An alternative would
-be just raising an interrupt like arch_send_call_function_single_ipi().
-
-During testing with `remove_on_exec' it become visible that the event
-can be enqueued via NMI during execve(). The task_work must not be kept
-because free_event() will complain later. Also the new task will not
-have a sighandler installed.
-
-Queue signal via task_work. Remove perf_event::pending_sigtrap and
-and use perf_event::pending_work instead. Raise irq_work in the NMI case
-for a dummy interrupt. Remove the task_work if the event is freed.
-
-Tested-by: Marco Elver <elver@google.com>
-Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-Reported-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-Link: https://lore.kernel.org/r/20240312180814.3373778-3-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/perf_event.h | 3 --
- kernel/events/core.c | 57 +++++++++++++++++++++++++--------------------
- 2 files changed, 33 insertions(+), 27 deletions(-)
-
---- a/include/linux/perf_event.h
-+++ b/include/linux/perf_event.h
-@@ -781,7 +781,6 @@ struct perf_event {
- unsigned int pending_wakeup;
- unsigned int pending_kill;
- unsigned int pending_disable;
-- unsigned int pending_sigtrap;
- unsigned long pending_addr; /* SIGTRAP */
- struct irq_work pending_irq;
- struct callback_head pending_task;
-@@ -959,7 +958,7 @@ struct perf_event_context {
- struct rcu_head rcu_head;
-
- /*
-- * Sum (event->pending_sigtrap + event->pending_work)
-+ * Sum (event->pending_work + event->pending_work)
- *
- * The SIGTRAP is targeted at ctx->task, as such it won't do changing
- * that until the signal is delivered.
---- a/kernel/events/core.c
-+++ b/kernel/events/core.c
-@@ -2283,21 +2283,6 @@ event_sched_out(struct perf_event *event
- state = PERF_EVENT_STATE_OFF;
- }
-
-- if (event->pending_sigtrap) {
-- bool dec = true;
--
-- event->pending_sigtrap = 0;
-- if (state != PERF_EVENT_STATE_OFF &&
-- !event->pending_work) {
-- event->pending_work = 1;
-- dec = false;
-- WARN_ON_ONCE(!atomic_long_inc_not_zero(&event->refcount));
-- task_work_add(current, &event->pending_task, TWA_RESUME);
-- }
-- if (dec)
-- local_dec(&event->ctx->nr_pending);
-- }
--
- perf_event_set_state(event, state);
-
- if (!is_software_event(event))
-@@ -6741,11 +6726,6 @@ static void __perf_pending_irq(struct pe
- * Yay, we hit home and are in the context of the event.
- */
- if (cpu == smp_processor_id()) {
-- if (event->pending_sigtrap) {
-- event->pending_sigtrap = 0;
-- perf_sigtrap(event);
-- local_dec(&event->ctx->nr_pending);
-- }
- if (event->pending_disable) {
- event->pending_disable = 0;
- perf_event_disable_local(event);
-@@ -9592,14 +9572,23 @@ static int __perf_event_overflow(struct
-
- if (regs)
- pending_id = hash32_ptr((void *)instruction_pointer(regs)) ?: 1;
-- if (!event->pending_sigtrap) {
-- event->pending_sigtrap = pending_id;
-+ if (!event->pending_work) {
-+ event->pending_work = pending_id;
- local_inc(&event->ctx->nr_pending);
-- irq_work_queue(&event->pending_irq);
-+ WARN_ON_ONCE(!atomic_long_inc_not_zero(&event->refcount));
-+ task_work_add(current, &event->pending_task, TWA_RESUME);
-+ /*
-+ * The NMI path returns directly to userland. The
-+ * irq_work is raised as a dummy interrupt to ensure
-+ * regular return path to user is taken and task_work
-+ * is processed.
-+ */
-+ if (in_nmi())
-+ irq_work_queue(&event->pending_irq);
- } else if (event->attr.exclude_kernel && valid_sample) {
- /*
- * Should not be able to return to user space without
-- * consuming pending_sigtrap; with exceptions:
-+ * consuming pending_work; with exceptions:
- *
- * 1. Where !exclude_kernel, events can overflow again
- * in the kernel without returning to user space.
-@@ -9609,7 +9598,7 @@ static int __perf_event_overflow(struct
- * To approximate progress (with false negatives),
- * check 32-bit hash of the current IP.
- */
-- WARN_ON_ONCE(event->pending_sigtrap != pending_id);
-+ WARN_ON_ONCE(event->pending_work != pending_id);
- }
-
- event->pending_addr = 0;
-@@ -13049,6 +13038,13 @@ static void sync_child_event(struct perf
- &parent_event->child_total_time_running);
- }
-
-+static bool task_work_cb_match(struct callback_head *cb, void *data)
-+{
-+ struct perf_event *event = container_of(cb, struct perf_event, pending_task);
-+
-+ return event == data;
-+}
-+
- static void
- perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx)
- {
-@@ -13088,6 +13084,17 @@ perf_event_exit_event(struct perf_event
- * Kick perf_poll() for is_event_hup();
- */
- perf_event_wakeup(parent_event);
-+ /*
-+ * Cancel pending task_work and update counters if it has not
-+ * yet been delivered to userland. free_event() expects the
-+ * reference counter at 1 and keeping the event around until the
-+ * task return to userland will be a unexpected.
-+ */
-+ if (event->pending_work &&
-+ task_work_cancel_match(current, task_work_cb_match, event)) {
-+ put_event(event);
-+ local_dec(&event->ctx->nr_pending);
-+ }
- free_event(event);
- put_event(parent_event);
- return;
diff --git a/debian/patches-rt/0002-printk-Properly-deal-with-nbcon-consoles-on-seq-init.patch b/debian/patches-rt/0002-printk-Properly-deal-with-nbcon-consoles-on-seq-init.patch
new file mode 100644
index 0000000000..a178c87993
--- /dev/null
+++ b/debian/patches-rt/0002-printk-Properly-deal-with-nbcon-consoles-on-seq-init.patch
@@ -0,0 +1,190 @@
+From: Petr Mladek <pmladek@suse.com>
+Date: Wed, 22 Nov 2023 11:23:43 +0000
+Subject: [PATCH 02/48] printk: Properly deal with nbcon consoles on seq init
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+If a non-boot console is registering and boot consoles exist,
+the consoles are flushed before being unregistered. This allows
+the non-boot console to continue where the boot console left
+off.
+
+If for whatever reason flushing fails, the lowest seq found from
+any of the enabled boot consoles is used. Until now con->seq was
+checked. However, if it is an nbcon boot console, the function
+nbcon_seq_read() must be used to read seq because con->seq is
+not updated for nbcon consoles.
+
+Check if it is an nbcon boot console and if so call
+nbcon_seq_read() to read seq.
+
+Also, avoid usage of con->seq as temporary storage of the
+starting record. Instead, rename console_init_seq() to
+get_init_console_seq() and just return the value. For nbcon
+consoles set the sequence via nbcon_init(), for legacy consoles
+set con->seq.
+
+The cleaned design should make sure that the value stays and is
+set before the printing kthread is created.
+
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Reviewed-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/internal.h | 4 ++--
+ kernel/printk/nbcon.c | 10 +++-------
+ kernel/printk/printk.c | 41 +++++++++++++++++++++++++++++------------
+ 3 files changed, 34 insertions(+), 21 deletions(-)
+
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -75,7 +75,7 @@ u16 printk_parse_prefix(const char *text
+ u64 nbcon_seq_read(struct console *con);
+ void nbcon_seq_force(struct console *con, u64 seq);
+ bool nbcon_alloc(struct console *con);
+-void nbcon_init(struct console *con);
++void nbcon_init(struct console *con, u64 init_seq);
+ void nbcon_free(struct console *con);
+
+ #else
+@@ -96,7 +96,7 @@ static inline bool printk_percpu_data_re
+ static inline u64 nbcon_seq_read(struct console *con) { return 0; }
+ static inline void nbcon_seq_force(struct console *con, u64 seq) { }
+ static inline bool nbcon_alloc(struct console *con) { return false; }
+-static inline void nbcon_init(struct console *con) { }
++static inline void nbcon_init(struct console *con, u64 init_seq) { }
+ static inline void nbcon_free(struct console *con) { }
+
+ #endif /* CONFIG_PRINTK */
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -172,9 +172,6 @@ void nbcon_seq_force(struct console *con
+ u64 valid_seq = max_t(u64, seq, prb_first_valid_seq(prb));
+
+ atomic_long_set(&ACCESS_PRIVATE(con, nbcon_seq), __u64seq_to_ulseq(valid_seq));
+-
+- /* Clear con->seq since nbcon consoles use con->nbcon_seq instead. */
+- con->seq = 0;
+ }
+
+ /**
+@@ -961,20 +958,19 @@ bool nbcon_alloc(struct console *con)
+ /**
+ * nbcon_init - Initialize the nbcon console specific data
+ * @con: Console to initialize
++ * @init_seq: Sequence number of the first record to be emitted
+ *
+ * nbcon_alloc() *must* be called and succeed before this function
+ * is called.
+- *
+- * This function expects that the legacy @con->seq has been set.
+ */
+-void nbcon_init(struct console *con)
++void nbcon_init(struct console *con, u64 init_seq)
+ {
+ struct nbcon_state state = { };
+
+ /* nbcon_alloc() must have been called and successful! */
+ BUG_ON(!con->pbufs);
+
+- nbcon_seq_force(con, con->seq);
++ nbcon_seq_force(con, init_seq);
+ nbcon_state_set(con, &state);
+ }
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -3375,19 +3375,21 @@ static void try_enable_default_console(s
+ newcon->flags |= CON_CONSDEV;
+ }
+
+-static void console_init_seq(struct console *newcon, bool bootcon_registered)
++/* Return the starting sequence number for a newly registered console. */
++static u64 get_init_console_seq(struct console *newcon, bool bootcon_registered)
+ {
+ struct console *con;
+ bool handover;
++ u64 init_seq;
+
+ if (newcon->flags & (CON_PRINTBUFFER | CON_BOOT)) {
+ /* Get a consistent copy of @syslog_seq. */
+ mutex_lock(&syslog_lock);
+- newcon->seq = syslog_seq;
++ init_seq = syslog_seq;
+ mutex_unlock(&syslog_lock);
+ } else {
+ /* Begin with next message added to ringbuffer. */
+- newcon->seq = prb_next_seq(prb);
++ init_seq = prb_next_seq(prb);
+
+ /*
+ * If any enabled boot consoles are due to be unregistered
+@@ -3408,7 +3410,7 @@ static void console_init_seq(struct cons
+ * Flush all consoles and set the console to start at
+ * the next unprinted sequence number.
+ */
+- if (!console_flush_all(true, &newcon->seq, &handover)) {
++ if (!console_flush_all(true, &init_seq, &handover)) {
+ /*
+ * Flushing failed. Just choose the lowest
+ * sequence of the enabled boot consoles.
+@@ -3421,19 +3423,30 @@ static void console_init_seq(struct cons
+ if (handover)
+ console_lock();
+
+- newcon->seq = prb_next_seq(prb);
++ init_seq = prb_next_seq(prb);
+ for_each_console(con) {
+- if ((con->flags & CON_BOOT) &&
+- (con->flags & CON_ENABLED) &&
+- con->seq < newcon->seq) {
+- newcon->seq = con->seq;
++ u64 seq;
++
++ if (!(con->flags & CON_BOOT) ||
++ !(con->flags & CON_ENABLED)) {
++ continue;
+ }
++
++ if (con->flags & CON_NBCON)
++ seq = nbcon_seq_read(con);
++ else
++ seq = con->seq;
++
++ if (seq < init_seq)
++ init_seq = seq;
+ }
+ }
+
+ console_unlock();
+ }
+ }
++
++ return init_seq;
+ }
+
+ #define console_first() \
+@@ -3465,6 +3478,7 @@ void register_console(struct console *ne
+ struct console *con;
+ bool bootcon_registered = false;
+ bool realcon_registered = false;
++ u64 init_seq;
+ int err;
+
+ console_list_lock();
+@@ -3542,10 +3556,13 @@ void register_console(struct console *ne
+ }
+
+ newcon->dropped = 0;
+- console_init_seq(newcon, bootcon_registered);
++ init_seq = get_init_console_seq(newcon, bootcon_registered);
+
+- if (newcon->flags & CON_NBCON)
+- nbcon_init(newcon);
++ if (newcon->flags & CON_NBCON) {
++ nbcon_init(newcon, init_seq);
++ } else {
++ newcon->seq = init_seq;
++ }
+
+ /*
+ * Put this console in the list - keep the
diff --git a/debian/patches-rt/0002-serial-ar933x-Use-uart_prepare_sysrq_char.patch b/debian/patches-rt/0002-serial-ar933x-Use-uart_prepare_sysrq_char.patch
deleted file mode 100644
index f8f97abf10..0000000000
--- a/debian/patches-rt/0002-serial-ar933x-Use-uart_prepare_sysrq_char.patch
+++ /dev/null
@@ -1,68 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 1 Mar 2024 22:45:15 +0100
-Subject: [PATCH 02/18] serial: ar933x: Use uart_prepare_sysrq_char().
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-The port lock is a spinlock_t which is becomes a sleeping lock on PREEMPT_RT.
-The driver splits the locking function into two parts: local_irq_save() and
-uart_port_lock() and this breaks PREEMPT_RT.
-
-Delay handling sysrq until port lock is dropped.
-Remove the special case in the console write routine an always use the
-complete locking function.
-
-Link: https://lore.kernel.org/r/20240301215246.891055-3-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/tty/serial/ar933x_uart.c | 18 ++++++------------
- 1 file changed, 6 insertions(+), 12 deletions(-)
-
---- a/drivers/tty/serial/ar933x_uart.c
-+++ b/drivers/tty/serial/ar933x_uart.c
-@@ -378,7 +378,7 @@ static void ar933x_uart_rx_chars(struct
- up->port.icount.rx++;
- ch = rdata & AR933X_UART_DATA_TX_RX_MASK;
-
-- if (uart_handle_sysrq_char(&up->port, ch))
-+ if (uart_prepare_sysrq_char(&up->port, ch))
- continue;
-
- if ((up->port.ignore_status_mask & AR933X_DUMMY_STATUS_RD) == 0)
-@@ -468,7 +468,7 @@ static irqreturn_t ar933x_uart_interrupt
- ar933x_uart_tx_chars(up);
- }
-
-- uart_port_unlock(&up->port);
-+ uart_unlock_and_check_sysrq(&up->port);
-
- return IRQ_HANDLED;
- }
-@@ -627,14 +627,10 @@ static void ar933x_uart_console_write(st
- unsigned int int_en;
- int locked = 1;
-
-- local_irq_save(flags);
--
-- if (up->port.sysrq)
-- locked = 0;
-- else if (oops_in_progress)
-- locked = uart_port_trylock(&up->port);
-+ if (oops_in_progress)
-+ locked = uart_port_trylock_irqsave(&up->port, &flags);
- else
-- uart_port_lock(&up->port);
-+ uart_port_lock_irqsave(&up->port, &flags);
-
- /*
- * First save the IER then disable the interrupts
-@@ -654,9 +650,7 @@ static void ar933x_uart_console_write(st
- ar933x_uart_write(up, AR933X_UART_INT_REG, AR933X_UART_INT_ALLINTS);
-
- if (locked)
-- uart_port_unlock(&up->port);
--
-- local_irq_restore(flags);
-+ uart_port_unlock_irqrestore(&up->port, flags);
- }
-
- static int ar933x_uart_console_setup(struct console *co, char *options)
diff --git a/debian/patches-rt/0002-softirq-Add-function-to-preempt-serving-softirqs.patch b/debian/patches-rt/0002-softirq-Add-function-to-preempt-serving-softirqs.patch
index 24f73e8d86..bbafcf5a1c 100644
--- a/debian/patches-rt/0002-softirq-Add-function-to-preempt-serving-softirqs.patch
+++ b/debian/patches-rt/0002-softirq-Add-function-to-preempt-serving-softirqs.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 4 Aug 2023 13:30:38 +0200
Subject: [PATCH 2/3] softirq: Add function to preempt serving softirqs.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Add a functionality for the softirq handler to preempt its current work
if needed. The softirq core has no particular state. It reads and resets
@@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* _LINUX_BH_H */
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
-@@ -247,6 +247,19 @@ void __local_bh_enable_ip(unsigned long
+@@ -248,6 +248,19 @@ void __local_bh_enable_ip(unsigned long
}
EXPORT_SYMBOL(__local_bh_enable_ip);
diff --git a/debian/patches-rt/0002-task_work-Add-TWA_NMI_CURRENT-as-an-additional-notif.patch b/debian/patches-rt/0002-task_work-Add-TWA_NMI_CURRENT-as-an-additional-notif.patch
new file mode 100644
index 0000000000..30a0f071e2
--- /dev/null
+++ b/debian/patches-rt/0002-task_work-Add-TWA_NMI_CURRENT-as-an-additional-notif.patch
@@ -0,0 +1,102 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 4 Jul 2024 19:03:36 +0200
+Subject: [PATCH 2/7] task_work: Add TWA_NMI_CURRENT as an additional notify
+ mode.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+Adding task_work from NMI context requires the following:
+- The kasan_record_aux_stack() is not NMI safe and must be avoided.
+- Using TWA_RESUME is NMI safe. If the NMI occurs while the CPU is in
+ userland then it will continue in userland and not invoke the `work'
+ callback.
+
+Add TWA_NMI_CURRENT as an additional notify mode. In this mode skip
+kasan and use irq_work in hardirq-mode to issue the needed interrupt. Set
+TIF_NOTIFY_RESUME within the irq_work callback due to k[ac]san
+instrumentation in test_and_set_bit() which does not look NMI safe in
+case of a report.
+
+Suggested-by: Peter Zijlstra <peterz@infradead.org>
+Link: https://lore.kernel.org/r/20240704170424.1466941-3-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/task_work.h | 1 +
+ kernel/task_work.c | 25 ++++++++++++++++++++++---
+ 2 files changed, 23 insertions(+), 3 deletions(-)
+
+--- a/include/linux/task_work.h
++++ b/include/linux/task_work.h
+@@ -18,6 +18,7 @@ enum task_work_notify_mode {
+ TWA_RESUME,
+ TWA_SIGNAL,
+ TWA_SIGNAL_NO_IPI,
++ TWA_NMI_CURRENT,
+ };
+
+ static inline bool task_work_pending(struct task_struct *task)
+--- a/kernel/task_work.c
++++ b/kernel/task_work.c
+@@ -1,10 +1,19 @@
+ // SPDX-License-Identifier: GPL-2.0
++#include <linux/irq_work.h>
+ #include <linux/spinlock.h>
+ #include <linux/task_work.h>
+ #include <linux/resume_user_mode.h>
++#include <trace/events/ipi.h>
+
+ static struct callback_head work_exited; /* all we need is ->next == NULL */
+
++static void task_work_set_notify_irq(struct irq_work *entry)
++{
++ test_and_set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
++}
++static DEFINE_PER_CPU(struct irq_work, irq_work_NMI_resume) =
++ IRQ_WORK_INIT_HARD(task_work_set_notify_irq);
++
+ /**
+ * task_work_add - ask the @task to execute @work->func()
+ * @task: the task which should run the callback
+@@ -12,7 +21,7 @@ static struct callback_head work_exited;
+ * @notify: how to notify the targeted task
+ *
+ * Queue @work for task_work_run() below and notify the @task if @notify
+- * is @TWA_RESUME, @TWA_SIGNAL, or @TWA_SIGNAL_NO_IPI.
++ * is @TWA_RESUME, @TWA_SIGNAL, @TWA_SIGNAL_NO_IPI or @TWA_NMI_CURRENT.
+ *
+ * @TWA_SIGNAL works like signals, in that the it will interrupt the targeted
+ * task and run the task_work, regardless of whether the task is currently
+@@ -24,6 +33,8 @@ static struct callback_head work_exited;
+ * kernel anyway.
+ * @TWA_RESUME work is run only when the task exits the kernel and returns to
+ * user mode, or before entering guest mode.
++ * @TWA_NMI_CURRENT works like @TWA_RESUME, except it can only be used for the
++ * current @task and if the current context is NMI.
+ *
+ * Fails if the @task is exiting/exited and thus it can't process this @work.
+ * Otherwise @work->func() will be called when the @task goes through one of
+@@ -44,8 +55,13 @@ int task_work_add(struct task_struct *ta
+ {
+ struct callback_head *head;
+
+- /* record the work call stack in order to print it in KASAN reports */
+- kasan_record_aux_stack(work);
++ if (notify == TWA_NMI_CURRENT) {
++ if (WARN_ON_ONCE(task != current))
++ return -EINVAL;
++ } else {
++ /* record the work call stack in order to print it in KASAN reports */
++ kasan_record_aux_stack(work);
++ }
+
+ head = READ_ONCE(task->task_works);
+ do {
+@@ -66,6 +82,9 @@ int task_work_add(struct task_struct *ta
+ case TWA_SIGNAL_NO_IPI:
+ __set_notify_signal(task);
+ break;
++ case TWA_NMI_CURRENT:
++ irq_work_queue(this_cpu_ptr(&irq_work_NMI_resume));
++ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
diff --git a/debian/patches-rt/0002-zram-Remove-ZRAM_LOCK.patch b/debian/patches-rt/0002-zram-Remove-ZRAM_LOCK.patch
new file mode 100644
index 0000000000..f795579f71
--- /dev/null
+++ b/debian/patches-rt/0002-zram-Remove-ZRAM_LOCK.patch
@@ -0,0 +1,66 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 20 Jun 2024 12:27:11 +0200
+Subject: [PATCH 2/3] zram: Remove ZRAM_LOCK
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+The ZRAM_LOCK bit was used for locking; after the addition of the spinlock_t
+the bit is still set and cleared but there is no reader of it.
+
+Remove the ZRAM_LOCK bit.
+
+Link: https://lore.kernel.org/r/20240620153556.777272-3-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/block/zram/zram_drv.c | 11 ++---------
+ drivers/block/zram/zram_drv.h | 4 +---
+ 2 files changed, 3 insertions(+), 12 deletions(-)
+
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -67,23 +67,16 @@ static void zram_meta_init_table_locks(s
+
+ static int zram_slot_trylock(struct zram *zram, u32 index)
+ {
+- int ret;
+-
+- ret = spin_trylock(&zram->table[index].lock);
+- if (ret)
+- __set_bit(ZRAM_LOCK, &zram->table[index].flags);
+- return ret;
++ return spin_trylock(&zram->table[index].lock);
+ }
+
+ static void zram_slot_lock(struct zram *zram, u32 index)
+ {
+ spin_lock(&zram->table[index].lock);
+- __set_bit(ZRAM_LOCK, &zram->table[index].flags);
+ }
+
+ static void zram_slot_unlock(struct zram *zram, u32 index)
+ {
+- __clear_bit(ZRAM_LOCK, &zram->table[index].flags);
+ spin_unlock(&zram->table[index].lock);
+ }
+
+@@ -1299,7 +1292,7 @@ static void zram_free_page(struct zram *
+ zram_set_handle(zram, index, 0);
+ zram_set_obj_size(zram, index, 0);
+ WARN_ON_ONCE(zram->table[index].flags &
+- ~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB));
++ ~(1UL << ZRAM_UNDER_WB));
+ }
+
+ /*
+--- a/drivers/block/zram/zram_drv.h
++++ b/drivers/block/zram/zram_drv.h
+@@ -45,9 +45,7 @@
+
+ /* Flags for zram pages (table[page_no].flags) */
+ enum zram_pageflags {
+- /* zram slot is locked */
+- ZRAM_LOCK = ZRAM_FLAG_SHIFT,
+- ZRAM_SAME, /* Page consists the same element */
++ ZRAM_SAME = ZRAM_FLAG_SHIFT, /* Page consists the same element */
+ ZRAM_WB, /* page is stored on backing_device */
+ ZRAM_UNDER_WB, /* page is under writeback */
+ ZRAM_HUGE, /* Incompressible page */
diff --git a/debian/patches-rt/0003-ARM-vfp-Use-vfp_lock-in-vfp_support_entry.patch b/debian/patches-rt/0003-ARM-vfp-Use-vfp_lock-in-vfp_support_entry.patch
index 2e2ea53f2a..5e9b00218a 100644
--- a/debian/patches-rt/0003-ARM-vfp-Use-vfp_lock-in-vfp_support_entry.patch
+++ b/debian/patches-rt/0003-ARM-vfp-Use-vfp_lock-in-vfp_support_entry.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 28 Jun 2023 09:36:10 +0200
Subject: [PATCH 3/4] ARM: vfp: Use vfp_lock() in vfp_support_entry().
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
vfp_entry() is invoked from exception handler and is fully preemptible.
It uses local_bh_disable() to remain uninterrupted while checking the
diff --git a/debian/patches-rt/0005-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch b/debian/patches-rt/0003-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch
index d61e6d3cda..583f8e96c9 100644
--- a/debian/patches-rt/0005-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch
+++ b/debian/patches-rt/0003-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch
@@ -1,29 +1,38 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 25 Oct 2021 15:05:18 +0200
-Subject: [PATCH 05/10] drm/i915: Don't check for atomic context on PREEMPT_RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 3/8] drm/i915: Don't check for atomic context on PREEMPT_RT
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
The !in_atomic() check in _wait_for_atomic() triggers on PREEMPT_RT
because the uncore::lock is a spinlock_t and does not disable
preemption or interrupts.
Changing the uncore:lock to a raw_spinlock_t doubles the worst case
-latency on an otherwise idle testbox during testing. Therefore I'm
-currently unsure about changing this.
+latency on an otherwise idle testbox during testing.
+Ignore _WAIT_FOR_ATOMIC_CHECK() on PREEMPT_RT.
+
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Link: https://lore.kernel.org/all/20211006164628.s2mtsdd2jdbfyf7g@linutronix.de/
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- drivers/gpu/drm/i915/i915_utils.h | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
+ drivers/gpu/drm/i915/i915_utils.h | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
-@@ -288,7 +288,7 @@ wait_remaining_ms_from_jiffies(unsigned
+@@ -273,8 +273,13 @@ wait_remaining_ms_from_jiffies(unsigned
+ (Wmax))
#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
- /* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
+-/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
-#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
++/*
++ * If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false.
++ * On PREEMPT_RT the context isn't becoming atomic because it is used in an
++ * interrupt handler or because a spinlock_t is acquired. This leads to
++ * warnings which don't occur otherwise and therefore the check is disabled.
++ */
+#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT)
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
#else
diff --git a/debian/patches-rt/0003-net-Move-flush-list-retrieval-to-where-it-is-used.patch b/debian/patches-rt/0003-net-Move-flush-list-retrieval-to-where-it-is-used.patch
new file mode 100644
index 0000000000..bb954adf85
--- /dev/null
+++ b/debian/patches-rt/0003-net-Move-flush-list-retrieval-to-where-it-is-used.patch
@@ -0,0 +1,88 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 28 Jun 2024 12:18:56 +0200
+Subject: [PATCH 3/3] net: Move flush list retrieval to where it is used.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+The bpf_net_ctx_get_.*_flush_list() are used at the top of the function.
+This means the variable is always assigned even if unused. By moving the
+function to where it is used, it is possible to delay the initialisation
+until it is unavoidable.
+Not sure how much this gains in reality but by looking at bq_enqueue()
+(in devmap.c) gcc pushes one register less to the stack. \o/.
+
+ Move flush list retrieval to where it is used.
+
+Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
+Reviewed-by: Jakub Kicinski <kuba@kernel.org>
+Link: https://lore.kernel.org/r/20240628103020.1766241-4-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/bpf/cpumap.c | 6 ++++--
+ kernel/bpf/devmap.c | 3 ++-
+ net/xdp/xsk.c | 6 ++++--
+ 3 files changed, 10 insertions(+), 5 deletions(-)
+
+--- a/kernel/bpf/cpumap.c
++++ b/kernel/bpf/cpumap.c
+@@ -707,7 +707,6 @@ static void bq_flush_to_queue(struct xdp
+ */
+ static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
+ {
+- struct list_head *flush_list = bpf_net_ctx_get_cpu_map_flush_list();
+ struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
+
+ if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
+@@ -724,8 +723,11 @@ static void bq_enqueue(struct bpf_cpu_ma
+ */
+ bq->q[bq->count++] = xdpf;
+
+- if (!bq->flush_node.prev)
++ if (!bq->flush_node.prev) {
++ struct list_head *flush_list = bpf_net_ctx_get_cpu_map_flush_list();
++
+ list_add(&bq->flush_node, flush_list);
++ }
+ }
+
+ int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -448,7 +448,6 @@ static void *__dev_map_lookup_elem(struc
+ static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
+ struct net_device *dev_rx, struct bpf_prog *xdp_prog)
+ {
+- struct list_head *flush_list = bpf_net_ctx_get_dev_flush_list();
+ struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
+
+ if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
+@@ -462,6 +461,8 @@ static void bq_enqueue(struct net_device
+ * are only ever modified together.
+ */
+ if (!bq->dev_rx) {
++ struct list_head *flush_list = bpf_net_ctx_get_dev_flush_list();
++
+ bq->dev_rx = dev_rx;
+ bq->xdp_prog = xdp_prog;
+ list_add(&bq->flush_node, flush_list);
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -370,15 +370,17 @@ static int xsk_rcv(struct xdp_sock *xs,
+
+ int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
+ {
+- struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();
+ int err;
+
+ err = xsk_rcv(xs, xdp);
+ if (err)
+ return err;
+
+- if (!xs->flush_node.prev)
++ if (!xs->flush_node.prev) {
++ struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();
++
+ list_add(&xs->flush_node, flush_list);
++ }
+
+ return 0;
+ }
diff --git a/debian/patches-rt/0003-net-Use-__napi_alloc_frag_align-instead-of-open-codi.patch b/debian/patches-rt/0003-net-Use-__napi_alloc_frag_align-instead-of-open-codi.patch
new file mode 100644
index 0000000000..677c50ab56
--- /dev/null
+++ b/debian/patches-rt/0003-net-Use-__napi_alloc_frag_align-instead-of-open-codi.patch
@@ -0,0 +1,42 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 23 Oct 2023 15:11:23 +0200
+Subject: [PATCH 03/15] net: Use __napi_alloc_frag_align() instead of open
+ coding it.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+The else condition within __netdev_alloc_frag_align() is an open coded
+__napi_alloc_frag_align().
+
+Use __napi_alloc_frag_align() instead of open coding it.
+Move fragsz assignment before page_frag_alloc_align() invocation because
+__napi_alloc_frag_align() also contains this statement.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/core/skbuff.c | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -318,19 +318,15 @@ void *__netdev_alloc_frag_align(unsigned
+ {
+ void *data;
+
+- fragsz = SKB_DATA_ALIGN(fragsz);
+ if (in_hardirq() || irqs_disabled()) {
+ struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);
+
++ fragsz = SKB_DATA_ALIGN(fragsz);
+ data = __page_frag_alloc_align(nc, fragsz, GFP_ATOMIC,
+ align_mask);
+ } else {
+- struct napi_alloc_cache *nc;
+-
+ local_bh_disable();
+- nc = this_cpu_ptr(&napi_alloc_cache);
+- data = __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
+- align_mask);
++ data = __napi_alloc_frag_align(fragsz, align_mask);
+ local_bh_enable();
+ }
+ return data;
diff --git a/debian/patches-rt/0003-net-Use-backlog-NAPI-to-clean-up-the-defer_list.patch b/debian/patches-rt/0003-net-Use-backlog-NAPI-to-clean-up-the-defer_list.patch
deleted file mode 100644
index ae46bdf942..0000000000
--- a/debian/patches-rt/0003-net-Use-backlog-NAPI-to-clean-up-the-defer_list.patch
+++ /dev/null
@@ -1,121 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Sat, 9 Mar 2024 10:05:11 +0100
-Subject: [PATCH 3/4] net: Use backlog-NAPI to clean up the defer_list.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-The defer_list is a per-CPU list which is used to free skbs outside of
-the socket lock and on the CPU on which they have been allocated.
-The list is processed during NAPI callbacks so ideally the list is
-cleaned up.
-Should the amount of skbs on the list exceed a certain water mark then
-the softirq is triggered remotely on the target CPU by invoking a remote
-function call. The raise of the softirqs via a remote function call
-leads to waking the ksoftirqd on PREEMPT_RT which is undesired.
-The backlog-NAPI threads already provide the infrastructure which can be
-utilized to perform the cleanup of the defer_list.
-
-The NAPI state is updated with the input_pkt_queue.lock acquired. It
-order not to break the state, it is needed to also wake the backlog-NAPI
-thread with the lock held. This requires to acquire the use the lock in
-rps_lock_irq*() if the backlog-NAPI threads are used even with RPS
-disabled.
-
-Move the logic of remotely starting softirqs to clean up the defer_list
-into kick_defer_list_purge(). Make sure a lock is held in
-rps_lock_irq*() if backlog-NAPI threads are used. Schedule backlog-NAPI
-for defer_list cleanup if backlog-NAPI is available.
-
-Acked-by: Jakub Kicinski <kuba@kernel.org>
-Link: https://lore.kernel.org/r/20240309090824.2956805-4-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/netdevice.h | 1 +
- net/core/dev.c | 25 +++++++++++++++++++++----
- net/core/skbuff.c | 4 ++--
- 3 files changed, 24 insertions(+), 6 deletions(-)
-
---- a/include/linux/netdevice.h
-+++ b/include/linux/netdevice.h
-@@ -3365,6 +3365,7 @@ static inline void dev_xmit_recursion_de
- __this_cpu_dec(softnet_data.xmit.recursion);
- }
-
-+void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);
- void __netif_schedule(struct Qdisc *q);
- void netif_schedule_queue(struct netdev_queue *txq);
-
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -245,7 +245,7 @@ static bool use_backlog_threads(void)
- static inline void rps_lock_irqsave(struct softnet_data *sd,
- unsigned long *flags)
- {
-- if (IS_ENABLED(CONFIG_RPS))
-+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
- spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
- else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
- local_irq_save(*flags);
-@@ -253,7 +253,7 @@ static inline void rps_lock_irqsave(stru
-
- static inline void rps_lock_irq_disable(struct softnet_data *sd)
- {
-- if (IS_ENABLED(CONFIG_RPS))
-+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
- spin_lock_irq(&sd->input_pkt_queue.lock);
- else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
- local_irq_disable();
-@@ -262,7 +262,7 @@ static inline void rps_lock_irq_disable(
- static inline void rps_unlock_irq_restore(struct softnet_data *sd,
- unsigned long *flags)
- {
-- if (IS_ENABLED(CONFIG_RPS))
-+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
- spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
- else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
- local_irq_restore(*flags);
-@@ -270,7 +270,7 @@ static inline void rps_unlock_irq_restor
-
- static inline void rps_unlock_irq_enable(struct softnet_data *sd)
- {
-- if (IS_ENABLED(CONFIG_RPS))
-+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
- spin_unlock_irq(&sd->input_pkt_queue.lock);
- else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
- local_irq_enable();
-@@ -4753,6 +4753,23 @@ static void napi_schedule_rps(struct sof
- __napi_schedule_irqoff(&mysd->backlog);
- }
-
-+void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu)
-+{
-+ unsigned long flags;
-+
-+ if (use_backlog_threads()) {
-+ rps_lock_irqsave(sd, &flags);
-+
-+ if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
-+ __napi_schedule_irqoff(&sd->backlog);
-+
-+ rps_unlock_irq_restore(sd, &flags);
-+
-+ } else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
-+ smp_call_function_single_async(cpu, &sd->defer_csd);
-+ }
-+}
-+
- #ifdef CONFIG_NET_FLOW_LIMIT
- int netdev_flow_limit_table_len __read_mostly = (1 << 12);
- #endif
---- a/net/core/skbuff.c
-+++ b/net/core/skbuff.c
-@@ -6929,8 +6929,8 @@ nodefer: __kfree_skb(skb);
- /* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
- * if we are unlucky enough (this seems very unlikely).
- */
-- if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1))
-- smp_call_function_single_async(cpu, &sd->defer_csd);
-+ if (unlikely(kick))
-+ kick_defer_list_purge(sd, cpu);
- }
-
- static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,
diff --git a/debian/patches-rt/0003-perf-Enqueue-SIGTRAP-always-via-task_work.patch b/debian/patches-rt/0003-perf-Enqueue-SIGTRAP-always-via-task_work.patch
new file mode 100644
index 0000000000..725fb82046
--- /dev/null
+++ b/debian/patches-rt/0003-perf-Enqueue-SIGTRAP-always-via-task_work.patch
@@ -0,0 +1,120 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 4 Jul 2024 19:03:37 +0200
+Subject: [PATCH 3/7] perf: Enqueue SIGTRAP always via task_work.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+A signal is delivered by raising irq_work() which works from any context
+including NMI. irq_work() can be delayed if the architecture does not
+provide an interrupt vector. In order not to lose a signal, the signal
+is injected via task_work during event_sched_out().
+
+Instead of going via irq_work, the signal could be added directly via
+task_work. The signal is sent to current and can be enqueued on its
+return path to userland.
+
+Queue signal via task_work and consider possible NMI context. Remove
+perf_event::pending_sigtrap and use perf_event::pending_work
+instead.
+
+Tested-by: Marco Elver <elver@google.com>
+Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Reported-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Link: https://lore.kernel.org/all/ZMAtZ2t43GXoF6tM@kernel.org/
+Link: https://lore.kernel.org/r/20240704170424.1466941-4-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/perf_event.h | 3 +--
+ kernel/events/core.c | 31 ++++++++++---------------------
+ 2 files changed, 11 insertions(+), 23 deletions(-)
+
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -781,7 +781,6 @@ struct perf_event {
+ unsigned int pending_wakeup;
+ unsigned int pending_kill;
+ unsigned int pending_disable;
+- unsigned int pending_sigtrap;
+ unsigned long pending_addr; /* SIGTRAP */
+ struct irq_work pending_irq;
+ struct callback_head pending_task;
+@@ -963,7 +962,7 @@ struct perf_event_context {
+ struct rcu_head rcu_head;
+
+ /*
+- * Sum (event->pending_sigtrap + event->pending_work)
++ * Sum (event->pending_work + event->pending_work)
+ *
+ * The SIGTRAP is targeted at ctx->task, as such it won't do changing
+ * that until the signal is delivered.
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -2283,17 +2283,6 @@ event_sched_out(struct perf_event *event
+ state = PERF_EVENT_STATE_OFF;
+ }
+
+- if (event->pending_sigtrap) {
+- event->pending_sigtrap = 0;
+- if (state != PERF_EVENT_STATE_OFF &&
+- !event->pending_work &&
+- !task_work_add(current, &event->pending_task, TWA_RESUME)) {
+- event->pending_work = 1;
+- } else {
+- local_dec(&event->ctx->nr_pending);
+- }
+- }
+-
+ perf_event_set_state(event, state);
+
+ if (!is_software_event(event))
+@@ -6787,11 +6776,6 @@ static void __perf_pending_irq(struct pe
+ * Yay, we hit home and are in the context of the event.
+ */
+ if (cpu == smp_processor_id()) {
+- if (event->pending_sigtrap) {
+- event->pending_sigtrap = 0;
+- perf_sigtrap(event);
+- local_dec(&event->ctx->nr_pending);
+- }
+ if (event->pending_disable) {
+ event->pending_disable = 0;
+ perf_event_disable_local(event);
+@@ -9732,21 +9716,26 @@ static int __perf_event_overflow(struct
+ */
+ bool valid_sample = sample_is_allowed(event, regs);
+ unsigned int pending_id = 1;
++ enum task_work_notify_mode notify_mode;
+
+ if (regs)
+ pending_id = hash32_ptr((void *)instruction_pointer(regs)) ?: 1;
+- if (!event->pending_sigtrap) {
+- event->pending_sigtrap = pending_id;
++
++ notify_mode = in_nmi() ? TWA_NMI_CURRENT : TWA_RESUME;
++
++ if (!event->pending_work &&
++ !task_work_add(current, &event->pending_task, notify_mode)) {
++ event->pending_work = pending_id;
+ local_inc(&event->ctx->nr_pending);
+
+ event->pending_addr = 0;
+ if (valid_sample && (data->sample_flags & PERF_SAMPLE_ADDR))
+ event->pending_addr = data->addr;
+- irq_work_queue(&event->pending_irq);
++
+ } else if (event->attr.exclude_kernel && valid_sample) {
+ /*
+ * Should not be able to return to user space without
+- * consuming pending_sigtrap; with exceptions:
++ * consuming pending_work; with exceptions:
+ *
+ * 1. Where !exclude_kernel, events can overflow again
+ * in the kernel without returning to user space.
+@@ -9756,7 +9745,7 @@ static int __perf_event_overflow(struct
+ * To approximate progress (with false negatives),
+ * check 32-bit hash of the current IP.
+ */
+- WARN_ON_ONCE(event->pending_sigtrap != pending_id);
++ WARN_ON_ONCE(event->pending_work != pending_id);
+ }
+ }
+
diff --git a/debian/patches-rt/0003-perf-Remove-perf_swevent_get_recursion_context-from-.patch b/debian/patches-rt/0003-perf-Remove-perf_swevent_get_recursion_context-from-.patch
deleted file mode 100644
index 150b5530d2..0000000000
--- a/debian/patches-rt/0003-perf-Remove-perf_swevent_get_recursion_context-from-.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 12 Mar 2024 19:01:51 +0100
-Subject: [PATCH 3/4] perf: Remove perf_swevent_get_recursion_context() from
- perf_pending_task().
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-perf_swevent_get_recursion_context() is supposed to avoid recursion.
-This requires to remain on the same CPU in order to decrement/ increment
-the same counter. This is done by using preempt_disable(). Having
-preemption disabled while sending a signal leads to locking problems on
-PREEMPT_RT because sighand, a spinlock_t, becomes a sleeping lock.
-
-This callback runs in task context and currently delivers only a signal
-to "itself". Any kind of recusrion protection in this context is not
-required.
-
-Remove recursion protection in perf_pending_task().
-
-Tested-by: Marco Elver <elver@google.com>
-Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-Reported-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-Link: https://lore.kernel.org/r/20240312180814.3373778-4-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/events/core.c | 12 ------------
- 1 file changed, 12 deletions(-)
-
---- a/kernel/events/core.c
-+++ b/kernel/events/core.c
-@@ -6785,14 +6785,6 @@ static void perf_pending_irq(struct irq_
- static void perf_pending_task(struct callback_head *head)
- {
- struct perf_event *event = container_of(head, struct perf_event, pending_task);
-- int rctx;
--
-- /*
-- * If we 'fail' here, that's OK, it means recursion is already disabled
-- * and we won't recurse 'further'.
-- */
-- preempt_disable_notrace();
-- rctx = perf_swevent_get_recursion_context();
-
- if (event->pending_work) {
- event->pending_work = 0;
-@@ -6800,10 +6792,6 @@ static void perf_pending_task(struct cal
- local_dec(&event->ctx->nr_pending);
- }
-
-- if (rctx >= 0)
-- perf_swevent_put_recursion_context(rctx);
-- preempt_enable_notrace();
--
- put_event(event);
- }
-
diff --git a/debian/patches-rt/0003-printk-Avoid-non-panic-CPUs-writing-to-ringbuffer.patch b/debian/patches-rt/0003-printk-Avoid-non-panic-CPUs-writing-to-ringbuffer.patch
deleted file mode 100644
index d7575e9a50..0000000000
--- a/debian/patches-rt/0003-printk-Avoid-non-panic-CPUs-writing-to-ringbuffer.patch
+++ /dev/null
@@ -1,86 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Wed, 7 Feb 2024 14:47:01 +0106
-Subject: [PATCH 03/48] printk: Avoid non-panic CPUs writing to ringbuffer
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-Commit 13fb0f74d702 ("printk: Avoid livelock with heavy printk
-during panic") introduced a mechanism to silence non-panic CPUs
-if too many messages are being dropped. Aside from trying to
-workaround the livelock bugs of legacy consoles, it was also
-intended to avoid losing panic messages. However, if non-panic
-CPUs are writing to the ringbuffer, then reacting to dropped
-messages is too late.
-
-Another motivation is that non-finalized messages already might
-be skipped in panic(). In other words, random messages from
-non-panic CPUs might already get lost. It is better to ignore
-all to avoid confusion.
-
-To avoid losing panic CPU messages, silence non-panic CPUs
-immediately on panic.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Reviewed-by: Petr Mladek <pmladek@suse.com>
-Link: https://lore.kernel.org/r/20240207134103.1357162-13-john.ogness@linutronix.de
-Signed-off-by: Petr Mladek <pmladek@suse.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/printk/printk.c | 26 ++++++--------------------
- 1 file changed, 6 insertions(+), 20 deletions(-)
-
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -462,12 +462,6 @@ static int console_msg_format = MSG_FORM
- static DEFINE_MUTEX(syslog_lock);
-
- #ifdef CONFIG_PRINTK
--/*
-- * During panic, heavy printk by other CPUs can delay the
-- * panic and risk deadlock on console resources.
-- */
--static int __read_mostly suppress_panic_printk;
--
- DECLARE_WAIT_QUEUE_HEAD(log_wait);
- /* All 3 protected by @syslog_lock. */
- /* the next printk record to read by syslog(READ) or /proc/kmsg */
-@@ -2322,7 +2316,12 @@ asmlinkage int vprintk_emit(int facility
- if (unlikely(suppress_printk))
- return 0;
-
-- if (unlikely(suppress_panic_printk) && other_cpu_in_panic())
-+ /*
-+ * The messages on the panic CPU are the most important. If
-+ * non-panic CPUs are generating any messages, they will be
-+ * silently dropped.
-+ */
-+ if (other_cpu_in_panic())
- return 0;
-
- if (level == LOGLEVEL_SCHED) {
-@@ -2807,8 +2806,6 @@ void console_prepend_dropped(struct prin
- bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
- bool is_extended, bool may_suppress)
- {
-- static int panic_console_dropped;
--
- struct printk_buffers *pbufs = pmsg->pbufs;
- const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
- const size_t outbuf_sz = sizeof(pbufs->outbuf);
-@@ -2836,17 +2833,6 @@ bool printk_get_next_message(struct prin
- pmsg->seq = r.info->seq;
- pmsg->dropped = r.info->seq - seq;
-
-- /*
-- * Check for dropped messages in panic here so that printk
-- * suppression can occur as early as possible if necessary.
-- */
-- if (pmsg->dropped &&
-- panic_in_progress() &&
-- panic_console_dropped++ > 10) {
-- suppress_panic_printk = 1;
-- pr_warn_once("Too many dropped messages. Suppress messages on non-panic CPUs to prevent livelock.\n");
-- }
--
- /* Skip record that has level above the console loglevel. */
- if (may_suppress && suppress_message_printing(r.info->level))
- goto out;
diff --git a/debian/patches-rt/0008-printk-nbcon-Remove-return-value-for-write_atomic.patch b/debian/patches-rt/0003-printk-nbcon-Remove-return-value-for-write_atomic.patch
index 73b0fe77fd..fe6d01032d 100644
--- a/debian/patches-rt/0008-printk-nbcon-Remove-return-value-for-write_atomic.patch
+++ b/debian/patches-rt/0003-printk-nbcon-Remove-return-value-for-write_atomic.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Fri, 20 Oct 2023 09:52:59 +0000
-Subject: [PATCH 08/48] printk: nbcon: Remove return value for write_atomic()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 03/48] printk: nbcon: Remove return value for write_atomic()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
The return value of write_atomic() does not provide any useful
information. On the contrary, it makes things more complicated
@@ -15,6 +15,7 @@ message and the sequence number for that console will be
incremented.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/console.h | 2 +-
@@ -23,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/console.h
+++ b/include/linux/console.h
-@@ -327,7 +327,7 @@ struct console {
+@@ -345,7 +345,7 @@ struct console {
struct hlist_node node;
/* nbcon console specific members */
diff --git a/debian/patches-rt/0003-serial-bcm63xx-Use-uart_prepare_sysrq_char.patch b/debian/patches-rt/0003-serial-bcm63xx-Use-uart_prepare_sysrq_char.patch
deleted file mode 100644
index 580b336885..0000000000
--- a/debian/patches-rt/0003-serial-bcm63xx-Use-uart_prepare_sysrq_char.patch
+++ /dev/null
@@ -1,78 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 1 Mar 2024 22:45:16 +0100
-Subject: [PATCH 03/18] serial: bcm63xx: Use uart_prepare_sysrq_char().
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-The port lock is a spinlock_t which is becomes a sleeping lock on PREEMPT_RT.
-The driver splits the locking function into two parts: local_irq_save() and
-uart_port_lock() and this breaks PREEMPT_RT.
-
-Delay handling sysrq until port lock is dropped.
-Remove the special case in the console write routine an always use the
-complete locking function.
-
-Link: https://lore.kernel.org/r/20240301215246.891055-4-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/tty/serial/bcm63xx_uart.c | 24 ++++++++----------------
- 1 file changed, 8 insertions(+), 16 deletions(-)
-
---- a/drivers/tty/serial/bcm63xx_uart.c
-+++ b/drivers/tty/serial/bcm63xx_uart.c
-@@ -285,10 +285,9 @@ static void bcm_uart_do_rx(struct uart_p
- flag = TTY_PARITY;
- }
-
-- if (uart_handle_sysrq_char(port, c))
-+ if (uart_prepare_sysrq_char(port, c))
- continue;
-
--
- if ((cstat & port->ignore_status_mask) == 0)
- tty_insert_flip_char(tty_port, c, flag);
-
-@@ -353,7 +352,7 @@ static irqreturn_t bcm_uart_interrupt(in
- estat & UART_EXTINP_DCD_MASK);
- }
-
-- uart_port_unlock(port);
-+ uart_unlock_and_check_sysrq(port);
- return IRQ_HANDLED;
- }
-
-@@ -703,20 +702,14 @@ static void bcm_console_write(struct con
- {
- struct uart_port *port;
- unsigned long flags;
-- int locked;
-+ int locked = 1;
-
- port = &ports[co->index];
-
-- local_irq_save(flags);
-- if (port->sysrq) {
-- /* bcm_uart_interrupt() already took the lock */
-- locked = 0;
-- } else if (oops_in_progress) {
-- locked = uart_port_trylock(port);
-- } else {
-- uart_port_lock(port);
-- locked = 1;
-- }
-+ if (oops_in_progress)
-+ locked = uart_port_trylock_irqsave(port, &flags);
-+ else
-+ uart_port_lock_irqsave(port, &flags);
-
- /* call helper to deal with \r\n */
- uart_console_write(port, s, count, bcm_console_putchar);
-@@ -725,8 +718,7 @@ static void bcm_console_write(struct con
- wait_for_xmitr(port);
-
- if (locked)
-- uart_port_unlock(port);
-- local_irq_restore(flags);
-+ uart_port_unlock_irqrestore(port, flags);
- }
-
- /*
diff --git a/debian/patches-rt/0003-tcp-move-inet_twsk_schedule-helper-out-of-header.patch b/debian/patches-rt/0003-tcp-move-inet_twsk_schedule-helper-out-of-header.patch
new file mode 100644
index 0000000000..d75af6d28d
--- /dev/null
+++ b/debian/patches-rt/0003-tcp-move-inet_twsk_schedule-helper-out-of-header.patch
@@ -0,0 +1,45 @@
+From: Florian Westphal <fw@strlen.de>
+Date: Tue, 4 Jun 2024 16:08:49 +0200
+Subject: [PATCH 3/3] tcp: move inet_twsk_schedule helper out of header
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+It's no longer used outside inet_timewait_sock.c, so move it there.
+
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20240604140903.31939-4-fw@strlen.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/net/inet_timewait_sock.h | 5 -----
+ net/ipv4/inet_timewait_sock.c | 5 +++++
+ 2 files changed, 5 insertions(+), 5 deletions(-)
+
+--- a/include/net/inet_timewait_sock.h
++++ b/include/net/inet_timewait_sock.h
+@@ -101,11 +101,6 @@ void inet_twsk_hashdance_schedule(struct
+ void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
+ bool rearm);
+
+-static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
+-{
+- __inet_twsk_schedule(tw, timeo, false);
+-}
+-
+ static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
+ {
+ __inet_twsk_schedule(tw, timeo, true);
+--- a/net/ipv4/inet_timewait_sock.c
++++ b/net/ipv4/inet_timewait_sock.c
+@@ -92,6 +92,11 @@ static void inet_twsk_add_node_rcu(struc
+ hlist_nulls_add_head_rcu(&tw->tw_node, list);
+ }
+
++static void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
++{
++ __inet_twsk_schedule(tw, timeo, false);
++}
++
+ /*
+ * Enter the time wait state. This is called with locally disabled BH.
+ * Essentially we whip up a timewait bucket, copy the relevant info into it
diff --git a/debian/patches-rt/0003-time-Allow-to-preempt-after-a-callback.patch b/debian/patches-rt/0003-time-Allow-to-preempt-after-a-callback.patch
index 486b98521f..7481d00114 100644
--- a/debian/patches-rt/0003-time-Allow-to-preempt-after-a-callback.patch
+++ b/debian/patches-rt/0003-time-Allow-to-preempt-after-a-callback.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 4 Aug 2023 13:30:39 +0200
Subject: [PATCH 3/3] time: Allow to preempt after a callback.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
The TIMER_SOFTIRQ handler invokes timer callbacks of the expired timers.
Before each invocation the timer_base::lock is dropped. The only lock
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -1470,9 +1470,16 @@ static inline void timer_base_unlock_exp
+@@ -1562,9 +1562,16 @@ static inline void timer_base_unlock_exp
*/
static void timer_sync_wait_running(struct timer_base *base)
{
diff --git a/debian/patches-rt/0003-zram-Shrink-zram_table_entry-flags.patch b/debian/patches-rt/0003-zram-Shrink-zram_table_entry-flags.patch
new file mode 100644
index 0000000000..153dee8fd2
--- /dev/null
+++ b/debian/patches-rt/0003-zram-Shrink-zram_table_entry-flags.patch
@@ -0,0 +1,48 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 20 Jun 2024 12:53:06 +0200
+Subject: [PATCH 3/3] zram: Shrink zram_table_entry::flags.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+The zram_table_entry::flags member is of type long and uses 8 bytes on a
+64bit architecture. With a PAGE_SIZE of 256KiB we have PAGE_SHIFT of 18
+which in turn leads to __NR_ZRAM_PAGEFLAGS = 27. This still fits in an
+ordinary integer.
+By reducing the size of `flags' to four bytes, the size of the struct
+goes back to 16 bytes. The padding between the lock and ac_time (if
+enabled) is also gone.
+
+Make zram_table_entry::flags an unsigned int and update the build test
+to reflect the change.
+
+Link: https://lore.kernel.org/r/20240620153556.777272-4-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/block/zram/zram_drv.c | 3 ++-
+ drivers/block/zram/zram_drv.h | 2 +-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -2412,9 +2412,10 @@ static void destroy_devices(void)
+
+ static int __init zram_init(void)
+ {
++ struct zram_table_entry zram_te;
+ int ret;
+
+- BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > BITS_PER_LONG);
++ BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > sizeof(zram_te.flags) * 8);
+
+ ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
+ zcomp_cpu_up_prepare, zcomp_cpu_dead);
+--- a/drivers/block/zram/zram_drv.h
++++ b/drivers/block/zram/zram_drv.h
+@@ -66,7 +66,7 @@ struct zram_table_entry {
+ unsigned long handle;
+ unsigned long element;
+ };
+- unsigned long flags;
++ unsigned int flags;
+ spinlock_t lock;
+ #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
+ ktime_t ac_time;
diff --git a/debian/patches-rt/0004-ARM-vfp-Move-sending-signals-outside-of-vfp_lock-ed-.patch b/debian/patches-rt/0004-ARM-vfp-Move-sending-signals-outside-of-vfp_lock-ed-.patch
index b5f618d1c5..45080a4fe0 100644
--- a/debian/patches-rt/0004-ARM-vfp-Move-sending-signals-outside-of-vfp_lock-ed-.patch
+++ b/debian/patches-rt/0004-ARM-vfp-Move-sending-signals-outside-of-vfp_lock-ed-.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 28 Jun 2023 09:39:33 +0200
Subject: [PATCH 4/4] ARM: vfp: Move sending signals outside of vfp_lock()ed
section.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
VFP_bounce() is invoked from within vfp_support_entry() and may send a
signal. Sending a signal uses spinlock_t which becomes a sleeping lock
diff --git a/debian/patches-rt/0006-drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch b/debian/patches-rt/0004-drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch
index 39bda39263..6ac572a8ad 100644
--- a/debian/patches-rt/0006-drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch
+++ b/debian/patches-rt/0004-drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 6 Dec 2018 09:52:20 +0100
-Subject: [PATCH 06/10] drm/i915: Disable tracing points on PREEMPT_RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 4/8] drm/i915: Disable tracing points on PREEMPT_RT
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Luca Abeni reported this:
| BUG: scheduling while atomic: kworker/u8:2/15203/0x00000003
@@ -12,7 +12,7 @@ Luca Abeni reported this:
| g4x_get_vblank_counter+0x36/0x40 [i915]
| trace_event_raw_event_i915_pipe_update_start+0x7d/0xf0 [i915]
-The tracing events use trace_i915_pipe_update_start() among other events
+The tracing events use trace_intel_pipe_update_start() among other events
use functions acquire spinlock_t locks which are transformed into
sleeping locks on PREEMPT_RT. A few trace points use
intel_get_crtc_scanline(), others use ->get_vblank_counter() wich also
@@ -23,20 +23,35 @@ is disabled and so the locks must not be acquired on PREEMPT_RT.
Based on this I don't see any other way than disable trace points on
PREMPT_RT.
+Acked-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Reported-by: Luca Abeni <lucabe72@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- drivers/gpu/drm/i915/i915_trace.h | 4 ++++
- 1 file changed, 4 insertions(+)
+ drivers/gpu/drm/i915/display/intel_display_trace.h | 4 ++++
+ drivers/gpu/drm/i915/i915_trace.h | 4 ++++
+ 2 files changed, 8 insertions(+)
+--- a/drivers/gpu/drm/i915/display/intel_display_trace.h
++++ b/drivers/gpu/drm/i915/display/intel_display_trace.h
+@@ -9,6 +9,10 @@
+ #if !defined(__INTEL_DISPLAY_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
+ #define __INTEL_DISPLAY_TRACE_H__
+
++#if defined(CONFIG_PREEMPT_RT) && !defined(NOTRACE)
++#define NOTRACE
++#endif
++
+ #include <linux/string_helpers.h>
+ #include <linux/types.h>
+ #include <linux/tracepoint.h>
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -6,6 +6,10 @@
#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _I915_TRACE_H_
-+#ifdef CONFIG_PREEMPT_RT
++#if defined(CONFIG_PREEMPT_RT) && !defined(NOTRACE)
+#define NOTRACE
+#endif
+
diff --git a/debian/patches-rt/0004-net-Rename-rps_lock-to-backlog_lock.patch b/debian/patches-rt/0004-net-Rename-rps_lock-to-backlog_lock.patch
deleted file mode 100644
index 70de5b94e2..0000000000
--- a/debian/patches-rt/0004-net-Rename-rps_lock-to-backlog_lock.patch
+++ /dev/null
@@ -1,164 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Sat, 9 Mar 2024 10:05:12 +0100
-Subject: [PATCH 4/4] net: Rename rps_lock to backlog_lock.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-The rps_lock.*() functions use the inner lock of a sk_buff_head for
-locking. This lock is used if RPS is enabled, otherwise the list is
-accessed lockless and disabling interrupts is enough for the
-synchronisation because it is only accessed CPU local. Not only the list
-is protected but also the NAPI state protected.
-With the addition of backlog threads, the lock is also needed because of
-the cross CPU access even without RPS. The clean up of the defer_list
-list is also done via backlog threads (if enabled).
-
-It has been suggested to rename the locking function since it is no
-longer just RPS.
-
-Rename the rps_lock*() functions to backlog_lock*().
-
-Suggested-by: Jakub Kicinski <kuba@kernel.org>
-Acked-by: Jakub Kicinski <kuba@kernel.org>
-Link: https://lore.kernel.org/r/20240309090824.2956805-5-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- net/core/dev.c | 34 +++++++++++++++++-----------------
- 1 file changed, 17 insertions(+), 17 deletions(-)
-
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -242,8 +242,8 @@ static bool use_backlog_threads(void)
-
- #endif
-
--static inline void rps_lock_irqsave(struct softnet_data *sd,
-- unsigned long *flags)
-+static inline void backlog_lock_irq_save(struct softnet_data *sd,
-+ unsigned long *flags)
- {
- if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
- spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
-@@ -251,7 +251,7 @@ static inline void rps_lock_irqsave(stru
- local_irq_save(*flags);
- }
-
--static inline void rps_lock_irq_disable(struct softnet_data *sd)
-+static inline void backlog_lock_irq_disable(struct softnet_data *sd)
- {
- if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
- spin_lock_irq(&sd->input_pkt_queue.lock);
-@@ -259,8 +259,8 @@ static inline void rps_lock_irq_disable(
- local_irq_disable();
- }
-
--static inline void rps_unlock_irq_restore(struct softnet_data *sd,
-- unsigned long *flags)
-+static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
-+ unsigned long *flags)
- {
- if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
- spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
-@@ -268,7 +268,7 @@ static inline void rps_unlock_irq_restor
- local_irq_restore(*flags);
- }
-
--static inline void rps_unlock_irq_enable(struct softnet_data *sd)
-+static inline void backlog_unlock_irq_enable(struct softnet_data *sd)
- {
- if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
- spin_unlock_irq(&sd->input_pkt_queue.lock);
-@@ -4758,12 +4758,12 @@ void kick_defer_list_purge(struct softne
- unsigned long flags;
-
- if (use_backlog_threads()) {
-- rps_lock_irqsave(sd, &flags);
-+ backlog_lock_irq_save(sd, &flags);
-
- if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
- __napi_schedule_irqoff(&sd->backlog);
-
-- rps_unlock_irq_restore(sd, &flags);
-+ backlog_unlock_irq_restore(sd, &flags);
-
- } else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
- smp_call_function_single_async(cpu, &sd->defer_csd);
-@@ -4825,7 +4825,7 @@ static int enqueue_to_backlog(struct sk_
- reason = SKB_DROP_REASON_NOT_SPECIFIED;
- sd = &per_cpu(softnet_data, cpu);
-
-- rps_lock_irqsave(sd, &flags);
-+ backlog_lock_irq_save(sd, &flags);
- if (!netif_running(skb->dev))
- goto drop;
- qlen = skb_queue_len(&sd->input_pkt_queue);
-@@ -4834,7 +4834,7 @@ static int enqueue_to_backlog(struct sk_
- enqueue:
- __skb_queue_tail(&sd->input_pkt_queue, skb);
- input_queue_tail_incr_save(sd, qtail);
-- rps_unlock_irq_restore(sd, &flags);
-+ backlog_unlock_irq_restore(sd, &flags);
- return NET_RX_SUCCESS;
- }
-
-@@ -4849,7 +4849,7 @@ static int enqueue_to_backlog(struct sk_
-
- drop:
- sd->dropped++;
-- rps_unlock_irq_restore(sd, &flags);
-+ backlog_unlock_irq_restore(sd, &flags);
-
- dev_core_stats_rx_dropped_inc(skb->dev);
- kfree_skb_reason(skb, reason);
-@@ -5880,7 +5880,7 @@ static void flush_backlog(struct work_st
- local_bh_disable();
- sd = this_cpu_ptr(&softnet_data);
-
-- rps_lock_irq_disable(sd);
-+ backlog_lock_irq_disable(sd);
- skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
- if (skb->dev->reg_state == NETREG_UNREGISTERING) {
- __skb_unlink(skb, &sd->input_pkt_queue);
-@@ -5888,7 +5888,7 @@ static void flush_backlog(struct work_st
- input_queue_head_incr(sd);
- }
- }
-- rps_unlock_irq_enable(sd);
-+ backlog_unlock_irq_enable(sd);
-
- skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
- if (skb->dev->reg_state == NETREG_UNREGISTERING) {
-@@ -5906,14 +5906,14 @@ static bool flush_required(int cpu)
- struct softnet_data *sd = &per_cpu(softnet_data, cpu);
- bool do_flush;
-
-- rps_lock_irq_disable(sd);
-+ backlog_lock_irq_disable(sd);
-
- /* as insertion into process_queue happens with the rps lock held,
- * process_queue access may race only with dequeue
- */
- do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
- !skb_queue_empty_lockless(&sd->process_queue);
-- rps_unlock_irq_enable(sd);
-+ backlog_unlock_irq_enable(sd);
-
- return do_flush;
- #endif
-@@ -6028,7 +6028,7 @@ static int process_backlog(struct napi_s
-
- }
-
-- rps_lock_irq_disable(sd);
-+ backlog_lock_irq_disable(sd);
- if (skb_queue_empty(&sd->input_pkt_queue)) {
- /*
- * Inline a custom version of __napi_complete().
-@@ -6044,7 +6044,7 @@ static int process_backlog(struct napi_s
- skb_queue_splice_tail_init(&sd->input_pkt_queue,
- &sd->process_queue);
- }
-- rps_unlock_irq_enable(sd);
-+ backlog_unlock_irq_enable(sd);
- }
-
- return work;
diff --git a/debian/patches-rt/0004-net-Use-nested-BH-locking-for-napi_alloc_cache.patch b/debian/patches-rt/0004-net-Use-nested-BH-locking-for-napi_alloc_cache.patch
new file mode 100644
index 0000000000..76e8f4e04c
--- /dev/null
+++ b/debian/patches-rt/0004-net-Use-nested-BH-locking-for-napi_alloc_cache.patch
@@ -0,0 +1,131 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 23 Oct 2023 17:07:56 +0200
+Subject: [PATCH 04/15] net: Use nested-BH locking for napi_alloc_cache.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+napi_alloc_cache is a per-CPU variable and relies on disabled BH for its
+locking. Without per-CPU locking in local_bh_disable() on PREEMPT_RT
+this data structure requires explicit locking.
+
+Add a local_lock_t to the data structure and use local_lock_nested_bh()
+for locking. This change adds only lockdep coverage and does not alter
+the functional behaviour for !PREEMPT_RT.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/core/skbuff.c | 29 ++++++++++++++++++++++++-----
+ 1 file changed, 24 insertions(+), 5 deletions(-)
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -277,6 +277,7 @@ static void *page_frag_alloc_1k(struct p
+ #endif
+
+ struct napi_alloc_cache {
++ local_lock_t bh_lock;
+ struct page_frag_cache page;
+ struct page_frag_1k page_small;
+ unsigned int skb_count;
+@@ -284,7 +285,9 @@ struct napi_alloc_cache {
+ };
+
+ static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
+-static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
++static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache) = {
++ .bh_lock = INIT_LOCAL_LOCK(bh_lock),
++};
+
+ /* Double check that napi_get_frags() allocates skbs with
+ * skb->head being backed by slab, not a page fragment.
+@@ -306,11 +309,16 @@ void napi_get_frags_check(struct napi_st
+ void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
+ {
+ struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
++ void *data;
+
+ fragsz = SKB_DATA_ALIGN(fragsz);
+
+- return __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
++ local_lock_nested_bh(&napi_alloc_cache.bh_lock);
++ data = __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
+ align_mask);
++ local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
++ return data;
++
+ }
+ EXPORT_SYMBOL(__napi_alloc_frag_align);
+
+@@ -338,16 +346,20 @@ static struct sk_buff *napi_skb_cache_ge
+ struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+ struct sk_buff *skb;
+
++ local_lock_nested_bh(&napi_alloc_cache.bh_lock);
+ if (unlikely(!nc->skb_count)) {
+ nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
+ GFP_ATOMIC,
+ NAPI_SKB_CACHE_BULK,
+ nc->skb_cache);
+- if (unlikely(!nc->skb_count))
++ if (unlikely(!nc->skb_count)) {
++ local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
+ return NULL;
++ }
+ }
+
+ skb = nc->skb_cache[--nc->skb_count];
++ local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
+ kasan_mempool_unpoison_object(skb, kmem_cache_size(net_hotdata.skbuff_cache));
+
+ return skb;
+@@ -740,9 +752,13 @@ struct sk_buff *__netdev_alloc_skb(struc
+ pfmemalloc = nc->pfmemalloc;
+ } else {
+ local_bh_disable();
++ local_lock_nested_bh(&napi_alloc_cache.bh_lock);
++
+ nc = this_cpu_ptr(&napi_alloc_cache.page);
+ data = page_frag_alloc(nc, len, gfp_mask);
+ pfmemalloc = nc->pfmemalloc;
++
++ local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
+ local_bh_enable();
+ }
+
+@@ -806,11 +822,11 @@ struct sk_buff *napi_alloc_skb(struct na
+ goto skb_success;
+ }
+
+- nc = this_cpu_ptr(&napi_alloc_cache);
+-
+ if (sk_memalloc_socks())
+ gfp_mask |= __GFP_MEMALLOC;
+
++ local_lock_nested_bh(&napi_alloc_cache.bh_lock);
++ nc = this_cpu_ptr(&napi_alloc_cache);
+ if (NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) {
+ /* we are artificially inflating the allocation size, but
+ * that is not as bad as it may look like, as:
+@@ -832,6 +848,7 @@ struct sk_buff *napi_alloc_skb(struct na
+ data = page_frag_alloc(&nc->page, len, gfp_mask);
+ pfmemalloc = nc->page.pfmemalloc;
+ }
++ local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
+
+ if (unlikely(!data))
+ return NULL;
+@@ -1429,6 +1446,7 @@ static void napi_skb_cache_put(struct sk
+ if (!kasan_mempool_poison_object(skb))
+ return;
+
++ local_lock_nested_bh(&napi_alloc_cache.bh_lock);
+ nc->skb_cache[nc->skb_count++] = skb;
+
+ if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
+@@ -1440,6 +1458,7 @@ static void napi_skb_cache_put(struct sk
+ nc->skb_cache + NAPI_SKB_CACHE_HALF);
+ nc->skb_count = NAPI_SKB_CACHE_HALF;
+ }
++ local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
+ }
+
+ void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason)
diff --git a/debian/patches-rt/0004-perf-Shrink-the-size-of-the-recursion-counter.patch b/debian/patches-rt/0004-perf-Shrink-the-size-of-the-recursion-counter.patch
new file mode 100644
index 0000000000..ed4998a37a
--- /dev/null
+++ b/debian/patches-rt/0004-perf-Shrink-the-size-of-the-recursion-counter.patch
@@ -0,0 +1,66 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 4 Jul 2024 19:03:38 +0200
+Subject: [PATCH 4/7] perf: Shrink the size of the recursion counter.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+There are four recursion counters, one for each context. The type of the
+counter is `int' but the counter is used as a `bool' since it is only
+incremented if zero.
+The main goal here is to shrink the whole struct into a 32bit int which
+can later be added to task_struct into an existing hole.
+
+Reduce the type of the recursion counter to an unsigned char, keep the
+increment/decrement operation.
+
+Tested-by: Marco Elver <elver@google.com>
+Link: https://lore.kernel.org/20240621091601.18227-1-frederic@kernel.org
+Link: https://lore.kernel.org/r/20240704170424.1466941-5-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/events/callchain.c | 2 +-
+ kernel/events/core.c | 2 +-
+ kernel/events/internal.h | 4 ++--
+ 3 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/kernel/events/callchain.c
++++ b/kernel/events/callchain.c
+@@ -29,7 +29,7 @@ static inline size_t perf_callchain_entr
+ sysctl_perf_event_max_contexts_per_stack));
+ }
+
+-static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
++static DEFINE_PER_CPU(u8, callchain_recursion[PERF_NR_CONTEXTS]);
+ static atomic_t nr_callchain_events;
+ static DEFINE_MUTEX(callchain_mutex);
+ static struct callchain_cpus_entries *callchain_cpus_entries;
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -9776,7 +9776,7 @@ struct swevent_htable {
+ int hlist_refcount;
+
+ /* Recursion avoidance in each contexts */
+- int recursion[PERF_NR_CONTEXTS];
++ u8 recursion[PERF_NR_CONTEXTS];
+ };
+
+ static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
+--- a/kernel/events/internal.h
++++ b/kernel/events/internal.h
+@@ -208,7 +208,7 @@ arch_perf_out_copy_user(void *dst, const
+
+ DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
+
+-static inline int get_recursion_context(int *recursion)
++static inline int get_recursion_context(u8 *recursion)
+ {
+ unsigned char rctx = interrupt_context_level();
+
+@@ -221,7 +221,7 @@ static inline int get_recursion_context(
+ return rctx;
+ }
+
+-static inline void put_recursion_context(int *recursion, int rctx)
++static inline void put_recursion_context(u8 *recursion, int rctx)
+ {
+ barrier();
+ recursion[rctx]--;
diff --git a/debian/patches-rt/0009-printk-Check-printk_deferred_enter-_exit-usage.patch b/debian/patches-rt/0004-printk-Check-printk_deferred_enter-_exit-usage.patch
index b706ad35e4..ea2224e7e7 100644
--- a/debian/patches-rt/0009-printk-Check-printk_deferred_enter-_exit-usage.patch
+++ b/debian/patches-rt/0004-printk-Check-printk_deferred_enter-_exit-usage.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 22 Sep 2023 14:58:18 +0000
-Subject: [PATCH 09/48] printk: Check printk_deferred_enter()/_exit() usage
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 04/48] printk: Check printk_deferred_enter()/_exit() usage
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Add validation that printk_deferred_enter()/_exit() are called in
non-migration contexts.
diff --git a/debian/patches-rt/0004-serial-meson-Use-uart_prepare_sysrq_char.patch b/debian/patches-rt/0004-serial-meson-Use-uart_prepare_sysrq_char.patch
deleted file mode 100644
index d85b4e4333..0000000000
--- a/debian/patches-rt/0004-serial-meson-Use-uart_prepare_sysrq_char.patch
+++ /dev/null
@@ -1,77 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 1 Mar 2024 22:45:17 +0100
-Subject: [PATCH 04/18] serial: meson: Use uart_prepare_sysrq_char().
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-The port lock is a spinlock_t which is becomes a sleeping lock on PREEMPT_RT.
-The driver splits the locking function into two parts: local_irq_save() and
-uart_port_lock() and this breaks PREEMPT_RT.
-
-Delay handling sysrq until port lock is dropped.
-Remove the special case in the console write routine an always use the
-complete locking function.
-
-Cc: Kevin Hilman <khilman@baylibre.com>
-Cc: Jerome Brunet <jbrunet@baylibre.com>
-Cc: linux-arm-kernel@lists.infradead.org
-Cc: linux-amlogic@lists.infradead.org
-Link: https://lore.kernel.org/r/20240301215246.891055-5-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/tty/serial/meson_uart.c | 22 ++++++++--------------
- 1 file changed, 8 insertions(+), 14 deletions(-)
-
---- a/drivers/tty/serial/meson_uart.c
-+++ b/drivers/tty/serial/meson_uart.c
-@@ -220,7 +220,7 @@ static void meson_receive_chars(struct u
- continue;
- }
-
-- if (uart_handle_sysrq_char(port, ch))
-+ if (uart_prepare_sysrq_char(port, ch))
- continue;
-
- if ((status & port->ignore_status_mask) == 0)
-@@ -248,7 +248,7 @@ static irqreturn_t meson_uart_interrupt(
- meson_uart_start_tx(port);
- }
-
-- uart_port_unlock(port);
-+ uart_unlock_and_check_sysrq(port);
-
- return IRQ_HANDLED;
- }
-@@ -556,18 +556,13 @@ static void meson_serial_port_write(stru
- u_int count)
- {
- unsigned long flags;
-- int locked;
-+ int locked = 1;
- u32 val, tmp;
-
-- local_irq_save(flags);
-- if (port->sysrq) {
-- locked = 0;
-- } else if (oops_in_progress) {
-- locked = uart_port_trylock(port);
-- } else {
-- uart_port_lock(port);
-- locked = 1;
-- }
-+ if (oops_in_progress)
-+ locked = uart_port_trylock_irqsave(port, &flags);
-+ else
-+ uart_port_lock_irqsave(port, &flags);
-
- val = readl(port->membase + AML_UART_CONTROL);
- tmp = val & ~(AML_UART_TX_INT_EN | AML_UART_RX_INT_EN);
-@@ -577,8 +572,7 @@ static void meson_serial_port_write(stru
- writel(val, port->membase + AML_UART_CONTROL);
-
- if (locked)
-- uart_port_unlock(port);
-- local_irq_restore(flags);
-+ uart_port_unlock_irqrestore(port, flags);
- }
-
- static void meson_serial_console_write(struct console *co, const char *s,
diff --git a/debian/patches-rt/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch b/debian/patches-rt/0005-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch
index b5712afb62..f8e91753b7 100644
--- a/debian/patches-rt/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch
+++ b/debian/patches-rt/0005-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch
@@ -1,8 +1,8 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 8 Sep 2021 19:03:41 +0200
-Subject: [PATCH 09/10] drm/i915/gt: Use spin_lock_irq() instead of
+Subject: [PATCH 5/8] drm/i915/gt: Use spin_lock_irq() instead of
local_irq_disable() + spin_lock()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
execlists_dequeue() is invoked from a function which uses
local_irq_disable() to disable interrupts so the spin_lock() behaves
diff --git a/debian/patches-rt/0005-net-tcp_sigpool-Use-nested-BH-locking-for-sigpool_sc.patch b/debian/patches-rt/0005-net-tcp_sigpool-Use-nested-BH-locking-for-sigpool_sc.patch
new file mode 100644
index 0000000000..d3773f2f0d
--- /dev/null
+++ b/debian/patches-rt/0005-net-tcp_sigpool-Use-nested-BH-locking-for-sigpool_sc.patch
@@ -0,0 +1,75 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 24 Nov 2023 10:11:03 +0100
+Subject: [PATCH 05/15] net/tcp_sigpool: Use nested-BH locking for
+ sigpool_scratch.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+sigpool_scratch is a per-CPU variable and relies on disabled BH for its
+locking. Without per-CPU locking in local_bh_disable() on PREEMPT_RT
+this data structure requires explicit locking.
+
+Make a struct with a pad member (original sigpool_scratch) and a
+local_lock_t and use local_lock_nested_bh() for locking. This change
+adds only lockdep coverage and does not alter the functional behaviour
+for !PREEMPT_RT.
+
+Cc: David Ahern <dsahern@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/ipv4/tcp_sigpool.c | 17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/tcp_sigpool.c
++++ b/net/ipv4/tcp_sigpool.c
+@@ -10,7 +10,14 @@
+ #include <net/tcp.h>
+
+ static size_t __scratch_size;
+-static DEFINE_PER_CPU(void __rcu *, sigpool_scratch);
++struct sigpool_scratch {
++ local_lock_t bh_lock;
++ void __rcu *pad;
++};
++
++static DEFINE_PER_CPU(struct sigpool_scratch, sigpool_scratch) = {
++ .bh_lock = INIT_LOCAL_LOCK(bh_lock),
++};
+
+ struct sigpool_entry {
+ struct crypto_ahash *hash;
+@@ -72,7 +79,7 @@ static int sigpool_reserve_scratch(size_
+ break;
+ }
+
+- old_scratch = rcu_replace_pointer(per_cpu(sigpool_scratch, cpu),
++ old_scratch = rcu_replace_pointer(per_cpu(sigpool_scratch.pad, cpu),
+ scratch, lockdep_is_held(&cpool_mutex));
+ if (!cpu_online(cpu) || !old_scratch) {
+ kfree(old_scratch);
+@@ -93,7 +100,7 @@ static void sigpool_scratch_free(void)
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+- kfree(rcu_replace_pointer(per_cpu(sigpool_scratch, cpu),
++ kfree(rcu_replace_pointer(per_cpu(sigpool_scratch.pad, cpu),
+ NULL, lockdep_is_held(&cpool_mutex)));
+ __scratch_size = 0;
+ }
+@@ -277,7 +284,8 @@ int tcp_sigpool_start(unsigned int id, s
+ /* Pairs with tcp_sigpool_reserve_scratch(), scratch area is
+ * valid (allocated) until tcp_sigpool_end().
+ */
+- c->scratch = rcu_dereference_bh(*this_cpu_ptr(&sigpool_scratch));
++ local_lock_nested_bh(&sigpool_scratch.bh_lock);
++ c->scratch = rcu_dereference_bh(*this_cpu_ptr(&sigpool_scratch.pad));
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(tcp_sigpool_start);
+@@ -286,6 +294,7 @@ void tcp_sigpool_end(struct tcp_sigpool
+ {
+ struct crypto_ahash *hash = crypto_ahash_reqtfm(c->req);
+
++ local_unlock_nested_bh(&sigpool_scratch.bh_lock);
+ rcu_read_unlock_bh();
+ ahash_request_free(c->req);
+ crypto_free_ahash(hash);
diff --git a/debian/patches-rt/0005-perf-Move-swevent_htable-recursion-into-task_struct.patch b/debian/patches-rt/0005-perf-Move-swevent_htable-recursion-into-task_struct.patch
new file mode 100644
index 0000000000..5070c10a30
--- /dev/null
+++ b/debian/patches-rt/0005-perf-Move-swevent_htable-recursion-into-task_struct.patch
@@ -0,0 +1,121 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 4 Jul 2024 19:03:39 +0200
+Subject: [PATCH 5/7] perf: Move swevent_htable::recursion into task_struct.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+The swevent_htable::recursion counter is used to avoid creating a
+swevent while an event is processed to avoid recursion. The counter is
+per-CPU and preemption must be disabled to have a stable counter.
+perf_pending_task() disables preemption to access the counter and then
+signal. This is problematic on PREEMPT_RT because sending a signal uses
+a spinlock_t which must not be acquired in atomic on PREEMPT_RT because
+it becomes a sleeping lock.
+
+The atomic context can be avoided by moving the counter into the
+task_struct. There is a 4 byte hole between futex_state (usually always
+on) and the following perf pointer (perf_event_ctxp). After the
+recursion lost some weight it fits perfectly.
+
+Move swevent_htable::recursion into task_struct.
+
+Tested-by: Marco Elver <elver@google.com>
+Link: https://lore.kernel.org/r/20240704170424.1466941-6-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/perf_event.h | 6 ------
+ include/linux/sched.h | 7 +++++++
+ kernel/events/core.c | 13 +++----------
+ kernel/events/internal.h | 2 +-
+ 4 files changed, 11 insertions(+), 17 deletions(-)
+
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -970,12 +970,6 @@ struct perf_event_context {
+ local_t nr_pending;
+ };
+
+-/*
+- * Number of contexts where an event can trigger:
+- * task, softirq, hardirq, nmi.
+- */
+-#define PERF_NR_CONTEXTS 4
+-
+ struct perf_cpu_pmu_context {
+ struct perf_event_pmu_context epc;
+ struct perf_event_pmu_context *task_epc;
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -734,6 +734,12 @@ enum perf_event_task_context {
+ perf_nr_task_contexts,
+ };
+
++/*
++ * Number of contexts where an event can trigger:
++ * task, softirq, hardirq, nmi.
++ */
++#define PERF_NR_CONTEXTS 4
++
+ struct wake_q_node {
+ struct wake_q_node *next;
+ };
+@@ -1256,6 +1262,7 @@ struct task_struct {
+ unsigned int futex_state;
+ #endif
+ #ifdef CONFIG_PERF_EVENTS
++ u8 perf_recursion[PERF_NR_CONTEXTS];
+ struct perf_event_context *perf_event_ctxp;
+ struct mutex perf_event_mutex;
+ struct list_head perf_event_list;
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -9774,11 +9774,7 @@ struct swevent_htable {
+ struct swevent_hlist *swevent_hlist;
+ struct mutex hlist_mutex;
+ int hlist_refcount;
+-
+- /* Recursion avoidance in each contexts */
+- u8 recursion[PERF_NR_CONTEXTS];
+ };
+-
+ static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
+
+ /*
+@@ -9976,17 +9972,13 @@ DEFINE_PER_CPU(struct pt_regs, __perf_re
+
+ int perf_swevent_get_recursion_context(void)
+ {
+- struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
+-
+- return get_recursion_context(swhash->recursion);
++ return get_recursion_context(current->perf_recursion);
+ }
+ EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
+
+ void perf_swevent_put_recursion_context(int rctx)
+ {
+- struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
+-
+- put_recursion_context(swhash->recursion, rctx);
++ put_recursion_context(current->perf_recursion, rctx);
+ }
+
+ void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
+@@ -13653,6 +13645,7 @@ int perf_event_init_task(struct task_str
+ {
+ int ret;
+
++ memset(child->perf_recursion, 0, sizeof(child->perf_recursion));
+ child->perf_event_ctxp = NULL;
+ mutex_init(&child->perf_event_mutex);
+ INIT_LIST_HEAD(&child->perf_event_list);
+--- a/kernel/events/internal.h
++++ b/kernel/events/internal.h
+@@ -221,7 +221,7 @@ static inline int get_recursion_context(
+ return rctx;
+ }
+
+-static inline void put_recursion_context(u8 *recursion, int rctx)
++static inline void put_recursion_context(u8 *recursion, unsigned char rctx)
+ {
+ barrier();
+ recursion[rctx]--;
diff --git a/debian/patches-rt/0010-printk-nbcon-Add-detailed-doc-for-write_atomic.patch b/debian/patches-rt/0005-printk-nbcon-Add-detailed-doc-for-write_atomic.patch
index 354e027bdb..7c3d1192e9 100644
--- a/debian/patches-rt/0010-printk-nbcon-Add-detailed-doc-for-write_atomic.patch
+++ b/debian/patches-rt/0005-printk-nbcon-Add-detailed-doc-for-write_atomic.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Wed, 7 Feb 2024 18:38:14 +0000
-Subject: [PATCH 10/48] printk: nbcon: Add detailed doc for write_atomic()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 05/48] printk: nbcon: Add detailed doc for write_atomic()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
The write_atomic() callback has special requirements and is
allowed to use special helper functions. Provide detailed
@@ -12,12 +12,12 @@ Signed-off-by: John Ogness <john.ogness@linutronix.de>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/console.h | 31 +++++++++++++++++++++++++++----
- 1 file changed, 27 insertions(+), 4 deletions(-)
+ include/linux/console.h | 33 +++++++++++++++++++++++++++++----
+ 1 file changed, 29 insertions(+), 4 deletions(-)
--- a/include/linux/console.h
+++ b/include/linux/console.h
-@@ -285,7 +285,7 @@ struct nbcon_write_context {
+@@ -303,7 +303,7 @@ struct nbcon_write_context {
/**
* struct console - The console descriptor structure
* @name: The name of the console driver
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* @read: Read callback for console input (Optional)
* @device: The underlying TTY device driver (Optional)
* @unblank: Callback to unblank the console (Optional)
-@@ -302,7 +302,6 @@ struct nbcon_write_context {
+@@ -320,7 +320,6 @@ struct nbcon_write_context {
* @data: Driver private data
* @node: hlist node for the console list
*
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* @nbcon_state: State for nbcon consoles
* @nbcon_seq: Sequence number of the next record for nbcon to print
* @pbufs: Pointer to nbcon private buffer
-@@ -327,8 +326,32 @@ struct console {
+@@ -345,8 +344,34 @@ struct console {
struct hlist_node node;
/* nbcon console specific members */
@@ -46,21 +46,23 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ *
+ * NBCON callback to write out text in any context.
+ *
-+ * This callback is called with the console already acquired. The
-+ * callback can use nbcon_can_proceed() at any time to verify that
-+ * it is still the owner of the console. In the case that it has
-+ * lost ownership, it is no longer allowed to go forward. In this
-+ * case it must back out immediately and carefully. The buffer
-+ * content is also no longer trusted since it no longer belongs to
-+ * the context.
++ * This callback is called with the console already acquired. However,
++ * a higher priority context is allowed to take it over by default.
+ *
-+ * If the callback needs to perform actions where ownership is not
-+ * allowed to be taken over, nbcon_enter_unsafe() and
-+ * nbcon_exit_unsafe() can be used to mark such sections. These
-+ * functions are also points of possible ownership transfer. If
-+ * either function returns false, ownership has been lost.
++ * The callback must call nbcon_enter_unsafe() and nbcon_exit_unsafe()
++ * around any code where the takeover is not safe, for example, when
++ * manipulating the serial port registers.
+ *
-+ * This callback can be called from any context (including NMI).
++ * nbcon_enter_unsafe() will fail if the context has lost the console
++ * ownership in the meantime. In this case, the callback is no longer
++ * allowed to go forward. It must back out immediately and carefully.
++ * The buffer content is also no longer trusted since it no longer
++ * belongs to the context.
++ *
++ * The callback should allow the takeover whenever it is safe. It
++ * increases the chance to see messages when the system is in trouble.
++ *
++ * The callback can be called from any context (including NMI).
+ * Therefore it must avoid usage of any locking and instead rely
+ * on the console ownership for synchronization.
+ */
diff --git a/debian/patches-rt/0005-serial-msm-Use-uart_prepare_sysrq_char.patch b/debian/patches-rt/0005-serial-msm-Use-uart_prepare_sysrq_char.patch
deleted file mode 100644
index 75c822db47..0000000000
--- a/debian/patches-rt/0005-serial-msm-Use-uart_prepare_sysrq_char.patch
+++ /dev/null
@@ -1,116 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 1 Mar 2024 22:45:18 +0100
-Subject: [PATCH 05/18] serial: msm: Use uart_prepare_sysrq_char().
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-The port lock is a spinlock_t which is becomes a sleeping lock on PREEMPT_RT.
-The driver splits the locking function into two parts: local_irq_save() and
-uart_port_lock() and this breaks PREEMPT_RT.
-
-Delay handling sysrq until port lock is dropped.
-Remove the special case in the console write routine an always use the
-complete locking function.
-
-Cc: Bjorn Andersson <andersson@kernel.org>
-Cc: Konrad Dybcio <konrad.dybcio@linaro.org>
-Cc: linux-arm-msm@vger.kernel.org
-Link: https://lore.kernel.org/r/20240301215246.891055-6-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/tty/serial/msm_serial.c | 33 ++++++++++-----------------------
- 1 file changed, 10 insertions(+), 23 deletions(-)
-
---- a/drivers/tty/serial/msm_serial.c
-+++ b/drivers/tty/serial/msm_serial.c
-@@ -588,16 +588,14 @@ static void msm_complete_rx_dma(void *ar
- if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
- flag = TTY_NORMAL;
-
-- uart_port_unlock_irqrestore(port, flags);
-- sysrq = uart_handle_sysrq_char(port, dma->virt[i]);
-- uart_port_lock_irqsave(port, &flags);
-+ sysrq = uart_prepare_sysrq_char(port, dma->virt[i]);
- if (!sysrq)
- tty_insert_flip_char(tport, dma->virt[i], flag);
- }
-
- msm_start_rx_dma(msm_port);
- done:
-- uart_port_unlock_irqrestore(port, flags);
-+ uart_unlock_and_check_sysrq_irqrestore(port, flags);
-
- if (count)
- tty_flip_buffer_push(tport);
-@@ -763,9 +761,7 @@ static void msm_handle_rx_dm(struct uart
- if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
- flag = TTY_NORMAL;
-
-- uart_port_unlock(port);
-- sysrq = uart_handle_sysrq_char(port, buf[i]);
-- uart_port_lock(port);
-+ sysrq = uart_prepare_sysrq_char(port, buf[i]);
- if (!sysrq)
- tty_insert_flip_char(tport, buf[i], flag);
- }
-@@ -825,9 +821,7 @@ static void msm_handle_rx(struct uart_po
- else if (sr & MSM_UART_SR_PAR_FRAME_ERR)
- flag = TTY_FRAME;
-
-- uart_port_unlock(port);
-- sysrq = uart_handle_sysrq_char(port, c);
-- uart_port_lock(port);
-+ sysrq = uart_prepare_sysrq_char(port, c);
- if (!sysrq)
- tty_insert_flip_char(tport, c, flag);
- }
-@@ -948,11 +942,10 @@ static irqreturn_t msm_uart_irq(int irq,
- struct uart_port *port = dev_id;
- struct msm_port *msm_port = to_msm_port(port);
- struct msm_dma *dma = &msm_port->rx_dma;
-- unsigned long flags;
- unsigned int misr;
- u32 val;
-
-- uart_port_lock_irqsave(port, &flags);
-+ uart_port_lock(port);
- misr = msm_read(port, MSM_UART_MISR);
- msm_write(port, 0, MSM_UART_IMR); /* disable interrupt */
-
-@@ -984,7 +977,7 @@ static irqreturn_t msm_uart_irq(int irq,
- msm_handle_delta_cts(port);
-
- msm_write(port, msm_port->imr, MSM_UART_IMR); /* restore interrupt */
-- uart_port_unlock_irqrestore(port, flags);
-+ uart_unlock_and_check_sysrq(port);
-
- return IRQ_HANDLED;
- }
-@@ -1621,14 +1614,10 @@ static void __msm_console_write(struct u
- num_newlines++;
- count += num_newlines;
-
-- local_irq_save(flags);
--
-- if (port->sysrq)
-- locked = 0;
-- else if (oops_in_progress)
-- locked = uart_port_trylock(port);
-+ if (oops_in_progress)
-+ locked = uart_port_trylock_irqsave(port, &flags);
- else
-- uart_port_lock(port);
-+ uart_port_lock_irqsave(port, &flags);
-
- if (is_uartdm)
- msm_reset_dm_count(port, count);
-@@ -1667,9 +1656,7 @@ static void __msm_console_write(struct u
- }
-
- if (locked)
-- uart_port_unlock(port);
--
-- local_irq_restore(flags);
-+ uart_port_unlock_irqrestore(port, flags);
- }
-
- static void msm_console_write(struct console *co, const char *s,
diff --git a/debian/patches-rt/0010-drm-i915-Drop-the-irqs_disabled-check.patch b/debian/patches-rt/0006-drm-i915-Drop-the-irqs_disabled-check.patch
index 1cf736b273..931d170015 100644
--- a/debian/patches-rt/0010-drm-i915-Drop-the-irqs_disabled-check.patch
+++ b/debian/patches-rt/0006-drm-i915-Drop-the-irqs_disabled-check.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 1 Oct 2021 20:01:03 +0200
-Subject: [PATCH 10/10] drm/i915: Drop the irqs_disabled() check
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 6/8] drm/i915: Drop the irqs_disabled() check
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
The !irqs_disabled() check triggers on PREEMPT_RT even with
i915_sched_engine::lock acquired. The reason is the lock is transformed
@@ -14,6 +14,7 @@ caller and will yell if the interrupts are not disabled.
Remove the !irqs_disabled() check.
Reported-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Acked-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/gpu/drm/i915/i915_request.c | 2 --
@@ -21,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
-@@ -609,7 +609,6 @@ bool __i915_request_submit(struct i915_r
+@@ -608,7 +608,6 @@ bool __i915_request_submit(struct i915_r
RQ_TRACE(request, "\n");
@@ -29,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
lockdep_assert_held(&engine->sched_engine->lock);
/*
-@@ -718,7 +717,6 @@ void __i915_request_unsubmit(struct i915
+@@ -717,7 +716,6 @@ void __i915_request_unsubmit(struct i915
*/
RQ_TRACE(request, "\n");
diff --git a/debian/patches-rt/0006-net-ipv4-Use-nested-BH-locking-for-ipv4_tcp_sk.patch b/debian/patches-rt/0006-net-ipv4-Use-nested-BH-locking-for-ipv4_tcp_sk.patch
new file mode 100644
index 0000000000..9b882bf198
--- /dev/null
+++ b/debian/patches-rt/0006-net-ipv4-Use-nested-BH-locking-for-ipv4_tcp_sk.patch
@@ -0,0 +1,94 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 24 Oct 2023 09:38:48 +0200
+Subject: [PATCH 06/15] net/ipv4: Use nested-BH locking for ipv4_tcp_sk.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+ipv4_tcp_sk is a per-CPU variable and relies on disabled BH for its
+locking. Without per-CPU locking in local_bh_disable() on PREEMPT_RT
+this data structure requires explicit locking.
+
+Make a struct with a sock member (original ipv4_tcp_sk) and a
+local_lock_t and use local_lock_nested_bh() for locking. This change
+adds only lockdep coverage and does not alter the functional behaviour
+for !PREEMPT_RT.
+
+Cc: David Ahern <dsahern@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/net/sock.h | 5 +++++
+ net/ipv4/tcp_ipv4.c | 15 +++++++++++----
+ 2 files changed, 16 insertions(+), 4 deletions(-)
+
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -544,6 +544,11 @@ struct sock {
+ netns_tracker ns_tracker;
+ };
+
++struct sock_bh_locked {
++ struct sock *sock;
++ local_lock_t bh_lock;
++};
++
+ enum sk_pacing {
+ SK_PACING_NONE = 0,
+ SK_PACING_NEEDED = 1,
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -93,7 +93,9 @@ static int tcp_v4_md5_hash_hdr(char *md5
+ struct inet_hashinfo tcp_hashinfo;
+ EXPORT_SYMBOL(tcp_hashinfo);
+
+-static DEFINE_PER_CPU(struct sock *, ipv4_tcp_sk);
++static DEFINE_PER_CPU(struct sock_bh_locked, ipv4_tcp_sk) = {
++ .bh_lock = INIT_LOCAL_LOCK(bh_lock),
++};
+
+ static u32 tcp_v4_init_seq(const struct sk_buff *skb)
+ {
+@@ -885,7 +887,9 @@ static void tcp_v4_send_reset(const stru
+ arg.tos = ip_hdr(skb)->tos;
+ arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
+ local_bh_disable();
+- ctl_sk = this_cpu_read(ipv4_tcp_sk);
++ local_lock_nested_bh(&ipv4_tcp_sk.bh_lock);
++ ctl_sk = this_cpu_read(ipv4_tcp_sk.sock);
++
+ sock_net_set(ctl_sk, net);
+ if (sk) {
+ ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
+@@ -910,6 +914,7 @@ static void tcp_v4_send_reset(const stru
+ sock_net_set(ctl_sk, &init_net);
+ __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
+ __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
++ local_unlock_nested_bh(&ipv4_tcp_sk.bh_lock);
+ local_bh_enable();
+
+ #ifdef CONFIG_TCP_MD5SIG
+@@ -1005,7 +1010,8 @@ static void tcp_v4_send_ack(const struct
+ arg.tos = tos;
+ arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
+ local_bh_disable();
+- ctl_sk = this_cpu_read(ipv4_tcp_sk);
++ local_lock_nested_bh(&ipv4_tcp_sk.bh_lock);
++ ctl_sk = this_cpu_read(ipv4_tcp_sk.sock);
+ sock_net_set(ctl_sk, net);
+ ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
+ inet_twsk(sk)->tw_mark : READ_ONCE(sk->sk_mark);
+@@ -1020,6 +1026,7 @@ static void tcp_v4_send_ack(const struct
+
+ sock_net_set(ctl_sk, &init_net);
+ __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
++ local_unlock_nested_bh(&ipv4_tcp_sk.bh_lock);
+ local_bh_enable();
+ }
+
+@@ -3620,7 +3627,7 @@ void __init tcp_v4_init(void)
+ */
+ inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
+
+- per_cpu(ipv4_tcp_sk, cpu) = sk;
++ per_cpu(ipv4_tcp_sk.sock, cpu) = sk;
+ }
+ if (register_pernet_subsys(&tcp_sk_ops))
+ panic("Failed to create the TCP control socket.\n");
diff --git a/debian/patches-rt/0006-perf-Don-t-disable-preemption-in-perf_pending_task.patch b/debian/patches-rt/0006-perf-Don-t-disable-preemption-in-perf_pending_task.patch
new file mode 100644
index 0000000000..d3408aee48
--- /dev/null
+++ b/debian/patches-rt/0006-perf-Don-t-disable-preemption-in-perf_pending_task.patch
@@ -0,0 +1,60 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 4 Jul 2024 19:03:40 +0200
+Subject: [PATCH 6/7] perf: Don't disable preemption in perf_pending_task().
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+perf_pending_task() is invoked in task context and disables preemption
+because perf_swevent_get_recursion_context() used to access per-CPU
+variables. The other reason is to create a RCU read section while
+accessing the perf_event.
+
+The recursion counter is no longer a per-CPU accounter so disabling
+preemption is no longer required. The RCU section is needed and must be
+created explicit.
+
+Replace the preemption-disable section with a explicit RCU-read section.
+
+Tested-by: Marco Elver <elver@google.com>
+Link: https://lore.kernel.org/r/20240704170424.1466941-7-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/events/core.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -5208,10 +5208,9 @@ static void perf_pending_task_sync(struc
+ }
+
+ /*
+- * All accesses related to the event are within the same
+- * non-preemptible section in perf_pending_task(). The RCU
+- * grace period before the event is freed will make sure all
+- * those accesses are complete by then.
++ * All accesses related to the event are within the same RCU section in
++ * perf_pending_task(). The RCU grace period before the event is freed
++ * will make sure all those accesses are complete by then.
+ */
+ rcuwait_wait_event(&event->pending_work_wait, !event->pending_work, TASK_UNINTERRUPTIBLE);
+ }
+@@ -6842,7 +6841,7 @@ static void perf_pending_task(struct cal
+ * critical section as the ->pending_work reset. See comment in
+ * perf_pending_task_sync().
+ */
+- preempt_disable_notrace();
++ rcu_read_lock();
+ /*
+ * If we 'fail' here, that's OK, it means recursion is already disabled
+ * and we won't recurse 'further'.
+@@ -6855,10 +6854,10 @@ static void perf_pending_task(struct cal
+ local_dec(&event->ctx->nr_pending);
+ rcuwait_wake_up(&event->pending_work_wait);
+ }
++ rcu_read_unlock();
+
+ if (rctx >= 0)
+ perf_swevent_put_recursion_context(rctx);
+- preempt_enable_notrace();
+ }
+
+ #ifdef CONFIG_GUEST_PERF_EVENTS
diff --git a/debian/patches-rt/0011-printk-nbcon-Add-callbacks-to-synchronize-with-drive.patch b/debian/patches-rt/0006-printk-nbcon-Add-callbacks-to-synchronize-with-drive.patch
index d6239d704b..ef850cfd27 100644
--- a/debian/patches-rt/0011-printk-nbcon-Add-callbacks-to-synchronize-with-drive.patch
+++ b/debian/patches-rt/0006-printk-nbcon-Add-callbacks-to-synchronize-with-drive.patch
@@ -1,27 +1,39 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Mon, 18 Mar 2024 10:11:56 +0000
-Subject: [PATCH 11/48] printk: nbcon: Add callbacks to synchronize with driver
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 06/48] printk: nbcon: Add callbacks to synchronize with driver
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Console drivers typically must deal with access to the hardware
via user input/output (such as an interactive login shell) and
-output of kernel messages via printk() calls.
+output of kernel messages via printk() calls. To provide the
+necessary synchronization, usually some driver-specific locking
+mechanism is used (for example, the port spinlock for uart
+serial consoles).
-Follow-up commits require that the printk subsystem is able to
-synchronize with the driver. Require nbcon consoles to implement
-two new callbacks (device_lock(), device_unlock()) that will
-use whatever synchronization mechanism the driver is using for
-itself (for example, the port lock for uart serial consoles).
+Until now, usage of this driver-specific locking has been hidden
+from the printk-subsystem and implemented within the various
+console callbacks. However, nbcon consoles would need to use it
+even in the generic code.
+
+Add device_lock() and device_unlock() callback which will need
+to get implemented by nbcon consoles.
+
+The callbacks will use whatever synchronization mechanism the
+driver is using for itself. The minimum requirement is to
+prevent CPU migration. It would allow a context friendly
+acquiring of nbcon console ownership in non-emergency and
+non-panic context.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/console.h | 42 ++++++++++++++++++++++++++++++++++++++++++
- 1 file changed, 42 insertions(+)
+ include/linux/console.h | 43 +++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 43 insertions(+)
--- a/include/linux/console.h
+++ b/include/linux/console.h
-@@ -352,6 +352,48 @@ struct console {
+@@ -372,6 +372,49 @@ struct console {
*/
void (*write_atomic)(struct console *con, struct nbcon_write_context *wctxt);
@@ -38,12 +50,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ * use whatever synchronization mechanism the driver is using for
+ * itself (for example, the port lock for uart serial consoles).
+ *
-+ * This callback is always called from task context. It may use any
-+ * synchronization method required by the driver. BUT this callback
-+ * MUST also disable migration. The console driver may be using a
-+ * synchronization mechanism that already takes care of this (such as
-+ * spinlocks). Otherwise this function must explicitly call
-+ * migrate_disable().
++ * The callback is always called from task context. It may use any
++ * synchronization method required by the driver.
++ *
++ * IMPORTANT: The callback MUST disable migration. The console driver
++ * may be using a synchronization mechanism that already takes
++ * care of this (such as spinlocks). Otherwise this function must
++ * explicitly call migrate_disable().
+ *
+ * The flags argument is provided as a convenience to the driver. It
+ * will be passed again to device_unlock(). It can be ignored if the
diff --git a/debian/patches-rt/0006-serial-omap-Use-uart_prepare_sysrq_char.patch b/debian/patches-rt/0006-serial-omap-Use-uart_prepare_sysrq_char.patch
deleted file mode 100644
index 58fe8b1323..0000000000
--- a/debian/patches-rt/0006-serial-omap-Use-uart_prepare_sysrq_char.patch
+++ /dev/null
@@ -1,66 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 1 Mar 2024 22:45:19 +0100
-Subject: [PATCH 06/18] serial: omap: Use uart_prepare_sysrq_char().
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-The port lock is a spinlock_t which is becomes a sleeping lock on PREEMPT_RT.
-The driver splits the locking function into two parts: local_irq_save() and
-uart_port_lock() and this breaks PREEMPT_RT.
-
-Delay handling sysrq until port lock is dropped.
-Remove the special case in the console write routine an always use the
-complete locking function.
-
-Link: https://lore.kernel.org/r/20240301215246.891055-7-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/tty/serial/omap-serial.c | 16 ++++++----------
- 1 file changed, 6 insertions(+), 10 deletions(-)
-
---- a/drivers/tty/serial/omap-serial.c
-+++ b/drivers/tty/serial/omap-serial.c
-@@ -508,7 +508,7 @@ static void serial_omap_rdi(struct uart_
-
- up->port.icount.rx++;
-
-- if (uart_handle_sysrq_char(&up->port, ch))
-+ if (uart_prepare_sysrq_char(&up->port, ch))
- return;
-
- uart_insert_char(&up->port, lsr, UART_LSR_OE, ch, TTY_NORMAL);
-@@ -563,7 +563,7 @@ static irqreturn_t serial_omap_irq(int i
- }
- } while (max_count--);
-
-- uart_port_unlock(&up->port);
-+ uart_unlock_and_check_sysrq(&up->port);
-
- tty_flip_buffer_push(&up->port.state->port);
-
-@@ -1212,13 +1212,10 @@ serial_omap_console_write(struct console
- unsigned int ier;
- int locked = 1;
-
-- local_irq_save(flags);
-- if (up->port.sysrq)
-- locked = 0;
-- else if (oops_in_progress)
-- locked = uart_port_trylock(&up->port);
-+ if (oops_in_progress)
-+ locked = uart_port_trylock_irqsave(&up->port, &flags);
- else
-- uart_port_lock(&up->port);
-+ uart_port_lock_irqsave(&up->port, &flags);
-
- /*
- * First save the IER then disable the interrupts
-@@ -1245,8 +1242,7 @@ serial_omap_console_write(struct console
- check_modem_status(up);
-
- if (locked)
-- uart_port_unlock(&up->port);
-- local_irq_restore(flags);
-+ uart_port_unlock_irqrestore(&up->port, flags);
- }
-
- static int __init
diff --git a/debian/patches-rt/drm-i915-guc-Consider-also-RCU-depth-in-busy-loop.patch b/debian/patches-rt/0007-drm-i915-guc-Consider-also-RCU-depth-in-busy-loop.patch
index 76342bd2d7..e2976aacd5 100644
--- a/debian/patches-rt/drm-i915-guc-Consider-also-RCU-depth-in-busy-loop.patch
+++ b/debian/patches-rt/0007-drm-i915-guc-Consider-also-RCU-depth-in-busy-loop.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 3 Oct 2023 21:37:21 +0200
-Subject: [PATCH] drm/i915/guc: Consider also RCU depth in busy loop.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 7/8] drm/i915/guc: Consider also RCU depth in busy loop.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
intel_guc_send_busy_loop() looks at in_atomic() and irqs_disabled() to
decide if it should busy-spin while waiting or if it may sleep.
@@ -11,6 +11,7 @@ acquired leading to RCU splats while the function sleeps.
Check also if RCU has been disabled.
Reported-by: "John B. Wyatt IV" <jwyatt@redhat.com>
+Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/gpu/drm/i915/gt/uc/intel_guc.h | 2 +-
diff --git a/debian/patches-rt/0007-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch b/debian/patches-rt/0007-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch
deleted file mode 100644
index 2d5f24fde2..0000000000
--- a/debian/patches-rt/0007-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 19 Dec 2018 10:47:02 +0100
-Subject: [PATCH 07/10] drm/i915: skip DRM_I915_LOW_LEVEL_TRACEPOINTS with
- NOTRACE
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-The order of the header files is important. If this header file is
-included after tracepoint.h was included then the NOTRACE here becomes a
-nop. Currently this happens for two .c files which use the tracepoitns
-behind DRM_I915_LOW_LEVEL_TRACEPOINTS.
-
-Cc: Steven Rostedt <rostedt@goodmis.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- drivers/gpu/drm/i915/i915_trace.h | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/gpu/drm/i915/i915_trace.h
-+++ b/drivers/gpu/drm/i915/i915_trace.h
-@@ -326,7 +326,7 @@ DEFINE_EVENT(i915_request, i915_request_
- TP_ARGS(rq)
- );
-
--#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
-+#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS) && !defined(NOTRACE)
- DEFINE_EVENT(i915_request, i915_request_guc_submit,
- TP_PROTO(struct i915_request *rq),
- TP_ARGS(rq)
diff --git a/debian/patches-rt/0007-netfilter-br_netfilter-Use-nested-BH-locking-for-brn.patch b/debian/patches-rt/0007-netfilter-br_netfilter-Use-nested-BH-locking-for-brn.patch
new file mode 100644
index 0000000000..ab910553af
--- /dev/null
+++ b/debian/patches-rt/0007-netfilter-br_netfilter-Use-nested-BH-locking-for-brn.patch
@@ -0,0 +1,98 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 21 Aug 2023 09:59:09 +0200
+Subject: [PATCH 07/15] netfilter: br_netfilter: Use nested-BH locking for
+ brnf_frag_data_storage.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+brnf_frag_data_storage is a per-CPU variable and relies on disabled BH
+for its locking. Without per-CPU locking in local_bh_disable() on
+PREEMPT_RT this data structure requires explicit locking.
+
+Add a local_lock_t to the data structure and use local_lock_nested_bh()
+for locking. This change adds only lockdep coverage and does not alter
+the functional behaviour for !PREEMPT_RT.
+
+Cc: Florian Westphal <fw@strlen.de>
+Cc: Jozsef Kadlecsik <kadlec@netfilter.org>
+Cc: Nikolay Aleksandrov <razor@blackwall.org>
+Cc: Pablo Neira Ayuso <pablo@netfilter.org>
+Cc: Roopa Prabhu <roopa@nvidia.com>
+Cc: bridge@lists.linux.dev
+Cc: coreteam@netfilter.org
+Cc: netfilter-devel@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/bridge/br_netfilter_hooks.c | 20 ++++++++++++++++----
+ 1 file changed, 16 insertions(+), 4 deletions(-)
+
+--- a/net/bridge/br_netfilter_hooks.c
++++ b/net/bridge/br_netfilter_hooks.c
+@@ -137,6 +137,7 @@ static inline bool is_pppoe_ipv6(const s
+ #define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN)
+
+ struct brnf_frag_data {
++ local_lock_t bh_lock;
+ char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH];
+ u8 encap_size;
+ u8 size;
+@@ -144,7 +145,9 @@ struct brnf_frag_data {
+ __be16 vlan_proto;
+ };
+
+-static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage);
++static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage) = {
++ .bh_lock = INIT_LOCAL_LOCK(bh_lock),
++};
+
+ static void nf_bridge_info_free(struct sk_buff *skb)
+ {
+@@ -850,6 +853,7 @@ static int br_nf_dev_queue_xmit(struct n
+ {
+ struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+ unsigned int mtu, mtu_reserved;
++ int ret;
+
+ mtu_reserved = nf_bridge_mtu_reduction(skb);
+ mtu = skb->dev->mtu;
+@@ -882,6 +886,7 @@ static int br_nf_dev_queue_xmit(struct n
+
+ IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
+
++ local_lock_nested_bh(&brnf_frag_data_storage.bh_lock);
+ data = this_cpu_ptr(&brnf_frag_data_storage);
+
+ if (skb_vlan_tag_present(skb)) {
+@@ -897,7 +902,9 @@ static int br_nf_dev_queue_xmit(struct n
+ skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
+ data->size);
+
+- return br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit);
++ ret = br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit);
++ local_unlock_nested_bh(&brnf_frag_data_storage.bh_lock);
++ return ret;
+ }
+ if (IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) &&
+ skb->protocol == htons(ETH_P_IPV6)) {
+@@ -909,6 +916,7 @@ static int br_nf_dev_queue_xmit(struct n
+
+ IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
+
++ local_lock_nested_bh(&brnf_frag_data_storage.bh_lock);
+ data = this_cpu_ptr(&brnf_frag_data_storage);
+ data->encap_size = nf_bridge_encap_header_len(skb);
+ data->size = ETH_HLEN + data->encap_size;
+@@ -916,8 +924,12 @@ static int br_nf_dev_queue_xmit(struct n
+ skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
+ data->size);
+
+- if (v6ops)
+- return v6ops->fragment(net, sk, skb, br_nf_push_frag_xmit);
++ if (v6ops) {
++ ret = v6ops->fragment(net, sk, skb, br_nf_push_frag_xmit);
++ local_unlock_nested_bh(&brnf_frag_data_storage.bh_lock);
++ return ret;
++ }
++ local_unlock_nested_bh(&brnf_frag_data_storage.bh_lock);
+
+ kfree_skb(skb);
+ return -EMSGSIZE;
diff --git a/debian/patches-rt/0004-perf-Split-__perf_pending_irq-out-of-perf_pending_ir.patch b/debian/patches-rt/0007-perf-Split-__perf_pending_irq-out-of-perf_pending_ir.patch
index 3f1d2a5e00..d0bf8acc35 100644
--- a/debian/patches-rt/0004-perf-Split-__perf_pending_irq-out-of-perf_pending_ir.patch
+++ b/debian/patches-rt/0007-perf-Split-__perf_pending_irq-out-of-perf_pending_ir.patch
@@ -1,11 +1,11 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 12 Mar 2024 19:01:52 +0100
-Subject: [PATCH 4/4] perf: Split __perf_pending_irq() out of
+Date: Thu, 4 Jul 2024 19:03:41 +0200
+Subject: [PATCH 7/7] perf: Split __perf_pending_irq() out of
perf_pending_irq()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
perf_pending_irq() invokes perf_event_wakeup() and __perf_pending_irq().
-The former is in charge of waking any tasks which wait to be woken up
+The former is in charge of waking any tasks which waits to be woken up
while the latter disables perf-events.
The irq_work perf_pending_irq(), while this an irq_work, the callback
@@ -23,12 +23,13 @@ PREEMPT_RT. Rename the split out callback to perf_pending_disable().
Tested-by: Marco Elver <elver@google.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Reported-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-Link: https://lore.kernel.org/r/20240312180814.3373778-5-bigeasy@linutronix.de
+Link: https://lore.kernel.org/all/ZMAtZ2t43GXoF6tM@kernel.org/
+Link: https://lore.kernel.org/r/20240704170424.1466941-8-bigeasy@linutronix.de
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/perf_event.h | 1 +
- kernel/events/core.c | 31 +++++++++++++++++++++++--------
- 2 files changed, 24 insertions(+), 8 deletions(-)
+ kernel/events/core.c | 29 ++++++++++++++++++++++-------
+ 2 files changed, 23 insertions(+), 7 deletions(-)
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -39,10 +40,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ struct irq_work pending_disable_irq;
struct callback_head pending_task;
unsigned int pending_work;
-
+ struct rcuwait pending_work_wait;
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
-@@ -2449,7 +2449,7 @@ static void __perf_event_disable(struct
+@@ -2451,7 +2451,7 @@ static void __perf_event_disable(struct
* hold the top-level event's child_mutex, so any descendant that
* goes to exit will block in perf_event_exit_event().
*
@@ -51,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* is the current context on this CPU and preemption is disabled,
* hence we can't get into perf_event_task_sched_out for this context.
*/
-@@ -2489,7 +2489,7 @@ EXPORT_SYMBOL_GPL(perf_event_disable);
+@@ -2491,7 +2491,7 @@ EXPORT_SYMBOL_GPL(perf_event_disable);
void perf_event_disable_inatomic(struct perf_event *event)
{
event->pending_disable = 1;
@@ -60,15 +61,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#define MAX_INTERRUPTS (~0ULL)
-@@ -5175,6 +5175,7 @@ static void perf_addr_filters_splice(str
+@@ -5218,6 +5218,7 @@ static void perf_pending_task_sync(struc
static void _free_event(struct perf_event *event)
{
irq_work_sync(&event->pending_irq);
+ irq_work_sync(&event->pending_disable_irq);
+ perf_pending_task_sync(event);
unaccount_event(event);
-
-@@ -6711,7 +6712,7 @@ static void perf_sigtrap(struct perf_eve
+@@ -6760,7 +6761,7 @@ static void perf_sigtrap(struct perf_eve
/*
* Deliver the pending work in-event-context or follow the context.
*/
@@ -77,7 +78,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
int cpu = READ_ONCE(event->oncpu);
-@@ -6749,11 +6750,26 @@ static void __perf_pending_irq(struct pe
+@@ -6798,11 +6799,26 @@ static void __perf_pending_irq(struct pe
* irq_work_queue(); // FAILS
*
* irq_work_run()
@@ -106,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void perf_pending_irq(struct irq_work *entry)
-@@ -6776,8 +6792,6 @@ static void perf_pending_irq(struct irq_
+@@ -6825,8 +6841,6 @@ static void perf_pending_irq(struct irq_
perf_event_wakeup(event);
}
@@ -115,20 +116,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (rctx >= 0)
perf_swevent_put_recursion_context(rctx);
}
-@@ -9572,7 +9586,7 @@ static int __perf_event_overflow(struct
- * is processed.
- */
- if (in_nmi())
-- irq_work_queue(&event->pending_irq);
-+ irq_work_queue(&event->pending_disable_irq);
- } else if (event->attr.exclude_kernel && valid_sample) {
- /*
- * Should not be able to return to user space without
-@@ -11912,6 +11926,7 @@ perf_event_alloc(struct perf_event_attr
+@@ -11967,6 +11981,7 @@ perf_event_alloc(struct perf_event_attr
init_waitqueue_head(&event->waitq);
init_irq_work(&event->pending_irq, perf_pending_irq);
+ event->pending_disable_irq = IRQ_WORK_INIT_HARD(perf_pending_disable);
init_task_work(&event->pending_task, perf_pending_task);
+ rcuwait_init(&event->pending_work_wait);
- mutex_init(&event->mmap_mutex);
diff --git a/debian/patches-rt/0007-printk-Properly-deal-with-nbcon-consoles-on-seq-init.patch b/debian/patches-rt/0007-printk-Properly-deal-with-nbcon-consoles-on-seq-init.patch
deleted file mode 100644
index cc54d0652e..0000000000
--- a/debian/patches-rt/0007-printk-Properly-deal-with-nbcon-consoles-on-seq-init.patch
+++ /dev/null
@@ -1,116 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Wed, 22 Nov 2023 11:23:43 +0000
-Subject: [PATCH 07/48] printk: Properly deal with nbcon consoles on seq init
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-If a non-boot console is registering and boot consoles exist, the
-consoles are flushed before being unregistered. This allows the
-non-boot console to continue where the boot console left off.
-
-If for whatever reason flushing fails, the lowest seq found from
-any of the enabled boot consoles is used. Until now con->seq was
-checked. However, if it is an nbcon boot console, the function
-nbcon_seq_read() must be used to read seq because con->seq is
-not updated for nbcon consoles.
-
-Check if it is an nbcon boot console and if so call
-nbcon_seq_read() to read seq.
-
-Also setup the nbcon sequence number and reset the legacy
-sequence number from register_console() (rather than in
-nbcon_init() and nbcon_seq_force()). This removes all legacy
-sequence handling from nbcon.c so the code is easier to follow
-and maintain.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/printk/nbcon.c | 7 +------
- kernel/printk/printk.c | 29 ++++++++++++++++++++++++-----
- 2 files changed, 25 insertions(+), 11 deletions(-)
-
---- a/kernel/printk/nbcon.c
-+++ b/kernel/printk/nbcon.c
-@@ -172,9 +172,6 @@ void nbcon_seq_force(struct console *con
- u64 valid_seq = max_t(u64, seq, prb_first_valid_seq(prb));
-
- atomic_long_set(&ACCESS_PRIVATE(con, nbcon_seq), __u64seq_to_ulseq(valid_seq));
--
-- /* Clear con->seq since nbcon consoles use con->nbcon_seq instead. */
-- con->seq = 0;
- }
-
- /**
-@@ -964,8 +961,6 @@ bool nbcon_alloc(struct console *con)
- *
- * nbcon_alloc() *must* be called and succeed before this function
- * is called.
-- *
-- * This function expects that the legacy @con->seq has been set.
- */
- void nbcon_init(struct console *con)
- {
-@@ -974,7 +969,7 @@ void nbcon_init(struct console *con)
- /* nbcon_alloc() must have been called and successful! */
- BUG_ON(!con->pbufs);
-
-- nbcon_seq_force(con, con->seq);
-+ nbcon_seq_force(con, 0);
- nbcon_state_set(con, &state);
- }
-
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -3353,6 +3353,7 @@ static void try_enable_default_console(s
- newcon->flags |= CON_CONSDEV;
- }
-
-+/* Set @newcon->seq to the first record this console should print. */
- static void console_init_seq(struct console *newcon, bool bootcon_registered)
- {
- struct console *con;
-@@ -3401,11 +3402,20 @@ static void console_init_seq(struct cons
-
- newcon->seq = prb_next_seq(prb);
- for_each_console(con) {
-- if ((con->flags & CON_BOOT) &&
-- (con->flags & CON_ENABLED) &&
-- con->seq < newcon->seq) {
-- newcon->seq = con->seq;
-+ u64 seq;
-+
-+ if (!((con->flags & CON_BOOT) &&
-+ (con->flags & CON_ENABLED))) {
-+ continue;
- }
-+
-+ if (con->flags & CON_NBCON)
-+ seq = nbcon_seq_read(con);
-+ else
-+ seq = con->seq;
-+
-+ if (seq < newcon->seq)
-+ newcon->seq = seq;
- }
- }
-
-@@ -3522,9 +3532,18 @@ void register_console(struct console *ne
- newcon->dropped = 0;
- console_init_seq(newcon, bootcon_registered);
-
-- if (newcon->flags & CON_NBCON)
-+ if (newcon->flags & CON_NBCON) {
- nbcon_init(newcon);
-
-+ /*
-+ * nbcon consoles have their own sequence counter. The legacy
-+ * sequence counter is reset so that it is clear it is not
-+ * being used.
-+ */
-+ nbcon_seq_force(newcon, newcon->seq);
-+ newcon->seq = 0;
-+ }
-+
- /*
- * Put this console in the list - keep the
- * preferred driver at the head of the list.
diff --git a/debian/patches-rt/0007-printk-nbcon-Use-driver-synchronization-while-un-reg.patch b/debian/patches-rt/0007-printk-nbcon-Use-driver-synchronization-while-un-reg.patch
new file mode 100644
index 0000000000..e2f23447fe
--- /dev/null
+++ b/debian/patches-rt/0007-printk-nbcon-Use-driver-synchronization-while-un-reg.patch
@@ -0,0 +1,116 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Fri, 15 Mar 2024 15:38:22 +0000
+Subject: [PATCH 07/48] printk: nbcon: Use driver synchronization while
+ (un)registering
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+Console drivers typically have to deal with access to the
+hardware via user input/output (such as an interactive login
+shell) and output of kernel messages via printk() calls.
+
+They use some classic driver-specific locking mechanism in most
+situations. But console->write_atomic() callbacks, used by nbcon
+consoles, are synchronized only by acquiring the console
+context.
+
+The synchronization via the console context ownership is possible
+only when the console driver is registered. It is when a
+particular device driver is connected with a particular console
+driver.
+
+The two synchronization mechanisms must be synchronized between
+each other. It is tricky because the console context ownership
+is quite special. It might be taken over by a higher priority
+context. Also CPU migration must be disabled. The most tricky
+part is to (dis)connect these two mechanisms during the console
+(un)registration.
+
+Use the driver-specific locking callbacks: device_lock(),
+device_unlock(). They allow taking the device-specific lock
+while the device is being (un)registered by the related console
+driver.
+
+For example, these callbacks lock/unlock the port lock for
+serial port drivers.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 33 ++++++++++++++++++++++++++++++++-
+ 1 file changed, 32 insertions(+), 1 deletion(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -3475,9 +3475,11 @@ static int unregister_console_locked(str
+ */
+ void register_console(struct console *newcon)
+ {
+- struct console *con;
++ bool use_device_lock = (newcon->flags & CON_NBCON) && newcon->write_atomic;
+ bool bootcon_registered = false;
+ bool realcon_registered = false;
++ struct console *con;
++ unsigned long flags;
+ u64 init_seq;
+ int err;
+
+@@ -3565,6 +3567,19 @@ void register_console(struct console *ne
+ }
+
+ /*
++ * If another context is actively using the hardware of this new
++ * console, it will not be aware of the nbcon synchronization. This
++ * is a risk that two contexts could access the hardware
++ * simultaneously if this new console is used for atomic printing
++ * and the other context is still using the hardware.
++ *
++ * Use the driver synchronization to ensure that the hardware is not
++ * in use while this new console transitions to being registered.
++ */
++ if (use_device_lock)
++ newcon->device_lock(newcon, &flags);
++
++ /*
+ * Put this console in the list - keep the
+ * preferred driver at the head of the list.
+ */
+@@ -3588,6 +3603,10 @@ void register_console(struct console *ne
+ * register_console() completes.
+ */
+
++ /* This new console is now registered. */
++ if (use_device_lock)
++ newcon->device_unlock(newcon, flags);
++
+ console_sysfs_notify();
+
+ /*
+@@ -3616,6 +3635,8 @@ EXPORT_SYMBOL(register_console);
+ /* Must be called under console_list_lock(). */
+ static int unregister_console_locked(struct console *console)
+ {
++ bool use_device_lock = (console->flags & CON_NBCON) && console->write_atomic;
++ unsigned long flags;
+ int res;
+
+ lockdep_assert_console_list_lock_held();
+@@ -3634,8 +3655,18 @@ static int unregister_console_locked(str
+ if (!console_is_registered_locked(console))
+ return -ENODEV;
+
++ /*
++ * Use the driver synchronization to ensure that the hardware is not
++ * in use while this console transitions to being unregistered.
++ */
++ if (use_device_lock)
++ console->device_lock(console, &flags);
++
+ hlist_del_init_rcu(&console->node);
+
++ if (use_device_lock)
++ console->device_unlock(console, flags);
++
+ /*
+ * <HISTORICAL>
+ * If this isn't the last console and it has CON_CONSDEV set, we
diff --git a/debian/patches-rt/0007-serial-pxa-Use-uart_prepare_sysrq_char.patch b/debian/patches-rt/0007-serial-pxa-Use-uart_prepare_sysrq_char.patch
deleted file mode 100644
index ae76e25872..0000000000
--- a/debian/patches-rt/0007-serial-pxa-Use-uart_prepare_sysrq_char.patch
+++ /dev/null
@@ -1,68 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 1 Mar 2024 22:45:20 +0100
-Subject: [PATCH 07/18] serial: pxa: Use uart_prepare_sysrq_char().
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-The port lock is a spinlock_t which is becomes a sleeping lock on PREEMPT_RT.
-The driver splits the locking function into two parts: local_irq_save() and
-uart_port_lock() and this breaks PREEMPT_RT.
-
-Delay handling sysrq until port lock is dropped.
-Remove the special case in the console write routine an always use the
-complete locking function.
-
-Link: https://lore.kernel.org/r/20240301215246.891055-8-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/tty/serial/pxa.c | 17 ++++++-----------
- 1 file changed, 6 insertions(+), 11 deletions(-)
-
---- a/drivers/tty/serial/pxa.c
-+++ b/drivers/tty/serial/pxa.c
-@@ -151,7 +151,7 @@ static inline void receive_chars(struct
- flag = TTY_FRAME;
- }
-
-- if (uart_handle_sysrq_char(&up->port, ch))
-+ if (uart_prepare_sysrq_char(&up->port, ch))
- goto ignore_char;
-
- uart_insert_char(&up->port, *status, UART_LSR_OE, ch, flag);
-@@ -232,7 +232,7 @@ static inline irqreturn_t serial_pxa_irq
- check_modem_status(up);
- if (lsr & UART_LSR_THRE)
- transmit_chars(up);
-- uart_port_unlock(&up->port);
-+ uart_unlock_and_check_sysrq(&up->port);
- return IRQ_HANDLED;
- }
-
-@@ -604,13 +604,10 @@ serial_pxa_console_write(struct console
- int locked = 1;
-
- clk_enable(up->clk);
-- local_irq_save(flags);
-- if (up->port.sysrq)
-- locked = 0;
-- else if (oops_in_progress)
-- locked = uart_port_trylock(&up->port);
-+ if (oops_in_progress)
-+ locked = uart_port_trylock_irqsave(&up->port, &flags);
- else
-- uart_port_lock(&up->port);
-+ uart_port_lock_irqsave(&up->port, &flags);
-
- /*
- * First save the IER then disable the interrupts
-@@ -628,10 +625,8 @@ serial_pxa_console_write(struct console
- serial_out(up, UART_IER, ier);
-
- if (locked)
-- uart_port_unlock(&up->port);
-- local_irq_restore(flags);
-+ uart_port_unlock_irqrestore(&up->port, flags);
- clk_disable(up->clk);
--
- }
-
- #ifdef CONFIG_CONSOLE_POLL
diff --git a/debian/patches-rt/Revert-drm-i915-Depend-on-PREEMPT_RT.patch b/debian/patches-rt/0008-Revert-drm-i915-Depend-on-PREEMPT_RT.patch
index 9c05d2aeb7..bdbd84d448 100644
--- a/debian/patches-rt/Revert-drm-i915-Depend-on-PREEMPT_RT.patch
+++ b/debian/patches-rt/0008-Revert-drm-i915-Depend-on-PREEMPT_RT.patch
@@ -1,11 +1,12 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 21 Feb 2022 17:59:14 +0100
-Subject: [PATCH] Revert "drm/i915: Depend on !PREEMPT_RT."
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 8/8] Revert "drm/i915: Depend on !PREEMPT_RT."
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Once the known issues are addressed, it should be safe to enable the
driver.
+Acked-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/gpu/drm/i915/Kconfig | 1 -
diff --git a/debian/patches-rt/0008-drm-i915-gt-Queue-and-wait-for-the-irq_work-item.patch b/debian/patches-rt/0008-drm-i915-gt-Queue-and-wait-for-the-irq_work-item.patch
deleted file mode 100644
index c7681ff217..0000000000
--- a/debian/patches-rt/0008-drm-i915-gt-Queue-and-wait-for-the-irq_work-item.patch
+++ /dev/null
@@ -1,42 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 8 Sep 2021 17:18:00 +0200
-Subject: [PATCH 08/10] drm/i915/gt: Queue and wait for the irq_work item.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-Disabling interrupts and invoking the irq_work function directly breaks
-on PREEMPT_RT.
-PREEMPT_RT does not invoke all irq_work from hardirq context because
-some of the user have spinlock_t locking in the callback function.
-These locks are then turned into a sleeping locks which can not be
-acquired with disabled interrupts.
-
-Using irq_work_queue() has the benefit that the irqwork will be invoked
-in the regular context. In general there is "no" delay between enqueuing
-the callback and its invocation because the interrupt is raised right
-away on architectures which support it (which includes x86).
-
-Use irq_work_queue() + irq_work_sync() instead invoking the callback
-directly.
-
-Reported-by: Clark Williams <williams@redhat.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
----
- drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 5 ++---
- 1 file changed, 2 insertions(+), 3 deletions(-)
-
---- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
-+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
-@@ -317,10 +317,9 @@ void __intel_breadcrumbs_park(struct int
- /* Kick the work once more to drain the signalers, and disarm the irq */
- irq_work_sync(&b->irq_work);
- while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) {
-- local_irq_disable();
-- signal_irq_work(&b->irq_work);
-- local_irq_enable();
-+ irq_work_queue(&b->irq_work);
- cond_resched();
-+ irq_work_sync(&b->irq_work);
- }
- }
-
diff --git a/debian/patches-rt/0008-net-softnet_data-Make-xmit-per-task.patch b/debian/patches-rt/0008-net-softnet_data-Make-xmit-per-task.patch
new file mode 100644
index 0000000000..24ba5a1789
--- /dev/null
+++ b/debian/patches-rt/0008-net-softnet_data-Make-xmit-per-task.patch
@@ -0,0 +1,227 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 21 Aug 2023 11:47:40 +0200
+Subject: [PATCH 08/15] net: softnet_data: Make xmit per task.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+Softirq is preemptible on PREEMPT_RT. Without a per-CPU lock in
+local_bh_disable() there is no guarantee that only one device is
+transmitting at a time.
+With preemption and multiple senders it is possible that the per-CPU
+`recursion' counter gets incremented by different threads and exceeds
+XMIT_RECURSION_LIMIT leading to a false positive recursion alert.
+The `more' member is subject to similar problems if set by one thread
+for one driver and wrongly used by another driver within another thread.
+
+Instead of adding a lock to protect the per-CPU variable it is simpler
+to make xmit per-task. Sending and receiving skbs happens always
+in thread context anyway.
+
+Having a lock to protected the per-CPU counter would block/ serialize two
+sending threads needlessly. It would also require a recursive lock to
+ensure that the owner can increment the counter further.
+
+Make the softnet_data.xmit a task_struct member on PREEMPT_RT. Add
+needed wrapper.
+
+Cc: Ben Segall <bsegall@google.com>
+Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
+Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
+Cc: Juri Lelli <juri.lelli@redhat.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Valentin Schneider <vschneid@redhat.com>
+Cc: Vincent Guittot <vincent.guittot@linaro.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/netdevice.h | 42 ++++++++++++++++++++++++++++++-----------
+ include/linux/netdevice_xmit.h | 13 ++++++++++++
+ include/linux/sched.h | 5 +++-
+ net/core/dev.c | 14 +++++++++++++
+ net/core/dev.h | 18 +++++++++++++++++
+ 5 files changed, 80 insertions(+), 12 deletions(-)
+ create mode 100644 include/linux/netdevice_xmit.h
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -43,6 +43,7 @@
+
+ #include <linux/netdev_features.h>
+ #include <linux/neighbour.h>
++#include <linux/netdevice_xmit.h>
+ #include <uapi/linux/netdevice.h>
+ #include <uapi/linux/if_bonding.h>
+ #include <uapi/linux/pkt_cls.h>
+@@ -3222,13 +3223,7 @@ struct softnet_data {
+ struct sk_buff_head xfrm_backlog;
+ #endif
+ /* written and read only by owning cpu: */
+- struct {
+- u16 recursion;
+- u8 more;
+-#ifdef CONFIG_NET_EGRESS
+- u8 skip_txqueue;
+-#endif
+- } xmit;
++ struct netdev_xmit xmit;
+ #ifdef CONFIG_RPS
+ /* input_queue_head should be written by cpu owning this struct,
+ * and only read by other cpus. Worth using a cache line.
+@@ -3256,10 +3251,18 @@ struct softnet_data {
+
+ DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
+
++#ifndef CONFIG_PREEMPT_RT
+ static inline int dev_recursion_level(void)
+ {
+ return this_cpu_read(softnet_data.xmit.recursion);
+ }
++#else
++static inline int dev_recursion_level(void)
++{
++ return current->net_xmit.recursion;
++}
++
++#endif
+
+ void __netif_schedule(struct Qdisc *q);
+ void netif_schedule_queue(struct netdev_queue *txq);
+@@ -4874,18 +4877,35 @@ static inline ktime_t netdev_get_tstamp(
+ return hwtstamps->hwtstamp;
+ }
+
+-static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
+- struct sk_buff *skb, struct net_device *dev,
+- bool more)
++#ifndef CONFIG_PREEMPT_RT
++static inline void netdev_xmit_set_more(bool more)
+ {
+ __this_cpu_write(softnet_data.xmit.more, more);
+- return ops->ndo_start_xmit(skb, dev);
+ }
+
+ static inline bool netdev_xmit_more(void)
+ {
+ return __this_cpu_read(softnet_data.xmit.more);
+ }
++#else
++static inline void netdev_xmit_set_more(bool more)
++{
++ current->net_xmit.more = more;
++}
++
++static inline bool netdev_xmit_more(void)
++{
++ return current->net_xmit.more;
++}
++#endif
++
++static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
++ struct sk_buff *skb, struct net_device *dev,
++ bool more)
++{
++ netdev_xmit_set_more(more);
++ return ops->ndo_start_xmit(skb, dev);
++}
+
+ static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
+ struct netdev_queue *txq, bool more)
+--- /dev/null
++++ b/include/linux/netdevice_xmit.h
+@@ -0,0 +1,13 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++#ifndef _LINUX_NETDEVICE_XMIT_H
++#define _LINUX_NETDEVICE_XMIT_H
++
++struct netdev_xmit {
++ u16 recursion;
++ u8 more;
++#ifdef CONFIG_NET_EGRESS
++ u8 skip_txqueue;
++#endif
++};
++
++#endif
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -36,6 +36,7 @@
+ #include <linux/signal_types.h>
+ #include <linux/syscall_user_dispatch_types.h>
+ #include <linux/mm_types_task.h>
++#include <linux/netdevice_xmit.h>
+ #include <linux/task_io_accounting.h>
+ #include <linux/posix-timers_types.h>
+ #include <linux/restart_block.h>
+@@ -981,7 +982,9 @@ struct task_struct {
+ /* delay due to memory thrashing */
+ unsigned in_thrashing:1;
+ #endif
+-
++#ifdef CONFIG_PREEMPT_RT
++ struct netdev_xmit net_xmit;
++#endif
+ unsigned long atomic_flags; /* Flags requiring atomic access. */
+
+ struct restart_block restart_block;
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3940,6 +3940,7 @@ netdev_tx_queue_mapping(struct net_devic
+ return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm));
+ }
+
++#ifndef CONFIG_PREEMPT_RT
+ static bool netdev_xmit_txqueue_skipped(void)
+ {
+ return __this_cpu_read(softnet_data.xmit.skip_txqueue);
+@@ -3950,6 +3951,19 @@ void netdev_xmit_skip_txqueue(bool skip)
+ __this_cpu_write(softnet_data.xmit.skip_txqueue, skip);
+ }
+ EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
++
++#else
++static bool netdev_xmit_txqueue_skipped(void)
++{
++ return current->net_xmit.skip_txqueue;
++}
++
++void netdev_xmit_skip_txqueue(bool skip)
++{
++ current->net_xmit.skip_txqueue = skip;
++}
++EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
++#endif
+ #endif /* CONFIG_NET_EGRESS */
+
+ #ifdef CONFIG_NET_XGRESS
+--- a/net/core/dev.h
++++ b/net/core/dev.h
+@@ -150,6 +150,8 @@ struct napi_struct *napi_by_id(unsigned
+ void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);
+
+ #define XMIT_RECURSION_LIMIT 8
++
++#ifndef CONFIG_PREEMPT_RT
+ static inline bool dev_xmit_recursion(void)
+ {
+ return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
+@@ -165,5 +167,21 @@ static inline void dev_xmit_recursion_de
+ {
+ __this_cpu_dec(softnet_data.xmit.recursion);
+ }
++#else
++static inline bool dev_xmit_recursion(void)
++{
++ return unlikely(current->net_xmit.recursion > XMIT_RECURSION_LIMIT);
++}
++
++static inline void dev_xmit_recursion_inc(void)
++{
++ current->net_xmit.recursion++;
++}
++
++static inline void dev_xmit_recursion_dec(void)
++{
++ current->net_xmit.recursion--;
++}
++#endif
+
+ #endif
diff --git a/debian/patches-rt/0013-serial-core-Provide-low-level-functions-to-lock-port.patch b/debian/patches-rt/0008-serial-core-Provide-low-level-functions-to-lock-port.patch
index c441b46257..51a1155fec 100644
--- a/debian/patches-rt/0013-serial-core-Provide-low-level-functions-to-lock-port.patch
+++ b/debian/patches-rt/0008-serial-core-Provide-low-level-functions-to-lock-port.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Mon, 11 Dec 2023 09:19:18 +0000
-Subject: [PATCH 13/48] serial: core: Provide low-level functions to lock port
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 08/48] serial: core: Provide low-level functions to lock port
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
It will be necessary at times for the uart nbcon console
drivers to acquire the port lock directly (without the
@@ -14,6 +14,8 @@ Provide low-level variants __uart_port_lock_irqsave() and
__uart_port_unlock_irqrestore() for this purpose.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/serial_core.h | 18 ++++++++++++++++++
@@ -21,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
-@@ -588,6 +588,24 @@ struct uart_port {
+@@ -590,6 +590,24 @@ struct uart_port {
void *private_data; /* generic platform data pointer */
};
diff --git a/debian/patches-rt/0008-serial-sunplus-Use-uart_prepare_sysrq_char.patch b/debian/patches-rt/0008-serial-sunplus-Use-uart_prepare_sysrq_char.patch
deleted file mode 100644
index fc7a5b7254..0000000000
--- a/debian/patches-rt/0008-serial-sunplus-Use-uart_prepare_sysrq_char.patch
+++ /dev/null
@@ -1,67 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 1 Mar 2024 22:45:21 +0100
-Subject: [PATCH 08/18] serial: sunplus: Use uart_prepare_sysrq_char().
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-The port lock is a spinlock_t which is becomes a sleeping lock on PREEMPT_RT.
-The driver splits the locking function into two parts: local_irq_save() and
-uart_port_lock() and this breaks PREEMPT_RT.
-
-Delay handling sysrq until port lock is dropped.
-Remove the special case in the console write routine an always use the
-complete locking function.
-
-Cc: Hammer Hsieh <hammerh0314@gmail.com>
-Link: https://lore.kernel.org/r/20240301215246.891055-9-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/tty/serial/sunplus-uart.c | 18 ++++++------------
- 1 file changed, 6 insertions(+), 12 deletions(-)
-
---- a/drivers/tty/serial/sunplus-uart.c
-+++ b/drivers/tty/serial/sunplus-uart.c
-@@ -260,7 +260,7 @@ static void receive_chars(struct uart_po
- if (port->ignore_status_mask & SUP_DUMMY_READ)
- goto ignore_char;
-
-- if (uart_handle_sysrq_char(port, ch))
-+ if (uart_prepare_sysrq_char(port, ch))
- goto ignore_char;
-
- uart_insert_char(port, lsr, SUP_UART_LSR_OE, ch, flag);
-@@ -287,7 +287,7 @@ static irqreturn_t sunplus_uart_irq(int
- if (isc & SUP_UART_ISC_TX)
- transmit_chars(port);
-
-- uart_port_unlock(port);
-+ uart_unlock_and_check_sysrq(port);
-
- return IRQ_HANDLED;
- }
-@@ -512,22 +512,16 @@ static void sunplus_console_write(struct
- unsigned long flags;
- int locked = 1;
-
-- local_irq_save(flags);
--
-- if (sunplus_console_ports[co->index]->port.sysrq)
-- locked = 0;
-- else if (oops_in_progress)
-- locked = uart_port_trylock(&sunplus_console_ports[co->index]->port);
-+ if (oops_in_progress)
-+ locked = uart_port_trylock_irqsave(&sunplus_console_ports[co->index]->port, &flags);
- else
-- uart_port_lock(&sunplus_console_ports[co->index]->port);
-+ uart_port_lock_irqsave(&sunplus_console_ports[co->index]->port, &flags);
-
- uart_console_write(&sunplus_console_ports[co->index]->port, s, count,
- sunplus_uart_console_putchar);
-
- if (locked)
-- uart_port_unlock(&sunplus_console_ports[co->index]->port);
--
-- local_irq_restore(flags);
-+ uart_port_unlock_irqrestore(&sunplus_console_ports[co->index]->port, flags);
- }
-
- static int __init sunplus_console_setup(struct console *co, char *options)
diff --git a/debian/patches-rt/0009-dev-Remove-PREEMPT_RT-ifdefs-from-backlog_lock.patch b/debian/patches-rt/0009-dev-Remove-PREEMPT_RT-ifdefs-from-backlog_lock.patch
new file mode 100644
index 0000000000..bc5b67d203
--- /dev/null
+++ b/debian/patches-rt/0009-dev-Remove-PREEMPT_RT-ifdefs-from-backlog_lock.patch
@@ -0,0 +1,60 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 22 Aug 2023 18:14:44 +0200
+Subject: [PATCH 09/15] dev: Remove PREEMPT_RT ifdefs from backlog_lock.*().
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+The backlog_napi locking (previously RPS) relies on explicit locking if
+either RPS or backlog NAPI is enabled. If both are disabled then locking
+was achieved by disabling interrupts except on PREEMPT_RT. PREEMPT_RT
+was excluded because the needed synchronisation was already provided
+local_bh_disable().
+
+Since the introduction of backlog NAPI and making it mandatory for
+PREEMPT_RT the ifdef within backlog_lock.*() is obsolete and can be
+removed.
+
+Remove the ifdefs in backlog_lock.*().
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/core/dev.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -229,7 +229,7 @@ static inline void backlog_lock_irq_save
+ {
+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
+ spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
+- else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ else
+ local_irq_save(*flags);
+ }
+
+@@ -237,7 +237,7 @@ static inline void backlog_lock_irq_disa
+ {
+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
+ spin_lock_irq(&sd->input_pkt_queue.lock);
+- else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ else
+ local_irq_disable();
+ }
+
+@@ -246,7 +246,7 @@ static inline void backlog_unlock_irq_re
+ {
+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
+ spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
+- else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ else
+ local_irq_restore(*flags);
+ }
+
+@@ -254,7 +254,7 @@ static inline void backlog_unlock_irq_en
+ {
+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
+ spin_unlock_irq(&sd->input_pkt_queue.lock);
+- else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ else
+ local_irq_enable();
+ }
+
diff --git a/debian/patches-rt/0009-serial-core-Introduce-wrapper-to-set-uart_port-cons.patch b/debian/patches-rt/0009-serial-core-Introduce-wrapper-to-set-uart_port-cons.patch
new file mode 100644
index 0000000000..f2e50b7924
--- /dev/null
+++ b/debian/patches-rt/0009-serial-core-Introduce-wrapper-to-set-uart_port-cons.patch
@@ -0,0 +1,122 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Wed, 17 Apr 2024 12:13:24 +0000
+Subject: [PATCH 09/48] serial: core: Introduce wrapper to set @uart_port->cons
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+Introduce uart_port_set_cons() as a wrapper to set @cons of a
+uart_port. The wrapper sets @cons under the port lock in order
+to prevent @cons from disappearing while another context is
+holding the port lock. This is necessary for a follow-up
+commit relating to the port lock wrappers, which rely on @cons
+not changing between lock and unlock.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Tested-by: Théo Lebrun <theo.lebrun@bootlin.com> # EyeQ5, AMBA-PL011
+Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/8250/8250_core.c | 6 +++---
+ drivers/tty/serial/amba-pl011.c | 2 +-
+ drivers/tty/serial/serial_core.c | 16 ++++++++--------
+ include/linux/serial_core.h | 17 +++++++++++++++++
+ 4 files changed, 29 insertions(+), 12 deletions(-)
+
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -624,11 +624,11 @@ static int univ8250_console_setup(struct
+
+ port = &serial8250_ports[co->index].port;
+ /* link port to console */
+- port->cons = co;
++ uart_port_set_cons(port, co);
+
+ retval = serial8250_console_setup(port, options, false);
+ if (retval != 0)
+- port->cons = NULL;
++ uart_port_set_cons(port, NULL);
+ return retval;
+ }
+
+@@ -686,7 +686,7 @@ static int univ8250_console_match(struct
+ continue;
+
+ co->index = i;
+- port->cons = co;
++ uart_port_set_cons(port, co);
+ return serial8250_console_setup(port, options, true);
+ }
+
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -2480,7 +2480,7 @@ static int pl011_console_match(struct co
+ continue;
+
+ co->index = i;
+- port->cons = co;
++ uart_port_set_cons(port, co);
+ return pl011_console_setup(co, options);
+ }
+
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -3168,8 +3168,15 @@ static int serial_core_add_one_port(stru
+ state->uart_port = uport;
+ uport->state = state;
+
++ /*
++ * If this port is in use as a console then the spinlock is already
++ * initialised.
++ */
++ if (!uart_console_registered(uport))
++ uart_port_spin_lock_init(uport);
++
+ state->pm_state = UART_PM_STATE_UNDEFINED;
+- uport->cons = drv->cons;
++ uart_port_set_cons(uport, drv->cons);
+ uport->minor = drv->tty_driver->minor_start + uport->line;
+ uport->name = kasprintf(GFP_KERNEL, "%s%d", drv->dev_name,
+ drv->tty_driver->name_base + uport->line);
+@@ -3178,13 +3185,6 @@ static int serial_core_add_one_port(stru
+ goto out;
+ }
+
+- /*
+- * If this port is in use as a console then the spinlock is already
+- * initialised.
+- */
+- if (!uart_console_registered(uport))
+- uart_port_spin_lock_init(uport);
+-
+ if (uport->cons && uport->dev)
+ of_console_check(uport->dev->of_node, uport->cons->name, uport->line);
+
+--- a/include/linux/serial_core.h
++++ b/include/linux/serial_core.h
+@@ -609,6 +609,23 @@ static inline void __uart_port_unlock_ir
+ }
+
+ /**
++ * uart_port_set_cons - Safely set the @cons field for a uart
++ * @up: The uart port to set
++ * @con: The new console to set to
++ *
++ * This function must be used to set @up->cons. It uses the port lock to
++ * synchronize with the port lock wrappers in order to ensure that the console
++ * cannot change or disappear while another context is holding the port lock.
++ */
++static inline void uart_port_set_cons(struct uart_port *up, struct console *con)
++{
++ unsigned long flags;
++
++ __uart_port_lock_irqsave(up, &flags);
++ up->cons = con;
++ __uart_port_unlock_irqrestore(up, flags);
++}
++/**
+ * uart_port_lock - Lock the UART port
+ * @up: Pointer to UART port structure
+ */
diff --git a/debian/patches-rt/0009-serial-lpc32xx_hs-Use-uart_prepare_sysrq_char-to-han.patch b/debian/patches-rt/0009-serial-lpc32xx_hs-Use-uart_prepare_sysrq_char-to-han.patch
deleted file mode 100644
index e099cf8e60..0000000000
--- a/debian/patches-rt/0009-serial-lpc32xx_hs-Use-uart_prepare_sysrq_char-to-han.patch
+++ /dev/null
@@ -1,68 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 1 Mar 2024 22:45:22 +0100
-Subject: [PATCH 09/18] serial: lpc32xx_hs: Use uart_prepare_sysrq_char() to
- handle sysrq.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-The port lock is a spinlock_t which is becomes a sleeping lock on PREEMPT_RT.
-The driver splits the locking function into two parts: local_irq_save() and
-uart_port_lock() and this breaks PREEMPT_RT.
-
-Handle sysrq requests sysrq once the port lock is dropped.
-Remove the special case in the console write routine an always use the
-complete locking function.
-
-Cc: Vladimir Zapolskiy <vz@mleia.com>
-Cc: linux-arm-kernel@lists.infradead.org
-Link: https://lore.kernel.org/r/20240301215246.891055-10-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/tty/serial/lpc32xx_hs.c | 17 +++++++----------
- 1 file changed, 7 insertions(+), 10 deletions(-)
-
---- a/drivers/tty/serial/lpc32xx_hs.c
-+++ b/drivers/tty/serial/lpc32xx_hs.c
-@@ -136,20 +136,16 @@ static void lpc32xx_hsuart_console_write
- int locked = 1;
-
- touch_nmi_watchdog();
-- local_irq_save(flags);
-- if (up->port.sysrq)
-- locked = 0;
-- else if (oops_in_progress)
-- locked = uart_port_trylock(&up->port);
-+ if (oops_in_progress)
-+ locked = uart_port_trylock_irqsave(&up->port, &flags);
- else
-- uart_port_lock(&up->port);
-+ uart_port_lock_irqsave(&up->port, &flags);
-
- uart_console_write(&up->port, s, count, lpc32xx_hsuart_console_putchar);
- wait_for_xmit_empty(&up->port);
-
- if (locked)
-- uart_port_unlock(&up->port);
-- local_irq_restore(flags);
-+ uart_port_unlock_irqrestore(&up->port, flags);
- }
-
- static int __init lpc32xx_hsuart_console_setup(struct console *co,
-@@ -268,7 +264,8 @@ static void __serial_lpc32xx_rx(struct u
- tty_insert_flip_char(tport, 0, TTY_FRAME);
- }
-
-- tty_insert_flip_char(tport, (tmp & 0xFF), flag);
-+ if (!uart_prepare_sysrq_char(port, tmp & 0xff))
-+ tty_insert_flip_char(tport, (tmp & 0xFF), flag);
-
- tmp = readl(LPC32XX_HSUART_FIFO(port->membase));
- }
-@@ -333,7 +330,7 @@ static irqreturn_t serial_lpc32xx_interr
- __serial_lpc32xx_tx(port);
- }
-
-- uart_port_unlock(port);
-+ uart_unlock_and_check_sysrq(port);
-
- return IRQ_HANDLED;
- }
diff --git a/debian/patches-rt/0010-console-Improve-console_srcu_read_flags-comments.patch b/debian/patches-rt/0010-console-Improve-console_srcu_read_flags-comments.patch
new file mode 100644
index 0000000000..94840da42f
--- /dev/null
+++ b/debian/patches-rt/0010-console-Improve-console_srcu_read_flags-comments.patch
@@ -0,0 +1,73 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Wed, 17 Apr 2024 14:34:50 +0000
+Subject: [PATCH 10/48] console: Improve console_srcu_read_flags() comments
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+It was not clear when exactly console_srcu_read_flags() must be
+used vs. directly reading @console->flags.
+
+Refactor and clarify that console_srcu_read_flags() is only
+needed if the console is registered or the caller is in a
+context where the registration status of the console may change
+(due to another context).
+
+The function requires the caller holds @console_srcu, which will
+ensure that the caller sees an appropriate @flags value for the
+registered console and that exit/cleanup routines will not run
+if the console is in the process of unregistration.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/console.h | 28 +++++++++++++++++-----------
+ 1 file changed, 17 insertions(+), 11 deletions(-)
+
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -446,28 +446,34 @@ extern void console_list_unlock(void) __
+ extern struct hlist_head console_list;
+
+ /**
+- * console_srcu_read_flags - Locklessly read the console flags
++ * console_srcu_read_flags - Locklessly read flags of a possibly registered
++ * console
+ * @con: struct console pointer of console to read flags from
+ *
+- * This function provides the necessary READ_ONCE() and data_race()
+- * notation for locklessly reading the console flags. The READ_ONCE()
+- * in this function matches the WRITE_ONCE() when @flags are modified
+- * for registered consoles with console_srcu_write_flags().
++ * Locklessly reading @con->flags provides a consistent read value because
++ * there is at most one CPU modifying @con->flags and that CPU is using only
++ * read-modify-write operations to do so.
+ *
+- * Only use this function to read console flags when locklessly
+- * iterating the console list via srcu.
++ * Requires console_srcu_read_lock to be held, which implies that @con might
++ * be a registered console. The purpose of holding console_srcu_read_lock is
++ * to guarantee that the console state is valid (CON_SUSPENDED/CON_ENABLED)
++ * and that no exit/cleanup routines will run if the console is currently
++ * undergoing unregistration.
++ *
++ * If the caller is holding the console_list_lock or it is _certain_ that
++ * @con is not and will not become registered, the caller may read
++ * @con->flags directly instead.
+ *
+ * Context: Any context.
++ * Return: The current value of the @con->flags field.
+ */
+ static inline short console_srcu_read_flags(const struct console *con)
+ {
+ WARN_ON_ONCE(!console_srcu_read_lock_is_held());
+
+ /*
+- * Locklessly reading console->flags provides a consistent
+- * read value because there is at most one CPU modifying
+- * console->flags and that CPU is using only read-modify-write
+- * operations to do so.
++ * The READ_ONCE() matches the WRITE_ONCE() when @flags are modified
++ * for registered consoles with console_srcu_write_flags().
+ */
+ return data_race(READ_ONCE(con->flags));
+ }
diff --git a/debian/patches-rt/0010-dev-Use-nested-BH-locking-for-softnet_data.process_q.patch b/debian/patches-rt/0010-dev-Use-nested-BH-locking-for-softnet_data.process_q.patch
new file mode 100644
index 0000000000..c21de546f2
--- /dev/null
+++ b/debian/patches-rt/0010-dev-Use-nested-BH-locking-for-softnet_data.process_q.patch
@@ -0,0 +1,94 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 22 Aug 2023 18:15:33 +0200
+Subject: [PATCH 10/15] dev: Use nested-BH locking for
+ softnet_data.process_queue.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+softnet_data::process_queue is a per-CPU variable and relies on disabled
+BH for its locking. Without per-CPU locking in local_bh_disable() on
+PREEMPT_RT this data structure requires explicit locking.
+
+softnet_data::input_queue_head can be updated lockless. This is fine
+because this value is only update CPU local by the local backlog_napi
+thread.
+
+Add a local_lock_t to softnet_data and use local_lock_nested_bh() for locking
+of process_queue. This change adds only lockdep coverage and does not
+alter the functional behaviour for !PREEMPT_RT.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/netdevice.h | 1 +
+ net/core/dev.c | 12 +++++++++++-
+ 2 files changed, 12 insertions(+), 1 deletion(-)
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -3201,6 +3201,7 @@ static inline bool dev_has_header(const
+ struct softnet_data {
+ struct list_head poll_list;
+ struct sk_buff_head process_queue;
++ local_lock_t process_queue_bh_lock;
+
+ /* stats */
+ unsigned int processed;
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -449,7 +449,9 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
+ * queue in the local softnet handler.
+ */
+
+-DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
++DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data) = {
++ .process_queue_bh_lock = INIT_LOCAL_LOCK(process_queue_bh_lock),
++};
+ EXPORT_PER_CPU_SYMBOL(softnet_data);
+
+ /* Page_pool has a lockless array/stack to alloc/recycle pages.
+@@ -5949,6 +5951,7 @@ static void flush_backlog(struct work_st
+ }
+ backlog_unlock_irq_enable(sd);
+
++ local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
+ skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
+ if (skb->dev->reg_state == NETREG_UNREGISTERING) {
+ __skb_unlink(skb, &sd->process_queue);
+@@ -5956,6 +5959,7 @@ static void flush_backlog(struct work_st
+ rps_input_queue_head_incr(sd);
+ }
+ }
++ local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
+ local_bh_enable();
+ }
+
+@@ -6077,7 +6081,9 @@ static int process_backlog(struct napi_s
+ while (again) {
+ struct sk_buff *skb;
+
++ local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
+ while ((skb = __skb_dequeue(&sd->process_queue))) {
++ local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
+ rcu_read_lock();
+ __netif_receive_skb(skb);
+ rcu_read_unlock();
+@@ -6086,7 +6092,9 @@ static int process_backlog(struct napi_s
+ return work;
+ }
+
++ local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
+ }
++ local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
+
+ backlog_lock_irq_disable(sd);
+ if (skb_queue_empty(&sd->input_pkt_queue)) {
+@@ -6101,8 +6109,10 @@ static int process_backlog(struct napi_s
+ napi->state &= NAPIF_STATE_THREADED;
+ again = false;
+ } else {
++ local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
+ skb_queue_splice_tail_init(&sd->input_pkt_queue,
+ &sd->process_queue);
++ local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
+ }
+ backlog_unlock_irq_enable(sd);
+ }
diff --git a/debian/patches-rt/0010-serial-owl-Use-uart_prepare_sysrq_char-to-handle-sys.patch b/debian/patches-rt/0010-serial-owl-Use-uart_prepare_sysrq_char-to-handle-sys.patch
deleted file mode 100644
index bec90c28e6..0000000000
--- a/debian/patches-rt/0010-serial-owl-Use-uart_prepare_sysrq_char-to-handle-sys.patch
+++ /dev/null
@@ -1,104 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 1 Mar 2024 22:45:23 +0100
-Subject: [PATCH 10/18] serial: owl: Use uart_prepare_sysrq_char() to handle
- sysrq.
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-The port lock is a spinlock_t which is becomes a sleeping lock on PREEMPT_RT.
-The driver splits the locking function into two parts: local_irq_save() and
-uart_port_lock() and this breaks PREEMPT_RT.
-
-Handle sysrq requests sysrq once the port lock is dropped.
-Remove the special case in the console write routine an always use the
-complete locking function.
-
-Cc: Andreas Färber <afaerber@suse.de>
-Cc: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
-Cc: linux-arm-kernel@lists.infradead.org
-Cc: linux-actions@lists.infradead.org
-Link: https://lore.kernel.org/r/20240301215246.891055-11-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/tty/serial/owl-uart.c | 30 ++++++++++++------------------
- 1 file changed, 12 insertions(+), 18 deletions(-)
-
---- a/drivers/tty/serial/owl-uart.c
-+++ b/drivers/tty/serial/owl-uart.c
-@@ -199,6 +199,7 @@ static void owl_uart_receive_chars(struc
- stat = owl_uart_read(port, OWL_UART_STAT);
- while (!(stat & OWL_UART_STAT_RFEM)) {
- char flag = TTY_NORMAL;
-+ bool sysrq;
-
- if (stat & OWL_UART_STAT_RXER)
- port->icount.overrun++;
-@@ -217,7 +218,9 @@ static void owl_uart_receive_chars(struc
- val = owl_uart_read(port, OWL_UART_RXDAT);
- val &= 0xff;
-
-- if ((stat & port->ignore_status_mask) == 0)
-+ sysrq = uart_prepare_sysrq_char(port, val);
-+
-+ if (!sysrq && (stat & port->ignore_status_mask) == 0)
- tty_insert_flip_char(&port->state->port, val, flag);
-
- stat = owl_uart_read(port, OWL_UART_STAT);
-@@ -229,10 +232,9 @@ static void owl_uart_receive_chars(struc
- static irqreturn_t owl_uart_irq(int irq, void *dev_id)
- {
- struct uart_port *port = dev_id;
-- unsigned long flags;
- u32 stat;
-
-- uart_port_lock_irqsave(port, &flags);
-+ uart_port_lock(port);
-
- stat = owl_uart_read(port, OWL_UART_STAT);
-
-@@ -246,7 +248,7 @@ static irqreturn_t owl_uart_irq(int irq,
- stat |= OWL_UART_STAT_RIP | OWL_UART_STAT_TIP;
- owl_uart_write(port, stat, OWL_UART_STAT);
-
-- uart_port_unlock_irqrestore(port, flags);
-+ uart_unlock_and_check_sysrq(port);
-
- return IRQ_HANDLED;
- }
-@@ -508,18 +510,12 @@ static void owl_uart_port_write(struct u
- {
- u32 old_ctl, val;
- unsigned long flags;
-- int locked;
-+ int locked = 1;
-
-- local_irq_save(flags);
--
-- if (port->sysrq)
-- locked = 0;
-- else if (oops_in_progress)
-- locked = uart_port_trylock(port);
-- else {
-- uart_port_lock(port);
-- locked = 1;
-- }
-+ if (oops_in_progress)
-+ locked = uart_port_trylock_irqsave(port, &flags);
-+ else
-+ uart_port_lock_irqsave(port, &flags);
-
- old_ctl = owl_uart_read(port, OWL_UART_CTL);
- val = old_ctl | OWL_UART_CTL_TRFS_TX;
-@@ -541,9 +537,7 @@ static void owl_uart_port_write(struct u
- owl_uart_write(port, old_ctl, OWL_UART_CTL);
-
- if (locked)
-- uart_port_unlock(port);
--
-- local_irq_restore(flags);
-+ uart_port_unlock_irqrestore(port, flags);
- }
-
- static void owl_uart_console_write(struct console *co, const char *s,
diff --git a/debian/patches-rt/0011-lwt-Don-t-disable-migration-prio-invoking-BPF.patch b/debian/patches-rt/0011-lwt-Don-t-disable-migration-prio-invoking-BPF.patch
new file mode 100644
index 0000000000..93c21b4511
--- /dev/null
+++ b/debian/patches-rt/0011-lwt-Don-t-disable-migration-prio-invoking-BPF.patch
@@ -0,0 +1,40 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 26 Oct 2023 09:12:06 +0200
+Subject: [PATCH 11/15] lwt: Don't disable migration prio invoking BPF.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+There is no need to explicitly disable migration if bottom halves are
+also disabled. Disabling BH implies disabling migration.
+
+Remove migrate_disable() and rely solely on disabling BH to remain on
+the same CPU.
+
+Cc: bpf@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/core/lwt_bpf.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/net/core/lwt_bpf.c
++++ b/net/core/lwt_bpf.c
+@@ -40,10 +40,9 @@ static int run_lwt_bpf(struct sk_buff *s
+ {
+ int ret;
+
+- /* Migration disable and BH disable are needed to protect per-cpu
+- * redirect_info between BPF prog and skb_do_redirect().
++ /* Disabling BH is needed to protect per-CPU bpf_redirect_info between
++ * BPF prog and skb_do_redirect().
+ */
+- migrate_disable();
+ local_bh_disable();
+ bpf_compute_data_pointers(skb);
+ ret = bpf_prog_run_save_cb(lwt->prog, skb);
+@@ -78,7 +77,6 @@ static int run_lwt_bpf(struct sk_buff *s
+ }
+
+ local_bh_enable();
+- migrate_enable();
+
+ return ret;
+ }
diff --git a/debian/patches-rt/0011-nbcon-Add-API-to-acquire-context-for-non-printing-op.patch b/debian/patches-rt/0011-nbcon-Add-API-to-acquire-context-for-non-printing-op.patch
new file mode 100644
index 0000000000..0245fc1ecd
--- /dev/null
+++ b/debian/patches-rt/0011-nbcon-Add-API-to-acquire-context-for-non-printing-op.patch
@@ -0,0 +1,180 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Wed, 17 Apr 2024 12:42:46 +0000
+Subject: [PATCH 11/48] nbcon: Add API to acquire context for non-printing
+ operations
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+Provide functions nbcon_device_try_acquire() and
+nbcon_device_release() which will try to acquire the nbcon
+console ownership with NBCON_PRIO_NORMAL and mark it unsafe for
+handover/takeover.
+
+These functions are to be used together with the device-specific
+locking when performing non-printing activities on the console
+device. They will allow synchronization against the
+atomic_write() callback which will be serialized, for higher
+priority contexts, only by acquiring the console context
+ownership.
+
+Pitfalls:
+
+The API requires to be called in a context with migration
+disabled because it uses per-CPU variables internally.
+
+The context is set unsafe for a takeover all the time. It
+guarantees full serialization against any atomic_write() caller
+except for the final flush in panic() which might try an unsafe
+takeover.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/console.h | 2 +
+ include/linux/printk.h | 15 ++++++++++++
+ kernel/printk/nbcon.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++-
+ 3 files changed, 72 insertions(+), 1 deletion(-)
+
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -322,6 +322,7 @@ struct nbcon_write_context {
+ *
+ * @nbcon_state: State for nbcon consoles
+ * @nbcon_seq: Sequence number of the next record for nbcon to print
++ * @nbcon_device_ctxt: Context available for non-printing operations
+ * @pbufs: Pointer to nbcon private buffer
+ */
+ struct console {
+@@ -417,6 +418,7 @@ struct console {
+
+ atomic_t __private nbcon_state;
+ atomic_long_t __private nbcon_seq;
++ struct nbcon_context __private nbcon_device_ctxt;
+ struct printk_buffers *pbufs;
+ };
+
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -9,6 +9,8 @@
+ #include <linux/ratelimit_types.h>
+ #include <linux/once_lite.h>
+
++struct console;
++
+ extern const char linux_banner[];
+ extern const char linux_proc_banner[];
+
+@@ -194,6 +196,8 @@ extern asmlinkage void dump_stack_lvl(co
+ extern asmlinkage void dump_stack(void) __cold;
+ void printk_trigger_flush(void);
+ void console_replay_all(void);
++extern bool nbcon_device_try_acquire(struct console *con);
++extern void nbcon_device_release(struct console *con);
+ #else
+ static inline __printf(1, 0)
+ int vprintk(const char *s, va_list args)
+@@ -273,9 +277,20 @@ static inline void dump_stack(void)
+ static inline void printk_trigger_flush(void)
+ {
+ }
++
+ static inline void console_replay_all(void)
+ {
+ }
++
++static inline bool nbcon_device_try_acquire(struct console *con)
++{
++ return false;
++}
++
++static inline void nbcon_device_release(struct console *con)
++{
++}
++
+ #endif
+
+ bool this_cpu_in_panic(void);
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -5,7 +5,9 @@
+ #include <linux/kernel.h>
+ #include <linux/console.h>
+ #include <linux/delay.h>
++#include <linux/export.h>
+ #include <linux/slab.h>
++#include <linux/string.h>
+ #include "internal.h"
+ /*
+ * Printk console printing implementation for consoles which does not depend
+@@ -528,6 +530,7 @@ static struct printk_buffers panic_nbcon
+ * nbcon_context_try_acquire - Try to acquire nbcon console
+ * @ctxt: The context of the caller
+ *
++ * Context: Under @ctxt->con->device_lock() or local_irq_save().
+ * Return: True if the console was acquired. False otherwise.
+ *
+ * If the caller allowed an unsafe hostile takeover, on success the
+@@ -535,7 +538,6 @@ static struct printk_buffers panic_nbcon
+ * in an unsafe state. Otherwise, on success the caller may assume
+ * the console is not in an unsafe state.
+ */
+-__maybe_unused
+ static bool nbcon_context_try_acquire(struct nbcon_context *ctxt)
+ {
+ unsigned int cpu = smp_processor_id();
+@@ -989,3 +991,55 @@ void nbcon_free(struct console *con)
+
+ con->pbufs = NULL;
+ }
++
++/**
++ * nbcon_device_try_acquire - Try to acquire nbcon console and enter unsafe
++ * section
++ * @con: The nbcon console to acquire
++ *
++ * Context: Under the locking mechanism implemented in
++ * @con->device_lock() including disabling migration.
++ * Return: True if the console was acquired. False otherwise.
++ *
++ * Console drivers will usually use their own internal synchronization
++ * mechasism to synchronize between console printing and non-printing
++ * activities (such as setting baud rates). However, nbcon console drivers
++ * supporting atomic consoles may also want to mark unsafe sections when
++ * performing non-printing activities in order to synchronize against their
++ * atomic_write() callback.
++ *
++ * This function acquires the nbcon console using priority NBCON_PRIO_NORMAL
++ * and marks it unsafe for handover/takeover.
++ */
++bool nbcon_device_try_acquire(struct console *con)
++{
++ struct nbcon_context *ctxt = &ACCESS_PRIVATE(con, nbcon_device_ctxt);
++
++ cant_migrate();
++
++ memset(ctxt, 0, sizeof(*ctxt));
++ ctxt->console = con;
++ ctxt->prio = NBCON_PRIO_NORMAL;
++
++ if (!nbcon_context_try_acquire(ctxt))
++ return false;
++
++ if (!nbcon_context_enter_unsafe(ctxt))
++ return false;
++
++ return true;
++}
++EXPORT_SYMBOL_GPL(nbcon_device_try_acquire);
++
++/**
++ * nbcon_device_release - Exit unsafe section and release the nbcon console
++ * @con: The nbcon console acquired in nbcon_device_try_acquire()
++ */
++void nbcon_device_release(struct console *con)
++{
++ struct nbcon_context *ctxt = &ACCESS_PRIVATE(con, nbcon_device_ctxt);
++
++ if (nbcon_context_exit_unsafe(ctxt))
++ nbcon_context_release(ctxt);
++}
++EXPORT_SYMBOL_GPL(nbcon_device_release);
diff --git a/debian/patches-rt/0011-serial-rda-Use-uart_prepare_sysrq_char-to-handle-sys.patch b/debian/patches-rt/0011-serial-rda-Use-uart_prepare_sysrq_char-to-handle-sys.patch
deleted file mode 100644
index 35d33ed2f9..0000000000
--- a/debian/patches-rt/0011-serial-rda-Use-uart_prepare_sysrq_char-to-handle-sys.patch
+++ /dev/null
@@ -1,91 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 1 Mar 2024 22:45:24 +0100
-Subject: [PATCH 11/18] serial: rda: Use uart_prepare_sysrq_char() to handle
- sysrq.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-The port lock is a spinlock_t which is becomes a sleeping lock on PREEMPT_RT.
-The driver splits the locking function into two parts: local_irq_save() and
-uart_port_lock() and this breaks PREEMPT_RT.
-
-Handle sysrq requests sysrq once the port lock is dropped.
-Remove the special case in the console write routine an always use the
-complete locking function.
-
-Cc: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
-Cc: linux-arm-kernel@lists.infradead.org
-Cc: linux-unisoc@lists.infradead.org
-Link: https://lore.kernel.org/r/20240301215246.891055-12-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/tty/serial/rda-uart.c | 28 ++++++++++------------------
- 1 file changed, 10 insertions(+), 18 deletions(-)
-
---- a/drivers/tty/serial/rda-uart.c
-+++ b/drivers/tty/serial/rda-uart.c
-@@ -394,7 +394,8 @@ static void rda_uart_receive_chars(struc
- val &= 0xff;
-
- port->icount.rx++;
-- tty_insert_flip_char(&port->state->port, val, flag);
-+ if (!uart_prepare_sysrq_char(port, val))
-+ tty_insert_flip_char(&port->state->port, val, flag);
-
- status = rda_uart_read(port, RDA_UART_STATUS);
- }
-@@ -405,10 +406,9 @@ static void rda_uart_receive_chars(struc
- static irqreturn_t rda_interrupt(int irq, void *dev_id)
- {
- struct uart_port *port = dev_id;
-- unsigned long flags;
- u32 val, irq_mask;
-
-- uart_port_lock_irqsave(port, &flags);
-+ uart_port_lock(port);
-
- /* Clear IRQ cause */
- val = rda_uart_read(port, RDA_UART_IRQ_CAUSE);
-@@ -425,7 +425,7 @@ static irqreturn_t rda_interrupt(int irq
- rda_uart_send_chars(port);
- }
-
-- uart_port_unlock_irqrestore(port, flags);
-+ uart_unlock_and_check_sysrq(port);
-
- return IRQ_HANDLED;
- }
-@@ -590,18 +590,12 @@ static void rda_uart_port_write(struct u
- {
- u32 old_irq_mask;
- unsigned long flags;
-- int locked;
-+ int locked = 1;
-
-- local_irq_save(flags);
--
-- if (port->sysrq) {
-- locked = 0;
-- } else if (oops_in_progress) {
-- locked = uart_port_trylock(port);
-- } else {
-- uart_port_lock(port);
-- locked = 1;
-- }
-+ if (oops_in_progress)
-+ locked = uart_port_trylock_irqsave(port, &flags);
-+ else
-+ uart_port_lock_irqsave(port, &flags);
-
- old_irq_mask = rda_uart_read(port, RDA_UART_IRQ_MASK);
- rda_uart_write(port, 0, RDA_UART_IRQ_MASK);
-@@ -615,9 +609,7 @@ static void rda_uart_port_write(struct u
- rda_uart_write(port, old_irq_mask, RDA_UART_IRQ_MASK);
-
- if (locked)
-- uart_port_unlock(port);
--
-- local_irq_restore(flags);
-+ uart_port_unlock_irqrestore(port, flags);
- }
-
- static void rda_uart_console_write(struct console *co, const char *s,
diff --git a/debian/patches-rt/0012-printk-nbcon-Use-driver-synchronization-while-regist.patch b/debian/patches-rt/0012-printk-nbcon-Use-driver-synchronization-while-regist.patch
deleted file mode 100644
index 71afdf6e29..0000000000
--- a/debian/patches-rt/0012-printk-nbcon-Use-driver-synchronization-while-regist.patch
+++ /dev/null
@@ -1,68 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Fri, 15 Mar 2024 15:38:22 +0000
-Subject: [PATCH 12/48] printk: nbcon: Use driver synchronization while
- registering
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-Depending on if an nbcon console is registered, a driver may
-handle its internal locking differently. If a driver is holding
-its internal lock while the nbcon console is registered, there
-may be a risk that two different contexts access the hardware
-simultaneously without synchronization. (For example, if the
-printk subsystem invokes atomic printing while another driver
-context acquired the internal lock without considering the
-atomic console.)
-
-Use the driver synchronization while a registering nbcon console
-transitions to being registered. This guarantees that if the
-driver acquires its internal lock when the nbcon console was not
-registered, it will remain unregistered until that context
-releases the lock.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/printk/printk.c | 18 ++++++++++++++++++
- 1 file changed, 18 insertions(+)
-
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -3453,6 +3453,7 @@ void register_console(struct console *ne
- struct console *con;
- bool bootcon_registered = false;
- bool realcon_registered = false;
-+ unsigned long flags;
- int err;
-
- console_list_lock();
-@@ -3545,6 +3546,19 @@ void register_console(struct console *ne
- }
-
- /*
-+ * If another context is actively using the hardware of this new
-+ * console, it will not be aware of the nbcon synchronization. This
-+ * is a risk that two contexts could access the hardware
-+ * simultaneously if this new console is used for atomic printing
-+ * and the other context is still using the hardware.
-+ *
-+ * Use the driver synchronization to ensure that the hardware is not
-+ * in use while this new console transitions to being registered.
-+ */
-+ if ((newcon->flags & CON_NBCON) && newcon->write_atomic)
-+ newcon->device_lock(newcon, &flags);
-+
-+ /*
- * Put this console in the list - keep the
- * preferred driver at the head of the list.
- */
-@@ -3568,6 +3582,10 @@ void register_console(struct console *ne
- * register_console() completes.
- */
-
-+ /* This new console is now registered. */
-+ if ((newcon->flags & CON_NBCON) && newcon->write_atomic)
-+ newcon->device_unlock(newcon, flags);
-+
- console_sysfs_notify();
-
- /*
diff --git a/debian/patches-rt/0012-seg6-Use-nested-BH-locking-for-seg6_bpf_srh_states.patch b/debian/patches-rt/0012-seg6-Use-nested-BH-locking-for-seg6_bpf_srh_states.patch
new file mode 100644
index 0000000000..fa445e5fd6
--- /dev/null
+++ b/debian/patches-rt/0012-seg6-Use-nested-BH-locking-for-seg6_bpf_srh_states.patch
@@ -0,0 +1,152 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 26 Oct 2023 12:10:40 +0200
+Subject: [PATCH 12/15] seg6: Use nested-BH locking for seg6_bpf_srh_states.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+The access to seg6_bpf_srh_states is protected by disabling preemption.
+Based on the code, the entry point is input_action_end_bpf() and
+every other function (the bpf helper functions bpf_lwt_seg6_*()), that
+is accessing seg6_bpf_srh_states, should be called from within
+input_action_end_bpf().
+
+input_action_end_bpf() accesses seg6_bpf_srh_states first at the top of
+the function and then disables preemption. This looks wrong because if
+preemption needs to be disabled as part of the locking mechanism then
+the variable shouldn't be accessed beforehand.
+
+Looking at how it is used via test_lwt_seg6local.sh then
+input_action_end_bpf() is always invoked from softirq context. If this
+is always the case then the preempt_disable() statement is superfluous.
+If this is not always invoked from softirq then disabling only
+preemption is not sufficient.
+
+Replace the preempt_disable() statement with nested-BH locking. This is
+not an equivalent replacement as it assumes that the invocation of
+input_action_end_bpf() always occurs in softirq context and thus the
+preempt_disable() is superfluous.
+Add a local_lock_t the data structure and use local_lock_nested_bh() for
+locking. Add lockdep_assert_held() to ensure the lock is held while the
+per-CPU variable is referenced in the helper functions.
+
+Cc: Alexei Starovoitov <ast@kernel.org>
+Cc: Andrii Nakryiko <andrii@kernel.org>
+Cc: David Ahern <dsahern@kernel.org>
+Cc: Hao Luo <haoluo@google.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: John Fastabend <john.fastabend@gmail.com>
+Cc: KP Singh <kpsingh@kernel.org>
+Cc: Martin KaFai Lau <martin.lau@linux.dev>
+Cc: Song Liu <song@kernel.org>
+Cc: Stanislav Fomichev <sdf@google.com>
+Cc: Yonghong Song <yonghong.song@linux.dev>
+Cc: bpf@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/net/seg6_local.h | 1 +
+ net/core/filter.c | 3 +++
+ net/ipv6/seg6_local.c | 22 ++++++++++++++--------
+ 3 files changed, 18 insertions(+), 8 deletions(-)
+
+--- a/include/net/seg6_local.h
++++ b/include/net/seg6_local.h
+@@ -19,6 +19,7 @@ extern int seg6_lookup_nexthop(struct sk
+ extern bool seg6_bpf_has_valid_srh(struct sk_buff *skb);
+
+ struct seg6_bpf_srh_state {
++ local_lock_t bh_lock;
+ struct ipv6_sr_hdr *srh;
+ u16 hdrlen;
+ bool valid;
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -6455,6 +6455,7 @@ BPF_CALL_4(bpf_lwt_seg6_store_bytes, str
+ void *srh_tlvs, *srh_end, *ptr;
+ int srhoff = 0;
+
++ lockdep_assert_held(&srh_state->bh_lock);
+ if (srh == NULL)
+ return -EINVAL;
+
+@@ -6511,6 +6512,7 @@ BPF_CALL_4(bpf_lwt_seg6_action, struct s
+ int hdroff = 0;
+ int err;
+
++ lockdep_assert_held(&srh_state->bh_lock);
+ switch (action) {
+ case SEG6_LOCAL_ACTION_END_X:
+ if (!seg6_bpf_has_valid_srh(skb))
+@@ -6587,6 +6589,7 @@ BPF_CALL_3(bpf_lwt_seg6_adjust_srh, stru
+ int srhoff = 0;
+ int ret;
+
++ lockdep_assert_held(&srh_state->bh_lock);
+ if (unlikely(srh == NULL))
+ return -EINVAL;
+
+--- a/net/ipv6/seg6_local.c
++++ b/net/ipv6/seg6_local.c
+@@ -1380,7 +1380,9 @@ static int input_action_end_b6_encap(str
+ return err;
+ }
+
+-DEFINE_PER_CPU(struct seg6_bpf_srh_state, seg6_bpf_srh_states);
++DEFINE_PER_CPU(struct seg6_bpf_srh_state, seg6_bpf_srh_states) = {
++ .bh_lock = INIT_LOCAL_LOCK(bh_lock),
++};
+
+ bool seg6_bpf_has_valid_srh(struct sk_buff *skb)
+ {
+@@ -1388,6 +1390,7 @@ bool seg6_bpf_has_valid_srh(struct sk_bu
+ this_cpu_ptr(&seg6_bpf_srh_states);
+ struct ipv6_sr_hdr *srh = srh_state->srh;
+
++ lockdep_assert_held(&srh_state->bh_lock);
+ if (unlikely(srh == NULL))
+ return false;
+
+@@ -1408,8 +1411,7 @@ bool seg6_bpf_has_valid_srh(struct sk_bu
+ static int input_action_end_bpf(struct sk_buff *skb,
+ struct seg6_local_lwt *slwt)
+ {
+- struct seg6_bpf_srh_state *srh_state =
+- this_cpu_ptr(&seg6_bpf_srh_states);
++ struct seg6_bpf_srh_state *srh_state;
+ struct ipv6_sr_hdr *srh;
+ int ret;
+
+@@ -1420,10 +1422,14 @@ static int input_action_end_bpf(struct s
+ }
+ advance_nextseg(srh, &ipv6_hdr(skb)->daddr);
+
+- /* preempt_disable is needed to protect the per-CPU buffer srh_state,
+- * which is also accessed by the bpf_lwt_seg6_* helpers
++ /* The access to the per-CPU buffer srh_state is protected by running
++ * always in softirq context (with disabled BH). On PREEMPT_RT the
++ * required locking is provided by the following local_lock_nested_bh()
++ * statement. It is also accessed by the bpf_lwt_seg6_* helpers via
++ * bpf_prog_run_save_cb().
+ */
+- preempt_disable();
++ local_lock_nested_bh(&seg6_bpf_srh_states.bh_lock);
++ srh_state = this_cpu_ptr(&seg6_bpf_srh_states);
+ srh_state->srh = srh;
+ srh_state->hdrlen = srh->hdrlen << 3;
+ srh_state->valid = true;
+@@ -1446,15 +1452,15 @@ static int input_action_end_bpf(struct s
+
+ if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
+ goto drop;
++ local_unlock_nested_bh(&seg6_bpf_srh_states.bh_lock);
+
+- preempt_enable();
+ if (ret != BPF_REDIRECT)
+ seg6_lookup_nexthop(skb, NULL, 0);
+
+ return dst_input(skb);
+
+ drop:
+- preempt_enable();
++ local_unlock_nested_bh(&seg6_bpf_srh_states.bh_lock);
+ kfree_skb(skb);
+ return -EINVAL;
+ }
diff --git a/debian/patches-rt/0012-serial-core-Implement-processing-in-port-lock-wrappe.patch b/debian/patches-rt/0012-serial-core-Implement-processing-in-port-lock-wrappe.patch
new file mode 100644
index 0000000000..05a1928149
--- /dev/null
+++ b/debian/patches-rt/0012-serial-core-Implement-processing-in-port-lock-wrappe.patch
@@ -0,0 +1,182 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Wed, 17 Apr 2024 14:41:16 +0000
+Subject: [PATCH 12/48] serial: core: Implement processing in port->lock
+ wrapper
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+Currently the port->lock wrappers uart_port_lock(),
+uart_port_unlock() (and their variants) only lock/unlock
+the spin_lock.
+
+If the port is an nbcon console, the wrappers must also
+acquire/release the console and mark the region as unsafe. This
+allows general port->lock synchronization to be synchronized
+with the nbcon console ownership.
+
+Note that __uart_port_using_nbcon() relies on the port->lock
+being held while a console is added and removed from the
+console list (i.e. all uart nbcon drivers *must* take the
+port->lock in their device_lock() callbacks).
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/serial_core.h | 82 ++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 80 insertions(+), 2 deletions(-)
+
+--- a/include/linux/serial_core.h
++++ b/include/linux/serial_core.h
+@@ -11,6 +11,8 @@
+ #include <linux/compiler.h>
+ #include <linux/console.h>
+ #include <linux/interrupt.h>
++#include <linux/lockdep.h>
++#include <linux/printk.h>
+ #include <linux/spinlock.h>
+ #include <linux/sched.h>
+ #include <linux/tty.h>
+@@ -625,6 +627,60 @@ static inline void uart_port_set_cons(st
+ up->cons = con;
+ __uart_port_unlock_irqrestore(up, flags);
+ }
++
++/* Only for internal port lock wrapper usage. */
++static inline bool __uart_port_using_nbcon(struct uart_port *up)
++{
++ lockdep_assert_held_once(&up->lock);
++
++ if (likely(!uart_console(up)))
++ return false;
++
++ /*
++ * @up->cons is only modified under the port lock. Therefore it is
++ * certain that it cannot disappear here.
++ *
++ * @up->cons->node is added/removed from the console list under the
++ * port lock. Therefore it is certain that the registration status
++ * cannot change here, thus @up->cons->flags can be read directly.
++ */
++ if (hlist_unhashed_lockless(&up->cons->node) ||
++ !(up->cons->flags & CON_NBCON) ||
++ !up->cons->write_atomic) {
++ return false;
++ }
++
++ return true;
++}
++
++/* Only for internal port lock wrapper usage. */
++static inline bool __uart_port_nbcon_try_acquire(struct uart_port *up)
++{
++ if (!__uart_port_using_nbcon(up))
++ return true;
++
++ return nbcon_device_try_acquire(up->cons);
++}
++
++/* Only for internal port lock wrapper usage. */
++static inline void __uart_port_nbcon_acquire(struct uart_port *up)
++{
++ if (!__uart_port_using_nbcon(up))
++ return;
++
++ while (!nbcon_device_try_acquire(up->cons))
++ cpu_relax();
++}
++
++/* Only for internal port lock wrapper usage. */
++static inline void __uart_port_nbcon_release(struct uart_port *up)
++{
++ if (!__uart_port_using_nbcon(up))
++ return;
++
++ nbcon_device_release(up->cons);
++}
++
+ /**
+ * uart_port_lock - Lock the UART port
+ * @up: Pointer to UART port structure
+@@ -632,6 +688,7 @@ static inline void uart_port_set_cons(st
+ static inline void uart_port_lock(struct uart_port *up)
+ {
+ spin_lock(&up->lock);
++ __uart_port_nbcon_acquire(up);
+ }
+
+ /**
+@@ -641,6 +698,7 @@ static inline void uart_port_lock(struct
+ static inline void uart_port_lock_irq(struct uart_port *up)
+ {
+ spin_lock_irq(&up->lock);
++ __uart_port_nbcon_acquire(up);
+ }
+
+ /**
+@@ -651,6 +709,7 @@ static inline void uart_port_lock_irq(st
+ static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
+ {
+ spin_lock_irqsave(&up->lock, *flags);
++ __uart_port_nbcon_acquire(up);
+ }
+
+ /**
+@@ -661,7 +720,15 @@ static inline void uart_port_lock_irqsav
+ */
+ static inline bool uart_port_trylock(struct uart_port *up)
+ {
+- return spin_trylock(&up->lock);
++ if (!spin_trylock(&up->lock))
++ return false;
++
++ if (!__uart_port_nbcon_try_acquire(up)) {
++ spin_unlock(&up->lock);
++ return false;
++ }
++
++ return true;
+ }
+
+ /**
+@@ -673,7 +740,15 @@ static inline bool uart_port_trylock(str
+ */
+ static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags)
+ {
+- return spin_trylock_irqsave(&up->lock, *flags);
++ if (!spin_trylock_irqsave(&up->lock, *flags))
++ return false;
++
++ if (!__uart_port_nbcon_try_acquire(up)) {
++ spin_unlock_irqrestore(&up->lock, *flags);
++ return false;
++ }
++
++ return true;
+ }
+
+ /**
+@@ -682,6 +757,7 @@ static inline bool uart_port_trylock_irq
+ */
+ static inline void uart_port_unlock(struct uart_port *up)
+ {
++ __uart_port_nbcon_release(up);
+ spin_unlock(&up->lock);
+ }
+
+@@ -691,6 +767,7 @@ static inline void uart_port_unlock(stru
+ */
+ static inline void uart_port_unlock_irq(struct uart_port *up)
+ {
++ __uart_port_nbcon_release(up);
+ spin_unlock_irq(&up->lock);
+ }
+
+@@ -701,6 +778,7 @@ static inline void uart_port_unlock_irq(
+ */
+ static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
+ {
++ __uart_port_nbcon_release(up);
+ spin_unlock_irqrestore(&up->lock, flags);
+ }
+
diff --git a/debian/patches-rt/0012-serial-sifive-Use-uart_prepare_sysrq_char-to-handle-.patch b/debian/patches-rt/0012-serial-sifive-Use-uart_prepare_sysrq_char-to-handle-.patch
deleted file mode 100644
index 5b82468e40..0000000000
--- a/debian/patches-rt/0012-serial-sifive-Use-uart_prepare_sysrq_char-to-handle-.patch
+++ /dev/null
@@ -1,71 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 1 Mar 2024 22:45:25 +0100
-Subject: [PATCH 12/18] serial: sifive: Use uart_prepare_sysrq_char() to handle
- sysrq.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-The port lock is a spinlock_t which is becomes a sleeping lock on PREEMPT_RT.
-The driver splits the locking function into two parts: local_irq_save() and
-uart_port_lock() and this breaks PREEMPT_RT.
-
-Handle sysrq requests sysrq once the port lock is dropped.
-Remove the special case in the console write routine an always use the
-complete locking function.
-
-Cc: Palmer Dabbelt <palmer@dabbelt.com>
-Cc: Paul Walmsley <paul.walmsley@sifive.com>
-Cc: linux-riscv@lists.infradead.org
-Link: https://lore.kernel.org/r/20240301215246.891055-13-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/tty/serial/sifive.c | 17 +++++++----------
- 1 file changed, 7 insertions(+), 10 deletions(-)
-
---- a/drivers/tty/serial/sifive.c
-+++ b/drivers/tty/serial/sifive.c
-@@ -412,7 +412,8 @@ static void __ssp_receive_chars(struct s
- break;
-
- ssp->port.icount.rx++;
-- uart_insert_char(&ssp->port, 0, 0, ch, TTY_NORMAL);
-+ if (!uart_prepare_sysrq_char(&ssp->port, ch))
-+ uart_insert_char(&ssp->port, 0, 0, ch, TTY_NORMAL);
- }
-
- tty_flip_buffer_push(&ssp->port.state->port);
-@@ -534,7 +535,7 @@ static irqreturn_t sifive_serial_irq(int
- if (ip & SIFIVE_SERIAL_IP_TXWM_MASK)
- __ssp_transmit_chars(ssp);
-
-- uart_port_unlock(&ssp->port);
-+ uart_unlock_and_check_sysrq(&ssp->port);
-
- return IRQ_HANDLED;
- }
-@@ -791,13 +792,10 @@ static void sifive_serial_console_write(
- if (!ssp)
- return;
-
-- local_irq_save(flags);
-- if (ssp->port.sysrq)
-- locked = 0;
-- else if (oops_in_progress)
-- locked = uart_port_trylock(&ssp->port);
-+ if (oops_in_progress)
-+ locked = uart_port_trylock_irqsave(&ssp->port, &flags);
- else
-- uart_port_lock(&ssp->port);
-+ uart_port_lock_irqsave(&ssp->port, &flags);
-
- ier = __ssp_readl(ssp, SIFIVE_SERIAL_IE_OFFS);
- __ssp_writel(0, SIFIVE_SERIAL_IE_OFFS, ssp);
-@@ -807,8 +805,7 @@ static void sifive_serial_console_write(
- __ssp_writel(ier, SIFIVE_SERIAL_IE_OFFS, ssp);
-
- if (locked)
-- uart_port_unlock(&ssp->port);
-- local_irq_restore(flags);
-+ uart_port_unlock_irqrestore(&ssp->port, flags);
- }
-
- static int sifive_serial_console_setup(struct console *co, char *options)
diff --git a/debian/patches-rt/0013-net-Use-nested-BH-locking-for-bpf_scratchpad.patch b/debian/patches-rt/0013-net-Use-nested-BH-locking-for-bpf_scratchpad.patch
new file mode 100644
index 0000000000..9132b002e6
--- /dev/null
+++ b/debian/patches-rt/0013-net-Use-nested-BH-locking-for-bpf_scratchpad.patch
@@ -0,0 +1,70 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 26 Oct 2023 15:17:32 +0200
+Subject: [PATCH 13/15] net: Use nested-BH locking for bpf_scratchpad.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+bpf_scratchpad is a per-CPU variable and relies on disabled BH for its
+locking. Without per-CPU locking in local_bh_disable() on PREEMPT_RT
+this data structure requires explicit locking.
+
+Add a local_lock_t to the data structure and use local_lock_nested_bh()
+for locking. This change adds only lockdep coverage and does not alter
+the functional behaviour for !PREEMPT_RT.
+
+Cc: Alexei Starovoitov <ast@kernel.org>
+Cc: Andrii Nakryiko <andrii@kernel.org>
+Cc: Hao Luo <haoluo@google.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: John Fastabend <john.fastabend@gmail.com>
+Cc: KP Singh <kpsingh@kernel.org>
+Cc: Martin KaFai Lau <martin.lau@linux.dev>
+Cc: Song Liu <song@kernel.org>
+Cc: Stanislav Fomichev <sdf@google.com>
+Cc: Yonghong Song <yonghong.song@linux.dev>
+Cc: bpf@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/core/filter.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -1658,9 +1658,12 @@ struct bpf_scratchpad {
+ __be32 diff[MAX_BPF_STACK / sizeof(__be32)];
+ u8 buff[MAX_BPF_STACK];
+ };
++ local_lock_t bh_lock;
+ };
+
+-static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
++static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp) = {
++ .bh_lock = INIT_LOCAL_LOCK(bh_lock),
++};
+
+ static inline int __bpf_try_make_writable(struct sk_buff *skb,
+ unsigned int write_len)
+@@ -2021,6 +2024,7 @@ BPF_CALL_5(bpf_csum_diff, __be32 *, from
+ struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
+ u32 diff_size = from_size + to_size;
+ int i, j = 0;
++ __wsum ret;
+
+ /* This is quite flexible, some examples:
+ *
+@@ -2034,12 +2038,15 @@ BPF_CALL_5(bpf_csum_diff, __be32 *, from
+ diff_size > sizeof(sp->diff)))
+ return -EINVAL;
+
++ local_lock_nested_bh(&bpf_sp.bh_lock);
+ for (i = 0; i < from_size / sizeof(__be32); i++, j++)
+ sp->diff[j] = ~from[i];
+ for (i = 0; i < to_size / sizeof(__be32); i++, j++)
+ sp->diff[j] = to[i];
+
+- return csum_partial(sp->diff, diff_size, seed);
++ ret = csum_partial(sp->diff, diff_size, seed);
++ local_unlock_nested_bh(&bpf_sp.bh_lock);
++ return ret;
+ }
+
+ static const struct bpf_func_proto bpf_csum_diff_proto = {
diff --git a/debian/patches-rt/0015-printk-nbcon-Do-not-rely-on-proxy-headers.patch b/debian/patches-rt/0013-printk-nbcon-Do-not-rely-on-proxy-headers.patch
index 3ea3dd3d77..f7d08af8a7 100644
--- a/debian/patches-rt/0015-printk-nbcon-Do-not-rely-on-proxy-headers.patch
+++ b/debian/patches-rt/0013-printk-nbcon-Do-not-rely-on-proxy-headers.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Mon, 19 Feb 2024 17:35:49 +0000
-Subject: [PATCH 15/48] printk: nbcon: Do not rely on proxy headers
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 13/48] printk: nbcon: Do not rely on proxy headers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
The headers kernel.h, serial_core.h, and console.h allow for the
definitions of many types and functions from other headers.
@@ -12,12 +12,13 @@ list alphabetically to be able to easily detect duplicates.
Suggested-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Acked-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/printk/internal.h | 8 ++++++--
- kernel/printk/nbcon.c | 12 +++++++++++-
+ kernel/printk/nbcon.c | 13 ++++++++++++-
kernel/printk/printk_ringbuffer.h | 2 ++
- 3 files changed, 19 insertions(+), 3 deletions(-)
+ 3 files changed, 20 insertions(+), 3 deletions(-)
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -48,13 +49,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
__printf(4, 0)
--- a/kernel/printk/nbcon.c
+++ b/kernel/printk/nbcon.c
-@@ -2,14 +2,24 @@
+@@ -2,13 +2,24 @@
// Copyright (C) 2022 Linutronix GmbH, John Ogness
// Copyright (C) 2022 Intel, Thomas Gleixner
-#include <linux/kernel.h>
+#include <linux/atomic.h>
- #include <linux/bug.h>
++#include <linux/bug.h>
#include <linux/console.h>
#include <linux/delay.h>
+#include <linux/errno.h>
diff --git a/debian/patches-rt/0013-serial-pch-Invoke-handle_rx_to-directly.patch b/debian/patches-rt/0013-serial-pch-Invoke-handle_rx_to-directly.patch
deleted file mode 100644
index 436fb4e55c..0000000000
--- a/debian/patches-rt/0013-serial-pch-Invoke-handle_rx_to-directly.patch
+++ /dev/null
@@ -1,39 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 1 Mar 2024 22:45:26 +0100
-Subject: [PATCH 13/18] serial: pch: Invoke handle_rx_to() directly.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-handle_rx() is only a wrapper around handle_rx_to() without any
-additional functionality.
-
-Invoke handle_rx_to() directly and remove handle_rx().
-
-Link: https://lore.kernel.org/r/20240301215246.891055-14-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/tty/serial/pch_uart.c | 7 +------
- 1 file changed, 1 insertion(+), 6 deletions(-)
-
---- a/drivers/tty/serial/pch_uart.c
-+++ b/drivers/tty/serial/pch_uart.c
-@@ -778,11 +778,6 @@ static int handle_rx_to(struct eg20t_por
- return PCH_UART_HANDLED_RX_INT;
- }
-
--static int handle_rx(struct eg20t_port *priv)
--{
-- return handle_rx_to(priv);
--}
--
- static int dma_handle_rx(struct eg20t_port *priv)
- {
- struct uart_port *port = &priv->port;
-@@ -1051,7 +1046,7 @@ static irqreturn_t pch_uart_interrupt(in
- PCH_UART_HAL_RX_INT |
- PCH_UART_HAL_RX_ERR_INT);
- } else {
-- ret = handle_rx(priv);
-+ ret = handle_rx_to(priv);
- }
- break;
- case PCH_UART_IID_RDR_TO: /* Received Data Ready
diff --git a/debian/patches-rt/0014-net-Reference-bpf_redirect_info-via-task_struct-on-P.patch b/debian/patches-rt/0014-net-Reference-bpf_redirect_info-via-task_struct-on-P.patch
new file mode 100644
index 0000000000..b69f9bd551
--- /dev/null
+++ b/debian/patches-rt/0014-net-Reference-bpf_redirect_info-via-task_struct-on-P.patch
@@ -0,0 +1,657 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 18 Jan 2024 09:28:53 +0100
+Subject: [PATCH v9 net-next 14/15] net: Reference bpf_redirect_info via
+ task_struct on PREEMPT_RT.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+The XDP redirect process is two staged:
+- bpf_prog_run_xdp() is invoked to run a eBPF program which inspects the
+ packet and makes decisions. While doing that, the per-CPU variable
+ bpf_redirect_info is used.
+
+- Afterwards xdp_do_redirect() is invoked and accesses bpf_redirect_info
+ and it may also access other per-CPU variables like xskmap_flush_list.
+
+At the very end of the NAPI callback, xdp_do_flush() is invoked which
+does not access bpf_redirect_info but will touch the individual per-CPU
+lists.
+
+The per-CPU variables are only used in the NAPI callback hence disabling
+bottom halves is the only protection mechanism. Users from preemptible
+context (like cpu_map_kthread_run()) explicitly disable bottom halves
+for protections reasons.
+Without locking in local_bh_disable() on PREEMPT_RT this data structure
+requires explicit locking.
+
+PREEMPT_RT has forced-threaded interrupts enabled and every
+NAPI-callback runs in a thread. If each thread has its own data
+structure then locking can be avoided.
+
+Create a struct bpf_net_context which contains struct bpf_redirect_info.
+Define the variable on stack, use bpf_net_ctx_set() to save a pointer to
+it, bpf_net_ctx_clear() removes it again.
+The bpf_net_ctx_set() may nest. For instance a function can be used from
+within NET_RX_SOFTIRQ/ net_rx_action which uses bpf_net_ctx_set() and
+NET_TX_SOFTIRQ which does not. Therefore only the first invocations
+updates the pointer.
+Use bpf_net_ctx_get_ri() as a wrapper to retrieve the current struct
+bpf_redirect_info. The returned data structure is zero initialized to
+ensure nothing is leaked from stack. This is done on first usage of the
+struct. bpf_net_ctx_set() sets bpf_redirect_info::kern_flags to 0 to
+note that initialisation is required. First invocation of
+bpf_net_ctx_get_ri() will memset() the data structure and update
+bpf_redirect_info::kern_flags.
+bpf_redirect_info::nh is excluded from memset because it is only used
+once BPF_F_NEIGH is set which also sets the nh member. The kern_flags is
+moved past nh to exclude it from memset.
+
+The pointer to bpf_net_context is saved task's task_struct. Using
+always the bpf_net_context approach has the advantage that there is
+almost zero differences between PREEMPT_RT and non-PREEMPT_RT builds.
+
+Cc: Alexei Starovoitov <ast@kernel.org>
+Cc: Andrii Nakryiko <andrii@kernel.org>
+Cc: Eduard Zingerman <eddyz87@gmail.com>
+Cc: Hao Luo <haoluo@google.com>
+Cc: Jesper Dangaard Brouer <hawk@kernel.org>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: John Fastabend <john.fastabend@gmail.com>
+Cc: KP Singh <kpsingh@kernel.org>
+Cc: Martin KaFai Lau <martin.lau@linux.dev>
+Cc: Song Liu <song@kernel.org>
+Cc: Stanislav Fomichev <sdf@google.com>
+Cc: Toke Høiland-Jørgensen <toke@redhat.com>
+Cc: Yonghong Song <yonghong.song@linux.dev>
+Cc: bpf@vger.kernel.org
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
+Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/filter.h | 56 ++++++++++++++++++++++++++++++++++++++++---------
+ include/linux/sched.h | 3 ++
+ kernel/bpf/cpumap.c | 3 ++
+ kernel/bpf/devmap.c | 9 +++++++
+ kernel/fork.c | 1
+ net/bpf/test_run.c | 11 ++++++++-
+ net/core/dev.c | 29 ++++++++++++++++++++++++-
+ net/core/filter.c | 44 ++++++++++----------------------------
+ net/core/lwt_bpf.c | 3 ++
+ 9 files changed, 114 insertions(+), 45 deletions(-)
+
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -733,21 +733,59 @@ struct bpf_nh_params {
+ };
+ };
+
++/* flags for bpf_redirect_info kern_flags */
++#define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */
++#define BPF_RI_F_RI_INIT BIT(1)
++
+ struct bpf_redirect_info {
+ u64 tgt_index;
+ void *tgt_value;
+ struct bpf_map *map;
+ u32 flags;
+- u32 kern_flags;
+ u32 map_id;
+ enum bpf_map_type map_type;
+ struct bpf_nh_params nh;
++ u32 kern_flags;
+ };
+
+-DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
++struct bpf_net_context {
++ struct bpf_redirect_info ri;
++};
+
+-/* flags for bpf_redirect_info kern_flags */
+-#define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */
++static inline struct bpf_net_context *bpf_net_ctx_set(struct bpf_net_context *bpf_net_ctx)
++{
++ struct task_struct *tsk = current;
++
++ if (tsk->bpf_net_context != NULL)
++ return NULL;
++ bpf_net_ctx->ri.kern_flags = 0;
++
++ tsk->bpf_net_context = bpf_net_ctx;
++ return bpf_net_ctx;
++}
++
++static inline void bpf_net_ctx_clear(struct bpf_net_context *bpf_net_ctx)
++{
++ if (bpf_net_ctx)
++ current->bpf_net_context = NULL;
++}
++
++static inline struct bpf_net_context *bpf_net_ctx_get(void)
++{
++ return current->bpf_net_context;
++}
++
++static inline struct bpf_redirect_info *bpf_net_ctx_get_ri(void)
++{
++ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
++
++ if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_RI_INIT)) {
++ memset(&bpf_net_ctx->ri, 0, offsetof(struct bpf_net_context, ri.nh));
++ bpf_net_ctx->ri.kern_flags |= BPF_RI_F_RI_INIT;
++ }
++
++ return &bpf_net_ctx->ri;
++}
+
+ /* Compute the linear packet data range [data, data_end) which
+ * will be accessed by various program types (cls_bpf, act_bpf,
+@@ -1018,25 +1056,23 @@ struct bpf_prog *bpf_patch_insn_single(s
+ const struct bpf_insn *patch, u32 len);
+ int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt);
+
+-void bpf_clear_redirect_map(struct bpf_map *map);
+-
+ static inline bool xdp_return_frame_no_direct(void)
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+
+ return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT;
+ }
+
+ static inline void xdp_set_return_frame_no_direct(void)
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+
+ ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT;
+ }
+
+ static inline void xdp_clear_return_frame_no_direct(void)
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+
+ ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT;
+ }
+@@ -1592,7 +1628,7 @@ static __always_inline long __bpf_xdp_re
+ u64 flags, const u64 flag_mask,
+ void *lookup_elem(struct bpf_map *map, u32 key))
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+ const u64 action_mask = XDP_ABORTED | XDP_DROP | XDP_PASS | XDP_TX;
+
+ /* Lower bits of the flags are used as return code on lookup failure */
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -54,6 +54,7 @@ struct bio_list;
+ struct blk_plug;
+ struct bpf_local_storage;
+ struct bpf_run_ctx;
++struct bpf_net_context;
+ struct capture_control;
+ struct cfs_rq;
+ struct fs_struct;
+@@ -1516,6 +1517,8 @@ struct task_struct {
+ /* Used for BPF run context */
+ struct bpf_run_ctx *bpf_ctx;
+ #endif
++ /* Used by BPF for per-TASK xdp storage */
++ struct bpf_net_context *bpf_net_context;
+
+ #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+ unsigned long lowest_stack;
+--- a/kernel/bpf/cpumap.c
++++ b/kernel/bpf/cpumap.c
+@@ -240,12 +240,14 @@ static int cpu_map_bpf_prog_run(struct b
+ int xdp_n, struct xdp_cpumap_stats *stats,
+ struct list_head *list)
+ {
++ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+ int nframes;
+
+ if (!rcpu->prog)
+ return xdp_n;
+
+ rcu_read_lock_bh();
++ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+
+ nframes = cpu_map_bpf_prog_run_xdp(rcpu, frames, xdp_n, stats);
+
+@@ -255,6 +257,7 @@ static int cpu_map_bpf_prog_run(struct b
+ if (unlikely(!list_empty(list)))
+ cpu_map_bpf_prog_run_skb(rcpu, list, stats);
+
++ bpf_net_ctx_clear(bpf_net_ctx);
+ rcu_read_unlock_bh(); /* resched point, may call do_softirq() */
+
+ return nframes;
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -196,7 +196,14 @@ static void dev_map_free(struct bpf_map
+ list_del_rcu(&dtab->list);
+ spin_unlock(&dev_map_lock);
+
+- bpf_clear_redirect_map(map);
++ /* bpf_redirect_info->map is assigned in __bpf_xdp_redirect_map()
++ * during NAPI callback and cleared after the XDP redirect. There is no
++ * explicit RCU read section which protects bpf_redirect_info->map but
++ * local_bh_disable() also marks the beginning an RCU section. This
++ * makes the complete softirq callback RCU protected. Thus after
++ * following synchronize_rcu() there no bpf_redirect_info->map == map
++ * assignment.
++ */
+ synchronize_rcu();
+
+ /* Make sure prior __dev_map_entry_free() have completed. */
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -2355,6 +2355,7 @@ static void rv_task_fork(struct task_str
+ RCU_INIT_POINTER(p->bpf_storage, NULL);
+ p->bpf_ctx = NULL;
+ #endif
++ p->bpf_net_context = NULL;
+
+ /* Perform scheduler related setup. Assign this task to a CPU. */
+ retval = sched_fork(clone_flags, p);
+--- a/net/bpf/test_run.c
++++ b/net/bpf/test_run.c
+@@ -283,9 +283,10 @@ static int xdp_recv_frames(struct xdp_fr
+ static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
+ u32 repeat)
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+ int err = 0, act, ret, i, nframes = 0, batch_sz;
+ struct xdp_frame **frames = xdp->frames;
++ struct bpf_redirect_info *ri;
+ struct xdp_page_head *head;
+ struct xdp_frame *frm;
+ bool redirect = false;
+@@ -295,6 +296,8 @@ static int xdp_test_run_batch(struct xdp
+ batch_sz = min_t(u32, repeat, xdp->batch_size);
+
+ local_bh_disable();
++ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
++ ri = bpf_net_ctx_get_ri();
+ xdp_set_return_frame_no_direct();
+
+ for (i = 0; i < batch_sz; i++) {
+@@ -359,6 +362,7 @@ static int xdp_test_run_batch(struct xdp
+ }
+
+ xdp_clear_return_frame_no_direct();
++ bpf_net_ctx_clear(bpf_net_ctx);
+ local_bh_enable();
+ return err;
+ }
+@@ -394,6 +398,7 @@ static int bpf_test_run_xdp_live(struct
+ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
+ u32 *retval, u32 *time, bool xdp)
+ {
++ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+ struct bpf_prog_array_item item = {.prog = prog};
+ struct bpf_run_ctx *old_ctx;
+ struct bpf_cg_run_ctx run_ctx;
+@@ -419,10 +424,14 @@ static int bpf_test_run(struct bpf_prog
+ do {
+ run_ctx.prog_item = &item;
+ local_bh_disable();
++ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
++
+ if (xdp)
+ *retval = bpf_prog_run_xdp(prog, ctx);
+ else
+ *retval = bpf_prog_run(prog, ctx);
++
++ bpf_net_ctx_clear(bpf_net_ctx);
+ local_bh_enable();
+ } while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
+ bpf_reset_run_ctx(old_ctx);
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4045,10 +4045,13 @@ sch_handle_ingress(struct sk_buff *skb,
+ {
+ struct bpf_mprog_entry *entry = rcu_dereference_bh(skb->dev->tcx_ingress);
+ enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_INGRESS;
++ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+ int sch_ret;
+
+ if (!entry)
+ return skb;
++
++ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+ if (*pt_prev) {
+ *ret = deliver_skb(skb, *pt_prev, orig_dev);
+ *pt_prev = NULL;
+@@ -4077,10 +4080,12 @@ sch_handle_ingress(struct sk_buff *skb,
+ break;
+ }
+ *ret = NET_RX_SUCCESS;
++ bpf_net_ctx_clear(bpf_net_ctx);
+ return NULL;
+ case TC_ACT_SHOT:
+ kfree_skb_reason(skb, drop_reason);
+ *ret = NET_RX_DROP;
++ bpf_net_ctx_clear(bpf_net_ctx);
+ return NULL;
+ /* used by tc_run */
+ case TC_ACT_STOLEN:
+@@ -4090,8 +4095,10 @@ sch_handle_ingress(struct sk_buff *skb,
+ fallthrough;
+ case TC_ACT_CONSUMED:
+ *ret = NET_RX_SUCCESS;
++ bpf_net_ctx_clear(bpf_net_ctx);
+ return NULL;
+ }
++ bpf_net_ctx_clear(bpf_net_ctx);
+
+ return skb;
+ }
+@@ -4101,11 +4108,14 @@ sch_handle_egress(struct sk_buff *skb, i
+ {
+ struct bpf_mprog_entry *entry = rcu_dereference_bh(dev->tcx_egress);
+ enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_EGRESS;
++ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+ int sch_ret;
+
+ if (!entry)
+ return skb;
+
++ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
++
+ /* qdisc_skb_cb(skb)->pkt_len & tcx_set_ingress() was
+ * already set by the caller.
+ */
+@@ -4121,10 +4131,12 @@ sch_handle_egress(struct sk_buff *skb, i
+ /* No need to push/pop skb's mac_header here on egress! */
+ skb_do_redirect(skb);
+ *ret = NET_XMIT_SUCCESS;
++ bpf_net_ctx_clear(bpf_net_ctx);
+ return NULL;
+ case TC_ACT_SHOT:
+ kfree_skb_reason(skb, drop_reason);
+ *ret = NET_XMIT_DROP;
++ bpf_net_ctx_clear(bpf_net_ctx);
+ return NULL;
+ /* used by tc_run */
+ case TC_ACT_STOLEN:
+@@ -4134,8 +4146,10 @@ sch_handle_egress(struct sk_buff *skb, i
+ fallthrough;
+ case TC_ACT_CONSUMED:
+ *ret = NET_XMIT_SUCCESS;
++ bpf_net_ctx_clear(bpf_net_ctx);
+ return NULL;
+ }
++ bpf_net_ctx_clear(bpf_net_ctx);
+
+ return skb;
+ }
+@@ -6325,6 +6339,7 @@ enum {
+ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock,
+ unsigned flags, u16 budget)
+ {
++ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+ bool skip_schedule = false;
+ unsigned long timeout;
+ int rc;
+@@ -6342,6 +6357,7 @@ static void busy_poll_stop(struct napi_s
+ clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
+
+ local_bh_disable();
++ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+
+ if (flags & NAPI_F_PREFER_BUSY_POLL) {
+ napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs);
+@@ -6364,6 +6380,7 @@ static void busy_poll_stop(struct napi_s
+ netpoll_poll_unlock(have_poll_lock);
+ if (rc == budget)
+ __busy_poll_stop(napi, skip_schedule);
++ bpf_net_ctx_clear(bpf_net_ctx);
+ local_bh_enable();
+ }
+
+@@ -6373,6 +6390,7 @@ static void __napi_busy_loop(unsigned in
+ {
+ unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
+ int (*napi_poll)(struct napi_struct *napi, int budget);
++ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+ void *have_poll_lock = NULL;
+ struct napi_struct *napi;
+
+@@ -6391,6 +6409,7 @@ static void __napi_busy_loop(unsigned in
+ int work = 0;
+
+ local_bh_disable();
++ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+ if (!napi_poll) {
+ unsigned long val = READ_ONCE(napi->state);
+
+@@ -6421,6 +6440,7 @@ static void __napi_busy_loop(unsigned in
+ __NET_ADD_STATS(dev_net(napi->dev),
+ LINUX_MIB_BUSYPOLLRXPACKETS, work);
+ skb_defer_free_flush(this_cpu_ptr(&softnet_data));
++ bpf_net_ctx_clear(bpf_net_ctx);
+ local_bh_enable();
+
+ if (!loop_end || loop_end(loop_end_arg, start_time))
+@@ -6848,6 +6868,7 @@ static int napi_thread_wait(struct napi_
+
+ static void napi_threaded_poll_loop(struct napi_struct *napi)
+ {
++ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+ struct softnet_data *sd;
+ unsigned long last_qs = jiffies;
+
+@@ -6856,6 +6877,8 @@ static void napi_threaded_poll_loop(stru
+ void *have;
+
+ local_bh_disable();
++ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
++
+ sd = this_cpu_ptr(&softnet_data);
+ sd->in_napi_threaded_poll = true;
+
+@@ -6871,6 +6894,7 @@ static void napi_threaded_poll_loop(stru
+ net_rps_action_and_irq_enable(sd);
+ }
+ skb_defer_free_flush(sd);
++ bpf_net_ctx_clear(bpf_net_ctx);
+ local_bh_enable();
+
+ if (!repoll)
+@@ -6896,10 +6920,12 @@ static __latent_entropy void net_rx_acti
+ struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+ unsigned long time_limit = jiffies +
+ usecs_to_jiffies(READ_ONCE(net_hotdata.netdev_budget_usecs));
++ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+ int budget = READ_ONCE(net_hotdata.netdev_budget);
+ LIST_HEAD(list);
+ LIST_HEAD(repoll);
+
++ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+ start:
+ sd->in_net_rx_action = true;
+ local_irq_disable();
+@@ -6952,7 +6978,8 @@ static __latent_entropy void net_rx_acti
+ sd->in_net_rx_action = false;
+
+ net_rps_action_and_irq_enable(sd);
+-end:;
++end:
++ bpf_net_ctx_clear(bpf_net_ctx);
+ }
+
+ struct netdev_adjacent {
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -2483,9 +2483,6 @@ static const struct bpf_func_proto bpf_c
+ .arg3_type = ARG_ANYTHING,
+ };
+
+-DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
+-EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info);
+-
+ static struct net_device *skb_get_peer_dev(struct net_device *dev)
+ {
+ const struct net_device_ops *ops = dev->netdev_ops;
+@@ -2498,7 +2495,7 @@ static struct net_device *skb_get_peer_d
+
+ int skb_do_redirect(struct sk_buff *skb)
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+ struct net *net = dev_net(skb->dev);
+ struct net_device *dev;
+ u32 flags = ri->flags;
+@@ -2531,7 +2528,7 @@ int skb_do_redirect(struct sk_buff *skb)
+
+ BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+
+ if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL)))
+ return TC_ACT_SHOT;
+@@ -2552,7 +2549,7 @@ static const struct bpf_func_proto bpf_r
+
+ BPF_CALL_2(bpf_redirect_peer, u32, ifindex, u64, flags)
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+
+ if (unlikely(flags))
+ return TC_ACT_SHOT;
+@@ -2574,7 +2571,7 @@ static const struct bpf_func_proto bpf_r
+ BPF_CALL_4(bpf_redirect_neigh, u32, ifindex, struct bpf_redir_neigh *, params,
+ int, plen, u64, flags)
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+
+ if (unlikely((plen && plen < sizeof(*params)) || flags))
+ return TC_ACT_SHOT;
+@@ -4300,30 +4297,13 @@ void xdp_do_check_flushed(struct napi_st
+ }
+ #endif
+
+-void bpf_clear_redirect_map(struct bpf_map *map)
+-{
+- struct bpf_redirect_info *ri;
+- int cpu;
+-
+- for_each_possible_cpu(cpu) {
+- ri = per_cpu_ptr(&bpf_redirect_info, cpu);
+- /* Avoid polluting remote cacheline due to writes if
+- * not needed. Once we pass this test, we need the
+- * cmpxchg() to make sure it hasn't been changed in
+- * the meantime by remote CPU.
+- */
+- if (unlikely(READ_ONCE(ri->map) == map))
+- cmpxchg(&ri->map, map, NULL);
+- }
+-}
+-
+ DEFINE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key);
+ EXPORT_SYMBOL_GPL(bpf_master_redirect_enabled_key);
+
+ u32 xdp_master_redirect(struct xdp_buff *xdp)
+ {
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+ struct net_device *master, *slave;
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+
+ master = netdev_master_upper_dev_get_rcu(xdp->rxq->dev);
+ slave = master->netdev_ops->ndo_xdp_get_xmit_slave(master, xdp);
+@@ -4395,7 +4375,7 @@ static __always_inline int __xdp_do_redi
+ map = READ_ONCE(ri->map);
+
+ /* The map pointer is cleared when the map is being torn
+- * down by bpf_clear_redirect_map()
++ * down by dev_map_free()
+ */
+ if (unlikely(!map)) {
+ err = -ENOENT;
+@@ -4440,7 +4420,7 @@ static __always_inline int __xdp_do_redi
+ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog)
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+ enum bpf_map_type map_type = ri->map_type;
+
+ if (map_type == BPF_MAP_TYPE_XSKMAP)
+@@ -4454,7 +4434,7 @@ EXPORT_SYMBOL_GPL(xdp_do_redirect);
+ int xdp_do_redirect_frame(struct net_device *dev, struct xdp_buff *xdp,
+ struct xdp_frame *xdpf, struct bpf_prog *xdp_prog)
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+ enum bpf_map_type map_type = ri->map_type;
+
+ if (map_type == BPF_MAP_TYPE_XSKMAP)
+@@ -4471,7 +4451,7 @@ static int xdp_do_generic_redirect_map(s
+ enum bpf_map_type map_type, u32 map_id,
+ u32 flags)
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+ struct bpf_map *map;
+ int err;
+
+@@ -4483,7 +4463,7 @@ static int xdp_do_generic_redirect_map(s
+ map = READ_ONCE(ri->map);
+
+ /* The map pointer is cleared when the map is being torn
+- * down by bpf_clear_redirect_map()
++ * down by dev_map_free()
+ */
+ if (unlikely(!map)) {
+ err = -ENOENT;
+@@ -4525,7 +4505,7 @@ static int xdp_do_generic_redirect_map(s
+ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
+ struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+ enum bpf_map_type map_type = ri->map_type;
+ void *fwd = ri->tgt_value;
+ u32 map_id = ri->map_id;
+@@ -4561,7 +4541,7 @@ int xdp_do_generic_redirect(struct net_d
+
+ BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+
+ if (unlikely(flags))
+ return XDP_ABORTED;
+--- a/net/core/lwt_bpf.c
++++ b/net/core/lwt_bpf.c
+@@ -38,12 +38,14 @@ static inline struct bpf_lwt *bpf_lwt_lw
+ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
+ struct dst_entry *dst, bool can_redirect)
+ {
++ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+ int ret;
+
+ /* Disabling BH is needed to protect per-CPU bpf_redirect_info between
+ * BPF prog and skb_do_redirect().
+ */
+ local_bh_disable();
++ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+ bpf_compute_data_pointers(skb);
+ ret = bpf_prog_run_save_cb(lwt->prog, skb);
+
+@@ -76,6 +78,7 @@ static int run_lwt_bpf(struct sk_buff *s
+ break;
+ }
+
++ bpf_net_ctx_clear(bpf_net_ctx);
+ local_bh_enable();
+
+ return ret;
diff --git a/debian/patches-rt/0017-printk-Make-console_is_usable-available-to-nbcon.patch b/debian/patches-rt/0014-printk-Make-console_is_usable-available-to-nbcon.patch
index 8334063ed5..3f88a5627b 100644
--- a/debian/patches-rt/0017-printk-Make-console_is_usable-available-to-nbcon.patch
+++ b/debian/patches-rt/0014-printk-Make-console_is_usable-available-to-nbcon.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Sep 2023 13:25:41 +0000
-Subject: [PATCH 17/48] printk: Make console_is_usable() available to nbcon
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 14/48] printk: Make console_is_usable() available to nbcon
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Move console_is_usable() as-is into internal.h so that it can
be used by nbcon printing functions as well.
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -85,6 +85,36 @@ bool nbcon_alloc(struct console *con);
- void nbcon_init(struct console *con);
+ void nbcon_init(struct console *con, u64 init_seq);
void nbcon_free(struct console *con);
+/*
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define PRINTK_PREFIX_MAX 0
@@ -106,6 +136,8 @@ static inline bool nbcon_alloc(struct co
- static inline void nbcon_init(struct console *con) { }
+ static inline void nbcon_init(struct console *con, u64 init_seq) { }
static inline void nbcon_free(struct console *con) { }
+static inline bool console_is_usable(struct console *con) { return false; }
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern struct printk_buffers printk_shared_pbufs;
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -2702,36 +2702,6 @@ int is_console_locked(void)
+@@ -2697,36 +2697,6 @@ int is_console_locked(void)
}
EXPORT_SYMBOL(is_console_locked);
diff --git a/debian/patches-rt/0014-printk-nbcon-Implement-processing-in-port-lock-wrapp.patch b/debian/patches-rt/0014-printk-nbcon-Implement-processing-in-port-lock-wrapp.patch
deleted file mode 100644
index edf62ea414..0000000000
--- a/debian/patches-rt/0014-printk-nbcon-Implement-processing-in-port-lock-wrapp.patch
+++ /dev/null
@@ -1,478 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Wed, 13 Sep 2023 08:35:23 +0000
-Subject: [PATCH 14/48] printk: nbcon: Implement processing in port->lock
- wrapper
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-Currently the port->lock wrappers uart_port_lock(),
-uart_port_unlock() (and their variants) only lock/unlock
-the spin_lock.
-
-If the port is an nbcon console, the wrappers must also
-acquire/release the console and mark the region as unsafe. This
-allows general port->lock synchronization to be synchronized
-with the nbcon console ownership.
-
-Introduce a new struct nbcon_drvdata within struct console that
-provides the necessary components for the port lock wrappers to
-acquire the nbcon console and track its ownership.
-
-Also introduce uart_port_set_cons() as a wrapper to set @cons
-of a uart_port. The wrapper sets @cons under the port lock in
-order to prevent @cons from disappearing while another context
-owns the port lock via the port lock wrappers.
-
-Also cleanup the description of the console_srcu_read_flags()
-function. It is used by the port lock wrappers to ensure a
-console cannot be fully unregistered while another context
-owns the port lock via the port lock wrappers.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/tty/serial/8250/8250_core.c | 6 +-
- drivers/tty/serial/amba-pl011.c | 2
- drivers/tty/serial/serial_core.c | 16 ++---
- include/linux/console.h | 57 ++++++++++++++++----
- include/linux/printk.h | 13 ++++
- include/linux/serial_core.h | 98 +++++++++++++++++++++++++++++++++++-
- kernel/printk/nbcon.c | 52 +++++++++++++++++++
- 7 files changed, 219 insertions(+), 25 deletions(-)
-
---- a/drivers/tty/serial/8250/8250_core.c
-+++ b/drivers/tty/serial/8250/8250_core.c
-@@ -627,11 +627,11 @@ static int univ8250_console_setup(struct
-
- port = &serial8250_ports[co->index].port;
- /* link port to console */
-- port->cons = co;
-+ uart_port_set_cons(port, co);
-
- retval = serial8250_console_setup(port, options, false);
- if (retval != 0)
-- port->cons = NULL;
-+ uart_port_set_cons(port, NULL);
- return retval;
- }
-
-@@ -689,7 +689,7 @@ static int univ8250_console_match(struct
- continue;
-
- co->index = i;
-- port->cons = co;
-+ uart_port_set_cons(port, co);
- return serial8250_console_setup(port, options, true);
- }
-
---- a/drivers/tty/serial/amba-pl011.c
-+++ b/drivers/tty/serial/amba-pl011.c
-@@ -2488,7 +2488,7 @@ static int pl011_console_match(struct co
- continue;
-
- co->index = i;
-- port->cons = co;
-+ uart_port_set_cons(port, co);
- return pl011_console_setup(co, options);
- }
-
---- a/drivers/tty/serial/serial_core.c
-+++ b/drivers/tty/serial/serial_core.c
-@@ -3145,8 +3145,15 @@ static int serial_core_add_one_port(stru
- state->uart_port = uport;
- uport->state = state;
-
-+ /*
-+ * If this port is in use as a console then the spinlock is already
-+ * initialised.
-+ */
-+ if (!uart_console_registered(uport))
-+ uart_port_spin_lock_init(uport);
-+
- state->pm_state = UART_PM_STATE_UNDEFINED;
-- uport->cons = drv->cons;
-+ uart_port_set_cons(uport, drv->cons);
- uport->minor = drv->tty_driver->minor_start + uport->line;
- uport->name = kasprintf(GFP_KERNEL, "%s%d", drv->dev_name,
- drv->tty_driver->name_base + uport->line);
-@@ -3155,13 +3162,6 @@ static int serial_core_add_one_port(stru
- goto out;
- }
-
-- /*
-- * If this port is in use as a console then the spinlock is already
-- * initialised.
-- */
-- if (!uart_console_registered(uport))
-- uart_port_spin_lock_init(uport);
--
- if (uport->cons && uport->dev)
- of_console_check(uport->dev->of_node, uport->cons->name, uport->line);
-
---- a/include/linux/console.h
-+++ b/include/linux/console.h
-@@ -283,6 +283,25 @@ struct nbcon_write_context {
- };
-
- /**
-+ * struct nbcon_drvdata - Data to allow nbcon acquire in non-print context
-+ * @ctxt: The core console context
-+ * @srcu_cookie: Storage for a console_srcu_lock cookie, if needed
-+ * @owner_index: Storage for the owning console index, if needed
-+ * @locked: Storage for the locked state, if needed
-+ *
-+ * All fields (except for @ctxt) are available exclusively to the driver to
-+ * use as needed. They are not used by the printk subsystem.
-+ */
-+struct nbcon_drvdata {
-+ struct nbcon_context __private ctxt;
-+
-+ /* reserved for driver use */
-+ int srcu_cookie;
-+ short owner_index;
-+ bool locked;
-+};
-+
-+/**
- * struct console - The console descriptor structure
- * @name: The name of the console driver
- * @write: Legacy write callback to output messages (Optional)
-@@ -396,6 +415,21 @@ struct console {
-
- atomic_t __private nbcon_state;
- atomic_long_t __private nbcon_seq;
-+
-+ /**
-+ * @nbcon_drvdata:
-+ *
-+ * Data for nbcon ownership tracking to allow acquiring nbcon consoles
-+ * in non-printing contexts.
-+ *
-+ * Drivers may need to acquire nbcon consoles in non-printing
-+ * contexts. This is achieved by providing a struct nbcon_drvdata.
-+ * Then the driver can call nbcon_driver_acquire() and
-+ * nbcon_driver_release(). The struct does not require any special
-+ * initialization.
-+ */
-+ struct nbcon_drvdata *nbcon_drvdata;
-+
- struct printk_buffers *pbufs;
- };
-
-@@ -425,28 +459,29 @@ extern void console_list_unlock(void) __
- extern struct hlist_head console_list;
-
- /**
-- * console_srcu_read_flags - Locklessly read the console flags
-+ * console_srcu_read_flags - Locklessly read flags of a possibly registered
-+ * console
- * @con: struct console pointer of console to read flags from
- *
-- * This function provides the necessary READ_ONCE() and data_race()
-- * notation for locklessly reading the console flags. The READ_ONCE()
-- * in this function matches the WRITE_ONCE() when @flags are modified
-- * for registered consoles with console_srcu_write_flags().
-+ * Locklessly reading @con->flags provides a consistent read value because
-+ * there is at most one CPU modifying @con->flags and that CPU is using only
-+ * read-modify-write operations to do so.
- *
-- * Only use this function to read console flags when locklessly
-- * iterating the console list via srcu.
-+ * Requires console_srcu_read_lock to be held, which implies that @con might
-+ * be a registered console. If the caller is holding the console_list_lock or
-+ * it is certain that the console is not registered, the caller may read
-+ * @con->flags directly instead.
- *
- * Context: Any context.
-+ * Return: The current value of the @con->flags field.
- */
- static inline short console_srcu_read_flags(const struct console *con)
- {
- WARN_ON_ONCE(!console_srcu_read_lock_is_held());
-
- /*
-- * Locklessly reading console->flags provides a consistent
-- * read value because there is at most one CPU modifying
-- * console->flags and that CPU is using only read-modify-write
-- * operations to do so.
-+ * The READ_ONCE() matches the WRITE_ONCE() when @flags are modified
-+ * for registered consoles with console_srcu_write_flags().
- */
- return data_race(READ_ONCE(con->flags));
- }
---- a/include/linux/printk.h
-+++ b/include/linux/printk.h
-@@ -9,6 +9,8 @@
- #include <linux/ratelimit_types.h>
- #include <linux/once_lite.h>
-
-+struct console;
-+
- extern const char linux_banner[];
- extern const char linux_proc_banner[];
-
-@@ -193,6 +195,8 @@ void show_regs_print_info(const char *lo
- extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold;
- extern asmlinkage void dump_stack(void) __cold;
- void printk_trigger_flush(void);
-+extern void nbcon_driver_acquire(struct console *con);
-+extern void nbcon_driver_release(struct console *con);
- #else
- static inline __printf(1, 0)
- int vprintk(const char *s, va_list args)
-@@ -272,6 +276,15 @@ static inline void dump_stack(void)
- static inline void printk_trigger_flush(void)
- {
- }
-+
-+static inline void nbcon_driver_acquire(struct console *con)
-+{
-+}
-+
-+static inline void nbcon_driver_release(struct console *con)
-+{
-+}
-+
- #endif
-
- bool this_cpu_in_panic(void);
---- a/include/linux/serial_core.h
-+++ b/include/linux/serial_core.h
-@@ -8,10 +8,13 @@
- #define LINUX_SERIAL_CORE_H
-
- #include <linux/bitops.h>
-+#include <linux/bug.h>
- #include <linux/compiler.h>
- #include <linux/console.h>
- #include <linux/interrupt.h>
- #include <linux/circ_buf.h>
-+#include <linux/lockdep.h>
-+#include <linux/printk.h>
- #include <linux/spinlock.h>
- #include <linux/sched.h>
- #include <linux/tty.h>
-@@ -607,12 +610,90 @@ static inline void __uart_port_unlock_ir
- }
-
- /**
-+ * uart_port_set_cons - Safely set the @cons field for a uart
-+ * @up: The uart port to set
-+ * @con: The new console to set to
-+ *
-+ * This function must be used to set @up->cons. It uses the port lock to
-+ * synchronize with the port lock wrappers in order to ensure that the console
-+ * cannot change or disappear while another context is holding the port lock.
-+ */
-+static inline void uart_port_set_cons(struct uart_port *up, struct console *con)
-+{
-+ unsigned long flags;
-+
-+ __uart_port_lock_irqsave(up, &flags);
-+ up->cons = con;
-+ __uart_port_unlock_irqrestore(up, flags);
-+}
-+
-+/* Only for internal port lock wrapper usage. */
-+static inline void __uart_port_nbcon_acquire(struct uart_port *up)
-+{
-+ lockdep_assert_held_once(&up->lock);
-+
-+ if (likely(!uart_console(up)))
-+ return;
-+
-+ if (up->cons->nbcon_drvdata) {
-+ /*
-+ * If @up->cons is registered, prevent it from fully
-+ * unregistering until this context releases the nbcon.
-+ */
-+ int cookie = console_srcu_read_lock();
-+
-+ /* Ensure console is registered and is an nbcon console. */
-+ if (!hlist_unhashed_lockless(&up->cons->node) &&
-+ (console_srcu_read_flags(up->cons) & CON_NBCON)) {
-+ WARN_ON_ONCE(up->cons->nbcon_drvdata->locked);
-+
-+ nbcon_driver_acquire(up->cons);
-+
-+ /*
-+ * Record @up->line to be used during release because
-+ * @up->cons->index can change while the port and
-+ * nbcon are locked.
-+ */
-+ up->cons->nbcon_drvdata->owner_index = up->line;
-+ up->cons->nbcon_drvdata->srcu_cookie = cookie;
-+ up->cons->nbcon_drvdata->locked = true;
-+ } else {
-+ console_srcu_read_unlock(cookie);
-+ }
-+ }
-+}
-+
-+/* Only for internal port lock wrapper usage. */
-+static inline void __uart_port_nbcon_release(struct uart_port *up)
-+{
-+ lockdep_assert_held_once(&up->lock);
-+
-+ /*
-+ * uart_console() cannot be used here because @up->cons->index might
-+ * have changed. Check against @up->cons->nbcon_drvdata->owner_index
-+ * instead.
-+ */
-+
-+ if (unlikely(up->cons &&
-+ up->cons->nbcon_drvdata &&
-+ up->cons->nbcon_drvdata->locked &&
-+ up->cons->nbcon_drvdata->owner_index == up->line)) {
-+ WARN_ON_ONCE(!up->cons->nbcon_drvdata->locked);
-+
-+ up->cons->nbcon_drvdata->locked = false;
-+ nbcon_driver_release(up->cons);
-+ console_srcu_read_unlock(up->cons->nbcon_drvdata->srcu_cookie);
-+ }
-+}
-+
-+/**
- * uart_port_lock - Lock the UART port
- * @up: Pointer to UART port structure
- */
- static inline void uart_port_lock(struct uart_port *up)
- {
- spin_lock(&up->lock);
-+ __uart_port_nbcon_acquire(up);
- }
-
- /**
-@@ -622,6 +703,7 @@ static inline void uart_port_lock(struct
- static inline void uart_port_lock_irq(struct uart_port *up)
- {
- spin_lock_irq(&up->lock);
-+ __uart_port_nbcon_acquire(up);
- }
-
- /**
-@@ -632,6 +714,7 @@ static inline void uart_port_lock_irq(st
- static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
- {
- spin_lock_irqsave(&up->lock, *flags);
-+ __uart_port_nbcon_acquire(up);
- }
-
- /**
-@@ -642,7 +725,11 @@ static inline void uart_port_lock_irqsav
- */
- static inline bool uart_port_trylock(struct uart_port *up)
- {
-- return spin_trylock(&up->lock);
-+ if (!spin_trylock(&up->lock))
-+ return false;
-+
-+ __uart_port_nbcon_acquire(up);
-+ return true;
- }
-
- /**
-@@ -654,7 +741,11 @@ static inline bool uart_port_trylock(str
- */
- static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags)
- {
-- return spin_trylock_irqsave(&up->lock, *flags);
-+ if (!spin_trylock_irqsave(&up->lock, *flags))
-+ return false;
-+
-+ __uart_port_nbcon_acquire(up);
-+ return true;
- }
-
- /**
-@@ -663,6 +754,7 @@ static inline bool uart_port_trylock_irq
- */
- static inline void uart_port_unlock(struct uart_port *up)
- {
-+ __uart_port_nbcon_release(up);
- spin_unlock(&up->lock);
- }
-
-@@ -672,6 +764,7 @@ static inline void uart_port_unlock(stru
- */
- static inline void uart_port_unlock_irq(struct uart_port *up)
- {
-+ __uart_port_nbcon_release(up);
- spin_unlock_irq(&up->lock);
- }
-
-@@ -682,6 +775,7 @@ static inline void uart_port_unlock_irq(
- */
- static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
- {
-+ __uart_port_nbcon_release(up);
- spin_unlock_irqrestore(&up->lock, flags);
- }
-
---- a/kernel/printk/nbcon.c
-+++ b/kernel/printk/nbcon.c
-@@ -3,9 +3,12 @@
- // Copyright (C) 2022 Intel, Thomas Gleixner
-
- #include <linux/kernel.h>
-+#include <linux/bug.h>
- #include <linux/console.h>
- #include <linux/delay.h>
-+#include <linux/export.h>
- #include <linux/slab.h>
-+#include <linux/string.h>
- #include "internal.h"
- /*
- * Printk console printing implementation for consoles which does not depend
-@@ -988,3 +991,52 @@ void nbcon_free(struct console *con)
-
- con->pbufs = NULL;
- }
-+
-+/**
-+ * nbcon_driver_acquire - Acquire nbcon console and enter unsafe section
-+ * @con: The nbcon console to acquire
-+ *
-+ * Context: Any context which could not be migrated to another CPU.
-+ *
-+ * Console drivers will usually use their own internal synchronization
-+ * mechasism to synchronize between console printing and non-printing
-+ * activities (such as setting baud rates). However, nbcon console drivers
-+ * supporting atomic consoles may also want to mark unsafe sections when
-+ * performing non-printing activities.
-+ *
-+ * This function acquires the nbcon console using priority NBCON_PRIO_NORMAL
-+ * and marks it unsafe for handover/takeover.
-+ *
-+ * Console drivers using this function must have provided @nbcon_drvdata in
-+ * their struct console, which is used to track ownership and state
-+ * information.
-+ */
-+void nbcon_driver_acquire(struct console *con)
-+{
-+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(con->nbcon_drvdata, ctxt);
-+
-+ cant_migrate();
-+
-+ do {
-+ do {
-+ memset(ctxt, 0, sizeof(*ctxt));
-+ ctxt->console = con;
-+ ctxt->prio = NBCON_PRIO_NORMAL;
-+ } while (!nbcon_context_try_acquire(ctxt));
-+
-+ } while (!nbcon_context_enter_unsafe(ctxt));
-+}
-+EXPORT_SYMBOL_GPL(nbcon_driver_acquire);
-+
-+/**
-+ * nbcon_driver_release - Exit unsafe section and release the nbcon console
-+ * @con: The nbcon console acquired in nbcon_driver_acquire()
-+ */
-+void nbcon_driver_release(struct console *con)
-+{
-+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(con->nbcon_drvdata, ctxt);
-+
-+ if (nbcon_context_exit_unsafe(ctxt))
-+ nbcon_context_release(ctxt);
-+}
-+EXPORT_SYMBOL_GPL(nbcon_driver_release);
diff --git a/debian/patches-rt/0014-serial-pch-Make-push_rx-return-void.patch b/debian/patches-rt/0014-serial-pch-Make-push_rx-return-void.patch
deleted file mode 100644
index 9ed4ea5abc..0000000000
--- a/debian/patches-rt/0014-serial-pch-Make-push_rx-return-void.patch
+++ /dev/null
@@ -1,56 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 1 Mar 2024 22:45:27 +0100
-Subject: [PATCH 14/18] serial: pch: Make push_rx() return void.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-push_rx() returns always 0.
-
-Make push_rx() return void.
-
-Link: https://lore.kernel.org/r/20240301215246.891055-15-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/tty/serial/pch_uart.c | 12 ++++--------
- 1 file changed, 4 insertions(+), 8 deletions(-)
-
---- a/drivers/tty/serial/pch_uart.c
-+++ b/drivers/tty/serial/pch_uart.c
-@@ -599,16 +599,14 @@ static void pch_uart_hal_set_break(struc
- iowrite8(lcr, priv->membase + UART_LCR);
- }
-
--static int push_rx(struct eg20t_port *priv, const unsigned char *buf,
-- int size)
-+static void push_rx(struct eg20t_port *priv, const unsigned char *buf,
-+ int size)
- {
- struct uart_port *port = &priv->port;
- struct tty_port *tport = &port->state->port;
-
- tty_insert_flip_string(tport, buf, size);
- tty_flip_buffer_push(tport);
--
-- return 0;
- }
-
- static int dma_push_rx(struct eg20t_port *priv, int size)
-@@ -761,7 +759,7 @@ static int handle_rx_to(struct eg20t_por
- {
- struct pch_uart_buffer *buf;
- int rx_size;
-- int ret;
-+
- if (!priv->start_rx) {
- pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT |
- PCH_UART_HAL_RX_ERR_INT);
-@@ -770,9 +768,7 @@ static int handle_rx_to(struct eg20t_por
- buf = &priv->rxbuf;
- do {
- rx_size = pch_uart_hal_read(priv, buf->buf, buf->size);
-- ret = push_rx(priv, buf->buf, rx_size);
-- if (ret)
-- return 0;
-+ push_rx(priv, buf->buf, rx_size);
- } while (rx_size == buf->size);
-
- return PCH_UART_HANDLED_RX_INT;
diff --git a/debian/patches-rt/0015-net-Move-per-CPU-flush-lists-to-bpf_net_context-on-P.patch b/debian/patches-rt/0015-net-Move-per-CPU-flush-lists-to-bpf_net_context-on-P.patch
new file mode 100644
index 0000000000..ab01176e1d
--- /dev/null
+++ b/debian/patches-rt/0015-net-Move-per-CPU-flush-lists-to-bpf_net_context-on-P.patch
@@ -0,0 +1,270 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 1 Feb 2024 15:39:56 +0100
+Subject: [PATCH 15/15] net: Move per-CPU flush-lists to bpf_net_context on
+ PREEMPT_RT.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+The per-CPU flush lists, which are accessed from within the NAPI callback
+(xdp_do_flush() for instance), are per-CPU. There are subject to the
+same problem as struct bpf_redirect_info.
+
+Add the per-CPU lists cpu_map_flush_list, dev_map_flush_list and
+xskmap_map_flush_list to struct bpf_net_context. Add wrappers for the
+access. The lists initialized on first usage (similar to
+bpf_net_ctx_get_ri()).
+
+Cc: "Björn Töpel" <bjorn@kernel.org>
+Cc: Alexei Starovoitov <ast@kernel.org>
+Cc: Andrii Nakryiko <andrii@kernel.org>
+Cc: Eduard Zingerman <eddyz87@gmail.com>
+Cc: Hao Luo <haoluo@google.com>
+Cc: Jesper Dangaard Brouer <hawk@kernel.org>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: John Fastabend <john.fastabend@gmail.com>
+Cc: Jonathan Lemon <jonathan.lemon@gmail.com>
+Cc: KP Singh <kpsingh@kernel.org>
+Cc: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Cc: Magnus Karlsson <magnus.karlsson@intel.com>
+Cc: Martin KaFai Lau <martin.lau@linux.dev>
+Cc: Song Liu <song@kernel.org>
+Cc: Stanislav Fomichev <sdf@google.com>
+Cc: Toke Høiland-Jørgensen <toke@redhat.com>
+Cc: Yonghong Song <yonghong.song@linux.dev>
+Cc: bpf@vger.kernel.org
+Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/filter.h | 42 ++++++++++++++++++++++++++++++++++++++++++
+ kernel/bpf/cpumap.c | 19 +++----------------
+ kernel/bpf/devmap.c | 11 +++--------
+ net/xdp/xsk.c | 12 ++++--------
+ 4 files changed, 52 insertions(+), 32 deletions(-)
+
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -736,6 +736,9 @@ struct bpf_nh_params {
+ /* flags for bpf_redirect_info kern_flags */
+ #define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */
+ #define BPF_RI_F_RI_INIT BIT(1)
++#define BPF_RI_F_CPU_MAP_INIT BIT(2)
++#define BPF_RI_F_DEV_MAP_INIT BIT(3)
++#define BPF_RI_F_XSK_MAP_INIT BIT(4)
+
+ struct bpf_redirect_info {
+ u64 tgt_index;
+@@ -750,6 +753,9 @@ struct bpf_redirect_info {
+
+ struct bpf_net_context {
+ struct bpf_redirect_info ri;
++ struct list_head cpu_map_flush_list;
++ struct list_head dev_map_flush_list;
++ struct list_head xskmap_map_flush_list;
+ };
+
+ static inline struct bpf_net_context *bpf_net_ctx_set(struct bpf_net_context *bpf_net_ctx)
+@@ -787,6 +793,42 @@ static inline struct bpf_redirect_info *
+ return &bpf_net_ctx->ri;
+ }
+
++static inline struct list_head *bpf_net_ctx_get_cpu_map_flush_list(void)
++{
++ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
++
++ if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_CPU_MAP_INIT)) {
++ INIT_LIST_HEAD(&bpf_net_ctx->cpu_map_flush_list);
++ bpf_net_ctx->ri.kern_flags |= BPF_RI_F_CPU_MAP_INIT;
++ }
++
++ return &bpf_net_ctx->cpu_map_flush_list;
++}
++
++static inline struct list_head *bpf_net_ctx_get_dev_flush_list(void)
++{
++ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
++
++ if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_DEV_MAP_INIT)) {
++ INIT_LIST_HEAD(&bpf_net_ctx->dev_map_flush_list);
++ bpf_net_ctx->ri.kern_flags |= BPF_RI_F_DEV_MAP_INIT;
++ }
++
++ return &bpf_net_ctx->dev_map_flush_list;
++}
++
++static inline struct list_head *bpf_net_ctx_get_xskmap_flush_list(void)
++{
++ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
++
++ if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_XSK_MAP_INIT)) {
++ INIT_LIST_HEAD(&bpf_net_ctx->xskmap_map_flush_list);
++ bpf_net_ctx->ri.kern_flags |= BPF_RI_F_XSK_MAP_INIT;
++ }
++
++ return &bpf_net_ctx->xskmap_map_flush_list;
++}
++
+ /* Compute the linear packet data range [data, data_end) which
+ * will be accessed by various program types (cls_bpf, act_bpf,
+ * lwt, ...). Subsystems allowing direct data access must (!)
+--- a/kernel/bpf/cpumap.c
++++ b/kernel/bpf/cpumap.c
+@@ -79,8 +79,6 @@ struct bpf_cpu_map {
+ struct bpf_cpu_map_entry __rcu **cpu_map;
+ };
+
+-static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);
+-
+ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
+ {
+ u32 value_size = attr->value_size;
+@@ -709,7 +707,7 @@ static void bq_flush_to_queue(struct xdp
+ */
+ static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
+ {
+- struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
++ struct list_head *flush_list = bpf_net_ctx_get_cpu_map_flush_list();
+ struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
+
+ if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
+@@ -761,7 +759,7 @@ int cpu_map_generic_redirect(struct bpf_
+
+ void __cpu_map_flush(void)
+ {
+- struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
++ struct list_head *flush_list = bpf_net_ctx_get_cpu_map_flush_list();
+ struct xdp_bulk_queue *bq, *tmp;
+
+ list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
+@@ -775,20 +773,9 @@ void __cpu_map_flush(void)
+ #ifdef CONFIG_DEBUG_NET
+ bool cpu_map_check_flush(void)
+ {
+- if (list_empty(this_cpu_ptr(&cpu_map_flush_list)))
++ if (list_empty(bpf_net_ctx_get_cpu_map_flush_list()))
+ return false;
+ __cpu_map_flush();
+ return true;
+ }
+ #endif
+-
+-static int __init cpu_map_init(void)
+-{
+- int cpu;
+-
+- for_each_possible_cpu(cpu)
+- INIT_LIST_HEAD(&per_cpu(cpu_map_flush_list, cpu));
+- return 0;
+-}
+-
+-subsys_initcall(cpu_map_init);
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -83,7 +83,6 @@ struct bpf_dtab {
+ u32 n_buckets;
+ };
+
+-static DEFINE_PER_CPU(struct list_head, dev_flush_list);
+ static DEFINE_SPINLOCK(dev_map_lock);
+ static LIST_HEAD(dev_map_list);
+
+@@ -415,7 +414,7 @@ static void bq_xmit_all(struct xdp_dev_b
+ */
+ void __dev_flush(void)
+ {
+- struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
++ struct list_head *flush_list = bpf_net_ctx_get_dev_flush_list();
+ struct xdp_dev_bulk_queue *bq, *tmp;
+
+ list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
+@@ -429,7 +428,7 @@ void __dev_flush(void)
+ #ifdef CONFIG_DEBUG_NET
+ bool dev_check_flush(void)
+ {
+- if (list_empty(this_cpu_ptr(&dev_flush_list)))
++ if (list_empty(bpf_net_ctx_get_dev_flush_list()))
+ return false;
+ __dev_flush();
+ return true;
+@@ -460,7 +459,7 @@ static void *__dev_map_lookup_elem(struc
+ static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
+ struct net_device *dev_rx, struct bpf_prog *xdp_prog)
+ {
+- struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
++ struct list_head *flush_list = bpf_net_ctx_get_dev_flush_list();
+ struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
+
+ if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
+@@ -1160,15 +1159,11 @@ static struct notifier_block dev_map_not
+
+ static int __init dev_map_init(void)
+ {
+- int cpu;
+-
+ /* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
+ BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
+ offsetof(struct _bpf_dtab_netdev, dev));
+ register_netdevice_notifier(&dev_map_notifier);
+
+- for_each_possible_cpu(cpu)
+- INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
+ return 0;
+ }
+
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -35,8 +35,6 @@
+ #define TX_BATCH_SIZE 32
+ #define MAX_PER_SOCKET_BUDGET (TX_BATCH_SIZE)
+
+-static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
+-
+ void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
+ {
+ if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
+@@ -372,7 +370,7 @@ static int xsk_rcv(struct xdp_sock *xs,
+
+ int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
+ {
+- struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
++ struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();
+ int err;
+
+ err = xsk_rcv(xs, xdp);
+@@ -387,7 +385,7 @@ int __xsk_map_redirect(struct xdp_sock *
+
+ void __xsk_map_flush(void)
+ {
+- struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
++ struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();
+ struct xdp_sock *xs, *tmp;
+
+ list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
+@@ -399,7 +397,7 @@ void __xsk_map_flush(void)
+ #ifdef CONFIG_DEBUG_NET
+ bool xsk_map_check_flush(void)
+ {
+- if (list_empty(this_cpu_ptr(&xskmap_flush_list)))
++ if (list_empty(bpf_net_ctx_get_xskmap_flush_list()))
+ return false;
+ __xsk_map_flush();
+ return true;
+@@ -1772,7 +1770,7 @@ static struct pernet_operations xsk_net_
+
+ static int __init xsk_init(void)
+ {
+- int err, cpu;
++ int err;
+
+ err = proto_register(&xsk_proto, 0 /* no slab */);
+ if (err)
+@@ -1790,8 +1788,6 @@ static int __init xsk_init(void)
+ if (err)
+ goto out_pernet;
+
+- for_each_possible_cpu(cpu)
+- INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
+ return 0;
+
+ out_pernet:
diff --git a/debian/patches-rt/0018-printk-Let-console_is_usable-handle-nbcon.patch b/debian/patches-rt/0015-printk-Let-console_is_usable-handle-nbcon.patch
index 6d995e7020..cfc311a7a5 100644
--- a/debian/patches-rt/0018-printk-Let-console_is_usable-handle-nbcon.patch
+++ b/debian/patches-rt/0015-printk-Let-console_is_usable-handle-nbcon.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Sep 2023 13:53:21 +0000
-Subject: [PATCH 18/48] printk: Let console_is_usable() handle nbcon
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 15/48] printk: Let console_is_usable() handle nbcon
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
The nbcon consoles use a different printing callback. For nbcon
consoles, check for the write_atomic() callback instead of
diff --git a/debian/patches-rt/0015-serial-pch-Don-t-disable-interrupts-while-acquiring-.patch b/debian/patches-rt/0015-serial-pch-Don-t-disable-interrupts-while-acquiring-.patch
deleted file mode 100644
index aad340f32c..0000000000
--- a/debian/patches-rt/0015-serial-pch-Don-t-disable-interrupts-while-acquiring-.patch
+++ /dev/null
@@ -1,42 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 1 Mar 2024 22:45:28 +0100
-Subject: [PATCH 15/18] serial: pch: Don't disable interrupts while acquiring
- lock in ISR.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-The interrupt service routine is always invoked with disabled
-interrupts.
-
-Remove the _irqsave() from the locking functions in the interrupts
-service routine/ pch_uart_interrupt().
-
-Link: https://lore.kernel.org/r/20240301215246.891055-16-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/tty/serial/pch_uart.c | 5 ++---
- 1 file changed, 2 insertions(+), 3 deletions(-)
-
---- a/drivers/tty/serial/pch_uart.c
-+++ b/drivers/tty/serial/pch_uart.c
-@@ -1010,11 +1010,10 @@ static irqreturn_t pch_uart_interrupt(in
- u8 lsr;
- int ret = 0;
- unsigned char iid;
-- unsigned long flags;
- int next = 1;
- u8 msr;
-
-- spin_lock_irqsave(&priv->lock, flags);
-+ spin_lock(&priv->lock);
- handled = 0;
- while (next) {
- iid = pch_uart_hal_get_iid(priv);
-@@ -1074,7 +1073,7 @@ static irqreturn_t pch_uart_interrupt(in
- handled |= (unsigned int)ret;
- }
-
-- spin_unlock_irqrestore(&priv->lock, flags);
-+ spin_unlock(&priv->lock);
- return IRQ_RETVAL(handled);
- }
-
diff --git a/debian/patches-rt/0019-printk-Add-flags-argument-for-console_is_usable.patch b/debian/patches-rt/0016-printk-Add-flags-argument-for-console_is_usable.patch
index ed902938c0..068ad851e9 100644
--- a/debian/patches-rt/0019-printk-Add-flags-argument-for-console_is_usable.patch
+++ b/debian/patches-rt/0016-printk-Add-flags-argument-for-console_is_usable.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Sep 2023 13:45:33 +0000
-Subject: [PATCH 19/48] printk: Add @flags argument for console_is_usable()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 16/48] printk: Add @flags argument for console_is_usable()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
The caller of console_is_usable() usually needs @console->flags
for its own checks. Rather than having console_is_usable() read
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return false;
@@ -143,7 +139,7 @@ static inline bool nbcon_alloc(struct co
- static inline void nbcon_init(struct console *con) { }
+ static inline void nbcon_init(struct console *con, u64 init_seq) { }
static inline void nbcon_free(struct console *con) { }
-static inline bool console_is_usable(struct console *con) { return false; }
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -2947,9 +2947,10 @@ static bool console_flush_all(bool do_co
+@@ -2942,9 +2942,10 @@ static bool console_flush_all(bool do_co
cookie = console_srcu_read_lock();
for_each_console_srcu(con) {
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
any_usable = true;
-@@ -3819,7 +3820,7 @@ static bool __pr_flush(struct console *c
+@@ -3852,7 +3853,7 @@ static bool __pr_flush(struct console *c
* that they make forward progress, so only increment
* @diff for usable consoles.
*/
diff --git a/debian/patches-rt/0016-printk-nbcon-Fix-kerneldoc-for-enums.patch b/debian/patches-rt/0016-printk-nbcon-Fix-kerneldoc-for-enums.patch
deleted file mode 100644
index 246deca24d..0000000000
--- a/debian/patches-rt/0016-printk-nbcon-Fix-kerneldoc-for-enums.patch
+++ /dev/null
@@ -1,36 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Fri, 9 Feb 2024 10:46:58 +0000
-Subject: [PATCH 16/48] printk: nbcon: Fix kerneldoc for enums
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-Kerneldoc requires enums to be specified as such. Otherwise it is
-interpreted as function documentation.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Reviewed-by: Randy Dunlap <rdunlap@infradead.org>
-Reviewed-by: Petr Mladek <pmladek@suse.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/console.h | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
---- a/include/linux/console.h
-+++ b/include/linux/console.h
-@@ -137,7 +137,7 @@ static inline int con_debug_leave(void)
- */
-
- /**
-- * cons_flags - General console flags
-+ * enum cons_flags - General console flags
- * @CON_PRINTBUFFER: Used by newly registered consoles to avoid duplicate
- * output of messages that were already shown by boot
- * consoles or read by userspace via syslog() syscall.
-@@ -218,7 +218,7 @@ struct nbcon_state {
- static_assert(sizeof(struct nbcon_state) <= sizeof(int));
-
- /**
-- * nbcon_prio - console owner priority for nbcon consoles
-+ * enum nbcon_prio - console owner priority for nbcon consoles
- * @NBCON_PRIO_NONE: Unused
- * @NBCON_PRIO_NORMAL: Normal (non-emergency) usage
- * @NBCON_PRIO_EMERGENCY: Emergency output (WARN/OOPS...)
diff --git a/debian/patches-rt/0016-serial-pch-Don-t-initialize-uart_port-s-spin_lock.patch b/debian/patches-rt/0016-serial-pch-Don-t-initialize-uart_port-s-spin_lock.patch
deleted file mode 100644
index 4acdbb831f..0000000000
--- a/debian/patches-rt/0016-serial-pch-Don-t-initialize-uart_port-s-spin_lock.patch
+++ /dev/null
@@ -1,28 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 1 Mar 2024 22:45:29 +0100
-Subject: [PATCH 16/18] serial: pch: Don't initialize uart_port's spin_lock.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-There is no need to directly initialize the spinlock_t in struct
-uart_port. The structure is later passed to uart_add_one_port() which
-initialize the complete struct including the lock member.
-
-Remove spin_lock_init() on uart_port's internal lock.
-
-Link: https://lore.kernel.org/r/20240301215246.891055-17-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/tty/serial/pch_uart.c | 2 --
- 1 file changed, 2 deletions(-)
-
---- a/drivers/tty/serial/pch_uart.c
-+++ b/drivers/tty/serial/pch_uart.c
-@@ -1725,8 +1725,6 @@ static struct eg20t_port *pch_uart_init_
- KBUILD_MODNAME ":" PCH_UART_DRIVER_DEVICE "%d",
- priv->port.line);
-
-- spin_lock_init(&priv->port.lock);
--
- pci_set_drvdata(pdev, priv);
- priv->trigger_level = 1;
- priv->fcr = 0;
diff --git a/debian/patches-rt/0017-printk-nbcon-Add-helper-to-assign-priority-based-on-.patch b/debian/patches-rt/0017-printk-nbcon-Add-helper-to-assign-priority-based-on-.patch
new file mode 100644
index 0000000000..a7cb342ff6
--- /dev/null
+++ b/debian/patches-rt/0017-printk-nbcon-Add-helper-to-assign-priority-based-on-.patch
@@ -0,0 +1,67 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Thu, 14 Dec 2023 14:38:42 +0000
+Subject: [PATCH 17/48] printk: nbcon: Add helper to assign priority based on
+ CPU state
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+Add a helper function to use the current state of the CPU to
+determine which priority to assign to the printing context.
+
+The EMERGENCY priority handling is added in a follow-up commit.
+It will use a per-CPU variable.
+
+Note: nbcon_device_try_acquire(), which is used by console
+ drivers to acquire the nbcon console for non-printing
+ activities, will always use NORMAL priority.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/internal.h | 2 ++
+ kernel/printk/nbcon.c | 16 ++++++++++++++++
+ 2 files changed, 18 insertions(+)
+
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -84,6 +84,7 @@ void nbcon_seq_force(struct console *con
+ bool nbcon_alloc(struct console *con);
+ void nbcon_init(struct console *con, u64 init_seq);
+ void nbcon_free(struct console *con);
++enum nbcon_prio nbcon_get_default_prio(void);
+
+ /*
+ * Check if the given console is currently capable and allowed to print
+@@ -138,6 +139,7 @@ static inline void nbcon_seq_force(struc
+ static inline bool nbcon_alloc(struct console *con) { return false; }
+ static inline void nbcon_init(struct console *con, u64 init_seq) { }
+ static inline void nbcon_free(struct console *con) { }
++static inline enum nbcon_prio nbcon_get_default_prio(void) { return NBCON_PRIO_NONE; }
+
+ static inline bool console_is_usable(struct console *con, short flags) { return false; }
+
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -938,6 +938,22 @@ static bool nbcon_emit_next_record(struc
+ }
+
+ /**
++ * nbcon_get_default_prio - The appropriate nbcon priority to use for nbcon
++ * printing on the current CPU
++ *
++ * Context: Any context which could not be migrated to another CPU.
++ * Return: The nbcon_prio to use for acquiring an nbcon console in this
++ * context for printing.
++ */
++enum nbcon_prio nbcon_get_default_prio(void)
++{
++ if (this_cpu_in_panic())
++ return NBCON_PRIO_PANIC;
++
++ return NBCON_PRIO_NORMAL;
++}
++
++/**
+ * nbcon_alloc - Allocate buffers needed by the nbcon console
+ * @con: Console to allocate buffers for
+ *
diff --git a/debian/patches-rt/0017-serial-pch-Remove-eg20t_port-lock.patch b/debian/patches-rt/0017-serial-pch-Remove-eg20t_port-lock.patch
deleted file mode 100644
index 9ca34a1034..0000000000
--- a/debian/patches-rt/0017-serial-pch-Remove-eg20t_port-lock.patch
+++ /dev/null
@@ -1,128 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 1 Mar 2024 22:45:30 +0100
-Subject: [PATCH 17/18] serial: pch: Remove eg20t_port::lock.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-The struct eg20t_port has a spinlock_t which is used for locking while
-access I/O of the device. Then there is the uart_portlock which is
-sometimes and nests within eg20t_port's lock.
-
-The uart_port lock is not used while using the struct in
-pch_uart_hal_read() which might be okay. Then both locks are used in
-pch_console_write() which looks odd especially the double try_lock part.
-
-All in all it looks like the uart_port's lock could replace eg20t_port's
-lock and simplify the code.
-
-Remove eg20t_port::lock and use uart_port's lock for the lock scope.
-
-Link: https://lore.kernel.org/r/20240301215246.891055-18-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/tty/serial/pch_uart.c | 26 ++++++--------------------
- 1 file changed, 6 insertions(+), 20 deletions(-)
-
---- a/drivers/tty/serial/pch_uart.c
-+++ b/drivers/tty/serial/pch_uart.c
-@@ -237,9 +237,6 @@ struct eg20t_port {
-
- #define IRQ_NAME_SIZE 17
- char irq_name[IRQ_NAME_SIZE];
--
-- /* protect the eg20t_port private structure and io access to membase */
-- spinlock_t lock;
- };
-
- /**
-@@ -1013,7 +1010,7 @@ static irqreturn_t pch_uart_interrupt(in
- int next = 1;
- u8 msr;
-
-- spin_lock(&priv->lock);
-+ uart_port_lock(&priv->port);
- handled = 0;
- while (next) {
- iid = pch_uart_hal_get_iid(priv);
-@@ -1073,7 +1070,7 @@ static irqreturn_t pch_uart_interrupt(in
- handled |= (unsigned int)ret;
- }
-
-- spin_unlock(&priv->lock);
-+ uart_port_unlock(&priv->port);
- return IRQ_RETVAL(handled);
- }
-
-@@ -1184,9 +1181,9 @@ static void pch_uart_break_ctl(struct ua
- unsigned long flags;
-
- priv = container_of(port, struct eg20t_port, port);
-- spin_lock_irqsave(&priv->lock, flags);
-+ uart_port_lock_irqsave(&priv->port, &flags);
- pch_uart_hal_set_break(priv, ctl);
-- spin_unlock_irqrestore(&priv->lock, flags);
-+ uart_port_unlock_irqrestore(&priv->port, flags);
- }
-
- /* Grab any interrupt resources and initialise any low level driver state. */
-@@ -1336,8 +1333,7 @@ static void pch_uart_set_termios(struct
-
- baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
-
-- spin_lock_irqsave(&priv->lock, flags);
-- uart_port_lock(port);
-+ uart_port_lock_irqsave(port, &flags);
-
- uart_update_timeout(port, termios->c_cflag, baud);
- rtn = pch_uart_hal_set_line(priv, baud, parity, bits, stb);
-@@ -1350,8 +1346,7 @@ static void pch_uart_set_termios(struct
- tty_termios_encode_baud_rate(termios, baud, baud);
-
- out:
-- uart_port_unlock(port);
-- spin_unlock_irqrestore(&priv->lock, flags);
-+ uart_port_unlock_irqrestore(port, flags);
- }
-
- static const char *pch_uart_type(struct uart_port *port)
-@@ -1555,7 +1550,6 @@ pch_console_write(struct console *co, co
- {
- struct eg20t_port *priv;
- unsigned long flags;
-- int priv_locked = 1;
- int port_locked = 1;
- u8 ier;
-
-@@ -1565,15 +1559,11 @@ pch_console_write(struct console *co, co
-
- local_irq_save(flags);
- if (priv->port.sysrq) {
-- /* call to uart_handle_sysrq_char already took the priv lock */
-- priv_locked = 0;
- /* serial8250_handle_port() already took the port lock */
- port_locked = 0;
- } else if (oops_in_progress) {
-- priv_locked = spin_trylock(&priv->lock);
- port_locked = uart_port_trylock(&priv->port);
- } else {
-- spin_lock(&priv->lock);
- uart_port_lock(&priv->port);
- }
-
-@@ -1595,8 +1585,6 @@ pch_console_write(struct console *co, co
-
- if (port_locked)
- uart_port_unlock(&priv->port);
-- if (priv_locked)
-- spin_unlock(&priv->lock);
- local_irq_restore(flags);
- }
-
-@@ -1694,8 +1682,6 @@ static struct eg20t_port *pch_uart_init_
- pci_enable_msi(pdev);
- pci_set_master(pdev);
-
-- spin_lock_init(&priv->lock);
--
- iobase = pci_resource_start(pdev, 0);
- mapbase = pci_resource_start(pdev, 1);
- priv->mapbase = mapbase;
diff --git a/debian/patches-rt/0018-printk-nbcon-Provide-function-to-flush-using-write_a.patch b/debian/patches-rt/0018-printk-nbcon-Provide-function-to-flush-using-write_a.patch
new file mode 100644
index 0000000000..bca566898c
--- /dev/null
+++ b/debian/patches-rt/0018-printk-nbcon-Provide-function-to-flush-using-write_a.patch
@@ -0,0 +1,268 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 12 Sep 2023 12:00:08 +0000
+Subject: [PATCH 18/48] printk: nbcon: Provide function to flush using
+ write_atomic()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+Provide nbcon_atomic_flush_pending() to perform flushing of all
+registered nbcon consoles using their write_atomic() callback.
+
+Unlike console_flush_all(), nbcon_atomic_flush_pending() will
+only flush up through the newest record at the time of the
+call. This prevents a CPU from printing unbounded when other
+CPUs are adding records. If new records are added while
+flushing, it is expected that the dedicated printer threads
+will print those records. If the printer thread is not
+available (which is always the case at this point in the
+rework), nbcon_atomic_flush_pending() _will_ flush all records
+in the ringbuffer.
+
+Unlike console_flush_all(), nbcon_atomic_flush_pending() will
+fully flush one console before flushing the next. This helps to
+guarantee that a block of pending records (such as a stack
+trace in an emergency situation) can be printed atomically at
+once before releasing console ownership.
+
+nbcon_atomic_flush_pending() is safe in any context because it
+uses write_atomic() and acquires with unsafe_takeover disabled.
+
+Use it in console_flush_on_panic() before flushing legacy
+consoles. The legacy write() callbacks are not fully safe when
+oops_in_progress is set.
+
+Also use it in nbcon_device_release() to flush records added
+while the driver had the console locked to perform non-printing
+operations.
+
+Co-developed-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Thomas Gleixner (Intel) <tglx@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/internal.h | 2
+ kernel/printk/nbcon.c | 169 ++++++++++++++++++++++++++++++++++++++++++++++-
+ kernel/printk/printk.c | 2
+ 3 files changed, 170 insertions(+), 3 deletions(-)
+
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -85,6 +85,7 @@ bool nbcon_alloc(struct console *con);
+ void nbcon_init(struct console *con, u64 init_seq);
+ void nbcon_free(struct console *con);
+ enum nbcon_prio nbcon_get_default_prio(void);
++void nbcon_atomic_flush_pending(void);
+
+ /*
+ * Check if the given console is currently capable and allowed to print
+@@ -140,6 +141,7 @@ static inline bool nbcon_alloc(struct co
+ static inline void nbcon_init(struct console *con, u64 init_seq) { }
+ static inline void nbcon_free(struct console *con) { }
+ static inline enum nbcon_prio nbcon_get_default_prio(void) { return NBCON_PRIO_NONE; }
++static inline void nbcon_atomic_flush_pending(void) { }
+
+ static inline bool console_is_usable(struct console *con, short flags) { return false; }
+
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -850,7 +850,6 @@ EXPORT_SYMBOL_GPL(nbcon_exit_unsafe);
+ * When true is returned, @wctxt->ctxt.backlog indicates whether there are
+ * still records pending in the ringbuffer,
+ */
+-__maybe_unused
+ static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt)
+ {
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+@@ -953,6 +952,155 @@ enum nbcon_prio nbcon_get_default_prio(v
+ return NBCON_PRIO_NORMAL;
+ }
+
++/*
++ * __nbcon_atomic_flush_pending_con - Flush specified nbcon console using its
++ * write_atomic() callback
++ * @con: The nbcon console to flush
++ * @stop_seq: Flush up until this record
++ *
++ * Return: 0 if @con was flushed up to @stop_seq Otherwise, error code on
++ * failure.
++ *
++ * Errors:
++ *
++ * -EPERM: Unable to acquire console ownership.
++ *
++ * -EAGAIN: Another context took over ownership while printing.
++ *
++ * -ENOENT: A record before @stop_seq is not available.
++ *
++ * If flushing up to @stop_seq was not successful, it only makes sense for the
++ * caller to try again when -EAGAIN was returned. When -EPERM is returned,
++ * this context is not allowed to acquire the console. When -ENOENT is
++ * returned, it cannot be expected that the unfinalized record will become
++ * available.
++ */
++static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
++{
++ struct nbcon_write_context wctxt = { };
++ struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
++ int err = 0;
++
++ ctxt->console = con;
++ ctxt->spinwait_max_us = 2000;
++ ctxt->prio = nbcon_get_default_prio();
++
++ if (!nbcon_context_try_acquire(ctxt))
++ return -EPERM;
++
++ while (nbcon_seq_read(con) < stop_seq) {
++ /*
++ * nbcon_emit_next_record() returns false when the console was
++ * handed over or taken over. In both cases the context is no
++ * longer valid.
++ */
++ if (!nbcon_emit_next_record(&wctxt))
++ return -EAGAIN;
++
++ if (!ctxt->backlog) {
++ /* Are there reserved but not yet finalized records? */
++ if (nbcon_seq_read(con) < stop_seq)
++ err = -ENOENT;
++ break;
++ }
++ }
++
++ nbcon_context_release(ctxt);
++ return err;
++}
++
++/**
++ * nbcon_atomic_flush_pending_con - Flush specified nbcon console using its
++ * write_atomic() callback
++ * @con: The nbcon console to flush
++ * @stop_seq: Flush up until this record
++ *
++ * This will stop flushing before @stop_seq if another context has ownership.
++ * That context is then responsible for the flushing. Likewise, if new records
++ * are added while this context was flushing and there is no other context
++ * to handle the printing, this context must also flush those records.
++ */
++static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
++{
++ unsigned long flags;
++ int err;
++
++again:
++ /*
++ * Atomic flushing does not use console driver synchronization (i.e.
++ * it does not hold the port lock for uart consoles). Therefore IRQs
++ * must be disabled to avoid being interrupted and then calling into
++ * a driver that will deadlock trying to acquire console ownership.
++ */
++ local_irq_save(flags);
++
++ err = __nbcon_atomic_flush_pending_con(con, stop_seq);
++
++ local_irq_restore(flags);
++
++ /*
++ * If there was a new owner (-EPERM, -EAGAIN), that context is
++ * responsible for completing.
++ *
++ * Do not wait for records not yet finalized (-ENOENT) to avoid a
++ * possible deadlock. They will either get flushed by the writer or
++ * eventually skipped on panic CPU.
++ */
++ if (err)
++ return;
++
++ /*
++ * If flushing was successful but more records are available, this
++ * context must flush those remaining records because there is no
++ * other context that will do it.
++ */
++ if (prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
++ stop_seq = prb_next_reserve_seq(prb);
++ goto again;
++ }
++}
++
++/**
++ * __nbcon_atomic_flush_pending - Flush all nbcon consoles using their
++ * write_atomic() callback
++ * @stop_seq: Flush up until this record
++ */
++static void __nbcon_atomic_flush_pending(u64 stop_seq)
++{
++ struct console *con;
++ int cookie;
++
++ cookie = console_srcu_read_lock();
++ for_each_console_srcu(con) {
++ short flags = console_srcu_read_flags(con);
++
++ if (!(flags & CON_NBCON))
++ continue;
++
++ if (!console_is_usable(con, flags))
++ continue;
++
++ if (nbcon_seq_read(con) >= stop_seq)
++ continue;
++
++ nbcon_atomic_flush_pending_con(con, stop_seq);
++ }
++ console_srcu_read_unlock(cookie);
++}
++
++/**
++ * nbcon_atomic_flush_pending - Flush all nbcon consoles using their
++ * write_atomic() callback
++ *
++ * Flush the backlog up through the currently newest record. Any new
++ * records added while flushing will not be flushed. This is to avoid
++ * one CPU printing unbounded because other CPUs continue to add records.
++ */
++void nbcon_atomic_flush_pending(void)
++{
++ __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb));
++}
++
+ /**
+ * nbcon_alloc - Allocate buffers needed by the nbcon console
+ * @con: Console to allocate buffers for
+@@ -1065,8 +1213,23 @@ EXPORT_SYMBOL_GPL(nbcon_device_try_acqui
+ void nbcon_device_release(struct console *con)
+ {
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(con, nbcon_device_ctxt);
++ int cookie;
+
+- if (nbcon_context_exit_unsafe(ctxt))
+- nbcon_context_release(ctxt);
++ if (!nbcon_context_exit_unsafe(ctxt))
++ return;
++
++ nbcon_context_release(ctxt);
++
++ /*
++ * This context must flush any new records added while the console
++ * was locked. The console_srcu_read_lock must be taken to ensure
++ * the console is usable throughout flushing.
++ */
++ cookie = console_srcu_read_lock();
++ if (console_is_usable(con, console_srcu_read_flags(con)) &&
++ prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
++ __nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb));
++ }
++ console_srcu_read_unlock(cookie);
+ }
+ EXPORT_SYMBOL_GPL(nbcon_device_release);
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -3184,6 +3184,8 @@ void console_flush_on_panic(enum con_flu
+ if (mode == CONSOLE_REPLAY_ALL)
+ __console_rewind_all();
+
++ nbcon_atomic_flush_pending();
++
+ console_flush_all(false, &next_seq, &handover);
+ }
+
diff --git a/debian/patches-rt/0018-serial-pch-Use-uart_prepare_sysrq_char.patch b/debian/patches-rt/0018-serial-pch-Use-uart_prepare_sysrq_char.patch
deleted file mode 100644
index 55e6615c9e..0000000000
--- a/debian/patches-rt/0018-serial-pch-Use-uart_prepare_sysrq_char.patch
+++ /dev/null
@@ -1,79 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 1 Mar 2024 22:45:31 +0100
-Subject: [PATCH 18/18] serial: pch: Use uart_prepare_sysrq_char().
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-The port lock is a spinlock_t which is becomes a sleeping lock on PREEMPT_RT.
-The driver splits the locking function into two parts: local_irq_save() and
-uart_port_lock() and this breaks PREEMPT_RT.
-
-Delay handling sysrq until port lock is dropped.
-Remove the special case in the console write routine an always use the
-complete locking function.
-
-Link: https://lore.kernel.org/r/20240301215246.891055-19-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/tty/serial/pch_uart.c | 24 +++++++++---------------
- 1 file changed, 9 insertions(+), 15 deletions(-)
-
---- a/drivers/tty/serial/pch_uart.c
-+++ b/drivers/tty/serial/pch_uart.c
-@@ -564,7 +564,7 @@ static int pch_uart_hal_read(struct eg20
- if (uart_handle_break(port))
- continue;
- }
-- if (uart_handle_sysrq_char(port, rbr))
-+ if (uart_prepare_sysrq_char(port, rbr))
- continue;
-
- buf[i++] = rbr;
-@@ -1070,7 +1070,7 @@ static irqreturn_t pch_uart_interrupt(in
- handled |= (unsigned int)ret;
- }
-
-- uart_port_unlock(&priv->port);
-+ uart_unlock_and_check_sysrq(&priv->port);
- return IRQ_RETVAL(handled);
- }
-
-@@ -1550,22 +1550,17 @@ pch_console_write(struct console *co, co
- {
- struct eg20t_port *priv;
- unsigned long flags;
-- int port_locked = 1;
-+ int locked = 1;
- u8 ier;
-
- priv = pch_uart_ports[co->index];
-
- touch_nmi_watchdog();
-
-- local_irq_save(flags);
-- if (priv->port.sysrq) {
-- /* serial8250_handle_port() already took the port lock */
-- port_locked = 0;
-- } else if (oops_in_progress) {
-- port_locked = uart_port_trylock(&priv->port);
-- } else {
-- uart_port_lock(&priv->port);
-- }
-+ if (oops_in_progress)
-+ locked = uart_port_trylock_irqsave(&priv->port, &flags);
-+ else
-+ uart_port_lock_irqsave(&priv->port, &flags);
-
- /*
- * First save the IER then disable the interrupts
-@@ -1583,9 +1578,8 @@ pch_console_write(struct console *co, co
- wait_for_xmitr(priv, UART_LSR_BOTH_EMPTY);
- iowrite8(ier, priv->membase + UART_IER);
-
-- if (port_locked)
-- uart_port_unlock(&priv->port);
-- local_irq_restore(flags);
-+ if (locked)
-+ uart_port_unlock_irqrestore(&priv->port, flags);
- }
-
- static int __init pch_console_setup(struct console *co, char *options)
diff --git a/debian/patches-rt/0021-printk-Track-registered-boot-consoles.patch b/debian/patches-rt/0019-printk-Track-registered-boot-consoles.patch
index 64ee0c4dbb..29ab9ef99f 100644
--- a/debian/patches-rt/0021-printk-Track-registered-boot-consoles.patch
+++ b/debian/patches-rt/0019-printk-Track-registered-boot-consoles.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 24 Oct 2023 14:13:14 +0000
-Subject: [PATCH 21/48] printk: Track registered boot consoles
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 19/48] printk: Track registered boot consoles
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Unfortunately it is not known if a boot console and a regular
(legacy or nbcon) console use the same hardware. For this reason
@@ -40,8 +40,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_PRINTK
DECLARE_WAIT_QUEUE_HEAD(log_wait);
/* All 3 protected by @syslog_lock. */
-@@ -3518,6 +3526,9 @@ void register_console(struct console *ne
- newcon->seq = 0;
+@@ -3539,6 +3547,9 @@ void register_console(struct console *ne
+ newcon->seq = init_seq;
}
+ if (newcon->flags & CON_BOOT)
@@ -50,16 +50,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* If another context is actively using the hardware of this new
* console, it will not be aware of the nbcon synchronization. This
-@@ -3587,6 +3598,8 @@ EXPORT_SYMBOL(register_console);
- /* Must be called under console_list_lock(). */
+@@ -3609,7 +3620,9 @@ EXPORT_SYMBOL(register_console);
static int unregister_console_locked(struct console *console)
{
+ bool use_device_lock = (console->flags & CON_NBCON) && console->write_atomic;
+ bool found_boot_con = false;
+ unsigned long flags;
+ struct console *c;
int res;
lockdep_assert_console_list_lock_held();
-@@ -3634,6 +3647,17 @@ static int unregister_console_locked(str
+@@ -3667,6 +3680,17 @@ static int unregister_console_locked(str
if (console->exit)
res = console->exit(console);
diff --git a/debian/patches-rt/0020-printk-nbcon-Provide-function-to-flush-using-write_a.patch b/debian/patches-rt/0020-printk-nbcon-Provide-function-to-flush-using-write_a.patch
deleted file mode 100644
index 26c68ed229..0000000000
--- a/debian/patches-rt/0020-printk-nbcon-Provide-function-to-flush-using-write_a.patch
+++ /dev/null
@@ -1,193 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Tue, 12 Sep 2023 12:00:08 +0000
-Subject: [PATCH 20/48] printk: nbcon: Provide function to flush using
- write_atomic()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-Provide nbcon_atomic_flush_pending() to perform flushing of all
-registered nbcon consoles using their write_atomic() callback.
-
-Unlike console_flush_all(), nbcon_atomic_flush_pending() will
-only flush up through the newest record at the time of the
-call. This prevents a CPU from printing unbounded when other
-CPUs are adding records.
-
-Also unlike console_flush_all(), nbcon_atomic_flush_pending()
-will fully flush one console before flushing the next. This
-helps to guarantee that a block of pending records (such as
-a stack trace in an emergency situation) can be printed
-atomically at once before releasing console ownership.
-
-nbcon_atomic_flush_pending() is safe in any context because it
-uses write_atomic() and acquires with unsafe_takeover disabled.
-
-Use it in console_flush_on_panic() before flushing legacy
-consoles. The legacy write() callbacks are not fully safe when
-oops_in_progress is set.
-
-Co-developed-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Thomas Gleixner (Intel) <tglx@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/printk/internal.h | 2
- kernel/printk/nbcon.c | 104 ++++++++++++++++++++++++++++++++++++++++++++++-
- kernel/printk/printk.c | 2
- 3 files changed, 106 insertions(+), 2 deletions(-)
-
---- a/kernel/printk/internal.h
-+++ b/kernel/printk/internal.h
-@@ -84,6 +84,7 @@ void nbcon_seq_force(struct console *con
- bool nbcon_alloc(struct console *con);
- void nbcon_init(struct console *con);
- void nbcon_free(struct console *con);
-+void nbcon_atomic_flush_pending(void);
-
- /*
- * Check if the given console is currently capable and allowed to print
-@@ -138,6 +139,7 @@ static inline void nbcon_seq_force(struc
- static inline bool nbcon_alloc(struct console *con) { return false; }
- static inline void nbcon_init(struct console *con) { }
- static inline void nbcon_free(struct console *con) { }
-+static inline void nbcon_atomic_flush_pending(void) { }
-
- static inline bool console_is_usable(struct console *con, short flags) { return false; }
-
---- a/kernel/printk/nbcon.c
-+++ b/kernel/printk/nbcon.c
-@@ -548,7 +548,6 @@ static struct printk_buffers panic_nbcon
- * in an unsafe state. Otherwise, on success the caller may assume
- * the console is not in an unsafe state.
- */
--__maybe_unused
- static bool nbcon_context_try_acquire(struct nbcon_context *ctxt)
- {
- unsigned int cpu = smp_processor_id();
-@@ -850,7 +849,6 @@ EXPORT_SYMBOL_GPL(nbcon_exit_unsafe);
- * When true is returned, @wctxt->ctxt.backlog indicates whether there are
- * still records pending in the ringbuffer,
- */
--__maybe_unused
- static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt)
- {
- struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
-@@ -938,6 +936,108 @@ static bool nbcon_emit_next_record(struc
- }
-
- /**
-+ * __nbcon_atomic_flush_pending_con - Flush specified nbcon console using its
-+ * write_atomic() callback
-+ * @con: The nbcon console to flush
-+ * @stop_seq: Flush up until this record
-+ *
-+ * Return: True if taken over while printing. Otherwise false.
-+ *
-+ * If flushing up to @stop_seq was not successful, it only makes sense for the
-+ * caller to try again when true was returned. When false is returned, either
-+ * there are no more records available to read or this context is not allowed
-+ * to acquire the console.
-+ */
-+static bool __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
-+{
-+ struct nbcon_write_context wctxt = { };
-+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
-+
-+ ctxt->console = con;
-+ ctxt->spinwait_max_us = 2000;
-+ ctxt->prio = NBCON_PRIO_NORMAL;
-+
-+ if (!nbcon_context_try_acquire(ctxt))
-+ return false;
-+
-+ while (nbcon_seq_read(con) < stop_seq) {
-+ /*
-+ * nbcon_emit_next_record() returns false when the console was
-+ * handed over or taken over. In both cases the context is no
-+ * longer valid.
-+ */
-+ if (!nbcon_emit_next_record(&wctxt))
-+ return true;
-+
-+ if (!ctxt->backlog)
-+ break;
-+ }
-+
-+ nbcon_context_release(ctxt);
-+
-+ return false;
-+}
-+
-+/**
-+ * __nbcon_atomic_flush_pending - Flush all nbcon consoles using their
-+ * write_atomic() callback
-+ * @stop_seq: Flush up until this record
-+ */
-+static void __nbcon_atomic_flush_pending(u64 stop_seq)
-+{
-+ struct console *con;
-+ bool should_retry;
-+ int cookie;
-+
-+ do {
-+ should_retry = false;
-+
-+ cookie = console_srcu_read_lock();
-+ for_each_console_srcu(con) {
-+ short flags = console_srcu_read_flags(con);
-+ unsigned long irq_flags;
-+
-+ if (!(flags & CON_NBCON))
-+ continue;
-+
-+ if (!console_is_usable(con, flags))
-+ continue;
-+
-+ if (nbcon_seq_read(con) >= stop_seq)
-+ continue;
-+
-+ /*
-+ * Atomic flushing does not use console driver
-+ * synchronization (i.e. it does not hold the port
-+ * lock for uart consoles). Therefore IRQs must be
-+ * disabled to avoid being interrupted and then
-+ * calling into a driver that will deadlock trying
-+ * to acquire console ownership.
-+ */
-+ local_irq_save(irq_flags);
-+
-+ should_retry |= __nbcon_atomic_flush_pending_con(con, stop_seq);
-+
-+ local_irq_restore(irq_flags);
-+ }
-+ console_srcu_read_unlock(cookie);
-+ } while (should_retry);
-+}
-+
-+/**
-+ * nbcon_atomic_flush_pending - Flush all nbcon consoles using their
-+ * write_atomic() callback
-+ *
-+ * Flush the backlog up through the currently newest record. Any new
-+ * records added while flushing will not be flushed. This is to avoid
-+ * one CPU printing unbounded because other CPUs continue to add records.
-+ */
-+void nbcon_atomic_flush_pending(void)
-+{
-+ __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb));
-+}
-+
-+/**
- * nbcon_alloc - Allocate buffers needed by the nbcon console
- * @con: Console to allocate buffers for
- *
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -3177,6 +3177,8 @@ void console_flush_on_panic(enum con_flu
- console_srcu_read_unlock(cookie);
- }
-
-+ nbcon_atomic_flush_pending();
-+
- console_flush_all(false, &next_seq, &handover);
- }
-
diff --git a/debian/patches-rt/0022-printk-nbcon-Use-nbcon-consoles-in-console_flush_all.patch b/debian/patches-rt/0020-printk-nbcon-Use-nbcon-consoles-in-console_flush_all.patch
index 02c868a001..934e1b556c 100644
--- a/debian/patches-rt/0022-printk-nbcon-Use-nbcon-consoles-in-console_flush_all.patch
+++ b/debian/patches-rt/0020-printk-nbcon-Use-nbcon-consoles-in-console_flush_all.patch
@@ -1,8 +1,8 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 19 Sep 2023 14:33:27 +0000
-Subject: [PATCH 22/48] printk: nbcon: Use nbcon consoles in
+Subject: [PATCH 20/48] printk: nbcon: Use nbcon consoles in
console_flush_all()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Allow nbcon consoles to print messages in the legacy printk()
caller context (printing via unlock) by integrating them into
@@ -13,15 +13,22 @@ Provide nbcon_legacy_emit_next_record(), which acts as the
nbcon variant of console_emit_next_record(). Call this variant
within console_flush_all() for nbcon consoles. Since nbcon
consoles use their own @nbcon_seq variable to track the next
-record to print, this also must be appropriately handled.
+record to print, this also must be appropriately handled in
+console_flush_all().
+
+Note that the legacy printing logic uses @handover to detect
+handovers for printing all consoles. For nbcon consoles,
+handovers/takeovers occur on a per-console basis and thus do
+not cause the console_flush_all() loop to abort.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/printk/internal.h | 6 +++
- kernel/printk/nbcon.c | 77 +++++++++++++++++++++++++++++++++++++++++++++++
- kernel/printk/printk.c | 17 +++++++---
- 3 files changed, 95 insertions(+), 5 deletions(-)
+ kernel/printk/nbcon.c | 87 +++++++++++++++++++++++++++++++++++++++++++++++
+ kernel/printk/printk.c | 17 ++++++---
+ 3 files changed, 105 insertions(+), 5 deletions(-)
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -34,18 +41,18 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
u64 nbcon_seq_read(struct console *con);
void nbcon_seq_force(struct console *con, u64 seq);
-@@ -85,6 +87,8 @@ bool nbcon_alloc(struct console *con);
- void nbcon_init(struct console *con);
+@@ -86,6 +88,8 @@ void nbcon_init(struct console *con, u64
void nbcon_free(struct console *con);
+ enum nbcon_prio nbcon_get_default_prio(void);
void nbcon_atomic_flush_pending(void);
+bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
+ int cookie);
/*
* Check if the given console is currently capable and allowed to print
-@@ -140,6 +144,8 @@ static inline bool nbcon_alloc(struct co
- static inline void nbcon_init(struct console *con) { }
+@@ -142,6 +146,8 @@ static inline void nbcon_init(struct con
static inline void nbcon_free(struct console *con) { }
+ static inline enum nbcon_prio nbcon_get_default_prio(void) { return NBCON_PRIO_NONE; }
static inline void nbcon_atomic_flush_pending(void) { }
+static inline bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
+ int cookie) { return false; }
@@ -54,24 +61,21 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/nbcon.c
+++ b/kernel/printk/nbcon.c
-@@ -541,6 +541,7 @@ static struct printk_buffers panic_nbcon
- * nbcon_context_try_acquire - Try to acquire nbcon console
- * @ctxt: The context of the caller
- *
-+ * Context: Any context which could not be migrated to another CPU.
- * Return: True if the console was acquired. False otherwise.
- *
- * If the caller allowed an unsafe hostile takeover, on success the
-@@ -936,6 +937,82 @@ static bool nbcon_emit_next_record(struc
+@@ -953,6 +953,93 @@ enum nbcon_prio nbcon_get_default_prio(v
}
- /**
+ /*
+ * nbcon_atomic_emit_one - Print one record for an nbcon console using the
+ * write_atomic() callback
+ * @wctxt: An initialized write context struct to use for this context
+ *
-+ * Return: False if it is known there are no more records to print,
-+ * otherwise true.
++ * Return: True, when a record has been printed and there are still
++ * pending records. The caller might want to continue flushing.
++ *
++ * False, when there is no pending record, or when the console
++ * context cannot be acquired, or the ownership has been lost.
++ * The caller should give up. Either the job is done, cannot be
++ * done, or will be handled by the owning context.
+ *
+ * This is an internal helper to handle the locking of the console before
+ * calling nbcon_emit_next_record().
@@ -81,15 +85,18 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+
+ if (!nbcon_context_try_acquire(ctxt))
-+ return true;
++ return false;
+
+ /*
+ * nbcon_emit_next_record() returns false when the console was
+ * handed over or taken over. In both cases the context is no
+ * longer valid.
++ *
++ * The higher priority printing context takes over responsibility
++ * to print the pending records.
+ */
+ if (!nbcon_emit_next_record(wctxt))
-+ return true;
++ return false;
+
+ nbcon_context_release(ctxt);
+
@@ -107,8 +114,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ * @cookie: The cookie from the SRCU read lock.
+ *
+ * Context: Any context except NMI.
-+ * Return: False if the given console has no next record to print,
-+ * otherwise true.
++ * Return: True, when a record has been printed and there are still
++ * pending records. The caller might want to continue flushing.
++ *
++ * False, when there is no pending record, or when the console
++ * context cannot be acquired, or the ownership has been lost.
++ * The caller should give up. Either the job is done, cannot be
++ * done, or will be handled by the owning context.
+ *
+ * This function is meant to be called by console_flush_all() to print records
+ * on nbcon consoles from legacy context (printing via console unlocking).
@@ -122,15 +134,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ unsigned long flags;
+ bool progress;
+
-+ *handover = false;
-+
+ /* Use the same procedure as console_emit_next_record(). */
+ printk_safe_enter_irqsave(flags);
+ console_lock_spinning_enable();
+ stop_critical_timings();
+
+ ctxt->console = con;
-+ ctxt->prio = NBCON_PRIO_NORMAL;
++ ctxt->prio = nbcon_get_default_prio();
+
+ progress = nbcon_atomic_emit_one(&wctxt);
+
@@ -147,7 +157,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* @con: The nbcon console to flush
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1871,7 +1871,7 @@ static bool console_waiter;
+@@ -1860,7 +1860,7 @@ static bool console_waiter;
* there may be a waiter spinning (like a spinlock). Also it must be
* ready to hand over the lock at the end of the section.
*/
@@ -156,7 +166,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
/*
* Do not use spinning in panic(). The panic CPU wants to keep the lock.
-@@ -1910,7 +1910,7 @@ static void console_lock_spinning_enable
+@@ -1899,7 +1899,7 @@ static void console_lock_spinning_enable
*
* Return: 1 if the lock rights were passed, 0 otherwise.
*/
@@ -165,7 +175,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
int waiter;
-@@ -2956,13 +2956,20 @@ static bool console_flush_all(bool do_co
+@@ -2951,13 +2951,20 @@ static bool console_flush_all(bool do_co
cookie = console_srcu_read_lock();
for_each_console_srcu(con) {
short flags = console_srcu_read_flags(con);
@@ -187,7 +197,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* If a handover has occurred, the SRCU read lock
-@@ -2972,8 +2979,8 @@ static bool console_flush_all(bool do_co
+@@ -2967,8 +2974,8 @@ static bool console_flush_all(bool do_co
return false;
/* Track the next of the highest seq flushed. */
diff --git a/debian/patches-rt/0021-printk-nbcon-Add-unsafe-flushing-on-panic.patch b/debian/patches-rt/0021-printk-nbcon-Add-unsafe-flushing-on-panic.patch
new file mode 100644
index 0000000000..424d8dc3f1
--- /dev/null
+++ b/debian/patches-rt/0021-printk-nbcon-Add-unsafe-flushing-on-panic.patch
@@ -0,0 +1,159 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Fri, 20 Oct 2023 10:03:42 +0000
+Subject: [PATCH 21/48] printk: nbcon: Add unsafe flushing on panic
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+Add nbcon_atomic_flush_unsafe() to flush all nbcon consoles
+using the write_atomic() callback and allowing unsafe hostile
+takeovers. Call this at the end of panic() as a final attempt
+to flush any pending messages.
+
+Note that legacy consoles use unsafe methods for flushing
+from the beginning of panic (see bust_spinlocks()). Therefore,
+systems using both legacy and nbcon consoles may still fail to
+see panic messages due to unsafe legacy console usage.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/printk.h | 5 +++++
+ kernel/panic.c | 1 +
+ kernel/printk/nbcon.c | 32 +++++++++++++++++++++++++-------
+ 3 files changed, 31 insertions(+), 7 deletions(-)
+
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -198,6 +198,7 @@ void printk_trigger_flush(void);
+ void console_replay_all(void);
+ extern bool nbcon_device_try_acquire(struct console *con);
+ extern void nbcon_device_release(struct console *con);
++void nbcon_atomic_flush_unsafe(void);
+ #else
+ static inline __printf(1, 0)
+ int vprintk(const char *s, va_list args)
+@@ -291,6 +292,10 @@ static inline void nbcon_device_release(
+ {
+ }
+
++static inline void nbcon_atomic_flush_unsafe(void)
++{
++}
++
+ #endif
+
+ bool this_cpu_in_panic(void);
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -456,6 +456,7 @@ void panic(const char *fmt, ...)
+ * Explicitly flush the kernel log buffer one last time.
+ */
+ console_flush_on_panic(CONSOLE_FLUSH_PENDING);
++ nbcon_atomic_flush_unsafe();
+
+ local_irq_enable();
+ for (i = 0; ; i += PANIC_TIMER_STEP) {
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -1044,6 +1044,7 @@ bool nbcon_legacy_emit_next_record(struc
+ * write_atomic() callback
+ * @con: The nbcon console to flush
+ * @stop_seq: Flush up until this record
++ * @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
+ *
+ * Return: 0 if @con was flushed up to @stop_seq Otherwise, error code on
+ * failure.
+@@ -1062,7 +1063,8 @@ bool nbcon_legacy_emit_next_record(struc
+ * returned, it cannot be expected that the unfinalized record will become
+ * available.
+ */
+-static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
++static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
++ bool allow_unsafe_takeover)
+ {
+ struct nbcon_write_context wctxt = { };
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
+@@ -1071,6 +1073,7 @@ static int __nbcon_atomic_flush_pending_
+ ctxt->console = con;
+ ctxt->spinwait_max_us = 2000;
+ ctxt->prio = nbcon_get_default_prio();
++ ctxt->allow_unsafe_takeover = allow_unsafe_takeover;
+
+ if (!nbcon_context_try_acquire(ctxt))
+ return -EPERM;
+@@ -1101,13 +1104,15 @@ static int __nbcon_atomic_flush_pending_
+ * write_atomic() callback
+ * @con: The nbcon console to flush
+ * @stop_seq: Flush up until this record
++ * @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
+ *
+ * This will stop flushing before @stop_seq if another context has ownership.
+ * That context is then responsible for the flushing. Likewise, if new records
+ * are added while this context was flushing and there is no other context
+ * to handle the printing, this context must also flush those records.
+ */
+-static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
++static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
++ bool allow_unsafe_takeover)
+ {
+ unsigned long flags;
+ int err;
+@@ -1121,7 +1126,7 @@ static void nbcon_atomic_flush_pending_c
+ */
+ local_irq_save(flags);
+
+- err = __nbcon_atomic_flush_pending_con(con, stop_seq);
++ err = __nbcon_atomic_flush_pending_con(con, stop_seq, allow_unsafe_takeover);
+
+ local_irq_restore(flags);
+
+@@ -1151,8 +1156,9 @@ static void nbcon_atomic_flush_pending_c
+ * __nbcon_atomic_flush_pending - Flush all nbcon consoles using their
+ * write_atomic() callback
+ * @stop_seq: Flush up until this record
++ * @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
+ */
+-static void __nbcon_atomic_flush_pending(u64 stop_seq)
++static void __nbcon_atomic_flush_pending(u64 stop_seq, bool allow_unsafe_takeover)
+ {
+ struct console *con;
+ int cookie;
+@@ -1170,7 +1176,7 @@ static void __nbcon_atomic_flush_pending
+ if (nbcon_seq_read(con) >= stop_seq)
+ continue;
+
+- nbcon_atomic_flush_pending_con(con, stop_seq);
++ nbcon_atomic_flush_pending_con(con, stop_seq, allow_unsafe_takeover);
+ }
+ console_srcu_read_unlock(cookie);
+ }
+@@ -1185,7 +1191,19 @@ static void __nbcon_atomic_flush_pending
+ */
+ void nbcon_atomic_flush_pending(void)
+ {
+- __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb));
++ __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb), false);
++}
++
++/**
++ * nbcon_atomic_flush_unsafe - Flush all nbcon consoles using their
++ * write_atomic() callback and allowing unsafe hostile takeovers
++ *
++ * Flush the backlog up through the currently newest record. Unsafe hostile
++ * takeovers will be performed, if necessary.
++ */
++void nbcon_atomic_flush_unsafe(void)
++{
++ __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb), true);
+ }
+
+ /**
+@@ -1315,7 +1333,7 @@ void nbcon_device_release(struct console
+ cookie = console_srcu_read_lock();
+ if (console_is_usable(con, console_srcu_read_flags(con)) &&
+ prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
+- __nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb));
++ __nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb), false);
+ }
+ console_srcu_read_unlock(cookie);
+ }
diff --git a/debian/patches-rt/0025-printk-Avoid-console_lock-dance-if-no-legacy-or-boot.patch b/debian/patches-rt/0022-printk-Avoid-console_lock-dance-if-no-legacy-or-boot.patch
index 8156ae1d14..01d254db59 100644
--- a/debian/patches-rt/0025-printk-Avoid-console_lock-dance-if-no-legacy-or-boot.patch
+++ b/debian/patches-rt/0022-printk-Avoid-console_lock-dance-if-no-legacy-or-boot.patch
@@ -1,8 +1,8 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 26 Sep 2023 12:44:07 +0000
-Subject: [PATCH 25/48] printk: Avoid console_lock dance if no legacy or boot
+Subject: [PATCH 22/48] printk: Avoid console_lock dance if no legacy or boot
consoles
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Currently the console lock is used to attempt legacy-type
printing even if there are no legacy or boot consoles registered.
@@ -15,10 +15,11 @@ do not use the console lock if there are no boot consoles
and no legacy consoles.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/printk/printk.c | 79 +++++++++++++++++++++++++++++++++++++++----------
- 1 file changed, 64 insertions(+), 15 deletions(-)
+ kernel/printk/printk.c | 78 +++++++++++++++++++++++++++++++++++++++----------
+ 1 file changed, 63 insertions(+), 15 deletions(-)
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -51,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_PRINTK
DECLARE_WAIT_QUEUE_HEAD(log_wait);
/* All 3 protected by @syslog_lock. */
-@@ -2344,7 +2359,7 @@ asmlinkage int vprintk_emit(int facility
+@@ -2339,7 +2354,7 @@ asmlinkage int vprintk_emit(int facility
printed_len = vprintk_store(facility, level, dev_info, fmt, args);
/* If called from the scheduler, we can not call up(). */
@@ -60,7 +61,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* The caller may be holding system-critical or
* timing-sensitive locks. Disable preemption during
-@@ -2653,7 +2668,7 @@ void resume_console(void)
+@@ -2648,7 +2663,7 @@ void resume_console(void)
*/
static int console_cpu_notify(unsigned int cpu)
{
@@ -69,7 +70,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* If trylock fails, someone else is doing the printing */
if (console_trylock())
console_unlock();
-@@ -3194,7 +3209,8 @@ void console_flush_on_panic(enum con_flu
+@@ -3201,7 +3216,8 @@ void console_flush_on_panic(enum con_flu
nbcon_atomic_flush_pending();
@@ -79,24 +80,23 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -3531,6 +3547,8 @@ void register_console(struct console *ne
- */
- nbcon_seq_force(newcon, newcon->seq);
- newcon->seq = 0;
-+ } else {
+@@ -3551,6 +3567,7 @@ void register_console(struct console *ne
+ if (newcon->flags & CON_NBCON) {
+ nbcon_init(newcon, init_seq);
+ } else {
+ have_legacy_console = true;
+ newcon->seq = init_seq;
}
- if (newcon->flags & CON_BOOT)
-@@ -3605,6 +3623,7 @@ EXPORT_SYMBOL(register_console);
- /* Must be called under console_list_lock(). */
+@@ -3627,6 +3644,7 @@ EXPORT_SYMBOL(register_console);
static int unregister_console_locked(struct console *console)
{
+ bool use_device_lock = (console->flags & CON_NBCON) && console->write_atomic;
+ bool found_legacy_con = false;
bool found_boot_con = false;
+ unsigned long flags;
struct console *c;
- int res;
-@@ -3661,9 +3680,13 @@ static int unregister_console_locked(str
+@@ -3694,9 +3712,13 @@ static int unregister_console_locked(str
for_each_console(c) {
if (c->flags & CON_BOOT)
found_boot_con = true;
@@ -110,7 +110,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return res;
}
-@@ -3824,22 +3847,34 @@ static bool __pr_flush(struct console *c
+@@ -3857,22 +3879,34 @@ static bool __pr_flush(struct console *c
seq = prb_next_reserve_seq(prb);
/* Flush the consoles so that records up to @seq are printed. */
@@ -154,7 +154,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
cookie = console_srcu_read_lock();
for_each_console_srcu(c) {
-@@ -3859,6 +3894,7 @@ static bool __pr_flush(struct console *c
+@@ -3892,6 +3926,7 @@ static bool __pr_flush(struct console *c
if (flags & CON_NBCON) {
printk_seq = nbcon_seq_read(c);
} else {
@@ -162,7 +162,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
printk_seq = c->seq;
}
-@@ -3870,7 +3906,8 @@ static bool __pr_flush(struct console *c
+@@ -3903,7 +3938,8 @@ static bool __pr_flush(struct console *c
if (diff != last_diff && reset_on_progress)
remaining_jiffies = timeout_jiffies;
@@ -172,7 +172,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Note: @diff is 0 if there are no usable consoles. */
if (diff == 0 || remaining_jiffies == 0)
-@@ -3940,6 +3977,7 @@ static void __wake_up_klogd(int val)
+@@ -3973,6 +4009,7 @@ static void __wake_up_klogd(int val)
return;
preempt_disable();
@@ -180,7 +180,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Guarantee any new records can be seen by tasks preparing to wait
* before this context checks if the wait queue is empty.
-@@ -3951,11 +3989,22 @@ static void __wake_up_klogd(int val)
+@@ -3984,11 +4021,22 @@ static void __wake_up_klogd(int val)
*
* This pairs with devkmsg_read:A and syslog_print:A.
*/
diff --git a/debian/patches-rt/0026-printk-Track-nbcon-consoles.patch b/debian/patches-rt/0023-printk-Track-nbcon-consoles.patch
index 7c54a03775..c8f9a2f9a2 100644
--- a/debian/patches-rt/0026-printk-Track-nbcon-consoles.patch
+++ b/debian/patches-rt/0023-printk-Track-nbcon-consoles.patch
@@ -1,13 +1,14 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Mon, 11 Dec 2023 09:36:52 +0000
-Subject: [PATCH 26/48] printk: Track nbcon consoles
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 23/48] printk: Track nbcon consoles
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Add a global flag @have_nbcon_console to identify if any nbcon
consoles are registered. This will be used in follow-up commits
to preserve legacy behavior when no nbcon consoles are registered.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/printk/printk.c | 14 +++++++++++++-
@@ -27,23 +28,23 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Specifies if a boot console is registered. If boot consoles are present,
* nbcon consoles cannot print simultaneously and must be synchronized by
* the console lock. This is because boot consoles and nbcon consoles may
-@@ -3538,6 +3543,7 @@ void register_console(struct console *ne
- console_init_seq(newcon, bootcon_registered);
+@@ -3565,6 +3570,7 @@ void register_console(struct console *ne
+ init_seq = get_init_console_seq(newcon, bootcon_registered);
if (newcon->flags & CON_NBCON) {
+ have_nbcon_console = true;
- nbcon_init(newcon);
-
- /*
-@@ -3624,6 +3630,7 @@ EXPORT_SYMBOL(register_console);
- static int unregister_console_locked(struct console *console)
+ nbcon_init(newcon, init_seq);
+ } else {
+ have_legacy_console = true;
+@@ -3645,6 +3651,7 @@ static int unregister_console_locked(str
{
+ bool use_device_lock = (console->flags & CON_NBCON) && console->write_atomic;
bool found_legacy_con = false;
+ bool found_nbcon_con = false;
bool found_boot_con = false;
+ unsigned long flags;
struct console *c;
- int res;
-@@ -3680,13 +3687,18 @@ static int unregister_console_locked(str
+@@ -3712,13 +3719,18 @@ static int unregister_console_locked(str
for_each_console(c) {
if (c->flags & CON_BOOT)
found_boot_con = true;
diff --git a/debian/patches-rt/0023-printk-nbcon-Assign-priority-based-on-CPU-state.patch b/debian/patches-rt/0023-printk-nbcon-Assign-priority-based-on-CPU-state.patch
deleted file mode 100644
index ff275e2ab9..0000000000
--- a/debian/patches-rt/0023-printk-nbcon-Assign-priority-based-on-CPU-state.patch
+++ /dev/null
@@ -1,84 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Thu, 14 Dec 2023 14:38:42 +0000
-Subject: [PATCH 23/48] printk: nbcon: Assign priority based on CPU state
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-Use the current state of the CPU to determine which priority to
-assign to the printing context.
-
-The EMERGENCY priority handling is added in a follow-up commit.
-It will use a per-CPU variable.
-
-Note: nbcon_driver_acquire(), which is used by console drivers
- to acquire the nbcon console for non-printing activities,
- will always use NORMAL priority.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Reviewed-by: Petr Mladek <pmladek@suse.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/printk/internal.h | 2 ++
- kernel/printk/nbcon.c | 20 ++++++++++++++++++--
- 2 files changed, 20 insertions(+), 2 deletions(-)
-
---- a/kernel/printk/internal.h
-+++ b/kernel/printk/internal.h
-@@ -86,6 +86,7 @@ void nbcon_seq_force(struct console *con
- bool nbcon_alloc(struct console *con);
- void nbcon_init(struct console *con);
- void nbcon_free(struct console *con);
-+enum nbcon_prio nbcon_get_default_prio(void);
- void nbcon_atomic_flush_pending(void);
- bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
- int cookie);
-@@ -143,6 +144,7 @@ static inline void nbcon_seq_force(struc
- static inline bool nbcon_alloc(struct console *con) { return false; }
- static inline void nbcon_init(struct console *con) { }
- static inline void nbcon_free(struct console *con) { }
-+static inline enum nbcon_prio nbcon_get_default_prio(void) { return NBCON_PRIO_NONE; }
- static inline void nbcon_atomic_flush_pending(void) { }
- static inline bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
- int cookie) { return false; }
---- a/kernel/printk/nbcon.c
-+++ b/kernel/printk/nbcon.c
-@@ -968,6 +968,22 @@ static bool nbcon_atomic_emit_one(struct
- }
-
- /**
-+ * nbcon_get_default_prio - The appropriate nbcon priority to use for nbcon
-+ * printing on the current CPU
-+ *
-+ * Context: Any context which could not be migrated to another CPU.
-+ * Return: The nbcon_prio to use for acquiring an nbcon console in this
-+ * context for printing.
-+ */
-+enum nbcon_prio nbcon_get_default_prio(void)
-+{
-+ if (this_cpu_in_panic())
-+ return NBCON_PRIO_PANIC;
-+
-+ return NBCON_PRIO_NORMAL;
-+}
-+
-+/**
- * nbcon_legacy_emit_next_record - Print one record for an nbcon console
- * in legacy contexts
- * @con: The console to print on
-@@ -1001,7 +1017,7 @@ bool nbcon_legacy_emit_next_record(struc
- stop_critical_timings();
-
- ctxt->console = con;
-- ctxt->prio = NBCON_PRIO_NORMAL;
-+ ctxt->prio = nbcon_get_default_prio();
-
- progress = nbcon_atomic_emit_one(&wctxt);
-
-@@ -1032,7 +1048,7 @@ static bool __nbcon_atomic_flush_pending
-
- ctxt->console = con;
- ctxt->spinwait_max_us = 2000;
-- ctxt->prio = NBCON_PRIO_NORMAL;
-+ ctxt->prio = nbcon_get_default_prio();
-
- if (!nbcon_context_try_acquire(ctxt))
- return false;
diff --git a/debian/patches-rt/0027-printk-Coordinate-direct-printing-in-panic.patch b/debian/patches-rt/0024-printk-Coordinate-direct-printing-in-panic.patch
index 4893dd463f..c318b1ee55 100644
--- a/debian/patches-rt/0027-printk-Coordinate-direct-printing-in-panic.patch
+++ b/debian/patches-rt/0024-printk-Coordinate-direct-printing-in-panic.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Wed, 22 Nov 2023 11:56:58 +0000
-Subject: [PATCH 27/48] printk: Coordinate direct printing in panic
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 24/48] printk: Coordinate direct printing in panic
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Perform printing by nbcon consoles on the panic CPU from the
printk() caller context in order to get panic messages printed
@@ -18,6 +18,7 @@ behavior (i.e. legacy consoles will always attempt to print
from the printk() caller context).
Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/printk.h | 5 +++
@@ -27,15 +28,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
-@@ -195,6 +195,7 @@ void show_regs_print_info(const char *lo
- extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold;
+@@ -196,6 +196,7 @@ extern asmlinkage void dump_stack_lvl(co
extern asmlinkage void dump_stack(void) __cold;
void printk_trigger_flush(void);
+ void console_replay_all(void);
+void printk_legacy_allow_panic_sync(void);
- extern void nbcon_driver_acquire(struct console *con);
- extern void nbcon_driver_release(struct console *con);
+ extern bool nbcon_device_try_acquire(struct console *con);
+ extern void nbcon_device_release(struct console *con);
void nbcon_atomic_flush_unsafe(void);
-@@ -278,6 +279,10 @@ static inline void printk_trigger_flush(
+@@ -283,6 +284,10 @@ static inline void console_replay_all(vo
{
}
@@ -43,12 +44,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+{
+}
+
- static inline void nbcon_driver_acquire(struct console *con)
+ static inline bool nbcon_device_try_acquire(struct console *con)
{
- }
+ return false;
--- a/kernel/panic.c
+++ b/kernel/panic.c
-@@ -364,6 +364,8 @@ void panic(const char *fmt, ...)
+@@ -367,6 +367,8 @@ void panic(const char *fmt, ...)
panic_other_cpus_shutdown(_crash_kexec_post_notifiers);
@@ -70,7 +71,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
static bool have_nbcon_console;
-@@ -2335,12 +2337,29 @@ int vprintk_store(int facility, int leve
+@@ -2330,12 +2332,29 @@ int vprintk_store(int facility, int leve
return ret;
}
@@ -101,7 +102,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Suppress unimportant messages after panic happens */
if (unlikely(suppress_printk))
-@@ -2356,15 +2375,42 @@ asmlinkage int vprintk_emit(int facility
+@@ -2351,15 +2370,42 @@ asmlinkage int vprintk_emit(int facility
if (level == LOGLEVEL_SCHED) {
level = LOGLEVEL_DEFAULT;
@@ -147,7 +148,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* The caller may be holding system-critical or
* timing-sensitive locks. Disable preemption during
-@@ -2384,10 +2430,10 @@ asmlinkage int vprintk_emit(int facility
+@@ -2379,10 +2425,10 @@ asmlinkage int vprintk_emit(int facility
preempt_enable();
}
diff --git a/debian/patches-rt/0024-printk-nbcon-Add-unsafe-flushing-on-panic.patch b/debian/patches-rt/0024-printk-nbcon-Add-unsafe-flushing-on-panic.patch
deleted file mode 100644
index a63a8ac515..0000000000
--- a/debian/patches-rt/0024-printk-nbcon-Add-unsafe-flushing-on-panic.patch
+++ /dev/null
@@ -1,125 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Fri, 20 Oct 2023 10:03:42 +0000
-Subject: [PATCH 24/48] printk: nbcon: Add unsafe flushing on panic
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-Add nbcon_atomic_flush_unsafe() to flush all nbcon consoles
-using the write_atomic() callback and allowing unsafe hostile
-takeovers. Call this at the end of panic() as a final attempt
-to flush any pending messages.
-
-Note that legacy consoles use unsafe methods for flushing
-from the beginning of panic (see bust_spinlocks()). Therefore,
-systems using both legacy and nbcon consoles may still fail to
-see panic messages due to unsafe legacy console usage.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/printk.h | 5 +++++
- kernel/panic.c | 1 +
- kernel/printk/nbcon.c | 26 +++++++++++++++++++++-----
- 3 files changed, 27 insertions(+), 5 deletions(-)
-
---- a/include/linux/printk.h
-+++ b/include/linux/printk.h
-@@ -197,6 +197,7 @@ extern asmlinkage void dump_stack(void)
- void printk_trigger_flush(void);
- extern void nbcon_driver_acquire(struct console *con);
- extern void nbcon_driver_release(struct console *con);
-+void nbcon_atomic_flush_unsafe(void);
- #else
- static inline __printf(1, 0)
- int vprintk(const char *s, va_list args)
-@@ -285,6 +286,10 @@ static inline void nbcon_driver_release(
- {
- }
-
-+static inline void nbcon_atomic_flush_unsafe(void)
-+{
-+}
-+
- #endif
-
- bool this_cpu_in_panic(void);
---- a/kernel/panic.c
-+++ b/kernel/panic.c
-@@ -453,6 +453,7 @@ void panic(const char *fmt, ...)
- * Explicitly flush the kernel log buffer one last time.
- */
- console_flush_on_panic(CONSOLE_FLUSH_PENDING);
-+ nbcon_atomic_flush_unsafe();
-
- local_irq_enable();
- for (i = 0; ; i += PANIC_TIMER_STEP) {
---- a/kernel/printk/nbcon.c
-+++ b/kernel/printk/nbcon.c
-@@ -1033,6 +1033,7 @@ bool nbcon_legacy_emit_next_record(struc
- * write_atomic() callback
- * @con: The nbcon console to flush
- * @stop_seq: Flush up until this record
-+ * @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
- *
- * Return: True if taken over while printing. Otherwise false.
- *
-@@ -1041,7 +1042,8 @@ bool nbcon_legacy_emit_next_record(struc
- * there are no more records available to read or this context is not allowed
- * to acquire the console.
- */
--static bool __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
-+static bool __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
-+ bool allow_unsafe_takeover)
- {
- struct nbcon_write_context wctxt = { };
- struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
-@@ -1049,6 +1051,7 @@ static bool __nbcon_atomic_flush_pending
- ctxt->console = con;
- ctxt->spinwait_max_us = 2000;
- ctxt->prio = nbcon_get_default_prio();
-+ ctxt->allow_unsafe_takeover = allow_unsafe_takeover;
-
- if (!nbcon_context_try_acquire(ctxt))
- return false;
-@@ -1075,8 +1078,9 @@ static bool __nbcon_atomic_flush_pending
- * __nbcon_atomic_flush_pending - Flush all nbcon consoles using their
- * write_atomic() callback
- * @stop_seq: Flush up until this record
-+ * @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
- */
--static void __nbcon_atomic_flush_pending(u64 stop_seq)
-+static void __nbcon_atomic_flush_pending(u64 stop_seq, bool allow_unsafe_takeover)
- {
- struct console *con;
- bool should_retry;
-@@ -1109,8 +1113,8 @@ static void __nbcon_atomic_flush_pending
- */
- local_irq_save(irq_flags);
-
-- should_retry |= __nbcon_atomic_flush_pending_con(con, stop_seq);
--
-+ should_retry |= __nbcon_atomic_flush_pending_con(con, stop_seq,
-+ allow_unsafe_takeover);
- local_irq_restore(irq_flags);
- }
- console_srcu_read_unlock(cookie);
-@@ -1127,7 +1131,19 @@ static void __nbcon_atomic_flush_pending
- */
- void nbcon_atomic_flush_pending(void)
- {
-- __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb));
-+ __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb), false);
-+}
-+
-+/**
-+ * nbcon_atomic_flush_unsafe - Flush all nbcon consoles using their
-+ * write_atomic() callback and allowing unsafe hostile takeovers
-+ *
-+ * Flush the backlog up through the currently newest record. Unsafe hostile
-+ * takeovers will be performed, if necessary.
-+ */
-+void nbcon_atomic_flush_unsafe(void)
-+{
-+ __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb), true);
- }
-
- /**
diff --git a/debian/patches-rt/0028-printk-nbcon-Implement-emergency-sections.patch b/debian/patches-rt/0025-printk-nbcon-Implement-emergency-sections.patch
index f3a05868e7..7370f8513f 100644
--- a/debian/patches-rt/0028-printk-nbcon-Implement-emergency-sections.patch
+++ b/debian/patches-rt/0025-printk-nbcon-Implement-emergency-sections.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 11 Sep 2023 15:21:57 +0000
-Subject: [PATCH 28/48] printk: nbcon: Implement emergency sections
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 25/48] printk: nbcon: Implement emergency sections
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
In emergency situations (something has gone wrong but the
system continues to operate), usually important information
@@ -26,8 +26,8 @@ Do not print if the current CPU is in an emergency state.
When exiting all emergency nesting, flush nbcon consoles
directly using their atomic callback. Legacy consoles are
-triggered for flushing via irq_work because it is not known
-if the context was safe for a trylock on the console lock.
+flushed directly if safe, otherwise they are triggered for
+flushing via irq_work.
Note that the emergency state is not system-wide. While one CPU
is in an emergency state, another CPU may continue to print
@@ -36,30 +36,64 @@ console messages.
Co-developed-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Thomas Gleixner (Intel) <tglx@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/console.h | 4 ++
- kernel/printk/nbcon.c | 83 ++++++++++++++++++++++++++++++++++++++++++++++++
- kernel/printk/printk.c | 13 ++++++-
- 3 files changed, 98 insertions(+), 2 deletions(-)
+ include/linux/console.h | 6 ++
+ kernel/printk/internal.h | 13 ++++
+ kernel/printk/nbcon.c | 126 ++++++++++++++++++++++++++++++++++++++++++++
+ kernel/printk/printk.c | 25 ++++----
+ kernel/printk/printk_safe.c | 11 +++
+ 5 files changed, 168 insertions(+), 13 deletions(-)
--- a/include/linux/console.h
+++ b/include/linux/console.h
-@@ -559,10 +559,14 @@ static inline bool console_is_registered
+@@ -553,10 +553,16 @@ static inline bool console_is_registered
hlist_for_each_entry(con, &console_list, node)
#ifdef CONFIG_PRINTK
+extern void nbcon_cpu_emergency_enter(void);
+extern void nbcon_cpu_emergency_exit(void);
++extern void nbcon_cpu_emergency_flush(void);
extern bool nbcon_can_proceed(struct nbcon_write_context *wctxt);
extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt);
extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt);
#else
+static inline void nbcon_cpu_emergency_enter(void) { }
+static inline void nbcon_cpu_emergency_exit(void) { }
++static inline void nbcon_cpu_emergency_flush(void) { }
static inline bool nbcon_can_proceed(struct nbcon_write_context *wctxt) { return false; }
static inline bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) { return false; }
static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; }
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -76,6 +76,8 @@ bool printk_percpu_data_ready(void);
+
+ void defer_console_output(void);
+
++bool is_printk_deferred(void);
++
+ u16 printk_parse_prefix(const char *text, int *level,
+ enum printk_info_flags *flags);
+ void console_lock_spinning_enable(void);
+@@ -153,6 +155,17 @@ static inline bool console_is_usable(str
+
+ #endif /* CONFIG_PRINTK */
+
++extern bool have_boot_console;
++extern bool have_legacy_console;
++
++/*
++ * Specifies if the console lock/unlock dance is needed for console
++ * printing. If @have_boot_console is true, the nbcon consoles will
++ * be printed serially along with the legacy consoles because nbcon
++ * consoles cannot print simultaneously with boot consoles.
++ */
++#define printing_via_unlock (have_legacy_console || have_boot_console)
++
+ extern struct printk_buffers printk_shared_pbufs;
+
+ /**
--- a/kernel/printk/nbcon.c
+++ b/kernel/printk/nbcon.c
@@ -936,6 +936,29 @@ static bool nbcon_emit_next_record(struc
@@ -90,9 +124,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+}
+
/**
- * nbcon_atomic_emit_one - Print one record for an nbcon console using the
- * write_atomic() callback
-@@ -977,9 +1000,15 @@ static bool nbcon_atomic_emit_one(struct
+ * nbcon_get_default_prio - The appropriate nbcon priority to use for nbcon
+ * printing on the current CPU
+@@ -946,9 +969,15 @@ static bool nbcon_emit_next_record(struc
*/
enum nbcon_prio nbcon_get_default_prio(void)
{
@@ -108,12 +142,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return NBCON_PRIO_NORMAL;
}
-@@ -1147,6 +1176,60 @@ void nbcon_atomic_flush_unsafe(void)
+@@ -1207,6 +1236,103 @@ void nbcon_atomic_flush_unsafe(void)
}
/**
+ * nbcon_cpu_emergency_enter - Enter an emergency section where printk()
-+ * messages for that CPU are only stored
++ * messages for that CPU are only stored
+ *
+ * Upon exiting the emergency section, all stored messages are flushed.
+ *
@@ -136,7 +170,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+/**
+ * nbcon_cpu_emergency_exit - Exit an emergency section and flush the
-+ * stored messages
++ * stored messages
+ *
+ * Flushing only occurs when exiting all nesting for the CPU.
+ *
@@ -149,15 +183,33 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+ cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
+
-+ WARN_ON_ONCE(*cpu_emergency_nesting == 0);
-+
++ /*
++ * Flush the messages before enabling preemption to see them ASAP.
++ *
++ * Reduce the risk of potential softlockup by using the
++ * flush_pending() variant which ignores messages added later. It is
++ * called before decrementing the counter so that the printing context
++ * for the emergency messages is NBCON_PRIO_EMERGENCY.
++ */
+ if (*cpu_emergency_nesting == 1) {
+ nbcon_atomic_flush_pending();
++
++ /*
++ * Safely attempt to flush the legacy consoles in this
++ * context. Otherwise an irq_work context is triggered
++ * to handle it.
++ */
+ do_trigger_flush = true;
++ if (printing_via_unlock && !is_printk_deferred()) {
++ if (console_trylock()) {
++ do_trigger_flush = false;
++ console_unlock();
++ }
++ }
+ }
+
-+ /* Undo the nesting count of nbcon_cpu_emergency_enter(). */
-+ (*cpu_emergency_nesting)--;
++ if (!WARN_ON_ONCE(*cpu_emergency_nesting == 0))
++ (*cpu_emergency_nesting)--;
+
+ preempt_enable();
+
@@ -166,12 +218,63 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+}
+
+/**
++ * nbcon_cpu_emergency_flush - Explicitly flush consoles while
++ * within emergency context
++ *
++ * Both nbcon and legacy consoles are flushed.
++ *
++ * It should be used only when there are too many messages printed
++ * in emergency context, for example, printing backtraces of all
++ * CPUs or processes. It is typically needed when the watchdogs
++ * need to be touched as well.
++ */
++void nbcon_cpu_emergency_flush(void)
++{
++ /* The explicit flush is needed only in the emergency context. */
++ if (*(nbcon_get_cpu_emergency_nesting()) == 0)
++ return;
++
++ nbcon_atomic_flush_pending();
++
++ if (printing_via_unlock && !is_printk_deferred()) {
++ if (console_trylock())
++ console_unlock();
++ }
++}
++
++/**
* nbcon_alloc - Allocate buffers needed by the nbcon console
* @con: Console to allocate buffers for
*
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -2417,16 +2417,25 @@ asmlinkage int vprintk_emit(int facility
+@@ -468,7 +468,7 @@ static DEFINE_MUTEX(syslog_lock);
+ * present, it is necessary to perform the console lock/unlock dance
+ * whenever console flushing should occur.
+ */
+-static bool have_legacy_console;
++bool have_legacy_console;
+
+ /*
+ * Specifies if an nbcon console is registered. If nbcon consoles are present,
+@@ -483,15 +483,7 @@ static bool have_nbcon_console;
+ * the console lock. This is because boot consoles and nbcon consoles may
+ * have mapped the same hardware.
+ */
+-static bool have_boot_console;
+-
+-/*
+- * Specifies if the console lock/unlock dance is needed for console
+- * printing. If @have_boot_console is true, the nbcon consoles will
+- * be printed serially along with the legacy consoles because nbcon
+- * consoles cannot print simultaneously with boot consoles.
+- */
+-#define printing_via_unlock (have_legacy_console || have_boot_console)
++bool have_boot_console;
+
+ #ifdef CONFIG_PRINTK
+ DECLARE_WAIT_QUEUE_HEAD(log_wait);
+@@ -2412,16 +2404,25 @@ asmlinkage int vprintk_emit(int facility
* printing of all remaining records to all consoles so that
* this context can return as soon as possible. Hopefully
* another printk() caller will take over the printing.
@@ -199,3 +302,30 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
preempt_enable();
}
+--- a/kernel/printk/printk_safe.c
++++ b/kernel/printk/printk_safe.c
+@@ -38,6 +38,15 @@ void __printk_deferred_exit(void)
+ __printk_safe_exit();
+ }
+
++bool is_printk_deferred(void)
++{
++ /*
++ * The per-CPU variable @printk_context can be read safely in any
++ * context. The CPU migration always disabled when set.
++ */
++ return (this_cpu_read(printk_context) || in_nmi());
++}
++
+ asmlinkage int vprintk(const char *fmt, va_list args)
+ {
+ #ifdef CONFIG_KGDB_KDB
+@@ -50,7 +59,7 @@ asmlinkage int vprintk(const char *fmt,
+ * Use the main logbuf even in NMI. But avoid calling console
+ * drivers that might have their own locks.
+ */
+- if (this_cpu_read(printk_context) || in_nmi())
++ if (is_printk_deferred())
+ return vprintk_deferred(fmt, args);
+
+ /* No obstacles. */
diff --git a/debian/patches-rt/0029-panic-Mark-emergency-section-in-warn.patch b/debian/patches-rt/0026-panic-Mark-emergency-section-in-warn.patch
index 35c04773c5..3dd6faca7e 100644
--- a/debian/patches-rt/0029-panic-Mark-emergency-section-in-warn.patch
+++ b/debian/patches-rt/0026-panic-Mark-emergency-section-in-warn.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 11 Sep 2023 15:53:04 +0000
-Subject: [PATCH 29/48] panic: Mark emergency section in warn
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 26/48] panic: Mark emergency section in warn
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Mark the full contents of __warn() as an emergency section. In
this section, the CPU will not perform console output for the
@@ -11,6 +11,7 @@ triggered when exiting the emergency section.
Co-developed-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Thomas Gleixner (Intel) <tglx@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/panic.c | 4 ++++
@@ -18,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/panic.c
+++ b/kernel/panic.c
-@@ -667,6 +667,8 @@ struct warn_args {
+@@ -670,6 +670,8 @@ struct warn_args {
void __warn(const char *file, int line, void *caller, unsigned taint,
struct pt_regs *regs, struct warn_args *args)
{
@@ -27,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
disable_trace_on_warning();
if (file)
-@@ -697,6 +699,8 @@ void __warn(const char *file, int line,
+@@ -705,6 +707,8 @@ void __warn(const char *file, int line,
/* Just a warning, don't kill lockdep. */
add_taint(taint, LOCKDEP_STILL_OK);
diff --git a/debian/patches-rt/0030-panic-Mark-emergency-section-in-oops.patch b/debian/patches-rt/0027-panic-Mark-emergency-section-in-oops.patch
index cd3d80fe60..d5f87811d1 100644
--- a/debian/patches-rt/0030-panic-Mark-emergency-section-in-oops.patch
+++ b/debian/patches-rt/0027-panic-Mark-emergency-section-in-oops.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 19 Sep 2023 17:07:34 +0000
-Subject: [PATCH 30/48] panic: Mark emergency section in oops
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 27/48] panic: Mark emergency section in oops
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Mark an emergency section beginning with oops_enter() until the
end of oops_exit(). In this section, the CPU will not perform
@@ -14,6 +14,7 @@ flushing mechanism that should occur after the consoles have
been triggered to flush.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/panic.c | 2 ++
@@ -21,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/panic.c
+++ b/kernel/panic.c
-@@ -634,6 +634,7 @@ bool oops_may_print(void)
+@@ -637,6 +637,7 @@ bool oops_may_print(void)
*/
void oops_enter(void)
{
@@ -29,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
tracing_off();
/* can't trust the integrity of the kernel anymore: */
debug_locks_off();
-@@ -656,6 +657,7 @@ void oops_exit(void)
+@@ -659,6 +660,7 @@ void oops_exit(void)
{
do_oops_enter_exit();
print_oops_end_marker();
diff --git a/debian/patches-rt/0031-rcu-Mark-emergency-sections-in-rcu-stalls.patch b/debian/patches-rt/0028-rcu-Mark-emergency-sections-in-rcu-stalls.patch
index 0cca134a21..9e2e8b1934 100644
--- a/debian/patches-rt/0031-rcu-Mark-emergency-sections-in-rcu-stalls.patch
+++ b/debian/patches-rt/0028-rcu-Mark-emergency-sections-in-rcu-stalls.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Mon, 11 Sep 2023 15:53:39 +0000
-Subject: [PATCH 31/48] rcu: Mark emergency sections in rcu stalls
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 28/48] rcu: Mark emergency sections in rcu stalls
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Mark emergency sections wherever multiple lines of
rcu stall information are generated. In an emergency
@@ -12,11 +12,12 @@ This allows the full message block to be stored as
quickly as possible in the ringbuffer.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/rcu/tree_exp.h | 7 +++++++
- kernel/rcu/tree_stall.h | 9 +++++++++
- 2 files changed, 16 insertions(+)
+ kernel/rcu/tree_exp.h | 9 +++++++++
+ kernel/rcu/tree_stall.h | 11 +++++++++++
+ 2 files changed, 20 insertions(+)
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -28,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <linux/lockdep.h>
static void rcu_exp_handler(void *unused);
-@@ -636,6 +637,9 @@ static void synchronize_rcu_expedited_wa
+@@ -571,6 +572,9 @@ static void synchronize_rcu_expedited_wa
return;
if (rcu_stall_is_suppressed())
continue;
@@ -38,7 +39,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
j = jiffies;
rcu_stall_notifier_call_chain(RCU_STALL_NOTIFY_EXP, (void *)(j - jiffies_start));
trace_rcu_stall_warning(rcu_state.name, TPS("ExpeditedStall"));
-@@ -689,6 +693,9 @@ static void synchronize_rcu_expedited_wa
+@@ -620,10 +624,14 @@ static void synchronize_rcu_expedited_wa
+ preempt_disable(); // For smp_processor_id() in dump_cpu_task().
+ dump_cpu_task(cpu);
+ preempt_enable();
++ nbcon_cpu_emergency_flush();
+ }
rcu_exp_print_detail_task_stall_rnp(rnp);
}
jiffies_stall = 3 * rcu_exp_jiffies_till_stall_check() + 3;
@@ -48,6 +54,14 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
panic_on_rcu_stall();
}
}
+@@ -792,6 +800,7 @@ static void rcu_exp_print_detail_task_st
+ */
+ touch_nmi_watchdog();
+ sched_show_task(t);
++ nbcon_cpu_emergency_flush();
+ }
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ }
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -7,6 +7,7 @@
@@ -58,7 +72,23 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <linux/kvm_para.h>
#include <linux/rcu_notifier.h>
-@@ -604,6 +605,8 @@ static void print_other_cpu_stall(unsign
+@@ -260,6 +261,7 @@ static void rcu_print_detail_task_stall_
+ */
+ touch_nmi_watchdog();
+ sched_show_task(t);
++ nbcon_cpu_emergency_flush();
+ }
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ }
+@@ -523,6 +525,7 @@ static void print_cpu_stall_info(int cpu
+ falsepositive ? " (false positive?)" : "");
+
+ print_cpu_stat_info(cpu);
++ nbcon_cpu_emergency_flush();
+ }
+
+ /* Complain about starvation of grace-period kthread. */
+@@ -605,6 +608,8 @@ static void print_other_cpu_stall(unsign
if (rcu_stall_is_suppressed())
return;
@@ -67,7 +97,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* OK, time to rat on our buddy...
* See Documentation/RCU/stallwarn.rst for info on how to debug
-@@ -655,6 +658,8 @@ static void print_other_cpu_stall(unsign
+@@ -657,6 +662,8 @@ static void print_other_cpu_stall(unsign
rcu_check_gp_kthread_expired_fqs_timer();
rcu_check_gp_kthread_starvation();
@@ -76,7 +106,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
panic_on_rcu_stall();
rcu_force_quiescent_state(); /* Kick them all. */
-@@ -675,6 +680,8 @@ static void print_cpu_stall(unsigned lon
+@@ -677,6 +684,8 @@ static void print_cpu_stall(unsigned lon
if (rcu_stall_is_suppressed())
return;
@@ -85,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* OK, time to rat on ourselves...
* See Documentation/RCU/stallwarn.rst for info on how to debug
-@@ -703,6 +710,8 @@ static void print_cpu_stall(unsigned lon
+@@ -706,6 +715,8 @@ static void print_cpu_stall(unsigned lon
jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
diff --git a/debian/patches-rt/0032-lockdep-Mark-emergency-sections-in-lockdep-splats.patch b/debian/patches-rt/0029-lockdep-Mark-emergency-sections-in-lockdep-splats.patch
index 64cf39b672..0141d9fd20 100644
--- a/debian/patches-rt/0032-lockdep-Mark-emergency-sections-in-lockdep-splats.patch
+++ b/debian/patches-rt/0029-lockdep-Mark-emergency-sections-in-lockdep-splats.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Mon, 18 Sep 2023 20:27:41 +0000
-Subject: [PATCH 32/48] lockdep: Mark emergency sections in lockdep splats
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 29/48] lockdep: Mark emergency sections in lockdep splats
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Mark emergency sections wherever multiple lines of
lock debugging output are generated. In an emergency
@@ -11,11 +11,18 @@ output is triggered when exiting the emergency section.
This allows the full message block to be stored as
quickly as possible in the ringbuffer.
+Note that debug_show_all_locks() and
+lockdep_print_held_locks() rely on their callers to
+enter the emergency section. This is because these
+functions can also be called in non-emergency
+situations (such as sysrq).
+
Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/locking/lockdep.c | 91 +++++++++++++++++++++++++++++++++++++++++++++--
- 1 file changed, 88 insertions(+), 3 deletions(-)
+ kernel/locking/lockdep.c | 84 +++++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 82 insertions(+), 2 deletions(-)
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -38,31 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return NULL;
}
-@@ -782,6 +785,8 @@ static void lockdep_print_held_locks(str
- {
- int i, depth = READ_ONCE(p->lockdep_depth);
-
-+ nbcon_cpu_emergency_enter();
-+
- if (!depth)
- printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
- else
-@@ -792,11 +797,13 @@ static void lockdep_print_held_locks(str
- * and it's not the current task.
- */
- if (p != current && task_is_running(p))
-- return;
-+ goto out;
- for (i = 0; i < depth; i++) {
- printk(" #%d: ", i);
- print_lock(p->held_locks + i);
- }
-+out:
-+ nbcon_cpu_emergency_exit();
- }
-
- static void print_kernel_ident(void)
-@@ -888,11 +895,13 @@ look_up_lock_class(const struct lockdep_
+@@ -888,11 +891,13 @@ look_up_lock_class(const struct lockdep_
if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
instrumentation_begin();
debug_locks_off();
@@ -76,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
instrumentation_end();
return NULL;
}
-@@ -969,11 +978,13 @@ static bool assign_lock_key(struct lockd
+@@ -969,11 +974,13 @@ static bool assign_lock_key(struct lockd
else {
/* Debug-check: all keys must be persistent! */
debug_locks_off();
@@ -90,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return false;
}
-@@ -1317,8 +1328,10 @@ register_lock_class(struct lockdep_map *
+@@ -1317,8 +1324,10 @@ register_lock_class(struct lockdep_map *
return NULL;
}
@@ -101,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return NULL;
}
nr_lock_classes++;
-@@ -1350,11 +1363,13 @@ register_lock_class(struct lockdep_map *
+@@ -1350,11 +1359,13 @@ register_lock_class(struct lockdep_map *
if (verbose(class)) {
graph_unlock();
@@ -115,7 +98,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!graph_lock()) {
return NULL;
-@@ -1393,8 +1408,10 @@ static struct lock_list *alloc_list_entr
+@@ -1393,8 +1404,10 @@ static struct lock_list *alloc_list_entr
if (!debug_locks_off_graph_unlock())
return NULL;
@@ -126,7 +109,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return NULL;
}
nr_list_entries++;
-@@ -2040,6 +2057,8 @@ static noinline void print_circular_bug(
+@@ -2040,6 +2053,8 @@ static noinline void print_circular_bug(
depth = get_lock_depth(target);
@@ -135,7 +118,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
print_circular_bug_header(target, depth, check_src, check_tgt);
parent = get_lock_parent(target);
-@@ -2058,6 +2077,8 @@ static noinline void print_circular_bug(
+@@ -2058,6 +2073,8 @@ static noinline void print_circular_bug(
printk("\nstack backtrace:\n");
dump_stack();
@@ -144,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static noinline void print_bfs_bug(int ret)
-@@ -2570,6 +2591,8 @@ print_bad_irq_dependency(struct task_str
+@@ -2570,6 +2587,8 @@ print_bad_irq_dependency(struct task_str
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return;
@@ -153,7 +136,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_warn("\n");
pr_warn("=====================================================\n");
pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
-@@ -2619,11 +2642,13 @@ print_bad_irq_dependency(struct task_str
+@@ -2619,11 +2638,13 @@ print_bad_irq_dependency(struct task_str
pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
next_root->trace = save_trace();
if (!next_root->trace)
@@ -168,7 +151,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static const char *state_names[] = {
-@@ -2988,6 +3013,8 @@ print_deadlock_bug(struct task_struct *c
+@@ -2988,6 +3009,8 @@ print_deadlock_bug(struct task_struct *c
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return;
@@ -177,7 +160,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_warn("\n");
pr_warn("============================================\n");
pr_warn("WARNING: possible recursive locking detected\n");
-@@ -3010,6 +3037,8 @@ print_deadlock_bug(struct task_struct *c
+@@ -3010,6 +3033,8 @@ print_deadlock_bug(struct task_struct *c
pr_warn("\nstack backtrace:\n");
dump_stack();
@@ -186,7 +169,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -3607,6 +3636,8 @@ static void print_collision(struct task_
+@@ -3607,6 +3632,8 @@ static void print_collision(struct task_
struct held_lock *hlock_next,
struct lock_chain *chain)
{
@@ -195,7 +178,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_warn("\n");
pr_warn("============================\n");
pr_warn("WARNING: chain_key collision\n");
-@@ -3623,6 +3654,8 @@ static void print_collision(struct task_
+@@ -3623,6 +3650,8 @@ static void print_collision(struct task_
pr_warn("\nstack backtrace:\n");
dump_stack();
@@ -204,7 +187,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#endif
-@@ -3713,8 +3746,10 @@ static inline int add_chain_cache(struct
+@@ -3713,8 +3742,10 @@ static inline int add_chain_cache(struct
if (!debug_locks_off_graph_unlock())
return 0;
@@ -215,7 +198,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
chain->chain_key = chain_key;
-@@ -3731,8 +3766,10 @@ static inline int add_chain_cache(struct
+@@ -3731,8 +3762,10 @@ static inline int add_chain_cache(struct
if (!debug_locks_off_graph_unlock())
return 0;
@@ -226,7 +209,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -3971,6 +4008,8 @@ print_usage_bug(struct task_struct *curr
+@@ -3971,6 +4004,8 @@ print_usage_bug(struct task_struct *curr
if (!debug_locks_off() || debug_locks_silent)
return;
@@ -235,7 +218,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_warn("\n");
pr_warn("================================\n");
pr_warn("WARNING: inconsistent lock state\n");
-@@ -3999,6 +4038,8 @@ print_usage_bug(struct task_struct *curr
+@@ -3999,6 +4034,8 @@ print_usage_bug(struct task_struct *curr
pr_warn("\nstack backtrace:\n");
dump_stack();
@@ -244,7 +227,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -4033,6 +4074,8 @@ print_irq_inversion_bug(struct task_stru
+@@ -4033,6 +4070,8 @@ print_irq_inversion_bug(struct task_stru
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return;
@@ -253,7 +236,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_warn("\n");
pr_warn("========================================================\n");
pr_warn("WARNING: possible irq lock inversion dependency detected\n");
-@@ -4073,11 +4116,13 @@ print_irq_inversion_bug(struct task_stru
+@@ -4073,11 +4112,13 @@ print_irq_inversion_bug(struct task_stru
pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
root->trace = save_trace();
if (!root->trace)
@@ -268,7 +251,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -4154,6 +4199,8 @@ void print_irqtrace_events(struct task_s
+@@ -4154,6 +4195,8 @@ void print_irqtrace_events(struct task_s
{
const struct irqtrace_events *trace = &curr->irqtrace;
@@ -277,7 +260,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
printk("irq event stamp: %u\n", trace->irq_events);
printk("hardirqs last enabled at (%u): [<%px>] %pS\n",
trace->hardirq_enable_event, (void *)trace->hardirq_enable_ip,
-@@ -4167,6 +4214,8 @@ void print_irqtrace_events(struct task_s
+@@ -4167,6 +4210,8 @@ void print_irqtrace_events(struct task_s
printk("softirqs last disabled at (%u): [<%px>] %pS\n",
trace->softirq_disable_event, (void *)trace->softirq_disable_ip,
(void *)trace->softirq_disable_ip);
@@ -286,7 +269,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static int HARDIRQ_verbose(struct lock_class *class)
-@@ -4687,10 +4736,12 @@ static int mark_lock(struct task_struct
+@@ -4687,10 +4732,12 @@ static int mark_lock(struct task_struct
* We must printk outside of the graph_lock:
*/
if (ret == 2) {
@@ -299,7 +282,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
return ret;
-@@ -4731,6 +4782,8 @@ print_lock_invalid_wait_context(struct t
+@@ -4731,6 +4778,8 @@ print_lock_invalid_wait_context(struct t
if (debug_locks_silent)
return 0;
@@ -308,7 +291,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_warn("\n");
pr_warn("=============================\n");
pr_warn("[ BUG: Invalid wait context ]\n");
-@@ -4750,6 +4803,8 @@ print_lock_invalid_wait_context(struct t
+@@ -4750,6 +4799,8 @@ print_lock_invalid_wait_context(struct t
pr_warn("stack backtrace:\n");
dump_stack();
@@ -317,7 +300,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -4954,6 +5009,8 @@ print_lock_nested_lock_not_held(struct t
+@@ -4954,6 +5005,8 @@ print_lock_nested_lock_not_held(struct t
if (debug_locks_silent)
return;
@@ -326,7 +309,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_warn("\n");
pr_warn("==================================\n");
pr_warn("WARNING: Nested lock was not taken\n");
-@@ -4974,6 +5031,8 @@ print_lock_nested_lock_not_held(struct t
+@@ -4974,6 +5027,8 @@ print_lock_nested_lock_not_held(struct t
pr_warn("\nstack backtrace:\n");
dump_stack();
@@ -335,7 +318,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static int __lock_is_held(const struct lockdep_map *lock, int read);
-@@ -5019,11 +5078,13 @@ static int __lock_acquire(struct lockdep
+@@ -5019,11 +5074,13 @@ static int __lock_acquire(struct lockdep
debug_class_ops_inc(class);
if (very_verbose(class)) {
@@ -349,7 +332,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -5150,6 +5211,7 @@ static int __lock_acquire(struct lockdep
+@@ -5150,6 +5207,7 @@ static int __lock_acquire(struct lockdep
#endif
if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
debug_locks_off();
@@ -357,7 +340,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
printk(KERN_DEBUG "depth: %i max: %lu!\n",
curr->lockdep_depth, MAX_LOCK_DEPTH);
-@@ -5157,6 +5219,7 @@ static int __lock_acquire(struct lockdep
+@@ -5157,6 +5215,7 @@ static int __lock_acquire(struct lockdep
lockdep_print_held_locks(current);
debug_show_all_locks();
dump_stack();
@@ -365,7 +348,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -5176,6 +5239,8 @@ static void print_unlock_imbalance_bug(s
+@@ -5176,6 +5235,8 @@ static void print_unlock_imbalance_bug(s
if (debug_locks_silent)
return;
@@ -374,7 +357,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_warn("\n");
pr_warn("=====================================\n");
pr_warn("WARNING: bad unlock balance detected!\n");
-@@ -5192,6 +5257,8 @@ static void print_unlock_imbalance_bug(s
+@@ -5192,6 +5253,8 @@ static void print_unlock_imbalance_bug(s
pr_warn("\nstack backtrace:\n");
dump_stack();
@@ -383,7 +366,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static noinstr int match_held_lock(const struct held_lock *hlock,
-@@ -5895,6 +5962,8 @@ static void print_lock_contention_bug(st
+@@ -5895,6 +5958,8 @@ static void print_lock_contention_bug(st
if (debug_locks_silent)
return;
@@ -392,7 +375,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_warn("\n");
pr_warn("=================================\n");
pr_warn("WARNING: bad contention detected!\n");
-@@ -5911,6 +5980,8 @@ static void print_lock_contention_bug(st
+@@ -5911,6 +5976,8 @@ static void print_lock_contention_bug(st
pr_warn("\nstack backtrace:\n");
dump_stack();
@@ -401,7 +384,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void
-@@ -6524,6 +6595,8 @@ print_freed_lock_bug(struct task_struct
+@@ -6524,6 +6591,8 @@ print_freed_lock_bug(struct task_struct
if (debug_locks_silent)
return;
@@ -410,7 +393,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_warn("\n");
pr_warn("=========================\n");
pr_warn("WARNING: held lock freed!\n");
-@@ -6536,6 +6609,8 @@ print_freed_lock_bug(struct task_struct
+@@ -6536,6 +6605,8 @@ print_freed_lock_bug(struct task_struct
pr_warn("\nstack backtrace:\n");
dump_stack();
@@ -419,7 +402,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static inline int not_in_range(const void* mem_from, unsigned long mem_len,
-@@ -6582,6 +6657,8 @@ static void print_held_locks_bug(void)
+@@ -6582,6 +6653,8 @@ static void print_held_locks_bug(void)
if (debug_locks_silent)
return;
@@ -428,7 +411,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_warn("\n");
pr_warn("====================================\n");
pr_warn("WARNING: %s/%d still has locks held!\n",
-@@ -6591,6 +6668,8 @@ static void print_held_locks_bug(void)
+@@ -6591,6 +6664,8 @@ static void print_held_locks_bug(void)
lockdep_print_held_locks(current);
pr_warn("\nstack backtrace:\n");
dump_stack();
@@ -437,23 +420,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
void debug_check_no_locks_held(void)
-@@ -6609,6 +6688,7 @@ void debug_show_all_locks(void)
- pr_warn("INFO: lockdep is turned off.\n");
- return;
+@@ -6616,6 +6691,7 @@ void debug_show_all_locks(void)
+ if (!p->lockdep_depth)
+ continue;
+ lockdep_print_held_locks(p);
++ nbcon_cpu_emergency_flush();
+ touch_nmi_watchdog();
+ touch_all_softlockup_watchdogs();
}
-+ nbcon_cpu_emergency_enter();
- pr_warn("\nShowing all locks held in the system:\n");
-
- rcu_read_lock();
-@@ -6623,6 +6703,7 @@ void debug_show_all_locks(void)
-
- pr_warn("\n");
- pr_warn("=============================================\n\n");
-+ nbcon_cpu_emergency_exit();
- }
- EXPORT_SYMBOL_GPL(debug_show_all_locks);
- #endif
-@@ -6648,6 +6729,7 @@ asmlinkage __visible void lockdep_sys_ex
+@@ -6648,6 +6724,7 @@ asmlinkage __visible void lockdep_sys_ex
if (unlikely(curr->lockdep_depth)) {
if (!debug_locks_off())
return;
@@ -461,7 +436,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_warn("\n");
pr_warn("================================================\n");
pr_warn("WARNING: lock held when returning to user space!\n");
-@@ -6656,6 +6738,7 @@ asmlinkage __visible void lockdep_sys_ex
+@@ -6656,6 +6733,7 @@ asmlinkage __visible void lockdep_sys_ex
pr_warn("%s/%d is leaving the kernel with locks still held!\n",
curr->comm, curr->pid);
lockdep_print_held_locks(curr);
@@ -469,7 +444,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -6672,6 +6755,7 @@ void lockdep_rcu_suspicious(const char *
+@@ -6672,6 +6750,7 @@ void lockdep_rcu_suspicious(const char *
bool rcu = warn_rcu_enter();
/* Note: the following can be executed concurrently, so be careful. */
@@ -477,7 +452,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_warn("\n");
pr_warn("=============================\n");
pr_warn("WARNING: suspicious RCU usage\n");
-@@ -6710,6 +6794,7 @@ void lockdep_rcu_suspicious(const char *
+@@ -6710,6 +6789,7 @@ void lockdep_rcu_suspicious(const char *
lockdep_print_held_locks(curr);
pr_warn("\nstack backtrace:\n");
dump_stack();
diff --git a/debian/patches-rt/0030-printk-Rename-console_replay_all-and-update-context.patch b/debian/patches-rt/0030-printk-Rename-console_replay_all-and-update-context.patch
new file mode 100644
index 0000000000..00a80c8832
--- /dev/null
+++ b/debian/patches-rt/0030-printk-Rename-console_replay_all-and-update-context.patch
@@ -0,0 +1,77 @@
+From: Sreenath Vijayan <sreenath.vijayan@sony.com>
+Date: Thu, 30 May 2024 13:15:47 +0530
+Subject: [PATCH 30/48] printk: Rename console_replay_all() and update context
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+Rename console_replay_all() to console_try_replay_all() to make
+clear that the implementation is best effort. Also, the function
+should not be called in NMI context as it takes locks, so update
+the comment in code.
+
+Fixes: 693f75b91a91 ("printk: Add function to replay kernel log on consoles")
+Fixes: 1b743485e27f ("tty/sysrq: Replay kernel log messages on consoles via sysrq")
+Suggested-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Shimoyashiki Taichi <taichi.shimoyashiki@sony.com>
+Signed-off-by: Sreenath Vijayan <sreenath.vijayan@sony.com>
+Link: https://lore.kernel.org/r/Zlguq/wU21Z8MqI4@sreenath.vijayan@sony.com
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/sysrq.c | 2 +-
+ include/linux/printk.h | 4 ++--
+ kernel/printk/printk.c | 6 +++---
+ 3 files changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/tty/sysrq.c
++++ b/drivers/tty/sysrq.c
+@@ -452,7 +452,7 @@ static const struct sysrq_key_op sysrq_u
+
+ static void sysrq_handle_replay_logs(u8 key)
+ {
+- console_replay_all();
++ console_try_replay_all();
+ }
+ static struct sysrq_key_op sysrq_replay_logs_op = {
+ .handler = sysrq_handle_replay_logs,
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -195,7 +195,7 @@ void show_regs_print_info(const char *lo
+ extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold;
+ extern asmlinkage void dump_stack(void) __cold;
+ void printk_trigger_flush(void);
+-void console_replay_all(void);
++void console_try_replay_all(void);
+ void printk_legacy_allow_panic_sync(void);
+ extern bool nbcon_device_try_acquire(struct console *con);
+ extern void nbcon_device_release(struct console *con);
+@@ -280,7 +280,7 @@ static inline void printk_trigger_flush(
+ {
+ }
+
+-static inline void console_replay_all(void)
++static inline void console_try_replay_all(void)
+ {
+ }
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -4460,15 +4460,15 @@ void kmsg_dump_rewind(struct kmsg_dump_i
+ EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
+
+ /**
+- * console_replay_all - replay kernel log on consoles
++ * console_try_replay_all - try to replay kernel log on consoles
+ *
+ * Try to obtain lock on console subsystem and replay all
+ * available records in printk buffer on the consoles.
+ * Does nothing if lock is not obtained.
+ *
+- * Context: Any context.
++ * Context: Any, except for NMI.
+ */
+-void console_replay_all(void)
++void console_try_replay_all(void)
+ {
+ if (console_trylock()) {
+ __console_rewind_all();
diff --git a/debian/patches-rt/0033-printk-nbcon-Introduce-printing-kthreads.patch b/debian/patches-rt/0031-printk-nbcon-Introduce-printing-kthreads.patch
index 12f69b52fc..8557ac1ea9 100644
--- a/debian/patches-rt/0033-printk-nbcon-Introduce-printing-kthreads.patch
+++ b/debian/patches-rt/0031-printk-nbcon-Introduce-printing-kthreads.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 22 Sep 2023 14:12:21 +0000
-Subject: [PATCH 33/48] printk: nbcon: Introduce printing kthreads
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 31/48] printk: nbcon: Introduce printing kthreads
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Provide the main implementation for running a printer kthread
per nbcon console that is takeover/handover aware.
@@ -12,10 +12,10 @@ Signed-off-by: Thomas Gleixner (Intel) <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/console.h | 26 ++++++
- kernel/printk/internal.h | 28 +++++++
- kernel/printk/nbcon.c | 187 +++++++++++++++++++++++++++++++++++++++++++++--
- kernel/printk/printk.c | 33 ++++++++
- 4 files changed, 269 insertions(+), 5 deletions(-)
+ kernel/printk/internal.h | 26 ++++++
+ kernel/printk/nbcon.c | 196 +++++++++++++++++++++++++++++++++++++++++++++--
+ kernel/printk/printk.c | 34 ++++++++
+ 4 files changed, 275 insertions(+), 7 deletions(-)
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -25,28 +25,28 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <linux/rculist.h>
+#include <linux/rcuwait.h>
#include <linux/types.h>
+ #include <linux/vesa.h>
- struct vc_data;
-@@ -324,6 +325,8 @@ struct nbcon_drvdata {
- * @nbcon_state: State for nbcon consoles
+@@ -324,6 +325,8 @@ struct nbcon_write_context {
* @nbcon_seq: Sequence number of the next record for nbcon to print
+ * @nbcon_device_ctxt: Context available for non-printing operations
* @pbufs: Pointer to nbcon private buffer
+ * @kthread: Printer kthread for this console
+ * @rcuwait: RCU-safe wait object for @kthread waking
*/
struct console {
char name[16];
-@@ -372,6 +375,27 @@ struct console {
+@@ -374,6 +377,27 @@ struct console {
void (*write_atomic)(struct console *con, struct nbcon_write_context *wctxt);
/**
+ * @write_thread:
+ *
-+ * NBCON callback to write out text in task context. (Optional)
++ * NBCON callback to write out text in task context.
+ *
-+ * This callback is called with the console already acquired. Any
-+ * additional driver synchronization should have been performed by
-+ * device_lock().
++ * This callback is called after device_lock() and with the nbcon
++ * console acquired. Any necessary driver synchronization should have
++ * been performed by the device_lock() callback.
+ *
+ * This callback is always called from task context but with migration
+ * disabled.
@@ -55,8 +55,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ * sections applies as with write_atomic(). The difference between
+ * this callback and write_atomic() is that this callback is used
+ * during normal operation and is always called from task context.
-+ * This provides drivers with a relatively relaxed locking context
-+ * for synchronizing output to the hardware.
++ * This allows drivers to operate in their own locking context for
++ * synchronizing output to the hardware.
+ */
+ void (*write_thread)(struct console *con, struct nbcon_write_context *wctxt);
+
@@ -64,9 +64,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* @device_lock:
*
* NBCON callback to begin synchronization with driver code.
-@@ -431,6 +455,8 @@ struct console {
- struct nbcon_drvdata *nbcon_drvdata;
-
+@@ -420,6 +444,8 @@ struct console {
+ atomic_long_t __private nbcon_seq;
+ struct nbcon_context __private nbcon_device_ctxt;
struct printk_buffers *pbufs;
+ struct task_struct *kthread;
+ struct rcuwait rcuwait;
@@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_LOCKDEP
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
-@@ -90,6 +90,7 @@ enum nbcon_prio nbcon_get_default_prio(v
+@@ -92,6 +92,7 @@ enum nbcon_prio nbcon_get_default_prio(v
void nbcon_atomic_flush_pending(void);
bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
int cookie);
@@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Check if the given console is currently capable and allowed to print
-@@ -108,6 +109,8 @@ static inline bool console_is_usable(str
+@@ -110,6 +111,8 @@ static inline bool console_is_usable(str
if (flags & CON_NBCON) {
if (!con->write_atomic)
return false;
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
} else {
if (!con->write)
return false;
-@@ -124,12 +127,35 @@ static inline bool console_is_usable(str
+@@ -126,12 +129,35 @@ static inline bool console_is_usable(str
return true;
}
@@ -128,15 +128,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* In !PRINTK builds we still export console_sem
* semaphore and some of console functions (console_unlock()/etc.), so
-@@ -153,6 +179,8 @@ static inline bool console_is_usable(str
-
- #endif /* CONFIG_PRINTK */
-
-+extern bool have_boot_console;
-+
- extern struct printk_buffers printk_shared_pbufs;
-
- /**
--- a/kernel/printk/nbcon.c
+++ b/kernel/printk/nbcon.c
@@ -10,6 +10,7 @@
@@ -151,7 +142,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* nbcon_emit_next_record - Emit a record in the acquired context
* @wctxt: The write context that will be handed to the write function
-+ * @use_atomic: True if the write_atomic callback is to be used
++ * @use_atomic: True if the write_atomic() callback is to be used
*
* Return: True if this context still owns the console. False if
* ownership was handed over or taken.
@@ -180,15 +171,14 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
} else {
/*
* This function should never be called for legacy consoles.
-@@ -936,6 +944,118 @@ static bool nbcon_emit_next_record(struc
+@@ -936,6 +944,120 @@ static bool nbcon_emit_next_record(struc
return nbcon_context_exit_unsafe(ctxt);
}
+/**
+ * nbcon_kthread_should_wakeup - Check whether a printer thread should wakeup
+ * @con: Console to operate on
-+ * @ctxt: The acquire context that contains the state
-+ * at console_acquire()
++ * @ctxt: The nbcon context from nbcon_context_try_acquire()
+ *
+ * Return: True if the thread should shutdown or if the console is
+ * allowed to print and a record is available. False otherwise.
@@ -222,6 +212,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+/**
+ * nbcon_kthread_func - The printer thread function
+ * @__console: Console to operate on
++ *
++ * Return: 0
+ */
+static int nbcon_kthread_func(void *__console)
+{
@@ -231,7 +223,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ .ctxt.prio = NBCON_PRIO_NORMAL,
+ };
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
-+ unsigned long flags;
+ short con_flags;
+ bool backlog;
+ int cookie;
@@ -267,7 +258,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ con_flags = console_srcu_read_flags(con);
+
+ if (console_is_usable(con, con_flags)) {
-+ con->device_lock(con, &flags);
++ unsigned long lock_flags;
++
++ con->device_lock(con, &lock_flags);
+
+ /*
+ * Ensure this stays on the CPU to make handover and
@@ -286,7 +279,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ }
+ }
+
-+ con->device_unlock(con, flags);
++ con->device_unlock(con, lock_flags);
+ }
+
+ console_srcu_read_unlock(cookie);
@@ -299,28 +292,43 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Track the nbcon emergency nesting per CPU. */
static DEFINE_PER_CPU(unsigned int, nbcon_pcpu_emergency_nesting);
static unsigned int early_nbcon_pcpu_emergency_nesting __initdata;
-@@ -982,7 +1102,7 @@ static bool nbcon_atomic_emit_one(struct
- * handed over or taken over. In both cases the context is no
- * longer valid.
+@@ -1012,7 +1134,7 @@ static bool nbcon_atomic_emit_one(struct
+ * The higher priority printing context takes over responsibility
+ * to print the pending records.
*/
- if (!nbcon_emit_next_record(wctxt))
+ if (!nbcon_emit_next_record(wctxt, true))
- return true;
+ return false;
nbcon_context_release(ctxt);
-@@ -1091,7 +1211,7 @@ static bool __nbcon_atomic_flush_pending
+@@ -1113,7 +1235,7 @@ static int __nbcon_atomic_flush_pending_
* handed over or taken over. In both cases the context is no
* longer valid.
*/
- if (!nbcon_emit_next_record(&wctxt))
+ if (!nbcon_emit_next_record(&wctxt, true))
- return true;
+ return -EAGAIN;
+
+ if (!ctxt->backlog) {
+@@ -1172,10 +1294,10 @@ static void nbcon_atomic_flush_pending_c
- if (!ctxt->backlog)
-@@ -1230,6 +1350,63 @@ void nbcon_cpu_emergency_exit(void)
+ /*
+ * If flushing was successful but more records are available, this
+- * context must flush those remaining records because there is no
+- * other context that will do it.
++ * context must flush those remaining records if the printer thread
++ * is not available do it.
+ */
+- if (prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
++ if (!con->kthread && prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
+ stop_seq = prb_next_reserve_seq(prb);
+ goto again;
+ }
+@@ -1332,6 +1454,63 @@ void nbcon_cpu_emergency_flush(void)
+ }
}
- /**
++/*
+ * nbcon_kthread_stop - Stop a printer thread
+ * @con: Console to operate on
+ */
@@ -377,19 +385,18 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ sched_set_normal(con->kthread, -20);
+}
+
-+/**
+ /**
* nbcon_alloc - Allocate buffers needed by the nbcon console
* @con: Console to allocate buffers for
- *
-@@ -1273,6 +1450,7 @@ void nbcon_init(struct console *con)
+@@ -1377,6 +1556,7 @@ void nbcon_init(struct console *con, u64
/* nbcon_alloc() must have been called and successful! */
BUG_ON(!con->pbufs);
+ rcuwait_init(&con->rcuwait);
- nbcon_seq_force(con, 0);
+ nbcon_seq_force(con, init_seq);
nbcon_state_set(con, &state);
}
-@@ -1285,6 +1463,7 @@ void nbcon_free(struct console *con)
+@@ -1389,6 +1569,7 @@ void nbcon_free(struct console *con)
{
struct nbcon_state state = { };
@@ -397,18 +404,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
nbcon_state_set(con, &state);
/* Boot consoles share global printk buffers. */
+@@ -1458,6 +1639,7 @@ void nbcon_device_release(struct console
+ */
+ cookie = console_srcu_read_lock();
+ if (console_is_usable(con, console_srcu_read_flags(con)) &&
++ !con->kthread &&
+ prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
+ __nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb), false);
+ }
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -483,7 +483,7 @@ static bool have_nbcon_console;
- * the console lock. This is because boot consoles and nbcon consoles may
- * have mapped the same hardware.
- */
--static bool have_boot_console;
-+bool have_boot_console;
-
- /*
- * Specifies if the console lock/unlock dance is needed for console
-@@ -2698,6 +2698,8 @@ void suspend_console(void)
+@@ -2685,6 +2685,8 @@ void suspend_console(void)
void resume_console(void)
{
struct console *con;
@@ -417,7 +423,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!console_suspend_enabled)
return;
-@@ -2714,6 +2716,14 @@ void resume_console(void)
+@@ -2701,6 +2703,14 @@ void resume_console(void)
*/
synchronize_srcu(&console_srcu);
@@ -432,7 +438,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_flush(1000, true);
}
-@@ -3034,6 +3044,13 @@ static bool console_flush_all(bool do_co
+@@ -3021,6 +3031,13 @@ static bool console_flush_all(bool do_co
u64 printk_seq;
bool progress;
@@ -446,15 +452,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!console_is_usable(con, flags))
continue;
any_usable = true;
-@@ -3327,9 +3344,23 @@ EXPORT_SYMBOL(console_stop);
+@@ -3326,9 +3343,26 @@ EXPORT_SYMBOL(console_stop);
void console_start(struct console *console)
{
+ short flags;
++ int cookie;
+
console_list_lock();
console_srcu_write_flags(console, console->flags | CON_ENABLED);
-+ flags = console->flags;
console_list_unlock();
+
+ /*
@@ -464,8 +470,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ */
+ synchronize_srcu(&console_srcu);
+
++ cookie = console_srcu_read_lock();
++ flags = console_srcu_read_flags(console);
+ if (flags & CON_NBCON)
+ nbcon_kthread_wake(console);
++ console_srcu_read_unlock(cookie);
+
__pr_flush(console, 1000, true);
}
diff --git a/debian/patches-rt/0032-printk-Atomic-print-in-printk-context-on-shutdown.patch b/debian/patches-rt/0032-printk-Atomic-print-in-printk-context-on-shutdown.patch
new file mode 100644
index 0000000000..6e162919ea
--- /dev/null
+++ b/debian/patches-rt/0032-printk-Atomic-print-in-printk-context-on-shutdown.patch
@@ -0,0 +1,62 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 23 Oct 2023 17:43:48 +0000
+Subject: [PATCH 32/48] printk: Atomic print in printk context on shutdown
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+For nbcon consoles, normally the printing is handled by the
+dedicated console printing threads. However, on shutdown the
+printing threads may not get a chance to print the final
+messages.
+
+When shutting down or rebooting (system_state > SYSTEM_RUNNING),
+perform atomic printing from the printk() caller context.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/nbcon.c | 5 +++--
+ kernel/printk/printk.c | 7 ++++++-
+ 2 files changed, 9 insertions(+), 3 deletions(-)
+
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -1297,7 +1297,8 @@ static void nbcon_atomic_flush_pending_c
+ * context must flush those remaining records if the printer thread
+ * is not available do it.
+ */
+- if (!con->kthread && prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
++ if ((!con->kthread || (system_state > SYSTEM_RUNNING)) &&
++ prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
+ stop_seq = prb_next_reserve_seq(prb);
+ goto again;
+ }
+@@ -1639,7 +1640,7 @@ void nbcon_device_release(struct console
+ */
+ cookie = console_srcu_read_lock();
+ if (console_is_usable(con, console_srcu_read_flags(con)) &&
+- !con->kthread &&
++ (!con->kthread || (system_state > SYSTEM_RUNNING)) &&
+ prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
+ __nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb), false);
+ }
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2389,12 +2389,17 @@ asmlinkage int vprintk_emit(int facility
+ *
+ * - When this CPU is in panic.
+ *
++ * - During shutdown, since the printing threads may not get
++ * a chance to print the final messages.
++ *
+ * Note that if boot consoles are registered, the console
+ * lock/unlock dance must be relied upon instead because nbcon
+ * consoles cannot print simultaneously with boot consoles.
+ */
+- if (is_panic_context)
++ if (is_panic_context ||
++ (system_state > SYSTEM_RUNNING)) {
+ nbcon_atomic_flush_pending();
++ }
+ }
+
+ if (do_trylock_unlock) {
diff --git a/debian/patches-rt/0033-printk-nbcon-Fix-nbcon_cpu_emergency_flush-when-pree.patch b/debian/patches-rt/0033-printk-nbcon-Fix-nbcon_cpu_emergency_flush-when-pree.patch
new file mode 100644
index 0000000000..af8ccf56d5
--- /dev/null
+++ b/debian/patches-rt/0033-printk-nbcon-Fix-nbcon_cpu_emergency_flush-when-pree.patch
@@ -0,0 +1,46 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Wed, 5 Jun 2024 09:25:30 +0000
+Subject: [PATCH 33/48] printk: nbcon: Fix nbcon_cpu_emergency_flush() when
+ preemptible
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+nbcon_cpu_emergency_flush() can be called in a preemptible
+context. In that case the CPU is not in an emergency state.
+However, in order to see that the CPU is not in an emergency
+state (accessing the per-cpu variable), preemption must be
+disabled.
+
+Disable preemption when checking the CPU state.
+
+Reported-by: Juri Lelli <juri.lelli@redhat.com>
+Closes: https://lore.kernel.org/aqkcpca4vgadxc3yzcu74xwq3grslj5m43f3eb5fcs23yo2gy4@gcsnqcts5tos
+Fixes: 46a1379208b7 ("printk: nbcon: Implement emergency sections")
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/nbcon.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -1443,8 +1443,19 @@ void nbcon_cpu_emergency_exit(void)
+ */
+ void nbcon_cpu_emergency_flush(void)
+ {
++ bool is_emergency;
++
++ /*
++ * If this context is not an emergency context, preemption might be
++ * enabled. To be sure, disable preemption when checking if this is
++ * an emergency context.
++ */
++ preempt_disable();
++ is_emergency = (*nbcon_get_cpu_emergency_nesting() != 0);
++ preempt_enable();
++
+ /* The explicit flush is needed only in the emergency context. */
+- if (*(nbcon_get_cpu_emergency_nesting()) == 0)
++ if (!is_emergency)
+ return;
+
+ nbcon_atomic_flush_pending();
diff --git a/debian/patches-rt/0034-printk-Atomic-print-in-printk-context-on-shutdown.patch b/debian/patches-rt/0034-printk-Atomic-print-in-printk-context-on-shutdown.patch
deleted file mode 100644
index 68b58b5f41..0000000000
--- a/debian/patches-rt/0034-printk-Atomic-print-in-printk-context-on-shutdown.patch
+++ /dev/null
@@ -1,40 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Mon, 23 Oct 2023 17:43:48 +0000
-Subject: [PATCH 34/48] printk: Atomic print in printk context on shutdown
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-For nbcon consoles, normally the printing is handled by the
-dedicated console printing threads. However, on shutdown the
-printing threads may not get a chance to print the final
-messages.
-
-When shutting down or rebooting (system_state > SYSTEM_RUNNING),
-perform atomic printing from the printk() caller context.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/printk/printk.c | 7 ++++++-
- 1 file changed, 6 insertions(+), 1 deletion(-)
-
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -2402,12 +2402,17 @@ asmlinkage int vprintk_emit(int facility
- *
- * - When this CPU is in panic.
- *
-+ * - During shutdown, since the printing threads may not get
-+ * a chance to print the final messages.
-+ *
- * Note that if boot consoles are registered, the console
- * lock/unlock dance must be relied upon instead because nbcon
- * consoles cannot print simultaneously with boot consoles.
- */
-- if (is_panic_context)
-+ if (is_panic_context ||
-+ (system_state > SYSTEM_RUNNING)) {
- nbcon_atomic_flush_pending();
-+ }
- }
-
- if (do_trylock_unlock) {
diff --git a/debian/patches-rt/0035-printk-nbcon-Add-context-to-console_is_usable.patch b/debian/patches-rt/0034-printk-nbcon-Add-context-to-console_is_usable.patch
index bd434ec326..93272372ad 100644
--- a/debian/patches-rt/0035-printk-nbcon-Add-context-to-console_is_usable.patch
+++ b/debian/patches-rt/0034-printk-nbcon-Add-context-to-console_is_usable.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 26 Sep 2023 14:43:30 +0000
-Subject: [PATCH 35/48] printk: nbcon: Add context to console_is_usable()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 34/48] printk: nbcon: Add context to console_is_usable()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
The nbcon consoles have two callbacks to be used for different
contexts. In order to determine if an nbcon console is usable,
@@ -14,13 +14,13 @@ Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/printk/internal.h | 16 ++++++++++------
- kernel/printk/nbcon.c | 6 +++---
+ kernel/printk/nbcon.c | 8 ++++----
kernel/printk/printk.c | 6 ++++--
- 3 files changed, 17 insertions(+), 11 deletions(-)
+ 3 files changed, 18 insertions(+), 12 deletions(-)
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
-@@ -98,7 +98,7 @@ void nbcon_kthread_create(struct console
+@@ -100,7 +100,7 @@ void nbcon_kthread_create(struct console
* which can also play a role in deciding if @con can be used to print
* records.
*/
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
if (!(flags & CON_ENABLED))
return false;
-@@ -107,10 +107,13 @@ static inline bool console_is_usable(str
+@@ -109,10 +109,13 @@ static inline bool console_is_usable(str
return false;
if (flags & CON_NBCON) {
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
} else {
if (!con->write)
return false;
-@@ -175,7 +178,8 @@ static inline void nbcon_atomic_flush_pe
+@@ -177,7 +180,8 @@ static inline void nbcon_atomic_flush_pe
static inline bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
int cookie) { return false; }
@@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/nbcon.c
+++ b/kernel/printk/nbcon.c
-@@ -968,7 +968,7 @@ static bool nbcon_kthread_should_wakeup(
+@@ -967,7 +967,7 @@ static bool nbcon_kthread_should_wakeup(
cookie = console_srcu_read_lock();
flags = console_srcu_read_flags(con);
@@ -74,21 +74,30 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- if (console_is_usable(con, con_flags)) {
+ if (console_is_usable(con, con_flags, false)) {
- con->device_lock(con, &flags);
+ unsigned long lock_flags;
- /*
-@@ -1246,7 +1246,7 @@ static void __nbcon_atomic_flush_pending
- if (!(flags & CON_NBCON))
- continue;
+ con->device_lock(con, &lock_flags);
+@@ -1322,7 +1322,7 @@ static void __nbcon_atomic_flush_pending
+ if (!(flags & CON_NBCON))
+ continue;
-- if (!console_is_usable(con, flags))
-+ if (!console_is_usable(con, flags, true))
- continue;
+- if (!console_is_usable(con, flags))
++ if (!console_is_usable(con, flags, true))
+ continue;
- if (nbcon_seq_read(con) >= stop_seq)
+ if (nbcon_seq_read(con) >= stop_seq)
+@@ -1650,7 +1650,7 @@ void nbcon_device_release(struct console
+ * the console is usable throughout flushing.
+ */
+ cookie = console_srcu_read_lock();
+- if (console_is_usable(con, console_srcu_read_flags(con)) &&
++ if (console_is_usable(con, console_srcu_read_flags(con), true) &&
+ (!con->kthread || (system_state > SYSTEM_RUNNING)) &&
+ prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
+ __nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb), false);
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -3056,7 +3056,7 @@ static bool console_flush_all(bool do_co
+@@ -3043,7 +3043,7 @@ static bool console_flush_all(bool do_co
if ((flags & CON_NBCON) && con->kthread)
continue;
@@ -97,7 +106,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
any_usable = true;
-@@ -3991,8 +3991,10 @@ static bool __pr_flush(struct console *c
+@@ -4018,8 +4018,10 @@ static bool __pr_flush(struct console *c
* that they make forward progress, so only increment
* @diff for usable consoles.
*/
diff --git a/debian/patches-rt/0036-printk-nbcon-Add-printer-thread-wakeups.patch b/debian/patches-rt/0035-printk-nbcon-Add-printer-thread-wakeups.patch
index 81701b90f0..c12f3e9f9f 100644
--- a/debian/patches-rt/0036-printk-nbcon-Add-printer-thread-wakeups.patch
+++ b/debian/patches-rt/0035-printk-nbcon-Add-printer-thread-wakeups.patch
@@ -1,17 +1,18 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 26 Sep 2023 13:03:52 +0000
-Subject: [PATCH 36/48] printk: nbcon: Add printer thread wakeups
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 35/48] printk: nbcon: Add printer thread wakeups
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
-Add a function to wakeup the printer threads. Use the new function
-when:
+Add a function to wakeup the printer threads. The printer threads
+are woken when:
- - records are added to the printk ringbuffer
+ - a record is added to the printk ringbuffer
- consoles are resumed
- triggered via printk_trigger_flush()
+ - consoles should be replayed via sysrq
-The actual waking is performed via irq_work so that the wakeup can
-be triggered from any context.
+The actual waking is performed via irq_work so that the function
+can be called from any context.
Co-developed-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: John Ogness <john.ogness@linutronix.de>
@@ -21,8 +22,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
include/linux/console.h | 3 ++
kernel/printk/internal.h | 1
kernel/printk/nbcon.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++
- kernel/printk/printk.c | 7 +++++
- 4 files changed, 67 insertions(+)
+ kernel/printk/printk.c | 8 ++++++
+ 4 files changed, 68 insertions(+)
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -34,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <linux/rculist.h>
#include <linux/rcuwait.h>
#include <linux/types.h>
-@@ -327,6 +328,7 @@ struct nbcon_drvdata {
+@@ -327,6 +328,7 @@ struct nbcon_write_context {
* @pbufs: Pointer to nbcon private buffer
* @kthread: Printer kthread for this console
* @rcuwait: RCU-safe wait object for @kthread waking
@@ -42,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
struct console {
char name[16];
-@@ -457,6 +459,7 @@ struct console {
+@@ -446,6 +448,7 @@ struct console {
struct printk_buffers *pbufs;
struct task_struct *kthread;
struct rcuwait rcuwait;
@@ -52,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_LOCKDEP
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
-@@ -91,6 +91,7 @@ void nbcon_atomic_flush_pending(void);
+@@ -93,6 +93,7 @@ void nbcon_atomic_flush_pending(void);
bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
int cookie);
void nbcon_kthread_create(struct console *con);
@@ -62,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Check if the given console is currently capable and allowed to print
--- a/kernel/printk/nbcon.c
+++ b/kernel/printk/nbcon.c
-@@ -1056,6 +1056,61 @@ static int nbcon_kthread_func(void *__co
+@@ -1058,6 +1058,61 @@ static int nbcon_kthread_func(void *__co
goto wait_for_event;
}
@@ -124,17 +125,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Track the nbcon emergency nesting per CPU. */
static DEFINE_PER_CPU(unsigned int, nbcon_pcpu_emergency_nesting);
static unsigned int early_nbcon_pcpu_emergency_nesting __initdata;
-@@ -1451,6 +1506,7 @@ void nbcon_init(struct console *con)
+@@ -1569,6 +1624,7 @@ void nbcon_init(struct console *con, u64
BUG_ON(!con->pbufs);
rcuwait_init(&con->rcuwait);
+ init_irq_work(&con->irq_work, nbcon_irq_work);
- nbcon_seq_force(con, 0);
+ nbcon_seq_force(con, init_seq);
nbcon_state_set(con, &state);
}
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -2415,6 +2415,8 @@ asmlinkage int vprintk_emit(int facility
+@@ -2402,6 +2402,8 @@ asmlinkage int vprintk_emit(int facility
}
}
@@ -143,7 +144,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (do_trylock_unlock) {
/*
* The caller may be holding system-critical or
-@@ -2721,6 +2723,10 @@ void resume_console(void)
+@@ -2708,6 +2710,10 @@ void resume_console(void)
*/
synchronize_srcu(&console_srcu);
@@ -154,7 +155,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
cookie = console_srcu_read_lock();
for_each_console_srcu(con) {
flags = console_srcu_read_flags(con);
-@@ -4151,6 +4157,7 @@ void defer_console_output(void)
+@@ -4178,6 +4184,7 @@ void defer_console_output(void)
void printk_trigger_flush(void)
{
@@ -162,3 +163,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
defer_console_output();
}
+@@ -4513,6 +4520,7 @@ void console_try_replay_all(void)
+ {
+ if (console_trylock()) {
+ __console_rewind_all();
++ nbcon_wake_threads();
+ /* Consoles are flushed as part of console_unlock(). */
+ console_unlock();
+ }
diff --git a/debian/patches-rt/0037-printk-nbcon-Stop-threads-on-shutdown-reboot.patch b/debian/patches-rt/0036-printk-nbcon-Stop-threads-on-shutdown-reboot.patch
index 28fccd7fc7..f043bc8be2 100644
--- a/debian/patches-rt/0037-printk-nbcon-Stop-threads-on-shutdown-reboot.patch
+++ b/debian/patches-rt/0036-printk-nbcon-Stop-threads-on-shutdown-reboot.patch
@@ -1,12 +1,12 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 26 Sep 2023 13:04:15 +0000
-Subject: [PATCH 37/48] printk: nbcon: Stop threads on shutdown/reboot
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 36/48] printk: nbcon: Stop threads on shutdown/reboot
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Register a syscore_ops shutdown function to stop all threaded
-printers on shutdown/reboot. This allows printk to transition back
-to atomic printing in order to provide a robust mechanism for
-outputting the final messages.
+printers on shutdown/reboot. This allows printk to cleanly
+transition back to atomic printing in order to provide a robust
+mechanism for outputting the final messages.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
@@ -24,10 +24,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <linux/types.h>
#include "internal.h"
#include "printk_ringbuffer.h"
-@@ -1577,3 +1578,33 @@ void nbcon_driver_release(struct console
- nbcon_context_release(ctxt);
+@@ -1714,3 +1715,33 @@ void nbcon_device_release(struct console
+ console_srcu_read_unlock(cookie);
}
- EXPORT_SYMBOL_GPL(nbcon_driver_release);
+ EXPORT_SYMBOL_GPL(nbcon_device_release);
+
+/**
+ * printk_kthread_shutdown - shutdown all threaded printers
diff --git a/debian/patches-rt/0038-printk-nbcon-Start-printing-threads.patch b/debian/patches-rt/0037-printk-nbcon-Start-printing-threads.patch
index 63aae75ebb..24f0a2c9c0 100644
--- a/debian/patches-rt/0038-printk-nbcon-Start-printing-threads.patch
+++ b/debian/patches-rt/0037-printk-nbcon-Start-printing-threads.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 5 Dec 2023 14:09:31 +0000
-Subject: [PATCH 38/48] printk: nbcon: Start printing threads
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 37/48] printk: nbcon: Start printing threads
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
If there are no boot consoles, the printing threads are started
in early_initcall.
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
__printf(4, 0)
int vprintk_store(int facility, int level,
-@@ -159,6 +160,7 @@ static inline void nbcon_kthread_wake(st
+@@ -161,6 +162,7 @@ static inline void nbcon_kthread_wake(st
static inline void nbcon_kthread_wake(struct console *con) { }
static inline void nbcon_kthread_create(struct console *con) { }
@@ -52,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* nbcon_context_try_acquire_direct - Try to acquire directly
* @ctxt: The context of the caller
-@@ -1436,7 +1438,7 @@ void nbcon_kthread_create(struct console
+@@ -1553,7 +1555,7 @@ void nbcon_kthread_create(struct console
if (!(con->flags & CON_NBCON) || !con->write_thread)
return;
@@ -61,7 +61,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return;
/*
-@@ -1462,6 +1464,19 @@ void nbcon_kthread_create(struct console
+@@ -1579,6 +1581,19 @@ void nbcon_kthread_create(struct console
sched_set_normal(con->kthread, -20);
}
@@ -81,9 +81,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* nbcon_alloc - Allocate buffers needed by the nbcon console
* @con: Console to allocate buffers for
-@@ -1510,6 +1525,7 @@ void nbcon_init(struct console *con)
+@@ -1628,6 +1643,7 @@ void nbcon_init(struct console *con, u64
init_irq_work(&con->irq_work, nbcon_irq_work);
- nbcon_seq_force(con, 0);
+ nbcon_seq_force(con, init_seq);
nbcon_state_set(con, &state);
+ nbcon_kthread_create(con);
}
@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -2402,6 +2402,9 @@ asmlinkage int vprintk_emit(int facility
+@@ -2389,6 +2389,9 @@ asmlinkage int vprintk_emit(int facility
*
* - When this CPU is in panic.
*
@@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* - During shutdown, since the printing threads may not get
* a chance to print the final messages.
*
-@@ -2410,6 +2413,7 @@ asmlinkage int vprintk_emit(int facility
+@@ -2397,6 +2400,7 @@ asmlinkage int vprintk_emit(int facility
* consoles cannot print simultaneously with boot consoles.
*/
if (is_panic_context ||
@@ -109,15 +109,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
(system_state > SYSTEM_RUNNING)) {
nbcon_atomic_flush_pending();
}
-@@ -3726,6 +3730,7 @@ EXPORT_SYMBOL(register_console);
- /* Must be called under console_list_lock(). */
+@@ -3742,6 +3746,7 @@ EXPORT_SYMBOL(register_console);
static int unregister_console_locked(struct console *console)
{
+ bool use_device_lock = (console->flags & CON_NBCON) && console->write_atomic;
+ bool is_boot_con = (console->flags & CON_BOOT);
bool found_legacy_con = false;
bool found_nbcon_con = false;
bool found_boot_con = false;
-@@ -3797,6 +3802,15 @@ static int unregister_console_locked(str
+@@ -3824,6 +3829,15 @@ static int unregister_console_locked(str
if (!found_nbcon_con)
have_nbcon_console = found_nbcon_con;
diff --git a/debian/patches-rt/0039-printk-Provide-helper-for-message-prepending.patch b/debian/patches-rt/0038-printk-Provide-helper-for-message-prepending.patch
index 35bfe33e7a..91ab6355fc 100644
--- a/debian/patches-rt/0039-printk-Provide-helper-for-message-prepending.patch
+++ b/debian/patches-rt/0038-printk-Provide-helper-for-message-prepending.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Thu, 28 Mar 2024 13:29:10 +0000
-Subject: [PATCH 39/48] printk: Provide helper for message prepending
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 38/48] printk: Provide helper for message prepending
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
In order to support prepending different texts to printk
messages, split out the prepending code into a helper
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -2819,30 +2819,25 @@ static void __console_unlock(void)
+@@ -2806,30 +2806,25 @@ static void __console_unlock(void)
#ifdef CONFIG_PRINTK
/*
@@ -52,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Make sure outbuf is sufficiently large before prepending.
-@@ -2865,6 +2860,27 @@ void console_prepend_dropped(struct prin
+@@ -2852,6 +2847,27 @@ void console_prepend_dropped(struct prin
}
/*
diff --git a/debian/patches-rt/0040-printk-nbcon-Show-replay-message-on-takeover.patch b/debian/patches-rt/0039-printk-nbcon-Show-replay-message-on-takeover.patch
index ab1046954b..28fc95581c 100644
--- a/debian/patches-rt/0040-printk-nbcon-Show-replay-message-on-takeover.patch
+++ b/debian/patches-rt/0039-printk-nbcon-Show-replay-message-on-takeover.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Mon, 25 Mar 2024 21:00:40 +0000
-Subject: [PATCH 40/48] printk: nbcon: Show replay message on takeover
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 39/48] printk: nbcon: Show replay message on takeover
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
An emergency or panic context can takeover console ownership
while the current owner was printing a printk message. The
@@ -26,33 +26,34 @@ the user that the previous message is being printed again.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/console.h | 2 ++
+ include/linux/console.h | 3 +++
kernel/printk/internal.h | 1 +
kernel/printk/nbcon.c | 24 ++++++++++++++++++++++++
kernel/printk/printk.c | 19 +++++++++++++++++++
- 4 files changed, 46 insertions(+)
+ 4 files changed, 47 insertions(+)
--- a/include/linux/console.h
+++ b/include/linux/console.h
-@@ -325,6 +325,7 @@ struct nbcon_drvdata {
- *
+@@ -325,6 +325,7 @@ struct nbcon_write_context {
* @nbcon_state: State for nbcon consoles
* @nbcon_seq: Sequence number of the next record for nbcon to print
+ * @nbcon_device_ctxt: Context available for non-printing operations
+ * @nbcon_prev_seq: Seq num the previous nbcon owner was assigned to print
* @pbufs: Pointer to nbcon private buffer
* @kthread: Printer kthread for this console
* @rcuwait: RCU-safe wait object for @kthread waking
-@@ -441,6 +442,7 @@ struct console {
-
+@@ -445,6 +446,8 @@ struct console {
atomic_t __private nbcon_state;
atomic_long_t __private nbcon_seq;
+ struct nbcon_context __private nbcon_device_ctxt;
+ atomic_long_t __private nbcon_prev_seq;
-
- /**
- * @nbcon_drvdata:
++
+ struct printk_buffers *pbufs;
+ struct task_struct *kthread;
+ struct rcuwait rcuwait;
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
-@@ -222,4 +222,5 @@ bool printk_get_next_message(struct prin
+@@ -233,4 +233,5 @@ bool printk_get_next_message(struct prin
#ifdef CONFIG_PRINTK
void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped);
@@ -97,17 +98,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!nbcon_context_exit_unsafe(ctxt))
return false;
-@@ -1524,6 +1547,7 @@ void nbcon_init(struct console *con)
+@@ -1642,6 +1665,7 @@ void nbcon_init(struct console *con, u64
rcuwait_init(&con->rcuwait);
init_irq_work(&con->irq_work, nbcon_irq_work);
- nbcon_seq_force(con, 0);
+ nbcon_seq_force(con, init_seq);
+ atomic_long_set(&ACCESS_PRIVATE(con, nbcon_prev_seq), -1UL);
nbcon_state_set(con, &state);
nbcon_kthread_create(con);
}
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -2881,6 +2881,25 @@ void console_prepend_dropped(struct prin
+@@ -2868,6 +2868,25 @@ void console_prepend_dropped(struct prin
}
/*
diff --git a/debian/patches-rt/0046-printk-Add-kthread-for-all-legacy-consoles.patch b/debian/patches-rt/0040-printk-Add-kthread-for-all-legacy-consoles.patch
index e29281db8f..afb049333d 100644
--- a/debian/patches-rt/0046-printk-Add-kthread-for-all-legacy-consoles.patch
+++ b/debian/patches-rt/0040-printk-Add-kthread-for-all-legacy-consoles.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Fri, 22 Sep 2023 17:35:04 +0000
-Subject: [PATCH 46/48] printk: Add kthread for all legacy consoles
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 40/48] printk: Add kthread for all legacy consoles
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
The write callback of legacy consoles makes use of spinlocks.
This is not permitted with PREEMPT_RT in atomic contexts.
@@ -23,10 +23,10 @@ These changes only affect CONFIG_PREEMPT_RT.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/printk/internal.h | 20 +++
- kernel/printk/nbcon.c | 50 +++++----
- kernel/printk/printk.c | 252 +++++++++++++++++++++++++++++++++++++----------
- 3 files changed, 251 insertions(+), 71 deletions(-)
+ kernel/printk/internal.h | 11 +-
+ kernel/printk/nbcon.c | 60 +++++++----
+ kernel/printk/printk.c | 241 +++++++++++++++++++++++++++++++++++++++--------
+ 3 files changed, 249 insertions(+), 63 deletions(-)
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_PRINTK
#ifdef CONFIG_PRINTK_CALLER
-@@ -90,9 +96,10 @@ void nbcon_free(struct console *con);
+@@ -92,9 +98,10 @@ void nbcon_free(struct console *con);
enum nbcon_prio nbcon_get_default_prio(void);
void nbcon_atomic_flush_pending(void);
bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Check if the given console is currently capable and allowed to print
-@@ -179,7 +186,7 @@ static inline void nbcon_free(struct con
+@@ -181,7 +188,7 @@ static inline void nbcon_free(struct con
static inline enum nbcon_prio nbcon_get_default_prio(void) { return NBCON_PRIO_NONE; }
static inline void nbcon_atomic_flush_pending(void) { }
static inline bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
@@ -64,38 +64,22 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline bool console_is_usable(struct console *con, short flags,
bool use_atomic) { return false; }
-@@ -187,6 +194,15 @@ static inline bool console_is_usable(str
- #endif /* CONFIG_PRINTK */
-
- extern bool have_boot_console;
-+extern bool have_legacy_console;
-+
-+/*
-+ * Specifies if the console lock/unlock dance is needed for console
-+ * printing. If @have_boot_console is true, the nbcon consoles will
-+ * be printed serially along with the legacy consoles because nbcon
-+ * consoles cannot print simultaneously with boot consoles.
-+ */
-+#define printing_via_unlock (have_legacy_console || have_boot_console)
-
- extern struct printk_buffers printk_shared_pbufs;
-
--- a/kernel/printk/nbcon.c
+++ b/kernel/printk/nbcon.c
-@@ -1202,9 +1202,10 @@ static __ref unsigned int *nbcon_get_cpu
+@@ -1185,9 +1185,10 @@ enum nbcon_prio nbcon_get_default_prio(v
}
- /**
+ /*
- * nbcon_atomic_emit_one - Print one record for an nbcon console using the
- * write_atomic() callback
+ * nbcon_emit_one - Print one record for an nbcon console using the
+ * specified callback
* @wctxt: An initialized write context struct to use for this context
-+ * @use_atomic: True if the write_atomic callback is to be used
++ * @use_atomic: True if the write_atomic() callback is to be used
*
- * Return: False if it is known there are no more records to print,
- * otherwise true.
-@@ -1212,7 +1213,7 @@ static __ref unsigned int *nbcon_get_cpu
+ * Return: True, when a record has been printed and there are still
+ * pending records. The caller might want to continue flushing.
+@@ -1200,7 +1201,7 @@ enum nbcon_prio nbcon_get_default_prio(v
* This is an internal helper to handle the locking of the console before
* calling nbcon_emit_next_record().
*/
@@ -104,24 +88,24 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
-@@ -1224,7 +1225,7 @@ static bool nbcon_atomic_emit_one(struct
- * handed over or taken over. In both cases the context is no
- * longer valid.
+@@ -1215,7 +1216,7 @@ static bool nbcon_atomic_emit_one(struct
+ * The higher priority printing context takes over responsibility
+ * to print the pending records.
*/
- if (!nbcon_emit_next_record(wctxt, true))
+ if (!nbcon_emit_next_record(wctxt, use_atomic))
- return true;
+ return false;
nbcon_context_release(ctxt);
-@@ -1263,6 +1264,7 @@ enum nbcon_prio nbcon_get_default_prio(v
+@@ -1232,6 +1233,7 @@ static bool nbcon_atomic_emit_one(struct
* both the console_lock and the SRCU read lock. Otherwise it
* is set to false.
* @cookie: The cookie from the SRCU read lock.
-+ * @use_atomic: True if the write_atomic callback is to be used
++ * @use_atomic: True if the write_atomic() callback is to be used
*
* Context: Any context except NMI.
- * Return: False if the given console has no next record to print,
-@@ -1273,7 +1275,7 @@ enum nbcon_prio nbcon_get_default_prio(v
+ * Return: True, when a record has been printed and there are still
+@@ -1247,26 +1249,38 @@ static bool nbcon_atomic_emit_one(struct
* Essentially it is the nbcon version of console_emit_next_record().
*/
bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
@@ -130,9 +114,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct nbcon_write_context wctxt = { };
struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
-@@ -1282,19 +1284,29 @@ bool nbcon_legacy_emit_next_record(struc
-
- *handover = false;
+ unsigned long flags;
+ bool progress;
- /* Use the same procedure as console_emit_next_record(). */
- printk_safe_enter_irqsave(flags);
@@ -162,6 +145,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ *handover = console_lock_spinning_disable_and_check(cookie);
+ printk_safe_exit_irqrestore(flags);
+ } else {
++ *handover = false;
++
+ con->device_lock(con, &flags);
+ cant_migrate();
+
@@ -173,7 +158,29 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return progress;
}
-@@ -1536,6 +1548,8 @@ static int __init printk_setup_threads(v
+@@ -1494,7 +1508,9 @@ void nbcon_cpu_emergency_exit(void)
+ * to handle it.
+ */
+ do_trigger_flush = true;
+- if (printing_via_unlock && !is_printk_deferred()) {
++ if (!force_printkthreads() &&
++ printing_via_unlock &&
++ !is_printk_deferred()) {
+ if (console_trylock()) {
+ do_trigger_flush = false;
+ console_unlock();
+@@ -1541,7 +1557,9 @@ void nbcon_cpu_emergency_flush(void)
+
+ nbcon_atomic_flush_pending();
+
+- if (printing_via_unlock && !is_printk_deferred()) {
++ if (!force_printkthreads() &&
++ printing_via_unlock &&
++ !is_printk_deferred()) {
+ if (console_trylock())
+ console_unlock();
+ }
+@@ -1612,6 +1630,8 @@ static int __init printk_setup_threads(v
printk_threads_enabled = true;
for_each_console(con)
nbcon_kthread_create(con);
@@ -184,27 +191,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -468,7 +468,7 @@ static DEFINE_MUTEX(syslog_lock);
- * present, it is necessary to perform the console lock/unlock dance
- * whenever console flushing should occur.
- */
--static bool have_legacy_console;
-+bool have_legacy_console;
-
- /*
- * Specifies if an nbcon console is registered. If nbcon consoles are present,
-@@ -485,16 +485,11 @@ static bool have_nbcon_console;
- */
- bool have_boot_console;
+@@ -487,6 +487,9 @@ bool have_boot_console;
--/*
-- * Specifies if the console lock/unlock dance is needed for console
-- * printing. If @have_boot_console is true, the nbcon consoles will
-- * be printed serially along with the legacy consoles because nbcon
-- * consoles cannot print simultaneously with boot consoles.
-- */
--#define printing_via_unlock (have_legacy_console || have_boot_console)
--
#ifdef CONFIG_PRINTK
DECLARE_WAIT_QUEUE_HEAD(log_wait);
+
@@ -213,17 +201,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* All 3 protected by @syslog_lock. */
/* the next printk record to read by syslog(READ) or /proc/kmsg */
static u64 syslog_seq;
-@@ -2358,7 +2353,8 @@ asmlinkage int vprintk_emit(int facility
+@@ -2345,7 +2348,8 @@ asmlinkage int vprintk_emit(int facility
const struct dev_printk_info *dev_info,
const char *fmt, va_list args)
{
- bool do_trylock_unlock = printing_via_unlock;
-+ bool do_trylock_unlock = printing_via_unlock &&
-+ !force_printkthreads();
++ bool do_trylock_unlock = !force_printkthreads() &&
++ printing_via_unlock;
int printed_len;
/* Suppress unimportant messages after panic happens */
-@@ -2481,6 +2477,14 @@ EXPORT_SYMBOL(_printk);
+@@ -2468,6 +2472,14 @@ EXPORT_SYMBOL(_printk);
static bool pr_flush(int timeout_ms, bool reset_on_progress);
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
@@ -238,7 +226,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#else /* CONFIG_PRINTK */
#define printk_time false
-@@ -2494,6 +2498,8 @@ static u64 syslog_seq;
+@@ -2481,6 +2493,8 @@ static u64 syslog_seq;
static bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; }
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
@@ -247,7 +235,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* CONFIG_PRINTK */
#ifdef CONFIG_EARLY_PRINTK
-@@ -2739,6 +2745,8 @@ void resume_console(void)
+@@ -2726,6 +2740,8 @@ void resume_console(void)
}
console_srcu_read_unlock(cookie);
@@ -256,17 +244,18 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_flush(1000, true);
}
-@@ -2753,7 +2761,8 @@ void resume_console(void)
+@@ -2740,7 +2756,9 @@ void resume_console(void)
*/
static int console_cpu_notify(unsigned int cpu)
{
- if (!cpuhp_tasks_frozen && printing_via_unlock) {
-+ if (!cpuhp_tasks_frozen && printing_via_unlock &&
-+ !force_printkthreads()) {
++ if (!force_printkthreads() &&
++ !cpuhp_tasks_frozen &&
++ printing_via_unlock) {
/* If trylock fails, someone else is doing the printing */
if (console_trylock())
console_unlock();
-@@ -3013,31 +3022,43 @@ static bool console_emit_next_record(str
+@@ -3000,31 +3018,43 @@ static bool console_emit_next_record(str
con->dropped = 0;
}
@@ -283,7 +272,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- printk_safe_enter_irqsave(flags);
- console_lock_spinning_enable();
+ /* Write everything out to the hardware. */
-+
+
+- /* Do not trace print latency. */
+- stop_critical_timings();
+ if (force_printkthreads()) {
+ /*
+ * With forced threading this function is either in a thread
@@ -291,8 +282,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ * printk reentrance or handovers.
+ */
-- /* Do not trace print latency. */
-- stop_critical_timings();
+- /* Write everything out to the hardware. */
+- con->write(con, outbuf, pmsg.outbuf_len);
+ con->write(con, outbuf, pmsg.outbuf_len);
+ con->seq = pmsg.seq + 1;
+ } else {
@@ -309,19 +300,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ printk_safe_enter_irqsave(flags);
+ console_lock_spinning_enable();
-- /* Write everything out to the hardware. */
-- con->write(con, outbuf, pmsg.outbuf_len);
+- start_critical_timings();
+ /* Do not trace print latency. */
+ stop_critical_timings();
-- start_critical_timings();
-+ con->write(con, outbuf, pmsg.outbuf_len);
-
- con->seq = pmsg.seq + 1;
-+ start_critical_timings();
++ con->write(con, outbuf, pmsg.outbuf_len);
- *handover = console_lock_spinning_disable_and_check(cookie);
- printk_safe_exit_irqrestore(flags);
++ start_critical_timings();
++
+ con->seq = pmsg.seq + 1;
+
+ *handover = console_lock_spinning_disable_and_check(cookie);
@@ -330,7 +319,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
skip:
return true;
}
-@@ -3101,12 +3122,13 @@ static bool console_flush_all(bool do_co
+@@ -3088,12 +3118,13 @@ static bool console_flush_all(bool do_co
if ((flags & CON_NBCON) && con->kthread)
continue;
@@ -346,7 +335,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
printk_seq = nbcon_seq_read(con);
} else {
progress = console_emit_next_record(con, handover, cookie);
-@@ -3145,19 +3167,7 @@ static bool console_flush_all(bool do_co
+@@ -3132,19 +3163,7 @@ static bool console_flush_all(bool do_co
return false;
}
@@ -367,7 +356,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
bool do_cond_resched;
bool handover;
-@@ -3201,6 +3211,32 @@ void console_unlock(void)
+@@ -3188,6 +3207,32 @@ void console_unlock(void)
*/
} while (prb_read_valid(prb, next_seq, NULL) && console_trylock());
}
@@ -400,12 +389,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
EXPORT_SYMBOL(console_unlock);
/**
-@@ -3410,11 +3446,106 @@ void console_start(struct console *conso
-
+@@ -3411,12 +3456,107 @@ void console_start(struct console *conso
+ flags = console_srcu_read_flags(console);
if (flags & CON_NBCON)
nbcon_kthread_wake(console);
+ else
+ wake_up_legacy_kthread();
+ console_srcu_read_unlock(cookie);
__pr_flush(console, 1000, true);
}
@@ -433,7 +423,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ if ((flags & CON_NBCON) && con->kthread)
+ continue;
+
-+ if (!console_is_usable(con, flags, true))
++ if (!console_is_usable(con, flags, false))
+ continue;
+
+ if (flags & CON_NBCON) {
@@ -507,15 +497,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static int __read_mostly keep_bootcon;
static int __init keep_bootcon_setup(char *str)
-@@ -3691,6 +3822,7 @@ void register_console(struct console *ne
- newcon->seq = 0;
+@@ -3706,6 +3846,7 @@ void register_console(struct console *ne
} else {
have_legacy_console = true;
+ newcon->seq = init_seq;
+ nbcon_legacy_kthread_create();
}
if (newcon->flags & CON_BOOT)
-@@ -3846,6 +3978,13 @@ static int unregister_console_locked(str
+@@ -3873,6 +4014,13 @@ static int unregister_console_locked(str
nbcon_kthread_create(c);
}
@@ -529,22 +519,20 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return res;
}
-@@ -4004,8 +4143,12 @@ static bool __pr_flush(struct console *c
+@@ -4031,7 +4179,11 @@ static bool __pr_flush(struct console *c
seq = prb_next_reserve_seq(prb);
- /* Flush the consoles so that records up to @seq are printed. */
-- if (printing_via_unlock) {
+ /*
+ * Flush the consoles so that records up to @seq are printed.
+ * Otherwise this function will just wait for the threaded printers
+ * to print up to @seq.
+ */
-+ if (printing_via_unlock && !force_printkthreads()) {
+ if (printing_via_unlock) {
console_lock();
console_unlock();
- }
-@@ -4119,9 +4262,16 @@ static void wake_up_klogd_work_func(stru
+@@ -4146,9 +4298,16 @@ static void wake_up_klogd_work_func(stru
int pending = this_cpu_xchg(printk_pending, 0);
if (pending & PRINTK_PENDING_OUTPUT) {
diff --git a/debian/patches-rt/0041-proc-consoles-Add-notation-to-c_start-c_stop.patch b/debian/patches-rt/0041-proc-consoles-Add-notation-to-c_start-c_stop.patch
new file mode 100644
index 0000000000..f31e4da245
--- /dev/null
+++ b/debian/patches-rt/0041-proc-consoles-Add-notation-to-c_start-c_stop.patch
@@ -0,0 +1,34 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Thu, 2 May 2024 08:02:58 +0000
+Subject: [PATCH 41/48] proc: consoles: Add notation to c_start/c_stop
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+fs/proc/consoles.c:78:13: warning: context imbalance in 'c_start'
+ - wrong count at exit
+fs/proc/consoles.c:104:13: warning: context imbalance in 'c_stop'
+ - unexpected unlock
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ fs/proc/consoles.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/proc/consoles.c
++++ b/fs/proc/consoles.c
+@@ -68,6 +68,7 @@ static int show_console_dev(struct seq_f
+ }
+
+ static void *c_start(struct seq_file *m, loff_t *pos)
++ __acquires(&console_mutex)
+ {
+ struct console *con;
+ loff_t off = 0;
+@@ -94,6 +95,7 @@ static void *c_next(struct seq_file *m,
+ }
+
+ static void c_stop(struct seq_file *m, void *v)
++ __releases(&console_mutex)
+ {
+ console_list_unlock();
+ }
diff --git a/debian/patches-rt/0041-proc-Add-nbcon-support-for-proc-consoles.patch b/debian/patches-rt/0042-proc-Add-nbcon-support-for-proc-consoles.patch
index edae68d991..1c27e7f9aa 100644
--- a/debian/patches-rt/0041-proc-Add-nbcon-support-for-proc-consoles.patch
+++ b/debian/patches-rt/0042-proc-Add-nbcon-support-for-proc-consoles.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 26 Sep 2023 13:31:00 +0000
-Subject: [PATCH 41/48] proc: Add nbcon support for /proc/consoles
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 42/48] proc: Add nbcon support for /proc/consoles
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Update /proc/consoles output to show 'W' if an nbcon write
callback is implemented (write_atomic or write_thread).
diff --git a/debian/patches-rt/0042-tty-sysfs-Add-nbcon-support-for-active.patch b/debian/patches-rt/0043-tty-sysfs-Add-nbcon-support-for-active.patch
index 0c5dc15561..9f6999349e 100644
--- a/debian/patches-rt/0042-tty-sysfs-Add-nbcon-support-for-active.patch
+++ b/debian/patches-rt/0043-tty-sysfs-Add-nbcon-support-for-active.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Fri, 22 Sep 2023 14:31:09 +0000
-Subject: [PATCH 42/48] tty: sysfs: Add nbcon support for 'active'
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 43/48] tty: sysfs: Add nbcon support for 'active'
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Allow the 'active' attribute to list nbcon consoles.
diff --git a/debian/patches-rt/0047-printk-Provide-threadprintk-boot-argument.patch b/debian/patches-rt/0044-printk-Provide-threadprintk-boot-argument.patch
index cc47be3205..dfe4ed595c 100644
--- a/debian/patches-rt/0047-printk-Provide-threadprintk-boot-argument.patch
+++ b/debian/patches-rt/0044-printk-Provide-threadprintk-boot-argument.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 6 Feb 2024 14:19:34 +0000
-Subject: [PATCH 47/48] printk: Provide threadprintk boot argument
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 44/48] printk: Provide threadprintk boot argument
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
For PREEMPT_RT, legacy console printing is performed in a dedicated
kthread. However, this behavior can also be interesting for other
@@ -12,7 +12,7 @@ Provide a new boot argument "threadprintk" that will create the
dedicated kthread for legacy console printing for !PREEMPT_RT
systems.
-The implementation is the same as "threadirqs" boot argument.
+The implementation is the same as the "threadirqs" boot argument.
Users should be aware that if this option is enabled, the shutdown,
reboot, and panic messages probably will not be visible on the
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
-@@ -6508,6 +6508,18 @@
+@@ -6596,6 +6596,18 @@
Force threading of all interrupt handlers except those
marked explicitly IRQF_NO_THREAD.
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ driver is legacy or not. Non-legacy (NBCON) console
+ drivers are already threaded and are shown with 'N'.
+
- topology= [S390]
+ topology= [S390,EARLY]
Format: {off | on}
Specify if the kernel should make use of the cpu
--- a/kernel/printk/internal.h
diff --git a/debian/patches-rt/0048-printk-Avoid-false-positive-lockdep-report-for-legac.patch b/debian/patches-rt/0045-printk-Avoid-false-positive-lockdep-report-for-legac.patch
index 510b3c0808..937d22b85e 100644
--- a/debian/patches-rt/0048-printk-Avoid-false-positive-lockdep-report-for-legac.patch
+++ b/debian/patches-rt/0045-printk-Avoid-false-positive-lockdep-report-for-legac.patch
@@ -1,8 +1,8 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Mon, 11 Dec 2023 09:34:16 +0000
-Subject: [PATCH 48/48] printk: Avoid false positive lockdep report for legacy
+Subject: [PATCH 45/48] printk: Avoid false positive lockdep report for legacy
printing
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Legacy console printing from printk() caller context may invoke
the console driver from atomic context. This leads to a lockdep
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -2985,6 +2985,33 @@ bool printk_get_next_message(struct prin
+@@ -2981,6 +2981,33 @@ bool printk_get_next_message(struct prin
}
/*
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Used as the printk buffers for non-panic, serialized console printing.
* This is for legacy (!CON_NBCON) as well as all boot (CON_BOOT) consoles.
* Its usage requires the console_lock held.
-@@ -3039,7 +3066,7 @@ static bool console_emit_next_record(str
+@@ -3035,7 +3062,7 @@ static bool console_emit_next_record(str
/*
* With forced threading this function is either in a thread
* or panic context. So there is no need for concern about
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
con->write(con, outbuf, pmsg.outbuf_len);
-@@ -3061,7 +3088,9 @@ static bool console_emit_next_record(str
+@@ -3057,7 +3084,9 @@ static bool console_emit_next_record(str
/* Do not trace print latency. */
stop_critical_timings();
diff --git a/debian/patches-rt/0043-printk-nbcon-Provide-function-to-reacquire-ownership.patch b/debian/patches-rt/0046-printk-nbcon-Add-function-for-printers-to-reacquire-.patch
index 5dd9fe97d7..75a090edc3 100644
--- a/debian/patches-rt/0043-printk-nbcon-Provide-function-to-reacquire-ownership.patch
+++ b/debian/patches-rt/0046-printk-nbcon-Add-function-for-printers-to-reacquire-.patch
@@ -1,51 +1,50 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Fri, 20 Oct 2023 10:01:58 +0000
-Subject: [PATCH 43/48] printk: nbcon: Provide function to reacquire ownership
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
-
-Contexts may become nbcon owners for various reasons, not just
-for printing. Indeed, the port->lock wrapper takes ownership
-for anything relating to the hardware.
+Subject: [PATCH 46/48] printk: nbcon: Add function for printers to reacquire
+ ownership
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Since ownership can be lost at any time due to handover or
-takeover, a context _should_ be prepared to back out
-immediately and carefully. However, there are many scenarios
-where the context _must_ reacquire ownership in order to
+takeover, a printing context _must_ be prepared to back out
+immediately and carefully. However, there are scenarios where
+the printing context must reacquire ownership in order to
finalize or revert hardware changes.
-One such example is when interrupts are disabled by a context.
-No other context will automagically re-enable the interrupts.
-For this case, the disabling context _must_ reacquire nbcon
-ownership so that it can re-enable the interrupts.
+One such example is when interrupts are disabled during
+printing. No other context will automagically re-enable the
+interrupts. For this case, the disabling context _must_
+reacquire nbcon ownership so that it can re-enable the
+interrupts.
-Provide nbcon_reacquire() for exactly this purpose.
+Provide nbcon_reacquire() for exactly this purpose. It allows a
+printing context to reacquire ownership using the same priority
+as its previous ownership.
-Note that for printing contexts, after a successful reacquire
-the context will have no output buffer because that has been
-lost. nbcon_reacquire() cannot be used to resume printing.
+Note that after a successful reacquire the printing context
+will have no output buffer because that has been lost. This
+function cannot be used to resume printing.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/console.h | 7 +++++++
+ include/linux/console.h | 6 ++++++
kernel/printk/nbcon.c | 41 +++++++++++++++++++++++++++++++++++++++++
- 2 files changed, 48 insertions(+)
+ 2 files changed, 47 insertions(+)
--- a/include/linux/console.h
+++ b/include/linux/console.h
-@@ -371,6 +371,11 @@ struct console {
- * functions are also points of possible ownership transfer. If
- * either function returns false, ownership has been lost.
+@@ -372,6 +372,10 @@ struct console {
*
+ * The callback should allow the takeover whenever it is safe. It
+ * increases the chance to see messages when the system is in trouble.
+ * If the driver must reacquire ownership in order to finalize or
+ * revert hardware changes, nbcon_reacquire() can be used. However,
+ * on reacquire the buffer content is no longer available. A
+ * reacquire cannot be used to resume printing.
-+ *
- * This callback can be called from any context (including NMI).
+ *
+ * The callback can be called from any context (including NMI).
* Therefore it must avoid usage of any locking and instead rely
- * on the console ownership for synchronization.
-@@ -595,12 +600,14 @@ extern void nbcon_cpu_emergency_exit(voi
+@@ -591,6 +595,7 @@ extern void nbcon_cpu_emergency_flush(vo
extern bool nbcon_can_proceed(struct nbcon_write_context *wctxt);
extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt);
extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt);
@@ -53,6 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#else
static inline void nbcon_cpu_emergency_enter(void) { }
static inline void nbcon_cpu_emergency_exit(void) { }
+@@ -598,6 +603,7 @@ static inline void nbcon_cpu_emergency_f
static inline bool nbcon_can_proceed(struct nbcon_write_context *wctxt) { return false; }
static inline bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) { return false; }
static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; }
@@ -66,20 +66,20 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
EXPORT_SYMBOL_GPL(nbcon_exit_unsafe);
/**
-+ * nbcon_reacquire - Reacquire a console after losing ownership
-+ * @wctxt: The write context that was handed to the write function
++ * nbcon_reacquire - Reacquire a console after losing ownership while printing
++ * @wctxt: The write context that was handed to the write callback
+ *
+ * Since ownership can be lost at any time due to handover or takeover, a
-+ * printing context _should_ be prepared to back out immediately and
-+ * carefully. However, there are many scenarios where the context _must_
++ * printing context _must_ be prepared to back out immediately and
++ * carefully. However, there are scenarios where the printing context must
+ * reacquire ownership in order to finalize or revert hardware changes.
+ *
-+ * This function allows a context to reacquire ownership using the same
-+ * priority as its previous ownership.
++ * This function allows a printing context to reacquire ownership using the
++ * same priority as its previous ownership.
+ *
-+ * Note that for printing contexts, after a successful reacquire the
-+ * context will have no output buffer because that has been lost. This
-+ * function cannot be used to resume printing.
++ * Note that after a successful reacquire the printing context will have no
++ * output buffer because that has been lost. This function cannot be used to
++ * resume printing.
+ */
+void nbcon_reacquire(struct nbcon_write_context *wctxt)
+{
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+/**
* nbcon_emit_next_record - Emit a record in the acquired context
* @wctxt: The write context that will be handed to the write function
- * @use_atomic: True if the write_atomic callback is to be used
+ * @use_atomic: True if the write_atomic() callback is to be used
@@ -944,6 +976,15 @@ static bool nbcon_emit_next_record(struc
nbcon_context_release(ctxt);
return false;
@@ -109,7 +109,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ if (!wctxt->outbuf) {
+ /*
+ * Ownership was lost and reacquired by the driver.
-+ * Handle it as if ownership was lost and try to continue.
++ * Handle it as if ownership was lost.
+ */
+ nbcon_context_release(ctxt);
+ return false;
diff --git a/debian/patches-rt/0044-serial-8250-Switch-to-nbcon-console.patch b/debian/patches-rt/0047-serial-8250-Switch-to-nbcon-console.patch
index 22edc18426..ffefc3a024 100644
--- a/debian/patches-rt/0044-serial-8250-Switch-to-nbcon-console.patch
+++ b/debian/patches-rt/0047-serial-8250-Switch-to-nbcon-console.patch
@@ -1,14 +1,14 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Wed, 13 Sep 2023 15:30:36 +0000
-Subject: [PATCH 44/48] serial: 8250: Switch to nbcon console
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Subject: [PATCH 47/48] serial: 8250: Switch to nbcon console
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Implement the necessary callbacks to switch the 8250 console driver
to perform as an nbcon console.
Add implementations for the nbcon console callbacks (write_atomic,
-write_thread, device_lock, device_unlock), provide @nbcon_drvdata, and
-add CON_NBCON to the initial flags.
+write_thread, device_lock, device_unlock) and add CON_NBCON to the
+initial flags.
The legacy code is kept in order to easily switch back to legacy mode
by defining CONFIG_SERIAL_8250_LEGACY_CONSOLE.
@@ -16,14 +16,14 @@ by defining CONFIG_SERIAL_8250_LEGACY_CONSOLE.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- drivers/tty/serial/8250/8250_core.c | 45 ++++++++++
+ drivers/tty/serial/8250/8250_core.c | 42 +++++++++
drivers/tty/serial/8250/8250_port.c | 154 +++++++++++++++++++++++++++++++++++-
include/linux/serial_8250.h | 6 +
- 3 files changed, 202 insertions(+), 3 deletions(-)
+ 3 files changed, 199 insertions(+), 3 deletions(-)
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
-@@ -592,6 +592,7 @@ serial8250_register_ports(struct uart_dr
+@@ -589,6 +589,7 @@ serial8250_register_ports(struct uart_dr
#ifdef CONFIG_SERIAL_8250_CONSOLE
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void univ8250_console_write(struct console *co, const char *s,
unsigned int count)
{
-@@ -599,6 +600,39 @@ static void univ8250_console_write(struc
+@@ -596,6 +597,37 @@ static void univ8250_console_write(struc
serial8250_console_write(up, s, count);
}
@@ -65,13 +65,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+ __uart_port_unlock_irqrestore(up, flags);
+}
-+
-+static struct nbcon_drvdata serial8250_nbcon_drvdata;
+#endif /* CONFIG_SERIAL_8250_LEGACY_CONSOLE */
static int univ8250_console_setup(struct console *co, char *options)
{
-@@ -698,12 +732,21 @@ static int univ8250_console_match(struct
+@@ -695,12 +727,20 @@ static int univ8250_console_match(struct
static struct console univ8250_console = {
.name = "ttyS",
@@ -84,7 +82,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ .device_lock = univ8250_console_device_lock,
+ .device_unlock = univ8250_console_device_unlock,
+ .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_NBCON,
-+ .nbcon_drvdata = &serial8250_nbcon_drvdata,
+#endif
.device = uart_console_device,
.setup = univ8250_console_setup,
@@ -96,7 +93,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
};
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
-@@ -550,6 +550,13 @@ static int serial8250_em485_init(struct
+@@ -546,6 +546,13 @@ static int serial8250_em485_init(struct
if (!p->em485)
return -ENOMEM;
@@ -110,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
hrtimer_init(&p->em485->stop_tx_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
hrtimer_init(&p->em485->start_tx_timer, CLOCK_MONOTONIC,
-@@ -702,7 +709,11 @@ static void serial8250_set_sleep(struct
+@@ -691,7 +698,11 @@ static void serial8250_set_sleep(struct
serial8250_rpm_put(p);
}
@@ -123,7 +120,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
if (up->capabilities & UART_CAP_UUE)
serial_out(up, UART_IER, UART_IER_UUE);
-@@ -710,6 +721,11 @@ static void serial8250_clear_IER(struct
+@@ -699,6 +710,11 @@ static void serial8250_clear_IER(struct
serial_out(up, UART_IER, 0);
}
@@ -135,7 +132,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_SERIAL_8250_RSA
/*
* Attempts to turn on the RSA FIFO. Returns zero on failure.
-@@ -3320,6 +3336,11 @@ static void serial8250_console_putchar(s
+@@ -3269,6 +3285,11 @@ static void serial8250_console_putchar(s
wait_for_xmitr(up, UART_LSR_THRE);
serial_port_out(port, UART_TX, ch);
@@ -147,7 +144,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -3348,6 +3369,7 @@ static void serial8250_console_restore(s
+@@ -3297,6 +3318,7 @@ static void serial8250_console_restore(s
serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS);
}
@@ -155,7 +152,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Print a string to the serial port using the device FIFO
*
-@@ -3406,7 +3428,7 @@ void serial8250_console_write(struct uar
+@@ -3355,7 +3377,7 @@ void serial8250_console_write(struct uar
* First save the IER then disable the interrupts
*/
ier = serial_port_in(port, UART_IER);
@@ -164,7 +161,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* check scratch reg to see if port powered off during system sleep */
if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
-@@ -3472,6 +3494,131 @@ void serial8250_console_write(struct uar
+@@ -3421,6 +3443,131 @@ void serial8250_console_write(struct uar
if (locked)
uart_port_unlock_irqrestore(port, flags);
}
@@ -296,7 +293,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static unsigned int probe_baud(struct uart_port *port)
{
-@@ -3490,6 +3637,7 @@ static unsigned int probe_baud(struct ua
+@@ -3439,6 +3586,7 @@ static unsigned int probe_baud(struct ua
int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
{
@@ -304,7 +301,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int baud = 9600;
int bits = 8;
int parity = 'n';
-@@ -3499,6 +3647,8 @@ int serial8250_console_setup(struct uart
+@@ -3448,6 +3596,8 @@ int serial8250_console_setup(struct uart
if (!port->iobase && !port->membase)
return -ENODEV;
diff --git a/debian/patches-rt/0045-serial-8250-Revert-drop-lockdep-annotation-from-seri.patch b/debian/patches-rt/0048-serial-8250-Revert-drop-lockdep-annotation-from-seri.patch
index e9544b9f8c..3a02162768 100644
--- a/debian/patches-rt/0045-serial-8250-Revert-drop-lockdep-annotation-from-seri.patch
+++ b/debian/patches-rt/0048-serial-8250-Revert-drop-lockdep-annotation-from-seri.patch
@@ -1,8 +1,8 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Mon, 2 Oct 2023 15:30:43 +0000
-Subject: [PATCH 45/48] serial: 8250: Revert "drop lockdep annotation from
+Subject: [PATCH 48/48] serial: 8250: Revert "drop lockdep annotation from
serial8250_clear_IER()"
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
The 8250 driver no longer depends on @oops_in_progress and
will no longer violate the port->lock locking constraints.
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
-@@ -723,6 +723,9 @@ static void __serial8250_clear_IER(struc
+@@ -712,6 +712,9 @@ static void __serial8250_clear_IER(struc
static inline void serial8250_clear_IER(struct uart_8250_port *up)
{
diff --git a/debian/patches-rt/ARM64__Allow_to_enable_RT.patch b/debian/patches-rt/ARM64__Allow_to_enable_RT.patch
index 9f85cf15b9..7f9b640ce0 100644
--- a/debian/patches-rt/ARM64__Allow_to_enable_RT.patch
+++ b/debian/patches-rt/ARM64__Allow_to_enable_RT.patch
@@ -1,7 +1,7 @@
Subject: ARM64: Allow to enable RT
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri Oct 11 13:14:35 2019 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
diff --git a/debian/patches-rt/ARM__Allow_to_enable_RT.patch b/debian/patches-rt/ARM__Allow_to_enable_RT.patch
index 08fb67f116..7db905a726 100644
--- a/debian/patches-rt/ARM__Allow_to_enable_RT.patch
+++ b/debian/patches-rt/ARM__Allow_to_enable_RT.patch
@@ -1,7 +1,7 @@
Subject: ARM: Allow to enable RT
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri Oct 11 13:14:29 2019 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
@@ -17,15 +17,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -36,6 +36,7 @@ config ARM
- select ARCH_SUPPORTS_ATOMIC_RMW
+@@ -38,6 +38,7 @@ config ARM
+ select ARCH_SUPPORTS_CFI_CLANG
select ARCH_SUPPORTS_HUGETLBFS if ARM_LPAE
select ARCH_SUPPORTS_PER_VMA_LOCK
+ select ARCH_SUPPORTS_RT if HAVE_POSIX_CPU_TIMERS_TASK_WORK
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_USE_MEMTEST
-@@ -120,6 +121,7 @@ config ARM
+@@ -123,6 +124,7 @@ config ARM
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
diff --git a/debian/patches-rt/ARM__enable_irq_in_translation_section_permission_fault_handlers.patch b/debian/patches-rt/ARM__enable_irq_in_translation_section_permission_fault_handlers.patch
index 9ca02d6a8b..553a0159d7 100644
--- a/debian/patches-rt/ARM__enable_irq_in_translation_section_permission_fault_handlers.patch
+++ b/debian/patches-rt/ARM__enable_irq_in_translation_section_permission_fault_handlers.patch
@@ -1,7 +1,7 @@
Subject: ARM: enable irq in translation/section permission fault handlers
From: Yadi.hu <yadi.hu@windriver.com>
Date: Wed Dec 10 10:32:09 2014 +0800
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
From: Yadi.hu <yadi.hu@windriver.com>
@@ -69,7 +69,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
-@@ -436,6 +436,9 @@ do_translation_fault(unsigned long addr,
+@@ -474,6 +474,9 @@ do_translation_fault(unsigned long addr,
if (addr < TASK_SIZE)
return do_page_fault(addr, fsr, regs);
@@ -79,7 +79,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (user_mode(regs))
goto bad_area;
-@@ -506,6 +509,9 @@ do_translation_fault(unsigned long addr,
+@@ -544,6 +547,9 @@ do_translation_fault(unsigned long addr,
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
diff --git a/debian/patches-rt/Add_localversion_for_-RT_release.patch b/debian/patches-rt/Add_localversion_for_-RT_release.patch
index 1dbdcfcb68..57e9de279a 100644
--- a/debian/patches-rt/Add_localversion_for_-RT_release.patch
+++ b/debian/patches-rt/Add_localversion_for_-RT_release.patch
@@ -1,7 +1,7 @@
Subject: Add localversion for -RT release
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri Jul 8 20:25:16 2011 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
From: Thomas Gleixner <tglx@linutronix.de>
@@ -16,4 +16,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt11
++-rt14
diff --git a/debian/patches-rt/POWERPC__Allow_to_enable_RT.patch b/debian/patches-rt/POWERPC__Allow_to_enable_RT.patch
index 34d9aae651..3d22d2463f 100644
--- a/debian/patches-rt/POWERPC__Allow_to_enable_RT.patch
+++ b/debian/patches-rt/POWERPC__Allow_to_enable_RT.patch
@@ -1,7 +1,7 @@
Subject: POWERPC: Allow to enable RT
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri Oct 11 13:14:41 2019 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -166,6 +166,7 @@ config PPC
+@@ -168,6 +168,7 @@ config PPC
select ARCH_STACKWALK
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC_BOOK3S || PPC_8xx || 40x
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
select ARCH_USE_MEMTEST
-@@ -270,6 +271,7 @@ config PPC
+@@ -272,6 +273,7 @@ config PPC
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE
diff --git a/debian/patches-rt/PREEMPT_AUTO.patch b/debian/patches-rt/PREEMPT_AUTO.patch
index 338abae7c3..de6d49cf41 100644
--- a/debian/patches-rt/PREEMPT_AUTO.patch
+++ b/debian/patches-rt/PREEMPT_AUTO.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 23 Sep 2023 03:11:05 +0200
Subject: [PATCH] sched: define TIF_ALLOW_RESCHED
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
On Fri, Sep 22 2023 at 00:55, Thomas Gleixner wrote:
> On Thu, Sep 21 2023 at 09:00, Linus Torvalds wrote:
@@ -122,7 +122,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -277,6 +277,7 @@ config X86
+@@ -282,6 +282,7 @@
select HAVE_STATIC_CALL
select HAVE_STATIC_CALL_INLINE if HAVE_OBJTOOL
select HAVE_PREEMPT_DYNAMIC_CALL
@@ -132,7 +132,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
select HAVE_SYSCALL_TRACEPOINTS
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
-@@ -81,8 +81,9 @@ struct thread_info {
+@@ -87,8 +87,9 @@
#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
@@ -144,7 +144,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define TIF_SPEC_IB 9 /* Indirect branch speculation mitigation */
#define TIF_SPEC_L1D_FLUSH 10 /* Flush L1D on mm switches (processes) */
#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
-@@ -104,6 +105,7 @@ struct thread_info {
+@@ -110,6 +111,7 @@
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
@@ -154,7 +154,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define _TIF_SPEC_IB (1 << TIF_SPEC_IB)
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
-@@ -108,7 +108,7 @@ static const struct dmi_system_id proces
+@@ -107,7 +107,7 @@
*/
static void __cpuidle acpi_safe_halt(void)
{
@@ -187,7 +187,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1934,17 +1934,17 @@ static inline void update_tsk_thread_fla
+@@ -1949,17 +1949,17 @@
update_ti_thread_flag(task_thread_info(tsk), flag, value);
}
@@ -208,7 +208,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
return test_ti_thread_flag(task_thread_info(tsk), flag);
}
-@@ -1957,9 +1957,11 @@ static inline void set_tsk_need_resched(
+@@ -1972,9 +1972,11 @@
static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
@@ -221,7 +221,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
-@@ -2100,7 +2102,7 @@ static inline bool preempt_model_preempt
+@@ -2074,7 +2076,7 @@
static __always_inline bool need_resched(void)
{
@@ -232,7 +232,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/include/linux/sched/idle.h
+++ b/include/linux/sched/idle.h
-@@ -63,7 +63,7 @@ static __always_inline bool __must_check
+@@ -63,7 +63,7 @@
*/
smp_mb__after_atomic();
@@ -241,7 +241,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static __always_inline bool __must_check current_clr_polling_and_test(void)
-@@ -76,7 +76,7 @@ static __always_inline bool __must_check
+@@ -76,7 +76,7 @@
*/
smp_mb__after_atomic();
@@ -250,7 +250,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#else
-@@ -85,11 +85,11 @@ static inline void __current_clr_polling
+@@ -85,11 +85,11 @@
static inline bool __must_check current_set_polling_and_test(void)
{
@@ -266,7 +266,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
-@@ -59,6 +59,16 @@ enum syscall_work_bit {
+@@ -59,6 +59,16 @@
#include <asm/thread_info.h>
@@ -283,7 +283,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef __KERNEL__
#ifndef arch_set_restart_data
-@@ -185,6 +195,13 @@ static __always_inline bool tif_need_res
+@@ -185,6 +195,13 @@
(unsigned long *)(&current_thread_info()->flags));
}
@@ -297,7 +297,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#else
static __always_inline bool tif_need_resched(void)
-@@ -193,6 +210,13 @@ static __always_inline bool tif_need_res
+@@ -193,6 +210,13 @@
(unsigned long *)(&current_thread_info()->flags));
}
@@ -313,7 +313,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
-@@ -178,8 +178,8 @@ unsigned int tracing_gen_ctx_irq_test(un
+@@ -184,8 +184,8 @@
enum trace_flag_type {
TRACE_FLAG_IRQS_OFF = 0x01,
@@ -324,7 +324,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
TRACE_FLAG_HARDIRQ = 0x08,
TRACE_FLAG_SOFTIRQ = 0x10,
TRACE_FLAG_PREEMPT_RESCHED = 0x20,
-@@ -205,11 +205,11 @@ static inline unsigned int tracing_gen_c
+@@ -211,11 +211,11 @@
static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
{
@@ -340,7 +340,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
-@@ -11,6 +11,13 @@ config PREEMPT_BUILD
+@@ -11,6 +11,13 @@
select PREEMPTION
select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
@@ -354,7 +354,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
choice
prompt "Preemption Model"
default PREEMPT_NONE
-@@ -67,9 +74,17 @@ config PREEMPT
+@@ -67,9 +74,17 @@
embedded system with latency requirements in the milliseconds
range.
@@ -372,7 +372,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
select PREEMPTION
help
This option turns the kernel into a real-time kernel by replacing
-@@ -95,7 +110,7 @@ config PREEMPTION
+@@ -95,7 +110,7 @@
config PREEMPT_DYNAMIC
bool "Preemption behaviour defined on boot"
@@ -383,7 +383,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
default y if HAVE_PREEMPT_DYNAMIC_CALL
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
-@@ -92,7 +92,7 @@ void __weak arch_do_signal_or_restart(st
+@@ -98,7 +98,7 @@
local_irq_enable_exit_to_user(ti_work);
@@ -392,7 +392,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
schedule();
if (ti_work & _TIF_UPROBE)
-@@ -301,7 +301,7 @@ void raw_irqentry_exit_cond_resched(void
+@@ -307,7 +307,7 @@
rcu_irq_exit_check_preempt();
if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
WARN_ON_ONCE(!on_thread_stack());
@@ -403,7 +403,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
--- a/kernel/entry/kvm.c
+++ b/kernel/entry/kvm.c
-@@ -13,7 +13,7 @@ static int xfer_to_guest_mode_work(struc
+@@ -13,7 +13,7 @@
return -EINTR;
}
@@ -414,7 +414,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (ti_work & _TIF_NOTIFY_RESUME)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -899,14 +899,15 @@ static inline void hrtick_rq_init(struct
+@@ -898,14 +898,15 @@
#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
@@ -433,7 +433,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -923,7 +924,7 @@ static bool set_nr_if_polling(struct tas
+@@ -922,7 +923,7 @@
do {
if (!(val & _TIF_POLLING_NRFLAG))
return false;
@@ -442,7 +442,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return true;
} while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED));
-@@ -931,9 +932,9 @@ static bool set_nr_if_polling(struct tas
+@@ -930,9 +931,9 @@
}
#else
@@ -454,7 +454,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return true;
}
-@@ -1038,28 +1039,47 @@ void wake_up_q(struct wake_q_head *head)
+@@ -1037,28 +1038,47 @@
* might also involve a cross-CPU call to trigger the scheduler on
* the target CPU.
*/
@@ -510,7 +510,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
void resched_cpu(int cpu)
-@@ -1154,7 +1174,7 @@ static void wake_up_idle_cpu(int cpu)
+@@ -1153,7 +1173,7 @@
* and testing of the above solutions didn't appear to report
* much benefits.
*/
@@ -521,7 +521,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
trace_sched_wake_idle_without_ipi(cpu);
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
-@@ -333,6 +333,23 @@ static const struct file_operations sche
+@@ -333,6 +333,23 @@
.release = seq_release,
};
@@ -545,7 +545,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static struct dentry *debugfs_sched;
static __init int sched_init_debug(void)
-@@ -374,6 +391,8 @@ static __init int sched_init_debug(void)
+@@ -374,6 +391,8 @@
debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
@@ -556,7 +556,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
late_initcall(sched_init_debug);
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -975,8 +975,10 @@ static void clear_buddies(struct cfs_rq
+@@ -974,8 +974,10 @@
* XXX: strictly: vd_i += N*r_i/w_i such that: vd_i > ve_i
* this is probably good enough.
*/
@@ -568,7 +568,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if ((s64)(se->vruntime - se->deadline) < 0)
return;
-@@ -995,10 +997,19 @@ static void update_deadline(struct cfs_r
+@@ -994,10 +996,19 @@
/*
* The task has consumed its request, reschedule.
*/
@@ -591,7 +591,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#include "pelt.h"
-@@ -1153,7 +1164,7 @@ s64 update_curr_common(struct rq *rq)
+@@ -1153,7 +1164,7 @@
/*
* Update the current task's runtime statistics.
*/
@@ -600,7 +600,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct sched_entity *curr = cfs_rq->curr;
s64 delta_exec;
-@@ -1166,7 +1177,7 @@ static void update_curr(struct cfs_rq *c
+@@ -1166,7 +1177,7 @@
return;
curr->vruntime += calc_delta_fair(delta_exec, curr);
@@ -609,7 +609,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
update_min_vruntime(cfs_rq);
if (entity_is_task(curr))
-@@ -1175,6 +1186,11 @@ static void update_curr(struct cfs_rq *c
+@@ -1175,6 +1186,11 @@
account_cfs_rq_runtime(cfs_rq, delta_exec);
}
@@ -621,7 +621,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void update_curr_fair(struct rq *rq)
{
update_curr(cfs_rq_of(&rq->curr->se));
-@@ -5493,7 +5509,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+@@ -5511,7 +5527,7 @@
/*
* Update run-time statistics of the 'current'.
*/
@@ -630,7 +630,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Ensure that runnable average is periodically updated.
-@@ -5507,7 +5523,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+@@ -5525,7 +5541,7 @@
* validating it and just reschedule.
*/
if (queued) {
@@ -639,7 +639,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return;
}
/*
-@@ -5653,7 +5669,7 @@ static void __account_cfs_rq_runtime(str
+@@ -5671,7 +5687,7 @@
* hierarchy can be throttled
*/
if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
@@ -648,7 +648,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static __always_inline
-@@ -5913,7 +5929,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cf
+@@ -5931,7 +5947,7 @@
/* Determine whether we need to wake up potentially idle CPU: */
if (rq->curr == rq->idle && rq->cfs.nr_running)
@@ -657,7 +657,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#ifdef CONFIG_SMP
-@@ -6628,7 +6644,7 @@ static void hrtick_start_fair(struct rq
+@@ -6646,7 +6662,7 @@
if (delta < 0) {
if (task_current(rq, p))
@@ -666,7 +666,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return;
}
hrtick_start(rq, delta);
-@@ -8304,7 +8320,7 @@ static void check_preempt_wakeup_fair(st
+@@ -8378,7 +8394,7 @@
* prevents us from potentially nominating it as a false LAST_BUDDY
* below.
*/
@@ -675,7 +675,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return;
/* Idle tasks are by definition preempted by non-idle tasks. */
-@@ -8346,7 +8362,7 @@ static void check_preempt_wakeup_fair(st
+@@ -8420,7 +8436,7 @@
return;
preempt:
@@ -684,7 +684,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#ifdef CONFIG_SMP
-@@ -12516,7 +12532,7 @@ static inline void task_tick_core(struct
+@@ -12566,7 +12582,7 @@
*/
if (rq->core->core_forceidle_count && rq->cfs.nr_running == 1 &&
__entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
@@ -693,7 +693,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -12681,7 +12697,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -12733,7 +12749,7 @@
*/
if (task_current(rq, p)) {
if (p->prio > oldprio)
@@ -704,7 +704,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
-@@ -87,3 +87,5 @@ SCHED_FEAT(UTIL_EST, true)
+@@ -87,3 +87,5 @@
SCHED_FEAT(LATENCY_WARN, false)
SCHED_FEAT(HZ_BW, true)
@@ -712,7 +712,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+SCHED_FEAT(FORCE_NEED_RESCHED, false)
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
-@@ -57,8 +57,7 @@ static noinline int __cpuidle cpu_idle_p
+@@ -57,8 +57,7 @@
ct_cpuidle_enter();
raw_local_irq_enable();
@@ -724,8 +724,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -2463,6 +2463,7 @@ extern void init_sched_fair_class(void);
- extern void reweight_task(struct task_struct *p, int prio);
+@@ -2467,6 +2467,7 @@
+ extern void reweight_task(struct task_struct *p, const struct load_weight *lw);
extern void resched_curr(struct rq *rq);
+extern void resched_curr_lazy(struct rq *rq);
@@ -734,7 +734,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern struct rt_bandwidth def_rt_bandwidth;
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -2717,6 +2717,8 @@ unsigned int tracing_gen_ctx_irq_test(un
+@@ -2519,6 +2519,8 @@
if (tif_need_resched())
trace_flags |= TRACE_FLAG_NEED_RESCHED;
@@ -745,7 +745,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
-@@ -460,17 +460,29 @@ int trace_print_lat_fmt(struct trace_seq
+@@ -460,17 +460,29 @@
(entry->flags & TRACE_FLAG_IRQS_OFF && bh_off) ? 'D' :
(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
bh_off ? 'b' :
diff --git a/debian/patches-rt/arm-Disable-FAST_GUP-on-PREEMPT_RT-if-HIGHPTE-is-als.patch b/debian/patches-rt/arm-Disable-FAST_GUP-on-PREEMPT_RT-if-HIGHPTE-is-als.patch
index 09dcd78d53..8fdf6bea8f 100644
--- a/debian/patches-rt/arm-Disable-FAST_GUP-on-PREEMPT_RT-if-HIGHPTE-is-als.patch
+++ b/debian/patches-rt/arm-Disable-FAST_GUP-on-PREEMPT_RT-if-HIGHPTE-is-als.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 23 Jan 2024 12:56:21 +0100
Subject: [PATCH] arm: Disable FAST_GUP on PREEMPT_RT if HIGHPTE is also
enabled.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
gup_pgd_range() is invoked with disabled interrupts and invokes
__kmap_local_page_prot() via pte_offset_map(), gup_p4d_range().
@@ -24,12 +24,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -98,7 +98,7 @@ config ARM
+@@ -100,7 +100,7 @@ config ARM
select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
select HAVE_EXIT_THREAD
-- select HAVE_FAST_GUP if ARM_LPAE
-+ select HAVE_FAST_GUP if ARM_LPAE && !(PREEMPT_RT && HIGHPTE)
+- select HAVE_GUP_FAST if ARM_LPAE
++ select HAVE_GUP_FAST if ARM_LPAE && !(PREEMPT_RT && HIGHPTE)
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUNCTION_GRAPH_TRACER
diff --git a/debian/patches-rt/bpf-Remove-tst_run-from-lwt_seg6local_prog_ops.patch b/debian/patches-rt/bpf-Remove-tst_run-from-lwt_seg6local_prog_ops.patch
new file mode 100644
index 0000000000..37d4e85805
--- /dev/null
+++ b/debian/patches-rt/bpf-Remove-tst_run-from-lwt_seg6local_prog_ops.patch
@@ -0,0 +1,40 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 10 Jul 2024 16:16:31 +0200
+Subject: [PATCH] bpf: Remove tst_run from lwt_seg6local_prog_ops.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+The syzbot reported that the lwt_seg6 related BPF ops can be invoked
+via bpf_test_run() without without entering input_action_end_bpf()
+first.
+
+Martin KaFai Lau said that self test for BPF_PROG_TYPE_LWT_SEG6LOCAL
+probably didn't work since it was introduced in commit 04d4b274e2a
+("ipv6: sr: Add seg6local action End.BPF"). The reason is that the
+per-CPU variable seg6_bpf_srh_states::srh is never assigned in the self
+test case but each BPF function expects it.
+
+Remove test_run for BPF_PROG_TYPE_LWT_SEG6LOCAL.
+
+Suggested-by: Martin KaFai Lau <martin.lau@linux.dev>
+Reported-by: syzbot+608a2acde8c5a101d07d@syzkaller.appspotmail.com
+Fixes: d1542d4ae4df ("seg6: Use nested-BH locking for seg6_bpf_srh_states.")
+Fixes: 004d4b274e2a ("ipv6: sr: Add seg6local action End.BPF")
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/r/20240710141631.FbmHcQaX@linutronix.de
+Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/core/filter.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -11047,7 +11047,6 @@ const struct bpf_verifier_ops lwt_seg6lo
+ };
+
+ const struct bpf_prog_ops lwt_seg6local_prog_ops = {
+- .test_run = bpf_prog_test_run_skb,
+ };
+
+ const struct bpf_verifier_ops cg_sock_verifier_ops = {
diff --git a/debian/patches-rt/powerpc-pseries-Select-the-generic-memory-allocator.patch b/debian/patches-rt/powerpc-pseries-Select-the-generic-memory-allocator.patch
index e1c231f6cb..82659cbff5 100644
--- a/debian/patches-rt/powerpc-pseries-Select-the-generic-memory-allocator.patch
+++ b/debian/patches-rt/powerpc-pseries-Select-the-generic-memory-allocator.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 9 Mar 2023 09:13:52 +0100
Subject: [PATCH] powerpc/pseries: Select the generic memory allocator.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
The RTAS work area allocator is using the generic memory allocator and
as such it must select it.
diff --git a/debian/patches-rt/powerpc__traps__Use_PREEMPT_RT.patch b/debian/patches-rt/powerpc__traps__Use_PREEMPT_RT.patch
index 0b7d65c60a..c780250682 100644
--- a/debian/patches-rt/powerpc__traps__Use_PREEMPT_RT.patch
+++ b/debian/patches-rt/powerpc__traps__Use_PREEMPT_RT.patch
@@ -1,7 +1,7 @@
Subject: powerpc: traps: Use PREEMPT_RT
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri Jul 26 11:30:49 2019 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
diff --git a/debian/patches-rt/powerpc_kvm__Disable_in-kernel_MPIC_emulation_for_PREEMPT_RT.patch b/debian/patches-rt/powerpc_kvm__Disable_in-kernel_MPIC_emulation_for_PREEMPT_RT.patch
index 871e35c564..97a04f4699 100644
--- a/debian/patches-rt/powerpc_kvm__Disable_in-kernel_MPIC_emulation_for_PREEMPT_RT.patch
+++ b/debian/patches-rt/powerpc_kvm__Disable_in-kernel_MPIC_emulation_for_PREEMPT_RT.patch
@@ -1,7 +1,7 @@
Subject: powerpc/kvm: Disable in-kernel MPIC emulation for PREEMPT_RT
From: Bogdan Purcareata <bogdan.purcareata@freescale.com>
Date: Fri Apr 24 15:53:13 2015 +0000
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
From: Bogdan Purcareata <bogdan.purcareata@freescale.com>
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
-@@ -222,6 +222,7 @@ config KVM_E500MC
+@@ -221,6 +221,7 @@ config KVM_E500MC
config KVM_MPIC
bool "KVM in-kernel MPIC emulation"
depends on KVM && PPC_E500
diff --git a/debian/patches-rt/powerpc_pseries_iommu__Use_a_locallock_instead_local_irq_save.patch b/debian/patches-rt/powerpc_pseries_iommu__Use_a_locallock_instead_local_irq_save.patch
index d88fbd396a..dcead3382f 100644
--- a/debian/patches-rt/powerpc_pseries_iommu__Use_a_locallock_instead_local_irq_save.patch
+++ b/debian/patches-rt/powerpc_pseries_iommu__Use_a_locallock_instead_local_irq_save.patch
@@ -1,7 +1,7 @@
Subject: powerpc/pseries/iommu: Use a locallock instead local_irq_save()
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue Mar 26 18:31:54 2019 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
diff --git a/debian/patches-rt/powerpc_stackprotector__work_around_stack-guard_init_from_atomic.patch b/debian/patches-rt/powerpc_stackprotector__work_around_stack-guard_init_from_atomic.patch
index f61851260e..5a453c8f7d 100644
--- a/debian/patches-rt/powerpc_stackprotector__work_around_stack-guard_init_from_atomic.patch
+++ b/debian/patches-rt/powerpc_stackprotector__work_around_stack-guard_init_from_atomic.patch
@@ -1,7 +1,7 @@
Subject: powerpc/stackprotector: work around stack-guard init from atomic
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue Mar 26 18:31:29 2019 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
diff --git a/debian/patches-rt/prinkt-nbcon-Add-a-scheduling-point-to-nbcon_kthread.patch b/debian/patches-rt/prinkt-nbcon-Add-a-scheduling-point-to-nbcon_kthread.patch
new file mode 100644
index 0000000000..b637815a28
--- /dev/null
+++ b/debian/patches-rt/prinkt-nbcon-Add-a-scheduling-point-to-nbcon_kthread.patch
@@ -0,0 +1,34 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 20 Jun 2024 11:21:13 +0200
+Subject: [PATCH] prinkt/nbcon: Add a scheduling point to nbcon_kthread_func().
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+Constant printing can lead to a CPU hog in nbcon_kthread_func(). The
+context is preemptible but on !PREEMPT kernels there is no explicit
+preemption point which leads softlockup warnings.
+
+Add an explicit preemption point in nbcon_kthread_func().
+
+Reported-by: Derek Barbosa <debarbos@redhat.com>
+Link: https://lore.kernel.org/ZnHF5j1DUDjN1kkq@debarbos-thinkpadt14sgen2i.remote.csb
+Reviewed-by: John Ogness <john.ogness@linutronix.de>
+Acked-by: Andrew Halaney <ahalaney@redhat.com>
+Tested-by: Andrew Halaney <ahalaney@redhat.com>
+Acked-by: Derek Barbosa <debarbos@redhat.com>
+Tested-by: Derek Barbosa <debarbos@redhat.com>
+Link: https://lore.kernel.org/r/20240620094300.YJlW043f@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/nbcon.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -1119,6 +1119,7 @@ static int nbcon_kthread_func(void *__co
+ }
+
+ console_srcu_read_unlock(cookie);
++ cond_resched();
+
+ } while (backlog);
+
diff --git a/debian/patches-rt/rcutorture-Also-force-sched-priority-to-timersd-on-b.patch b/debian/patches-rt/rcutorture-Also-force-sched-priority-to-timersd-on-b.patch
index 97f6c5b9bf..7ba366d557 100644
--- a/debian/patches-rt/rcutorture-Also-force-sched-priority-to-timersd-on-b.patch
+++ b/debian/patches-rt/rcutorture-Also-force-sched-priority-to-timersd-on-b.patch
@@ -2,7 +2,7 @@ From: Frederic Weisbecker <frederic@kernel.org>
Date: Tue, 5 Apr 2022 03:07:51 +0200
Subject: [PATCH] rcutorture: Also force sched priority to timersd on
boosting test.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
ksoftirqd is statically boosted to the priority level right above the
one of rcu_torture_boost() so that timers, which torture readers rely on,
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -610,6 +610,7 @@ extern void raise_softirq_irqoff(unsigne
+@@ -613,6 +613,7 @@ extern void raise_softirq_irqoff(unsigne
extern void raise_softirq(unsigned int nr);
#ifdef CONFIG_PREEMPT_RT
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
-@@ -2409,6 +2409,12 @@ static int rcutorture_booster_init(unsig
+@@ -2420,6 +2420,12 @@ static int rcutorture_booster_init(unsig
WARN_ON_ONCE(!t);
sp.sched_priority = 2;
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
@@ -60,7 +60,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Don't allow time recalculation while creating a new task. */
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
-@@ -620,7 +620,7 @@ static inline void tick_irq_exit(void)
+@@ -625,7 +625,7 @@ static inline void tick_irq_exit(void)
}
#ifdef CONFIG_PREEMPT_RT
diff --git a/debian/patches-rt/riscv-add-PREEMPT_AUTO-support.patch b/debian/patches-rt/riscv-add-PREEMPT_AUTO-support.patch
index 0158d653a0..8567f52f03 100644
--- a/debian/patches-rt/riscv-add-PREEMPT_AUTO-support.patch
+++ b/debian/patches-rt/riscv-add-PREEMPT_AUTO-support.patch
@@ -1,7 +1,7 @@
From: Jisheng Zhang <jszhang@kernel.org>
Date: Tue, 31 Oct 2023 22:35:20 +0800
Subject: [PATCH] riscv: add PREEMPT_AUTO support
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
riscv has switched to GENERIC_ENTRY, so adding PREEMPT_AUTO is as simple
as adding TIF_ARCH_RESCHED_LAZY related definitions and enabling
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
-@@ -142,6 +142,7 @@ config RISCV
+@@ -163,6 +163,7 @@ config RISCV
select HAVE_PERF_USER_STACK_DUMP
select HAVE_POSIX_CPU_TIMERS_TASK_WORK
select HAVE_PREEMPT_DYNAMIC_KEY if !XIP_KERNEL
diff --git a/debian/patches-rt/riscv-allow-to-enable-RT.patch b/debian/patches-rt/riscv-allow-to-enable-RT.patch
index 4a8356d70f..afacb81ea9 100644
--- a/debian/patches-rt/riscv-allow-to-enable-RT.patch
+++ b/debian/patches-rt/riscv-allow-to-enable-RT.patch
@@ -1,7 +1,7 @@
From: Jisheng Zhang <jszhang@kernel.org>
Date: Tue, 31 Oct 2023 22:35:21 +0800
Subject: [PATCH] riscv: allow to enable RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Now, it's ready to enable RT on riscv.
@@ -13,11 +13,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
-@@ -49,6 +49,7 @@ config RISCV
- select ARCH_SUPPORTS_HUGETLBFS if MMU
+@@ -58,6 +58,7 @@ config RISCV
+ select ARCH_SUPPORTS_LTO_CLANG_THIN if LLD_VERSION >= 140000
select ARCH_SUPPORTS_PAGE_TABLE_CHECK if MMU
select ARCH_SUPPORTS_PER_VMA_LOCK if MMU
+ select ARCH_SUPPORTS_RT
select ARCH_SUPPORTS_SHADOW_CALL_STACK if HAVE_SHADOW_CALL_STACK
+ select ARCH_USE_CMPXCHG_LOCKREF if 64BIT
select ARCH_USE_MEMTEST
- select ARCH_USE_QUEUED_RWLOCKS
diff --git a/debian/patches-rt/sched-rt-Don-t-try-push-tasks-if-there-are-none.patch b/debian/patches-rt/sched-rt-Don-t-try-push-tasks-if-there-are-none.patch
index dd0fd1eb70..c70409f0cc 100644
--- a/debian/patches-rt/sched-rt-Don-t-try-push-tasks-if-there-are-none.patch
+++ b/debian/patches-rt/sched-rt-Don-t-try-push-tasks-if-there-are-none.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 1 Aug 2023 17:26:48 +0200
Subject: [PATCH] sched/rt: Don't try push tasks if there are none.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
I have a RT task X at a high priority and cyclictest on each CPU with
lower priority than X's. If X is active and each CPU wakes their own
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
-@@ -2194,8 +2194,11 @@ static int rto_next_cpu(struct root_doma
+@@ -2193,8 +2193,11 @@ static int rto_next_cpu(struct root_doma
rd->rto_cpu = cpu;
diff --git a/debian/patches-rt/series b/debian/patches-rt/series
index 3429bdc302..dd8d18c0d7 100644
--- a/debian/patches-rt/series
+++ b/debian/patches-rt/series
@@ -4,53 +4,85 @@
# Posted and applied
###########################################################################
-# signal_x86__Delay_calling_signals_in_atomic.patch
-
###########################################################################
# Posted
###########################################################################
-# tty/ serial
-0001-serial-amba-pl011-Use-uart_prepare_sysrq_char.patch
-0002-serial-ar933x-Use-uart_prepare_sysrq_char.patch
-0003-serial-bcm63xx-Use-uart_prepare_sysrq_char.patch
-0004-serial-meson-Use-uart_prepare_sysrq_char.patch
-0005-serial-msm-Use-uart_prepare_sysrq_char.patch
-0006-serial-omap-Use-uart_prepare_sysrq_char.patch
-0007-serial-pxa-Use-uart_prepare_sysrq_char.patch
-0008-serial-sunplus-Use-uart_prepare_sysrq_char.patch
-0009-serial-lpc32xx_hs-Use-uart_prepare_sysrq_char-to-han.patch
-0010-serial-owl-Use-uart_prepare_sysrq_char-to-handle-sys.patch
-0011-serial-rda-Use-uart_prepare_sysrq_char-to-handle-sys.patch
-0012-serial-sifive-Use-uart_prepare_sysrq_char-to-handle-.patch
-0013-serial-pch-Invoke-handle_rx_to-directly.patch
-0014-serial-pch-Make-push_rx-return-void.patch
-0015-serial-pch-Don-t-disable-interrupts-while-acquiring-.patch
-0016-serial-pch-Don-t-initialize-uart_port-s-spin_lock.patch
-0017-serial-pch-Remove-eg20t_port-lock.patch
-0018-serial-pch-Use-uart_prepare_sysrq_char.patch
-
-# net, RPS, v5
-0001-net-Remove-conditional-threaded-NAPI-wakeup-based-on.patch
-0002-net-Allow-to-use-SMP-threads-for-backlog-NAPI.patch
-0003-net-Use-backlog-NAPI-to-clean-up-the-defer_list.patch
-0004-net-Rename-rps_lock-to-backlog_lock.patch
-
-# perf, sigtrap, v3
+# perf, sigtrap, v5
0001-perf-Move-irq_work_queue-where-the-event-is-prepared.patch
-0002-perf-Enqueue-SIGTRAP-always-via-task_work.patch
-0003-perf-Remove-perf_swevent_get_recursion_context-from-.patch
-0004-perf-Split-__perf_pending_irq-out-of-perf_pending_ir.patch
+0002-task_work-Add-TWA_NMI_CURRENT-as-an-additional-notif.patch
+0003-perf-Enqueue-SIGTRAP-always-via-task_work.patch
+0004-perf-Shrink-the-size-of-the-recursion-counter.patch
+0005-perf-Move-swevent_htable-recursion-into-task_struct.patch
+0006-perf-Don-t-disable-preemption-in-perf_pending_task.patch
+0007-perf-Split-__perf_pending_irq-out-of-perf_pending_ir.patch
+task_work-make-TWA_NMI_CURRENT-handling-conditional-.patch
+
+###########################################################################
+# John's printk queue
+###########################################################################
+0001-printk-Add-notation-to-console_srcu-locking.patch
+0002-printk-Properly-deal-with-nbcon-consoles-on-seq-init.patch
+0003-printk-nbcon-Remove-return-value-for-write_atomic.patch
+0004-printk-Check-printk_deferred_enter-_exit-usage.patch
+0005-printk-nbcon-Add-detailed-doc-for-write_atomic.patch
+0006-printk-nbcon-Add-callbacks-to-synchronize-with-drive.patch
+0007-printk-nbcon-Use-driver-synchronization-while-un-reg.patch
+0008-serial-core-Provide-low-level-functions-to-lock-port.patch
+0009-serial-core-Introduce-wrapper-to-set-uart_port-cons.patch
+0010-console-Improve-console_srcu_read_flags-comments.patch
+0011-nbcon-Add-API-to-acquire-context-for-non-printing-op.patch
+0012-serial-core-Implement-processing-in-port-lock-wrappe.patch
+0013-printk-nbcon-Do-not-rely-on-proxy-headers.patch
+0014-printk-Make-console_is_usable-available-to-nbcon.patch
+0015-printk-Let-console_is_usable-handle-nbcon.patch
+0016-printk-Add-flags-argument-for-console_is_usable.patch
+0017-printk-nbcon-Add-helper-to-assign-priority-based-on-.patch
+0018-printk-nbcon-Provide-function-to-flush-using-write_a.patch
+0019-printk-Track-registered-boot-consoles.patch
+0020-printk-nbcon-Use-nbcon-consoles-in-console_flush_all.patch
+0021-printk-nbcon-Add-unsafe-flushing-on-panic.patch
+0022-printk-Avoid-console_lock-dance-if-no-legacy-or-boot.patch
+0023-printk-Track-nbcon-consoles.patch
+0024-printk-Coordinate-direct-printing-in-panic.patch
+0025-printk-nbcon-Implement-emergency-sections.patch
+0026-panic-Mark-emergency-section-in-warn.patch
+0027-panic-Mark-emergency-section-in-oops.patch
+0028-rcu-Mark-emergency-sections-in-rcu-stalls.patch
+0029-lockdep-Mark-emergency-sections-in-lockdep-splats.patch
+0030-printk-Rename-console_replay_all-and-update-context.patch
+0031-printk-nbcon-Introduce-printing-kthreads.patch
+0032-printk-Atomic-print-in-printk-context-on-shutdown.patch
+0033-printk-nbcon-Fix-nbcon_cpu_emergency_flush-when-pree.patch
+0034-printk-nbcon-Add-context-to-console_is_usable.patch
+0035-printk-nbcon-Add-printer-thread-wakeups.patch
+0036-printk-nbcon-Stop-threads-on-shutdown-reboot.patch
+0037-printk-nbcon-Start-printing-threads.patch
+0038-printk-Provide-helper-for-message-prepending.patch
+0039-printk-nbcon-Show-replay-message-on-takeover.patch
+0040-printk-Add-kthread-for-all-legacy-consoles.patch
+0041-proc-consoles-Add-notation-to-c_start-c_stop.patch
+0042-proc-Add-nbcon-support-for-proc-consoles.patch
+0043-tty-sysfs-Add-nbcon-support-for-active.patch
+0044-printk-Provide-threadprintk-boot-argument.patch
+0045-printk-Avoid-false-positive-lockdep-report-for-legac.patch
+0046-printk-nbcon-Add-function-for-printers-to-reacquire-.patch
+0047-serial-8250-Switch-to-nbcon-console.patch
+0048-serial-8250-Revert-drop-lockdep-annotation-from-seri.patch
+#
+prinkt-nbcon-Add-a-scheduling-point-to-nbcon_kthread.patch
###########################################################################
# Post
###########################################################################
###########################################################################
-# X86:
+# Enabling
###########################################################################
x86__Allow_to_enable_RT.patch
x86__Enable_RT_also_on_32bit.patch
+ARM64__Allow_to_enable_RT.patch
+riscv-allow-to-enable-RT.patch
###########################################################################
# For later, not essencial
@@ -63,8 +95,11 @@ softirq-Use-a-dedicated-thread-for-timer-wakeups.patch
rcutorture-Also-force-sched-priority-to-timersd-on-b.patch
tick-Fix-timer-storm-since-introduction-of-timersd.patch
softirq-Wake-ktimers-thread-also-in-softirq.patch
-zram-Replace-bit-spinlocks-with-spinlock_t-for-PREEM.patch
-# preempt-Put-preempt_enable-within-an-instrumentation.patch
+
+# zram
+0001-zram-Replace-bit-spinlocks-with-a-spinlock_t.patch
+0002-zram-Remove-ZRAM_LOCK.patch
+0003-zram-Shrink-zram_table_entry-flags.patch
# Sched
0001-sched-core-Provide-a-method-to-check-if-a-task-is-PI.patch
@@ -72,86 +107,60 @@ zram-Replace-bit-spinlocks-with-spinlock_t-for-PREEM.patch
0003-time-Allow-to-preempt-after-a-callback.patch
###########################################################################
-# John's printk queue
-###########################################################################
-0001-printk-ringbuffer-Clarify-special-lpos-values.patch
-0003-printk-Avoid-non-panic-CPUs-writing-to-ringbuffer.patch
-0006-printk-Add-notation-to-console_srcu-locking.patch
-0007-printk-Properly-deal-with-nbcon-consoles-on-seq-init.patch
-0008-printk-nbcon-Remove-return-value-for-write_atomic.patch
-0009-printk-Check-printk_deferred_enter-_exit-usage.patch
-0010-printk-nbcon-Add-detailed-doc-for-write_atomic.patch
-0011-printk-nbcon-Add-callbacks-to-synchronize-with-drive.patch
-0012-printk-nbcon-Use-driver-synchronization-while-regist.patch
-0013-serial-core-Provide-low-level-functions-to-lock-port.patch
-0014-printk-nbcon-Implement-processing-in-port-lock-wrapp.patch
-0015-printk-nbcon-Do-not-rely-on-proxy-headers.patch
-0016-printk-nbcon-Fix-kerneldoc-for-enums.patch
-0017-printk-Make-console_is_usable-available-to-nbcon.patch
-0018-printk-Let-console_is_usable-handle-nbcon.patch
-0019-printk-Add-flags-argument-for-console_is_usable.patch
-0020-printk-nbcon-Provide-function-to-flush-using-write_a.patch
-0021-printk-Track-registered-boot-consoles.patch
-0022-printk-nbcon-Use-nbcon-consoles-in-console_flush_all.patch
-0023-printk-nbcon-Assign-priority-based-on-CPU-state.patch
-0024-printk-nbcon-Add-unsafe-flushing-on-panic.patch
-0025-printk-Avoid-console_lock-dance-if-no-legacy-or-boot.patch
-0026-printk-Track-nbcon-consoles.patch
-0027-printk-Coordinate-direct-printing-in-panic.patch
-0028-printk-nbcon-Implement-emergency-sections.patch
-0029-panic-Mark-emergency-section-in-warn.patch
-0030-panic-Mark-emergency-section-in-oops.patch
-0031-rcu-Mark-emergency-sections-in-rcu-stalls.patch
-0032-lockdep-Mark-emergency-sections-in-lockdep-splats.patch
-0033-printk-nbcon-Introduce-printing-kthreads.patch
-0034-printk-Atomic-print-in-printk-context-on-shutdown.patch
-0035-printk-nbcon-Add-context-to-console_is_usable.patch
-0036-printk-nbcon-Add-printer-thread-wakeups.patch
-0037-printk-nbcon-Stop-threads-on-shutdown-reboot.patch
-0038-printk-nbcon-Start-printing-threads.patch
-0039-printk-Provide-helper-for-message-prepending.patch
-0040-printk-nbcon-Show-replay-message-on-takeover.patch
-0041-proc-Add-nbcon-support-for-proc-consoles.patch
-0042-tty-sysfs-Add-nbcon-support-for-active.patch
-0043-printk-nbcon-Provide-function-to-reacquire-ownership.patch
-0044-serial-8250-Switch-to-nbcon-console.patch
-0045-serial-8250-Revert-drop-lockdep-annotation-from-seri.patch
-0046-printk-Add-kthread-for-all-legacy-consoles.patch
-0047-printk-Provide-threadprintk-boot-argument.patch
-0048-printk-Avoid-false-positive-lockdep-report-for-legac.patch
-
-###########################################################################
# DRM:
###########################################################################
-0003-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch
-0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch
-0005-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch
-0006-drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch
-0007-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch
-0008-drm-i915-gt-Queue-and-wait-for-the-irq_work-item.patch
-0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch
-0010-drm-i915-Drop-the-irqs_disabled-check.patch
-drm-i915-guc-Consider-also-RCU-depth-in-busy-loop.patch
-Revert-drm-i915-Depend-on-PREEMPT_RT.patch
+# https://lore.kernel.org/all/20240613102818.4056866-1-bigeasy@linutronix.de/
+0001-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch
+0002-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch
+0003-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch
+0004-drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch
+0005-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch
+0006-drm-i915-Drop-the-irqs_disabled-check.patch
+0007-drm-i915-guc-Consider-also-RCU-depth-in-busy-loop.patch
+0008-Revert-drm-i915-Depend-on-PREEMPT_RT.patch
-###########################################################################
# Lazy preemption
-###########################################################################
PREEMPT_AUTO.patch
-###########################################################################
-# ARM/ARM64
+# BH series
+0001-locking-local_lock-Introduce-guard-definition-for-lo.patch
+0002-locking-local_lock-Add-local-nested-BH-locking-infra.patch
+0003-net-Use-__napi_alloc_frag_align-instead-of-open-codi.patch
+0004-net-Use-nested-BH-locking-for-napi_alloc_cache.patch
+0005-net-tcp_sigpool-Use-nested-BH-locking-for-sigpool_sc.patch
+0006-net-ipv4-Use-nested-BH-locking-for-ipv4_tcp_sk.patch
+0007-netfilter-br_netfilter-Use-nested-BH-locking-for-brn.patch
+0008-net-softnet_data-Make-xmit-per-task.patch
+0009-dev-Remove-PREEMPT_RT-ifdefs-from-backlog_lock.patch
+0010-dev-Use-nested-BH-locking-for-softnet_data.process_q.patch
+0011-lwt-Don-t-disable-migration-prio-invoking-BPF.patch
+0012-seg6-Use-nested-BH-locking-for-seg6_bpf_srh_states.patch
+0013-net-Use-nested-BH-locking-for-bpf_scratchpad.patch
+0014-net-Reference-bpf_redirect_info-via-task_struct-on-P.patch
+0015-net-Move-per-CPU-flush-lists-to-bpf_net_context-on-P.patch
+# optimisation + fixes
+0001-net-Remove-task_struct-bpf_net_context-init-on-fork.patch
+0002-net-Optimize-xdp_do_flush-with-bpf_net_context-infos.patch
+0003-net-Move-flush-list-retrieval-to-where-it-is-used.patch
+tun-Assign-missing-bpf_net_context.patch
+tun-Add-missing-bpf_net_ctx_clear-in-do_xdp_generic.patch
+bpf-Remove-tst_run-from-lwt_seg6local_prog_ops.patch
+# tw_timer
+0001-net-tcp-dccp-prepare-for-tw_timer-un-pinning.patch
+0002-net-tcp-un-pin-the-tw_timer.patch
+0003-tcp-move-inet_twsk_schedule-helper-out-of-header.patch
+
+###########################################################################
+# ARM
###########################################################################
0001-arm-Disable-jump-label-on-PREEMPT_RT.patch
ARM__enable_irq_in_translation_section_permission_fault_handlers.patch
arm-Disable-FAST_GUP-on-PREEMPT_RT-if-HIGHPTE-is-als.patch
-# arm64-signal-Use-ARCH_RT_DELAYS_SIGNAL_SEND.patch
0001-ARM-vfp-Provide-vfp_lock-for-VFP-locking.patch
0002-ARM-vfp-Use-vfp_lock-in-vfp_sync_hwstate.patch
0003-ARM-vfp-Use-vfp_lock-in-vfp_support_entry.patch
0004-ARM-vfp-Move-sending-signals-outside-of-vfp_lock-ed-.patch
ARM__Allow_to_enable_RT.patch
-ARM64__Allow_to_enable_RT.patch
###########################################################################
# POWERPC
@@ -167,7 +176,6 @@ POWERPC__Allow_to_enable_RT.patch
# RISC-V
###########################################################################
riscv-add-PREEMPT_AUTO-support.patch
-riscv-allow-to-enable-RT.patch
# Sysfs file vs uname() -v
sysfs__Add__sys_kernel_realtime_entry.patch
diff --git a/debian/patches-rt/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch b/debian/patches-rt/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch
index a5f1a10093..4b78fdfd15 100644
--- a/debian/patches-rt/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch
+++ b/debian/patches-rt/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 1 Dec 2021 17:41:09 +0100
Subject: [PATCH] softirq: Use a dedicated thread for timer wakeups.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
A timer/hrtimer softirq is raised in-IRQ context. With threaded
interrupts enabled or on PREEMPT_RT this leads to waking the ksoftirqd
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -609,6 +609,22 @@ extern void __raise_softirq_irqoff(unsig
+@@ -612,6 +612,22 @@ extern void __raise_softirq_irqoff(unsig
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
@@ -71,7 +71,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline struct task_struct *this_cpu_ksoftirqd(void)
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
-@@ -619,6 +619,29 @@ static inline void tick_irq_exit(void)
+@@ -624,6 +624,29 @@ static inline void tick_irq_exit(void)
#endif
}
@@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
-@@ -628,8 +651,13 @@ static inline void __irq_exit_rcu(void)
+@@ -633,8 +656,13 @@ static inline void __irq_exit_rcu(void)
#endif
account_hardirq_exit(current);
preempt_count_sub(HARDIRQ_OFFSET);
@@ -117,7 +117,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
tick_irq_exit();
}
-@@ -963,12 +991,70 @@ static struct smp_hotplug_thread softirq
+@@ -972,12 +1000,70 @@ static struct smp_hotplug_thread softirq
.thread_comm = "ksoftirqd/%u",
};
@@ -200,7 +200,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
-@@ -1922,7 +1922,7 @@ void hrtimer_run_queues(void)
+@@ -1904,7 +1904,7 @@ void hrtimer_run_queues(void)
if (!ktime_before(now, cpu_base->softirq_expires_next)) {
cpu_base->softirq_expires_next = KTIME_MAX;
cpu_base->softirq_activated = 1;
@@ -211,12 +211,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -2070,7 +2070,7 @@ static void run_local_timers(void)
- if (time_before(jiffies, base->next_expiry))
+@@ -2465,7 +2465,7 @@ static void run_local_timers(void)
+ /* Raise the softirq only if required. */
+ if (time_after_eq(jiffies, base->next_expiry) ||
+ (i == BASE_DEF && tmigr_requires_handle_remote())) {
+- raise_softirq(TIMER_SOFTIRQ);
++ raise_timer_softirq();
return;
+ }
}
-- raise_softirq(TIMER_SOFTIRQ);
-+ raise_timer_softirq();
- }
-
- /*
diff --git a/debian/patches-rt/softirq-Wake-ktimers-thread-also-in-softirq.patch b/debian/patches-rt/softirq-Wake-ktimers-thread-also-in-softirq.patch
index 0528a0e138..2ad8979d2f 100644
--- a/debian/patches-rt/softirq-Wake-ktimers-thread-also-in-softirq.patch
+++ b/debian/patches-rt/softirq-Wake-ktimers-thread-also-in-softirq.patch
@@ -1,7 +1,7 @@
From: Junxiao Chang <junxiao.chang@intel.com>
Date: Mon, 20 Feb 2023 09:12:20 +0100
Subject: [PATCH] softirq: Wake ktimers thread also in softirq.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
If the hrtimer is raised while a softirq is processed then it does not
wake the corresponding ktimers thread. This is due to the optimisation in the
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
-@@ -646,13 +646,12 @@ static inline void __irq_exit_rcu(void)
+@@ -651,13 +651,12 @@ static inline void __irq_exit_rcu(void)
#endif
account_hardirq_exit(current);
preempt_count_sub(HARDIRQ_OFFSET);
diff --git a/debian/patches-rt/sysfs__Add__sys_kernel_realtime_entry.patch b/debian/patches-rt/sysfs__Add__sys_kernel_realtime_entry.patch
index 8ae88f1168..bac948a2db 100644
--- a/debian/patches-rt/sysfs__Add__sys_kernel_realtime_entry.patch
+++ b/debian/patches-rt/sysfs__Add__sys_kernel_realtime_entry.patch
@@ -1,7 +1,7 @@
Subject: sysfs: Add /sys/kernel/realtime entry
From: Clark Williams <williams@redhat.com>
Date: Sat Jul 30 21:55:53 2011 -0500
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
From: Clark Williams <williams@redhat.com>
@@ -25,9 +25,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
-@@ -179,6 +179,15 @@ KERNEL_ATTR_RO(crash_elfcorehdr_size);
+@@ -181,6 +181,15 @@ KERNEL_ATTR_RO(crash_elfcorehdr_size);
- #endif /* CONFIG_CRASH_CORE */
+ #endif /* CONFIG_VMCORE_INFO */
+#if defined(CONFIG_PREEMPT_RT)
+static ssize_t realtime_show(struct kobject *kobj,
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* whether file capabilities are enabled */
static ssize_t fscaps_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
-@@ -275,6 +284,9 @@ static struct attribute * kernel_attrs[]
+@@ -279,6 +288,9 @@ static struct attribute * kernel_attrs[]
&rcu_expedited_attr.attr,
&rcu_normal_attr.attr,
#endif
diff --git a/debian/patches-rt/task_work-make-TWA_NMI_CURRENT-handling-conditional-.patch b/debian/patches-rt/task_work-make-TWA_NMI_CURRENT-handling-conditional-.patch
new file mode 100644
index 0000000000..957e92705a
--- /dev/null
+++ b/debian/patches-rt/task_work-make-TWA_NMI_CURRENT-handling-conditional-.patch
@@ -0,0 +1,68 @@
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Mon, 29 Jul 2024 12:05:06 -0700
+Subject: [PATCH] task_work: make TWA_NMI_CURRENT handling conditional on
+ IRQ_WORK
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+The TWA_NMI_CURRENT handling very much depends on IRQ_WORK, but that
+isn't universally enabled everywhere.
+
+Maybe the IRQ_WORK infrastructure should just be unconditional - x86
+ends up indirectly enabling it through unconditionally enabling
+PERF_EVENTS, for example. But it also gets enabled by having SMP
+support, or even if you just have PRINTK enabled.
+
+But in the meantime TWA_NMI_CURRENT causes tons of build failures on
+various odd minimal configs. Which did show up in linux-next, but
+despite that nobody bothered to fix it or even inform me until -rc1 was
+out.
+
+Fixes: 466e4d801cd4 ("task_work: Add TWA_NMI_CURRENT as an additional notify mode")
+Reported-by: Naresh Kamboju <naresh.kamboju@linaro.org>
+Reported-by: kernelci.org bot <bot@kernelci.org>
+Reported-by: Guenter Roeck <linux@roeck-us.net>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+ kernel/task_work.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/kernel/task_work.c
++++ b/kernel/task_work.c
+@@ -7,12 +7,14 @@
+
+ static struct callback_head work_exited; /* all we need is ->next == NULL */
+
++#ifdef CONFIG_IRQ_WORK
+ static void task_work_set_notify_irq(struct irq_work *entry)
+ {
+ test_and_set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
+ }
+ static DEFINE_PER_CPU(struct irq_work, irq_work_NMI_resume) =
+ IRQ_WORK_INIT_HARD(task_work_set_notify_irq);
++#endif
+
+ /**
+ * task_work_add - ask the @task to execute @work->func()
+@@ -58,6 +60,8 @@ int task_work_add(struct task_struct *ta
+ if (notify == TWA_NMI_CURRENT) {
+ if (WARN_ON_ONCE(task != current))
+ return -EINVAL;
++ if (!IS_ENABLED(CONFIG_IRQ_WORK))
++ return -EINVAL;
+ } else {
+ /* record the work call stack in order to print it in KASAN reports */
+ kasan_record_aux_stack(work);
+@@ -82,9 +86,11 @@ int task_work_add(struct task_struct *ta
+ case TWA_SIGNAL_NO_IPI:
+ __set_notify_signal(task);
+ break;
++#ifdef CONFIG_IRQ_WORK
+ case TWA_NMI_CURRENT:
+ irq_work_queue(this_cpu_ptr(&irq_work_NMI_resume));
+ break;
++#endif
+ default:
+ WARN_ON_ONCE(1);
+ break;
diff --git a/debian/patches-rt/tick-Fix-timer-storm-since-introduction-of-timersd.patch b/debian/patches-rt/tick-Fix-timer-storm-since-introduction-of-timersd.patch
index be10774fe2..27c836a140 100644
--- a/debian/patches-rt/tick-Fix-timer-storm-since-introduction-of-timersd.patch
+++ b/debian/patches-rt/tick-Fix-timer-storm-since-introduction-of-timersd.patch
@@ -1,7 +1,7 @@
From: Frederic Weisbecker <frederic@kernel.org>
Date: Tue, 5 Apr 2022 03:07:52 +0200
Subject: [PATCH] tick: Fix timer storm since introduction of timersd
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
If timers are pending while the tick is reprogrammed on nohz_mode, the
next expiry is not armed to fire now, it is delayed one jiffy forward
@@ -48,7 +48,7 @@ Link: https://lkml.kernel.org/r/20220405010752.1347437-2-frederic@kernel.org
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -611,9 +611,16 @@ extern void raise_softirq(unsigned int n
+@@ -614,9 +614,16 @@ extern void raise_softirq(unsigned int n
#ifdef CONFIG_PREEMPT_RT
DECLARE_PER_CPU(struct task_struct *, timersd);
@@ -65,7 +65,7 @@ Link: https://lkml.kernel.org/r/20220405010752.1347437-2-frederic@kernel.org
#else
static inline void raise_timer_softirq(void)
{
-@@ -624,6 +631,11 @@ static inline void raise_hrtimer_softirq
+@@ -627,6 +634,11 @@ static inline void raise_hrtimer_softirq
{
raise_softirq_irqoff(HRTIMER_SOFTIRQ);
}
@@ -79,7 +79,7 @@ Link: https://lkml.kernel.org/r/20220405010752.1347437-2-frederic@kernel.org
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
-@@ -621,12 +621,7 @@ static inline void tick_irq_exit(void)
+@@ -626,12 +626,7 @@ static inline void tick_irq_exit(void)
#ifdef CONFIG_PREEMPT_RT
DEFINE_PER_CPU(struct task_struct *, timersd);
@@ -95,7 +95,7 @@ Link: https://lkml.kernel.org/r/20220405010752.1347437-2-frederic@kernel.org
{
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
-@@ -796,7 +796,7 @@ static void tick_nohz_restart(struct tic
+@@ -859,7 +859,7 @@ static void tick_nohz_restart(struct tic
static inline bool local_timer_softirq_pending(void)
{
@@ -103,4 +103,4 @@ Link: https://lkml.kernel.org/r/20220405010752.1347437-2-frederic@kernel.org
+ return local_pending_timers() & BIT(TIMER_SOFTIRQ);
}
- static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
+ /*
diff --git a/debian/patches-rt/tun-Add-missing-bpf_net_ctx_clear-in-do_xdp_generic.patch b/debian/patches-rt/tun-Add-missing-bpf_net_ctx_clear-in-do_xdp_generic.patch
new file mode 100644
index 0000000000..3c70d0f6dd
--- /dev/null
+++ b/debian/patches-rt/tun-Add-missing-bpf_net_ctx_clear-in-do_xdp_generic.patch
@@ -0,0 +1,34 @@
+From: Jeongjun Park <aha310510@gmail.com>
+Date: Fri, 26 Jul 2024 06:40:49 +0900
+Subject: [PATCH] tun: Add missing bpf_net_ctx_clear() in do_xdp_generic()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+There are cases where do_xdp_generic returns bpf_net_context without
+clearing it. This causes various memory corruptions, so the missing
+bpf_net_ctx_clear must be added.
+
+Reported-by: syzbot+44623300f057a28baf1e@syzkaller.appspotmail.com
+Fixes: fecef4cd42c6 ("tun: Assign missing bpf_net_context.")
+Signed-off-by: Jeongjun Park <aha310510@gmail.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Reported-by: syzbot+3c2b6d5d4bec3b904933@syzkaller.appspotmail.com
+Reported-by: syzbot+707d98c8649695eaf329@syzkaller.appspotmail.com
+Reported-by: syzbot+c226757eb784a9da3e8b@syzkaller.appspotmail.com
+Reported-by: syzbot+61a1cfc2b6632363d319@syzkaller.appspotmail.com
+Reported-by: syzbot+709e4c85c904bcd62735@syzkaller.appspotmail.com
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ net/core/dev.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -5150,6 +5150,7 @@ int do_xdp_generic(struct bpf_prog *xdp_
+ bpf_net_ctx_clear(bpf_net_ctx);
+ return XDP_DROP;
+ }
++ bpf_net_ctx_clear(bpf_net_ctx);
+ }
+ return XDP_PASS;
+ out_redir:
diff --git a/debian/patches-rt/tun-Assign-missing-bpf_net_context.patch b/debian/patches-rt/tun-Assign-missing-bpf_net_context.patch
new file mode 100644
index 0000000000..1d16417b19
--- /dev/null
+++ b/debian/patches-rt/tun-Assign-missing-bpf_net_context.patch
@@ -0,0 +1,114 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 4 Jul 2024 16:48:15 +0200
+Subject: [PATCH] tun: Assign missing bpf_net_context.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+During the introduction of struct bpf_net_context handling for
+XDP-redirect, the tun driver has been missed.
+Jakub also pointed out that there is another call chain to
+do_xdp_generic() originating from netif_receive_skb() and drivers may
+use it outside from the NAPI context.
+
+Set the bpf_net_context before invoking BPF XDP program within the TUN
+driver. Set the bpf_net_context also in do_xdp_generic() if a xdp
+program is available.
+
+Reported-by: syzbot+0b5c75599f1d872bea6f@syzkaller.appspotmail.com
+Reported-by: syzbot+5ae46b237278e2369cac@syzkaller.appspotmail.com
+Reported-by: syzbot+c1e04a422bbc0f0f2921@syzkaller.appspotmail.com
+Fixes: 401cb7dae8130 ("net: Reference bpf_redirect_info via task_struct on PREEMPT_RT.")
+Link: https://lore.kernel.org/r/20240704144815.j8xQda5r@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/net/tun.c | 7 +++++++
+ net/core/dev.c | 5 +++++
+ 2 files changed, 12 insertions(+)
+
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1661,6 +1661,7 @@ static struct sk_buff *tun_build_skb(str
+ int len, int *skb_xdp)
+ {
+ struct page_frag *alloc_frag = &current->task_frag;
++ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+ struct bpf_prog *xdp_prog;
+ int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ char *buf;
+@@ -1700,6 +1701,7 @@ static struct sk_buff *tun_build_skb(str
+
+ local_bh_disable();
+ rcu_read_lock();
++ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+ xdp_prog = rcu_dereference(tun->xdp_prog);
+ if (xdp_prog) {
+ struct xdp_buff xdp;
+@@ -1728,12 +1730,14 @@ static struct sk_buff *tun_build_skb(str
+ pad = xdp.data - xdp.data_hard_start;
+ len = xdp.data_end - xdp.data;
+ }
++ bpf_net_ctx_clear(bpf_net_ctx);
+ rcu_read_unlock();
+ local_bh_enable();
+
+ return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
+
+ out:
++ bpf_net_ctx_clear(bpf_net_ctx);
+ rcu_read_unlock();
+ local_bh_enable();
+ return NULL;
+@@ -2566,6 +2570,7 @@ static int tun_sendmsg(struct socket *so
+
+ if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
+ ctl && ctl->type == TUN_MSG_PTR) {
++ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+ struct tun_page tpage;
+ int n = ctl->num;
+ int flush = 0, queued = 0;
+@@ -2574,6 +2579,7 @@ static int tun_sendmsg(struct socket *so
+
+ local_bh_disable();
+ rcu_read_lock();
++ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+
+ for (i = 0; i < n; i++) {
+ xdp = &((struct xdp_buff *)ctl->ptr)[i];
+@@ -2588,6 +2594,7 @@ static int tun_sendmsg(struct socket *so
+ if (tfile->napi_enabled && queued > 0)
+ napi_schedule(&tfile->napi);
+
++ bpf_net_ctx_clear(bpf_net_ctx);
+ rcu_read_unlock();
+ local_bh_enable();
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -5126,11 +5126,14 @@ static DEFINE_STATIC_KEY_FALSE(generic_x
+
+ int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb)
+ {
++ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
++
+ if (xdp_prog) {
+ struct xdp_buff xdp;
+ u32 act;
+ int err;
+
++ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+ act = netif_receive_generic_xdp(pskb, &xdp, xdp_prog);
+ if (act != XDP_PASS) {
+ switch (act) {
+@@ -5144,11 +5147,13 @@ int do_xdp_generic(struct bpf_prog *xdp_
+ generic_xdp_tx(*pskb, xdp_prog);
+ break;
+ }
++ bpf_net_ctx_clear(bpf_net_ctx);
+ return XDP_DROP;
+ }
+ }
+ return XDP_PASS;
+ out_redir:
++ bpf_net_ctx_clear(bpf_net_ctx);
+ kfree_skb_reason(*pskb, SKB_DROP_REASON_XDP);
+ return XDP_DROP;
+ }
diff --git a/debian/patches-rt/x86__Allow_to_enable_RT.patch b/debian/patches-rt/x86__Allow_to_enable_RT.patch
index 9e900b332c..5c40768f74 100644
--- a/debian/patches-rt/x86__Allow_to_enable_RT.patch
+++ b/debian/patches-rt/x86__Allow_to_enable_RT.patch
@@ -1,7 +1,7 @@
Subject: x86: Allow to enable RT
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed Aug 7 18:15:38 2019 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
diff --git a/debian/patches-rt/x86__Enable_RT_also_on_32bit.patch b/debian/patches-rt/x86__Enable_RT_also_on_32bit.patch
index 5f49eedcca..1ecb254401 100644
--- a/debian/patches-rt/x86__Enable_RT_also_on_32bit.patch
+++ b/debian/patches-rt/x86__Enable_RT_also_on_32bit.patch
@@ -1,7 +1,7 @@
Subject: x86: Enable RT also on 32bit
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu Nov 7 17:49:20 2019 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -120,6 +120,7 @@ config X86
+@@ -123,6 +123,7 @@ config X86
select ARCH_USES_CFI_TRAPS if X86_64 && CFI_CLANG
select ARCH_SUPPORTS_LTO_CLANG
select ARCH_SUPPORTS_LTO_CLANG_THIN