Diffstat (limited to 'debian/patches-rt')
-rw-r--r--  debian/patches-rt/0001-ARM-vfp-Provide-vfp_lock-for-VFP-locking.patch | 2
-rw-r--r--  debian/patches-rt/0001-arm-Disable-jump-label-on-PREEMPT_RT.patch | 4
-rw-r--r--  debian/patches-rt/0001-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch (renamed from debian/patches-rt/0003-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch) | 4
-rw-r--r--  debian/patches-rt/0001-locking-local_lock-Introduce-guard-definition-for-lo.patch | 34
-rw-r--r--  debian/patches-rt/0001-net-Remove-conditional-threaded-NAPI-wakeup-based-on.patch | 75
-rw-r--r--  debian/patches-rt/0001-net-Remove-task_struct-bpf_net_context-init-on-fork.patch | 32
-rw-r--r--  debian/patches-rt/0001-net-tcp-dccp-prepare-for-tw_timer-un-pinning.patch | 188
-rw-r--r--  debian/patches-rt/0001-perf-Move-irq_work_queue-where-the-event-is-prepared.patch | 31
-rw-r--r--  debian/patches-rt/0001-printk-Add-notation-to-console_srcu-locking.patch | 5
-rw-r--r--  debian/patches-rt/0001-sched-core-Provide-a-method-to-check-if-a-task-is-PI.patch | 6
-rw-r--r--  debian/patches-rt/0001-zram-Replace-bit-spinlocks-with-a-spinlock_t.patch (renamed from debian/patches-rt/zram-Replace-bit-spinlocks-with-spinlock_t-for-PREEM.patch) | 64
-rw-r--r--  debian/patches-rt/0002-ARM-vfp-Use-vfp_lock-in-vfp_sync_hwstate.patch | 2
-rw-r--r--  debian/patches-rt/0002-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch (renamed from debian/patches-rt/0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch) | 45
-rw-r--r--  debian/patches-rt/0002-locking-local_lock-Add-local-nested-BH-locking-infra.patch | 141
-rw-r--r--  debian/patches-rt/0002-net-Allow-to-use-SMP-threads-for-backlog-NAPI.patch | 333
-rw-r--r--  debian/patches-rt/0002-net-Optimize-xdp_do_flush-with-bpf_net_context-infos.patch | 291
-rw-r--r--  debian/patches-rt/0002-net-tcp-un-pin-the-tw_timer.patch | 29
-rw-r--r--  debian/patches-rt/0002-perf-Enqueue-SIGTRAP-always-via-task_work.patch | 160
-rw-r--r--  debian/patches-rt/0002-printk-Properly-deal-with-nbcon-consoles-on-seq-init.patch | 14
-rw-r--r--  debian/patches-rt/0002-softirq-Add-function-to-preempt-serving-softirqs.patch | 2
-rw-r--r--  debian/patches-rt/0002-task_work-Add-TWA_NMI_CURRENT-as-an-additional-notif.patch | 102
-rw-r--r--  debian/patches-rt/0002-zram-Remove-ZRAM_LOCK.patch | 66
-rw-r--r--  debian/patches-rt/0003-ARM-vfp-Use-vfp_lock-in-vfp_support_entry.patch | 2
-rw-r--r--  debian/patches-rt/0003-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch (renamed from debian/patches-rt/0005-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch) | 25
-rw-r--r--  debian/patches-rt/0003-net-Move-flush-list-retrieval-to-where-it-is-used.patch | 88
-rw-r--r--  debian/patches-rt/0003-net-Use-__napi_alloc_frag_align-instead-of-open-codi.patch | 42
-rw-r--r--  debian/patches-rt/0003-net-Use-backlog-NAPI-to-clean-up-the-defer_list.patch | 121
-rw-r--r--  debian/patches-rt/0003-perf-Enqueue-SIGTRAP-always-via-task_work.patch | 120
-rw-r--r--  debian/patches-rt/0003-perf-Remove-perf_swevent_get_recursion_context-from-.patch | 55
-rw-r--r--  debian/patches-rt/0003-printk-nbcon-Remove-return-value-for-write_atomic.patch | 4
-rw-r--r--  debian/patches-rt/0003-tcp-move-inet_twsk_schedule-helper-out-of-header.patch | 45
-rw-r--r--  debian/patches-rt/0003-time-Allow-to-preempt-after-a-callback.patch | 4
-rw-r--r--  debian/patches-rt/0003-zram-Shrink-zram_table_entry-flags.patch | 48
-rw-r--r--  debian/patches-rt/0004-ARM-vfp-Move-sending-signals-outside-of-vfp_lock-ed-.patch | 2
-rw-r--r--  debian/patches-rt/0004-drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch (renamed from debian/patches-rt/drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch) | 5
-rw-r--r--  debian/patches-rt/0004-net-Rename-rps_lock-to-backlog_lock.patch | 164
-rw-r--r--  debian/patches-rt/0004-net-Use-nested-BH-locking-for-napi_alloc_cache.patch | 131
-rw-r--r--  debian/patches-rt/0004-perf-Shrink-the-size-of-the-recursion-counter.patch | 66
-rw-r--r--  debian/patches-rt/0004-printk-Check-printk_deferred_enter-_exit-usage.patch | 4
-rw-r--r--  debian/patches-rt/0005-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch (renamed from debian/patches-rt/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch) | 4
-rw-r--r--  debian/patches-rt/0005-net-tcp_sigpool-Use-nested-BH-locking-for-sigpool_sc.patch | 75
-rw-r--r--  debian/patches-rt/0005-perf-Move-swevent_htable-recursion-into-task_struct.patch | 121
-rw-r--r--  debian/patches-rt/0005-printk-nbcon-Add-detailed-doc-for-write_atomic.patch | 5
-rw-r--r--  debian/patches-rt/0006-drm-i915-Drop-the-irqs_disabled-check.patch (renamed from debian/patches-rt/0010-drm-i915-Drop-the-irqs_disabled-check.patch) | 5
-rw-r--r--  debian/patches-rt/0006-net-ipv4-Use-nested-BH-locking-for-ipv4_tcp_sk.patch | 94
-rw-r--r--  debian/patches-rt/0006-perf-Don-t-disable-preemption-in-perf_pending_task.patch | 60
-rw-r--r--  debian/patches-rt/0006-printk-nbcon-Add-callbacks-to-synchronize-with-drive.patch | 25
-rw-r--r--  debian/patches-rt/0007-drm-i915-guc-Consider-also-RCU-depth-in-busy-loop.patch (renamed from debian/patches-rt/drm-i915-guc-Consider-also-RCU-depth-in-busy-loop.patch) | 7
-rw-r--r--  debian/patches-rt/0007-netfilter-br_netfilter-Use-nested-BH-locking-for-brn.patch | 98
-rw-r--r--  debian/patches-rt/0007-perf-Split-__perf_pending_irq-out-of-perf_pending_ir.patch (renamed from debian/patches-rt/0004-perf-Split-__perf_pending_irq-out-of-perf_pending_ir.patch) | 44
-rw-r--r--  debian/patches-rt/0007-printk-nbcon-Use-driver-synchronization-while-un-reg.patch | 35
-rw-r--r--  debian/patches-rt/0008-Revert-drm-i915-Depend-on-PREEMPT_RT.patch (renamed from debian/patches-rt/Revert-drm-i915-Depend-on-PREEMPT_RT.patch) | 5
-rw-r--r--  debian/patches-rt/0008-drm-i915-gt-Queue-and-wait-for-the-irq_work-item.patch | 42
-rw-r--r--  debian/patches-rt/0008-net-softnet_data-Make-xmit-per-task.patch | 227
-rw-r--r--  debian/patches-rt/0008-serial-core-Provide-low-level-functions-to-lock-port.patch | 6
-rw-r--r--  debian/patches-rt/0009-dev-Remove-PREEMPT_RT-ifdefs-from-backlog_lock.patch | 60
-rw-r--r--  debian/patches-rt/0009-serial-core-Introduce-wrapper-to-set-uart_port-cons.patch | 22
-rw-r--r--  debian/patches-rt/0010-console-Improve-console_srcu_read_flags-comments.patch | 5
-rw-r--r--  debian/patches-rt/0010-dev-Use-nested-BH-locking-for-softnet_data.process_q.patch | 94
-rw-r--r--  debian/patches-rt/0011-lwt-Don-t-disable-migration-prio-invoking-BPF.patch | 40
-rw-r--r--  debian/patches-rt/0011-nbcon-Add-API-to-acquire-context-for-non-printing-op.patch (renamed from debian/patches-rt/0011-nbcon-Provide-functions-for-drivers-to-acquire-conso.patch) | 82
-rw-r--r--  debian/patches-rt/0012-seg6-Use-nested-BH-locking-for-seg6_bpf_srh_states.patch | 152
-rw-r--r--  debian/patches-rt/0012-serial-core-Implement-processing-in-port-lock-wrappe.patch | 34
-rw-r--r--  debian/patches-rt/0013-net-Use-nested-BH-locking-for-bpf_scratchpad.patch | 70
-rw-r--r--  debian/patches-rt/0013-printk-nbcon-Do-not-rely-on-proxy-headers.patch | 4
-rw-r--r--  debian/patches-rt/0014-net-Reference-bpf_redirect_info-via-task_struct-on-P.patch | 657
-rw-r--r--  debian/patches-rt/0014-printk-Make-console_is_usable-available-to-nbcon.patch | 4
-rw-r--r--  debian/patches-rt/0015-net-Move-per-CPU-flush-lists-to-bpf_net_context-on-P.patch | 270
-rw-r--r--  debian/patches-rt/0015-printk-Let-console_is_usable-handle-nbcon.patch | 4
-rw-r--r--  debian/patches-rt/0016-printk-Add-flags-argument-for-console_is_usable.patch | 6
-rw-r--r--  debian/patches-rt/0017-printk-nbcon-Add-helper-to-assign-priority-based-on-.patch | 6
-rw-r--r--  debian/patches-rt/0018-printk-nbcon-Provide-function-to-flush-using-write_a.patch | 46
-rw-r--r--  debian/patches-rt/0019-printk-Track-registered-boot-consoles.patch | 12
-rw-r--r--  debian/patches-rt/0020-printk-nbcon-Use-nbcon-consoles-in-console_flush_all.patch | 5
-rw-r--r--  debian/patches-rt/0021-printk-nbcon-Add-unsafe-flushing-on-panic.patch | 28
-rw-r--r--  debian/patches-rt/0022-printk-Avoid-console_lock-dance-if-no-legacy-or-boot.patch | 24
-rw-r--r--  debian/patches-rt/0023-printk-Track-nbcon-consoles.patch | 12
-rw-r--r--  debian/patches-rt/0024-printk-Coordinate-direct-printing-in-panic.patch | 18
-rw-r--r--  debian/patches-rt/0025-printk-nbcon-Implement-emergency-sections.patch | 80
-rw-r--r--  debian/patches-rt/0026-panic-Mark-emergency-section-in-warn.patch | 8
-rw-r--r--  debian/patches-rt/0027-panic-Mark-emergency-section-in-oops.patch | 8
-rw-r--r--  debian/patches-rt/0028-rcu-Mark-emergency-sections-in-rcu-stalls.patch | 28
-rw-r--r--  debian/patches-rt/0029-lockdep-Mark-emergency-sections-in-lockdep-splats.patch | 5
-rw-r--r--  debian/patches-rt/0030-printk-Rename-console_replay_all-and-update-context.patch | 77
-rw-r--r--  debian/patches-rt/0031-printk-nbcon-Introduce-printing-kthreads.patch (renamed from debian/patches-rt/0030-printk-nbcon-Introduce-printing-kthreads.patch) | 77
-rw-r--r--  debian/patches-rt/0032-printk-Atomic-print-in-printk-context-on-shutdown.patch (renamed from debian/patches-rt/0031-printk-Atomic-print-in-printk-context-on-shutdown.patch) | 28
-rw-r--r--  debian/patches-rt/0033-printk-nbcon-Fix-nbcon_cpu_emergency_flush-when-pree.patch | 46
-rw-r--r--  debian/patches-rt/0034-printk-nbcon-Add-context-to-console_is_usable.patch (renamed from debian/patches-rt/0032-printk-nbcon-Add-context-to-console_is_usable.patch) | 24
-rw-r--r--  debian/patches-rt/0035-printk-nbcon-Add-printer-thread-wakeups.patch (renamed from debian/patches-rt/0033-printk-nbcon-Add-printer-thread-wakeups.patch) | 35
-rw-r--r--  debian/patches-rt/0036-printk-nbcon-Stop-threads-on-shutdown-reboot.patch (renamed from debian/patches-rt/0034-printk-nbcon-Stop-threads-on-shutdown-reboot.patch) | 14
-rw-r--r--  debian/patches-rt/0037-printk-nbcon-Start-printing-threads.patch (renamed from debian/patches-rt/0035-printk-nbcon-Start-printing-threads.patch) | 18
-rw-r--r--  debian/patches-rt/0038-printk-Provide-helper-for-message-prepending.patch (renamed from debian/patches-rt/0036-printk-Provide-helper-for-message-prepending.patch) | 4
-rw-r--r--  debian/patches-rt/0039-printk-nbcon-Show-replay-message-on-takeover.patch (renamed from debian/patches-rt/0037-printk-nbcon-Show-replay-message-on-takeover.patch) | 12
-rw-r--r--  debian/patches-rt/0040-printk-Add-kthread-for-all-legacy-consoles.patch (renamed from debian/patches-rt/0044-printk-Add-kthread-for-all-legacy-consoles.patch) | 90
-rw-r--r--  debian/patches-rt/0041-proc-consoles-Add-notation-to-c_start-c_stop.patch (renamed from debian/patches-rt/0038-proc-consoles-Add-notation-to-c_start-c_stop.patch) | 4
-rw-r--r--  debian/patches-rt/0042-proc-Add-nbcon-support-for-proc-consoles.patch (renamed from debian/patches-rt/0039-proc-Add-nbcon-support-for-proc-consoles.patch) | 4
-rw-r--r--  debian/patches-rt/0043-tty-sysfs-Add-nbcon-support-for-active.patch (renamed from debian/patches-rt/0040-tty-sysfs-Add-nbcon-support-for-active.patch) | 4
-rw-r--r--  debian/patches-rt/0044-printk-Provide-threadprintk-boot-argument.patch (renamed from debian/patches-rt/0045-printk-Provide-threadprintk-boot-argument.patch) | 8
-rw-r--r--  debian/patches-rt/0045-printk-Avoid-false-positive-lockdep-report-for-legac.patch (renamed from debian/patches-rt/0046-printk-Avoid-false-positive-lockdep-report-for-legac.patch) | 10
-rw-r--r--  debian/patches-rt/0046-printk-nbcon-Add-function-for-printers-to-reacquire-.patch (renamed from debian/patches-rt/0041-printk-nbcon-Provide-function-to-reacquire-ownership.patch) | 71
-rw-r--r--  debian/patches-rt/0047-serial-8250-Switch-to-nbcon-console.patch (renamed from debian/patches-rt/0042-serial-8250-Switch-to-nbcon-console.patch) | 26
-rw-r--r--  debian/patches-rt/0048-serial-8250-Revert-drop-lockdep-annotation-from-seri.patch (renamed from debian/patches-rt/0043-serial-8250-Revert-drop-lockdep-annotation-from-seri.patch) | 6
-rw-r--r--  debian/patches-rt/ARM64__Allow_to_enable_RT.patch | 2
-rw-r--r--  debian/patches-rt/ARM__Allow_to_enable_RT.patch | 8
-rw-r--r--  debian/patches-rt/ARM__enable_irq_in_translation_section_permission_fault_handlers.patch | 6
-rw-r--r--  debian/patches-rt/Add_localversion_for_-RT_release.patch | 4
-rw-r--r--  debian/patches-rt/POWERPC__Allow_to_enable_RT.patch | 6
-rw-r--r--  debian/patches-rt/PREEMPT_AUTO.patch | 98
-rw-r--r--  debian/patches-rt/arm-Disable-FAST_GUP-on-PREEMPT_RT-if-HIGHPTE-is-als.patch | 8
-rw-r--r--  debian/patches-rt/bpf-Remove-tst_run-from-lwt_seg6local_prog_ops.patch | 40
-rw-r--r--  debian/patches-rt/drm-ttm-tests-Let-ttm_bo_test-consider-different-ww_.patch | 52
-rw-r--r--  debian/patches-rt/pinctrl-renesas-rzg2l-Use-spin_-lock-unlock-_irq-sav.patch | 38
-rw-r--r--  debian/patches-rt/powerpc-pseries-Select-the-generic-memory-allocator.patch | 2
-rw-r--r--  debian/patches-rt/powerpc__traps__Use_PREEMPT_RT.patch | 2
-rw-r--r--  debian/patches-rt/powerpc_kvm__Disable_in-kernel_MPIC_emulation_for_PREEMPT_RT.patch | 2
-rw-r--r--  debian/patches-rt/powerpc_pseries_iommu__Use_a_locallock_instead_local_irq_save.patch | 2
-rw-r--r--  debian/patches-rt/powerpc_stackprotector__work_around_stack-guard_init_from_atomic.patch | 2
-rw-r--r--  debian/patches-rt/prinkt-nbcon-Add-a-scheduling-point-to-nbcon_kthread.patch | 34
-rw-r--r--  debian/patches-rt/rcutorture-Also-force-sched-priority-to-timersd-on-b.patch | 4
-rw-r--r--  debian/patches-rt/riscv-add-PREEMPT_AUTO-support.patch | 4
-rw-r--r--  debian/patches-rt/riscv-allow-to-enable-RT.patch | 6
-rw-r--r--  debian/patches-rt/sched-rt-Don-t-try-push-tasks-if-there-are-none.patch | 4
-rw-r--r--  debian/patches-rt/series | 184
-rw-r--r--  debian/patches-rt/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch | 8
-rw-r--r--  debian/patches-rt/softirq-Wake-ktimers-thread-also-in-softirq.patch | 2
-rw-r--r--  debian/patches-rt/sysfs__Add__sys_kernel_realtime_entry.patch | 2
-rw-r--r--  debian/patches-rt/task_work-make-TWA_NMI_CURRENT-handling-conditional-.patch | 68
-rw-r--r--  debian/patches-rt/tick-Fix-timer-storm-since-introduction-of-timersd.patch | 2
-rw-r--r--  debian/patches-rt/tun-Add-missing-bpf_net_ctx_clear-in-do_xdp_generic.patch | 34
-rw-r--r--  debian/patches-rt/tun-Assign-missing-bpf_net_context.patch | 114
-rw-r--r--  debian/patches-rt/x86__Allow_to_enable_RT.patch | 2
-rw-r--r--  debian/patches-rt/x86__Enable_RT_also_on_32bit.patch | 4
132 files changed, 4757 insertions, 1738 deletions
diff --git a/debian/patches-rt/0001-ARM-vfp-Provide-vfp_lock-for-VFP-locking.patch b/debian/patches-rt/0001-ARM-vfp-Provide-vfp_lock-for-VFP-locking.patch
index 4fb5196d59..126b521923 100644
--- a/debian/patches-rt/0001-ARM-vfp-Provide-vfp_lock-for-VFP-locking.patch
+++ b/debian/patches-rt/0001-ARM-vfp-Provide-vfp_lock-for-VFP-locking.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 19 May 2023 16:57:29 +0200
Subject: [PATCH 1/4] ARM: vfp: Provide vfp_lock() for VFP locking.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
kernel_neon_begin() uses local_bh_disable() to ensure exclusive access
to the VFP unit. This is broken on PREEMPT_RT because a BH disabled
diff --git a/debian/patches-rt/0001-arm-Disable-jump-label-on-PREEMPT_RT.patch b/debian/patches-rt/0001-arm-Disable-jump-label-on-PREEMPT_RT.patch
index 403d1baade..c427bf0803 100644
--- a/debian/patches-rt/0001-arm-Disable-jump-label-on-PREEMPT_RT.patch
+++ b/debian/patches-rt/0001-arm-Disable-jump-label-on-PREEMPT_RT.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 8 Jul 2015 17:14:48 +0200
Subject: [PATCH 1/2] arm: Disable jump-label on PREEMPT_RT.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
jump-labels are used to efficiently switch between two possible code
paths. To achieve this, stop_machine() is used to keep the CPU in a
@@ -25,7 +25,7 @@ Link: https://lkml.kernel.org/r/20220613182447.112191-2-bigeasy@linutronix.de
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -76,7 +76,7 @@ config ARM
+@@ -77,7 +77,7 @@ config ARM
select HAS_IOPORT
select HAVE_ARCH_AUDITSYSCALL if AEABI && !OABI_COMPAT
select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
diff --git a/debian/patches-rt/0003-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch b/debian/patches-rt/0001-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch
index 5c4a72e137..7c4ce85fa4 100644
--- a/debian/patches-rt/0003-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch
+++ b/debian/patches-rt/0001-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch
@@ -1,8 +1,8 @@
From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Sat, 27 Feb 2016 08:09:11 +0100
-Subject: [PATCH 03/10] drm/i915: Use preempt_disable/enable_rt() where
+Subject: [PATCH 1/8] drm/i915: Use preempt_disable/enable_rt() where
recommended
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Mario Kleiner suggest in commit
ad3543ede630f ("drm/intel: Push get_scanout_position() timestamping into kms driver.")
diff --git a/debian/patches-rt/0001-locking-local_lock-Introduce-guard-definition-for-lo.patch b/debian/patches-rt/0001-locking-local_lock-Introduce-guard-definition-for-lo.patch
new file mode 100644
index 0000000000..9f0d4bff7c
--- /dev/null
+++ b/debian/patches-rt/0001-locking-local_lock-Introduce-guard-definition-for-lo.patch
@@ -0,0 +1,34 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 23 Oct 2023 16:14:09 +0200
+Subject: [PATCH 01/15] locking/local_lock: Introduce guard definition for
+ local_lock.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+Introduce lock guard definition for local_lock_t. There are no users
+yet.
+
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/local_lock.h | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/include/linux/local_lock.h
++++ b/include/linux/local_lock.h
+@@ -51,4 +51,15 @@
+ #define local_unlock_irqrestore(lock, flags) \
+ __local_unlock_irqrestore(lock, flags)
+
++DEFINE_GUARD(local_lock, local_lock_t __percpu*,
++ local_lock(_T),
++ local_unlock(_T))
++DEFINE_GUARD(local_lock_irq, local_lock_t __percpu*,
++ local_lock_irq(_T),
++ local_unlock_irq(_T))
++DEFINE_LOCK_GUARD_1(local_lock_irqsave, local_lock_t __percpu,
++ local_lock_irqsave(_T->lock, _T->flags),
++ local_unlock_irqrestore(_T->lock, _T->flags),
++ unsigned long flags)
++
+ #endif
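
For orientation, a minimal usage sketch of the guard definitions introduced above; the per-CPU lock and counter are hypothetical, not part of the patch (guard() itself comes from <linux/cleanup.h>):

    /* Hypothetical consumer of the local_lock guard defined above. */
    #include <linux/cleanup.h>
    #include <linux/local_lock.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(local_lock_t, stat_lock) = INIT_LOCAL_LOCK(stat_lock);
    static DEFINE_PER_CPU(unsigned long, stat_count);

    static void stat_inc(void)
    {
            /* Acquires the per-CPU lock; released automatically when
             * the scope is left, so no unlock on any return path.
             */
            guard(local_lock)(&stat_lock);
            this_cpu_inc(stat_count);
    }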
diff --git a/debian/patches-rt/0001-net-Remove-conditional-threaded-NAPI-wakeup-based-on.patch b/debian/patches-rt/0001-net-Remove-conditional-threaded-NAPI-wakeup-based-on.patch
deleted file mode 100644
index 882e69292f..0000000000
--- a/debian/patches-rt/0001-net-Remove-conditional-threaded-NAPI-wakeup-based-on.patch
+++ /dev/null
@@ -1,75 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 25 Mar 2024 08:40:28 +0100
-Subject: [PATCH 1/4] net: Remove conditional threaded-NAPI wakeup based on
- task state.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
-
-A NAPI thread is scheduled by first setting the NAPI_STATE_SCHED bit. If
-successful (the bit was not yet set), the NAPI_STATE_SCHED_THREADED bit
-is also set, but only if the thread's state is not TASK_INTERRUPTIBLE
-(i.e. it is TASK_RUNNING), followed by a task wakeup.
-
-If the task is idle (TASK_INTERRUPTIBLE) then the
-NAPI_STATE_SCHED_THREADED bit is not set. The thread does not rely on
-the bit but always leaves the wait-loop after returning from schedule()
-because there must have been a wakeup.
-
-The smpboot-threads implementation for per-CPU threads requires an
-explicit condition and does not support "if we get out of schedule()
-then there must be something to do".
-
-Removing this optimisation simplifies the following integration.
-
-Set NAPI_STATE_SCHED_THREADED unconditionally on wakeup and rely on it
-in the wait path by removing the `woken' condition.
-
-Acked-by: Jakub Kicinski <kuba@kernel.org>
-Link: https://lore.kernel.org/r/20240325074943.289909-2-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- net/core/dev.c | 14 ++------------
- 1 file changed, 2 insertions(+), 12 deletions(-)
-
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -4433,13 +4433,7 @@ static inline void ____napi_schedule(str
- */
- thread = READ_ONCE(napi->thread);
- if (thread) {
-- /* Avoid doing set_bit() if the thread is in
-- * INTERRUPTIBLE state, cause napi_thread_wait()
-- * makes sure to proceed with napi polling
-- * if the thread is explicitly woken from here.
-- */
-- if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE)
-- set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
-+ set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
- wake_up_process(thread);
- return;
- }
-@@ -6716,8 +6710,6 @@ static int napi_poll(struct napi_struct
-
- static int napi_thread_wait(struct napi_struct *napi)
- {
-- bool woken = false;
--
- set_current_state(TASK_INTERRUPTIBLE);
-
- while (!kthread_should_stop()) {
-@@ -6726,15 +6718,13 @@ static int napi_thread_wait(struct napi_
- * Testing SCHED bit is not enough because SCHED bit might be
- * set by some other busy poll thread or by napi_disable().
- */
-- if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) {
-+ if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state)) {
- WARN_ON(!list_empty(&napi->poll_list));
- __set_current_state(TASK_RUNNING);
- return 0;
- }
-
- schedule();
-- /* woken being true indicates this thread owns this napi. */
-- woken = true;
- set_current_state(TASK_INTERRUPTIBLE);
- }
- __set_current_state(TASK_RUNNING);
diff --git a/debian/patches-rt/0001-net-Remove-task_struct-bpf_net_context-init-on-fork.patch b/debian/patches-rt/0001-net-Remove-task_struct-bpf_net_context-init-on-fork.patch
new file mode 100644
index 0000000000..2309345d52
--- /dev/null
+++ b/debian/patches-rt/0001-net-Remove-task_struct-bpf_net_context-init-on-fork.patch
@@ -0,0 +1,32 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 28 Jun 2024 12:18:54 +0200
+Subject: [PATCH 1/3] net: Remove task_struct::bpf_net_context init on fork.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+There is no clone() invocation within a bpf_net_ctx_…() block. Therefore
+task_struct::bpf_net_context is always NULL at that point and an explicit
+initialisation is not required.
+
+Remove the NULL assignment in the clone() path.
+
+Suggested-by: Jakub Kicinski <kuba@kernel.org>
+Reviewed-by: Jakub Kicinski <kuba@kernel.org>
+Link: https://lore.kernel.org/r/20240628103020.1766241-2-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/fork.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -2355,7 +2355,6 @@ static void rv_task_fork(struct task_str
+ RCU_INIT_POINTER(p->bpf_storage, NULL);
+ p->bpf_ctx = NULL;
+ #endif
+- p->bpf_net_context = NULL;
+
+ /* Perform scheduler related setup. Assign this task to a CPU. */
+ retval = sched_fork(clone_flags, p);
diff --git a/debian/patches-rt/0001-net-tcp-dccp-prepare-for-tw_timer-un-pinning.patch b/debian/patches-rt/0001-net-tcp-dccp-prepare-for-tw_timer-un-pinning.patch
new file mode 100644
index 0000000000..7fd7164b95
--- /dev/null
+++ b/debian/patches-rt/0001-net-tcp-dccp-prepare-for-tw_timer-un-pinning.patch
@@ -0,0 +1,188 @@
+From: Valentin Schneider <vschneid@redhat.com>
+Date: Tue, 4 Jun 2024 16:08:47 +0200
+Subject: [PATCH 1/3] net: tcp/dccp: prepare for tw_timer un-pinning
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+The TCP timewait timer is proving to be problematic for setups where
+scheduler CPU isolation is achieved at runtime via cpusets (as opposed to
+statically via isolcpus=domains).
+
+What happens there is a CPU goes through tcp_time_wait(), arming the
+time_wait timer, then gets isolated. TCP_TIMEWAIT_LEN later, the timer
+fires, causing interference for the now-isolated CPU. This is conceptually
+similar to the issue described in commit e02b93124855 ("workqueue: Unbind
+kworkers before sending them to exit()")
+
+Move inet_twsk_schedule() to within inet_twsk_hashdance(), with the ehash
+lock held. Expand the lock's critical section from inet_twsk_kill() to
+inet_twsk_deschedule_put(), serializing the scheduling vs descheduling of
+the timer. IOW, this prevents the following race:
+
+ tcp_time_wait()
+ inet_twsk_hashdance()
+ inet_twsk_deschedule_put()
+ del_timer_sync()
+ inet_twsk_schedule()
+
+Thanks to Paolo Abeni for suggesting to leverage the ehash lock.
+
+This also restores a comment from commit ec94c2696f0b ("tcp/dccp: avoid
+one atomic operation for timewait hashdance") as inet_twsk_hashdance() had
+a "Step 1" and "Step 3" comment, but the "Step 2" had gone missing.
+
+inet_twsk_deschedule_put() now acquires the ehash spinlock to synchronize
+with inet_twsk_hashdance_schedule().
+
+To ease a possible regression search, the actual un-pinning is done in the
+next patch.
+
+Link: https://lore.kernel.org/all/ZPhpfMjSiHVjQkTk@localhost.localdomain/
+Signed-off-by: Valentin Schneider <vschneid@redhat.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20240604140903.31939-2-fw@strlen.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/net/inet_timewait_sock.h | 6 +++-
+ net/dccp/minisocks.c | 3 --
+ net/ipv4/inet_timewait_sock.c | 52 +++++++++++++++++++++++++++++++++------
+ net/ipv4/tcp_minisocks.c | 3 --
+ 4 files changed, 51 insertions(+), 13 deletions(-)
+
+--- a/include/net/inet_timewait_sock.h
++++ b/include/net/inet_timewait_sock.h
+@@ -93,8 +93,10 @@ struct inet_timewait_sock *inet_twsk_all
+ struct inet_timewait_death_row *dr,
+ const int state);
+
+-void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
+- struct inet_hashinfo *hashinfo);
++void inet_twsk_hashdance_schedule(struct inet_timewait_sock *tw,
++ struct sock *sk,
++ struct inet_hashinfo *hashinfo,
++ int timeo);
+
+ void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
+ bool rearm);
+--- a/net/dccp/minisocks.c
++++ b/net/dccp/minisocks.c
+@@ -59,11 +59,10 @@ void dccp_time_wait(struct sock *sk, int
+ * we complete the initialization.
+ */
+ local_bh_disable();
+- inet_twsk_schedule(tw, timeo);
+ /* Linkage updates.
+ * Note that access to tw after this point is illegal.
+ */
+- inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
++ inet_twsk_hashdance_schedule(tw, sk, &dccp_hashinfo, timeo);
+ local_bh_enable();
+ } else {
+ /* Sorry, if we're out of memory, just CLOSE this
+--- a/net/ipv4/inet_timewait_sock.c
++++ b/net/ipv4/inet_timewait_sock.c
+@@ -96,9 +96,13 @@ static void inet_twsk_add_node_rcu(struc
+ * Enter the time wait state. This is called with locally disabled BH.
+ * Essentially we whip up a timewait bucket, copy the relevant info into it
+ * from the SK, and mess with hash chains and list linkage.
++ *
++ * The caller must not access @tw anymore after this function returns.
+ */
+-void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
+- struct inet_hashinfo *hashinfo)
++void inet_twsk_hashdance_schedule(struct inet_timewait_sock *tw,
++ struct sock *sk,
++ struct inet_hashinfo *hashinfo,
++ int timeo)
+ {
+ const struct inet_sock *inet = inet_sk(sk);
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+@@ -129,26 +133,33 @@ void inet_twsk_hashdance(struct inet_tim
+
+ spin_lock(lock);
+
++ /* Step 2: Hash TW into tcp ehash chain */
+ inet_twsk_add_node_rcu(tw, &ehead->chain);
+
+ /* Step 3: Remove SK from hash chain */
+ if (__sk_nulls_del_node_init_rcu(sk))
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+
+- spin_unlock(lock);
+
++ /* Ensure above writes are committed into memory before updating the
++ * refcount.
++ * Provides ordering vs later refcount_inc().
++ */
++ smp_wmb();
+ /* tw_refcnt is set to 3 because we have :
+ * - one reference for bhash chain.
+ * - one reference for ehash chain.
+ * - one reference for timer.
+- * We can use atomic_set() because prior spin_lock()/spin_unlock()
+- * committed into memory all tw fields.
+ * Also note that after this point, we lost our implicit reference
+ * so we are not allowed to use tw anymore.
+ */
+ refcount_set(&tw->tw_refcnt, 3);
++
++ inet_twsk_schedule(tw, timeo);
++
++ spin_unlock(lock);
+ }
+-EXPORT_SYMBOL_GPL(inet_twsk_hashdance);
++EXPORT_SYMBOL_GPL(inet_twsk_hashdance_schedule);
+
+ static void tw_timer_handler(struct timer_list *t)
+ {
+@@ -217,7 +228,34 @@ EXPORT_SYMBOL_GPL(inet_twsk_alloc);
+ */
+ void inet_twsk_deschedule_put(struct inet_timewait_sock *tw)
+ {
+- if (del_timer_sync(&tw->tw_timer))
++ struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
++ spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
++
++ /* inet_twsk_purge() walks over all sockets, including tw ones,
++ * and removes them via inet_twsk_deschedule_put() after a
++ * refcount_inc_not_zero().
++ *
++ * inet_twsk_hashdance_schedule() must (re)init the refcount before
++ * arming the timer, i.e. inet_twsk_purge can obtain a reference to
++ * a twsk that did not yet schedule the timer.
++ *
++ * The ehash lock synchronizes these two:
++ * After acquiring the lock, the timer is always scheduled (else
++ * timer_shutdown returns false), because hashdance_schedule releases
++ * the ehash lock only after completing the timer initialization.
++ *
++ * Without grabbing the ehash lock, we get:
++ * 1) cpu x sets twsk refcount to 3
++ * 2) cpu y bumps refcount to 4
++ * 3) cpu y calls inet_twsk_deschedule_put() and shuts timer down
++ * 4) cpu x tries to start timer, but mod_timer is a noop post-shutdown
++ * -> timer refcount is never decremented.
++ */
++ spin_lock(lock);
++ /* Makes sure hashdance_schedule() has completed */
++ spin_unlock(lock);
++
++ if (timer_shutdown_sync(&tw->tw_timer))
+ inet_twsk_kill(tw);
+ inet_twsk_put(tw);
+ }
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -344,11 +344,10 @@ void tcp_time_wait(struct sock *sk, int
+ * we complete the initialization.
+ */
+ local_bh_disable();
+- inet_twsk_schedule(tw, timeo);
+ /* Linkage updates.
+ * Note that access to tw after this point is illegal.
+ */
+- inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo);
++ inet_twsk_hashdance_schedule(tw, sk, net->ipv4.tcp_death_row.hashinfo, timeo);
+ local_bh_enable();
+ } else {
+ /* Sorry, if we're out of memory, just CLOSE this
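
The empty lock/unlock pair added to inet_twsk_deschedule_put() above is a classic lock-as-barrier idiom: acquiring and immediately releasing a lock guarantees that any critical section that began before the acquire has completed. A minimal sketch of that pattern, with hypothetical names rather than the actual TCP code:

    /* Hypothetical illustration of the lock-as-barrier idiom. */
    #include <linux/jiffies.h>
    #include <linux/spinlock.h>
    #include <linux/timer.h>

    static DEFINE_SPINLOCK(publish_lock);

    static void publisher(struct timer_list *timer, unsigned long timeo)
    {
            spin_lock(&publish_lock);
            /* ... make the object findable, initialise its refcount ... */
            mod_timer(timer, jiffies + timeo); /* timer armed under the lock */
            spin_unlock(&publish_lock);
    }

    static void canceller(struct timer_list *timer)
    {
            /* Empty critical section: once the lock is ours, publisher()
             * has finished, so the timer is guaranteed to be armed before
             * it is shut down.
             */
            spin_lock(&publish_lock);
            spin_unlock(&publish_lock);

            if (timer_shutdown_sync(timer)) {
                    /* We stopped a pending timer and own its reference. */
            }
    }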
diff --git a/debian/patches-rt/0001-perf-Move-irq_work_queue-where-the-event-is-prepared.patch b/debian/patches-rt/0001-perf-Move-irq_work_queue-where-the-event-is-prepared.patch
index b5d1590348..8b06ae678c 100644
--- a/debian/patches-rt/0001-perf-Move-irq_work_queue-where-the-event-is-prepared.patch
+++ b/debian/patches-rt/0001-perf-Move-irq_work_queue-where-the-event-is-prepared.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 12 Mar 2024 19:01:49 +0100
-Subject: [PATCH 1/4] perf: Move irq_work_queue() where the event is prepared.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Date: Thu, 4 Jul 2024 19:03:35 +0200
+Subject: [PATCH 1/7] perf: Move irq_work_queue() where the event is prepared.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Only if perf_event::pending_sigtrap is zero is the irq_work accounted,
by incrementing perf_event::nr_pending. The member perf_event::pending_addr
@@ -15,27 +15,34 @@ irq_work is scheduled once.
Tested-by: Marco Elver <elver@google.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-Reported-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-Link: https://lore.kernel.org/r/20240312180814.3373778-2-bigeasy@linutronix.de
+Link: https://lore.kernel.org/r/20240704170424.1466941-2-bigeasy@linutronix.de
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/events/core.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
+ kernel/events/core.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
-@@ -9591,6 +9591,7 @@ static int __perf_event_overflow(struct
+@@ -9738,6 +9738,11 @@ static int __perf_event_overflow(struct
if (!event->pending_sigtrap) {
event->pending_sigtrap = pending_id;
local_inc(&event->ctx->nr_pending);
++
++ event->pending_addr = 0;
++ if (valid_sample && (data->sample_flags & PERF_SAMPLE_ADDR))
++ event->pending_addr = data->addr;
+ irq_work_queue(&event->pending_irq);
} else if (event->attr.exclude_kernel && valid_sample) {
/*
* Should not be able to return to user space without
-@@ -9610,7 +9611,6 @@ static int __perf_event_overflow(struct
- event->pending_addr = 0;
- if (valid_sample && (data->sample_flags & PERF_SAMPLE_ADDR))
- event->pending_addr = data->addr;
+@@ -9753,11 +9758,6 @@ static int __perf_event_overflow(struct
+ */
+ WARN_ON_ONCE(event->pending_sigtrap != pending_id);
+ }
+-
+- event->pending_addr = 0;
+- if (valid_sample && (data->sample_flags & PERF_SAMPLE_ADDR))
+- event->pending_addr = data->addr;
- irq_work_queue(&event->pending_irq);
}
diff --git a/debian/patches-rt/0001-printk-Add-notation-to-console_srcu-locking.patch b/debian/patches-rt/0001-printk-Add-notation-to-console_srcu-locking.patch
index d2d214826b..f104135985 100644
--- a/debian/patches-rt/0001-printk-Add-notation-to-console_srcu-locking.patch
+++ b/debian/patches-rt/0001-printk-Add-notation-to-console_srcu-locking.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Mon, 9 Oct 2023 13:55:19 +0000
-Subject: [PATCH 01/46] printk: Add notation to console_srcu locking
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 01/48] printk: Add notation to console_srcu locking
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
kernel/printk/printk.c:284:5: sparse: sparse: context imbalance in
'console_srcu_read_lock' - wrong count at exit
@@ -10,6 +10,7 @@ include/linux/srcu.h:301:9: sparse: sparse: context imbalance in
Fixes: 6c4afa79147e ("printk: Prepare for SRCU console list protection")
Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/printk/printk.c | 2 ++
diff --git a/debian/patches-rt/0001-sched-core-Provide-a-method-to-check-if-a-task-is-PI.patch b/debian/patches-rt/0001-sched-core-Provide-a-method-to-check-if-a-task-is-PI.patch
index 716c67be4e..d7a8f51a72 100644
--- a/debian/patches-rt/0001-sched-core-Provide-a-method-to-check-if-a-task-is-PI.patch
+++ b/debian/patches-rt/0001-sched-core-Provide-a-method-to-check-if-a-task-is-PI.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 4 Aug 2023 13:30:37 +0200
Subject: [PATCH 1/3] sched/core: Provide a method to check if a task is
PI-boosted.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Provide a method to check if a task inherited the priority from another
task. This happens if a task owns a lock which is requested by a task
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1795,6 +1795,7 @@ static inline int dl_task_check_affinity
+@@ -1806,6 +1806,7 @@ static inline int dl_task_check_affinity
}
#endif
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern int task_prio(const struct task_struct *p);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -8911,6 +8911,21 @@ static inline void preempt_dynamic_init(
+@@ -8910,6 +8910,21 @@ static inline void preempt_dynamic_init(
#endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
diff --git a/debian/patches-rt/zram-Replace-bit-spinlocks-with-spinlock_t-for-PREEM.patch b/debian/patches-rt/0001-zram-Replace-bit-spinlocks-with-a-spinlock_t.patch
index 0151ca89f4..81f4f3c5dd 100644
--- a/debian/patches-rt/zram-Replace-bit-spinlocks-with-spinlock_t-for-PREEM.patch
+++ b/debian/patches-rt/0001-zram-Replace-bit-spinlocks-with-a-spinlock_t.patch
@@ -1,32 +1,35 @@
From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Thu, 31 Mar 2016 04:08:28 +0200
-Subject: [PATCH] zram: Replace bit spinlocks with spinlock_t for PREEMPT_RT.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 1/3] zram: Replace bit spinlocks with a spinlock_t.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
The bit spinlock disables preemption. The spinlock_t lock becomes a sleeping
lock on PREEMPT_RT and it can not be acquired in this context. In this locked
section, zs_free() acquires a zs_pool::lock, and there is access to
zram::wb_limit_lock.
-Use a spinlock_t on PREEMPT_RT for locking and set/ clear ZRAM_LOCK bit after
-the lock has been acquired/ dropped.
+Add a spinlock_t for locking. Keep setting/ clearing the ZRAM_LOCK bit
+after the lock has been acquired/ dropped. The size of struct
+zram_table_entry increases by 4 bytes due to the lock, plus an additional
+4 bytes of padding with CONFIG_ZRAM_TRACK_ENTRY_ACTIME enabled.
Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
+Link: https://lore.kernel.org/r/20240620153556.777272-2-bigeasy@linutronix.de
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://lkml.kernel.org/r/YqIbMuHCPiQk+Ac2@linutronix.de
Link: https://lore.kernel.org/20230323161830.jFbWCosd@linutronix.de
+Link: https://lore.kernel.org/20240619150814.BRAvaziM@linutronix.de
---
- drivers/block/zram/zram_drv.c | 37 +++++++++++++++++++++++++++++++++++++
- drivers/block/zram/zram_drv.h | 3 +++
- 2 files changed, 40 insertions(+)
+ drivers/block/zram/zram_drv.c | 22 +++++++++++++++++++---
+ drivers/block/zram/zram_drv.h | 1 +
+ 2 files changed, 20 insertions(+), 3 deletions(-)
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
-@@ -57,6 +57,41 @@ static void zram_free_page(struct zram *
+@@ -57,19 +57,34 @@ static void zram_free_page(struct zram *
static int zram_read_page(struct zram *zram, struct page *page, u32 index,
struct bio *parent);
-+#ifdef CONFIG_PREEMPT_RT
+static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages)
+{
+ size_t index;
@@ -35,44 +38,33 @@ Link: https://lore.kernel.org/20230323161830.jFbWCosd@linutronix.de
+ spin_lock_init(&zram->table[index].lock);
+}
+
-+static int zram_slot_trylock(struct zram *zram, u32 index)
-+{
+ static int zram_slot_trylock(struct zram *zram, u32 index)
+ {
+- return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
+ int ret;
+
+ ret = spin_trylock(&zram->table[index].lock);
+ if (ret)
+ __set_bit(ZRAM_LOCK, &zram->table[index].flags);
+ return ret;
-+}
-+
-+static void zram_slot_lock(struct zram *zram, u32 index)
-+{
+ }
+
+ static void zram_slot_lock(struct zram *zram, u32 index)
+ {
+- bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);
+ spin_lock(&zram->table[index].lock);
+ __set_bit(ZRAM_LOCK, &zram->table[index].flags);
-+}
-+
-+static void zram_slot_unlock(struct zram *zram, u32 index)
-+{
+ }
+
+ static void zram_slot_unlock(struct zram *zram, u32 index)
+ {
+- bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
+ __clear_bit(ZRAM_LOCK, &zram->table[index].flags);
+ spin_unlock(&zram->table[index].lock);
-+}
-+
-+#else
-+
-+static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages) { }
-+
- static int zram_slot_trylock(struct zram *zram, u32 index)
- {
- return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
-@@ -71,6 +106,7 @@ static void zram_slot_unlock(struct zram
- {
- bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
}
-+#endif
static inline bool init_done(struct zram *zram)
- {
-@@ -1241,6 +1277,7 @@ static bool zram_meta_alloc(struct zram
+@@ -1226,6 +1241,7 @@ static bool zram_meta_alloc(struct zram
if (!huge_class_size)
huge_class_size = zs_huge_class_size(zram->mem_pool);
@@ -82,13 +74,11 @@ Link: https://lore.kernel.org/20230323161830.jFbWCosd@linutronix.de
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
-@@ -69,6 +69,9 @@ struct zram_table_entry {
+@@ -69,6 +69,7 @@ struct zram_table_entry {
unsigned long element;
};
unsigned long flags;
-+#ifdef CONFIG_PREEMPT_RT
+ spinlock_t lock;
-+#endif
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
ktime_t ac_time;
#endif
diff --git a/debian/patches-rt/0002-ARM-vfp-Use-vfp_lock-in-vfp_sync_hwstate.patch b/debian/patches-rt/0002-ARM-vfp-Use-vfp_lock-in-vfp_sync_hwstate.patch
index 175da5e4b4..bf3f19ee85 100644
--- a/debian/patches-rt/0002-ARM-vfp-Use-vfp_lock-in-vfp_sync_hwstate.patch
+++ b/debian/patches-rt/0002-ARM-vfp-Use-vfp_lock-in-vfp_sync_hwstate.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 19 May 2023 16:57:30 +0200
Subject: [PATCH 2/4] ARM: vfp: Use vfp_lock() in vfp_sync_hwstate().
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
vfp_sync_hwstate() uses preempt_disable() followed by local_bh_disable()
to ensure that it won't get interrupted while checking the VFP state.
diff --git a/debian/patches-rt/0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch b/debian/patches-rt/0002-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch
index aebb5534f6..c4f8ef9737 100644
--- a/debian/patches-rt/0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch
+++ b/debian/patches-rt/0002-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch
@@ -1,8 +1,8 @@
From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Sat, 27 Feb 2016 09:01:42 +0100
-Subject: [PATCH 04/10] drm/i915: Don't disable interrupts on PREEMPT_RT during
+Subject: [PATCH 2/8] drm/i915: Don't disable interrupts on PREEMPT_RT during
atomic updates
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Commit
8d7849db3eab7 ("drm/i915: Make sprite updates atomic")
@@ -33,12 +33,13 @@ Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/gpu/drm/i915/display/intel_crtc.c | 9 ++++++---
+ drivers/gpu/drm/i915/display/intel_cursor.c | 9 ++++++---
drivers/gpu/drm/i915/display/intel_vblank.c | 6 ++++--
- 2 files changed, 10 insertions(+), 5 deletions(-)
+ 3 files changed, 16 insertions(+), 8 deletions(-)
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
-@@ -512,7 +512,8 @@ void intel_pipe_update_start(struct inte
+@@ -521,7 +521,8 @@ void intel_pipe_update_start(struct inte
*/
intel_psr_wait_for_idle_locked(new_crtc_state);
@@ -48,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
crtc->debug.min_vbl = evade.min;
crtc->debug.max_vbl = evade.max;
-@@ -530,7 +531,8 @@ void intel_pipe_update_start(struct inte
+@@ -539,7 +540,8 @@ void intel_pipe_update_start(struct inte
return;
irq_disable:
@@ -58,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
-@@ -632,7 +634,8 @@ void intel_pipe_update_end(struct intel_
+@@ -668,7 +670,8 @@ void intel_pipe_update_end(struct intel_
*/
intel_vrr_send_push(new_crtc_state);
@@ -68,9 +69,39 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (intel_vgpu_active(dev_priv))
goto out;
+--- a/drivers/gpu/drm/i915/display/intel_cursor.c
++++ b/drivers/gpu/drm/i915/display/intel_cursor.c
+@@ -895,13 +895,15 @@ intel_legacy_cursor_update(struct drm_pl
+ */
+ intel_psr_wait_for_idle_locked(crtc_state);
+
+- local_irq_disable();
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ local_irq_disable();
+
+ intel_vblank_evade(&evade);
+
+ drm_crtc_vblank_put(&crtc->base);
+ } else {
+- local_irq_disable();
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ local_irq_disable();
+ }
+
+ if (new_plane_state->uapi.visible) {
+@@ -911,7 +913,8 @@ intel_legacy_cursor_update(struct drm_pl
+ intel_plane_disable_arm(plane, crtc_state);
+ }
+
+- local_irq_enable();
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ local_irq_enable();
+
+ intel_psr_unlock(crtc_state);
+
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
-@@ -700,11 +700,13 @@ int intel_vblank_evade(struct intel_vbla
+@@ -705,11 +705,13 @@ int intel_vblank_evade(struct intel_vbla
break;
}
diff --git a/debian/patches-rt/0002-locking-local_lock-Add-local-nested-BH-locking-infra.patch b/debian/patches-rt/0002-locking-local_lock-Add-local-nested-BH-locking-infra.patch
new file mode 100644
index 0000000000..a840e3e47c
--- /dev/null
+++ b/debian/patches-rt/0002-locking-local_lock-Add-local-nested-BH-locking-infra.patch
@@ -0,0 +1,141 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 18 Aug 2023 15:17:44 +0200
+Subject: [PATCH 02/15] locking/local_lock: Add local nested BH locking
+ infrastructure.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+Add local_lock_nested_bh() locking. It is based on local_lock_t and the
+naming follows the preempt_disable_nested() example.
+
+For !PREEMPT_RT + !LOCKDEP it is a per-CPU annotation for locking
+assumptions based on local_bh_disable(). The macro is optimized away
+during compilation.
+For !PREEMPT_RT + LOCKDEP the local_lock_nested_bh() is reduced to
+the usual lock-acquire plus lockdep_assert_in_softirq() - ensuring that
+BH is disabled.
+
+For PREEMPT_RT local_lock_nested_bh() acquires the specified per-CPU
+lock. It does not disable CPU migration because it relies on
+local_bh_disable() disabling CPU migration.
+With LOCKDEP it performs the usual lockdep checks as with !PREEMPT_RT.
+Due to include hell the softirq check has been moved to spinlock.c.
+
+The intention is to use this locking in places where locking of a per-CPU
+variable relies on BH being disabled. Instead of treating disabled
+bottom halves as a big per-CPU lock, PREEMPT_RT can use this to reduce
+the locking scope to what actually needs protecting.
+A side effect is that it also documents the protection scope of the
+per-CPU variables.
+
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/local_lock.h | 10 ++++++++++
+ include/linux/local_lock_internal.h | 31 +++++++++++++++++++++++++++++++
+ include/linux/lockdep.h | 3 +++
+ kernel/locking/spinlock.c | 8 ++++++++
+ 4 files changed, 52 insertions(+)
+
+--- a/include/linux/local_lock.h
++++ b/include/linux/local_lock.h
+@@ -62,4 +62,14 @@ DEFINE_LOCK_GUARD_1(local_lock_irqsave,
+ local_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
++#define local_lock_nested_bh(_lock) \
++ __local_lock_nested_bh(_lock)
++
++#define local_unlock_nested_bh(_lock) \
++ __local_unlock_nested_bh(_lock)
++
++DEFINE_GUARD(local_lock_nested_bh, local_lock_t __percpu*,
++ local_lock_nested_bh(_T),
++ local_unlock_nested_bh(_T))
++
+ #endif
+--- a/include/linux/local_lock_internal.h
++++ b/include/linux/local_lock_internal.h
+@@ -62,6 +62,17 @@ do { \
+ local_lock_debug_init(lock); \
+ } while (0)
+
++#define __spinlock_nested_bh_init(lock) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
++ lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \
++ 0, LD_WAIT_CONFIG, LD_WAIT_INV, \
++ LD_LOCK_NORMAL); \
++ local_lock_debug_init(lock); \
++} while (0)
++
+ #define __local_lock(lock) \
+ do { \
+ preempt_disable(); \
+@@ -98,6 +109,15 @@ do { \
+ local_irq_restore(flags); \
+ } while (0)
+
++#define __local_lock_nested_bh(lock) \
++ do { \
++ lockdep_assert_in_softirq(); \
++ local_lock_acquire(this_cpu_ptr(lock)); \
++ } while (0)
++
++#define __local_unlock_nested_bh(lock) \
++ local_lock_release(this_cpu_ptr(lock))
++
+ #else /* !CONFIG_PREEMPT_RT */
+
+ /*
+@@ -138,4 +158,15 @@ typedef spinlock_t local_lock_t;
+
+ #define __local_unlock_irqrestore(lock, flags) __local_unlock(lock)
+
++#define __local_lock_nested_bh(lock) \
++do { \
++ lockdep_assert_in_softirq_func(); \
++ spin_lock(this_cpu_ptr(lock)); \
++} while (0)
++
++#define __local_unlock_nested_bh(lock) \
++do { \
++ spin_unlock(this_cpu_ptr((lock))); \
++} while (0)
++
+ #endif /* CONFIG_PREEMPT_RT */
+--- a/include/linux/lockdep.h
++++ b/include/linux/lockdep.h
+@@ -600,6 +600,8 @@ do { \
+ (!in_softirq() || in_irq() || in_nmi())); \
+ } while (0)
+
++extern void lockdep_assert_in_softirq_func(void);
++
+ #else
+ # define might_lock(lock) do { } while (0)
+ # define might_lock_read(lock) do { } while (0)
+@@ -613,6 +615,7 @@ do { \
+ # define lockdep_assert_preemption_enabled() do { } while (0)
+ # define lockdep_assert_preemption_disabled() do { } while (0)
+ # define lockdep_assert_in_softirq() do { } while (0)
++# define lockdep_assert_in_softirq_func() do { } while (0)
+ #endif
+
+ #ifdef CONFIG_PROVE_RAW_LOCK_NESTING
+--- a/kernel/locking/spinlock.c
++++ b/kernel/locking/spinlock.c
+@@ -413,3 +413,11 @@ notrace int in_lock_functions(unsigned l
+ && addr < (unsigned long)__lock_text_end;
+ }
+ EXPORT_SYMBOL(in_lock_functions);
++
++#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_PREEMPT_RT)
++void notrace lockdep_assert_in_softirq_func(void)
++{
++ lockdep_assert_in_softirq();
++}
++EXPORT_SYMBOL(lockdep_assert_in_softirq_func);
++#endif
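
A minimal usage sketch of the new primitive, with a hypothetical per-CPU structure (later patches in this series convert real networking users along these lines):

    /* Hypothetical per-CPU data that so far relied on BH-off alone. */
    #include <linux/local_lock.h>
    #include <linux/percpu.h>

    struct frag_cache {
            local_lock_t    lock;
            unsigned int    used;
    };

    static DEFINE_PER_CPU(struct frag_cache, frag_cache) = {
            .lock = INIT_LOCAL_LOCK(lock),
    };

    static void frag_cache_use(void)
    {
            /* Caller runs with BH disabled. The nested-BH lock documents
             * (and on PREEMPT_RT actually provides) the protection of
             * this one per-CPU variable instead of BH-off as a whole.
             */
            local_lock_nested_bh(&frag_cache.lock);
            this_cpu_ptr(&frag_cache)->used++;
            local_unlock_nested_bh(&frag_cache.lock);
    }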
diff --git a/debian/patches-rt/0002-net-Allow-to-use-SMP-threads-for-backlog-NAPI.patch b/debian/patches-rt/0002-net-Allow-to-use-SMP-threads-for-backlog-NAPI.patch
deleted file mode 100644
index 3e4b5b7bec..0000000000
--- a/debian/patches-rt/0002-net-Allow-to-use-SMP-threads-for-backlog-NAPI.patch
+++ /dev/null
@@ -1,333 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 25 Mar 2024 08:40:29 +0100
-Subject: [PATCH 2/4] net: Allow to use SMP threads for backlog NAPI.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
-
-Backlog NAPI is a per-CPU NAPI struct only (with no device behind it)
-used by drivers which don't do NAPI them self, RPS and parts of the
-stack which need to avoid recursive deadlocks while processing a packet.
-
-The non-NAPI driver use the CPU local backlog NAPI. If RPS is enabled
-then a flow for the skb is computed and based on the flow the skb can be
-enqueued on a remote CPU. Scheduling/ raising the softirq (for backlog's
-NAPI) on the remote CPU isn't trivial because the softirq is only
-scheduled on the local CPU and performed after the hardirq is done.
-In order to schedule a softirq on the remote CPU, an IPI is sent to the
-remote CPU which schedules the backlog-NAPI on the then local CPU.
-
-On PREEMPT_RT interrupts are force-threaded. The soft interrupts are
-raised within the interrupt thread and processed after the interrupt
-handler completed still within the context of the interrupt thread. The
-softirq is handled in the context where it originated.
-
-With force-threaded interrupts enabled, ksoftirqd is woken up if a
-softirq is raised from hardirq context. This is the case if it is raised
-from an IPI. Additionally there is a warning on PREEMPT_RT if the
-softirq is raised from the idle thread.
-This was done for two reasons:
-- With threaded interrupts the processing should happen in thread
- context (where it originated) and ksoftirqd is the only thread for
- this context if raised from hardirq. Using the currently running task
- instead would "punish" a random task.
-- Once ksoftirqd is active it consumes all further softirqs until it
- stops running. This changed recently and is no longer the case.
-
-Instead of keeping the backlog NAPI in ksoftirqd (in force-threaded/
-PREEMPT_RT setups) I am proposing NAPI-threads for backlog.
-The "proper" setup with threaded-NAPI is not doable because the threads
-are not pinned to an individual CPU and can be modified by the user.
-Additionally a dummy network device would have to be assigned. Also
-CPU-hotplug has to be considered if additional CPUs show up.
-All this can probably be done/ solved but the smpboot-threads already
-provide this infrastructure.
-
-Sending UDP packets over loopback expects that the packet is processed
-within the call. Delaying it by handing it over to the thread hurts
-performance. It is not beneficial to the outcome if the context switch
-happens immediately after enqueue or after a while to process a few
-packets in a batch.
-There is no need to always use the thread if the backlog NAPI is
-requested on the local CPU. This restores the loopback throughput. The
-performance drops mostly to the same value after enabling RPS on the
-loopback, comparing the IPI and the thread results.
-
-Create NAPI-threads for backlog if requested during boot. The thread runs
-the inner loop from napi_threaded_poll(); the wait part is different. It
-checks for NAPI_STATE_SCHED (the backlog NAPI can not be disabled).
-
-The NAPI threads for backlog are optional; they have to be enabled via the boot
-argument "thread_backlog_napi". It is mandatory for PREEMPT_RT to avoid the
-wakeup of ksoftirqd from the IPI.
-
-Acked-by: Jakub Kicinski <kuba@kernel.org>
-Link: https://lore.kernel.org/r/20240325074943.289909-3-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- net/core/dev.c | 152 +++++++++++++++++++++++++++++++++++++++++++--------------
- 1 file changed, 115 insertions(+), 37 deletions(-)
-
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -78,6 +78,7 @@
- #include <linux/slab.h>
- #include <linux/sched.h>
- #include <linux/sched/mm.h>
-+#include <linux/smpboot.h>
- #include <linux/mutex.h>
- #include <linux/rwsem.h>
- #include <linux/string.h>
-@@ -197,6 +198,31 @@ static inline struct hlist_head *dev_ind
- return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
- }
-
-+#ifndef CONFIG_PREEMPT_RT
-+
-+static DEFINE_STATIC_KEY_FALSE(use_backlog_threads_key);
-+
-+static int __init setup_backlog_napi_threads(char *arg)
-+{
-+ static_branch_enable(&use_backlog_threads_key);
-+ return 0;
-+}
-+early_param("thread_backlog_napi", setup_backlog_napi_threads);
-+
-+static bool use_backlog_threads(void)
-+{
-+ return static_branch_unlikely(&use_backlog_threads_key);
-+}
-+
-+#else
-+
-+static bool use_backlog_threads(void)
-+{
-+ return true;
-+}
-+
-+#endif
-+
- static inline void rps_lock_irqsave(struct softnet_data *sd,
- unsigned long *flags)
- {
-@@ -4410,6 +4436,7 @@ EXPORT_SYMBOL(__dev_direct_xmit);
- /*************************************************************************
- * Receiver routines
- *************************************************************************/
-+static DEFINE_PER_CPU(struct task_struct *, backlog_napi);
-
- unsigned int sysctl_skb_defer_max __read_mostly = 64;
- int weight_p __read_mostly = 64; /* old backlog weight */
-@@ -4433,12 +4460,16 @@ static inline void ____napi_schedule(str
- */
- thread = READ_ONCE(napi->thread);
- if (thread) {
-+ if (use_backlog_threads() && thread == raw_cpu_read(backlog_napi))
-+ goto use_local_napi;
-+
- set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
- wake_up_process(thread);
- return;
- }
- }
-
-+use_local_napi:
- list_add_tail(&napi->poll_list, &sd->poll_list);
- WRITE_ONCE(napi->list_owner, smp_processor_id());
- /* If not called from net_rx_action()
-@@ -4678,6 +4709,11 @@ static void napi_schedule_rps(struct sof
-
- #ifdef CONFIG_RPS
- if (sd != mysd) {
-+ if (use_backlog_threads()) {
-+ __napi_schedule_irqoff(&sd->backlog);
-+ return;
-+ }
-+
- sd->rps_ipi_next = mysd->rps_ipi_list;
- mysd->rps_ipi_list = sd;
-
-@@ -5937,7 +5973,7 @@ static void net_rps_action_and_irq_enabl
- #ifdef CONFIG_RPS
- struct softnet_data *remsd = sd->rps_ipi_list;
-
-- if (remsd) {
-+ if (!use_backlog_threads() && remsd) {
- sd->rps_ipi_list = NULL;
-
- local_irq_enable();
-@@ -5952,7 +5988,7 @@ static void net_rps_action_and_irq_enabl
- static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
- {
- #ifdef CONFIG_RPS
-- return sd->rps_ipi_list != NULL;
-+ return !use_backlog_threads() && sd->rps_ipi_list;
- #else
- return false;
- #endif
-@@ -5996,7 +6032,7 @@ static int process_backlog(struct napi_s
- * We can use a plain write instead of clear_bit(),
- * and we dont need an smp_mb() memory barrier.
- */
-- napi->state = 0;
-+ napi->state &= NAPIF_STATE_THREADED;
- again = false;
- } else {
- skb_queue_splice_tail_init(&sd->input_pkt_queue,
-@@ -6732,43 +6768,48 @@ static int napi_thread_wait(struct napi_
- return -1;
- }
-
--static int napi_threaded_poll(void *data)
-+static void napi_threaded_poll_loop(struct napi_struct *napi)
- {
-- struct napi_struct *napi = data;
- struct softnet_data *sd;
-- void *have;
-+ unsigned long last_qs = jiffies;
-
-- while (!napi_thread_wait(napi)) {
-- unsigned long last_qs = jiffies;
-+ for (;;) {
-+ bool repoll = false;
-+ void *have;
-
-- for (;;) {
-- bool repoll = false;
-+ local_bh_disable();
-+ sd = this_cpu_ptr(&softnet_data);
-+ sd->in_napi_threaded_poll = true;
-
-- local_bh_disable();
-- sd = this_cpu_ptr(&softnet_data);
-- sd->in_napi_threaded_poll = true;
--
-- have = netpoll_poll_lock(napi);
-- __napi_poll(napi, &repoll);
-- netpoll_poll_unlock(have);
--
-- sd->in_napi_threaded_poll = false;
-- barrier();
--
-- if (sd_has_rps_ipi_waiting(sd)) {
-- local_irq_disable();
-- net_rps_action_and_irq_enable(sd);
-- }
-- skb_defer_free_flush(sd);
-- local_bh_enable();
-+ have = netpoll_poll_lock(napi);
-+ __napi_poll(napi, &repoll);
-+ netpoll_poll_unlock(have);
-+
-+ sd->in_napi_threaded_poll = false;
-+ barrier();
-+
-+ if (sd_has_rps_ipi_waiting(sd)) {
-+ local_irq_disable();
-+ net_rps_action_and_irq_enable(sd);
-+ }
-+ skb_defer_free_flush(sd);
-+ local_bh_enable();
-
-- if (!repoll)
-- break;
-+ if (!repoll)
-+ break;
-
-- rcu_softirq_qs_periodic(last_qs);
-- cond_resched();
-- }
-+ rcu_softirq_qs_periodic(last_qs);
-+ cond_resched();
- }
-+}
-+
-+static int napi_threaded_poll(void *data)
-+{
-+ struct napi_struct *napi = data;
-+
-+ while (!napi_thread_wait(napi))
-+ napi_threaded_poll_loop(napi);
-+
- return 0;
- }
-
-@@ -11369,7 +11410,7 @@ static int dev_cpu_dead(unsigned int old
-
- list_del_init(&napi->poll_list);
- if (napi->poll == process_backlog)
-- napi->state = 0;
-+ napi->state &= NAPIF_STATE_THREADED;
- else
- ____napi_schedule(sd, napi);
- }
-@@ -11377,12 +11418,14 @@ static int dev_cpu_dead(unsigned int old
- raise_softirq_irqoff(NET_TX_SOFTIRQ);
- local_irq_enable();
-
-+ if (!use_backlog_threads()) {
- #ifdef CONFIG_RPS
-- remsd = oldsd->rps_ipi_list;
-- oldsd->rps_ipi_list = NULL;
-+ remsd = oldsd->rps_ipi_list;
-+ oldsd->rps_ipi_list = NULL;
- #endif
-- /* send out pending IPI's on offline CPU */
-- net_rps_send_ipi(remsd);
-+ /* send out pending IPI's on offline CPU */
-+ net_rps_send_ipi(remsd);
-+ }
-
- /* Process offline CPU's input_pkt_queue */
- while ((skb = __skb_dequeue(&oldsd->process_queue))) {
-@@ -11721,6 +11764,38 @@ static int net_page_pool_create(int cpui
- return 0;
- }
-
-+static int backlog_napi_should_run(unsigned int cpu)
-+{
-+ struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
-+ struct napi_struct *napi = &sd->backlog;
-+
-+ return test_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
-+}
-+
-+static void run_backlog_napi(unsigned int cpu)
-+{
-+ struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
-+
-+ napi_threaded_poll_loop(&sd->backlog);
-+}
-+
-+static void backlog_napi_setup(unsigned int cpu)
-+{
-+ struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
-+ struct napi_struct *napi = &sd->backlog;
-+
-+ napi->thread = this_cpu_read(backlog_napi);
-+ set_bit(NAPI_STATE_THREADED, &napi->state);
-+}
-+
-+static struct smp_hotplug_thread backlog_threads = {
-+ .store = &backlog_napi,
-+ .thread_should_run = backlog_napi_should_run,
-+ .thread_fn = run_backlog_napi,
-+ .thread_comm = "backlog_napi/%u",
-+ .setup = backlog_napi_setup,
-+};
-+
- /*
- * This is called single threaded during boot, so no need
- * to take the rtnl semaphore.
-@@ -11772,10 +11847,13 @@ static int __init net_dev_init(void)
- init_gro_hash(&sd->backlog);
- sd->backlog.poll = process_backlog;
- sd->backlog.weight = weight_p;
-+ INIT_LIST_HEAD(&sd->backlog.poll_list);
-
- if (net_page_pool_create(i))
- goto out;
- }
-+ if (use_backlog_threads())
-+ smpboot_register_percpu_thread(&backlog_threads);
-
- dev_boot_phase = 0;
-
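
The removed patch drives the optional backlog threads through the generic
smpboot per-CPU thread API. As a hedged sketch of how that API is wired up
(the demo_* names are hypothetical stand-ins for the patch's backlog_*
symbols, not part of the patch itself):

    #include <linux/percpu.h>
    #include <linux/smpboot.h>

    static DEFINE_PER_CPU(struct task_struct *, demo_thread);

    /* Called with preemption enabled; non-zero means demo_run() is invoked. */
    static int demo_should_run(unsigned int cpu)
    {
            return 0;       /* report pending per-CPU work here */
    }

    static void demo_run(unsigned int cpu)
    {
            /* process this CPU's work; the core loops back to should_run() */
    }

    static struct smp_hotplug_thread demo_threads = {
            .store                  = &demo_thread,
            .thread_should_run      = demo_should_run,
            .thread_fn              = demo_run,
            .thread_comm            = "demo/%u",
    };

    static int __init demo_init(void)
    {
            /* spawns one "demo/N" thread per CPU, parked/unparked on hotplug */
            return smpboot_register_percpu_thread(&demo_threads);
    }
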
diff --git a/debian/patches-rt/0002-net-Optimize-xdp_do_flush-with-bpf_net_context-infos.patch b/debian/patches-rt/0002-net-Optimize-xdp_do_flush-with-bpf_net_context-infos.patch
new file mode 100644
index 0000000000..07738df76a
--- /dev/null
+++ b/debian/patches-rt/0002-net-Optimize-xdp_do_flush-with-bpf_net_context-infos.patch
@@ -0,0 +1,291 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 28 Jun 2024 12:18:55 +0200
+Subject: [PATCH 2/3] net: Optimize xdp_do_flush() with bpf_net_context infos.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+Every NIC driver utilizing XDP should invoke xdp_do_flush() after
+processing all packets. With the introduction of the bpf_net_context
+logic the flush lists (for dev, CPU-map and xsk) are lazily initialized
+only if used. However, xdp_do_flush() tries to flush all three of them,
+so all three lists are always initialized and the likely-empty lists are
+"iterated".
+Without any use of XDP but with CONFIG_DEBUG_NET enabled, the lists are
+also initialized due to xdp_do_check_flushed().
+
+Jakub suggested utilizing the hints in bpf_net_context and avoiding the
+invocation of the flush function. This also avoids initializing the lists
+which are otherwise unused.
+
+Introduce bpf_net_ctx_get_all_used_flush_lists() to return the
+individual lists if they are non-empty. Use it in xdp_do_flush() and
+xdp_do_check_flushed(). Remove the no longer needed .*_check_flush().
+
+Suggested-by: Jakub Kicinski <kuba@kernel.org>
+Reviewed-by: Jakub Kicinski <kuba@kernel.org>
+Link: https://lore.kernel.org/r/20240628103020.1766241-3-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/bpf.h | 10 ++++------
+ include/linux/filter.h | 27 +++++++++++++++++++++++++++
+ include/net/xdp_sock.h | 14 ++------------
+ kernel/bpf/cpumap.c | 13 +------------
+ kernel/bpf/devmap.c | 13 +------------
+ net/core/filter.c | 33 +++++++++++++++++++++++++--------
+ net/xdp/xsk.c | 13 +------------
+ 7 files changed, 61 insertions(+), 62 deletions(-)
+
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -2492,7 +2492,7 @@ struct sk_buff;
+ struct bpf_dtab_netdev;
+ struct bpf_cpu_map_entry;
+
+-void __dev_flush(void);
++void __dev_flush(struct list_head *flush_list);
+ int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
+ struct net_device *dev_rx);
+ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
+@@ -2505,7 +2505,7 @@ int dev_map_redirect_multi(struct net_de
+ struct bpf_prog *xdp_prog, struct bpf_map *map,
+ bool exclude_ingress);
+
+-void __cpu_map_flush(void);
++void __cpu_map_flush(struct list_head *flush_list);
+ int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
+ struct net_device *dev_rx);
+ int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
+@@ -2642,8 +2642,6 @@ void bpf_dynptr_init(struct bpf_dynptr_k
+ void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
+ void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr);
+
+-bool dev_check_flush(void);
+-bool cpu_map_check_flush(void);
+ #else /* !CONFIG_BPF_SYSCALL */
+ static inline struct bpf_prog *bpf_prog_get(u32 ufd)
+ {
+@@ -2731,7 +2729,7 @@ static inline struct bpf_token *bpf_toke
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+-static inline void __dev_flush(void)
++static inline void __dev_flush(struct list_head *flush_list)
+ {
+ }
+
+@@ -2777,7 +2775,7 @@ int dev_map_redirect_multi(struct net_de
+ return 0;
+ }
+
+-static inline void __cpu_map_flush(void)
++static inline void __cpu_map_flush(struct list_head *flush_list)
+ {
+ }
+
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -829,6 +829,33 @@ static inline struct list_head *bpf_net_
+ return &bpf_net_ctx->xskmap_map_flush_list;
+ }
+
++static inline void bpf_net_ctx_get_all_used_flush_lists(struct list_head **lh_map,
++ struct list_head **lh_dev,
++ struct list_head **lh_xsk)
++{
++ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
++ u32 kern_flags = bpf_net_ctx->ri.kern_flags;
++ struct list_head *lh;
++
++ *lh_map = *lh_dev = *lh_xsk = NULL;
++
++ if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
++ return;
++
++ lh = &bpf_net_ctx->dev_map_flush_list;
++ if (kern_flags & BPF_RI_F_DEV_MAP_INIT && !list_empty(lh))
++ *lh_dev = lh;
++
++ lh = &bpf_net_ctx->cpu_map_flush_list;
++ if (kern_flags & BPF_RI_F_CPU_MAP_INIT && !list_empty(lh))
++ *lh_map = lh;
++
++ lh = &bpf_net_ctx->xskmap_map_flush_list;
++ if (IS_ENABLED(CONFIG_XDP_SOCKETS) &&
++ kern_flags & BPF_RI_F_XSK_MAP_INIT && !list_empty(lh))
++ *lh_xsk = lh;
++}
++
+ /* Compute the linear packet data range [data, data_end) which
+ * will be accessed by various program types (cls_bpf, act_bpf,
+ * lwt, ...). Subsystems allowing direct data access must (!)
+--- a/include/net/xdp_sock.h
++++ b/include/net/xdp_sock.h
+@@ -121,7 +121,7 @@ struct xsk_tx_metadata_ops {
+
+ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
+ int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
+-void __xsk_map_flush(void);
++void __xsk_map_flush(struct list_head *flush_list);
+
+ /**
+ * xsk_tx_metadata_to_compl - Save enough relevant metadata information
+@@ -206,7 +206,7 @@ static inline int __xsk_map_redirect(str
+ return -EOPNOTSUPP;
+ }
+
+-static inline void __xsk_map_flush(void)
++static inline void __xsk_map_flush(struct list_head *flush_list)
+ {
+ }
+
+@@ -228,14 +228,4 @@ static inline void xsk_tx_metadata_compl
+ }
+
+ #endif /* CONFIG_XDP_SOCKETS */
+-
+-#if defined(CONFIG_XDP_SOCKETS) && defined(CONFIG_DEBUG_NET)
+-bool xsk_map_check_flush(void);
+-#else
+-static inline bool xsk_map_check_flush(void)
+-{
+- return false;
+-}
+-#endif
+-
+ #endif /* _LINUX_XDP_SOCK_H */
+--- a/kernel/bpf/cpumap.c
++++ b/kernel/bpf/cpumap.c
+@@ -757,9 +757,8 @@ int cpu_map_generic_redirect(struct bpf_
+ return ret;
+ }
+
+-void __cpu_map_flush(void)
++void __cpu_map_flush(struct list_head *flush_list)
+ {
+- struct list_head *flush_list = bpf_net_ctx_get_cpu_map_flush_list();
+ struct xdp_bulk_queue *bq, *tmp;
+
+ list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
+@@ -769,13 +768,3 @@ void __cpu_map_flush(void)
+ wake_up_process(bq->obj->kthread);
+ }
+ }
+-
+-#ifdef CONFIG_DEBUG_NET
+-bool cpu_map_check_flush(void)
+-{
+- if (list_empty(bpf_net_ctx_get_cpu_map_flush_list()))
+- return false;
+- __cpu_map_flush();
+- return true;
+-}
+-#endif
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -412,9 +412,8 @@ static void bq_xmit_all(struct xdp_dev_b
+ * driver before returning from its napi->poll() routine. See the comment above
+ * xdp_do_flush() in filter.c.
+ */
+-void __dev_flush(void)
++void __dev_flush(struct list_head *flush_list)
+ {
+- struct list_head *flush_list = bpf_net_ctx_get_dev_flush_list();
+ struct xdp_dev_bulk_queue *bq, *tmp;
+
+ list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
+@@ -425,16 +424,6 @@ void __dev_flush(void)
+ }
+ }
+
+-#ifdef CONFIG_DEBUG_NET
+-bool dev_check_flush(void)
+-{
+- if (list_empty(bpf_net_ctx_get_dev_flush_list()))
+- return false;
+- __dev_flush();
+- return true;
+-}
+-#endif
+-
+ /* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
+ * by local_bh_disable() (from XDP calls inside NAPI). The
+ * rcu_read_lock_bh_held() below makes lockdep accept both.
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -4277,22 +4277,39 @@ static const struct bpf_func_proto bpf_x
+ */
+ void xdp_do_flush(void)
+ {
+- __dev_flush();
+- __cpu_map_flush();
+- __xsk_map_flush();
++ struct list_head *lh_map, *lh_dev, *lh_xsk;
++
++ bpf_net_ctx_get_all_used_flush_lists(&lh_map, &lh_dev, &lh_xsk);
++ if (lh_dev)
++ __dev_flush(lh_dev);
++ if (lh_map)
++ __cpu_map_flush(lh_map);
++ if (lh_xsk)
++ __xsk_map_flush(lh_xsk);
+ }
+ EXPORT_SYMBOL_GPL(xdp_do_flush);
+
+ #if defined(CONFIG_DEBUG_NET) && defined(CONFIG_BPF_SYSCALL)
+ void xdp_do_check_flushed(struct napi_struct *napi)
+ {
+- bool ret;
++ struct list_head *lh_map, *lh_dev, *lh_xsk;
++ bool missed = false;
+
+- ret = dev_check_flush();
+- ret |= cpu_map_check_flush();
+- ret |= xsk_map_check_flush();
++ bpf_net_ctx_get_all_used_flush_lists(&lh_map, &lh_dev, &lh_xsk);
++ if (lh_dev) {
++ __dev_flush(lh_dev);
++ missed = true;
++ }
++ if (lh_map) {
++ __cpu_map_flush(lh_map);
++ missed = true;
++ }
++ if (lh_xsk) {
++ __xsk_map_flush(lh_xsk);
++ missed = true;
++ }
+
+- WARN_ONCE(ret, "Missing xdp_do_flush() invocation after NAPI by %ps\n",
++ WARN_ONCE(missed, "Missing xdp_do_flush() invocation after NAPI by %ps\n",
+ napi->poll);
+ }
+ #endif
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -383,9 +383,8 @@ int __xsk_map_redirect(struct xdp_sock *
+ return 0;
+ }
+
+-void __xsk_map_flush(void)
++void __xsk_map_flush(struct list_head *flush_list)
+ {
+- struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();
+ struct xdp_sock *xs, *tmp;
+
+ list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
+@@ -394,16 +393,6 @@ void __xsk_map_flush(void)
+ }
+ }
+
+-#ifdef CONFIG_DEBUG_NET
+-bool xsk_map_check_flush(void)
+-{
+- if (list_empty(bpf_net_ctx_get_xskmap_flush_list()))
+- return false;
+- __xsk_map_flush();
+- return true;
+-}
+-#endif
+-
+ void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
+ {
+ xskq_prod_submit_n(pool->cq, nb_entries);
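
For context, the driver-side contract this patch optimizes, as a hedged
sketch (the mydrv_* names are hypothetical): an XDP-capable driver may
redirect frames onto the per-context flush lists during its RX loop and
must flush once before the NAPI poll returns.

    #include <linux/netdevice.h>
    #include <linux/filter.h>

    static bool mydrv_rx_one_frame(struct napi_struct *napi); /* placeholder */

    static int mydrv_napi_poll(struct napi_struct *napi, int budget)
    {
            int done = 0;

            /* the RX loop may run an XDP program whose XDP_REDIRECT queues
             * frames on the dev, CPU-map or xsk flush lists */
            while (done < budget && mydrv_rx_one_frame(napi))
                    done++;

            /* one flush per poll; with this patch, lists that were never
             * initialized (or are empty) are skipped instead of iterated */
            xdp_do_flush();

            if (done < budget)
                    napi_complete_done(napi, done);
            return done;
    }
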
diff --git a/debian/patches-rt/0002-net-tcp-un-pin-the-tw_timer.patch b/debian/patches-rt/0002-net-tcp-un-pin-the-tw_timer.patch
new file mode 100644
index 0000000000..d19d45bf09
--- /dev/null
+++ b/debian/patches-rt/0002-net-tcp-un-pin-the-tw_timer.patch
@@ -0,0 +1,29 @@
+From: Florian Westphal <fw@strlen.de>
+Date: Tue, 4 Jun 2024 16:08:48 +0200
+Subject: [PATCH 2/3] net: tcp: un-pin the tw_timer
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+After the previous patch, even if the timer fires immediately on another CPU,
+the context that schedules the timer now holds the ehash spinlock, so the
+timer cannot reap the tw socket until the ehash lock is released.
+
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20240604140903.31939-3-fw@strlen.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/ipv4/inet_timewait_sock.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv4/inet_timewait_sock.c
++++ b/net/ipv4/inet_timewait_sock.c
+@@ -203,7 +203,7 @@ struct inet_timewait_sock *inet_twsk_all
+ tw->tw_prot = sk->sk_prot_creator;
+ atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
+ twsk_net_set(tw, sock_net(sk));
+- timer_setup(&tw->tw_timer, tw_timer_handler, TIMER_PINNED);
++ timer_setup(&tw->tw_timer, tw_timer_handler, 0);
+ /*
+ * Because we use RCU lookups, we should not set tw_refcnt
+ * to a non null value before everything is setup for this
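
The ordering argument in the message, reduced to pseudo-C (illustrative
names, not the actual call chain):

    spin_lock(&ehash_bucket->lock);
    add_tw_to_hash(ehash_bucket, tw);          /* tw becomes findable     */
    mod_timer(&tw->tw_timer, jiffies + timeo); /* may now fire on any CPU */
    spin_unlock(&ehash_bucket->lock);          /* the handler takes the same
                                                * lock, so it can reap tw
                                                * only from this point on */
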
diff --git a/debian/patches-rt/0002-perf-Enqueue-SIGTRAP-always-via-task_work.patch b/debian/patches-rt/0002-perf-Enqueue-SIGTRAP-always-via-task_work.patch
deleted file mode 100644
index 03a9a576ad..0000000000
--- a/debian/patches-rt/0002-perf-Enqueue-SIGTRAP-always-via-task_work.patch
+++ /dev/null
@@ -1,160 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 12 Mar 2024 19:01:50 +0100
-Subject: [PATCH 2/4] perf: Enqueue SIGTRAP always via task_work.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
-
-A signal is delivered by raising irq_work() which works from any context
-including NMI. irq_work() can be delayed if the architecture does not
-provide an interrupt vector. In order not to lose a signal, the signal
-is injected via task_work during event_sched_out().
-
-Instead of going via irq_work, the signal could be added directly via
-task_work. The signal is sent to current and can be enqueued on its
-return path to userland instead of triggering irq_work. A dummy IRQ is
-required in the NMI case to ensure the task_work is handled before
-returning to userland. For this, irq_work is used. An alternative would
-be to just raise an interrupt like arch_send_call_function_single_ipi().
-
-During testing with `remove_on_exec' it became visible that the event
-can be enqueued via NMI during execve(). The task_work must not be kept
-because free_event() will complain later. Also the new task will not
-have a sighandler installed.
-
-Queue the signal via task_work. Remove perf_event::pending_sigtrap
-and use perf_event::pending_work instead. Raise irq_work in the NMI case
-for a dummy interrupt. Remove the task_work if the event is freed.
-
-Tested-by: Marco Elver <elver@google.com>
-Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-Reported-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-Link: https://lore.kernel.org/r/20240312180814.3373778-3-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/perf_event.h | 3 --
- kernel/events/core.c | 57 +++++++++++++++++++++++++--------------------
- 2 files changed, 33 insertions(+), 27 deletions(-)
-
---- a/include/linux/perf_event.h
-+++ b/include/linux/perf_event.h
-@@ -781,7 +781,6 @@ struct perf_event {
- unsigned int pending_wakeup;
- unsigned int pending_kill;
- unsigned int pending_disable;
-- unsigned int pending_sigtrap;
- unsigned long pending_addr; /* SIGTRAP */
- struct irq_work pending_irq;
- struct callback_head pending_task;
-@@ -959,7 +958,7 @@ struct perf_event_context {
- struct rcu_head rcu_head;
-
- /*
-- * Sum (event->pending_sigtrap + event->pending_work)
-+ * Sum (event->pending_work + event->pending_work)
- *
- * The SIGTRAP is targeted at ctx->task, as such it won't do changing
- * that until the signal is delivered.
---- a/kernel/events/core.c
-+++ b/kernel/events/core.c
-@@ -2283,21 +2283,6 @@ event_sched_out(struct perf_event *event
- state = PERF_EVENT_STATE_OFF;
- }
-
-- if (event->pending_sigtrap) {
-- bool dec = true;
--
-- event->pending_sigtrap = 0;
-- if (state != PERF_EVENT_STATE_OFF &&
-- !event->pending_work) {
-- event->pending_work = 1;
-- dec = false;
-- WARN_ON_ONCE(!atomic_long_inc_not_zero(&event->refcount));
-- task_work_add(current, &event->pending_task, TWA_RESUME);
-- }
-- if (dec)
-- local_dec(&event->ctx->nr_pending);
-- }
--
- perf_event_set_state(event, state);
-
- if (!is_software_event(event))
-@@ -6741,11 +6726,6 @@ static void __perf_pending_irq(struct pe
- * Yay, we hit home and are in the context of the event.
- */
- if (cpu == smp_processor_id()) {
-- if (event->pending_sigtrap) {
-- event->pending_sigtrap = 0;
-- perf_sigtrap(event);
-- local_dec(&event->ctx->nr_pending);
-- }
- if (event->pending_disable) {
- event->pending_disable = 0;
- perf_event_disable_local(event);
-@@ -9588,14 +9568,23 @@ static int __perf_event_overflow(struct
-
- if (regs)
- pending_id = hash32_ptr((void *)instruction_pointer(regs)) ?: 1;
-- if (!event->pending_sigtrap) {
-- event->pending_sigtrap = pending_id;
-+ if (!event->pending_work) {
-+ event->pending_work = pending_id;
- local_inc(&event->ctx->nr_pending);
-- irq_work_queue(&event->pending_irq);
-+ WARN_ON_ONCE(!atomic_long_inc_not_zero(&event->refcount));
-+ task_work_add(current, &event->pending_task, TWA_RESUME);
-+ /*
-+ * The NMI path returns directly to userland. The
-+ * irq_work is raised as a dummy interrupt to ensure
-+ * regular return path to user is taken and task_work
-+ * is processed.
-+ */
-+ if (in_nmi())
-+ irq_work_queue(&event->pending_irq);
- } else if (event->attr.exclude_kernel && valid_sample) {
- /*
- * Should not be able to return to user space without
-- * consuming pending_sigtrap; with exceptions:
-+ * consuming pending_work; with exceptions:
- *
- * 1. Where !exclude_kernel, events can overflow again
- * in the kernel without returning to user space.
-@@ -9605,7 +9594,7 @@ static int __perf_event_overflow(struct
- * To approximate progress (with false negatives),
- * check 32-bit hash of the current IP.
- */
-- WARN_ON_ONCE(event->pending_sigtrap != pending_id);
-+ WARN_ON_ONCE(event->pending_work != pending_id);
- }
-
- event->pending_addr = 0;
-@@ -13045,6 +13034,13 @@ static void sync_child_event(struct perf
- &parent_event->child_total_time_running);
- }
-
-+static bool task_work_cb_match(struct callback_head *cb, void *data)
-+{
-+ struct perf_event *event = container_of(cb, struct perf_event, pending_task);
-+
-+ return event == data;
-+}
-+
- static void
- perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx)
- {
-@@ -13084,6 +13080,17 @@ perf_event_exit_event(struct perf_event
- * Kick perf_poll() for is_event_hup();
- */
- perf_event_wakeup(parent_event);
-+ /*
-+ * Cancel pending task_work and update counters if it has not
-+ * yet been delivered to userland. free_event() expects the
-+ * reference counter at 1 and keeping the event around until the
-+ * task return to userland will be a unexpected.
-+ */
-+ if (event->pending_work &&
-+ task_work_cancel_match(current, task_work_cb_match, event)) {
-+ put_event(event);
-+ local_dec(&event->ctx->nr_pending);
-+ }
- free_event(event);
- put_event(parent_event);
- return;
diff --git a/debian/patches-rt/0002-printk-Properly-deal-with-nbcon-consoles-on-seq-init.patch b/debian/patches-rt/0002-printk-Properly-deal-with-nbcon-consoles-on-seq-init.patch
index e42d83f140..a178c87993 100644
--- a/debian/patches-rt/0002-printk-Properly-deal-with-nbcon-consoles-on-seq-init.patch
+++ b/debian/patches-rt/0002-printk-Properly-deal-with-nbcon-consoles-on-seq-init.patch
@@ -1,7 +1,7 @@
From: Petr Mladek <pmladek@suse.com>
Date: Wed, 22 Nov 2023 11:23:43 +0000
-Subject: [PATCH 02/46] printk: Properly deal with nbcon consoles on seq init
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 02/48] printk: Properly deal with nbcon consoles on seq init
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
If a non-boot console is registering and boot consoles exist,
the consoles are flushed before being unregistered. This allows
@@ -93,7 +93,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -3363,19 +3363,21 @@ static void try_enable_default_console(s
+@@ -3375,19 +3375,21 @@ static void try_enable_default_console(s
newcon->flags |= CON_CONSDEV;
}
@@ -118,7 +118,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* If any enabled boot consoles are due to be unregistered
-@@ -3396,7 +3398,7 @@ static void console_init_seq(struct cons
+@@ -3408,7 +3410,7 @@ static void console_init_seq(struct cons
* Flush all consoles and set the console to start at
* the next unprinted sequence number.
*/
@@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Flushing failed. Just choose the lowest
* sequence of the enabled boot consoles.
-@@ -3409,19 +3411,30 @@ static void console_init_seq(struct cons
+@@ -3421,19 +3423,30 @@ static void console_init_seq(struct cons
if (handover)
console_lock();
@@ -163,7 +163,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#define console_first() \
-@@ -3453,6 +3466,7 @@ void register_console(struct console *ne
+@@ -3465,6 +3478,7 @@ void register_console(struct console *ne
struct console *con;
bool bootcon_registered = false;
bool realcon_registered = false;
@@ -171,7 +171,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int err;
console_list_lock();
-@@ -3530,10 +3544,13 @@ void register_console(struct console *ne
+@@ -3542,10 +3556,13 @@ void register_console(struct console *ne
}
newcon->dropped = 0;
diff --git a/debian/patches-rt/0002-softirq-Add-function-to-preempt-serving-softirqs.patch b/debian/patches-rt/0002-softirq-Add-function-to-preempt-serving-softirqs.patch
index 50da7bdae5..bbafcf5a1c 100644
--- a/debian/patches-rt/0002-softirq-Add-function-to-preempt-serving-softirqs.patch
+++ b/debian/patches-rt/0002-softirq-Add-function-to-preempt-serving-softirqs.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 4 Aug 2023 13:30:38 +0200
Subject: [PATCH 2/3] softirq: Add function to preempt serving softirqs.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Add a functionality for the softirq handler to preempt its current work
if needed. The softirq core has no particular state. It reads and resets
diff --git a/debian/patches-rt/0002-task_work-Add-TWA_NMI_CURRENT-as-an-additional-notif.patch b/debian/patches-rt/0002-task_work-Add-TWA_NMI_CURRENT-as-an-additional-notif.patch
new file mode 100644
index 0000000000..30a0f071e2
--- /dev/null
+++ b/debian/patches-rt/0002-task_work-Add-TWA_NMI_CURRENT-as-an-additional-notif.patch
@@ -0,0 +1,102 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 4 Jul 2024 19:03:36 +0200
+Subject: [PATCH 2/7] task_work: Add TWA_NMI_CURRENT as an additional notify
+ mode.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+Adding task_work from NMI context requires the following:
+- The kasan_record_aux_stack() is not NMI safe and must be avoided.
+- Using TWA_RESUME is NMI safe. If the NMI occurs while the CPU is in
+ userland then it will continue in userland and not invoke the `work'
+ callback.
+
+Add TWA_NMI_CURRENT as an additional notify mode. In this mode, skip
+kasan and use irq_work in hardirq mode to raise the needed interrupt. Set
+TIF_NOTIFY_RESUME within the irq_work callback because the k[ac]san
+instrumentation in test_and_set_bit() does not look NMI safe in
+case of a report.
+
+Suggested-by: Peter Zijlstra <peterz@infradead.org>
+Link: https://lore.kernel.org/r/20240704170424.1466941-3-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/task_work.h | 1 +
+ kernel/task_work.c | 25 ++++++++++++++++++++++---
+ 2 files changed, 23 insertions(+), 3 deletions(-)
+
+--- a/include/linux/task_work.h
++++ b/include/linux/task_work.h
+@@ -18,6 +18,7 @@ enum task_work_notify_mode {
+ TWA_RESUME,
+ TWA_SIGNAL,
+ TWA_SIGNAL_NO_IPI,
++ TWA_NMI_CURRENT,
+ };
+
+ static inline bool task_work_pending(struct task_struct *task)
+--- a/kernel/task_work.c
++++ b/kernel/task_work.c
+@@ -1,10 +1,19 @@
+ // SPDX-License-Identifier: GPL-2.0
++#include <linux/irq_work.h>
+ #include <linux/spinlock.h>
+ #include <linux/task_work.h>
+ #include <linux/resume_user_mode.h>
++#include <trace/events/ipi.h>
+
+ static struct callback_head work_exited; /* all we need is ->next == NULL */
+
++static void task_work_set_notify_irq(struct irq_work *entry)
++{
++ test_and_set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
++}
++static DEFINE_PER_CPU(struct irq_work, irq_work_NMI_resume) =
++ IRQ_WORK_INIT_HARD(task_work_set_notify_irq);
++
+ /**
+ * task_work_add - ask the @task to execute @work->func()
+ * @task: the task which should run the callback
+@@ -12,7 +21,7 @@ static struct callback_head work_exited;
+ * @notify: how to notify the targeted task
+ *
+ * Queue @work for task_work_run() below and notify the @task if @notify
+- * is @TWA_RESUME, @TWA_SIGNAL, or @TWA_SIGNAL_NO_IPI.
++ * is @TWA_RESUME, @TWA_SIGNAL, @TWA_SIGNAL_NO_IPI or @TWA_NMI_CURRENT.
+ *
+ * @TWA_SIGNAL works like signals, in that the it will interrupt the targeted
+ * task and run the task_work, regardless of whether the task is currently
+@@ -24,6 +33,8 @@ static struct callback_head work_exited;
+ * kernel anyway.
+ * @TWA_RESUME work is run only when the task exits the kernel and returns to
+ * user mode, or before entering guest mode.
++ * @TWA_NMI_CURRENT works like @TWA_RESUME, except it can only be used for the
++ * current @task and if the current context is NMI.
+ *
+ * Fails if the @task is exiting/exited and thus it can't process this @work.
+ * Otherwise @work->func() will be called when the @task goes through one of
+@@ -44,8 +55,13 @@ int task_work_add(struct task_struct *ta
+ {
+ struct callback_head *head;
+
+- /* record the work call stack in order to print it in KASAN reports */
+- kasan_record_aux_stack(work);
++ if (notify == TWA_NMI_CURRENT) {
++ if (WARN_ON_ONCE(task != current))
++ return -EINVAL;
++ } else {
++ /* record the work call stack in order to print it in KASAN reports */
++ kasan_record_aux_stack(work);
++ }
+
+ head = READ_ONCE(task->task_works);
+ do {
+@@ -66,6 +82,9 @@ int task_work_add(struct task_struct *ta
+ case TWA_SIGNAL_NO_IPI:
+ __set_notify_signal(task);
+ break;
++ case TWA_NMI_CURRENT:
++ irq_work_queue(this_cpu_ptr(&irq_work_NMI_resume));
++ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
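
A hedged usage sketch of the new mode (the my_* names are hypothetical):
from NMI context only the current task may be targeted, and the irq_work
raised internally guarantees an interrupt so the work runs on the next
return to user mode.

    #include <linux/sched.h>
    #include <linux/task_work.h>

    static void my_callback(struct callback_head *head)
    {
            /* runs in task context on the return-to-user path */
    }

    static struct callback_head my_work;

    static void my_init(void)
    {
            init_task_work(&my_work, my_callback);
    }

    static void my_nmi_handler(void)
    {
            if (task_work_add(current, &my_work, TWA_NMI_CURRENT))
                    ;       /* task is exiting; the work was not queued */
    }
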
diff --git a/debian/patches-rt/0002-zram-Remove-ZRAM_LOCK.patch b/debian/patches-rt/0002-zram-Remove-ZRAM_LOCK.patch
new file mode 100644
index 0000000000..f795579f71
--- /dev/null
+++ b/debian/patches-rt/0002-zram-Remove-ZRAM_LOCK.patch
@@ -0,0 +1,66 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 20 Jun 2024 12:27:11 +0200
+Subject: [PATCH 2/3] zram: Remove ZRAM_LOCK
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+The ZRAM_LOCK bit was used for locking; after the addition of the spinlock_t
+the bit is still set and cleared, but there is no reader of it.
+
+Remove the ZRAM_LOCK bit.
+
+Link: https://lore.kernel.org/r/20240620153556.777272-3-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/block/zram/zram_drv.c | 11 ++---------
+ drivers/block/zram/zram_drv.h | 4 +---
+ 2 files changed, 3 insertions(+), 12 deletions(-)
+
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -67,23 +67,16 @@ static void zram_meta_init_table_locks(s
+
+ static int zram_slot_trylock(struct zram *zram, u32 index)
+ {
+- int ret;
+-
+- ret = spin_trylock(&zram->table[index].lock);
+- if (ret)
+- __set_bit(ZRAM_LOCK, &zram->table[index].flags);
+- return ret;
++ return spin_trylock(&zram->table[index].lock);
+ }
+
+ static void zram_slot_lock(struct zram *zram, u32 index)
+ {
+ spin_lock(&zram->table[index].lock);
+- __set_bit(ZRAM_LOCK, &zram->table[index].flags);
+ }
+
+ static void zram_slot_unlock(struct zram *zram, u32 index)
+ {
+- __clear_bit(ZRAM_LOCK, &zram->table[index].flags);
+ spin_unlock(&zram->table[index].lock);
+ }
+
+@@ -1299,7 +1292,7 @@ static void zram_free_page(struct zram *
+ zram_set_handle(zram, index, 0);
+ zram_set_obj_size(zram, index, 0);
+ WARN_ON_ONCE(zram->table[index].flags &
+- ~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB));
++ ~(1UL << ZRAM_UNDER_WB));
+ }
+
+ /*
+--- a/drivers/block/zram/zram_drv.h
++++ b/drivers/block/zram/zram_drv.h
+@@ -45,9 +45,7 @@
+
+ /* Flags for zram pages (table[page_no].flags) */
+ enum zram_pageflags {
+- /* zram slot is locked */
+- ZRAM_LOCK = ZRAM_FLAG_SHIFT,
+- ZRAM_SAME, /* Page consists the same element */
++ ZRAM_SAME = ZRAM_FLAG_SHIFT, /* Page consists the same element */
+ ZRAM_WB, /* page is stored on backing_device */
+ ZRAM_UNDER_WB, /* page is under writeback */
+ ZRAM_HUGE, /* Incompressible page */
diff --git a/debian/patches-rt/0003-ARM-vfp-Use-vfp_lock-in-vfp_support_entry.patch b/debian/patches-rt/0003-ARM-vfp-Use-vfp_lock-in-vfp_support_entry.patch
index 4ec0cd7b62..5e9b00218a 100644
--- a/debian/patches-rt/0003-ARM-vfp-Use-vfp_lock-in-vfp_support_entry.patch
+++ b/debian/patches-rt/0003-ARM-vfp-Use-vfp_lock-in-vfp_support_entry.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 28 Jun 2023 09:36:10 +0200
Subject: [PATCH 3/4] ARM: vfp: Use vfp_lock() in vfp_support_entry().
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
vfp_entry() is invoked from exception handler and is fully preemptible.
It uses local_bh_disable() to remain uninterrupted while checking the
diff --git a/debian/patches-rt/0005-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch b/debian/patches-rt/0003-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch
index c985a8382f..583f8e96c9 100644
--- a/debian/patches-rt/0005-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch
+++ b/debian/patches-rt/0003-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch
@@ -1,29 +1,38 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 25 Oct 2021 15:05:18 +0200
-Subject: [PATCH 05/10] drm/i915: Don't check for atomic context on PREEMPT_RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 3/8] drm/i915: Don't check for atomic context on PREEMPT_RT
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
The !in_atomic() check in _wait_for_atomic() triggers on PREEMPT_RT
because the uncore::lock is a spinlock_t and does not disable
preemption or interrupts.
Changing the uncore:lock to a raw_spinlock_t doubles the worst case
-latency on an otherwise idle testbox during testing. Therefore I'm
-currently unsure about changing this.
+latency on an otherwise idle testbox during testing.
+Ignore _WAIT_FOR_ATOMIC_CHECK() on PREEMPT_RT.
+
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Link: https://lore.kernel.org/all/20211006164628.s2mtsdd2jdbfyf7g@linutronix.de/
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- drivers/gpu/drm/i915/i915_utils.h | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
+ drivers/gpu/drm/i915/i915_utils.h | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
-@@ -288,7 +288,7 @@ wait_remaining_ms_from_jiffies(unsigned
+@@ -273,8 +273,13 @@ wait_remaining_ms_from_jiffies(unsigned
+ (Wmax))
#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
- /* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
+-/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
-#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
++/*
++ * If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false.
++ * On PREEMPT_RT the context isn't becoming atomic because it is used in an
++ * interrupt handler or because a spinlock_t is acquired. This leads to
++ * warnings which don't occur otherwise and therefore the check is disabled.
++ */
+#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT)
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
#else
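
The failure mode being silenced, sketched for illustration only: on
PREEMPT_RT a spinlock_t disables neither preemption nor interrupts, so
holding uncore->lock leaves in_atomic() false.

    spin_lock(&uncore->lock);       /* RT: preempt_count() is not raised */
    WARN_ON_ONCE(!in_atomic());     /* would fire here on PREEMPT_RT     */
    spin_unlock(&uncore->lock);
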
diff --git a/debian/patches-rt/0003-net-Move-flush-list-retrieval-to-where-it-is-used.patch b/debian/patches-rt/0003-net-Move-flush-list-retrieval-to-where-it-is-used.patch
new file mode 100644
index 0000000000..bb954adf85
--- /dev/null
+++ b/debian/patches-rt/0003-net-Move-flush-list-retrieval-to-where-it-is-used.patch
@@ -0,0 +1,88 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 28 Jun 2024 12:18:56 +0200
+Subject: [PATCH 3/3] net: Move flush list retrieval to where it is used.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+The bpf_net_ctx_get_.*_flush_list() are used at the top of the function.
+This means the variable is always assigned even if unused. By moving the
+function to where it is used, it is possible to delay the initialisation
+until it is unavoidable.
+Not sure how much this gains in reality but by looking at bq_enqueue()
+(in devmap.c) gcc pushes one register less to the stack. \o/.
+
+ Move flush list retrieval to where it is used.
+
+Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
+Reviewed-by: Jakub Kicinski <kuba@kernel.org>
+Link: https://lore.kernel.org/r/20240628103020.1766241-4-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/bpf/cpumap.c | 6 ++++--
+ kernel/bpf/devmap.c | 3 ++-
+ net/xdp/xsk.c | 6 ++++--
+ 3 files changed, 10 insertions(+), 5 deletions(-)
+
+--- a/kernel/bpf/cpumap.c
++++ b/kernel/bpf/cpumap.c
+@@ -707,7 +707,6 @@ static void bq_flush_to_queue(struct xdp
+ */
+ static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
+ {
+- struct list_head *flush_list = bpf_net_ctx_get_cpu_map_flush_list();
+ struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
+
+ if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
+@@ -724,8 +723,11 @@ static void bq_enqueue(struct bpf_cpu_ma
+ */
+ bq->q[bq->count++] = xdpf;
+
+- if (!bq->flush_node.prev)
++ if (!bq->flush_node.prev) {
++ struct list_head *flush_list = bpf_net_ctx_get_cpu_map_flush_list();
++
+ list_add(&bq->flush_node, flush_list);
++ }
+ }
+
+ int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -448,7 +448,6 @@ static void *__dev_map_lookup_elem(struc
+ static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
+ struct net_device *dev_rx, struct bpf_prog *xdp_prog)
+ {
+- struct list_head *flush_list = bpf_net_ctx_get_dev_flush_list();
+ struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
+
+ if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
+@@ -462,6 +461,8 @@ static void bq_enqueue(struct net_device
+ * are only ever modified together.
+ */
+ if (!bq->dev_rx) {
++ struct list_head *flush_list = bpf_net_ctx_get_dev_flush_list();
++
+ bq->dev_rx = dev_rx;
+ bq->xdp_prog = xdp_prog;
+ list_add(&bq->flush_node, flush_list);
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -370,15 +370,17 @@ static int xsk_rcv(struct xdp_sock *xs,
+
+ int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
+ {
+- struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();
+ int err;
+
+ err = xsk_rcv(xs, xdp);
+ if (err)
+ return err;
+
+- if (!xs->flush_node.prev)
++ if (!xs->flush_node.prev) {
++ struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();
++
+ list_add(&xs->flush_node, flush_list);
++ }
+
+ return 0;
+ }
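
The pattern applied in all three call sites, sketched generically (the
bulk_queue type and get_flush_list() helper are placeholders): the list
pointer is fetched only on the first enqueue that arms the flush node,
so the common path never materializes it.

    #include <linux/list.h>

    struct bulk_queue {
            void *q[16];
            unsigned int count;
            struct list_head flush_node;    /* zeroed at alloc; .prev == NULL
                                             * means "not on any flush list" */
    };

    static struct list_head *get_flush_list(void);  /* placeholder lookup */

    static void bq_enqueue_sketch(struct bulk_queue *bq, void *item)
    {
            bq->q[bq->count++] = item;

            /* only the enqueue that arms the node pays for the lookup, so
             * the compiler need not keep the pointer live on the fast path */
            if (!bq->flush_node.prev) {
                    struct list_head *flush_list = get_flush_list();

                    list_add(&bq->flush_node, flush_list);
            }
    }
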
diff --git a/debian/patches-rt/0003-net-Use-__napi_alloc_frag_align-instead-of-open-codi.patch b/debian/patches-rt/0003-net-Use-__napi_alloc_frag_align-instead-of-open-codi.patch
new file mode 100644
index 0000000000..677c50ab56
--- /dev/null
+++ b/debian/patches-rt/0003-net-Use-__napi_alloc_frag_align-instead-of-open-codi.patch
@@ -0,0 +1,42 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 23 Oct 2023 15:11:23 +0200
+Subject: [PATCH 03/15] net: Use __napi_alloc_frag_align() instead of open
+ coding it.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+The else condition within __netdev_alloc_frag_align() is an open coded
+__napi_alloc_frag_align().
+
+Use __napi_alloc_frag_align() instead of open coding it.
+Move fragsz assignment before page_frag_alloc_align() invocation because
+__napi_alloc_frag_align() also contains this statement.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/core/skbuff.c | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -318,19 +318,15 @@ void *__netdev_alloc_frag_align(unsigned
+ {
+ void *data;
+
+- fragsz = SKB_DATA_ALIGN(fragsz);
+ if (in_hardirq() || irqs_disabled()) {
+ struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);
+
++ fragsz = SKB_DATA_ALIGN(fragsz);
+ data = __page_frag_alloc_align(nc, fragsz, GFP_ATOMIC,
+ align_mask);
+ } else {
+- struct napi_alloc_cache *nc;
+-
+ local_bh_disable();
+- nc = this_cpu_ptr(&napi_alloc_cache);
+- data = __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
+- align_mask);
++ data = __napi_alloc_frag_align(fragsz, align_mask);
+ local_bh_enable();
+ }
+ return data;
diff --git a/debian/patches-rt/0003-net-Use-backlog-NAPI-to-clean-up-the-defer_list.patch b/debian/patches-rt/0003-net-Use-backlog-NAPI-to-clean-up-the-defer_list.patch
deleted file mode 100644
index 66b0a2cf23..0000000000
--- a/debian/patches-rt/0003-net-Use-backlog-NAPI-to-clean-up-the-defer_list.patch
+++ /dev/null
@@ -1,121 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 25 Mar 2024 08:40:30 +0100
-Subject: [PATCH 3/4] net: Use backlog-NAPI to clean up the defer_list.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
-
-The defer_list is a per-CPU list which is used to free skbs outside of
-the socket lock and on the CPU on which they have been allocated.
-The list is processed during NAPI callbacks so ideally the list is
-cleaned up.
-Should the amount of skbs on the list exceed a certain water mark then
-the softirq is triggered remotely on the target CPU by invoking a remote
-function call. The raise of the softirqs via a remote function call
-leads to waking the ksoftirqd on PREEMPT_RT which is undesired.
-The backlog-NAPI threads already provide the infrastructure which can be
-utilized to perform the cleanup of the defer_list.
-
-The NAPI state is updated with the input_pkt_queue.lock acquired. In
-order not to break the state, it is necessary to also wake the backlog-NAPI
-thread with the lock held. This requires acquiring the lock in
-rps_lock_irq*() if the backlog-NAPI threads are used, even with RPS
-disabled.
-
-Move the logic of remotely starting softirqs to clean up the defer_list
-into kick_defer_list_purge(). Make sure a lock is held in
-rps_lock_irq*() if backlog-NAPI threads are used. Schedule backlog-NAPI
-for defer_list cleanup if backlog-NAPI is available.
-
-Acked-by: Jakub Kicinski <kuba@kernel.org>
-Link: https://lore.kernel.org/r/20240325074943.289909-4-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/netdevice.h | 1 +
- net/core/dev.c | 25 +++++++++++++++++++++----
- net/core/skbuff.c | 4 ++--
- 3 files changed, 24 insertions(+), 6 deletions(-)
-
---- a/include/linux/netdevice.h
-+++ b/include/linux/netdevice.h
-@@ -3287,6 +3287,7 @@ static inline void dev_xmit_recursion_de
- __this_cpu_dec(softnet_data.xmit.recursion);
- }
-
-+void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);
- void __netif_schedule(struct Qdisc *q);
- void netif_schedule_queue(struct netdev_queue *txq);
-
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -226,7 +226,7 @@ static bool use_backlog_threads(void)
- static inline void rps_lock_irqsave(struct softnet_data *sd,
- unsigned long *flags)
- {
-- if (IS_ENABLED(CONFIG_RPS))
-+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
- spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
- else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
- local_irq_save(*flags);
-@@ -234,7 +234,7 @@ static inline void rps_lock_irqsave(stru
-
- static inline void rps_lock_irq_disable(struct softnet_data *sd)
- {
-- if (IS_ENABLED(CONFIG_RPS))
-+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
- spin_lock_irq(&sd->input_pkt_queue.lock);
- else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
- local_irq_disable();
-@@ -243,7 +243,7 @@ static inline void rps_lock_irq_disable(
- static inline void rps_unlock_irq_restore(struct softnet_data *sd,
- unsigned long *flags)
- {
-- if (IS_ENABLED(CONFIG_RPS))
-+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
- spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
- else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
- local_irq_restore(*flags);
-@@ -251,7 +251,7 @@ static inline void rps_unlock_irq_restor
-
- static inline void rps_unlock_irq_enable(struct softnet_data *sd)
- {
-- if (IS_ENABLED(CONFIG_RPS))
-+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
- spin_unlock_irq(&sd->input_pkt_queue.lock);
- else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
- local_irq_enable();
-@@ -4728,6 +4728,23 @@ static void napi_schedule_rps(struct sof
- __napi_schedule_irqoff(&mysd->backlog);
- }
-
-+void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu)
-+{
-+ unsigned long flags;
-+
-+ if (use_backlog_threads()) {
-+ rps_lock_irqsave(sd, &flags);
-+
-+ if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
-+ __napi_schedule_irqoff(&sd->backlog);
-+
-+ rps_unlock_irq_restore(sd, &flags);
-+
-+ } else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
-+ smp_call_function_single_async(cpu, &sd->defer_csd);
-+ }
-+}
-+
- #ifdef CONFIG_NET_FLOW_LIMIT
- int netdev_flow_limit_table_len __read_mostly = (1 << 12);
- #endif
---- a/net/core/skbuff.c
-+++ b/net/core/skbuff.c
-@@ -7050,8 +7050,8 @@ nodefer: __kfree_skb(skb);
- /* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
- * if we are unlucky enough (this seems very unlikely).
- */
-- if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1))
-- smp_call_function_single_async(cpu, &sd->defer_csd);
-+ if (unlikely(kick))
-+ kick_defer_list_purge(sd, cpu);
- }
-
- static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,
diff --git a/debian/patches-rt/0003-perf-Enqueue-SIGTRAP-always-via-task_work.patch b/debian/patches-rt/0003-perf-Enqueue-SIGTRAP-always-via-task_work.patch
new file mode 100644
index 0000000000..725fb82046
--- /dev/null
+++ b/debian/patches-rt/0003-perf-Enqueue-SIGTRAP-always-via-task_work.patch
@@ -0,0 +1,120 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 4 Jul 2024 19:03:37 +0200
+Subject: [PATCH 3/7] perf: Enqueue SIGTRAP always via task_work.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+A signal is delivered by raising irq_work() which works from any context
+including NMI. irq_work() can be delayed if the architecture does not
+provide an interrupt vector. In order not to lose a signal, the signal
+is injected via task_work during event_sched_out().
+
+Instead of going via irq_work, the signal could be added directly via
+task_work. The signal is sent to current and can be enqueued on its
+return path to userland.
+
+Queue the signal via task_work and consider possible NMI context. Remove
+perf_event::pending_sigtrap and use perf_event::pending_work
+instead.
+
+Tested-by: Marco Elver <elver@google.com>
+Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Reported-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Link: https://lore.kernel.org/all/ZMAtZ2t43GXoF6tM@kernel.org/
+Link: https://lore.kernel.org/r/20240704170424.1466941-4-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/perf_event.h | 3 +--
+ kernel/events/core.c | 31 ++++++++++---------------------
+ 2 files changed, 11 insertions(+), 23 deletions(-)
+
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -781,7 +781,6 @@ struct perf_event {
+ unsigned int pending_wakeup;
+ unsigned int pending_kill;
+ unsigned int pending_disable;
+- unsigned int pending_sigtrap;
+ unsigned long pending_addr; /* SIGTRAP */
+ struct irq_work pending_irq;
+ struct callback_head pending_task;
+@@ -963,7 +962,7 @@ struct perf_event_context {
+ struct rcu_head rcu_head;
+
+ /*
+- * Sum (event->pending_sigtrap + event->pending_work)
++ * Sum (event->pending_work + event->pending_work)
+ *
+ * The SIGTRAP is targeted at ctx->task, as such it won't do changing
+ * that until the signal is delivered.
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -2283,17 +2283,6 @@ event_sched_out(struct perf_event *event
+ state = PERF_EVENT_STATE_OFF;
+ }
+
+- if (event->pending_sigtrap) {
+- event->pending_sigtrap = 0;
+- if (state != PERF_EVENT_STATE_OFF &&
+- !event->pending_work &&
+- !task_work_add(current, &event->pending_task, TWA_RESUME)) {
+- event->pending_work = 1;
+- } else {
+- local_dec(&event->ctx->nr_pending);
+- }
+- }
+-
+ perf_event_set_state(event, state);
+
+ if (!is_software_event(event))
+@@ -6787,11 +6776,6 @@ static void __perf_pending_irq(struct pe
+ * Yay, we hit home and are in the context of the event.
+ */
+ if (cpu == smp_processor_id()) {
+- if (event->pending_sigtrap) {
+- event->pending_sigtrap = 0;
+- perf_sigtrap(event);
+- local_dec(&event->ctx->nr_pending);
+- }
+ if (event->pending_disable) {
+ event->pending_disable = 0;
+ perf_event_disable_local(event);
+@@ -9732,21 +9716,26 @@ static int __perf_event_overflow(struct
+ */
+ bool valid_sample = sample_is_allowed(event, regs);
+ unsigned int pending_id = 1;
++ enum task_work_notify_mode notify_mode;
+
+ if (regs)
+ pending_id = hash32_ptr((void *)instruction_pointer(regs)) ?: 1;
+- if (!event->pending_sigtrap) {
+- event->pending_sigtrap = pending_id;
++
++ notify_mode = in_nmi() ? TWA_NMI_CURRENT : TWA_RESUME;
++
++ if (!event->pending_work &&
++ !task_work_add(current, &event->pending_task, notify_mode)) {
++ event->pending_work = pending_id;
+ local_inc(&event->ctx->nr_pending);
+
+ event->pending_addr = 0;
+ if (valid_sample && (data->sample_flags & PERF_SAMPLE_ADDR))
+ event->pending_addr = data->addr;
+- irq_work_queue(&event->pending_irq);
++
+ } else if (event->attr.exclude_kernel && valid_sample) {
+ /*
+ * Should not be able to return to user space without
+- * consuming pending_sigtrap; with exceptions:
++ * consuming pending_work; with exceptions:
+ *
+ * 1. Where !exclude_kernel, events can overflow again
+ * in the kernel without returning to user space.
+@@ -9756,7 +9745,7 @@ static int __perf_event_overflow(struct
+ * To approximate progress (with false negatives),
+ * check 32-bit hash of the current IP.
+ */
+- WARN_ON_ONCE(event->pending_sigtrap != pending_id);
++ WARN_ON_ONCE(event->pending_work != pending_id);
+ }
+ }
+
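
The enqueue logic the new version adopts, distilled into a hedged sketch
of the hunk above: task_work_add() is attempted first and the pending
work is accounted only if the task accepted it (a non-zero return means
the task is exiting), with the notify mode picked by context.

    enum task_work_notify_mode notify_mode;

    notify_mode = in_nmi() ? TWA_NMI_CURRENT : TWA_RESUME;

    if (!event->pending_work &&
        !task_work_add(current, &event->pending_task, notify_mode)) {
            event->pending_work = pending_id;       /* consumed on return
                                                     * to user mode */
            local_inc(&event->ctx->nr_pending);
    }
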
diff --git a/debian/patches-rt/0003-perf-Remove-perf_swevent_get_recursion_context-from-.patch b/debian/patches-rt/0003-perf-Remove-perf_swevent_get_recursion_context-from-.patch
deleted file mode 100644
index 55e31a0ebd..0000000000
--- a/debian/patches-rt/0003-perf-Remove-perf_swevent_get_recursion_context-from-.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 12 Mar 2024 19:01:51 +0100
-Subject: [PATCH 3/4] perf: Remove perf_swevent_get_recursion_context() from
- perf_pending_task().
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
-
-perf_swevent_get_recursion_context() is supposed to avoid recursion.
-This requires remaining on the same CPU in order to decrement/increment
-the same counter. This is done by using preempt_disable(). Having
-preemption disabled while sending a signal leads to locking problems on
-PREEMPT_RT because sighand, a spinlock_t, becomes a sleeping lock.
-
-This callback runs in task context and currently delivers only a signal
-to "itself". Any kind of recursion protection in this context is not
-required.
-
-Remove recursion protection in perf_pending_task().
-
-Tested-by: Marco Elver <elver@google.com>
-Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-Reported-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-Link: https://lore.kernel.org/r/20240312180814.3373778-4-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/events/core.c | 12 ------------
- 1 file changed, 12 deletions(-)
-
---- a/kernel/events/core.c
-+++ b/kernel/events/core.c
-@@ -6785,14 +6785,6 @@ static void perf_pending_irq(struct irq_
- static void perf_pending_task(struct callback_head *head)
- {
- struct perf_event *event = container_of(head, struct perf_event, pending_task);
-- int rctx;
--
-- /*
-- * If we 'fail' here, that's OK, it means recursion is already disabled
-- * and we won't recurse 'further'.
-- */
-- preempt_disable_notrace();
-- rctx = perf_swevent_get_recursion_context();
-
- if (event->pending_work) {
- event->pending_work = 0;
-@@ -6800,10 +6792,6 @@ static void perf_pending_task(struct cal
- local_dec(&event->ctx->nr_pending);
- }
-
-- if (rctx >= 0)
-- perf_swevent_put_recursion_context(rctx);
-- preempt_enable_notrace();
--
- put_event(event);
- }
-
diff --git a/debian/patches-rt/0003-printk-nbcon-Remove-return-value-for-write_atomic.patch b/debian/patches-rt/0003-printk-nbcon-Remove-return-value-for-write_atomic.patch
index 92ecbe0366..fe6d01032d 100644
--- a/debian/patches-rt/0003-printk-nbcon-Remove-return-value-for-write_atomic.patch
+++ b/debian/patches-rt/0003-printk-nbcon-Remove-return-value-for-write_atomic.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Fri, 20 Oct 2023 09:52:59 +0000
-Subject: [PATCH 03/46] printk: nbcon: Remove return value for write_atomic()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 03/48] printk: nbcon: Remove return value for write_atomic()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
The return value of write_atomic() does not provide any useful
information. On the contrary, it makes things more complicated
diff --git a/debian/patches-rt/0003-tcp-move-inet_twsk_schedule-helper-out-of-header.patch b/debian/patches-rt/0003-tcp-move-inet_twsk_schedule-helper-out-of-header.patch
new file mode 100644
index 0000000000..d75af6d28d
--- /dev/null
+++ b/debian/patches-rt/0003-tcp-move-inet_twsk_schedule-helper-out-of-header.patch
@@ -0,0 +1,45 @@
+From: Florian Westphal <fw@strlen.de>
+Date: Tue, 4 Jun 2024 16:08:49 +0200
+Subject: [PATCH 3/3] tcp: move inet_twsk_schedule helper out of header
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+It's no longer used outside inet_timewait_sock.c, so move it there.
+
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20240604140903.31939-4-fw@strlen.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/net/inet_timewait_sock.h | 5 -----
+ net/ipv4/inet_timewait_sock.c | 5 +++++
+ 2 files changed, 5 insertions(+), 5 deletions(-)
+
+--- a/include/net/inet_timewait_sock.h
++++ b/include/net/inet_timewait_sock.h
+@@ -101,11 +101,6 @@ void inet_twsk_hashdance_schedule(struct
+ void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
+ bool rearm);
+
+-static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
+-{
+- __inet_twsk_schedule(tw, timeo, false);
+-}
+-
+ static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
+ {
+ __inet_twsk_schedule(tw, timeo, true);
+--- a/net/ipv4/inet_timewait_sock.c
++++ b/net/ipv4/inet_timewait_sock.c
+@@ -92,6 +92,11 @@ static void inet_twsk_add_node_rcu(struc
+ hlist_nulls_add_head_rcu(&tw->tw_node, list);
+ }
+
++static void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
++{
++ __inet_twsk_schedule(tw, timeo, false);
++}
++
+ /*
+ * Enter the time wait state. This is called with locally disabled BH.
+ * Essentially we whip up a timewait bucket, copy the relevant info into it
diff --git a/debian/patches-rt/0003-time-Allow-to-preempt-after-a-callback.patch b/debian/patches-rt/0003-time-Allow-to-preempt-after-a-callback.patch
index 3b6f87857c..7481d00114 100644
--- a/debian/patches-rt/0003-time-Allow-to-preempt-after-a-callback.patch
+++ b/debian/patches-rt/0003-time-Allow-to-preempt-after-a-callback.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 4 Aug 2023 13:30:39 +0200
Subject: [PATCH 3/3] time: Allow to preempt after a callback.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
The TIMER_SOFTIRQ handler invokes timer callbacks of the expired timers.
Before each invocation the timer_base::lock is dropped. The only lock
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -1563,9 +1563,16 @@ static inline void timer_base_unlock_exp
+@@ -1562,9 +1562,16 @@ static inline void timer_base_unlock_exp
*/
static void timer_sync_wait_running(struct timer_base *base)
{
diff --git a/debian/patches-rt/0003-zram-Shrink-zram_table_entry-flags.patch b/debian/patches-rt/0003-zram-Shrink-zram_table_entry-flags.patch
new file mode 100644
index 0000000000..153dee8fd2
--- /dev/null
+++ b/debian/patches-rt/0003-zram-Shrink-zram_table_entry-flags.patch
@@ -0,0 +1,48 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 20 Jun 2024 12:53:06 +0200
+Subject: [PATCH 3/3] zram: Shrink zram_table_entry::flags.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+The zram_table_entry::flags member is of type long and uses 8 bytes on a
+64bit architecture. With a PAGE_SIZE of 256KiB we have PAGE_SHIFT of 18
+which in turn leads to __NR_ZRAM_PAGEFLAGS = 27. This still fits in an
+ordinary integer.
+By reducing the size of `flags' to four bytes, the size of the struct
+goes back to 16 bytes. The padding between the lock and ac_time (if
+enabled) is also gone.
+
+Make zram_table_entry::flags an unsigned int and update the build test
+to reflect the change.
+
+Link: https://lore.kernel.org/r/20240620153556.777272-4-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/block/zram/zram_drv.c | 3 ++-
+ drivers/block/zram/zram_drv.h | 2 +-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -2412,9 +2412,10 @@ static void destroy_devices(void)
+
+ static int __init zram_init(void)
+ {
++ struct zram_table_entry zram_te;
+ int ret;
+
+- BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > BITS_PER_LONG);
++ BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > sizeof(zram_te.flags) * 8);
+
+ ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
+ zcomp_cpu_up_prepare, zcomp_cpu_dead);
+--- a/drivers/block/zram/zram_drv.h
++++ b/drivers/block/zram/zram_drv.h
+@@ -66,7 +66,7 @@ struct zram_table_entry {
+ unsigned long handle;
+ unsigned long element;
+ };
+- unsigned long flags;
++ unsigned int flags;
+ spinlock_t lock;
+ #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
+ ktime_t ac_time;
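
The size arithmetic above can be checked with a plain userspace sketch.
The struct names below are hypothetical stand-ins (spinlock_t is modeled
as a 4-byte int, LP64 assumed), not the real zram layout:

#include <stdio.h>

struct entry_before {
	unsigned long handle;	/* 8 bytes on LP64 */
	unsigned long flags;	/* 8 bytes */
	unsigned int lock;	/* 4 bytes + 4 bytes tail padding -> 24 */
};

struct entry_after {
	unsigned long handle;	/* 8 bytes */
	unsigned int flags;	/* 4 bytes, packs with ... */
	unsigned int lock;	/* ... the 4-byte lock word -> 16 */
};

int main(void)
{
	/* 27 page flags still fit into a 32bit flags word. */
	_Static_assert(27 <= sizeof(unsigned int) * 8, "flags too small");

	printf("before: %zu bytes\n", sizeof(struct entry_before));
	printf("after:  %zu bytes\n", sizeof(struct entry_after));
	return 0;
}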
diff --git a/debian/patches-rt/0004-ARM-vfp-Move-sending-signals-outside-of-vfp_lock-ed-.patch b/debian/patches-rt/0004-ARM-vfp-Move-sending-signals-outside-of-vfp_lock-ed-.patch
index 34f4e6e0ec..45080a4fe0 100644
--- a/debian/patches-rt/0004-ARM-vfp-Move-sending-signals-outside-of-vfp_lock-ed-.patch
+++ b/debian/patches-rt/0004-ARM-vfp-Move-sending-signals-outside-of-vfp_lock-ed-.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 28 Jun 2023 09:39:33 +0200
Subject: [PATCH 4/4] ARM: vfp: Move sending signals outside of vfp_lock()ed
section.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
VFP_bounce() is invoked from within vfp_support_entry() and may send a
signal. Sending a signal uses spinlock_t which becomes a sleeping lock
diff --git a/debian/patches-rt/drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch b/debian/patches-rt/0004-drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch
index 33a7f3cc72..6ac572a8ad 100644
--- a/debian/patches-rt/drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch
+++ b/debian/patches-rt/0004-drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 6 Dec 2018 09:52:20 +0100
-Subject: [PATCH] drm/i915: Disable tracing points on PREEMPT_RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 4/8] drm/i915: Disable tracing points on PREEMPT_RT
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Luca Abeni reported this:
| BUG: scheduling while atomic: kworker/u8:2/15203/0x00000003
@@ -23,6 +23,7 @@ is disabled and so the locks must not be acquired on PREEMPT_RT.
Based on this I don't see any other way than disable trace points on
PREMPT_RT.
+Acked-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Reported-by: Luca Abeni <lucabe72@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
diff --git a/debian/patches-rt/0004-net-Rename-rps_lock-to-backlog_lock.patch b/debian/patches-rt/0004-net-Rename-rps_lock-to-backlog_lock.patch
deleted file mode 100644
index c40d93a753..0000000000
--- a/debian/patches-rt/0004-net-Rename-rps_lock-to-backlog_lock.patch
+++ /dev/null
@@ -1,164 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 25 Mar 2024 08:40:31 +0100
-Subject: [PATCH 4/4] net: Rename rps_lock to backlog_lock.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
-
-The rps_lock.*() functions use the inner lock of a sk_buff_head for
-locking. This lock is used if RPS is enabled, otherwise the list is
-accessed lockless and disabling interrupts is enough for the
-synchronisation because it is only accessed CPU local. Not only the list
-is protected but also the NAPI state protected.
-With the addition of backlog threads, the lock is also needed because of
-the cross CPU access even without RPS. The clean up of the defer_list
-list is also done via backlog threads (if enabled).
-
-It has been suggested to rename the locking function since it is no
-longer just RPS.
-
-Rename the rps_lock*() functions to backlog_lock*().
-
-Suggested-by: Jakub Kicinski <kuba@kernel.org>
-Acked-by: Jakub Kicinski <kuba@kernel.org>
-Link: https://lore.kernel.org/r/20240325074943.289909-5-bigeasy@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- net/core/dev.c | 34 +++++++++++++++++-----------------
- 1 file changed, 17 insertions(+), 17 deletions(-)
-
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -223,8 +223,8 @@ static bool use_backlog_threads(void)
-
- #endif
-
--static inline void rps_lock_irqsave(struct softnet_data *sd,
-- unsigned long *flags)
-+static inline void backlog_lock_irq_save(struct softnet_data *sd,
-+ unsigned long *flags)
- {
- if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
- spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
-@@ -232,7 +232,7 @@ static inline void rps_lock_irqsave(stru
- local_irq_save(*flags);
- }
-
--static inline void rps_lock_irq_disable(struct softnet_data *sd)
-+static inline void backlog_lock_irq_disable(struct softnet_data *sd)
- {
- if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
- spin_lock_irq(&sd->input_pkt_queue.lock);
-@@ -240,8 +240,8 @@ static inline void rps_lock_irq_disable(
- local_irq_disable();
- }
-
--static inline void rps_unlock_irq_restore(struct softnet_data *sd,
-- unsigned long *flags)
-+static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
-+ unsigned long *flags)
- {
- if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
- spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
-@@ -249,7 +249,7 @@ static inline void rps_unlock_irq_restor
- local_irq_restore(*flags);
- }
-
--static inline void rps_unlock_irq_enable(struct softnet_data *sd)
-+static inline void backlog_unlock_irq_enable(struct softnet_data *sd)
- {
- if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
- spin_unlock_irq(&sd->input_pkt_queue.lock);
-@@ -4733,12 +4733,12 @@ void kick_defer_list_purge(struct softne
- unsigned long flags;
-
- if (use_backlog_threads()) {
-- rps_lock_irqsave(sd, &flags);
-+ backlog_lock_irq_save(sd, &flags);
-
- if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
- __napi_schedule_irqoff(&sd->backlog);
-
-- rps_unlock_irq_restore(sd, &flags);
-+ backlog_unlock_irq_restore(sd, &flags);
-
- } else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
- smp_call_function_single_async(cpu, &sd->defer_csd);
-@@ -4800,7 +4800,7 @@ static int enqueue_to_backlog(struct sk_
- reason = SKB_DROP_REASON_NOT_SPECIFIED;
- sd = &per_cpu(softnet_data, cpu);
-
-- rps_lock_irqsave(sd, &flags);
-+ backlog_lock_irq_save(sd, &flags);
- if (!netif_running(skb->dev))
- goto drop;
- qlen = skb_queue_len(&sd->input_pkt_queue);
-@@ -4810,7 +4810,7 @@ static int enqueue_to_backlog(struct sk_
- enqueue:
- __skb_queue_tail(&sd->input_pkt_queue, skb);
- input_queue_tail_incr_save(sd, qtail);
-- rps_unlock_irq_restore(sd, &flags);
-+ backlog_unlock_irq_restore(sd, &flags);
- return NET_RX_SUCCESS;
- }
-
-@@ -4825,7 +4825,7 @@ static int enqueue_to_backlog(struct sk_
-
- drop:
- sd->dropped++;
-- rps_unlock_irq_restore(sd, &flags);
-+ backlog_unlock_irq_restore(sd, &flags);
-
- dev_core_stats_rx_dropped_inc(skb->dev);
- kfree_skb_reason(skb, reason);
-@@ -5891,7 +5891,7 @@ static void flush_backlog(struct work_st
- local_bh_disable();
- sd = this_cpu_ptr(&softnet_data);
-
-- rps_lock_irq_disable(sd);
-+ backlog_lock_irq_disable(sd);
- skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
- if (skb->dev->reg_state == NETREG_UNREGISTERING) {
- __skb_unlink(skb, &sd->input_pkt_queue);
-@@ -5899,7 +5899,7 @@ static void flush_backlog(struct work_st
- input_queue_head_incr(sd);
- }
- }
-- rps_unlock_irq_enable(sd);
-+ backlog_unlock_irq_enable(sd);
-
- skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
- if (skb->dev->reg_state == NETREG_UNREGISTERING) {
-@@ -5917,14 +5917,14 @@ static bool flush_required(int cpu)
- struct softnet_data *sd = &per_cpu(softnet_data, cpu);
- bool do_flush;
-
-- rps_lock_irq_disable(sd);
-+ backlog_lock_irq_disable(sd);
-
- /* as insertion into process_queue happens with the rps lock held,
- * process_queue access may race only with dequeue
- */
- do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
- !skb_queue_empty_lockless(&sd->process_queue);
-- rps_unlock_irq_enable(sd);
-+ backlog_unlock_irq_enable(sd);
-
- return do_flush;
- #endif
-@@ -6039,7 +6039,7 @@ static int process_backlog(struct napi_s
-
- }
-
-- rps_lock_irq_disable(sd);
-+ backlog_lock_irq_disable(sd);
- if (skb_queue_empty(&sd->input_pkt_queue)) {
- /*
- * Inline a custom version of __napi_complete().
-@@ -6055,7 +6055,7 @@ static int process_backlog(struct napi_s
- skb_queue_splice_tail_init(&sd->input_pkt_queue,
- &sd->process_queue);
- }
-- rps_unlock_irq_enable(sd);
-+ backlog_unlock_irq_enable(sd);
- }
-
- return work;
diff --git a/debian/patches-rt/0004-net-Use-nested-BH-locking-for-napi_alloc_cache.patch b/debian/patches-rt/0004-net-Use-nested-BH-locking-for-napi_alloc_cache.patch
new file mode 100644
index 0000000000..76e8f4e04c
--- /dev/null
+++ b/debian/patches-rt/0004-net-Use-nested-BH-locking-for-napi_alloc_cache.patch
@@ -0,0 +1,131 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 23 Oct 2023 17:07:56 +0200
+Subject: [PATCH 04/15] net: Use nested-BH locking for napi_alloc_cache.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+napi_alloc_cache is a per-CPU variable and relies on disabled BH for its
+locking. Without per-CPU locking in local_bh_disable() on PREEMPT_RT
+this data structure requires explicit locking.
+
+Add a local_lock_t to the data structure and use local_lock_nested_bh()
+for locking. This change adds only lockdep coverage and does not alter
+the functional behaviour for !PREEMPT_RT.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/core/skbuff.c | 29 ++++++++++++++++++++++++-----
+ 1 file changed, 24 insertions(+), 5 deletions(-)
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -277,6 +277,7 @@ static void *page_frag_alloc_1k(struct p
+ #endif
+
+ struct napi_alloc_cache {
++ local_lock_t bh_lock;
+ struct page_frag_cache page;
+ struct page_frag_1k page_small;
+ unsigned int skb_count;
+@@ -284,7 +285,9 @@ struct napi_alloc_cache {
+ };
+
+ static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
+-static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
++static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache) = {
++ .bh_lock = INIT_LOCAL_LOCK(bh_lock),
++};
+
+ /* Double check that napi_get_frags() allocates skbs with
+ * skb->head being backed by slab, not a page fragment.
+@@ -306,11 +309,16 @@ void napi_get_frags_check(struct napi_st
+ void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
+ {
+ struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
++ void *data;
+
+ fragsz = SKB_DATA_ALIGN(fragsz);
+
+- return __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
++ local_lock_nested_bh(&napi_alloc_cache.bh_lock);
++ data = __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
+ align_mask);
++ local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
++ return data;
++
+ }
+ EXPORT_SYMBOL(__napi_alloc_frag_align);
+
+@@ -338,16 +346,20 @@ static struct sk_buff *napi_skb_cache_ge
+ struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+ struct sk_buff *skb;
+
++ local_lock_nested_bh(&napi_alloc_cache.bh_lock);
+ if (unlikely(!nc->skb_count)) {
+ nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
+ GFP_ATOMIC,
+ NAPI_SKB_CACHE_BULK,
+ nc->skb_cache);
+- if (unlikely(!nc->skb_count))
++ if (unlikely(!nc->skb_count)) {
++ local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
+ return NULL;
++ }
+ }
+
+ skb = nc->skb_cache[--nc->skb_count];
++ local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
+ kasan_mempool_unpoison_object(skb, kmem_cache_size(net_hotdata.skbuff_cache));
+
+ return skb;
+@@ -740,9 +752,13 @@ struct sk_buff *__netdev_alloc_skb(struc
+ pfmemalloc = nc->pfmemalloc;
+ } else {
+ local_bh_disable();
++ local_lock_nested_bh(&napi_alloc_cache.bh_lock);
++
+ nc = this_cpu_ptr(&napi_alloc_cache.page);
+ data = page_frag_alloc(nc, len, gfp_mask);
+ pfmemalloc = nc->pfmemalloc;
++
++ local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
+ local_bh_enable();
+ }
+
+@@ -806,11 +822,11 @@ struct sk_buff *napi_alloc_skb(struct na
+ goto skb_success;
+ }
+
+- nc = this_cpu_ptr(&napi_alloc_cache);
+-
+ if (sk_memalloc_socks())
+ gfp_mask |= __GFP_MEMALLOC;
+
++ local_lock_nested_bh(&napi_alloc_cache.bh_lock);
++ nc = this_cpu_ptr(&napi_alloc_cache);
+ if (NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) {
+ /* we are artificially inflating the allocation size, but
+ * that is not as bad as it may look like, as:
+@@ -832,6 +848,7 @@ struct sk_buff *napi_alloc_skb(struct na
+ data = page_frag_alloc(&nc->page, len, gfp_mask);
+ pfmemalloc = nc->page.pfmemalloc;
+ }
++ local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
+
+ if (unlikely(!data))
+ return NULL;
+@@ -1429,6 +1446,7 @@ static void napi_skb_cache_put(struct sk
+ if (!kasan_mempool_poison_object(skb))
+ return;
+
++ local_lock_nested_bh(&napi_alloc_cache.bh_lock);
+ nc->skb_cache[nc->skb_count++] = skb;
+
+ if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
+@@ -1440,6 +1458,7 @@ static void napi_skb_cache_put(struct sk
+ nc->skb_cache + NAPI_SKB_CACHE_HALF);
+ nc->skb_count = NAPI_SKB_CACHE_HALF;
+ }
++ local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
+ }
+
+ void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason)
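
The nested-BH locking recipe used here (and in the following patches of
this series) is always the same three steps: embed a local_lock_t in the
per-CPU struct, initialize it with INIT_LOCAL_LOCK(), and bracket every
access with local_lock_nested_bh()/local_unlock_nested_bh(). A minimal
sketch with a hypothetical per-CPU cache, not actual kernel code:

#include <linux/local_lock.h>
#include <linux/percpu.h>

struct my_cache {
	local_lock_t bh_lock;
	unsigned int count;
};

static DEFINE_PER_CPU(struct my_cache, my_cache) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};

/* Must run with BH disabled, e.g. from NAPI poll. */
static unsigned int my_cache_take(void)
{
	struct my_cache *c;
	unsigned int v = 0;

	/* Lockdep annotation on !PREEMPT_RT, a real per-CPU lock on RT. */
	local_lock_nested_bh(&my_cache.bh_lock);
	c = this_cpu_ptr(&my_cache);
	if (c->count)
		v = c->count--;
	local_unlock_nested_bh(&my_cache.bh_lock);
	return v;
}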
diff --git a/debian/patches-rt/0004-perf-Shrink-the-size-of-the-recursion-counter.patch b/debian/patches-rt/0004-perf-Shrink-the-size-of-the-recursion-counter.patch
new file mode 100644
index 0000000000..ed4998a37a
--- /dev/null
+++ b/debian/patches-rt/0004-perf-Shrink-the-size-of-the-recursion-counter.patch
@@ -0,0 +1,66 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 4 Jul 2024 19:03:38 +0200
+Subject: [PATCH 4/7] perf: Shrink the size of the recursion counter.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+There are four recursion counters, one for each context. The type of
+each counter is `int' but it is used as a `bool' since it is only
+incremented if zero.
+The main goal here is to shrink the whole array into a 32bit word which
+can later be added to task_struct in an existing hole.
+
+Reduce the type of the recursion counter to an unsigned char and keep
+the increment/decrement operations.
+
+Tested-by: Marco Elver <elver@google.com>
+Link: https://lore.kernel.org/20240621091601.18227-1-frederic@kernel.org
+Link: https://lore.kernel.org/r/20240704170424.1466941-5-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/events/callchain.c | 2 +-
+ kernel/events/core.c | 2 +-
+ kernel/events/internal.h | 4 ++--
+ 3 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/kernel/events/callchain.c
++++ b/kernel/events/callchain.c
+@@ -29,7 +29,7 @@ static inline size_t perf_callchain_entr
+ sysctl_perf_event_max_contexts_per_stack));
+ }
+
+-static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
++static DEFINE_PER_CPU(u8, callchain_recursion[PERF_NR_CONTEXTS]);
+ static atomic_t nr_callchain_events;
+ static DEFINE_MUTEX(callchain_mutex);
+ static struct callchain_cpus_entries *callchain_cpus_entries;
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -9776,7 +9776,7 @@ struct swevent_htable {
+ int hlist_refcount;
+
+ /* Recursion avoidance in each contexts */
+- int recursion[PERF_NR_CONTEXTS];
++ u8 recursion[PERF_NR_CONTEXTS];
+ };
+
+ static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
+--- a/kernel/events/internal.h
++++ b/kernel/events/internal.h
+@@ -208,7 +208,7 @@ arch_perf_out_copy_user(void *dst, const
+
+ DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
+
+-static inline int get_recursion_context(int *recursion)
++static inline int get_recursion_context(u8 *recursion)
+ {
+ unsigned char rctx = interrupt_context_level();
+
+@@ -221,7 +221,7 @@ static inline int get_recursion_context(
+ return rctx;
+ }
+
+-static inline void put_recursion_context(int *recursion, int rctx)
++static inline void put_recursion_context(u8 *recursion, int rctx)
+ {
+ barrier();
+ recursion[rctx]--;
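
The guard itself is small enough to model in userspace. A rough
stand-alone version follows; unlike the kernel code it takes the context
index as an argument instead of deriving it from
interrupt_context_level(), and it is single-threaded, so the per-CPU
aspect is ignored:

#include <assert.h>
#include <stdio.h>

#define PERF_NR_CONTEXTS 4	/* task, softirq, hardirq, nmi */

typedef unsigned char u8;

static u8 recursion[PERF_NR_CONTEXTS];

/* Returns ctx on success, -1 if this context is already active. */
static int get_recursion_context(u8 *rec, int ctx)
{
	if (rec[ctx])
		return -1;
	rec[ctx]++;
	return ctx;
}

static void put_recursion_context(u8 *rec, int ctx)
{
	rec[ctx]--;
}

int main(void)
{
	int rctx = get_recursion_context(recursion, 0);

	/* One byte per context: the whole array is a 32bit word. */
	assert(sizeof(recursion) == 4);

	assert(rctx == 0);
	/* Re-entering the same context is refused ... */
	assert(get_recursion_context(recursion, 0) == -1);
	/* ... until the first user releases it. */
	put_recursion_context(recursion, rctx);
	assert(get_recursion_context(recursion, 0) == 0);
	puts("ok");
	return 0;
}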
diff --git a/debian/patches-rt/0004-printk-Check-printk_deferred_enter-_exit-usage.patch b/debian/patches-rt/0004-printk-Check-printk_deferred_enter-_exit-usage.patch
index 4cde1a93e9..ea2224e7e7 100644
--- a/debian/patches-rt/0004-printk-Check-printk_deferred_enter-_exit-usage.patch
+++ b/debian/patches-rt/0004-printk-Check-printk_deferred_enter-_exit-usage.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 22 Sep 2023 14:58:18 +0000
-Subject: [PATCH 04/46] printk: Check printk_deferred_enter()/_exit() usage
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 04/48] printk: Check printk_deferred_enter()/_exit() usage
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Add validation that printk_deferred_enter()/_exit() are called in
non-migration contexts.
diff --git a/debian/patches-rt/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch b/debian/patches-rt/0005-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch
index 290298aead..f8e91753b7 100644
--- a/debian/patches-rt/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch
+++ b/debian/patches-rt/0005-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch
@@ -1,8 +1,8 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 8 Sep 2021 19:03:41 +0200
-Subject: [PATCH 09/10] drm/i915/gt: Use spin_lock_irq() instead of
+Subject: [PATCH 5/8] drm/i915/gt: Use spin_lock_irq() instead of
local_irq_disable() + spin_lock()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
execlists_dequeue() is invoked from a function which uses
local_irq_disable() to disable interrupts so the spin_lock() behaves
diff --git a/debian/patches-rt/0005-net-tcp_sigpool-Use-nested-BH-locking-for-sigpool_sc.patch b/debian/patches-rt/0005-net-tcp_sigpool-Use-nested-BH-locking-for-sigpool_sc.patch
new file mode 100644
index 0000000000..d3773f2f0d
--- /dev/null
+++ b/debian/patches-rt/0005-net-tcp_sigpool-Use-nested-BH-locking-for-sigpool_sc.patch
@@ -0,0 +1,75 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 24 Nov 2023 10:11:03 +0100
+Subject: [PATCH 05/15] net/tcp_sigpool: Use nested-BH locking for
+ sigpool_scratch.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+sigpool_scratch is a per-CPU variable and relies on disabled BH for its
+locking. Without per-CPU locking in local_bh_disable() on PREEMPT_RT
+this data structure requires explicit locking.
+
+Make a struct with a pad member (original sigpool_scratch) and a
+local_lock_t and use local_lock_nested_bh() for locking. This change
+adds only lockdep coverage and does not alter the functional behaviour
+for !PREEMPT_RT.
+
+Cc: David Ahern <dsahern@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/ipv4/tcp_sigpool.c | 17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/tcp_sigpool.c
++++ b/net/ipv4/tcp_sigpool.c
+@@ -10,7 +10,14 @@
+ #include <net/tcp.h>
+
+ static size_t __scratch_size;
+-static DEFINE_PER_CPU(void __rcu *, sigpool_scratch);
++struct sigpool_scratch {
++ local_lock_t bh_lock;
++ void __rcu *pad;
++};
++
++static DEFINE_PER_CPU(struct sigpool_scratch, sigpool_scratch) = {
++ .bh_lock = INIT_LOCAL_LOCK(bh_lock),
++};
+
+ struct sigpool_entry {
+ struct crypto_ahash *hash;
+@@ -72,7 +79,7 @@ static int sigpool_reserve_scratch(size_
+ break;
+ }
+
+- old_scratch = rcu_replace_pointer(per_cpu(sigpool_scratch, cpu),
++ old_scratch = rcu_replace_pointer(per_cpu(sigpool_scratch.pad, cpu),
+ scratch, lockdep_is_held(&cpool_mutex));
+ if (!cpu_online(cpu) || !old_scratch) {
+ kfree(old_scratch);
+@@ -93,7 +100,7 @@ static void sigpool_scratch_free(void)
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+- kfree(rcu_replace_pointer(per_cpu(sigpool_scratch, cpu),
++ kfree(rcu_replace_pointer(per_cpu(sigpool_scratch.pad, cpu),
+ NULL, lockdep_is_held(&cpool_mutex)));
+ __scratch_size = 0;
+ }
+@@ -277,7 +284,8 @@ int tcp_sigpool_start(unsigned int id, s
+ /* Pairs with tcp_sigpool_reserve_scratch(), scratch area is
+ * valid (allocated) until tcp_sigpool_end().
+ */
+- c->scratch = rcu_dereference_bh(*this_cpu_ptr(&sigpool_scratch));
++ local_lock_nested_bh(&sigpool_scratch.bh_lock);
++ c->scratch = rcu_dereference_bh(*this_cpu_ptr(&sigpool_scratch.pad));
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(tcp_sigpool_start);
+@@ -286,6 +294,7 @@ void tcp_sigpool_end(struct tcp_sigpool
+ {
+ struct crypto_ahash *hash = crypto_ahash_reqtfm(c->req);
+
++ local_unlock_nested_bh(&sigpool_scratch.bh_lock);
+ rcu_read_unlock_bh();
+ ahash_request_free(c->req);
+ crypto_free_ahash(hash);
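
The replace-and-free dance in sigpool_reserve_scratch() is a generic RCU
idiom: publish the new buffer with rcu_replace_pointer() while holding
the mutex, then free the old one once no reader can still see it. A
minimal sketch under those assumptions (hypothetical scratch names,
synchronize_rcu() used for simplicity):

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

static void __rcu *scratch;
static DEFINE_MUTEX(scratch_mutex);

static int scratch_resize(size_t size)
{
	void *n, *old;

	n = kmalloc(size, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	mutex_lock(&scratch_mutex);
	/* Readers see either the old or the new buffer, never junk. */
	old = rcu_replace_pointer(scratch, n,
				  lockdep_is_held(&scratch_mutex));
	mutex_unlock(&scratch_mutex);

	/* Wait until all pre-existing RCU readers are done, then free. */
	synchronize_rcu();
	kfree(old);
	return 0;
}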
diff --git a/debian/patches-rt/0005-perf-Move-swevent_htable-recursion-into-task_struct.patch b/debian/patches-rt/0005-perf-Move-swevent_htable-recursion-into-task_struct.patch
new file mode 100644
index 0000000000..5070c10a30
--- /dev/null
+++ b/debian/patches-rt/0005-perf-Move-swevent_htable-recursion-into-task_struct.patch
@@ -0,0 +1,121 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 4 Jul 2024 19:03:39 +0200
+Subject: [PATCH 5/7] perf: Move swevent_htable::recursion into task_struct.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+The swevent_htable::recursion counter is used to avoid creating a
+swevent while an event is being processed, preventing recursion. The
+counter is per-CPU and preemption must be disabled to have a stable
+counter. perf_pending_task() disables preemption to access the
+counter, and then sends a signal. This is problematic on PREEMPT_RT
+because sending a signal uses a spinlock_t which must not be acquired
+in atomic context on PREEMPT_RT because it becomes a sleeping lock.
+
+The atomic context can be avoided by moving the counter into the
+task_struct. There is a 4 byte hole between futex_state (usually always
+on) and the following perf pointer (perf_event_ctxp). After the
+recursion counter lost some weight it fits perfectly.
+
+Move swevent_htable::recursion into task_struct.
+
+Tested-by: Marco Elver <elver@google.com>
+Link: https://lore.kernel.org/r/20240704170424.1466941-6-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/perf_event.h | 6 ------
+ include/linux/sched.h | 7 +++++++
+ kernel/events/core.c | 13 +++----------
+ kernel/events/internal.h | 2 +-
+ 4 files changed, 11 insertions(+), 17 deletions(-)
+
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -970,12 +970,6 @@ struct perf_event_context {
+ local_t nr_pending;
+ };
+
+-/*
+- * Number of contexts where an event can trigger:
+- * task, softirq, hardirq, nmi.
+- */
+-#define PERF_NR_CONTEXTS 4
+-
+ struct perf_cpu_pmu_context {
+ struct perf_event_pmu_context epc;
+ struct perf_event_pmu_context *task_epc;
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -734,6 +734,12 @@ enum perf_event_task_context {
+ perf_nr_task_contexts,
+ };
+
++/*
++ * Number of contexts where an event can trigger:
++ * task, softirq, hardirq, nmi.
++ */
++#define PERF_NR_CONTEXTS 4
++
+ struct wake_q_node {
+ struct wake_q_node *next;
+ };
+@@ -1256,6 +1262,7 @@ struct task_struct {
+ unsigned int futex_state;
+ #endif
+ #ifdef CONFIG_PERF_EVENTS
++ u8 perf_recursion[PERF_NR_CONTEXTS];
+ struct perf_event_context *perf_event_ctxp;
+ struct mutex perf_event_mutex;
+ struct list_head perf_event_list;
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -9774,11 +9774,7 @@ struct swevent_htable {
+ struct swevent_hlist *swevent_hlist;
+ struct mutex hlist_mutex;
+ int hlist_refcount;
+-
+- /* Recursion avoidance in each contexts */
+- u8 recursion[PERF_NR_CONTEXTS];
+ };
+-
+ static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
+
+ /*
+@@ -9976,17 +9972,13 @@ DEFINE_PER_CPU(struct pt_regs, __perf_re
+
+ int perf_swevent_get_recursion_context(void)
+ {
+- struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
+-
+- return get_recursion_context(swhash->recursion);
++ return get_recursion_context(current->perf_recursion);
+ }
+ EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
+
+ void perf_swevent_put_recursion_context(int rctx)
+ {
+- struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
+-
+- put_recursion_context(swhash->recursion, rctx);
++ put_recursion_context(current->perf_recursion, rctx);
+ }
+
+ void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
+@@ -13653,6 +13645,7 @@ int perf_event_init_task(struct task_str
+ {
+ int ret;
+
++ memset(child->perf_recursion, 0, sizeof(child->perf_recursion));
+ child->perf_event_ctxp = NULL;
+ mutex_init(&child->perf_event_mutex);
+ INIT_LIST_HEAD(&child->perf_event_list);
+--- a/kernel/events/internal.h
++++ b/kernel/events/internal.h
+@@ -221,7 +221,7 @@ static inline int get_recursion_context(
+ return rctx;
+ }
+
+-static inline void put_recursion_context(u8 *recursion, int rctx)
++static inline void put_recursion_context(u8 *recursion, unsigned char rctx)
+ {
+ barrier();
+ recursion[rctx]--;
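
The hole being filled can be demonstrated with a userspace model of the
surrounding task_struct members (hypothetical struct names, LP64 layout
assumed):

#include <stdio.h>

typedef unsigned char u8;

struct task_like_before {
	unsigned int futex_state;	/* 4 bytes */
	/* 4 byte hole */
	void *perf_event_ctxp;		/* 8-byte aligned */
};

struct task_like_after {
	unsigned int futex_state;
	u8 perf_recursion[4];		/* fills the hole exactly */
	void *perf_event_ctxp;
};

int main(void)
{
	/* Both print 16 on LP64: the array costs no extra space. */
	printf("before: %zu, after: %zu\n",
	       sizeof(struct task_like_before),
	       sizeof(struct task_like_after));
	return 0;
}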
diff --git a/debian/patches-rt/0005-printk-nbcon-Add-detailed-doc-for-write_atomic.patch b/debian/patches-rt/0005-printk-nbcon-Add-detailed-doc-for-write_atomic.patch
index 7d6a606df3..7c3d1192e9 100644
--- a/debian/patches-rt/0005-printk-nbcon-Add-detailed-doc-for-write_atomic.patch
+++ b/debian/patches-rt/0005-printk-nbcon-Add-detailed-doc-for-write_atomic.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Wed, 7 Feb 2024 18:38:14 +0000
-Subject: [PATCH 05/46] printk: nbcon: Add detailed doc for write_atomic()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 05/48] printk: nbcon: Add detailed doc for write_atomic()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
The write_atomic() callback has special requirements and is
allowed to use special helper functions. Provide detailed
@@ -9,6 +9,7 @@ documentation of the callback so that a developer has a
chance of implementing it correctly.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/console.h | 33 +++++++++++++++++++++++++++++----
diff --git a/debian/patches-rt/0010-drm-i915-Drop-the-irqs_disabled-check.patch b/debian/patches-rt/0006-drm-i915-Drop-the-irqs_disabled-check.patch
index a40ec9b965..931d170015 100644
--- a/debian/patches-rt/0010-drm-i915-Drop-the-irqs_disabled-check.patch
+++ b/debian/patches-rt/0006-drm-i915-Drop-the-irqs_disabled-check.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 1 Oct 2021 20:01:03 +0200
-Subject: [PATCH 10/10] drm/i915: Drop the irqs_disabled() check
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 6/8] drm/i915: Drop the irqs_disabled() check
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
The !irqs_disabled() check triggers on PREEMPT_RT even with
i915_sched_engine::lock acquired. The reason is the lock is transformed
@@ -14,6 +14,7 @@ caller and will yell if the interrupts are not disabled.
Remove the !irqs_disabled() check.
Reported-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Acked-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/gpu/drm/i915/i915_request.c | 2 --
diff --git a/debian/patches-rt/0006-net-ipv4-Use-nested-BH-locking-for-ipv4_tcp_sk.patch b/debian/patches-rt/0006-net-ipv4-Use-nested-BH-locking-for-ipv4_tcp_sk.patch
new file mode 100644
index 0000000000..9b882bf198
--- /dev/null
+++ b/debian/patches-rt/0006-net-ipv4-Use-nested-BH-locking-for-ipv4_tcp_sk.patch
@@ -0,0 +1,94 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 24 Oct 2023 09:38:48 +0200
+Subject: [PATCH 06/15] net/ipv4: Use nested-BH locking for ipv4_tcp_sk.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+ipv4_tcp_sk is a per-CPU variable and relies on disabled BH for its
+locking. Without per-CPU locking in local_bh_disable() on PREEMPT_RT
+this data structure requires explicit locking.
+
+Make a struct with a sock member (original ipv4_tcp_sk) and a
+local_lock_t and use local_lock_nested_bh() for locking. This change
+adds only lockdep coverage and does not alter the functional behaviour
+for !PREEMPT_RT.
+
+Cc: David Ahern <dsahern@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/net/sock.h | 5 +++++
+ net/ipv4/tcp_ipv4.c | 15 +++++++++++----
+ 2 files changed, 16 insertions(+), 4 deletions(-)
+
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -544,6 +544,11 @@ struct sock {
+ netns_tracker ns_tracker;
+ };
+
++struct sock_bh_locked {
++ struct sock *sock;
++ local_lock_t bh_lock;
++};
++
+ enum sk_pacing {
+ SK_PACING_NONE = 0,
+ SK_PACING_NEEDED = 1,
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -93,7 +93,9 @@ static int tcp_v4_md5_hash_hdr(char *md5
+ struct inet_hashinfo tcp_hashinfo;
+ EXPORT_SYMBOL(tcp_hashinfo);
+
+-static DEFINE_PER_CPU(struct sock *, ipv4_tcp_sk);
++static DEFINE_PER_CPU(struct sock_bh_locked, ipv4_tcp_sk) = {
++ .bh_lock = INIT_LOCAL_LOCK(bh_lock),
++};
+
+ static u32 tcp_v4_init_seq(const struct sk_buff *skb)
+ {
+@@ -885,7 +887,9 @@ static void tcp_v4_send_reset(const stru
+ arg.tos = ip_hdr(skb)->tos;
+ arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
+ local_bh_disable();
+- ctl_sk = this_cpu_read(ipv4_tcp_sk);
++ local_lock_nested_bh(&ipv4_tcp_sk.bh_lock);
++ ctl_sk = this_cpu_read(ipv4_tcp_sk.sock);
++
+ sock_net_set(ctl_sk, net);
+ if (sk) {
+ ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
+@@ -910,6 +914,7 @@ static void tcp_v4_send_reset(const stru
+ sock_net_set(ctl_sk, &init_net);
+ __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
+ __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
++ local_unlock_nested_bh(&ipv4_tcp_sk.bh_lock);
+ local_bh_enable();
+
+ #ifdef CONFIG_TCP_MD5SIG
+@@ -1005,7 +1010,8 @@ static void tcp_v4_send_ack(const struct
+ arg.tos = tos;
+ arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
+ local_bh_disable();
+- ctl_sk = this_cpu_read(ipv4_tcp_sk);
++ local_lock_nested_bh(&ipv4_tcp_sk.bh_lock);
++ ctl_sk = this_cpu_read(ipv4_tcp_sk.sock);
+ sock_net_set(ctl_sk, net);
+ ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
+ inet_twsk(sk)->tw_mark : READ_ONCE(sk->sk_mark);
+@@ -1020,6 +1026,7 @@ static void tcp_v4_send_ack(const struct
+
+ sock_net_set(ctl_sk, &init_net);
+ __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
++ local_unlock_nested_bh(&ipv4_tcp_sk.bh_lock);
+ local_bh_enable();
+ }
+
+@@ -3620,7 +3627,7 @@ void __init tcp_v4_init(void)
+ */
+ inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
+
+- per_cpu(ipv4_tcp_sk, cpu) = sk;
++ per_cpu(ipv4_tcp_sk.sock, cpu) = sk;
+ }
+ if (register_pernet_subsys(&tcp_sk_ops))
+ panic("Failed to create the TCP control socket.\n");
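
Note the idiom in the last hunk: once the per-CPU variable is a struct,
a single member can be addressed directly inside the per-CPU accessors,
as in this_cpu_read(ipv4_tcp_sk.sock) and per_cpu(ipv4_tcp_sk.sock,
cpu). A rough sketch with a hypothetical wrapper:

#include <linux/local_lock.h>
#include <linux/percpu.h>
#include <linux/types.h>

struct thing;	/* opaque payload */

struct thing_bh_locked {
	struct thing *thing;
	local_lock_t bh_lock;
};

static DEFINE_PER_CPU(struct thing_bh_locked, pcpu_thing) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};

static bool thing_present(void)
{
	bool ret;

	local_lock_nested_bh(&pcpu_thing.bh_lock);
	/* Member access works inside the per-CPU accessor. */
	ret = this_cpu_read(pcpu_thing.thing) != NULL;
	local_unlock_nested_bh(&pcpu_thing.bh_lock);
	return ret;
}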
diff --git a/debian/patches-rt/0006-perf-Don-t-disable-preemption-in-perf_pending_task.patch b/debian/patches-rt/0006-perf-Don-t-disable-preemption-in-perf_pending_task.patch
new file mode 100644
index 0000000000..d3408aee48
--- /dev/null
+++ b/debian/patches-rt/0006-perf-Don-t-disable-preemption-in-perf_pending_task.patch
@@ -0,0 +1,60 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 4 Jul 2024 19:03:40 +0200
+Subject: [PATCH 6/7] perf: Don't disable preemption in perf_pending_task().
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+perf_pending_task() is invoked in task context and disables preemption
+because perf_swevent_get_recursion_context() used to access per-CPU
+variables. The other reason is to create an RCU read section while
+accessing the perf_event.
+
+The recursion counter is no longer per-CPU, so disabling preemption is
+no longer required. The RCU section is still needed and must be created
+explicitly.
+
+Replace the preemption-disable section with an explicit RCU read section.
+
+Tested-by: Marco Elver <elver@google.com>
+Link: https://lore.kernel.org/r/20240704170424.1466941-7-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/events/core.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -5208,10 +5208,9 @@ static void perf_pending_task_sync(struc
+ }
+
+ /*
+- * All accesses related to the event are within the same
+- * non-preemptible section in perf_pending_task(). The RCU
+- * grace period before the event is freed will make sure all
+- * those accesses are complete by then.
++ * All accesses related to the event are within the same RCU section in
++ * perf_pending_task(). The RCU grace period before the event is freed
++ * will make sure all those accesses are complete by then.
+ */
+ rcuwait_wait_event(&event->pending_work_wait, !event->pending_work, TASK_UNINTERRUPTIBLE);
+ }
+@@ -6842,7 +6841,7 @@ static void perf_pending_task(struct cal
+ * critical section as the ->pending_work reset. See comment in
+ * perf_pending_task_sync().
+ */
+- preempt_disable_notrace();
++ rcu_read_lock();
+ /*
+ * If we 'fail' here, that's OK, it means recursion is already disabled
+ * and we won't recurse 'further'.
+@@ -6855,10 +6854,10 @@ static void perf_pending_task(struct cal
+ local_dec(&event->ctx->nr_pending);
+ rcuwait_wake_up(&event->pending_work_wait);
+ }
++ rcu_read_unlock();
+
+ if (rctx >= 0)
+ perf_swevent_put_recursion_context(rctx);
+- preempt_enable_notrace();
+ }
+
+ #ifdef CONFIG_GUEST_PERF_EVENTS
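
The swap works because an RCU read section is all that is needed to keep
the event alive: the free side waits for a grace period, so the reader
only has to stay inside rcu_read_lock()/rcu_read_unlock() and may be
preempted while doing so. A minimal sketch of that lifetime rule, with
hypothetical blob/gp names:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct blob {
	int v;
	struct rcu_head rcu;
};

static struct blob __rcu *gp;

static int read_blob(void)
{
	struct blob *b;
	int v = -1;

	/* Pins the grace period only; the task may still be preempted. */
	rcu_read_lock();
	b = rcu_dereference(gp);
	if (b)
		v = b->v;
	rcu_read_unlock();
	return v;
}

static void free_blob(struct blob *b)
{
	/* Freed only after all pre-existing read sections have ended. */
	kfree_rcu(b, rcu);
}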
diff --git a/debian/patches-rt/0006-printk-nbcon-Add-callbacks-to-synchronize-with-drive.patch b/debian/patches-rt/0006-printk-nbcon-Add-callbacks-to-synchronize-with-drive.patch
index 0323d580c4..ef850cfd27 100644
--- a/debian/patches-rt/0006-printk-nbcon-Add-callbacks-to-synchronize-with-drive.patch
+++ b/debian/patches-rt/0006-printk-nbcon-Add-callbacks-to-synchronize-with-drive.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Mon, 18 Mar 2024 10:11:56 +0000
-Subject: [PATCH 06/46] printk: nbcon: Add callbacks to synchronize with driver
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 06/48] printk: nbcon: Add callbacks to synchronize with driver
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Console drivers typically must deal with access to the hardware
via user input/output (such as an interactive login shell) and
@@ -12,19 +12,20 @@ serial consoles).
Until now, usage of this driver-specific locking has been hidden
from the printk-subsystem and implemented within the various
-console callbacks. However, for nbcon consoles, it is necessary
-that the printk-subsystem uses the driver-specific locking so
-that nbcon console ownership can be acquired _after_ the
-driver-specific locking has succeeded. This allows for lock
-contention to exist on the more context-friendly driver-specific
-locking rather than nbcon console ownership (for non-emergency
-and non-panic cases).
+console callbacks. However, nbcon consoles would need to use it
+even in the generic code.
-Require nbcon consoles to implement two new callbacks
-(device_lock(), device_unlock()) that will use whatever
-synchronization mechanism the driver is using for itself.
+Add device_lock() and device_unlock() callbacks which will need
+to be implemented by nbcon consoles.
+
+The callbacks will use whatever synchronization mechanism the
+driver is using for itself. The minimum requirement is to
+prevent CPU migration. This allows a context-friendly
+acquisition of nbcon console ownership in non-emergency and
+non-panic contexts.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/console.h | 43 +++++++++++++++++++++++++++++++++++++++++++
diff --git a/debian/patches-rt/drm-i915-guc-Consider-also-RCU-depth-in-busy-loop.patch b/debian/patches-rt/0007-drm-i915-guc-Consider-also-RCU-depth-in-busy-loop.patch
index 30f4ed6cf3..e2976aacd5 100644
--- a/debian/patches-rt/drm-i915-guc-Consider-also-RCU-depth-in-busy-loop.patch
+++ b/debian/patches-rt/0007-drm-i915-guc-Consider-also-RCU-depth-in-busy-loop.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 3 Oct 2023 21:37:21 +0200
-Subject: [PATCH] drm/i915/guc: Consider also RCU depth in busy loop.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 7/8] drm/i915/guc: Consider also RCU depth in busy loop.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
intel_guc_send_busy_loop() looks at in_atomic() and irqs_disabled() to
decide if it should busy-spin while waiting or if it may sleep.
@@ -11,6 +11,7 @@ acquired leading to RCU splats while the function sleeps.
Check also if RCU has been disabled.
Reported-by: "John B. Wyatt IV" <jwyatt@redhat.com>
+Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/gpu/drm/i915/gt/uc/intel_guc.h | 2 +-
@@ -18,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
-@@ -360,7 +360,7 @@ static inline int intel_guc_send_busy_lo
+@@ -362,7 +362,7 @@ static inline int intel_guc_send_busy_lo
{
int err;
unsigned int sleep_period_ms = 1;
diff --git a/debian/patches-rt/0007-netfilter-br_netfilter-Use-nested-BH-locking-for-brn.patch b/debian/patches-rt/0007-netfilter-br_netfilter-Use-nested-BH-locking-for-brn.patch
new file mode 100644
index 0000000000..ab910553af
--- /dev/null
+++ b/debian/patches-rt/0007-netfilter-br_netfilter-Use-nested-BH-locking-for-brn.patch
@@ -0,0 +1,98 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 21 Aug 2023 09:59:09 +0200
+Subject: [PATCH 07/15] netfilter: br_netfilter: Use nested-BH locking for
+ brnf_frag_data_storage.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+brnf_frag_data_storage is a per-CPU variable and relies on disabled BH
+for its locking. Without per-CPU locking in local_bh_disable() on
+PREEMPT_RT this data structure requires explicit locking.
+
+Add a local_lock_t to the data structure and use local_lock_nested_bh()
+for locking. This change adds only lockdep coverage and does not alter
+the functional behaviour for !PREEMPT_RT.
+
+Cc: Florian Westphal <fw@strlen.de>
+Cc: Jozsef Kadlecsik <kadlec@netfilter.org>
+Cc: Nikolay Aleksandrov <razor@blackwall.org>
+Cc: Pablo Neira Ayuso <pablo@netfilter.org>
+Cc: Roopa Prabhu <roopa@nvidia.com>
+Cc: bridge@lists.linux.dev
+Cc: coreteam@netfilter.org
+Cc: netfilter-devel@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/bridge/br_netfilter_hooks.c | 20 ++++++++++++++++----
+ 1 file changed, 16 insertions(+), 4 deletions(-)
+
+--- a/net/bridge/br_netfilter_hooks.c
++++ b/net/bridge/br_netfilter_hooks.c
+@@ -137,6 +137,7 @@ static inline bool is_pppoe_ipv6(const s
+ #define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN)
+
+ struct brnf_frag_data {
++ local_lock_t bh_lock;
+ char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH];
+ u8 encap_size;
+ u8 size;
+@@ -144,7 +145,9 @@ struct brnf_frag_data {
+ __be16 vlan_proto;
+ };
+
+-static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage);
++static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage) = {
++ .bh_lock = INIT_LOCAL_LOCK(bh_lock),
++};
+
+ static void nf_bridge_info_free(struct sk_buff *skb)
+ {
+@@ -850,6 +853,7 @@ static int br_nf_dev_queue_xmit(struct n
+ {
+ struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+ unsigned int mtu, mtu_reserved;
++ int ret;
+
+ mtu_reserved = nf_bridge_mtu_reduction(skb);
+ mtu = skb->dev->mtu;
+@@ -882,6 +886,7 @@ static int br_nf_dev_queue_xmit(struct n
+
+ IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
+
++ local_lock_nested_bh(&brnf_frag_data_storage.bh_lock);
+ data = this_cpu_ptr(&brnf_frag_data_storage);
+
+ if (skb_vlan_tag_present(skb)) {
+@@ -897,7 +902,9 @@ static int br_nf_dev_queue_xmit(struct n
+ skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
+ data->size);
+
+- return br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit);
++ ret = br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit);
++ local_unlock_nested_bh(&brnf_frag_data_storage.bh_lock);
++ return ret;
+ }
+ if (IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) &&
+ skb->protocol == htons(ETH_P_IPV6)) {
+@@ -909,6 +916,7 @@ static int br_nf_dev_queue_xmit(struct n
+
+ IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
+
++ local_lock_nested_bh(&brnf_frag_data_storage.bh_lock);
+ data = this_cpu_ptr(&brnf_frag_data_storage);
+ data->encap_size = nf_bridge_encap_header_len(skb);
+ data->size = ETH_HLEN + data->encap_size;
+@@ -916,8 +924,12 @@ static int br_nf_dev_queue_xmit(struct n
+ skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
+ data->size);
+
+- if (v6ops)
+- return v6ops->fragment(net, sk, skb, br_nf_push_frag_xmit);
++ if (v6ops) {
++ ret = v6ops->fragment(net, sk, skb, br_nf_push_frag_xmit);
++ local_unlock_nested_bh(&brnf_frag_data_storage.bh_lock);
++ return ret;
++ }
++ local_unlock_nested_bh(&brnf_frag_data_storage.bh_lock);
+
+ kfree_skb(skb);
+ return -EMSGSIZE;
diff --git a/debian/patches-rt/0004-perf-Split-__perf_pending_irq-out-of-perf_pending_ir.patch b/debian/patches-rt/0007-perf-Split-__perf_pending_irq-out-of-perf_pending_ir.patch
index a96b966fa5..d0bf8acc35 100644
--- a/debian/patches-rt/0004-perf-Split-__perf_pending_irq-out-of-perf_pending_ir.patch
+++ b/debian/patches-rt/0007-perf-Split-__perf_pending_irq-out-of-perf_pending_ir.patch
@@ -1,11 +1,11 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 12 Mar 2024 19:01:52 +0100
-Subject: [PATCH 4/4] perf: Split __perf_pending_irq() out of
+Date: Thu, 4 Jul 2024 19:03:41 +0200
+Subject: [PATCH 7/7] perf: Split __perf_pending_irq() out of
perf_pending_irq()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
perf_pending_irq() invokes perf_event_wakeup() and __perf_pending_irq().
-The former is in charge of waking any tasks which wait to be woken up
+The former is in charge of waking any tasks which waits to be woken up
while the latter disables perf-events.
The irq_work perf_pending_irq(), while this an irq_work, the callback
@@ -23,12 +23,13 @@ PREEMPT_RT. Rename the split out callback to perf_pending_disable().
Tested-by: Marco Elver <elver@google.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Reported-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-Link: https://lore.kernel.org/r/20240312180814.3373778-5-bigeasy@linutronix.de
+Link: https://lore.kernel.org/all/ZMAtZ2t43GXoF6tM@kernel.org/
+Link: https://lore.kernel.org/r/20240704170424.1466941-8-bigeasy@linutronix.de
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/perf_event.h | 1 +
- kernel/events/core.c | 31 +++++++++++++++++++++++--------
- 2 files changed, 24 insertions(+), 8 deletions(-)
+ kernel/events/core.c | 29 ++++++++++++++++++++++-------
+ 2 files changed, 23 insertions(+), 7 deletions(-)
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -39,10 +40,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ struct irq_work pending_disable_irq;
struct callback_head pending_task;
unsigned int pending_work;
-
+ struct rcuwait pending_work_wait;
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
-@@ -2449,7 +2449,7 @@ static void __perf_event_disable(struct
+@@ -2451,7 +2451,7 @@ static void __perf_event_disable(struct
* hold the top-level event's child_mutex, so any descendant that
* goes to exit will block in perf_event_exit_event().
*
@@ -51,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* is the current context on this CPU and preemption is disabled,
* hence we can't get into perf_event_task_sched_out for this context.
*/
-@@ -2489,7 +2489,7 @@ EXPORT_SYMBOL_GPL(perf_event_disable);
+@@ -2491,7 +2491,7 @@ EXPORT_SYMBOL_GPL(perf_event_disable);
void perf_event_disable_inatomic(struct perf_event *event)
{
event->pending_disable = 1;
@@ -60,15 +61,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#define MAX_INTERRUPTS (~0ULL)
-@@ -5175,6 +5175,7 @@ static void perf_addr_filters_splice(str
+@@ -5218,6 +5218,7 @@ static void perf_pending_task_sync(struc
static void _free_event(struct perf_event *event)
{
irq_work_sync(&event->pending_irq);
+ irq_work_sync(&event->pending_disable_irq);
+ perf_pending_task_sync(event);
unaccount_event(event);
-
-@@ -6711,7 +6712,7 @@ static void perf_sigtrap(struct perf_eve
+@@ -6760,7 +6761,7 @@ static void perf_sigtrap(struct perf_eve
/*
* Deliver the pending work in-event-context or follow the context.
*/
@@ -77,7 +78,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
int cpu = READ_ONCE(event->oncpu);
-@@ -6749,11 +6750,26 @@ static void __perf_pending_irq(struct pe
+@@ -6798,11 +6799,26 @@ static void __perf_pending_irq(struct pe
* irq_work_queue(); // FAILS
*
* irq_work_run()
@@ -106,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void perf_pending_irq(struct irq_work *entry)
-@@ -6776,8 +6792,6 @@ static void perf_pending_irq(struct irq_
+@@ -6825,8 +6841,6 @@ static void perf_pending_irq(struct irq_
perf_event_wakeup(event);
}
@@ -115,20 +116,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (rctx >= 0)
perf_swevent_put_recursion_context(rctx);
}
-@@ -9568,7 +9582,7 @@ static int __perf_event_overflow(struct
- * is processed.
- */
- if (in_nmi())
-- irq_work_queue(&event->pending_irq);
-+ irq_work_queue(&event->pending_disable_irq);
- } else if (event->attr.exclude_kernel && valid_sample) {
- /*
- * Should not be able to return to user space without
-@@ -11908,6 +11922,7 @@ perf_event_alloc(struct perf_event_attr
+@@ -11967,6 +11981,7 @@ perf_event_alloc(struct perf_event_attr
init_waitqueue_head(&event->waitq);
init_irq_work(&event->pending_irq, perf_pending_irq);
+ event->pending_disable_irq = IRQ_WORK_INIT_HARD(perf_pending_disable);
init_task_work(&event->pending_task, perf_pending_task);
+ rcuwait_init(&event->pending_work_wait);
- mutex_init(&event->mmap_mutex);
diff --git a/debian/patches-rt/0007-printk-nbcon-Use-driver-synchronization-while-un-reg.patch b/debian/patches-rt/0007-printk-nbcon-Use-driver-synchronization-while-un-reg.patch
index 08e2d52852..e2f23447fe 100644
--- a/debian/patches-rt/0007-printk-nbcon-Use-driver-synchronization-while-un-reg.patch
+++ b/debian/patches-rt/0007-printk-nbcon-Use-driver-synchronization-while-un-reg.patch
@@ -1,8 +1,8 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Fri, 15 Mar 2024 15:38:22 +0000
-Subject: [PATCH 07/46] printk: nbcon: Use driver synchronization while
+Subject: [PATCH 07/48] printk: nbcon: Use driver synchronization while
(un)registering
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Console drivers typically have to deal with access to the
hardware via user input/output (such as an interactive login
@@ -34,22 +34,28 @@ For example, these callbacks lock/unlock the port lock for
serial port drivers.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/printk/printk.c | 29 +++++++++++++++++++++++++++++
- 1 file changed, 29 insertions(+)
+ kernel/printk/printk.c | 33 ++++++++++++++++++++++++++++++++-
+ 1 file changed, 32 insertions(+), 1 deletion(-)
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -3466,6 +3466,7 @@ void register_console(struct console *ne
- struct console *con;
+@@ -3475,9 +3475,11 @@ static int unregister_console_locked(str
+ */
+ void register_console(struct console *newcon)
+ {
+- struct console *con;
++ bool use_device_lock = (newcon->flags & CON_NBCON) && newcon->write_atomic;
bool bootcon_registered = false;
bool realcon_registered = false;
++ struct console *con;
+ unsigned long flags;
u64 init_seq;
int err;
-@@ -3553,6 +3554,19 @@ void register_console(struct console *ne
+@@ -3565,6 +3567,19 @@ void register_console(struct console *ne
}
/*
@@ -62,33 +68,34 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ * Use the driver synchronization to ensure that the hardware is not
+ * in use while this new console transitions to being registered.
+ */
-+ if ((newcon->flags & CON_NBCON) && newcon->write_atomic)
++ if (use_device_lock)
+ newcon->device_lock(newcon, &flags);
+
+ /*
* Put this console in the list - keep the
* preferred driver at the head of the list.
*/
-@@ -3576,6 +3590,10 @@ void register_console(struct console *ne
+@@ -3588,6 +3603,10 @@ void register_console(struct console *ne
* register_console() completes.
*/
+ /* This new console is now registered. */
-+ if ((newcon->flags & CON_NBCON) && newcon->write_atomic)
++ if (use_device_lock)
+ newcon->device_unlock(newcon, flags);
+
console_sysfs_notify();
/*
-@@ -3604,6 +3622,7 @@ EXPORT_SYMBOL(register_console);
+@@ -3616,6 +3635,8 @@ EXPORT_SYMBOL(register_console);
/* Must be called under console_list_lock(). */
static int unregister_console_locked(struct console *console)
{
++ bool use_device_lock = (console->flags & CON_NBCON) && console->write_atomic;
+ unsigned long flags;
int res;
lockdep_assert_console_list_lock_held();
-@@ -3622,8 +3641,18 @@ static int unregister_console_locked(str
+@@ -3634,8 +3655,18 @@ static int unregister_console_locked(str
if (!console_is_registered_locked(console))
return -ENODEV;
@@ -96,12 +103,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ * Use the driver synchronization to ensure that the hardware is not
+ * in use while this console transitions to being unregistered.
+ */
-+ if ((console->flags & CON_NBCON) && console->write_atomic)
++ if (use_device_lock)
+ console->device_lock(console, &flags);
+
hlist_del_init_rcu(&console->node);
-+ if ((console->flags & CON_NBCON) && console->write_atomic)
++ if (use_device_lock)
+ console->device_unlock(console, flags);
+
/*
diff --git a/debian/patches-rt/Revert-drm-i915-Depend-on-PREEMPT_RT.patch b/debian/patches-rt/0008-Revert-drm-i915-Depend-on-PREEMPT_RT.patch
index a94094c5bc..bdbd84d448 100644
--- a/debian/patches-rt/Revert-drm-i915-Depend-on-PREEMPT_RT.patch
+++ b/debian/patches-rt/0008-Revert-drm-i915-Depend-on-PREEMPT_RT.patch
@@ -1,11 +1,12 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 21 Feb 2022 17:59:14 +0100
-Subject: [PATCH] Revert "drm/i915: Depend on !PREEMPT_RT."
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 8/8] Revert "drm/i915: Depend on !PREEMPT_RT."
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Once the known issues are addressed, it should be safe to enable the
driver.
+Acked-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/gpu/drm/i915/Kconfig | 1 -
diff --git a/debian/patches-rt/0008-drm-i915-gt-Queue-and-wait-for-the-irq_work-item.patch b/debian/patches-rt/0008-drm-i915-gt-Queue-and-wait-for-the-irq_work-item.patch
deleted file mode 100644
index 87797042a6..0000000000
--- a/debian/patches-rt/0008-drm-i915-gt-Queue-and-wait-for-the-irq_work-item.patch
+++ /dev/null
@@ -1,42 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 8 Sep 2021 17:18:00 +0200
-Subject: [PATCH 08/10] drm/i915/gt: Queue and wait for the irq_work item.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
-
-Disabling interrupts and invoking the irq_work function directly breaks
-on PREEMPT_RT.
-PREEMPT_RT does not invoke all irq_work from hardirq context because
-some of the user have spinlock_t locking in the callback function.
-These locks are then turned into a sleeping locks which can not be
-acquired with disabled interrupts.
-
-Using irq_work_queue() has the benefit that the irqwork will be invoked
-in the regular context. In general there is "no" delay between enqueuing
-the callback and its invocation because the interrupt is raised right
-away on architectures which support it (which includes x86).
-
-Use irq_work_queue() + irq_work_sync() instead invoking the callback
-directly.
-
-Reported-by: Clark Williams <williams@redhat.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
----
- drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 5 ++---
- 1 file changed, 2 insertions(+), 3 deletions(-)
-
---- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
-+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
-@@ -317,10 +317,9 @@ void __intel_breadcrumbs_park(struct int
- /* Kick the work once more to drain the signalers, and disarm the irq */
- irq_work_sync(&b->irq_work);
- while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) {
-- local_irq_disable();
-- signal_irq_work(&b->irq_work);
-- local_irq_enable();
-+ irq_work_queue(&b->irq_work);
- cond_resched();
-+ irq_work_sync(&b->irq_work);
- }
- }
-
diff --git a/debian/patches-rt/0008-net-softnet_data-Make-xmit-per-task.patch b/debian/patches-rt/0008-net-softnet_data-Make-xmit-per-task.patch
new file mode 100644
index 0000000000..24ba5a1789
--- /dev/null
+++ b/debian/patches-rt/0008-net-softnet_data-Make-xmit-per-task.patch
@@ -0,0 +1,227 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 21 Aug 2023 11:47:40 +0200
+Subject: [PATCH 08/15] net: softnet_data: Make xmit per task.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+Softirq is preemptible on PREEMPT_RT. Without a per-CPU lock in
+local_bh_disable() there is no guarantee that only one device is
+transmitting at a time.
+With preemption and multiple senders it is possible that the per-CPU
+`recursion' counter gets incremented by different threads and exceeds
+XMIT_RECURSION_LIMIT leading to a false positive recursion alert.
+The `more' member is subject to similar problems if set by one thread
+for one driver and wrongly used by another driver within another thread.
+
+Instead of adding a lock to protect the per-CPU variable, it is simpler
+to make xmit per-task. Sending and receiving skbs always happens in
+thread context anyway.
+
+Having a lock to protect the per-CPU counter would needlessly
+block/serialize two sending threads. It would also require a recursive
+lock so that the owner can increment the counter further.
+
+Make softnet_data.xmit a task_struct member on PREEMPT_RT. Add the
+needed wrappers.
+
+Cc: Ben Segall <bsegall@google.com>
+Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
+Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
+Cc: Juri Lelli <juri.lelli@redhat.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Valentin Schneider <vschneid@redhat.com>
+Cc: Vincent Guittot <vincent.guittot@linaro.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/netdevice.h | 42 ++++++++++++++++++++++++++++++-----------
+ include/linux/netdevice_xmit.h | 13 ++++++++++++
+ include/linux/sched.h | 5 +++-
+ net/core/dev.c | 14 +++++++++++++
+ net/core/dev.h | 18 +++++++++++++++++
+ 5 files changed, 80 insertions(+), 12 deletions(-)
+ create mode 100644 include/linux/netdevice_xmit.h
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -43,6 +43,7 @@
+
+ #include <linux/netdev_features.h>
+ #include <linux/neighbour.h>
++#include <linux/netdevice_xmit.h>
+ #include <uapi/linux/netdevice.h>
+ #include <uapi/linux/if_bonding.h>
+ #include <uapi/linux/pkt_cls.h>
+@@ -3222,13 +3223,7 @@ struct softnet_data {
+ struct sk_buff_head xfrm_backlog;
+ #endif
+ /* written and read only by owning cpu: */
+- struct {
+- u16 recursion;
+- u8 more;
+-#ifdef CONFIG_NET_EGRESS
+- u8 skip_txqueue;
+-#endif
+- } xmit;
++ struct netdev_xmit xmit;
+ #ifdef CONFIG_RPS
+ /* input_queue_head should be written by cpu owning this struct,
+ * and only read by other cpus. Worth using a cache line.
+@@ -3256,10 +3251,18 @@ struct softnet_data {
+
+ DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
+
++#ifndef CONFIG_PREEMPT_RT
+ static inline int dev_recursion_level(void)
+ {
+ return this_cpu_read(softnet_data.xmit.recursion);
+ }
++#else
++static inline int dev_recursion_level(void)
++{
++ return current->net_xmit.recursion;
++}
++
++#endif
+
+ void __netif_schedule(struct Qdisc *q);
+ void netif_schedule_queue(struct netdev_queue *txq);
+@@ -4874,18 +4877,35 @@ static inline ktime_t netdev_get_tstamp(
+ return hwtstamps->hwtstamp;
+ }
+
+-static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
+- struct sk_buff *skb, struct net_device *dev,
+- bool more)
++#ifndef CONFIG_PREEMPT_RT
++static inline void netdev_xmit_set_more(bool more)
+ {
+ __this_cpu_write(softnet_data.xmit.more, more);
+- return ops->ndo_start_xmit(skb, dev);
+ }
+
+ static inline bool netdev_xmit_more(void)
+ {
+ return __this_cpu_read(softnet_data.xmit.more);
+ }
++#else
++static inline void netdev_xmit_set_more(bool more)
++{
++ current->net_xmit.more = more;
++}
++
++static inline bool netdev_xmit_more(void)
++{
++ return current->net_xmit.more;
++}
++#endif
++
++static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
++ struct sk_buff *skb, struct net_device *dev,
++ bool more)
++{
++ netdev_xmit_set_more(more);
++ return ops->ndo_start_xmit(skb, dev);
++}
+
+ static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
+ struct netdev_queue *txq, bool more)
+--- /dev/null
++++ b/include/linux/netdevice_xmit.h
+@@ -0,0 +1,13 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++#ifndef _LINUX_NETDEVICE_XMIT_H
++#define _LINUX_NETDEVICE_XMIT_H
++
++struct netdev_xmit {
++ u16 recursion;
++ u8 more;
++#ifdef CONFIG_NET_EGRESS
++ u8 skip_txqueue;
++#endif
++};
++
++#endif
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -36,6 +36,7 @@
+ #include <linux/signal_types.h>
+ #include <linux/syscall_user_dispatch_types.h>
+ #include <linux/mm_types_task.h>
++#include <linux/netdevice_xmit.h>
+ #include <linux/task_io_accounting.h>
+ #include <linux/posix-timers_types.h>
+ #include <linux/restart_block.h>
+@@ -981,7 +982,9 @@ struct task_struct {
+ /* delay due to memory thrashing */
+ unsigned in_thrashing:1;
+ #endif
+-
++#ifdef CONFIG_PREEMPT_RT
++ struct netdev_xmit net_xmit;
++#endif
+ unsigned long atomic_flags; /* Flags requiring atomic access. */
+
+ struct restart_block restart_block;
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3940,6 +3940,7 @@ netdev_tx_queue_mapping(struct net_devic
+ return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm));
+ }
+
++#ifndef CONFIG_PREEMPT_RT
+ static bool netdev_xmit_txqueue_skipped(void)
+ {
+ return __this_cpu_read(softnet_data.xmit.skip_txqueue);
+@@ -3950,6 +3951,19 @@ void netdev_xmit_skip_txqueue(bool skip)
+ __this_cpu_write(softnet_data.xmit.skip_txqueue, skip);
+ }
+ EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
++
++#else
++static bool netdev_xmit_txqueue_skipped(void)
++{
++ return current->net_xmit.skip_txqueue;
++}
++
++void netdev_xmit_skip_txqueue(bool skip)
++{
++ current->net_xmit.skip_txqueue = skip;
++}
++EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
++#endif
+ #endif /* CONFIG_NET_EGRESS */
+
+ #ifdef CONFIG_NET_XGRESS
+--- a/net/core/dev.h
++++ b/net/core/dev.h
+@@ -150,6 +150,8 @@ struct napi_struct *napi_by_id(unsigned
+ void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);
+
+ #define XMIT_RECURSION_LIMIT 8
++
++#ifndef CONFIG_PREEMPT_RT
+ static inline bool dev_xmit_recursion(void)
+ {
+ return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
+@@ -165,5 +167,21 @@ static inline void dev_xmit_recursion_de
+ {
+ __this_cpu_dec(softnet_data.xmit.recursion);
+ }
++#else
++static inline bool dev_xmit_recursion(void)
++{
++ return unlikely(current->net_xmit.recursion > XMIT_RECURSION_LIMIT);
++}
++
++static inline void dev_xmit_recursion_inc(void)
++{
++ current->net_xmit.recursion++;
++}
++
++static inline void dev_xmit_recursion_dec(void)
++{
++ current->net_xmit.recursion--;
++}
++#endif
+
+ #endif
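
To illustrate the intent of these wrappers, here is a minimal sketch of a
guarded transmit step. xmit_one_guarded() is a hypothetical caller; only the
dev_xmit_recursion*() helpers and netdev_start_xmit() come from the code above:

/* Hypothetical caller of the wrappers above. The recursion counter
 * lives in per-CPU softnet_data on !PREEMPT_RT and in
 * current->net_xmit on PREEMPT_RT; the caller does not care which.
 */
static netdev_tx_t xmit_one_guarded(struct sk_buff *skb,
				    struct net_device *dev,
				    struct netdev_queue *txq)
{
	netdev_tx_t ret = NETDEV_TX_BUSY;

	if (dev_xmit_recursion())
		return ret;	/* nested too deep, e.g. via skb_do_redirect() */

	dev_xmit_recursion_inc();
	ret = netdev_start_xmit(skb, dev, txq, false);
	dev_xmit_recursion_dec();

	return ret;
}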
diff --git a/debian/patches-rt/0008-serial-core-Provide-low-level-functions-to-lock-port.patch b/debian/patches-rt/0008-serial-core-Provide-low-level-functions-to-lock-port.patch
index 75761ae021..51a1155fec 100644
--- a/debian/patches-rt/0008-serial-core-Provide-low-level-functions-to-lock-port.patch
+++ b/debian/patches-rt/0008-serial-core-Provide-low-level-functions-to-lock-port.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Mon, 11 Dec 2023 09:19:18 +0000
-Subject: [PATCH 08/46] serial: core: Provide low-level functions to lock port
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 08/48] serial: core: Provide low-level functions to lock port
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
It will be necessary at times for the uart nbcon console
drivers to acquire the port lock directly (without the
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
-@@ -591,6 +591,24 @@ struct uart_port {
+@@ -590,6 +590,24 @@ struct uart_port {
void *private_data; /* generic platform data pointer */
};
diff --git a/debian/patches-rt/0009-dev-Remove-PREEMPT_RT-ifdefs-from-backlog_lock.patch b/debian/patches-rt/0009-dev-Remove-PREEMPT_RT-ifdefs-from-backlog_lock.patch
new file mode 100644
index 0000000000..bc5b67d203
--- /dev/null
+++ b/debian/patches-rt/0009-dev-Remove-PREEMPT_RT-ifdefs-from-backlog_lock.patch
@@ -0,0 +1,60 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 22 Aug 2023 18:14:44 +0200
+Subject: [PATCH 09/15] dev: Remove PREEMPT_RT ifdefs from backlog_lock.*().
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+The backlog_napi locking (previously RPS) relies on explicit locking if
+either RPS or backlog NAPI is enabled. If both are disabled then locking
+was achieved by disabling interrupts except on PREEMPT_RT. PREEMPT_RT
+was excluded because the needed synchronisation was already provided
+by local_bh_disable().
+
+Since the introduction of backlog NAPI and making it mandatory for
+PREEMPT_RT the ifdef within backlog_lock.*() is obsolete and can be
+removed.
+
+Remove the ifdefs in backlog_lock.*().
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/core/dev.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -229,7 +229,7 @@ static inline void backlog_lock_irq_save
+ {
+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
+ spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
+- else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ else
+ local_irq_save(*flags);
+ }
+
+@@ -237,7 +237,7 @@ static inline void backlog_lock_irq_disa
+ {
+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
+ spin_lock_irq(&sd->input_pkt_queue.lock);
+- else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ else
+ local_irq_disable();
+ }
+
+@@ -246,7 +246,7 @@ static inline void backlog_unlock_irq_re
+ {
+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
+ spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
+- else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ else
+ local_irq_restore(*flags);
+ }
+
+@@ -254,7 +254,7 @@ static inline void backlog_unlock_irq_en
+ {
+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
+ spin_unlock_irq(&sd->input_pkt_queue.lock);
+- else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ else
+ local_irq_enable();
+ }
+
diff --git a/debian/patches-rt/0009-serial-core-Introduce-wrapper-to-set-uart_port-cons.patch b/debian/patches-rt/0009-serial-core-Introduce-wrapper-to-set-uart_port-cons.patch
index 68106c6e89..f2e50b7924 100644
--- a/debian/patches-rt/0009-serial-core-Introduce-wrapper-to-set-uart_port-cons.patch
+++ b/debian/patches-rt/0009-serial-core-Introduce-wrapper-to-set-uart_port-cons.patch
@@ -1,7 +1,10 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Wed, 17 Apr 2024 12:13:24 +0000
-Subject: [PATCH 09/46] serial: core: Introduce wrapper to set @uart_port->cons
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 09/48] serial: core: Introduce wrapper to set @uart_port->cons
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Introduce uart_port_set_cons() as a wrapper to set @cons of a
uart_port. The wrapper sets @cons under the port lock in order
@@ -11,6 +14,9 @@ commit relating to the port lock wrappers, which rely on @cons
not changing between lock and unlock.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Tested-by: Théo Lebrun <theo.lebrun@bootlin.com> # EyeQ5, AMBA-PL011
+Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/tty/serial/8250/8250_core.c | 6 +++---
@@ -21,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
-@@ -627,11 +627,11 @@ static int univ8250_console_setup(struct
+@@ -624,11 +624,11 @@ static int univ8250_console_setup(struct
port = &serial8250_ports[co->index].port;
/* link port to console */
@@ -35,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return retval;
}
-@@ -689,7 +689,7 @@ static int univ8250_console_match(struct
+@@ -686,7 +686,7 @@ static int univ8250_console_match(struct
continue;
co->index = i;
@@ -46,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
-@@ -2488,7 +2488,7 @@ static int pl011_console_match(struct co
+@@ -2480,7 +2480,7 @@ static int pl011_console_match(struct co
continue;
co->index = i;
@@ -57,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
-@@ -3172,8 +3172,15 @@ static int serial_core_add_one_port(stru
+@@ -3168,8 +3168,15 @@ static int serial_core_add_one_port(stru
state->uart_port = uport;
uport->state = state;
@@ -74,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
uport->minor = drv->tty_driver->minor_start + uport->line;
uport->name = kasprintf(GFP_KERNEL, "%s%d", drv->dev_name,
drv->tty_driver->name_base + uport->line);
-@@ -3182,13 +3189,6 @@ static int serial_core_add_one_port(stru
+@@ -3178,13 +3185,6 @@ static int serial_core_add_one_port(stru
goto out;
}
@@ -90,7 +96,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
-@@ -610,6 +610,23 @@ static inline void __uart_port_unlock_ir
+@@ -609,6 +609,23 @@ static inline void __uart_port_unlock_ir
}
/**
diff --git a/debian/patches-rt/0010-console-Improve-console_srcu_read_flags-comments.patch b/debian/patches-rt/0010-console-Improve-console_srcu_read_flags-comments.patch
index ea346ac04c..94840da42f 100644
--- a/debian/patches-rt/0010-console-Improve-console_srcu_read_flags-comments.patch
+++ b/debian/patches-rt/0010-console-Improve-console_srcu_read_flags-comments.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Wed, 17 Apr 2024 14:34:50 +0000
-Subject: [PATCH 10/46] console: Improve console_srcu_read_flags() comments
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 10/48] console: Improve console_srcu_read_flags() comments
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
It was not clear when exactly console_srcu_read_flags() must be
used vs. directly reading @console->flags.
@@ -17,6 +17,7 @@ registered console and that exit/cleanup routines will not run
if the console is in the process of unregistration.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/console.h | 28 +++++++++++++++++-----------
diff --git a/debian/patches-rt/0010-dev-Use-nested-BH-locking-for-softnet_data.process_q.patch b/debian/patches-rt/0010-dev-Use-nested-BH-locking-for-softnet_data.process_q.patch
new file mode 100644
index 0000000000..c21de546f2
--- /dev/null
+++ b/debian/patches-rt/0010-dev-Use-nested-BH-locking-for-softnet_data.process_q.patch
@@ -0,0 +1,94 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 22 Aug 2023 18:15:33 +0200
+Subject: [PATCH 10/15] dev: Use nested-BH locking for
+ softnet_data.process_queue.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+softnet_data::process_queue is a per-CPU variable and relies on disabled
+BH for its locking. Without per-CPU locking in local_bh_disable() on
+PREEMPT_RT this data structure requires explicit locking.
+
+softnet_data::input_queue_head can be updated locklessly. This is fine
+because the value is only updated CPU-locally, by the local backlog_napi
+thread.
+
+Add a local_lock_t to softnet_data and use local_lock_nested_bh() for locking
+of process_queue. This change adds only lockdep coverage and does not
+alter the functional behaviour for !PREEMPT_RT.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/netdevice.h | 1 +
+ net/core/dev.c | 12 +++++++++++-
+ 2 files changed, 12 insertions(+), 1 deletion(-)
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -3201,6 +3201,7 @@ static inline bool dev_has_header(const
+ struct softnet_data {
+ struct list_head poll_list;
+ struct sk_buff_head process_queue;
++ local_lock_t process_queue_bh_lock;
+
+ /* stats */
+ unsigned int processed;
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -449,7 +449,9 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
+ * queue in the local softnet handler.
+ */
+
+-DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
++DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data) = {
++ .process_queue_bh_lock = INIT_LOCAL_LOCK(process_queue_bh_lock),
++};
+ EXPORT_PER_CPU_SYMBOL(softnet_data);
+
+ /* Page_pool has a lockless array/stack to alloc/recycle pages.
+@@ -5949,6 +5951,7 @@ static void flush_backlog(struct work_st
+ }
+ backlog_unlock_irq_enable(sd);
+
++ local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
+ skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
+ if (skb->dev->reg_state == NETREG_UNREGISTERING) {
+ __skb_unlink(skb, &sd->process_queue);
+@@ -5956,6 +5959,7 @@ static void flush_backlog(struct work_st
+ rps_input_queue_head_incr(sd);
+ }
+ }
++ local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
+ local_bh_enable();
+ }
+
+@@ -6077,7 +6081,9 @@ static int process_backlog(struct napi_s
+ while (again) {
+ struct sk_buff *skb;
+
++ local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
+ while ((skb = __skb_dequeue(&sd->process_queue))) {
++ local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
+ rcu_read_lock();
+ __netif_receive_skb(skb);
+ rcu_read_unlock();
+@@ -6086,7 +6092,9 @@ static int process_backlog(struct napi_s
+ return work;
+ }
+
++ local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
+ }
++ local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
+
+ backlog_lock_irq_disable(sd);
+ if (skb_queue_empty(&sd->input_pkt_queue)) {
+@@ -6101,8 +6109,10 @@ static int process_backlog(struct napi_s
+ napi->state &= NAPIF_STATE_THREADED;
+ again = false;
+ } else {
++ local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
+ skb_queue_splice_tail_init(&sd->input_pkt_queue,
+ &sd->process_queue);
++ local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
+ }
+ backlog_unlock_irq_enable(sd);
+ }
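
The pattern generalizes beyond process_queue. A minimal sketch with an
invented per-CPU structure (my_percpu_state and my_state_enqueue are
illustrative, not part of the patch):

struct my_percpu_state {
	struct sk_buff_head queue;	/* init with skb_queue_head_init() elsewhere */
	local_lock_t lock;
};

static DEFINE_PER_CPU(struct my_percpu_state, my_state) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void my_state_enqueue(struct sk_buff *skb)
{
	/* Caller runs in BH context, e.g. NAPI poll. On !PREEMPT_RT this
	 * adds only lockdep coverage; on PREEMPT_RT it takes a real
	 * per-CPU lock.
	 */
	local_lock_nested_bh(&my_state.lock);
	__skb_queue_tail(this_cpu_ptr(&my_state.queue), skb);
	local_unlock_nested_bh(&my_state.lock);
}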
diff --git a/debian/patches-rt/0011-lwt-Don-t-disable-migration-prio-invoking-BPF.patch b/debian/patches-rt/0011-lwt-Don-t-disable-migration-prio-invoking-BPF.patch
new file mode 100644
index 0000000000..93c21b4511
--- /dev/null
+++ b/debian/patches-rt/0011-lwt-Don-t-disable-migration-prio-invoking-BPF.patch
@@ -0,0 +1,40 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 26 Oct 2023 09:12:06 +0200
+Subject: [PATCH 11/15] lwt: Don't disable migration prio invoking BPF.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+There is no need to explicitly disable migration if bottom halves are
+also disabled. Disabling BH implies disabling migration.
+
+Remove migrate_disable() and rely solely on disabling BH to remain on
+the same CPU.
+
+Cc: bpf@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/core/lwt_bpf.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/net/core/lwt_bpf.c
++++ b/net/core/lwt_bpf.c
+@@ -40,10 +40,9 @@ static int run_lwt_bpf(struct sk_buff *s
+ {
+ int ret;
+
+- /* Migration disable and BH disable are needed to protect per-cpu
+- * redirect_info between BPF prog and skb_do_redirect().
++ /* Disabling BH is needed to protect per-CPU bpf_redirect_info between
++ * BPF prog and skb_do_redirect().
+ */
+- migrate_disable();
+ local_bh_disable();
+ bpf_compute_data_pointers(skb);
+ ret = bpf_prog_run_save_cb(lwt->prog, skb);
+@@ -78,7 +77,6 @@ static int run_lwt_bpf(struct sk_buff *s
+ }
+
+ local_bh_enable();
+- migrate_enable();
+
+ return ret;
+ }
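
The resulting pattern, condensed into a sketch (run_bpf_prog_bh() is an
invented name; only the BH-only protection mirrors the patch): disabling BH
keeps the task on its CPU, also on PREEMPT_RT, so per-CPU state such as
bpf_redirect_info stays consistent without migrate_disable():

static int run_bpf_prog_bh(struct bpf_prog *prog, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();	/* implies staying on this CPU */
	ret = bpf_prog_run_save_cb(prog, skb);
	local_bh_enable();

	return ret;
}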
diff --git a/debian/patches-rt/0011-nbcon-Provide-functions-for-drivers-to-acquire-conso.patch b/debian/patches-rt/0011-nbcon-Add-API-to-acquire-context-for-non-printing-op.patch
index 06cc991bff..0245fc1ecd 100644
--- a/debian/patches-rt/0011-nbcon-Provide-functions-for-drivers-to-acquire-conso.patch
+++ b/debian/patches-rt/0011-nbcon-Add-API-to-acquire-context-for-non-printing-op.patch
@@ -1,24 +1,39 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Wed, 17 Apr 2024 12:42:46 +0000
-Subject: [PATCH 11/46] nbcon: Provide functions for drivers to acquire console
- for non-printing
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 11/48] nbcon: Add API to acquire context for non-printing
+ operations
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
-Provide functions nbcon_driver_try_acquire() and
-nbcon_driver_release() to allow drivers to acquire the nbcon
-console and mark it unsafe for handover/takeover.
+Provide functions nbcon_device_try_acquire() and
+nbcon_device_release() which will try to acquire the nbcon
+console ownership with NBCON_PRIO_NORMAL and mark it unsafe for
+handover/takeover.
-These functions are to be used by nbcon drivers when performing
-non-printing activities that should be synchronized with their
-atomic_write() callback.
+These functions are to be used together with the device-specific
+locking when performing non-printing activities on the console
+device. They will allow synchronization against the
+atomic_write() callback which will be serialized, for higher
+priority contexts, only by acquiring the console context
+ownership.
+
+Pitfalls:
+
+The API must be called from a context with migration
+disabled because it uses per-CPU variables internally.
+
+The context is marked unsafe for a takeover the entire time. It
+guarantees full serialization against any atomic_write() caller
+except for the final flush in panic() which might try an unsafe
+takeover.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/console.h | 2 +
- include/linux/printk.h | 14 ++++++++++++
- kernel/printk/nbcon.c | 55 +++++++++++++++++++++++++++++++++++++++++++++++-
- 3 files changed, 70 insertions(+), 1 deletion(-)
+ include/linux/printk.h | 15 ++++++++++++
+ kernel/printk/nbcon.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++-
+ 3 files changed, 72 insertions(+), 1 deletion(-)
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -26,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
* @nbcon_state: State for nbcon consoles
* @nbcon_seq: Sequence number of the next record for nbcon to print
-+ * @nbcon_driver_ctxt: Context available for driver non-printing operations
++ * @nbcon_device_ctxt: Context available for non-printing operations
* @pbufs: Pointer to nbcon private buffer
*/
struct console {
@@ -34,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
atomic_t __private nbcon_state;
atomic_long_t __private nbcon_seq;
-+ struct nbcon_context __private nbcon_driver_ctxt;
++ struct nbcon_context __private nbcon_device_ctxt;
struct printk_buffers *pbufs;
};
@@ -49,26 +64,30 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern const char linux_banner[];
extern const char linux_proc_banner[];
-@@ -193,6 +195,8 @@ void show_regs_print_info(const char *lo
- extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold;
+@@ -194,6 +196,8 @@ extern asmlinkage void dump_stack_lvl(co
extern asmlinkage void dump_stack(void) __cold;
void printk_trigger_flush(void);
-+extern bool nbcon_driver_try_acquire(struct console *con);
-+extern void nbcon_driver_release(struct console *con);
+ void console_replay_all(void);
++extern bool nbcon_device_try_acquire(struct console *con);
++extern void nbcon_device_release(struct console *con);
#else
static inline __printf(1, 0)
int vprintk(const char *s, va_list args)
-@@ -272,6 +276,16 @@ static inline void dump_stack(void)
+@@ -273,9 +277,20 @@ static inline void dump_stack(void)
static inline void printk_trigger_flush(void)
{
}
+
-+static inline bool nbcon_driver_try_acquire(struct console *con)
+ static inline void console_replay_all(void)
+ {
+ }
++
++static inline bool nbcon_device_try_acquire(struct console *con)
+{
+ return false;
+}
+
-+static inline void nbcon_driver_release(struct console *con)
++static inline void nbcon_device_release(struct console *con)
+{
+}
+
@@ -103,18 +122,19 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static bool nbcon_context_try_acquire(struct nbcon_context *ctxt)
{
unsigned int cpu = smp_processor_id();
-@@ -989,3 +991,54 @@ void nbcon_free(struct console *con)
+@@ -989,3 +991,55 @@ void nbcon_free(struct console *con)
con->pbufs = NULL;
}
+
+/**
-+ * nbcon_driver_try_acquire - Try to acquire nbcon console and enter unsafe
++ * nbcon_device_try_acquire - Try to acquire nbcon console and enter unsafe
+ * section
+ * @con: The nbcon console to acquire
+ *
+ * Context: Under the locking mechanism implemented in
+ * @con->device_lock() including disabling migration.
++ * Return: True if the console was acquired. False otherwise.
+ *
+ * Console drivers will usually use their own internal synchronization
+ * mechanism to synchronize between console printing and non-printing
@@ -126,9 +146,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ * This function acquires the nbcon console using priority NBCON_PRIO_NORMAL
+ * and marks it unsafe for handover/takeover.
+ */
-+bool nbcon_driver_try_acquire(struct console *con)
++bool nbcon_device_try_acquire(struct console *con)
+{
-+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(con, nbcon_driver_ctxt);
++ struct nbcon_context *ctxt = &ACCESS_PRIVATE(con, nbcon_device_ctxt);
+
+ cant_migrate();
+
@@ -144,17 +164,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+ return true;
+}
-+EXPORT_SYMBOL_GPL(nbcon_driver_try_acquire);
++EXPORT_SYMBOL_GPL(nbcon_device_try_acquire);
+
+/**
-+ * nbcon_driver_release - Exit unsafe section and release the nbcon console
-+ * @con: The nbcon console acquired in nbcon_driver_try_acquire()
++ * nbcon_device_release - Exit unsafe section and release the nbcon console
++ * @con: The nbcon console acquired in nbcon_device_try_acquire()
+ */
-+void nbcon_driver_release(struct console *con)
++void nbcon_device_release(struct console *con)
+{
-+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(con, nbcon_driver_ctxt);
++ struct nbcon_context *ctxt = &ACCESS_PRIVATE(con, nbcon_device_ctxt);
+
+ if (nbcon_context_exit_unsafe(ctxt))
+ nbcon_context_release(ctxt);
+}
-+EXPORT_SYMBOL_GPL(nbcon_driver_release);
++EXPORT_SYMBOL_GPL(nbcon_device_release);
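
A sketch of the intended calling convention, with an invented driver
(mycon_lock and mycon_reconfigure are hypothetical; the nbcon_device_*()
calls are from this patch). The device lock is taken first and keeps
migration disabled, satisfying the cant_migrate() check above:

static DEFINE_SPINLOCK(mycon_lock);	/* hypothetical device lock */

static void mycon_reconfigure(struct console *con)
{
	spin_lock(&mycon_lock);		/* also disables migration */

	while (!nbcon_device_try_acquire(con))
		cpu_relax();		/* a printer owns the console, wait */

	/* Unsafe for handover/takeover: reprogram the hardware here. */

	nbcon_device_release(con);
	spin_unlock(&mycon_lock);
}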
diff --git a/debian/patches-rt/0012-seg6-Use-nested-BH-locking-for-seg6_bpf_srh_states.patch b/debian/patches-rt/0012-seg6-Use-nested-BH-locking-for-seg6_bpf_srh_states.patch
new file mode 100644
index 0000000000..fa445e5fd6
--- /dev/null
+++ b/debian/patches-rt/0012-seg6-Use-nested-BH-locking-for-seg6_bpf_srh_states.patch
@@ -0,0 +1,152 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 26 Oct 2023 12:10:40 +0200
+Subject: [PATCH 12/15] seg6: Use nested-BH locking for seg6_bpf_srh_states.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+The access to seg6_bpf_srh_states is protected by disabling preemption.
+Based on the code, the entry point is input_action_end_bpf() and
+every other function (the bpf helper functions bpf_lwt_seg6_*()), that
+is accessing seg6_bpf_srh_states, should be called from within
+input_action_end_bpf().
+
+input_action_end_bpf() accesses seg6_bpf_srh_states first at the top of
+the function and then disables preemption. This looks wrong because if
+preemption needs to be disabled as part of the locking mechanism then
+the variable shouldn't be accessed beforehand.
+
+Looking at how it is used via test_lwt_seg6local.sh then
+input_action_end_bpf() is always invoked from softirq context. If this
+is always the case then the preempt_disable() statement is superfluous.
+If this is not always invoked from softirq then disabling only
+preemption is not sufficient.
+
+Replace the preempt_disable() statement with nested-BH locking. This is
+not an equivalent replacement as it assumes that the invocation of
+input_action_end_bpf() always occurs in softirq context and thus the
+preempt_disable() is superfluous.
+Add a local_lock_t to the data structure and use local_lock_nested_bh() for
+locking. Add lockdep_assert_held() to ensure the lock is held while the
+per-CPU variable is referenced in the helper functions.
+
+Cc: Alexei Starovoitov <ast@kernel.org>
+Cc: Andrii Nakryiko <andrii@kernel.org>
+Cc: David Ahern <dsahern@kernel.org>
+Cc: Hao Luo <haoluo@google.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: John Fastabend <john.fastabend@gmail.com>
+Cc: KP Singh <kpsingh@kernel.org>
+Cc: Martin KaFai Lau <martin.lau@linux.dev>
+Cc: Song Liu <song@kernel.org>
+Cc: Stanislav Fomichev <sdf@google.com>
+Cc: Yonghong Song <yonghong.song@linux.dev>
+Cc: bpf@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/net/seg6_local.h | 1 +
+ net/core/filter.c | 3 +++
+ net/ipv6/seg6_local.c | 22 ++++++++++++++--------
+ 3 files changed, 18 insertions(+), 8 deletions(-)
+
+--- a/include/net/seg6_local.h
++++ b/include/net/seg6_local.h
+@@ -19,6 +19,7 @@ extern int seg6_lookup_nexthop(struct sk
+ extern bool seg6_bpf_has_valid_srh(struct sk_buff *skb);
+
+ struct seg6_bpf_srh_state {
++ local_lock_t bh_lock;
+ struct ipv6_sr_hdr *srh;
+ u16 hdrlen;
+ bool valid;
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -6455,6 +6455,7 @@ BPF_CALL_4(bpf_lwt_seg6_store_bytes, str
+ void *srh_tlvs, *srh_end, *ptr;
+ int srhoff = 0;
+
++ lockdep_assert_held(&srh_state->bh_lock);
+ if (srh == NULL)
+ return -EINVAL;
+
+@@ -6511,6 +6512,7 @@ BPF_CALL_4(bpf_lwt_seg6_action, struct s
+ int hdroff = 0;
+ int err;
+
++ lockdep_assert_held(&srh_state->bh_lock);
+ switch (action) {
+ case SEG6_LOCAL_ACTION_END_X:
+ if (!seg6_bpf_has_valid_srh(skb))
+@@ -6587,6 +6589,7 @@ BPF_CALL_3(bpf_lwt_seg6_adjust_srh, stru
+ int srhoff = 0;
+ int ret;
+
++ lockdep_assert_held(&srh_state->bh_lock);
+ if (unlikely(srh == NULL))
+ return -EINVAL;
+
+--- a/net/ipv6/seg6_local.c
++++ b/net/ipv6/seg6_local.c
+@@ -1380,7 +1380,9 @@ static int input_action_end_b6_encap(str
+ return err;
+ }
+
+-DEFINE_PER_CPU(struct seg6_bpf_srh_state, seg6_bpf_srh_states);
++DEFINE_PER_CPU(struct seg6_bpf_srh_state, seg6_bpf_srh_states) = {
++ .bh_lock = INIT_LOCAL_LOCK(bh_lock),
++};
+
+ bool seg6_bpf_has_valid_srh(struct sk_buff *skb)
+ {
+@@ -1388,6 +1390,7 @@ bool seg6_bpf_has_valid_srh(struct sk_bu
+ this_cpu_ptr(&seg6_bpf_srh_states);
+ struct ipv6_sr_hdr *srh = srh_state->srh;
+
++ lockdep_assert_held(&srh_state->bh_lock);
+ if (unlikely(srh == NULL))
+ return false;
+
+@@ -1408,8 +1411,7 @@ bool seg6_bpf_has_valid_srh(struct sk_bu
+ static int input_action_end_bpf(struct sk_buff *skb,
+ struct seg6_local_lwt *slwt)
+ {
+- struct seg6_bpf_srh_state *srh_state =
+- this_cpu_ptr(&seg6_bpf_srh_states);
++ struct seg6_bpf_srh_state *srh_state;
+ struct ipv6_sr_hdr *srh;
+ int ret;
+
+@@ -1420,10 +1422,14 @@ static int input_action_end_bpf(struct s
+ }
+ advance_nextseg(srh, &ipv6_hdr(skb)->daddr);
+
+- /* preempt_disable is needed to protect the per-CPU buffer srh_state,
+- * which is also accessed by the bpf_lwt_seg6_* helpers
++ /* The access to the per-CPU buffer srh_state is protected by running
++ * always in softirq context (with disabled BH). On PREEMPT_RT the
++ * required locking is provided by the following local_lock_nested_bh()
++ * statement. It is also accessed by the bpf_lwt_seg6_* helpers via
++ * bpf_prog_run_save_cb().
+ */
+- preempt_disable();
++ local_lock_nested_bh(&seg6_bpf_srh_states.bh_lock);
++ srh_state = this_cpu_ptr(&seg6_bpf_srh_states);
+ srh_state->srh = srh;
+ srh_state->hdrlen = srh->hdrlen << 3;
+ srh_state->valid = true;
+@@ -1446,15 +1452,15 @@ static int input_action_end_bpf(struct s
+
+ if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
+ goto drop;
++ local_unlock_nested_bh(&seg6_bpf_srh_states.bh_lock);
+
+- preempt_enable();
+ if (ret != BPF_REDIRECT)
+ seg6_lookup_nexthop(skb, NULL, 0);
+
+ return dst_input(skb);
+
+ drop:
+- preempt_enable();
++ local_unlock_nested_bh(&seg6_bpf_srh_states.bh_lock);
+ kfree_skb(skb);
+ return -EINVAL;
+ }
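
The lockdep_assert_held() additions follow a reusable pattern: helpers that
touch per-CPU state assert that the caller holds the lock, so a future caller
outside the locked region trips lockdep instead of silently corrupting state.
A sketch with invented names (my_srh_state, my_helper):

struct my_srh_state {			/* invented stand-in */
	local_lock_t bh_lock;
	void *cached;
};

static DEFINE_PER_CPU(struct my_srh_state, my_srh_states) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};

static void *my_helper(void)
{
	struct my_srh_state *st = this_cpu_ptr(&my_srh_states);

	/* Legal only between local_lock_nested_bh() and unlock. */
	lockdep_assert_held(&st->bh_lock);
	return st->cached;
}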
diff --git a/debian/patches-rt/0012-serial-core-Implement-processing-in-port-lock-wrappe.patch b/debian/patches-rt/0012-serial-core-Implement-processing-in-port-lock-wrappe.patch
index 1d136131f8..05a1928149 100644
--- a/debian/patches-rt/0012-serial-core-Implement-processing-in-port-lock-wrappe.patch
+++ b/debian/patches-rt/0012-serial-core-Implement-processing-in-port-lock-wrappe.patch
@@ -1,8 +1,8 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Wed, 17 Apr 2024 14:41:16 +0000
-Subject: [PATCH 12/46] serial: core: Implement processing in port->lock
+Subject: [PATCH 12/48] serial: core: Implement processing in port->lock
wrapper
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Currently the port->lock wrappers uart_port_lock(),
uart_port_unlock() (and their variants) only lock/unlock
@@ -19,6 +19,8 @@ console list (i.e. all uart nbcon drivers *must* take the
port->lock in their device_lock() callbacks).
Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/serial_core.h | 82 ++++++++++++++++++++++++++++++++++++++++++--
@@ -26,16 +28,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
-@@ -12,6 +12,8 @@
+@@ -11,6 +11,8 @@
+ #include <linux/compiler.h>
#include <linux/console.h>
#include <linux/interrupt.h>
- #include <linux/circ_buf.h>
+#include <linux/lockdep.h>
+#include <linux/printk.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/tty.h>
-@@ -626,6 +628,60 @@ static inline void uart_port_set_cons(st
+@@ -625,6 +627,60 @@ static inline void uart_port_set_cons(st
up->cons = con;
__uart_port_unlock_irqrestore(up, flags);
}
@@ -71,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ if (!__uart_port_using_nbcon(up))
+ return true;
+
-+ return nbcon_driver_try_acquire(up->cons);
++ return nbcon_device_try_acquire(up->cons);
+}
+
+/* Only for internal port lock wrapper usage. */
@@ -80,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ if (!__uart_port_using_nbcon(up))
+ return;
+
-+ while (!nbcon_driver_try_acquire(up->cons))
++ while (!nbcon_device_try_acquire(up->cons))
+ cpu_relax();
+}
+
@@ -90,13 +92,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ if (!__uart_port_using_nbcon(up))
+ return;
+
-+ nbcon_driver_release(up->cons);
++ nbcon_device_release(up->cons);
+}
+
/**
* uart_port_lock - Lock the UART port
* @up: Pointer to UART port structure
-@@ -633,6 +689,7 @@ static inline void uart_port_set_cons(st
+@@ -632,6 +688,7 @@ static inline void uart_port_set_cons(st
static inline void uart_port_lock(struct uart_port *up)
{
spin_lock(&up->lock);
@@ -104,7 +106,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -642,6 +699,7 @@ static inline void uart_port_lock(struct
+@@ -641,6 +698,7 @@ static inline void uart_port_lock(struct
static inline void uart_port_lock_irq(struct uart_port *up)
{
spin_lock_irq(&up->lock);
@@ -112,7 +114,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -652,6 +710,7 @@ static inline void uart_port_lock_irq(st
+@@ -651,6 +709,7 @@ static inline void uart_port_lock_irq(st
static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
{
spin_lock_irqsave(&up->lock, *flags);
@@ -120,7 +122,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -662,7 +721,15 @@ static inline void uart_port_lock_irqsav
+@@ -661,7 +720,15 @@ static inline void uart_port_lock_irqsav
*/
static inline bool uart_port_trylock(struct uart_port *up)
{
@@ -137,7 +139,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -674,7 +741,15 @@ static inline bool uart_port_trylock(str
+@@ -673,7 +740,15 @@ static inline bool uart_port_trylock(str
*/
static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags)
{
@@ -154,7 +156,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -683,6 +758,7 @@ static inline bool uart_port_trylock_irq
+@@ -682,6 +757,7 @@ static inline bool uart_port_trylock_irq
*/
static inline void uart_port_unlock(struct uart_port *up)
{
@@ -162,7 +164,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock(&up->lock);
}
-@@ -692,6 +768,7 @@ static inline void uart_port_unlock(stru
+@@ -691,6 +767,7 @@ static inline void uart_port_unlock(stru
*/
static inline void uart_port_unlock_irq(struct uart_port *up)
{
@@ -170,7 +172,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock_irq(&up->lock);
}
-@@ -702,6 +779,7 @@ static inline void uart_port_unlock_irq(
+@@ -701,6 +778,7 @@ static inline void uart_port_unlock_irq(
*/
static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
{
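
From a driver's point of view the calling convention does not change. A
hypothetical interrupt handler (myuart_irq and the MYUART_* register names
are made up; the wrappers are from this patch) now transparently acquires the
nbcon console together with the spinlock:

static irqreturn_t myuart_irq(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;
	unsigned int status;

	uart_port_lock(port);		/* spin_lock + nbcon acquire */
	status = readl(port->membase + MYUART_STATUS);
	if (status & MYUART_RX_READY) {
		/* ... read characters, push them to the tty layer ... */
	}
	uart_port_unlock(port);		/* nbcon release + spin_unlock */

	return IRQ_HANDLED;
}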
diff --git a/debian/patches-rt/0013-net-Use-nested-BH-locking-for-bpf_scratchpad.patch b/debian/patches-rt/0013-net-Use-nested-BH-locking-for-bpf_scratchpad.patch
new file mode 100644
index 0000000000..9132b002e6
--- /dev/null
+++ b/debian/patches-rt/0013-net-Use-nested-BH-locking-for-bpf_scratchpad.patch
@@ -0,0 +1,70 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 26 Oct 2023 15:17:32 +0200
+Subject: [PATCH 13/15] net: Use nested-BH locking for bpf_scratchpad.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+bpf_scratchpad is a per-CPU variable and relies on disabled BH for its
+locking. Without per-CPU locking in local_bh_disable() on PREEMPT_RT
+this data structure requires explicit locking.
+
+Add a local_lock_t to the data structure and use local_lock_nested_bh()
+for locking. This change adds only lockdep coverage and does not alter
+the functional behaviour for !PREEMPT_RT.
+
+Cc: Alexei Starovoitov <ast@kernel.org>
+Cc: Andrii Nakryiko <andrii@kernel.org>
+Cc: Hao Luo <haoluo@google.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: John Fastabend <john.fastabend@gmail.com>
+Cc: KP Singh <kpsingh@kernel.org>
+Cc: Martin KaFai Lau <martin.lau@linux.dev>
+Cc: Song Liu <song@kernel.org>
+Cc: Stanislav Fomichev <sdf@google.com>
+Cc: Yonghong Song <yonghong.song@linux.dev>
+Cc: bpf@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/core/filter.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -1658,9 +1658,12 @@ struct bpf_scratchpad {
+ __be32 diff[MAX_BPF_STACK / sizeof(__be32)];
+ u8 buff[MAX_BPF_STACK];
+ };
++ local_lock_t bh_lock;
+ };
+
+-static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
++static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp) = {
++ .bh_lock = INIT_LOCAL_LOCK(bh_lock),
++};
+
+ static inline int __bpf_try_make_writable(struct sk_buff *skb,
+ unsigned int write_len)
+@@ -2021,6 +2024,7 @@ BPF_CALL_5(bpf_csum_diff, __be32 *, from
+ struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
+ u32 diff_size = from_size + to_size;
+ int i, j = 0;
++ __wsum ret;
+
+ /* This is quite flexible, some examples:
+ *
+@@ -2034,12 +2038,15 @@ BPF_CALL_5(bpf_csum_diff, __be32 *, from
+ diff_size > sizeof(sp->diff)))
+ return -EINVAL;
+
++ local_lock_nested_bh(&bpf_sp.bh_lock);
+ for (i = 0; i < from_size / sizeof(__be32); i++, j++)
+ sp->diff[j] = ~from[i];
+ for (i = 0; i < to_size / sizeof(__be32); i++, j++)
+ sp->diff[j] = to[i];
+
+- return csum_partial(sp->diff, diff_size, seed);
++ ret = csum_partial(sp->diff, diff_size, seed);
++ local_unlock_nested_bh(&bpf_sp.bh_lock);
++ return ret;
+ }
+
+ static const struct bpf_func_proto bpf_csum_diff_proto = {
diff --git a/debian/patches-rt/0013-printk-nbcon-Do-not-rely-on-proxy-headers.patch b/debian/patches-rt/0013-printk-nbcon-Do-not-rely-on-proxy-headers.patch
index 4524b5dcf4..f7d08af8a7 100644
--- a/debian/patches-rt/0013-printk-nbcon-Do-not-rely-on-proxy-headers.patch
+++ b/debian/patches-rt/0013-printk-nbcon-Do-not-rely-on-proxy-headers.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Mon, 19 Feb 2024 17:35:49 +0000
-Subject: [PATCH 13/46] printk: nbcon: Do not rely on proxy headers
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 13/48] printk: nbcon: Do not rely on proxy headers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
The headers kernel.h, serial_core.h, and console.h allow for the
definitions of many types and functions from other headers.
diff --git a/debian/patches-rt/0014-net-Reference-bpf_redirect_info-via-task_struct-on-P.patch b/debian/patches-rt/0014-net-Reference-bpf_redirect_info-via-task_struct-on-P.patch
new file mode 100644
index 0000000000..b69f9bd551
--- /dev/null
+++ b/debian/patches-rt/0014-net-Reference-bpf_redirect_info-via-task_struct-on-P.patch
@@ -0,0 +1,657 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 18 Jan 2024 09:28:53 +0100
+Subject: [PATCH v9 net-next 14/15] net: Reference bpf_redirect_info via
+ task_struct on PREEMPT_RT.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+The XDP redirect process is two staged:
+- bpf_prog_run_xdp() is invoked to run an eBPF program which inspects the
+ packet and makes decisions. While doing that, the per-CPU variable
+ bpf_redirect_info is used.
+
+- Afterwards xdp_do_redirect() is invoked and accesses bpf_redirect_info
+ and it may also access other per-CPU variables like xskmap_flush_list.
+
+At the very end of the NAPI callback, xdp_do_flush() is invoked which
+does not access bpf_redirect_info but will touch the individual per-CPU
+lists.
+
+The per-CPU variables are only used in the NAPI callback hence disabling
+bottom halves is the only protection mechanism. Users from preemptible
+context (like cpu_map_kthread_run()) explicitly disable bottom halves
+for protection reasons.
+Without locking in local_bh_disable() on PREEMPT_RT this data structure
+requires explicit locking.
+
+PREEMPT_RT has forced-threaded interrupts enabled and every
+NAPI-callback runs in a thread. If each thread has its own data
+structure then locking can be avoided.
+
+Create a struct bpf_net_context which contains struct bpf_redirect_info.
+Define the variable on the stack; bpf_net_ctx_set() saves a pointer to
+it and bpf_net_ctx_clear() removes it again.
+bpf_net_ctx_set() may nest. For instance, a function can be used from
+within NET_RX_SOFTIRQ/net_rx_action, which uses bpf_net_ctx_set(), and
+from NET_TX_SOFTIRQ, which does not. Therefore only the first invocation
+updates the pointer.
+Use bpf_net_ctx_get_ri() as a wrapper to retrieve the current struct
+bpf_redirect_info. The returned data structure is zero initialized to
+ensure nothing is leaked from the stack. This is done on first usage of the
+struct. bpf_net_ctx_set() sets bpf_redirect_info::kern_flags to 0 to
+note that initialisation is required. First invocation of
+bpf_net_ctx_get_ri() will memset() the data structure and update
+bpf_redirect_info::kern_flags.
+bpf_redirect_info::nh is excluded from memset because it is only used
+once BPF_F_NEIGH is set, which also sets the nh member. kern_flags is
+moved past nh to exclude it from the memset.
+
+The pointer to bpf_net_context is saved in the task's task_struct.
+Always using the bpf_net_context approach has the advantage that there
+are almost zero differences between PREEMPT_RT and non-PREEMPT_RT builds.
+
+Cc: Alexei Starovoitov <ast@kernel.org>
+Cc: Andrii Nakryiko <andrii@kernel.org>
+Cc: Eduard Zingerman <eddyz87@gmail.com>
+Cc: Hao Luo <haoluo@google.com>
+Cc: Jesper Dangaard Brouer <hawk@kernel.org>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: John Fastabend <john.fastabend@gmail.com>
+Cc: KP Singh <kpsingh@kernel.org>
+Cc: Martin KaFai Lau <martin.lau@linux.dev>
+Cc: Song Liu <song@kernel.org>
+Cc: Stanislav Fomichev <sdf@google.com>
+Cc: Toke Høiland-Jørgensen <toke@redhat.com>
+Cc: Yonghong Song <yonghong.song@linux.dev>
+Cc: bpf@vger.kernel.org
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
+Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/filter.h | 56 ++++++++++++++++++++++++++++++++++++++++---------
+ include/linux/sched.h | 3 ++
+ kernel/bpf/cpumap.c | 3 ++
+ kernel/bpf/devmap.c | 9 +++++++
+ kernel/fork.c | 1
+ net/bpf/test_run.c | 11 ++++++++-
+ net/core/dev.c | 29 ++++++++++++++++++++++++-
+ net/core/filter.c | 44 ++++++++++----------------------------
+ net/core/lwt_bpf.c | 3 ++
+ 9 files changed, 114 insertions(+), 45 deletions(-)
+
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -733,21 +733,59 @@ struct bpf_nh_params {
+ };
+ };
+
++/* flags for bpf_redirect_info kern_flags */
++#define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */
++#define BPF_RI_F_RI_INIT BIT(1)
++
+ struct bpf_redirect_info {
+ u64 tgt_index;
+ void *tgt_value;
+ struct bpf_map *map;
+ u32 flags;
+- u32 kern_flags;
+ u32 map_id;
+ enum bpf_map_type map_type;
+ struct bpf_nh_params nh;
++ u32 kern_flags;
+ };
+
+-DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
++struct bpf_net_context {
++ struct bpf_redirect_info ri;
++};
+
+-/* flags for bpf_redirect_info kern_flags */
+-#define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */
++static inline struct bpf_net_context *bpf_net_ctx_set(struct bpf_net_context *bpf_net_ctx)
++{
++ struct task_struct *tsk = current;
++
++ if (tsk->bpf_net_context != NULL)
++ return NULL;
++ bpf_net_ctx->ri.kern_flags = 0;
++
++ tsk->bpf_net_context = bpf_net_ctx;
++ return bpf_net_ctx;
++}
++
++static inline void bpf_net_ctx_clear(struct bpf_net_context *bpf_net_ctx)
++{
++ if (bpf_net_ctx)
++ current->bpf_net_context = NULL;
++}
++
++static inline struct bpf_net_context *bpf_net_ctx_get(void)
++{
++ return current->bpf_net_context;
++}
++
++static inline struct bpf_redirect_info *bpf_net_ctx_get_ri(void)
++{
++ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
++
++ if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_RI_INIT)) {
++ memset(&bpf_net_ctx->ri, 0, offsetof(struct bpf_net_context, ri.nh));
++ bpf_net_ctx->ri.kern_flags |= BPF_RI_F_RI_INIT;
++ }
++
++ return &bpf_net_ctx->ri;
++}
+
+ /* Compute the linear packet data range [data, data_end) which
+ * will be accessed by various program types (cls_bpf, act_bpf,
+@@ -1018,25 +1056,23 @@ struct bpf_prog *bpf_patch_insn_single(s
+ const struct bpf_insn *patch, u32 len);
+ int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt);
+
+-void bpf_clear_redirect_map(struct bpf_map *map);
+-
+ static inline bool xdp_return_frame_no_direct(void)
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+
+ return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT;
+ }
+
+ static inline void xdp_set_return_frame_no_direct(void)
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+
+ ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT;
+ }
+
+ static inline void xdp_clear_return_frame_no_direct(void)
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+
+ ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT;
+ }
+@@ -1592,7 +1628,7 @@ static __always_inline long __bpf_xdp_re
+ u64 flags, const u64 flag_mask,
+ void *lookup_elem(struct bpf_map *map, u32 key))
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+ const u64 action_mask = XDP_ABORTED | XDP_DROP | XDP_PASS | XDP_TX;
+
+ /* Lower bits of the flags are used as return code on lookup failure */
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -54,6 +54,7 @@ struct bio_list;
+ struct blk_plug;
+ struct bpf_local_storage;
+ struct bpf_run_ctx;
++struct bpf_net_context;
+ struct capture_control;
+ struct cfs_rq;
+ struct fs_struct;
+@@ -1516,6 +1517,8 @@ struct task_struct {
+ /* Used for BPF run context */
+ struct bpf_run_ctx *bpf_ctx;
+ #endif
++ /* Used by BPF for per-TASK xdp storage */
++ struct bpf_net_context *bpf_net_context;
+
+ #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+ unsigned long lowest_stack;
+--- a/kernel/bpf/cpumap.c
++++ b/kernel/bpf/cpumap.c
+@@ -240,12 +240,14 @@ static int cpu_map_bpf_prog_run(struct b
+ int xdp_n, struct xdp_cpumap_stats *stats,
+ struct list_head *list)
+ {
++ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+ int nframes;
+
+ if (!rcpu->prog)
+ return xdp_n;
+
+ rcu_read_lock_bh();
++ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+
+ nframes = cpu_map_bpf_prog_run_xdp(rcpu, frames, xdp_n, stats);
+
+@@ -255,6 +257,7 @@ static int cpu_map_bpf_prog_run(struct b
+ if (unlikely(!list_empty(list)))
+ cpu_map_bpf_prog_run_skb(rcpu, list, stats);
+
++ bpf_net_ctx_clear(bpf_net_ctx);
+ rcu_read_unlock_bh(); /* resched point, may call do_softirq() */
+
+ return nframes;
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -196,7 +196,14 @@ static void dev_map_free(struct bpf_map
+ list_del_rcu(&dtab->list);
+ spin_unlock(&dev_map_lock);
+
+- bpf_clear_redirect_map(map);
++ /* bpf_redirect_info->map is assigned in __bpf_xdp_redirect_map()
++ * during NAPI callback and cleared after the XDP redirect. There is no
++ * explicit RCU read section which protects bpf_redirect_info->map but
++ * local_bh_disable() also marks the beginning of an RCU section. This
++ * makes the complete softirq callback RCU protected. Thus after the
++ * following synchronize_rcu() there is no bpf_redirect_info->map == map
++ * assignment.
++ */
+ synchronize_rcu();
+
+ /* Make sure prior __dev_map_entry_free() have completed. */
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -2355,6 +2355,7 @@ static void rv_task_fork(struct task_str
+ RCU_INIT_POINTER(p->bpf_storage, NULL);
+ p->bpf_ctx = NULL;
+ #endif
++ p->bpf_net_context = NULL;
+
+ /* Perform scheduler related setup. Assign this task to a CPU. */
+ retval = sched_fork(clone_flags, p);
+--- a/net/bpf/test_run.c
++++ b/net/bpf/test_run.c
+@@ -283,9 +283,10 @@ static int xdp_recv_frames(struct xdp_fr
+ static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
+ u32 repeat)
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+ int err = 0, act, ret, i, nframes = 0, batch_sz;
+ struct xdp_frame **frames = xdp->frames;
++ struct bpf_redirect_info *ri;
+ struct xdp_page_head *head;
+ struct xdp_frame *frm;
+ bool redirect = false;
+@@ -295,6 +296,8 @@ static int xdp_test_run_batch(struct xdp
+ batch_sz = min_t(u32, repeat, xdp->batch_size);
+
+ local_bh_disable();
++ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
++ ri = bpf_net_ctx_get_ri();
+ xdp_set_return_frame_no_direct();
+
+ for (i = 0; i < batch_sz; i++) {
+@@ -359,6 +362,7 @@ static int xdp_test_run_batch(struct xdp
+ }
+
+ xdp_clear_return_frame_no_direct();
++ bpf_net_ctx_clear(bpf_net_ctx);
+ local_bh_enable();
+ return err;
+ }
+@@ -394,6 +398,7 @@ static int bpf_test_run_xdp_live(struct
+ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
+ u32 *retval, u32 *time, bool xdp)
+ {
++ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+ struct bpf_prog_array_item item = {.prog = prog};
+ struct bpf_run_ctx *old_ctx;
+ struct bpf_cg_run_ctx run_ctx;
+@@ -419,10 +424,14 @@ static int bpf_test_run(struct bpf_prog
+ do {
+ run_ctx.prog_item = &item;
+ local_bh_disable();
++ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
++
+ if (xdp)
+ *retval = bpf_prog_run_xdp(prog, ctx);
+ else
+ *retval = bpf_prog_run(prog, ctx);
++
++ bpf_net_ctx_clear(bpf_net_ctx);
+ local_bh_enable();
+ } while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
+ bpf_reset_run_ctx(old_ctx);
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4045,10 +4045,13 @@ sch_handle_ingress(struct sk_buff *skb,
+ {
+ struct bpf_mprog_entry *entry = rcu_dereference_bh(skb->dev->tcx_ingress);
+ enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_INGRESS;
++ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+ int sch_ret;
+
+ if (!entry)
+ return skb;
++
++ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+ if (*pt_prev) {
+ *ret = deliver_skb(skb, *pt_prev, orig_dev);
+ *pt_prev = NULL;
+@@ -4077,10 +4080,12 @@ sch_handle_ingress(struct sk_buff *skb,
+ break;
+ }
+ *ret = NET_RX_SUCCESS;
++ bpf_net_ctx_clear(bpf_net_ctx);
+ return NULL;
+ case TC_ACT_SHOT:
+ kfree_skb_reason(skb, drop_reason);
+ *ret = NET_RX_DROP;
++ bpf_net_ctx_clear(bpf_net_ctx);
+ return NULL;
+ /* used by tc_run */
+ case TC_ACT_STOLEN:
+@@ -4090,8 +4095,10 @@ sch_handle_ingress(struct sk_buff *skb,
+ fallthrough;
+ case TC_ACT_CONSUMED:
+ *ret = NET_RX_SUCCESS;
++ bpf_net_ctx_clear(bpf_net_ctx);
+ return NULL;
+ }
++ bpf_net_ctx_clear(bpf_net_ctx);
+
+ return skb;
+ }
+@@ -4101,11 +4108,14 @@ sch_handle_egress(struct sk_buff *skb, i
+ {
+ struct bpf_mprog_entry *entry = rcu_dereference_bh(dev->tcx_egress);
+ enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_EGRESS;
++ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+ int sch_ret;
+
+ if (!entry)
+ return skb;
+
++ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
++
+ /* qdisc_skb_cb(skb)->pkt_len & tcx_set_ingress() was
+ * already set by the caller.
+ */
+@@ -4121,10 +4131,12 @@ sch_handle_egress(struct sk_buff *skb, i
+ /* No need to push/pop skb's mac_header here on egress! */
+ skb_do_redirect(skb);
+ *ret = NET_XMIT_SUCCESS;
++ bpf_net_ctx_clear(bpf_net_ctx);
+ return NULL;
+ case TC_ACT_SHOT:
+ kfree_skb_reason(skb, drop_reason);
+ *ret = NET_XMIT_DROP;
++ bpf_net_ctx_clear(bpf_net_ctx);
+ return NULL;
+ /* used by tc_run */
+ case TC_ACT_STOLEN:
+@@ -4134,8 +4146,10 @@ sch_handle_egress(struct sk_buff *skb, i
+ fallthrough;
+ case TC_ACT_CONSUMED:
+ *ret = NET_XMIT_SUCCESS;
++ bpf_net_ctx_clear(bpf_net_ctx);
+ return NULL;
+ }
++ bpf_net_ctx_clear(bpf_net_ctx);
+
+ return skb;
+ }
+@@ -6325,6 +6339,7 @@ enum {
+ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock,
+ unsigned flags, u16 budget)
+ {
++ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+ bool skip_schedule = false;
+ unsigned long timeout;
+ int rc;
+@@ -6342,6 +6357,7 @@ static void busy_poll_stop(struct napi_s
+ clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
+
+ local_bh_disable();
++ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+
+ if (flags & NAPI_F_PREFER_BUSY_POLL) {
+ napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs);
+@@ -6364,6 +6380,7 @@ static void busy_poll_stop(struct napi_s
+ netpoll_poll_unlock(have_poll_lock);
+ if (rc == budget)
+ __busy_poll_stop(napi, skip_schedule);
++ bpf_net_ctx_clear(bpf_net_ctx);
+ local_bh_enable();
+ }
+
+@@ -6373,6 +6390,7 @@ static void __napi_busy_loop(unsigned in
+ {
+ unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
+ int (*napi_poll)(struct napi_struct *napi, int budget);
++ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+ void *have_poll_lock = NULL;
+ struct napi_struct *napi;
+
+@@ -6391,6 +6409,7 @@ static void __napi_busy_loop(unsigned in
+ int work = 0;
+
+ local_bh_disable();
++ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+ if (!napi_poll) {
+ unsigned long val = READ_ONCE(napi->state);
+
+@@ -6421,6 +6440,7 @@ static void __napi_busy_loop(unsigned in
+ __NET_ADD_STATS(dev_net(napi->dev),
+ LINUX_MIB_BUSYPOLLRXPACKETS, work);
+ skb_defer_free_flush(this_cpu_ptr(&softnet_data));
++ bpf_net_ctx_clear(bpf_net_ctx);
+ local_bh_enable();
+
+ if (!loop_end || loop_end(loop_end_arg, start_time))
+@@ -6848,6 +6868,7 @@ static int napi_thread_wait(struct napi_
+
+ static void napi_threaded_poll_loop(struct napi_struct *napi)
+ {
++ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+ struct softnet_data *sd;
+ unsigned long last_qs = jiffies;
+
+@@ -6856,6 +6877,8 @@ static void napi_threaded_poll_loop(stru
+ void *have;
+
+ local_bh_disable();
++ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
++
+ sd = this_cpu_ptr(&softnet_data);
+ sd->in_napi_threaded_poll = true;
+
+@@ -6871,6 +6894,7 @@ static void napi_threaded_poll_loop(stru
+ net_rps_action_and_irq_enable(sd);
+ }
+ skb_defer_free_flush(sd);
++ bpf_net_ctx_clear(bpf_net_ctx);
+ local_bh_enable();
+
+ if (!repoll)
+@@ -6896,10 +6920,12 @@ static __latent_entropy void net_rx_acti
+ struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+ unsigned long time_limit = jiffies +
+ usecs_to_jiffies(READ_ONCE(net_hotdata.netdev_budget_usecs));
++ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+ int budget = READ_ONCE(net_hotdata.netdev_budget);
+ LIST_HEAD(list);
+ LIST_HEAD(repoll);
+
++ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+ start:
+ sd->in_net_rx_action = true;
+ local_irq_disable();
+@@ -6952,7 +6978,8 @@ static __latent_entropy void net_rx_acti
+ sd->in_net_rx_action = false;
+
+ net_rps_action_and_irq_enable(sd);
+-end:;
++end:
++ bpf_net_ctx_clear(bpf_net_ctx);
+ }
+
+ struct netdev_adjacent {
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -2483,9 +2483,6 @@ static const struct bpf_func_proto bpf_c
+ .arg3_type = ARG_ANYTHING,
+ };
+
+-DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
+-EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info);
+-
+ static struct net_device *skb_get_peer_dev(struct net_device *dev)
+ {
+ const struct net_device_ops *ops = dev->netdev_ops;
+@@ -2498,7 +2495,7 @@ static struct net_device *skb_get_peer_d
+
+ int skb_do_redirect(struct sk_buff *skb)
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+ struct net *net = dev_net(skb->dev);
+ struct net_device *dev;
+ u32 flags = ri->flags;
+@@ -2531,7 +2528,7 @@ int skb_do_redirect(struct sk_buff *skb)
+
+ BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+
+ if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL)))
+ return TC_ACT_SHOT;
+@@ -2552,7 +2549,7 @@ static const struct bpf_func_proto bpf_r
+
+ BPF_CALL_2(bpf_redirect_peer, u32, ifindex, u64, flags)
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+
+ if (unlikely(flags))
+ return TC_ACT_SHOT;
+@@ -2574,7 +2571,7 @@ static const struct bpf_func_proto bpf_r
+ BPF_CALL_4(bpf_redirect_neigh, u32, ifindex, struct bpf_redir_neigh *, params,
+ int, plen, u64, flags)
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+
+ if (unlikely((plen && plen < sizeof(*params)) || flags))
+ return TC_ACT_SHOT;
+@@ -4300,30 +4297,13 @@ void xdp_do_check_flushed(struct napi_st
+ }
+ #endif
+
+-void bpf_clear_redirect_map(struct bpf_map *map)
+-{
+- struct bpf_redirect_info *ri;
+- int cpu;
+-
+- for_each_possible_cpu(cpu) {
+- ri = per_cpu_ptr(&bpf_redirect_info, cpu);
+- /* Avoid polluting remote cacheline due to writes if
+- * not needed. Once we pass this test, we need the
+- * cmpxchg() to make sure it hasn't been changed in
+- * the meantime by remote CPU.
+- */
+- if (unlikely(READ_ONCE(ri->map) == map))
+- cmpxchg(&ri->map, map, NULL);
+- }
+-}
+-
+ DEFINE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key);
+ EXPORT_SYMBOL_GPL(bpf_master_redirect_enabled_key);
+
+ u32 xdp_master_redirect(struct xdp_buff *xdp)
+ {
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+ struct net_device *master, *slave;
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+
+ master = netdev_master_upper_dev_get_rcu(xdp->rxq->dev);
+ slave = master->netdev_ops->ndo_xdp_get_xmit_slave(master, xdp);
+@@ -4395,7 +4375,7 @@ static __always_inline int __xdp_do_redi
+ map = READ_ONCE(ri->map);
+
+ /* The map pointer is cleared when the map is being torn
+- * down by bpf_clear_redirect_map()
++ * down by dev_map_free()
+ */
+ if (unlikely(!map)) {
+ err = -ENOENT;
+@@ -4440,7 +4420,7 @@ static __always_inline int __xdp_do_redi
+ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog)
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+ enum bpf_map_type map_type = ri->map_type;
+
+ if (map_type == BPF_MAP_TYPE_XSKMAP)
+@@ -4454,7 +4434,7 @@ EXPORT_SYMBOL_GPL(xdp_do_redirect);
+ int xdp_do_redirect_frame(struct net_device *dev, struct xdp_buff *xdp,
+ struct xdp_frame *xdpf, struct bpf_prog *xdp_prog)
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+ enum bpf_map_type map_type = ri->map_type;
+
+ if (map_type == BPF_MAP_TYPE_XSKMAP)
+@@ -4471,7 +4451,7 @@ static int xdp_do_generic_redirect_map(s
+ enum bpf_map_type map_type, u32 map_id,
+ u32 flags)
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+ struct bpf_map *map;
+ int err;
+
+@@ -4483,7 +4463,7 @@ static int xdp_do_generic_redirect_map(s
+ map = READ_ONCE(ri->map);
+
+ /* The map pointer is cleared when the map is being torn
+- * down by bpf_clear_redirect_map()
++ * down by dev_map_free()
+ */
+ if (unlikely(!map)) {
+ err = -ENOENT;
+@@ -4525,7 +4505,7 @@ static int xdp_do_generic_redirect_map(s
+ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
+ struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+ enum bpf_map_type map_type = ri->map_type;
+ void *fwd = ri->tgt_value;
+ u32 map_id = ri->map_id;
+@@ -4561,7 +4541,7 @@ int xdp_do_generic_redirect(struct net_d
+
+ BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
+ {
+- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
++ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+
+ if (unlikely(flags))
+ return XDP_ABORTED;
+--- a/net/core/lwt_bpf.c
++++ b/net/core/lwt_bpf.c
+@@ -38,12 +38,14 @@ static inline struct bpf_lwt *bpf_lwt_lw
+ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
+ struct dst_entry *dst, bool can_redirect)
+ {
++ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+ int ret;
+
+ /* Disabling BH is needed to protect per-CPU bpf_redirect_info between
+ * BPF prog and skb_do_redirect().
+ */
+ local_bh_disable();
++ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+ bpf_compute_data_pointers(skb);
+ ret = bpf_prog_run_save_cb(lwt->prog, skb);
+
+@@ -76,6 +78,7 @@ static int run_lwt_bpf(struct sk_buff *s
+ break;
+ }
+
++ bpf_net_ctx_clear(bpf_net_ctx);
+ local_bh_enable();
+
+ return ret;
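
The hunks above all follow the same pattern: a struct bpf_net_context is placed on the caller's stack, installed with bpf_net_ctx_set() for the duration of the bottom-half-disabled section, and removed again with bpf_net_ctx_clear() on every exit path, while helpers such as bpf_net_ctx_get_ri() read through the installed context instead of per-CPU data. Below is a minimal user-space sketch of that scoping pattern, not the kernel implementation: a thread-local pointer stands in for the task's context field, the struct layout is reduced to one member, and all locking and BH handling is elided.

/* Sketch of the bpf_net_context scoping pattern, user-space stand-ins only. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct bpf_redirect_info {
	unsigned long tgt_index;
};

struct bpf_net_context {
	struct bpf_redirect_info ri;
};

/* Stand-in for the per-task context pointer. */
static _Thread_local struct bpf_net_context *cur_bpf_net_ctx;

static struct bpf_net_context *bpf_net_ctx_set(struct bpf_net_context *ctx)
{
	assert(cur_bpf_net_ctx == NULL);	/* sections do not nest */
	cur_bpf_net_ctx = ctx;
	return ctx;
}

static void bpf_net_ctx_clear(struct bpf_net_context *ctx)
{
	(void)ctx;
	cur_bpf_net_ctx = NULL;
}

static struct bpf_redirect_info *bpf_net_ctx_get_ri(void)
{
	assert(cur_bpf_net_ctx != NULL);	/* only valid inside a section */
	return &cur_bpf_net_ctx->ri;
}

/* Models run_lwt_bpf(): one context covers the whole critical section,
 * so every redirect helper called from it sees the same state.
 */
static void bh_disabled_section(void)
{
	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;

	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
	bpf_net_ctx_get_ri()->tgt_index = 42;	/* e.g. bpf_redirect() */
	printf("tgt_index=%lu\n", bpf_net_ctx_get_ri()->tgt_index);
	bpf_net_ctx_clear(bpf_net_ctx);
}

int main(void)
{
	bh_disabled_section();
	return 0;
}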
diff --git a/debian/patches-rt/0014-printk-Make-console_is_usable-available-to-nbcon.patch b/debian/patches-rt/0014-printk-Make-console_is_usable-available-to-nbcon.patch
index 554a571639..3f88a5627b 100644
--- a/debian/patches-rt/0014-printk-Make-console_is_usable-available-to-nbcon.patch
+++ b/debian/patches-rt/0014-printk-Make-console_is_usable-available-to-nbcon.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Sep 2023 13:25:41 +0000
-Subject: [PATCH 14/46] printk: Make console_is_usable() available to nbcon
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 14/48] printk: Make console_is_usable() available to nbcon
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Move console_is_usable() as-is into internal.h so that it can
be used by nbcon printing functions as well.
diff --git a/debian/patches-rt/0015-net-Move-per-CPU-flush-lists-to-bpf_net_context-on-P.patch b/debian/patches-rt/0015-net-Move-per-CPU-flush-lists-to-bpf_net_context-on-P.patch
new file mode 100644
index 0000000000..ab01176e1d
--- /dev/null
+++ b/debian/patches-rt/0015-net-Move-per-CPU-flush-lists-to-bpf_net_context-on-P.patch
@@ -0,0 +1,270 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 1 Feb 2024 15:39:56 +0100
+Subject: [PATCH 15/15] net: Move per-CPU flush-lists to bpf_net_context on
+ PREEMPT_RT.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+The per-CPU flush lists are accessed from within the NAPI callback
+(xdp_do_flush() for instance) and are subject to the same problem as
+struct bpf_redirect_info.
+
+Add the lists cpu_map_flush_list, dev_map_flush_list and
+xskmap_map_flush_list to struct bpf_net_context. Add wrappers for
+accessing them. The lists are initialized on first use (similar to
+bpf_net_ctx_get_ri()).
+
+Cc: "Björn Töpel" <bjorn@kernel.org>
+Cc: Alexei Starovoitov <ast@kernel.org>
+Cc: Andrii Nakryiko <andrii@kernel.org>
+Cc: Eduard Zingerman <eddyz87@gmail.com>
+Cc: Hao Luo <haoluo@google.com>
+Cc: Jesper Dangaard Brouer <hawk@kernel.org>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: John Fastabend <john.fastabend@gmail.com>
+Cc: Jonathan Lemon <jonathan.lemon@gmail.com>
+Cc: KP Singh <kpsingh@kernel.org>
+Cc: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Cc: Magnus Karlsson <magnus.karlsson@intel.com>
+Cc: Martin KaFai Lau <martin.lau@linux.dev>
+Cc: Song Liu <song@kernel.org>
+Cc: Stanislav Fomichev <sdf@google.com>
+Cc: Toke Høiland-Jørgensen <toke@redhat.com>
+Cc: Yonghong Song <yonghong.song@linux.dev>
+Cc: bpf@vger.kernel.org
+Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/filter.h | 42 ++++++++++++++++++++++++++++++++++++++++++
+ kernel/bpf/cpumap.c | 19 +++----------------
+ kernel/bpf/devmap.c | 11 +++--------
+ net/xdp/xsk.c | 12 ++++--------
+ 4 files changed, 52 insertions(+), 32 deletions(-)
+
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -736,6 +736,9 @@ struct bpf_nh_params {
+ /* flags for bpf_redirect_info kern_flags */
+ #define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */
+ #define BPF_RI_F_RI_INIT BIT(1)
++#define BPF_RI_F_CPU_MAP_INIT BIT(2)
++#define BPF_RI_F_DEV_MAP_INIT BIT(3)
++#define BPF_RI_F_XSK_MAP_INIT BIT(4)
+
+ struct bpf_redirect_info {
+ u64 tgt_index;
+@@ -750,6 +753,9 @@ struct bpf_redirect_info {
+
+ struct bpf_net_context {
+ struct bpf_redirect_info ri;
++ struct list_head cpu_map_flush_list;
++ struct list_head dev_map_flush_list;
++ struct list_head xskmap_map_flush_list;
+ };
+
+ static inline struct bpf_net_context *bpf_net_ctx_set(struct bpf_net_context *bpf_net_ctx)
+@@ -787,6 +793,42 @@ static inline struct bpf_redirect_info *
+ return &bpf_net_ctx->ri;
+ }
+
++static inline struct list_head *bpf_net_ctx_get_cpu_map_flush_list(void)
++{
++ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
++
++ if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_CPU_MAP_INIT)) {
++ INIT_LIST_HEAD(&bpf_net_ctx->cpu_map_flush_list);
++ bpf_net_ctx->ri.kern_flags |= BPF_RI_F_CPU_MAP_INIT;
++ }
++
++ return &bpf_net_ctx->cpu_map_flush_list;
++}
++
++static inline struct list_head *bpf_net_ctx_get_dev_flush_list(void)
++{
++ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
++
++ if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_DEV_MAP_INIT)) {
++ INIT_LIST_HEAD(&bpf_net_ctx->dev_map_flush_list);
++ bpf_net_ctx->ri.kern_flags |= BPF_RI_F_DEV_MAP_INIT;
++ }
++
++ return &bpf_net_ctx->dev_map_flush_list;
++}
++
++static inline struct list_head *bpf_net_ctx_get_xskmap_flush_list(void)
++{
++ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
++
++ if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_XSK_MAP_INIT)) {
++ INIT_LIST_HEAD(&bpf_net_ctx->xskmap_map_flush_list);
++ bpf_net_ctx->ri.kern_flags |= BPF_RI_F_XSK_MAP_INIT;
++ }
++
++ return &bpf_net_ctx->xskmap_map_flush_list;
++}
++
+ /* Compute the linear packet data range [data, data_end) which
+ * will be accessed by various program types (cls_bpf, act_bpf,
+ * lwt, ...). Subsystems allowing direct data access must (!)
+--- a/kernel/bpf/cpumap.c
++++ b/kernel/bpf/cpumap.c
+@@ -79,8 +79,6 @@ struct bpf_cpu_map {
+ struct bpf_cpu_map_entry __rcu **cpu_map;
+ };
+
+-static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);
+-
+ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
+ {
+ u32 value_size = attr->value_size;
+@@ -709,7 +707,7 @@ static void bq_flush_to_queue(struct xdp
+ */
+ static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
+ {
+- struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
++ struct list_head *flush_list = bpf_net_ctx_get_cpu_map_flush_list();
+ struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
+
+ if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
+@@ -761,7 +759,7 @@ int cpu_map_generic_redirect(struct bpf_
+
+ void __cpu_map_flush(void)
+ {
+- struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
++ struct list_head *flush_list = bpf_net_ctx_get_cpu_map_flush_list();
+ struct xdp_bulk_queue *bq, *tmp;
+
+ list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
+@@ -775,20 +773,9 @@ void __cpu_map_flush(void)
+ #ifdef CONFIG_DEBUG_NET
+ bool cpu_map_check_flush(void)
+ {
+- if (list_empty(this_cpu_ptr(&cpu_map_flush_list)))
++ if (list_empty(bpf_net_ctx_get_cpu_map_flush_list()))
+ return false;
+ __cpu_map_flush();
+ return true;
+ }
+ #endif
+-
+-static int __init cpu_map_init(void)
+-{
+- int cpu;
+-
+- for_each_possible_cpu(cpu)
+- INIT_LIST_HEAD(&per_cpu(cpu_map_flush_list, cpu));
+- return 0;
+-}
+-
+-subsys_initcall(cpu_map_init);
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -83,7 +83,6 @@ struct bpf_dtab {
+ u32 n_buckets;
+ };
+
+-static DEFINE_PER_CPU(struct list_head, dev_flush_list);
+ static DEFINE_SPINLOCK(dev_map_lock);
+ static LIST_HEAD(dev_map_list);
+
+@@ -415,7 +414,7 @@ static void bq_xmit_all(struct xdp_dev_b
+ */
+ void __dev_flush(void)
+ {
+- struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
++ struct list_head *flush_list = bpf_net_ctx_get_dev_flush_list();
+ struct xdp_dev_bulk_queue *bq, *tmp;
+
+ list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
+@@ -429,7 +428,7 @@ void __dev_flush(void)
+ #ifdef CONFIG_DEBUG_NET
+ bool dev_check_flush(void)
+ {
+- if (list_empty(this_cpu_ptr(&dev_flush_list)))
++ if (list_empty(bpf_net_ctx_get_dev_flush_list()))
+ return false;
+ __dev_flush();
+ return true;
+@@ -460,7 +459,7 @@ static void *__dev_map_lookup_elem(struc
+ static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
+ struct net_device *dev_rx, struct bpf_prog *xdp_prog)
+ {
+- struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
++ struct list_head *flush_list = bpf_net_ctx_get_dev_flush_list();
+ struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
+
+ if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
+@@ -1160,15 +1159,11 @@ static struct notifier_block dev_map_not
+
+ static int __init dev_map_init(void)
+ {
+- int cpu;
+-
+ /* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
+ BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
+ offsetof(struct _bpf_dtab_netdev, dev));
+ register_netdevice_notifier(&dev_map_notifier);
+
+- for_each_possible_cpu(cpu)
+- INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
+ return 0;
+ }
+
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -35,8 +35,6 @@
+ #define TX_BATCH_SIZE 32
+ #define MAX_PER_SOCKET_BUDGET (TX_BATCH_SIZE)
+
+-static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
+-
+ void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
+ {
+ if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
+@@ -372,7 +370,7 @@ static int xsk_rcv(struct xdp_sock *xs,
+
+ int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
+ {
+- struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
++ struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();
+ int err;
+
+ err = xsk_rcv(xs, xdp);
+@@ -387,7 +385,7 @@ int __xsk_map_redirect(struct xdp_sock *
+
+ void __xsk_map_flush(void)
+ {
+- struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
++ struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();
+ struct xdp_sock *xs, *tmp;
+
+ list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
+@@ -399,7 +397,7 @@ void __xsk_map_flush(void)
+ #ifdef CONFIG_DEBUG_NET
+ bool xsk_map_check_flush(void)
+ {
+- if (list_empty(this_cpu_ptr(&xskmap_flush_list)))
++ if (list_empty(bpf_net_ctx_get_xskmap_flush_list()))
+ return false;
+ __xsk_map_flush();
+ return true;
+@@ -1772,7 +1770,7 @@ static struct pernet_operations xsk_net_
+
+ static int __init xsk_init(void)
+ {
+- int err, cpu;
++ int err;
+
+ err = proto_register(&xsk_proto, 0 /* no slab */);
+ if (err)
+@@ -1790,8 +1788,6 @@ static int __init xsk_init(void)
+ if (err)
+ goto out_pernet;
+
+- for_each_possible_cpu(cpu)
+- INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
+ return 0;
+
+ out_pernet:
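
The accessors added to include/linux/filter.h above rely on a lazy-initialization trick: the list heads embedded in struct bpf_net_context start out uninitialized, and the first call to an accessor runs INIT_LIST_HEAD() and records that in a BPF_RI_F_*_INIT flag bit so later calls skip the setup. The following is a reduced user-space sketch of that pattern with stand-ins for the kernel list type; only the cpu_map list is shown, and the context is passed explicitly rather than fetched from the current task.

/* Sketch of the flag-guarded lazy list initialization, stand-in types. */
#include <stdio.h>

#define BPF_RI_F_CPU_MAP_INIT (1u << 2)

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head;
	head->prev = head;
}

struct bpf_net_context {
	unsigned int kern_flags;		/* stands in for ri.kern_flags */
	struct list_head cpu_map_flush_list;
};

static struct list_head *
bpf_net_ctx_get_cpu_map_flush_list(struct bpf_net_context *ctx)
{
	if (!(ctx->kern_flags & BPF_RI_F_CPU_MAP_INIT)) {
		INIT_LIST_HEAD(&ctx->cpu_map_flush_list);	/* first use only */
		ctx->kern_flags |= BPF_RI_F_CPU_MAP_INIT;
	}
	return &ctx->cpu_map_flush_list;
}

int main(void)
{
	struct bpf_net_context ctx = { .kern_flags = 0 };
	struct list_head *l = bpf_net_ctx_get_cpu_map_flush_list(&ctx);

	/* An initialized empty list points back at itself. */
	printf("empty=%d\n", l->next == l && l->prev == l);
	return 0;
}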
diff --git a/debian/patches-rt/0015-printk-Let-console_is_usable-handle-nbcon.patch b/debian/patches-rt/0015-printk-Let-console_is_usable-handle-nbcon.patch
index 0172326b0c..cfc311a7a5 100644
--- a/debian/patches-rt/0015-printk-Let-console_is_usable-handle-nbcon.patch
+++ b/debian/patches-rt/0015-printk-Let-console_is_usable-handle-nbcon.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Sep 2023 13:53:21 +0000
-Subject: [PATCH 15/46] printk: Let console_is_usable() handle nbcon
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 15/48] printk: Let console_is_usable() handle nbcon
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
The nbcon consoles use a different printing callback. For nbcon
consoles, check for the write_atomic() callback instead of
diff --git a/debian/patches-rt/0016-printk-Add-flags-argument-for-console_is_usable.patch b/debian/patches-rt/0016-printk-Add-flags-argument-for-console_is_usable.patch
index 86ab3528a3..068ad851e9 100644
--- a/debian/patches-rt/0016-printk-Add-flags-argument-for-console_is_usable.patch
+++ b/debian/patches-rt/0016-printk-Add-flags-argument-for-console_is_usable.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Sep 2023 13:45:33 +0000
-Subject: [PATCH 16/46] printk: Add @flags argument for console_is_usable()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 16/48] printk: Add @flags argument for console_is_usable()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
The caller of console_is_usable() usually needs @console->flags
for its own checks. Rather than having console_is_usable() read
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
any_usable = true;
-@@ -3838,7 +3839,7 @@ static bool __pr_flush(struct console *c
+@@ -3852,7 +3853,7 @@ static bool __pr_flush(struct console *c
* that they make forward progress, so only increment
* @diff for usable consoles.
*/
diff --git a/debian/patches-rt/0017-printk-nbcon-Add-helper-to-assign-priority-based-on-.patch b/debian/patches-rt/0017-printk-nbcon-Add-helper-to-assign-priority-based-on-.patch
index 0fda8a8c5e..a7cb342ff6 100644
--- a/debian/patches-rt/0017-printk-nbcon-Add-helper-to-assign-priority-based-on-.patch
+++ b/debian/patches-rt/0017-printk-nbcon-Add-helper-to-assign-priority-based-on-.patch
@@ -1,8 +1,8 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Thu, 14 Dec 2023 14:38:42 +0000
-Subject: [PATCH 17/46] printk: nbcon: Add helper to assign priority based on
+Subject: [PATCH 17/48] printk: nbcon: Add helper to assign priority based on
CPU state
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Add a helper function to use the current state of the CPU to
determine which priority to assign to the printing context.
@@ -10,7 +10,7 @@ determine which priority to assign to the printing context.
The EMERGENCY priority handling is added in a follow-up commit.
It will use a per-CPU variable.
-Note: nbcon_driver_try_acquire(), which is used by console
+Note: nbcon_device_try_acquire(), which is used by console
drivers to acquire the nbcon console for non-printing
activities, will always use NORMAL priority.
diff --git a/debian/patches-rt/0018-printk-nbcon-Provide-function-to-flush-using-write_a.patch b/debian/patches-rt/0018-printk-nbcon-Provide-function-to-flush-using-write_a.patch
index b42d8623bb..bca566898c 100644
--- a/debian/patches-rt/0018-printk-nbcon-Provide-function-to-flush-using-write_a.patch
+++ b/debian/patches-rt/0018-printk-nbcon-Provide-function-to-flush-using-write_a.patch
@@ -1,8 +1,8 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 12 Sep 2023 12:00:08 +0000
-Subject: [PATCH 18/46] printk: nbcon: Provide function to flush using
+Subject: [PATCH 18/48] printk: nbcon: Provide function to flush using
write_atomic()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Provide nbcon_atomic_flush_pending() to perform flushing of all
registered nbcon consoles using their write_atomic() callback.
@@ -30,19 +30,20 @@ Use it in console_flush_on_panic() before flushing legacy
consoles. The legacy write() callbacks are not fully safe when
oops_in_progress is set.
-Also use it in nbcon_driver_release() to flush records added
+Also use it in nbcon_device_release() to flush records added
while the driver had the console locked to perform non-printing
operations.
Co-developed-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Thomas Gleixner (Intel) <tglx@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/printk/internal.h | 2
- kernel/printk/nbcon.c | 162 ++++++++++++++++++++++++++++++++++++++++++++++-
+ kernel/printk/nbcon.c | 169 ++++++++++++++++++++++++++++++++++++++++++++++-
kernel/printk/printk.c | 2
- 3 files changed, 163 insertions(+), 3 deletions(-)
+ 3 files changed, 170 insertions(+), 3 deletions(-)
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -72,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt)
{
struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
-@@ -953,6 +952,148 @@ enum nbcon_prio nbcon_get_default_prio(v
+@@ -953,6 +952,155 @@ enum nbcon_prio nbcon_get_default_prio(v
return NBCON_PRIO_NORMAL;
}
@@ -122,6 +123,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ return -EAGAIN;
+
+ if (!ctxt->backlog) {
++ /* Are there reserved but not yet finalized records? */
+ if (nbcon_seq_read(con) < stop_seq)
+ err = -ENOENT;
+ break;
@@ -162,19 +164,25 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ local_irq_restore(flags);
+
+ /*
++ * If there was a new owner (-EPERM, -EAGAIN), that context is
++ * responsible for completing the flush.
++ *
++ * Do not wait for records that are not yet finalized (-ENOENT) to
++ * avoid a possible deadlock. They will either get flushed by the
++ * writer or eventually be skipped on the panic CPU.
++ */
++ if (err)
++ return;
++
++ /*
+ * If flushing was successful but more records are available, this
+ * context must flush those remaining records because there is no
+ * other context that will do it.
+ */
-+ if (!err && prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
++ if (prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
+ stop_seq = prb_next_reserve_seq(prb);
+ goto again;
+ }
-+
-+ /*
-+ * If there was a new owner, that context is responsible for
-+ * completing the flush.
-+ */
+}
+
+/**
@@ -221,10 +229,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* nbcon_alloc - Allocate buffers needed by the nbcon console
* @con: Console to allocate buffers for
-@@ -1064,8 +1205,23 @@ EXPORT_SYMBOL_GPL(nbcon_driver_try_acqui
- void nbcon_driver_release(struct console *con)
+@@ -1065,8 +1213,23 @@ EXPORT_SYMBOL_GPL(nbcon_device_try_acqui
+ void nbcon_device_release(struct console *con)
{
- struct nbcon_context *ctxt = &ACCESS_PRIVATE(con, nbcon_driver_ctxt);
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(con, nbcon_device_ctxt);
+ int cookie;
- if (nbcon_context_exit_unsafe(ctxt))
@@ -246,12 +254,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ }
+ console_srcu_read_unlock(cookie);
}
- EXPORT_SYMBOL_GPL(nbcon_driver_release);
+ EXPORT_SYMBOL_GPL(nbcon_device_release);
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -3172,6 +3172,8 @@ void console_flush_on_panic(enum con_flu
- console_srcu_read_unlock(cookie);
- }
+@@ -3184,6 +3184,8 @@ void console_flush_on_panic(enum con_flu
+ if (mode == CONSOLE_REPLAY_ALL)
+ __console_rewind_all();
+ nbcon_atomic_flush_pending();
+
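
The flush loop added above snapshots prb_next_reserve_seq() as a stop point, flushes records up to it, and retries with a fresh snapshot if new records were reserved in the meantime, so the flushing context never leaves work behind. A simplified sketch of that loop structure follows; a plain counter replaces the printk ringbuffer, and the ownership handoff and error handling (-EPERM, -EAGAIN, -ENOENT) are elided.

/* Sketch of the snapshot-flush-recheck loop, ringbuffer reduced to a counter. */
#include <stdio.h>

static unsigned long next_reserve_seq = 5;	/* producer position */
static unsigned long console_seq;		/* consumer position */

static int record_available(unsigned long seq)
{
	return seq < next_reserve_seq;
}

static void emit_record(unsigned long seq)
{
	printf("emit %lu\n", seq);
	if (seq == 2)
		next_reserve_seq = 8;	/* new records arrive mid-flush */
}

static void atomic_flush_pending(void)
{
	unsigned long stop_seq = next_reserve_seq;	/* snapshot */

again:
	while (console_seq < stop_seq)
		emit_record(console_seq++);

	/* More records arrived while flushing: this context must flush
	 * them too, because no other context will do it.
	 */
	if (record_available(console_seq)) {
		stop_seq = next_reserve_seq;
		goto again;
	}
}

int main(void)
{
	atomic_flush_pending();		/* prints records 0..7 */
	return 0;
}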
diff --git a/debian/patches-rt/0019-printk-Track-registered-boot-consoles.patch b/debian/patches-rt/0019-printk-Track-registered-boot-consoles.patch
index f01676b13a..29ab9ef99f 100644
--- a/debian/patches-rt/0019-printk-Track-registered-boot-consoles.patch
+++ b/debian/patches-rt/0019-printk-Track-registered-boot-consoles.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 24 Oct 2023 14:13:14 +0000
-Subject: [PATCH 19/46] printk: Track registered boot consoles
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 19/48] printk: Track registered boot consoles
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Unfortunately it is not known if a boot console and a regular
(legacy or nbcon) console use the same hardware. For this reason
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_PRINTK
DECLARE_WAIT_QUEUE_HEAD(log_wait);
/* All 3 protected by @syslog_lock. */
-@@ -3526,6 +3534,9 @@ void register_console(struct console *ne
+@@ -3539,6 +3547,9 @@ void register_console(struct console *ne
newcon->seq = init_seq;
}
@@ -50,17 +50,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* If another context is actively using the hardware of this new
* console, it will not be aware of the nbcon synchronization. This
-@@ -3595,7 +3606,9 @@ EXPORT_SYMBOL(register_console);
- /* Must be called under console_list_lock(). */
+@@ -3609,7 +3620,9 @@ EXPORT_SYMBOL(register_console);
static int unregister_console_locked(struct console *console)
{
+ bool use_device_lock = (console->flags & CON_NBCON) && console->write_atomic;
+ bool found_boot_con = false;
unsigned long flags;
+ struct console *c;
int res;
lockdep_assert_console_list_lock_held();
-@@ -3653,6 +3666,17 @@ static int unregister_console_locked(str
+@@ -3667,6 +3680,17 @@ static int unregister_console_locked(str
if (console->exit)
res = console->exit(console);
diff --git a/debian/patches-rt/0020-printk-nbcon-Use-nbcon-consoles-in-console_flush_all.patch b/debian/patches-rt/0020-printk-nbcon-Use-nbcon-consoles-in-console_flush_all.patch
index 3507ff2aa2..934e1b556c 100644
--- a/debian/patches-rt/0020-printk-nbcon-Use-nbcon-consoles-in-console_flush_all.patch
+++ b/debian/patches-rt/0020-printk-nbcon-Use-nbcon-consoles-in-console_flush_all.patch
@@ -1,8 +1,8 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 19 Sep 2023 14:33:27 +0000
-Subject: [PATCH 20/46] printk: nbcon: Use nbcon consoles in
+Subject: [PATCH 20/48] printk: nbcon: Use nbcon consoles in
console_flush_all()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Allow nbcon consoles to print messages in the legacy printk()
caller context (printing via unlock) by integrating them into
@@ -22,6 +22,7 @@ handovers/takeovers occur on a per-console basis and thus do
not cause the console_flush_all() loop to abort.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/printk/internal.h | 6 +++
diff --git a/debian/patches-rt/0021-printk-nbcon-Add-unsafe-flushing-on-panic.patch b/debian/patches-rt/0021-printk-nbcon-Add-unsafe-flushing-on-panic.patch
index 7b79f9d743..424d8dc3f1 100644
--- a/debian/patches-rt/0021-printk-nbcon-Add-unsafe-flushing-on-panic.patch
+++ b/debian/patches-rt/0021-printk-nbcon-Add-unsafe-flushing-on-panic.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Fri, 20 Oct 2023 10:03:42 +0000
-Subject: [PATCH 21/46] printk: nbcon: Add unsafe flushing on panic
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 21/48] printk: nbcon: Add unsafe flushing on panic
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Add nbcon_atomic_flush_unsafe() to flush all nbcon consoles
using the write_atomic() callback and allowing unsafe hostile
@@ -24,15 +24,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
-@@ -197,6 +197,7 @@ extern asmlinkage void dump_stack(void)
- void printk_trigger_flush(void);
- extern bool nbcon_driver_try_acquire(struct console *con);
- extern void nbcon_driver_release(struct console *con);
+@@ -198,6 +198,7 @@ void printk_trigger_flush(void);
+ void console_replay_all(void);
+ extern bool nbcon_device_try_acquire(struct console *con);
+ extern void nbcon_device_release(struct console *con);
+void nbcon_atomic_flush_unsafe(void);
#else
static inline __printf(1, 0)
int vprintk(const char *s, va_list args)
-@@ -286,6 +287,10 @@ static inline void nbcon_driver_release(
+@@ -291,6 +292,10 @@ static inline void nbcon_device_release(
{
}
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
bool this_cpu_in_panic(void);
--- a/kernel/panic.c
+++ b/kernel/panic.c
-@@ -457,6 +457,7 @@ void panic(const char *fmt, ...)
+@@ -456,6 +456,7 @@ void panic(const char *fmt, ...)
* Explicitly flush the kernel log buffer one last time.
*/
console_flush_on_panic(CONSOLE_FLUSH_PENDING);
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!nbcon_context_try_acquire(ctxt))
return -EPERM;
-@@ -1100,13 +1103,15 @@ static int __nbcon_atomic_flush_pending_
+@@ -1101,13 +1104,15 @@ static int __nbcon_atomic_flush_pending_
* write_atomic() callback
* @con: The nbcon console to flush
* @stop_seq: Flush up until this record
@@ -98,7 +98,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
unsigned long flags;
int err;
-@@ -1120,7 +1125,7 @@ static void nbcon_atomic_flush_pending_c
+@@ -1121,7 +1126,7 @@ static void nbcon_atomic_flush_pending_c
*/
local_irq_save(flags);
@@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_irq_restore(flags);
-@@ -1144,8 +1149,9 @@ static void nbcon_atomic_flush_pending_c
+@@ -1151,8 +1156,9 @@ static void nbcon_atomic_flush_pending_c
* __nbcon_atomic_flush_pending - Flush all nbcon consoles using their
* write_atomic() callback
* @stop_seq: Flush up until this record
@@ -118,7 +118,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct console *con;
int cookie;
-@@ -1163,7 +1169,7 @@ static void __nbcon_atomic_flush_pending
+@@ -1170,7 +1176,7 @@ static void __nbcon_atomic_flush_pending
if (nbcon_seq_read(con) >= stop_seq)
continue;
@@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
console_srcu_read_unlock(cookie);
}
-@@ -1178,7 +1184,19 @@ static void __nbcon_atomic_flush_pending
+@@ -1185,7 +1191,19 @@ static void __nbcon_atomic_flush_pending
*/
void nbcon_atomic_flush_pending(void)
{
@@ -148,7 +148,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -1307,7 +1325,7 @@ void nbcon_driver_release(struct console
+@@ -1315,7 +1333,7 @@ void nbcon_device_release(struct console
cookie = console_srcu_read_lock();
if (console_is_usable(con, console_srcu_read_flags(con)) &&
prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
diff --git a/debian/patches-rt/0022-printk-Avoid-console_lock-dance-if-no-legacy-or-boot.patch b/debian/patches-rt/0022-printk-Avoid-console_lock-dance-if-no-legacy-or-boot.patch
index 1cfd27dd5d..01d254db59 100644
--- a/debian/patches-rt/0022-printk-Avoid-console_lock-dance-if-no-legacy-or-boot.patch
+++ b/debian/patches-rt/0022-printk-Avoid-console_lock-dance-if-no-legacy-or-boot.patch
@@ -1,8 +1,8 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 26 Sep 2023 12:44:07 +0000
-Subject: [PATCH 22/46] printk: Avoid console_lock dance if no legacy or boot
+Subject: [PATCH 22/48] printk: Avoid console_lock dance if no legacy or boot
consoles
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Currently the console lock is used to attempt legacy-type
printing even if there are no legacy or boot consoles registered.
@@ -70,7 +70,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* If trylock fails, someone else is doing the printing */
if (console_trylock())
console_unlock();
-@@ -3189,7 +3204,8 @@ void console_flush_on_panic(enum con_flu
+@@ -3201,7 +3216,8 @@ void console_flush_on_panic(enum con_flu
nbcon_atomic_flush_pending();
@@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -3538,6 +3554,7 @@ void register_console(struct console *ne
+@@ -3551,6 +3567,7 @@ void register_console(struct console *ne
if (newcon->flags & CON_NBCON) {
nbcon_init(newcon, init_seq);
} else {
@@ -88,15 +88,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
newcon->seq = init_seq;
}
-@@ -3613,6 +3630,7 @@ EXPORT_SYMBOL(register_console);
- /* Must be called under console_list_lock(). */
+@@ -3627,6 +3644,7 @@ EXPORT_SYMBOL(register_console);
static int unregister_console_locked(struct console *console)
{
+ bool use_device_lock = (console->flags & CON_NBCON) && console->write_atomic;
+ bool found_legacy_con = false;
bool found_boot_con = false;
unsigned long flags;
struct console *c;
-@@ -3680,9 +3698,13 @@ static int unregister_console_locked(str
+@@ -3694,9 +3712,13 @@ static int unregister_console_locked(str
for_each_console(c) {
if (c->flags & CON_BOOT)
found_boot_con = true;
@@ -110,7 +110,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return res;
}
-@@ -3843,22 +3865,34 @@ static bool __pr_flush(struct console *c
+@@ -3857,22 +3879,34 @@ static bool __pr_flush(struct console *c
seq = prb_next_reserve_seq(prb);
/* Flush the consoles so that records up to @seq are printed. */
@@ -154,7 +154,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
cookie = console_srcu_read_lock();
for_each_console_srcu(c) {
-@@ -3878,6 +3912,7 @@ static bool __pr_flush(struct console *c
+@@ -3892,6 +3926,7 @@ static bool __pr_flush(struct console *c
if (flags & CON_NBCON) {
printk_seq = nbcon_seq_read(c);
} else {
@@ -162,7 +162,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
printk_seq = c->seq;
}
-@@ -3889,7 +3924,8 @@ static bool __pr_flush(struct console *c
+@@ -3903,7 +3938,8 @@ static bool __pr_flush(struct console *c
if (diff != last_diff && reset_on_progress)
remaining_jiffies = timeout_jiffies;
@@ -172,7 +172,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Note: @diff is 0 if there are no usable consoles. */
if (diff == 0 || remaining_jiffies == 0)
-@@ -3959,6 +3995,7 @@ static void __wake_up_klogd(int val)
+@@ -3973,6 +4009,7 @@ static void __wake_up_klogd(int val)
return;
preempt_disable();
@@ -180,7 +180,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Guarantee any new records can be seen by tasks preparing to wait
* before this context checks if the wait queue is empty.
-@@ -3970,11 +4007,22 @@ static void __wake_up_klogd(int val)
+@@ -3984,11 +4021,22 @@ static void __wake_up_klogd(int val)
*
* This pairs with devkmsg_read:A and syslog_print:A.
*/
diff --git a/debian/patches-rt/0023-printk-Track-nbcon-consoles.patch b/debian/patches-rt/0023-printk-Track-nbcon-consoles.patch
index 07220890af..c8f9a2f9a2 100644
--- a/debian/patches-rt/0023-printk-Track-nbcon-consoles.patch
+++ b/debian/patches-rt/0023-printk-Track-nbcon-consoles.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Mon, 11 Dec 2023 09:36:52 +0000
-Subject: [PATCH 23/46] printk: Track nbcon consoles
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 23/48] printk: Track nbcon consoles
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Add a global flag @have_nbcon_console to identify if any nbcon
consoles are registered. This will be used in follow-up commits
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Specifies if a boot console is registered. If boot consoles are present,
* nbcon consoles cannot print simultaneously and must be synchronized by
* the console lock. This is because boot consoles and nbcon consoles may
-@@ -3552,6 +3557,7 @@ void register_console(struct console *ne
+@@ -3565,6 +3570,7 @@ void register_console(struct console *ne
init_seq = get_init_console_seq(newcon, bootcon_registered);
if (newcon->flags & CON_NBCON) {
@@ -36,15 +36,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
nbcon_init(newcon, init_seq);
} else {
have_legacy_console = true;
-@@ -3631,6 +3637,7 @@ EXPORT_SYMBOL(register_console);
- static int unregister_console_locked(struct console *console)
+@@ -3645,6 +3651,7 @@ static int unregister_console_locked(str
{
+ bool use_device_lock = (console->flags & CON_NBCON) && console->write_atomic;
bool found_legacy_con = false;
+ bool found_nbcon_con = false;
bool found_boot_con = false;
unsigned long flags;
struct console *c;
-@@ -3698,13 +3705,18 @@ static int unregister_console_locked(str
+@@ -3712,13 +3719,18 @@ static int unregister_console_locked(str
for_each_console(c) {
if (c->flags & CON_BOOT)
found_boot_con = true;
diff --git a/debian/patches-rt/0024-printk-Coordinate-direct-printing-in-panic.patch b/debian/patches-rt/0024-printk-Coordinate-direct-printing-in-panic.patch
index eb17ba5098..c318b1ee55 100644
--- a/debian/patches-rt/0024-printk-Coordinate-direct-printing-in-panic.patch
+++ b/debian/patches-rt/0024-printk-Coordinate-direct-printing-in-panic.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Wed, 22 Nov 2023 11:56:58 +0000
-Subject: [PATCH 24/46] printk: Coordinate direct printing in panic
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 24/48] printk: Coordinate direct printing in panic
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Perform printing by nbcon consoles on the panic CPU from the
printk() caller context in order to get panic messages printed
@@ -28,15 +28,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
-@@ -195,6 +195,7 @@ void show_regs_print_info(const char *lo
- extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold;
+@@ -196,6 +196,7 @@ extern asmlinkage void dump_stack_lvl(co
extern asmlinkage void dump_stack(void) __cold;
void printk_trigger_flush(void);
+ void console_replay_all(void);
+void printk_legacy_allow_panic_sync(void);
- extern bool nbcon_driver_try_acquire(struct console *con);
- extern void nbcon_driver_release(struct console *con);
+ extern bool nbcon_device_try_acquire(struct console *con);
+ extern void nbcon_device_release(struct console *con);
void nbcon_atomic_flush_unsafe(void);
-@@ -278,6 +279,10 @@ static inline void printk_trigger_flush(
+@@ -283,6 +284,10 @@ static inline void console_replay_all(vo
{
}
@@ -44,12 +44,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+{
+}
+
- static inline bool nbcon_driver_try_acquire(struct console *con)
+ static inline bool nbcon_device_try_acquire(struct console *con)
{
return false;
--- a/kernel/panic.c
+++ b/kernel/panic.c
-@@ -368,6 +368,8 @@ void panic(const char *fmt, ...)
+@@ -367,6 +367,8 @@ void panic(const char *fmt, ...)
panic_other_cpus_shutdown(_crash_kexec_post_notifiers);
diff --git a/debian/patches-rt/0025-printk-nbcon-Implement-emergency-sections.patch b/debian/patches-rt/0025-printk-nbcon-Implement-emergency-sections.patch
index 44327c280a..7370f8513f 100644
--- a/debian/patches-rt/0025-printk-nbcon-Implement-emergency-sections.patch
+++ b/debian/patches-rt/0025-printk-nbcon-Implement-emergency-sections.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 11 Sep 2023 15:21:57 +0000
-Subject: [PATCH 25/46] printk: nbcon: Implement emergency sections
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 25/48] printk: nbcon: Implement emergency sections
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
In emergency situations (something has gone wrong but the
system continues to operate), usually important information
@@ -26,8 +26,8 @@ Do not print if the current CPU is in an emergency state.
When exiting all emergency nesting, flush nbcon consoles
directly using their atomic callback. Legacy consoles are
-triggered for flushing via irq_work because it is not known
-if the context was safe for a trylock on the console lock.
+flushed directly if safe, otherwise they are triggered for
+flushing via irq_work.
Note that the emergency state is not system-wide. While one CPU
is in an emergency state, another CPU may continue to print
@@ -36,13 +36,15 @@ console messages.
Co-developed-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Thomas Gleixner (Intel) <tglx@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/console.h | 6 ++
- kernel/printk/internal.h | 11 ++++
- kernel/printk/nbcon.c | 116 +++++++++++++++++++++++++++++++++++++++++++++++
- kernel/printk/printk.c | 25 +++++-----
- 4 files changed, 146 insertions(+), 12 deletions(-)
+ include/linux/console.h | 6 ++
+ kernel/printk/internal.h | 13 ++++
+ kernel/printk/nbcon.c | 126 ++++++++++++++++++++++++++++++++++++++++++++
+ kernel/printk/printk.c | 25 ++++----
+ kernel/printk/printk_safe.c | 11 +++
+ 5 files changed, 168 insertions(+), 13 deletions(-)
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -65,7 +67,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; }
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
-@@ -153,6 +153,17 @@ static inline bool console_is_usable(str
+@@ -76,6 +76,8 @@ bool printk_percpu_data_ready(void);
+
+ void defer_console_output(void);
+
++bool is_printk_deferred(void);
++
+ u16 printk_parse_prefix(const char *text, int *level,
+ enum printk_info_flags *flags);
+ void console_lock_spinning_enable(void);
+@@ -153,6 +155,17 @@ static inline bool console_is_usable(str
#endif /* CONFIG_PRINTK */
@@ -131,7 +142,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return NBCON_PRIO_NORMAL;
}
-@@ -1200,6 +1229,93 @@ void nbcon_atomic_flush_unsafe(void)
+@@ -1207,6 +1236,103 @@ void nbcon_atomic_flush_unsafe(void)
}
/**
@@ -182,13 +193,23 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ */
+ if (*cpu_emergency_nesting == 1) {
+ nbcon_atomic_flush_pending();
++
++ /*
++ * Safely attempt to flush the legacy consoles in this
++ * context. Otherwise an irq_work context is triggered
++ * to handle it.
++ */
+ do_trigger_flush = true;
++ if (printing_via_unlock && !is_printk_deferred()) {
++ if (console_trylock()) {
++ do_trigger_flush = false;
++ console_unlock();
++ }
++ }
+ }
+
-+ (*cpu_emergency_nesting)--;
-+
-+ if (WARN_ON_ONCE(*cpu_emergency_nesting < 0))
-+ *cpu_emergency_nesting = 0;
++ if (!WARN_ON_ONCE(*cpu_emergency_nesting == 0))
++ (*cpu_emergency_nesting)--;
+
+ preempt_enable();
+
@@ -215,7 +236,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+ nbcon_atomic_flush_pending();
+
-+ if (printing_via_unlock && !in_nmi()) {
++ if (printing_via_unlock && !is_printk_deferred()) {
+ if (console_trylock())
+ console_unlock();
+ }
@@ -281,3 +302,30 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
preempt_enable();
}
+--- a/kernel/printk/printk_safe.c
++++ b/kernel/printk/printk_safe.c
+@@ -38,6 +38,15 @@ void __printk_deferred_exit(void)
+ __printk_safe_exit();
+ }
+
++bool is_printk_deferred(void)
++{
++ /*
++ * The per-CPU variable @printk_context can be read safely in any
++ * context. CPU migration is always disabled when it is set.
++ */
++ return (this_cpu_read(printk_context) || in_nmi());
++}
++
+ asmlinkage int vprintk(const char *fmt, va_list args)
+ {
+ #ifdef CONFIG_KGDB_KDB
+@@ -50,7 +59,7 @@ asmlinkage int vprintk(const char *fmt,
+ * Use the main logbuf even in NMI. But avoid calling console
+ * drivers that might have their own locks.
+ */
+- if (this_cpu_read(printk_context) || in_nmi())
++ if (is_printk_deferred())
+ return vprintk_deferred(fmt, args);
+
+ /* No obstacles. */
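
The emergency-exit path above flushes the legacy consoles directly only when that is safe: if is_printk_deferred() reports a deferred-printk or NMI-like context, or console_trylock() fails, the flush is handed off to irq_work instead. A condensed sketch of that decision follows, with stubs standing in for the kernel primitives.

/* Sketch of the "flush directly if safe, otherwise defer" decision. */
#include <stdbool.h>
#include <stdio.h>

static bool in_nmi_ctx;			/* stands in for in_nmi() */
static bool printk_context_set;		/* stands in for this_cpu_read(printk_context) */

static bool is_printk_deferred(void)
{
	return printk_context_set || in_nmi_ctx;
}

static bool console_trylock(void) { return true; }
static void console_unlock(void) { printf("flushed legacy consoles\n"); }
static void irq_work_trigger(void) { printf("deferred flush via irq_work\n"); }

static void emergency_exit_flush(void)
{
	bool do_trigger_flush = true;

	if (!is_printk_deferred() && console_trylock()) {
		do_trigger_flush = false;
		console_unlock();	/* unlocking also prints pending records */
	}
	if (do_trigger_flush)
		irq_work_trigger();
}

int main(void)
{
	emergency_exit_flush();		/* safe context: flush directly */

	in_nmi_ctx = true;
	emergency_exit_flush();		/* NMI-like context: defer */
	return 0;
}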
diff --git a/debian/patches-rt/0026-panic-Mark-emergency-section-in-warn.patch b/debian/patches-rt/0026-panic-Mark-emergency-section-in-warn.patch
index 37bf7b8fab..3dd6faca7e 100644
--- a/debian/patches-rt/0026-panic-Mark-emergency-section-in-warn.patch
+++ b/debian/patches-rt/0026-panic-Mark-emergency-section-in-warn.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 11 Sep 2023 15:53:04 +0000
-Subject: [PATCH 26/46] panic: Mark emergency section in warn
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 26/48] panic: Mark emergency section in warn
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Mark the full contents of __warn() as an emergency section. In
this section, the CPU will not perform console output for the
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/panic.c
+++ b/kernel/panic.c
-@@ -671,6 +671,8 @@ struct warn_args {
+@@ -670,6 +670,8 @@ struct warn_args {
void __warn(const char *file, int line, void *caller, unsigned taint,
struct pt_regs *regs, struct warn_args *args)
{
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
disable_trace_on_warning();
if (file)
-@@ -706,6 +708,8 @@ void __warn(const char *file, int line,
+@@ -705,6 +707,8 @@ void __warn(const char *file, int line,
/* Just a warning, don't kill lockdep. */
add_taint(taint, LOCKDEP_STILL_OK);
diff --git a/debian/patches-rt/0027-panic-Mark-emergency-section-in-oops.patch b/debian/patches-rt/0027-panic-Mark-emergency-section-in-oops.patch
index 4c382ac927..d5f87811d1 100644
--- a/debian/patches-rt/0027-panic-Mark-emergency-section-in-oops.patch
+++ b/debian/patches-rt/0027-panic-Mark-emergency-section-in-oops.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 19 Sep 2023 17:07:34 +0000
-Subject: [PATCH 27/46] panic: Mark emergency section in oops
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 27/48] panic: Mark emergency section in oops
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Mark an emergency section beginning with oops_enter() until the
end of oops_exit(). In this section, the CPU will not perform
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/panic.c
+++ b/kernel/panic.c
-@@ -638,6 +638,7 @@ bool oops_may_print(void)
+@@ -637,6 +637,7 @@ bool oops_may_print(void)
*/
void oops_enter(void)
{
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
tracing_off();
/* can't trust the integrity of the kernel anymore: */
debug_locks_off();
-@@ -660,6 +661,7 @@ void oops_exit(void)
+@@ -659,6 +660,7 @@ void oops_exit(void)
{
do_oops_enter_exit();
print_oops_end_marker();
diff --git a/debian/patches-rt/0028-rcu-Mark-emergency-sections-in-rcu-stalls.patch b/debian/patches-rt/0028-rcu-Mark-emergency-sections-in-rcu-stalls.patch
index 7a13ac0042..9e2e8b1934 100644
--- a/debian/patches-rt/0028-rcu-Mark-emergency-sections-in-rcu-stalls.patch
+++ b/debian/patches-rt/0028-rcu-Mark-emergency-sections-in-rcu-stalls.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Mon, 11 Sep 2023 15:53:39 +0000
-Subject: [PATCH 28/46] rcu: Mark emergency sections in rcu stalls
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 28/48] rcu: Mark emergency sections in rcu stalls
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Mark emergency sections wherever multiple lines of
rcu stall information are generated. In an emergency
@@ -12,6 +12,7 @@ This allows the full message block to be stored as
quickly as possible in the ringbuffer.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/rcu/tree_exp.h | 9 +++++++++
@@ -38,15 +39,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
j = jiffies;
rcu_stall_notifier_call_chain(RCU_STALL_NOTIFY_EXP, (void *)(j - jiffies_start));
trace_rcu_stall_warning(rcu_state.name, TPS("ExpeditedStall"));
-@@ -612,6 +616,7 @@ static void synchronize_rcu_expedited_wa
+@@ -620,10 +624,14 @@ static void synchronize_rcu_expedited_wa
+ preempt_disable(); // For smp_processor_id() in dump_cpu_task().
+ dump_cpu_task(cpu);
+ preempt_enable();
++ nbcon_cpu_emergency_flush();
}
- pr_cont("\n");
- }
-+ nbcon_cpu_emergency_flush();
- rcu_for_each_leaf_node(rnp) {
- for_each_leaf_node_possible_cpu(rnp, cpu) {
- mask = leaf_node_cpu_bit(rnp, cpu);
-@@ -624,6 +629,9 @@ static void synchronize_rcu_expedited_wa
rcu_exp_print_detail_task_stall_rnp(rnp);
}
jiffies_stall = 3 * rcu_exp_jiffies_till_stall_check() + 3;
@@ -82,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
-@@ -522,6 +524,7 @@ static void print_cpu_stall_info(int cpu
+@@ -523,6 +525,7 @@ static void print_cpu_stall_info(int cpu
falsepositive ? " (false positive?)" : "");
print_cpu_stat_info(cpu);
@@ -90,7 +88,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/* Complain about starvation of grace-period kthread. */
-@@ -604,6 +607,8 @@ static void print_other_cpu_stall(unsign
+@@ -605,6 +608,8 @@ static void print_other_cpu_stall(unsign
if (rcu_stall_is_suppressed())
return;
@@ -99,7 +97,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* OK, time to rat on our buddy...
* See Documentation/RCU/stallwarn.rst for info on how to debug
-@@ -655,6 +660,8 @@ static void print_other_cpu_stall(unsign
+@@ -657,6 +662,8 @@ static void print_other_cpu_stall(unsign
rcu_check_gp_kthread_expired_fqs_timer();
rcu_check_gp_kthread_starvation();
@@ -108,7 +106,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
panic_on_rcu_stall();
rcu_force_quiescent_state(); /* Kick them all. */
-@@ -675,6 +682,8 @@ static void print_cpu_stall(unsigned lon
+@@ -677,6 +684,8 @@ static void print_cpu_stall(unsigned lon
if (rcu_stall_is_suppressed())
return;
@@ -117,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* OK, time to rat on ourselves...
* See Documentation/RCU/stallwarn.rst for info on how to debug
-@@ -703,6 +712,8 @@ static void print_cpu_stall(unsigned lon
+@@ -706,6 +715,8 @@ static void print_cpu_stall(unsigned lon
jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
diff --git a/debian/patches-rt/0029-lockdep-Mark-emergency-sections-in-lockdep-splats.patch b/debian/patches-rt/0029-lockdep-Mark-emergency-sections-in-lockdep-splats.patch
index 1f3ab33c45..0141d9fd20 100644
--- a/debian/patches-rt/0029-lockdep-Mark-emergency-sections-in-lockdep-splats.patch
+++ b/debian/patches-rt/0029-lockdep-Mark-emergency-sections-in-lockdep-splats.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Mon, 18 Sep 2023 20:27:41 +0000
-Subject: [PATCH 29/46] lockdep: Mark emergency sections in lockdep splats
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 29/48] lockdep: Mark emergency sections in lockdep splats
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Mark emergency sections wherever multiple lines of
lock debugging output are generated. In an emergency
@@ -18,6 +18,7 @@ functions can also be called in non-emergency
situations (such as sysrq).
Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/locking/lockdep.c | 84 +++++++++++++++++++++++++++++++++++++++++++++--
diff --git a/debian/patches-rt/0030-printk-Rename-console_replay_all-and-update-context.patch b/debian/patches-rt/0030-printk-Rename-console_replay_all-and-update-context.patch
new file mode 100644
index 0000000000..00a80c8832
--- /dev/null
+++ b/debian/patches-rt/0030-printk-Rename-console_replay_all-and-update-context.patch
@@ -0,0 +1,77 @@
+From: Sreenath Vijayan <sreenath.vijayan@sony.com>
+Date: Thu, 30 May 2024 13:15:47 +0530
+Subject: [PATCH 30/48] printk: Rename console_replay_all() and update context
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+Rename console_replay_all() to console_try_replay_all() to make
+clear that the implementation is best effort. Also, the function
+should not be called in NMI context as it takes locks, so update
+the comment in code.
+
+Fixes: 693f75b91a91 ("printk: Add function to replay kernel log on consoles")
+Fixes: 1b743485e27f ("tty/sysrq: Replay kernel log messages on consoles via sysrq")
+Suggested-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Shimoyashiki Taichi <taichi.shimoyashiki@sony.com>
+Signed-off-by: Sreenath Vijayan <sreenath.vijayan@sony.com>
+Link: https://lore.kernel.org/r/Zlguq/wU21Z8MqI4@sreenath.vijayan@sony.com
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/sysrq.c | 2 +-
+ include/linux/printk.h | 4 ++--
+ kernel/printk/printk.c | 6 +++---
+ 3 files changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/tty/sysrq.c
++++ b/drivers/tty/sysrq.c
+@@ -452,7 +452,7 @@ static const struct sysrq_key_op sysrq_u
+
+ static void sysrq_handle_replay_logs(u8 key)
+ {
+- console_replay_all();
++ console_try_replay_all();
+ }
+ static struct sysrq_key_op sysrq_replay_logs_op = {
+ .handler = sysrq_handle_replay_logs,
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -195,7 +195,7 @@ void show_regs_print_info(const char *lo
+ extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold;
+ extern asmlinkage void dump_stack(void) __cold;
+ void printk_trigger_flush(void);
+-void console_replay_all(void);
++void console_try_replay_all(void);
+ void printk_legacy_allow_panic_sync(void);
+ extern bool nbcon_device_try_acquire(struct console *con);
+ extern void nbcon_device_release(struct console *con);
+@@ -280,7 +280,7 @@ static inline void printk_trigger_flush(
+ {
+ }
+
+-static inline void console_replay_all(void)
++static inline void console_try_replay_all(void)
+ {
+ }
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -4460,15 +4460,15 @@ void kmsg_dump_rewind(struct kmsg_dump_i
+ EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
+
+ /**
+- * console_replay_all - replay kernel log on consoles
++ * console_try_replay_all - try to replay kernel log on consoles
+ *
+ * Try to obtain lock on console subsystem and replay all
+ * available records in printk buffer on the consoles.
+ * Does nothing if lock is not obtained.
+ *
+- * Context: Any context.
++ * Context: Any, except for NMI.
+ */
+-void console_replay_all(void)
++void console_try_replay_all(void)
+ {
+ if (console_trylock()) {
+ __console_rewind_all();
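
The hunk above is the whole contract: take the console lock opportunistically, rewind, and let console_unlock() do the flushing. A minimal sketch of that best-effort pattern, with the invented name replay_logs_best_effort() standing in for the real function (console_trylock() takes locks that are not NMI-safe, which is why the kernel-doc context was narrowed):

    #include <linux/console.h>

    static void replay_logs_best_effort(void)
    {
        if (!console_trylock())
            return; /* lock contended: best effort means give up */

        /*
         * The real function rewinds all consoles here via the
         * printk-internal __console_rewind_all(); the records are
         * then flushed as part of console_unlock().
         */
        console_unlock();
    }
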
diff --git a/debian/patches-rt/0030-printk-nbcon-Introduce-printing-kthreads.patch b/debian/patches-rt/0031-printk-nbcon-Introduce-printing-kthreads.patch
index 85792a75e2..8557ac1ea9 100644
--- a/debian/patches-rt/0030-printk-nbcon-Introduce-printing-kthreads.patch
+++ b/debian/patches-rt/0031-printk-nbcon-Introduce-printing-kthreads.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 22 Sep 2023 14:12:21 +0000
-Subject: [PATCH 30/46] printk: nbcon: Introduce printing kthreads
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 31/48] printk: nbcon: Introduce printing kthreads
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Provide the main implementation for running a printer kthread
per nbcon console that is takeover/handover aware.
@@ -14,8 +14,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
include/linux/console.h | 26 ++++++
kernel/printk/internal.h | 26 ++++++
kernel/printk/nbcon.c | 196 +++++++++++++++++++++++++++++++++++++++++++++--
- kernel/printk/printk.c | 31 +++++++
- 4 files changed, 271 insertions(+), 8 deletions(-)
+ kernel/printk/printk.c | 34 ++++++++
+ 4 files changed, 275 insertions(+), 7 deletions(-)
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
@@ -324,6 +325,8 @@ struct nbcon_write_context {
* @nbcon_seq: Sequence number of the next record for nbcon to print
- * @nbcon_driver_ctxt: Context available for driver non-printing operations
+ * @nbcon_device_ctxt: Context available for non-printing operations
* @pbufs: Pointer to nbcon private buffer
+ * @kthread: Printer kthread for this console
+ * @rcuwait: RCU-safe wait object for @kthread waking
@@ -42,11 +42,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
+ * @write_thread:
+ *
-+ * NBCON callback to write out text in task context. (Optional)
++ * NBCON callback to write out text in task context.
+ *
-+ * This callback is called with the console already acquired. Any
-+ * additional driver synchronization should have been performed by
-+ * device_lock().
++ * This callback is called after device_lock() and with the nbcon
++ * console acquired. Any necessary driver synchronization should have
++ * been performed by the device_lock() callback.
+ *
+ * This callback is always called from task context but with migration
+ * disabled.
@@ -55,8 +55,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ * sections applies as with write_atomic(). The difference between
+ * this callback and write_atomic() is that this callback is used
+ * during normal operation and is always called from task context.
-+ * This provides drivers with a relatively relaxed locking context
-+ * for synchronizing output to the hardware.
++ * This allows drivers to operate in their own locking context for
++ * synchronizing output to the hardware.
+ */
+ void (*write_thread)(struct console *con, struct nbcon_write_context *wctxt);
+
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* NBCON callback to begin synchronization with driver code.
@@ -420,6 +444,8 @@ struct console {
atomic_long_t __private nbcon_seq;
- struct nbcon_context __private nbcon_driver_ctxt;
+ struct nbcon_context __private nbcon_device_ctxt;
struct printk_buffers *pbufs;
+ struct task_struct *kthread;
+ struct rcuwait rcuwait;
@@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_LOCKDEP
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
-@@ -90,6 +90,7 @@ enum nbcon_prio nbcon_get_default_prio(v
+@@ -92,6 +92,7 @@ enum nbcon_prio nbcon_get_default_prio(v
void nbcon_atomic_flush_pending(void);
bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
int cookie);
@@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Check if the given console is currently capable and allowed to print
-@@ -108,6 +109,8 @@ static inline bool console_is_usable(str
+@@ -110,6 +111,8 @@ static inline bool console_is_usable(str
if (flags & CON_NBCON) {
if (!con->write_atomic)
return false;
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
} else {
if (!con->write)
return false;
-@@ -124,12 +127,35 @@ static inline bool console_is_usable(str
+@@ -126,12 +129,35 @@ static inline bool console_is_usable(str
return true;
}
@@ -142,7 +142,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* nbcon_emit_next_record - Emit a record in the acquired context
* @wctxt: The write context that will be handed to the write function
-+ * @use_atomic: True if the write_atomic callback is to be used
++ * @use_atomic: True if the write_atomic() callback is to be used
*
* Return: True if this context still owns the console. False if
* ownership was handed over or taken.
@@ -171,15 +171,14 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
} else {
/*
* This function should never be called for legacy consoles.
-@@ -936,6 +944,118 @@ static bool nbcon_emit_next_record(struc
+@@ -936,6 +944,120 @@ static bool nbcon_emit_next_record(struc
return nbcon_context_exit_unsafe(ctxt);
}
+/**
+ * nbcon_kthread_should_wakeup - Check whether a printer thread should wakeup
+ * @con: Console to operate on
-+ * @ctxt: The acquire context that contains the state
-+ * at console_acquire()
++ * @ctxt: The nbcon context from nbcon_context_try_acquire()
+ *
+ * Return: True if the thread should shutdown or if the console is
+ * allowed to print and a record is available. False otherwise.
@@ -213,6 +212,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+/**
+ * nbcon_kthread_func - The printer thread function
+ * @__console: Console to operate on
++ *
++ * Return: 0
+ */
+static int nbcon_kthread_func(void *__console)
+{
@@ -222,7 +223,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ .ctxt.prio = NBCON_PRIO_NORMAL,
+ };
+ struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
-+ unsigned long flags;
+ short con_flags;
+ bool backlog;
+ int cookie;
@@ -258,7 +258,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ con_flags = console_srcu_read_flags(con);
+
+ if (console_is_usable(con, con_flags)) {
-+ con->device_lock(con, &flags);
++ unsigned long lock_flags;
++
++ con->device_lock(con, &lock_flags);
+
+ /*
+ * Ensure this stays on the CPU to make handover and
@@ -277,7 +279,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ }
+ }
+
-+ con->device_unlock(con, flags);
++ con->device_unlock(con, lock_flags);
+ }
+
+ console_srcu_read_unlock(cookie);
@@ -290,7 +292,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Track the nbcon emergency nesting per CPU. */
static DEFINE_PER_CPU(unsigned int, nbcon_pcpu_emergency_nesting);
static unsigned int early_nbcon_pcpu_emergency_nesting __initdata;
-@@ -1012,7 +1132,7 @@ static bool nbcon_atomic_emit_one(struct
+@@ -1012,7 +1134,7 @@ static bool nbcon_atomic_emit_one(struct
* The higher priority printing context takes over responsibility
* to print the pending records.
*/
@@ -299,7 +301,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return false;
nbcon_context_release(ctxt);
-@@ -1113,7 +1233,7 @@ static int __nbcon_atomic_flush_pending_
+@@ -1113,7 +1235,7 @@ static int __nbcon_atomic_flush_pending_
* handed over or taken over. In both cases the context is no
* longer valid.
*/
@@ -308,23 +310,21 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return -EAGAIN;
if (!ctxt->backlog) {
-@@ -1159,11 +1279,11 @@ static void nbcon_atomic_flush_pending_c
- local_irq_restore(flags);
+@@ -1172,10 +1294,10 @@ static void nbcon_atomic_flush_pending_c
/*
-- * If flushing was successful but more records are available, this
+ * If flushing was successful but more records are available, this
- * context must flush those remaining records because there is no
- * other context that will do it.
-+ * If flushing was successful but more records are available this
+ * context must flush those remaining records if the printer thread
-+ * is not available to do it.
++ * is not available to do it.
*/
-- if (!err && prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
-+ if (!err && !con->kthread && prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
+- if (prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
++ if (!con->kthread && prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
stop_seq = prb_next_reserve_seq(prb);
goto again;
}
-@@ -1315,6 +1435,63 @@ void nbcon_cpu_emergency_flush(void)
+@@ -1332,6 +1454,63 @@ void nbcon_cpu_emergency_flush(void)
}
}
@@ -388,7 +388,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* nbcon_alloc - Allocate buffers needed by the nbcon console
* @con: Console to allocate buffers for
-@@ -1360,6 +1537,7 @@ void nbcon_init(struct console *con, u64
+@@ -1377,6 +1556,7 @@ void nbcon_init(struct console *con, u64
/* nbcon_alloc() must have been called and successful! */
BUG_ON(!con->pbufs);
@@ -396,7 +396,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
nbcon_seq_force(con, init_seq);
nbcon_state_set(con, &state);
}
-@@ -1372,6 +1550,7 @@ void nbcon_free(struct console *con)
+@@ -1389,6 +1569,7 @@ void nbcon_free(struct console *con)
{
struct nbcon_state state = { };
@@ -404,7 +404,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
nbcon_state_set(con, &state);
/* Boot consoles share global printk buffers. */
-@@ -1440,6 +1619,7 @@ void nbcon_driver_release(struct console
+@@ -1458,6 +1639,7 @@ void nbcon_device_release(struct console
*/
cookie = console_srcu_read_lock();
if (console_is_usable(con, console_srcu_read_flags(con)) &&
@@ -452,15 +452,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!console_is_usable(con, flags))
continue;
any_usable = true;
-@@ -3314,9 +3331,23 @@ EXPORT_SYMBOL(console_stop);
+@@ -3326,9 +3343,26 @@ EXPORT_SYMBOL(console_stop);
void console_start(struct console *console)
{
+ short flags;
++ int cookie;
+
console_list_lock();
console_srcu_write_flags(console, console->flags | CON_ENABLED);
-+ flags = console->flags;
console_list_unlock();
+
+ /*
@@ -470,8 +470,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ */
+ synchronize_srcu(&console_srcu);
+
++ cookie = console_srcu_read_lock();
++ flags = console_srcu_read_flags(console);
+ if (flags & CON_NBCON)
+ nbcon_kthread_wake(console);
++ console_srcu_read_unlock(cookie);
+
__pr_flush(console, 1000, true);
}
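
The kthread body introduced above parks on the console's rcuwait and re-checks its wakeup condition after every wake. A stripped-down sketch of that wait/wake pattern, assuming illustrative demo_* names (only the rcuwait/kthread APIs are real):

    #include <linux/err.h>
    #include <linux/init.h>
    #include <linux/kthread.h>
    #include <linux/rcuwait.h>
    #include <linux/sched.h>

    static struct rcuwait demo_wait;
    static bool demo_work_pending;

    static int demo_printer_thread(void *unused)
    {
        for (;;) {
            /* Sleep until kicked; the condition is re-evaluated. */
            rcuwait_wait_event(&demo_wait,
                               demo_work_pending || kthread_should_stop(),
                               TASK_INTERRUPTIBLE);
            if (kthread_should_stop())
                break;
            demo_work_pending = false;
            /* ...acquire the console context and emit records... */
        }
        return 0;
    }

    static int __init demo_start(void)
    {
        rcuwait_init(&demo_wait);
        return PTR_ERR_OR_ZERO(kthread_run(demo_printer_thread,
                                           NULL, "pr/demo"));
    }

The rcuwait object is what makes the wakeup side safe to call from atomic context, which the printer-thread-wakeup patch further down relies on.
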
diff --git a/debian/patches-rt/0031-printk-Atomic-print-in-printk-context-on-shutdown.patch b/debian/patches-rt/0032-printk-Atomic-print-in-printk-context-on-shutdown.patch
index 7c515605a7..6e162919ea 100644
--- a/debian/patches-rt/0031-printk-Atomic-print-in-printk-context-on-shutdown.patch
+++ b/debian/patches-rt/0032-printk-Atomic-print-in-printk-context-on-shutdown.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Mon, 23 Oct 2023 17:43:48 +0000
-Subject: [PATCH 31/46] printk: Atomic print in printk context on shutdown
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 32/48] printk: Atomic print in printk context on shutdown
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
For nbcon consoles, normally the printing is handled by the
dedicated console printing threads. However, on shutdown the
@@ -14,9 +14,31 @@ perform atomic printing from the printk() caller context.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
+ kernel/printk/nbcon.c | 5 +++--
kernel/printk/printk.c | 7 ++++++-
- 1 file changed, 6 insertions(+), 1 deletion(-)
+ 2 files changed, 9 insertions(+), 3 deletions(-)
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -1297,7 +1297,8 @@ static void nbcon_atomic_flush_pending_c
+ * context must flush those remaining records if the printer thread
+ * is not available to do it.
+ */
+- if (!con->kthread && prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
++ if ((!con->kthread || (system_state > SYSTEM_RUNNING)) &&
++ prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
+ stop_seq = prb_next_reserve_seq(prb);
+ goto again;
+ }
+@@ -1639,7 +1640,7 @@ void nbcon_device_release(struct console
+ */
+ cookie = console_srcu_read_lock();
+ if (console_is_usable(con, console_srcu_read_flags(con)) &&
+- !con->kthread &&
++ (!con->kthread || (system_state > SYSTEM_RUNNING)) &&
+ prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
+ __nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb), false);
+ }
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2389,12 +2389,17 @@ asmlinkage int vprintk_emit(int facility
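
Both hunks gate on the same idea: the printer thread can only be trusted while the scheduler is fully alive, so late in shutdown the printk() caller must flush atomically itself. Reduced to a predicate (a sketch; demo_* is illustrative and con->kthread is the field added earlier in this series):

    #include <linux/console.h>
    #include <linux/kernel.h> /* system_state */

    static bool demo_flush_in_caller(const struct console *con)
    {
        /* No thread yet, or threads may never be scheduled again. */
        return !con->kthread || system_state > SYSTEM_RUNNING;
    }
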
diff --git a/debian/patches-rt/0033-printk-nbcon-Fix-nbcon_cpu_emergency_flush-when-pree.patch b/debian/patches-rt/0033-printk-nbcon-Fix-nbcon_cpu_emergency_flush-when-pree.patch
new file mode 100644
index 0000000000..af8ccf56d5
--- /dev/null
+++ b/debian/patches-rt/0033-printk-nbcon-Fix-nbcon_cpu_emergency_flush-when-pree.patch
@@ -0,0 +1,46 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Wed, 5 Jun 2024 09:25:30 +0000
+Subject: [PATCH 33/48] printk: nbcon: Fix nbcon_cpu_emergency_flush() when
+ preemptible
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+nbcon_cpu_emergency_flush() can be called in a preemptible
+context. In that case the CPU is not in an emergency state.
+However, in order to see that the CPU is not in an emergency
+state (accessing the per-cpu variable), preemption must be
+disabled.
+
+Disable preemption when checking the CPU state.
+
+Reported-by: Juri Lelli <juri.lelli@redhat.com>
+Closes: https://lore.kernel.org/aqkcpca4vgadxc3yzcu74xwq3grslj5m43f3eb5fcs23yo2gy4@gcsnqcts5tos
+Fixes: 46a1379208b7 ("printk: nbcon: Implement emergency sections")
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/nbcon.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -1443,8 +1443,19 @@ void nbcon_cpu_emergency_exit(void)
+ */
+ void nbcon_cpu_emergency_flush(void)
+ {
++ bool is_emergency;
++
++ /*
++ * If this context is not an emergency context, preemption might be
++ * enabled. To be sure, disable preemption when checking if this is
++ * an emergency context.
++ */
++ preempt_disable();
++ is_emergency = (*nbcon_get_cpu_emergency_nesting() != 0);
++ preempt_enable();
++
+ /* The explicit flush is needed only in the emergency context. */
+- if (*(nbcon_get_cpu_emergency_nesting()) == 0)
++ if (!is_emergency)
+ return;
+
+ nbcon_atomic_flush_pending();
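
The fix is the classic per-CPU access pattern: a per-CPU counter may only be read with preemption disabled, otherwise the task can migrate and act on another CPU's state. In isolation (sketch, illustrative names):

    #include <linux/percpu.h>
    #include <linux/preempt.h>

    static DEFINE_PER_CPU(unsigned int, demo_emergency_nesting);

    static bool demo_this_cpu_in_emergency(void)
    {
        bool is_emergency;

        preempt_disable(); /* pin to this CPU for the read */
        is_emergency = this_cpu_read(demo_emergency_nesting) != 0;
        preempt_enable();

        return is_emergency;
    }
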
diff --git a/debian/patches-rt/0032-printk-nbcon-Add-context-to-console_is_usable.patch b/debian/patches-rt/0034-printk-nbcon-Add-context-to-console_is_usable.patch
index 6c74d8e079..93272372ad 100644
--- a/debian/patches-rt/0032-printk-nbcon-Add-context-to-console_is_usable.patch
+++ b/debian/patches-rt/0034-printk-nbcon-Add-context-to-console_is_usable.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 26 Sep 2023 14:43:30 +0000
-Subject: [PATCH 32/46] printk: nbcon: Add context to console_is_usable()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 34/48] printk: nbcon: Add context to console_is_usable()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
The nbcon consoles have two callbacks to be used for different
contexts. In order to determine if an nbcon console is usable,
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
-@@ -98,7 +98,7 @@ void nbcon_kthread_create(struct console
+@@ -100,7 +100,7 @@ void nbcon_kthread_create(struct console
* which can also play a role in deciding if @con can be used to print
* records.
*/
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
if (!(flags & CON_ENABLED))
return false;
-@@ -107,10 +107,13 @@ static inline bool console_is_usable(str
+@@ -109,10 +109,13 @@ static inline bool console_is_usable(str
return false;
if (flags & CON_NBCON) {
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
} else {
if (!con->write)
return false;
-@@ -175,7 +178,8 @@ static inline void nbcon_atomic_flush_pe
+@@ -177,7 +180,8 @@ static inline void nbcon_atomic_flush_pe
static inline bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
int cookie) { return false; }
@@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/nbcon.c
+++ b/kernel/printk/nbcon.c
-@@ -968,7 +968,7 @@ static bool nbcon_kthread_should_wakeup(
+@@ -967,7 +967,7 @@ static bool nbcon_kthread_should_wakeup(
cookie = console_srcu_read_lock();
flags = console_srcu_read_flags(con);
@@ -74,10 +74,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- if (console_is_usable(con, con_flags)) {
+ if (console_is_usable(con, con_flags, false)) {
- con->device_lock(con, &flags);
+ unsigned long lock_flags;
- /*
-@@ -1312,7 +1312,7 @@ static void __nbcon_atomic_flush_pending
+ con->device_lock(con, &lock_flags);
+@@ -1322,7 +1322,7 @@ static void __nbcon_atomic_flush_pending
if (!(flags & CON_NBCON))
continue;
@@ -86,13 +86,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
if (nbcon_seq_read(con) >= stop_seq)
-@@ -1618,7 +1618,7 @@ void nbcon_driver_release(struct console
+@@ -1650,7 +1650,7 @@ void nbcon_device_release(struct console
* the console is usable throughout flushing.
*/
cookie = console_srcu_read_lock();
- if (console_is_usable(con, console_srcu_read_flags(con)) &&
+ if (console_is_usable(con, console_srcu_read_flags(con), true) &&
- !con->kthread &&
+ (!con->kthread || (system_state > SYSTEM_RUNNING)) &&
prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
__nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb), false);
--- a/kernel/printk/printk.c
@@ -106,7 +106,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
any_usable = true;
-@@ -4001,8 +4001,10 @@ static bool __pr_flush(struct console *c
+@@ -4018,8 +4018,10 @@ static bool __pr_flush(struct console *c
* that they make forward progress, so only increment
* @diff for usable consoles.
*/
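
The new use_atomic argument boils down to checking the callback that the calling context is actually allowed to invoke. Condensed (a sketch; the real check also considers CON_ENABLED and the other conditions shown above):

    #include <linux/console.h>

    static bool demo_nbcon_usable(const struct console *con, bool use_atomic)
    {
        if (use_atomic)
            return con->write_atomic != NULL;

        /* Task context can rely on the threaded callback. */
        return con->write_thread != NULL;
    }
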
diff --git a/debian/patches-rt/0033-printk-nbcon-Add-printer-thread-wakeups.patch b/debian/patches-rt/0035-printk-nbcon-Add-printer-thread-wakeups.patch
index d560b6b7cf..c12f3e9f9f 100644
--- a/debian/patches-rt/0033-printk-nbcon-Add-printer-thread-wakeups.patch
+++ b/debian/patches-rt/0035-printk-nbcon-Add-printer-thread-wakeups.patch
@@ -1,17 +1,18 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 26 Sep 2023 13:03:52 +0000
-Subject: [PATCH 33/46] printk: nbcon: Add printer thread wakeups
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 35/48] printk: nbcon: Add printer thread wakeups
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
-Add a function to wakeup the printer threads. Use the new function
-when:
+Add a function to wake up the printer threads. The printer threads
+are woken when:
- - records are added to the printk ringbuffer
+ - a record is added to the printk ringbuffer
- consoles are resumed
- triggered via printk_trigger_flush()
+ - consoles should be replayed via sysrq
-The actual waking is performed via irq_work so that the wakeup can
-be triggered from any context.
+The actual waking is performed via irq_work so that the function
+can be called from any context.
Co-developed-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: John Ogness <john.ogness@linutronix.de>
@@ -21,8 +22,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
include/linux/console.h | 3 ++
kernel/printk/internal.h | 1
kernel/printk/nbcon.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++
- kernel/printk/printk.c | 7 +++++
- 4 files changed, 67 insertions(+)
+ kernel/printk/printk.c | 8 ++++++
+ 4 files changed, 68 insertions(+)
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -52,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_LOCKDEP
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
-@@ -91,6 +91,7 @@ void nbcon_atomic_flush_pending(void);
+@@ -93,6 +93,7 @@ void nbcon_atomic_flush_pending(void);
bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
int cookie);
void nbcon_kthread_create(struct console *con);
@@ -62,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Check if the given console is currently capable and allowed to print
--- a/kernel/printk/nbcon.c
+++ b/kernel/printk/nbcon.c
-@@ -1056,6 +1056,61 @@ static int nbcon_kthread_func(void *__co
+@@ -1058,6 +1058,61 @@ static int nbcon_kthread_func(void *__co
goto wait_for_event;
}
@@ -124,7 +125,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Track the nbcon emergency nesting per CPU. */
static DEFINE_PER_CPU(unsigned int, nbcon_pcpu_emergency_nesting);
static unsigned int early_nbcon_pcpu_emergency_nesting __initdata;
-@@ -1538,6 +1593,7 @@ void nbcon_init(struct console *con, u64
+@@ -1569,6 +1624,7 @@ void nbcon_init(struct console *con, u64
BUG_ON(!con->pbufs);
rcuwait_init(&con->rcuwait);
@@ -154,7 +155,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
cookie = console_srcu_read_lock();
for_each_console_srcu(con) {
flags = console_srcu_read_flags(con);
-@@ -4161,6 +4167,7 @@ void defer_console_output(void)
+@@ -4178,6 +4184,7 @@ void defer_console_output(void)
void printk_trigger_flush(void)
{
@@ -162,3 +163,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
defer_console_output();
}
+@@ -4513,6 +4520,7 @@ void console_try_replay_all(void)
+ {
+ if (console_trylock()) {
+ __console_rewind_all();
++ nbcon_wake_threads();
+ /* Consoles are flushed as part of console_unlock(). */
+ console_unlock();
+ }
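
printk() can run in contexts where waking a task directly is forbidden (including NMI), so the wake is bounced through irq_work, which is queueable from anywhere and runs its callback in hard irq context. The shape of that indirection (sketch; demo_* illustrative, rcuwait_init() elided):

    #include <linux/irq_work.h>
    #include <linux/rcuwait.h>

    static struct rcuwait demo_wait;

    static void demo_irq_work_fn(struct irq_work *work)
    {
        /* Hard irq context: waking the kthread is safe here. */
        rcuwait_wake_up(&demo_wait);
    }

    static struct irq_work demo_wake_work = IRQ_WORK_INIT(demo_irq_work_fn);

    static void demo_kick_printer(void)
    {
        /* Safe from any context; the wake runs once irqs fire. */
        irq_work_queue(&demo_wake_work);
    }
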
diff --git a/debian/patches-rt/0034-printk-nbcon-Stop-threads-on-shutdown-reboot.patch b/debian/patches-rt/0036-printk-nbcon-Stop-threads-on-shutdown-reboot.patch
index 83876c8f29..f043bc8be2 100644
--- a/debian/patches-rt/0034-printk-nbcon-Stop-threads-on-shutdown-reboot.patch
+++ b/debian/patches-rt/0036-printk-nbcon-Stop-threads-on-shutdown-reboot.patch
@@ -1,12 +1,12 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 26 Sep 2023 13:04:15 +0000
-Subject: [PATCH 34/46] printk: nbcon: Stop threads on shutdown/reboot
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 36/48] printk: nbcon: Stop threads on shutdown/reboot
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Register a syscore_ops shutdown function to stop all threaded
-printers on shutdown/reboot. This allows printk to transition back
-to atomic printing in order to provide a robust mechanism for
-outputting the final messages.
+printers on shutdown/reboot. This allows printk to cleanly
+transition back to atomic printing in order to provide a robust
+mechanism for outputting the final messages.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
@@ -24,10 +24,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <linux/types.h>
#include "internal.h"
#include "printk_ringbuffer.h"
-@@ -1682,3 +1683,33 @@ void nbcon_driver_release(struct console
+@@ -1714,3 +1715,33 @@ void nbcon_device_release(struct console
console_srcu_read_unlock(cookie);
}
- EXPORT_SYMBOL_GPL(nbcon_driver_release);
+ EXPORT_SYMBOL_GPL(nbcon_device_release);
+
+/**
+ * printk_kthread_shutdown - shutdown all threaded printers
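
syscore_ops is the natural hook for this: its ->shutdown callback runs late in reboot or power-off, after tasks have been migrated to the reboot CPU. Registering such a hook looks roughly like this (sketch; demo_* names are illustrative):

    #include <linux/init.h>
    #include <linux/syscore_ops.h>

    static void demo_printk_shutdown(void)
    {
        /* Stop each printer kthread; atomic printing takes over. */
    }

    static struct syscore_ops demo_printk_syscore_ops = {
        .shutdown = demo_printk_shutdown,
    };

    static int __init demo_register_syscore(void)
    {
        register_syscore_ops(&demo_printk_syscore_ops);
        return 0;
    }
    subsys_initcall(demo_register_syscore);
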
diff --git a/debian/patches-rt/0035-printk-nbcon-Start-printing-threads.patch b/debian/patches-rt/0037-printk-nbcon-Start-printing-threads.patch
index c475c1f51c..24f0a2c9c0 100644
--- a/debian/patches-rt/0035-printk-nbcon-Start-printing-threads.patch
+++ b/debian/patches-rt/0037-printk-nbcon-Start-printing-threads.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 5 Dec 2023 14:09:31 +0000
-Subject: [PATCH 35/46] printk: nbcon: Start printing threads
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 37/48] printk: nbcon: Start printing threads
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
If there are no boot consoles, the printing threads are started
in early_initcall.
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
__printf(4, 0)
int vprintk_store(int facility, int level,
-@@ -159,6 +160,7 @@ static inline void nbcon_kthread_wake(st
+@@ -161,6 +162,7 @@ static inline void nbcon_kthread_wake(st
static inline void nbcon_kthread_wake(struct console *con) { }
static inline void nbcon_kthread_create(struct console *con) { }
@@ -52,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* nbcon_context_try_acquire_direct - Try to acquire directly
* @ctxt: The context of the caller
-@@ -1522,7 +1524,7 @@ void nbcon_kthread_create(struct console
+@@ -1553,7 +1555,7 @@ void nbcon_kthread_create(struct console
if (!(con->flags & CON_NBCON) || !con->write_thread)
return;
@@ -61,7 +61,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return;
/*
-@@ -1548,6 +1550,19 @@ void nbcon_kthread_create(struct console
+@@ -1579,6 +1581,19 @@ void nbcon_kthread_create(struct console
sched_set_normal(con->kthread, -20);
}
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* nbcon_alloc - Allocate buffers needed by the nbcon console
* @con: Console to allocate buffers for
-@@ -1597,6 +1612,7 @@ void nbcon_init(struct console *con, u64
+@@ -1628,6 +1643,7 @@ void nbcon_init(struct console *con, u64
init_irq_work(&con->irq_work, nbcon_irq_work);
nbcon_seq_force(con, init_seq);
nbcon_state_set(con, &state);
@@ -109,15 +109,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
(system_state > SYSTEM_RUNNING)) {
nbcon_atomic_flush_pending();
}
-@@ -3725,6 +3729,7 @@ EXPORT_SYMBOL(register_console);
- /* Must be called under console_list_lock(). */
+@@ -3742,6 +3746,7 @@ EXPORT_SYMBOL(register_console);
static int unregister_console_locked(struct console *console)
{
+ bool use_device_lock = (console->flags & CON_NBCON) && console->write_atomic;
+ bool is_boot_con = (console->flags & CON_BOOT);
bool found_legacy_con = false;
bool found_nbcon_con = false;
bool found_boot_con = false;
-@@ -3807,6 +3812,15 @@ static int unregister_console_locked(str
+@@ -3824,6 +3829,15 @@ static int unregister_console_locked(str
if (!found_nbcon_con)
have_nbcon_console = found_nbcon_con;
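
The ordering problem being solved: kthreads cannot be created before kthreadd and the scheduler are up, and threaded printers must not fight with still-registered boot consoles. The resulting guard reduces to (sketch; demo_* illustrative, con->kthread from this series):

    #include <linux/console.h>
    #include <linux/init.h>

    static bool demo_threads_enabled; /* flipped in early_initcall */

    static void demo_kthread_create(struct console *con)
    {
        /* Too early, or this console already has its printer. */
        if (!demo_threads_enabled || con->kthread)
            return;

        /* kthread_run() plus priority setup happens here. */
    }

    static int __init demo_setup_threads(void)
    {
        demo_threads_enabled = true;
        /* ...then walk the console list and create threads... */
        return 0;
    }
    early_initcall(demo_setup_threads);
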
diff --git a/debian/patches-rt/0036-printk-Provide-helper-for-message-prepending.patch b/debian/patches-rt/0038-printk-Provide-helper-for-message-prepending.patch
index 97530d96f9..91ab6355fc 100644
--- a/debian/patches-rt/0036-printk-Provide-helper-for-message-prepending.patch
+++ b/debian/patches-rt/0038-printk-Provide-helper-for-message-prepending.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Thu, 28 Mar 2024 13:29:10 +0000
-Subject: [PATCH 36/46] printk: Provide helper for message prepending
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 38/48] printk: Provide helper for message prepending
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
In order to support prepending different texts to printk
messages, split out the prepending code into a helper
diff --git a/debian/patches-rt/0037-printk-nbcon-Show-replay-message-on-takeover.patch b/debian/patches-rt/0039-printk-nbcon-Show-replay-message-on-takeover.patch
index 87b2799c12..28fc95581c 100644
--- a/debian/patches-rt/0037-printk-nbcon-Show-replay-message-on-takeover.patch
+++ b/debian/patches-rt/0039-printk-nbcon-Show-replay-message-on-takeover.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Mon, 25 Mar 2024 21:00:40 +0000
-Subject: [PATCH 37/46] printk: nbcon: Show replay message on takeover
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 39/48] printk: nbcon: Show replay message on takeover
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
An emergency or panic context can takeover console ownership
while the current owner was printing a printk message. The
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
@@ -325,6 +325,7 @@ struct nbcon_write_context {
* @nbcon_state: State for nbcon consoles
* @nbcon_seq: Sequence number of the next record for nbcon to print
- * @nbcon_driver_ctxt: Context available for driver non-printing operations
+ * @nbcon_device_ctxt: Context available for non-printing operations
+ * @nbcon_prev_seq: Seq num the previous nbcon owner was assigned to print
* @pbufs: Pointer to nbcon private buffer
* @kthread: Printer kthread for this console
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
@@ -445,6 +446,8 @@ struct console {
atomic_t __private nbcon_state;
atomic_long_t __private nbcon_seq;
- struct nbcon_context __private nbcon_driver_ctxt;
+ struct nbcon_context __private nbcon_device_ctxt;
+ atomic_long_t __private nbcon_prev_seq;
+
struct printk_buffers *pbufs;
@@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct rcuwait rcuwait;
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
-@@ -231,4 +231,5 @@ bool printk_get_next_message(struct prin
+@@ -233,4 +233,5 @@ bool printk_get_next_message(struct prin
#ifdef CONFIG_PRINTK
void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped);
@@ -98,7 +98,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!nbcon_context_exit_unsafe(ctxt))
return false;
-@@ -1611,6 +1634,7 @@ void nbcon_init(struct console *con, u64
+@@ -1642,6 +1665,7 @@ void nbcon_init(struct console *con, u64
rcuwait_init(&con->rcuwait);
init_irq_work(&con->irq_work, nbcon_irq_work);
nbcon_seq_force(con, init_seq);
diff --git a/debian/patches-rt/0044-printk-Add-kthread-for-all-legacy-consoles.patch b/debian/patches-rt/0040-printk-Add-kthread-for-all-legacy-consoles.patch
index edcfbb989d..afb049333d 100644
--- a/debian/patches-rt/0044-printk-Add-kthread-for-all-legacy-consoles.patch
+++ b/debian/patches-rt/0040-printk-Add-kthread-for-all-legacy-consoles.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Fri, 22 Sep 2023 17:35:04 +0000
-Subject: [PATCH 44/46] printk: Add kthread for all legacy consoles
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 40/48] printk: Add kthread for all legacy consoles
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
The write callback of legacy consoles makes use of spinlocks.
This is not permitted with PREEMPT_RT in atomic contexts.
@@ -24,9 +24,9 @@ Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/printk/internal.h | 11 +-
- kernel/printk/nbcon.c | 52 ++++++----
- kernel/printk/printk.c | 242 ++++++++++++++++++++++++++++++++++++++---------
- 3 files changed, 243 insertions(+), 62 deletions(-)
+ kernel/printk/nbcon.c | 60 +++++++----
+ kernel/printk/printk.c | 241 +++++++++++++++++++++++++++++++++++++++--------
+ 3 files changed, 249 insertions(+), 63 deletions(-)
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_PRINTK
#ifdef CONFIG_PRINTK_CALLER
-@@ -90,9 +96,10 @@ void nbcon_free(struct console *con);
+@@ -92,9 +98,10 @@ void nbcon_free(struct console *con);
enum nbcon_prio nbcon_get_default_prio(void);
void nbcon_atomic_flush_pending(void);
bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Check if the given console is currently capable and allowed to print
-@@ -179,7 +186,7 @@ static inline void nbcon_free(struct con
+@@ -181,7 +188,7 @@ static inline void nbcon_free(struct con
static inline enum nbcon_prio nbcon_get_default_prio(void) { return NBCON_PRIO_NONE; }
static inline void nbcon_atomic_flush_pending(void) { }
static inline bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
bool use_atomic) { return false; }
--- a/kernel/printk/nbcon.c
+++ b/kernel/printk/nbcon.c
-@@ -1224,9 +1224,10 @@ enum nbcon_prio nbcon_get_default_prio(v
+@@ -1185,9 +1185,10 @@ enum nbcon_prio nbcon_get_default_prio(v
}
/*
@@ -75,11 +75,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ * nbcon_emit_one - Print one record for an nbcon console using the
+ * specified callback
* @wctxt: An initialized write context struct to use for this context
-+ * @use_atomic: True if the write_atomic callback is to be used
++ * @use_atomic: True if the write_atomic() callback is to be used
*
* Return: True, when a record has been printed and there are still
* pending records. The caller might want to continue flushing.
-@@ -1239,7 +1240,7 @@ enum nbcon_prio nbcon_get_default_prio(v
+@@ -1200,7 +1201,7 @@ enum nbcon_prio nbcon_get_default_prio(v
* This is an internal helper to handle the locking of the console before
* calling nbcon_emit_next_record().
*/
@@ -88,7 +88,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
-@@ -1254,7 +1255,7 @@ static bool nbcon_atomic_emit_one(struct
+@@ -1215,7 +1216,7 @@ static bool nbcon_atomic_emit_one(struct
* The higher priority printing context takes over responsibility
* to print the pending records.
*/
@@ -97,15 +97,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return false;
nbcon_context_release(ctxt);
-@@ -1271,6 +1272,7 @@ static bool nbcon_atomic_emit_one(struct
+@@ -1232,6 +1233,7 @@ static bool nbcon_atomic_emit_one(struct
* both the console_lock and the SRCU read lock. Otherwise it
* is set to false.
* @cookie: The cookie from the SRCU read lock.
-+ * @use_atomic: True if the write_atomic callback is to be used
++ * @use_atomic: True if the write_atomic() callback is to be used
*
* Context: Any context except NMI.
* Return: True, when a record has been printed and there are still
-@@ -1286,26 +1288,38 @@ static bool nbcon_atomic_emit_one(struct
+@@ -1247,26 +1249,38 @@ static bool nbcon_atomic_emit_one(struct
* Essentially it is the nbcon version of console_emit_next_record().
*/
bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
@@ -158,7 +158,29 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return progress;
}
-@@ -1622,6 +1636,8 @@ static int __init printk_setup_threads(v
+@@ -1494,7 +1508,9 @@ void nbcon_cpu_emergency_exit(void)
+ * to handle it.
+ */
+ do_trigger_flush = true;
+- if (printing_via_unlock && !is_printk_deferred()) {
++ if (!force_printkthreads() &&
++ printing_via_unlock &&
++ !is_printk_deferred()) {
+ if (console_trylock()) {
+ do_trigger_flush = false;
+ console_unlock();
+@@ -1541,7 +1557,9 @@ void nbcon_cpu_emergency_flush(void)
+
+ nbcon_atomic_flush_pending();
+
+- if (printing_via_unlock && !is_printk_deferred()) {
++ if (!force_printkthreads() &&
++ printing_via_unlock &&
++ !is_printk_deferred()) {
+ if (console_trylock())
+ console_unlock();
+ }
+@@ -1612,6 +1630,8 @@ static int __init printk_setup_threads(v
printk_threads_enabled = true;
for_each_console(con)
nbcon_kthread_create(con);
@@ -184,8 +206,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
const char *fmt, va_list args)
{
- bool do_trylock_unlock = printing_via_unlock;
-+ bool do_trylock_unlock = printing_via_unlock &&
-+ !force_printkthreads();
++ bool do_trylock_unlock = !force_printkthreads() &&
++ printing_via_unlock;
int printed_len;
/* Suppress unimportant messages after panic happens */
@@ -222,17 +244,18 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_flush(1000, true);
}
-@@ -2740,7 +2756,8 @@ void resume_console(void)
+@@ -2740,7 +2756,9 @@ void resume_console(void)
*/
static int console_cpu_notify(unsigned int cpu)
{
- if (!cpuhp_tasks_frozen && printing_via_unlock) {
-+ if (!cpuhp_tasks_frozen && printing_via_unlock &&
-+ !force_printkthreads()) {
++ if (!force_printkthreads() &&
++ !cpuhp_tasks_frozen &&
++ printing_via_unlock) {
/* If trylock fails, someone else is doing the printing */
if (console_trylock())
console_unlock();
-@@ -3000,31 +3017,43 @@ static bool console_emit_next_record(str
+@@ -3000,31 +3018,43 @@ static bool console_emit_next_record(str
con->dropped = 0;
}
@@ -296,7 +319,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
skip:
return true;
}
-@@ -3088,12 +3117,13 @@ static bool console_flush_all(bool do_co
+@@ -3088,12 +3118,13 @@ static bool console_flush_all(bool do_co
if ((flags & CON_NBCON) && con->kthread)
continue;
@@ -312,7 +335,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
printk_seq = nbcon_seq_read(con);
} else {
progress = console_emit_next_record(con, handover, cookie);
-@@ -3132,19 +3162,7 @@ static bool console_flush_all(bool do_co
+@@ -3132,19 +3163,7 @@ static bool console_flush_all(bool do_co
return false;
}
@@ -333,7 +356,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
bool do_cond_resched;
bool handover;
-@@ -3188,6 +3206,32 @@ void console_unlock(void)
+@@ -3188,6 +3207,32 @@ void console_unlock(void)
*/
} while (prb_read_valid(prb, next_seq, NULL) && console_trylock());
}
@@ -366,12 +389,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
EXPORT_SYMBOL(console_unlock);
/**
-@@ -3397,11 +3441,106 @@ void console_start(struct console *conso
-
+@@ -3411,12 +3456,107 @@ void console_start(struct console *conso
+ flags = console_srcu_read_flags(console);
if (flags & CON_NBCON)
nbcon_kthread_wake(console);
+ else
+ wake_up_legacy_kthread();
+ console_srcu_read_unlock(cookie);
__pr_flush(console, 1000, true);
}
@@ -399,7 +423,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ if ((flags & CON_NBCON) && con->kthread)
+ continue;
+
-+ if (!console_is_usable(con, flags, true))
++ if (!console_is_usable(con, flags, false))
+ continue;
+
+ if (flags & CON_NBCON) {
@@ -473,7 +497,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static int __read_mostly keep_bootcon;
static int __init keep_bootcon_setup(char *str)
-@@ -3690,6 +3829,7 @@ void register_console(struct console *ne
+@@ -3706,6 +3846,7 @@ void register_console(struct console *ne
} else {
have_legacy_console = true;
newcon->seq = init_seq;
@@ -481,7 +505,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
if (newcon->flags & CON_BOOT)
-@@ -3856,6 +3996,13 @@ static int unregister_console_locked(str
+@@ -3873,6 +4014,13 @@ static int unregister_console_locked(str
nbcon_kthread_create(c);
}
@@ -495,22 +519,20 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return res;
}
-@@ -4014,8 +4161,12 @@ static bool __pr_flush(struct console *c
+@@ -4031,7 +4179,11 @@ static bool __pr_flush(struct console *c
seq = prb_next_reserve_seq(prb);
- /* Flush the consoles so that records up to @seq are printed. */
-- if (printing_via_unlock) {
+ /*
+ * Flush the consoles so that records up to @seq are printed.
+ * Otherwise this function will just wait for the threaded printers
+ * to print up to @seq.
+ */
-+ if (printing_via_unlock && !force_printkthreads()) {
+ if (printing_via_unlock) {
console_lock();
console_unlock();
- }
-@@ -4129,9 +4280,16 @@ static void wake_up_klogd_work_func(stru
+@@ -4146,9 +4298,16 @@ static void wake_up_klogd_work_func(stru
int pending = this_cpu_xchg(printk_pending, 0);
if (pending & PRINTK_PENDING_OUTPUT) {
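
The recurring guard in the hunks above is about responsibility: once legacy printing is forced into a kthread, the printk() caller must not also grab the console lock and print inline, it only wakes the thread. As a predicate (sketch; force_printkthreads() is the helper this patch introduces):

    static bool demo_caller_may_print(bool printing_via_unlock)
    {
        /* Inline printing only when kthreads are not forced. */
        return !force_printkthreads() && printing_via_unlock;
    }
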
diff --git a/debian/patches-rt/0038-proc-consoles-Add-notation-to-c_start-c_stop.patch b/debian/patches-rt/0041-proc-consoles-Add-notation-to-c_start-c_stop.patch
index 691c1a2b80..f31e4da245 100644
--- a/debian/patches-rt/0038-proc-consoles-Add-notation-to-c_start-c_stop.patch
+++ b/debian/patches-rt/0041-proc-consoles-Add-notation-to-c_start-c_stop.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Thu, 2 May 2024 08:02:58 +0000
-Subject: [PATCH 38/46] proc: consoles: Add notation to c_start/c_stop
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 41/48] proc: consoles: Add notation to c_start/c_stop
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
fs/proc/consoles.c:78:13: warning: context imbalance in 'c_start'
- wrong count at exit
diff --git a/debian/patches-rt/0039-proc-Add-nbcon-support-for-proc-consoles.patch b/debian/patches-rt/0042-proc-Add-nbcon-support-for-proc-consoles.patch
index 68a1df972d..1c27e7f9aa 100644
--- a/debian/patches-rt/0039-proc-Add-nbcon-support-for-proc-consoles.patch
+++ b/debian/patches-rt/0042-proc-Add-nbcon-support-for-proc-consoles.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 26 Sep 2023 13:31:00 +0000
-Subject: [PATCH 39/46] proc: Add nbcon support for /proc/consoles
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 42/48] proc: Add nbcon support for /proc/consoles
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Update /proc/consoles output to show 'W' if an nbcon write
callback is implemented (write_atomic or write_thread).
diff --git a/debian/patches-rt/0040-tty-sysfs-Add-nbcon-support-for-active.patch b/debian/patches-rt/0043-tty-sysfs-Add-nbcon-support-for-active.patch
index 2f90024cc8..9f6999349e 100644
--- a/debian/patches-rt/0040-tty-sysfs-Add-nbcon-support-for-active.patch
+++ b/debian/patches-rt/0043-tty-sysfs-Add-nbcon-support-for-active.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Fri, 22 Sep 2023 14:31:09 +0000
-Subject: [PATCH 40/46] tty: sysfs: Add nbcon support for 'active'
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 43/48] tty: sysfs: Add nbcon support for 'active'
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Allow the 'active' attribute to list nbcon consoles.
diff --git a/debian/patches-rt/0045-printk-Provide-threadprintk-boot-argument.patch b/debian/patches-rt/0044-printk-Provide-threadprintk-boot-argument.patch
index afb91f6f64..dfe4ed595c 100644
--- a/debian/patches-rt/0045-printk-Provide-threadprintk-boot-argument.patch
+++ b/debian/patches-rt/0044-printk-Provide-threadprintk-boot-argument.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 6 Feb 2024 14:19:34 +0000
-Subject: [PATCH 45/46] printk: Provide threadprintk boot argument
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 44/48] printk: Provide threadprintk boot argument
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
For PREEMPT_RT, legacy console printing is performed in a dedicated
kthread. However, this behavior can also be interesting for other
@@ -12,7 +12,7 @@ Provide a new boot argument "threadprintk" that will create the
dedicated kthread for legacy console printing for !PREEMPT_RT
systems.
-The implementation is the same as "threadirqs" boot argument.
+The implementation is the same as the "threadirqs" boot argument.
Users should be aware that if this option is enabled, the shutdown,
reboot, and panic messages probably will not be visible on the
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
-@@ -6552,6 +6552,18 @@
+@@ -6596,6 +6596,18 @@
Force threading of all interrupt handlers except those
marked explicitly IRQF_NO_THREAD.
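
Wiring such a flag follows the "threadirqs" template the description cites: an early_param() handler flips a boolean before any console registers. A sketch (only the "threadprintk" string is from the patch; the variable and function names are illustrative):

    #include <linux/init.h>

    static bool demo_force_printkthreads;

    static int __init demo_setup_threadprintk(char *arg)
    {
        demo_force_printkthreads = true;
        return 0;
    }
    early_param("threadprintk", demo_setup_threadprintk);
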
diff --git a/debian/patches-rt/0046-printk-Avoid-false-positive-lockdep-report-for-legac.patch b/debian/patches-rt/0045-printk-Avoid-false-positive-lockdep-report-for-legac.patch
index 7906760f71..937d22b85e 100644
--- a/debian/patches-rt/0046-printk-Avoid-false-positive-lockdep-report-for-legac.patch
+++ b/debian/patches-rt/0045-printk-Avoid-false-positive-lockdep-report-for-legac.patch
@@ -1,8 +1,8 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Mon, 11 Dec 2023 09:34:16 +0000
-Subject: [PATCH 46/46] printk: Avoid false positive lockdep report for legacy
+Subject: [PATCH 45/48] printk: Avoid false positive lockdep report for legacy
printing
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Legacy console printing from printk() caller context may invoke
the console driver from atomic context. This leads to a lockdep
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -2980,6 +2980,33 @@ bool printk_get_next_message(struct prin
+@@ -2981,6 +2981,33 @@ bool printk_get_next_message(struct prin
}
/*
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Used as the printk buffers for non-panic, serialized console printing.
* This is for legacy (!CON_NBCON) as well as all boot (CON_BOOT) consoles.
* Its usage requires the console_lock held.
-@@ -3034,7 +3061,7 @@ static bool console_emit_next_record(str
+@@ -3035,7 +3062,7 @@ static bool console_emit_next_record(str
/*
* With forced threading this function is either in a thread
* or panic context. So there is no need for concern about
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
con->write(con, outbuf, pmsg.outbuf_len);
-@@ -3056,7 +3083,9 @@ static bool console_emit_next_record(str
+@@ -3057,7 +3084,9 @@ static bool console_emit_next_record(str
/* Do not trace print latency. */
stop_critical_timings();
diff --git a/debian/patches-rt/0041-printk-nbcon-Provide-function-to-reacquire-ownership.patch b/debian/patches-rt/0046-printk-nbcon-Add-function-for-printers-to-reacquire-.patch
index a9fb9f35cc..75a090edc3 100644
--- a/debian/patches-rt/0041-printk-nbcon-Provide-function-to-reacquire-ownership.patch
+++ b/debian/patches-rt/0046-printk-nbcon-Add-function-for-printers-to-reacquire-.patch
@@ -1,51 +1,50 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Fri, 20 Oct 2023 10:01:58 +0000
-Subject: [PATCH 41/46] printk: nbcon: Provide function to reacquire ownership
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
-
-Contexts may become nbcon owners for various reasons, not just
-for printing. Indeed, the port->lock wrapper takes ownership
-for anything relating to the hardware.
+Subject: [PATCH 46/48] printk: nbcon: Add function for printers to reacquire
+ ownership
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Since ownership can be lost at any time due to handover or
-takeover, a context _should_ be prepared to back out
-immediately and carefully. However, there are many scenarios
-where the context _must_ reacquire ownership in order to
+takeover, a printing context _must_ be prepared to back out
+immediately and carefully. However, there are scenarios where
+the printing context must reacquire ownership in order to
finalize or revert hardware changes.
-One such example is when interrupts are disabled by a context.
-No other context will automagically re-enable the interrupts.
-For this case, the disabling context _must_ reacquire nbcon
-ownership so that it can re-enable the interrupts.
+One such example is when interrupts are disabled during
+printing. No other context will automagically re-enable the
+interrupts. For this case, the disabling context _must_
+reacquire nbcon ownership so that it can re-enable the
+interrupts.
-Provide nbcon_reacquire() for exactly this purpose.
+Provide nbcon_reacquire() for exactly this purpose. It allows a
+printing context to reacquire ownership using the same priority
+as its previous ownership.
-Note that for printing contexts, after a successful reacquire
-the context will have no output buffer because that has been
-lost. nbcon_reacquire() cannot be used to resume printing.
+Note that after a successful reacquire the printing context
+will have no output buffer because that has been lost. This
+function cannot be used to resume printing.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/console.h | 7 +++++++
+ include/linux/console.h | 6 ++++++
kernel/printk/nbcon.c | 41 +++++++++++++++++++++++++++++++++++++++++
- 2 files changed, 48 insertions(+)
+ 2 files changed, 47 insertions(+)
--- a/include/linux/console.h
+++ b/include/linux/console.h
-@@ -373,6 +373,11 @@ struct console {
+@@ -372,6 +372,10 @@ struct console {
+ *
* The callback should allow the takeover whenever it is safe. It
* increases the chance to see messages when the system is in trouble.
- *
+ * If the driver must reacquire ownership in order to finalize or
+ * revert hardware changes, nbcon_reacquire() can be used. However,
+ * on reacquire the buffer content is no longer available. A
+ * reacquire cannot be used to resume printing.
-+ *
+ *
* The callback can be called from any context (including NMI).
* Therefore it must avoid usage of any locking and instead rely
- * on the console ownership for synchronization.
-@@ -591,6 +596,7 @@ extern void nbcon_cpu_emergency_flush(vo
+@@ -591,6 +595,7 @@ extern void nbcon_cpu_emergency_flush(vo
extern bool nbcon_can_proceed(struct nbcon_write_context *wctxt);
extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt);
extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt);
@@ -53,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#else
static inline void nbcon_cpu_emergency_enter(void) { }
static inline void nbcon_cpu_emergency_exit(void) { }
-@@ -598,6 +604,7 @@ static inline void nbcon_cpu_emergency_f
+@@ -598,6 +603,7 @@ static inline void nbcon_cpu_emergency_f
static inline bool nbcon_can_proceed(struct nbcon_write_context *wctxt) { return false; }
static inline bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) { return false; }
static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; }
@@ -67,20 +66,20 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
EXPORT_SYMBOL_GPL(nbcon_exit_unsafe);
/**
-+ * nbcon_reacquire - Reacquire a console after losing ownership
-+ * @wctxt: The write context that was handed to the write function
++ * nbcon_reacquire - Reacquire a console after losing ownership while printing
++ * @wctxt: The write context that was handed to the write callback
+ *
+ * Since ownership can be lost at any time due to handover or takeover, a
-+ * printing context _should_ be prepared to back out immediately and
-+ * carefully. However, there are many scenarios where the context _must_
++ * printing context _must_ be prepared to back out immediately and
++ * carefully. However, there are scenarios where the printing context must
+ * reacquire ownership in order to finalize or revert hardware changes.
+ *
-+ * This function allows a context to reacquire ownership using the same
-+ * priority as its previous ownership.
++ * This function allows a printing context to reacquire ownership using the
++ * same priority as its previous ownership.
+ *
-+ * Note that for printing contexts, after a successful reacquire the
-+ * context will have no output buffer because that has been lost. This
-+ * function cannot be used to resume printing.
++ * Note that after a successful reacquire the printing context will have no
++ * output buffer because that has been lost. This function cannot be used to
++ * resume printing.
+ */
+void nbcon_reacquire(struct nbcon_write_context *wctxt)
+{
@@ -101,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+/**
* nbcon_emit_next_record - Emit a record in the acquired context
* @wctxt: The write context that will be handed to the write function
- * @use_atomic: True if the write_atomic callback is to be used
+ * @use_atomic: True if the write_atomic() callback is to be used
@@ -944,6 +976,15 @@ static bool nbcon_emit_next_record(struc
nbcon_context_release(ctxt);
return false;
@@ -110,7 +109,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ if (!wctxt->outbuf) {
+ /*
+ * Ownership was lost and reacquired by the driver.
-+ * Handle it as if ownership was lost and try to continue.
++ * Handle it as if ownership was lost.
+ */
+ nbcon_context_release(ctxt);
+ return false;
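
The intended caller is a write callback that changed hardware state before losing ownership mid-output. Its shape, per the rules spelled out above (sketch; demo_* is illustrative, the nbcon_* calls are the ones this series exports):

    static void demo_write_thread(struct console *con,
                                  struct nbcon_write_context *wctxt)
    {
        /* ...mask device interrupts, emit wctxt->outbuf... */

        if (!nbcon_exit_unsafe(wctxt)) {
            /*
             * Ownership was handed over or taken. Reacquire it
             * only to revert the hardware state; the output
             * buffer is gone, so printing cannot resume.
             */
            nbcon_reacquire(wctxt);
        }

        /* ...unmask device interrupts... */
    }
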
diff --git a/debian/patches-rt/0042-serial-8250-Switch-to-nbcon-console.patch b/debian/patches-rt/0047-serial-8250-Switch-to-nbcon-console.patch
index 5f9d30af5d..ffefc3a024 100644
--- a/debian/patches-rt/0042-serial-8250-Switch-to-nbcon-console.patch
+++ b/debian/patches-rt/0047-serial-8250-Switch-to-nbcon-console.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Wed, 13 Sep 2023 15:30:36 +0000
-Subject: [PATCH 42/46] serial: 8250: Switch to nbcon console
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Subject: [PATCH 47/48] serial: 8250: Switch to nbcon console
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Implement the necessary callbacks to switch the 8250 console driver
to perform as an nbcon console.
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
-@@ -592,6 +592,7 @@ serial8250_register_ports(struct uart_dr
+@@ -589,6 +589,7 @@ serial8250_register_ports(struct uart_dr
#ifdef CONFIG_SERIAL_8250_CONSOLE
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void univ8250_console_write(struct console *co, const char *s,
unsigned int count)
{
-@@ -599,6 +600,37 @@ static void univ8250_console_write(struc
+@@ -596,6 +597,37 @@ static void univ8250_console_write(struc
serial8250_console_write(up, s, count);
}
@@ -69,7 +69,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static int univ8250_console_setup(struct console *co, char *options)
{
-@@ -698,12 +730,20 @@ static int univ8250_console_match(struct
+@@ -695,12 +727,20 @@ static int univ8250_console_match(struct
static struct console univ8250_console = {
.name = "ttyS",
@@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
hrtimer_init(&p->em485->stop_tx_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
hrtimer_init(&p->em485->start_tx_timer, CLOCK_MONOTONIC,
-@@ -698,7 +705,11 @@ static void serial8250_set_sleep(struct
+@@ -691,7 +698,11 @@ static void serial8250_set_sleep(struct
serial8250_rpm_put(p);
}
@@ -120,7 +120,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
if (up->capabilities & UART_CAP_UUE)
serial_out(up, UART_IER, UART_IER_UUE);
-@@ -706,6 +717,11 @@ static void serial8250_clear_IER(struct
+@@ -699,6 +710,11 @@ static void serial8250_clear_IER(struct
serial_out(up, UART_IER, 0);
}
@@ -132,7 +132,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_SERIAL_8250_RSA
/*
* Attempts to turn on the RSA FIFO. Returns zero on failure.
-@@ -3272,6 +3288,11 @@ static void serial8250_console_putchar(s
+@@ -3269,6 +3285,11 @@ static void serial8250_console_putchar(s
wait_for_xmitr(up, UART_LSR_THRE);
serial_port_out(port, UART_TX, ch);
@@ -144,7 +144,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -3300,6 +3321,7 @@ static void serial8250_console_restore(s
+@@ -3297,6 +3318,7 @@ static void serial8250_console_restore(s
serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS);
}
@@ -152,7 +152,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Print a string to the serial port using the device FIFO
*
-@@ -3358,7 +3380,7 @@ void serial8250_console_write(struct uar
+@@ -3355,7 +3377,7 @@ void serial8250_console_write(struct uar
* First save the IER then disable the interrupts
*/
ier = serial_port_in(port, UART_IER);
@@ -161,7 +161,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* check scratch reg to see if port powered off during system sleep */
if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
-@@ -3424,6 +3446,131 @@ void serial8250_console_write(struct uar
+@@ -3421,6 +3443,131 @@ void serial8250_console_write(struct uar
if (locked)
uart_port_unlock_irqrestore(port, flags);
}
@@ -293,7 +293,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static unsigned int probe_baud(struct uart_port *port)
{
-@@ -3442,6 +3589,7 @@ static unsigned int probe_baud(struct ua
+@@ -3439,6 +3586,7 @@ static unsigned int probe_baud(struct ua
int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
{
@@ -301,7 +301,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int baud = 9600;
int bits = 8;
int parity = 'n';
-@@ -3451,6 +3599,8 @@ int serial8250_console_setup(struct uart
+@@ -3448,6 +3596,8 @@ int serial8250_console_setup(struct uart
if (!port->iobase && !port->membase)
return -ENODEV;
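
The hunks above mark where the conversion hooks in; the substance is that univ8250_console grows nbcon callbacks and the CON_NBCON flag. A minimal sketch of the resulting console shape, assuming the callback names follow the univ8250_console_write naming visible above (illustrative only, not the verbatim 8250 code):

    static struct console univ8250_console = {
            .name         = "ttyS",
            .write_atomic = univ8250_console_write_atomic, /* atomic printing path  */
            .write_thread = univ8250_console_write_thread, /* printer kthread path  */
            .device       = uart_console_device,
            .setup        = univ8250_console_setup,
            .flags        = CON_PRINTBUFFER | CON_ANYTIME | CON_NBCON,
            .index        = -1,
            .data         = &serial8250_reg,
    };
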
diff --git a/debian/patches-rt/0043-serial-8250-Revert-drop-lockdep-annotation-from-seri.patch b/debian/patches-rt/0048-serial-8250-Revert-drop-lockdep-annotation-from-seri.patch
index a8e54a590a..3a02162768 100644
--- a/debian/patches-rt/0043-serial-8250-Revert-drop-lockdep-annotation-from-seri.patch
+++ b/debian/patches-rt/0048-serial-8250-Revert-drop-lockdep-annotation-from-seri.patch
@@ -1,8 +1,8 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Mon, 2 Oct 2023 15:30:43 +0000
-Subject: [PATCH 43/46] serial: 8250: Revert "drop lockdep annotation from
+Subject: [PATCH 48/48] serial: 8250: Revert "drop lockdep annotation from
serial8250_clear_IER()"
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
The 8250 driver no longer depends on @oops_in_progress and
will no longer violate the port->lock locking constraints.
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
-@@ -719,6 +719,9 @@ static void __serial8250_clear_IER(struc
+@@ -712,6 +712,9 @@ static void __serial8250_clear_IER(struc
static inline void serial8250_clear_IER(struct uart_8250_port *up)
{
diff --git a/debian/patches-rt/ARM64__Allow_to_enable_RT.patch b/debian/patches-rt/ARM64__Allow_to_enable_RT.patch
index 6035feec48..7f9b640ce0 100644
--- a/debian/patches-rt/ARM64__Allow_to_enable_RT.patch
+++ b/debian/patches-rt/ARM64__Allow_to_enable_RT.patch
@@ -1,7 +1,7 @@
Subject: ARM64: Allow to enable RT
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri Oct 11 13:14:35 2019 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
diff --git a/debian/patches-rt/ARM__Allow_to_enable_RT.patch b/debian/patches-rt/ARM__Allow_to_enable_RT.patch
index f8b96792fb..7db905a726 100644
--- a/debian/patches-rt/ARM__Allow_to_enable_RT.patch
+++ b/debian/patches-rt/ARM__Allow_to_enable_RT.patch
@@ -1,7 +1,7 @@
Subject: ARM: Allow to enable RT
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri Oct 11 13:14:29 2019 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
@@ -17,15 +17,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -37,6 +37,7 @@ config ARM
- select ARCH_SUPPORTS_ATOMIC_RMW
+@@ -38,6 +38,7 @@ config ARM
+ select ARCH_SUPPORTS_CFI_CLANG
select ARCH_SUPPORTS_HUGETLBFS if ARM_LPAE
select ARCH_SUPPORTS_PER_VMA_LOCK
+ select ARCH_SUPPORTS_RT if HAVE_POSIX_CPU_TIMERS_TASK_WORK
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_USE_MEMTEST
-@@ -122,6 +123,7 @@ config ARM
+@@ -123,6 +124,7 @@ config ARM
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
diff --git a/debian/patches-rt/ARM__enable_irq_in_translation_section_permission_fault_handlers.patch b/debian/patches-rt/ARM__enable_irq_in_translation_section_permission_fault_handlers.patch
index 30db897eb8..553a0159d7 100644
--- a/debian/patches-rt/ARM__enable_irq_in_translation_section_permission_fault_handlers.patch
+++ b/debian/patches-rt/ARM__enable_irq_in_translation_section_permission_fault_handlers.patch
@@ -1,7 +1,7 @@
Subject: ARM: enable irq in translation/section permission fault handlers
From: Yadi.hu <yadi.hu@windriver.com>
Date: Wed Dec 10 10:32:09 2014 +0800
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
From: Yadi.hu <yadi.hu@windriver.com>
@@ -69,7 +69,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
-@@ -443,6 +443,9 @@ do_translation_fault(unsigned long addr,
+@@ -474,6 +474,9 @@ do_translation_fault(unsigned long addr,
if (addr < TASK_SIZE)
return do_page_fault(addr, fsr, regs);
@@ -79,7 +79,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (user_mode(regs))
goto bad_area;
-@@ -513,6 +516,9 @@ do_translation_fault(unsigned long addr,
+@@ -544,6 +547,9 @@ do_translation_fault(unsigned long addr,
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
diff --git a/debian/patches-rt/Add_localversion_for_-RT_release.patch b/debian/patches-rt/Add_localversion_for_-RT_release.patch
index ad37a7f522..57e9de279a 100644
--- a/debian/patches-rt/Add_localversion_for_-RT_release.patch
+++ b/debian/patches-rt/Add_localversion_for_-RT_release.patch
@@ -1,7 +1,7 @@
Subject: Add localversion for -RT release
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri Jul 8 20:25:16 2011 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
From: Thomas Gleixner <tglx@linutronix.de>
@@ -16,4 +16,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt5
++-rt14
diff --git a/debian/patches-rt/POWERPC__Allow_to_enable_RT.patch b/debian/patches-rt/POWERPC__Allow_to_enable_RT.patch
index 9ac12f490a..3d22d2463f 100644
--- a/debian/patches-rt/POWERPC__Allow_to_enable_RT.patch
+++ b/debian/patches-rt/POWERPC__Allow_to_enable_RT.patch
@@ -1,7 +1,7 @@
Subject: POWERPC: Allow to enable RT
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri Oct 11 13:14:41 2019 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -166,6 +166,7 @@ config PPC
+@@ -168,6 +168,7 @@ config PPC
select ARCH_STACKWALK
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC_BOOK3S || PPC_8xx || 40x
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
select ARCH_USE_MEMTEST
-@@ -270,6 +271,7 @@ config PPC
+@@ -272,6 +273,7 @@ config PPC
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE
diff --git a/debian/patches-rt/PREEMPT_AUTO.patch b/debian/patches-rt/PREEMPT_AUTO.patch
index f9098eb616..de6d49cf41 100644
--- a/debian/patches-rt/PREEMPT_AUTO.patch
+++ b/debian/patches-rt/PREEMPT_AUTO.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 23 Sep 2023 03:11:05 +0200
Subject: [PATCH] sched: define TIF_ALLOW_RESCHED
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
On Fri, Sep 22 2023 at 00:55, Thomas Gleixner wrote:
> On Thu, Sep 21 2023 at 09:00, Linus Torvalds wrote:
@@ -122,7 +122,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -279,6 +279,7 @@ config X86
+@@ -282,6 +282,7 @@
select HAVE_STATIC_CALL
select HAVE_STATIC_CALL_INLINE if HAVE_OBJTOOL
select HAVE_PREEMPT_DYNAMIC_CALL
@@ -132,7 +132,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
select HAVE_SYSCALL_TRACEPOINTS
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
-@@ -87,8 +87,9 @@ struct thread_info {
+@@ -87,8 +87,9 @@
#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
@@ -144,7 +144,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define TIF_SPEC_IB 9 /* Indirect branch speculation mitigation */
#define TIF_SPEC_L1D_FLUSH 10 /* Flush L1D on mm switches (processes) */
#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
-@@ -110,6 +111,7 @@ struct thread_info {
+@@ -110,6 +111,7 @@
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
@@ -154,7 +154,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define _TIF_SPEC_IB (1 << TIF_SPEC_IB)
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
-@@ -108,7 +108,7 @@ static const struct dmi_system_id proces
+@@ -107,7 +107,7 @@
*/
static void __cpuidle acpi_safe_halt(void)
{
@@ -187,7 +187,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1938,17 +1938,17 @@ static inline void update_tsk_thread_fla
+@@ -1949,17 +1949,17 @@
update_ti_thread_flag(task_thread_info(tsk), flag, value);
}
@@ -208,7 +208,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
return test_ti_thread_flag(task_thread_info(tsk), flag);
}
-@@ -1961,9 +1961,11 @@ static inline void set_tsk_need_resched(
+@@ -1972,9 +1972,11 @@
static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
@@ -221,7 +221,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
-@@ -2104,7 +2106,7 @@ static inline bool preempt_model_preempt
+@@ -2074,7 +2076,7 @@
static __always_inline bool need_resched(void)
{
@@ -232,7 +232,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/include/linux/sched/idle.h
+++ b/include/linux/sched/idle.h
-@@ -63,7 +63,7 @@ static __always_inline bool __must_check
+@@ -63,7 +63,7 @@
*/
smp_mb__after_atomic();
@@ -241,7 +241,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static __always_inline bool __must_check current_clr_polling_and_test(void)
-@@ -76,7 +76,7 @@ static __always_inline bool __must_check
+@@ -76,7 +76,7 @@
*/
smp_mb__after_atomic();
@@ -250,7 +250,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#else
-@@ -85,11 +85,11 @@ static inline void __current_clr_polling
+@@ -85,11 +85,11 @@
static inline bool __must_check current_set_polling_and_test(void)
{
@@ -266,7 +266,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
-@@ -59,6 +59,16 @@ enum syscall_work_bit {
+@@ -59,6 +59,16 @@
#include <asm/thread_info.h>
@@ -283,7 +283,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef __KERNEL__
#ifndef arch_set_restart_data
-@@ -185,6 +195,13 @@ static __always_inline bool tif_need_res
+@@ -185,6 +195,13 @@
(unsigned long *)(&current_thread_info()->flags));
}
@@ -297,7 +297,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#else
static __always_inline bool tif_need_resched(void)
-@@ -193,6 +210,13 @@ static __always_inline bool tif_need_res
+@@ -193,6 +210,13 @@
(unsigned long *)(&current_thread_info()->flags));
}
@@ -313,7 +313,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
-@@ -184,8 +184,8 @@ unsigned int tracing_gen_ctx_irq_test(un
+@@ -184,8 +184,8 @@
enum trace_flag_type {
TRACE_FLAG_IRQS_OFF = 0x01,
@@ -324,7 +324,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
TRACE_FLAG_HARDIRQ = 0x08,
TRACE_FLAG_SOFTIRQ = 0x10,
TRACE_FLAG_PREEMPT_RESCHED = 0x20,
-@@ -211,11 +211,11 @@ static inline unsigned int tracing_gen_c
+@@ -211,11 +211,11 @@
static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
{
@@ -340,7 +340,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
-@@ -11,6 +11,13 @@ config PREEMPT_BUILD
+@@ -11,6 +11,13 @@
select PREEMPTION
select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
@@ -354,7 +354,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
choice
prompt "Preemption Model"
default PREEMPT_NONE
-@@ -67,9 +74,17 @@ config PREEMPT
+@@ -67,9 +74,17 @@
embedded system with latency requirements in the milliseconds
range.
@@ -372,7 +372,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
select PREEMPTION
help
This option turns the kernel into a real-time kernel by replacing
-@@ -95,7 +110,7 @@ config PREEMPTION
+@@ -95,7 +110,7 @@
config PREEMPT_DYNAMIC
bool "Preemption behaviour defined on boot"
@@ -383,7 +383,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
default y if HAVE_PREEMPT_DYNAMIC_CALL
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
-@@ -98,7 +98,7 @@ void __weak arch_do_signal_or_restart(st
+@@ -98,7 +98,7 @@
local_irq_enable_exit_to_user(ti_work);
@@ -392,7 +392,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
schedule();
if (ti_work & _TIF_UPROBE)
-@@ -307,7 +307,7 @@ void raw_irqentry_exit_cond_resched(void
+@@ -307,7 +307,7 @@
rcu_irq_exit_check_preempt();
if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
WARN_ON_ONCE(!on_thread_stack());
@@ -403,7 +403,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
--- a/kernel/entry/kvm.c
+++ b/kernel/entry/kvm.c
-@@ -13,7 +13,7 @@ static int xfer_to_guest_mode_work(struc
+@@ -13,7 +13,7 @@
return -EINTR;
}
@@ -414,7 +414,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (ti_work & _TIF_NOTIFY_RESUME)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -899,14 +899,15 @@ static inline void hrtick_rq_init(struct
+@@ -898,14 +898,15 @@
#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
@@ -433,7 +433,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -923,7 +924,7 @@ static bool set_nr_if_polling(struct tas
+@@ -922,7 +923,7 @@
do {
if (!(val & _TIF_POLLING_NRFLAG))
return false;
@@ -442,7 +442,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return true;
} while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED));
-@@ -931,9 +932,9 @@ static bool set_nr_if_polling(struct tas
+@@ -930,9 +931,9 @@
}
#else
@@ -454,7 +454,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return true;
}
-@@ -1038,28 +1039,47 @@ void wake_up_q(struct wake_q_head *head)
+@@ -1037,28 +1038,47 @@
* might also involve a cross-CPU call to trigger the scheduler on
* the target CPU.
*/
@@ -510,7 +510,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
void resched_cpu(int cpu)
-@@ -1154,7 +1174,7 @@ static void wake_up_idle_cpu(int cpu)
+@@ -1153,7 +1173,7 @@
* and testing of the above solutions didn't appear to report
* much benefits.
*/
@@ -521,7 +521,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
trace_sched_wake_idle_without_ipi(cpu);
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
-@@ -333,6 +333,23 @@ static const struct file_operations sche
+@@ -333,6 +333,23 @@
.release = seq_release,
};
@@ -545,7 +545,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static struct dentry *debugfs_sched;
static __init int sched_init_debug(void)
-@@ -374,6 +391,8 @@ static __init int sched_init_debug(void)
+@@ -374,6 +391,8 @@
debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
@@ -556,7 +556,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
late_initcall(sched_init_debug);
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -981,8 +981,10 @@ static void clear_buddies(struct cfs_rq
+@@ -974,8 +974,10 @@
* XXX: strictly: vd_i += N*r_i/w_i such that: vd_i > ve_i
* this is probably good enough.
*/
@@ -568,7 +568,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if ((s64)(se->vruntime - se->deadline) < 0)
return;
-@@ -1001,10 +1003,19 @@ static void update_deadline(struct cfs_r
+@@ -994,10 +996,19 @@
/*
* The task has consumed its request, reschedule.
*/
@@ -591,7 +591,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#include "pelt.h"
-@@ -1159,7 +1170,7 @@ s64 update_curr_common(struct rq *rq)
+@@ -1153,7 +1164,7 @@
/*
* Update the current task's runtime statistics.
*/
@@ -600,7 +600,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct sched_entity *curr = cfs_rq->curr;
s64 delta_exec;
-@@ -1172,7 +1183,7 @@ static void update_curr(struct cfs_rq *c
+@@ -1166,7 +1177,7 @@
return;
curr->vruntime += calc_delta_fair(delta_exec, curr);
@@ -609,7 +609,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
update_min_vruntime(cfs_rq);
if (entity_is_task(curr))
-@@ -1181,6 +1192,11 @@ static void update_curr(struct cfs_rq *c
+@@ -1175,6 +1186,11 @@
account_cfs_rq_runtime(cfs_rq, delta_exec);
}
@@ -621,7 +621,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void update_curr_fair(struct rq *rq)
{
update_curr(cfs_rq_of(&rq->curr->se));
-@@ -5505,7 +5521,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+@@ -5511,7 +5527,7 @@
/*
* Update run-time statistics of the 'current'.
*/
@@ -630,7 +630,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Ensure that runnable average is periodically updated.
-@@ -5519,7 +5535,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+@@ -5525,7 +5541,7 @@
* validating it and just reschedule.
*/
if (queued) {
@@ -639,7 +639,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return;
}
/*
-@@ -5665,7 +5681,7 @@ static void __account_cfs_rq_runtime(str
+@@ -5671,7 +5687,7 @@
* hierarchy can be throttled
*/
if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
@@ -648,7 +648,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static __always_inline
-@@ -5925,7 +5941,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cf
+@@ -5931,7 +5947,7 @@
/* Determine whether we need to wake up potentially idle CPU: */
if (rq->curr == rq->idle && rq->cfs.nr_running)
@@ -657,7 +657,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#ifdef CONFIG_SMP
-@@ -6640,7 +6656,7 @@ static void hrtick_start_fair(struct rq
+@@ -6646,7 +6662,7 @@
if (delta < 0) {
if (task_current(rq, p))
@@ -666,7 +666,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return;
}
hrtick_start(rq, delta);
-@@ -8316,7 +8332,7 @@ static void check_preempt_wakeup_fair(st
+@@ -8378,7 +8394,7 @@
* prevents us from potentially nominating it as a false LAST_BUDDY
* below.
*/
@@ -675,7 +675,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return;
/* Idle tasks are by definition preempted by non-idle tasks. */
-@@ -8358,7 +8374,7 @@ static void check_preempt_wakeup_fair(st
+@@ -8420,7 +8436,7 @@
return;
preempt:
@@ -684,7 +684,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#ifdef CONFIG_SMP
-@@ -12504,7 +12520,7 @@ static inline void task_tick_core(struct
+@@ -12566,7 +12582,7 @@
*/
if (rq->core->core_forceidle_count && rq->cfs.nr_running == 1 &&
__entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
@@ -693,7 +693,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -12669,7 +12685,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -12733,7 +12749,7 @@
*/
if (task_current(rq, p)) {
if (p->prio > oldprio)
@@ -704,7 +704,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
-@@ -87,3 +87,5 @@ SCHED_FEAT(UTIL_EST, true)
+@@ -87,3 +87,5 @@
SCHED_FEAT(LATENCY_WARN, false)
SCHED_FEAT(HZ_BW, true)
@@ -712,7 +712,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+SCHED_FEAT(FORCE_NEED_RESCHED, false)
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
-@@ -57,8 +57,7 @@ static noinline int __cpuidle cpu_idle_p
+@@ -57,8 +57,7 @@
ct_cpuidle_enter();
raw_local_irq_enable();
@@ -724,8 +724,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -2465,6 +2465,7 @@ extern void init_sched_fair_class(void);
- extern void reweight_task(struct task_struct *p, int prio);
+@@ -2467,6 +2467,7 @@
+ extern void reweight_task(struct task_struct *p, const struct load_weight *lw);
extern void resched_curr(struct rq *rq);
+extern void resched_curr_lazy(struct rq *rq);
@@ -734,7 +734,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern struct rt_bandwidth def_rt_bandwidth;
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -2513,6 +2513,8 @@ unsigned int tracing_gen_ctx_irq_test(un
+@@ -2519,6 +2519,8 @@
if (tif_need_resched())
trace_flags |= TRACE_FLAG_NEED_RESCHED;
@@ -745,7 +745,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
-@@ -460,17 +460,29 @@ int trace_print_lat_fmt(struct trace_seq
+@@ -460,17 +460,29 @@
(entry->flags & TRACE_FLAG_IRQS_OFF && bh_off) ? 'D' :
(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
bh_off ? 'b' :
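
Condensed, the scheduler side of the series splits reschedule requests into an eager and a lazy flavour. A simplified sketch using the helper names from the hunks above (the config symbol here is an assumption; the real patch carries more detail):

    void resched_curr(struct rq *rq)
    {
            __resched_curr(rq, TIF_NEED_RESCHED);   /* preempt as soon as possible */
    }

    void resched_curr_lazy(struct rq *rq)
    {
            int tif = TIF_NEED_RESCHED;

            /* Defer to the next tick or return-to-user unless forced. */
            if (IS_ENABLED(CONFIG_PREEMPT_AUTO) && !sched_feat(FORCE_NEED_RESCHED))
                    tif = TIF_NEED_RESCHED_LAZY;

            __resched_curr(rq, tif);
    }
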
diff --git a/debian/patches-rt/arm-Disable-FAST_GUP-on-PREEMPT_RT-if-HIGHPTE-is-als.patch b/debian/patches-rt/arm-Disable-FAST_GUP-on-PREEMPT_RT-if-HIGHPTE-is-als.patch
index 478d746f75..8fdf6bea8f 100644
--- a/debian/patches-rt/arm-Disable-FAST_GUP-on-PREEMPT_RT-if-HIGHPTE-is-als.patch
+++ b/debian/patches-rt/arm-Disable-FAST_GUP-on-PREEMPT_RT-if-HIGHPTE-is-als.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 23 Jan 2024 12:56:21 +0100
Subject: [PATCH] arm: Disable FAST_GUP on PREEMPT_RT if HIGHPTE is also
enabled.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
gup_pgd_range() is invoked with disabled interrupts and invokes
__kmap_local_page_prot() via pte_offset_map(), gup_p4d_range().
@@ -24,12 +24,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -99,7 +99,7 @@ config ARM
+@@ -100,7 +100,7 @@ config ARM
select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
select HAVE_EXIT_THREAD
-- select HAVE_FAST_GUP if ARM_LPAE
-+ select HAVE_FAST_GUP if ARM_LPAE && !(PREEMPT_RT && HIGHPTE)
+- select HAVE_GUP_FAST if ARM_LPAE
++ select HAVE_GUP_FAST if ARM_LPAE && !(PREEMPT_RT && HIGHPTE)
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUNCTION_GRAPH_TRACER
diff --git a/debian/patches-rt/bpf-Remove-tst_run-from-lwt_seg6local_prog_ops.patch b/debian/patches-rt/bpf-Remove-tst_run-from-lwt_seg6local_prog_ops.patch
new file mode 100644
index 0000000000..37d4e85805
--- /dev/null
+++ b/debian/patches-rt/bpf-Remove-tst_run-from-lwt_seg6local_prog_ops.patch
@@ -0,0 +1,40 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 10 Jul 2024 16:16:31 +0200
+Subject: [PATCH] bpf: Remove tst_run from lwt_seg6local_prog_ops.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+The syzbot reported that the lwt_seg6 related BPF ops can be invoked
+via bpf_test_run() without entering input_action_end_bpf() first.
+
+Martin KaFai Lau said that self test for BPF_PROG_TYPE_LWT_SEG6LOCAL
+probably didn't work since it was introduced in commit 04d4b274e2a
+("ipv6: sr: Add seg6local action End.BPF"). The reason is that the
+per-CPU variable seg6_bpf_srh_states::srh is never assigned in the self
+test case but each BPF function expects it.
+
+Remove test_run for BPF_PROG_TYPE_LWT_SEG6LOCAL.
+
+Suggested-by: Martin KaFai Lau <martin.lau@linux.dev>
+Reported-by: syzbot+608a2acde8c5a101d07d@syzkaller.appspotmail.com
+Fixes: d1542d4ae4df ("seg6: Use nested-BH locking for seg6_bpf_srh_states.")
+Fixes: 004d4b274e2a ("ipv6: sr: Add seg6local action End.BPF")
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/r/20240710141631.FbmHcQaX@linutronix.de
+Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/core/filter.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -11047,7 +11047,6 @@ const struct bpf_verifier_ops lwt_seg6lo
+ };
+
+ const struct bpf_prog_ops lwt_seg6local_prog_ops = {
+- .test_run = bpf_prog_test_run_skb,
+ };
+
+ const struct bpf_verifier_ops cg_sock_verifier_ops = {
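
With the .test_run hook gone, the generic BPF_PROG_TEST_RUN path rejects seg6local programs instead of executing them with an unset seg6_bpf_srh_states. A hedged userspace sketch of what a caller now sees (the helper name and error handling are illustrative):

    #include <bpf/bpf.h>
    #include <stdio.h>

    /* Assumes prog_fd refers to a loaded BPF_PROG_TYPE_LWT_SEG6LOCAL program. */
    static int try_test_run(int prog_fd)
    {
            char data[64] = { 0 };
            LIBBPF_OPTS(bpf_test_run_opts, opts,
                    .data_in = data,
                    .data_size_in = sizeof(data),
            );
            int err = bpf_prog_test_run_opts(prog_fd, &opts);

            if (err) /* the kernel now refuses the run rather than executing it */
                    fprintf(stderr, "test_run rejected: %d\n", err);
            return err;
    }
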
diff --git a/debian/patches-rt/drm-ttm-tests-Let-ttm_bo_test-consider-different-ww_.patch b/debian/patches-rt/drm-ttm-tests-Let-ttm_bo_test-consider-different-ww_.patch
deleted file mode 100644
index 13ce972247..0000000000
--- a/debian/patches-rt/drm-ttm-tests-Let-ttm_bo_test-consider-different-ww_.patch
+++ /dev/null
@@ -1,52 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Thu, 4 Apr 2024 12:25:34 +0200
-Subject: [PATCH] drm/ttm/tests: Let ttm_bo_test consider different ww_mutex
- implementation.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
-
-PREEMPT_RT has a different locking implementation for ww_mutex. The
-base mutex of struct ww_mutex is declared as struct WW_MUTEX_BASE. The
-latter is defined as `mutex' for non-PREEMPT_RT builds and `rt_mutex'
-for PREEMPT_RT builds.
-
-Using mutex_lock() directly on the base mutex in
-ttm_bo_reserve_deadlock() leads to compile error on PREEMPT_RT.
-
-The locking-selftest has its own defines to deal with this and it is
-probably best to defines the needed one within the test program since
-their usefulness is limited outside of well known selftests.
-
-Provide ww_mutex_base_lock() which points to the correct function for
-PREEMPT_RT and non-PREEMPT_RT builds.
-
-Fixes: 995279d280d1e ("drm/ttm/tests: Add tests for ttm_bo functions")
-Link: https://lore.kernel.org/r/20240404102534.QTa80QPY@linutronix.de
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/gpu/drm/ttm/tests/ttm_bo_test.c | 8 +++++++-
- 1 file changed, 7 insertions(+), 1 deletion(-)
-
---- a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
-+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
-@@ -18,6 +18,12 @@
-
- #define BO_SIZE SZ_8K
-
-+#ifdef CONFIG_PREEMPT_RT
-+#define ww_mutex_base_lock(b) rt_mutex_lock(b)
-+#else
-+#define ww_mutex_base_lock(b) mutex_lock(b)
-+#endif
-+
- struct ttm_bo_test_case {
- const char *description;
- bool interruptible;
-@@ -142,7 +148,7 @@ static void ttm_bo_reserve_deadlock(stru
- bo2 = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
-
- ww_acquire_init(&ctx1, &reservation_ww_class);
-- mutex_lock(&bo2->base.resv->lock.base);
-+ ww_mutex_base_lock(&bo2->base.resv->lock.base);
-
- /* The deadlock will be caught by WW mutex, don't warn about it */
- lock_release(&bo2->base.resv->lock.base.dep_map, 1);
diff --git a/debian/patches-rt/pinctrl-renesas-rzg2l-Use-spin_-lock-unlock-_irq-sav.patch b/debian/patches-rt/pinctrl-renesas-rzg2l-Use-spin_-lock-unlock-_irq-sav.patch
deleted file mode 100644
index 3462fa28ef..0000000000
--- a/debian/patches-rt/pinctrl-renesas-rzg2l-Use-spin_-lock-unlock-_irq-sav.patch
+++ /dev/null
@@ -1,38 +0,0 @@
-From: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
-Date: Wed, 22 May 2024 08:54:21 +0300
-Subject: [PATCH] pinctrl: renesas: rzg2l: Use
- spin_{lock,unlock}_irq{save,restore}
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10-rc1-rt1.tar.xz
-
-On PREEMPT_RT kernels the spinlock_t maps to an rtmutex. Using
-raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() on
-&pctrl->lock.rlock breaks the PREEMPT_RT builds. To fix this use
-spin_lock_irqsave()/spin_unlock_irqrestore() on &pctrl->lock.
-
-Fixes: 02cd2d3be1c3 ("pinctrl: renesas: rzg2l: Configure the interrupt type on resume")
-Reported-by: Diederik de Haas <didi.debian@cknow.org>
-Closes: https://lore.kernel.org/all/131999629.KQPSlr0Zke@bagend
-Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
-Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be>
-Link: https://lore.kernel.org/r/20240522055421.2842689-1-claudiu.beznea.uj@bp.renesas.com
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/pinctrl/renesas/pinctrl-rzg2l.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
---- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
-+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
-@@ -2069,11 +2069,11 @@ static void rzg2l_gpio_irq_restore(struc
- * This has to be atomically executed to protect against a concurrent
- * interrupt.
- */
-- raw_spin_lock_irqsave(&pctrl->lock.rlock, flags);
-+ spin_lock_irqsave(&pctrl->lock, flags);
- ret = rzg2l_gpio_irq_set_type(data, irqd_get_trigger_type(data));
- if (!ret && !irqd_irq_disabled(data))
- rzg2l_gpio_irq_enable(data);
-- raw_spin_unlock_irqrestore(&pctrl->lock.rlock, flags);
-+ spin_unlock_irqrestore(&pctrl->lock, flags);
-
- if (ret)
- dev_crit(pctrl->dev, "Failed to set IRQ type for virq=%u\n", virq);
diff --git a/debian/patches-rt/powerpc-pseries-Select-the-generic-memory-allocator.patch b/debian/patches-rt/powerpc-pseries-Select-the-generic-memory-allocator.patch
index c151c8dc89..82659cbff5 100644
--- a/debian/patches-rt/powerpc-pseries-Select-the-generic-memory-allocator.patch
+++ b/debian/patches-rt/powerpc-pseries-Select-the-generic-memory-allocator.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 9 Mar 2023 09:13:52 +0100
Subject: [PATCH] powerpc/pseries: Select the generic memory allocator.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
The RTAS work area allocator is using the generic memory allocator and
as such it must select it.
diff --git a/debian/patches-rt/powerpc__traps__Use_PREEMPT_RT.patch b/debian/patches-rt/powerpc__traps__Use_PREEMPT_RT.patch
index 0f642da4a1..c780250682 100644
--- a/debian/patches-rt/powerpc__traps__Use_PREEMPT_RT.patch
+++ b/debian/patches-rt/powerpc__traps__Use_PREEMPT_RT.patch
@@ -1,7 +1,7 @@
Subject: powerpc: traps: Use PREEMPT_RT
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri Jul 26 11:30:49 2019 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
diff --git a/debian/patches-rt/powerpc_kvm__Disable_in-kernel_MPIC_emulation_for_PREEMPT_RT.patch b/debian/patches-rt/powerpc_kvm__Disable_in-kernel_MPIC_emulation_for_PREEMPT_RT.patch
index 222142d683..97a04f4699 100644
--- a/debian/patches-rt/powerpc_kvm__Disable_in-kernel_MPIC_emulation_for_PREEMPT_RT.patch
+++ b/debian/patches-rt/powerpc_kvm__Disable_in-kernel_MPIC_emulation_for_PREEMPT_RT.patch
@@ -1,7 +1,7 @@
Subject: powerpc/kvm: Disable in-kernel MPIC emulation for PREEMPT_RT
From: Bogdan Purcareata <bogdan.purcareata@freescale.com>
Date: Fri Apr 24 15:53:13 2015 +0000
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
From: Bogdan Purcareata <bogdan.purcareata@freescale.com>
diff --git a/debian/patches-rt/powerpc_pseries_iommu__Use_a_locallock_instead_local_irq_save.patch b/debian/patches-rt/powerpc_pseries_iommu__Use_a_locallock_instead_local_irq_save.patch
index eda53cbb05..dcead3382f 100644
--- a/debian/patches-rt/powerpc_pseries_iommu__Use_a_locallock_instead_local_irq_save.patch
+++ b/debian/patches-rt/powerpc_pseries_iommu__Use_a_locallock_instead_local_irq_save.patch
@@ -1,7 +1,7 @@
Subject: powerpc/pseries/iommu: Use a locallock instead local_irq_save()
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue Mar 26 18:31:54 2019 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
diff --git a/debian/patches-rt/powerpc_stackprotector__work_around_stack-guard_init_from_atomic.patch b/debian/patches-rt/powerpc_stackprotector__work_around_stack-guard_init_from_atomic.patch
index 3031f56343..5a453c8f7d 100644
--- a/debian/patches-rt/powerpc_stackprotector__work_around_stack-guard_init_from_atomic.patch
+++ b/debian/patches-rt/powerpc_stackprotector__work_around_stack-guard_init_from_atomic.patch
@@ -1,7 +1,7 @@
Subject: powerpc/stackprotector: work around stack-guard init from atomic
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue Mar 26 18:31:29 2019 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
diff --git a/debian/patches-rt/prinkt-nbcon-Add-a-scheduling-point-to-nbcon_kthread.patch b/debian/patches-rt/prinkt-nbcon-Add-a-scheduling-point-to-nbcon_kthread.patch
new file mode 100644
index 0000000000..b637815a28
--- /dev/null
+++ b/debian/patches-rt/prinkt-nbcon-Add-a-scheduling-point-to-nbcon_kthread.patch
@@ -0,0 +1,34 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 20 Jun 2024 11:21:13 +0200
+Subject: [PATCH] printk/nbcon: Add a scheduling point to nbcon_kthread_func().
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+Constant printing can lead to a CPU hog in nbcon_kthread_func(). The
+context is preemptible, but on !PREEMPT kernels there is no explicit
+preemption point, which leads to softlockup warnings.
+
+Add an explicit preemption point in nbcon_kthread_func().
+
+Reported-by: Derek Barbosa <debarbos@redhat.com>
+Link: https://lore.kernel.org/ZnHF5j1DUDjN1kkq@debarbos-thinkpadt14sgen2i.remote.csb
+Reviewed-by: John Ogness <john.ogness@linutronix.de>
+Acked-by: Andrew Halaney <ahalaney@redhat.com>
+Tested-by: Andrew Halaney <ahalaney@redhat.com>
+Acked-by: Derek Barbosa <debarbos@redhat.com>
+Tested-by: Derek Barbosa <debarbos@redhat.com>
+Link: https://lore.kernel.org/r/20240620094300.YJlW043f@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/nbcon.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/kernel/printk/nbcon.c
++++ b/kernel/printk/nbcon.c
+@@ -1119,6 +1119,7 @@ static int nbcon_kthread_func(void *__co
+ }
+
+ console_srcu_read_unlock(cookie);
++ cond_resched();
+
+ } while (backlog);
+
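
The fix is the standard pattern for long-running kernel threads on !PREEMPT builds: give the scheduler an explicit chance to run once per loop iteration. Schematically, with the record emission reduced to a comment:

    do {
            backlog = false;
            cookie = console_srcu_read_lock();
            /* ... emit one record per console, set backlog if more are pending ... */
            console_srcu_read_unlock(cookie);
            cond_resched();     /* explicit scheduling point added by the patch */
    } while (backlog);
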
diff --git a/debian/patches-rt/rcutorture-Also-force-sched-priority-to-timersd-on-b.patch b/debian/patches-rt/rcutorture-Also-force-sched-priority-to-timersd-on-b.patch
index bd0a683cc6..7ba366d557 100644
--- a/debian/patches-rt/rcutorture-Also-force-sched-priority-to-timersd-on-b.patch
+++ b/debian/patches-rt/rcutorture-Also-force-sched-priority-to-timersd-on-b.patch
@@ -2,7 +2,7 @@ From: Frederic Weisbecker <frederic@kernel.org>
Date: Tue, 5 Apr 2022 03:07:51 +0200
Subject: [PATCH] rcutorture: Also force sched priority to timersd on
boosting test.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
ksoftirqd is statically boosted to the priority level right above the
one of rcu_torture_boost() so that timers, which torture readers rely on,
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
-@@ -2413,6 +2413,12 @@ static int rcutorture_booster_init(unsig
+@@ -2420,6 +2420,12 @@ static int rcutorture_booster_init(unsig
WARN_ON_ONCE(!t);
sp.sched_priority = 2;
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
diff --git a/debian/patches-rt/riscv-add-PREEMPT_AUTO-support.patch b/debian/patches-rt/riscv-add-PREEMPT_AUTO-support.patch
index fb0322f9f3..8567f52f03 100644
--- a/debian/patches-rt/riscv-add-PREEMPT_AUTO-support.patch
+++ b/debian/patches-rt/riscv-add-PREEMPT_AUTO-support.patch
@@ -1,7 +1,7 @@
From: Jisheng Zhang <jszhang@kernel.org>
Date: Tue, 31 Oct 2023 22:35:20 +0800
Subject: [PATCH] riscv: add PREEMPT_AUTO support
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
riscv has switched to GENERIC_ENTRY, so adding PREEMPT_AUTO is as simple
as adding TIF_ARCH_RESCHED_LAZY related definitions and enabling
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
-@@ -152,6 +152,7 @@ config RISCV
+@@ -163,6 +163,7 @@ config RISCV
select HAVE_PERF_USER_STACK_DUMP
select HAVE_POSIX_CPU_TIMERS_TASK_WORK
select HAVE_PREEMPT_DYNAMIC_KEY if !XIP_KERNEL
diff --git a/debian/patches-rt/riscv-allow-to-enable-RT.patch b/debian/patches-rt/riscv-allow-to-enable-RT.patch
index 346680179e..afacb81ea9 100644
--- a/debian/patches-rt/riscv-allow-to-enable-RT.patch
+++ b/debian/patches-rt/riscv-allow-to-enable-RT.patch
@@ -1,7 +1,7 @@
From: Jisheng Zhang <jszhang@kernel.org>
Date: Tue, 31 Oct 2023 22:35:21 +0800
Subject: [PATCH] riscv: allow to enable RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
Now, it's ready to enable RT on riscv.
@@ -13,11 +13,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
-@@ -56,6 +56,7 @@ config RISCV
+@@ -58,6 +58,7 @@ config RISCV
select ARCH_SUPPORTS_LTO_CLANG_THIN if LLD_VERSION >= 140000
select ARCH_SUPPORTS_PAGE_TABLE_CHECK if MMU
select ARCH_SUPPORTS_PER_VMA_LOCK if MMU
+ select ARCH_SUPPORTS_RT
select ARCH_SUPPORTS_SHADOW_CALL_STACK if HAVE_SHADOW_CALL_STACK
+ select ARCH_USE_CMPXCHG_LOCKREF if 64BIT
select ARCH_USE_MEMTEST
- select ARCH_USE_QUEUED_RWLOCKS
diff --git a/debian/patches-rt/sched-rt-Don-t-try-push-tasks-if-there-are-none.patch b/debian/patches-rt/sched-rt-Don-t-try-push-tasks-if-there-are-none.patch
index 1a1bc95251..c70409f0cc 100644
--- a/debian/patches-rt/sched-rt-Don-t-try-push-tasks-if-there-are-none.patch
+++ b/debian/patches-rt/sched-rt-Don-t-try-push-tasks-if-there-are-none.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 1 Aug 2023 17:26:48 +0200
Subject: [PATCH] sched/rt: Don't try push tasks if there are none.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
I have a RT task X at a high priority and cyclictest on each CPU with
lower priority than X's. If X is active and each CPU wakes their own
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
-@@ -2194,8 +2194,11 @@ static int rto_next_cpu(struct root_doma
+@@ -2193,8 +2193,11 @@ static int rto_next_cpu(struct root_doma
rd->rto_cpu = cpu;
diff --git a/debian/patches-rt/series b/debian/patches-rt/series
index 34486c1a6b..dd8d18c0d7 100644
--- a/debian/patches-rt/series
+++ b/debian/patches-rt/series
@@ -4,57 +4,19 @@
# Posted and applied
###########################################################################
-# signal_x86__Delay_calling_signals_in_atomic.patch
-
###########################################################################
# Posted
###########################################################################
-# net, RPS, v5
-0001-net-Remove-conditional-threaded-NAPI-wakeup-based-on.patch
-0002-net-Allow-to-use-SMP-threads-for-backlog-NAPI.patch
-0003-net-Use-backlog-NAPI-to-clean-up-the-defer_list.patch
-0004-net-Rename-rps_lock-to-backlog_lock.patch
-
-# perf, sigtrap, v3
+# perf, sigtrap, v5
0001-perf-Move-irq_work_queue-where-the-event-is-prepared.patch
-0002-perf-Enqueue-SIGTRAP-always-via-task_work.patch
-0003-perf-Remove-perf_swevent_get_recursion_context-from-.patch
-0004-perf-Split-__perf_pending_irq-out-of-perf_pending_ir.patch
-
-# locking.
-drm-ttm-tests-Let-ttm_bo_test-consider-different-ww_.patch
-
-pinctrl-renesas-rzg2l-Use-spin_-lock-unlock-_irq-sav.patch
-
-###########################################################################
-# Post
-###########################################################################
-
-###########################################################################
-# X86:
-###########################################################################
-x86__Allow_to_enable_RT.patch
-x86__Enable_RT_also_on_32bit.patch
-
-###########################################################################
-# For later, not essential
-###########################################################################
-# Posted
-sched-rt-Don-t-try-push-tasks-if-there-are-none.patch
-
-# Needs discussion first.
-softirq-Use-a-dedicated-thread-for-timer-wakeups.patch
-rcutorture-Also-force-sched-priority-to-timersd-on-b.patch
-tick-Fix-timer-storm-since-introduction-of-timersd.patch
-softirq-Wake-ktimers-thread-also-in-softirq.patch
-zram-Replace-bit-spinlocks-with-spinlock_t-for-PREEM.patch
-# preempt-Put-preempt_enable-within-an-instrumentation.patch
-
-# Sched
-0001-sched-core-Provide-a-method-to-check-if-a-task-is-PI.patch
-0002-softirq-Add-function-to-preempt-serving-softirqs.patch
-0003-time-Allow-to-preempt-after-a-callback.patch
+0002-task_work-Add-TWA_NMI_CURRENT-as-an-additional-notif.patch
+0003-perf-Enqueue-SIGTRAP-always-via-task_work.patch
+0004-perf-Shrink-the-size-of-the-recursion-counter.patch
+0005-perf-Move-swevent_htable-recursion-into-task_struct.patch
+0006-perf-Don-t-disable-preemption-in-perf_pending_task.patch
+0007-perf-Split-__perf_pending_irq-out-of-perf_pending_ir.patch
+task_work-make-TWA_NMI_CURRENT-handling-conditional-.patch
###########################################################################
# John's printk queue
@@ -69,7 +31,7 @@ zram-Replace-bit-spinlocks-with-spinlock_t-for-PREEM.patch
0008-serial-core-Provide-low-level-functions-to-lock-port.patch
0009-serial-core-Introduce-wrapper-to-set-uart_port-cons.patch
0010-console-Improve-console_srcu_read_flags-comments.patch
-0011-nbcon-Provide-functions-for-drivers-to-acquire-conso.patch
+0011-nbcon-Add-API-to-acquire-context-for-non-printing-op.patch
0012-serial-core-Implement-processing-in-port-lock-wrappe.patch
0013-printk-nbcon-Do-not-rely-on-proxy-headers.patch
0014-printk-Make-console_is_usable-available-to-nbcon.patch
@@ -88,56 +50,117 @@ zram-Replace-bit-spinlocks-with-spinlock_t-for-PREEM.patch
0027-panic-Mark-emergency-section-in-oops.patch
0028-rcu-Mark-emergency-sections-in-rcu-stalls.patch
0029-lockdep-Mark-emergency-sections-in-lockdep-splats.patch
-0030-printk-nbcon-Introduce-printing-kthreads.patch
-0031-printk-Atomic-print-in-printk-context-on-shutdown.patch
-0032-printk-nbcon-Add-context-to-console_is_usable.patch
-0033-printk-nbcon-Add-printer-thread-wakeups.patch
-0034-printk-nbcon-Stop-threads-on-shutdown-reboot.patch
-0035-printk-nbcon-Start-printing-threads.patch
-0036-printk-Provide-helper-for-message-prepending.patch
-0037-printk-nbcon-Show-replay-message-on-takeover.patch
-0038-proc-consoles-Add-notation-to-c_start-c_stop.patch
-0039-proc-Add-nbcon-support-for-proc-consoles.patch
-0040-tty-sysfs-Add-nbcon-support-for-active.patch
-0041-printk-nbcon-Provide-function-to-reacquire-ownership.patch
-0042-serial-8250-Switch-to-nbcon-console.patch
-0043-serial-8250-Revert-drop-lockdep-annotation-from-seri.patch
-0044-printk-Add-kthread-for-all-legacy-consoles.patch
-0045-printk-Provide-threadprintk-boot-argument.patch
-0046-printk-Avoid-false-positive-lockdep-report-for-legac.patch
+0030-printk-Rename-console_replay_all-and-update-context.patch
+0031-printk-nbcon-Introduce-printing-kthreads.patch
+0032-printk-Atomic-print-in-printk-context-on-shutdown.patch
+0033-printk-nbcon-Fix-nbcon_cpu_emergency_flush-when-pree.patch
+0034-printk-nbcon-Add-context-to-console_is_usable.patch
+0035-printk-nbcon-Add-printer-thread-wakeups.patch
+0036-printk-nbcon-Stop-threads-on-shutdown-reboot.patch
+0037-printk-nbcon-Start-printing-threads.patch
+0038-printk-Provide-helper-for-message-prepending.patch
+0039-printk-nbcon-Show-replay-message-on-takeover.patch
+0040-printk-Add-kthread-for-all-legacy-consoles.patch
+0041-proc-consoles-Add-notation-to-c_start-c_stop.patch
+0042-proc-Add-nbcon-support-for-proc-consoles.patch
+0043-tty-sysfs-Add-nbcon-support-for-active.patch
+0044-printk-Provide-threadprintk-boot-argument.patch
+0045-printk-Avoid-false-positive-lockdep-report-for-legac.patch
+0046-printk-nbcon-Add-function-for-printers-to-reacquire-.patch
+0047-serial-8250-Switch-to-nbcon-console.patch
+0048-serial-8250-Revert-drop-lockdep-annotation-from-seri.patch
+#
+prinkt-nbcon-Add-a-scheduling-point-to-nbcon_kthread.patch
###########################################################################
-# DRM:
+# Post
###########################################################################
-# https://lore.kernel.org/all/20240405142737.920626-1-bigeasy@linutronix.de/
-0003-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch
-0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch
-0005-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch
-drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch
-0008-drm-i915-gt-Queue-and-wait-for-the-irq_work-item.patch
-0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch
-0010-drm-i915-Drop-the-irqs_disabled-check.patch
-drm-i915-guc-Consider-also-RCU-depth-in-busy-loop.patch
-Revert-drm-i915-Depend-on-PREEMPT_RT.patch
###########################################################################
-# Lazy preemption
+# Enabling
###########################################################################
-PREEMPT_AUTO.patch
+x86__Allow_to_enable_RT.patch
+x86__Enable_RT_also_on_32bit.patch
+ARM64__Allow_to_enable_RT.patch
+riscv-allow-to-enable-RT.patch
+
+###########################################################################
+# For later, not essential
+###########################################################################
+# Posted
+sched-rt-Don-t-try-push-tasks-if-there-are-none.patch
+
+# Needs discussion first.
+softirq-Use-a-dedicated-thread-for-timer-wakeups.patch
+rcutorture-Also-force-sched-priority-to-timersd-on-b.patch
+tick-Fix-timer-storm-since-introduction-of-timersd.patch
+softirq-Wake-ktimers-thread-also-in-softirq.patch
+
+# zram
+0001-zram-Replace-bit-spinlocks-with-a-spinlock_t.patch
+0002-zram-Remove-ZRAM_LOCK.patch
+0003-zram-Shrink-zram_table_entry-flags.patch
+
+# Sched
+0001-sched-core-Provide-a-method-to-check-if-a-task-is-PI.patch
+0002-softirq-Add-function-to-preempt-serving-softirqs.patch
+0003-time-Allow-to-preempt-after-a-callback.patch
###########################################################################
-# ARM/ARM64
+# DRM:
+###########################################################################
+# https://lore.kernel.org/all/20240613102818.4056866-1-bigeasy@linutronix.de/
+0001-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch
+0002-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch
+0003-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch
+0004-drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch
+0005-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch
+0006-drm-i915-Drop-the-irqs_disabled-check.patch
+0007-drm-i915-guc-Consider-also-RCU-depth-in-busy-loop.patch
+0008-Revert-drm-i915-Depend-on-PREEMPT_RT.patch
+
+# Lazy preemption
+PREEMPT_AUTO.patch
+
+# BH series
+0001-locking-local_lock-Introduce-guard-definition-for-lo.patch
+0002-locking-local_lock-Add-local-nested-BH-locking-infra.patch
+0003-net-Use-__napi_alloc_frag_align-instead-of-open-codi.patch
+0004-net-Use-nested-BH-locking-for-napi_alloc_cache.patch
+0005-net-tcp_sigpool-Use-nested-BH-locking-for-sigpool_sc.patch
+0006-net-ipv4-Use-nested-BH-locking-for-ipv4_tcp_sk.patch
+0007-netfilter-br_netfilter-Use-nested-BH-locking-for-brn.patch
+0008-net-softnet_data-Make-xmit-per-task.patch
+0009-dev-Remove-PREEMPT_RT-ifdefs-from-backlog_lock.patch
+0010-dev-Use-nested-BH-locking-for-softnet_data.process_q.patch
+0011-lwt-Don-t-disable-migration-prio-invoking-BPF.patch
+0012-seg6-Use-nested-BH-locking-for-seg6_bpf_srh_states.patch
+0013-net-Use-nested-BH-locking-for-bpf_scratchpad.patch
+0014-net-Reference-bpf_redirect_info-via-task_struct-on-P.patch
+0015-net-Move-per-CPU-flush-lists-to-bpf_net_context-on-P.patch
+# optimisation + fixes
+0001-net-Remove-task_struct-bpf_net_context-init-on-fork.patch
+0002-net-Optimize-xdp_do_flush-with-bpf_net_context-infos.patch
+0003-net-Move-flush-list-retrieval-to-where-it-is-used.patch
+tun-Assign-missing-bpf_net_context.patch
+tun-Add-missing-bpf_net_ctx_clear-in-do_xdp_generic.patch
+bpf-Remove-tst_run-from-lwt_seg6local_prog_ops.patch
+# tw_timer
+0001-net-tcp-dccp-prepare-for-tw_timer-un-pinning.patch
+0002-net-tcp-un-pin-the-tw_timer.patch
+0003-tcp-move-inet_twsk_schedule-helper-out-of-header.patch
+
+###########################################################################
+# ARM
###########################################################################
0001-arm-Disable-jump-label-on-PREEMPT_RT.patch
ARM__enable_irq_in_translation_section_permission_fault_handlers.patch
arm-Disable-FAST_GUP-on-PREEMPT_RT-if-HIGHPTE-is-als.patch
-# arm64-signal-Use-ARCH_RT_DELAYS_SIGNAL_SEND.patch
0001-ARM-vfp-Provide-vfp_lock-for-VFP-locking.patch
0002-ARM-vfp-Use-vfp_lock-in-vfp_sync_hwstate.patch
0003-ARM-vfp-Use-vfp_lock-in-vfp_support_entry.patch
0004-ARM-vfp-Move-sending-signals-outside-of-vfp_lock-ed-.patch
ARM__Allow_to_enable_RT.patch
-ARM64__Allow_to_enable_RT.patch
###########################################################################
# POWERPC
@@ -153,7 +176,6 @@ POWERPC__Allow_to_enable_RT.patch
# RISC-V
###########################################################################
riscv-add-PREEMPT_AUTO-support.patch
-riscv-allow-to-enable-RT.patch
# Sysfs file vs uname() -v
sysfs__Add__sys_kernel_realtime_entry.patch
diff --git a/debian/patches-rt/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch b/debian/patches-rt/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch
index fdfc59c52e..4b78fdfd15 100644
--- a/debian/patches-rt/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch
+++ b/debian/patches-rt/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 1 Dec 2021 17:41:09 +0100
Subject: [PATCH] softirq: Use a dedicated thread for timer wakeups.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
A timer/hrtimer softirq is raised in-IRQ context. With threaded
interrupts enabled or on PREEMPT_RT this leads to waking the ksoftirqd
@@ -191,7 +191,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
early_initcall(spawn_ksoftirqd);
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1812,7 +1812,7 @@ void hrtimer_interrupt(struct clock_even
+@@ -1809,7 +1809,7 @@ void hrtimer_interrupt(struct clock_even
if (!ktime_before(now, cpu_base->softirq_expires_next)) {
cpu_base->softirq_expires_next = KTIME_MAX;
cpu_base->softirq_activated = 1;
@@ -200,7 +200,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
-@@ -1925,7 +1925,7 @@ void hrtimer_run_queues(void)
+@@ -1904,7 +1904,7 @@ void hrtimer_run_queues(void)
if (!ktime_before(now, cpu_base->softirq_expires_next)) {
cpu_base->softirq_expires_next = KTIME_MAX;
cpu_base->softirq_activated = 1;
@@ -211,7 +211,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -2466,7 +2466,7 @@ static void run_local_timers(void)
+@@ -2465,7 +2465,7 @@ static void run_local_timers(void)
/* Raise the softirq only if required. */
if (time_after_eq(jiffies, base->next_expiry) ||
(i == BASE_DEF && tmigr_requires_handle_remote())) {
diff --git a/debian/patches-rt/softirq-Wake-ktimers-thread-also-in-softirq.patch b/debian/patches-rt/softirq-Wake-ktimers-thread-also-in-softirq.patch
index ef9bb04143..2ad8979d2f 100644
--- a/debian/patches-rt/softirq-Wake-ktimers-thread-also-in-softirq.patch
+++ b/debian/patches-rt/softirq-Wake-ktimers-thread-also-in-softirq.patch
@@ -1,7 +1,7 @@
From: Junxiao Chang <junxiao.chang@intel.com>
Date: Mon, 20 Feb 2023 09:12:20 +0100
Subject: [PATCH] softirq: Wake ktimers thread also in softirq.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
If the hrtimer is raised while a softirq is processed then it does not
wake the corresponding ktimers thread. This is due to the optimisation in the
diff --git a/debian/patches-rt/sysfs__Add__sys_kernel_realtime_entry.patch b/debian/patches-rt/sysfs__Add__sys_kernel_realtime_entry.patch
index 966bb57692..bac948a2db 100644
--- a/debian/patches-rt/sysfs__Add__sys_kernel_realtime_entry.patch
+++ b/debian/patches-rt/sysfs__Add__sys_kernel_realtime_entry.patch
@@ -1,7 +1,7 @@
Subject: sysfs: Add /sys/kernel/realtime entry
From: Clark Williams <williams@redhat.com>
Date: Sat Jul 30 21:55:53 2011 -0500
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
From: Clark Williams <williams@redhat.com>
diff --git a/debian/patches-rt/task_work-make-TWA_NMI_CURRENT-handling-conditional-.patch b/debian/patches-rt/task_work-make-TWA_NMI_CURRENT-handling-conditional-.patch
new file mode 100644
index 0000000000..957e92705a
--- /dev/null
+++ b/debian/patches-rt/task_work-make-TWA_NMI_CURRENT-handling-conditional-.patch
@@ -0,0 +1,68 @@
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Mon, 29 Jul 2024 12:05:06 -0700
+Subject: [PATCH] task_work: make TWA_NMI_CURRENT handling conditional on
+ IRQ_WORK
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+The TWA_NMI_CURRENT handling very much depends on IRQ_WORK, but that
+isn't enabled everywhere.
+
+Maybe the IRQ_WORK infrastructure should just be unconditional - x86
+ends up indirectly enabling it through unconditionally enabling
+PERF_EVENTS, for example. But it also gets enabled by having SMP
+support, or even if you just have PRINTK enabled.
+
+But in the meantime TWA_NMI_CURRENT causes tons of build failures on
+various odd minimal configs. These did show up in linux-next, but
+despite that nobody bothered to fix it or even inform me until -rc1 was
+out.
+
+Fixes: 466e4d801cd4 ("task_work: Add TWA_NMI_CURRENT as an additional notify mode")
+Reported-by: Naresh Kamboju <naresh.kamboju@linaro.org>
+Reported-by: kernelci.org bot <bot@kernelci.org>
+Reported-by: Guenter Roeck <linux@roeck-us.net>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+ kernel/task_work.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/kernel/task_work.c
++++ b/kernel/task_work.c
+@@ -7,12 +7,14 @@
+
+ static struct callback_head work_exited; /* all we need is ->next == NULL */
+
++#ifdef CONFIG_IRQ_WORK
+ static void task_work_set_notify_irq(struct irq_work *entry)
+ {
+ test_and_set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
+ }
+ static DEFINE_PER_CPU(struct irq_work, irq_work_NMI_resume) =
+ IRQ_WORK_INIT_HARD(task_work_set_notify_irq);
++#endif
+
+ /**
+ * task_work_add - ask the @task to execute @work->func()
+@@ -58,6 +60,8 @@ int task_work_add(struct task_struct *ta
+ if (notify == TWA_NMI_CURRENT) {
+ if (WARN_ON_ONCE(task != current))
+ return -EINVAL;
++ if (!IS_ENABLED(CONFIG_IRQ_WORK))
++ return -EINVAL;
+ } else {
+ /* record the work call stack in order to print it in KASAN reports */
+ kasan_record_aux_stack(work);
+@@ -82,9 +86,11 @@ int task_work_add(struct task_struct *ta
+ case TWA_SIGNAL_NO_IPI:
+ __set_notify_signal(task);
+ break;
++#ifdef CONFIG_IRQ_WORK
+ case TWA_NMI_CURRENT:
+ irq_work_queue(this_cpu_ptr(&irq_work_NMI_resume));
+ break;
++#endif
+ default:
+ WARN_ON_ONCE(1);
+ break;
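
The fix above pairs a preprocessor #ifdef around the irq_work definitions
with an IS_ENABLED(CONFIG_IRQ_WORK) check in task_work_add(). IS_ENABLED()
folds to a compile-time 0 or 1, so the disabled branch is discarded by the
compiler while still being type-checked. A minimal userspace sketch of the
idiom, simplified from include/linux/kconfig.h (it ignores the
CONFIG_FOO_MODULE case):

    #include <stdio.h>

    #define CONFIG_IRQ_WORK 1   /* as if enabled in .config; CONFIG_FAKE_OPT
                                   is deliberately left undefined */

    /* If the option is defined as 1, token pasting yields
     * __ARG_PLACEHOLDER_1, which expands to "0," and shifts the 1 into the
     * second-argument slot; otherwise the trailing 0 is picked. */
    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define __is_defined(x) ___is_defined(x) /* extra level expands x first */
    #define IS_ENABLED(option) __is_defined(option)

    int main(void)
    {
        printf("IRQ_WORK=%d FAKE_OPT=%d\n",
               IS_ENABLED(CONFIG_IRQ_WORK), IS_ENABLED(CONFIG_FAKE_OPT));

        if (!IS_ENABLED(CONFIG_IRQ_WORK))   /* dead code when enabled,    */
            return 1;                       /* like the -EINVAL bail-out  */
        return 0;
    }
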
diff --git a/debian/patches-rt/tick-Fix-timer-storm-since-introduction-of-timersd.patch b/debian/patches-rt/tick-Fix-timer-storm-since-introduction-of-timersd.patch
index 7c62b746e2..27c836a140 100644
--- a/debian/patches-rt/tick-Fix-timer-storm-since-introduction-of-timersd.patch
+++ b/debian/patches-rt/tick-Fix-timer-storm-since-introduction-of-timersd.patch
@@ -1,7 +1,7 @@
From: Frederic Weisbecker <frederic@kernel.org>
Date: Tue, 5 Apr 2022 03:07:52 +0200
Subject: [PATCH] tick: Fix timer storm since introduction of timersd
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
If timers are pending while the tick is reprogrammed on nohz_mode, the
next expiry is not armed to fire now; it is delayed one jiffy forward
diff --git a/debian/patches-rt/tun-Add-missing-bpf_net_ctx_clear-in-do_xdp_generic.patch b/debian/patches-rt/tun-Add-missing-bpf_net_ctx_clear-in-do_xdp_generic.patch
new file mode 100644
index 0000000000..3c70d0f6dd
--- /dev/null
+++ b/debian/patches-rt/tun-Add-missing-bpf_net_ctx_clear-in-do_xdp_generic.patch
@@ -0,0 +1,34 @@
+From: Jeongjun Park <aha310510@gmail.com>
+Date: Fri, 26 Jul 2024 06:40:49 +0900
+Subject: [PATCH] tun: Add missing bpf_net_ctx_clear() in do_xdp_generic()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+There are cases where do_xdp_generic() returns without clearing the
+bpf_net_context. This causes various memory corruptions, so the missing
+bpf_net_ctx_clear() call must be added.
+
+Reported-by: syzbot+44623300f057a28baf1e@syzkaller.appspotmail.com
+Fixes: fecef4cd42c6 ("tun: Assign missing bpf_net_context.")
+Signed-off-by: Jeongjun Park <aha310510@gmail.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Reported-by: syzbot+3c2b6d5d4bec3b904933@syzkaller.appspotmail.com
+Reported-by: syzbot+707d98c8649695eaf329@syzkaller.appspotmail.com
+Reported-by: syzbot+c226757eb784a9da3e8b@syzkaller.appspotmail.com
+Reported-by: syzbot+61a1cfc2b6632363d319@syzkaller.appspotmail.com
+Reported-by: syzbot+709e4c85c904bcd62735@syzkaller.appspotmail.com
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ net/core/dev.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -5150,6 +5150,7 @@ int do_xdp_generic(struct bpf_prog *xdp_
+ bpf_net_ctx_clear(bpf_net_ctx);
+ return XDP_DROP;
+ }
++ bpf_net_ctx_clear(bpf_net_ctx);
+ }
+ return XDP_PASS;
+ out_redir:
diff --git a/debian/patches-rt/tun-Assign-missing-bpf_net_context.patch b/debian/patches-rt/tun-Assign-missing-bpf_net_context.patch
new file mode 100644
index 0000000000..1d16417b19
--- /dev/null
+++ b/debian/patches-rt/tun-Assign-missing-bpf_net_context.patch
@@ -0,0 +1,114 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 4 Jul 2024 16:48:15 +0200
+Subject: [PATCH] tun: Assign missing bpf_net_context.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+During the introduction of struct bpf_net_context handling for
+XDP-redirect, the tun driver was missed.
+Jakub also pointed out that there is another call chain to
+do_xdp_generic() originating from netif_receive_skb(), and drivers may
+use it outside of the NAPI context.
+
+Set the bpf_net_context before invoking the BPF XDP program within the
+TUN driver. Set the bpf_net_context also in do_xdp_generic() if an XDP
+program is available.
+
+Reported-by: syzbot+0b5c75599f1d872bea6f@syzkaller.appspotmail.com
+Reported-by: syzbot+5ae46b237278e2369cac@syzkaller.appspotmail.com
+Reported-by: syzbot+c1e04a422bbc0f0f2921@syzkaller.appspotmail.com
+Fixes: 401cb7dae8130 ("net: Reference bpf_redirect_info via task_struct on PREEMPT_RT.")
+Link: https://lore.kernel.org/r/20240704144815.j8xQda5r@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/net/tun.c | 7 +++++++
+ net/core/dev.c | 5 +++++
+ 2 files changed, 12 insertions(+)
+
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1661,6 +1661,7 @@ static struct sk_buff *tun_build_skb(str
+ int len, int *skb_xdp)
+ {
+ struct page_frag *alloc_frag = &current->task_frag;
++ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+ struct bpf_prog *xdp_prog;
+ int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ char *buf;
+@@ -1700,6 +1701,7 @@ static struct sk_buff *tun_build_skb(str
+
+ local_bh_disable();
+ rcu_read_lock();
++ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+ xdp_prog = rcu_dereference(tun->xdp_prog);
+ if (xdp_prog) {
+ struct xdp_buff xdp;
+@@ -1728,12 +1730,14 @@ static struct sk_buff *tun_build_skb(str
+ pad = xdp.data - xdp.data_hard_start;
+ len = xdp.data_end - xdp.data;
+ }
++ bpf_net_ctx_clear(bpf_net_ctx);
+ rcu_read_unlock();
+ local_bh_enable();
+
+ return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
+
+ out:
++ bpf_net_ctx_clear(bpf_net_ctx);
+ rcu_read_unlock();
+ local_bh_enable();
+ return NULL;
+@@ -2566,6 +2570,7 @@ static int tun_sendmsg(struct socket *so
+
+ if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
+ ctl && ctl->type == TUN_MSG_PTR) {
++ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+ struct tun_page tpage;
+ int n = ctl->num;
+ int flush = 0, queued = 0;
+@@ -2574,6 +2579,7 @@ static int tun_sendmsg(struct socket *so
+
+ local_bh_disable();
+ rcu_read_lock();
++ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+
+ for (i = 0; i < n; i++) {
+ xdp = &((struct xdp_buff *)ctl->ptr)[i];
+@@ -2588,6 +2594,7 @@ static int tun_sendmsg(struct socket *so
+ if (tfile->napi_enabled && queued > 0)
+ napi_schedule(&tfile->napi);
+
++ bpf_net_ctx_clear(bpf_net_ctx);
+ rcu_read_unlock();
+ local_bh_enable();
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -5126,11 +5126,14 @@ static DEFINE_STATIC_KEY_FALSE(generic_x
+
+ int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb)
+ {
++ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
++
+ if (xdp_prog) {
+ struct xdp_buff xdp;
+ u32 act;
+ int err;
+
++ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+ act = netif_receive_generic_xdp(pskb, &xdp, xdp_prog);
+ if (act != XDP_PASS) {
+ switch (act) {
+@@ -5144,11 +5147,13 @@ int do_xdp_generic(struct bpf_prog *xdp_
+ generic_xdp_tx(*pskb, xdp_prog);
+ break;
+ }
++ bpf_net_ctx_clear(bpf_net_ctx);
+ return XDP_DROP;
+ }
+ }
+ return XDP_PASS;
+ out_redir:
++ bpf_net_ctx_clear(bpf_net_ctx);
+ kfree_skb_reason(*pskb, SKB_DROP_REASON_XDP);
+ return XDP_DROP;
+ }
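
Both tun fixes above enforce the same discipline: bpf_net_ctx_set() publishes
a pointer to an on-stack context via the current task, so every exit path must
call bpf_net_ctx_clear() before the stack frame goes away. A standalone sketch
of the failure mode, using hypothetical ctx_set()/ctx_clear() stand-ins rather
than the real kernel helpers:

    #include <stdio.h>

    struct net_ctx { int redirects; };

    /* Stand-in for task_struct::bpf_net_context: a per-task slot pointing
     * at an on-stack context for the duration of one call. */
    static _Thread_local struct net_ctx *cur_ctx;

    static struct net_ctx *ctx_set(struct net_ctx *c) { cur_ctx = c; return c; }
    static void ctx_clear(void) { cur_ctx = NULL; }

    /* Mirrors the do_xdp_generic() shape: set on entry, clear on EVERY
     * exit. The bugs fixed above were paths that skipped the clear,
     * leaving cur_ctx pointing at a dead stack frame. */
    static int handle_packet(int drop)
    {
        struct net_ctx ctx = { 0 };
        struct net_ctx *c = ctx_set(&ctx);

        if (drop) {
            ctx_clear();   /* the call the first tun fix adds on this path */
            return -1;
        }
        c->redirects++;
        ctx_clear();
        return 0;
    }

    int main(void)
    {
        handle_packet(0);
        handle_packet(1);
        printf("ctx after calls: %p (must be NULL)\n", (void *)cur_ctx);
        return 0;
    }
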
diff --git a/debian/patches-rt/x86__Allow_to_enable_RT.patch b/debian/patches-rt/x86__Allow_to_enable_RT.patch
index b24342b73e..5c40768f74 100644
--- a/debian/patches-rt/x86__Allow_to_enable_RT.patch
+++ b/debian/patches-rt/x86__Allow_to_enable_RT.patch
@@ -1,7 +1,7 @@
Subject: x86: Allow to enable RT
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed Aug 7 18:15:38 2019 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
diff --git a/debian/patches-rt/x86__Enable_RT_also_on_32bit.patch b/debian/patches-rt/x86__Enable_RT_also_on_32bit.patch
index 4cd4dd98e3..1ecb254401 100644
--- a/debian/patches-rt/x86__Enable_RT_also_on_32bit.patch
+++ b/debian/patches-rt/x86__Enable_RT_also_on_32bit.patch
@@ -1,7 +1,7 @@
Subject: x86: Enable RT also on 32bit
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu Nov 7 17:49:20 2019 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -121,6 +121,7 @@ config X86
+@@ -123,6 +123,7 @@ config X86
select ARCH_USES_CFI_TRAPS if X86_64 && CFI_CLANG
select ARCH_SUPPORTS_LTO_CLANG
select ARCH_SUPPORTS_LTO_CLANG_THIN