Diffstat (limited to 'debian/patches-rt/0138-tasklets-Prevent-tasklet_unlock_spin_wait-deadlock-o.patch')
-rw-r--r--  debian/patches-rt/0138-tasklets-Prevent-tasklet_unlock_spin_wait-deadlock-o.patch  109
1 file changed, 109 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0138-tasklets-Prevent-tasklet_unlock_spin_wait-deadlock-o.patch b/debian/patches-rt/0138-tasklets-Prevent-tasklet_unlock_spin_wait-deadlock-o.patch
new file mode 100644
index 000000000..9f446b54f
--- /dev/null
+++ b/debian/patches-rt/0138-tasklets-Prevent-tasklet_unlock_spin_wait-deadlock-o.patch
@@ -0,0 +1,109 @@
+From 42114155db1a5548923ac48a09b3f228b29d3b09 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 9 Mar 2021 09:42:10 +0100
+Subject: [PATCH 138/323] tasklets: Prevent tasklet_unlock_spin_wait() deadlock
+ on RT
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+tasklet_unlock_spin_wait() spin-waits for the TASKLET_STATE_RUN bit in
+the tasklet state to be cleared. This works nicely on !RT because the
+corresponding execution can only happen on a different CPU.
+
+On RT, softirq processing is preemptible; therefore a task preempting
+the softirq processing thread can spin forever.
+
+Prevent this by invoking local_bh_disable()/enable() inside the loop. If
+the softirq processing thread was preempted by the current task, current
+will block on the local lock, which yields the CPU to the preempted
+softirq processing thread. If the tasklet is processed on a different CPU,
+the local_bh_disable()/enable() pair is just a waste of processor
+cycles.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/interrupt.h | 12 ++----------
+ kernel/softirq.c | 28 +++++++++++++++++++++++++++-
+ 2 files changed, 29 insertions(+), 11 deletions(-)
+
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index c4fafbfa28a6..6eae70ec3a17 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -658,7 +658,7 @@ enum
+ TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
+ };
+
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
+ static inline int tasklet_trylock(struct tasklet_struct *t)
+ {
+ return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
+@@ -666,16 +666,8 @@ static inline int tasklet_trylock(struct tasklet_struct *t)
+
+ void tasklet_unlock(struct tasklet_struct *t);
+ void tasklet_unlock_wait(struct tasklet_struct *t);
++void tasklet_unlock_spin_wait(struct tasklet_struct *t);
+
+-/*
+- * Do not use in new code. Waiting for tasklets from atomic contexts is
+- * error prone and should be avoided.
+- */
+-static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t)
+-{
+- while (test_bit(TASKLET_STATE_RUN, &t->state))
+- cpu_relax();
+-}
+ #else
+ static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
+ static inline void tasklet_unlock(struct tasklet_struct *t) { }
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index ecc3ac4091c8..fcb201ceed71 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -614,6 +614,32 @@ void tasklet_init(struct tasklet_struct *t,
+ }
+ EXPORT_SYMBOL(tasklet_init);
+
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
++/*
++ * Do not use in new code. Waiting for tasklets from atomic contexts is
++ * error prone and should be avoided.
++ */
++void tasklet_unlock_spin_wait(struct tasklet_struct *t)
++{
++ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
++ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
++ /*
++ * Prevent a live lock when current has preempted soft
++ * interrupt processing or is preventing ksoftirqd from
++ * running. If the tasklet runs on a different CPU
++ * then this has no effect other than doing the BH
++ * disable/enable dance for nothing.
++ */
++ local_bh_disable();
++ local_bh_enable();
++ } else {
++ cpu_relax();
++ }
++ }
++}
++EXPORT_SYMBOL(tasklet_unlock_spin_wait);
++#endif
++
+ void tasklet_kill(struct tasklet_struct *t)
+ {
+ if (in_interrupt())
+@@ -627,7 +653,7 @@ void tasklet_kill(struct tasklet_struct *t)
+ }
+ EXPORT_SYMBOL(tasklet_kill);
+
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
+ void tasklet_unlock(struct tasklet_struct *t)
+ {
+ smp_mb__before_atomic();
+--
+2.43.0
+
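The loop the patch adds is easiest to see in isolation. The sketch below is a hypothetical userspace analogue, not part of the patch: a C11 atomic models TASKLET_STATE_RUN, a plain pthread models the (possibly preempted) softirq thread, and sched_yield() stands in for the local_bh_disable()/local_bh_enable() pair that, on PREEMPT_RT, blocks on the softirq local lock and hands the CPU back to the preempted softirq thread.

/* Build: cc -O2 -pthread spinwait.c -o spinwait (illustration only). */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int run_bit = 1;	/* models TASKLET_STATE_RUN */

/* Models the softirq thread executing the tasklet. */
static void *tasklet_body(void *arg)
{
	(void)arg;
	for (volatile long i = 0; i < 50000000; i++)
		;			/* pretend to do tasklet work */
	atomic_store_explicit(&run_bit, 0, memory_order_release);
	return NULL;
}

/* Analogue of tasklet_unlock_spin_wait(): wait for the RUN bit to clear. */
static void unlock_spin_wait(void)
{
	while (atomic_load_explicit(&run_bit, memory_order_acquire)) {
		/*
		 * Give the bit holder a chance to run instead of spinning.
		 * The RT kernel achieves this by the BH disable/enable
		 * dance, which blocks on the softirq local lock until the
		 * preempted softirq thread has finished.
		 */
		sched_yield();
	}
}

int main(void)
{
	pthread_t tl;

	pthread_create(&tl, NULL, tasklet_body, NULL);
	unlock_spin_wait();
	puts("RUN bit cleared; tasklet finished");
	pthread_join(tl, NULL);
	return 0;
}

The design point carries over directly: a waiter that merely spins with cpu_relax() never lets a lower-priority holder run on the same CPU, while a waiter that blocks or yields inside the loop does; on a different CPU the yield is, as the changelog says, just wasted cycles.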