Diffstat:
 debian/patches-rt/0134-tasklets-Provide-tasklet_disable_in_atomic.patch | 68 ++++++++++++++++++++
 1 file changed, 68 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0134-tasklets-Provide-tasklet_disable_in_atomic.patch b/debian/patches-rt/0134-tasklets-Provide-tasklet_disable_in_atomic.patch
new file mode 100644
index 000000000..2f6fb1ab9
--- /dev/null
+++ b/debian/patches-rt/0134-tasklets-Provide-tasklet_disable_in_atomic.patch
@@ -0,0 +1,68 @@
+From 35c080a5db30acfdfa20ca84bf9d0482e6ec1409 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 9 Mar 2021 09:42:06 +0100
+Subject: [PATCH 134/323] tasklets: Provide tasklet_disable_in_atomic()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+Replacing the spin wait loops in tasklet_unlock_wait() with
+wait_var_event() is not possible as a handful of tasklet_disable()
+invocations are happening in atomic context. All other invocations are in
+teardown paths which can sleep.
+
+Provide tasklet_disable_in_atomic() and tasklet_unlock_spin_wait() to
+convert the few atomic use cases over, which allows to change
+tasklet_disable() and tasklet_unlock_wait() in a later step.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/interrupt.h | 22 ++++++++++++++++++++++
+ 1 file changed, 22 insertions(+)
+
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index 75c4380afe9b..abba3eff4f86 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -675,10 +675,21 @@ static inline void tasklet_unlock_wait(struct tasklet_struct *t)
+ 	while (test_bit(TASKLET_STATE_RUN, &t->state))
+ 		cpu_relax();
+ }
++
++/*
++ * Do not use in new code. Waiting for tasklets from atomic contexts is
++ * error prone and should be avoided.
++ */
++static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t)
++{
++	while (test_bit(TASKLET_STATE_RUN, &t->state))
++		cpu_relax();
++}
+ #else
+ static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
+ static inline void tasklet_unlock(struct tasklet_struct *t) { }
+ static inline void tasklet_unlock_wait(struct tasklet_struct *t) { }
++static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { }
+ #endif
+
+ extern void __tasklet_schedule(struct tasklet_struct *t);
+@@ -703,6 +714,17 @@ static inline void tasklet_disable_nosync(struct tasklet_struct *t)
+ 	smp_mb__after_atomic();
+ }
+
++/*
++ * Do not use in new code. Disabling tasklets from atomic contexts is
++ * error prone and should be avoided.
++ */
++static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
++{
++	tasklet_disable_nosync(t);
++	tasklet_unlock_spin_wait(t);
++	smp_mb();
++}
++
+ static inline void tasklet_disable(struct tasklet_struct *t)
+ {
+ 	tasklet_disable_nosync(t);
+--
+2.43.0
+
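
The new helper targets the handful of callers that must disable a tasklet
from a context which cannot sleep, such as a hard interrupt handler or a
timer callback. Below is a minimal sketch of such a caller; it is not taken
from this series, and the names (my_dev, my_irq_handler, rx_tasklet) are
hypothetical, made up purely for illustration.

    /* Illustrative sketch only, not part of the patch. */
    #include <linux/interrupt.h>

    struct my_dev {
    	struct tasklet_struct rx_tasklet;
    };

    static irqreturn_t my_irq_handler(int irq, void *data)
    {
    	struct my_dev *dev = data;

    	/*
    	 * Hard interrupt context: sleeping is forbidden, so a
    	 * wait_var_event() based tasklet_disable() could not be used
    	 * here. tasklet_disable_in_atomic() keeps the spin-wait
    	 * behaviour for exactly this kind of caller.
    	 */
    	tasklet_disable_in_atomic(&dev->rx_tasklet);

    	/* ... update state the tasklet must not see half-done ... */

    	tasklet_enable(&dev->rx_tasklet);

    	return IRQ_HANDLED;
    }

Sleepable teardown paths should keep using plain tasklet_disable(); once the
later step mentioned in the changelog converts it to wait_var_event(), it is
intended to sleep rather than spin while waiting for a running tasklet to
finish.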