From b15a952c52a6825376d3e7f6c1bf5c886c6d8b74 Mon Sep 17 00:00:00 2001
From: Daniel Baumann <daniel.baumann@progress-linux.org>
Date: Sat, 27 Apr 2024 12:06:00 +0200
Subject: Adding debian version 5.10.209-2.

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
---
 ...-Replace-spin-wait-in-tasklet_unlock_wait.patch | 90 ++++++++++++++++++++++
 1 file changed, 90 insertions(+)
 create mode 100644 debian/patches-rt/0136-tasklets-Replace-spin-wait-in-tasklet_unlock_wait.patch

diff --git a/debian/patches-rt/0136-tasklets-Replace-spin-wait-in-tasklet_unlock_wait.patch b/debian/patches-rt/0136-tasklets-Replace-spin-wait-in-tasklet_unlock_wait.patch
new file mode 100644
index 000000000..c244c4010
--- /dev/null
+++ b/debian/patches-rt/0136-tasklets-Replace-spin-wait-in-tasklet_unlock_wait.patch
@@ -0,0 +1,90 @@
+From 1e2be8ee6d91630326a3a3bc32925205b41d73a5 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 9 Mar 2021 09:42:08 +0100
+Subject: [PATCH 136/323] tasklets: Replace spin wait in tasklet_unlock_wait()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+tasklet_unlock_wait() spin waits for TASKLET_STATE_RUN to be cleared. This
+is wasting CPU cycles in a tight loop which is especially painful in a
+guest when the CPU running the tasklet is scheduled out.
+
+tasklet_unlock_wait() is invoked from tasklet_kill() which is used in
+teardown paths and not performance critical at all. Replace the spin wait
+with wait_var_event().
+
+There are no users of tasklet_unlock_wait() which are invoked from atomic
+contexts. The usage in tasklet_disable() has been replaced temporarily with
+the spin waiting variant until the atomic users are fixed up and will be
+converted to the sleep wait variant later.
+
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/interrupt.h | 13 ++-----------
+ kernel/softirq.c          | 18 ++++++++++++++++++
+ 2 files changed, 20 insertions(+), 11 deletions(-)
+
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index fe085c46f210..c4fafbfa28a6 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -664,17 +664,8 @@ static inline int tasklet_trylock(struct tasklet_struct *t)
+ 	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
+ }
+ 
+-static inline void tasklet_unlock(struct tasklet_struct *t)
+-{
+-	smp_mb__before_atomic();
+-	clear_bit(TASKLET_STATE_RUN, &(t)->state);
+-}
+-
+-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
+-{
+-	while (test_bit(TASKLET_STATE_RUN, &t->state))
+-		cpu_relax();
+-}
++void tasklet_unlock(struct tasklet_struct *t);
++void tasklet_unlock_wait(struct tasklet_struct *t);
+ 
+ /*
+  * Do not use in new code. Waiting for tasklets from atomic contexts is
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index d5bfd5e661fc..06bca024ce45 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -25,6 +25,7 @@
+ #include <linux/smpboot.h>
+ #include <linux/tick.h>
+ #include <linux/irq.h>
++#include <linux/wait_bit.h>
+ 
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/irq.h>
+@@ -619,6 +620,23 @@ void tasklet_kill(struct tasklet_struct *t)
+ }
+ EXPORT_SYMBOL(tasklet_kill);
+ 
++#ifdef CONFIG_SMP
++void tasklet_unlock(struct tasklet_struct *t)
++{
++	smp_mb__before_atomic();
++	clear_bit(TASKLET_STATE_RUN, &t->state);
++	smp_mb__after_atomic();
++	wake_up_var(&t->state);
++}
++EXPORT_SYMBOL_GPL(tasklet_unlock);
++
++void tasklet_unlock_wait(struct tasklet_struct *t)
++{
++	wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
++}
++EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
++#endif
++
+ void __init softirq_init(void)
+ {
+ 	int cpu;
+-- 
+2.43.0
+
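The idea behind the patch can be demonstrated outside the kernel: instead of burning CPU cycles polling a state bit, the waiter sleeps, and the side that clears the bit issues a wakeup. Below is a minimal userspace C sketch of that pattern, using a pthread mutex/condvar pair as a stand-in for the kernel's wait_var_event()/wake_up_var() machinery. All names here (demo_unlock, demo_unlock_wait, RUN_BIT, tasklet_body) are invented for illustration and are not part of any kernel or library API; build with `cc -pthread`.

/*
 * Minimal userspace sketch (not kernel code) of the pattern this patch
 * applies: the waiter sleeps on a state word instead of spin waiting,
 * and the releasing side clears the bit and then wakes the waiter.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define RUN_BIT 1UL                  /* stand-in for TASKLET_STATE_RUN */

static atomic_ulong state;           /* stand-in for tasklet_struct::state */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

static void demo_unlock(void)
{
	/* Clear the RUN bit first, then wake any sleeper; mirrors
	 * clear_bit() + smp_mb__after_atomic() + wake_up_var(). */
	atomic_fetch_and(&state, ~RUN_BIT);
	pthread_mutex_lock(&lock);
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

static void demo_unlock_wait(void)
{
	/* Sleep until the RUN bit is clear instead of spinning with
	 * cpu_relax(); mirrors wait_var_event(), which likewise
	 * re-checks its condition after every wakeup. */
	pthread_mutex_lock(&lock);
	while (atomic_load(&state) & RUN_BIT)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}

static void *tasklet_body(void *arg)
{
	(void)arg;
	demo_unlock();               /* "tasklet" finishes and releases RUN */
	return NULL;
}

int main(void)
{
	pthread_t tid;

	atomic_store(&state, RUN_BIT);   /* pretend the tasklet is running */
	pthread_create(&tid, NULL, tasklet_body, NULL);
	demo_unlock_wait();              /* blocks; no cycles burned in a loop */
	pthread_join(&tid, NULL);
	puts("RUN bit clear, waiter woken");
	return 0;
}

As in the patch, the releasing side clears the bit before issuing the wakeup, and the waiter re-checks the bit after each wakeup. wait_var_event() encodes exactly that re-check in its condition argument, and the smp_mb__after_atomic() added in tasklet_unlock() orders the clear_bit() against the sleeper check performed by wake_up_var(), so a waiter cannot miss the state change.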