Diffstat (limited to 'debian/patches-rt/0089-timers-Prepare-for-full-preemption.patch')
-rw-r--r-- | debian/patches-rt/0089-timers-Prepare-for-full-preemption.patch | 176 |
1 file changed, 176 insertions, 0 deletions
diff --git a/debian/patches-rt/0089-timers-Prepare-for-full-preemption.patch b/debian/patches-rt/0089-timers-Prepare-for-full-preemption.patch
new file mode 100644
index 000000000..35a649740
--- /dev/null
+++ b/debian/patches-rt/0089-timers-Prepare-for-full-preemption.patch
@@ -0,0 +1,176 @@
+From 413cdf16e5561fbf143c28525fe70c1fd441a624 Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:29:34 -0500
+Subject: [PATCH 089/347] timers: Prepare for full preemption
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+When softirqs can be preempted we need to make sure that cancelling
+the timer from the active thread can not deadlock vs. a running timer
+callback. Add a waitqueue to resolve that.
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/timer.h |  2 +-
+ kernel/sched/core.c   |  9 +++++++--
+ kernel/time/timer.c   | 45 +++++++++++++++++++++++++++++++++++++++----
+ 3 files changed, 49 insertions(+), 7 deletions(-)
+
+diff --git a/include/linux/timer.h b/include/linux/timer.h
+index 7b066fd38248..54627d046b3a 100644
+--- a/include/linux/timer.h
++++ b/include/linux/timer.h
+@@ -172,7 +172,7 @@ extern void add_timer(struct timer_list *timer);
+ 
+ extern int try_to_del_timer_sync(struct timer_list *timer);
+ 
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+   extern int del_timer_sync(struct timer_list *timer);
+ #else
+ # define del_timer_sync(t)	del_timer(t)
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 0a51a66f5a63..974d92afd23e 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -498,11 +498,14 @@ void resched_cpu(int cpu)
+  */
+ int get_nohz_timer_target(void)
+ {
+-	int i, cpu = smp_processor_id();
++	int i, cpu;
+ 	struct sched_domain *sd;
+ 
++	preempt_disable_rt();
++	cpu = smp_processor_id();
++
+ 	if (!idle_cpu(cpu) && housekeeping_cpu(cpu, HK_FLAG_TIMER))
+-		return cpu;
++		goto preempt_en_rt;
+ 
+ 	rcu_read_lock();
+ 	for_each_domain(cpu, sd) {
+@@ -521,6 +524,8 @@ int get_nohz_timer_target(void)
+ 		cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
+ unlock:
+ 	rcu_read_unlock();
++preempt_en_rt:
++	preempt_enable_rt();
+ 	return cpu;
+ }
+ 
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index a6e88d9bb931..0043bf8e2c90 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -45,6 +45,7 @@
+ #include <linux/slab.h>
+ #include <linux/compat.h>
+ #include <linux/random.h>
++#include <linux/swait.h>
+ 
+ #include <linux/uaccess.h>
+ #include <asm/unistd.h>
+@@ -198,6 +199,9 @@ EXPORT_SYMBOL(jiffies_64);
+ struct timer_base {
+ 	raw_spinlock_t		lock;
+ 	struct timer_list	*running_timer;
++#ifdef CONFIG_PREEMPT_RT_FULL
++	struct swait_queue_head	wait_for_running_timer;
++#endif
+ 	unsigned long		clk;
+ 	unsigned long		next_expiry;
+ 	unsigned int		cpu;
+@@ -1190,6 +1194,33 @@ void add_timer_on(struct timer_list *timer, int cpu)
+ }
+ EXPORT_SYMBOL_GPL(add_timer_on);
+ 
++#ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * Wait for a running timer
++ */
++static void wait_for_running_timer(struct timer_list *timer)
++{
++	struct timer_base *base;
++	u32 tf = timer->flags;
++
++	if (tf & TIMER_MIGRATING)
++		return;
++
++	base = get_timer_base(tf);
++	swait_event_exclusive(base->wait_for_running_timer,
++			      base->running_timer != timer);
++}
++
++# define wakeup_timer_waiters(b)	swake_up_all(&(b)->wait_for_running_timer)
++#else
++static inline void wait_for_running_timer(struct timer_list *timer)
++{
++	cpu_relax();
++}
++
++# define wakeup_timer_waiters(b)	do { } while (0)
++#endif
++
+ /**
+  * del_timer - deactivate a timer.
+  * @timer: the timer to be deactivated
+@@ -1245,7 +1276,7 @@ int try_to_del_timer_sync(struct timer_list *timer)
+ }
+ EXPORT_SYMBOL(try_to_del_timer_sync);
+ 
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+ /**
+  * del_timer_sync - deactivate a timer and wait for the handler to finish.
+  * @timer: the timer to be deactivated
+@@ -1305,7 +1336,7 @@ int del_timer_sync(struct timer_list *timer)
+ 		int ret = try_to_del_timer_sync(timer);
+ 		if (ret >= 0)
+ 			return ret;
+-		cpu_relax();
++		wait_for_running_timer(timer);
+ 	}
+ }
+ EXPORT_SYMBOL(del_timer_sync);
+@@ -1366,13 +1397,16 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
+ 
+ 		fn = timer->function;
+ 
+-		if (timer->flags & TIMER_IRQSAFE) {
++		if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL) &&
++		    timer->flags & TIMER_IRQSAFE) {
+ 			raw_spin_unlock(&base->lock);
+ 			call_timer_fn(timer, fn);
++			base->running_timer = NULL;
+ 			raw_spin_lock(&base->lock);
+ 		} else {
+ 			raw_spin_unlock_irq(&base->lock);
+ 			call_timer_fn(timer, fn);
++			base->running_timer = NULL;
+ 			raw_spin_lock_irq(&base->lock);
+ 		}
+ 	}
+@@ -1695,8 +1729,8 @@ static inline void __run_timers(struct timer_base *base)
+ 		while (levels--)
+ 			expire_timers(base, heads + levels);
+ 	}
+-	base->running_timer = NULL;
+ 	raw_spin_unlock_irq(&base->lock);
++	wakeup_timer_waiters(base);
+ }
+ 
+ /*
+@@ -1941,6 +1975,9 @@ static void __init init_timer_cpu(int cpu)
+ 		base->cpu = cpu;
+ 		raw_spin_lock_init(&base->lock);
+ 		base->clk = jiffies;
++#ifdef CONFIG_PREEMPT_RT_FULL
++		init_swait_queue_head(&base->wait_for_running_timer);
++#endif
+ 	}
+ }
+ 
+-- 
+2.36.1
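The core idea of the patch is that, with CONFIG_PREEMPT_RT_FULL, del_timer_sync() sleeps on a per-base swait queue until the running callback has finished, instead of spinning in cpu_relax(); the expiry path clears running_timer and wakes the waiters. The userspace C sketch below models that pattern only for illustration; it is not kernel code and is not part of the patch. All names (toy_timer_base, toy_del_timer_sync, toy_expire_timer, slow_callback) are made up, and a pthread mutex/condition variable stands in for the kernel's raw spinlock and swait queue.

/* Build with: cc -pthread toy_timer.c -o toy_timer
 * Toy model of the patch's wait-instead-of-spin cancellation scheme. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct toy_timer {
	void (*fn)(void);
};

struct toy_timer_base {
	pthread_mutex_t lock;                    /* stands in for base->lock */
	pthread_cond_t wait_for_running_timer;   /* stands in for the swait queue */
	struct toy_timer *running_timer;         /* callback currently executing, or NULL */
};

static struct toy_timer_base base = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.wait_for_running_timer = PTHREAD_COND_INITIALIZER,
	.running_timer = NULL,
};

/* Expiry path: mark the timer running, drop the lock, run the callback,
 * then clear running_timer and wake any canceller sleeping in
 * toy_del_timer_sync(). Mirrors expire_timers() + wakeup_timer_waiters(). */
static void toy_expire_timer(struct toy_timer *t)
{
	pthread_mutex_lock(&base.lock);
	base.running_timer = t;
	pthread_mutex_unlock(&base.lock);

	t->fn();                                 /* callback runs without the base lock held */

	pthread_mutex_lock(&base.lock);
	base.running_timer = NULL;
	pthread_cond_broadcast(&base.wait_for_running_timer);
	pthread_mutex_unlock(&base.lock);
}

/* Cancellation path: sleep until the callback is no longer running, rather
 * than spinning; this plays the role of wait_for_running_timer() built on
 * swait_event_exclusive() in the patch. */
static void toy_del_timer_sync(struct toy_timer *t)
{
	pthread_mutex_lock(&base.lock);
	while (base.running_timer == t)
		pthread_cond_wait(&base.wait_for_running_timer, &base.lock);
	pthread_mutex_unlock(&base.lock);
}

static void slow_callback(void)
{
	puts("callback: running");
	sleep(1);                                /* pretend the handler takes a while */
	puts("callback: done");
}

static struct toy_timer timer = { .fn = slow_callback };

static void *expiry_thread(void *arg)
{
	(void)arg;
	toy_expire_timer(&timer);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, expiry_thread, NULL);
	usleep(100 * 1000);                      /* let the callback start first */

	puts("canceller: waiting for the running callback");
	toy_del_timer_sync(&timer);              /* sleeps instead of busy-waiting */
	puts("canceller: callback finished, timer gone");

	pthread_join(tid, NULL);
	return 0;
}

The design point this models: once softirqs run in preemptible threads, a higher-priority task spinning in cpu_relax() inside del_timer_sync() could keep the lower-priority softirq thread from ever finishing the callback it is waiting for. Blocking on a wait queue lets the canceller yield the CPU and be woken exactly when the callback completes, which is what the added wait_for_running_timer/wakeup_timer_waiters pair provides.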