diff --git a/debian/patches-rt/0349-timers-Move-clearing-of-base-timer_running-under-bas.patch b/debian/patches-rt/0349-timers-Move-clearing-of-base-timer_running-under-bas.patch
new file mode 100644
index 000000000..d01940b4c
--- /dev/null
+++ b/debian/patches-rt/0349-timers-Move-clearing-of-base-timer_running-under-bas.patch
@@ -0,0 +1,86 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 31 Oct 2022 16:50:05 +0100
+Subject: [PATCH 349/351] timers: Move clearing of base::timer_running under
+ base::lock
+Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=eee43f1aec23fc7a8d80e59c805ddfeacfe1eac5
+
+Upstream commit bb7262b295472eb6858b5c49893954794027cd84
+
+syzbot reported KCSAN data races vs. timer_base::timer_running being set to
+NULL without holding base::lock in expire_timers().
+
+This looks innocent and most reads are clearly not problematic, but
+Frederic identified an issue which is:
+
+ int data = 0;
+
+ void timer_func(struct timer_list *t)
+ {
+    data = 1;
+ }
+
+ CPU 0                                             CPU 1
+ ------------------------------                    --------------------------
+ base = lock_timer_base(timer, &flags);            raw_spin_unlock(&base->lock);
+ if (base->running_timer != timer)                 call_timer_fn(timer, fn, baseclk);
+   ret = detach_if_pending(timer, base, true);     base->running_timer = NULL;
+ raw_spin_unlock_irqrestore(&base->lock, flags);   raw_spin_lock(&base->lock);
+
+ x = data;
+
+If the timer has previously executed on CPU 1, CPU 0 can observe
+base->running_timer == NULL and return, assuming the timer has completed.
+But that observation alone does not guarantee that the callback's store to
+'data' is visible as well, so x can still read as 0 on weakly ordered
+architectures. The comment for del_timer_sync() makes exactly that
+visibility guarantee. Moving the assignment under base->lock prevents this,
+because the lock release on CPU 1 then pairs with the lock acquire on CPU 0
+and orders the callback's stores before the subsequent reads.
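+
+As a purely illustrative user-space analogue (not kernel code; the use of
+pthreads and the names 'running', 'data', 'expire' and 'wait_for_timer' are
+assumptions made only for this sketch), the guarantee boils down to
+clearing the "running" flag while holding the same lock the waiter takes:
+
+ #include <pthread.h>
+ #include <stdio.h>
+
+ static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+ static int data;        /* written by the "callback" */
+ static int running = 1; /* analogue of base->running_timer */
+
+ static void *expire(void *arg)
+ {
+     (void)arg;
+     data = 1;           /* the callback's store */
+     /*
+      * Clearing 'running' here, before taking the lock, would give the
+      * waiter no ordering against the store to 'data' above.
+      */
+     pthread_mutex_lock(&lock);
+     running = 0;        /* fixed variant: clear under the lock */
+     pthread_mutex_unlock(&lock);
+     return NULL;
+ }
+
+ static void wait_for_timer(void)
+ {
+     for (;;) {
+         pthread_mutex_lock(&lock);
+         if (!running) {
+             pthread_mutex_unlock(&lock);
+             break;
+         }
+         pthread_mutex_unlock(&lock);
+     }
+     /* Seeing running == 0 under the lock implies data == 1 is visible. */
+     printf("data = %d\n", data);
+ }
+
+ int main(void)
+ {
+     pthread_t t;
+
+     pthread_create(&t, NULL, expire, NULL);
+     wait_for_timer();
+     pthread_join(t, NULL);
+     return 0;
+ }
+
+The mutex release in expire() and the acquire in wait_for_timer() provide the
+same release/acquire pairing that base->lock provides in the kernel once the
+clearing of base->running_timer is moved under it.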
+
+For a non-RT kernel it is, performance-wise, completely irrelevant whether
+the store happens before or after taking the lock. For an RT kernel, moving
+the store under the lock requires an extra unlock/lock pair in the case that
+there is a waiter for the timer (visible in the timer_sync_wait_running()
+hunk below, which now drops and re-takes base->lock around the expiry_lock
+handoff), but that's not the end of the world.
+
+Reported-by: syzbot+aa7c2385d46c5eba0b89@syzkaller.appspotmail.com
+Reported-by: syzbot+abea4558531bae1ba9fe@syzkaller.appspotmail.com
+Fixes: 030dcdd197d7 ("timers: Prepare support for PREEMPT_RT")
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/87lfea7gw8.fsf@nanos.tec.linutronix.de
+Cc: stable@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Daniel Wagner <wagi@monom.org>
+---
+ kernel/time/timer.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index b859ecf6424b..603985720f54 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1282,8 +1282,10 @@ static inline void timer_base_unlock_expiry(struct timer_base *base)
+ static void timer_sync_wait_running(struct timer_base *base)
+ {
+ 	if (atomic_read(&base->timer_waiters)) {
++		raw_spin_unlock_irq(&base->lock);
+ 		spin_unlock(&base->expiry_lock);
+ 		spin_lock(&base->expiry_lock);
++		raw_spin_lock_irq(&base->lock);
+ 	}
+ }
+
+@@ -1458,14 +1460,14 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
+ 		if (timer->flags & TIMER_IRQSAFE) {
+ 			raw_spin_unlock(&base->lock);
+ 			call_timer_fn(timer, fn);
+-			base->running_timer = NULL;
+ 			raw_spin_lock(&base->lock);
++			base->running_timer = NULL;
+ 		} else {
+ 			raw_spin_unlock_irq(&base->lock);
+ 			call_timer_fn(timer, fn);
++			raw_spin_lock_irq(&base->lock);
+ 			base->running_timer = NULL;
+ 			timer_sync_wait_running(base);
+-			raw_spin_lock_irq(&base->lock);
+ 		}
+ 	}
+ }
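
For readability, a condensed restatement of the reordered non-IRQSAFE expiry
path from the second hunk above, with comments added (a simplified excerpt,
not the complete expire_timers() function):

    raw_spin_unlock_irq(&base->lock);
    call_timer_fn(timer, fn);
    raw_spin_lock_irq(&base->lock);   /* re-take base->lock first ...       */
    base->running_timer = NULL;       /* ... so the clear happens under it  */
    timer_sync_wait_running(base);    /* may drop/re-take base->lock on RT  */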