Diffstat (limited to 'debian/patches-rt/0323-tasklet-Address-a-race-resulting-in-double-enqueue.patch')
-rw-r--r--  debian/patches-rt/0323-tasklet-Address-a-race-resulting-in-double-enqueue.patch  116
1 file changed, 116 insertions, 0 deletions
diff --git a/debian/patches-rt/0323-tasklet-Address-a-race-resulting-in-double-enqueue.patch b/debian/patches-rt/0323-tasklet-Address-a-race-resulting-in-double-enqueue.patch
new file mode 100644
index 000000000..7ebdfe086
--- /dev/null
+++ b/debian/patches-rt/0323-tasklet-Address-a-race-resulting-in-double-enqueue.patch
@@ -0,0 +1,116 @@
+From: Zhang Xiao <xiao.zhang@windriver.com>
+Date: Tue, 17 Mar 2020 12:47:43 +0100
+Subject: [PATCH 323/342] tasklet: Address a race resulting in double-enqueue
+Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=3f0945b4e8c94333caf5c9384103ebfc0a055375
+
+The following race condition was reported in the kernel bugzilla:
+
+CPU0                    CPU1                    CPU2
+------------------------------------------------
+test_set SCHED
+                        test_set RUN
+                        if SCHED
+                           add_list
+                           raise
+                        clear RUN
+<softirq>
+test_set RUN
+test_clear SCHED
+ ->func
+                                                test_set SCHED
+tasklet_try_unlock ->0
+test_clear SCHED
+                                                test_set SCHED
+ ->func
+tasklet_try_unlock ->1
+                        test_set RUN
+                        if SCHED
+                           add list
+                           raise
+                        clear RUN
+                                                test_set RUN
+                                                if SCHED
+                                                   add list
+                                                   raise
+                                                clear RUN
+
+As a result the tasklet is enqueued on both CPUs and run on both CPUs. Due
+to the nature of the list used here, it is possible that further
+(different) tasklets, which are enqueued after this double-enqueued
+tasklet, are scheduled on CPU2 but invoked on CPU1. It is also possible
+that these tasklets won't be invoked at all, because during the second
+enqueue process the t->next pointer is set to NULL - dropping everything
+from the list.
+
+This race will trigger one or both of the WARN_ON() checks in
+tasklet_action_common().
+The problem is that the tasklet may be invoked multiple times and clear
+the SCHED bit on each invocation. This makes it possible to enqueue the
+very same tasklet on different CPUs.
+
+The current RT-devel tree uses the upstream implementation, which does
+not re-run tasklets if they have SCHED set again and therefore does not
+clear the SCHED bit multiple times during a single invocation.
+
+Introduce the CHAINED flag. The tasklet will only be enqueued if the
+CHAINED flag has been set successfully.
+If it is possible to exchange the flags (CHAINED | RUN) -> 0 then the
+tasklet won't be re-run. Otherwise the SCHED flag, if set, is cleared
+and the tasklet is run again.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=61451
+Not-signed-off-by: Zhang Xiao <xiao.zhang@windriver.com>
+[bigeasy: patch description]
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+Signed-off-by: Tom Zanussi <zanussi@kernel.org>
+---
+ include/linux/interrupt.h | 5 ++++-
+ kernel/softirq.c          | 6 +++++-
+ 2 files changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index 97d9ba26915e..a3b5edb26bc5 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -579,12 +579,15 @@ enum
+ {
+ 	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
+ 	TASKLET_STATE_RUN,	/* Tasklet is running (SMP only) */
+-	TASKLET_STATE_PENDING	/* Tasklet is pending */
++	TASKLET_STATE_PENDING,	/* Tasklet is pending */
++	TASKLET_STATE_CHAINED	/* Tasklet is chained */
+ };
+ 
+ #define TASKLET_STATEF_SCHED	(1 << TASKLET_STATE_SCHED)
+ #define TASKLET_STATEF_RUN	(1 << TASKLET_STATE_RUN)
+ #define TASKLET_STATEF_PENDING	(1 << TASKLET_STATE_PENDING)
++#define TASKLET_STATEF_CHAINED	(1 << TASKLET_STATE_CHAINED)
++#define TASKLET_STATEF_RC	(TASKLET_STATEF_RUN | TASKLET_STATEF_CHAINED)
+ 
+ #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+ static inline int tasklet_trylock(struct tasklet_struct *t)
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 25bcf2f2714b..73dae64bfc9c 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -947,6 +947,10 @@ static void __tasklet_schedule_common(struct tasklet_struct *t,
+ 	 * is locked before adding it to the list.
+ 	 */
+ 	if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
++		if (test_and_set_bit(TASKLET_STATE_CHAINED, &t->state)) {
++			tasklet_unlock(t);
++			return;
++		}
+ 		t->next = NULL;
+ 		*head->tail = t;
+ 		head->tail = &(t->next);
+@@ -1040,7 +1044,7 @@ static void tasklet_action_common(struct softirq_action *a,
+ again:
+ 		t->func(t->data);
+ 
+-		while (!tasklet_tryunlock(t)) {
++		while (cmpxchg(&t->state, TASKLET_STATEF_RC, 0) != TASKLET_STATEF_RC) {
+ 			/*
+ 			 * If it got disabled meanwhile, bail out:
+ 			 */
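
For experimentation outside the kernel, the sketch below models the two decisions the patch introduces: the schedule path only enqueues the tasklet if it is the caller that manages to set CHAINED, and the run path only releases the tasklet once it can swap (RUN | CHAINED) -> 0 in a single compare-exchange; otherwise it consumes SCHED and runs the callback again on the same CPU. This is a minimal userspace model, not part of the patch and not kernel code: C11 atomics stand in for test_and_set_bit()/cmpxchg(), the RUN-bit locking of the schedule path and the tasklet disable count are omitted, and all names (model_tasklet, model_try_enqueue, model_finish_run) are invented for the example.

/*
 * Userspace model of the CHAINED-flag scheme (illustrative only).
 * Build: cc -std=c11 model.c && ./a.out
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define ST_SCHED   (1u << 0)   /* tasklet sits on a softirq list            */
#define ST_RUN     (1u << 1)   /* tasklet callback is currently executing   */
#define ST_CHAINED (1u << 2)   /* tasklet already queued behind the runner  */
#define ST_RC      (ST_RUN | ST_CHAINED)

struct model_tasklet {
	_Atomic unsigned int state;
	int enqueues;           /* stands in for the per-CPU tasklet list    */
};

/*
 * Schedule path: mirrors the added test_and_set_bit(TASKLET_STATE_CHAINED)
 * check. Only the caller that actually sets CHAINED gets to enqueue, so a
 * racing second schedule can no longer put the tasklet on a second list.
 */
static bool model_try_enqueue(struct model_tasklet *t)
{
	if (atomic_fetch_or(&t->state, ST_CHAINED) & ST_CHAINED)
		return false;   /* already chained: refuse the double enqueue */
	t->enqueues++;
	return true;
}

/*
 * Run path: mirrors the new unlock condition. The runner only drops the
 * tasklet when it can swap (RUN | CHAINED) -> 0 in one compare-exchange;
 * if SCHED sneaked in meanwhile, it is consumed and the callback re-runs
 * on this CPU instead of being enqueued elsewhere.
 */
static void model_finish_run(struct model_tasklet *t)
{
	for (;;) {
		unsigned int expected = ST_RC;

		if (atomic_compare_exchange_strong(&t->state, &expected, 0))
			return;

		if (atomic_fetch_and(&t->state, ~ST_SCHED) & ST_SCHED)
			printf("SCHED was set again: re-running the callback\n");
	}
}

int main(void)
{
	/* The callback is running and SCHED has just been set again. */
	struct model_tasklet t = { .state = ST_RUN | ST_SCHED, .enqueues = 0 };

	printf("first schedule enqueues:  %d\n", model_try_enqueue(&t));
	printf("second schedule enqueues: %d\n", model_try_enqueue(&t));
	printf("list insertions: %d (the race allowed 2 before the fix)\n",
	       t.enqueues);

	model_finish_run(&t);
	printf("final state: %u\n", (unsigned)atomic_load(&t.state));
	return 0;
}

Refusing the second enqueue at schedule time, rather than trying to deduplicate later in the softirq handler, is what keeps the same tasklet from appearing on two per-CPU lists and corrupting t->next as described in the report above.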