author    Daniel Baumann <daniel.baumann@progress-linux.org>    2024-05-06 01:02:38 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>    2024-05-06 01:02:38 +0000
commit    08b74a000942a380fe028845f92cd3a0dee827d5 (patch)
tree      aa78b4e12607c3e1fcce8d5cc42df4330792f118 /debian/patches-rt/0191-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch
parent    Adding upstream version 4.19.249. (diff)
Adding debian version 4.19.249-2. (tag: debian/4.19.249-2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0191-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch')
-rw-r--r--    debian/patches-rt/0191-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch    186
1 file changed, 186 insertions, 0 deletions
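What the patch below does, in one line: every place the workqueue code relied on local_irq_save()/local_irq_disable() to protect the per-CPU pending state now takes a local irq lock (pendingb_lock) instead, so the section keeps irq-off semantics on a stock kernel but becomes preemptible on PREEMPT_RT_FULL. A minimal sketch of the pattern, assuming the locallock API carried in the 4.19-rt series (the lock and function names below are illustrative, not taken from the patch):

	#include <linux/locallock.h>

	/* example_lock is a hypothetical name; the patch uses pendingb_lock. */
	static DEFINE_LOCAL_IRQ_LOCK(example_lock);

	static void example_section(void)
	{
		unsigned long flags;

		/*
		 * !RT: behaves like local_irq_save(flags).
		 * RT: per-CPU rtmutex-based lock; the section stays preemptible.
		 */
		local_lock_irqsave(example_lock, flags);
		/* ... touch per-CPU pending state here ... */
		local_unlock_irqrestore(example_lock, flags);
	}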
diff --git a/debian/patches-rt/0191-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch b/debian/patches-rt/0191-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch
new file mode 100644
index 000000000..d77219e80
--- /dev/null
+++ b/debian/patches-rt/0191-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch
@@ -0,0 +1,186 @@
+From c05bd3f09ed30bc24aa6a2fe5cfc0abd2fe883b3 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 17 Jul 2011 21:42:26 +0200
+Subject: [PATCH 191/347] workqueue: Use local irq lock instead of irq disable
+ regions
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+Use a local_irq_lock as a replacement for irq-off regions. We keep the
+semantics of irq-off with regard to the pool->lock and remain preemptible.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/workqueue.c | 45 ++++++++++++++++++++++++++++++---------------
+ 1 file changed, 30 insertions(+), 15 deletions(-)
+
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 3e2f67b77ab8..2b4c052c51b3 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -50,6 +50,7 @@
+ #include <linux/sched/isolation.h>
+ #include <linux/nmi.h>
+ #include <linux/kvm_para.h>
++#include <linux/locallock.h>
+
+ #include "workqueue_internal.h"
+
+@@ -351,6 +352,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient_wq);
+ struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
+ EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
+
++static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock);
++
+ static int worker_thread(void *__worker);
+ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
+
+@@ -1104,9 +1107,11 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
+ * As both pwqs and pools are RCU protected, the
+ * following lock operations are safe.
+ */
+- spin_lock_irq(&pwq->pool->lock);
++ rcu_read_lock();
++ local_spin_lock_irq(pendingb_lock, &pwq->pool->lock);
+ put_pwq(pwq);
+- spin_unlock_irq(&pwq->pool->lock);
++ local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock);
++ rcu_read_unlock();
+ }
+ }
+
+@@ -1210,7 +1215,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
+ struct worker_pool *pool;
+ struct pool_workqueue *pwq;
+
+- local_irq_save(*flags);
++ local_lock_irqsave(pendingb_lock, *flags);
+
+ /* try to steal the timer if it exists */
+ if (is_dwork) {
+@@ -1274,7 +1279,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
+ spin_unlock(&pool->lock);
+ fail:
+ rcu_read_unlock();
+- local_irq_restore(*flags);
++ local_unlock_irqrestore(pendingb_lock, *flags);
+ if (work_is_canceling(work))
+ return -ENOENT;
+ cpu_relax();
+@@ -1379,7 +1384,13 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+ * queued or lose PENDING. Grabbing PENDING and queueing should
+ * happen with IRQ disabled.
+ */
++#ifndef CONFIG_PREEMPT_RT_FULL
++ /*
++ * nort: On RT the "interrupts-disabled" rule has been replaced with
++ * pendingb_lock.
++ */
+ lockdep_assert_irqs_disabled();
++#endif
+
+
+ /* if draining, only works from the same workqueue are allowed */
+@@ -1487,14 +1498,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
+ bool ret = false;
+ unsigned long flags;
+
+- local_irq_save(flags);
++	local_lock_irqsave(pendingb_lock, flags);
+
+ if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+ __queue_work(cpu, wq, work);
+ ret = true;
+ }
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pendingb_lock, flags);
+ return ret;
+ }
+ EXPORT_SYMBOL(queue_work_on);
+@@ -1503,8 +1514,11 @@ void delayed_work_timer_fn(struct timer_list *t)
+ {
+ struct delayed_work *dwork = from_timer(dwork, t, timer);
+
++ /* XXX */
++ /* local_lock(pendingb_lock); */
+ /* should have been called from irqsafe timer with irq already off */
+ __queue_work(dwork->cpu, dwork->wq, &dwork->work);
++ /* local_unlock(pendingb_lock); */
+ }
+ EXPORT_SYMBOL(delayed_work_timer_fn);
+
+@@ -1559,14 +1573,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+ unsigned long flags;
+
+ /* read the comment in __queue_work() */
+- local_irq_save(flags);
++ local_lock_irqsave(pendingb_lock, flags);
+
+ if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+ __queue_delayed_work(cpu, wq, dwork, delay);
+ ret = true;
+ }
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pendingb_lock, flags);
+ return ret;
+ }
+ EXPORT_SYMBOL(queue_delayed_work_on);
+@@ -1601,7 +1615,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
+
+ if (likely(ret >= 0)) {
+ __queue_delayed_work(cpu, wq, dwork, delay);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pendingb_lock, flags);
+ }
+
+ /* -ENOENT from try_to_grab_pending() becomes %true */
+@@ -1612,11 +1626,12 @@ EXPORT_SYMBOL_GPL(mod_delayed_work_on);
+ static void rcu_work_rcufn(struct rcu_head *rcu)
+ {
+ struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
++ unsigned long flags;
+
+ /* read the comment in __queue_work() */
+- local_irq_disable();
++ local_lock_irqsave(pendingb_lock, flags);
+ __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
+- local_irq_enable();
++ local_unlock_irqrestore(pendingb_lock, flags);
+ }
+
+ /**
+@@ -3008,7 +3023,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
+
+ /* tell other tasks trying to grab @work to back off */
+ mark_work_canceling(work);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pendingb_lock, flags);
+
+ /*
+ * This allows canceling during early boot. We know that @work
+@@ -3069,10 +3084,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
+ */
+ bool flush_delayed_work(struct delayed_work *dwork)
+ {
+- local_irq_disable();
++ local_lock_irq(pendingb_lock);
+ if (del_timer_sync(&dwork->timer))
+ __queue_work(dwork->cpu, dwork->wq, &dwork->work);
+- local_irq_enable();
++ local_unlock_irq(pendingb_lock);
+ return flush_work(&dwork->work);
+ }
+ EXPORT_SYMBOL(flush_delayed_work);
+@@ -3110,7 +3125,7 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork)
+ return false;
+
+ set_work_pool_and_clear_pending(work, get_work_pool_id(work));
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pendingb_lock, flags);
+ return ret;
+ }
+
+--
+2.36.1
+
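A note on the put_pwq_unlocked() hunk: local_spin_lock_irq(pendingb_lock, &pwq->pool->lock) composes the local lock with the inner pool spinlock. Roughly what the helper expands to in the rt tree's locallock.h (reproduced here from the rt locallock API for illustration; on a !PREEMPT_RT_FULL build it collapses back to plain spin_lock_irq(lock), so non-RT behaviour is unchanged):

	#define local_spin_lock_irq(lvar, lock)		\
		do {					\
			local_lock_irq(lvar);		\
			spin_lock(lock);		\
		} while (0)

Because the RT variant is preemptible and no longer implies an RCU read-side critical section the way a hard irq-off region does, the hunk also brackets the operation with an explicit rcu_read_lock()/rcu_read_unlock() pair, matching the "pwqs and pools are RCU protected" comment kept just above it.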