Diffstat (limited to 'debian/patches-rt/0191-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch')
-rw-r--r--  debian/patches-rt/0191-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch | 30
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/debian/patches-rt/0191-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch b/debian/patches-rt/0191-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch
index afc96c299..e2b10051b 100644
--- a/debian/patches-rt/0191-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch
+++ b/debian/patches-rt/0191-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch
@@ -1,8 +1,8 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 21:42:26 +0200
-Subject: [PATCH 191/353] workqueue: Use local irq lock instead of irq disable
+Subject: [PATCH 191/354] workqueue: Use local irq lock instead of irq disable
regions
-Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=f12d688441df0c1a59c7e65616a8a4eabe4d52c3
+Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=c620d30a8ecfc1a79d34431f64defe6afa24bfb9
Use a local_irq_lock as a replacement for irq off regions. We keep the
semantic of irq-off in regard to the pool->lock and remain preemptible.
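
For readers skimming the refreshed patch: the change applies one pattern throughout kernel/workqueue.c, replacing each local_irq_save()/local_irq_restore() pair that guards the work item's PENDING bit with a named local_irq_lock. Below is a minimal sketch of that pattern, assuming the locallock primitives from the RT patch set's <linux/locallock.h>; example_queue_path() is an illustrative stand-in, not a function from the patch.

    /*
     * Sketch of the substitution this patch performs. On PREEMPT_RT the
     * local_irq_lock is a sleeping lock, so the section stays preemptible;
     * on !RT builds these macros fall back to local_irq_save()/restore().
     */
    #include <linux/locallock.h>

    /* Serializes PENDING handling, mirroring the lock the patch adds. */
    static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock);

    static void example_queue_path(void)
    {
    	unsigned long flags;

    	/* Was: local_irq_save(flags); -- hard IRQ-off, non-preemptible. */
    	local_lock_irqsave(pendingb_lock, flags);

    	/* ... grab PENDING and queue the work item under pool->lock ... */

    	/* Was: local_irq_restore(flags); */
    	local_unlock_irqrestore(pendingb_lock, flags);
    }
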
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
1 file changed, 30 insertions(+), 15 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
-index d002a0ab68d6..3cf50eac5351 100644
+index 8bd6fe347a32..71afa2de6aba 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -50,6 +50,7 @@
@@ -33,7 +33,7 @@ index d002a0ab68d6..3cf50eac5351 100644
static int worker_thread(void *__worker);
static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
-@@ -1104,9 +1107,11 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
+@@ -1107,9 +1110,11 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
* As both pwqs and pools are RCU protected, the
* following lock operations are safe.
*/
@@ -47,7 +47,7 @@ index d002a0ab68d6..3cf50eac5351 100644
}
}
-@@ -1210,7 +1215,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
+@@ -1213,7 +1218,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
struct worker_pool *pool;
struct pool_workqueue *pwq;
@@ -56,7 +56,7 @@ index d002a0ab68d6..3cf50eac5351 100644
/* try to steal the timer if it exists */
if (is_dwork) {
-@@ -1274,7 +1279,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
+@@ -1277,7 +1282,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
spin_unlock(&pool->lock);
fail:
rcu_read_unlock();
@@ -65,7 +65,7 @@ index d002a0ab68d6..3cf50eac5351 100644
if (work_is_canceling(work))
return -ENOENT;
cpu_relax();
-@@ -1379,7 +1384,13 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+@@ -1382,7 +1387,13 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
* queued or lose PENDING. Grabbing PENDING and queueing should
* happen with IRQ disabled.
*/
@@ -79,7 +79,7 @@ index d002a0ab68d6..3cf50eac5351 100644
/* if draining, only works from the same workqueue are allowed */
-@@ -1487,14 +1498,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
+@@ -1490,14 +1501,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
bool ret = false;
unsigned long flags;
@@ -96,7 +96,7 @@ index d002a0ab68d6..3cf50eac5351 100644
return ret;
}
EXPORT_SYMBOL(queue_work_on);
-@@ -1503,8 +1514,11 @@ void delayed_work_timer_fn(struct timer_list *t)
+@@ -1506,8 +1517,11 @@ void delayed_work_timer_fn(struct timer_list *t)
{
struct delayed_work *dwork = from_timer(dwork, t, timer);
@@ -108,7 +108,7 @@ index d002a0ab68d6..3cf50eac5351 100644
}
EXPORT_SYMBOL(delayed_work_timer_fn);
-@@ -1559,14 +1573,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+@@ -1562,14 +1576,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
unsigned long flags;
/* read the comment in __queue_work() */
@@ -125,7 +125,7 @@ index d002a0ab68d6..3cf50eac5351 100644
return ret;
}
EXPORT_SYMBOL(queue_delayed_work_on);
-@@ -1601,7 +1615,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
+@@ -1604,7 +1618,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
if (likely(ret >= 0)) {
__queue_delayed_work(cpu, wq, dwork, delay);
@@ -134,7 +134,7 @@ index d002a0ab68d6..3cf50eac5351 100644
}
/* -ENOENT from try_to_grab_pending() becomes %true */
-@@ -1612,11 +1626,12 @@ EXPORT_SYMBOL_GPL(mod_delayed_work_on);
+@@ -1615,11 +1629,12 @@ EXPORT_SYMBOL_GPL(mod_delayed_work_on);
static void rcu_work_rcufn(struct rcu_head *rcu)
{
struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
@@ -149,7 +149,7 @@ index d002a0ab68d6..3cf50eac5351 100644
}
/**
-@@ -3006,7 +3021,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
+@@ -3009,7 +3024,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
/* tell other tasks trying to grab @work to back off */
mark_work_canceling(work);
@@ -158,7 +158,7 @@ index d002a0ab68d6..3cf50eac5351 100644
/*
* This allows canceling during early boot. We know that @work
-@@ -3067,10 +3082,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
+@@ -3070,10 +3085,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
*/
bool flush_delayed_work(struct delayed_work *dwork)
{
@@ -171,7 +171,7 @@ index d002a0ab68d6..3cf50eac5351 100644
return flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
-@@ -3108,7 +3123,7 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork)
+@@ -3111,7 +3126,7 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork)
return false;
set_work_pool_and_clear_pending(work, get_work_pool_id(work));