author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-08 04:21:41 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-08 04:21:41 +0000
commit    b488a8b2c3eaf68ad4778dbdc99bdda0b0d1ce6d (patch)
tree      9f60f0d41af253fa943e7811f231d36852f10f1e /debian/patches-rt/0190-workqueue-Use-normal-rcu.patch
parent    Merging upstream version 4.19.304. (diff)
Adding debian version 4.19.304-1. (debian/4.19.304-1, debian)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0190-workqueue-Use-normal-rcu.patch')
-rw-r--r--  debian/patches-rt/0190-workqueue-Use-normal-rcu.patch  48
1 file changed, 24 insertions(+), 24 deletions(-)
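
For context: the quilt patch refreshed by this commit converts kernel/workqueue.c from sched-RCU (implicit read-side protection via preempt-disabled sections) to normal RCU, i.e. explicit rcu_read_lock()/rcu_read_unlock() pairs on the read side and call_rcu() instead of call_rcu_sched() for freeing. The sketch below only illustrates that conversion pattern; the struct, global pointer, and helper names (obj, global_obj, obj_read, obj_retire, obj_free_cb) are invented for the example and are not taken from the workqueue code.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
	int value;
	struct rcu_head rcu;
};

static struct obj __rcu *global_obj;

/* Read side: an explicit RCU read-side critical section instead of
 * relying on a preempt-disabled region, which is not sufficient on RT. */
static int obj_read(void)
{
	struct obj *o;
	int v = -1;

	rcu_read_lock();
	o = rcu_dereference(global_obj);
	if (o)
		v = o->value;
	rcu_read_unlock();
	return v;
}

static void obj_free_cb(struct rcu_head *head)
{
	kfree(container_of(head, struct obj, rcu));
}

/* Update side: defer the free through normal RCU rather than call_rcu_sched(). */
static void obj_retire(struct obj *o)
{
	call_rcu(&o->rcu, obj_free_cb);
}

The diff below merely refreshes the patch for the 4.19.304 import: the Origin commit id, the patch series count, and the hunk offsets change, while the substance of the conversion stays the same.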
diff --git a/debian/patches-rt/0190-workqueue-Use-normal-rcu.patch b/debian/patches-rt/0190-workqueue-Use-normal-rcu.patch
index 53e612863..c6ecccd8f 100644
--- a/debian/patches-rt/0190-workqueue-Use-normal-rcu.patch
+++ b/debian/patches-rt/0190-workqueue-Use-normal-rcu.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 24 Jul 2013 15:26:54 +0200
-Subject: [PATCH 190/353] workqueue: Use normal rcu
-Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=37451e023726bbdd8de45fa86e8c5038be90e3ae
+Subject: [PATCH 190/354] workqueue: Use normal rcu
+Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=f077c23964fda23dcb109b53b423ca1b8f512573
There is no need for sched_rcu. The undocumented reason why sched_rcu
is used is to avoid a few explicit rcu_read_lock()/unlock() pairs by
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
1 file changed, 52 insertions(+), 43 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
-index 4ea2f7fd20ce..d002a0ab68d6 100644
+index 017939097451..8bd6fe347a32 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -128,7 +128,7 @@ enum {
@@ -107,7 +107,7 @@ index 4ea2f7fd20ce..d002a0ab68d6 100644
* read locked.
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
-@@ -695,8 +695,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
+@@ -700,8 +700,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
* @work: the work item of interest
*
* Pools are created and destroyed under wq_pool_mutex, and allows read
@@ -118,7 +118,7 @@ index 4ea2f7fd20ce..d002a0ab68d6 100644
*
* All fields of the returned pool are accessible as long as the above
* mentioned locking is in effect. If the returned pool needs to be used
-@@ -1101,7 +1101,7 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
+@@ -1104,7 +1104,7 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
{
if (pwq) {
/*
@@ -127,7 +127,7 @@ index 4ea2f7fd20ce..d002a0ab68d6 100644
* following lock operations are safe.
*/
spin_lock_irq(&pwq->pool->lock);
-@@ -1229,6 +1229,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
+@@ -1232,6 +1232,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
return 0;
@@ -135,7 +135,7 @@ index 4ea2f7fd20ce..d002a0ab68d6 100644
/*
* The queueing is in progress, or it is already queued. Try to
* steal it from ->worklist without clearing WORK_STRUCT_PENDING.
-@@ -1267,10 +1268,12 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
+@@ -1270,10 +1271,12 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
set_work_pool_and_keep_pending(work, pool->id);
spin_unlock(&pool->lock);
@@ -148,7 +148,7 @@ index 4ea2f7fd20ce..d002a0ab68d6 100644
local_irq_restore(*flags);
if (work_is_canceling(work))
return -ENOENT;
-@@ -1383,6 +1386,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+@@ -1386,6 +1389,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
if (unlikely(wq->flags & __WQ_DRAINING) &&
WARN_ON_ONCE(!is_chained_work(wq)))
return;
@@ -156,7 +156,7 @@ index 4ea2f7fd20ce..d002a0ab68d6 100644
retry:
/* pwq which will be used unless @work is executing elsewhere */
if (wq->flags & WQ_UNBOUND) {
-@@ -1441,10 +1445,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+@@ -1444,10 +1448,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
/* pwq determined, queue */
trace_workqueue_queue_work(req_cpu, pwq, work);
@@ -169,7 +169,7 @@ index 4ea2f7fd20ce..d002a0ab68d6 100644
pwq->nr_in_flight[pwq->work_color]++;
work_flags = work_color_to_flags(pwq->work_color);
-@@ -1463,7 +1465,9 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+@@ -1466,7 +1468,9 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
debug_work_activate(work);
insert_work(pwq, work, worklist, work_flags);
@@ -179,7 +179,7 @@ index 4ea2f7fd20ce..d002a0ab68d6 100644
}
/**
-@@ -2861,14 +2865,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
+@@ -2864,14 +2868,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
might_sleep();
@@ -197,7 +197,7 @@ index 4ea2f7fd20ce..d002a0ab68d6 100644
/* see the comment in try_to_grab_pending() with the same code */
pwq = get_work_pwq(work);
if (pwq) {
-@@ -2900,10 +2904,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
+@@ -2903,10 +2907,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
lock_map_acquire(&pwq->wq->lockdep_map);
lock_map_release(&pwq->wq->lockdep_map);
}
@@ -210,7 +210,7 @@ index 4ea2f7fd20ce..d002a0ab68d6 100644
return false;
}
-@@ -3348,7 +3353,7 @@ static void rcu_free_pool(struct rcu_head *rcu)
+@@ -3351,7 +3356,7 @@ static void rcu_free_pool(struct rcu_head *rcu)
* put_unbound_pool - put a worker_pool
* @pool: worker_pool to put
*
@@ -219,7 +219,7 @@ index 4ea2f7fd20ce..d002a0ab68d6 100644
* safe manner. get_unbound_pool() calls this function on its failure path
* and this function should be able to release pools which went through,
* successfully or not, init_worker_pool().
-@@ -3402,8 +3407,8 @@ static void put_unbound_pool(struct worker_pool *pool)
+@@ -3405,8 +3410,8 @@ static void put_unbound_pool(struct worker_pool *pool)
del_timer_sync(&pool->idle_timer);
del_timer_sync(&pool->mayday_timer);
@@ -230,7 +230,7 @@ index 4ea2f7fd20ce..d002a0ab68d6 100644
}
/**
-@@ -3516,14 +3521,14 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
+@@ -3519,14 +3524,14 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
put_unbound_pool(pool);
mutex_unlock(&wq_pool_mutex);
@@ -247,7 +247,7 @@ index 4ea2f7fd20ce..d002a0ab68d6 100644
}
/**
-@@ -4230,7 +4235,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
+@@ -4233,7 +4238,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
* The base ref is never dropped on per-cpu pwqs. Directly
* schedule RCU free.
*/
@@ -256,7 +256,7 @@ index 4ea2f7fd20ce..d002a0ab68d6 100644
} else {
/*
* We're the sole accessor of @wq at this point. Directly
-@@ -4340,7 +4345,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
+@@ -4343,7 +4348,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
struct pool_workqueue *pwq;
bool ret;
@@ -266,7 +266,7 @@ index 4ea2f7fd20ce..d002a0ab68d6 100644
if (cpu == WORK_CPU_UNBOUND)
cpu = smp_processor_id();
-@@ -4351,7 +4357,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
+@@ -4354,7 +4360,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
ret = !list_empty(&pwq->delayed_works);
@@ -276,7 +276,7 @@ index 4ea2f7fd20ce..d002a0ab68d6 100644
return ret;
}
-@@ -4377,15 +4384,15 @@ unsigned int work_busy(struct work_struct *work)
+@@ -4380,15 +4387,15 @@ unsigned int work_busy(struct work_struct *work)
if (work_pending(work))
ret |= WORK_BUSY_PENDING;
@@ -296,7 +296,7 @@ index 4ea2f7fd20ce..d002a0ab68d6 100644
return ret;
}
-@@ -4570,7 +4577,7 @@ void show_workqueue_state(void)
+@@ -4573,7 +4580,7 @@ void show_workqueue_state(void)
unsigned long flags;
int pi;
@@ -305,7 +305,7 @@ index 4ea2f7fd20ce..d002a0ab68d6 100644
pr_info("Showing busy workqueues and worker pools:\n");
-@@ -4635,7 +4642,7 @@ void show_workqueue_state(void)
+@@ -4638,7 +4645,7 @@ void show_workqueue_state(void)
touch_nmi_watchdog();
}
@@ -314,7 +314,7 @@ index 4ea2f7fd20ce..d002a0ab68d6 100644
}
/* used to show worker information through /proc/PID/{comm,stat,status} */
-@@ -5022,16 +5029,16 @@ bool freeze_workqueues_busy(void)
+@@ -5025,16 +5032,16 @@ bool freeze_workqueues_busy(void)
* nr_active is monotonically decreasing. It's safe
* to peek without lock.
*/
@@ -334,7 +334,7 @@ index 4ea2f7fd20ce..d002a0ab68d6 100644
}
out_unlock:
mutex_unlock(&wq_pool_mutex);
-@@ -5233,7 +5240,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
+@@ -5240,7 +5247,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
const char *delim = "";
int node, written = 0;
@@ -344,7 +344,7 @@ index 4ea2f7fd20ce..d002a0ab68d6 100644
for_each_node(node) {
written += scnprintf(buf + written, PAGE_SIZE - written,
"%s%d:%d", delim, node,
-@@ -5241,7 +5249,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
+@@ -5248,7 +5256,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
delim = " ";
}
written += scnprintf(buf + written, PAGE_SIZE - written, "\n");