Diffstat (limited to 'debian/patches-rt/0284-workqueue-rework.patch')
-rw-r--r--  debian/patches-rt/0284-workqueue-rework.patch  194
1 file changed, 97 insertions, 97 deletions
diff --git a/debian/patches-rt/0284-workqueue-rework.patch b/debian/patches-rt/0284-workqueue-rework.patch
index 7acedd9ad..9d553557f 100644
--- a/debian/patches-rt/0284-workqueue-rework.patch
+++ b/debian/patches-rt/0284-workqueue-rework.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 29 May 2019 18:52:27 +0200
-Subject: [PATCH 284/353] workqueue: rework
-Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=11c0b968a11a4a0c4e9b0e4f4129c5d3dbf995ac
+Subject: [PATCH 284/354] workqueue: rework
+Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=f331f53f8783ce6f1a36700c65f3760236ded195
[ Upstream commit d15a862f24df983458533aebd6fa207ecdd1095a ]
@@ -65,7 +65,7 @@ index aa40960ef4c0..732913d4966c 100644
/*
* Init percpu_ref in atomic mode so that it's faster to shutdown.
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
-index b8c2c469ce7e..12eb48980df7 100644
+index 2ba7391c4560..2e6c3f658894 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -70,7 +70,7 @@
@@ -293,10 +293,10 @@ index f426a0661aa0..21ae66cd41d3 100644
+
#endif /* _LINUX_SWAIT_H */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
-index 60d673e15632..546aa73fba6a 100644
+index a7224fec99a7..7e77fe3900c2 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
-@@ -455,10 +455,6 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
+@@ -456,10 +456,6 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
extern void destroy_workqueue(struct workqueue_struct *wq);
@@ -308,10 +308,10 @@ index 60d673e15632..546aa73fba6a 100644
extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
diff --git a/init/main.c b/init/main.c
-index 8555afc3f3e1..703b627a6060 100644
+index c90444ec2990..298257e2cd42 100644
--- a/init/main.c
+++ b/init/main.c
-@@ -1136,7 +1136,6 @@ static noinline void __init kernel_init_freeable(void)
+@@ -1126,7 +1126,6 @@ static noinline void __init kernel_init_freeable(void)
smp_prepare_cpus(setup_max_cpus);
workqueue_init();
@@ -364,7 +364,7 @@ index ab64a3c261bc..dded9ca916eb 100644
* If a worker went to sleep, notify and ask workqueue whether
* it wants to wake up a task to maintain concurrency.
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
-index 6e19604eaa00..92ee6cf49c24 100644
+index fb28793cb87b..089c74ff0511 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -748,29 +748,6 @@ static void hrtimer_switch_to_hres(void)
@@ -406,7 +406,7 @@ index 6e19604eaa00..92ee6cf49c24 100644
#else
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
-index cb81c7767b17..c047a647146e 100644
+index c148b0b8bda9..8e012510425e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -50,8 +50,6 @@
@@ -491,7 +491,7 @@ index cb81c7767b17..c047a647146e 100644
#ifdef CONFIG_DEBUG_OBJECTS_WORK
static struct debug_obj_descr work_debug_descr;
-@@ -863,20 +829,14 @@ static struct worker *first_idle_worker(struct worker_pool *pool)
+@@ -866,20 +832,14 @@ static struct worker *first_idle_worker(struct worker_pool *pool)
* Wake up the first idle worker of @pool.
*
* CONTEXT:
@@ -514,7 +514,7 @@ index cb81c7767b17..c047a647146e 100644
}
/**
-@@ -905,7 +865,7 @@ void wq_worker_running(struct task_struct *task)
+@@ -908,7 +868,7 @@ void wq_worker_running(struct task_struct *task)
*/
void wq_worker_sleeping(struct task_struct *task)
{
@@ -523,7 +523,7 @@ index cb81c7767b17..c047a647146e 100644
struct worker_pool *pool;
/*
-@@ -922,18 +882,26 @@ void wq_worker_sleeping(struct task_struct *task)
+@@ -925,18 +885,26 @@ void wq_worker_sleeping(struct task_struct *task)
return;
worker->sleeping = 1;
@@ -553,7 +553,7 @@ index cb81c7767b17..c047a647146e 100644
}
/**
-@@ -944,7 +912,7 @@ void wq_worker_sleeping(struct task_struct *task)
+@@ -947,7 +915,7 @@ void wq_worker_sleeping(struct task_struct *task)
* Set @flags in @worker->flags and adjust nr_running accordingly.
*
* CONTEXT:
@@ -562,7 +562,7 @@ index cb81c7767b17..c047a647146e 100644
*/
static inline void worker_set_flags(struct worker *worker, unsigned int flags)
{
-@@ -969,7 +937,7 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags)
+@@ -972,7 +940,7 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags)
* Clear @flags in @worker->flags and adjust nr_running accordingly.
*
* CONTEXT:
@@ -571,7 +571,7 @@ index cb81c7767b17..c047a647146e 100644
*/
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
-@@ -1017,7 +985,7 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
+@@ -1020,7 +988,7 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
* actually occurs, it should be easy to locate the culprit work function.
*
* CONTEXT:
@@ -580,7 +580,7 @@ index cb81c7767b17..c047a647146e 100644
*
* Return:
* Pointer to worker which is executing @work if found, %NULL
-@@ -1052,7 +1020,7 @@ static struct worker *find_worker_executing_work(struct worker_pool *pool,
+@@ -1055,7 +1023,7 @@ static struct worker *find_worker_executing_work(struct worker_pool *pool,
* nested inside outer list_for_each_entry_safe().
*
* CONTEXT:
@@ -589,7 +589,7 @@ index cb81c7767b17..c047a647146e 100644
*/
static void move_linked_works(struct work_struct *work, struct list_head *head,
struct work_struct **nextp)
-@@ -1130,11 +1098,9 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
+@@ -1133,11 +1101,9 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
* As both pwqs and pools are RCU protected, the
* following lock operations are safe.
*/
@@ -603,7 +603,7 @@ index cb81c7767b17..c047a647146e 100644
}
}
-@@ -1167,7 +1133,7 @@ static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
+@@ -1170,7 +1136,7 @@ static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
* decrement nr_in_flight of its pwq and handle workqueue flushing.
*
* CONTEXT:
@@ -612,7 +612,7 @@ index cb81c7767b17..c047a647146e 100644
*/
static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
{
-@@ -1238,7 +1204,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
+@@ -1241,7 +1207,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
struct worker_pool *pool;
struct pool_workqueue *pwq;
@@ -621,7 +621,7 @@ index cb81c7767b17..c047a647146e 100644
/* try to steal the timer if it exists */
if (is_dwork) {
-@@ -1266,7 +1232,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
+@@ -1269,7 +1235,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
if (!pool)
goto fail;
@@ -630,7 +630,7 @@ index cb81c7767b17..c047a647146e 100644
/*
* work->data is guaranteed to point to pwq only while the work
* item is queued on pwq->wq, and both updating work->data to point
-@@ -1295,17 +1261,17 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
+@@ -1298,17 +1264,17 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
/* work->data points to pwq iff queued, point to pool */
set_work_pool_and_keep_pending(work, pool->id);
@@ -652,7 +652,7 @@ index cb81c7767b17..c047a647146e 100644
return -EAGAIN;
}
-@@ -1320,7 +1286,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
+@@ -1323,7 +1289,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
* work_struct flags.
*
* CONTEXT:
@@ -661,7 +661,7 @@ index cb81c7767b17..c047a647146e 100644
*/
static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
struct list_head *head, unsigned int extra_flags)
-@@ -1407,13 +1373,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+@@ -1410,13 +1376,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
* queued or lose PENDING. Grabbing PENDING and queueing should
* happen with IRQ disabled.
*/
@@ -675,7 +675,7 @@ index cb81c7767b17..c047a647146e 100644
/* if draining, only works from the same workqueue are allowed */
-@@ -1442,7 +1402,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+@@ -1445,7 +1405,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
if (last_pool && last_pool != pwq->pool) {
struct worker *worker;
@@ -684,7 +684,7 @@ index cb81c7767b17..c047a647146e 100644
worker = find_worker_executing_work(last_pool, work);
-@@ -1450,11 +1410,11 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+@@ -1453,11 +1413,11 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
pwq = worker->current_pwq;
} else {
/* meh... not running there, queue here */
@@ -699,7 +699,7 @@ index cb81c7767b17..c047a647146e 100644
}
/*
-@@ -1467,7 +1427,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+@@ -1470,7 +1430,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
*/
if (unlikely(!pwq->refcnt)) {
if (wq->flags & WQ_UNBOUND) {
@@ -708,7 +708,7 @@ index cb81c7767b17..c047a647146e 100644
cpu_relax();
goto retry;
}
-@@ -1500,7 +1460,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+@@ -1503,7 +1463,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
insert_work(pwq, work, worklist, work_flags);
out:
@@ -717,7 +717,7 @@ index cb81c7767b17..c047a647146e 100644
rcu_read_unlock();
}
-@@ -1521,14 +1481,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
+@@ -1524,14 +1484,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
bool ret = false;
unsigned long flags;
@@ -734,7 +734,7 @@ index cb81c7767b17..c047a647146e 100644
return ret;
}
EXPORT_SYMBOL(queue_work_on);
-@@ -1536,12 +1496,11 @@ EXPORT_SYMBOL(queue_work_on);
+@@ -1539,12 +1499,11 @@ EXPORT_SYMBOL(queue_work_on);
void delayed_work_timer_fn(struct timer_list *t)
{
struct delayed_work *dwork = from_timer(dwork, t, timer);
@@ -750,7 +750,7 @@ index cb81c7767b17..c047a647146e 100644
}
EXPORT_SYMBOL(delayed_work_timer_fn);
-@@ -1596,14 +1555,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+@@ -1599,14 +1558,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
unsigned long flags;
/* read the comment in __queue_work() */
@@ -767,7 +767,7 @@ index cb81c7767b17..c047a647146e 100644
return ret;
}
EXPORT_SYMBOL(queue_delayed_work_on);
-@@ -1638,7 +1597,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
+@@ -1641,7 +1600,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
if (likely(ret >= 0)) {
__queue_delayed_work(cpu, wq, dwork, delay);
@@ -776,7 +776,7 @@ index cb81c7767b17..c047a647146e 100644
}
/* -ENOENT from try_to_grab_pending() becomes %true */
-@@ -1649,12 +1608,11 @@ EXPORT_SYMBOL_GPL(mod_delayed_work_on);
+@@ -1652,12 +1611,11 @@ EXPORT_SYMBOL_GPL(mod_delayed_work_on);
static void rcu_work_rcufn(struct rcu_head *rcu)
{
struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
@@ -791,7 +791,7 @@ index cb81c7767b17..c047a647146e 100644
}
/**
-@@ -1689,7 +1647,7 @@ EXPORT_SYMBOL(queue_rcu_work);
+@@ -1692,7 +1650,7 @@ EXPORT_SYMBOL(queue_rcu_work);
* necessary.
*
* LOCKING:
@@ -800,7 +800,7 @@ index cb81c7767b17..c047a647146e 100644
*/
static void worker_enter_idle(struct worker *worker)
{
-@@ -1706,9 +1664,7 @@ static void worker_enter_idle(struct worker *worker)
+@@ -1709,9 +1667,7 @@ static void worker_enter_idle(struct worker *worker)
worker->last_active = jiffies;
/* idle_list is LIFO */
@@ -810,7 +810,7 @@ index cb81c7767b17..c047a647146e 100644
if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
-@@ -1731,7 +1687,7 @@ static void worker_enter_idle(struct worker *worker)
+@@ -1734,7 +1690,7 @@ static void worker_enter_idle(struct worker *worker)
* @worker is leaving idle state. Update stats.
*
* LOCKING:
@@ -819,7 +819,7 @@ index cb81c7767b17..c047a647146e 100644
*/
static void worker_leave_idle(struct worker *worker)
{
-@@ -1741,9 +1697,7 @@ static void worker_leave_idle(struct worker *worker)
+@@ -1744,9 +1700,7 @@ static void worker_leave_idle(struct worker *worker)
return;
worker_clr_flags(worker, WORKER_IDLE);
pool->nr_idle--;
@@ -829,7 +829,7 @@ index cb81c7767b17..c047a647146e 100644
}
static struct worker *alloc_worker(int node)
-@@ -1868,11 +1822,11 @@ static struct worker *create_worker(struct worker_pool *pool)
+@@ -1871,11 +1825,11 @@ static struct worker *create_worker(struct worker_pool *pool)
worker_attach_to_pool(worker, pool);
/* start the newly created worker */
@@ -843,7 +843,7 @@ index cb81c7767b17..c047a647146e 100644
return worker;
-@@ -1891,7 +1845,7 @@ static struct worker *create_worker(struct worker_pool *pool)
+@@ -1894,7 +1848,7 @@ static struct worker *create_worker(struct worker_pool *pool)
* be idle.
*
* CONTEXT:
@@ -852,7 +852,7 @@ index cb81c7767b17..c047a647146e 100644
*/
static void destroy_worker(struct worker *worker)
{
-@@ -1908,9 +1862,7 @@ static void destroy_worker(struct worker *worker)
+@@ -1911,9 +1865,7 @@ static void destroy_worker(struct worker *worker)
pool->nr_workers--;
pool->nr_idle--;
@@ -862,7 +862,7 @@ index cb81c7767b17..c047a647146e 100644
worker->flags |= WORKER_DIE;
wake_up_process(worker->task);
}
-@@ -1919,7 +1871,7 @@ static void idle_worker_timeout(struct timer_list *t)
+@@ -1922,7 +1874,7 @@ static void idle_worker_timeout(struct timer_list *t)
{
struct worker_pool *pool = from_timer(pool, t, idle_timer);
@@ -871,7 +871,7 @@ index cb81c7767b17..c047a647146e 100644
while (too_many_workers(pool)) {
struct worker *worker;
-@@ -1937,7 +1889,7 @@ static void idle_worker_timeout(struct timer_list *t)
+@@ -1940,7 +1892,7 @@ static void idle_worker_timeout(struct timer_list *t)
destroy_worker(worker);
}
@@ -880,7 +880,7 @@ index cb81c7767b17..c047a647146e 100644
}
static void send_mayday(struct work_struct *work)
-@@ -1968,8 +1920,8 @@ static void pool_mayday_timeout(struct timer_list *t)
+@@ -1971,8 +1923,8 @@ static void pool_mayday_timeout(struct timer_list *t)
struct worker_pool *pool = from_timer(pool, t, mayday_timer);
struct work_struct *work;
@@ -891,7 +891,7 @@ index cb81c7767b17..c047a647146e 100644
if (need_to_create_worker(pool)) {
/*
-@@ -1982,8 +1934,8 @@ static void pool_mayday_timeout(struct timer_list *t)
+@@ -1985,8 +1937,8 @@ static void pool_mayday_timeout(struct timer_list *t)
send_mayday(work);
}
@@ -902,7 +902,7 @@ index cb81c7767b17..c047a647146e 100644
mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
}
-@@ -2002,7 +1954,7 @@ static void pool_mayday_timeout(struct timer_list *t)
+@@ -2005,7 +1957,7 @@ static void pool_mayday_timeout(struct timer_list *t)
* may_start_working() %true.
*
* LOCKING:
@@ -911,7 +911,7 @@ index cb81c7767b17..c047a647146e 100644
* multiple times. Does GFP_KERNEL allocations. Called only from
* manager.
*/
-@@ -2011,7 +1963,7 @@ __releases(&pool->lock)
+@@ -2014,7 +1966,7 @@ __releases(&pool->lock)
__acquires(&pool->lock)
{
restart:
@@ -920,7 +920,7 @@ index cb81c7767b17..c047a647146e 100644
/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
-@@ -2027,7 +1979,7 @@ __acquires(&pool->lock)
+@@ -2030,7 +1982,7 @@ __acquires(&pool->lock)
}
del_timer_sync(&pool->mayday_timer);
@@ -929,7 +929,7 @@ index cb81c7767b17..c047a647146e 100644
/*
* This is necessary even after a new worker was just successfully
* created as @pool->lock was dropped and the new worker might have
-@@ -2050,7 +2002,7 @@ __acquires(&pool->lock)
+@@ -2053,7 +2005,7 @@ __acquires(&pool->lock)
* and may_start_working() is true.
*
* CONTEXT:
@@ -938,7 +938,7 @@ index cb81c7767b17..c047a647146e 100644
* multiple times. Does GFP_KERNEL allocations.
*
* Return:
-@@ -2073,7 +2025,7 @@ static bool manage_workers(struct worker *worker)
+@@ -2076,7 +2028,7 @@ static bool manage_workers(struct worker *worker)
pool->manager = NULL;
pool->flags &= ~POOL_MANAGER_ACTIVE;
@@ -947,7 +947,7 @@ index cb81c7767b17..c047a647146e 100644
return true;
}
-@@ -2089,7 +2041,7 @@ static bool manage_workers(struct worker *worker)
+@@ -2092,7 +2044,7 @@ static bool manage_workers(struct worker *worker)
* call this function to process a work.
*
* CONTEXT:
@@ -956,7 +956,7 @@ index cb81c7767b17..c047a647146e 100644
*/
static void process_one_work(struct worker *worker, struct work_struct *work)
__releases(&pool->lock)
-@@ -2171,7 +2123,7 @@ __acquires(&pool->lock)
+@@ -2174,7 +2126,7 @@ __acquires(&pool->lock)
*/
set_work_pool_and_clear_pending(work, pool->id);
@@ -965,7 +965,7 @@ index cb81c7767b17..c047a647146e 100644
lock_map_acquire(&pwq->wq->lockdep_map);
lock_map_acquire(&lockdep_map);
-@@ -2226,7 +2178,7 @@ __acquires(&pool->lock)
+@@ -2229,7 +2181,7 @@ __acquires(&pool->lock)
*/
cond_resched();
@@ -974,7 +974,7 @@ index cb81c7767b17..c047a647146e 100644
/* clear cpu intensive status */
if (unlikely(cpu_intensive))
-@@ -2249,7 +2201,7 @@ __acquires(&pool->lock)
+@@ -2252,7 +2204,7 @@ __acquires(&pool->lock)
* fetches a work from the top and executes it.
*
* CONTEXT:
@@ -983,7 +983,7 @@ index cb81c7767b17..c047a647146e 100644
* multiple times.
*/
static void process_scheduled_works(struct worker *worker)
-@@ -2291,11 +2243,11 @@ static int worker_thread(void *__worker)
+@@ -2294,11 +2246,11 @@ static int worker_thread(void *__worker)
/* tell the scheduler that this is a workqueue worker */
set_pf_worker(true);
woke_up:
@@ -997,7 +997,7 @@ index cb81c7767b17..c047a647146e 100644
WARN_ON_ONCE(!list_empty(&worker->entry));
set_pf_worker(false);
-@@ -2361,7 +2313,7 @@ static int worker_thread(void *__worker)
+@@ -2364,7 +2316,7 @@ static int worker_thread(void *__worker)
*/
worker_enter_idle(worker);
__set_current_state(TASK_IDLE);
@@ -1006,7 +1006,7 @@ index cb81c7767b17..c047a647146e 100644
schedule();
goto woke_up;
}
-@@ -2415,7 +2367,7 @@ static int rescuer_thread(void *__rescuer)
+@@ -2418,7 +2370,7 @@ static int rescuer_thread(void *__rescuer)
should_stop = kthread_should_stop();
/* see whether any pwq is asking for help */
@@ -1015,7 +1015,7 @@ index cb81c7767b17..c047a647146e 100644
while (!list_empty(&wq->maydays)) {
struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
-@@ -2427,11 +2379,11 @@ static int rescuer_thread(void *__rescuer)
+@@ -2430,11 +2382,11 @@ static int rescuer_thread(void *__rescuer)
__set_current_state(TASK_RUNNING);
list_del_init(&pwq->mayday_node);
@@ -1029,7 +1029,7 @@ index cb81c7767b17..c047a647146e 100644
/*
* Slurp in all works issued via this workqueue and
-@@ -2460,7 +2412,7 @@ static int rescuer_thread(void *__rescuer)
+@@ -2463,7 +2415,7 @@ static int rescuer_thread(void *__rescuer)
* incur MAYDAY_INTERVAL delay inbetween.
*/
if (need_to_create_worker(pool)) {
@@ -1038,7 +1038,7 @@ index cb81c7767b17..c047a647146e 100644
/*
* Queue iff we aren't racing destruction
* and somebody else hasn't queued it already.
-@@ -2469,7 +2421,7 @@ static int rescuer_thread(void *__rescuer)
+@@ -2472,7 +2424,7 @@ static int rescuer_thread(void *__rescuer)
get_pwq(pwq);
list_add_tail(&pwq->mayday_node, &wq->maydays);
}
@@ -1047,7 +1047,7 @@ index cb81c7767b17..c047a647146e 100644
}
}
-@@ -2487,14 +2439,14 @@ static int rescuer_thread(void *__rescuer)
+@@ -2490,14 +2442,14 @@ static int rescuer_thread(void *__rescuer)
if (need_more_worker(pool))
wake_up_worker(pool);
@@ -1065,7 +1065,7 @@ index cb81c7767b17..c047a647146e 100644
if (should_stop) {
__set_current_state(TASK_RUNNING);
-@@ -2574,7 +2526,7 @@ static void wq_barrier_func(struct work_struct *work)
+@@ -2577,7 +2529,7 @@ static void wq_barrier_func(struct work_struct *work)
* underneath us, so we can't reliably determine pwq from @target.
*
* CONTEXT:
@@ -1074,7 +1074,7 @@ index cb81c7767b17..c047a647146e 100644
*/
static void insert_wq_barrier(struct pool_workqueue *pwq,
struct wq_barrier *barr,
-@@ -2661,7 +2613,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
+@@ -2664,7 +2616,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
for_each_pwq(pwq, wq) {
struct worker_pool *pool = pwq->pool;
@@ -1083,7 +1083,7 @@ index cb81c7767b17..c047a647146e 100644
if (flush_color >= 0) {
WARN_ON_ONCE(pwq->flush_color != -1);
-@@ -2678,7 +2630,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
+@@ -2681,7 +2633,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
pwq->work_color = work_color;
}
@@ -1092,7 +1092,7 @@ index cb81c7767b17..c047a647146e 100644
}
if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
-@@ -2878,9 +2830,9 @@ void drain_workqueue(struct workqueue_struct *wq)
+@@ -2881,9 +2833,9 @@ void drain_workqueue(struct workqueue_struct *wq)
for_each_pwq(pwq, wq) {
bool drained;
@@ -1104,7 +1104,7 @@ index cb81c7767b17..c047a647146e 100644
if (drained)
continue;
-@@ -2916,7 +2868,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
+@@ -2919,7 +2871,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
return false;
}
@@ -1113,7 +1113,7 @@ index cb81c7767b17..c047a647146e 100644
/* see the comment in try_to_grab_pending() with the same code */
pwq = get_work_pwq(work);
if (pwq) {
-@@ -2932,7 +2884,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
+@@ -2935,7 +2887,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
check_flush_dependency(pwq->wq, work);
insert_wq_barrier(pwq, barr, work, worker);
@@ -1122,7 +1122,7 @@ index cb81c7767b17..c047a647146e 100644
/*
* Force a lock recursion deadlock when using flush_work() inside a
-@@ -2951,7 +2903,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
+@@ -2954,7 +2906,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
rcu_read_unlock();
return true;
already_gone:
@@ -1131,7 +1131,7 @@ index cb81c7767b17..c047a647146e 100644
rcu_read_unlock();
return false;
}
-@@ -3050,7 +3002,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
+@@ -3053,7 +3005,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
/* tell other tasks trying to grab @work to back off */
mark_work_canceling(work);
@@ -1140,7 +1140,7 @@ index cb81c7767b17..c047a647146e 100644
/*
* This allows canceling during early boot. We know that @work
-@@ -3111,10 +3063,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
+@@ -3114,10 +3066,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
*/
bool flush_delayed_work(struct delayed_work *dwork)
{
@@ -1153,7 +1153,7 @@ index cb81c7767b17..c047a647146e 100644
return flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
-@@ -3152,7 +3104,7 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork)
+@@ -3155,7 +3107,7 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork)
return false;
set_work_pool_and_clear_pending(work, get_work_pool_id(work));
@@ -1162,7 +1162,7 @@ index cb81c7767b17..c047a647146e 100644
return ret;
}
-@@ -3262,7 +3214,7 @@ EXPORT_SYMBOL_GPL(execute_in_process_context);
+@@ -3265,7 +3217,7 @@ EXPORT_SYMBOL_GPL(execute_in_process_context);
*
* Undo alloc_workqueue_attrs().
*/
@@ -1171,7 +1171,7 @@ index cb81c7767b17..c047a647146e 100644
{
if (attrs) {
free_cpumask_var(attrs->cpumask);
-@@ -3272,21 +3224,20 @@ void free_workqueue_attrs(struct workqueue_attrs *attrs)
+@@ -3275,21 +3227,20 @@ void free_workqueue_attrs(struct workqueue_attrs *attrs)
/**
* alloc_workqueue_attrs - allocate a workqueue_attrs
@@ -1196,7 +1196,7 @@ index cb81c7767b17..c047a647146e 100644
goto fail;
cpumask_copy(attrs->cpumask, cpu_possible_mask);
-@@ -3343,7 +3294,7 @@ static bool wqattrs_equal(const struct workqueue_attrs *a,
+@@ -3346,7 +3297,7 @@ static bool wqattrs_equal(const struct workqueue_attrs *a,
*/
static int init_worker_pool(struct worker_pool *pool)
{
@@ -1205,7 +1205,7 @@ index cb81c7767b17..c047a647146e 100644
pool->id = -1;
pool->cpu = -1;
pool->node = NUMA_NO_NODE;
-@@ -3364,7 +3315,7 @@ static int init_worker_pool(struct worker_pool *pool)
+@@ -3367,7 +3318,7 @@ static int init_worker_pool(struct worker_pool *pool)
pool->refcnt = 1;
/* shouldn't fail above this point */
@@ -1214,7 +1214,7 @@ index cb81c7767b17..c047a647146e 100644
if (!pool->attrs)
return -ENOMEM;
return 0;
-@@ -3429,15 +3380,15 @@ static void put_unbound_pool(struct worker_pool *pool)
+@@ -3432,15 +3383,15 @@ static void put_unbound_pool(struct worker_pool *pool)
* @pool's workers from blocking on attach_mutex. We're the last
* manager and @pool gets freed with the flag set.
*/
@@ -1233,7 +1233,7 @@ index cb81c7767b17..c047a647146e 100644
mutex_lock(&wq_pool_attach_mutex);
if (!list_empty(&pool->workers))
-@@ -3597,7 +3548,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
+@@ -3600,7 +3551,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
return;
/* this function can be called during early boot w/ irq disabled */
@@ -1242,7 +1242,7 @@ index cb81c7767b17..c047a647146e 100644
/*
* During [un]freezing, the caller is responsible for ensuring that
-@@ -3627,7 +3578,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
+@@ -3630,7 +3581,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
pwq->max_active = 0;
}
@@ -1251,7 +1251,7 @@ index cb81c7767b17..c047a647146e 100644
}
/* initialize newly alloced @pwq which is associated with @wq and @pool */
-@@ -3800,8 +3751,8 @@ apply_wqattrs_prepare(struct workqueue_struct *wq,
+@@ -3803,8 +3754,8 @@ apply_wqattrs_prepare(struct workqueue_struct *wq,
ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);
@@ -1262,7 +1262,7 @@ index cb81c7767b17..c047a647146e 100644
if (!ctx || !new_attrs || !tmp_attrs)
goto out_free;
-@@ -3937,7 +3888,7 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
+@@ -3940,7 +3891,7 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
*
* Return: 0 on success and -errno on failure.
*/
@@ -1271,7 +1271,7 @@ index cb81c7767b17..c047a647146e 100644
const struct workqueue_attrs *attrs)
{
int ret;
-@@ -3948,7 +3899,6 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
+@@ -3951,7 +3902,6 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
return ret;
}
@@ -1279,7 +1279,7 @@ index cb81c7767b17..c047a647146e 100644
/**
* wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
-@@ -4026,9 +3976,9 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
+@@ -4029,9 +3979,9 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
use_dfl_pwq:
mutex_lock(&wq->mutex);
@@ -1291,7 +1291,7 @@ index cb81c7767b17..c047a647146e 100644
old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
out_unlock:
mutex_unlock(&wq->mutex);
-@@ -4147,7 +4097,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
+@@ -4150,7 +4100,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
return NULL;
if (flags & WQ_UNBOUND) {
@@ -1300,7 +1300,7 @@ index cb81c7767b17..c047a647146e 100644
if (!wq->unbound_attrs)
goto err_free_wq;
}
-@@ -4234,9 +4184,9 @@ void destroy_workqueue(struct workqueue_struct *wq)
+@@ -4237,9 +4187,9 @@ void destroy_workqueue(struct workqueue_struct *wq)
struct worker *rescuer = wq->rescuer;
/* this prevents new queueing */
@@ -1312,7 +1312,7 @@ index cb81c7767b17..c047a647146e 100644
/* rescuer will empty maydays list before exiting */
kthread_stop(rescuer->task);
-@@ -4431,10 +4381,10 @@ unsigned int work_busy(struct work_struct *work)
+@@ -4434,10 +4384,10 @@ unsigned int work_busy(struct work_struct *work)
rcu_read_lock();
pool = get_work_pool(work);
if (pool) {
@@ -1325,7 +1325,7 @@ index cb81c7767b17..c047a647146e 100644
}
rcu_read_unlock();
-@@ -4641,10 +4591,10 @@ void show_workqueue_state(void)
+@@ -4644,10 +4594,10 @@ void show_workqueue_state(void)
pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
for_each_pwq(pwq, wq) {
@@ -1338,7 +1338,7 @@ index cb81c7767b17..c047a647146e 100644
/*
* We could be printing a lot from atomic context, e.g.
* sysrq-t -> show_workqueue_state(). Avoid triggering
-@@ -4658,7 +4608,7 @@ void show_workqueue_state(void)
+@@ -4661,7 +4611,7 @@ void show_workqueue_state(void)
struct worker *worker;
bool first = true;
@@ -1347,7 +1347,7 @@ index cb81c7767b17..c047a647146e 100644
if (pool->nr_workers == pool->nr_idle)
goto next_pool;
-@@ -4677,7 +4627,7 @@ void show_workqueue_state(void)
+@@ -4680,7 +4630,7 @@ void show_workqueue_state(void)
}
pr_cont("\n");
next_pool:
@@ -1356,7 +1356,7 @@ index cb81c7767b17..c047a647146e 100644
/*
* We could be printing a lot from atomic context, e.g.
* sysrq-t -> show_workqueue_state(). Avoid triggering
-@@ -4707,7 +4657,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
+@@ -4710,7 +4660,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
struct worker_pool *pool = worker->pool;
if (pool) {
@@ -1365,7 +1365,7 @@ index cb81c7767b17..c047a647146e 100644
/*
* ->desc tracks information (wq name or
* set_worker_desc()) for the latest execution. If
-@@ -4721,7 +4671,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
+@@ -4724,7 +4674,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
scnprintf(buf + off, size - off, "-%s",
worker->desc);
}
@@ -1374,7 +1374,7 @@ index cb81c7767b17..c047a647146e 100644
}
}
-@@ -4752,7 +4702,7 @@ static void unbind_workers(int cpu)
+@@ -4755,7 +4705,7 @@ static void unbind_workers(int cpu)
for_each_cpu_worker_pool(pool, cpu) {
mutex_lock(&wq_pool_attach_mutex);
@@ -1383,7 +1383,7 @@ index cb81c7767b17..c047a647146e 100644
/*
* We've blocked all attach/detach operations. Make all workers
-@@ -4766,7 +4716,7 @@ static void unbind_workers(int cpu)
+@@ -4769,7 +4719,7 @@ static void unbind_workers(int cpu)
pool->flags |= POOL_DISASSOCIATED;
@@ -1392,7 +1392,7 @@ index cb81c7767b17..c047a647146e 100644
mutex_unlock(&wq_pool_attach_mutex);
/*
-@@ -4792,9 +4742,9 @@ static void unbind_workers(int cpu)
+@@ -4795,9 +4745,9 @@ static void unbind_workers(int cpu)
* worker blocking could lead to lengthy stalls. Kick off
* unbound chain execution of currently pending work items.
*/
@@ -1404,7 +1404,7 @@ index cb81c7767b17..c047a647146e 100644
}
}
-@@ -4821,7 +4771,7 @@ static void rebind_workers(struct worker_pool *pool)
+@@ -4824,7 +4774,7 @@ static void rebind_workers(struct worker_pool *pool)
WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
pool->attrs->cpumask) < 0);
@@ -1413,7 +1413,7 @@ index cb81c7767b17..c047a647146e 100644
pool->flags &= ~POOL_DISASSOCIATED;
-@@ -4860,7 +4810,7 @@ static void rebind_workers(struct worker_pool *pool)
+@@ -4863,7 +4813,7 @@ static void rebind_workers(struct worker_pool *pool)
WRITE_ONCE(worker->flags, worker_flags);
}
@@ -1422,7 +1422,7 @@ index cb81c7767b17..c047a647146e 100644
}
/**
-@@ -5319,7 +5269,7 @@ static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
+@@ -5326,7 +5276,7 @@ static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
lockdep_assert_held(&wq_pool_mutex);
@@ -1431,7 +1431,7 @@ index cb81c7767b17..c047a647146e 100644
if (!attrs)
return NULL;
-@@ -5748,7 +5698,7 @@ static void __init wq_numa_init(void)
+@@ -5755,7 +5705,7 @@ static void __init wq_numa_init(void)
return;
}
@@ -1440,7 +1440,7 @@ index cb81c7767b17..c047a647146e 100644
BUG_ON(!wq_update_unbound_numa_attrs_buf);
/*
-@@ -5823,7 +5773,7 @@ int __init workqueue_init_early(void)
+@@ -5830,7 +5780,7 @@ int __init workqueue_init_early(void)
for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
struct workqueue_attrs *attrs;
@@ -1449,7 +1449,7 @@ index cb81c7767b17..c047a647146e 100644
attrs->nice = std_nice[i];
unbound_std_wq_attrs[i] = attrs;
-@@ -5832,7 +5782,7 @@ int __init workqueue_init_early(void)
+@@ -5839,7 +5789,7 @@ int __init workqueue_init_early(void)
* guaranteed by max_active which is enforced by pwqs.
* Turn off NUMA so that dfl_pwq is used for all nodes.
*/