path: root/debian/patches-rt/0284-workqueue-rework.patch
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-08 03:21:37 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-08 03:21:37 +0000
commit     06343b27411344fc542f4f3a643f8441aa35252d (patch)
tree       66aa45187c93c350bbdf7e6ae4467a70bf3a8f4c  /debian/patches-rt/0284-workqueue-rework.patch
parent     Merging upstream version 4.19.260. (diff)
download   linux-06343b27411344fc542f4f3a643f8441aa35252d.tar.xz
           linux-06343b27411344fc542f4f3a643f8441aa35252d.zip
Adding debian version 4.19.260-1. (debian/4.19.260-1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0284-workqueue-rework.patch')
-rw-r--r--  debian/patches-rt/0284-workqueue-rework.patch  1460
1 file changed, 1460 insertions, 0 deletions
diff --git a/debian/patches-rt/0284-workqueue-rework.patch b/debian/patches-rt/0284-workqueue-rework.patch
new file mode 100644
index 000000000..ea00e194a
--- /dev/null
+++ b/debian/patches-rt/0284-workqueue-rework.patch
@@ -0,0 +1,1460 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 29 May 2019 18:52:27 +0200
+Subject: [PATCH 284/342] workqueue: rework
+Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=ef48b1fd6691fa627afcf72bba3295c66c23da41
+
+[ Upstream commit d15a862f24df983458533aebd6fa207ecdd1095a ]
+
+This is an all-in-one change for the workqueue rework.
+The worker_pool.lock is converted to a raw_spinlock_t. With this change we can
+schedule work items from preempt-disabled sections and from sections with
+interrupts disabled. This change allows us to remove all the kthread_.*
+workarounds we used to have.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
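
As a minimal sketch of what the raw pool lock enables (illustrative module code,
not part of this patch; the names my_work, my_work_fn and my_lock are made up),
queueing work from an interrupts-disabled section no longer needs the
kthread_schedule_work() workaround on RT:

  #include <linux/module.h>
  #include <linux/workqueue.h>
  #include <linux/spinlock.h>

  /* Runs later in a workqueue worker, i.e. in preemptible process context. */
  static void my_work_fn(struct work_struct *work)
  {
          pr_info("deferred work executed\n");
  }

  static DECLARE_WORK(my_work, my_work_fn);
  static DEFINE_RAW_SPINLOCK(my_lock);

  /* Caller holds a raw spinlock with interrupts off; with pool->lock being a
   * raw_spinlock_t, plain schedule_work() is usable here even on RT. */
  static void my_event_handler(void)
  {
          unsigned long flags;

          raw_spin_lock_irqsave(&my_lock, flags);
          schedule_work(&my_work);
          raw_spin_unlock_irqrestore(&my_lock, flags);
  }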
+---
+ block/blk-core.c | 6 +-
+ drivers/block/loop.c | 2 +-
+ drivers/spi/spi-rockchip.c | 1 -
+ drivers/thermal/x86_pkg_temp_thermal.c | 28 +--
+ fs/aio.c | 10 +-
+ include/linux/blk-cgroup.h | 2 +-
+ include/linux/blkdev.h | 2 +-
+ include/linux/kthread-cgroup.h | 17 --
+ include/linux/kthread.h | 15 +-
+ include/linux/swait.h | 14 ++
+ include/linux/workqueue.h | 4 -
+ init/main.c | 1 -
+ kernel/kthread.c | 14 --
+ kernel/sched/core.c | 1 +
+ kernel/time/hrtimer.c | 24 --
+ kernel/workqueue.c | 304 +++++++++++--------------
+ 16 files changed, 163 insertions(+), 282 deletions(-)
+ delete mode 100644 include/linux/kthread-cgroup.h
+
+diff --git a/block/blk-core.c b/block/blk-core.c
+index a67a50dd714a..ed6ae335756d 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -973,7 +973,7 @@ void blk_queue_exit(struct request_queue *q)
+ percpu_ref_put(&q->q_usage_counter);
+ }
+
+-static void blk_queue_usage_counter_release_wrk(struct kthread_work *work)
++static void blk_queue_usage_counter_release_wrk(struct work_struct *work)
+ {
+ struct request_queue *q =
+ container_of(work, struct request_queue, mq_pcpu_wake);
+@@ -987,7 +987,7 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
+ container_of(ref, struct request_queue, q_usage_counter);
+
+ if (wq_has_sleeper(&q->mq_freeze_wq))
+- kthread_schedule_work(&q->mq_pcpu_wake);
++ schedule_work(&q->mq_pcpu_wake);
+ }
+
+ static void blk_rq_timed_out_timer(struct timer_list *t)
+@@ -1087,7 +1087,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
+ queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q);
+
+ init_waitqueue_head(&q->mq_freeze_wq);
+- kthread_init_work(&q->mq_pcpu_wake, blk_queue_usage_counter_release_wrk);
++ INIT_WORK(&q->mq_pcpu_wake, blk_queue_usage_counter_release_wrk);
+
+ /*
+ * Init percpu_ref in atomic mode so that it's faster to shutdown.
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index d24660961343..c31a76485c9c 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -70,7 +70,7 @@
+ #include <linux/writeback.h>
+ #include <linux/completion.h>
+ #include <linux/highmem.h>
+-#include <linux/kthread-cgroup.h>
++#include <linux/kthread.h>
+ #include <linux/splice.h>
+ #include <linux/sysfs.h>
+ #include <linux/miscdevice.h>
+diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
+index 63b10236eb05..185bbdce62b1 100644
+--- a/drivers/spi/spi-rockchip.c
++++ b/drivers/spi/spi-rockchip.c
+@@ -22,7 +22,6 @@
+ #include <linux/spi/spi.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/scatterlist.h>
+-#include <linux/interrupt.h>
+
+ #define DRIVER_NAME "rockchip-spi"
+
+diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
+index 82f21fd4afb0..1ef937d799e4 100644
+--- a/drivers/thermal/x86_pkg_temp_thermal.c
++++ b/drivers/thermal/x86_pkg_temp_thermal.c
+@@ -29,7 +29,6 @@
+ #include <linux/pm.h>
+ #include <linux/thermal.h>
+ #include <linux/debugfs.h>
+-#include <linux/kthread.h>
+ #include <asm/cpu_device_id.h>
+ #include <asm/mce.h>
+
+@@ -330,7 +329,7 @@ static void pkg_thermal_schedule_work(int cpu, struct delayed_work *work)
+ schedule_delayed_work_on(cpu, work, ms);
+ }
+
+-static void pkg_thermal_notify_work(struct kthread_work *work)
++static int pkg_thermal_notify(u64 msr_val)
+ {
+ int cpu = smp_processor_id();
+ struct pkg_device *pkgdev;
+@@ -349,32 +348,8 @@ static void pkg_thermal_notify_work(struct kthread_work *work)
+ }
+
+ spin_unlock_irqrestore(&pkg_temp_lock, flags);
+-}
+-
+-#ifdef CONFIG_PREEMPT_RT_FULL
+-static DEFINE_KTHREAD_WORK(notify_work, pkg_thermal_notify_work);
+-
+-static int pkg_thermal_notify(u64 msr_val)
+-{
+- kthread_schedule_work(&notify_work);
+- return 0;
+-}
+-
+-static void pkg_thermal_notify_flush(void)
+-{
+- kthread_flush_work(&notify_work);
+-}
+-
+-#else /* !CONFIG_PREEMPT_RT_FULL */
+-
+-static void pkg_thermal_notify_flush(void) { }
+-
+-static int pkg_thermal_notify(u64 msr_val)
+-{
+- pkg_thermal_notify_work(NULL);
+ return 0;
+ }
+-#endif /* CONFIG_PREEMPT_RT_FULL */
+
+ static int pkg_temp_thermal_device_add(unsigned int cpu)
+ {
+@@ -573,7 +548,6 @@ static void __exit pkg_temp_thermal_exit(void)
+ platform_thermal_package_rate_control = NULL;
+
+ cpuhp_remove_state(pkg_thermal_hp_state);
+- pkg_thermal_notify_flush();
+ debugfs_remove_recursive(debugfs);
+ kfree(packages);
+ }
+diff --git a/fs/aio.c b/fs/aio.c
+index 303e85033965..6deff68b92c7 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -121,7 +121,7 @@ struct kioctx {
+ long nr_pages;
+
+ struct rcu_work free_rwork; /* see free_ioctx() */
+- struct kthread_work free_kwork; /* see free_ioctx() */
++ struct work_struct free_work; /* see free_ioctx() */
+
+ /*
+ * signals when all in-flight requests are done
+@@ -609,9 +609,9 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
+ * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
+ * now it's safe to cancel any that need to be.
+ */
+-static void free_ioctx_users_work(struct kthread_work *work)
++static void free_ioctx_users_work(struct work_struct *work)
+ {
+- struct kioctx *ctx = container_of(work, struct kioctx, free_kwork);
++ struct kioctx *ctx = container_of(work, struct kioctx, free_work);
+ struct aio_kiocb *req;
+
+ spin_lock_irq(&ctx->ctx_lock);
+@@ -633,8 +633,8 @@ static void free_ioctx_users(struct percpu_ref *ref)
+ {
+ struct kioctx *ctx = container_of(ref, struct kioctx, users);
+
+- kthread_init_work(&ctx->free_kwork, free_ioctx_users_work);
+- kthread_schedule_work(&ctx->free_kwork);
++ INIT_WORK(&ctx->free_work, free_ioctx_users_work);
++ schedule_work(&ctx->free_work);
+ }
+
+ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
+diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
+index dc26ce6d840d..53f63f18ecdf 100644
+--- a/include/linux/blk-cgroup.h
++++ b/include/linux/blk-cgroup.h
+@@ -14,7 +14,7 @@
+ * Nauman Rafique <nauman@google.com>
+ */
+
+-#include <linux/kthread-cgroup.h>
++#include <linux/kthread.h>
+ #include <linux/percpu_counter.h>
+ #include <linux/seq_file.h>
+ #include <linux/radix-tree.h>
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 5566d0049c22..c01e5d9597f9 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -665,7 +665,7 @@ struct request_queue {
+ #endif
+ struct rcu_head rcu_head;
+ wait_queue_head_t mq_freeze_wq;
+- struct kthread_work mq_pcpu_wake;
++ struct work_struct mq_pcpu_wake;
+ struct percpu_ref q_usage_counter;
+ struct list_head all_q_node;
+
+diff --git a/include/linux/kthread-cgroup.h b/include/linux/kthread-cgroup.h
+deleted file mode 100644
+index 53d34bca9d72..000000000000
+--- a/include/linux/kthread-cgroup.h
++++ /dev/null
+@@ -1,17 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef _LINUX_KTHREAD_CGROUP_H
+-#define _LINUX_KTHREAD_CGROUP_H
+-#include <linux/kthread.h>
+-#include <linux/cgroup.h>
+-
+-#ifdef CONFIG_BLK_CGROUP
+-void kthread_associate_blkcg(struct cgroup_subsys_state *css);
+-struct cgroup_subsys_state *kthread_blkcg(void);
+-#else
+-static inline void kthread_associate_blkcg(struct cgroup_subsys_state *css) { }
+-static inline struct cgroup_subsys_state *kthread_blkcg(void)
+-{
+- return NULL;
+-}
+-#endif
+-#endif
+diff --git a/include/linux/kthread.h b/include/linux/kthread.h
+index 31140f0a6c2c..0e3b9b528c9e 100644
+--- a/include/linux/kthread.h
++++ b/include/linux/kthread.h
+@@ -4,6 +4,7 @@
+ /* Simple interface for creating and stopping kernel threads without mess. */
+ #include <linux/err.h>
+ #include <linux/sched.h>
++#include <linux/cgroup.h>
+
+ __printf(4, 5)
+ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
+@@ -200,12 +201,14 @@ bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work);
+
+ void kthread_destroy_worker(struct kthread_worker *worker);
+
+-extern struct kthread_worker kthread_global_worker;
+-void kthread_init_global_worker(void);
+-
+-static inline bool kthread_schedule_work(struct kthread_work *work)
++#ifdef CONFIG_BLK_CGROUP
++void kthread_associate_blkcg(struct cgroup_subsys_state *css);
++struct cgroup_subsys_state *kthread_blkcg(void);
++#else
++static inline void kthread_associate_blkcg(struct cgroup_subsys_state *css) { }
++static inline struct cgroup_subsys_state *kthread_blkcg(void)
+ {
+- return kthread_queue_work(&kthread_global_worker, work);
++ return NULL;
+ }
+-
++#endif
+ #endif /* _LINUX_KTHREAD_H */
+diff --git a/include/linux/swait.h b/include/linux/swait.h
+index f426a0661aa0..21ae66cd41d3 100644
+--- a/include/linux/swait.h
++++ b/include/linux/swait.h
+@@ -299,4 +299,18 @@ do { \
+ __ret; \
+ })
+
++#define __swait_event_lock_irq(wq, condition, lock, cmd) \
++ ___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \
++ raw_spin_unlock_irq(&lock); \
++ cmd; \
++ schedule(); \
++ raw_spin_lock_irq(&lock))
++
++#define swait_event_lock_irq(wq_head, condition, lock) \
++ do { \
++ if (condition) \
++ break; \
++ __swait_event_lock_irq(wq_head, condition, lock, ); \
++ } while (0)
++
+ #endif /* _LINUX_SWAIT_H */
+diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
+index 60d673e15632..546aa73fba6a 100644
+--- a/include/linux/workqueue.h
++++ b/include/linux/workqueue.h
+@@ -455,10 +455,6 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
+
+ extern void destroy_workqueue(struct workqueue_struct *wq);
+
+-struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
+-void free_workqueue_attrs(struct workqueue_attrs *attrs);
+-int apply_workqueue_attrs(struct workqueue_struct *wq,
+- const struct workqueue_attrs *attrs);
+ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);
+
+ extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
+diff --git a/init/main.c b/init/main.c
+index 8555afc3f3e1..703b627a6060 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -1136,7 +1136,6 @@ static noinline void __init kernel_init_freeable(void)
+ smp_prepare_cpus(setup_max_cpus);
+
+ workqueue_init();
+- kthread_init_global_worker();
+
+ init_mm_internals();
+
+diff --git a/kernel/kthread.c b/kernel/kthread.c
+index 6bbd391f0d9c..c8cf4731ced8 100644
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -20,7 +20,6 @@
+ #include <linux/freezer.h>
+ #include <linux/ptrace.h>
+ #include <linux/uaccess.h>
+-#include <linux/cgroup.h>
+ #include <trace/events/sched.h>
+
+ static DEFINE_SPINLOCK(kthread_create_lock);
+@@ -1245,19 +1244,6 @@ void kthread_destroy_worker(struct kthread_worker *worker)
+ }
+ EXPORT_SYMBOL(kthread_destroy_worker);
+
+-DEFINE_KTHREAD_WORKER(kthread_global_worker);
+-EXPORT_SYMBOL(kthread_global_worker);
+-
+-__init void kthread_init_global_worker(void)
+-{
+- kthread_global_worker.task = kthread_create(kthread_worker_fn,
+- &kthread_global_worker,
+- "kswork");
+- if (WARN_ON(IS_ERR(kthread_global_worker.task)))
+- return;
+- wake_up_process(kthread_global_worker.task);
+-}
+-
+ #ifdef CONFIG_BLK_CGROUP
+ /**
+ * kthread_associate_blkcg - associate blkcg to current kthread
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 3b0f62be3ece..1d4d4780dd79 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -3612,6 +3612,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
+ {
+ if (!tsk->state)
+ return;
++
+ /*
+ * If a worker went to sleep, notify and ask workqueue whether
+ * it wants to wake up a task to maintain concurrency.
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index bbc408f24f5d..eb2db7e6a241 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -748,29 +748,6 @@ static void hrtimer_switch_to_hres(void)
+ retrigger_next_event(NULL);
+ }
+
+-#ifdef CONFIG_PREEMPT_RT_FULL
+-
+-static struct swork_event clock_set_delay_work;
+-
+-static void run_clock_set_delay(struct swork_event *event)
+-{
+- clock_was_set();
+-}
+-
+-void clock_was_set_delayed(void)
+-{
+- swork_queue(&clock_set_delay_work);
+-}
+-
+-static __init int create_clock_set_delay_thread(void)
+-{
+- WARN_ON(swork_get());
+- INIT_SWORK(&clock_set_delay_work, run_clock_set_delay);
+- return 0;
+-}
+-early_initcall(create_clock_set_delay_thread);
+-#else /* PREEMPT_RT_FULL */
+-
+ static void clock_was_set_work(struct work_struct *work)
+ {
+ clock_was_set();
+@@ -786,7 +763,6 @@ void clock_was_set_delayed(void)
+ {
+ schedule_work(&hrtimer_work);
+ }
+-#endif
+
+ #else
+
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 91f5696cf335..4ed22776b2ee 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -50,8 +50,6 @@
+ #include <linux/sched/isolation.h>
+ #include <linux/nmi.h>
+ #include <linux/kvm_para.h>
+-#include <linux/locallock.h>
+-#include <linux/delay.h>
+
+ #include "workqueue_internal.h"
+
+@@ -126,11 +124,6 @@ enum {
+ * cpu or grabbing pool->lock is enough for read access. If
+ * POOL_DISASSOCIATED is set, it's identical to L.
+ *
+- * On RT we need the extra protection via rt_lock_idle_list() for
+- * the list manipulations against read access from
+- * wq_worker_sleeping(). All other places are nicely serialized via
+- * pool->lock.
+- *
+ * A: wq_pool_attach_mutex protected.
+ *
+ * PL: wq_pool_mutex protected.
+@@ -152,7 +145,7 @@ enum {
+ /* struct worker is defined in workqueue_internal.h */
+
+ struct worker_pool {
+- spinlock_t lock; /* the pool lock */
++ raw_spinlock_t lock; /* the pool lock */
+ int cpu; /* I: the associated cpu */
+ int node; /* I: the associated node ID */
+ int id; /* I: pool ID */
+@@ -305,8 +298,8 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
+
+ static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
+ static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
+-static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
+-static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
++static DEFINE_RAW_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
++static DECLARE_SWAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
+
+ static LIST_HEAD(workqueues); /* PR: list of all workqueues */
+ static bool workqueue_freezing; /* PL: have wqs started freezing? */
+@@ -358,8 +351,6 @@ EXPORT_SYMBOL_GPL(system_power_efficient_wq);
+ struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
+ EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
+
+-static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock);
+-
+ static int worker_thread(void *__worker);
+ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
+
+@@ -436,31 +427,6 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
+ if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
+ else
+
+-#ifdef CONFIG_PREEMPT_RT_BASE
+-static inline void rt_lock_idle_list(struct worker_pool *pool)
+-{
+- preempt_disable();
+-}
+-static inline void rt_unlock_idle_list(struct worker_pool *pool)
+-{
+- preempt_enable();
+-}
+-static inline void sched_lock_idle_list(struct worker_pool *pool) { }
+-static inline void sched_unlock_idle_list(struct worker_pool *pool) { }
+-#else
+-static inline void rt_lock_idle_list(struct worker_pool *pool) { }
+-static inline void rt_unlock_idle_list(struct worker_pool *pool) { }
+-static inline void sched_lock_idle_list(struct worker_pool *pool)
+-{
+- spin_lock_irq(&pool->lock);
+-}
+-static inline void sched_unlock_idle_list(struct worker_pool *pool)
+-{
+- spin_unlock_irq(&pool->lock);
+-}
+-#endif
+-
+-
+ #ifdef CONFIG_DEBUG_OBJECTS_WORK
+
+ static struct debug_obj_descr work_debug_descr;
+@@ -863,20 +829,14 @@ static struct worker *first_idle_worker(struct worker_pool *pool)
+ * Wake up the first idle worker of @pool.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock).
++ * raw_spin_lock_irq(pool->lock).
+ */
+ static void wake_up_worker(struct worker_pool *pool)
+ {
+- struct worker *worker;
+-
+- rt_lock_idle_list(pool);
+-
+- worker = first_idle_worker(pool);
++ struct worker *worker = first_idle_worker(pool);
+
+ if (likely(worker))
+ wake_up_process(worker->task);
+-
+- rt_unlock_idle_list(pool);
+ }
+
+ /**
+@@ -905,7 +865,7 @@ void wq_worker_running(struct task_struct *task)
+ */
+ void wq_worker_sleeping(struct task_struct *task)
+ {
+- struct worker *worker = kthread_data(task);
++ struct worker *next, *worker = kthread_data(task);
+ struct worker_pool *pool;
+
+ /*
+@@ -922,18 +882,26 @@ void wq_worker_sleeping(struct task_struct *task)
+ return;
+
+ worker->sleeping = 1;
++ raw_spin_lock_irq(&pool->lock);
+
+ /*
+ * The counterpart of the following dec_and_test, implied mb,
+ * worklist not empty test sequence is in insert_work().
+ * Please read comment there.
++ *
++ * NOT_RUNNING is clear. This means that we're bound to and
++ * running on the local cpu w/ rq lock held and preemption
++ * disabled, which in turn means that none else could be
++ * manipulating idle_list, so dereferencing idle_list without pool
++ * lock is safe.
+ */
+ if (atomic_dec_and_test(&pool->nr_running) &&
+ !list_empty(&pool->worklist)) {
+- sched_lock_idle_list(pool);
+- wake_up_worker(pool);
+- sched_unlock_idle_list(pool);
++ next = first_idle_worker(pool);
++ if (next)
++ wake_up_process(next->task);
+ }
++ raw_spin_unlock_irq(&pool->lock);
+ }
+
+ /**
+@@ -944,7 +912,7 @@ void wq_worker_sleeping(struct task_struct *task)
+ * Set @flags in @worker->flags and adjust nr_running accordingly.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock)
++ * raw_spin_lock_irq(pool->lock)
+ */
+ static inline void worker_set_flags(struct worker *worker, unsigned int flags)
+ {
+@@ -969,7 +937,7 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags)
+ * Clear @flags in @worker->flags and adjust nr_running accordingly.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock)
++ * raw_spin_lock_irq(pool->lock)
+ */
+ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
+ {
+@@ -1017,7 +985,7 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
+ * actually occurs, it should be easy to locate the culprit work function.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock).
++ * raw_spin_lock_irq(pool->lock).
+ *
+ * Return:
+ * Pointer to worker which is executing @work if found, %NULL
+@@ -1052,7 +1020,7 @@ static struct worker *find_worker_executing_work(struct worker_pool *pool,
+ * nested inside outer list_for_each_entry_safe().
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock).
++ * raw_spin_lock_irq(pool->lock).
+ */
+ static void move_linked_works(struct work_struct *work, struct list_head *head,
+ struct work_struct **nextp)
+@@ -1130,11 +1098,9 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
+ * As both pwqs and pools are RCU protected, the
+ * following lock operations are safe.
+ */
+- rcu_read_lock();
+- local_spin_lock_irq(pendingb_lock, &pwq->pool->lock);
++ raw_spin_lock_irq(&pwq->pool->lock);
+ put_pwq(pwq);
+- local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock);
+- rcu_read_unlock();
++ raw_spin_unlock_irq(&pwq->pool->lock);
+ }
+ }
+
+@@ -1167,7 +1133,7 @@ static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
+ * decrement nr_in_flight of its pwq and handle workqueue flushing.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock).
++ * raw_spin_lock_irq(pool->lock).
+ */
+ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
+ {
+@@ -1238,7 +1204,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
+ struct worker_pool *pool;
+ struct pool_workqueue *pwq;
+
+- local_lock_irqsave(pendingb_lock, *flags);
++ local_irq_save(*flags);
+
+ /* try to steal the timer if it exists */
+ if (is_dwork) {
+@@ -1266,7 +1232,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
+ if (!pool)
+ goto fail;
+
+- spin_lock(&pool->lock);
++ raw_spin_lock(&pool->lock);
+ /*
+ * work->data is guaranteed to point to pwq only while the work
+ * item is queued on pwq->wq, and both updating work->data to point
+@@ -1295,17 +1261,17 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
+ /* work->data points to pwq iff queued, point to pool */
+ set_work_pool_and_keep_pending(work, pool->id);
+
+- spin_unlock(&pool->lock);
++ raw_spin_unlock(&pool->lock);
+ rcu_read_unlock();
+ return 1;
+ }
+- spin_unlock(&pool->lock);
++ raw_spin_unlock(&pool->lock);
+ fail:
+ rcu_read_unlock();
+- local_unlock_irqrestore(pendingb_lock, *flags);
++ local_irq_restore(*flags);
+ if (work_is_canceling(work))
+ return -ENOENT;
+- cpu_chill();
++ cpu_relax();
+ return -EAGAIN;
+ }
+
+@@ -1320,7 +1286,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
+ * work_struct flags.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock).
++ * raw_spin_lock_irq(pool->lock).
+ */
+ static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
+ struct list_head *head, unsigned int extra_flags)
+@@ -1407,13 +1373,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+ * queued or lose PENDING. Grabbing PENDING and queueing should
+ * happen with IRQ disabled.
+ */
+-#ifndef CONFIG_PREEMPT_RT_FULL
+- /*
+- * nort: On RT the "interrupts-disabled" rule has been replaced with
+- * pendingb_lock.
+- */
+ lockdep_assert_irqs_disabled();
+-#endif
+
+
+ /* if draining, only works from the same workqueue are allowed */
+@@ -1442,7 +1402,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+ if (last_pool && last_pool != pwq->pool) {
+ struct worker *worker;
+
+- spin_lock(&last_pool->lock);
++ raw_spin_lock(&last_pool->lock);
+
+ worker = find_worker_executing_work(last_pool, work);
+
+@@ -1450,11 +1410,11 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+ pwq = worker->current_pwq;
+ } else {
+ /* meh... not running there, queue here */
+- spin_unlock(&last_pool->lock);
+- spin_lock(&pwq->pool->lock);
++ raw_spin_unlock(&last_pool->lock);
++ raw_spin_lock(&pwq->pool->lock);
+ }
+ } else {
+- spin_lock(&pwq->pool->lock);
++ raw_spin_lock(&pwq->pool->lock);
+ }
+
+ /*
+@@ -1467,7 +1427,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+ */
+ if (unlikely(!pwq->refcnt)) {
+ if (wq->flags & WQ_UNBOUND) {
+- spin_unlock(&pwq->pool->lock);
++ raw_spin_unlock(&pwq->pool->lock);
+ cpu_relax();
+ goto retry;
+ }
+@@ -1500,7 +1460,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+ insert_work(pwq, work, worklist, work_flags);
+
+ out:
+- spin_unlock(&pwq->pool->lock);
++ raw_spin_unlock(&pwq->pool->lock);
+ rcu_read_unlock();
+ }
+
+@@ -1521,14 +1481,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
+ bool ret = false;
+ unsigned long flags;
+
+- local_lock_irqsave(pendingb_lock,flags);
++ local_irq_save(flags);
+
+ if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+ __queue_work(cpu, wq, work);
+ ret = true;
+ }
+
+- local_unlock_irqrestore(pendingb_lock, flags);
++ local_irq_restore(flags);
+ return ret;
+ }
+ EXPORT_SYMBOL(queue_work_on);
+@@ -1536,12 +1496,11 @@ EXPORT_SYMBOL(queue_work_on);
+ void delayed_work_timer_fn(struct timer_list *t)
+ {
+ struct delayed_work *dwork = from_timer(dwork, t, timer);
++ unsigned long flags;
+
+- /* XXX */
+- /* local_lock(pendingb_lock); */
+- /* should have been called from irqsafe timer with irq already off */
++ local_irq_save(flags);
+ __queue_work(dwork->cpu, dwork->wq, &dwork->work);
+- /* local_unlock(pendingb_lock); */
++ local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL(delayed_work_timer_fn);
+
+@@ -1596,14 +1555,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+ unsigned long flags;
+
+ /* read the comment in __queue_work() */
+- local_lock_irqsave(pendingb_lock, flags);
++ local_irq_save(flags);
+
+ if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+ __queue_delayed_work(cpu, wq, dwork, delay);
+ ret = true;
+ }
+
+- local_unlock_irqrestore(pendingb_lock, flags);
++ local_irq_restore(flags);
+ return ret;
+ }
+ EXPORT_SYMBOL(queue_delayed_work_on);
+@@ -1638,7 +1597,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
+
+ if (likely(ret >= 0)) {
+ __queue_delayed_work(cpu, wq, dwork, delay);
+- local_unlock_irqrestore(pendingb_lock, flags);
++ local_irq_restore(flags);
+ }
+
+ /* -ENOENT from try_to_grab_pending() becomes %true */
+@@ -1649,12 +1608,11 @@ EXPORT_SYMBOL_GPL(mod_delayed_work_on);
+ static void rcu_work_rcufn(struct rcu_head *rcu)
+ {
+ struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
+- unsigned long flags;
+
+ /* read the comment in __queue_work() */
+- local_lock_irqsave(pendingb_lock, flags);
++ local_irq_disable();
+ __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
+- local_unlock_irqrestore(pendingb_lock, flags);
++ local_irq_enable();
+ }
+
+ /**
+@@ -1689,7 +1647,7 @@ EXPORT_SYMBOL(queue_rcu_work);
+ * necessary.
+ *
+ * LOCKING:
+- * spin_lock_irq(pool->lock).
++ * raw_spin_lock_irq(pool->lock).
+ */
+ static void worker_enter_idle(struct worker *worker)
+ {
+@@ -1706,9 +1664,7 @@ static void worker_enter_idle(struct worker *worker)
+ worker->last_active = jiffies;
+
+ /* idle_list is LIFO */
+- rt_lock_idle_list(pool);
+ list_add(&worker->entry, &pool->idle_list);
+- rt_unlock_idle_list(pool);
+
+ if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
+ mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
+@@ -1731,7 +1687,7 @@ static void worker_enter_idle(struct worker *worker)
+ * @worker is leaving idle state. Update stats.
+ *
+ * LOCKING:
+- * spin_lock_irq(pool->lock).
++ * raw_spin_lock_irq(pool->lock).
+ */
+ static void worker_leave_idle(struct worker *worker)
+ {
+@@ -1741,9 +1697,7 @@ static void worker_leave_idle(struct worker *worker)
+ return;
+ worker_clr_flags(worker, WORKER_IDLE);
+ pool->nr_idle--;
+- rt_lock_idle_list(pool);
+ list_del_init(&worker->entry);
+- rt_unlock_idle_list(pool);
+ }
+
+ static struct worker *alloc_worker(int node)
+@@ -1868,11 +1822,11 @@ static struct worker *create_worker(struct worker_pool *pool)
+ worker_attach_to_pool(worker, pool);
+
+ /* start the newly created worker */
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+ worker->pool->nr_workers++;
+ worker_enter_idle(worker);
+ wake_up_process(worker->task);
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+
+ return worker;
+
+@@ -1891,7 +1845,7 @@ static struct worker *create_worker(struct worker_pool *pool)
+ * be idle.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock).
++ * raw_spin_lock_irq(pool->lock).
+ */
+ static void destroy_worker(struct worker *worker)
+ {
+@@ -1908,9 +1862,7 @@ static void destroy_worker(struct worker *worker)
+ pool->nr_workers--;
+ pool->nr_idle--;
+
+- rt_lock_idle_list(pool);
+ list_del_init(&worker->entry);
+- rt_unlock_idle_list(pool);
+ worker->flags |= WORKER_DIE;
+ wake_up_process(worker->task);
+ }
+@@ -1919,7 +1871,7 @@ static void idle_worker_timeout(struct timer_list *t)
+ {
+ struct worker_pool *pool = from_timer(pool, t, idle_timer);
+
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+
+ while (too_many_workers(pool)) {
+ struct worker *worker;
+@@ -1937,7 +1889,7 @@ static void idle_worker_timeout(struct timer_list *t)
+ destroy_worker(worker);
+ }
+
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ }
+
+ static void send_mayday(struct work_struct *work)
+@@ -1968,8 +1920,8 @@ static void pool_mayday_timeout(struct timer_list *t)
+ struct worker_pool *pool = from_timer(pool, t, mayday_timer);
+ struct work_struct *work;
+
+- spin_lock_irq(&pool->lock);
+- spin_lock(&wq_mayday_lock); /* for wq->maydays */
++ raw_spin_lock_irq(&pool->lock);
++ raw_spin_lock(&wq_mayday_lock); /* for wq->maydays */
+
+ if (need_to_create_worker(pool)) {
+ /*
+@@ -1982,8 +1934,8 @@ static void pool_mayday_timeout(struct timer_list *t)
+ send_mayday(work);
+ }
+
+- spin_unlock(&wq_mayday_lock);
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock(&wq_mayday_lock);
++ raw_spin_unlock_irq(&pool->lock);
+
+ mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
+ }
+@@ -2002,7 +1954,7 @@ static void pool_mayday_timeout(struct timer_list *t)
+ * may_start_working() %true.
+ *
+ * LOCKING:
+- * spin_lock_irq(pool->lock) which may be released and regrabbed
++ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
+ * multiple times. Does GFP_KERNEL allocations. Called only from
+ * manager.
+ */
+@@ -2011,7 +1963,7 @@ __releases(&pool->lock)
+ __acquires(&pool->lock)
+ {
+ restart:
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+
+ /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
+ mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
+@@ -2027,7 +1979,7 @@ __acquires(&pool->lock)
+ }
+
+ del_timer_sync(&pool->mayday_timer);
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+ /*
+ * This is necessary even after a new worker was just successfully
+ * created as @pool->lock was dropped and the new worker might have
+@@ -2050,7 +2002,7 @@ __acquires(&pool->lock)
+ * and may_start_working() is true.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock) which may be released and regrabbed
++ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
+ * multiple times. Does GFP_KERNEL allocations.
+ *
+ * Return:
+@@ -2073,7 +2025,7 @@ static bool manage_workers(struct worker *worker)
+
+ pool->manager = NULL;
+ pool->flags &= ~POOL_MANAGER_ACTIVE;
+- wake_up(&wq_manager_wait);
++ swake_up_one(&wq_manager_wait);
+ return true;
+ }
+
+@@ -2089,7 +2041,7 @@ static bool manage_workers(struct worker *worker)
+ * call this function to process a work.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock) which is released and regrabbed.
++ * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
+ */
+ static void process_one_work(struct worker *worker, struct work_struct *work)
+ __releases(&pool->lock)
+@@ -2171,7 +2123,7 @@ __acquires(&pool->lock)
+ */
+ set_work_pool_and_clear_pending(work, pool->id);
+
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+
+ lock_map_acquire(&pwq->wq->lockdep_map);
+ lock_map_acquire(&lockdep_map);
+@@ -2226,7 +2178,7 @@ __acquires(&pool->lock)
+ */
+ cond_resched();
+
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+
+ /* clear cpu intensive status */
+ if (unlikely(cpu_intensive))
+@@ -2249,7 +2201,7 @@ __acquires(&pool->lock)
+ * fetches a work from the top and executes it.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock) which may be released and regrabbed
++ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
+ * multiple times.
+ */
+ static void process_scheduled_works(struct worker *worker)
+@@ -2291,11 +2243,11 @@ static int worker_thread(void *__worker)
+ /* tell the scheduler that this is a workqueue worker */
+ set_pf_worker(true);
+ woke_up:
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+
+ /* am I supposed to die? */
+ if (unlikely(worker->flags & WORKER_DIE)) {
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ WARN_ON_ONCE(!list_empty(&worker->entry));
+ set_pf_worker(false);
+
+@@ -2361,7 +2313,7 @@ static int worker_thread(void *__worker)
+ */
+ worker_enter_idle(worker);
+ __set_current_state(TASK_IDLE);
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ schedule();
+ goto woke_up;
+ }
+@@ -2415,7 +2367,7 @@ static int rescuer_thread(void *__rescuer)
+ should_stop = kthread_should_stop();
+
+ /* see whether any pwq is asking for help */
+- spin_lock_irq(&wq_mayday_lock);
++ raw_spin_lock_irq(&wq_mayday_lock);
+
+ while (!list_empty(&wq->maydays)) {
+ struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
+@@ -2427,11 +2379,11 @@ static int rescuer_thread(void *__rescuer)
+ __set_current_state(TASK_RUNNING);
+ list_del_init(&pwq->mayday_node);
+
+- spin_unlock_irq(&wq_mayday_lock);
++ raw_spin_unlock_irq(&wq_mayday_lock);
+
+ worker_attach_to_pool(rescuer, pool);
+
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+
+ /*
+ * Slurp in all works issued via this workqueue and
+@@ -2460,7 +2412,7 @@ static int rescuer_thread(void *__rescuer)
+ * incur MAYDAY_INTERVAL delay inbetween.
+ */
+ if (need_to_create_worker(pool)) {
+- spin_lock(&wq_mayday_lock);
++ raw_spin_lock(&wq_mayday_lock);
+ /*
+ * Queue iff we aren't racing destruction
+ * and somebody else hasn't queued it already.
+@@ -2469,7 +2421,7 @@ static int rescuer_thread(void *__rescuer)
+ get_pwq(pwq);
+ list_add_tail(&pwq->mayday_node, &wq->maydays);
+ }
+- spin_unlock(&wq_mayday_lock);
++ raw_spin_unlock(&wq_mayday_lock);
+ }
+ }
+
+@@ -2487,14 +2439,14 @@ static int rescuer_thread(void *__rescuer)
+ if (need_more_worker(pool))
+ wake_up_worker(pool);
+
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+
+ worker_detach_from_pool(rescuer);
+
+- spin_lock_irq(&wq_mayday_lock);
++ raw_spin_lock_irq(&wq_mayday_lock);
+ }
+
+- spin_unlock_irq(&wq_mayday_lock);
++ raw_spin_unlock_irq(&wq_mayday_lock);
+
+ if (should_stop) {
+ __set_current_state(TASK_RUNNING);
+@@ -2574,7 +2526,7 @@ static void wq_barrier_func(struct work_struct *work)
+ * underneath us, so we can't reliably determine pwq from @target.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock).
++ * raw_spin_lock_irq(pool->lock).
+ */
+ static void insert_wq_barrier(struct pool_workqueue *pwq,
+ struct wq_barrier *barr,
+@@ -2661,7 +2613,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
+ for_each_pwq(pwq, wq) {
+ struct worker_pool *pool = pwq->pool;
+
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+
+ if (flush_color >= 0) {
+ WARN_ON_ONCE(pwq->flush_color != -1);
+@@ -2678,7 +2630,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
+ pwq->work_color = work_color;
+ }
+
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ }
+
+ if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
+@@ -2878,9 +2830,9 @@ void drain_workqueue(struct workqueue_struct *wq)
+ for_each_pwq(pwq, wq) {
+ bool drained;
+
+- spin_lock_irq(&pwq->pool->lock);
++ raw_spin_lock_irq(&pwq->pool->lock);
+ drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
+- spin_unlock_irq(&pwq->pool->lock);
++ raw_spin_unlock_irq(&pwq->pool->lock);
+
+ if (drained)
+ continue;
+@@ -2916,7 +2868,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
+ return false;
+ }
+
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+ /* see the comment in try_to_grab_pending() with the same code */
+ pwq = get_work_pwq(work);
+ if (pwq) {
+@@ -2932,7 +2884,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
+ check_flush_dependency(pwq->wq, work);
+
+ insert_wq_barrier(pwq, barr, work, worker);
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+
+ /*
+ * Force a lock recursion deadlock when using flush_work() inside a
+@@ -2951,7 +2903,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
+ rcu_read_unlock();
+ return true;
+ already_gone:
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ rcu_read_unlock();
+ return false;
+ }
+@@ -3052,7 +3004,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
+
+ /* tell other tasks trying to grab @work to back off */
+ mark_work_canceling(work);
+- local_unlock_irqrestore(pendingb_lock, flags);
++ local_irq_restore(flags);
+
+ /*
+ * This allows canceling during early boot. We know that @work
+@@ -3113,10 +3065,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
+ */
+ bool flush_delayed_work(struct delayed_work *dwork)
+ {
+- local_lock_irq(pendingb_lock);
++ local_irq_disable();
+ if (del_timer_sync(&dwork->timer))
+ __queue_work(dwork->cpu, dwork->wq, &dwork->work);
+- local_unlock_irq(pendingb_lock);
++ local_irq_enable();
+ return flush_work(&dwork->work);
+ }
+ EXPORT_SYMBOL(flush_delayed_work);
+@@ -3154,7 +3106,7 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork)
+ return false;
+
+ set_work_pool_and_clear_pending(work, get_work_pool_id(work));
+- local_unlock_irqrestore(pendingb_lock, flags);
++ local_irq_restore(flags);
+ return ret;
+ }
+
+@@ -3264,7 +3216,7 @@ EXPORT_SYMBOL_GPL(execute_in_process_context);
+ *
+ * Undo alloc_workqueue_attrs().
+ */
+-void free_workqueue_attrs(struct workqueue_attrs *attrs)
++static void free_workqueue_attrs(struct workqueue_attrs *attrs)
+ {
+ if (attrs) {
+ free_cpumask_var(attrs->cpumask);
+@@ -3274,21 +3226,20 @@ void free_workqueue_attrs(struct workqueue_attrs *attrs)
+
+ /**
+ * alloc_workqueue_attrs - allocate a workqueue_attrs
+- * @gfp_mask: allocation mask to use
+ *
+ * Allocate a new workqueue_attrs, initialize with default settings and
+ * return it.
+ *
+ * Return: The allocated new workqueue_attr on success. %NULL on failure.
+ */
+-struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
++static struct workqueue_attrs *alloc_workqueue_attrs(void)
+ {
+ struct workqueue_attrs *attrs;
+
+- attrs = kzalloc(sizeof(*attrs), gfp_mask);
++ attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ goto fail;
+- if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask))
++ if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
+ goto fail;
+
+ cpumask_copy(attrs->cpumask, cpu_possible_mask);
+@@ -3345,7 +3296,7 @@ static bool wqattrs_equal(const struct workqueue_attrs *a,
+ */
+ static int init_worker_pool(struct worker_pool *pool)
+ {
+- spin_lock_init(&pool->lock);
++ raw_spin_lock_init(&pool->lock);
+ pool->id = -1;
+ pool->cpu = -1;
+ pool->node = NUMA_NO_NODE;
+@@ -3366,7 +3317,7 @@ static int init_worker_pool(struct worker_pool *pool)
+ pool->refcnt = 1;
+
+ /* shouldn't fail above this point */
+- pool->attrs = alloc_workqueue_attrs(GFP_KERNEL);
++ pool->attrs = alloc_workqueue_attrs();
+ if (!pool->attrs)
+ return -ENOMEM;
+ return 0;
+@@ -3431,15 +3382,15 @@ static void put_unbound_pool(struct worker_pool *pool)
+ * @pool's workers from blocking on attach_mutex. We're the last
+ * manager and @pool gets freed with the flag set.
+ */
+- spin_lock_irq(&pool->lock);
+- wait_event_lock_irq(wq_manager_wait,
++ raw_spin_lock_irq(&pool->lock);
++ swait_event_lock_irq(wq_manager_wait,
+ !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
+ pool->flags |= POOL_MANAGER_ACTIVE;
+
+ while ((worker = first_idle_worker(pool)))
+ destroy_worker(worker);
+ WARN_ON(pool->nr_workers || pool->nr_idle);
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+
+ mutex_lock(&wq_pool_attach_mutex);
+ if (!list_empty(&pool->workers))
+@@ -3599,7 +3550,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
+ return;
+
+ /* this function can be called during early boot w/ irq disabled */
+- spin_lock_irqsave(&pwq->pool->lock, flags);
++ raw_spin_lock_irqsave(&pwq->pool->lock, flags);
+
+ /*
+ * During [un]freezing, the caller is responsible for ensuring that
+@@ -3629,7 +3580,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
+ pwq->max_active = 0;
+ }
+
+- spin_unlock_irqrestore(&pwq->pool->lock, flags);
++ raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
+ }
+
+ /* initialize newly alloced @pwq which is associated with @wq and @pool */
+@@ -3802,8 +3753,8 @@ apply_wqattrs_prepare(struct workqueue_struct *wq,
+
+ ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);
+
+- new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
+- tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
++ new_attrs = alloc_workqueue_attrs();
++ tmp_attrs = alloc_workqueue_attrs();
+ if (!ctx || !new_attrs || !tmp_attrs)
+ goto out_free;
+
+@@ -3939,7 +3890,7 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
+ *
+ * Return: 0 on success and -errno on failure.
+ */
+-int apply_workqueue_attrs(struct workqueue_struct *wq,
++static int apply_workqueue_attrs(struct workqueue_struct *wq,
+ const struct workqueue_attrs *attrs)
+ {
+ int ret;
+@@ -3950,7 +3901,6 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
+
+ return ret;
+ }
+-EXPORT_SYMBOL_GPL(apply_workqueue_attrs);
+
+ /**
+ * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
+@@ -4028,9 +3978,9 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
+
+ use_dfl_pwq:
+ mutex_lock(&wq->mutex);
+- spin_lock_irq(&wq->dfl_pwq->pool->lock);
++ raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
+ get_pwq(wq->dfl_pwq);
+- spin_unlock_irq(&wq->dfl_pwq->pool->lock);
++ raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
+ old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
+ out_unlock:
+ mutex_unlock(&wq->mutex);
+@@ -4149,7 +4099,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
+ return NULL;
+
+ if (flags & WQ_UNBOUND) {
+- wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL);
++ wq->unbound_attrs = alloc_workqueue_attrs();
+ if (!wq->unbound_attrs)
+ goto err_free_wq;
+ }
+@@ -4236,9 +4186,9 @@ void destroy_workqueue(struct workqueue_struct *wq)
+ struct worker *rescuer = wq->rescuer;
+
+ /* this prevents new queueing */
+- spin_lock_irq(&wq_mayday_lock);
++ raw_spin_lock_irq(&wq_mayday_lock);
+ wq->rescuer = NULL;
+- spin_unlock_irq(&wq_mayday_lock);
++ raw_spin_unlock_irq(&wq_mayday_lock);
+
+ /* rescuer will empty maydays list before exiting */
+ kthread_stop(rescuer->task);
+@@ -4433,10 +4383,10 @@ unsigned int work_busy(struct work_struct *work)
+ rcu_read_lock();
+ pool = get_work_pool(work);
+ if (pool) {
+- spin_lock_irqsave(&pool->lock, flags);
++ raw_spin_lock_irqsave(&pool->lock, flags);
+ if (find_worker_executing_work(pool, work))
+ ret |= WORK_BUSY_RUNNING;
+- spin_unlock_irqrestore(&pool->lock, flags);
++ raw_spin_unlock_irqrestore(&pool->lock, flags);
+ }
+ rcu_read_unlock();
+
+@@ -4643,10 +4593,10 @@ void show_workqueue_state(void)
+ pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
+
+ for_each_pwq(pwq, wq) {
+- spin_lock_irqsave(&pwq->pool->lock, flags);
++ raw_spin_lock_irqsave(&pwq->pool->lock, flags);
+ if (pwq->nr_active || !list_empty(&pwq->delayed_works))
+ show_pwq(pwq);
+- spin_unlock_irqrestore(&pwq->pool->lock, flags);
++ raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
+ /*
+ * We could be printing a lot from atomic context, e.g.
+ * sysrq-t -> show_workqueue_state(). Avoid triggering
+@@ -4660,7 +4610,7 @@ void show_workqueue_state(void)
+ struct worker *worker;
+ bool first = true;
+
+- spin_lock_irqsave(&pool->lock, flags);
++ raw_spin_lock_irqsave(&pool->lock, flags);
+ if (pool->nr_workers == pool->nr_idle)
+ goto next_pool;
+
+@@ -4679,7 +4629,7 @@ void show_workqueue_state(void)
+ }
+ pr_cont("\n");
+ next_pool:
+- spin_unlock_irqrestore(&pool->lock, flags);
++ raw_spin_unlock_irqrestore(&pool->lock, flags);
+ /*
+ * We could be printing a lot from atomic context, e.g.
+ * sysrq-t -> show_workqueue_state(). Avoid triggering
+@@ -4709,7 +4659,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
+ struct worker_pool *pool = worker->pool;
+
+ if (pool) {
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+ /*
+ * ->desc tracks information (wq name or
+ * set_worker_desc()) for the latest execution. If
+@@ -4723,7 +4673,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
+ scnprintf(buf + off, size - off, "-%s",
+ worker->desc);
+ }
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ }
+ }
+
+@@ -4754,7 +4704,7 @@ static void unbind_workers(int cpu)
+
+ for_each_cpu_worker_pool(pool, cpu) {
+ mutex_lock(&wq_pool_attach_mutex);
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+
+ /*
+ * We've blocked all attach/detach operations. Make all workers
+@@ -4768,7 +4718,7 @@ static void unbind_workers(int cpu)
+
+ pool->flags |= POOL_DISASSOCIATED;
+
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ mutex_unlock(&wq_pool_attach_mutex);
+
+ /*
+@@ -4794,9 +4744,9 @@ static void unbind_workers(int cpu)
+ * worker blocking could lead to lengthy stalls. Kick off
+ * unbound chain execution of currently pending work items.
+ */
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+ wake_up_worker(pool);
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ }
+ }
+
+@@ -4823,7 +4773,7 @@ static void rebind_workers(struct worker_pool *pool)
+ WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
+ pool->attrs->cpumask) < 0);
+
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+
+ pool->flags &= ~POOL_DISASSOCIATED;
+
+@@ -4862,7 +4812,7 @@ static void rebind_workers(struct worker_pool *pool)
+ WRITE_ONCE(worker->flags, worker_flags);
+ }
+
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ }
+
+ /**
+@@ -5321,7 +5271,7 @@ static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
+
+ lockdep_assert_held(&wq_pool_mutex);
+
+- attrs = alloc_workqueue_attrs(GFP_KERNEL);
++ attrs = alloc_workqueue_attrs();
+ if (!attrs)
+ return NULL;
+
+@@ -5750,7 +5700,7 @@ static void __init wq_numa_init(void)
+ return;
+ }
+
+- wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL);
++ wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs();
+ BUG_ON(!wq_update_unbound_numa_attrs_buf);
+
+ /*
+@@ -5825,7 +5775,7 @@ int __init workqueue_init_early(void)
+ for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
+ struct workqueue_attrs *attrs;
+
+- BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
++ BUG_ON(!(attrs = alloc_workqueue_attrs()));
+ attrs->nice = std_nice[i];
+ unbound_std_wq_attrs[i] = attrs;
+
+@@ -5834,7 +5784,7 @@ int __init workqueue_init_early(void)
+ * guaranteed by max_active which is enforced by pwqs.
+ * Turn off NUMA so that dfl_pwq is used for all nodes.
+ */
+- BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
++ BUG_ON(!(attrs = alloc_workqueue_attrs()));
+ attrs->nice = std_nice[i];
+ attrs->no_numa = true;
+ ordered_wq_attrs[i] = attrs;