Diffstat:
-rw-r--r--  debian/patches-rt/0190-workqueue-Use-normal-rcu.patch  |  360
1 file changed, 360 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0190-workqueue-Use-normal-rcu.patch b/debian/patches-rt/0190-workqueue-Use-normal-rcu.patch
new file mode 100644
index 000000000..ca3010992
--- /dev/null
+++ b/debian/patches-rt/0190-workqueue-Use-normal-rcu.patch
@@ -0,0 +1,360 @@
+From 8a58c9e05c039f23a1192df034ad9f56961ba52d Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 24 Jul 2013 15:26:54 +0200
+Subject: [PATCH 190/347] workqueue: Use normal rcu
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+There is no need for sched_rcu. The undocumented reason why sched_rcu
+is used is to avoid a few explicit rcu_read_lock()/unlock() pairs by
+abusing the fact that sched_rcu reader side critical sections are also
+protected by preempt or irq disabled regions.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/workqueue.c | 95 +++++++++++++++++++++++++---------------------
+ 1 file changed, 52 insertions(+), 43 deletions(-)
+
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index b1bb6cb5802e..3e2f67b77ab8 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -128,7 +128,7 @@ enum {
+  *
+  * PL: wq_pool_mutex protected.
+  *
+- * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads.
++ * PR: wq_pool_mutex protected for writes. RCU protected for reads.
+  *
+  * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
+  *
+@@ -137,7 +137,7 @@ enum {
+  *
+  * WQ: wq->mutex protected.
+  *
+- * WR: wq->mutex protected for writes. Sched-RCU protected for reads.
++ * WR: wq->mutex protected for writes. RCU protected for reads.
+  *
+  * MD: wq_mayday_lock protected.
+  */
+@@ -184,7 +184,7 @@ struct worker_pool {
+ 	atomic_t		nr_running ____cacheline_aligned_in_smp;
+ 
+ 	/*
+-	 * Destruction of pool is sched-RCU protected to allow dereferences
++	 * Destruction of pool is RCU protected to allow dereferences
+ 	 * from get_work_pool().
+ 	 */
+ 	struct rcu_head		rcu;
+@@ -213,7 +213,7 @@ struct pool_workqueue {
+ 	/*
+ 	 * Release of unbound pwq is punted to system_wq. See put_pwq()
+ 	 * and pwq_unbound_release_workfn() for details. pool_workqueue
+-	 * itself is also sched-RCU protected so that the first pwq can be
++	 * itself is also RCU protected so that the first pwq can be
+ 	 * determined without grabbing wq->mutex.
+ 	 */
+ 	struct work_struct	unbound_release_work;
+@@ -358,20 +358,20 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
+ #include <trace/events/workqueue.h>
+ 
+ #define assert_rcu_or_pool_mutex()					\
+-	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&			\
++	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
+ 			 !lockdep_is_held(&wq_pool_mutex),		\
+-			 "sched RCU or wq_pool_mutex should be held")
++			 "RCU or wq_pool_mutex should be held")
+ 
+ #define assert_rcu_or_wq_mutex(wq)					\
+-	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&			\
++	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
+ 			 !lockdep_is_held(&wq->mutex),			\
+-			 "sched RCU or wq->mutex should be held")
++			 "RCU or wq->mutex should be held")
+ 
+ #define assert_rcu_or_wq_mutex_or_pool_mutex(wq)			\
+-	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&			\
++	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
+ 			 !lockdep_is_held(&wq->mutex) &&		\
+ 			 !lockdep_is_held(&wq_pool_mutex),		\
+-			 "sched RCU, wq->mutex or wq_pool_mutex should be held")
++			 "RCU, wq->mutex or wq_pool_mutex should be held")
+ 
+ #define for_each_cpu_worker_pool(pool, cpu)				\
+ 	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
+@@ -383,7 +383,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
+  * @pool: iteration cursor
+  * @pi: integer used for iteration
+  *
+- * This must be called either with wq_pool_mutex held or sched RCU read
++ * This must be called either with wq_pool_mutex held or RCU read
+  * locked. If the pool needs to be used beyond the locking in effect, the
+  * caller is responsible for guaranteeing that the pool stays online.
+  *
+@@ -415,7 +415,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
+  * @pwq: iteration cursor
+  * @wq: the target workqueue
+  *
+- * This must be called either with wq->mutex held or sched RCU read locked.
++ * This must be called either with wq->mutex held or RCU read locked.
+  * If the pwq needs to be used beyond the locking in effect, the caller is
+  * responsible for guaranteeing that the pwq stays online.
+  *
+@@ -551,7 +551,7 @@ static int worker_pool_assign_id(struct worker_pool *pool)
+  * @wq: the target workqueue
+  * @node: the node ID
+  *
+- * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU
++ * This must be called with any of wq_pool_mutex, wq->mutex or RCU
+  * read locked.
+  * If the pwq needs to be used beyond the locking in effect, the caller is
+  * responsible for guaranteeing that the pwq stays online.
+@@ -695,8 +695,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
+  * @work: the work item of interest
+  *
+  * Pools are created and destroyed under wq_pool_mutex, and allows read
+- * access under sched-RCU read lock. As such, this function should be
+- * called under wq_pool_mutex or with preemption disabled.
++ * access under RCU read lock. As such, this function should be
++ * called under wq_pool_mutex or inside of a rcu_read_lock() region.
+  *
+  * All fields of the returned pool are accessible as long as the above
+  * mentioned locking is in effect. If the returned pool needs to be used
+@@ -1101,7 +1101,7 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
+ {
+ 	if (pwq) {
+ 		/*
+-		 * As both pwqs and pools are sched-RCU protected, the
++		 * As both pwqs and pools are RCU protected, the
+ 		 * following lock operations are safe.
+ 		 */
+ 		spin_lock_irq(&pwq->pool->lock);
+@@ -1229,6 +1229,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
+ 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
+ 		return 0;
+ 
++	rcu_read_lock();
+ 	/*
+ 	 * The queueing is in progress, or it is already queued. Try to
+ 	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
+@@ -1267,10 +1268,12 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
+ 		set_work_pool_and_keep_pending(work, pool->id);
+ 
+ 		spin_unlock(&pool->lock);
++		rcu_read_unlock();
+ 		return 1;
+ 	}
+ 	spin_unlock(&pool->lock);
+ fail:
++	rcu_read_unlock();
+ 	local_irq_restore(*flags);
+ 	if (work_is_canceling(work))
+ 		return -ENOENT;
+@@ -1383,6 +1386,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+ 	if (unlikely(wq->flags & __WQ_DRAINING) &&
+ 	    WARN_ON_ONCE(!is_chained_work(wq)))
+ 		return;
++	rcu_read_lock();
+ retry:
+ 	/* pwq which will be used unless @work is executing elsewhere */
+ 	if (wq->flags & WQ_UNBOUND) {
+@@ -1441,10 +1445,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+ 	/* pwq determined, queue */
+ 	trace_workqueue_queue_work(req_cpu, pwq, work);
+ 
+-	if (WARN_ON(!list_empty(&work->entry))) {
+-		spin_unlock(&pwq->pool->lock);
+-		return;
+-	}
++	if (WARN_ON(!list_empty(&work->entry)))
++		goto out;
+ 
+ 	pwq->nr_in_flight[pwq->work_color]++;
+ 	work_flags = work_color_to_flags(pwq->work_color);
+@@ -1463,7 +1465,9 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+ 	debug_work_activate(work);
+ 	insert_work(pwq, work, worklist, work_flags);
+ 
++out:
+ 	spin_unlock(&pwq->pool->lock);
++	rcu_read_unlock();
+ }
+ 
+ /**
+@@ -2861,14 +2865,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
+ 
+ 	might_sleep();
+ 
+-	local_irq_disable();
++	rcu_read_lock();
+ 	pool = get_work_pool(work);
+ 	if (!pool) {
+-		local_irq_enable();
++		rcu_read_unlock();
+ 		return false;
+ 	}
+ 
+-	spin_lock(&pool->lock);
++	spin_lock_irq(&pool->lock);
+ 	/* see the comment in try_to_grab_pending() with the same code */
+ 	pwq = get_work_pwq(work);
+ 	if (pwq) {
+@@ -2900,10 +2904,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
+ 		lock_map_acquire(&pwq->wq->lockdep_map);
+ 		lock_map_release(&pwq->wq->lockdep_map);
+ 	}
+-
++	rcu_read_unlock();
+ 	return true;
+ already_gone:
+ 	spin_unlock_irq(&pool->lock);
++	rcu_read_unlock();
+ 	return false;
+ }
+ 
+@@ -3350,7 +3355,7 @@ static void rcu_free_pool(struct rcu_head *rcu)
+  * put_unbound_pool - put a worker_pool
+  * @pool: worker_pool to put
+  *
+- * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU
++ * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU
+  * safe manner. get_unbound_pool() calls this function on its failure path
+  * and this function should be able to release pools which went through,
+  * successfully or not, init_worker_pool().
+@@ -3404,8 +3409,8 @@ static void put_unbound_pool(struct worker_pool *pool)
+ 	del_timer_sync(&pool->idle_timer);
+ 	del_timer_sync(&pool->mayday_timer);
+ 
+-	/* sched-RCU protected to allow dereferences from get_work_pool() */
+-	call_rcu_sched(&pool->rcu, rcu_free_pool);
++	/* RCU protected to allow dereferences from get_work_pool() */
++	call_rcu(&pool->rcu, rcu_free_pool);
+ }
+ 
+ /**
+@@ -3518,14 +3523,14 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
+ 	put_unbound_pool(pool);
+ 	mutex_unlock(&wq_pool_mutex);
+ 
+-	call_rcu_sched(&pwq->rcu, rcu_free_pwq);
++	call_rcu(&pwq->rcu, rcu_free_pwq);
+ 
+ 	/*
+ 	 * If we're the last pwq going away, @wq is already dead and no one
+ 	 * is gonna access it anymore. Schedule RCU free.
+ 	 */
+ 	if (is_last)
+-		call_rcu_sched(&wq->rcu, rcu_free_wq);
++		call_rcu(&wq->rcu, rcu_free_wq);
+ }
+ 
+ /**
+@@ -4232,7 +4237,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
+ 		 * The base ref is never dropped on per-cpu pwqs. Directly
+ 		 * schedule RCU free.
+ 		 */
+-		call_rcu_sched(&wq->rcu, rcu_free_wq);
++		call_rcu(&wq->rcu, rcu_free_wq);
+ 	} else {
+ 		/*
+ 		 * We're the sole accessor of @wq at this point. Directly
+@@ -4342,7 +4347,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
+ 	struct pool_workqueue *pwq;
+ 	bool ret;
+ 
+-	rcu_read_lock_sched();
++	rcu_read_lock();
++	preempt_disable();
+ 
+ 	if (cpu == WORK_CPU_UNBOUND)
+ 		cpu = smp_processor_id();
+@@ -4353,7 +4359,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
+ 		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
+ 
+ 	ret = !list_empty(&pwq->delayed_works);
+-	rcu_read_unlock_sched();
++	preempt_enable();
++	rcu_read_unlock();
+ 
+ 	return ret;
+ }
+@@ -4379,15 +4386,15 @@ unsigned int work_busy(struct work_struct *work)
+ 	if (work_pending(work))
+ 		ret |= WORK_BUSY_PENDING;
+ 
+-	local_irq_save(flags);
++	rcu_read_lock();
+ 	pool = get_work_pool(work);
+ 	if (pool) {
+-		spin_lock(&pool->lock);
++		spin_lock_irqsave(&pool->lock, flags);
+ 		if (find_worker_executing_work(pool, work))
+ 			ret |= WORK_BUSY_RUNNING;
+-		spin_unlock(&pool->lock);
++		spin_unlock_irqrestore(&pool->lock, flags);
+ 	}
+-	local_irq_restore(flags);
++	rcu_read_unlock();
+ 
+ 	return ret;
+ }
+@@ -4572,7 +4579,7 @@ void show_workqueue_state(void)
+ 	unsigned long flags;
+ 	int pi;
+ 
+-	rcu_read_lock_sched();
++	rcu_read_lock();
+ 
+ 	pr_info("Showing busy workqueues and worker pools:\n");
+ 
+@@ -4637,7 +4644,7 @@ void show_workqueue_state(void)
+ 		touch_nmi_watchdog();
+ 	}
+ 
+-	rcu_read_unlock_sched();
++	rcu_read_unlock();
+ }
+ 
+ /* used to show worker information through /proc/PID/{comm,stat,status} */
+@@ -5024,16 +5031,16 @@ bool freeze_workqueues_busy(void)
+ 		 * nr_active is monotonically decreasing. It's safe
+ 		 * to peek without lock.
+ 		 */
+-		rcu_read_lock_sched();
++		rcu_read_lock();
+ 		for_each_pwq(pwq, wq) {
+ 			WARN_ON_ONCE(pwq->nr_active < 0);
+ 			if (pwq->nr_active) {
+ 				busy = true;
+-				rcu_read_unlock_sched();
++				rcu_read_unlock();
+ 				goto out_unlock;
+ 			}
+ 		}
+-		rcu_read_unlock_sched();
++		rcu_read_unlock();
+ 	}
+ out_unlock:
+ 	mutex_unlock(&wq_pool_mutex);
+@@ -5235,7 +5242,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
+ 	const char *delim = "";
+ 	int node, written = 0;
+ 
+-	rcu_read_lock_sched();
++	get_online_cpus();
++	rcu_read_lock();
+ 	for_each_node(node) {
+ 		written += scnprintf(buf + written, PAGE_SIZE - written,
+ 				     "%s%d:%d", delim, node,
+@@ -5243,7 +5251,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
+ 		delim = " ";
+ 	}
+ 	written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
+-	rcu_read_unlock_sched();
++	rcu_read_unlock();
++	put_online_cpus();
+ 
+ 	return written;
+ }
+-- 
+2.36.1
+
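
Note on the pattern. The hunks above all instantiate one idiom: a reader that previously leaned on a preempt- or irq-disabled region acting as an implicit sched-RCU critical section now takes an explicit rcu_read_lock(), and the matching free path moves from call_rcu_sched() to call_rcu(). A minimal, self-contained sketch of that idiom follows; the names (my_pool, current_pool, pools_lock) are hypothetical stand-ins for worker_pool, the pwq pointers and wq_pool_mutex, not kernel API.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_pool {
	int		id;
	struct rcu_head	rcu;	/* deferred destruction, as with worker_pool */
};

static struct my_pool __rcu *current_pool;
static DEFINE_SPINLOCK(pools_lock);	/* writer-side lock, like wq_pool_mutex */

/* Reader: explicit RCU critical section (the "after" side of the patch). */
static int my_pool_peek_id(void)
{
	struct my_pool *pool;
	int id = -1;

	rcu_read_lock();	/* was: implied by a preempt/irq-off region */
	pool = rcu_dereference(current_pool);
	if (pool)
		id = pool->id;
	rcu_read_unlock();
	return id;
}

static void my_pool_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct my_pool, rcu));
}

/* Updater: publish a replacement and defer freeing the old object. */
static void my_pool_replace(struct my_pool *new_pool)
{
	struct my_pool *old;

	spin_lock(&pools_lock);
	old = rcu_dereference_protected(current_pool,
					lockdep_is_held(&pools_lock));
	rcu_assign_pointer(current_pool, new_pool);
	spin_unlock(&pools_lock);

	if (old)
		call_rcu(&old->rcu, my_pool_free_rcu);	/* was: call_rcu_sched() */
}

The reason this matters on PREEMPT_RT: spinlocks become sleeping locks, so a spin_lock()ed or irq-off region no longer disables preemption and can no longer double as a sched-RCU reader. The explicit rcu_read_lock() keeps the dereference safe on both RT and mainline kernels, which is why the patch drops the undocumented sched-RCU dependency.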
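
One place the conversion is not purely mechanical is workqueue_congested(): sched-RCU pinned the task to a CPU as a side effect, so smp_processor_id() was safe inside the read section, whereas normal RCU permits preemption there. The patch therefore adds a separate preempt_disable()/preempt_enable() pair around only the CPU-dependent part. A sketch of that split, again with hypothetical names (my_stat, my_stat_this_cpu):

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(int, my_stat);

/*
 * Reader that combines an RCU-protected lookup with a per-CPU access,
 * mirroring the workqueue_congested() hunk: RCU alone no longer keeps
 * smp_processor_id() stable, so preemption is disabled separately and
 * only for the few instructions that need it.
 */
static int my_stat_this_cpu(void)
{
	int cpu, val;

	rcu_read_lock();	/* covers any RCU-managed dereferences */
	preempt_disable();	/* only to keep smp_processor_id() stable */
	cpu = smp_processor_id();
	val = per_cpu(my_stat, cpu);
	preempt_enable();
	rcu_read_unlock();

	return val;
}

Keeping the preempt-off window minimal is the point of the split: the RCU section can be arbitrarily long without hurting latency, while the CPU-stable section stays a few instructions wide.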