author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-08 03:43:38 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-08 03:43:38 +0000
commit    | 1ff870768c5cacf4bdc05cce822ac61837666b0f (patch)
tree      | 94a1b1fd6d42485c062620781ed0ac16e05bf60c /debian/patches-rt/0284-workqueue-rework.patch
parent    | Merging upstream version 4.19.269. (diff)
Adding debian version 4.19.269-1. (tag: debian/4.19.269-1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0284-workqueue-rework.patch')
-rw-r--r-- | debian/patches-rt/0284-workqueue-rework.patch | 72
1 file changed, 36 insertions(+), 36 deletions(-)
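This is a metadata-only refresh of the patch against the 4.19.269 base: the diffstat balances at 36 insertions against 36 deletions because every change replaces one line with its updated counterpart — the Subject's series count (284/342 becomes 284/351), the Origin commit reference, the index blob hashes of the touched files, and the line numbers in the embedded hunk headers. As a reading aid for the diff below: a unified hunk header such as @@ -3052,7 +3004,7 @@ means that seven lines starting at line 3052 of the unpatched kernel/workqueue.c are replaced by seven lines starting at line 3004 of the patched file. When the base release drifts (here by two lines), a refresh (e.g. via quilt refresh) rewrites only these coordinates and the blob hashes, leaving the patch payload untouched.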
diff --git a/debian/patches-rt/0284-workqueue-rework.patch b/debian/patches-rt/0284-workqueue-rework.patch
index ea00e194a..38e6a71e4 100644
--- a/debian/patches-rt/0284-workqueue-rework.patch
+++ b/debian/patches-rt/0284-workqueue-rework.patch
@@ -1,7 +1,7 @@
 From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 Date: Wed, 29 May 2019 18:52:27 +0200
-Subject: [PATCH 284/342] workqueue: rework
-Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=ef48b1fd6691fa627afcf72bba3295c66c23da41
+Subject: [PATCH 284/351] workqueue: rework
+Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=03a1d47bc6d2f2e32d7c26fae2809fd8050a8226
 
 [ Upstream commit d15a862f24df983458533aebd6fa207ecdd1095a ]
 
@@ -65,7 +65,7 @@ index a67a50dd714a..ed6ae335756d 100644
  /*
   * Init percpu_ref in atomic mode so that it's faster to shutdown.
 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
-index d24660961343..c31a76485c9c 100644
+index b8c2c469ce7e..12eb48980df7 100644
 --- a/drivers/block/loop.c
 +++ b/drivers/block/loop.c
 @@ -70,7 +70,7 @@
@@ -352,7 +352,7 @@ index 6bbd391f0d9c..c8cf4731ced8 100644
  /**
   * kthread_associate_blkcg - associate blkcg to current kthread
 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 3b0f62be3ece..1d4d4780dd79 100644
+index 5473824aee5b..ea275b4b3d0a 100644
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
 @@ -3612,6 +3612,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
@@ -406,7 +406,7 @@ index bbc408f24f5d..eb2db7e6a241 100644
  #else
 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
-index 91f5696cf335..4ed22776b2ee 100644
+index cb81c7767b17..c047a647146e 100644
 --- a/kernel/workqueue.c
 +++ b/kernel/workqueue.c
 @@ -50,8 +50,6 @@
@@ -1131,7 +1131,7 @@ index 91f5696cf335..4ed22776b2ee 100644
 	rcu_read_unlock();
 	return false;
 }
-@@ -3052,7 +3004,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
+@@ -3050,7 +3002,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 	/* tell other tasks trying to grab @work to back off */
 	mark_work_canceling(work);
-
+
 	/*
 	 * This allows canceling during early boot.  We know that @work
-@@ -3113,10 +3065,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
+@@ -3111,10 +3063,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
 	 */
 bool flush_delayed_work(struct delayed_work *dwork)
 {
-
-
+
+
 	return flush_work(&dwork->work);
 }
 EXPORT_SYMBOL(flush_delayed_work);
-@@ -3154,7 +3106,7 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork)
+@@ -3152,7 +3104,7 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork)
 		return false;
 	set_work_pool_and_clear_pending(work, get_work_pool_id(work));
-
+
 	return ret;
 }
-@@ -3264,7 +3216,7 @@ EXPORT_SYMBOL_GPL(execute_in_process_context);
+@@ -3262,7 +3214,7 @@ EXPORT_SYMBOL_GPL(execute_in_process_context);
 	 *
 	 * Undo alloc_workqueue_attrs().
 	 */
@@ -1171,7 +1171,7 @@ index 91f5696cf335..4ed22776b2ee 100644
 {
 	if (attrs) {
 		free_cpumask_var(attrs->cpumask);
-@@ -3274,21 +3226,20 @@ void free_workqueue_attrs(struct workqueue_attrs *attrs)
+@@ -3272,21 +3224,20 @@ void free_workqueue_attrs(struct workqueue_attrs *attrs)
 /**
  * alloc_workqueue_attrs - allocate a workqueue_attrs
-
+
 		goto fail;
 	cpumask_copy(attrs->cpumask, cpu_possible_mask);
-@@ -3345,7 +3296,7 @@ static bool wqattrs_equal(const struct workqueue_attrs *a,
+@@ -3343,7 +3294,7 @@ static bool wqattrs_equal(const struct workqueue_attrs *a,
  */
 static int init_worker_pool(struct worker_pool *pool)
 {
-
+
 	pool->id = -1;
 	pool->cpu = -1;
 	pool->node = NUMA_NO_NODE;
-@@ -3366,7 +3317,7 @@ static int init_worker_pool(struct worker_pool *pool)
+@@ -3364,7 +3315,7 @@ static int init_worker_pool(struct worker_pool *pool)
 	pool->refcnt = 1;
 	/* shouldn't fail above this point */
-
+
 	if (!pool->attrs)
 		return -ENOMEM;
 	return 0;
-@@ -3431,15 +3382,15 @@ static void put_unbound_pool(struct worker_pool *pool)
+@@ -3429,15 +3380,15 @@ static void put_unbound_pool(struct worker_pool *pool)
 	 * @pool's workers from blocking on attach_mutex.  We're the last
 	 * manager and @pool gets freed with the flag set.
 	 */
-
-
+
+
 	mutex_lock(&wq_pool_attach_mutex);
 	if (!list_empty(&pool->workers))
-@@ -3599,7 +3550,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
+@@ -3597,7 +3548,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
 		return;
 	/* this function can be called during early boot w/ irq disabled */
-
+
 	/*
 	 * During [un]freezing, the caller is responsible for ensuring that
-@@ -3629,7 +3580,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
+@@ -3627,7 +3578,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
 		pwq->max_active = 0;
 	}
-
+
 }
 /* initialize newly alloced @pwq which is associated with @wq and @pool */
-@@ -3802,8 +3753,8 @@ apply_wqattrs_prepare(struct workqueue_struct *wq,
+@@ -3800,8 +3751,8 @@ apply_wqattrs_prepare(struct workqueue_struct *wq,
 	ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);
-
-
+
+
 	if (!ctx || !new_attrs || !tmp_attrs)
 		goto out_free;
-@@ -3939,7 +3890,7 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
+@@ -3937,7 +3888,7 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
 	 *
 	 * Return: 0 on success and -errno on failure.
 	 */
@@ -1271,7 +1271,7 @@ index 91f5696cf335..4ed22776b2ee 100644
 			const struct workqueue_attrs *attrs)
 {
 	int ret;
-@@ -3950,7 +3901,6 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
+@@ -3948,7 +3899,6 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
 	return ret;
 }
-
 /**
  * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
-@@ -4028,9 +3978,9 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
+@@ -4026,9 +3976,9 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
 use_dfl_pwq:
 	mutex_lock(&wq->mutex);
-
+
-
+
 	old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
 out_unlock:
 	mutex_unlock(&wq->mutex);
-@@ -4149,7 +4099,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
+@@ -4147,7 +4097,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 		return NULL;
 	if (flags & WQ_UNBOUND) {
-
+
 		if (!wq->unbound_attrs)
 			goto err_free_wq;
 	}
-@@ -4236,9 +4186,9 @@ void destroy_workqueue(struct workqueue_struct *wq)
+@@ -4234,9 +4184,9 @@ void destroy_workqueue(struct workqueue_struct *wq)
 		struct worker *rescuer = wq->rescuer;
 		/* this prevents new queueing */
-
+
-
+
 		/* rescuer will empty maydays list before exiting */
 		kthread_stop(rescuer->task);
-@@ -4433,10 +4383,10 @@ unsigned int work_busy(struct work_struct *work)
+@@ -4431,10 +4381,10 @@ unsigned int work_busy(struct work_struct *work)
 	rcu_read_lock();
 	pool = get_work_pool(work);
 	if (pool) {
-
+
-
+
 	}
 	rcu_read_unlock();
-@@ -4643,10 +4593,10 @@ void show_workqueue_state(void)
+@@ -4641,10 +4591,10 @@ void show_workqueue_state(void)
 		pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
 		for_each_pwq(pwq, wq) {
-
+
-
+
 			/*
 			 * We could be printing a lot from atomic context, e.g.
 			 * sysrq-t -> show_workqueue_state(). Avoid triggering
-@@ -4660,7 +4610,7 @@ void show_workqueue_state(void)
+@@ -4658,7 +4608,7 @@ void show_workqueue_state(void)
 		struct worker *worker;
 		bool first = true;
-
+
 		if (pool->nr_workers == pool->nr_idle)
 			goto next_pool;
-@@ -4679,7 +4629,7 @@ void show_workqueue_state(void)
+@@ -4677,7 +4627,7 @@ void show_workqueue_state(void)
 		}
 		pr_cont("\n");
 	next_pool:
-
+
 		/*
 		 * We could be printing a lot from atomic context, e.g.
 		 * sysrq-t -> show_workqueue_state(). Avoid triggering
-@@ -4709,7 +4659,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
+@@ -4707,7 +4657,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
 		struct worker_pool *pool = worker->pool;
 		if (pool) {
-
+
 			/*
 			 * ->desc tracks information (wq name or
 			 * set_worker_desc()) for the latest execution.  If
-@@ -4723,7 +4673,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
+@@ -4721,7 +4671,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
 				scnprintf(buf + off, size - off, "-%s", worker->desc);
 			}
-
+
 		}
 	}
-@@ -4754,7 +4704,7 @@ static void unbind_workers(int cpu)
+@@ -4752,7 +4702,7 @@ static void unbind_workers(int cpu)
 	for_each_cpu_worker_pool(pool, cpu) {
 		mutex_lock(&wq_pool_attach_mutex);
-
+
 		/*
 		 * We've blocked all attach/detach operations. Make all workers
-@@ -4768,7 +4718,7 @@ static void unbind_workers(int cpu)
+@@ -4766,7 +4716,7 @@ static void unbind_workers(int cpu)
 		pool->flags |= POOL_DISASSOCIATED;
-
+
 		mutex_unlock(&wq_pool_attach_mutex);
 		/*
-@@ -4794,9 +4744,9 @@ static void unbind_workers(int cpu)
+@@ -4792,9 +4742,9 @@ static void unbind_workers(int cpu)
 		 * worker blocking could lead to lengthy stalls.  Kick off
 		 * unbound chain execution of currently pending work items.
 		 */
-
+
-
+
 	}
 }
-@@ -4823,7 +4773,7 @@ static void rebind_workers(struct worker_pool *pool)
+@@ -4821,7 +4771,7 @@ static void rebind_workers(struct worker_pool *pool)
 		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
 						  pool->attrs->cpumask) < 0);
-
+
 	pool->flags &= ~POOL_DISASSOCIATED;
-@@ -4862,7 +4812,7 @@ static void rebind_workers(struct worker_pool *pool)
+@@ -4860,7 +4810,7 @@ static void rebind_workers(struct worker_pool *pool)
 		WRITE_ONCE(worker->flags, worker_flags);
 	}
-
+
 }
 /**
-@@ -5321,7 +5271,7 @@ static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
+@@ -5319,7 +5269,7 @@ static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
 	lockdep_assert_held(&wq_pool_mutex);
-
+
 	if (!attrs)
 		return NULL;
-@@ -5750,7 +5700,7 @@ static void __init wq_numa_init(void)
+@@ -5748,7 +5698,7 @@ static void __init wq_numa_init(void)
 		return;
 	}
-
+
 	BUG_ON(!wq_update_unbound_numa_attrs_buf);
 	/*
-@@ -5825,7 +5775,7 @@ int __init workqueue_init_early(void)
+@@ -5823,7 +5773,7 @@ int __init workqueue_init_early(void)
 	for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
 		struct workqueue_attrs *attrs;
-
+
 		attrs->nice = std_nice[i];
 		unbound_std_wq_attrs[i] = attrs;
-@@ -5834,7 +5784,7 @@
+@@ -5832,7 +5782,7 @@
 	 * guaranteed by max_active which is enforced by pwqs.
 	 * Turn off NUMA so that dfl_pwq is used for all nodes.
 	 */
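Several of the renumbered hunks above sit in the workqueue_attrs paths of kernel/workqueue.c (alloc_workqueue_attrs(), free_workqueue_attrs(), apply_workqueue_attrs()). For orientation, here is a minimal, hedged sketch of how that API is driven on a vanilla 4.19 kernel, where alloc_workqueue_attrs() still takes a gfp_t argument; the rework carried by this patch drops that argument and, judging by the 7-to-6 hunk around apply_workqueue_attrs(), appears to also remove an export. The function name set_unbound_wq_nice() is illustrative and not from the patch.

#include <linux/gfp.h>
#include <linux/workqueue.h>

/*
 * Illustrative only: retune the worker nice level of an unbound
 * workqueue via workqueue_attrs, using the pre-rework (vanilla 4.19)
 * signature in which alloc_workqueue_attrs() takes a gfp_t.
 * apply_workqueue_attrs() rejects per-cpu workqueues with -EINVAL,
 * so @wq must have been created with WQ_UNBOUND.
 */
static int set_unbound_wq_nice(struct workqueue_struct *wq, int nice)
{
	struct workqueue_attrs *attrs;
	int ret;

	attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	attrs->nice = nice;			/* worker pool nice level */
	ret = apply_workqueue_attrs(wq, attrs);	/* swap in matching pwqs */

	free_workqueue_attrs(attrs);		/* attrs are copied internally */
	return ret;
}

After the rework the same sketch would call alloc_workqueue_attrs() with no argument; either way the attrs object is only a template — apply_workqueue_attrs() copies what it needs, so freeing it immediately afterwards is correct.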