author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:06:00 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:06:00 +0000
commit     b15a952c52a6825376d3e7f6c1bf5c886c6d8b74 (patch)
tree       1500f2f8f276908a36d8126cb632c0d6b1276764 /debian/patches-rt/0255-cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
parent     Adding upstream version 5.10.209. (diff)
download   linux-b15a952c52a6825376d3e7f6c1bf5c886c6d8b74.tar.xz
           linux-b15a952c52a6825376d3e7f6c1bf5c886c6d8b74.zip
Adding debian version 5.10.209-2. (debian/5.10.209-2)

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0255-cpuset-Convert-callback_lock-to-raw_spinlock_t.patch')
-rw-r--r--  debian/patches-rt/0255-cpuset-Convert-callback_lock-to-raw_spinlock_t.patch  367
1 file changed, 367 insertions, 0 deletions
diff --git a/debian/patches-rt/0255-cpuset-Convert-callback_lock-to-raw_spinlock_t.patch b/debian/patches-rt/0255-cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
new file mode 100644
index 000000000..bb6e6895a
--- /dev/null
+++ b/debian/patches-rt/0255-cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
@@ -0,0 +1,367 @@
+From ebc27a7d1ceef838efef4feecd2bcb490552f0ec Mon Sep 17 00:00:00 2001
+From: Mike Galbraith <efault@gmx.de>
+Date: Sun, 8 Jan 2017 09:32:25 +0100
+Subject: [PATCH 255/323] cpuset: Convert callback_lock to raw_spinlock_t
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+The two commits below add up to a cpuset might_sleep() splat for RT:
+
+8447a0fee974 cpuset: convert callback_mutex to a spinlock
+344736f29b35 cpuset: simplify cpuset_node_allowed API
+
+BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:995
+in_atomic(): 0, irqs_disabled(): 1, pid: 11718, name: cset
+CPU: 135 PID: 11718 Comm: cset Tainted: G E 4.10.0-rt1-rt #4
+Hardware name: Intel Corporation BRICKLAND/BRICKLAND, BIOS BRHSXSD1.86B.0056.R01.1409242327 09/24/2014
+Call Trace:
+ ? dump_stack+0x5c/0x81
+ ? ___might_sleep+0xf4/0x170
+ ? rt_spin_lock+0x1c/0x50
+ ? __cpuset_node_allowed+0x66/0xc0
+ ? ___slab_alloc+0x390/0x570 <disables IRQs>
+ ? anon_vma_fork+0x8f/0x140
+ ? copy_page_range+0x6cf/0xb00
+ ? anon_vma_fork+0x8f/0x140
+ ? __slab_alloc.isra.74+0x5a/0x81
+ ? anon_vma_fork+0x8f/0x140
+ ? kmem_cache_alloc+0x1b5/0x1f0
+ ? anon_vma_fork+0x8f/0x140
+ ? copy_process.part.35+0x1670/0x1ee0
+ ? _do_fork+0xdd/0x3f0
+ ? _do_fork+0xdd/0x3f0
+ ? do_syscall_64+0x61/0x170
+ ? entry_SYSCALL64_slow_path+0x25/0x25
+
+The later ensured that a NUMA box WILL take callback_lock in atomic
+context by removing the allocator and reclaim path __GFP_HARDWALL
+usage which prevented such contexts from taking callback_mutex.
+
+One option would be to reinstate __GFP_HARDWALL protections for
+RT, however, as the 8447a0fee974 changelog states:
+
+The callback_mutex is only used to synchronize reads/updates of cpusets'
+flags and cpu/node masks. These operations should always proceed fast so
+there's no reason why we can't use a spinlock instead of the mutex.
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Mike Galbraith <efault@gmx.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Luis Claudio R. Goncalves <lgoncalv@redhat.com>
+---
+ kernel/cgroup/cpuset.c | 82 +++++++++++++++++++++---------------------
+ 1 file changed, 41 insertions(+), 41 deletions(-)
+
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 195f9cccab20..fd371215b81f 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -368,7 +368,7 @@ void cpuset_unlock(void)
+ mutex_unlock(&cpuset_mutex);
+ }
+ 
+-static DEFINE_SPINLOCK(callback_lock);
++static DEFINE_RAW_SPINLOCK(callback_lock);
+ 
+ static struct workqueue_struct *cpuset_migrate_mm_wq;
+ 
+@@ -1316,7 +1316,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
+ * Newly added CPUs will be removed from effective_cpus and
+ * newly deleted ones will be added back to effective_cpus.
+ */
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ if (adding) {
+ cpumask_or(parent->subparts_cpus,
+ parent->subparts_cpus, tmp->addmask);
+@@ -1338,7 +1338,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
+ 
+ if (cpuset->partition_root_state != new_prs)
+ cpuset->partition_root_state = new_prs;
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+ 
+ return cmd == partcmd_update;
+ }
+@@ -1441,7 +1441,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
+ continue;
+ rcu_read_unlock();
+ 
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ 
+ cpumask_copy(cp->effective_cpus, tmp->new_cpus);
+ if (cp->nr_subparts_cpus && (new_prs != PRS_ENABLED)) {
+@@ -1475,7 +1475,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
+ if (new_prs != cp->partition_root_state)
+ cp->partition_root_state = new_prs;
+ 
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+ 
+ WARN_ON(!is_in_v2_mode() &&
+ !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
+@@ -1603,7 +1603,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
+ return -EINVAL;
+ }
+ 
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
+ 
+ /*
+@@ -1613,7 +1613,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
+ cpumask_and(cs->subparts_cpus, cs->subparts_cpus, cs->cpus_allowed);
+ cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus);
+ }
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+ 
+ update_cpumasks_hier(cs, &tmp);
+ 
+@@ -1807,9 +1807,9 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
+ continue;
+ rcu_read_unlock();
+ 
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ cp->effective_mems = *new_mems;
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+ 
+ WARN_ON(!is_in_v2_mode() &&
+ !nodes_equal(cp->mems_allowed, cp->effective_mems));
+@@ -1877,9 +1877,9 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
+ if (retval < 0)
+ goto done;
+ 
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ cs->mems_allowed = trialcs->mems_allowed;
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+ 
+ /* use trialcs->mems_allowed as a temp variable */
+ update_nodemasks_hier(cs, &trialcs->mems_allowed);
+@@ -1970,9 +1970,9 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
+ spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
+ || (is_spread_page(cs) != is_spread_page(trialcs)));
+ 
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ cs->flags = trialcs->flags;
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+ 
+ if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
+ rebuild_sched_domains_locked();
+@@ -2058,9 +2058,9 @@ static int update_prstate(struct cpuset *cs, int new_prs)
+ rebuild_sched_domains_locked();
+ out:
+ if (!err) {
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ cs->partition_root_state = new_prs;
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+ }
+ 
+ free_cpumasks(NULL, &tmpmask);
+@@ -2527,7 +2527,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
+ cpuset_filetype_t type = seq_cft(sf)->private;
+ int ret = 0;
+ 
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ 
+ switch (type) {
+ case FILE_CPULIST:
+@@ -2549,7 +2549,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
+ ret = -EINVAL;
+ }
+ 
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+ return ret;
+ }
+ 
+@@ -2862,14 +2862,14 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
+ 
+ cpuset_inc();
+ 
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ if (is_in_v2_mode()) {
+ cpumask_copy(cs->effective_cpus, parent->effective_cpus);
+ cs->effective_mems = parent->effective_mems;
+ cs->use_parent_ecpus = true;
+ parent->child_ecpus_count++;
+ }
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+ 
+ if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
+ goto out_unlock;
+@@ -2896,12 +2896,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
+ }
+ rcu_read_unlock();
+ 
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ cs->mems_allowed = parent->mems_allowed;
+ cs->effective_mems = parent->mems_allowed;
+ cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
+ cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+ out_unlock:
+ mutex_unlock(&cpuset_mutex);
+ put_online_cpus();
+@@ -2957,7 +2957,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
+ static void cpuset_bind(struct cgroup_subsys_state *root_css)
+ {
+ mutex_lock(&cpuset_mutex);
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ 
+ if (is_in_v2_mode()) {
+ cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
+@@ -2968,7 +2968,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
+ top_cpuset.mems_allowed = top_cpuset.effective_mems;
+ }
+ 
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+ mutex_unlock(&cpuset_mutex);
+ }
+ 
+@@ -3063,12 +3063,12 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
+ {
+ bool is_empty;
+ 
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ cpumask_copy(cs->cpus_allowed, new_cpus);
+ cpumask_copy(cs->effective_cpus, new_cpus);
+ cs->mems_allowed = *new_mems;
+ cs->effective_mems = *new_mems;
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+ 
+ /*
+ * Don't call update_tasks_cpumask() if the cpuset becomes empty,
+@@ -3105,10 +3105,10 @@ hotplug_update_tasks(struct cpuset *cs,
+ if (nodes_empty(*new_mems))
+ *new_mems = parent_cs(cs)->effective_mems;
+ 
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ cpumask_copy(cs->effective_cpus, new_cpus);
+ cs->effective_mems = *new_mems;
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+ 
+ if (cpus_updated)
+ update_tasks_cpumask(cs);
+@@ -3175,10 +3175,10 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
+ if (is_partition_root(cs) && (cpumask_empty(&new_cpus) ||
+ (parent->partition_root_state == PRS_ERROR))) {
+ if (cs->nr_subparts_cpus) {
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ cs->nr_subparts_cpus = 0;
+ cpumask_clear(cs->subparts_cpus);
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+ compute_effective_cpumask(&new_cpus, cs, parent);
+ }
+ 
+@@ -3192,9 +3192,9 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
+ cpumask_empty(&new_cpus)) {
+ update_parent_subparts_cpumask(cs, partcmd_disable,
+ NULL, tmp);
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ cs->partition_root_state = PRS_ERROR;
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+ }
+ cpuset_force_rebuild();
+ }
+@@ -3274,7 +3274,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
+ 
+ /* synchronize cpus_allowed to cpu_active_mask */
+ if (cpus_updated) {
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ if (!on_dfl)
+ cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
+ /*
+@@ -3294,17 +3294,17 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
+ }
+ }
+ cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+ /* we don't mess with cpumasks of tasks in top_cpuset */
+ }
+ 
+ /* synchronize mems_allowed to N_MEMORY */
+ if (mems_updated) {
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ if (!on_dfl)
+ top_cpuset.mems_allowed = new_mems;
+ top_cpuset.effective_mems = new_mems;
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+ update_tasks_nodemask(&top_cpuset);
+ }
+ 
+@@ -3408,11 +3408,11 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
+ {
+ unsigned long flags;
+ 
+- spin_lock_irqsave(&callback_lock, flags);
++ raw_spin_lock_irqsave(&callback_lock, flags);
+ rcu_read_lock();
+ guarantee_online_cpus(task_cs(tsk), pmask);
+ rcu_read_unlock();
+- spin_unlock_irqrestore(&callback_lock, flags);
++ raw_spin_unlock_irqrestore(&callback_lock, flags);
+ }
+ 
+ /**
+@@ -3473,11 +3473,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
+ nodemask_t mask;
+ unsigned long flags;
+ 
+- spin_lock_irqsave(&callback_lock, flags);
++ raw_spin_lock_irqsave(&callback_lock, flags);
+ rcu_read_lock();
+ guarantee_online_mems(task_cs(tsk), &mask);
+ rcu_read_unlock();
+- spin_unlock_irqrestore(&callback_lock, flags);
++ raw_spin_unlock_irqrestore(&callback_lock, flags);
+ 
+ return mask;
+ }
+@@ -3569,14 +3569,14 @@ bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
+ return true;
+ 
+ /* Not hardwall and node outside mems_allowed: scan up cpusets */
+- spin_lock_irqsave(&callback_lock, flags);
++ raw_spin_lock_irqsave(&callback_lock, flags);
+ 
+ rcu_read_lock();
+ cs = nearest_hardwall_ancestor(task_cs(current));
+ allowed = node_isset(node, cs->mems_allowed);
+ rcu_read_unlock();
+ 
+- spin_unlock_irqrestore(&callback_lock, flags);
++ raw_spin_unlock_irqrestore(&callback_lock, flags);
+ return allowed;
+ }
+ 
+-- 
+2.43.0
+
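
As background for the conversion (and not part of the commit above), the sketch below illustrates the PREEMPT_RT locking rule the changelog relies on: with CONFIG_PREEMPT_RT, a plain spinlock_t is backed by a sleeping rtmutex, so taking it from a context that must not sleep (for example __cpuset_node_allowed() reached from the slab allocator with IRQs disabled, as in the splat quoted in the patch) trips might_sleep(), whereas a raw_spinlock_t keeps true spinning semantics. All identifiers in the sketch (example_lock, example_state, example_check_state) are made up for illustration.

/*
 * Illustrative sketch only -- not part of the commit above.
 * On CONFIG_PREEMPT_RT a spinlock_t becomes a sleeping rtmutex; a
 * raw_spinlock_t remains a real spinning lock and is therefore safe
 * to take in atomic context.
 */
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RAW_SPINLOCK(example_lock);	/* still a real spinlock on RT */
static int example_state;

/* Safe even if the caller already runs in atomic context. */
static bool example_check_state(void)
{
	unsigned long flags;
	bool ok;

	raw_spin_lock_irqsave(&example_lock, flags);	/* never sleeps */
	ok = (example_state != 0);
	raw_spin_unlock_irqrestore(&example_lock, flags);

	/*
	 * A spinlock_t taken here would map to rt_spin_lock() on
	 * PREEMPT_RT and trigger the might_sleep() splat shown in the
	 * changelog's call trace.
	 */
	return ok;
}

The trade-off is that a raw spinlock's critical sections must stay short and bounded even on RT; the changelog's observation that callback_lock only serializes quick reads and updates of cpuset flags and cpu/node masks is what makes the conversion acceptable.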