author     Daniel Baumann <daniel.baumann@progress-linux.org>    2024-05-08 04:15:14 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>    2024-05-08 04:15:14 +0000
commit     e549f10391e1fc78dab80e9b9ef524d214d4af40 (patch)
tree       5358015c2d151febc170684ed8ddf2011b3ac4af /debian/patches-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
parent     Merging upstream version 4.19.282. (diff)
download   linux-e549f10391e1fc78dab80e9b9ef524d214d4af40.tar.xz  linux-e549f10391e1fc78dab80e9b9ef524d214d4af40.zip
Adding debian version 4.19.282-1. (tag: debian/4.19.282-1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch')
-rw-r--r--   debian/patches-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch   80
1 file changed, 40 insertions, 40 deletions
diff --git a/debian/patches-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch b/debian/patches-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch index 1732dfed8..aae3ea1c6 100644 --- a/debian/patches-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch +++ b/debian/patches-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch @@ -1,11 +1,11 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Date: Tue, 4 Apr 2017 12:50:16 +0200 -Subject: [PATCH 026/351] kernel: sched: Provide a pointer to the valid CPU +Subject: [PATCH 026/353] kernel: sched: Provide a pointer to the valid CPU mask MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=b52972c9d7da09d15e44aad1448655ebdbb87553 +Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=ef5ef947f2c50bf428e512ab5499199c373b0311 In commit 4b53a3412d66 ("sched/core: Remove the tsk_nr_cpus_allowed() wrapper") the tsk_nr_cpus_allowed() wrapper was removed. There was not @@ -132,7 +132,7 @@ index a7c0f97e4b0d..1a08428eedcf 100644 out_unlock: diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c -index 0ca4185cc5e3..97bf5291130a 100644 +index 01c85d37c47a..ec938f26cc24 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1174,12 +1174,12 @@ static void mt_ase_fp_affinity(void) @@ -178,10 +178,10 @@ index a999a58ca331..d6410d0740ea 100644 return -EINVAL; } diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c -index 01ed0a667928..2c62de6b5bf1 100644 +index bb670249bebf..deced4654e66 100644 --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c -@@ -1039,7 +1039,7 @@ int hfi1_get_proc_affinity(int node) +@@ -1041,7 +1041,7 @@ int hfi1_get_proc_affinity(int node) struct hfi1_affinity_node *entry; cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask; const struct cpumask *node_mask, @@ -190,7 +190,7 @@ index 01ed0a667928..2c62de6b5bf1 100644 struct hfi1_affinity_node_list *affinity = &node_affinity; struct cpu_mask_set *set = &affinity->proc; -@@ -1047,7 +1047,7 @@ int hfi1_get_proc_affinity(int node) +@@ -1049,7 +1049,7 @@ int hfi1_get_proc_affinity(int node) * check whether process/context affinity has already * been set */ @@ -199,7 +199,7 @@ index 01ed0a667928..2c62de6b5bf1 100644 hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl", current->pid, current->comm, cpumask_pr_args(proc_mask)); -@@ -1058,7 +1058,7 @@ int hfi1_get_proc_affinity(int node) +@@ -1060,7 +1060,7 @@ int hfi1_get_proc_affinity(int node) cpu = cpumask_first(proc_mask); cpumask_set_cpu(cpu, &set->used); goto done; @@ -307,10 +307,10 @@ index 994ffe018120..d71054b95528 100644 .mm = NULL, .active_mm = &init_mm, diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c -index dcd5755b1fe2..35541e1dfad9 100644 +index c6d412cebc43..b5a1dca1d4d1 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c -@@ -2096,7 +2096,7 @@ static void cpuset_fork(struct task_struct *task) +@@ -2101,7 +2101,7 @@ static void cpuset_fork(struct task_struct *task) if (task_css_is_root(task, cpuset_cgrp_id)) return; @@ -333,10 +333,10 @@ index b65871600507..a18d695259af 100644 /* * One for us, one for whoever does the "release_task()" (usually diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index a03464249771..985cfa10fda5 100644 +index 8d5a9fa8a951..81f86841b841 
100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -878,7 +878,7 @@ static inline bool is_per_cpu_kthread(struct task_struct *p) +@@ -881,7 +881,7 @@ static inline bool is_per_cpu_kthread(struct task_struct *p) */ static inline bool is_cpu_allowed(struct task_struct *p, int cpu) { @@ -345,7 +345,7 @@ index a03464249771..985cfa10fda5 100644 return false; if (is_per_cpu_kthread(p)) -@@ -973,7 +973,7 @@ static int migration_cpu_stop(void *data) +@@ -976,7 +976,7 @@ static int migration_cpu_stop(void *data) local_irq_disable(); /* * We need to explicitly wake pending tasks before running @@ -354,7 +354,7 @@ index a03464249771..985cfa10fda5 100644 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test. */ sched_ttwu_pending(); -@@ -1004,7 +1004,7 @@ static int migration_cpu_stop(void *data) +@@ -1007,7 +1007,7 @@ static int migration_cpu_stop(void *data) */ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask) { @@ -363,7 +363,7 @@ index a03464249771..985cfa10fda5 100644 p->nr_cpus_allowed = cpumask_weight(new_mask); } -@@ -1074,7 +1074,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, +@@ -1077,7 +1077,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, goto out; } @@ -372,7 +372,7 @@ index a03464249771..985cfa10fda5 100644 goto out; dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask); -@@ -1237,10 +1237,10 @@ static int migrate_swap_stop(void *data) +@@ -1240,10 +1240,10 @@ static int migrate_swap_stop(void *data) if (task_cpu(arg->src_task) != arg->src_cpu) goto unlock; @@ -385,7 +385,7 @@ index a03464249771..985cfa10fda5 100644 goto unlock; __migrate_swap_task(arg->src_task, arg->dst_cpu); -@@ -1282,10 +1282,10 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p, +@@ -1285,10 +1285,10 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p, if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) goto out; @@ -398,7 +398,7 @@ index a03464249771..985cfa10fda5 100644 goto out; trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); -@@ -1430,7 +1430,7 @@ void kick_process(struct task_struct *p) +@@ -1433,7 +1433,7 @@ void kick_process(struct task_struct *p) EXPORT_SYMBOL_GPL(kick_process); /* @@ -407,7 +407,7 @@ index a03464249771..985cfa10fda5 100644 * * A few notes on cpu_active vs cpu_online: * -@@ -1470,14 +1470,14 @@ static int select_fallback_rq(int cpu, struct task_struct *p) +@@ -1473,14 +1473,14 @@ static int select_fallback_rq(int cpu, struct task_struct *p) for_each_cpu(dest_cpu, nodemask) { if (!cpu_active(dest_cpu)) continue; @@ -424,7 +424,7 @@ index a03464249771..985cfa10fda5 100644 if (!is_cpu_allowed(p, dest_cpu)) continue; -@@ -1521,7 +1521,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p) +@@ -1524,7 +1524,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p) } /* @@ -433,7 +433,7 @@ index a03464249771..985cfa10fda5 100644 */ static inline int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) -@@ -1531,11 +1531,11 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) +@@ -1534,11 +1534,11 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) if (p->nr_cpus_allowed > 1) cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); else @@ -447,7 +447,7 @@ index a03464249771..985cfa10fda5 100644 * CPU. * * Since this is common to all placement strategies, this lives here. 
-@@ -2406,7 +2406,7 @@ void wake_up_new_task(struct task_struct *p) +@@ -2409,7 +2409,7 @@ void wake_up_new_task(struct task_struct *p) #ifdef CONFIG_SMP /* * Fork balancing, do it here and not earlier because: @@ -456,7 +456,7 @@ index a03464249771..985cfa10fda5 100644 * - any previously selected CPU might disappear through hotplug * * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, -@@ -4323,7 +4323,7 @@ static int __sched_setscheduler(struct task_struct *p, +@@ -4325,7 +4325,7 @@ static int __sched_setscheduler(struct task_struct *p, * the entire root_domain to become SCHED_DEADLINE. We * will also fail if there's no bandwidth available. */ @@ -465,7 +465,7 @@ index a03464249771..985cfa10fda5 100644 rq->rd->dl_bw.bw == 0) { task_rq_unlock(rq, p, &rf); return -EPERM; -@@ -4922,7 +4922,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask) +@@ -4924,7 +4924,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask) goto out_unlock; raw_spin_lock_irqsave(&p->pi_lock, flags); @@ -474,7 +474,7 @@ index a03464249771..985cfa10fda5 100644 raw_spin_unlock_irqrestore(&p->pi_lock, flags); out_unlock: -@@ -5499,7 +5499,7 @@ int task_can_attach(struct task_struct *p, +@@ -5501,7 +5501,7 @@ int task_can_attach(struct task_struct *p, * allowed nodes is unnecessary. Thus, cpusets are not * applicable for such threads. This prevents checking for * success of set_cpus_allowed_ptr() on all attached tasks @@ -483,7 +483,7 @@ index a03464249771..985cfa10fda5 100644 */ if (p->flags & PF_NO_SETAFFINITY) { ret = -EINVAL; -@@ -5526,7 +5526,7 @@ int migrate_task_to(struct task_struct *p, int target_cpu) +@@ -5528,7 +5528,7 @@ int migrate_task_to(struct task_struct *p, int target_cpu) if (curr_cpu == target_cpu) return 0; @@ -492,7 +492,7 @@ index a03464249771..985cfa10fda5 100644 return -EINVAL; /* TODO: This is not properly updating schedstats */ -@@ -5665,7 +5665,7 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf) +@@ -5667,7 +5667,7 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf) put_prev_task(rq, next); /* @@ -572,7 +572,7 @@ index 29ed5d8d30d6..9243d0049714 100644 !dl_task(task) || !task_on_rq_queued(task))) { diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index e84a056f783f..16940416d526 100644 +index d19981efd980..bd9a375c45f4 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1691,7 +1691,7 @@ static void task_numa_compare(struct task_numa_env *env, @@ -593,7 +593,7 @@ index e84a056f783f..16940416d526 100644 continue; env->dst_cpu = cpu; -@@ -5803,7 +5803,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, +@@ -5850,7 +5850,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, /* Skip over this group if it has no CPUs allowed */ if (!cpumask_intersects(sched_group_span(group), @@ -602,7 +602,7 @@ index e84a056f783f..16940416d526 100644 continue; local_group = cpumask_test_cpu(this_cpu, -@@ -5935,7 +5935,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this +@@ -5982,7 +5982,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this return cpumask_first(sched_group_span(group)); /* Traverse only the allowed CPUs */ @@ -611,7 +611,7 @@ index e84a056f783f..16940416d526 100644 if (available_idle_cpu(i)) { struct rq *rq = cpu_rq(i); struct cpuidle_state *idle = idle_get_state(rq); -@@ -5975,7 +5975,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p +@@ -6022,7 +6022,7 @@ static inline int 
find_idlest_cpu(struct sched_domain *sd, struct task_struct *p { int new_cpu = cpu; @@ -620,7 +620,7 @@ index e84a056f783f..16940416d526 100644 return prev_cpu; /* -@@ -6092,7 +6092,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int +@@ -6139,7 +6139,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int if (!test_idle_cores(target, false)) return -1; @@ -629,7 +629,7 @@ index e84a056f783f..16940416d526 100644 for_each_cpu_wrap(core, cpus, target) { bool idle = true; -@@ -6126,7 +6126,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t +@@ -6173,7 +6173,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t return -1; for_each_cpu(cpu, cpu_smt_mask(target)) { @@ -638,7 +638,7 @@ index e84a056f783f..16940416d526 100644 continue; if (available_idle_cpu(cpu)) return cpu; -@@ -6187,7 +6187,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t +@@ -6234,7 +6234,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t time = local_clock(); @@ -647,7 +647,7 @@ index e84a056f783f..16940416d526 100644 for_each_cpu_wrap(cpu, cpus, target) { if (!--nr) -@@ -6227,7 +6227,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) +@@ -6274,7 +6274,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) recent_used_cpu != target && cpus_share_cache(recent_used_cpu, target) && available_idle_cpu(recent_used_cpu) && @@ -656,7 +656,7 @@ index e84a056f783f..16940416d526 100644 /* * Replace recent_used_cpu with prev as it is a potential * candidate for the next wake: -@@ -6445,7 +6445,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f +@@ -6492,7 +6492,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f if (sd_flag & SD_BALANCE_WAKE) { record_wakee(p); want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) @@ -665,7 +665,7 @@ index e84a056f783f..16940416d526 100644 } rcu_read_lock(); -@@ -7184,14 +7184,14 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) +@@ -7228,14 +7228,14 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) /* * We do not migrate tasks that are: * 1) throttled_lb_pair, or @@ -682,7 +682,7 @@ index e84a056f783f..16940416d526 100644 int cpu; schedstat_inc(p->se.statistics.nr_failed_migrations_affine); -@@ -7211,7 +7211,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) +@@ -7255,7 +7255,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) /* Prevent to re-select dst_cpu via env's CPUs: */ for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { @@ -691,7 +691,7 @@ index e84a056f783f..16940416d526 100644 env->flags |= LBF_DST_PINNED; env->new_dst_cpu = cpu; break; -@@ -7836,7 +7836,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd) +@@ -7880,7 +7880,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd) /* * Group imbalance indicates (and tries to solve) the problem where balancing @@ -700,7 +700,7 @@ index e84a056f783f..16940416d526 100644 * * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a * cpumask covering 1 CPU of the first group and 3 CPUs of the second group. 
-@@ -8451,7 +8451,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env) +@@ -8495,7 +8495,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env) /* * If the busiest group is imbalanced the below checks don't * work because they assume all things are equal, which typically @@ -709,7 +709,7 @@ index e84a056f783f..16940416d526 100644 */ if (busiest->group_type == group_imbalanced) goto force_balance; -@@ -8847,7 +8847,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, +@@ -8891,7 +8891,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, * if the curr task on busiest CPU can't be * moved to this_cpu: */ |
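
The refreshed patch above follows one mechanical pattern: every place that used to read p->cpus_allowed now reads through the new p->cpus_ptr pointer, writes go to the cpus_mask the task owns (see the set_cpus_allowed_common() hunk), and init_task points cpus_ptr at its own cpus_mask. The sketch below illustrates that split with toy stand-in types — a one-word cpumask_t, a hypothetical struct task and set_cpus_allowed() helper — so it is only the shape of the indirection, not the kernel's actual code.

```c
/*
 * Minimal userspace sketch, assuming simplified stand-in types: readers of a
 * task's affinity go through a const pointer (cpus_ptr), writers update the
 * mask the task owns (cpus_mask), and by default the pointer refers to that
 * own mask.  Not the kernel's real cpumask/task_struct definitions.
 */
#include <stdio.h>
#include <stdbool.h>

typedef struct { unsigned long bits; } cpumask_t;   /* toy mask: CPUs 0..63 */

static bool cpumask_test_cpu(int cpu, const cpumask_t *mask)
{
	return (mask->bits >> cpu) & 1UL;
}

struct task {
	cpumask_t        cpus_mask;   /* what the task is allowed to use */
	const cpumask_t *cpus_ptr;    /* what readers actually consult   */
};

/* Analogous to set_cpus_allowed_common() in the diff: write to cpus_mask. */
static void set_cpus_allowed(struct task *p, cpumask_t new_mask)
{
	p->cpus_mask = new_mask;
}

int main(void)
{
	/* Like the init_task hunk: cpus_ptr starts out pointing at cpus_mask. */
	struct task p = { .cpus_mask = { ~0UL }, .cpus_ptr = &p.cpus_mask };

	set_cpus_allowed(&p, (cpumask_t){ 0x0f });                /* CPUs 0-3 */
	printf("cpu 2 allowed: %d\n", cpumask_test_cpu(2, p.cpus_ptr));  /* 1 */
	printf("cpu 5 allowed: %d\n", cpumask_test_cpu(5, p.cpus_ptr));  /* 0 */

	/*
	 * The indirection lets a caller temporarily narrow what readers see
	 * without losing the affinity saved in cpus_mask.
	 */
	cpumask_t pinned = { 1UL << 2 };
	p.cpus_ptr = &pinned;
	printf("cpu 3 allowed while pinned: %d\n",
	       cpumask_test_cpu(3, p.cpus_ptr));                  /* 0 */

	p.cpus_ptr = &p.cpus_mask;                                /* restore */
	return 0;
}
```

The point of the extra pointer is that readers never need to know whether the task is currently restricted: a caller can repoint cpus_ptr for a while and later restore it, and the affinity that was actually requested stays intact in cpus_mask.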