author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:06:00 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:06:00 +0000
commit     b15a952c52a6825376d3e7f6c1bf5c886c6d8b74
tree       1500f2f8f276908a36d8126cb632c0d6b1276764 /debian/patches-rt/0009-sched-Massage-set_cpus_allowed.patch
parent     Adding upstream version 5.10.209.
Adding debian version 5.10.209-2. (tag: debian/5.10.209-2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0009-sched-Massage-set_cpus_allowed.patch')
-rw-r--r--  debian/patches-rt/0009-sched-Massage-set_cpus_allowed.patch  |  175
1 file changed, 175 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0009-sched-Massage-set_cpus_allowed.patch b/debian/patches-rt/0009-sched-Massage-set_cpus_allowed.patch
new file mode 100644
index 000000000..24fc3200e
--- /dev/null
+++ b/debian/patches-rt/0009-sched-Massage-set_cpus_allowed.patch
@@ -0,0 +1,175 @@
+From 1125b34b63aa303af592b0ea5be730dc92ce6d53 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 23 Oct 2020 12:12:06 +0200
+Subject: [PATCH 009/323] sched: Massage set_cpus_allowed()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+Thread a u32 flags word through the *set_cpus_allowed*() callchain.
+This will allow adding behavioural tweaks for future users.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/sched/core.c | 28 ++++++++++++++++++----------
+ kernel/sched/deadline.c | 5 +++--
+ kernel/sched/sched.h | 7 +++++--
+ 3 files changed, 26 insertions(+), 14 deletions(-)
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index c5d5576c67fb..569cc5e48e68 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1838,13 +1838,14 @@ static int migration_cpu_stop(void *data)
+ * sched_class::set_cpus_allowed must do the below, but is not required to
+ * actually call this function.
+ */
+-void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
++void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
+ {
+ cpumask_copy(&p->cpus_mask, new_mask);
+ p->nr_cpus_allowed = cpumask_weight(new_mask);
+ }
+
+-void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++static void
++__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
+ {
+ struct rq *rq = task_rq(p);
+ bool queued, running;
+@@ -1865,7 +1866,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+ if (running)
+ put_prev_task(rq, p);
+
+- p->sched_class->set_cpus_allowed(p, new_mask);
++ p->sched_class->set_cpus_allowed(p, new_mask, flags);
+
+ if (queued)
+ enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
+@@ -1873,6 +1874,11 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+ set_next_task(rq, p);
+ }
+
++void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++ __do_set_cpus_allowed(p, new_mask, 0);
++}
++
+ /*
+ * Change a given task's CPU affinity. Migrate the thread to a
+ * proper CPU and schedule it away if the CPU it's executing on
+@@ -1883,7 +1889,8 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+ * call is not atomic; no spinlocks may be held.
+ */
+ static int __set_cpus_allowed_ptr(struct task_struct *p,
+- const struct cpumask *new_mask, bool check)
++ const struct cpumask *new_mask,
++ u32 flags)
+ {
+ const struct cpumask *cpu_valid_mask = cpu_active_mask;
+ unsigned int dest_cpu;
+@@ -1905,7 +1912,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
+ * Must re-check here, to close a race against __kthread_bind(),
+ * sched_setaffinity() is not guaranteed to observe the flag.
+ */
+- if (check && (p->flags & PF_NO_SETAFFINITY)) {
++ if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -1924,7 +1931,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
+ goto out;
+ }
+
+- do_set_cpus_allowed(p, new_mask);
++ __do_set_cpus_allowed(p, new_mask, flags);
+
+ if (p->flags & PF_KTHREAD) {
+ /*
+@@ -1961,7 +1968,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
+
+ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+ {
+- return __set_cpus_allowed_ptr(p, new_mask, false);
++ return __set_cpus_allowed_ptr(p, new_mask, 0);
+ }
+ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
+
+@@ -2420,7 +2427,8 @@ void sched_set_stop_task(int cpu, struct task_struct *stop)
+ #else
+
+ static inline int __set_cpus_allowed_ptr(struct task_struct *p,
+- const struct cpumask *new_mask, bool check)
++ const struct cpumask *new_mask,
++ u32 flags)
+ {
+ return set_cpus_allowed_ptr(p, new_mask);
+ }
+@@ -6022,7 +6030,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+ }
+ #endif
+ again:
+- retval = __set_cpus_allowed_ptr(p, new_mask, true);
++ retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK);
+
+ if (!retval) {
+ cpuset_cpus_allowed(p, cpus_allowed);
+@@ -6598,7 +6606,7 @@ void __init init_idle(struct task_struct *idle, int cpu)
+ *
+ * And since this is boot we can forgo the serialization.
+ */
+- set_cpus_allowed_common(idle, cpumask_of(cpu));
++ set_cpus_allowed_common(idle, cpumask_of(cpu), 0);
+ #endif
+ /*
+ * We're having a chicken and egg problem, even though we are
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index af8569dbdc9c..5566f157640b 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -2311,7 +2311,8 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p)
+ }
+
+ static void set_cpus_allowed_dl(struct task_struct *p,
+- const struct cpumask *new_mask)
++ const struct cpumask *new_mask,
++ u32 flags)
+ {
+ struct root_domain *src_rd;
+ struct rq *rq;
+@@ -2340,7 +2341,7 @@ static void set_cpus_allowed_dl(struct task_struct *p,
+ raw_spin_unlock(&src_dl_b->lock);
+ }
+
+- set_cpus_allowed_common(p, new_mask);
++ set_cpus_allowed_common(p, new_mask, flags);
+ }
+
+ /* Assumes rq->lock is held */
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index a72464d370cd..f0f396cc1bee 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1818,7 +1818,8 @@ struct sched_class {
+ void (*task_woken)(struct rq *this_rq, struct task_struct *task);
+
+ void (*set_cpus_allowed)(struct task_struct *p,
+- const struct cpumask *newmask);
++ const struct cpumask *newmask,
++ u32 flags);
+
+ void (*rq_online)(struct rq *rq);
+ void (*rq_offline)(struct rq *rq);
+@@ -1911,7 +1912,9 @@ extern void update_group_capacity(struct sched_domain *sd, int cpu);
+
+ extern void trigger_load_balance(struct rq *rq);
+
+-extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
++#define SCA_CHECK 0x01
++
++extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
+
+ #endif
+
+--
+2.43.0
+
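For orientation, here is a minimal sketch (not part of the patch above) of how a caller is expected to use the new u32 flags argument once this change is applied: user-driven affinity changes pass SCA_CHECK so that PF_NO_SETAFFINITY tasks are rejected, while kernel-internal callers pass 0 and skip that check. The helper name example_set_affinity and the from_user parameter are purely illustrative, and the sketch assumes it lives in kernel/sched/core.c, since __set_cpus_allowed_ptr() is static there.

/*
 * Illustrative sketch only -- not part of the patch.
 * SCA_CHECK is the flag added to kernel/sched/sched.h by this patch;
 * it makes __set_cpus_allowed_ptr() fail with -EINVAL for tasks marked
 * PF_NO_SETAFFINITY, exactly as sched_setaffinity() now requests.
 * Kernel-internal callers such as set_cpus_allowed_ptr() pass 0.
 */
static int example_set_affinity(struct task_struct *p,
				const struct cpumask *new_mask,
				bool from_user)
{
	u32 flags = from_user ? SCA_CHECK : 0;

	return __set_cpus_allowed_ptr(p, new_mask, flags);
}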