author Daniel Baumann <daniel.baumann@progress-linux.org> 2024-05-06 01:02:38 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-05-06 01:02:38 +0000
commit 08b74a000942a380fe028845f92cd3a0dee827d5 (patch)
tree aa78b4e12607c3e1fcce8d5cc42df4330792f118 /debian/patches-rt/0027-kernel-sched-core-add-migrate_disable.patch
parent Adding upstream version 4.19.249. (diff)
Adding debian version 4.19.249-2. (tag: debian/4.19.249-2, branch: debian)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0027-kernel-sched-core-add-migrate_disable.patch')
-rw-r--r-- debian/patches-rt/0027-kernel-sched-core-add-migrate_disable.patch | 266
1 file changed, 266 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0027-kernel-sched-core-add-migrate_disable.patch b/debian/patches-rt/0027-kernel-sched-core-add-migrate_disable.patch
new file mode 100644
index 000000000..bd257b53a
--- /dev/null
+++ b/debian/patches-rt/0027-kernel-sched-core-add-migrate_disable.patch
@@ -0,0 +1,266 @@
+From 842b16f6165e848057d292bb2e2a17a5c143bc55 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Sat, 27 May 2017 19:02:06 +0200
+Subject: [PATCH 027/347] kernel/sched/core: add migrate_disable()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+---
+ include/linux/preempt.h | 23 +++++++
+ include/linux/sched.h | 7 +++
+ include/linux/smp.h | 3 +
+ kernel/sched/core.c | 130 +++++++++++++++++++++++++++++++++++++++-
+ kernel/sched/debug.c | 4 ++
+ 5 files changed, 165 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index c01813c3fbe9..3196d0e76719 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -185,6 +185,22 @@ do { \
+
+ #define preemptible() (preempt_count() == 0 && !irqs_disabled())
+
++#ifdef CONFIG_SMP
++
++extern void migrate_disable(void);
++extern void migrate_enable(void);
++
++int __migrate_disabled(struct task_struct *p);
++
++#else
++#define migrate_disable() barrier()
++#define migrate_enable() barrier()
++static inline int __migrate_disabled(struct task_struct *p)
++{
++ return 0;
++}
++#endif
++
+ #ifdef CONFIG_PREEMPT
+ #define preempt_enable() \
+ do { \
+@@ -253,6 +269,13 @@ do { \
+ #define preempt_enable_notrace() barrier()
+ #define preemptible() 0
+
++#define migrate_disable() barrier()
++#define migrate_enable() barrier()
++
++static inline int __migrate_disabled(struct task_struct *p)
++{
++ return 0;
++}
+ #endif /* CONFIG_PREEMPT_COUNT */
+
+ #ifdef MODULE
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index fc5f476c2aca..52069b9ddce8 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -662,6 +662,13 @@ struct task_struct {
+ int nr_cpus_allowed;
+ const cpumask_t *cpus_ptr;
+ cpumask_t cpus_mask;
++#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++ int migrate_disable;
++ int migrate_disable_update;
++# ifdef CONFIG_SCHED_DEBUG
++ int migrate_disable_atomic;
++# endif
++#endif
+
+ #ifdef CONFIG_PREEMPT_RCU
+ int rcu_read_lock_nesting;
+diff --git a/include/linux/smp.h b/include/linux/smp.h
+index 6bb7f07bc1dd..039da089482c 100644
+--- a/include/linux/smp.h
++++ b/include/linux/smp.h
+@@ -202,6 +202,9 @@ static inline int get_boot_cpu_id(void)
+ #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
+ #define put_cpu() preempt_enable()
+
++#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); })
++#define put_cpu_light() migrate_enable()
++
+ /*
+ * Callback to arch code if there's nosmp or maxcpus=0 on the
+ * boot command line:
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 3fb7638a8863..5a691d2bfbfe 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1008,7 +1008,15 @@ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_ma
+ p->nr_cpus_allowed = cpumask_weight(new_mask);
+ }
+
+-void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++int __migrate_disabled(struct task_struct *p)
++{
++ return p->migrate_disable;
++}
++#endif
++
++static void __do_set_cpus_allowed_tail(struct task_struct *p,
++ const struct cpumask *new_mask)
+ {
+ struct rq *rq = task_rq(p);
+ bool queued, running;
+@@ -1037,6 +1045,20 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+ set_curr_task(rq, p);
+ }
+
++void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++ if (__migrate_disabled(p)) {
++ lockdep_assert_held(&p->pi_lock);
++
++ cpumask_copy(&p->cpus_mask, new_mask);
++ p->migrate_disable_update = 1;
++ return;
++ }
++#endif
++ __do_set_cpus_allowed_tail(p, new_mask);
++}
++
+ /*
+ * Change a given task's CPU affinity. Migrate the thread to a
+ * proper CPU and schedule it away if the CPU it's executing on
+@@ -1096,9 +1118,16 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
+ }
+
+ /* Can the task run on the task's current CPU? If so, we're done */
+- if (cpumask_test_cpu(task_cpu(p), new_mask))
++ if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
+ goto out;
+
++#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++ if (__migrate_disabled(p)) {
++ p->migrate_disable_update = 1;
++ goto out;
++ }
++#endif
++
+ if (task_running(rq, p) || p->state == TASK_WAKING) {
+ struct migration_arg arg = { p, dest_cpu };
+ /* Need help from migration thread: drop lock and wait. */
+@@ -7108,3 +7137,100 @@ const u32 sched_prio_to_wmult[40] = {
+ };
+
+ #undef CREATE_TRACE_POINTS
++
++#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++
++void migrate_disable(void)
++{
++ struct task_struct *p = current;
++
++ if (in_atomic() || irqs_disabled()) {
++#ifdef CONFIG_SCHED_DEBUG
++ p->migrate_disable_atomic++;
++#endif
++ return;
++ }
++#ifdef CONFIG_SCHED_DEBUG
++ WARN_ON_ONCE(p->migrate_disable_atomic);
++#endif
++
++ if (p->migrate_disable) {
++ p->migrate_disable++;
++ return;
++ }
++
++ preempt_disable();
++ p->migrate_disable = 1;
++
++ p->cpus_ptr = cpumask_of(smp_processor_id());
++ p->nr_cpus_allowed = 1;
++
++ preempt_enable();
++}
++EXPORT_SYMBOL(migrate_disable);
++
++void migrate_enable(void)
++{
++ struct task_struct *p = current;
++
++ if (in_atomic() || irqs_disabled()) {
++#ifdef CONFIG_SCHED_DEBUG
++ p->migrate_disable_atomic--;
++#endif
++ return;
++ }
++
++#ifdef CONFIG_SCHED_DEBUG
++ WARN_ON_ONCE(p->migrate_disable_atomic);
++#endif
++
++ WARN_ON_ONCE(p->migrate_disable <= 0);
++ if (p->migrate_disable > 1) {
++ p->migrate_disable--;
++ return;
++ }
++
++ preempt_disable();
++
++ p->cpus_ptr = &p->cpus_mask;
++ p->nr_cpus_allowed = cpumask_weight(&p->cpus_mask);
++ p->migrate_disable = 0;
++
++ if (p->migrate_disable_update) {
++ struct rq *rq;
++ struct rq_flags rf;
++
++ rq = task_rq_lock(p, &rf);
++ update_rq_clock(rq);
++
++ __do_set_cpus_allowed_tail(p, &p->cpus_mask);
++ task_rq_unlock(rq, p, &rf);
++
++ p->migrate_disable_update = 0;
++
++ WARN_ON(smp_processor_id() != task_cpu(p));
++ if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
++ const struct cpumask *cpu_valid_mask = cpu_active_mask;
++ struct migration_arg arg;
++ unsigned int dest_cpu;
++
++ if (p->flags & PF_KTHREAD) {
++ /*
++ * Kernel threads are allowed on online && !active CPUs
++ */
++ cpu_valid_mask = cpu_online_mask;
++ }
++ dest_cpu = cpumask_any_and(cpu_valid_mask, &p->cpus_mask);
++ arg.task = p;
++ arg.dest_cpu = dest_cpu;
++
++ preempt_enable();
++ stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
++ tlb_migrate_finish(p->mm);
++ return;
++ }
++ }
++ preempt_enable();
++}
++EXPORT_SYMBOL(migrate_enable);
++#endif
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index b1ef4f2e7edc..dc3f3e6fa0bd 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -988,6 +988,10 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
+ P(dl.runtime);
+ P(dl.deadline);
+ }
++#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++ P(migrate_disable);
++#endif
++ P(nr_cpus_allowed);
+ #undef PN_SCHEDSTAT
+ #undef PN
+ #undef __PN
+--
+2.36.1
+
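Usage note (editorial, not part of the patch): migrate_disable() pins the current task to its CPU while leaving preemption enabled, so code between migrate_disable() and migrate_enable() may still sleep or take RT's sleeping spinlocks. The calls nest via p->migrate_disable, and an affinity change requested while migration is disabled is only recorded (migrate_disable_update) and applied, possibly moving the task via stop_one_cpu(), at the outermost migrate_enable(). A minimal sketch of how kernel code would typically use the new get_cpu_light()/put_cpu_light() helpers follows; my_lock, my_counter, my_init and my_counter_inc are hypothetical names, not from this patch:

#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/smp.h>		/* get_cpu_light()/put_cpu_light() */
#include <linux/spinlock.h>

/* Hypothetical per-CPU data; illustrative only. */
static DEFINE_PER_CPU(spinlock_t, my_lock);
static DEFINE_PER_CPU(unsigned long, my_counter);

static int __init my_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu(my_lock, cpu));
	return 0;
}
core_initcall(my_init);

static void my_counter_inc(void)
{
	/* Pin this task to its CPU; unlike get_cpu(), preemption
	 * stays enabled, so sleeping locks remain legal on RT. */
	int cpu = get_cpu_light();

	/* The per-CPU lock serializes against other tasks that may
	 * preempt us on this CPU; migrate_disable() (inside
	 * get_cpu_light()) keeps 'cpu' stable across the section. */
	spin_lock(&per_cpu(my_lock, cpu));
	per_cpu(my_counter, cpu)++;
	spin_unlock(&per_cpu(my_lock, cpu));

	put_cpu_light();
}

The same pattern with plain get_cpu()/put_cpu() would disable preemption outright, which is exactly what the lighter migrate-disable variant is meant to avoid on PREEMPT_RT.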