Diffstat (limited to 'debian/patches-rt/0230-hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch')
-rw-r--r--  debian/patches-rt/0230-hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch  102
1 file changed, 102 insertions, 0 deletions
diff --git a/debian/patches-rt/0230-hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch b/debian/patches-rt/0230-hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch
new file mode 100644
index 000000000..f60b81343
--- /dev/null
+++ b/debian/patches-rt/0230-hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch
@@ -0,0 +1,102 @@
+From 3ed5cb0e7f86a61b58b20f64a79148a2a80dac29 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 4 Aug 2017 18:31:00 +0200
+Subject: [PATCH 230/347] hotplug: duct-tape RT-rwlock usage for non-RT
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+This type is only available on -RT. We need to craft something for
+non-RT. Since the only user of migrate_disable() is -RT, there is no
+damage.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/cpu.c | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 797216a0b4c6..f6511c62c9a4 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -77,7 +77,7 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
+ .fail = CPUHP_INVALID,
+ };
+
+-#ifdef CONFIG_HOTPLUG_CPU
++#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PREEMPT_RT_FULL)
+ static DEFINE_PER_CPU(struct rt_rw_lock, cpuhp_pin_lock) = \
+ __RWLOCK_RT_INITIALIZER(cpuhp_pin_lock);
+ #endif
+@@ -293,6 +293,7 @@ static int cpu_hotplug_disabled;
+ */
+ void pin_current_cpu(void)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
+ struct rt_rw_lock *cpuhp_pin;
+ unsigned int cpu;
+ int ret;
+@@ -315,6 +316,7 @@ void pin_current_cpu(void)
+ goto again;
+ }
+ current->pinned_on_cpu = cpu;
++#endif
+ }
+
+ /**
+@@ -322,6 +324,7 @@ void pin_current_cpu(void)
+ */
+ void unpin_current_cpu(void)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
+ struct rt_rw_lock *cpuhp_pin = this_cpu_ptr(&cpuhp_pin_lock);
+
+ if (WARN_ON(current->pinned_on_cpu != smp_processor_id()))
+@@ -329,6 +332,7 @@ void unpin_current_cpu(void)
+
+ current->pinned_on_cpu = -1;
+ __read_rt_unlock(cpuhp_pin);
++#endif
+ }
+
+ DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
+@@ -953,7 +957,9 @@ static int take_cpu_down(void *_param)
+
+ static int takedown_cpu(unsigned int cpu)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
+ struct rt_rw_lock *cpuhp_pin = per_cpu_ptr(&cpuhp_pin_lock, cpu);
++#endif
+ struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+ int err;
+
+@@ -966,14 +972,18 @@ static int takedown_cpu(unsigned int cpu)
+ */
+ irq_lock_sparse();
+
++#ifdef CONFIG_PREEMPT_RT_FULL
+ __write_rt_lock(cpuhp_pin);
++#endif
+
+ /*
+ * So now all preempt/rcu users must observe !cpu_active().
+ */
+ err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
+ if (err) {
++#ifdef CONFIG_PREEMPT_RT_FULL
+ __write_rt_unlock(cpuhp_pin);
++#endif
+ /* CPU refused to die */
+ irq_unlock_sparse();
+ /* Unpark the hotplug thread so we can rollback there */
+@@ -992,7 +1002,9 @@ static int takedown_cpu(unsigned int cpu)
+ wait_for_ap_thread(st, false);
+ BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
+ __write_rt_unlock(cpuhp_pin);
++#endif
+ /* Interrupts are moved away from the dying cpu, reenable alloc/free */
+ irq_unlock_sparse();
+
+--
+2.36.1
+
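The change above is mechanical, but the pattern is worth spelling out:
pin_current_cpu() takes the reader side of a per-CPU rwlock so that
takedown_cpu(), holding the writer side, cannot pull a CPU out from
under a migrate-disabled task; on non-RT kernels struct rt_rw_lock does
not exist, so every use is compiled out behind CONFIG_PREEMPT_RT_FULL.
Below is a minimal userspace sketch of that compile-time gating, not
kernel code: it substitutes a POSIX rwlock for rt_rw_lock and an
invented USE_RT_PIN macro for CONFIG_PREEMPT_RT_FULL; all identifiers
are illustrative.

/* Sketch of the patch's #ifdef gating in plain C (assumed names).
 * Readers "pin" themselves while they run; the "takedown" writer
 * waits until no task is pinned. Compile with -DUSE_RT_PIN for the
 * RT-like behaviour, without it for the empty non-RT stubs. */
#include <pthread.h>
#include <stdio.h>

#ifdef USE_RT_PIN
static pthread_rwlock_t pin_lock = PTHREAD_RWLOCK_INITIALIZER;
#endif

static void pin_current_task(void)
{
#ifdef USE_RT_PIN
        /* Reader side: any number of tasks may be pinned at once. */
        pthread_rwlock_rdlock(&pin_lock);
#endif
}

static void unpin_current_task(void)
{
#ifdef USE_RT_PIN
        pthread_rwlock_unlock(&pin_lock);
#endif
}

static void takedown(void)
{
#ifdef USE_RT_PIN
        /* Writer side: blocks until every pinned task has unpinned. */
        pthread_rwlock_wrlock(&pin_lock);
#endif
        printf("takedown runs with no task pinned\n");
#ifdef USE_RT_PIN
        pthread_rwlock_unlock(&pin_lock);
#endif
}

int main(void)
{
        pin_current_task();
        printf("task pinned\n");
        unpin_current_task();
        takedown();
        return 0;
}

Built with "cc -pthread -DUSE_RT_PIN", the reader/writer exclusion is
real; built without the define, the helpers compile to empty bodies,
which is exactly the duct tape the patch applies for non-RT
configurations.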