Diffstat (limited to 'debian/patches-rt/0289-thermal-x86_pkg_temp-make-pkg_temp_lock-a-raw-spinlo.patch')
-rw-r--r--  debian/patches-rt/0289-thermal-x86_pkg_temp-make-pkg_temp_lock-a-raw-spinlo.patch  120
1 file changed, 120 insertions, 0 deletions
diff --git a/debian/patches-rt/0289-thermal-x86_pkg_temp-make-pkg_temp_lock-a-raw-spinlo.patch b/debian/patches-rt/0289-thermal-x86_pkg_temp-make-pkg_temp_lock-a-raw-spinlo.patch
new file mode 100644
index 000000000..3fe37ca3c
--- /dev/null
+++ b/debian/patches-rt/0289-thermal-x86_pkg_temp-make-pkg_temp_lock-a-raw-spinlo.patch
@@ -0,0 +1,120 @@
+From 402252a8f4f986a5eb23639e3b9a1d9df4902d27 Mon Sep 17 00:00:00 2001
+From: Clark Williams <williams@redhat.com>
+Date: Mon, 15 Jul 2019 15:25:00 -0500
+Subject: [PATCH 289/347] thermal/x86_pkg_temp: make pkg_temp_lock a raw
+ spinlock
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+[ Upstream commit 8b03bb3ea7861b70b506199a69b1c8f81fe2d4d0 ]
+
+The spinlock pkg_temp_lock has the potential of being taken in atomic
+context on v5.2-rt PREEMPT_RT. It's static and limited scope so
+go ahead and make it a raw spinlock.
+
+Signed-off-by: Clark Williams <williams@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+---
+ drivers/thermal/x86_pkg_temp_thermal.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
+index 1ef937d799e4..540becb78a0f 100644
+--- a/drivers/thermal/x86_pkg_temp_thermal.c
++++ b/drivers/thermal/x86_pkg_temp_thermal.c
+@@ -75,7 +75,7 @@ static int max_packages __read_mostly;
+ /* Array of package pointers */
+ static struct pkg_device **packages;
+ /* Serializes interrupt notification, work and hotplug */
+-static DEFINE_SPINLOCK(pkg_temp_lock);
++static DEFINE_RAW_SPINLOCK(pkg_temp_lock);
+ /* Protects zone operation in the work function against hotplug removal */
+ static DEFINE_MUTEX(thermal_zone_mutex);
+
+@@ -291,12 +291,12 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
+ u64 msr_val, wr_val;
+
+ mutex_lock(&thermal_zone_mutex);
+- spin_lock_irq(&pkg_temp_lock);
++ raw_spin_lock_irq(&pkg_temp_lock);
+ ++pkg_work_cnt;
+
+ pkgdev = pkg_temp_thermal_get_dev(cpu);
+ if (!pkgdev) {
+- spin_unlock_irq(&pkg_temp_lock);
++ raw_spin_unlock_irq(&pkg_temp_lock);
+ mutex_unlock(&thermal_zone_mutex);
+ return;
+ }
+@@ -310,7 +310,7 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
+ }
+
+ enable_pkg_thres_interrupt();
+- spin_unlock_irq(&pkg_temp_lock);
++ raw_spin_unlock_irq(&pkg_temp_lock);
+
+ /*
+ * If tzone is not NULL, then thermal_zone_mutex will prevent the
+@@ -335,7 +335,7 @@ static int pkg_thermal_notify(u64 msr_val)
+ struct pkg_device *pkgdev;
+ unsigned long flags;
+
+- spin_lock_irqsave(&pkg_temp_lock, flags);
++ raw_spin_lock_irqsave(&pkg_temp_lock, flags);
+ ++pkg_interrupt_cnt;
+
+ disable_pkg_thres_interrupt();
+@@ -347,7 +347,7 @@ static int pkg_thermal_notify(u64 msr_val)
+ pkg_thermal_schedule_work(pkgdev->cpu, &pkgdev->work);
+ }
+
+- spin_unlock_irqrestore(&pkg_temp_lock, flags);
++ raw_spin_unlock_irqrestore(&pkg_temp_lock, flags);
+ return 0;
+ }
+
+@@ -393,9 +393,9 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
+ pkgdev->msr_pkg_therm_high);
+
+ cpumask_set_cpu(cpu, &pkgdev->cpumask);
+- spin_lock_irq(&pkg_temp_lock);
++ raw_spin_lock_irq(&pkg_temp_lock);
+ packages[pkgid] = pkgdev;
+- spin_unlock_irq(&pkg_temp_lock);
++ raw_spin_unlock_irq(&pkg_temp_lock);
+ return 0;
+ }
+
+@@ -432,7 +432,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
+ }
+
+ /* Protect against work and interrupts */
+- spin_lock_irq(&pkg_temp_lock);
++ raw_spin_lock_irq(&pkg_temp_lock);
+
+ /*
+ * Check whether this cpu was the current target and store the new
+@@ -464,9 +464,9 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
+ * To cancel the work we need to drop the lock, otherwise
+ * we might deadlock if the work needs to be flushed.
+ */
+- spin_unlock_irq(&pkg_temp_lock);
++ raw_spin_unlock_irq(&pkg_temp_lock);
+ cancel_delayed_work_sync(&pkgdev->work);
+- spin_lock_irq(&pkg_temp_lock);
++ raw_spin_lock_irq(&pkg_temp_lock);
+ /*
+ * If this is not the last cpu in the package and the work
+ * did not run after we dropped the lock above, then we
+@@ -477,7 +477,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
+ pkg_thermal_schedule_work(target, &pkgdev->work);
+ }
+
+- spin_unlock_irq(&pkg_temp_lock);
++ raw_spin_unlock_irq(&pkg_temp_lock);
+
+ /* Final cleanup if this is the last cpu */
+ if (lastcpu)
+--
+2.36.1
+
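Note on the pattern this patch applies (not part of the patch itself): under PREEMPT_RT, a plain spinlock_t becomes a sleeping, rtmutex-based lock and therefore must not be taken in hard-IRQ or other atomic context, whereas raw_spinlock_t remains a true spinning lock. That is why pkg_temp_lock, which is taken from the thermal interrupt notification path, is converted to a raw spinlock. The sketch below illustrates the same pattern generically; the names example_lock, shared_counter, example_irq_handler and example_update_from_task are invented for illustration and do not appear in the kernel.

	/* Minimal sketch of the raw-spinlock pattern; assumed/illustrative names only. */
	#include <linux/spinlock.h>
	#include <linux/interrupt.h>

	/* Like pkg_temp_lock: raw so it keeps spinning (never sleeps) on PREEMPT_RT. */
	static DEFINE_RAW_SPINLOCK(example_lock);
	static int shared_counter;

	/* Interrupt-context caller: save and restore the interrupt state. */
	static irqreturn_t example_irq_handler(int irq, void *dev_id)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&example_lock, flags);
		shared_counter++;
		raw_spin_unlock_irqrestore(&example_lock, flags);
		return IRQ_HANDLED;
	}

	/* Process-context caller: interrupts are known to be enabled here. */
	static void example_update_from_task(void)
	{
		raw_spin_lock_irq(&example_lock);
		shared_counter++;
		raw_spin_unlock_irq(&example_lock);
	}

The patch above mirrors this split: pkg_thermal_notify() uses raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() because it runs from the thermal interrupt vector, while the work function, device add, and CPU offline paths run in process context and use raw_spin_lock_irq()/raw_spin_unlock_irq().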