authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-27 10:05:51 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-27 10:05:51 +0000
commit5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
treea94efe259b9009378be6d90eb30d2b019d95c194 /drivers/thermal/intel
parentInitial commit. (diff)
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/thermal/intel')
-rw-r--r--  drivers/thermal/intel/Kconfig                                      |  78
-rw-r--r--  drivers/thermal/intel/Makefile                                     |  12
-rw-r--r--  drivers/thermal/intel/int340x_thermal/Kconfig                      |  49
-rw-r--r--  drivers/thermal/intel/int340x_thermal/Makefile                     |   8
-rw-r--r--  drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c           | 385
-rw-r--r--  drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.h           |  85
-rw-r--r--  drivers/thermal/intel/int340x_thermal/int3400_thermal.c            | 614
-rw-r--r--  drivers/thermal/intel/int340x_thermal/int3402_thermal.c            | 104
-rw-r--r--  drivers/thermal/intel/int340x_thermal/int3403_thermal.c            | 304
-rw-r--r--  drivers/thermal/intel/int340x_thermal/int3406_thermal.c            | 209
-rw-r--r--  drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c       | 312
-rw-r--r--  drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h       |  62
-rw-r--r--  drivers/thermal/intel/int340x_thermal/processor_thermal_device.c   | 794
-rw-r--r--  drivers/thermal/intel/intel_bxt_pmic_thermal.c                     | 290
-rw-r--r--  drivers/thermal/intel/intel_pch_thermal.c                          | 438
-rw-r--r--  drivers/thermal/intel/intel_powerclamp.c                           | 787
-rw-r--r--  drivers/thermal/intel/intel_quark_dts_thermal.c                    | 438
-rw-r--r--  drivers/thermal/intel/intel_soc_dts_iosf.c                         | 477
-rw-r--r--  drivers/thermal/intel/intel_soc_dts_iosf.h                         |  51
-rw-r--r--  drivers/thermal/intel/intel_soc_dts_thermal.c                      | 122
-rw-r--r--  drivers/thermal/intel/x86_pkg_temp_thermal.c                       | 539
21 files changed, 6158 insertions, 0 deletions
diff --git a/drivers/thermal/intel/Kconfig b/drivers/thermal/intel/Kconfig
new file mode 100644
index 000000000..b5427579f
--- /dev/null
+++ b/drivers/thermal/intel/Kconfig
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INTEL_POWERCLAMP
+ tristate "Intel PowerClamp idle injection driver"
+ depends on X86
+ depends on CPU_SUP_INTEL
+ help
+ Enable this to enable the Intel PowerClamp idle injection driver. This
+ enforces idle time, which results in more package C-state residency. The
+ user interface is exposed via the generic thermal framework.
+
+config X86_PKG_TEMP_THERMAL
+ tristate "X86 package temperature thermal driver"
+ depends on X86_THERMAL_VECTOR
+ select THERMAL_GOV_USER_SPACE
+ select THERMAL_WRITABLE_TRIPS
+ default m
+ help
+ Enable this to register the CPU digital temperature sensor for package
+ temperature as a thermal zone. Each package will have its own thermal zone.
+ There are two trip points which can be set by the user to get notifications
+ via thermal notification methods.
+
+config INTEL_SOC_DTS_IOSF_CORE
+ tristate
+ depends on X86 && PCI
+ select IOSF_MBI
+ help
+ Many Intel SoCs expose additional digital temperature sensors (DTSs)
+ through the IOSF sideband interface. This option implements the common
+ set of helper functions to register DTSs, get their temperature and
+ get/set their thresholds.
+
+config INTEL_SOC_DTS_THERMAL
+ tristate "Intel SoCs DTS thermal driver"
+ depends on X86 && PCI && ACPI
+ select INTEL_SOC_DTS_IOSF_CORE
+ select THERMAL_WRITABLE_TRIPS
+ help
+ Enable this to register the Intel SoC (e.g. Bay Trail) platform digital
+ temperature sensor (DTS). These SoCs have two additional DTSs in
+ addition to the DTSs on the CPU cores. Each DTS will be registered as a
+ thermal zone. There are two trip points. One of the trip points can
+ be set by user mode programs to get notifications via Linux thermal
+ notification methods. The other trip is a critical trip point, which
+ is set by the driver based on the TJ MAX temperature.
+
+config INTEL_QUARK_DTS_THERMAL
+ tristate "Intel Quark DTS thermal driver"
+ depends on X86_INTEL_QUARK
+ help
+ Enable this to register the Intel Quark SoC (e.g. X1000) platform digital
+ temperature sensor (DTS). The X1000 SoC has one on-die DTS.
+ The DTS will be registered as a thermal zone. There are two trip points:
+ hot & critical. The critical trip point default value is set by
+ underlying BIOS/Firmware.
+
+menu "ACPI INT340X thermal drivers"
+source "drivers/thermal/intel/int340x_thermal/Kconfig"
+endmenu
+
+config INTEL_BXT_PMIC_THERMAL
+ tristate "Intel Broxton PMIC thermal driver"
+ depends on X86 && INTEL_SOC_PMIC_BXTWC
+ select REGMAP
+ help
+ Select this driver for the Intel Broxton PMIC, whose ADC channels
+ monitor system temperature and raise alerts.
+ The driver monitors the PMIC ADC channels, handles the alert trip
+ point interrupts and notifies the thermal framework with the trip
+ point and temperature details of the zone.
+
+config INTEL_PCH_THERMAL
+ tristate "Intel PCH Thermal Reporting Driver"
+ depends on X86 && PCI
+ help
+ Enable this to support thermal reporting on certain Intel PCHs.
+ The thermal reporting device provides temperature readings,
+ programmable trip points and other information.
diff --git a/drivers/thermal/intel/Makefile b/drivers/thermal/intel/Makefile
new file mode 100644
index 000000000..0d9736ced
--- /dev/null
+++ b/drivers/thermal/intel/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for various Intel thermal drivers.
+
+obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o
+obj-$(CONFIG_X86_PKG_TEMP_THERMAL) += x86_pkg_temp_thermal.o
+obj-$(CONFIG_INTEL_SOC_DTS_IOSF_CORE) += intel_soc_dts_iosf.o
+obj-$(CONFIG_INTEL_SOC_DTS_THERMAL) += intel_soc_dts_thermal.o
+obj-$(CONFIG_INTEL_QUARK_DTS_THERMAL) += intel_quark_dts_thermal.o
+obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/
+obj-$(CONFIG_INTEL_BXT_PMIC_THERMAL) += intel_bxt_pmic_thermal.o
+obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o
diff --git a/drivers/thermal/intel/int340x_thermal/Kconfig b/drivers/thermal/intel/int340x_thermal/Kconfig
new file mode 100644
index 000000000..797907542
--- /dev/null
+++ b/drivers/thermal/intel/int340x_thermal/Kconfig
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# ACPI INT340x thermal drivers configuration
+#
+
+config INT340X_THERMAL
+ tristate "ACPI INT340X thermal drivers"
+ depends on X86 && ACPI && PCI
+ select THERMAL_GOV_USER_SPACE
+ select ACPI_THERMAL_REL
+ select ACPI_FAN
+ select INTEL_SOC_DTS_IOSF_CORE
+ help
+ Newer laptops and tablets that use ACPI may have thermal sensors and
+ other devices with thermal control capabilities outside the core
+ CPU/SOC, for thermal safety reasons.
+ They are exposed for the OS to use via the INT3400 ACPI device object
+ as the master, and INT3401~INT340B ACPI device objects as the slaves.
+ Enable this to expose the temperature information and cooling ability
+ from these objects to userspace via the normal thermal framework.
+ This means that a wide range of applications and GUI widgets can show
+ the information to the user or use this information for making
+ decisions. For example, the Intel Thermal Daemon can use this
+ information to let the user choose to run the laptop without
+ turning on the fans.
+
+config ACPI_THERMAL_REL
+ tristate
+ depends on ACPI
+
+if INT340X_THERMAL
+
+config INT3406_THERMAL
+ tristate "ACPI INT3406 display thermal driver"
+ depends on ACPI_VIDEO
+ help
+ The display thermal device represents the LED/LCD display panel
+ that may or may not include touch support. The main function of
+ the display thermal device is to allow control of the display
+ brightness in order to address a thermal condition or to reduce
+ the power consumed by the display device.
+
+config PROC_THERMAL_MMIO_RAPL
+ bool
+ depends on 64BIT
+ depends on POWERCAP
+ select INTEL_RAPL_CORE
+ default y
+endif
diff --git a/drivers/thermal/intel/int340x_thermal/Makefile b/drivers/thermal/intel/int340x_thermal/Makefile
new file mode 100644
index 000000000..287eb0a14
--- /dev/null
+++ b/drivers/thermal/intel/int340x_thermal/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_INT340X_THERMAL) += int3400_thermal.o
+obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal_zone.o
+obj-$(CONFIG_INT340X_THERMAL) += int3402_thermal.o
+obj-$(CONFIG_INT340X_THERMAL) += int3403_thermal.o
+obj-$(CONFIG_INT340X_THERMAL) += processor_thermal_device.o
+obj-$(CONFIG_INT3406_THERMAL) += int3406_thermal.o
+obj-$(CONFIG_ACPI_THERMAL_REL) += acpi_thermal_rel.o
diff --git a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
new file mode 100644
index 000000000..a478cff81
--- /dev/null
+++ b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* acpi_thermal_rel.c driver for exporting ACPI thermal relationship
+ *
+ * Copyright (c) 2014 Intel Corp
+ */
+
+/*
+ * Two functionalities included:
+ * 1. Export _TRT and _ART via a misc device interface to userspace.
+ * 2. Provide parsing results to kernel drivers.
+ *
+ */
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/acpi.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include "acpi_thermal_rel.h"
+
+static acpi_handle acpi_thermal_rel_handle;
+static DEFINE_SPINLOCK(acpi_thermal_rel_chrdev_lock);
+static int acpi_thermal_rel_chrdev_count; /* #times opened */
+static int acpi_thermal_rel_chrdev_exclu; /* already open exclusive? */
+
+static int acpi_thermal_rel_open(struct inode *inode, struct file *file)
+{
+ spin_lock(&acpi_thermal_rel_chrdev_lock);
+ if (acpi_thermal_rel_chrdev_exclu ||
+ (acpi_thermal_rel_chrdev_count && (file->f_flags & O_EXCL))) {
+ spin_unlock(&acpi_thermal_rel_chrdev_lock);
+ return -EBUSY;
+ }
+
+ if (file->f_flags & O_EXCL)
+ acpi_thermal_rel_chrdev_exclu = 1;
+ acpi_thermal_rel_chrdev_count++;
+
+ spin_unlock(&acpi_thermal_rel_chrdev_lock);
+
+ return nonseekable_open(inode, file);
+}
+
+static int acpi_thermal_rel_release(struct inode *inode, struct file *file)
+{
+ spin_lock(&acpi_thermal_rel_chrdev_lock);
+ acpi_thermal_rel_chrdev_count--;
+ acpi_thermal_rel_chrdev_exclu = 0;
+ spin_unlock(&acpi_thermal_rel_chrdev_lock);
+
+ return 0;
+}
+
+/**
+ * acpi_parse_trt - Parse the Thermal Relationship Table _TRT for passive cooling
+ *
+ * @handle: ACPI handle of the device containing _TRT
+ * @trt_count: the number of valid entries resulting from parsing _TRT
+ * @trtp: pointer to pointer of array of _TRT entries in parsing result
+ * @create_dev: whether to create platform devices for target and source
+ *
+ */
+int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp,
+ bool create_dev)
+{
+ acpi_status status;
+ int result = 0;
+ int i;
+ int nr_bad_entries = 0;
+ struct trt *trts;
+ struct acpi_device *adev;
+ union acpi_object *p;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer element = { 0, NULL };
+ struct acpi_buffer trt_format = { sizeof("RRNNNNNN"), "RRNNNNNN" };
+
+ status = acpi_evaluate_object(handle, "_TRT", NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ p = buffer.pointer;
+ if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
+ pr_err("Invalid _TRT data\n");
+ result = -EFAULT;
+ goto end;
+ }
+
+ *trt_count = p->package.count;
+ trts = kcalloc(*trt_count, sizeof(struct trt), GFP_KERNEL);
+ if (!trts) {
+ result = -ENOMEM;
+ goto end;
+ }
+
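+ /*
+ * Each package element is unpacked with acpi_extract_package(): the
+ * "RRNNNNNN" format describes two ACPI references (the source and target
+ * handles) followed by six integers, matching the layout of struct trt.
+ */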
+ for (i = 0; i < *trt_count; i++) {
+ struct trt *trt = &trts[i - nr_bad_entries];
+
+ element.length = sizeof(struct trt);
+ element.pointer = trt;
+
+ status = acpi_extract_package(&(p->package.elements[i]),
+ &trt_format, &element);
+ if (ACPI_FAILURE(status)) {
+ nr_bad_entries++;
+ pr_warn("_TRT package %d is invalid, ignored\n", i);
+ continue;
+ }
+ if (!create_dev)
+ continue;
+
+ result = acpi_bus_get_device(trt->source, &adev);
+ if (result)
+ pr_warn("Failed to get source ACPI device\n");
+
+ result = acpi_bus_get_device(trt->target, &adev);
+ if (result)
+ pr_warn("Failed to get target ACPI device\n");
+ }
+
+ result = 0;
+
+ *trtp = trts;
+ /* don't count bad entries */
+ *trt_count -= nr_bad_entries;
+end:
+ kfree(buffer.pointer);
+ return result;
+}
+EXPORT_SYMBOL(acpi_parse_trt);
+
+/**
+ * acpi_parse_art - Parse the Active Relationship Table _ART
+ *
+ * @handle: ACPI handle of the device containing _ART
+ * @art_count: the number of valid entries resulting from parsing _ART
+ * @artp: pointer to pointer of array of art entries in parsing result
+ * @create_dev: whether to create platform devices for target and source
+ *
+ */
+int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp,
+ bool create_dev)
+{
+ acpi_status status;
+ int result = 0;
+ int i;
+ int nr_bad_entries = 0;
+ struct art *arts;
+ struct acpi_device *adev;
+ union acpi_object *p;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer element = { 0, NULL };
+ struct acpi_buffer art_format = {
+ sizeof("RRNNNNNNNNNNN"), "RRNNNNNNNNNNN" };
+
+ status = acpi_evaluate_object(handle, "_ART", NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ p = buffer.pointer;
+ if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
+ pr_err("Invalid _ART data\n");
+ result = -EFAULT;
+ goto end;
+ }
+
+ /* ignore p->package.elements[0], as this is the _ART Revision field */
+ *art_count = p->package.count - 1;
+ arts = kcalloc(*art_count, sizeof(struct art), GFP_KERNEL);
+ if (!arts) {
+ result = -ENOMEM;
+ goto end;
+ }
+
+ for (i = 0; i < *art_count; i++) {
+ struct art *art = &arts[i - nr_bad_entries];
+
+ element.length = sizeof(struct art);
+ element.pointer = art;
+
+ status = acpi_extract_package(&(p->package.elements[i + 1]),
+ &art_format, &element);
+ if (ACPI_FAILURE(status)) {
+ pr_warn("_ART package %d is invalid, ignored", i);
+ nr_bad_entries++;
+ continue;
+ }
+ if (!create_dev)
+ continue;
+
+ if (art->source) {
+ result = acpi_bus_get_device(art->source, &adev);
+ if (result)
+ pr_warn("Failed to get source ACPI device\n");
+ }
+ if (art->target) {
+ result = acpi_bus_get_device(art->target, &adev);
+ if (result)
+ pr_warn("Failed to get target ACPI device\n");
+ }
+ }
+
+ *artp = arts;
+ /* don't count bad entries */
+ *art_count -= nr_bad_entries;
+end:
+ kfree(buffer.pointer);
+ return result;
+}
+EXPORT_SYMBOL(acpi_parse_art);
+
+
+/* get device name from acpi handle */
+static void get_single_name(acpi_handle handle, char *name)
+{
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER};
+
+ if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer)))
+ pr_warn("Failed to get device name from acpi handle\n");
+ else {
+ memcpy(name, buffer.pointer, ACPI_NAMESEG_SIZE);
+ kfree(buffer.pointer);
+ }
+}
+
+static int fill_art(char __user *ubuf)
+{
+ int i;
+ int ret;
+ int count;
+ int art_len;
+ struct art *arts = NULL;
+ union art_object *art_user;
+
+ ret = acpi_parse_art(acpi_thermal_rel_handle, &count, &arts, false);
+ if (ret)
+ goto free_art;
+ art_len = count * sizeof(union art_object);
+ art_user = kzalloc(art_len, GFP_KERNEL);
+ if (!art_user) {
+ ret = -ENOMEM;
+ goto free_art;
+ }
+ /* now fill in user art data */
+ for (i = 0; i < count; i++) {
+ /* userspace art needs device name instead of acpi reference */
+ get_single_name(arts[i].source, art_user[i].source_device);
+ get_single_name(arts[i].target, art_user[i].target_device);
+ /* copy the remaining u64 fields (weight, ac0_max..ac9_max) after source and target */
+ memcpy(&art_user[i].weight, &arts[i].weight,
+ sizeof(u64) * (ACPI_NR_ART_ELEMENTS - 2));
+ }
+
+ if (copy_to_user(ubuf, art_user, art_len))
+ ret = -EFAULT;
+ kfree(art_user);
+free_art:
+ kfree(arts);
+ return ret;
+}
+
+static int fill_trt(char __user *ubuf)
+{
+ int i;
+ int ret;
+ int count;
+ int trt_len;
+ struct trt *trts = NULL;
+ union trt_object *trt_user;
+
+ ret = acpi_parse_trt(acpi_thermal_rel_handle, &count, &trts, false);
+ if (ret)
+ goto free_trt;
+ trt_len = count * sizeof(union trt_object);
+ trt_user = kzalloc(trt_len, GFP_KERNEL);
+ if (!trt_user) {
+ ret = -ENOMEM;
+ goto free_trt;
+ }
+ /* now fill in user trt data */
+ for (i = 0; i < count; i++) {
+ /* userspace trt needs device name instead of acpi reference */
+ get_single_name(trts[i].source, trt_user[i].source_device);
+ get_single_name(trts[i].target, trt_user[i].target_device);
+ trt_user[i].sample_period = trts[i].sample_period;
+ trt_user[i].influence = trts[i].influence;
+ }
+
+ if (copy_to_user(ubuf, trt_user, trt_len))
+ ret = -EFAULT;
+ kfree(trt_user);
+free_trt:
+ kfree(trts);
+ return ret;
+}
+
+static long acpi_thermal_rel_ioctl(struct file *f, unsigned int cmd,
+ unsigned long __arg)
+{
+ int ret = 0;
+ unsigned long length = 0;
+ int count = 0;
+ char __user *arg = (void __user *)__arg;
+ struct trt *trts = NULL;
+ struct art *arts = NULL;
+
+ switch (cmd) {
+ case ACPI_THERMAL_GET_TRT_COUNT:
+ ret = acpi_parse_trt(acpi_thermal_rel_handle, &count,
+ &trts, false);
+ kfree(trts);
+ if (!ret)
+ return put_user(count, (unsigned long __user *)__arg);
+ return ret;
+ case ACPI_THERMAL_GET_TRT_LEN:
+ ret = acpi_parse_trt(acpi_thermal_rel_handle, &count,
+ &trts, false);
+ kfree(trts);
+ length = count * sizeof(union trt_object);
+ if (!ret)
+ return put_user(length, (unsigned long __user *)__arg);
+ return ret;
+ case ACPI_THERMAL_GET_TRT:
+ return fill_trt(arg);
+ case ACPI_THERMAL_GET_ART_COUNT:
+ ret = acpi_parse_art(acpi_thermal_rel_handle, &count,
+ &arts, false);
+ kfree(arts);
+ if (!ret)
+ return put_user(count, (unsigned long __user *)__arg);
+ return ret;
+ case ACPI_THERMAL_GET_ART_LEN:
+ ret = acpi_parse_art(acpi_thermal_rel_handle, &count,
+ &arts, false);
+ kfree(arts);
+ length = count * sizeof(union art_object);
+ if (!ret)
+ return put_user(length, (unsigned long __user *)__arg);
+ return ret;
+
+ case ACPI_THERMAL_GET_ART:
+ return fill_art(arg);
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+static const struct file_operations acpi_thermal_rel_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_thermal_rel_open,
+ .release = acpi_thermal_rel_release,
+ .unlocked_ioctl = acpi_thermal_rel_ioctl,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice acpi_thermal_rel_misc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "acpi_thermal_rel",
+ .fops = &acpi_thermal_rel_fops
+};
+
+int acpi_thermal_rel_misc_device_add(acpi_handle handle)
+{
+ acpi_thermal_rel_handle = handle;
+
+ return misc_register(&acpi_thermal_rel_misc_device);
+}
+EXPORT_SYMBOL(acpi_thermal_rel_misc_device_add);
+
+int acpi_thermal_rel_misc_device_remove(acpi_handle handle)
+{
+ misc_deregister(&acpi_thermal_rel_misc_device);
+
+ return 0;
+}
+EXPORT_SYMBOL(acpi_thermal_rel_misc_device_remove);
+
+MODULE_AUTHOR("Zhang Rui <rui.zhang@intel.com>");
+MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@intel.com");
+MODULE_DESCRIPTION("Intel acpi thermal rel misc dev driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.h b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.h
new file mode 100644
index 000000000..58822575f
--- /dev/null
+++ b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ACPI_ACPI_THERMAL_H
+#define __ACPI_ACPI_THERMAL_H
+
+#include <asm/ioctl.h>
+
+#define ACPI_THERMAL_MAGIC 's'
+
+#define ACPI_THERMAL_GET_TRT_LEN _IOR(ACPI_THERMAL_MAGIC, 1, unsigned long)
+#define ACPI_THERMAL_GET_ART_LEN _IOR(ACPI_THERMAL_MAGIC, 2, unsigned long)
+#define ACPI_THERMAL_GET_TRT_COUNT _IOR(ACPI_THERMAL_MAGIC, 3, unsigned long)
+#define ACPI_THERMAL_GET_ART_COUNT _IOR(ACPI_THERMAL_MAGIC, 4, unsigned long)
+
+#define ACPI_THERMAL_GET_TRT _IOR(ACPI_THERMAL_MAGIC, 5, unsigned long)
+#define ACPI_THERMAL_GET_ART _IOR(ACPI_THERMAL_MAGIC, 6, unsigned long)
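+/*
+ * Illustrative userspace sequence against /dev/acpi_thermal_rel (error
+ * handling omitted; the buffer passed to the GET ioctls must be at least
+ * the length reported by the corresponding LEN ioctl):
+ *
+ *   unsigned long count, len;
+ *   int fd = open("/dev/acpi_thermal_rel", O_RDONLY);
+ *   ioctl(fd, ACPI_THERMAL_GET_TRT_COUNT, &count);
+ *   ioctl(fd, ACPI_THERMAL_GET_TRT_LEN, &len);
+ *   ioctl(fd, ACPI_THERMAL_GET_TRT, buf);    // 'len' bytes of union trt_object
+ *
+ * The _ART ioctls follow the same count/len/get pattern with union art_object.
+ */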
+
+struct art {
+ acpi_handle source;
+ acpi_handle target;
+ u64 weight;
+ u64 ac0_max;
+ u64 ac1_max;
+ u64 ac2_max;
+ u64 ac3_max;
+ u64 ac4_max;
+ u64 ac5_max;
+ u64 ac6_max;
+ u64 ac7_max;
+ u64 ac8_max;
+ u64 ac9_max;
+} __packed;
+
+struct trt {
+ acpi_handle source;
+ acpi_handle target;
+ u64 influence;
+ u64 sample_period;
+ u64 reserved1;
+ u64 reserved2;
+ u64 reserved3;
+ u64 reserved4;
+} __packed;
+
+#define ACPI_NR_ART_ELEMENTS 13
+/* for userspace */
+union art_object {
+ struct {
+ char source_device[8]; /* ACPI single name */
+ char target_device[8]; /* ACPI single name */
+ u64 weight;
+ u64 ac0_max_level;
+ u64 ac1_max_level;
+ u64 ac2_max_level;
+ u64 ac3_max_level;
+ u64 ac4_max_level;
+ u64 ac5_max_level;
+ u64 ac6_max_level;
+ u64 ac7_max_level;
+ u64 ac8_max_level;
+ u64 ac9_max_level;
+ };
+ u64 __data[ACPI_NR_ART_ELEMENTS];
+};
+
+union trt_object {
+ struct {
+ char source_device[8]; /* ACPI single name */
+ char target_device[8]; /* ACPI single name */
+ u64 influence;
+ u64 sample_period;
+ u64 reserved[4];
+ };
+ u64 __data[8];
+};
+
+#ifdef __KERNEL__
+int acpi_thermal_rel_misc_device_add(acpi_handle handle);
+int acpi_thermal_rel_misc_device_remove(acpi_handle handle);
+int acpi_parse_art(acpi_handle handle, int *art_count, struct art **arts,
+ bool create_dev);
+int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trts,
+ bool create_dev);
+#endif
+
+#endif /* __ACPI_ACPI_THERMAL_H */
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
new file mode 100644
index 000000000..28913867c
--- /dev/null
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -0,0 +1,614 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * INT3400 thermal driver
+ *
+ * Copyright (C) 2014, Intel Corporation
+ * Authors: Zhang Rui <rui.zhang@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <linux/thermal.h>
+#include "acpi_thermal_rel.h"
+
+#define INT3400_THERMAL_TABLE_CHANGED 0x83
+#define INT3400_ODVP_CHANGED 0x88
+#define INT3400_KEEP_ALIVE 0xA0
+
+enum int3400_thermal_uuid {
+ INT3400_THERMAL_PASSIVE_1,
+ INT3400_THERMAL_ACTIVE,
+ INT3400_THERMAL_CRITICAL,
+ INT3400_THERMAL_ADAPTIVE_PERFORMANCE,
+ INT3400_THERMAL_EMERGENCY_CALL_MODE,
+ INT3400_THERMAL_PASSIVE_2,
+ INT3400_THERMAL_POWER_BOSS,
+ INT3400_THERMAL_VIRTUAL_SENSOR,
+ INT3400_THERMAL_COOLING_MODE,
+ INT3400_THERMAL_HARDWARE_DUTY_CYCLING,
+ INT3400_THERMAL_MAXIMUM_UUID,
+};
+
+static char *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
+ "42A441D6-AE6A-462b-A84B-4A8CE79027D3",
+ "3A95C389-E4B8-4629-A526-C52C88626BAE",
+ "97C68AE7-15FA-499c-B8C9-5DA81D606E0A",
+ "63BE270F-1C11-48FD-A6F7-3AF253FF3E2D",
+ "5349962F-71E6-431D-9AE8-0A635B710AEE",
+ "9E04115A-AE87-4D1C-9500-0F3E340BFE75",
+ "F5A35014-C209-46A4-993A-EB56DE7530A1",
+ "6ED722A7-9240-48A5-B479-31EEF723D7CF",
+ "16CAF1B7-DD38-40ED-B1C1-1B8A1913D531",
+ "BE84BABF-C4D4-403D-B495-3128FD44dAC1",
+};
+
+struct odvp_attr;
+
+struct int3400_thermal_priv {
+ struct acpi_device *adev;
+ struct platform_device *pdev;
+ struct thermal_zone_device *thermal;
+ int art_count;
+ struct art *arts;
+ int trt_count;
+ struct trt *trts;
+ u32 uuid_bitmap;
+ int rel_misc_dev_res;
+ int current_uuid_index;
+ char *data_vault;
+ int odvp_count;
+ int *odvp;
+ struct odvp_attr *odvp_attrs;
+};
+
+static int evaluate_odvp(struct int3400_thermal_priv *priv);
+
+struct odvp_attr {
+ int odvp;
+ struct int3400_thermal_priv *priv;
+ struct device_attribute attr;
+};
+
+static ssize_t data_vault_read(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off, size_t count)
+{
+ memcpy(buf, attr->private + off, count);
+ return count;
+}
+
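+/* size is 0 here; int3400_setup_gddv() fills in .private and .size from GDDV */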
+static BIN_ATTR_RO(data_vault, 0);
+
+static struct bin_attribute *data_attributes[] = {
+ &bin_attr_data_vault,
+ NULL,
+};
+
+static ssize_t imok_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct int3400_thermal_priv *priv = dev_get_drvdata(dev);
+ acpi_status status;
+ int input, ret;
+
+ ret = kstrtouint(buf, 10, &input);
+ if (ret)
+ return ret;
+ status = acpi_execute_simple_method(priv->adev->handle, "IMOK", input);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ return count;
+}
+
+static DEVICE_ATTR_WO(imok);
+
+static struct attribute *imok_attr[] = {
+ &dev_attr_imok.attr,
+ NULL
+};
+
+static const struct attribute_group data_attribute_group = {
+ .bin_attrs = data_attributes,
+ .attrs = imok_attr,
+};
+
+static ssize_t available_uuids_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct int3400_thermal_priv *priv = dev_get_drvdata(dev);
+ int i;
+ int length = 0;
+
+ if (!priv->uuid_bitmap)
+ return sprintf(buf, "UNKNOWN\n");
+
+ for (i = 0; i < INT3400_THERMAL_MAXIMUM_UUID; i++) {
+ if (priv->uuid_bitmap & (1 << i))
+ if (PAGE_SIZE - length > 0)
+ length += scnprintf(&buf[length],
+ PAGE_SIZE - length,
+ "%s\n",
+ int3400_thermal_uuids[i]);
+ }
+
+ return length;
+}
+
+static ssize_t current_uuid_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct int3400_thermal_priv *priv = dev_get_drvdata(dev);
+
+ if (priv->current_uuid_index == -1)
+ return sprintf(buf, "INVALID\n");
+
+ return sprintf(buf, "%s\n",
+ int3400_thermal_uuids[priv->current_uuid_index]);
+}
+
+static ssize_t current_uuid_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct int3400_thermal_priv *priv = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < INT3400_THERMAL_MAXIMUM_UUID; ++i) {
+ if (!strncmp(buf, int3400_thermal_uuids[i],
+ sizeof(int3400_thermal_uuids[i]) - 1)) {
+ /*
+ * If we have a list of supported UUIDs, make sure
+ * this one is supported.
+ */
+ if (priv->uuid_bitmap &&
+ !(priv->uuid_bitmap & (1 << i)))
+ return -EINVAL;
+
+ priv->current_uuid_index = i;
+ return count;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static DEVICE_ATTR_RW(current_uuid);
+static DEVICE_ATTR_RO(available_uuids);
+static struct attribute *uuid_attrs[] = {
+ &dev_attr_available_uuids.attr,
+ &dev_attr_current_uuid.attr,
+ NULL
+};
+
+static const struct attribute_group uuid_attribute_group = {
+ .attrs = uuid_attrs,
+ .name = "uuids"
+};
+
+static int int3400_thermal_get_uuids(struct int3400_thermal_priv *priv)
+{
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *obja, *objb;
+ int i, j;
+ int result = 0;
+ acpi_status status;
+
+ status = acpi_evaluate_object(priv->adev->handle, "IDSP", NULL, &buf);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ obja = (union acpi_object *)buf.pointer;
+ if (obja->type != ACPI_TYPE_PACKAGE) {
+ result = -EINVAL;
+ goto end;
+ }
+
+ for (i = 0; i < obja->package.count; i++) {
+ objb = &obja->package.elements[i];
+ if (objb->type != ACPI_TYPE_BUFFER) {
+ result = -EINVAL;
+ goto end;
+ }
+
+ /* UUID must be 16 bytes */
+ if (objb->buffer.length != 16) {
+ result = -EINVAL;
+ goto end;
+ }
+
+ for (j = 0; j < INT3400_THERMAL_MAXIMUM_UUID; j++) {
+ guid_t guid;
+
+ guid_parse(int3400_thermal_uuids[j], &guid);
+ if (guid_equal((guid_t *)objb->buffer.pointer, &guid)) {
+ priv->uuid_bitmap |= (1 << j);
+ break;
+ }
+ }
+ }
+
+end:
+ kfree(buf.pointer);
+ return result;
+}
+
+static int int3400_thermal_run_osc(acpi_handle handle,
+ enum int3400_thermal_uuid uuid, bool enable)
+{
+ u32 ret, buf[2];
+ acpi_status status;
+ int result = 0;
+ struct acpi_osc_context context = {
+ .uuid_str = NULL,
+ .rev = 1,
+ .cap.length = 8,
+ };
+
+ if (uuid < 0 || uuid >= INT3400_THERMAL_MAXIMUM_UUID)
+ return -EINVAL;
+
+ context.uuid_str = int3400_thermal_uuids[uuid];
+
+ buf[OSC_QUERY_DWORD] = 0;
+ buf[OSC_SUPPORT_DWORD] = enable;
+
+ context.cap.pointer = buf;
+
+ status = acpi_run_osc(handle, &context);
+ if (ACPI_SUCCESS(status)) {
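+ /*
+ * The _OSC return buffer starts with a status/error DWORD; the
+ * granted-capabilities DWORD follows it, hence the 4-byte offset.
+ */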
+ ret = *((u32 *)(context.ret.pointer + 4));
+ if (ret != enable)
+ result = -EPERM;
+ } else
+ result = -EPERM;
+
+ kfree(context.ret.pointer);
+
+ return result;
+}
+
+static ssize_t odvp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct odvp_attr *odvp_attr;
+
+ odvp_attr = container_of(attr, struct odvp_attr, attr);
+
+ return sprintf(buf, "%d\n", odvp_attr->priv->odvp[odvp_attr->odvp]);
+}
+
+static void cleanup_odvp(struct int3400_thermal_priv *priv)
+{
+ int i;
+
+ if (priv->odvp_attrs) {
+ for (i = 0; i < priv->odvp_count; i++) {
+ sysfs_remove_file(&priv->pdev->dev.kobj,
+ &priv->odvp_attrs[i].attr.attr);
+ kfree(priv->odvp_attrs[i].attr.attr.name);
+ }
+ kfree(priv->odvp_attrs);
+ }
+ kfree(priv->odvp);
+ priv->odvp_count = 0;
+}
+
+static int evaluate_odvp(struct int3400_thermal_priv *priv)
+{
+ struct acpi_buffer odvp = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj = NULL;
+ acpi_status status;
+ int i, ret;
+
+ status = acpi_evaluate_object(priv->adev->handle, "ODVP", NULL, &odvp);
+ if (ACPI_FAILURE(status)) {
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ obj = odvp.pointer;
+ if (obj->type != ACPI_TYPE_PACKAGE) {
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ if (priv->odvp == NULL) {
+ priv->odvp_count = obj->package.count;
+ priv->odvp = kmalloc_array(priv->odvp_count, sizeof(int),
+ GFP_KERNEL);
+ if (!priv->odvp) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ }
+
+ if (priv->odvp_attrs == NULL) {
+ priv->odvp_attrs = kcalloc(priv->odvp_count,
+ sizeof(struct odvp_attr),
+ GFP_KERNEL);
+ if (!priv->odvp_attrs) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ for (i = 0; i < priv->odvp_count; i++) {
+ struct odvp_attr *odvp = &priv->odvp_attrs[i];
+
+ sysfs_attr_init(&odvp->attr.attr);
+ odvp->priv = priv;
+ odvp->odvp = i;
+ odvp->attr.attr.name = kasprintf(GFP_KERNEL,
+ "odvp%d", i);
+
+ if (!odvp->attr.attr.name) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ odvp->attr.attr.mode = 0444;
+ odvp->attr.show = odvp_show;
+ odvp->attr.store = NULL;
+ ret = sysfs_create_file(&priv->pdev->dev.kobj,
+ &odvp->attr.attr);
+ if (ret)
+ goto out_err;
+ }
+ }
+
+ for (i = 0; i < obj->package.count; i++) {
+ if (obj->package.elements[i].type == ACPI_TYPE_INTEGER)
+ priv->odvp[i] = obj->package.elements[i].integer.value;
+ }
+
+ kfree(obj);
+ return 0;
+
+out_err:
+ cleanup_odvp(priv);
+ kfree(obj);
+ return ret;
+}
+
+static void int3400_notify(acpi_handle handle,
+ u32 event,
+ void *data)
+{
+ struct int3400_thermal_priv *priv = data;
+ char *thermal_prop[5];
+ int therm_event;
+
+ if (!priv)
+ return;
+
+ switch (event) {
+ case INT3400_THERMAL_TABLE_CHANGED:
+ therm_event = THERMAL_TABLE_CHANGED;
+ break;
+ case INT3400_KEEP_ALIVE:
+ therm_event = THERMAL_EVENT_KEEP_ALIVE;
+ break;
+ case INT3400_ODVP_CHANGED:
+ evaluate_odvp(priv);
+ therm_event = THERMAL_DEVICE_POWER_CAPABILITY_CHANGED;
+ break;
+ default:
+ /* Ignore unknown notification codes sent to INT3400 device */
+ return;
+ }
+
+ thermal_prop[0] = kasprintf(GFP_KERNEL, "NAME=%s", priv->thermal->type);
+ thermal_prop[1] = kasprintf(GFP_KERNEL, "TEMP=%d", priv->thermal->temperature);
+ thermal_prop[2] = kasprintf(GFP_KERNEL, "TRIP=");
+ thermal_prop[3] = kasprintf(GFP_KERNEL, "EVENT=%d", therm_event);
+ thermal_prop[4] = NULL;
+ kobject_uevent_env(&priv->thermal->device.kobj, KOBJ_CHANGE, thermal_prop);
+ kfree(thermal_prop[0]);
+ kfree(thermal_prop[1]);
+ kfree(thermal_prop[2]);
+ kfree(thermal_prop[3]);
+}
+
+static int int3400_thermal_get_temp(struct thermal_zone_device *thermal,
+ int *temp)
+{
+ *temp = 20 * 1000; /* fake temperature sensor reading of 20 C */
+ return 0;
+}
+
+static int int3400_thermal_change_mode(struct thermal_zone_device *thermal,
+ enum thermal_device_mode mode)
+{
+ struct int3400_thermal_priv *priv = thermal->devdata;
+ int result = 0;
+
+ if (!priv)
+ return -EINVAL;
+
+ if (mode != thermal->mode)
+ result = int3400_thermal_run_osc(priv->adev->handle,
+ priv->current_uuid_index,
+ mode == THERMAL_DEVICE_ENABLED);
+
+
+ evaluate_odvp(priv);
+
+ return result;
+}
+
+static struct thermal_zone_device_ops int3400_thermal_ops = {
+ .get_temp = int3400_thermal_get_temp,
+ .change_mode = int3400_thermal_change_mode,
+};
+
+static struct thermal_zone_params int3400_thermal_params = {
+ .governor_name = "user_space",
+ .no_hwmon = true,
+};
+
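+/*
+ * GDDV returns a package containing a single buffer (the OEM "data vault").
+ * Copy it and expose it to userspace through the data_vault sysfs attribute.
+ */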
+static void int3400_setup_gddv(struct int3400_thermal_priv *priv)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+
+ status = acpi_evaluate_object(priv->adev->handle, "GDDV", NULL,
+ &buffer);
+ if (ACPI_FAILURE(status) || !buffer.length)
+ return;
+
+ obj = buffer.pointer;
+ if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 1
+ || obj->package.elements[0].type != ACPI_TYPE_BUFFER) {
+ kfree(buffer.pointer);
+ return;
+ }
+
+ priv->data_vault = kmemdup(obj->package.elements[0].buffer.pointer,
+ obj->package.elements[0].buffer.length,
+ GFP_KERNEL);
+ if (!priv->data_vault) {
+ kfree(buffer.pointer);
+ return;
+ }
+
+ bin_attr_data_vault.private = priv->data_vault;
+ bin_attr_data_vault.size = obj->package.elements[0].buffer.length;
+ kfree(buffer.pointer);
+}
+
+static int int3400_thermal_probe(struct platform_device *pdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ struct int3400_thermal_priv *priv;
+ int result;
+
+ if (!adev)
+ return -ENODEV;
+
+ priv = kzalloc(sizeof(struct int3400_thermal_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pdev = pdev;
+ priv->adev = adev;
+
+ result = int3400_thermal_get_uuids(priv);
+
+ /* Missing IDSP isn't fatal */
+ if (result && result != -ENODEV)
+ goto free_priv;
+
+ priv->current_uuid_index = -1;
+
+ result = acpi_parse_art(priv->adev->handle, &priv->art_count,
+ &priv->arts, true);
+ if (result)
+ dev_dbg(&pdev->dev, "_ART table parsing error\n");
+
+ result = acpi_parse_trt(priv->adev->handle, &priv->trt_count,
+ &priv->trts, true);
+ if (result)
+ dev_dbg(&pdev->dev, "_TRT table parsing error\n");
+
+ platform_set_drvdata(pdev, priv);
+
+ int3400_setup_gddv(priv);
+
+ evaluate_odvp(priv);
+
+ priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
+ priv, &int3400_thermal_ops,
+ &int3400_thermal_params, 0, 0);
+ if (IS_ERR(priv->thermal)) {
+ result = PTR_ERR(priv->thermal);
+ goto free_art_trt;
+ }
+
+ priv->rel_misc_dev_res = acpi_thermal_rel_misc_device_add(
+ priv->adev->handle);
+
+ result = sysfs_create_group(&pdev->dev.kobj, &uuid_attribute_group);
+ if (result)
+ goto free_rel_misc;
+
+ if (priv->data_vault) {
+ result = sysfs_create_group(&pdev->dev.kobj,
+ &data_attribute_group);
+ if (result)
+ goto free_uuid;
+ }
+
+ result = acpi_install_notify_handler(
+ priv->adev->handle, ACPI_DEVICE_NOTIFY, int3400_notify,
+ (void *)priv);
+ if (result)
+ goto free_sysfs;
+
+ return 0;
+
+free_sysfs:
+ cleanup_odvp(priv);
+ if (priv->data_vault) {
+ sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
+ kfree(priv->data_vault);
+ }
+free_uuid:
+ sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group);
+free_rel_misc:
+ if (!priv->rel_misc_dev_res)
+ acpi_thermal_rel_misc_device_remove(priv->adev->handle);
+ thermal_zone_device_unregister(priv->thermal);
+free_art_trt:
+ kfree(priv->trts);
+ kfree(priv->arts);
+free_priv:
+ kfree(priv);
+ return result;
+}
+
+static int int3400_thermal_remove(struct platform_device *pdev)
+{
+ struct int3400_thermal_priv *priv = platform_get_drvdata(pdev);
+
+ acpi_remove_notify_handler(
+ priv->adev->handle, ACPI_DEVICE_NOTIFY,
+ int3400_notify);
+
+ cleanup_odvp(priv);
+
+ if (!priv->rel_misc_dev_res)
+ acpi_thermal_rel_misc_device_remove(priv->adev->handle);
+
+ if (priv->data_vault)
+ sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
+ sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group);
+ thermal_zone_device_unregister(priv->thermal);
+ kfree(priv->data_vault);
+ kfree(priv->trts);
+ kfree(priv->arts);
+ kfree(priv);
+ return 0;
+}
+
+static const struct acpi_device_id int3400_thermal_match[] = {
+ {"INT3400", 0},
+ {"INTC1040", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, int3400_thermal_match);
+
+static struct platform_driver int3400_thermal_driver = {
+ .probe = int3400_thermal_probe,
+ .remove = int3400_thermal_remove,
+ .driver = {
+ .name = "int3400 thermal",
+ .acpi_match_table = ACPI_PTR(int3400_thermal_match),
+ },
+};
+
+module_platform_driver(int3400_thermal_driver);
+
+MODULE_DESCRIPTION("INT3400 Thermal driver");
+MODULE_AUTHOR("Zhang Rui <rui.zhang@intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/thermal/intel/int340x_thermal/int3402_thermal.c b/drivers/thermal/intel/int340x_thermal/int3402_thermal.c
new file mode 100644
index 000000000..43fa351e2
--- /dev/null
+++ b/drivers/thermal/intel/int340x_thermal/int3402_thermal.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * INT3402 thermal driver for memory temperature reporting
+ *
+ * Copyright (C) 2014, Intel Corporation
+ * Authors: Aaron Lu <aaron.lu@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <linux/thermal.h>
+#include "int340x_thermal_zone.h"
+
+#define INT3402_PERF_CHANGED_EVENT 0x80
+#define INT3402_THERMAL_EVENT 0x90
+
+struct int3402_thermal_data {
+ acpi_handle *handle;
+ struct int34x_thermal_zone *int340x_zone;
+};
+
+static void int3402_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct int3402_thermal_data *priv = data;
+
+ if (!priv)
+ return;
+
+ switch (event) {
+ case INT3402_PERF_CHANGED_EVENT:
+ break;
+ case INT3402_THERMAL_EVENT:
+ int340x_thermal_zone_device_update(priv->int340x_zone,
+ THERMAL_TRIP_VIOLATED);
+ break;
+ default:
+ break;
+ }
+}
+
+static int int3402_thermal_probe(struct platform_device *pdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ struct int3402_thermal_data *d;
+ int ret;
+
+ if (!acpi_has_method(adev->handle, "_TMP"))
+ return -ENODEV;
+
+ d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ d->int340x_zone = int340x_thermal_zone_add(adev, NULL);
+ if (IS_ERR(d->int340x_zone))
+ return PTR_ERR(d->int340x_zone);
+
+ ret = acpi_install_notify_handler(adev->handle,
+ ACPI_DEVICE_NOTIFY,
+ int3402_notify,
+ d);
+ if (ret) {
+ int340x_thermal_zone_remove(d->int340x_zone);
+ return ret;
+ }
+
+ d->handle = adev->handle;
+ platform_set_drvdata(pdev, d);
+
+ return 0;
+}
+
+static int int3402_thermal_remove(struct platform_device *pdev)
+{
+ struct int3402_thermal_data *d = platform_get_drvdata(pdev);
+
+ acpi_remove_notify_handler(d->handle,
+ ACPI_DEVICE_NOTIFY, int3402_notify);
+ int340x_thermal_zone_remove(d->int340x_zone);
+
+ return 0;
+}
+
+static const struct acpi_device_id int3402_thermal_match[] = {
+ {"INT3402", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, int3402_thermal_match);
+
+static struct platform_driver int3402_thermal_driver = {
+ .probe = int3402_thermal_probe,
+ .remove = int3402_thermal_remove,
+ .driver = {
+ .name = "int3402 thermal",
+ .acpi_match_table = int3402_thermal_match,
+ },
+};
+
+module_platform_driver(int3402_thermal_driver);
+
+MODULE_DESCRIPTION("INT3402 Thermal driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
new file mode 100644
index 000000000..ec1d58c4c
--- /dev/null
+++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ACPI INT3403 thermal driver
+ * Copyright (c) 2013, Intel Corporation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/acpi.h>
+#include <linux/thermal.h>
+#include <linux/platform_device.h>
+#include "int340x_thermal_zone.h"
+
+#define INT3403_TYPE_SENSOR 0x03
+#define INT3403_TYPE_CHARGER 0x0B
+#define INT3403_TYPE_BATTERY 0x0C
+#define INT3403_PERF_CHANGED_EVENT 0x80
+#define INT3403_PERF_TRIP_POINT_CHANGED 0x81
+#define INT3403_THERMAL_EVENT 0x90
+
+/* Preserved structure for future expandability */
+struct int3403_sensor {
+ struct int34x_thermal_zone *int340x_zone;
+};
+
+struct int3403_performance_state {
+ u64 performance;
+ u64 power;
+ u64 latency;
+ u64 linear;
+ u64 control;
+ u64 raw_performace;
+ char *raw_unit;
+ int reserved;
+};
+
+struct int3403_cdev {
+ struct thermal_cooling_device *cdev;
+ unsigned long max_state;
+};
+
+struct int3403_priv {
+ struct platform_device *pdev;
+ struct acpi_device *adev;
+ unsigned long long type;
+ void *priv;
+};
+
+static void int3403_notify(acpi_handle handle,
+ u32 event, void *data)
+{
+ struct int3403_priv *priv = data;
+ struct int3403_sensor *obj;
+
+ if (!priv)
+ return;
+
+ obj = priv->priv;
+ if (priv->type != INT3403_TYPE_SENSOR || !obj)
+ return;
+
+ switch (event) {
+ case INT3403_PERF_CHANGED_EVENT:
+ break;
+ case INT3403_THERMAL_EVENT:
+ int340x_thermal_zone_device_update(obj->int340x_zone,
+ THERMAL_TRIP_VIOLATED);
+ break;
+ case INT3403_PERF_TRIP_POINT_CHANGED:
+ int340x_thermal_read_trips(obj->int340x_zone);
+ int340x_thermal_zone_device_update(obj->int340x_zone,
+ THERMAL_TRIP_CHANGED);
+ break;
+ default:
+ dev_dbg(&priv->pdev->dev, "Unsupported event [0x%x]\n", event);
+ break;
+ }
+}
+
+static int int3403_sensor_add(struct int3403_priv *priv)
+{
+ int result = 0;
+ struct int3403_sensor *obj;
+
+ obj = devm_kzalloc(&priv->pdev->dev, sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ priv->priv = obj;
+
+ obj->int340x_zone = int340x_thermal_zone_add(priv->adev, NULL);
+ if (IS_ERR(obj->int340x_zone))
+ return PTR_ERR(obj->int340x_zone);
+
+ result = acpi_install_notify_handler(priv->adev->handle,
+ ACPI_DEVICE_NOTIFY, int3403_notify,
+ (void *)priv);
+ if (result)
+ goto err_free_obj;
+
+ return 0;
+
+ err_free_obj:
+ int340x_thermal_zone_remove(obj->int340x_zone);
+ return result;
+}
+
+static int int3403_sensor_remove(struct int3403_priv *priv)
+{
+ struct int3403_sensor *obj = priv->priv;
+
+ acpi_remove_notify_handler(priv->adev->handle,
+ ACPI_DEVICE_NOTIFY, int3403_notify);
+ int340x_thermal_zone_remove(obj->int340x_zone);
+
+ return 0;
+}
+
+/* INT3403 Cooling devices */
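+/*
+ * Charger and battery participants are exposed as cooling devices: the
+ * number of states comes from the PPSS package (max_state = entries - 1),
+ * the current state is read via PPPC and set via SPPC.
+ */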
+static int int3403_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct int3403_priv *priv = cdev->devdata;
+ struct int3403_cdev *obj = priv->priv;
+
+ *state = obj->max_state;
+ return 0;
+}
+
+static int int3403_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct int3403_priv *priv = cdev->devdata;
+ unsigned long long level;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(priv->adev->handle, "PPPC", NULL, &level);
+ if (ACPI_SUCCESS(status)) {
+ *state = level;
+ return 0;
+ } else
+ return -EINVAL;
+}
+
+static int
+int3403_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
+{
+ struct int3403_priv *priv = cdev->devdata;
+ acpi_status status;
+
+ status = acpi_execute_simple_method(priv->adev->handle, "SPPC", state);
+ if (ACPI_SUCCESS(status))
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static const struct thermal_cooling_device_ops int3403_cooling_ops = {
+ .get_max_state = int3403_get_max_state,
+ .get_cur_state = int3403_get_cur_state,
+ .set_cur_state = int3403_set_cur_state,
+};
+
+static int int3403_cdev_add(struct int3403_priv *priv)
+{
+ int result = 0;
+ acpi_status status;
+ struct int3403_cdev *obj;
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *p;
+
+ obj = devm_kzalloc(&priv->pdev->dev, sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ status = acpi_evaluate_object(priv->adev->handle, "PPSS", NULL, &buf);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ p = buf.pointer;
+ if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
+ pr_warn("Invalid PPSS data\n");
+ kfree(buf.pointer);
+ return -EFAULT;
+ }
+
+ priv->priv = obj;
+ obj->max_state = p->package.count - 1;
+ obj->cdev =
+ thermal_cooling_device_register(acpi_device_bid(priv->adev),
+ priv, &int3403_cooling_ops);
+ if (IS_ERR(obj->cdev))
+ result = PTR_ERR(obj->cdev);
+
+ kfree(buf.pointer);
+ /* TODO: add ACPI notification support */
+
+ return result;
+}
+
+static int int3403_cdev_remove(struct int3403_priv *priv)
+{
+ struct int3403_cdev *obj = priv->priv;
+
+ thermal_cooling_device_unregister(obj->cdev);
+ return 0;
+}
+
+static int int3403_add(struct platform_device *pdev)
+{
+ struct int3403_priv *priv;
+ int result = 0;
+ unsigned long long tmp;
+ acpi_status status;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct int3403_priv),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pdev = pdev;
+ priv->adev = ACPI_COMPANION(&(pdev->dev));
+ if (!priv->adev) {
+ result = -EINVAL;
+ goto err;
+ }
+
+
+ status = acpi_evaluate_integer(priv->adev->handle, "_TMP",
+ NULL, &tmp);
+ if (ACPI_FAILURE(status)) {
+ status = acpi_evaluate_integer(priv->adev->handle, "PTYP",
+ NULL, &priv->type);
+ if (ACPI_FAILURE(status)) {
+ result = -EINVAL;
+ goto err;
+ }
+ } else {
+ priv->type = INT3403_TYPE_SENSOR;
+ }
+
+ platform_set_drvdata(pdev, priv);
+ switch (priv->type) {
+ case INT3403_TYPE_SENSOR:
+ result = int3403_sensor_add(priv);
+ break;
+ case INT3403_TYPE_CHARGER:
+ case INT3403_TYPE_BATTERY:
+ result = int3403_cdev_add(priv);
+ break;
+ default:
+ result = -EINVAL;
+ }
+
+ if (result)
+ goto err;
+ return result;
+
+err:
+ return result;
+}
+
+static int int3403_remove(struct platform_device *pdev)
+{
+ struct int3403_priv *priv = platform_get_drvdata(pdev);
+
+ switch (priv->type) {
+ case INT3403_TYPE_SENSOR:
+ int3403_sensor_remove(priv);
+ break;
+ case INT3403_TYPE_CHARGER:
+ case INT3403_TYPE_BATTERY:
+ int3403_cdev_remove(priv);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const struct acpi_device_id int3403_device_ids[] = {
+ {"INT3403", 0},
+ {"INTC1043", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, int3403_device_ids);
+
+static struct platform_driver int3403_driver = {
+ .probe = int3403_add,
+ .remove = int3403_remove,
+ .driver = {
+ .name = "int3403 thermal",
+ .acpi_match_table = int3403_device_ids,
+ },
+};
+
+module_platform_driver(int3403_driver);
+
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ACPI INT3403 thermal driver");
diff --git a/drivers/thermal/intel/int340x_thermal/int3406_thermal.c b/drivers/thermal/intel/int340x_thermal/int3406_thermal.c
new file mode 100644
index 000000000..f5e42fc2a
--- /dev/null
+++ b/drivers/thermal/intel/int340x_thermal/int3406_thermal.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * INT3406 thermal driver for display participant device
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Authors: Aaron Lu <aaron.lu@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <linux/backlight.h>
+#include <linux/thermal.h>
+#include <acpi/video.h>
+
+#define INT3406_BRIGHTNESS_LIMITS_CHANGED 0x80
+
+struct int3406_thermal_data {
+ int upper_limit;
+ int lower_limit;
+ acpi_handle handle;
+ struct acpi_video_device_brightness *br;
+ struct backlight_device *raw_bd;
+ struct thermal_cooling_device *cooling_dev;
+};
+
+/*
+ * According to the ACPI spec,
+ * "Each brightness level is represented by a number between 0 and 100,
+ * and can be thought of as a percentage. For example, 50 can be 50%
+ * power consumption or 50% brightness, as defined by the OEM."
+ *
+ * As the INT3406 device uses this value to communicate with the native
+ * graphics driver, we assume that it represents the percentage of
+ * brightness only.
+ */
+#define ACPI_TO_RAW(v, d) (d->raw_bd->props.max_brightness * v / 100)
+#define RAW_TO_ACPI(v, d) (v * 100 / d->raw_bd->props.max_brightness)
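+/*
+ * Example (assuming a raw backlight with max_brightness == 255):
+ * ACPI_TO_RAW(50, d) == 255 * 50 / 100 == 127, and
+ * RAW_TO_ACPI(127, d) == 127 * 100 / 255 == 49 with integer division.
+ */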
+
+static int
+int3406_thermal_get_max_state(struct thermal_cooling_device *cooling_dev,
+ unsigned long *state)
+{
+ struct int3406_thermal_data *d = cooling_dev->devdata;
+
+ *state = d->upper_limit - d->lower_limit;
+ return 0;
+}
+
+static int
+int3406_thermal_set_cur_state(struct thermal_cooling_device *cooling_dev,
+ unsigned long state)
+{
+ struct int3406_thermal_data *d = cooling_dev->devdata;
+ int acpi_level, raw_level;
+
+ if (state > d->upper_limit - d->lower_limit)
+ return -EINVAL;
+
+ acpi_level = d->br->levels[d->upper_limit - state];
+
+ raw_level = ACPI_TO_RAW(acpi_level, d);
+
+ return backlight_device_set_brightness(d->raw_bd, raw_level);
+}
+
+static int
+int3406_thermal_get_cur_state(struct thermal_cooling_device *cooling_dev,
+ unsigned long *state)
+{
+ struct int3406_thermal_data *d = cooling_dev->devdata;
+ int acpi_level;
+ int index;
+
+ acpi_level = RAW_TO_ACPI(d->raw_bd->props.brightness, d);
+
+ /*
+ * There is no 1:1 mapping between the firmware interface levels and
+ * the raw interface levels, so find the first firmware level that is
+ * at or above the current raw level.
+ */
+ for (index = d->lower_limit; index < d->upper_limit; index++) {
+ if (acpi_level <= d->br->levels[index])
+ break;
+ }
+
+ *state = d->upper_limit - index;
+ return 0;
+}
+
+static const struct thermal_cooling_device_ops video_cooling_ops = {
+ .get_max_state = int3406_thermal_get_max_state,
+ .get_cur_state = int3406_thermal_get_cur_state,
+ .set_cur_state = int3406_thermal_set_cur_state,
+};
+
+static int int3406_thermal_get_index(int *array, int nr, int value)
+{
+ int i;
+
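+ /*
+ * The first two entries of the ACPI _BCL-derived level array hold the
+ * firmware's full-power AC and battery brightness values, so usable
+ * brightness levels start at index 2.
+ */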
+ for (i = 2; i < nr; i++) {
+ if (array[i] == value)
+ break;
+ }
+ return i == nr ? -ENOENT : i;
+}
+
+static void int3406_thermal_get_limit(struct int3406_thermal_data *d)
+{
+ acpi_status status;
+ unsigned long long lower_limit, upper_limit;
+
+ status = acpi_evaluate_integer(d->handle, "DDDL", NULL, &lower_limit);
+ if (ACPI_SUCCESS(status))
+ d->lower_limit = int3406_thermal_get_index(d->br->levels,
+ d->br->count, lower_limit);
+
+ status = acpi_evaluate_integer(d->handle, "DDPC", NULL, &upper_limit);
+ if (ACPI_SUCCESS(status))
+ d->upper_limit = int3406_thermal_get_index(d->br->levels,
+ d->br->count, upper_limit);
+
+ /* lower_limit and upper_limit should always be set */
+ d->lower_limit = d->lower_limit > 0 ? d->lower_limit : 2;
+ d->upper_limit = d->upper_limit > 0 ? d->upper_limit : d->br->count - 1;
+}
+
+static void int3406_notify(acpi_handle handle, u32 event, void *data)
+{
+ if (event == INT3406_BRIGHTNESS_LIMITS_CHANGED)
+ int3406_thermal_get_limit(data);
+}
+
+static int int3406_thermal_probe(struct platform_device *pdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ struct int3406_thermal_data *d;
+ struct backlight_device *bd;
+ int ret;
+
+ if (!ACPI_HANDLE(&pdev->dev))
+ return -ENODEV;
+
+ d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+ d->handle = ACPI_HANDLE(&pdev->dev);
+
+ bd = backlight_device_get_by_type(BACKLIGHT_RAW);
+ if (!bd)
+ return -ENODEV;
+ d->raw_bd = bd;
+
+ ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br, NULL);
+ if (ret)
+ return ret;
+
+ int3406_thermal_get_limit(d);
+
+ d->cooling_dev = thermal_cooling_device_register(acpi_device_bid(adev),
+ d, &video_cooling_ops);
+ if (IS_ERR(d->cooling_dev))
+ goto err;
+
+ ret = acpi_install_notify_handler(adev->handle, ACPI_DEVICE_NOTIFY,
+ int3406_notify, d);
+ if (ret)
+ goto err_cdev;
+
+ platform_set_drvdata(pdev, d);
+
+ return 0;
+
+err_cdev:
+ thermal_cooling_device_unregister(d->cooling_dev);
+err:
+ kfree(d->br);
+ return -ENODEV;
+}
+
+static int int3406_thermal_remove(struct platform_device *pdev)
+{
+ struct int3406_thermal_data *d = platform_get_drvdata(pdev);
+
+ thermal_cooling_device_unregister(d->cooling_dev);
+ kfree(d->br);
+ return 0;
+}
+
+static const struct acpi_device_id int3406_thermal_match[] = {
+ {"INT3406", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, int3406_thermal_match);
+
+static struct platform_driver int3406_thermal_driver = {
+ .probe = int3406_thermal_probe,
+ .remove = int3406_thermal_remove,
+ .driver = {
+ .name = "int3406 thermal",
+ .acpi_match_table = int3406_thermal_match,
+ },
+};
+
+module_platform_driver(int3406_thermal_driver);
+
+MODULE_DESCRIPTION("INT3406 Thermal driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
new file mode 100644
index 000000000..6952f4e23
--- /dev/null
+++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * int340x_thermal_zone.c
+ * Copyright (c) 2015, Intel Corporation.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/thermal.h>
+#include <linux/units.h>
+#include "int340x_thermal_zone.h"
+
+static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone,
+ int *temp)
+{
+ struct int34x_thermal_zone *d = zone->devdata;
+ unsigned long long tmp;
+ acpi_status status;
+
+ if (d->override_ops && d->override_ops->get_temp)
+ return d->override_ops->get_temp(zone, temp);
+
+ status = acpi_evaluate_integer(d->adev->handle, "_TMP", NULL, &tmp);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ if (d->lpat_table) {
+ int conv_temp;
+
+ conv_temp = acpi_lpat_raw_to_temp(d->lpat_table, (int)tmp);
+ if (conv_temp < 0)
+ return conv_temp;
+
+ *temp = (unsigned long)conv_temp * 10;
+ } else
+ /* _TMP returns the temperature in tenths of degrees Kelvin */
+ *temp = deci_kelvin_to_millicelsius(tmp);
+
+ return 0;
+}
+
+static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone,
+ int trip, int *temp)
+{
+ struct int34x_thermal_zone *d = zone->devdata;
+ int i, ret = 0;
+
+ if (d->override_ops && d->override_ops->get_trip_temp)
+ return d->override_ops->get_trip_temp(zone, trip, temp);
+
+ mutex_lock(&d->trip_mutex);
+
+ if (trip < d->aux_trip_nr)
+ *temp = d->aux_trips[trip];
+ else if (trip == d->crt_trip_id)
+ *temp = d->crt_temp;
+ else if (trip == d->psv_trip_id)
+ *temp = d->psv_temp;
+ else if (trip == d->hot_trip_id)
+ *temp = d->hot_temp;
+ else {
+ for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) {
+ if (d->act_trips[i].valid &&
+ d->act_trips[i].id == trip) {
+ *temp = d->act_trips[i].temp;
+ break;
+ }
+ }
+ if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&d->trip_mutex);
+
+ return ret;
+}
+
+static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
+ int trip,
+ enum thermal_trip_type *type)
+{
+ struct int34x_thermal_zone *d = zone->devdata;
+ int i, ret = 0;
+
+ if (d->override_ops && d->override_ops->get_trip_type)
+ return d->override_ops->get_trip_type(zone, trip, type);
+
+ mutex_lock(&d->trip_mutex);
+
+ if (trip < d->aux_trip_nr)
+ *type = THERMAL_TRIP_PASSIVE;
+ else if (trip == d->crt_trip_id)
+ *type = THERMAL_TRIP_CRITICAL;
+ else if (trip == d->hot_trip_id)
+ *type = THERMAL_TRIP_HOT;
+ else if (trip == d->psv_trip_id)
+ *type = THERMAL_TRIP_PASSIVE;
+ else {
+ for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) {
+ if (d->act_trips[i].valid &&
+ d->act_trips[i].id == trip) {
+ *type = THERMAL_TRIP_ACTIVE;
+ break;
+ }
+ }
+ if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&d->trip_mutex);
+
+ return ret;
+}
+
+static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone,
+ int trip, int temp)
+{
+ struct int34x_thermal_zone *d = zone->devdata;
+ acpi_status status;
+ char name[10];
+
+ if (d->override_ops && d->override_ops->set_trip_temp)
+ return d->override_ops->set_trip_temp(zone, trip, temp);
+
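+ /* each auxiliary trip has a matching PATn ACPI method that programs its threshold */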
+ snprintf(name, sizeof(name), "PAT%d", trip);
+ status = acpi_execute_simple_method(d->adev->handle, name,
+ millicelsius_to_deci_kelvin(temp));
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ d->aux_trips[trip] = temp;
+
+ return 0;
+}
+
+
+static int int340x_thermal_get_trip_hyst(struct thermal_zone_device *zone,
+ int trip, int *temp)
+{
+ struct int34x_thermal_zone *d = zone->devdata;
+ acpi_status status;
+ unsigned long long hyst;
+
+ if (d->override_ops && d->override_ops->get_trip_hyst)
+ return d->override_ops->get_trip_hyst(zone, trip, temp);
+
+ status = acpi_evaluate_integer(d->adev->handle, "GTSH", NULL, &hyst);
+ if (ACPI_FAILURE(status))
+ *temp = 0;
+ else
+ *temp = hyst * 100;
+
+ return 0;
+}
+
+static struct thermal_zone_device_ops int340x_thermal_zone_ops = {
+ .get_temp = int340x_thermal_get_zone_temp,
+ .get_trip_temp = int340x_thermal_get_trip_temp,
+ .get_trip_type = int340x_thermal_get_trip_type,
+ .set_trip_temp = int340x_thermal_set_trip_temp,
+ .get_trip_hyst = int340x_thermal_get_trip_hyst,
+};
+
+static int int340x_thermal_get_trip_config(acpi_handle handle, char *name,
+ int *temp)
+{
+ unsigned long long r;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(handle, name, NULL, &r);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ *temp = deci_kelvin_to_millicelsius(r);
+
+ return 0;
+}
+
+int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone)
+{
+ int trip_cnt = int34x_zone->aux_trip_nr;
+ int i;
+
+ mutex_lock(&int34x_zone->trip_mutex);
+
+ int34x_zone->crt_trip_id = -1;
+ if (!int340x_thermal_get_trip_config(int34x_zone->adev->handle, "_CRT",
+ &int34x_zone->crt_temp))
+ int34x_zone->crt_trip_id = trip_cnt++;
+
+ int34x_zone->hot_trip_id = -1;
+ if (!int340x_thermal_get_trip_config(int34x_zone->adev->handle, "_HOT",
+ &int34x_zone->hot_temp))
+ int34x_zone->hot_trip_id = trip_cnt++;
+
+ int34x_zone->psv_trip_id = -1;
+ if (!int340x_thermal_get_trip_config(int34x_zone->adev->handle, "_PSV",
+ &int34x_zone->psv_temp))
+ int34x_zone->psv_trip_id = trip_cnt++;
+
+ for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) {
+ char name[5] = { '_', 'A', 'C', '0' + i, '\0' };
+
+ if (int340x_thermal_get_trip_config(int34x_zone->adev->handle,
+ name,
+ &int34x_zone->act_trips[i].temp))
+ break;
+
+ int34x_zone->act_trips[i].id = trip_cnt++;
+ int34x_zone->act_trips[i].valid = true;
+ }
+
+ mutex_unlock(&int34x_zone->trip_mutex);
+
+ return trip_cnt;
+}
+EXPORT_SYMBOL_GPL(int340x_thermal_read_trips);
+
+static struct thermal_zone_params int340x_thermal_params = {
+ .governor_name = "user_space",
+ .no_hwmon = true,
+};
+
+struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
+ struct thermal_zone_device_ops *override_ops)
+{
+ struct int34x_thermal_zone *int34x_thermal_zone;
+ acpi_status status;
+ unsigned long long trip_cnt;
+ int trip_mask = 0;
+ int ret;
+
+ int34x_thermal_zone = kzalloc(sizeof(*int34x_thermal_zone),
+ GFP_KERNEL);
+ if (!int34x_thermal_zone)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&int34x_thermal_zone->trip_mutex);
+
+ int34x_thermal_zone->adev = adev;
+ int34x_thermal_zone->override_ops = override_ops;
+
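+	/* PATC reports the number of aux (programmable) trip points */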
+ status = acpi_evaluate_integer(adev->handle, "PATC", NULL, &trip_cnt);
+ if (ACPI_FAILURE(status))
+ trip_cnt = 0;
+ else {
+ int i;
+
+ int34x_thermal_zone->aux_trips =
+ kcalloc(trip_cnt,
+ sizeof(*int34x_thermal_zone->aux_trips),
+ GFP_KERNEL);
+ if (!int34x_thermal_zone->aux_trips) {
+ ret = -ENOMEM;
+ goto err_trip_alloc;
+ }
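+		/* Only the PATC-declared aux trips are writable from user space */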
+ trip_mask = BIT(trip_cnt) - 1;
+ int34x_thermal_zone->aux_trip_nr = trip_cnt;
+ for (i = 0; i < trip_cnt; ++i)
+ int34x_thermal_zone->aux_trips[i] = THERMAL_TEMP_INVALID;
+ }
+
+ trip_cnt = int340x_thermal_read_trips(int34x_thermal_zone);
+
+ int34x_thermal_zone->lpat_table = acpi_lpat_get_conversion_table(
+ adev->handle);
+
+ int34x_thermal_zone->zone = thermal_zone_device_register(
+ acpi_device_bid(adev),
+ trip_cnt,
+ trip_mask, int34x_thermal_zone,
+ &int340x_thermal_zone_ops,
+ &int340x_thermal_params,
+ 0, 0);
+ if (IS_ERR(int34x_thermal_zone->zone)) {
+ ret = PTR_ERR(int34x_thermal_zone->zone);
+ goto err_thermal_zone;
+ }
+ ret = thermal_zone_device_enable(int34x_thermal_zone->zone);
+ if (ret)
+ goto err_enable;
+
+ return int34x_thermal_zone;
+
+err_enable:
+ thermal_zone_device_unregister(int34x_thermal_zone->zone);
+err_thermal_zone:
+ acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
+ kfree(int34x_thermal_zone->aux_trips);
+err_trip_alloc:
+ mutex_destroy(&int34x_thermal_zone->trip_mutex);
+ kfree(int34x_thermal_zone);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(int340x_thermal_zone_add);
+
+void int340x_thermal_zone_remove(struct int34x_thermal_zone
+ *int34x_thermal_zone)
+{
+ thermal_zone_device_unregister(int34x_thermal_zone->zone);
+ acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
+ kfree(int34x_thermal_zone->aux_trips);
+ mutex_destroy(&int34x_thermal_zone->trip_mutex);
+ kfree(int34x_thermal_zone);
+}
+EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove);
+
+MODULE_AUTHOR("Aaron Lu <aaron.lu@intel.com>");
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_DESCRIPTION("Intel INT340x common thermal zone handler");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
new file mode 100644
index 000000000..8f9872afd
--- /dev/null
+++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * int340x_thermal_zone.h
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef __INT340X_THERMAL_ZONE_H__
+#define __INT340X_THERMAL_ZONE_H__
+
+#include <acpi/acpi_lpat.h>
+
+#define INT340X_THERMAL_MAX_ACT_TRIP_COUNT 10
+
+struct active_trip {
+ int temp;
+ int id;
+ bool valid;
+};
+
+struct int34x_thermal_zone {
+ struct acpi_device *adev;
+ struct active_trip act_trips[INT340X_THERMAL_MAX_ACT_TRIP_COUNT];
+ unsigned long *aux_trips;
+ int aux_trip_nr;
+ int psv_temp;
+ int psv_trip_id;
+ int crt_temp;
+ int crt_trip_id;
+ int hot_temp;
+ int hot_trip_id;
+ struct thermal_zone_device *zone;
+ struct thermal_zone_device_ops *override_ops;
+ void *priv_data;
+ struct acpi_lpat_conversion_table *lpat_table;
+ struct mutex trip_mutex;
+};
+
+struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *,
+ struct thermal_zone_device_ops *override_ops);
+void int340x_thermal_zone_remove(struct int34x_thermal_zone *);
+int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone);
+
+static inline void int340x_thermal_zone_set_priv_data(
+ struct int34x_thermal_zone *tzone, void *priv_data)
+{
+ tzone->priv_data = priv_data;
+}
+
+static inline void *int340x_thermal_zone_get_priv_data(
+ struct int34x_thermal_zone *tzone)
+{
+ return tzone->priv_data;
+}
+
+static inline void int340x_thermal_zone_device_update(
+ struct int34x_thermal_zone *tzone,
+ enum thermal_notify_event event)
+{
+ thermal_zone_device_update(tzone->zone, event);
+}
+
+#endif
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
new file mode 100644
index 000000000..f5204f96f
--- /dev/null
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -0,0 +1,794 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * processor_thermal_device.c
+ * Copyright (c) 2014, Intel Corporation.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <linux/thermal.h>
+#include <linux/cpuhotplug.h>
+#include <linux/intel_rapl.h>
+#include "int340x_thermal_zone.h"
+#include "../intel_soc_dts_iosf.h"
+
+/* Broadwell-U/HSB thermal reporting device */
+#define PCI_DEVICE_ID_PROC_BDW_THERMAL 0x1603
+#define PCI_DEVICE_ID_PROC_HSB_THERMAL 0x0A03
+
+/* Skylake thermal reporting device */
+#define PCI_DEVICE_ID_PROC_SKL_THERMAL 0x1903
+
+/* CannonLake thermal reporting device */
+#define PCI_DEVICE_ID_PROC_CNL_THERMAL 0x5a03
+#define PCI_DEVICE_ID_PROC_CFL_THERMAL 0x3E83
+
+/* Braswell thermal reporting device */
+#define PCI_DEVICE_ID_PROC_BSW_THERMAL 0x22DC
+
+/* Broxton thermal reporting device */
+#define PCI_DEVICE_ID_PROC_BXT0_THERMAL 0x0A8C
+#define PCI_DEVICE_ID_PROC_BXT1_THERMAL 0x1A8C
+#define PCI_DEVICE_ID_PROC_BXTX_THERMAL 0x4A8C
+#define PCI_DEVICE_ID_PROC_BXTP_THERMAL 0x5A8C
+
+/* GeminiLake thermal reporting device */
+#define PCI_DEVICE_ID_PROC_GLK_THERMAL 0x318C
+
+/* IceLake thermal reporting device */
+#define PCI_DEVICE_ID_PROC_ICL_THERMAL 0x8a03
+
+/* JasperLake thermal reporting device */
+#define PCI_DEVICE_ID_PROC_JSL_THERMAL 0x4E03
+
+/* TigerLake thermal reporting device */
+#define PCI_DEVICE_ID_PROC_TGL_THERMAL 0x9A03
+
+#define DRV_NAME "proc_thermal"
+
+struct power_config {
+ u32 index;
+ u32 min_uw;
+ u32 max_uw;
+ u32 tmin_us;
+ u32 tmax_us;
+ u32 step_uw;
+};
+
+struct proc_thermal_device {
+ struct device *dev;
+ struct acpi_device *adev;
+ struct power_config power_limits[2];
+ struct int34x_thermal_zone *int340x_zone;
+ struct intel_soc_dts_sensors *soc_dts;
+ void __iomem *mmio_base;
+};
+
+enum proc_thermal_emum_mode_type {
+ PROC_THERMAL_NONE,
+ PROC_THERMAL_PCI,
+ PROC_THERMAL_PLATFORM_DEV
+};
+
+struct rapl_mmio_regs {
+ u64 reg_unit;
+ u64 regs[RAPL_DOMAIN_MAX][RAPL_DOMAIN_REG_MAX];
+ int limits[RAPL_DOMAIN_MAX];
+};
+
+/*
+ * We can have only one type of enumeration, PCI or Platform,
+ * not both. So we don't need instance specific data.
+ */
+static enum proc_thermal_emum_mode_type proc_thermal_emum_mode =
+ PROC_THERMAL_NONE;
+
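+/*
+ * PPCC appears to report the limits in mW and ms; the sysfs attributes
+ * below expose them in uW and us, hence the multiplication by 1000.
+ */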
+#define POWER_LIMIT_SHOW(index, suffix) \
+static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct proc_thermal_device *proc_dev = dev_get_drvdata(dev); \
+ \
+ if (proc_thermal_emum_mode == PROC_THERMAL_NONE) { \
+ dev_warn(dev, "Attempted to get power limit before device was initialized!\n"); \
+ return 0; \
+ } \
+ \
+ return sprintf(buf, "%lu\n",\
+ (unsigned long)proc_dev->power_limits[index].suffix * 1000); \
+}
+
+POWER_LIMIT_SHOW(0, min_uw)
+POWER_LIMIT_SHOW(0, max_uw)
+POWER_LIMIT_SHOW(0, step_uw)
+POWER_LIMIT_SHOW(0, tmin_us)
+POWER_LIMIT_SHOW(0, tmax_us)
+
+POWER_LIMIT_SHOW(1, min_uw)
+POWER_LIMIT_SHOW(1, max_uw)
+POWER_LIMIT_SHOW(1, step_uw)
+POWER_LIMIT_SHOW(1, tmin_us)
+POWER_LIMIT_SHOW(1, tmax_us)
+
+static DEVICE_ATTR_RO(power_limit_0_min_uw);
+static DEVICE_ATTR_RO(power_limit_0_max_uw);
+static DEVICE_ATTR_RO(power_limit_0_step_uw);
+static DEVICE_ATTR_RO(power_limit_0_tmin_us);
+static DEVICE_ATTR_RO(power_limit_0_tmax_us);
+
+static DEVICE_ATTR_RO(power_limit_1_min_uw);
+static DEVICE_ATTR_RO(power_limit_1_max_uw);
+static DEVICE_ATTR_RO(power_limit_1_step_uw);
+static DEVICE_ATTR_RO(power_limit_1_tmin_us);
+static DEVICE_ATTR_RO(power_limit_1_tmax_us);
+
+static struct attribute *power_limit_attrs[] = {
+ &dev_attr_power_limit_0_min_uw.attr,
+ &dev_attr_power_limit_1_min_uw.attr,
+ &dev_attr_power_limit_0_max_uw.attr,
+ &dev_attr_power_limit_1_max_uw.attr,
+ &dev_attr_power_limit_0_step_uw.attr,
+ &dev_attr_power_limit_1_step_uw.attr,
+ &dev_attr_power_limit_0_tmin_us.attr,
+ &dev_attr_power_limit_1_tmin_us.attr,
+ &dev_attr_power_limit_0_tmax_us.attr,
+ &dev_attr_power_limit_1_tmax_us.attr,
+ NULL
+};
+
+static const struct attribute_group power_limit_attribute_group = {
+ .attrs = power_limit_attrs,
+ .name = "power_limits"
+};
+
+static ssize_t tcc_offset_degree_celsius_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u64 val;
+ int err;
+
+ err = rdmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, &val);
+ if (err)
+ return err;
+
+ val = (val >> 24) & 0x3f;
+ return sprintf(buf, "%d\n", (int)val);
+}
+
+static int tcc_offset_update(unsigned int tcc)
+{
+ u64 val;
+ int err;
+
+ if (tcc > 63)
+ return -EINVAL;
+
+ err = rdmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, &val);
+ if (err)
+ return err;
+
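+	/* Bit 31 set presumably means the TCC offset is locked by firmware */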
+ if (val & BIT(31))
+ return -EPERM;
+
+ val &= ~GENMASK_ULL(29, 24);
+ val |= (tcc & 0x3f) << 24;
+
+ err = wrmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, val);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tcc_offset_save = -1;
+
+static ssize_t tcc_offset_degree_celsius_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ unsigned int tcc;
+ u64 val;
+ int err;
+
+ err = rdmsrl_safe(MSR_PLATFORM_INFO, &val);
+ if (err)
+ return err;
+
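+	/* MSR_PLATFORM_INFO bit 30: programmable TCC activation offset supported */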
+ if (!(val & BIT(30)))
+ return -EACCES;
+
+ if (kstrtouint(buf, 0, &tcc))
+ return -EINVAL;
+
+ err = tcc_offset_update(tcc);
+ if (err)
+ return err;
+
+ tcc_offset_save = tcc;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(tcc_offset_degree_celsius);
+
+static int stored_tjmax; /* since it is fixed, we can have local storage */
+
+static int get_tjmax(void)
+{
+ u32 eax, edx;
+ u32 val;
+ int err;
+
+ err = rdmsr_safe(MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
+ if (err)
+ return err;
+
+ val = (eax >> 16) & 0xff;
+ if (val)
+ return val;
+
+ return -EINVAL;
+}
+
+static int read_temp_msr(int *temp)
+{
+ int cpu;
+ u32 eax, edx;
+ int err;
+ unsigned long curr_temp_off = 0;
+
+ *temp = 0;
+
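+	/*
+	 * IA32_THERM_STATUS bit 31 is the reading-valid flag and bits 22:16
+	 * hold the offset below TjMax. Keep the smallest offset seen, i.e.
+	 * the reading of the hottest online CPU.
+	 */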
+ for_each_online_cpu(cpu) {
+ err = rdmsr_safe_on_cpu(cpu, MSR_IA32_THERM_STATUS, &eax,
+ &edx);
+ if (err)
+ goto err_ret;
+ else {
+ if (eax & 0x80000000) {
+ curr_temp_off = (eax >> 16) & 0x7f;
+ if (!*temp || curr_temp_off < *temp)
+ *temp = curr_temp_off;
+ } else {
+ err = -EINVAL;
+ goto err_ret;
+ }
+ }
+ }
+
+ return 0;
+err_ret:
+ return err;
+}
+
+static int proc_thermal_get_zone_temp(struct thermal_zone_device *zone,
+ int *temp)
+{
+ int ret;
+
+ ret = read_temp_msr(temp);
+ if (!ret)
+ *temp = (stored_tjmax - *temp) * 1000;
+
+ return ret;
+}
+
+static struct thermal_zone_device_ops proc_thermal_local_ops = {
+ .get_temp = proc_thermal_get_zone_temp,
+};
+
+static int proc_thermal_read_ppcc(struct proc_thermal_device *proc_priv)
+{
+ int i;
+ acpi_status status;
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *elements, *ppcc;
+ union acpi_object *p;
+ int ret = 0;
+
+ status = acpi_evaluate_object(proc_priv->adev->handle, "PPCC",
+ NULL, &buf);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ p = buf.pointer;
+ if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
+ dev_err(proc_priv->dev, "Invalid PPCC data\n");
+ ret = -EFAULT;
+ goto free_buffer;
+ }
+
+ if (!p->package.count) {
+ dev_err(proc_priv->dev, "Invalid PPCC package size\n");
+ ret = -EFAULT;
+ goto free_buffer;
+ }
+
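+	/*
+	 * The first package element is skipped; each remaining element is
+	 * expected to be a 6-entry package in the order {index, min, max,
+	 * time_min, time_max, step}, matching the power_config fields.
+	 */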
+ for (i = 0; i < min((int)p->package.count - 1, 2); ++i) {
+ elements = &(p->package.elements[i+1]);
+ if (elements->type != ACPI_TYPE_PACKAGE ||
+ elements->package.count != 6) {
+ ret = -EFAULT;
+ goto free_buffer;
+ }
+ ppcc = elements->package.elements;
+ proc_priv->power_limits[i].index = ppcc[0].integer.value;
+ proc_priv->power_limits[i].min_uw = ppcc[1].integer.value;
+ proc_priv->power_limits[i].max_uw = ppcc[2].integer.value;
+ proc_priv->power_limits[i].tmin_us = ppcc[3].integer.value;
+ proc_priv->power_limits[i].tmax_us = ppcc[4].integer.value;
+ proc_priv->power_limits[i].step_uw = ppcc[5].integer.value;
+ }
+
+free_buffer:
+ kfree(buf.pointer);
+
+ return ret;
+}
+
+#define PROC_POWER_CAPABILITY_CHANGED 0x83
+static void proc_thermal_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct proc_thermal_device *proc_priv = data;
+
+ if (!proc_priv)
+ return;
+
+ switch (event) {
+ case PROC_POWER_CAPABILITY_CHANGED:
+ proc_thermal_read_ppcc(proc_priv);
+ int340x_thermal_zone_device_update(proc_priv->int340x_zone,
+ THERMAL_DEVICE_POWER_CAPABILITY_CHANGED);
+ break;
+ default:
+ dev_dbg(proc_priv->dev, "Unsupported event [0x%x]\n", event);
+ break;
+ }
+}
+
+
+static int proc_thermal_add(struct device *dev,
+ struct proc_thermal_device **priv)
+{
+ struct proc_thermal_device *proc_priv;
+ struct acpi_device *adev;
+ acpi_status status;
+ unsigned long long tmp;
+ struct thermal_zone_device_ops *ops = NULL;
+ int ret;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
+ return -ENODEV;
+
+ proc_priv = devm_kzalloc(dev, sizeof(*proc_priv), GFP_KERNEL);
+ if (!proc_priv)
+ return -ENOMEM;
+
+ proc_priv->dev = dev;
+ proc_priv->adev = adev;
+ *priv = proc_priv;
+
+ ret = proc_thermal_read_ppcc(proc_priv);
+ if (ret)
+ return ret;
+
+ status = acpi_evaluate_integer(adev->handle, "_TMP", NULL, &tmp);
+ if (ACPI_FAILURE(status)) {
+ /* there is no _TMP method, add local method */
+ stored_tjmax = get_tjmax();
+ if (stored_tjmax > 0)
+ ops = &proc_thermal_local_ops;
+ }
+
+ proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops);
+	if (IS_ERR(proc_priv->int340x_zone))
+		return PTR_ERR(proc_priv->int340x_zone);
+
+ ret = acpi_install_notify_handler(adev->handle, ACPI_DEVICE_NOTIFY,
+ proc_thermal_notify,
+ (void *)proc_priv);
+ if (ret)
+ goto remove_zone;
+
+ return 0;
+
+remove_zone:
+ int340x_thermal_zone_remove(proc_priv->int340x_zone);
+
+ return ret;
+}
+
+static void proc_thermal_remove(struct proc_thermal_device *proc_priv)
+{
+ acpi_remove_notify_handler(proc_priv->adev->handle,
+ ACPI_DEVICE_NOTIFY, proc_thermal_notify);
+ int340x_thermal_zone_remove(proc_priv->int340x_zone);
+ sysfs_remove_file(&proc_priv->dev->kobj, &dev_attr_tcc_offset_degree_celsius.attr);
+ sysfs_remove_group(&proc_priv->dev->kobj,
+ &power_limit_attribute_group);
+}
+
+static int int3401_add(struct platform_device *pdev)
+{
+ struct proc_thermal_device *proc_priv;
+ int ret;
+
+ if (proc_thermal_emum_mode == PROC_THERMAL_PCI) {
+ dev_err(&pdev->dev, "error: enumerated as PCI dev\n");
+ return -ENODEV;
+ }
+
+ ret = proc_thermal_add(&pdev->dev, &proc_priv);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, proc_priv);
+ proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV;
+
+ dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PLATFORM_DEV\n");
+
+ ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_tcc_offset_degree_celsius.attr);
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &power_limit_attribute_group);
+ if (ret)
+ sysfs_remove_file(&pdev->dev.kobj, &dev_attr_tcc_offset_degree_celsius.attr);
+
+ return ret;
+}
+
+static int int3401_remove(struct platform_device *pdev)
+{
+ proc_thermal_remove(platform_get_drvdata(pdev));
+
+ return 0;
+}
+
+static irqreturn_t proc_thermal_pci_msi_irq(int irq, void *devid)
+{
+ struct proc_thermal_device *proc_priv;
+ struct pci_dev *pdev = devid;
+
+ proc_priv = pci_get_drvdata(pdev);
+
+ intel_soc_dts_iosf_interrupt_handler(proc_priv->soc_dts);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_PROC_THERMAL_MMIO_RAPL
+
+#define MCHBAR 0
+
+/* RAPL Support via MMIO interface */
+static struct rapl_if_priv rapl_mmio_priv;
+
+static int rapl_mmio_cpu_online(unsigned int cpu)
+{
+ struct rapl_package *rp;
+
+ /* mmio rapl supports package 0 only for now */
+ if (topology_physical_package_id(cpu))
+ return 0;
+
+ rp = rapl_find_package_domain(cpu, &rapl_mmio_priv);
+ if (!rp) {
+ rp = rapl_add_package(cpu, &rapl_mmio_priv);
+ if (IS_ERR(rp))
+ return PTR_ERR(rp);
+ }
+ cpumask_set_cpu(cpu, &rp->cpumask);
+ return 0;
+}
+
+static int rapl_mmio_cpu_down_prep(unsigned int cpu)
+{
+ struct rapl_package *rp;
+ int lead_cpu;
+
+ rp = rapl_find_package_domain(cpu, &rapl_mmio_priv);
+ if (!rp)
+ return 0;
+
+ cpumask_clear_cpu(cpu, &rp->cpumask);
+ lead_cpu = cpumask_first(&rp->cpumask);
+ if (lead_cpu >= nr_cpu_ids)
+ rapl_remove_package(rp);
+ else if (rp->lead_cpu == cpu)
+ rp->lead_cpu = lead_cpu;
+ return 0;
+}
+
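+/* ra->reg holds an ioremapped MMIO address set up in proc_thermal_rapl_add() */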
+static int rapl_mmio_read_raw(int cpu, struct reg_action *ra)
+{
+ if (!ra->reg)
+ return -EINVAL;
+
+ ra->value = readq((void __iomem *)ra->reg);
+ ra->value &= ra->mask;
+ return 0;
+}
+
+static int rapl_mmio_write_raw(int cpu, struct reg_action *ra)
+{
+ u64 val;
+
+ if (!ra->reg)
+ return -EINVAL;
+
+ val = readq((void __iomem *)ra->reg);
+ val &= ~ra->mask;
+ val |= ra->value;
+ writeq(val, (void __iomem *)ra->reg);
+ return 0;
+}
+
+static int proc_thermal_rapl_add(struct pci_dev *pdev,
+ struct proc_thermal_device *proc_priv,
+ struct rapl_mmio_regs *rapl_regs)
+{
+ enum rapl_domain_reg_id reg;
+ enum rapl_domain_type domain;
+ int ret;
+
+ if (!rapl_regs)
+ return 0;
+
+ ret = pcim_iomap_regions(pdev, 1 << MCHBAR, DRV_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot reserve PCI memory region\n");
+ return -ENOMEM;
+ }
+
+ proc_priv->mmio_base = pcim_iomap_table(pdev)[MCHBAR];
+
+ for (domain = RAPL_DOMAIN_PACKAGE; domain < RAPL_DOMAIN_MAX; domain++) {
+ for (reg = RAPL_DOMAIN_REG_LIMIT; reg < RAPL_DOMAIN_REG_MAX; reg++)
+ if (rapl_regs->regs[domain][reg])
+ rapl_mmio_priv.regs[domain][reg] =
+ (u64)proc_priv->mmio_base +
+ rapl_regs->regs[domain][reg];
+ rapl_mmio_priv.limits[domain] = rapl_regs->limits[domain];
+ }
+ rapl_mmio_priv.reg_unit = (u64)proc_priv->mmio_base + rapl_regs->reg_unit;
+
+ rapl_mmio_priv.read_raw = rapl_mmio_read_raw;
+ rapl_mmio_priv.write_raw = rapl_mmio_write_raw;
+
+ rapl_mmio_priv.control_type = powercap_register_control_type(NULL, "intel-rapl-mmio", NULL);
+ if (IS_ERR(rapl_mmio_priv.control_type)) {
+ pr_debug("failed to register powercap control_type.\n");
+ return PTR_ERR(rapl_mmio_priv.control_type);
+ }
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powercap/rapl:online",
+ rapl_mmio_cpu_online, rapl_mmio_cpu_down_prep);
+ if (ret < 0) {
+ powercap_unregister_control_type(rapl_mmio_priv.control_type);
+ rapl_mmio_priv.control_type = NULL;
+ return ret;
+ }
+ rapl_mmio_priv.pcap_rapl_online = ret;
+
+ return 0;
+}
+
+static void proc_thermal_rapl_remove(void)
+{
+ if (IS_ERR_OR_NULL(rapl_mmio_priv.control_type))
+ return;
+
+ cpuhp_remove_state(rapl_mmio_priv.pcap_rapl_online);
+ powercap_unregister_control_type(rapl_mmio_priv.control_type);
+}
+
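+/* Register offsets below are relative to the MCHBAR mapping (PCI BAR 0) */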
+static const struct rapl_mmio_regs rapl_mmio_hsw = {
+ .reg_unit = 0x5938,
+ .regs[RAPL_DOMAIN_PACKAGE] = { 0x59a0, 0x593c, 0x58f0, 0, 0x5930},
+ .regs[RAPL_DOMAIN_DRAM] = { 0x58e0, 0x58e8, 0x58ec, 0, 0},
+ .limits[RAPL_DOMAIN_PACKAGE] = 2,
+ .limits[RAPL_DOMAIN_DRAM] = 2,
+};
+
+#else
+
+static int proc_thermal_rapl_add(struct pci_dev *pdev,
+ struct proc_thermal_device *proc_priv,
+ struct rapl_mmio_regs *rapl_regs)
+{
+ return 0;
+}
+static void proc_thermal_rapl_remove(void) {}
+static const struct rapl_mmio_regs rapl_mmio_hsw;
+
+#endif /* CONFIG_PROC_THERMAL_MMIO_RAPL */
+
+static int proc_thermal_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct proc_thermal_device *proc_priv;
+ int ret;
+
+ if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) {
+ dev_err(&pdev->dev, "error: enumerated as platform dev\n");
+ return -ENODEV;
+ }
+
+ ret = pcim_enable_device(pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "error: could not enable device\n");
+ return ret;
+ }
+
+ ret = proc_thermal_add(&pdev->dev, &proc_priv);
+ if (ret)
+ return ret;
+
+ ret = proc_thermal_rapl_add(pdev, proc_priv,
+ (struct rapl_mmio_regs *)id->driver_data);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add RAPL MMIO interface\n");
+ proc_thermal_remove(proc_priv);
+ return ret;
+ }
+
+ pci_set_drvdata(pdev, proc_priv);
+ proc_thermal_emum_mode = PROC_THERMAL_PCI;
+
+ if (pdev->device == PCI_DEVICE_ID_PROC_BSW_THERMAL) {
+		/*
+		 * Enumerate additional DTS sensors available via IOSF.
+		 * This is not treated as a failure condition if no aux DTSs
+		 * are enabled or their setup fails: the driver already
+		 * exposes sensors that can be accessed via ACPI/MSR, so the
+		 * auxiliary DTSs are best effort only.
+		 */
+ proc_priv->soc_dts = intel_soc_dts_iosf_init(
+ INTEL_SOC_DTS_INTERRUPT_MSI, 2, 0);
+
+ if (!IS_ERR(proc_priv->soc_dts) && pdev->irq) {
+ ret = pci_enable_msi(pdev);
+ if (!ret) {
+ ret = request_threaded_irq(pdev->irq, NULL,
+ proc_thermal_pci_msi_irq,
+ IRQF_ONESHOT, "proc_thermal",
+ pdev);
+ if (ret) {
+ intel_soc_dts_iosf_exit(
+ proc_priv->soc_dts);
+ pci_disable_msi(pdev);
+ proc_priv->soc_dts = NULL;
+ }
+ }
+ } else
+ dev_err(&pdev->dev, "No auxiliary DTSs enabled\n");
+ }
+
+ dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PCI\n");
+
+ ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_tcc_offset_degree_celsius.attr);
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &power_limit_attribute_group);
+ if (ret)
+ sysfs_remove_file(&pdev->dev.kobj, &dev_attr_tcc_offset_degree_celsius.attr);
+
+ return ret;
+}
+
+static void proc_thermal_pci_remove(struct pci_dev *pdev)
+{
+ struct proc_thermal_device *proc_priv = pci_get_drvdata(pdev);
+
+ if (proc_priv->soc_dts) {
+ intel_soc_dts_iosf_exit(proc_priv->soc_dts);
+ if (pdev->irq) {
+ free_irq(pdev->irq, pdev);
+ pci_disable_msi(pdev);
+ }
+ }
+ proc_thermal_rapl_remove();
+ proc_thermal_remove(proc_priv);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int proc_thermal_resume(struct device *dev)
+{
+ struct proc_thermal_device *proc_dev;
+
+ proc_dev = dev_get_drvdata(dev);
+ proc_thermal_read_ppcc(proc_dev);
+
+ if (tcc_offset_save >= 0)
+ tcc_offset_update(tcc_offset_save);
+
+ return 0;
+}
+#else
+#define proc_thermal_resume NULL
+#endif
+
+static SIMPLE_DEV_PM_OPS(proc_thermal_pm, NULL, proc_thermal_resume);
+
+static const struct pci_device_id proc_thermal_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BDW_THERMAL)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_HSB_THERMAL)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_SKL_THERMAL),
+ .driver_data = (kernel_ulong_t)&rapl_mmio_hsw, },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BSW_THERMAL)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BXT0_THERMAL)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BXT1_THERMAL)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BXTX_THERMAL)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BXTP_THERMAL)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_CNL_THERMAL)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_CFL_THERMAL)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_GLK_THERMAL)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_ICL_THERMAL),
+ .driver_data = (kernel_ulong_t)&rapl_mmio_hsw, },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_JSL_THERMAL)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_TGL_THERMAL),
+ .driver_data = (kernel_ulong_t)&rapl_mmio_hsw, },
+ { 0, },
+};
+
+MODULE_DEVICE_TABLE(pci, proc_thermal_pci_ids);
+
+static struct pci_driver proc_thermal_pci_driver = {
+ .name = DRV_NAME,
+ .probe = proc_thermal_pci_probe,
+ .remove = proc_thermal_pci_remove,
+ .id_table = proc_thermal_pci_ids,
+ .driver.pm = &proc_thermal_pm,
+};
+
+static const struct acpi_device_id int3401_device_ids[] = {
+ {"INT3401", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, int3401_device_ids);
+
+static struct platform_driver int3401_driver = {
+ .probe = int3401_add,
+ .remove = int3401_remove,
+ .driver = {
+ .name = "int3401 thermal",
+ .acpi_match_table = int3401_device_ids,
+ .pm = &proc_thermal_pm,
+ },
+};
+
+static int __init proc_thermal_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&int3401_driver);
+ if (ret)
+ return ret;
+
+ ret = pci_register_driver(&proc_thermal_pci_driver);
+
+ return ret;
+}
+
+static void __exit proc_thermal_exit(void)
+{
+ platform_driver_unregister(&int3401_driver);
+ pci_unregister_driver(&proc_thermal_pci_driver);
+}
+
+module_init(proc_thermal_init);
+module_exit(proc_thermal_exit);
+
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/intel/intel_bxt_pmic_thermal.c b/drivers/thermal/intel/intel_bxt_pmic_thermal.c
new file mode 100644
index 000000000..6312c6ba0
--- /dev/null
+++ b/drivers/thermal/intel/intel_bxt_pmic_thermal.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Broxton PMIC thermal driver
+ *
+ * Copyright (C) 2016 Intel Corporation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/thermal.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/mfd/intel_soc_pmic.h>
+
+#define BXTWC_THRM0IRQ 0x4E04
+#define BXTWC_THRM1IRQ 0x4E05
+#define BXTWC_THRM2IRQ 0x4E06
+#define BXTWC_MTHRM0IRQ 0x4E12
+#define BXTWC_MTHRM1IRQ 0x4E13
+#define BXTWC_MTHRM2IRQ 0x4E14
+#define BXTWC_STHRM0IRQ 0x4F19
+#define BXTWC_STHRM1IRQ 0x4F1A
+#define BXTWC_STHRM2IRQ 0x4F1B
+
+struct trip_config_map {
+ u16 irq_reg;
+ u16 irq_en;
+ u16 evt_stat;
+ u8 irq_mask;
+ u8 irq_en_mask;
+ u8 evt_mask;
+ u8 trip_num;
+};
+
+struct thermal_irq_map {
+ char handle[20];
+ int num_trips;
+ const struct trip_config_map *trip_config;
+};
+
+struct pmic_thermal_data {
+ const struct thermal_irq_map *maps;
+ int num_maps;
+};
+
+static const struct trip_config_map bxtwc_str0_trip_config[] = {
+ {
+ .irq_reg = BXTWC_THRM0IRQ,
+ .irq_mask = 0x01,
+ .irq_en = BXTWC_MTHRM0IRQ,
+ .irq_en_mask = 0x01,
+ .evt_stat = BXTWC_STHRM0IRQ,
+ .evt_mask = 0x01,
+ .trip_num = 0
+ },
+ {
+ .irq_reg = BXTWC_THRM0IRQ,
+ .irq_mask = 0x10,
+ .irq_en = BXTWC_MTHRM0IRQ,
+ .irq_en_mask = 0x10,
+ .evt_stat = BXTWC_STHRM0IRQ,
+ .evt_mask = 0x10,
+ .trip_num = 1
+ }
+};
+
+static const struct trip_config_map bxtwc_str1_trip_config[] = {
+ {
+ .irq_reg = BXTWC_THRM0IRQ,
+ .irq_mask = 0x02,
+ .irq_en = BXTWC_MTHRM0IRQ,
+ .irq_en_mask = 0x02,
+ .evt_stat = BXTWC_STHRM0IRQ,
+ .evt_mask = 0x02,
+ .trip_num = 0
+ },
+ {
+ .irq_reg = BXTWC_THRM0IRQ,
+ .irq_mask = 0x20,
+ .irq_en = BXTWC_MTHRM0IRQ,
+ .irq_en_mask = 0x20,
+ .evt_stat = BXTWC_STHRM0IRQ,
+ .evt_mask = 0x20,
+ .trip_num = 1
+ },
+};
+
+static const struct trip_config_map bxtwc_str2_trip_config[] = {
+ {
+ .irq_reg = BXTWC_THRM0IRQ,
+ .irq_mask = 0x04,
+ .irq_en = BXTWC_MTHRM0IRQ,
+ .irq_en_mask = 0x04,
+ .evt_stat = BXTWC_STHRM0IRQ,
+ .evt_mask = 0x04,
+ .trip_num = 0
+ },
+ {
+ .irq_reg = BXTWC_THRM0IRQ,
+ .irq_mask = 0x40,
+ .irq_en = BXTWC_MTHRM0IRQ,
+ .irq_en_mask = 0x40,
+ .evt_stat = BXTWC_STHRM0IRQ,
+ .evt_mask = 0x40,
+ .trip_num = 1
+ },
+};
+
+static const struct trip_config_map bxtwc_str3_trip_config[] = {
+ {
+ .irq_reg = BXTWC_THRM2IRQ,
+ .irq_mask = 0x10,
+ .irq_en = BXTWC_MTHRM2IRQ,
+ .irq_en_mask = 0x10,
+ .evt_stat = BXTWC_STHRM2IRQ,
+ .evt_mask = 0x10,
+ .trip_num = 0
+ },
+};
+
+static const struct thermal_irq_map bxtwc_thermal_irq_map[] = {
+ {
+ .handle = "STR0",
+ .trip_config = bxtwc_str0_trip_config,
+ .num_trips = ARRAY_SIZE(bxtwc_str0_trip_config),
+ },
+ {
+ .handle = "STR1",
+ .trip_config = bxtwc_str1_trip_config,
+ .num_trips = ARRAY_SIZE(bxtwc_str1_trip_config),
+ },
+ {
+ .handle = "STR2",
+ .trip_config = bxtwc_str2_trip_config,
+ .num_trips = ARRAY_SIZE(bxtwc_str2_trip_config),
+ },
+ {
+ .handle = "STR3",
+ .trip_config = bxtwc_str3_trip_config,
+ .num_trips = ARRAY_SIZE(bxtwc_str3_trip_config),
+ },
+};
+
+static const struct pmic_thermal_data bxtwc_thermal_data = {
+ .maps = bxtwc_thermal_irq_map,
+ .num_maps = ARRAY_SIZE(bxtwc_thermal_irq_map),
+};
+
+static irqreturn_t pmic_thermal_irq_handler(int irq, void *data)
+{
+ struct platform_device *pdev = data;
+ struct thermal_zone_device *tzd;
+ struct pmic_thermal_data *td;
+ struct intel_soc_pmic *pmic;
+ struct regmap *regmap;
+ u8 reg_val, mask, irq_stat;
+ u16 reg, evt_stat_reg;
+ int i, j, ret;
+
+ pmic = dev_get_drvdata(pdev->dev.parent);
+ regmap = pmic->regmap;
+ td = (struct pmic_thermal_data *)
+ platform_get_device_id(pdev)->driver_data;
+
+ /* Resolve thermal irqs */
+ for (i = 0; i < td->num_maps; i++) {
+ for (j = 0; j < td->maps[i].num_trips; j++) {
+ reg = td->maps[i].trip_config[j].irq_reg;
+ mask = td->maps[i].trip_config[j].irq_mask;
+ /*
+ * Read the irq register to resolve whether the
+ * interrupt was triggered for this sensor
+ */
+ if (regmap_read(regmap, reg, &ret))
+ return IRQ_HANDLED;
+
+ reg_val = (u8)ret;
+ irq_stat = ((u8)ret & mask);
+
+ if (!irq_stat)
+ continue;
+
+ /*
+ * Read the status register to find out what
+ * event occurred i.e a high or a low
+ */
+ evt_stat_reg = td->maps[i].trip_config[j].evt_stat;
+ if (regmap_read(regmap, evt_stat_reg, &ret))
+ return IRQ_HANDLED;
+
+ tzd = thermal_zone_get_zone_by_name(td->maps[i].handle);
+ if (!IS_ERR(tzd))
+ thermal_zone_device_update(tzd,
+ THERMAL_EVENT_UNSPECIFIED);
+
+ /* Clear the appropriate irq */
+ regmap_write(regmap, reg, reg_val & mask);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int pmic_thermal_probe(struct platform_device *pdev)
+{
+ struct regmap_irq_chip_data *regmap_irq_chip;
+ struct pmic_thermal_data *thermal_data;
+ int ret, irq, virq, i, j, pmic_irq_count;
+ struct intel_soc_pmic *pmic;
+ struct regmap *regmap;
+ struct device *dev;
+ u16 reg;
+ u8 mask;
+
+ dev = &pdev->dev;
+ pmic = dev_get_drvdata(pdev->dev.parent);
+ if (!pmic) {
+ dev_err(dev, "Failed to get struct intel_soc_pmic pointer\n");
+ return -ENODEV;
+ }
+
+ thermal_data = (struct pmic_thermal_data *)
+ platform_get_device_id(pdev)->driver_data;
+ if (!thermal_data) {
+ dev_err(dev, "No thermal data initialized!!\n");
+ return -ENODEV;
+ }
+
+ regmap = pmic->regmap;
+ regmap_irq_chip = pmic->irq_chip_data;
+
+ pmic_irq_count = 0;
+ while ((irq = platform_get_irq(pdev, pmic_irq_count)) != -ENXIO) {
+ virq = regmap_irq_get_virq(regmap_irq_chip, irq);
+ if (virq < 0) {
+ dev_err(dev, "failed to get virq by irq %d\n", irq);
+ return virq;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, virq,
+ NULL, pmic_thermal_irq_handler,
+ IRQF_ONESHOT, "pmic_thermal", pdev);
+
+ if (ret) {
+ dev_err(dev, "request irq(%d) failed: %d\n", virq, ret);
+ return ret;
+ }
+ pmic_irq_count++;
+ }
+
+ /* Enable thermal interrupts */
+ for (i = 0; i < thermal_data->num_maps; i++) {
+ for (j = 0; j < thermal_data->maps[i].num_trips; j++) {
+ reg = thermal_data->maps[i].trip_config[j].irq_en;
+ mask = thermal_data->maps[i].trip_config[j].irq_en_mask;
+ ret = regmap_update_bits(regmap, reg, mask, 0x00);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id pmic_thermal_id_table[] = {
+ {
+ .name = "bxt_wcove_thermal",
+ .driver_data = (kernel_ulong_t)&bxtwc_thermal_data,
+ },
+ {},
+};
+
+static struct platform_driver pmic_thermal_driver = {
+ .probe = pmic_thermal_probe,
+ .driver = {
+ .name = "pmic_thermal",
+ },
+ .id_table = pmic_thermal_id_table,
+};
+
+MODULE_DEVICE_TABLE(platform, pmic_thermal_id_table);
+module_platform_driver(pmic_thermal_driver);
+
+MODULE_AUTHOR("Yegnesh S Iyer <yegnesh.s.iyer@intel.com>");
+MODULE_DESCRIPTION("Intel Broxton PMIC Thermal Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/intel/intel_pch_thermal.c b/drivers/thermal/intel/intel_pch_thermal.c
new file mode 100644
index 000000000..3b813ebb6
--- /dev/null
+++ b/drivers/thermal/intel/intel_pch_thermal.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* intel_pch_thermal.c - Intel PCH Thermal driver
+ *
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * Authors:
+ * Tushar Dave <tushar.n.dave@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/thermal.h>
+#include <linux/units.h>
+#include <linux/pm.h>
+
+/* Intel PCH thermal Device IDs */
+#define PCH_THERMAL_DID_HSW_1 0x9C24 /* Haswell PCH */
+#define PCH_THERMAL_DID_HSW_2 0x8C24 /* Haswell PCH */
+#define PCH_THERMAL_DID_WPT 0x9CA4 /* Wildcat Point */
+#define PCH_THERMAL_DID_SKL 0x9D31 /* Skylake PCH */
+#define PCH_THERMAL_DID_SKL_H 0xA131 /* Skylake PCH 100 series */
+#define PCH_THERMAL_DID_CNL	0x9DF9	/* CNL PCH */
+#define PCH_THERMAL_DID_CNL_H 0xA379 /* CNL-H PCH */
+#define PCH_THERMAL_DID_CNL_LP 0x02F9 /* CNL-LP PCH */
+#define PCH_THERMAL_DID_CML_H	0x06F9	/* CML-H PCH */
+
+/* Wildcat Point-LP PCH Thermal registers */
+#define WPT_TEMP 0x0000 /* Temperature */
+#define WPT_TSC 0x04 /* Thermal Sensor Control */
+#define WPT_TSS 0x06 /* Thermal Sensor Status */
+#define WPT_TSEL 0x08 /* Thermal Sensor Enable and Lock */
+#define WPT_TSREL 0x0A /* Thermal Sensor Report Enable and Lock */
+#define WPT_TSMIC 0x0C /* Thermal Sensor SMI Control */
+#define WPT_CTT 0x0010 /* Catastrophic Trip Point */
+#define WPT_TAHV 0x0014 /* Thermal Alert High Value */
+#define WPT_TALV 0x0018 /* Thermal Alert Low Value */
+#define WPT_TL 0x00000040 /* Throttle Value */
+#define WPT_PHL 0x0060 /* PCH Hot Level */
+#define WPT_PHLC 0x62 /* PHL Control */
+#define WPT_TAS 0x80 /* Thermal Alert Status */
+#define WPT_TSPIEN 0x82 /* PCI Interrupt Event Enables */
+#define WPT_TSGPEN 0x84 /* General Purpose Event Enables */
+
+/* Wildcat Point-LP PCH Thermal Register bit definitions */
+#define WPT_TEMP_TSR 0x01ff /* Temp TS Reading */
+#define WPT_TSC_CPDE 0x01 /* Catastrophic Power-Down Enable */
+#define WPT_TSS_TSDSS 0x10 /* Thermal Sensor Dynamic Shutdown Status */
+#define WPT_TSS_GPES 0x08 /* GPE status */
+#define WPT_TSEL_ETS 0x01 /* Enable TS */
+#define WPT_TSEL_PLDB 0x80 /* TSEL Policy Lock-Down Bit */
+#define WPT_TL_TOL 0x000001FF /* T0 Level */
+#define WPT_TL_T1L 0x1ff00000 /* T1 Level */
+#define WPT_TL_TTEN 0x20000000 /* TT Enable */
+
+static char driver_name[] = "Intel PCH thermal driver";
+
+struct pch_thermal_device {
+ void __iomem *hw_base;
+ const struct pch_dev_ops *ops;
+ struct pci_dev *pdev;
+ struct thermal_zone_device *tzd;
+ int crt_trip_id;
+ unsigned long crt_temp;
+ int hot_trip_id;
+ unsigned long hot_temp;
+ int psv_trip_id;
+ unsigned long psv_temp;
+ bool bios_enabled;
+};
+
+#ifdef CONFIG_ACPI
+
+/*
+ * On some platforms there is a companion ACPI device, which adds a
+ * passive trip temperature via the _PSV method. There is no dedicated
+ * passive temperature setting in the MMIO interface of this PCI device.
+ */
+static void pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd,
+ int *nr_trips)
+{
+ struct acpi_device *adev;
+
+ ptd->psv_trip_id = -1;
+
+ adev = ACPI_COMPANION(&ptd->pdev->dev);
+ if (adev) {
+ unsigned long long r;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(adev->handle, "_PSV", NULL,
+ &r);
+ if (ACPI_SUCCESS(status)) {
+ unsigned long trip_temp;
+
+ trip_temp = deci_kelvin_to_millicelsius(r);
+ if (trip_temp) {
+ ptd->psv_temp = trip_temp;
+ ptd->psv_trip_id = *nr_trips;
+ ++(*nr_trips);
+ }
+ }
+ }
+}
+#else
+static void pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd,
+ int *nr_trips)
+{
+ ptd->psv_trip_id = -1;
+
+}
+#endif
+
+static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips)
+{
+ u8 tsel;
+ u16 trip_temp;
+
+ *nr_trips = 0;
+
+ /* Check if BIOS has already enabled thermal sensor */
+ if (WPT_TSEL_ETS & readb(ptd->hw_base + WPT_TSEL)) {
+ ptd->bios_enabled = true;
+ goto read_trips;
+ }
+
+ tsel = readb(ptd->hw_base + WPT_TSEL);
+ /*
+	 * When TSEL's Policy Lock-Down bit is 1, TSEL becomes read-only.
+	 * In that case the thermal sensor cannot be enabled. Bail out.
+ */
+ if (tsel & WPT_TSEL_PLDB) {
+ dev_err(&ptd->pdev->dev, "Sensor can't be enabled\n");
+ return -ENODEV;
+ }
+
+ writeb(tsel|WPT_TSEL_ETS, ptd->hw_base + WPT_TSEL);
+ if (!(WPT_TSEL_ETS & readb(ptd->hw_base + WPT_TSEL))) {
+ dev_err(&ptd->pdev->dev, "Sensor can't be enabled\n");
+ return -ENODEV;
+ }
+
+read_trips:
+ ptd->crt_trip_id = -1;
+ trip_temp = readw(ptd->hw_base + WPT_CTT);
+ trip_temp &= 0x1FF;
+ if (trip_temp) {
+ /* Resolution of 1/2 degree C and an offset of -50C */
+ ptd->crt_temp = trip_temp * 1000 / 2 - 50000;
+ ptd->crt_trip_id = 0;
+ ++(*nr_trips);
+ }
+
+ ptd->hot_trip_id = -1;
+ trip_temp = readw(ptd->hw_base + WPT_PHL);
+ trip_temp &= 0x1FF;
+ if (trip_temp) {
+ /* Resolution of 1/2 degree C and an offset of -50C */
+ ptd->hot_temp = trip_temp * 1000 / 2 - 50000;
+ ptd->hot_trip_id = *nr_trips;
+ ++(*nr_trips);
+ }
+
+ pch_wpt_add_acpi_psv_trip(ptd, nr_trips);
+
+ return 0;
+}
+
+static int pch_wpt_get_temp(struct pch_thermal_device *ptd, int *temp)
+{
+ u16 wpt_temp;
+
+ wpt_temp = WPT_TEMP_TSR & readw(ptd->hw_base + WPT_TEMP);
+
+ /* Resolution of 1/2 degree C and an offset of -50C */
+ *temp = (wpt_temp * 1000 / 2 - 50000);
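+	/* e.g. a raw reading of 148 (0x94) yields 148 * 500 - 50000 = 24000 mC */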
+
+ return 0;
+}
+
+static int pch_wpt_suspend(struct pch_thermal_device *ptd)
+{
+ u8 tsel;
+
+ if (ptd->bios_enabled)
+ return 0;
+
+ tsel = readb(ptd->hw_base + WPT_TSEL);
+
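+	/* Clear WPT_TSEL_ETS (bit 0) to disable the sensor across suspend */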
+ writeb(tsel & 0xFE, ptd->hw_base + WPT_TSEL);
+
+ return 0;
+}
+
+static int pch_wpt_resume(struct pch_thermal_device *ptd)
+{
+ u8 tsel;
+
+ if (ptd->bios_enabled)
+ return 0;
+
+ tsel = readb(ptd->hw_base + WPT_TSEL);
+
+ writeb(tsel | WPT_TSEL_ETS, ptd->hw_base + WPT_TSEL);
+
+ return 0;
+}
+
+struct pch_dev_ops {
+ int (*hw_init)(struct pch_thermal_device *ptd, int *nr_trips);
+ int (*get_temp)(struct pch_thermal_device *ptd, int *temp);
+ int (*suspend)(struct pch_thermal_device *ptd);
+ int (*resume)(struct pch_thermal_device *ptd);
+};
+
+
+/* dev ops for Wildcat Point */
+static const struct pch_dev_ops pch_dev_ops_wpt = {
+ .hw_init = pch_wpt_init,
+ .get_temp = pch_wpt_get_temp,
+ .suspend = pch_wpt_suspend,
+ .resume = pch_wpt_resume,
+};
+
+static int pch_thermal_get_temp(struct thermal_zone_device *tzd, int *temp)
+{
+ struct pch_thermal_device *ptd = tzd->devdata;
+
+ return ptd->ops->get_temp(ptd, temp);
+}
+
+static int pch_get_trip_type(struct thermal_zone_device *tzd, int trip,
+ enum thermal_trip_type *type)
+{
+ struct pch_thermal_device *ptd = tzd->devdata;
+
+ if (ptd->crt_trip_id == trip)
+ *type = THERMAL_TRIP_CRITICAL;
+ else if (ptd->hot_trip_id == trip)
+ *type = THERMAL_TRIP_HOT;
+ else if (ptd->psv_trip_id == trip)
+ *type = THERMAL_TRIP_PASSIVE;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int pch_get_trip_temp(struct thermal_zone_device *tzd, int trip, int *temp)
+{
+ struct pch_thermal_device *ptd = tzd->devdata;
+
+ if (ptd->crt_trip_id == trip)
+ *temp = ptd->crt_temp;
+ else if (ptd->hot_trip_id == trip)
+ *temp = ptd->hot_temp;
+ else if (ptd->psv_trip_id == trip)
+ *temp = ptd->psv_temp;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct thermal_zone_device_ops tzd_ops = {
+ .get_temp = pch_thermal_get_temp,
+ .get_trip_type = pch_get_trip_type,
+ .get_trip_temp = pch_get_trip_temp,
+};
+
+enum board_ids {
+ board_hsw,
+ board_wpt,
+ board_skl,
+ board_cnl,
+ board_cml,
+};
+
+static const struct board_info {
+ const char *name;
+ const struct pch_dev_ops *ops;
+} board_info[] = {
+ [board_hsw] = {
+ .name = "pch_haswell",
+ .ops = &pch_dev_ops_wpt,
+ },
+ [board_wpt] = {
+ .name = "pch_wildcat_point",
+ .ops = &pch_dev_ops_wpt,
+ },
+ [board_skl] = {
+ .name = "pch_skylake",
+ .ops = &pch_dev_ops_wpt,
+ },
+ [board_cnl] = {
+ .name = "pch_cannonlake",
+ .ops = &pch_dev_ops_wpt,
+ },
+ [board_cml] = {
+ .name = "pch_cometlake",
+ .ops = &pch_dev_ops_wpt,
+ }
+};
+
+static int intel_pch_thermal_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ enum board_ids board_id = id->driver_data;
+ const struct board_info *bi = &board_info[board_id];
+ struct pch_thermal_device *ptd;
+ int err;
+ int nr_trips;
+
+ ptd = devm_kzalloc(&pdev->dev, sizeof(*ptd), GFP_KERNEL);
+ if (!ptd)
+ return -ENOMEM;
+
+ ptd->ops = bi->ops;
+
+ pci_set_drvdata(pdev, ptd);
+ ptd->pdev = pdev;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable pci device\n");
+ return err;
+ }
+
+ err = pci_request_regions(pdev, driver_name);
+ if (err) {
+ dev_err(&pdev->dev, "failed to request pci region\n");
+ goto error_disable;
+ }
+
+ ptd->hw_base = pci_ioremap_bar(pdev, 0);
+ if (!ptd->hw_base) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "failed to map mem base\n");
+ goto error_release;
+ }
+
+ err = ptd->ops->hw_init(ptd, &nr_trips);
+ if (err)
+ goto error_cleanup;
+
+ ptd->tzd = thermal_zone_device_register(bi->name, nr_trips, 0, ptd,
+ &tzd_ops, NULL, 0, 0);
+ if (IS_ERR(ptd->tzd)) {
+ dev_err(&pdev->dev, "Failed to register thermal zone %s\n",
+ bi->name);
+ err = PTR_ERR(ptd->tzd);
+ goto error_cleanup;
+ }
+ err = thermal_zone_device_enable(ptd->tzd);
+ if (err)
+ goto err_unregister;
+
+ return 0;
+
+err_unregister:
+ thermal_zone_device_unregister(ptd->tzd);
+error_cleanup:
+ iounmap(ptd->hw_base);
+error_release:
+ pci_release_regions(pdev);
+error_disable:
+ pci_disable_device(pdev);
+ dev_err(&pdev->dev, "pci device failed to probe\n");
+ return err;
+}
+
+static void intel_pch_thermal_remove(struct pci_dev *pdev)
+{
+ struct pch_thermal_device *ptd = pci_get_drvdata(pdev);
+
+ thermal_zone_device_unregister(ptd->tzd);
+ iounmap(ptd->hw_base);
+ pci_set_drvdata(pdev, NULL);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static int intel_pch_thermal_suspend(struct device *device)
+{
+ struct pch_thermal_device *ptd = dev_get_drvdata(device);
+
+ return ptd->ops->suspend(ptd);
+}
+
+static int intel_pch_thermal_resume(struct device *device)
+{
+ struct pch_thermal_device *ptd = dev_get_drvdata(device);
+
+ return ptd->ops->resume(ptd);
+}
+
+static const struct pci_device_id intel_pch_thermal_id[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_1),
+ .driver_data = board_hsw, },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_2),
+ .driver_data = board_hsw, },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT),
+ .driver_data = board_wpt, },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL),
+ .driver_data = board_skl, },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL_H),
+ .driver_data = board_skl, },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_CNL),
+ .driver_data = board_cnl, },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_CNL_H),
+ .driver_data = board_cnl, },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_CNL_LP),
+ .driver_data = board_cnl, },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_CML_H),
+ .driver_data = board_cml, },
+ { 0, },
+};
+MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id);
+
+static const struct dev_pm_ops intel_pch_pm_ops = {
+ .suspend = intel_pch_thermal_suspend,
+ .resume = intel_pch_thermal_resume,
+};
+
+static struct pci_driver intel_pch_thermal_driver = {
+ .name = "intel_pch_thermal",
+ .id_table = intel_pch_thermal_id,
+ .probe = intel_pch_thermal_probe,
+ .remove = intel_pch_thermal_remove,
+ .driver.pm = &intel_pch_pm_ops,
+};
+
+module_pci_driver(intel_pch_thermal_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel PCH Thermal driver");
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
new file mode 100644
index 000000000..6e7c230d3
--- /dev/null
+++ b/drivers/thermal/intel/intel_powerclamp.c
@@ -0,0 +1,787 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * intel_powerclamp.c - package c-state idle injection
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * Authors:
+ * Arjan van de Ven <arjan@linux.intel.com>
+ * Jacob Pan <jacob.jun.pan@linux.intel.com>
+ *
+ * TODO:
+ * 1. better handle wakeups from external interrupts; currently a fixed
+ *    compensation is added to the clamping duration when an excessive
+ *    number of wakeups is observed during idle time. The reason is that,
+ *    for external interrupts that need no ack, clamping down a cpu in
+ *    non-irq context does not reduce the irq rate. For the majority of
+ *    cases clamping down a cpu does help reduce irqs as well; we should
+ *    be able to differentiate the two cases and give a quantitative
+ *    solution for the irqs that we can control, perhaps based on
+ *    get_cpu_iowait_time_us()
+ *
+ * 2. synchronization with other hw blocks
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/cpu.h>
+#include <linux/thermal.h>
+#include <linux/slab.h>
+#include <linux/tick.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/sched/rt.h>
+#include <uapi/linux/sched/types.h>
+
+#include <asm/nmi.h>
+#include <asm/msr.h>
+#include <asm/mwait.h>
+#include <asm/cpu_device_id.h>
+#include <asm/hardirq.h>
+
+#define MAX_TARGET_RATIO (50U)
+/* For each undisturbed clamping period (no extra wake ups during idle time),
+ * we increment the confidence counter for the given target ratio.
+ * CONFIDENCE_OK defines the level where runtime calibration results are
+ * valid.
+ */
+#define CONFIDENCE_OK (3)
+/* Default idle injection duration; the driver adjusts the sleep time to meet
+ * the target idle ratio. Similar to frequency modulation.
+ */
+#define DEFAULT_DURATION_JIFFIES (6)
+
+static unsigned int target_mwait;
+static struct dentry *debug_dir;
+static bool poll_pkg_cstate_enable;
+
+/* user selected target */
+static unsigned int set_target_ratio;
+static unsigned int current_ratio;
+static bool should_skip;
+static bool reduce_irq;
+static atomic_t idle_wakeup_counter;
+static unsigned int control_cpu; /* The cpu assigned to collect stats and
+				  * update control parameters. Defaults to the
+				  * BSP, but the BSP can be offlined.
+ */
+static bool clamping;
+
+struct powerclamp_worker_data {
+ struct kthread_worker *worker;
+ struct kthread_work balancing_work;
+ struct kthread_delayed_work idle_injection_work;
+ unsigned int cpu;
+ unsigned int count;
+ unsigned int guard;
+ unsigned int window_size_now;
+ unsigned int target_ratio;
+ unsigned int duration_jiffies;
+ bool clamping;
+};
+
+static struct powerclamp_worker_data __percpu *worker_data;
+static struct thermal_cooling_device *cooling_dev;
+static unsigned long *cpu_clamping_mask; /* bit map for tracking per cpu
+ * clamping kthread worker
+ */
+
+static unsigned int duration;
+static unsigned int pkg_cstate_ratio_cur;
+static unsigned int window_size;
+
+static int duration_set(const char *arg, const struct kernel_param *kp)
+{
+ int ret = 0;
+ unsigned long new_duration;
+
+ ret = kstrtoul(arg, 10, &new_duration);
+ if (ret)
+ goto exit;
+ if (new_duration > 25 || new_duration < 6) {
+ pr_err("Out of recommended range %lu, between 6-25ms\n",
+ new_duration);
+ ret = -EINVAL;
+ }
+
+ duration = clamp(new_duration, 6ul, 25ul);
+ smp_mb();
+
+exit:
+
+ return ret;
+}
+
+static const struct kernel_param_ops duration_ops = {
+ .set = duration_set,
+ .get = param_get_int,
+};
+
+
+module_param_cb(duration, &duration_ops, &duration, 0644);
+MODULE_PARM_DESC(duration, "forced idle time for each attempt in msec.");
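+/* e.g. echo 10 > /sys/module/intel_powerclamp/parameters/duration */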
+
+struct powerclamp_calibration_data {
+	unsigned long confidence; /* used for calibration: a counter that is
+				   * incremented each time a clamping period
+				   * completes without extra wakeups; once it
+				   * reaches a given level, the compensation
+				   * is deemed usable.
+ */
+ unsigned long steady_comp; /* steady state compensation used when
+ * no extra wakeups occurred.
+ */
+	unsigned long dynamic_comp; /* compensates for excessive wakeups from
+				     * idle, mostly from external interrupts.
+ */
+};
+
+static struct powerclamp_calibration_data cal_data[MAX_TARGET_RATIO];
+
+static int window_size_set(const char *arg, const struct kernel_param *kp)
+{
+ int ret = 0;
+ unsigned long new_window_size;
+
+ ret = kstrtoul(arg, 10, &new_window_size);
+ if (ret)
+ goto exit_win;
+ if (new_window_size > 10 || new_window_size < 2) {
+ pr_err("Out of recommended window size %lu, between 2-10\n",
+ new_window_size);
+ ret = -EINVAL;
+ }
+
+ window_size = clamp(new_window_size, 2ul, 10ul);
+ smp_mb();
+
+exit_win:
+
+ return ret;
+}
+
+static const struct kernel_param_ops window_size_ops = {
+ .set = window_size_set,
+ .get = param_get_int,
+};
+
+module_param_cb(window_size, &window_size_ops, &window_size, 0644);
+MODULE_PARM_DESC(window_size, "sliding window in number of clamping cycles\n"
+ "\tpowerclamp controls idle ratio within this window. larger\n"
+	"\twindow size results in slower response time but smoother\n"
+	"\tclamping results. Defaults to 2.");
+
+static void find_target_mwait(void)
+{
+ unsigned int eax, ebx, ecx, edx;
+ unsigned int highest_cstate = 0;
+ unsigned int highest_subcstate = 0;
+ int i;
+
+ if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+ return;
+
+ cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
+
+ if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
+ !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
+ return;
+
+ edx >>= MWAIT_SUBSTATE_SIZE;
+ for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
+ if (edx & MWAIT_SUBSTATE_MASK) {
+ highest_cstate = i;
+ highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
+ }
+ }
+ target_mwait = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
+ (highest_subcstate - 1);
+
+}
+
+struct pkg_cstate_info {
+ bool skip;
+ int msr_index;
+ int cstate_id;
+};
+
+#define PKG_CSTATE_INIT(id) { \
+ .msr_index = MSR_PKG_C##id##_RESIDENCY, \
+ .cstate_id = id \
+ }
+
+static struct pkg_cstate_info pkg_cstates[] = {
+ PKG_CSTATE_INIT(2),
+ PKG_CSTATE_INIT(3),
+ PKG_CSTATE_INIT(6),
+ PKG_CSTATE_INIT(7),
+ PKG_CSTATE_INIT(8),
+ PKG_CSTATE_INIT(9),
+ PKG_CSTATE_INIT(10),
+ {NULL},
+};
+
+static bool has_pkg_state_counter(void)
+{
+ u64 val;
+ struct pkg_cstate_info *info = pkg_cstates;
+
+ /* check if any one of the counter msrs exists */
+ while (info->msr_index) {
+ if (!rdmsrl_safe(info->msr_index, &val))
+ return true;
+ info++;
+ }
+
+ return false;
+}
+
+static u64 pkg_state_counter(void)
+{
+ u64 val;
+ u64 count = 0;
+ struct pkg_cstate_info *info = pkg_cstates;
+
+ while (info->msr_index) {
+ if (!info->skip) {
+ if (!rdmsrl_safe(info->msr_index, &val))
+ count += val;
+ else
+ info->skip = true;
+ }
+ info++;
+ }
+
+ return count;
+}
+
+static unsigned int get_compensation(int ratio)
+{
+ unsigned int comp = 0;
+
+ if (!poll_pkg_cstate_enable)
+ return 0;
+
+ /* we only use compensation if all adjacent ones are good */
+ if (ratio == 1 &&
+ cal_data[ratio].confidence >= CONFIDENCE_OK &&
+ cal_data[ratio + 1].confidence >= CONFIDENCE_OK &&
+ cal_data[ratio + 2].confidence >= CONFIDENCE_OK) {
+ comp = (cal_data[ratio].steady_comp +
+ cal_data[ratio + 1].steady_comp +
+ cal_data[ratio + 2].steady_comp) / 3;
+ } else if (ratio == MAX_TARGET_RATIO - 1 &&
+ cal_data[ratio].confidence >= CONFIDENCE_OK &&
+ cal_data[ratio - 1].confidence >= CONFIDENCE_OK &&
+ cal_data[ratio - 2].confidence >= CONFIDENCE_OK) {
+ comp = (cal_data[ratio].steady_comp +
+ cal_data[ratio - 1].steady_comp +
+ cal_data[ratio - 2].steady_comp) / 3;
+ } else if (cal_data[ratio].confidence >= CONFIDENCE_OK &&
+ cal_data[ratio - 1].confidence >= CONFIDENCE_OK &&
+ cal_data[ratio + 1].confidence >= CONFIDENCE_OK) {
+ comp = (cal_data[ratio].steady_comp +
+ cal_data[ratio - 1].steady_comp +
+ cal_data[ratio + 1].steady_comp) / 3;
+ }
+
+ /* REVISIT: simple penalty of double idle injection */
+ if (reduce_irq)
+ comp = ratio;
+ /* do not exceed limit */
+ if (comp + ratio >= MAX_TARGET_RATIO)
+ comp = MAX_TARGET_RATIO - ratio - 1;
+
+ return comp;
+}
+
+static void adjust_compensation(int target_ratio, unsigned int win)
+{
+ int delta;
+ struct powerclamp_calibration_data *d = &cal_data[target_ratio];
+
+ /*
+	 * Skip the adjustment if the confidence level has already been
+	 * reached, or if there were too many wakeups during the last idle
+	 * injection period: in that case the data cannot be trusted.
+ */
+ if (d->confidence >= CONFIDENCE_OK ||
+ atomic_read(&idle_wakeup_counter) >
+ win * num_online_cpus())
+ return;
+
+ delta = set_target_ratio - current_ratio;
+ /* filter out bad data */
+ if (delta >= 0 && delta <= (1+target_ratio/10)) {
+ if (d->steady_comp)
+ d->steady_comp =
+ roundup(delta+d->steady_comp, 2)/2;
+ else
+ d->steady_comp = delta;
+ d->confidence++;
+ }
+}
+
+static bool powerclamp_adjust_controls(unsigned int target_ratio,
+ unsigned int guard, unsigned int win)
+{
+ static u64 msr_last, tsc_last;
+ u64 msr_now, tsc_now;
+ u64 val64;
+
+ /* check result for the last window */
+ msr_now = pkg_state_counter();
+ tsc_now = rdtsc();
+
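+	/*
+	 * Package C-state residency MSRs count at the TSC rate, so
+	 * 100 * delta_residency / delta_tsc approximates the percentage of
+	 * time the package spent in a C-state over the last window.
+	 */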
+ /* calculate pkg cstate vs tsc ratio */
+ if (!msr_last || !tsc_last)
+ current_ratio = 1;
+ else if (tsc_now-tsc_last) {
+ val64 = 100*(msr_now-msr_last);
+ do_div(val64, (tsc_now-tsc_last));
+ current_ratio = val64;
+ }
+
+ /* update record */
+ msr_last = msr_now;
+ tsc_last = tsc_now;
+
+ adjust_compensation(target_ratio, win);
+ /*
+	 * If there are too many external interrupts, set a flag so that
+	 * we can take measures later.
+ */
+ reduce_irq = atomic_read(&idle_wakeup_counter) >=
+ 2 * win * num_online_cpus();
+
+ atomic_set(&idle_wakeup_counter, 0);
+ /* if we are above target+guard, skip */
+ return set_target_ratio + guard <= current_ratio;
+}
+
+static void clamp_balancing_func(struct kthread_work *work)
+{
+ struct powerclamp_worker_data *w_data;
+ int sleeptime;
+ unsigned long target_jiffies;
+ unsigned int compensated_ratio;
+ int interval; /* jiffies to sleep for each attempt */
+
+ w_data = container_of(work, struct powerclamp_worker_data,
+ balancing_work);
+
+ /*
+ * make sure user selected ratio does not take effect until
+ * the next round. adjust target_ratio if user has changed
+ * target such that we can converge quickly.
+ */
+ w_data->target_ratio = READ_ONCE(set_target_ratio);
+ w_data->guard = 1 + w_data->target_ratio / 20;
+ w_data->window_size_now = window_size;
+ w_data->duration_jiffies = msecs_to_jiffies(duration);
+ w_data->count++;
+
+ /*
+ * systems may have different ability to enter package level
+ * c-states, thus we need to compensate the injected idle ratio
+ * to achieve the actual target reported by the HW.
+ */
+ compensated_ratio = w_data->target_ratio +
+ get_compensation(w_data->target_ratio);
+ if (compensated_ratio <= 0)
+ compensated_ratio = 1;
+ interval = w_data->duration_jiffies * 100 / compensated_ratio;
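+	/*
+	 * e.g. with a 6 jiffy injection and a compensated target of 25%,
+	 * interval = 6 * 100 / 25 = 24 jiffies, i.e. idle 6 of every 24.
+	 */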
+
+ /* align idle time */
+ target_jiffies = roundup(jiffies, interval);
+ sleeptime = target_jiffies - jiffies;
+ if (sleeptime <= 0)
+ sleeptime = 1;
+
+ if (clamping && w_data->clamping && cpu_online(w_data->cpu))
+ kthread_queue_delayed_work(w_data->worker,
+ &w_data->idle_injection_work,
+ sleeptime);
+}
+
+static void clamp_idle_injection_func(struct kthread_work *work)
+{
+ struct powerclamp_worker_data *w_data;
+
+ w_data = container_of(work, struct powerclamp_worker_data,
+ idle_injection_work.work);
+
+ /*
+ * only elected controlling cpu can collect stats and update
+ * control parameters.
+ */
+ if (w_data->cpu == control_cpu &&
+ !(w_data->count % w_data->window_size_now)) {
+ should_skip =
+ powerclamp_adjust_controls(w_data->target_ratio,
+ w_data->guard,
+ w_data->window_size_now);
+ smp_mb();
+ }
+
+ if (should_skip)
+ goto balance;
+
+ play_idle(jiffies_to_usecs(w_data->duration_jiffies));
+
+balance:
+ if (clamping && w_data->clamping && cpu_online(w_data->cpu))
+ kthread_queue_work(w_data->worker, &w_data->balancing_work);
+}
+
+/*
+ * 1 HZ polling while clamping is active, useful for userspace
+ * to monitor actual idle ratio.
+ */
+static void poll_pkg_cstate(struct work_struct *dummy);
+static DECLARE_DELAYED_WORK(poll_pkg_cstate_work, poll_pkg_cstate);
+static void poll_pkg_cstate(struct work_struct *dummy)
+{
+ static u64 msr_last;
+ static u64 tsc_last;
+
+ u64 msr_now;
+ u64 tsc_now;
+ u64 val64;
+
+ msr_now = pkg_state_counter();
+ tsc_now = rdtsc();
+
+ /* calculate pkg cstate vs tsc ratio */
+ if (!msr_last || !tsc_last)
+ pkg_cstate_ratio_cur = 1;
+ else {
+ if (tsc_now - tsc_last) {
+ val64 = 100 * (msr_now - msr_last);
+ do_div(val64, (tsc_now - tsc_last));
+ pkg_cstate_ratio_cur = val64;
+ }
+ }
+
+ /* update record */
+ msr_last = msr_now;
+ tsc_last = tsc_now;
+
+ if (clamping)
+ schedule_delayed_work(&poll_pkg_cstate_work, HZ);
+}
+
+static void start_power_clamp_worker(unsigned long cpu)
+{
+ struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu);
+ struct kthread_worker *worker;
+
+ worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inj/%ld", cpu);
+ if (IS_ERR(worker))
+ return;
+
+ w_data->worker = worker;
+ w_data->count = 0;
+ w_data->cpu = cpu;
+ w_data->clamping = true;
+ set_bit(cpu, cpu_clamping_mask);
+ sched_set_fifo(worker->task);
+ kthread_init_work(&w_data->balancing_work, clamp_balancing_func);
+ kthread_init_delayed_work(&w_data->idle_injection_work,
+ clamp_idle_injection_func);
+ kthread_queue_work(w_data->worker, &w_data->balancing_work);
+}
+
+static void stop_power_clamp_worker(unsigned long cpu)
+{
+ struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu);
+
+ if (!w_data->worker)
+ return;
+
+ w_data->clamping = false;
+ /*
+ * Make sure that all work items queued after this point see
+ * clamping disabled. The counterpart barrier is not needed because
+ * there is an implicit memory barrier when the queued work is
+ * processed.
+ */
+ smp_wmb();
+ kthread_cancel_work_sync(&w_data->balancing_work);
+ kthread_cancel_delayed_work_sync(&w_data->idle_injection_work);
+ /*
+ * The balancing work might still be queued here because the
+ * handling of the "clamping" variable, and the cancel and queue
+ * operations, are not synchronized via a lock. That is not a big
+ * deal: the balancing work is fast and kthread_destroy_worker()
+ * will wait for it.
+ */
+ clear_bit(w_data->cpu, cpu_clamping_mask);
+ kthread_destroy_worker(w_data->worker);
+
+ w_data->worker = NULL;
+}
+
+static int start_power_clamp(void)
+{
+ unsigned long cpu;
+
+ set_target_ratio = clamp(set_target_ratio, 0U, MAX_TARGET_RATIO - 1);
+ /* prevent cpu hotplug */
+ get_online_cpus();
+
+ /* prefer BSP */
+ control_cpu = cpumask_first(cpu_online_mask);
+
+ clamping = true;
+ if (poll_pkg_cstate_enable)
+ schedule_delayed_work(&poll_pkg_cstate_work, 0);
+
+ /* start one kthread worker per online cpu */
+ for_each_online_cpu(cpu) {
+ start_power_clamp_worker(cpu);
+ }
+ put_online_cpus();
+
+ return 0;
+}
+
+static void end_power_clamp(void)
+{
+ int i;
+
+ /*
+ * Block requeuing in all the kthread workers. They will flush and
+ * stop faster.
+ */
+ clamping = false;
+ if (bitmap_weight(cpu_clamping_mask, num_possible_cpus())) {
+ for_each_set_bit(i, cpu_clamping_mask, num_possible_cpus()) {
+ pr_debug("clamping worker for cpu %d alive, destroy\n",
+ i);
+ stop_power_clamp_worker(i);
+ }
+ }
+}
+
+static int powerclamp_cpu_online(unsigned int cpu)
+{
+ if (!clamping)
+ return 0;
+ start_power_clamp_worker(cpu);
+ /* prefer BSP as controlling CPU */
+ if (cpu == 0) {
+ control_cpu = 0;
+ smp_mb();
+ }
+ return 0;
+}
+
+static int powerclamp_cpu_predown(unsigned int cpu)
+{
+ if (!clamping)
+ return 0;
+
+ stop_power_clamp_worker(cpu);
+ if (cpu != control_cpu)
+ return 0;
+
+ control_cpu = cpumask_first(cpu_online_mask);
+ if (control_cpu == cpu)
+ control_cpu = cpumask_next(cpu, cpu_online_mask);
+ smp_mb();
+ return 0;
+}
+
+static int powerclamp_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = MAX_TARGET_RATIO;
+
+ return 0;
+}
+
+static int powerclamp_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ if (clamping) {
+ if (poll_pkg_cstate_enable)
+ *state = pkg_cstate_ratio_cur;
+ else
+ *state = set_target_ratio;
+ } else {
+ /* to save power, do not poll idle ratio while not clamping */
+ *state = -1; /* indicates invalid state */
+ }
+
+ return 0;
+}
+
+static int powerclamp_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long new_target_ratio)
+{
+ int ret = 0;
+
+ new_target_ratio = clamp(new_target_ratio, 0UL,
+ (unsigned long) (MAX_TARGET_RATIO-1));
+ if (set_target_ratio == 0 && new_target_ratio > 0) {
+ pr_info("Start idle injection to reduce power\n");
+ set_target_ratio = new_target_ratio;
+ ret = start_power_clamp();
+ goto exit_set;
+ } else if (set_target_ratio > 0 && new_target_ratio == 0) {
+ pr_info("Stop forced idle injection\n");
+ end_power_clamp();
+ set_target_ratio = 0;
+ } else /* adjust currently running */ {
+ set_target_ratio = new_target_ratio;
+ /* make new set_target_ratio visible to other cpus */
+ smp_mb();
+ }
+
+exit_set:
+ return ret;
+}
+
+/* bind to generic thermal layer as cooling device*/
+static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
+ .get_max_state = powerclamp_get_max_state,
+ .get_cur_state = powerclamp_get_cur_state,
+ .set_cur_state = powerclamp_set_cur_state,
+};
+
+static const struct x86_cpu_id __initconst intel_powerclamp_ids[] = {
+ X86_MATCH_VENDOR_FEATURE(INTEL, X86_FEATURE_MWAIT, NULL),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
+
+static int __init powerclamp_probe(void)
+{
+ if (!x86_match_cpu(intel_powerclamp_ids)) {
+ pr_err("CPU does not support MWAIT\n");
+ return -ENODEV;
+ }
+
+ /* The goal for idle time alignment is to achieve package cstate. */
+ if (!has_pkg_state_counter()) {
+ pr_info("No package C-state available\n");
+ return -ENODEV;
+ }
+
+ /* find the deepest mwait value */
+ find_target_mwait();
+
+ return 0;
+}
+
+static int powerclamp_debug_show(struct seq_file *m, void *unused)
+{
+ int i = 0;
+
+ seq_printf(m, "controlling cpu: %d\n", control_cpu);
+ seq_printf(m, "pct confidence steady dynamic (compensation)\n");
+ for (i = 0; i < MAX_TARGET_RATIO; i++) {
+ seq_printf(m, "%d\t%lu\t%lu\t%lu\n",
+ i,
+ cal_data[i].confidence,
+ cal_data[i].steady_comp,
+ cal_data[i].dynamic_comp);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(powerclamp_debug);
+
+static inline void powerclamp_create_debug_files(void)
+{
+ debug_dir = debugfs_create_dir("intel_powerclamp", NULL);
+
+ debugfs_create_file("powerclamp_calib", S_IRUGO, debug_dir, cal_data,
+ &powerclamp_debug_fops);
+}
+
+static enum cpuhp_state hp_state;
+
+static int __init powerclamp_init(void)
+{
+ int retval;
+ int bitmap_size;
+
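+ /* one bit per possible CPU, tracks which CPUs have a clamping worker */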
+ bitmap_size = BITS_TO_LONGS(num_possible_cpus()) * sizeof(long);
+ cpu_clamping_mask = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!cpu_clamping_mask)
+ return -ENOMEM;
+
+ /* probe cpu features and ids here */
+ retval = powerclamp_probe();
+ if (retval)
+ goto exit_free;
+
+ /* set default limit, maybe adjusted during runtime based on feedback */
+ window_size = 2;
+ retval = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "thermal/intel_powerclamp:online",
+ powerclamp_cpu_online,
+ powerclamp_cpu_predown);
+ if (retval < 0)
+ goto exit_free;
+
+ hp_state = retval;
+
+ worker_data = alloc_percpu(struct powerclamp_worker_data);
+ if (!worker_data) {
+ retval = -ENOMEM;
+ goto exit_unregister;
+ }
+
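+ /*
+ * Package C-state residency is only polled on single-package,
+ * single-die systems, where one counter covers the whole machine.
+ */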
+ if (topology_max_packages() == 1 && topology_max_die_per_package() == 1)
+ poll_pkg_cstate_enable = true;
+
+ cooling_dev = thermal_cooling_device_register("intel_powerclamp", NULL,
+ &powerclamp_cooling_ops);
+ if (IS_ERR(cooling_dev)) {
+ retval = -ENODEV;
+ goto exit_free_thread;
+ }
+
+ if (!duration)
+ duration = jiffies_to_msecs(DEFAULT_DURATION_JIFFIES);
+
+ powerclamp_create_debug_files();
+
+ return 0;
+
+exit_free_thread:
+ free_percpu(worker_data);
+exit_unregister:
+ cpuhp_remove_state_nocalls(hp_state);
+exit_free:
+ kfree(cpu_clamping_mask);
+ return retval;
+}
+module_init(powerclamp_init);
+
+static void __exit powerclamp_exit(void)
+{
+ end_power_clamp();
+ cpuhp_remove_state_nocalls(hp_state);
+ free_percpu(worker_data);
+ thermal_cooling_device_unregister(cooling_dev);
+ kfree(cpu_clamping_mask);
+
+ cancel_delayed_work_sync(&poll_pkg_cstate_work);
+ debugfs_remove_recursive(debug_dir);
+}
+module_exit(powerclamp_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
+MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@linux.intel.com>");
+MODULE_DESCRIPTION("Package Level C-state Idle Injection for Intel CPUs");
diff --git a/drivers/thermal/intel/intel_quark_dts_thermal.c b/drivers/thermal/intel/intel_quark_dts_thermal.c
new file mode 100644
index 000000000..b43fbd5ea
--- /dev/null
+++ b/drivers/thermal/intel/intel_quark_dts_thermal.c
@@ -0,0 +1,438 @@
+/*
+ * intel_quark_dts_thermal.c
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * Ong Boon Leong <boon.leong.ong@intel.com>
+ * Intel Malaysia, Penang
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Quark DTS thermal driver is implemented by referencing
+ * intel_soc_dts_thermal.c.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/thermal.h>
+#include <asm/cpu_device_id.h>
+#include <asm/iosf_mbi.h>
+
+/* DTS reset is programmed via QRK_MBI_UNIT_SOC */
+#define QRK_DTS_REG_OFFSET_RESET 0x34
+#define QRK_DTS_RESET_BIT BIT(0)
+
+/* DTS enable is programmed via QRK_MBI_UNIT_RMU */
+#define QRK_DTS_REG_OFFSET_ENABLE 0xB0
+#define QRK_DTS_ENABLE_BIT BIT(15)
+
+/* Temperature Register is read via QRK_MBI_UNIT_RMU */
+#define QRK_DTS_REG_OFFSET_TEMP 0xB1
+#define QRK_DTS_MASK_TEMP 0xFF
+#define QRK_DTS_OFFSET_TEMP 0
+#define QRK_DTS_OFFSET_REL_TEMP 16
+#define QRK_DTS_TEMP_BASE 50
+
+/* Programmable Trip Point Register is configured via QRK_MBI_UNIT_RMU */
+#define QRK_DTS_REG_OFFSET_PTPS 0xB2
+#define QRK_DTS_MASK_TP_THRES 0xFF
+#define QRK_DTS_SHIFT_TP 8
+#define QRK_DTS_ID_TP_CRITICAL 0
+#define QRK_DTS_SAFE_TP_THRES 105
+
+/* Thermal Sensor Register Lock */
+#define QRK_DTS_REG_OFFSET_LOCK 0x71
+#define QRK_DTS_LOCK_BIT BIT(5)
+
+/* Quark DTS has 2 trip points: hot & catastrophic */
+#define QRK_MAX_DTS_TRIPS 2
+/* If DTS not locked, all trip points are configurable */
+#define QRK_DTS_WR_MASK_SET 0x3
+/* If DTS locked, no trip points are configurable */
+#define QRK_DTS_WR_MASK_CLR 0
+
+#define DEFAULT_POLL_DELAY 2000
+
+struct soc_sensor_entry {
+ bool locked;
+ u32 store_ptps;
+ u32 store_dts_enable;
+ struct thermal_zone_device *tzone;
+};
+
+static struct soc_sensor_entry *soc_dts;
+
+static int polling_delay = DEFAULT_POLL_DELAY;
+module_param(polling_delay, int, 0644);
+MODULE_PARM_DESC(polling_delay,
+ "Polling interval for checking trip points (in milliseconds)");
+
+static DEFINE_MUTEX(dts_update_mutex);
+
+static int soc_dts_enable(struct thermal_zone_device *tzd)
+{
+ u32 out;
+ struct soc_sensor_entry *aux_entry = tzd->devdata;
+ int ret;
+
+ ret = iosf_mbi_read(QRK_MBI_UNIT_RMU, MBI_REG_READ,
+ QRK_DTS_REG_OFFSET_ENABLE, &out);
+ if (ret)
+ return ret;
+
+ if (out & QRK_DTS_ENABLE_BIT)
+ return 0;
+
+ if (!aux_entry->locked) {
+ out |= QRK_DTS_ENABLE_BIT;
+ ret = iosf_mbi_write(QRK_MBI_UNIT_RMU, MBI_REG_WRITE,
+ QRK_DTS_REG_OFFSET_ENABLE, out);
+ if (ret)
+ return ret;
+ } else {
+ pr_info("DTS is locked. Cannot enable DTS\n");
+ ret = -EPERM;
+ }
+
+ return ret;
+}
+
+static int soc_dts_disable(struct thermal_zone_device *tzd)
+{
+ u32 out;
+ struct soc_sensor_entry *aux_entry = tzd->devdata;
+ int ret;
+
+ ret = iosf_mbi_read(QRK_MBI_UNIT_RMU, MBI_REG_READ,
+ QRK_DTS_REG_OFFSET_ENABLE, &out);
+ if (ret)
+ return ret;
+
+ if (!(out & QRK_DTS_ENABLE_BIT))
+ return 0;
+
+ if (!aux_entry->locked) {
+ out &= ~QRK_DTS_ENABLE_BIT;
+ ret = iosf_mbi_write(QRK_MBI_UNIT_RMU, MBI_REG_WRITE,
+ QRK_DTS_REG_OFFSET_ENABLE, out);
+
+ if (ret)
+ return ret;
+ } else {
+ pr_info("DTS is locked. Cannot disable DTS\n");
+ ret = -EPERM;
+ }
+
+ return ret;
+}
+
+static int _get_trip_temp(int trip, int *temp)
+{
+ int status;
+ u32 out;
+
+ mutex_lock(&dts_update_mutex);
+ status = iosf_mbi_read(QRK_MBI_UNIT_RMU, MBI_REG_READ,
+ QRK_DTS_REG_OFFSET_PTPS, &out);
+ mutex_unlock(&dts_update_mutex);
+
+ if (status)
+ return status;
+
+ /*
+ * Thermal Sensor Programmable Trip Point Register has 8-bit
+ * fields for critical (catastrophic) and hot set trip point
+ * thresholds. The threshold value is always offset by its
+ * temperature base (50 degree Celsius).
+ */
+ *temp = (out >> (trip * QRK_DTS_SHIFT_TP)) & QRK_DTS_MASK_TP_THRES;
+ *temp -= QRK_DTS_TEMP_BASE;
+
+ return 0;
+}
+
+static inline int sys_get_trip_temp(struct thermal_zone_device *tzd,
+ int trip, int *temp)
+{
+ return _get_trip_temp(trip, temp);
+}
+
+static inline int sys_get_crit_temp(struct thermal_zone_device *tzd, int *temp)
+{
+ return _get_trip_temp(QRK_DTS_ID_TP_CRITICAL, temp);
+}
+
+static int update_trip_temp(struct soc_sensor_entry *aux_entry,
+ int trip, int temp)
+{
+ u32 out;
+ u32 temp_out;
+ u32 store_ptps;
+ int ret;
+
+ mutex_lock(&dts_update_mutex);
+ if (aux_entry->locked) {
+ ret = -EPERM;
+ goto failed;
+ }
+
+ ret = iosf_mbi_read(QRK_MBI_UNIT_RMU, MBI_REG_READ,
+ QRK_DTS_REG_OFFSET_PTPS, &store_ptps);
+ if (ret)
+ goto failed;
+
+ /*
+ * Protection against an unsafe trip point threshold value.
+ * As the Quark X1000 datasheet does not provide any recommendation
+ * regarding the safe trip point threshold value to use, we choose
+ * the safe value according to the threshold value set by UEFI BIOS.
+ */
+ if (temp > QRK_DTS_SAFE_TP_THRES)
+ temp = QRK_DTS_SAFE_TP_THRES;
+
+ /*
+ * Thermal Sensor Programmable Trip Point Register has 8-bit
+ * fields for critical (catastrophic) and hot set trip point
+ * thresholds. The threshold value is always offset by its
+ * temperature base (50 degree Celsius).
+ */
+ temp_out = temp + QRK_DTS_TEMP_BASE;
+ out = (store_ptps & ~(QRK_DTS_MASK_TP_THRES <<
+ (trip * QRK_DTS_SHIFT_TP)));
+ out |= (temp_out & QRK_DTS_MASK_TP_THRES) <<
+ (trip * QRK_DTS_SHIFT_TP);
+
+ ret = iosf_mbi_write(QRK_MBI_UNIT_RMU, MBI_REG_WRITE,
+ QRK_DTS_REG_OFFSET_PTPS, out);
+
+failed:
+ mutex_unlock(&dts_update_mutex);
+ return ret;
+}
+
+static inline int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
+ int temp)
+{
+ return update_trip_temp(tzd->devdata, trip, temp);
+}
+
+static int sys_get_trip_type(struct thermal_zone_device *thermal,
+ int trip, enum thermal_trip_type *type)
+{
+ if (trip)
+ *type = THERMAL_TRIP_HOT;
+ else
+ *type = THERMAL_TRIP_CRITICAL;
+
+ return 0;
+}
+
+static int sys_get_curr_temp(struct thermal_zone_device *tzd,
+ int *temp)
+{
+ u32 out;
+ int ret;
+
+ mutex_lock(&dts_update_mutex);
+ ret = iosf_mbi_read(QRK_MBI_UNIT_RMU, MBI_REG_READ,
+ QRK_DTS_REG_OFFSET_TEMP, &out);
+ mutex_unlock(&dts_update_mutex);
+
+ if (ret)
+ return ret;
+
+ /*
+ * Thermal Sensor Temperature Register has 8-bit field
+ * for temperature value (offset by temperature base
+ * 50 degree Celsius).
+ */
+ out = (out >> QRK_DTS_OFFSET_TEMP) & QRK_DTS_MASK_TEMP;
+ *temp = out - QRK_DTS_TEMP_BASE;
+
+ return 0;
+}
+
+static int sys_change_mode(struct thermal_zone_device *tzd,
+ enum thermal_device_mode mode)
+{
+ int ret;
+
+ mutex_lock(&dts_update_mutex);
+ if (mode == THERMAL_DEVICE_ENABLED)
+ ret = soc_dts_enable(tzd);
+ else
+ ret = soc_dts_disable(tzd);
+ mutex_unlock(&dts_update_mutex);
+
+ return ret;
+}
+
+static struct thermal_zone_device_ops tzone_ops = {
+ .get_temp = sys_get_curr_temp,
+ .get_trip_temp = sys_get_trip_temp,
+ .get_trip_type = sys_get_trip_type,
+ .set_trip_temp = sys_set_trip_temp,
+ .get_crit_temp = sys_get_crit_temp,
+ .change_mode = sys_change_mode,
+};
+
+static void free_soc_dts(struct soc_sensor_entry *aux_entry)
+{
+ if (aux_entry) {
+ if (!aux_entry->locked) {
+ mutex_lock(&dts_update_mutex);
+ iosf_mbi_write(QRK_MBI_UNIT_RMU, MBI_REG_WRITE,
+ QRK_DTS_REG_OFFSET_ENABLE,
+ aux_entry->store_dts_enable);
+
+ iosf_mbi_write(QRK_MBI_UNIT_RMU, MBI_REG_WRITE,
+ QRK_DTS_REG_OFFSET_PTPS,
+ aux_entry->store_ptps);
+ mutex_unlock(&dts_update_mutex);
+ }
+ thermal_zone_device_unregister(aux_entry->tzone);
+ kfree(aux_entry);
+ }
+}
+
+static struct soc_sensor_entry *alloc_soc_dts(void)
+{
+ struct soc_sensor_entry *aux_entry;
+ int err;
+ u32 out;
+ int wr_mask;
+
+ aux_entry = kzalloc(sizeof(*aux_entry), GFP_KERNEL);
+ if (!aux_entry)
+  return ERR_PTR(-ENOMEM);
+
+ /* Check if DTS register is locked */
+ err = iosf_mbi_read(QRK_MBI_UNIT_RMU, MBI_REG_READ,
+ QRK_DTS_REG_OFFSET_LOCK, &out);
+ if (err)
+ goto err_ret;
+
+ if (out & QRK_DTS_LOCK_BIT) {
+ aux_entry->locked = true;
+ wr_mask = QRK_DTS_WR_MASK_CLR;
+ } else {
+ aux_entry->locked = false;
+ wr_mask = QRK_DTS_WR_MASK_SET;
+ }
+
+ /* Store DTS default state if DTS registers are not locked */
+ if (!aux_entry->locked) {
+ /* Store DTS default enable for restore on exit */
+ err = iosf_mbi_read(QRK_MBI_UNIT_RMU, MBI_REG_READ,
+ QRK_DTS_REG_OFFSET_ENABLE,
+ &aux_entry->store_dts_enable);
+ if (err)
+ goto err_ret;
+
+ /* Store DTS default PTPS register for restore on exit */
+ err = iosf_mbi_read(QRK_MBI_UNIT_RMU, MBI_REG_READ,
+ QRK_DTS_REG_OFFSET_PTPS,
+ &aux_entry->store_ptps);
+ if (err)
+ goto err_ret;
+ }
+
+ aux_entry->tzone = thermal_zone_device_register("quark_dts",
+ QRK_MAX_DTS_TRIPS,
+ wr_mask,
+ aux_entry, &tzone_ops, NULL, 0, polling_delay);
+ if (IS_ERR(aux_entry->tzone)) {
+ err = PTR_ERR(aux_entry->tzone);
+ goto err_ret;
+ }
+
+ err = thermal_zone_device_enable(aux_entry->tzone);
+ if (err)
+ goto err_aux_status;
+
+ return aux_entry;
+
+err_aux_status:
+ thermal_zone_device_unregister(aux_entry->tzone);
+err_ret:
+ kfree(aux_entry);
+ return ERR_PTR(err);
+}
+
+static const struct x86_cpu_id qrk_thermal_ids[] __initconst = {
+ X86_MATCH_VENDOR_FAM_MODEL(INTEL, 5, INTEL_FAM5_QUARK_X1000, NULL),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, qrk_thermal_ids);
+
+static int __init intel_quark_thermal_init(void)
+{
+ if (!x86_match_cpu(qrk_thermal_ids) || !iosf_mbi_available())
+ return -ENODEV;
+
+ soc_dts = alloc_soc_dts();
+ if (IS_ERR(soc_dts))
+ return PTR_ERR(soc_dts);
+
+ return 0;
+}
+
+static void __exit intel_quark_thermal_exit(void)
+{
+ free_soc_dts(soc_dts);
+}
+
+module_init(intel_quark_thermal_init)
+module_exit(intel_quark_thermal_exit)
+
+MODULE_DESCRIPTION("Intel Quark DTS Thermal Driver");
+MODULE_AUTHOR("Ong Boon Leong <boon.leong.ong@intel.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/thermal/intel/intel_soc_dts_iosf.c b/drivers/thermal/intel/intel_soc_dts_iosf.c
new file mode 100644
index 000000000..8d6707e48
--- /dev/null
+++ b/drivers/thermal/intel/intel_soc_dts_iosf.c
@@ -0,0 +1,477 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * intel_soc_dts_iosf.c
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <asm/iosf_mbi.h>
+#include "intel_soc_dts_iosf.h"
+
+#define SOC_DTS_OFFSET_ENABLE 0xB0
+#define SOC_DTS_OFFSET_TEMP 0xB1
+
+#define SOC_DTS_OFFSET_PTPS 0xB2
+#define SOC_DTS_OFFSET_PTTS 0xB3
+#define SOC_DTS_OFFSET_PTTSS 0xB4
+#define SOC_DTS_OFFSET_PTMC 0x80
+#define SOC_DTS_TE_AUX0 0xB5
+#define SOC_DTS_TE_AUX1 0xB6
+
+#define SOC_DTS_AUX0_ENABLE_BIT BIT(0)
+#define SOC_DTS_AUX1_ENABLE_BIT BIT(1)
+#define SOC_DTS_CPU_MODULE0_ENABLE_BIT BIT(16)
+#define SOC_DTS_CPU_MODULE1_ENABLE_BIT BIT(17)
+#define SOC_DTS_TE_SCI_ENABLE BIT(9)
+#define SOC_DTS_TE_SMI_ENABLE BIT(10)
+#define SOC_DTS_TE_MSI_ENABLE BIT(11)
+#define SOC_DTS_TE_APICA_ENABLE BIT(14)
+#define SOC_DTS_PTMC_APIC_DEASSERT_BIT BIT(4)
+
+/* DTS encoding for TJ MAX temperature */
+#define SOC_DTS_TJMAX_ENCODING 0x7F
+
+/* Only 2 out of 4 are allowed for OSPM */
+#define SOC_MAX_DTS_TRIPS 2
+
+/* Mask for two trips in status bits */
+#define SOC_DTS_TRIP_MASK 0x03
+
+/* DTS0 and DTS 1 */
+#define SOC_MAX_DTS_SENSORS 2
+
+static int get_tj_max(u32 *tj_max)
+{
+ u32 eax, edx;
+ u32 val;
+ int err;
+
+ err = rdmsr_safe(MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
+ if (err)
+  goto err_ret;
+
+ val = (eax >> 16) & 0xff;
+ if (!val) {
+  err = -EINVAL;
+  goto err_ret;
+ }
+ *tj_max = val * 1000;
+
+ return 0;
+err_ret:
+ *tj_max = 0;
+
+ return err;
+}
+
+static int sys_get_trip_temp(struct thermal_zone_device *tzd, int trip,
+ int *temp)
+{
+ int status;
+ u32 out;
+ struct intel_soc_dts_sensor_entry *dts;
+ struct intel_soc_dts_sensors *sensors;
+
+ dts = tzd->devdata;
+ sensors = dts->sensors;
+ mutex_lock(&sensors->dts_update_lock);
+ status = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ,
+ SOC_DTS_OFFSET_PTPS, &out);
+ mutex_unlock(&sensors->dts_update_lock);
+ if (status)
+ return status;
+
+ out = (out >> (trip * 8)) & SOC_DTS_TJMAX_ENCODING;
+ if (!out)
+ *temp = 0;
+ else
+ *temp = sensors->tj_max - out * 1000;
+
+ return 0;
+}
+
+static int update_trip_temp(struct intel_soc_dts_sensor_entry *dts,
+ int thres_index, int temp,
+ enum thermal_trip_type trip_type)
+{
+ int status;
+ u32 temp_out;
+ u32 out;
+ unsigned long update_ptps;
+ u32 store_ptps;
+ u32 store_ptmc;
+ u32 store_te_out;
+ u32 te_out;
+ u32 int_enable_bit = SOC_DTS_TE_APICA_ENABLE;
+ struct intel_soc_dts_sensors *sensors = dts->sensors;
+
+ if (sensors->intr_type == INTEL_SOC_DTS_INTERRUPT_MSI)
+ int_enable_bit |= SOC_DTS_TE_MSI_ENABLE;
+
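+ /* Trip thresholds are encoded as whole degrees Celsius below TJ max */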
+ temp_out = (sensors->tj_max - temp) / 1000;
+
+ status = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ,
+ SOC_DTS_OFFSET_PTPS, &store_ptps);
+ if (status)
+ return status;
+
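+ /*
+ * Each trip point owns an 8-bit field in the PTPS register; update
+ * only the byte belonging to this threshold index.
+ */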
+ update_ptps = store_ptps;
+ bitmap_set_value8(&update_ptps, temp_out & 0xFF, thres_index * 8);
+ out = update_ptps;
+
+ status = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE,
+ SOC_DTS_OFFSET_PTPS, out);
+ if (status)
+ return status;
+
+ pr_debug("update_trip_temp PTPS = %x\n", out);
+ status = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ,
+ SOC_DTS_OFFSET_PTMC, &out);
+ if (status)
+ goto err_restore_ptps;
+
+ store_ptmc = out;
+
+ status = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ,
+ SOC_DTS_TE_AUX0 + thres_index,
+ &te_out);
+ if (status)
+ goto err_restore_ptmc;
+
+ store_te_out = te_out;
+ /* Enable for CPU module 0 and module 1 */
+ out |= (SOC_DTS_CPU_MODULE0_ENABLE_BIT |
+ SOC_DTS_CPU_MODULE1_ENABLE_BIT);
+ if (temp) {
+ if (thres_index)
+ out |= SOC_DTS_AUX1_ENABLE_BIT;
+ else
+ out |= SOC_DTS_AUX0_ENABLE_BIT;
+ te_out |= int_enable_bit;
+ } else {
+ if (thres_index)
+ out &= ~SOC_DTS_AUX1_ENABLE_BIT;
+ else
+ out &= ~SOC_DTS_AUX0_ENABLE_BIT;
+ te_out &= ~int_enable_bit;
+ }
+ status = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE,
+ SOC_DTS_OFFSET_PTMC, out);
+ if (status)
+ goto err_restore_te_out;
+
+ status = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE,
+ SOC_DTS_TE_AUX0 + thres_index,
+ te_out);
+ if (status)
+ goto err_restore_te_out;
+
+ dts->trip_types[thres_index] = trip_type;
+
+ return 0;
+err_restore_te_out:
+ iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE,
+ SOC_DTS_OFFSET_PTMC, store_te_out);
+err_restore_ptmc:
+ iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE,
+ SOC_DTS_OFFSET_PTMC, store_ptmc);
+err_restore_ptps:
+ iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE,
+ SOC_DTS_OFFSET_PTPS, store_ptps);
+ /* Nothing we can do if restore fails */
+
+ return status;
+}
+
+static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
+ int temp)
+{
+ struct intel_soc_dts_sensor_entry *dts = tzd->devdata;
+ struct intel_soc_dts_sensors *sensors = dts->sensors;
+ int status;
+
+ if (temp > sensors->tj_max)
+ return -EINVAL;
+
+ mutex_lock(&sensors->dts_update_lock);
+ status = update_trip_temp(tzd->devdata, trip, temp,
+ dts->trip_types[trip]);
+ mutex_unlock(&sensors->dts_update_lock);
+
+ return status;
+}
+
+static int sys_get_trip_type(struct thermal_zone_device *tzd,
+ int trip, enum thermal_trip_type *type)
+{
+ struct intel_soc_dts_sensor_entry *dts;
+
+ dts = tzd->devdata;
+
+ *type = dts->trip_types[trip];
+
+ return 0;
+}
+
+static int sys_get_curr_temp(struct thermal_zone_device *tzd,
+ int *temp)
+{
+ int status;
+ u32 out;
+ struct intel_soc_dts_sensor_entry *dts;
+ struct intel_soc_dts_sensors *sensors;
+ unsigned long raw;
+
+ dts = tzd->devdata;
+ sensors = dts->sensors;
+ status = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ,
+ SOC_DTS_OFFSET_TEMP, &out);
+ if (status)
+ return status;
+
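+ /*
+ * Each sensor reports an 8-bit reading in the TEMP register; convert
+ * it from the TJ-max-relative encoding to millidegrees Celsius.
+ */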
+ raw = out;
+ out = bitmap_get_value8(&raw, dts->id * 8) - SOC_DTS_TJMAX_ENCODING;
+ *temp = sensors->tj_max - out * 1000;
+
+ return 0;
+}
+
+static struct thermal_zone_device_ops tzone_ops = {
+ .get_temp = sys_get_curr_temp,
+ .get_trip_temp = sys_get_trip_temp,
+ .get_trip_type = sys_get_trip_type,
+ .set_trip_temp = sys_set_trip_temp,
+};
+
+static int soc_dts_enable(int id)
+{
+ u32 out;
+ int ret;
+
+ ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ,
+ SOC_DTS_OFFSET_ENABLE, &out);
+ if (ret)
+ return ret;
+
+ if (!(out & BIT(id))) {
+ out |= BIT(id);
+ ret = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE,
+ SOC_DTS_OFFSET_ENABLE, out);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static void remove_dts_thermal_zone(struct intel_soc_dts_sensor_entry *dts)
+{
+ if (dts) {
+ iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE,
+ SOC_DTS_OFFSET_ENABLE, dts->store_status);
+ thermal_zone_device_unregister(dts->tzone);
+ }
+}
+
+static int add_dts_thermal_zone(int id, struct intel_soc_dts_sensor_entry *dts,
+ bool notification_support, int trip_cnt,
+ int read_only_trip_cnt)
+{
+ char name[10];
+ unsigned long trip;
+ int trip_count = 0;
+ int trip_mask = 0;
+ int writable_trip_cnt = 0;
+ unsigned long ptps;
+ u32 store_ptps;
+ unsigned long i;
+ int ret;
+
+ /* Store status to restore on exit */
+ ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ,
+ SOC_DTS_OFFSET_ENABLE, &dts->store_status);
+ if (ret)
+ goto err_ret;
+
+ dts->id = id;
+ if (notification_support) {
+ trip_count = min(SOC_MAX_DTS_TRIPS, trip_cnt);
+ writable_trip_cnt = trip_count - read_only_trip_cnt;
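+  /* one mask bit per writable trip point */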
+ trip_mask = GENMASK(writable_trip_cnt - 1, 0);
+ }
+
+ /* Check that the writable trips we provide are not already used by the BIOS */
+ ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ,
+ SOC_DTS_OFFSET_PTPS, &store_ptps);
+ if (ret)
+ trip_mask = 0;
+ else {
+ ptps = store_ptps;
+ for_each_set_clump8(i, trip, &ptps, writable_trip_cnt * 8)
+ trip_mask &= ~BIT(i / 8);
+ }
+ dts->trip_mask = trip_mask;
+ dts->trip_count = trip_count;
+ snprintf(name, sizeof(name), "soc_dts%d", id);
+ dts->tzone = thermal_zone_device_register(name,
+ trip_count,
+ trip_mask,
+ dts, &tzone_ops,
+ NULL, 0, 0);
+ if (IS_ERR(dts->tzone)) {
+ ret = PTR_ERR(dts->tzone);
+ goto err_ret;
+ }
+ ret = thermal_zone_device_enable(dts->tzone);
+ if (ret)
+ goto err_enable;
+
+ ret = soc_dts_enable(id);
+ if (ret)
+ goto err_enable;
+
+ return 0;
+err_enable:
+ thermal_zone_device_unregister(dts->tzone);
+err_ret:
+ return ret;
+}
+
+int intel_soc_dts_iosf_add_read_only_critical_trip(
+ struct intel_soc_dts_sensors *sensors, int critical_offset)
+{
+ int i, j;
+
+ for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
+ for (j = 0; j < sensors->soc_dts[i].trip_count; ++j) {
+ if (!(sensors->soc_dts[i].trip_mask & BIT(j))) {
+ return update_trip_temp(&sensors->soc_dts[i], j,
+ sensors->tj_max - critical_offset,
+ THERMAL_TRIP_CRITICAL);
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(intel_soc_dts_iosf_add_read_only_critical_trip);
+
+void intel_soc_dts_iosf_interrupt_handler(struct intel_soc_dts_sensors *sensors)
+{
+ u32 sticky_out;
+ int status;
+ u32 ptmc_out;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sensors->intr_notify_lock, flags);
+
+ status = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ,
+ SOC_DTS_OFFSET_PTMC, &ptmc_out);
+ ptmc_out |= SOC_DTS_PTMC_APIC_DEASSERT_BIT;
+ status = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE,
+ SOC_DTS_OFFSET_PTMC, ptmc_out);
+
+ status = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ,
+ SOC_DTS_OFFSET_PTTSS, &sticky_out);
+ pr_debug("status %d PTTSS %x\n", status, sticky_out);
+ if (sticky_out & SOC_DTS_TRIP_MASK) {
+ int i;
+ /* reset sticky bit */
+ status = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE,
+ SOC_DTS_OFFSET_PTTSS, sticky_out);
+ spin_unlock_irqrestore(&sensors->intr_notify_lock, flags);
+
+ for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
+ pr_debug("TZD update for zone %d\n", i);
+ thermal_zone_device_update(sensors->soc_dts[i].tzone,
+ THERMAL_EVENT_UNSPECIFIED);
+ }
+ } else
+ spin_unlock_irqrestore(&sensors->intr_notify_lock, flags);
+}
+EXPORT_SYMBOL_GPL(intel_soc_dts_iosf_interrupt_handler);
+
+struct intel_soc_dts_sensors *intel_soc_dts_iosf_init(
+ enum intel_soc_dts_interrupt_type intr_type, int trip_count,
+ int read_only_trip_count)
+{
+ struct intel_soc_dts_sensors *sensors;
+ bool notification;
+ int tj_max;
+ int ret;
+ int i;
+
+ if (!iosf_mbi_available())
+ return ERR_PTR(-ENODEV);
+
+ if (!trip_count || read_only_trip_count > trip_count)
+ return ERR_PTR(-EINVAL);
+
+ if (get_tj_max(&tj_max))
+ return ERR_PTR(-EINVAL);
+
+ sensors = kzalloc(sizeof(*sensors), GFP_KERNEL);
+ if (!sensors)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&sensors->intr_notify_lock);
+ mutex_init(&sensors->dts_update_lock);
+ sensors->intr_type = intr_type;
+ sensors->tj_max = tj_max;
+ if (intr_type == INTEL_SOC_DTS_INTERRUPT_NONE)
+ notification = false;
+ else
+ notification = true;
+ for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
+ sensors->soc_dts[i].sensors = sensors;
+ ret = add_dts_thermal_zone(i, &sensors->soc_dts[i],
+ notification, trip_count,
+ read_only_trip_count);
+ if (ret)
+ goto err_free;
+ }
+
+ for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
+ ret = update_trip_temp(&sensors->soc_dts[i], 0, 0,
+ THERMAL_TRIP_PASSIVE);
+ if (ret)
+ goto err_remove_zone;
+
+ ret = update_trip_temp(&sensors->soc_dts[i], 1, 0,
+ THERMAL_TRIP_PASSIVE);
+ if (ret)
+ goto err_remove_zone;
+ }
+
+ return sensors;
+err_remove_zone:
+ for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i)
+ remove_dts_thermal_zone(&sensors->soc_dts[i]);
+
+err_free:
+ kfree(sensors);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(intel_soc_dts_iosf_init);
+
+void intel_soc_dts_iosf_exit(struct intel_soc_dts_sensors *sensors)
+{
+ int i;
+
+ for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
+ update_trip_temp(&sensors->soc_dts[i], 0, 0, 0);
+ update_trip_temp(&sensors->soc_dts[i], 1, 0, 0);
+ remove_dts_thermal_zone(&sensors->soc_dts[i]);
+ }
+ kfree(sensors);
+}
+EXPORT_SYMBOL_GPL(intel_soc_dts_iosf_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/intel/intel_soc_dts_iosf.h b/drivers/thermal/intel/intel_soc_dts_iosf.h
new file mode 100644
index 000000000..c54945748
--- /dev/null
+++ b/drivers/thermal/intel/intel_soc_dts_iosf.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * intel_soc_dts_iosf.h
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#ifndef _INTEL_SOC_DTS_IOSF_CORE_H
+#define _INTEL_SOC_DTS_IOSF_CORE_H
+
+#include <linux/thermal.h>
+
+/* DTS0 and DTS 1 */
+#define SOC_MAX_DTS_SENSORS 2
+
+enum intel_soc_dts_interrupt_type {
+ INTEL_SOC_DTS_INTERRUPT_NONE,
+ INTEL_SOC_DTS_INTERRUPT_APIC,
+ INTEL_SOC_DTS_INTERRUPT_MSI,
+ INTEL_SOC_DTS_INTERRUPT_SCI,
+ INTEL_SOC_DTS_INTERRUPT_SMI,
+};
+
+struct intel_soc_dts_sensors;
+
+struct intel_soc_dts_sensor_entry {
+ int id;
+ u32 store_status;
+ u32 trip_mask;
+ u32 trip_count;
+ enum thermal_trip_type trip_types[2];
+ struct thermal_zone_device *tzone;
+ struct intel_soc_dts_sensors *sensors;
+};
+
+struct intel_soc_dts_sensors {
+ u32 tj_max;
+ spinlock_t intr_notify_lock;
+ struct mutex dts_update_lock;
+ enum intel_soc_dts_interrupt_type intr_type;
+ struct intel_soc_dts_sensor_entry soc_dts[SOC_MAX_DTS_SENSORS];
+};
+
+struct intel_soc_dts_sensors *intel_soc_dts_iosf_init(
+ enum intel_soc_dts_interrupt_type intr_type, int trip_count,
+ int read_only_trip_count);
+void intel_soc_dts_iosf_exit(struct intel_soc_dts_sensors *sensors);
+void intel_soc_dts_iosf_interrupt_handler(
+ struct intel_soc_dts_sensors *sensors);
+int intel_soc_dts_iosf_add_read_only_critical_trip(
+ struct intel_soc_dts_sensors *sensors, int critical_offset);
+#endif
diff --git a/drivers/thermal/intel/intel_soc_dts_thermal.c b/drivers/thermal/intel/intel_soc_dts_thermal.c
new file mode 100644
index 000000000..92e5c19d0
--- /dev/null
+++ b/drivers/thermal/intel/intel_soc_dts_thermal.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * intel_soc_dts_thermal.c
+ * Copyright (c) 2014, Intel Corporation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+#include "intel_soc_dts_iosf.h"
+
+#define CRITICAL_OFFSET_FROM_TJ_MAX 5000
+
+static int crit_offset = CRITICAL_OFFSET_FROM_TJ_MAX;
+module_param(crit_offset, int, 0644);
+MODULE_PARM_DESC(crit_offset,
+ "Critical Temperature offset from tj max in millidegree Celsius.");
+
+/* IRQ 86 is a fixed APIC interrupt for BYT DTS Aux threshold notifications */
+#define BYT_SOC_DTS_APIC_IRQ 86
+
+static int soc_dts_thres_gsi;
+static int soc_dts_thres_irq;
+static struct intel_soc_dts_sensors *soc_dts;
+
+static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data)
+{
+ pr_debug("proc_thermal_interrupt\n");
+ intel_soc_dts_iosf_interrupt_handler(soc_dts);
+
+ return IRQ_HANDLED;
+}
+
+static const struct x86_cpu_id soc_thermal_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, BYT_SOC_DTS_APIC_IRQ),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, soc_thermal_ids);
+
+static int __init intel_soc_thermal_init(void)
+{
+ int err = 0;
+ const struct x86_cpu_id *match_cpu;
+
+ match_cpu = x86_match_cpu(soc_thermal_ids);
+ if (!match_cpu)
+ return -ENODEV;
+
+ /* Create thermal zones with 2 trip points, one of which is read only */
+ soc_dts = intel_soc_dts_iosf_init(INTEL_SOC_DTS_INTERRUPT_APIC, 2, 1);
+ if (IS_ERR(soc_dts)) {
+ err = PTR_ERR(soc_dts);
+ return err;
+ }
+
+ soc_dts_thres_gsi = (int)match_cpu->driver_data;
+ if (soc_dts_thres_gsi) {
+  /*
+   * Note the flags here MUST match the firmware defaults, rather
+   * than the request_irq flags, otherwise we get an EBUSY error.
+   */
+ soc_dts_thres_irq = acpi_register_gsi(NULL, soc_dts_thres_gsi,
+ ACPI_LEVEL_SENSITIVE,
+ ACPI_ACTIVE_LOW);
+ if (soc_dts_thres_irq < 0) {
+ pr_warn("intel_soc_dts: Could not get IRQ for GSI %d, err %d\n",
+ soc_dts_thres_gsi, soc_dts_thres_irq);
+ soc_dts_thres_irq = 0;
+ }
+ }
+
+ if (soc_dts_thres_irq) {
+ err = request_threaded_irq(soc_dts_thres_irq, NULL,
+ soc_irq_thread_fn,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "soc_dts", soc_dts);
+ if (err) {
+ /*
+ * Do not just error out because the user space thermal
+ * daemon such as DPTF may use polling instead of being
+ * interrupt driven.
+ */
+ pr_warn("request_threaded_irq ret %d\n", err);
+ }
+ }
+
+ err = intel_soc_dts_iosf_add_read_only_critical_trip(soc_dts,
+ crit_offset);
+ if (err)
+ goto error_trips;
+
+ return 0;
+
+error_trips:
+ if (soc_dts_thres_irq) {
+ free_irq(soc_dts_thres_irq, soc_dts);
+ acpi_unregister_gsi(soc_dts_thres_gsi);
+ }
+ intel_soc_dts_iosf_exit(soc_dts);
+
+ return err;
+}
+
+static void __exit intel_soc_thermal_exit(void)
+{
+ if (soc_dts_thres_irq) {
+ free_irq(soc_dts_thres_irq, soc_dts);
+ acpi_unregister_gsi(soc_dts_thres_gsi);
+ }
+ intel_soc_dts_iosf_exit(soc_dts);
+}
+
+module_init(intel_soc_thermal_init)
+module_exit(intel_soc_thermal_exit)
+
+MODULE_DESCRIPTION("Intel SoC DTS Thermal Driver");
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c
new file mode 100644
index 000000000..4f5d97329
--- /dev/null
+++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c
@@ -0,0 +1,539 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * x86_pkg_temp_thermal driver
+ * Copyright (c) 2013, Intel Corporation.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/param.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/slab.h>
+#include <linux/pm.h>
+#include <linux/thermal.h>
+#include <linux/debugfs.h>
+#include <asm/cpu_device_id.h>
+#include <asm/mce.h>
+
+/*
+ * Rate control delay: the idea is to introduce a debounce effect.
+ * This should be long enough to avoid repeated events when a threshold
+ * is set to a temperature that is constantly violated, but short
+ * enough to still take action in time. The action can be to remove the
+ * threshold or to change it to the next interesting setting. Based on
+ * experiments, roughly every 5 seconds under load gives a significant
+ * temperature change.
+ */
+#define PKG_TEMP_THERMAL_NOTIFY_DELAY 5000
+static int notify_delay_ms = PKG_TEMP_THERMAL_NOTIFY_DELAY;
+module_param(notify_delay_ms, int, 0644);
+MODULE_PARM_DESC(notify_delay_ms,
+ "User space notification delay in milli seconds.");
+
+/*
+ * Number of trip points in the thermal zone. Currently it can't be more
+ * than 2: the MSR only allows setting and getting notifications for two
+ * thresholds. This define enforces that limit in case cpuid returns a
+ * wrong value for the number of thresholds.
+ */
+#define MAX_NUMBER_OF_TRIPS 2
+
+struct zone_device {
+ int cpu;
+ bool work_scheduled;
+ u32 tj_max;
+ u32 msr_pkg_therm_low;
+ u32 msr_pkg_therm_high;
+ struct delayed_work work;
+ struct thermal_zone_device *tzone;
+ struct cpumask cpumask;
+};
+
+static struct thermal_zone_params pkg_temp_tz_params = {
+ .no_hwmon = true,
+};
+
+/* Keep track of how many zone pointers we allocated in init() */
+static int max_id __read_mostly;
+/* Array of zone pointers */
+static struct zone_device **zones;
+/* Serializes interrupt notification, work and hotplug */
+static DEFINE_RAW_SPINLOCK(pkg_temp_lock);
+/* Protects zone operation in the work function against hotplug removal */
+static DEFINE_MUTEX(thermal_zone_mutex);
+
+/* The dynamically assigned cpu hotplug state for module_exit() */
+static enum cpuhp_state pkg_thermal_hp_state __read_mostly;
+
+/* Debug counters to show using debugfs */
+static struct dentry *debugfs;
+static unsigned int pkg_interrupt_cnt;
+static unsigned int pkg_work_cnt;
+
+static void pkg_temp_debugfs_init(void)
+{
+ debugfs = debugfs_create_dir("pkg_temp_thermal", NULL);
+
+ debugfs_create_u32("pkg_thres_interrupt", S_IRUGO, debugfs,
+ &pkg_interrupt_cnt);
+ debugfs_create_u32("pkg_thres_work", S_IRUGO, debugfs,
+ &pkg_work_cnt);
+}
+
+/*
+ * Protection:
+ *
+ * - cpu hotplug: Read serialized by cpu hotplug lock
+ * Write must hold pkg_temp_lock
+ *
+ * - Other callsites: Must hold pkg_temp_lock
+ */
+static struct zone_device *pkg_temp_thermal_get_dev(unsigned int cpu)
+{
+ int id = topology_logical_die_id(cpu);
+
+ if (id >= 0 && id < max_id)
+ return zones[id];
+ return NULL;
+}
+
+/*
+ * tj-max is interesting because the threshold is set relative to this
+ * temperature.
+ */
+static int get_tj_max(int cpu, u32 *tj_max)
+{
+ u32 eax, edx, val;
+ int err;
+
+ err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
+ if (err)
+ return err;
+
+ val = (eax >> 16) & 0xff;
+ *tj_max = val * 1000;
+
+ return val ? 0 : -EINVAL;
+}
+
+static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp)
+{
+ struct zone_device *zonedev = tzd->devdata;
+ u32 eax, edx;
+
+ rdmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_STATUS,
+ &eax, &edx);
+ if (eax & 0x80000000) {
+ *temp = zonedev->tj_max - ((eax >> 16) & 0x7f) * 1000;
+ pr_debug("sys_get_curr_temp %d\n", *temp);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int sys_get_trip_temp(struct thermal_zone_device *tzd,
+ int trip, int *temp)
+{
+ struct zone_device *zonedev = tzd->devdata;
+ unsigned long thres_reg_value;
+ u32 mask, shift, eax, edx;
+ int ret;
+
+ if (trip >= MAX_NUMBER_OF_TRIPS)
+ return -EINVAL;
+
+ if (trip) {
+ mask = THERM_MASK_THRESHOLD1;
+ shift = THERM_SHIFT_THRESHOLD1;
+ } else {
+ mask = THERM_MASK_THRESHOLD0;
+ shift = THERM_SHIFT_THRESHOLD0;
+ }
+
+ ret = rdmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
+ &eax, &edx);
+ if (ret < 0)
+ return ret;
+
+ thres_reg_value = (eax & mask) >> shift;
+ if (thres_reg_value)
+ *temp = zonedev->tj_max - thres_reg_value * 1000;
+ else
+ *temp = THERMAL_TEMP_INVALID;
+ pr_debug("sys_get_trip_temp %d\n", *temp);
+
+ return 0;
+}
+
+static int
+sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp)
+{
+ struct zone_device *zonedev = tzd->devdata;
+ u32 l, h, mask, shift, intr;
+ int ret;
+
+ if (trip >= MAX_NUMBER_OF_TRIPS || temp >= zonedev->tj_max)
+ return -EINVAL;
+
+ ret = rdmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
+ &l, &h);
+ if (ret < 0)
+ return ret;
+
+ if (trip) {
+ mask = THERM_MASK_THRESHOLD1;
+ shift = THERM_SHIFT_THRESHOLD1;
+ intr = THERM_INT_THRESHOLD1_ENABLE;
+ } else {
+ mask = THERM_MASK_THRESHOLD0;
+ shift = THERM_SHIFT_THRESHOLD0;
+ intr = THERM_INT_THRESHOLD0_ENABLE;
+ }
+ l &= ~mask;
+ /*
+ * A trip temperature of 0 set by user space indicates that it is no
+ * longer interested in receiving notifications.
+ */
+ if (!temp) {
+ l &= ~intr;
+ } else {
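+  /* threshold is programmed as whole degrees below TJ max */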
+ l |= (zonedev->tj_max - temp)/1000 << shift;
+ l |= intr;
+ }
+
+ return wrmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
+ l, h);
+}
+
+static int sys_get_trip_type(struct thermal_zone_device *thermal, int trip,
+ enum thermal_trip_type *type)
+{
+ *type = THERMAL_TRIP_PASSIVE;
+ return 0;
+}
+
+/* Thermal zone callback registry */
+static struct thermal_zone_device_ops tzone_ops = {
+ .get_temp = sys_get_curr_temp,
+ .get_trip_temp = sys_get_trip_temp,
+ .get_trip_type = sys_get_trip_type,
+ .set_trip_temp = sys_set_trip_temp,
+};
+
+static bool pkg_thermal_rate_control(void)
+{
+ return true;
+}
+
+/* Enable threshold interrupt on local package/cpu */
+static inline void enable_pkg_thres_interrupt(void)
+{
+ u8 thres_0, thres_1;
+ u32 l, h;
+
+ rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+ /* only enable a threshold interrupt if the threshold has a valid value */
+ thres_0 = (l & THERM_MASK_THRESHOLD0) >> THERM_SHIFT_THRESHOLD0;
+ thres_1 = (l & THERM_MASK_THRESHOLD1) >> THERM_SHIFT_THRESHOLD1;
+ if (thres_0)
+ l |= THERM_INT_THRESHOLD0_ENABLE;
+ if (thres_1)
+ l |= THERM_INT_THRESHOLD1_ENABLE;
+ wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+}
+
+/* Disable threshold interrupt on local package/cpu */
+static inline void disable_pkg_thres_interrupt(void)
+{
+ u32 l, h;
+
+ rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+
+ l &= ~(THERM_INT_THRESHOLD0_ENABLE | THERM_INT_THRESHOLD1_ENABLE);
+ wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+}
+
+static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
+{
+ struct thermal_zone_device *tzone = NULL;
+ int cpu = smp_processor_id();
+ struct zone_device *zonedev;
+ u64 msr_val, wr_val;
+
+ mutex_lock(&thermal_zone_mutex);
+ raw_spin_lock_irq(&pkg_temp_lock);
+ ++pkg_work_cnt;
+
+ zonedev = pkg_temp_thermal_get_dev(cpu);
+ if (!zonedev) {
+ raw_spin_unlock_irq(&pkg_temp_lock);
+ mutex_unlock(&thermal_zone_mutex);
+ return;
+ }
+ zonedev->work_scheduled = false;
+
+ rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
+ wr_val = msr_val & ~(THERM_LOG_THRESHOLD0 | THERM_LOG_THRESHOLD1);
+ if (wr_val != msr_val) {
+ wrmsrl(MSR_IA32_PACKAGE_THERM_STATUS, wr_val);
+ tzone = zonedev->tzone;
+ }
+
+ enable_pkg_thres_interrupt();
+ raw_spin_unlock_irq(&pkg_temp_lock);
+
+ /*
+ * If tzone is not NULL, then thermal_zone_mutex will prevent the
+ * concurrent removal in the cpu offline callback.
+ */
+ if (tzone)
+ thermal_zone_device_update(tzone, THERMAL_EVENT_UNSPECIFIED);
+
+ mutex_unlock(&thermal_zone_mutex);
+}
+
+static void pkg_thermal_schedule_work(int cpu, struct delayed_work *work)
+{
+ unsigned long ms = msecs_to_jiffies(notify_delay_ms);
+
+ schedule_delayed_work_on(cpu, work, ms);
+}
+
+static int pkg_thermal_notify(u64 msr_val)
+{
+ int cpu = smp_processor_id();
+ struct zone_device *zonedev;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pkg_temp_lock, flags);
+ ++pkg_interrupt_cnt;
+
+ disable_pkg_thres_interrupt();
+
+ /* Work is per package, so scheduling it once is enough. */
+ zonedev = pkg_temp_thermal_get_dev(cpu);
+ if (zonedev && !zonedev->work_scheduled) {
+ zonedev->work_scheduled = true;
+ pkg_thermal_schedule_work(zonedev->cpu, &zonedev->work);
+ }
+
+ raw_spin_unlock_irqrestore(&pkg_temp_lock, flags);
+ return 0;
+}
+
+static int pkg_temp_thermal_device_add(unsigned int cpu)
+{
+ int id = topology_logical_die_id(cpu);
+ u32 tj_max, eax, ebx, ecx, edx;
+ struct zone_device *zonedev;
+ int thres_count, err;
+
+ if (id >= max_id)
+ return -ENOMEM;
+
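+ /* CPUID leaf 6: EBX reports the number of programmable thermal thresholds */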
+ cpuid(6, &eax, &ebx, &ecx, &edx);
+ thres_count = ebx & 0x07;
+ if (!thres_count)
+ return -ENODEV;
+
+ thres_count = clamp_val(thres_count, 0, MAX_NUMBER_OF_TRIPS);
+
+ err = get_tj_max(cpu, &tj_max);
+ if (err)
+ return err;
+
+ zonedev = kzalloc(sizeof(*zonedev), GFP_KERNEL);
+ if (!zonedev)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&zonedev->work, pkg_temp_thermal_threshold_work_fn);
+ zonedev->cpu = cpu;
+ zonedev->tj_max = tj_max;
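+ /* the trip mask marks each supported trip point as writable */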
+ zonedev->tzone = thermal_zone_device_register("x86_pkg_temp",
+ thres_count,
+ (thres_count == MAX_NUMBER_OF_TRIPS) ? 0x03 : 0x01,
+ zonedev, &tzone_ops, &pkg_temp_tz_params, 0, 0);
+ if (IS_ERR(zonedev->tzone)) {
+ err = PTR_ERR(zonedev->tzone);
+ kfree(zonedev);
+ return err;
+ }
+ err = thermal_zone_device_enable(zonedev->tzone);
+ if (err) {
+ thermal_zone_device_unregister(zonedev->tzone);
+ kfree(zonedev);
+ return err;
+ }
+ /* Store MSR value for package thermal interrupt, to restore at exit */
+ rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, zonedev->msr_pkg_therm_low,
+ zonedev->msr_pkg_therm_high);
+
+ cpumask_set_cpu(cpu, &zonedev->cpumask);
+ raw_spin_lock_irq(&pkg_temp_lock);
+ zones[id] = zonedev;
+ raw_spin_unlock_irq(&pkg_temp_lock);
+ return 0;
+}
+
+static int pkg_thermal_cpu_offline(unsigned int cpu)
+{
+ struct zone_device *zonedev = pkg_temp_thermal_get_dev(cpu);
+ bool lastcpu, was_target;
+ int target;
+
+ if (!zonedev)
+ return 0;
+
+ target = cpumask_any_but(&zonedev->cpumask, cpu);
+ cpumask_clear_cpu(cpu, &zonedev->cpumask);
+ lastcpu = target >= nr_cpu_ids;
+ /*
+ * If this is the last CPU in the package, remove the sysfs files
+ * before doing further cleanup.
+ */
+ if (lastcpu) {
+ struct thermal_zone_device *tzone = zonedev->tzone;
+
+  /*
+   * We must protect against the work function calling
+   * thermal_zone_device_update() after/while unregistering. NULL out
+   * the pointer under the zone mutex so the worker function won't
+   * try to call it.
+   */
+ mutex_lock(&thermal_zone_mutex);
+ zonedev->tzone = NULL;
+ mutex_unlock(&thermal_zone_mutex);
+
+ thermal_zone_device_unregister(tzone);
+ }
+
+ /* Protect against work and interrupts */
+ raw_spin_lock_irq(&pkg_temp_lock);
+
+ /*
+ * Check whether this cpu was the current target and store the new
+ * one. When we drop the lock, then the interrupt notify function
+ * will see the new target.
+ */
+ was_target = zonedev->cpu == cpu;
+ zonedev->cpu = target;
+
+ /*
+ * If this is the last CPU in the package remove the package
+ * reference from the array and restore the interrupt MSR. When we
+ * drop the lock neither the interrupt notify function nor the
+ * worker will see the package anymore.
+ */
+ if (lastcpu) {
+ zones[topology_logical_die_id(cpu)] = NULL;
+ /* After this point nothing touches the MSR anymore. */
+ wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
+ zonedev->msr_pkg_therm_low, zonedev->msr_pkg_therm_high);
+ }
+
+ /*
+ * Check whether there is work scheduled and whether the work is
+ * targeted at the outgoing CPU.
+ */
+ if (zonedev->work_scheduled && was_target) {
+ /*
+ * To cancel the work we need to drop the lock, otherwise
+ * we might deadlock if the work needs to be flushed.
+ */
+ raw_spin_unlock_irq(&pkg_temp_lock);
+ cancel_delayed_work_sync(&zonedev->work);
+ raw_spin_lock_irq(&pkg_temp_lock);
+ /*
+ * If this is not the last cpu in the package and the work
+ * did not run after we dropped the lock above, then we
+ * need to reschedule the work, otherwise the interrupt
+ * stays disabled forever.
+ */
+ if (!lastcpu && zonedev->work_scheduled)
+ pkg_thermal_schedule_work(target, &zonedev->work);
+ }
+
+ raw_spin_unlock_irq(&pkg_temp_lock);
+
+ /* Final cleanup if this is the last cpu */
+ if (lastcpu)
+ kfree(zonedev);
+ return 0;
+}
+
+static int pkg_thermal_cpu_online(unsigned int cpu)
+{
+ struct zone_device *zonedev = pkg_temp_thermal_get_dev(cpu);
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+ /* Paranoia check */
+ if (!cpu_has(c, X86_FEATURE_DTHERM) || !cpu_has(c, X86_FEATURE_PTS))
+ return -ENODEV;
+
+ /* If the package exists, nothing to do */
+ if (zonedev) {
+ cpumask_set_cpu(cpu, &zonedev->cpumask);
+ return 0;
+ }
+ return pkg_temp_thermal_device_add(cpu);
+}
+
+static const struct x86_cpu_id __initconst pkg_temp_thermal_ids[] = {
+ X86_MATCH_VENDOR_FEATURE(INTEL, X86_FEATURE_PTS, NULL),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, pkg_temp_thermal_ids);
+
+static int __init pkg_temp_thermal_init(void)
+{
+ int ret;
+
+ if (!x86_match_cpu(pkg_temp_thermal_ids))
+ return -ENODEV;
+
+ max_id = topology_max_packages() * topology_max_die_per_package();
+ zones = kcalloc(max_id, sizeof(struct zone_device *),
+ GFP_KERNEL);
+ if (!zones)
+ return -ENOMEM;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "thermal/x86_pkg:online",
+ pkg_thermal_cpu_online, pkg_thermal_cpu_offline);
+ if (ret < 0)
+ goto err;
+
+ /* Store the state for module exit */
+ pkg_thermal_hp_state = ret;
+
+ platform_thermal_package_notify = pkg_thermal_notify;
+ platform_thermal_package_rate_control = pkg_thermal_rate_control;
+
+ /* Don't care if it fails */
+ pkg_temp_debugfs_init();
+ return 0;
+
+err:
+ kfree(zones);
+ return ret;
+}
+module_init(pkg_temp_thermal_init)
+
+static void __exit pkg_temp_thermal_exit(void)
+{
+ platform_thermal_package_notify = NULL;
+ platform_thermal_package_rate_control = NULL;
+
+ cpuhp_remove_state(pkg_thermal_hp_state);
+ debugfs_remove_recursive(debugfs);
+ kfree(zones);
+}
+module_exit(pkg_temp_thermal_exit)
+
+MODULE_DESCRIPTION("X86 PKG TEMP Thermal Driver");
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_LICENSE("GPL v2");