author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/platform/x86/intel
parent    Initial commit. (diff)
Adding upstream version 6.6.15.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/platform/x86/intel')
-rw-r--r--  drivers/platform/x86/intel/Kconfig | 228
-rw-r--r--  drivers/platform/x86/intel/Makefile | 62
-rw-r--r--  drivers/platform/x86/intel/atomisp2/Kconfig | 43
-rw-r--r--  drivers/platform/x86/intel/atomisp2/Makefile | 9
-rw-r--r--  drivers/platform/x86/intel/atomisp2/led.c | 117
-rw-r--r--  drivers/platform/x86/intel/atomisp2/pm.c | 143
-rw-r--r--  drivers/platform/x86/intel/bxtwc_tmu.c | 146
-rw-r--r--  drivers/platform/x86/intel/bytcrc_pwrsrc.c | 181
-rw-r--r--  drivers/platform/x86/intel/chtdc_ti_pwrbtn.c | 93
-rw-r--r--  drivers/platform/x86/intel/chtwc_int33fe.c | 437
-rw-r--r--  drivers/platform/x86/intel/crystal_cove_charger.c | 153
-rw-r--r--  drivers/platform/x86/intel/hid.c | 785
-rw-r--r--  drivers/platform/x86/intel/ifs/Kconfig | 12
-rw-r--r--  drivers/platform/x86/intel/ifs/Makefile | 3
-rw-r--r--  drivers/platform/x86/intel/ifs/core.c | 117
-rw-r--r--  drivers/platform/x86/intel/ifs/ifs.h | 276
-rw-r--r--  drivers/platform/x86/intel/ifs/load.c | 294
-rw-r--r--  drivers/platform/x86/intel/ifs/runtest.c | 347
-rw-r--r--  drivers/platform/x86/intel/ifs/sysfs.c | 155
-rw-r--r--  drivers/platform/x86/intel/int0002_vgpio.c | 282
-rw-r--r--  drivers/platform/x86/intel/int1092/Kconfig | 14
-rw-r--r--  drivers/platform/x86/intel/int1092/Makefile | 1
-rw-r--r--  drivers/platform/x86/intel/int1092/intel_sar.c | 321
-rw-r--r--  drivers/platform/x86/intel/int1092/intel_sar.h | 86
-rw-r--r--  drivers/platform/x86/intel/int3472/Kconfig | 31
-rw-r--r--  drivers/platform/x86/intel/int3472/Makefile | 4
-rw-r--r--  drivers/platform/x86/intel/int3472/clk_and_regulator.c | 352
-rw-r--r--  drivers/platform/x86/intel/int3472/common.c | 82
-rw-r--r--  drivers/platform/x86/intel/int3472/common.h | 132
-rw-r--r--  drivers/platform/x86/intel/int3472/discrete.c | 349
-rw-r--r--  drivers/platform/x86/intel/int3472/led.c | 75
-rw-r--r--  drivers/platform/x86/intel/int3472/tps68470.c | 261
-rw-r--r--  drivers/platform/x86/intel/int3472/tps68470.h | 26
-rw-r--r--  drivers/platform/x86/intel/int3472/tps68470_board_data.c | 208
-rw-r--r--  drivers/platform/x86/intel/ishtp_eclite.c | 703
-rw-r--r--  drivers/platform/x86/intel/mrfld_pwrbtn.c | 106
-rw-r--r--  drivers/platform/x86/intel/oaktrail.c | 371
-rw-r--r--  drivers/platform/x86/intel/pmc/Kconfig | 25
-rw-r--r--  drivers/platform/x86/intel/pmc/Makefile | 10
-rw-r--r--  drivers/platform/x86/intel/pmc/adl.c | 326
-rw-r--r--  drivers/platform/x86/intel/pmc/cnp.c | 238
-rw-r--r--  drivers/platform/x86/intel/pmc/core.c | 1409
-rw-r--r--  drivers/platform/x86/intel/pmc/core.h | 527
-rw-r--r--  drivers/platform/x86/intel/pmc/core_ssram.c | 133
-rw-r--r--  drivers/platform/x86/intel/pmc/icl.c | 59
-rw-r--r--  drivers/platform/x86/intel/pmc/mtl.c | 1009
-rw-r--r--  drivers/platform/x86/intel/pmc/pltdrv.c | 89
-rw-r--r--  drivers/platform/x86/intel/pmc/spt.c | 143
-rw-r--r--  drivers/platform/x86/intel/pmc/tgl.c | 273
-rw-r--r--  drivers/platform/x86/intel/pmt/Kconfig | 40
-rw-r--r--  drivers/platform/x86/intel/pmt/Makefile | 12
-rw-r--r--  drivers/platform/x86/intel/pmt/class.c | 362
-rw-r--r--  drivers/platform/x86/intel/pmt/class.h | 54
-rw-r--r--  drivers/platform/x86/intel/pmt/crashlog.c | 331
-rw-r--r--  drivers/platform/x86/intel/pmt/telemetry.c | 163
-rw-r--r--  drivers/platform/x86/intel/punit_ipc.c | 335
-rw-r--r--  drivers/platform/x86/intel/rst.c | 140
-rw-r--r--  drivers/platform/x86/intel/sdsi.c | 672
-rw-r--r--  drivers/platform/x86/intel/smartconnect.c | 44
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/Kconfig | 21
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/Makefile | 12
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/isst_if_common.c | 840
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/isst_if_common.h | 79
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c | 214
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/isst_if_mbox_pci.c | 227
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c | 200
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/isst_tpmi.c | 72
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c | 1442
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.h | 18
-rw-r--r--  drivers/platform/x86/intel/telemetry/Kconfig | 16
-rw-r--r--  drivers/platform/x86/intel/telemetry/Makefile | 11
-rw-r--r--  drivers/platform/x86/intel/telemetry/core.c | 450
-rw-r--r--  drivers/platform/x86/intel/telemetry/debugfs.c | 961
-rw-r--r--  drivers/platform/x86/intel/telemetry/pltdrv.c | 1188
-rw-r--r--  drivers/platform/x86/intel/tpmi.c | 791
-rw-r--r--  drivers/platform/x86/intel/turbo_max_3.c | 139
-rw-r--r--  drivers/platform/x86/intel/uncore-frequency/Kconfig | 25
-rw-r--r--  drivers/platform/x86/intel/uncore-frequency/Makefile | 11
-rw-r--r--  drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c | 313
-rw-r--r--  drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h | 76
-rw-r--r--  drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c | 413
-rw-r--r--  drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c | 284
-rw-r--r--  drivers/platform/x86/intel/vbtn.c | 416
-rw-r--r--  drivers/platform/x86/intel/vsec.c | 518
-rw-r--r--  drivers/platform/x86/intel/vsec.h | 66
-rw-r--r--  drivers/platform/x86/intel/wmi/Kconfig | 31
-rw-r--r--  drivers/platform/x86/intel/wmi/Makefile | 9
-rw-r--r--  drivers/platform/x86/intel/wmi/sbl-fw-update.c | 144
-rw-r--r--  drivers/platform/x86/intel/wmi/thunderbolt.c | 74
89 files changed, 22460 insertions, 0 deletions
diff --git a/drivers/platform/x86/intel/Kconfig b/drivers/platform/x86/intel/Kconfig
new file mode 100644
index 0000000000..e9dc0c0210
--- /dev/null
+++ b/drivers/platform/x86/intel/Kconfig
@@ -0,0 +1,228 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Intel x86 Platform Specific Drivers
+#
+
+source "drivers/platform/x86/intel/atomisp2/Kconfig"
+source "drivers/platform/x86/intel/ifs/Kconfig"
+source "drivers/platform/x86/intel/int1092/Kconfig"
+source "drivers/platform/x86/intel/int3472/Kconfig"
+source "drivers/platform/x86/intel/pmc/Kconfig"
+source "drivers/platform/x86/intel/pmt/Kconfig"
+source "drivers/platform/x86/intel/speed_select_if/Kconfig"
+source "drivers/platform/x86/intel/telemetry/Kconfig"
+source "drivers/platform/x86/intel/wmi/Kconfig"
+source "drivers/platform/x86/intel/uncore-frequency/Kconfig"
+
+config INTEL_HID_EVENT
+ tristate "Intel HID Event"
+ depends on ACPI
+ depends on INPUT
+ depends on I2C
+ select INPUT_SPARSEKMAP
+ help
+ This driver provides support for the Intel HID Event hotkey interface.
+ Some laptops require this driver for hotkey support.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel_hid.
+
+config INTEL_VBTN
+ tristate "Intel Virtual Button"
+ depends on ACPI
+ depends on INPUT
+ depends on I2C
+ select INPUT_SPARSEKMAP
+ help
+ This driver provides support for the Intel Virtual Button interface.
+ Some laptops require this driver for power button support.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel_vbtn.
+
+config INTEL_INT0002_VGPIO
+ tristate "Intel ACPI INT0002 Virtual GPIO driver"
+ depends on GPIOLIB && ACPI && PM_SLEEP
+ select GPIOLIB_IRQCHIP
+ help
+ Some peripherals on Bay Trail and Cherry Trail platforms signal a
+ Power Management Event (PME) to the Power Management Controller (PMC)
+	  to wake up the system. When this happens, software needs to explicitly
+ clear the PME bus 0 status bit in the GPE0a_STS register to avoid an
+ IRQ storm on IRQ 9.
+
+	  This is modelled in ACPI through the INT0002 ACPI device, which is
+	  called a "Virtual GPIO controller" in ACPI because it defines,
+	  through _AEI and _L02 methods, the event handler to call when the
+	  PME triggers, as would be done for a real GPIO interrupt in ACPI.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel_int0002_vgpio.
+
+config INTEL_OAKTRAIL
+ tristate "Intel Oaktrail Platform Extras"
+ depends on ACPI
+ depends on ACPI_VIDEO || ACPI_VIDEO=n
+ depends on RFKILL && BACKLIGHT_CLASS_DEVICE && ACPI
+ help
+	  Intel Oaktrail platforms need this driver to provide interfaces to
+	  enable/disable the camera, WiFi, BT, etc. devices. If in doubt, say Y
+ here; it will only load on supported platforms.
+
+config INTEL_BXTWC_PMIC_TMU
+ tristate "Intel Broxton Whiskey Cove TMU Driver"
+ depends on INTEL_SOC_PMIC_BXTWC
+ depends on MFD_INTEL_PMC_BXT
+ select REGMAP
+ help
+	  Select this driver to use the Intel Broxton Whiskey Cove PMIC TMU feature.
+ This driver enables the alarm wakeup functionality in the TMU unit of
+ Whiskey Cove PMIC.
+
+config INTEL_BYTCRC_PWRSRC
+ tristate "Intel Bay Trail Crystal Cove power source driver"
+ depends on INTEL_SOC_PMIC
+ help
+ This option adds a power source driver for Crystal Cove PMICs
+ on Intel Bay Trail devices.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_bytcrc_pwrsrc.
+
+config INTEL_CHTDC_TI_PWRBTN
+ tristate "Intel Cherry Trail Dollar Cove TI power button driver"
+ depends on INTEL_SOC_PMIC_CHTDC_TI
+ depends on INPUT
+ help
+ This option adds a power button driver for Dollar Cove TI
+ PMIC on Intel Cherry Trail devices.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_chtdc_ti_pwrbtn.
+
+config INTEL_CHTWC_INT33FE
+ tristate "Intel Cherry Trail Whiskey Cove ACPI INT33FE Driver"
+ depends on X86 && ACPI && I2C && REGULATOR
+ depends on CHARGER_BQ24190=y || (CHARGER_BQ24190=m && m)
+ depends on USB_ROLES_INTEL_XHCI=y || (USB_ROLES_INTEL_XHCI=m && m)
+ depends on TYPEC_MUX_PI3USB30532=y || (TYPEC_MUX_PI3USB30532=m && m)
+ help
+	  This driver adds support for the Intel Cherry Trail Whiskey Cove
+ INT33FE ACPI device found on the GPD win and the GPD pocket.
+
+ The INT33FE ACPI device on these mini laptops contains I2cSerialBusV2
+ resources for a MAX17042 Fuel Gauge, FUSB302 USB Type-C Controller
+ and PI3USB30532 USB switch.
+ This driver instantiates i2c-clients for these, so that standard
+	  i2c drivers for these chips can bind to them.
+
+ If you enable this driver it is advised to also select
+ CONFIG_TYPEC_FUSB302=m, CONFIG_TYPEC_MUX_PI3USB30532=m and
+ CONFIG_BATTERY_MAX17042=m.
+
+config INTEL_ISHTP_ECLITE
+ tristate "Intel ISHTP eclite controller Driver"
+ depends on INTEL_ISH_HID
+ depends on ACPI
+ help
+	  This driver is for accessing the PSE (Programmable Service Engine) -
+	  an Embedded Controller-like IP - using ISHTP (Integrated Sensor Hub
+	  Transport Protocol) to get battery, thermal and UCSI (USB Type-C
+	  Connector System Software Interface) related data from the platform.
+	  Users who don't want to use a discrete Embedded Controller on Intel's
+	  Elkhart Lake platform can leverage this integrated ECLite solution,
+	  which is part of the PSE subsystem.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_ishtp_eclite.
+
+config INTEL_MRFLD_PWRBTN
+ tristate "Intel Merrifield Basin Cove power button driver"
+ depends on INTEL_SOC_PMIC_MRFLD
+ depends on INPUT
+ help
+ This option adds a power button driver for Basin Cove PMIC
+ on Intel Merrifield devices.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_mrfld_pwrbtn.
+
+config INTEL_PUNIT_IPC
+ tristate "Intel P-Unit IPC Driver"
+ help
+	  This driver provides support for the Intel P-Unit Mailbox IPC
+	  mechanism, which bridges communication between the kernel and the
+	  P-Unit.
+
+config INTEL_RST
+ tristate "Intel Rapid Start Technology Driver"
+ depends on ACPI
+ help
+ This driver provides support for modifying parameters on systems
+ equipped with Intel's Rapid Start Technology. When put in an ACPI
+ sleep state, these devices will wake after either a configured
+ timeout or when the system battery reaches a critical state,
+ automatically copying memory contents to disk. On resume, the
+ firmware will copy the memory contents back to RAM and resume the OS
+ as usual.
+
+config INTEL_SDSI
+ tristate "Intel On Demand (Software Defined Silicon) Driver"
+ depends on INTEL_VSEC
+ depends on X86_64
+ help
+ This driver enables access to the Intel On Demand (formerly Software
+ Defined Silicon) interface used to provision silicon features with an
+ authentication certificate and capability license.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel_sdsi.
+
+config INTEL_SMARTCONNECT
+ tristate "Intel Smart Connect disabling driver"
+ depends on ACPI
+ help
+ Intel Smart Connect is a technology intended to permit devices to
+ update state by resuming for a short period of time at regular
+ intervals. If a user enables this functionality under Windows and
+ then reboots into Linux, the system may remain configured to resume
+ on suspend. In the absence of any userspace to support it, the system
+ will then remain awake until something triggers another suspend.
+
+ This driver checks to determine whether the device has Intel Smart
+ Connect enabled, and if so disables it.
+
+config INTEL_TPMI
+ tristate "Intel Topology Aware Register and PM Capsule Interface (TPMI)"
+ depends on INTEL_VSEC
+ depends on X86_64
+ help
+	  The Intel Topology Aware Register and PM Capsule Interface (TPMI)
+	  provides an enumerable MMIO interface for power management features.
+	  This driver creates devices so that other PM feature drivers can
+	  be loaded for PM-specific feature operation.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel_vsec_tpmi.
+
+config INTEL_TURBO_MAX_3
+ bool "Intel Turbo Boost Max Technology 3.0 enumeration driver"
+ depends on X86_64 && SCHED_MC_PRIO
+ help
+	  This driver reads the maximum performance ratio of each CPU and sets
+	  up the scheduler priority metrics. In this way the scheduler can
+	  prefer CPUs with higher performance to schedule tasks.
+
+ This driver is only required when the system is not using Hardware
+ P-States (HWP). In HWP mode, priority can be read from ACPI tables.
+
+config INTEL_VSEC
+ tristate "Intel Vendor Specific Extended Capabilities Driver"
+ depends on PCI
+ select AUXILIARY_BUS
+ help
+ Adds support for feature drivers exposed using Intel PCIe VSEC and
+ DVSEC.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel_vsec.
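
Every option above is a tristate, so the recurring "choose M here" wording maps onto the usual CONFIG_* symbols. As a minimal illustrative sketch (the helper name below is hypothetical, not part of this patch), driver code tests such options with the helpers from <linux/kconfig.h>:

#include <linux/kconfig.h>

/*
 * IS_ENABLED() is true when the option is built-in (=y) or modular (=m),
 * IS_BUILTIN() only for =y, IS_MODULE() only for =m.
 * Hypothetical helper, for illustration only.
 */
static inline bool intel_vsec_support_present(void)
{
	return IS_ENABLED(CONFIG_INTEL_VSEC);
}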
diff --git a/drivers/platform/x86/intel/Makefile b/drivers/platform/x86/intel/Makefile
new file mode 100644
index 0000000000..c1d5fe05e3
--- /dev/null
+++ b/drivers/platform/x86/intel/Makefile
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for drivers/platform/x86/intel
+# Intel x86 Platform-Specific Drivers
+#
+
+obj-$(CONFIG_INTEL_ATOMISP2_PDX86) += atomisp2/
+obj-$(CONFIG_INTEL_IFS) += ifs/
+obj-$(CONFIG_INTEL_SAR_INT1092) += int1092/
+obj-$(CONFIG_INTEL_SKL_INT3472) += int3472/
+obj-$(CONFIG_INTEL_PMC_CORE) += pmc/
+obj-$(CONFIG_INTEL_PMT_CLASS) += pmt/
+obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += speed_select_if/
+obj-$(CONFIG_INTEL_TELEMETRY) += telemetry/
+obj-$(CONFIG_INTEL_WMI) += wmi/
+obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL) += uncore-frequency/
+
+# Intel input drivers
+intel-hid-y := hid.o
+obj-$(CONFIG_INTEL_HID_EVENT) += intel-hid.o
+intel-vbtn-y := vbtn.o
+obj-$(CONFIG_INTEL_VBTN) += intel-vbtn.o
+
+# Intel miscellaneous drivers
+obj-$(CONFIG_INTEL_ISHTP_ECLITE) += ishtp_eclite.o
+intel_int0002_vgpio-y := int0002_vgpio.o
+obj-$(CONFIG_INTEL_INT0002_VGPIO) += intel_int0002_vgpio.o
+intel_oaktrail-y := oaktrail.o
+obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o
+intel_sdsi-y := sdsi.o
+obj-$(CONFIG_INTEL_SDSI) += intel_sdsi.o
+intel_vsec-y := vsec.o
+obj-$(CONFIG_INTEL_VSEC) += intel_vsec.o
+
+# Intel PMIC / PMC / P-Unit drivers
+intel_bxtwc_tmu-y := bxtwc_tmu.o
+obj-$(CONFIG_INTEL_BXTWC_PMIC_TMU) += intel_bxtwc_tmu.o
+intel_crystal_cove_charger-y := crystal_cove_charger.o
+obj-$(CONFIG_X86_ANDROID_TABLETS) += intel_crystal_cove_charger.o
+intel_bytcrc_pwrsrc-y := bytcrc_pwrsrc.o
+obj-$(CONFIG_INTEL_BYTCRC_PWRSRC) += intel_bytcrc_pwrsrc.o
+intel_chtdc_ti_pwrbtn-y := chtdc_ti_pwrbtn.o
+obj-$(CONFIG_INTEL_CHTDC_TI_PWRBTN) += intel_chtdc_ti_pwrbtn.o
+intel_chtwc_int33fe-y := chtwc_int33fe.o
+obj-$(CONFIG_INTEL_CHTWC_INT33FE) += intel_chtwc_int33fe.o
+intel_mrfld_pwrbtn-y := mrfld_pwrbtn.o
+obj-$(CONFIG_INTEL_MRFLD_PWRBTN) += intel_mrfld_pwrbtn.o
+intel_punit_ipc-y := punit_ipc.o
+obj-$(CONFIG_INTEL_PUNIT_IPC) += intel_punit_ipc.o
+
+# TPMI drivers
+intel_vsec_tpmi-y := tpmi.o
+obj-$(CONFIG_INTEL_TPMI) += intel_vsec_tpmi.o
+
+# Intel Uncore drivers
+intel-rst-y := rst.o
+obj-$(CONFIG_INTEL_RST) += intel-rst.o
+intel-smartconnect-y := smartconnect.o
+obj-$(CONFIG_INTEL_SMARTCONNECT) += intel-smartconnect.o
+intel_turbo_max_3-y := turbo_max_3.o
+obj-$(CONFIG_INTEL_TURBO_MAX_3) += intel_turbo_max_3.o
diff --git a/drivers/platform/x86/intel/atomisp2/Kconfig b/drivers/platform/x86/intel/atomisp2/Kconfig
new file mode 100644
index 0000000000..35dd2be9d2
--- /dev/null
+++ b/drivers/platform/x86/intel/atomisp2/Kconfig
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Intel x86 Platform Specific Drivers
+#
+
+config INTEL_ATOMISP2_PDX86
+ bool
+
+config INTEL_ATOMISP2_LED
+ tristate "Intel AtomISP v2 camera LED driver"
+ depends on GPIOLIB && LEDS_GPIO
+ select INTEL_ATOMISP2_PDX86
+ help
+ Many Bay Trail and Cherry Trail devices come with a camera attached
+ to Intel's Image Signal Processor. Linux currently does not have a
+ driver for these, so they do not work as a camera. Some of these
+	  cameras have an LED which is controlled through a GPIO.
+
+ Some of these devices have a firmware issue where the LED gets turned
+ on at boot. This driver will turn the LED off at boot and also allows
+ controlling the LED (repurposing it) through the sysfs LED interface.
+
+	  Which GPIO is attached to the LED is usually not described in the
+	  ACPI tables, so this driver contains per-system info about the GPIO
+	  inside the driver. This means the driver only works on systems it
+	  knows about.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_atomisp2_led.
+
+config INTEL_ATOMISP2_PM
+ tristate "Intel AtomISP v2 dummy / power-management driver"
+ depends on PCI && IOSF_MBI && PM
+ depends on !INTEL_ATOMISP
+ select INTEL_ATOMISP2_PDX86
+ help
+ Power-management driver for Intel's Image Signal Processor found on
+ Bay Trail and Cherry Trail devices. This dummy driver's sole purpose
+ is to turn the ISP off (put it in D3) to save power and to allow
+	  entering S0ix modes.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_atomisp2_pm.
diff --git a/drivers/platform/x86/intel/atomisp2/Makefile b/drivers/platform/x86/intel/atomisp2/Makefile
new file mode 100644
index 0000000000..96b1e877d1
--- /dev/null
+++ b/drivers/platform/x86/intel/atomisp2/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Intel x86 Platform Specific Drivers
+#
+
+intel_atomisp2_led-y := led.o
+obj-$(CONFIG_INTEL_ATOMISP2_LED) += intel_atomisp2_led.o
+intel_atomisp2_pm-y += pm.o
+obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o
diff --git a/drivers/platform/x86/intel/atomisp2/led.c b/drivers/platform/x86/intel/atomisp2/led.c
new file mode 100644
index 0000000000..10077a61d8
--- /dev/null
+++ b/drivers/platform/x86/intel/atomisp2/led.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for controlling LEDs for cameras connected to the Intel atomisp2
+ * The main purpose of this driver is to turn off LEDs which are on at boot.
+ *
+ * Copyright (C) 2020 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/dmi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+
+/* This must be leds-gpio as the leds-gpio driver binds to the name */
+#define DEV_NAME "leds-gpio"
+
+static const struct gpio_led atomisp2_leds[] = {
+ {
+ .name = "atomisp2::camera",
+ .default_state = LEDS_GPIO_DEFSTATE_OFF,
+ },
+};
+
+static const struct gpio_led_platform_data atomisp2_leds_pdata = {
+ .num_leds = ARRAY_SIZE(atomisp2_leds),
+ .leds = atomisp2_leds,
+};
+
+static struct gpiod_lookup_table asus_t100ta_lookup = {
+ .dev_id = DEV_NAME,
+ .table = {
+ GPIO_LOOKUP_IDX("INT33FC:02", 8, NULL, 0, GPIO_ACTIVE_HIGH),
+ { }
+ }
+};
+
+static struct gpiod_lookup_table asus_t100chi_lookup = {
+ .dev_id = DEV_NAME,
+ .table = {
+ GPIO_LOOKUP_IDX("INT33FC:01", 24, NULL, 0, GPIO_ACTIVE_HIGH),
+ { }
+ }
+};
+
+static const struct dmi_system_id atomisp2_led_systems[] __initconst = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ /* Non exact match to also match T100TAF */
+ DMI_MATCH(DMI_PRODUCT_NAME, "T100TA"),
+ },
+ .driver_data = &asus_t100ta_lookup,
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T200TA"),
+ },
+ .driver_data = &asus_t100ta_lookup,
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100CHI"),
+ },
+ .driver_data = &asus_t100chi_lookup,
+ },
+ {} /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(dmi, atomisp2_led_systems);
+
+static struct gpiod_lookup_table *gpio_lookup;
+static struct platform_device *pdev;
+
+static int __init atomisp2_led_init(void)
+{
+ const struct dmi_system_id *system;
+
+ system = dmi_first_match(atomisp2_led_systems);
+ if (!system)
+ return -ENODEV;
+
+ gpio_lookup = system->driver_data;
+ gpiod_add_lookup_table(gpio_lookup);
+
+ pdev = platform_device_register_resndata(NULL,
+ DEV_NAME, PLATFORM_DEVID_NONE,
+ NULL, 0, &atomisp2_leds_pdata,
+ sizeof(atomisp2_leds_pdata));
+ if (IS_ERR(pdev))
+ gpiod_remove_lookup_table(gpio_lookup);
+
+ return PTR_ERR_OR_ZERO(pdev);
+}
+
+static void __exit atomisp2_led_cleanup(void)
+{
+ platform_device_unregister(pdev);
+ gpiod_remove_lookup_table(gpio_lookup);
+}
+
+module_init(atomisp2_led_init);
+module_exit(atomisp2_led_cleanup);
+
+/*
+ * The ACPI INIT method from Asus WMI's code on the T100TA and T200TA turns the
+ * LED on (without the WMI interface allowing further control over the LED).
+ * Ensure we are loaded after asus-nb-wmi so that we turn the LED off again.
+ */
+MODULE_SOFTDEP("pre: asus_nb_wmi");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com");
+MODULE_DESCRIPTION("Intel atomisp2 camera LED driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/atomisp2/pm.c b/drivers/platform/x86/intel/atomisp2/pm.c
new file mode 100644
index 0000000000..805fc0d851
--- /dev/null
+++ b/drivers/platform/x86/intel/atomisp2/pm.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Dummy driver for Intel's Image Signal Processor found on Bay Trail
+ * and Cherry Trail devices. The sole purpose of this driver is to allow
+ * the ISP to be put in D3.
+ *
+ * Copyright (C) 2018 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Based on various non upstream patches for ISP support:
+ * Copyright (C) 2010-2017 Intel Corporation. All rights reserved.
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <asm/iosf_mbi.h>
+
+/* PCI configuration regs */
+#define PCI_INTERRUPT_CTRL 0x9c
+
+#define PCI_CSI_CONTROL 0xe8
+#define PCI_CSI_CONTROL_PORTS_OFF_MASK 0x7
+
+/* IOSF BT_MBI_UNIT_PMC regs */
+#define ISPSSPM0 0x39
+#define ISPSSPM0_ISPSSC_OFFSET 0
+#define ISPSSPM0_ISPSSC_MASK 0x00000003
+#define ISPSSPM0_ISPSSS_OFFSET 24
+#define ISPSSPM0_ISPSSS_MASK 0x03000000
+#define ISPSSPM0_IUNIT_POWER_ON 0x0
+#define ISPSSPM0_IUNIT_POWER_OFF 0x3
+
+static int isp_set_power(struct pci_dev *dev, bool enable)
+{
+ unsigned long timeout;
+ u32 val = enable ? ISPSSPM0_IUNIT_POWER_ON : ISPSSPM0_IUNIT_POWER_OFF;
+
+ /* Write to ISPSSPM0 bit[1:0] to power on/off the IUNIT */
+ iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0,
+ val, ISPSSPM0_ISPSSC_MASK);
+
+ /*
+ * There should be no IUNIT access while power-down is
+ * in progress. HW sighting: 4567865.
+ * Wait up to 50 ms for the IUNIT to shut down.
+ * And we do the same for power on.
+ */
+ timeout = jiffies + msecs_to_jiffies(50);
+ do {
+ u32 tmp;
+
+ /* Wait until ISPSSPM0 bit[25:24] shows the right value */
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, &tmp);
+ tmp = (tmp & ISPSSPM0_ISPSSS_MASK) >> ISPSSPM0_ISPSSS_OFFSET;
+ if (tmp == val)
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (time_before(jiffies, timeout));
+
+ dev_err(&dev->dev, "IUNIT power-%s timeout.\n", enable ? "on" : "off");
+ return -EBUSY;
+}
+
+static int isp_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ pm_runtime_allow(&dev->dev);
+ pm_runtime_put_sync_suspend(&dev->dev);
+
+ return 0;
+}
+
+static void isp_remove(struct pci_dev *dev)
+{
+ pm_runtime_get_sync(&dev->dev);
+ pm_runtime_forbid(&dev->dev);
+}
+
+static int isp_pci_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u32 val;
+
+ pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, 0);
+
+ /*
+	 * The MRFLD IUNIT DPHY is located in an always-power-on island.
+	 * The MRFLD HW design requires all CSI ports to be disabled before
+	 * powering down the IUNIT.
+ */
+ pci_read_config_dword(pdev, PCI_CSI_CONTROL, &val);
+ val |= PCI_CSI_CONTROL_PORTS_OFF_MASK;
+ pci_write_config_dword(pdev, PCI_CSI_CONTROL, val);
+
+ /*
+ * We lose config space access when punit power gates
+ * the ISP. Can't use pci_set_power_state() because
+ * pmcsr won't actually change when we write to it.
+ */
+ pci_save_state(pdev);
+ pdev->current_state = PCI_D3cold;
+ isp_set_power(pdev, false);
+
+ return 0;
+}
+
+static int isp_pci_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ isp_set_power(pdev, true);
+ pdev->current_state = PCI_D0;
+ pci_restore_state(pdev);
+
+ return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(isp_pm_ops, isp_pci_suspend,
+ isp_pci_resume, NULL);
+
+static const struct pci_device_id isp_id_table[] = {
+ { PCI_VDEVICE(INTEL, 0x0f38), },
+ { PCI_VDEVICE(INTEL, 0x22b8), },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, isp_id_table);
+
+static struct pci_driver isp_pci_driver = {
+ .name = "intel_atomisp2_pm",
+ .id_table = isp_id_table,
+ .probe = isp_probe,
+ .remove = isp_remove,
+ .driver.pm = &isp_pm_ops,
+};
+
+module_pci_driver(isp_pci_driver);
+
+MODULE_DESCRIPTION("Intel AtomISP2 dummy / power-management drv (for suspend)");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/bxtwc_tmu.c b/drivers/platform/x86/intel/bxtwc_tmu.c
new file mode 100644
index 0000000000..d0e2a3c293
--- /dev/null
+++ b/drivers/platform/x86/intel/bxtwc_tmu.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel BXT Whiskey Cove PMIC TMU driver
+ *
+ * Copyright (C) 2016 Intel Corporation. All rights reserved.
+ *
+ * This driver adds TMU (Time Management Unit) support for Intel BXT platform.
+ * It enables the alarm wake-up functionality in the TMU unit of Whiskey Cove
+ * PMIC.
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/intel_soc_pmic.h>
+
+#define BXTWC_TMUIRQ 0x4fb6
+#define BXTWC_MIRQLVL1 0x4e0e
+#define BXTWC_MTMUIRQ_REG 0x4fb7
+#define BXTWC_MIRQLVL1_MTMU BIT(1)
+#define BXTWC_TMU_WK_ALRM BIT(1)
+#define BXTWC_TMU_SYS_ALRM BIT(2)
+#define BXTWC_TMU_ALRM_MASK (BXTWC_TMU_WK_ALRM | BXTWC_TMU_SYS_ALRM)
+#define BXTWC_TMU_ALRM_IRQ (BXTWC_TMU_WK_ALRM | BXTWC_TMU_SYS_ALRM)
+
+struct wcove_tmu {
+ int irq;
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+static irqreturn_t bxt_wcove_tmu_irq_handler(int irq, void *data)
+{
+ struct wcove_tmu *wctmu = data;
+ unsigned int tmu_irq;
+
+ /* Read TMU interrupt reg */
+ regmap_read(wctmu->regmap, BXTWC_TMUIRQ, &tmu_irq);
+ if (tmu_irq & BXTWC_TMU_ALRM_IRQ) {
+ /* clear TMU irq */
+ regmap_write(wctmu->regmap, BXTWC_TMUIRQ, tmu_irq);
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+static int bxt_wcove_tmu_probe(struct platform_device *pdev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
+ struct regmap_irq_chip_data *regmap_irq_chip;
+ struct wcove_tmu *wctmu;
+ int ret, virq, irq;
+
+ wctmu = devm_kzalloc(&pdev->dev, sizeof(*wctmu), GFP_KERNEL);
+ if (!wctmu)
+ return -ENOMEM;
+
+ wctmu->dev = &pdev->dev;
+ wctmu->regmap = pmic->regmap;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ regmap_irq_chip = pmic->irq_chip_data_tmu;
+ virq = regmap_irq_get_virq(regmap_irq_chip, irq);
+ if (virq < 0) {
+ dev_err(&pdev->dev,
+ "failed to get virtual interrupt=%d\n", irq);
+ return virq;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, virq,
+ NULL, bxt_wcove_tmu_irq_handler,
+ IRQF_ONESHOT, "bxt_wcove_tmu", wctmu);
+ if (ret) {
+ dev_err(&pdev->dev, "request irq failed: %d,virq: %d\n",
+ ret, virq);
+ return ret;
+ }
+ wctmu->irq = virq;
+
+ /* Unmask TMU second level Wake & System alarm */
+ regmap_update_bits(wctmu->regmap, BXTWC_MTMUIRQ_REG,
+ BXTWC_TMU_ALRM_MASK, 0);
+
+ platform_set_drvdata(pdev, wctmu);
+ return 0;
+}
+
+static void bxt_wcove_tmu_remove(struct platform_device *pdev)
+{
+ struct wcove_tmu *wctmu = platform_get_drvdata(pdev);
+ unsigned int val;
+
+ /* Mask TMU interrupts */
+ regmap_read(wctmu->regmap, BXTWC_MIRQLVL1, &val);
+ regmap_write(wctmu->regmap, BXTWC_MIRQLVL1,
+ val | BXTWC_MIRQLVL1_MTMU);
+ regmap_read(wctmu->regmap, BXTWC_MTMUIRQ_REG, &val);
+ regmap_write(wctmu->regmap, BXTWC_MTMUIRQ_REG,
+ val | BXTWC_TMU_ALRM_MASK);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bxtwc_tmu_suspend(struct device *dev)
+{
+ struct wcove_tmu *wctmu = dev_get_drvdata(dev);
+
+ enable_irq_wake(wctmu->irq);
+ return 0;
+}
+
+static int bxtwc_tmu_resume(struct device *dev)
+{
+ struct wcove_tmu *wctmu = dev_get_drvdata(dev);
+
+ disable_irq_wake(wctmu->irq);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(bxtwc_tmu_pm_ops, bxtwc_tmu_suspend, bxtwc_tmu_resume);
+
+static const struct platform_device_id bxt_wcove_tmu_id_table[] = {
+ { .name = "bxt_wcove_tmu" },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, bxt_wcove_tmu_id_table);
+
+static struct platform_driver bxt_wcove_tmu_driver = {
+ .probe = bxt_wcove_tmu_probe,
+ .remove_new = bxt_wcove_tmu_remove,
+ .driver = {
+ .name = "bxt_wcove_tmu",
+ .pm = &bxtwc_tmu_pm_ops,
+ },
+ .id_table = bxt_wcove_tmu_id_table,
+};
+
+module_platform_driver(bxt_wcove_tmu_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Nilesh Bacchewar <nilesh.bacchewar@intel.com>");
+MODULE_DESCRIPTION("BXT Whiskey Cove TMU Driver");
diff --git a/drivers/platform/x86/intel/bytcrc_pwrsrc.c b/drivers/platform/x86/intel/bytcrc_pwrsrc.c
new file mode 100644
index 0000000000..8a022b90d1
--- /dev/null
+++ b/drivers/platform/x86/intel/bytcrc_pwrsrc.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Power-source driver for Bay Trail Crystal Cove PMIC
+ *
+ * Copyright (c) 2023 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Based on intel_crystalcove_pwrsrc.c from Android kernel sources, which is:
+ * Copyright (C) 2013 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define CRYSTALCOVE_SPWRSRC_REG 0x1E
+#define CRYSTALCOVE_RESETSRC0_REG 0x20
+#define CRYSTALCOVE_RESETSRC1_REG 0x21
+#define CRYSTALCOVE_WAKESRC_REG 0x22
+
+struct crc_pwrsrc_data {
+ struct regmap *regmap;
+ struct dentry *debug_dentry;
+ unsigned int resetsrc0;
+ unsigned int resetsrc1;
+ unsigned int wakesrc;
+};
+
+static const char * const pwrsrc_pwrsrc_info[] = {
+ /* bit 0 */ "USB",
+ /* bit 1 */ "DC in",
+ /* bit 2 */ "Battery",
+ NULL,
+};
+
+static const char * const pwrsrc_resetsrc0_info[] = {
+ /* bit 0 */ "SOC reporting a thermal event",
+ /* bit 1 */ "critical PMIC temperature",
+ /* bit 2 */ "critical system temperature",
+ /* bit 3 */ "critical battery temperature",
+ /* bit 4 */ "VSYS under voltage",
+ /* bit 5 */ "VSYS over voltage",
+ /* bit 6 */ "battery removal",
+ NULL,
+};
+
+static const char * const pwrsrc_resetsrc1_info[] = {
+ /* bit 0 */ "VCRIT threshold",
+ /* bit 1 */ "BATID reporting battery removal",
+ /* bit 2 */ "user pressing the power button",
+ NULL,
+};
+
+static const char * const pwrsrc_wakesrc_info[] = {
+ /* bit 0 */ "user pressing the power button",
+ /* bit 1 */ "a battery insertion",
+ /* bit 2 */ "a USB charger insertion",
+ /* bit 3 */ "an adapter insertion",
+ NULL,
+};
+
+static void crc_pwrsrc_log(struct seq_file *seq, const char *prefix,
+ const char * const *info, unsigned int reg_val)
+{
+ int i;
+
+ for (i = 0; info[i]; i++) {
+ if (reg_val & BIT(i))
+ seq_printf(seq, "%s by %s\n", prefix, info[i]);
+ }
+}
+
+static int pwrsrc_show(struct seq_file *seq, void *unused)
+{
+ struct crc_pwrsrc_data *data = seq->private;
+ unsigned int reg_val;
+ int ret;
+
+ ret = regmap_read(data->regmap, CRYSTALCOVE_SPWRSRC_REG, &reg_val);
+ if (ret)
+ return ret;
+
+ crc_pwrsrc_log(seq, "System powered", pwrsrc_pwrsrc_info, reg_val);
+ return 0;
+}
+
+static int resetsrc_show(struct seq_file *seq, void *unused)
+{
+ struct crc_pwrsrc_data *data = seq->private;
+
+ crc_pwrsrc_log(seq, "Last shutdown caused", pwrsrc_resetsrc0_info, data->resetsrc0);
+ crc_pwrsrc_log(seq, "Last shutdown caused", pwrsrc_resetsrc1_info, data->resetsrc1);
+ return 0;
+}
+
+static int wakesrc_show(struct seq_file *seq, void *unused)
+{
+ struct crc_pwrsrc_data *data = seq->private;
+
+ crc_pwrsrc_log(seq, "Last wake caused", pwrsrc_wakesrc_info, data->wakesrc);
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(pwrsrc);
+DEFINE_SHOW_ATTRIBUTE(resetsrc);
+DEFINE_SHOW_ATTRIBUTE(wakesrc);
+
+static int crc_pwrsrc_read_and_clear(struct crc_pwrsrc_data *data,
+ unsigned int reg, unsigned int *val)
+{
+ int ret;
+
+ ret = regmap_read(data->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ return regmap_write(data->regmap, reg, *val);
+}
+
+static int crc_pwrsrc_probe(struct platform_device *pdev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
+ struct crc_pwrsrc_data *data;
+ int ret;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->regmap = pmic->regmap;
+
+ /*
+ * Read + clear resetsrc0/1 and wakesrc now, so that they get
+ * cleared even if the debugfs interface is never used.
+ *
+	 * Properly clearing the wakesrc is important; leaving bit 0 of it
+	 * set turns reboot into poweroff on some tablets.
+ */
+ ret = crc_pwrsrc_read_and_clear(data, CRYSTALCOVE_RESETSRC0_REG, &data->resetsrc0);
+ if (ret)
+ return ret;
+
+ ret = crc_pwrsrc_read_and_clear(data, CRYSTALCOVE_RESETSRC1_REG, &data->resetsrc1);
+ if (ret)
+ return ret;
+
+ ret = crc_pwrsrc_read_and_clear(data, CRYSTALCOVE_WAKESRC_REG, &data->wakesrc);
+ if (ret)
+ return ret;
+
+ data->debug_dentry = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ debugfs_create_file("pwrsrc", 0444, data->debug_dentry, data, &pwrsrc_fops);
+ debugfs_create_file("resetsrc", 0444, data->debug_dentry, data, &resetsrc_fops);
+ debugfs_create_file("wakesrc", 0444, data->debug_dentry, data, &wakesrc_fops);
+
+ platform_set_drvdata(pdev, data);
+ return 0;
+}
+
+static int crc_pwrsrc_remove(struct platform_device *pdev)
+{
+ struct crc_pwrsrc_data *data = platform_get_drvdata(pdev);
+
+ debugfs_remove_recursive(data->debug_dentry);
+ return 0;
+}
+
+static struct platform_driver crc_pwrsrc_driver = {
+ .probe = crc_pwrsrc_probe,
+ .remove = crc_pwrsrc_remove,
+ .driver = {
+ .name = "crystal_cove_pwrsrc",
+ },
+};
+module_platform_driver(crc_pwrsrc_driver);
+
+MODULE_ALIAS("platform:crystal_cove_pwrsrc");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Power-source driver for Bay Trail Crystal Cove PMIC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/chtdc_ti_pwrbtn.c b/drivers/platform/x86/intel/chtdc_ti_pwrbtn.c
new file mode 100644
index 0000000000..615f8d1a0c
--- /dev/null
+++ b/drivers/platform/x86/intel/chtdc_ti_pwrbtn.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Power-button driver for Dollar Cove TI PMIC
+ * Copyright (C) 2014 Intel Corp
+ * Copyright (c) 2017 Takashi Iwai <tiwai@suse.de>
+ */
+
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/slab.h>
+
+#define CHTDC_TI_SIRQ_REG 0x3
+#define SIRQ_PWRBTN_REL BIT(0)
+
+static irqreturn_t chtdc_ti_pwrbtn_interrupt(int irq, void *dev_id)
+{
+ struct input_dev *input = dev_id;
+ struct device *dev = input->dev.parent;
+ struct regmap *regmap = dev_get_drvdata(dev);
+ int state;
+
+ if (!regmap_read(regmap, CHTDC_TI_SIRQ_REG, &state)) {
+ dev_dbg(dev, "SIRQ_REG=0x%x\n", state);
+ input_report_key(input, KEY_POWER, !(state & SIRQ_PWRBTN_REL));
+ input_sync(input);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int chtdc_ti_pwrbtn_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev->parent);
+ struct input_dev *input;
+ int irq, err;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+		return irq;
+
+	input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
+ input->name = pdev->name;
+ input->phys = "power-button/input0";
+ input->id.bustype = BUS_HOST;
+ input_set_capability(input, EV_KEY, KEY_POWER);
+ err = input_register_device(input);
+ if (err)
+ return err;
+
+ dev_set_drvdata(dev, pmic->regmap);
+
+ err = devm_request_threaded_irq(dev, irq, NULL,
+ chtdc_ti_pwrbtn_interrupt,
+ IRQF_ONESHOT, KBUILD_MODNAME, input);
+ if (err)
+ return err;
+
+ device_init_wakeup(dev, true);
+ dev_pm_set_wake_irq(dev, irq);
+ return 0;
+}
+
+static void chtdc_ti_pwrbtn_remove(struct platform_device *pdev)
+{
+ dev_pm_clear_wake_irq(&pdev->dev);
+ device_init_wakeup(&pdev->dev, false);
+}
+
+static const struct platform_device_id chtdc_ti_pwrbtn_id_table[] = {
+ { .name = "chtdc_ti_pwrbtn" },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, chtdc_ti_pwrbtn_id_table);
+
+static struct platform_driver chtdc_ti_pwrbtn_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+ .probe = chtdc_ti_pwrbtn_probe,
+ .remove_new = chtdc_ti_pwrbtn_remove,
+ .id_table = chtdc_ti_pwrbtn_id_table,
+};
+module_platform_driver(chtdc_ti_pwrbtn_driver);
+
+MODULE_DESCRIPTION("Power-button driver for Dollar Cove TI PMIC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/chtwc_int33fe.c b/drivers/platform/x86/intel/chtwc_int33fe.c
new file mode 100644
index 0000000000..848baecc1b
--- /dev/null
+++ b/drivers/platform/x86/intel/chtwc_int33fe.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Cherry Trail ACPI INT33FE pseudo device driver
+ *
+ * Copyright (C) 2017 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Some Intel Cherry Trail based devices which ship with Windows 10 have
+ * this weird INT33FE ACPI device with a CRS table with 4 I2cSerialBusV2
+ * resources, for 4 different chips attached to various I²C buses:
+ * 1. The Whiskey Cove PMIC, which is also described by the INT34D3 ACPI device
+ * 2. Maxim MAX17047 Fuel Gauge Controller
+ * 3. FUSB302 USB Type-C Controller
+ * 4. PI3USB30532 USB switch
+ *
+ * So this driver is a stub / pseudo driver whose only purpose is to
+ * instantiate I²C clients for chips 2 - 4, so that standard I²C drivers
+ * for these chips can bind to them.
+ */
+
+#include <linux/dmi.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/usb/pd.h>
+
+struct cht_int33fe_data {
+ struct i2c_client *battery_fg;
+ struct i2c_client *fusb302;
+ struct i2c_client *pi3usb30532;
+ struct fwnode_handle *dp;
+};
+
+/*
+ * Grrr, I severely dislike buggy BIOS-es. At least one BIOS enumerates
+ * the max17047 both through the INT33FE ACPI device (it is right there
+ * in the resources table) as well as through a separate MAX17047 device.
+ *
+ * These helpers are used to work around this by checking if an I²C client
+ * for the max17047 has already been registered.
+ */
+static int cht_int33fe_check_for_max17047(struct device *dev, void *data)
+{
+ struct i2c_client **max17047 = data;
+ struct acpi_device *adev;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
+ return 0;
+
+	/* The MAX17047 ACPI node doesn't have a UID, so we don't check that */
+ if (!acpi_dev_hid_uid_match(adev, "MAX17047", NULL))
+ return 0;
+
+ *max17047 = to_i2c_client(dev);
+ return 1;
+}
+
+static const char * const max17047_suppliers[] = { "bq24190-charger" };
+
+static const struct property_entry max17047_properties[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("supplied-from", max17047_suppliers),
+ { }
+};
+
+static const struct software_node max17047_node = {
+ .name = "max17047",
+ .properties = max17047_properties,
+};
+
+/*
+ * We are not using inline property here because those are constant,
+ * and we need to adjust this one at runtime to point to real
+ * software node.
+ */
+static struct software_node_ref_args fusb302_mux_refs[] = {
+ { .node = NULL },
+};
+
+static const struct property_entry fusb302_properties[] = {
+ PROPERTY_ENTRY_STRING("linux,extcon-name", "cht_wcove_pwrsrc"),
+ PROPERTY_ENTRY_REF_ARRAY("usb-role-switch", fusb302_mux_refs),
+ { }
+};
+
+static const struct software_node fusb302_node = {
+ .name = "fusb302",
+ .properties = fusb302_properties,
+};
+
+#define PDO_FIXED_FLAGS \
+ (PDO_FIXED_DUAL_ROLE | PDO_FIXED_DATA_SWAP | PDO_FIXED_USB_COMM)
+
+static const u32 src_pdo[] = {
+ PDO_FIXED(5000, 1500, PDO_FIXED_FLAGS),
+};
+
+static const u32 snk_pdo[] = {
+ PDO_FIXED(5000, 400, PDO_FIXED_FLAGS),
+ PDO_VAR(5000, 12000, 3000),
+};
+
+static const struct software_node pi3usb30532_node = {
+ .name = "pi3usb30532",
+};
+
+static const struct software_node displayport_node = {
+ .name = "displayport",
+};
+
+static const struct property_entry usb_connector_properties[] = {
+ PROPERTY_ENTRY_STRING("data-role", "dual"),
+ PROPERTY_ENTRY_STRING("power-role", "dual"),
+ PROPERTY_ENTRY_STRING("try-power-role", "sink"),
+ PROPERTY_ENTRY_U32_ARRAY("source-pdos", src_pdo),
+ PROPERTY_ENTRY_U32_ARRAY("sink-pdos", snk_pdo),
+ PROPERTY_ENTRY_U32("op-sink-microwatt", 2500000),
+ PROPERTY_ENTRY_REF("orientation-switch", &pi3usb30532_node),
+ PROPERTY_ENTRY_REF("mode-switch", &pi3usb30532_node),
+ PROPERTY_ENTRY_REF("displayport", &displayport_node),
+ { }
+};
+
+static const struct software_node usb_connector_node = {
+ .name = "connector",
+ .parent = &fusb302_node,
+ .properties = usb_connector_properties,
+};
+
+static const struct software_node altmodes_node = {
+ .name = "altmodes",
+ .parent = &usb_connector_node,
+};
+
+static const struct property_entry dp_altmode_properties[] = {
+ PROPERTY_ENTRY_U32("svid", 0xff01),
+ PROPERTY_ENTRY_U32("vdo", 0x0c0086),
+ { }
+};
+
+static const struct software_node dp_altmode_node = {
+ .name = "displayport-altmode",
+ .parent = &altmodes_node,
+ .properties = dp_altmode_properties,
+};
+
+static const struct software_node *node_group[] = {
+ &fusb302_node,
+ &max17047_node,
+ &pi3usb30532_node,
+ &displayport_node,
+ &usb_connector_node,
+ &altmodes_node,
+ &dp_altmode_node,
+ NULL
+};
+
+static int cht_int33fe_setup_dp(struct cht_int33fe_data *data)
+{
+ struct fwnode_handle *fwnode;
+ struct pci_dev *pdev;
+
+ fwnode = software_node_fwnode(&displayport_node);
+ if (!fwnode)
+ return -ENODEV;
+
+ /* First let's find the GPU PCI device */
+ pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
+ if (!pdev || pdev->vendor != PCI_VENDOR_ID_INTEL) {
+ pci_dev_put(pdev);
+ return -ENODEV;
+ }
+
+ /* Then the DP-2 child device node */
+ data->dp = device_get_named_child_node(&pdev->dev, "DD04");
+ pci_dev_put(pdev);
+ if (!data->dp)
+ return -ENODEV;
+
+ fwnode->secondary = ERR_PTR(-ENODEV);
+ data->dp->secondary = fwnode;
+
+ return 0;
+}
+
+static void cht_int33fe_remove_nodes(struct cht_int33fe_data *data)
+{
+ software_node_unregister_node_group(node_group);
+
+ if (fusb302_mux_refs[0].node) {
+ fwnode_handle_put(software_node_fwnode(fusb302_mux_refs[0].node));
+ fusb302_mux_refs[0].node = NULL;
+ }
+
+ if (data->dp) {
+ data->dp->secondary = NULL;
+ fwnode_handle_put(data->dp);
+ data->dp = NULL;
+ }
+}
+
+static int cht_int33fe_add_nodes(struct cht_int33fe_data *data)
+{
+ const struct software_node *mux_ref_node;
+ int ret;
+
+ /*
+ * There is no ACPI device node for the USB role mux, so we need to wait
+ * until the mux driver has created software node for the mux device.
+ * It means we depend on the mux driver. This function will return
+ * -EPROBE_DEFER until the mux device is registered.
+ */
+ mux_ref_node = software_node_find_by_name(NULL, "intel-xhci-usb-sw");
+ if (!mux_ref_node)
+ return -EPROBE_DEFER;
+
+ /*
+ * Update node used in "usb-role-switch" property. Note that we
+ * rely on software_node_register_node_group() to use the original
+ * instance of properties instead of copying them.
+ */
+ fusb302_mux_refs[0].node = mux_ref_node;
+
+ ret = software_node_register_node_group(node_group);
+ if (ret)
+ return ret;
+
+ /* The devices that are not created in this driver need extra steps. */
+
+ /*
+	 * The DP connector does have an ACPI device node. In this case we can just
+ * find that ACPI node and assign our node as the secondary node to it.
+ */
+ ret = cht_int33fe_setup_dp(data);
+ if (ret)
+ goto err_remove_nodes;
+
+ return 0;
+
+err_remove_nodes:
+ cht_int33fe_remove_nodes(data);
+
+ return ret;
+}
+
+static int
+cht_int33fe_register_max17047(struct device *dev, struct cht_int33fe_data *data)
+{
+ struct i2c_client *max17047 = NULL;
+ struct i2c_board_info board_info;
+ struct fwnode_handle *fwnode;
+ int ret;
+
+ fwnode = software_node_fwnode(&max17047_node);
+ if (!fwnode)
+ return -ENODEV;
+
+ i2c_for_each_dev(&max17047, cht_int33fe_check_for_max17047);
+ if (max17047) {
+ /* Pre-existing I²C client for the max17047, add device properties */
+ set_secondary_fwnode(&max17047->dev, fwnode);
+ /* And re-probe to get the new device properties applied */
+ ret = device_reprobe(&max17047->dev);
+ if (ret)
+ dev_warn(dev, "Reprobing max17047 error: %d\n", ret);
+ return 0;
+ }
+
+ memset(&board_info, 0, sizeof(board_info));
+ strscpy(board_info.type, "max17047", I2C_NAME_SIZE);
+ board_info.dev_name = "max17047";
+ board_info.fwnode = fwnode;
+ data->battery_fg = i2c_acpi_new_device(dev, 1, &board_info);
+
+ return PTR_ERR_OR_ZERO(data->battery_fg);
+}
+
+static const struct dmi_system_id cht_int33fe_typec_ids[] = {
+ {
+ /*
+ * GPD win / GPD pocket mini laptops
+ *
+ * This DMI match may not seem unique, but it is. In the 67000+
+ * DMI decode dumps from linux-hardware.org only 116 have
+ * board_vendor set to "AMI Corporation" and of those 116 only
+ * the GPD win's and pocket's board_name is "Default string".
+ */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
+ DMI_EXACT_MATCH(DMI_BOARD_SERIAL, "Default string"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ },
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(dmi, cht_int33fe_typec_ids);
+
+static int cht_int33fe_typec_probe(struct platform_device *pdev)
+{
+ struct i2c_board_info board_info;
+ struct device *dev = &pdev->dev;
+ struct cht_int33fe_data *data;
+ struct fwnode_handle *fwnode;
+ struct regulator *regulator;
+ int fusb302_irq;
+ int ret;
+
+ if (!dmi_check_system(cht_int33fe_typec_ids))
+ return -ENODEV;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /*
+ * We expect the WC PMIC to be paired with a TI bq24292i charger-IC.
+ * We check for the bq24292i vbus regulator here, this has 2 purposes:
+ * 1) The bq24292i allows charging with up to 12V, setting the fusb302's
+ * max-snk voltage to 12V with another charger-IC is not good.
+ * 2) For the fusb302 driver to get the bq24292i vbus regulator, the
+ * regulator-map, which is part of the bq24292i regulator_init_data,
+ * must be registered before the fusb302 is instantiated, otherwise
+ * it will end up with a dummy-regulator.
+ * Note "cht_wc_usb_typec_vbus" comes from the regulator_init_data
+ * which is defined in i2c-cht-wc.c from where the bq24292i I²C client
+ * gets instantiated. We use regulator_get_optional here so that we
+ * don't end up getting a dummy-regulator ourselves.
+ */
+ regulator = regulator_get_optional(dev, "cht_wc_usb_typec_vbus");
+ if (IS_ERR(regulator)) {
+ ret = PTR_ERR(regulator);
+ return (ret == -ENODEV) ? -EPROBE_DEFER : ret;
+ }
+ regulator_put(regulator);
+
+ /* The FUSB302 uses the IRQ at index 1 and is the only IRQ user */
+ fusb302_irq = acpi_dev_gpio_irq_get(ACPI_COMPANION(dev), 1);
+ if (fusb302_irq < 0) {
+ if (fusb302_irq != -EPROBE_DEFER)
+ dev_err(dev, "Error getting FUSB302 irq\n");
+ return fusb302_irq;
+ }
+
+ ret = cht_int33fe_add_nodes(data);
+ if (ret)
+ return ret;
+
+ /* Work around BIOS bug, see comment on cht_int33fe_check_for_max17047() */
+ ret = cht_int33fe_register_max17047(dev, data);
+ if (ret)
+ goto out_remove_nodes;
+
+ fwnode = software_node_fwnode(&fusb302_node);
+ if (!fwnode) {
+ ret = -ENODEV;
+ goto out_unregister_max17047;
+ }
+
+ memset(&board_info, 0, sizeof(board_info));
+ strscpy(board_info.type, "typec_fusb302", I2C_NAME_SIZE);
+ board_info.dev_name = "fusb302";
+ board_info.fwnode = fwnode;
+ board_info.irq = fusb302_irq;
+
+ data->fusb302 = i2c_acpi_new_device(dev, 2, &board_info);
+ if (IS_ERR(data->fusb302)) {
+ ret = PTR_ERR(data->fusb302);
+ goto out_unregister_max17047;
+ }
+
+ fwnode = software_node_fwnode(&pi3usb30532_node);
+ if (!fwnode) {
+ ret = -ENODEV;
+ goto out_unregister_fusb302;
+ }
+
+ memset(&board_info, 0, sizeof(board_info));
+ board_info.dev_name = "pi3usb30532";
+ board_info.fwnode = fwnode;
+ strscpy(board_info.type, "pi3usb30532", I2C_NAME_SIZE);
+
+ data->pi3usb30532 = i2c_acpi_new_device(dev, 3, &board_info);
+ if (IS_ERR(data->pi3usb30532)) {
+ ret = PTR_ERR(data->pi3usb30532);
+ goto out_unregister_fusb302;
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ return 0;
+
+out_unregister_fusb302:
+ i2c_unregister_device(data->fusb302);
+
+out_unregister_max17047:
+ i2c_unregister_device(data->battery_fg);
+
+out_remove_nodes:
+ cht_int33fe_remove_nodes(data);
+
+ return ret;
+}
+
+static void cht_int33fe_typec_remove(struct platform_device *pdev)
+{
+ struct cht_int33fe_data *data = platform_get_drvdata(pdev);
+
+ i2c_unregister_device(data->pi3usb30532);
+ i2c_unregister_device(data->fusb302);
+ i2c_unregister_device(data->battery_fg);
+
+ cht_int33fe_remove_nodes(data);
+}
+
+static const struct acpi_device_id cht_int33fe_acpi_ids[] = {
+ { "INT33FE", },
+ { }
+};
+
+static struct platform_driver cht_int33fe_typec_driver = {
+ .driver = {
+ .name = "Intel Cherry Trail ACPI INT33FE Type-C driver",
+ .acpi_match_table = ACPI_PTR(cht_int33fe_acpi_ids),
+ },
+ .probe = cht_int33fe_typec_probe,
+ .remove_new = cht_int33fe_typec_remove,
+};
+
+module_platform_driver(cht_int33fe_typec_driver);
+
+MODULE_DESCRIPTION("Intel Cherry Trail ACPI INT33FE Type-C pseudo device driver");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/crystal_cove_charger.c b/drivers/platform/x86/intel/crystal_cove_charger.c
new file mode 100644
index 0000000000..e4299cfa22
--- /dev/null
+++ b/drivers/platform/x86/intel/crystal_cove_charger.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for the external-charger IRQ pass-through function of the
+ * Intel Bay Trail Crystal Cove PMIC.
+ *
+ * Note this is NOT a power_supply class driver; it just deals with IRQ
+ * pass-through. This requires a separate driver because the PMIC's
+ * level 2 interrupt for this must be explicitly acked.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define CHGRIRQ_REG 0x0a
+#define MCHGRIRQ_REG 0x17
+
+struct crystal_cove_charger_data {
+ struct mutex buslock; /* irq_bus_lock */
+ struct irq_chip irqchip;
+ struct regmap *regmap;
+ struct irq_domain *irq_domain;
+ int irq;
+ int charger_irq;
+ u8 mask;
+ u8 new_mask;
+};
+
+static irqreturn_t crystal_cove_charger_irq(int irq, void *data)
+{
+ struct crystal_cove_charger_data *charger = data;
+
+ /* No need to read CHGRIRQ_REG as there is only 1 IRQ */
+ handle_nested_irq(charger->charger_irq);
+
+ /* Ack CHGRIRQ 0 */
+ regmap_write(charger->regmap, CHGRIRQ_REG, BIT(0));
+
+ return IRQ_HANDLED;
+}
+
+static void crystal_cove_charger_irq_bus_lock(struct irq_data *data)
+{
+ struct crystal_cove_charger_data *charger = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&charger->buslock);
+}
+
+static void crystal_cove_charger_irq_bus_sync_unlock(struct irq_data *data)
+{
+ struct crystal_cove_charger_data *charger = irq_data_get_irq_chip_data(data);
+
+ if (charger->mask != charger->new_mask) {
+ regmap_write(charger->regmap, MCHGRIRQ_REG, charger->new_mask);
+ charger->mask = charger->new_mask;
+ }
+
+ mutex_unlock(&charger->buslock);
+}
+
+static void crystal_cove_charger_irq_unmask(struct irq_data *data)
+{
+ struct crystal_cove_charger_data *charger = irq_data_get_irq_chip_data(data);
+
+ charger->new_mask &= ~BIT(data->hwirq);
+}
+
+static void crystal_cove_charger_irq_mask(struct irq_data *data)
+{
+ struct crystal_cove_charger_data *charger = irq_data_get_irq_chip_data(data);
+
+ charger->new_mask |= BIT(data->hwirq);
+}
+
+static void crystal_cove_charger_rm_irq_domain(void *data)
+{
+ struct crystal_cove_charger_data *charger = data;
+
+ irq_domain_remove(charger->irq_domain);
+}
+
+static int crystal_cove_charger_probe(struct platform_device *pdev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
+ struct crystal_cove_charger_data *charger;
+ int ret;
+
+ charger = devm_kzalloc(&pdev->dev, sizeof(*charger), GFP_KERNEL);
+ if (!charger)
+ return -ENOMEM;
+
+ charger->regmap = pmic->regmap;
+ mutex_init(&charger->buslock);
+
+ charger->irq = platform_get_irq(pdev, 0);
+ if (charger->irq < 0)
+ return charger->irq;
+
+ charger->irq_domain = irq_domain_create_linear(dev_fwnode(pdev->dev.parent), 1,
+ &irq_domain_simple_ops, NULL);
+ if (!charger->irq_domain)
+ return -ENOMEM;
+
+	/* Distinguish this IRQ domain from others sharing (MFD) the same fwnode */
+ irq_domain_update_bus_token(charger->irq_domain, DOMAIN_BUS_WAKEUP);
+
+ ret = devm_add_action_or_reset(&pdev->dev, crystal_cove_charger_rm_irq_domain, charger);
+ if (ret)
+ return ret;
+
+ charger->charger_irq = irq_create_mapping(charger->irq_domain, 0);
+ if (!charger->charger_irq)
+ return -ENOMEM;
+
+ charger->irqchip.name = KBUILD_MODNAME;
+ charger->irqchip.irq_unmask = crystal_cove_charger_irq_unmask;
+ charger->irqchip.irq_mask = crystal_cove_charger_irq_mask;
+ charger->irqchip.irq_bus_lock = crystal_cove_charger_irq_bus_lock;
+ charger->irqchip.irq_bus_sync_unlock = crystal_cove_charger_irq_bus_sync_unlock;
+
+ irq_set_chip_data(charger->charger_irq, charger);
+ irq_set_chip_and_handler(charger->charger_irq, &charger->irqchip, handle_simple_irq);
+ irq_set_nested_thread(charger->charger_irq, true);
+ irq_set_noprobe(charger->charger_irq);
+
+ /* Mask the single 2nd level IRQ before enabling the 1st level IRQ */
+ charger->mask = charger->new_mask = BIT(0);
+ regmap_write(charger->regmap, MCHGRIRQ_REG, charger->mask);
+
+ ret = devm_request_threaded_irq(&pdev->dev, charger->irq, NULL,
+ crystal_cove_charger_irq,
+ IRQF_ONESHOT, KBUILD_MODNAME, charger);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "requesting irq\n");
+
+ return 0;
+}
+
+static struct platform_driver crystal_cove_charger_driver = {
+ .probe = crystal_cove_charger_probe,
+ .driver = {
+ .name = "crystal_cove_charger",
+ },
+};
+module_platform_driver(crystal_cove_charger_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com");
+MODULE_DESCRIPTION("Intel Bay Trail Crystal Cove external charger IRQ pass-through");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
new file mode 100644
index 0000000000..7457ca2b27
--- /dev/null
+++ b/drivers/platform/x86/intel/hid.c
@@ -0,0 +1,785 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Intel HID event & 5 button array driver
+ *
+ * Copyright (C) 2015 Alex Hung <alex.hung@canonical.com>
+ * Copyright (C) 2015 Andrew Lutomirski <luto@kernel.org>
+ */
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/suspend.h>
+#include "../dual_accel_detect.h"
+
+enum intel_hid_tablet_sw_mode {
+ TABLET_SW_AUTO = -1,
+ TABLET_SW_OFF = 0,
+ TABLET_SW_AT_EVENT,
+ TABLET_SW_AT_PROBE,
+};
+
+static bool enable_5_button_array;
+module_param(enable_5_button_array, bool, 0444);
+MODULE_PARM_DESC(enable_5_button_array,
+ "Enable 5 Button Array support. "
+ "If you need this please report this to: platform-driver-x86@vger.kernel.org");
+
+static int enable_sw_tablet_mode = TABLET_SW_AUTO;
+module_param(enable_sw_tablet_mode, int, 0444);
+MODULE_PARM_DESC(enable_sw_tablet_mode,
+ "Enable SW_TABLET_MODE reporting -1:auto 0:off 1:at-first-event 2:at-probe. "
+ "If you need this please report this to: platform-driver-x86@vger.kernel.org");
+
+/* When NOT in tablet mode, VGBS returns with the 0x40 flag (bit 6) set */
+#define TABLET_MODE_FLAG BIT(6)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alex Hung");
+
+static const struct acpi_device_id intel_hid_ids[] = {
+ {"INT33D5", 0},
+ {"INTC1051", 0},
+ {"INTC1054", 0},
+ {"INTC1070", 0},
+ {"INTC1076", 0},
+ {"INTC1077", 0},
+ {"INTC1078", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, intel_hid_ids);
+
+/* In theory, these are HID usages. */
+static const struct key_entry intel_hid_keymap[] = {
+ /* 1: LSuper (Page 0x07, usage 0xE3) -- unclear what to do */
+ /* 2: Toggle SW_ROTATE_LOCK -- easy to implement if seen in wild */
+ { KE_KEY, 3, { KEY_NUMLOCK } },
+ { KE_KEY, 4, { KEY_HOME } },
+ { KE_KEY, 5, { KEY_END } },
+ { KE_KEY, 6, { KEY_PAGEUP } },
+ { KE_KEY, 7, { KEY_PAGEDOWN } },
+ { KE_KEY, 8, { KEY_RFKILL } },
+ { KE_KEY, 9, { KEY_POWER } },
+ { KE_KEY, 11, { KEY_SLEEP } },
+ /* 13 has two different meanings in the spec -- ignore it. */
+ { KE_KEY, 14, { KEY_STOPCD } },
+ { KE_KEY, 15, { KEY_PLAYPAUSE } },
+ { KE_KEY, 16, { KEY_MUTE } },
+ { KE_KEY, 17, { KEY_VOLUMEUP } },
+ { KE_KEY, 18, { KEY_VOLUMEDOWN } },
+ { KE_KEY, 19, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, 20, { KEY_BRIGHTNESSDOWN } },
+ /* 27: wake -- needs special handling */
+ { KE_END },
+};
+
+/* 5 button array notification value. */
+static const struct key_entry intel_array_keymap[] = {
+ { KE_KEY, 0xC2, { KEY_LEFTMETA } }, /* Press */
+ { KE_IGNORE, 0xC3, { KEY_LEFTMETA } }, /* Release */
+ { KE_KEY, 0xC4, { KEY_VOLUMEUP } }, /* Press */
+ { KE_IGNORE, 0xC5, { KEY_VOLUMEUP } }, /* Release */
+ { KE_KEY, 0xC6, { KEY_VOLUMEDOWN } }, /* Press */
+ { KE_IGNORE, 0xC7, { KEY_VOLUMEDOWN } }, /* Release */
+ { KE_KEY, 0xC8, { KEY_ROTATE_LOCK_TOGGLE } }, /* Press */
+ { KE_IGNORE, 0xC9, { KEY_ROTATE_LOCK_TOGGLE } }, /* Release */
+ { KE_KEY, 0xCE, { KEY_POWER } }, /* Press */
+ { KE_IGNORE, 0xCF, { KEY_POWER } }, /* Release */
+ { KE_END },
+};
+
+static const struct dmi_system_id button_array_table[] = {
+ {
+ .ident = "Wacom MobileStudio Pro 13",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Wacom Co.,Ltd"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Wacom MobileStudio Pro 13"),
+ },
+ },
+ {
+ .ident = "Wacom MobileStudio Pro 16",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Wacom Co.,Ltd"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Wacom MobileStudio Pro 16"),
+ },
+ },
+ {
+ .ident = "HP Spectre x2 (2015)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x2 Detachable"),
+ },
+ },
+ {
+ .ident = "Lenovo ThinkPad X1 Tablet Gen 2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Tablet Gen 2"),
+ },
+ },
+ {
+ .ident = "Microsoft Surface Go 3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"),
+ },
+ },
+ { }
+};
+
+/*
+ * Some convertibles use the intel-hid ACPI interface to report SW_TABLET_MODE;
+ * these need to be checked against a DMI-based allow list because some
+ * models have unreliable VGBS returns, which could cause incorrect
+ * SW_TABLET_MODE reports.
+ */
+static const struct dmi_system_id dmi_vgbs_allow_list[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x360 Convertible 15-df0xxx"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite Dragonfly G2 Notebook PC"),
+ },
+ },
+ { }
+};
+
+/*
+ * Some devices, even non-convertible ones, can send incorrect SW_TABLET_MODE
+ * reports. Accept such reports only from devices in this list.
+ */
+static const struct dmi_system_id dmi_auto_add_switch[] = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "31" /* Convertible */),
+ },
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "32" /* Detachable */),
+ },
+ },
+ {} /* Array terminator */
+};
+
+struct intel_hid_priv {
+ struct input_dev *input_dev;
+ struct input_dev *array;
+ struct input_dev *switches;
+ bool wakeup_mode;
+};
+
+#define HID_EVENT_FILTER_UUID "eeec56b3-4442-408f-a792-4edd4d758054"
+
+enum intel_hid_dsm_fn_codes {
+ INTEL_HID_DSM_FN_INVALID,
+ INTEL_HID_DSM_BTNL_FN,
+ INTEL_HID_DSM_HDMM_FN,
+ INTEL_HID_DSM_HDSM_FN,
+ INTEL_HID_DSM_HDEM_FN,
+ INTEL_HID_DSM_BTNS_FN,
+ INTEL_HID_DSM_BTNE_FN,
+ INTEL_HID_DSM_HEBC_V1_FN,
+ INTEL_HID_DSM_VGBS_FN,
+ INTEL_HID_DSM_HEBC_V2_FN,
+ INTEL_HID_DSM_FN_MAX
+};
+
+static const char *intel_hid_dsm_fn_to_method[INTEL_HID_DSM_FN_MAX] = {
+ NULL,
+ "BTNL",
+ "HDMM",
+ "HDSM",
+ "HDEM",
+ "BTNS",
+ "BTNE",
+ "HEBC",
+ "VGBS",
+ "HEBC"
+};
+
+static unsigned long long intel_hid_dsm_fn_mask;
+static guid_t intel_dsm_guid;
+
+static bool intel_hid_execute_method(acpi_handle handle,
+ enum intel_hid_dsm_fn_codes fn_index,
+ unsigned long long arg)
+{
+ union acpi_object *obj, argv4, req;
+ acpi_status status;
+ char *method_name;
+
+ if (fn_index <= INTEL_HID_DSM_FN_INVALID ||
+ fn_index >= INTEL_HID_DSM_FN_MAX)
+ return false;
+
+ method_name = (char *)intel_hid_dsm_fn_to_method[fn_index];
+
+ if (!(intel_hid_dsm_fn_mask & BIT(fn_index)))
+ goto skip_dsm_exec;
+
+	/* All methods expect a package with one integer element */
+ req.type = ACPI_TYPE_INTEGER;
+ req.integer.value = arg;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 1;
+ argv4.package.elements = &req;
+
+ obj = acpi_evaluate_dsm(handle, &intel_dsm_guid, 1, fn_index, &argv4);
+ if (obj) {
+ acpi_handle_debug(handle, "Exec DSM Fn code: %d[%s] success\n",
+ fn_index, method_name);
+ ACPI_FREE(obj);
+ return true;
+ }
+
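+	/* Fall back to the named AML method when the DSM function isn't advertised */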
+skip_dsm_exec:
+ status = acpi_execute_simple_method(handle, method_name, arg);
+ if (ACPI_SUCCESS(status))
+ return true;
+
+ return false;
+}
+
+static bool intel_hid_evaluate_method(acpi_handle handle,
+ enum intel_hid_dsm_fn_codes fn_index,
+ unsigned long long *result)
+{
+ union acpi_object *obj;
+ acpi_status status;
+ char *method_name;
+
+ if (fn_index <= INTEL_HID_DSM_FN_INVALID ||
+ fn_index >= INTEL_HID_DSM_FN_MAX)
+ return false;
+
+ method_name = (char *)intel_hid_dsm_fn_to_method[fn_index];
+
+ if (!(intel_hid_dsm_fn_mask & BIT(fn_index)))
+ goto skip_dsm_eval;
+
+ obj = acpi_evaluate_dsm_typed(handle, &intel_dsm_guid,
+ 1, fn_index,
+ NULL, ACPI_TYPE_INTEGER);
+ if (obj) {
+ *result = obj->integer.value;
+ acpi_handle_debug(handle,
+ "Eval DSM Fn code: %d[%s] results: 0x%llx\n",
+ fn_index, method_name, *result);
+ ACPI_FREE(obj);
+ return true;
+ }
+
+skip_dsm_eval:
+ status = acpi_evaluate_integer(handle, method_name, NULL, result);
+ if (ACPI_SUCCESS(status))
+ return true;
+
+ return false;
+}
+
+static void intel_hid_init_dsm(acpi_handle handle)
+{
+ union acpi_object *obj;
+
+ guid_parse(HID_EVENT_FILTER_UUID, &intel_dsm_guid);
+
+ obj = acpi_evaluate_dsm_typed(handle, &intel_dsm_guid, 1, 0, NULL,
+ ACPI_TYPE_BUFFER);
+ if (obj) {
+ switch (obj->buffer.length) {
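+		/* Buffers longer than 2 bytes fall through and are read as a 16-bit mask */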
+ default:
+ case 2:
+ intel_hid_dsm_fn_mask = *(u16 *)obj->buffer.pointer;
+ break;
+ case 1:
+ intel_hid_dsm_fn_mask = *obj->buffer.pointer;
+ break;
+ case 0:
+ acpi_handle_warn(handle, "intel_hid_dsm_fn_mask length is zero\n");
+ intel_hid_dsm_fn_mask = 0;
+ break;
+ }
+ ACPI_FREE(obj);
+ }
+
+ acpi_handle_debug(handle, "intel_hid_dsm_fn_mask = %llx\n",
+ intel_hid_dsm_fn_mask);
+}
+
+static int intel_hid_set_enable(struct device *device, bool enable)
+{
+ acpi_handle handle = ACPI_HANDLE(device);
+
+ /* Enable|disable features - power button is always enabled */
+ if (!intel_hid_execute_method(handle, INTEL_HID_DSM_HDSM_FN,
+ enable)) {
+ dev_warn(device, "failed to %sable hotkeys\n",
+ enable ? "en" : "dis");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void intel_button_array_enable(struct device *device, bool enable)
+{
+ struct intel_hid_priv *priv = dev_get_drvdata(device);
+ acpi_handle handle = ACPI_HANDLE(device);
+ unsigned long long button_cap;
+ acpi_status status;
+
+ if (!priv->array)
+ return;
+
+ /* Query supported platform features */
+ status = acpi_evaluate_integer(handle, "BTNC", NULL, &button_cap);
+ if (ACPI_FAILURE(status)) {
+ dev_warn(device, "failed to get button capability\n");
+ return;
+ }
+
+ /* Enable|disable features - power button is always enabled */
+ if (!intel_hid_execute_method(handle, INTEL_HID_DSM_BTNE_FN,
+ enable ? button_cap : 1))
+ dev_warn(device, "failed to set button capability\n");
+}
+
+static int intel_hid_pm_prepare(struct device *device)
+{
+ if (device_may_wakeup(device)) {
+ struct intel_hid_priv *priv = dev_get_drvdata(device);
+
+ priv->wakeup_mode = true;
+ }
+ return 0;
+}
+
+static void intel_hid_pm_complete(struct device *device)
+{
+ struct intel_hid_priv *priv = dev_get_drvdata(device);
+
+ priv->wakeup_mode = false;
+}
+
+static int intel_hid_pl_suspend_handler(struct device *device)
+{
+ intel_button_array_enable(device, false);
+
+ if (!pm_suspend_no_platform())
+ intel_hid_set_enable(device, false);
+
+ return 0;
+}
+
+static int intel_hid_pl_resume_handler(struct device *device)
+{
+ intel_hid_pm_complete(device);
+
+ if (!pm_suspend_no_platform())
+ intel_hid_set_enable(device, true);
+
+ intel_button_array_enable(device, true);
+ return 0;
+}
+
+static const struct dev_pm_ops intel_hid_pl_pm_ops = {
+ .prepare = intel_hid_pm_prepare,
+ .complete = intel_hid_pm_complete,
+ .freeze = intel_hid_pl_suspend_handler,
+ .thaw = intel_hid_pl_resume_handler,
+ .restore = intel_hid_pl_resume_handler,
+ .suspend = intel_hid_pl_suspend_handler,
+ .resume = intel_hid_pl_resume_handler,
+};
+
+static int intel_hid_input_setup(struct platform_device *device)
+{
+ struct intel_hid_priv *priv = dev_get_drvdata(&device->dev);
+ int ret;
+
+ priv->input_dev = devm_input_allocate_device(&device->dev);
+ if (!priv->input_dev)
+ return -ENOMEM;
+
+ ret = sparse_keymap_setup(priv->input_dev, intel_hid_keymap, NULL);
+ if (ret)
+ return ret;
+
+ priv->input_dev->name = "Intel HID events";
+ priv->input_dev->id.bustype = BUS_HOST;
+
+ return input_register_device(priv->input_dev);
+}
+
+static int intel_button_array_input_setup(struct platform_device *device)
+{
+ struct intel_hid_priv *priv = dev_get_drvdata(&device->dev);
+ int ret;
+
+ /* Setup input device for 5 button array */
+ priv->array = devm_input_allocate_device(&device->dev);
+ if (!priv->array)
+ return -ENOMEM;
+
+ ret = sparse_keymap_setup(priv->array, intel_array_keymap, NULL);
+ if (ret)
+ return ret;
+
+ priv->array->name = "Intel HID 5 button array";
+ priv->array->id.bustype = BUS_HOST;
+
+ return input_register_device(priv->array);
+}
+
+static int intel_hid_switches_setup(struct platform_device *device)
+{
+ struct intel_hid_priv *priv = dev_get_drvdata(&device->dev);
+
+ /* Setup input device for switches */
+ priv->switches = devm_input_allocate_device(&device->dev);
+ if (!priv->switches)
+ return -ENOMEM;
+
+ __set_bit(EV_SW, priv->switches->evbit);
+ __set_bit(SW_TABLET_MODE, priv->switches->swbit);
+
+ priv->switches->name = "Intel HID switches";
+ priv->switches->id.bustype = BUS_HOST;
+ return input_register_device(priv->switches);
+}
+
+static void report_tablet_mode_state(struct platform_device *device)
+{
+ struct intel_hid_priv *priv = dev_get_drvdata(&device->dev);
+ acpi_handle handle = ACPI_HANDLE(&device->dev);
+ unsigned long long vgbs;
+ int m;
+
+ if (!intel_hid_evaluate_method(handle, INTEL_HID_DSM_VGBS_FN, &vgbs))
+ return;
+
+ m = !(vgbs & TABLET_MODE_FLAG);
+ input_report_switch(priv->switches, SW_TABLET_MODE, m);
+ input_sync(priv->switches);
+}
+
+static bool report_tablet_mode_event(struct input_dev *input_dev, u32 event)
+{
+ if (!input_dev)
+ return false;
+
+ switch (event) {
+ case 0xcc:
+ input_report_switch(input_dev, SW_TABLET_MODE, 1);
+ input_sync(input_dev);
+ return true;
+ case 0xcd:
+ input_report_switch(input_dev, SW_TABLET_MODE, 0);
+ input_sync(input_dev);
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void notify_handler(acpi_handle handle, u32 event, void *context)
+{
+ struct platform_device *device = context;
+ struct intel_hid_priv *priv = dev_get_drvdata(&device->dev);
+ unsigned long long ev_index;
+ int err;
+
+ /*
+	 * Some convertibles have unreliable VGBS returns, which could cause incorrect
+	 * SW_TABLET_MODE reports; in these cases we enable support when receiving
+	 * the first event instead of during driver setup.
+ */
+ if (!priv->switches && enable_sw_tablet_mode == TABLET_SW_AT_EVENT &&
+ (event == 0xcc || event == 0xcd)) {
+ dev_info(&device->dev, "switch event received, enable switches supports\n");
+ err = intel_hid_switches_setup(device);
+ if (err)
+ pr_err("Failed to setup Intel HID switches\n");
+ }
+
+ if (priv->wakeup_mode) {
+ /*
+ * Needed for wakeup from suspend-to-idle to work on some
+ * platforms that don't expose the 5-button array, but still
+ * send notifies with the power button event code to this
+ * device object on power button actions while suspended.
+ */
+ if (event == 0xce)
+ goto wakeup;
+
+ /*
+ * Some devices send (duplicate) tablet-mode events when moved
+ * around even though the mode has not changed; and they do this
+ * even when suspended.
+ * Update the switch state in case it changed and then return
+ * without waking up to avoid spurious wakeups.
+ */
+ if (event == 0xcc || event == 0xcd) {
+ report_tablet_mode_event(priv->switches, event);
+ return;
+ }
+
+ /* Wake up on 5-button array events only. */
+ if (event == 0xc0 || !priv->array)
+ return;
+
+ if (!sparse_keymap_entry_from_scancode(priv->array, event)) {
+ dev_info(&device->dev, "unknown event 0x%x\n", event);
+ return;
+ }
+
+wakeup:
+ pm_wakeup_hard_event(&device->dev);
+
+ return;
+ }
+
+ /*
+ * Needed for suspend to work on some platforms that don't expose
+ * the 5-button array, but still send notifies with power button
+ * event code to this device object on power button actions.
+ *
+ * Report the power button press and release.
+ */
+ if (!priv->array) {
+ if (event == 0xce) {
+ input_report_key(priv->input_dev, KEY_POWER, 1);
+ input_sync(priv->input_dev);
+ return;
+ }
+
+ if (event == 0xcf) {
+ input_report_key(priv->input_dev, KEY_POWER, 0);
+ input_sync(priv->input_dev);
+ return;
+ }
+ }
+
+ if (report_tablet_mode_event(priv->switches, event))
+ return;
+
+ /* 0xC0 is for HID events, other values are for 5 button array */
+ if (event != 0xc0) {
+ if (!priv->array ||
+ !sparse_keymap_report_event(priv->array, event, 1, true))
+ dev_dbg(&device->dev, "unknown event 0x%x\n", event);
+ return;
+ }
+
+ if (!intel_hid_evaluate_method(handle, INTEL_HID_DSM_HDEM_FN,
+ &ev_index)) {
+ dev_warn(&device->dev, "failed to get event index\n");
+ return;
+ }
+
+ if (!sparse_keymap_report_event(priv->input_dev, ev_index, 1, true))
+ dev_dbg(&device->dev, "unknown event index 0x%llx\n",
+ ev_index);
+}
+
+static bool button_array_present(struct platform_device *device)
+{
+ acpi_handle handle = ACPI_HANDLE(&device->dev);
+ unsigned long long event_cap;
+
+ if (intel_hid_evaluate_method(handle, INTEL_HID_DSM_HEBC_V2_FN,
+ &event_cap)) {
+ /* Check presence of 5 button array or v2 power button */
+ if (event_cap & 0x60000)
+ return true;
+ }
+
+ if (intel_hid_evaluate_method(handle, INTEL_HID_DSM_HEBC_V1_FN,
+ &event_cap)) {
+ if (event_cap & 0x20000)
+ return true;
+ }
+
+ if (enable_5_button_array || dmi_check_system(button_array_table))
+ return true;
+
+ return false;
+}
+
+static int intel_hid_probe(struct platform_device *device)
+{
+ acpi_handle handle = ACPI_HANDLE(&device->dev);
+ unsigned long long mode, dummy;
+ struct intel_hid_priv *priv;
+ acpi_status status;
+ int err;
+
+ intel_hid_init_dsm(handle);
+
+ if (!intel_hid_evaluate_method(handle, INTEL_HID_DSM_HDMM_FN, &mode)) {
+ dev_warn(&device->dev, "failed to read mode\n");
+ return -ENODEV;
+ }
+
+ if (mode != 0) {
+ /*
+ * This driver only implements "simple" mode. There appear
+ * to be no other modes, but we should be paranoid and check
+ * for compatibility.
+ */
+ dev_info(&device->dev, "platform is not in simple mode\n");
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(&device->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ dev_set_drvdata(&device->dev, priv);
+
+ /* See dual_accel_detect.h for more info on the dual_accel check. */
+ if (enable_sw_tablet_mode == TABLET_SW_AUTO) {
+ if (dmi_check_system(dmi_vgbs_allow_list))
+ enable_sw_tablet_mode = TABLET_SW_AT_PROBE;
+ else if (dmi_check_system(dmi_auto_add_switch) && !dual_accel_detect())
+ enable_sw_tablet_mode = TABLET_SW_AT_EVENT;
+ else
+ enable_sw_tablet_mode = TABLET_SW_OFF;
+ }
+
+ err = intel_hid_input_setup(device);
+ if (err) {
+ pr_err("Failed to setup Intel HID hotkeys\n");
+ return err;
+ }
+
+ /* Setup 5 button array */
+ if (button_array_present(device)) {
+ dev_info(&device->dev, "platform supports 5 button array\n");
+ err = intel_button_array_input_setup(device);
+ if (err)
+ pr_err("Failed to setup Intel 5 button array hotkeys\n");
+ }
+
+	/* Set up switches for devices where we know VGBS returns correctly */
+ if (enable_sw_tablet_mode == TABLET_SW_AT_PROBE) {
+ dev_info(&device->dev, "platform supports switches\n");
+ err = intel_hid_switches_setup(device);
+ if (err)
+ pr_err("Failed to setup Intel HID switches\n");
+ else
+ report_tablet_mode_state(device);
+ }
+
+ status = acpi_install_notify_handler(handle,
+ ACPI_DEVICE_NOTIFY,
+ notify_handler,
+ device);
+ if (ACPI_FAILURE(status))
+ return -EBUSY;
+
+ err = intel_hid_set_enable(&device->dev, true);
+ if (err)
+ goto err_remove_notify;
+
+ intel_button_array_enable(&device->dev, true);
+
+ /*
+ * Call button load method to enable HID power button
+ * Always do this since it activates events on some devices without
+ * a button array too.
+ */
+ if (!intel_hid_evaluate_method(handle, INTEL_HID_DSM_BTNL_FN, &dummy))
+ dev_warn(&device->dev, "failed to enable HID power button\n");
+
+ device_init_wakeup(&device->dev, true);
+ /*
+ * In order for system wakeup to work, the EC GPE has to be marked as
+ * a wakeup one, so do that here (this setting will persist, but it has
+ * no effect until the wakeup mask is set for the EC GPE).
+ */
+ acpi_ec_mark_gpe_for_wake();
+ return 0;
+
+err_remove_notify:
+ acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler);
+
+ return err;
+}
+
+static void intel_hid_remove(struct platform_device *device)
+{
+ acpi_handle handle = ACPI_HANDLE(&device->dev);
+
+ device_init_wakeup(&device->dev, false);
+ acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler);
+ intel_hid_set_enable(&device->dev, false);
+ intel_button_array_enable(&device->dev, false);
+}
+
+static struct platform_driver intel_hid_pl_driver = {
+ .driver = {
+ .name = "intel-hid",
+ .acpi_match_table = intel_hid_ids,
+ .pm = &intel_hid_pl_pm_ops,
+ },
+ .probe = intel_hid_probe,
+ .remove_new = intel_hid_remove,
+};
+
+/*
+ * Unfortunately, some laptops provide a _HID="INT33D5" device with
+ * _CID="PNP0C02". This causes the pnpacpi scan driver to claim the
+ * ACPI node, so no platform device will be created. The pnpacpi
+ * driver rejects this device in subsequent processing, so no physical
+ * node is created at all.
+ *
+ * As a workaround until the ACPI core figures out how to handle
+ * this corner case, manually ask the ACPI platform device code to
+ * claim the ACPI node.
+ */
+static acpi_status __init
+check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv)
+{
+ const struct acpi_device_id *ids = context;
+ struct acpi_device *dev = acpi_fetch_acpi_dev(handle);
+
+ if (dev && acpi_match_device_ids(dev, ids) == 0)
+ if (!IS_ERR_OR_NULL(acpi_create_platform_device(dev, NULL)))
+ dev_info(&dev->dev,
+ "intel-hid: created platform device\n");
+
+ return AE_OK;
+}
+
+static int __init intel_hid_init(void)
+{
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, check_acpi_dev, NULL,
+ (void *)intel_hid_ids, NULL);
+
+ return platform_driver_register(&intel_hid_pl_driver);
+}
+module_init(intel_hid_init);
+
+static void __exit intel_hid_exit(void)
+{
+ platform_driver_unregister(&intel_hid_pl_driver);
+}
+module_exit(intel_hid_exit);
diff --git a/drivers/platform/x86/intel/ifs/Kconfig b/drivers/platform/x86/intel/ifs/Kconfig
new file mode 100644
index 0000000000..3eded96675
--- /dev/null
+++ b/drivers/platform/x86/intel/ifs/Kconfig
@@ -0,0 +1,12 @@
+config INTEL_IFS
+ tristate "Intel In Field Scan"
+ depends on X86 && CPU_SUP_INTEL && 64BIT && SMP
+ help
+ Enable support for the In Field Scan capability in select
+ CPUs. The capability allows for running low level tests via
+ a scan image distributed by Intel via Github to validate CPU
+ operation beyond baseline RAS capabilities. To compile this
+ support as a module, choose M here. The module will be called
+ intel_ifs.
+
+ If unsure, say N.
diff --git a/drivers/platform/x86/intel/ifs/Makefile b/drivers/platform/x86/intel/ifs/Makefile
new file mode 100644
index 0000000000..30f035ef55
--- /dev/null
+++ b/drivers/platform/x86/intel/ifs/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_INTEL_IFS) += intel_ifs.o
+
+intel_ifs-objs := core.o load.o runtest.o sysfs.o
diff --git a/drivers/platform/x86/intel/ifs/core.c b/drivers/platform/x86/intel/ifs/core.c
new file mode 100644
index 0000000000..306f886b52
--- /dev/null
+++ b/drivers/platform/x86/intel/ifs/core.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation. */
+
+#include <linux/module.h>
+#include <linux/kdev_t.h>
+#include <linux/semaphore.h>
+#include <linux/slab.h>
+
+#include <asm/cpu_device_id.h>
+
+#include "ifs.h"
+
+#define X86_MATCH(model) \
+ X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, \
+ INTEL_FAM6_##model, X86_FEATURE_CORE_CAPABILITIES, NULL)
+
+static const struct x86_cpu_id ifs_cpu_ids[] __initconst = {
+ X86_MATCH(SAPPHIRERAPIDS_X),
+ X86_MATCH(EMERALDRAPIDS_X),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, ifs_cpu_ids);
+
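+/* Expand plat_ifs_attrs / plat_ifs_array_attrs (sysfs.c) into the *_groups lists */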
+ATTRIBUTE_GROUPS(plat_ifs);
+ATTRIBUTE_GROUPS(plat_ifs_array);
+
+bool *ifs_pkg_auth;
+
+static const struct ifs_test_caps scan_test = {
+ .integrity_cap_bit = MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT,
+ .test_num = IFS_TYPE_SAF,
+};
+
+static const struct ifs_test_caps array_test = {
+ .integrity_cap_bit = MSR_INTEGRITY_CAPS_ARRAY_BIST_BIT,
+ .test_num = IFS_TYPE_ARRAY_BIST,
+};
+
+static struct ifs_device ifs_devices[] = {
+ [IFS_TYPE_SAF] = {
+ .test_caps = &scan_test,
+ .misc = {
+ .name = "intel_ifs_0",
+ .minor = MISC_DYNAMIC_MINOR,
+ .groups = plat_ifs_groups,
+ },
+ },
+ [IFS_TYPE_ARRAY_BIST] = {
+ .test_caps = &array_test,
+ .misc = {
+ .name = "intel_ifs_1",
+ .minor = MISC_DYNAMIC_MINOR,
+ .groups = plat_ifs_array_groups,
+ },
+ },
+};
+
+#define IFS_NUMTESTS ARRAY_SIZE(ifs_devices)
+
+static void ifs_cleanup(void)
+{
+ int i;
+
+ for (i = 0; i < IFS_NUMTESTS; i++) {
+ if (ifs_devices[i].misc.this_device)
+ misc_deregister(&ifs_devices[i].misc);
+ }
+ kfree(ifs_pkg_auth);
+}
+
+static int __init ifs_init(void)
+{
+ const struct x86_cpu_id *m;
+ u64 msrval;
+ int i, ret;
+
+ m = x86_match_cpu(ifs_cpu_ids);
+ if (!m)
+ return -ENODEV;
+
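+	/* IFS presence is enumerated via CORE_CAPS and then the INTEGRITY_CAPS MSR */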
+ if (rdmsrl_safe(MSR_IA32_CORE_CAPS, &msrval))
+ return -ENODEV;
+
+ if (!(msrval & MSR_IA32_CORE_CAPS_INTEGRITY_CAPS))
+ return -ENODEV;
+
+ if (rdmsrl_safe(MSR_INTEGRITY_CAPS, &msrval))
+ return -ENODEV;
+
+ ifs_pkg_auth = kmalloc_array(topology_max_packages(), sizeof(bool), GFP_KERNEL);
+ if (!ifs_pkg_auth)
+ return -ENOMEM;
+
+ for (i = 0; i < IFS_NUMTESTS; i++) {
+ if (!(msrval & BIT(ifs_devices[i].test_caps->integrity_cap_bit)))
+ continue;
+ ret = misc_register(&ifs_devices[i].misc);
+ if (ret)
+ goto err_exit;
+ }
+ return 0;
+
+err_exit:
+ ifs_cleanup();
+ return ret;
+}
+
+static void __exit ifs_exit(void)
+{
+ ifs_cleanup();
+}
+
+module_init(ifs_init);
+module_exit(ifs_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel In Field Scan (IFS) device");
diff --git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h
new file mode 100644
index 0000000000..9319185589
--- /dev/null
+++ b/drivers/platform/x86/intel/ifs/ifs.h
@@ -0,0 +1,276 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation. */
+
+#ifndef _IFS_H_
+#define _IFS_H_
+
+/**
+ * DOC: In-Field Scan
+ *
+ * =============
+ * In-Field Scan
+ * =============
+ *
+ * Introduction
+ * ------------
+ *
+ * In Field Scan (IFS) is a hardware feature to run circuit level tests on
+ * a CPU core to detect problems that are not caught by parity or ECC checks.
+ * Future CPUs will support more than one type of test which will show up
+ * with a new platform-device instance-id.
+ *
+ *
+ * IFS Image
+ * ---------
+ *
+ * Intel provides a firmware file containing the scan tests via
+ * github [#f1]_. Similar to microcode, there is a separate file for each
+ * family-model-stepping. IFS images are not applicable for some test types.
+ * Wherever applicable, the sysfs directory provides a "current_batch" file
+ * (see below) for loading the image.
+ *
+ *
+ * IFS Image Loading
+ * -----------------
+ *
+ * The driver loads the tests into BIOS-reserved memory local to each CPU
+ * socket in a two-step process, using writes to MSRs to first load the
+ * SHA hashes for the tests and then the tests themselves. Status MSRs provide
+ * feedback on the success/failure of these steps.
+ *
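+ * A rough sketch of the sequence, using the MSR names from this driver::
+ *
+ *   WRMSR(MSR_COPY_SCAN_HASHES, hash_address)
+ *   RDMSR(MSR_SCAN_HASHES_STATUS)               # valid? chunk count/size
+ *   for each chunk:
+ *       WRMSR(MSR_AUTHENTICATE_AND_COPY_CHUNK, chunk_address | chunk_index)
+ *       RDMSR(MSR_CHUNKS_AUTHENTICATION_STATUS)
+ *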
+ * The test files are kept in a fixed location: /lib/firmware/intel/ifs_<n>/
+ * For example, if there are 3 test files, they would be named in the following
+ * fashion:
+ * ff-mm-ss-01.scan
+ * ff-mm-ss-02.scan
+ * ff-mm-ss-03.scan
+ * (where ff refers to family, mm indicates model and ss indicates stepping)
+ *
+ * A different test file can be loaded by writing the numerical portion
+ * (e.g. 1, 2 or 3 in the above scenario) into the current_batch file.
+ * To load ff-mm-ss-02.scan, the following command can be used::
+ *
+ * # echo 2 > /sys/devices/virtual/misc/intel_ifs_<n>/current_batch
+ *
+ * The above file can also be read to know the currently loaded image.
+ *
+ * Running tests
+ * -------------
+ *
+ * Tests are run by the driver synchronizing execution of all threads on a
+ * core and then writing to the ACTIVATE_SCAN MSR on all threads. Instruction
+ * execution continues when:
+ *
+ * 1) All tests have completed.
+ * 2) Execution was interrupted.
+ * 3) A test detected a problem.
+ *
+ * Note that ALL THREADS ON THE CORE ARE EFFECTIVELY OFFLINE FOR THE
+ * DURATION OF THE TEST. This can be up to 200 milliseconds. If the system
+ * is running latency sensitive applications that cannot tolerate an
+ * interruption of this magnitude, the system administrator must arrange
+ * to migrate those applications to other cores before running a core test.
+ * It may also be necessary to redirect interrupts to other CPUs.
+ *
+ * In all cases reading the corresponding test's STATUS MSR provides details on what
+ * happened. The driver makes the value of this MSR visible to applications
+ * via the "details" file (see below). Interrupted tests may be restarted.
+ *
+ * The IFS driver provides sysfs interfaces via /sys/devices/virtual/misc/intel_ifs_<n>/
+ * to control execution:
+ *
+ * Test a specific core::
+ *
+ * # echo <cpu#> > /sys/devices/virtual/misc/intel_ifs_<n>/run_test
+ *
+ * When HT is enabled, any of the sibling cpu#s can be specified to test
+ * the corresponding physical core. Since the tests are per physical core,
+ * the result of testing any thread is the same. All siblings must be online
+ * to run a core test; it is only necessary to test one thread.
+ *
+ * For example, to test the core corresponding to cpu5::
+ *
+ * # echo 5 > /sys/devices/virtual/misc/intel_ifs_<n>/run_test
+ *
+ * The result of the last test is provided in /sys::
+ *
+ * $ cat /sys/devices/virtual/misc/intel_ifs_<n>/status
+ * pass
+ *
+ * Status can be one of pass, fail or untested.
+ *
+ * Additional details of the last test are provided by the details file::
+ *
+ * $ cat /sys/devices/virtual/misc/intel_ifs_<n>/details
+ * 0x8081
+ *
+ * The details file reports the hex value of the test specific status MSR.
+ * Hardware defined error codes are documented in volume 4 of the Intel
+ * Software Developer's Manual but the error_code field may contain one of
+ * the following driver defined software codes:
+ *
+ * +------+--------------------+
+ * | 0xFD | Software timeout |
+ * +------+--------------------+
+ * | 0xFE | Partial completion |
+ * +------+--------------------+
+ *
+ * Driver design choices
+ * ---------------------
+ *
+ * 1) The ACTIVATE_SCAN MSR allows for running any consecutive subrange of
+ * available tests. But the driver always tries to run all tests and only
+ * uses the subrange feature to restart an interrupted test.
+ *
+ * 2) Hardware allows for some number of cores to be tested in parallel.
+ * The driver does not make use of this, it only tests one core at a time.
+ *
+ * .. [#f1] https://github.com/intel/TBD
+ */
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#define MSR_ARRAY_BIST 0x00000105
+#define MSR_COPY_SCAN_HASHES 0x000002c2
+#define MSR_SCAN_HASHES_STATUS 0x000002c3
+#define MSR_AUTHENTICATE_AND_COPY_CHUNK 0x000002c4
+#define MSR_CHUNKS_AUTHENTICATION_STATUS 0x000002c5
+#define MSR_ACTIVATE_SCAN 0x000002c6
+#define MSR_SCAN_STATUS 0x000002c7
+#define SCAN_NOT_TESTED 0
+#define SCAN_TEST_PASS 1
+#define SCAN_TEST_FAIL 2
+
+#define IFS_TYPE_SAF 0
+#define IFS_TYPE_ARRAY_BIST 1
+
+/* MSR_SCAN_HASHES_STATUS bit fields */
+union ifs_scan_hashes_status {
+ u64 data;
+ struct {
+ u32 chunk_size :16;
+ u32 num_chunks :8;
+ u32 rsvd1 :8;
+ u32 error_code :8;
+ u32 rsvd2 :11;
+ u32 max_core_limit :12;
+ u32 valid :1;
+ };
+};
+
+/* MSR_CHUNKS_AUTH_STATUS bit fields */
+union ifs_chunks_auth_status {
+ u64 data;
+ struct {
+ u32 valid_chunks :8;
+ u32 total_chunks :8;
+ u32 rsvd1 :16;
+ u32 error_code :8;
+ u32 rsvd2 :24;
+ };
+};
+
+/* MSR_ACTIVATE_SCAN bit fields */
+union ifs_scan {
+ u64 data;
+ struct {
+ u32 start :8;
+ u32 stop :8;
+ u32 rsvd :16;
+ u32 delay :31;
+ u32 sigmce :1;
+ };
+};
+
+/* MSR_SCAN_STATUS bit fields */
+union ifs_status {
+ u64 data;
+ struct {
+ u32 chunk_num :8;
+ u32 chunk_stop_index :8;
+ u32 rsvd1 :16;
+ u32 error_code :8;
+ u32 rsvd2 :22;
+ u32 control_error :1;
+ u32 signature_error :1;
+ };
+};
+
+/* MSR_ARRAY_BIST bit fields */
+union ifs_array {
+ u64 data;
+ struct {
+ u32 array_bitmask;
+ u16 array_bank;
+ u16 rsvd :15;
+ u16 ctrl_result :1;
+ };
+};
+
+/*
+ * Driver populated error-codes
+ * 0xFD: Test timed out before completing all the chunks.
+ * 0xFE: not all scan chunks were executed. Maximum forward progress retries exceeded.
+ */
+#define IFS_SW_TIMEOUT 0xFD
+#define IFS_SW_PARTIAL_COMPLETION 0xFE
+
+struct ifs_test_caps {
+ int integrity_cap_bit;
+ int test_num;
+};
+
+/**
+ * struct ifs_data - attributes related to intel IFS driver
+ * @loaded_version: stores the currently loaded ifs image version.
+ * @loaded: If a valid test binary has been loaded into the memory
+ * @loading_error: Error occurred on another CPU while loading image
+ * @valid_chunks: number of chunks which could be validated.
+ * @status: it holds simple status pass/fail/untested
+ * @scan_details: opaque scan status code from h/w
+ * @cur_batch: number indicating the currently loaded test file
+ */
+struct ifs_data {
+ int loaded_version;
+ bool loaded;
+ bool loading_error;
+ int valid_chunks;
+ int status;
+ u64 scan_details;
+ u32 cur_batch;
+};
+
+struct ifs_work {
+ struct work_struct w;
+ struct device *dev;
+};
+
+struct ifs_device {
+ const struct ifs_test_caps *test_caps;
+ struct ifs_data rw_data;
+ struct miscdevice misc;
+};
+
+static inline struct ifs_data *ifs_get_data(struct device *dev)
+{
+ struct miscdevice *m = dev_get_drvdata(dev);
+ struct ifs_device *d = container_of(m, struct ifs_device, misc);
+
+ return &d->rw_data;
+}
+
+static inline const struct ifs_test_caps *ifs_get_test_caps(struct device *dev)
+{
+ struct miscdevice *m = dev_get_drvdata(dev);
+ struct ifs_device *d = container_of(m, struct ifs_device, misc);
+
+ return d->test_caps;
+}
+
+extern bool *ifs_pkg_auth;
+int ifs_load_firmware(struct device *dev);
+int do_core_test(int cpu, struct device *dev);
+extern struct attribute *plat_ifs_attrs[];
+extern struct attribute *plat_ifs_array_attrs[];
+
+#endif
diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c
new file mode 100644
index 0000000000..cefd0d886c
--- /dev/null
+++ b/drivers/platform/x86/intel/ifs/load.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation. */
+
+#include <linux/firmware.h>
+#include <asm/cpu.h>
+#include <asm/microcode.h>
+
+#include "ifs.h"
+
+#define IFS_CHUNK_ALIGNMENT 256
+union meta_data {
+ struct {
+ u32 meta_type; // metadata type
+ u32 meta_size; // size of this entire struct including hdrs.
+ u32 test_type; // IFS test type
+ u32 fusa_info; // Fusa info
+ u32 total_images; // Total number of images
+ u32 current_image; // Current Image #
+ u32 total_chunks; // Total number of chunks in this image
+ u32 starting_chunk; // Starting chunk number in this image
+ u32 size_per_chunk; // size of each chunk
+ u32 chunks_per_stride; // number of chunks in a stride
+ };
+ u8 padding[IFS_CHUNK_ALIGNMENT];
+};
+
+#define IFS_HEADER_SIZE (sizeof(struct microcode_header_intel))
+#define META_TYPE_IFS 1
+static struct microcode_header_intel *ifs_header_ptr; /* pointer to the ifs image header */
+static u64 ifs_hash_ptr; /* Address of ifs metadata (hash) */
+static u64 ifs_test_image_ptr; /* 256B aligned address of test pattern */
+static DECLARE_COMPLETION(ifs_done);
+
+static const char * const scan_hash_status[] = {
+ [0] = "No error reported",
+ [1] = "Attempt to copy scan hashes when copy already in progress",
+ [2] = "Secure Memory not set up correctly",
+ [3] = "FuSaInfo.ProgramID does not match or ff-mm-ss does not match",
+ [4] = "Reserved",
+ [5] = "Integrity check failed",
+ [6] = "Scan reload or test is in progress"
+};
+
+static const char * const scan_authentication_status[] = {
+ [0] = "No error reported",
+ [1] = "Attempt to authenticate a chunk which is already marked as authentic",
+ [2] = "Chunk authentication error. The hash of chunk did not match expected value"
+};
+
+#define MC_HEADER_META_TYPE_END (0)
+
+struct metadata_header {
+ unsigned int type;
+ unsigned int blk_size;
+};
+
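+/*
+ * Walk the metadata blocks appended after the microcode-style data section
+ * until the terminating block (type 0) and return the first block matching
+ * meta_type, or NULL if none is found.
+ */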
+static struct metadata_header *find_meta_data(void *ucode, unsigned int meta_type)
+{
+ struct microcode_header_intel *hdr = &((struct microcode_intel *)ucode)->hdr;
+ struct metadata_header *meta_header;
+ unsigned long data_size, total_meta;
+ unsigned long meta_size = 0;
+
+ data_size = intel_microcode_get_datasize(hdr);
+ total_meta = hdr->metasize;
+ if (!total_meta)
+ return NULL;
+
+ meta_header = (ucode + MC_HEADER_SIZE + data_size) - total_meta;
+
+ while (meta_header->type != MC_HEADER_META_TYPE_END &&
+ meta_header->blk_size &&
+ meta_size < total_meta) {
+ meta_size += meta_header->blk_size;
+ if (meta_header->type == meta_type)
+ return meta_header;
+
+ meta_header = (void *)meta_header + meta_header->blk_size;
+ }
+ return NULL;
+}
+
+/*
+ * To copy scan hashes and authenticate test chunks, the initiating CPU must
+ * point EDX:EAX at the linear address of the test image.
+ * Run wrmsr(MSR_COPY_SCAN_HASHES) for the scan hash copy and
+ * wrmsr(MSR_AUTHENTICATE_AND_COPY_CHUNK) for test chunk authentication.
+ */
+static void copy_hashes_authenticate_chunks(struct work_struct *work)
+{
+ struct ifs_work *local_work = container_of(work, struct ifs_work, w);
+ union ifs_scan_hashes_status hashes_status;
+ union ifs_chunks_auth_status chunk_status;
+ struct device *dev = local_work->dev;
+ int i, num_chunks, chunk_size;
+ struct ifs_data *ifsd;
+ u64 linear_addr, base;
+ u32 err_code;
+
+ ifsd = ifs_get_data(dev);
+ /* run scan hash copy */
+ wrmsrl(MSR_COPY_SCAN_HASHES, ifs_hash_ptr);
+ rdmsrl(MSR_SCAN_HASHES_STATUS, hashes_status.data);
+
+ /* enumerate the scan image information */
+ num_chunks = hashes_status.num_chunks;
+ chunk_size = hashes_status.chunk_size * 1024;
+ err_code = hashes_status.error_code;
+
+ if (!hashes_status.valid) {
+ ifsd->loading_error = true;
+ if (err_code >= ARRAY_SIZE(scan_hash_status)) {
+ dev_err(dev, "invalid error code 0x%x for hash copy\n", err_code);
+ goto done;
+ }
+ dev_err(dev, "Hash copy error : %s", scan_hash_status[err_code]);
+ goto done;
+ }
+
+ /* base linear address to the scan data */
+ base = ifs_test_image_ptr;
+
+ /* scan data authentication and copy chunks to secured memory */
+ for (i = 0; i < num_chunks; i++) {
+ linear_addr = base + i * chunk_size;
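+		/* the chunk index rides in the low bits of the 256-byte-aligned address */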
+ linear_addr |= i;
+
+ wrmsrl(MSR_AUTHENTICATE_AND_COPY_CHUNK, linear_addr);
+ rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data);
+
+ ifsd->valid_chunks = chunk_status.valid_chunks;
+ err_code = chunk_status.error_code;
+
+ if (err_code) {
+ ifsd->loading_error = true;
+ if (err_code >= ARRAY_SIZE(scan_authentication_status)) {
+ dev_err(dev,
+ "invalid error code 0x%x for authentication\n", err_code);
+ goto done;
+ }
+ dev_err(dev, "Chunk authentication error %s\n",
+ scan_authentication_status[err_code]);
+ goto done;
+ }
+ }
+done:
+ complete(&ifs_done);
+}
+
+static int validate_ifs_metadata(struct device *dev)
+{
+ struct ifs_data *ifsd = ifs_get_data(dev);
+ union meta_data *ifs_meta;
+ char test_file[64];
+ int ret = -EINVAL;
+
+ snprintf(test_file, sizeof(test_file), "%02x-%02x-%02x-%02x.scan",
+ boot_cpu_data.x86, boot_cpu_data.x86_model,
+ boot_cpu_data.x86_stepping, ifsd->cur_batch);
+
+ ifs_meta = (union meta_data *)find_meta_data(ifs_header_ptr, META_TYPE_IFS);
+ if (!ifs_meta) {
+ dev_err(dev, "IFS Metadata missing in file %s\n", test_file);
+ return ret;
+ }
+
+ ifs_test_image_ptr = (u64)ifs_meta + sizeof(union meta_data);
+
+ /* Scan chunk start must be 256 byte aligned */
+ if (!IS_ALIGNED(ifs_test_image_ptr, IFS_CHUNK_ALIGNMENT)) {
+ dev_err(dev, "Scan pattern is not aligned on %d bytes aligned in %s\n",
+ IFS_CHUNK_ALIGNMENT, test_file);
+ return ret;
+ }
+
+ if (ifs_meta->current_image != ifsd->cur_batch) {
+ dev_warn(dev, "Mismatch between filename %s and batch metadata 0x%02x\n",
+ test_file, ifs_meta->current_image);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * IFS requires scan chunks to be authenticated on each socket in the platform.
+ * Once a test chunk is authenticated, it is automatically copied to secured
+ * memory and authentication proceeds with the next chunk.
+ */
+static int scan_chunks_sanity_check(struct device *dev)
+{
+ struct ifs_data *ifsd = ifs_get_data(dev);
+ struct ifs_work local_work;
+ int curr_pkg, cpu, ret;
+
+ memset(ifs_pkg_auth, 0, (topology_max_packages() * sizeof(bool)));
+ ret = validate_ifs_metadata(dev);
+ if (ret)
+ return ret;
+
+ ifsd->loading_error = false;
+ ifsd->loaded_version = ifs_header_ptr->rev;
+
+ /* copy the scan hash and authenticate per package */
+ cpus_read_lock();
+ for_each_online_cpu(cpu) {
+ curr_pkg = topology_physical_package_id(cpu);
+ if (ifs_pkg_auth[curr_pkg])
+ continue;
+ reinit_completion(&ifs_done);
+ local_work.dev = dev;
+ INIT_WORK_ONSTACK(&local_work.w, copy_hashes_authenticate_chunks);
+ schedule_work_on(cpu, &local_work.w);
+ wait_for_completion(&ifs_done);
+ if (ifsd->loading_error) {
+ ret = -EIO;
+ goto out;
+ }
+ ifs_pkg_auth[curr_pkg] = 1;
+ }
+ ret = 0;
+out:
+ cpus_read_unlock();
+
+ return ret;
+}
+
+static int image_sanity_check(struct device *dev, const struct microcode_header_intel *data)
+{
+ struct ucode_cpu_info uci;
+
+ /* Provide a specific error message when loading an older/unsupported image */
+ if (data->hdrver != MC_HEADER_TYPE_IFS) {
+ dev_err(dev, "Header version %d not supported\n", data->hdrver);
+ return -EINVAL;
+ }
+
+ if (intel_microcode_sanity_check((void *)data, true, MC_HEADER_TYPE_IFS)) {
+ dev_err(dev, "sanity check failed\n");
+ return -EINVAL;
+ }
+
+ intel_cpu_collect_info(&uci);
+
+ if (!intel_find_matching_signature((void *)data,
+ uci.cpu_sig.sig,
+ uci.cpu_sig.pf)) {
+ dev_err(dev, "cpu signature, processor flags not matching\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Load the IFS image. Before loading the IFS module, the IFS image must be
+ * located in /lib/firmware/intel/ifs_x/ and named as family-model-stepping-02x.{testname}.
+ */
+int ifs_load_firmware(struct device *dev)
+{
+ const struct ifs_test_caps *test = ifs_get_test_caps(dev);
+ struct ifs_data *ifsd = ifs_get_data(dev);
+ const struct firmware *fw;
+ char scan_path[64];
+ int ret = -EINVAL;
+
+ snprintf(scan_path, sizeof(scan_path), "intel/ifs_%d/%02x-%02x-%02x-%02x.scan",
+ test->test_num, boot_cpu_data.x86, boot_cpu_data.x86_model,
+ boot_cpu_data.x86_stepping, ifsd->cur_batch);
+
+ ret = request_firmware_direct(&fw, scan_path, dev);
+ if (ret) {
+ dev_err(dev, "ifs file %s load failed\n", scan_path);
+ goto done;
+ }
+
+ ret = image_sanity_check(dev, (struct microcode_header_intel *)fw->data);
+ if (ret)
+ goto release;
+
+ ifs_header_ptr = (struct microcode_header_intel *)fw->data;
+ ifs_hash_ptr = (u64)(ifs_header_ptr + 1);
+
+ ret = scan_chunks_sanity_check(dev);
+ if (ret)
+ dev_err(dev, "Load failure for batch: %02x\n", ifsd->cur_batch);
+
+release:
+ release_firmware(fw);
+done:
+ ifsd->loaded = (ret == 0);
+
+ return ret;
+}
diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c
new file mode 100644
index 0000000000..43c864add7
--- /dev/null
+++ b/drivers/platform/x86/intel/ifs/runtest.c
@@ -0,0 +1,347 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation. */
+
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/nmi.h>
+#include <linux/slab.h>
+#include <linux/stop_machine.h>
+
+#include "ifs.h"
+
+/*
+ * Note all code and data in this file is protected by
+ * ifs_sem. On HT systems all threads on a core will
+ * execute together, but only the first thread on the
+ * core will update results of the test.
+ */
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/intel_ifs.h>
+
+/* Max retries on the same chunk */
+#define MAX_IFS_RETRIES 5
+
+/*
+ * Number of TSC cycles that a logical CPU will wait for the other
+ * logical CPU on the core in the WRMSR(ACTIVATE_SCAN).
+ */
+#define IFS_THREAD_WAIT 100000
+
+enum ifs_status_err_code {
+ IFS_NO_ERROR = 0,
+ IFS_OTHER_THREAD_COULD_NOT_JOIN = 1,
+ IFS_INTERRUPTED_BEFORE_RENDEZVOUS = 2,
+ IFS_POWER_MGMT_INADEQUATE_FOR_SCAN = 3,
+ IFS_INVALID_CHUNK_RANGE = 4,
+ IFS_MISMATCH_ARGUMENTS_BETWEEN_THREADS = 5,
+ IFS_CORE_NOT_CAPABLE_CURRENTLY = 6,
+ IFS_UNASSIGNED_ERROR_CODE = 7,
+ IFS_EXCEED_NUMBER_OF_THREADS_CONCURRENT = 8,
+ IFS_INTERRUPTED_DURING_EXECUTION = 9,
+};
+
+static const char * const scan_test_status[] = {
+ [IFS_NO_ERROR] = "SCAN no error",
+ [IFS_OTHER_THREAD_COULD_NOT_JOIN] = "Other thread could not join.",
+ [IFS_INTERRUPTED_BEFORE_RENDEZVOUS] = "Interrupt occurred prior to SCAN coordination.",
+ [IFS_POWER_MGMT_INADEQUATE_FOR_SCAN] =
+ "Core Abort SCAN Response due to power management condition.",
+ [IFS_INVALID_CHUNK_RANGE] = "Non valid chunks in the range",
+ [IFS_MISMATCH_ARGUMENTS_BETWEEN_THREADS] = "Mismatch in arguments between threads T0/T1.",
+ [IFS_CORE_NOT_CAPABLE_CURRENTLY] = "Core not capable of performing SCAN currently",
+ [IFS_UNASSIGNED_ERROR_CODE] = "Unassigned error code 0x7",
+ [IFS_EXCEED_NUMBER_OF_THREADS_CONCURRENT] =
+ "Exceeded number of Logical Processors (LP) allowed to run Scan-At-Field concurrently",
+ [IFS_INTERRUPTED_DURING_EXECUTION] = "Interrupt occurred prior to SCAN start",
+};
+
+static void message_not_tested(struct device *dev, int cpu, union ifs_status status)
+{
+ if (status.error_code < ARRAY_SIZE(scan_test_status)) {
+ dev_info(dev, "CPU(s) %*pbl: SCAN operation did not start. %s\n",
+ cpumask_pr_args(cpu_smt_mask(cpu)),
+ scan_test_status[status.error_code]);
+ } else if (status.error_code == IFS_SW_TIMEOUT) {
+ dev_info(dev, "CPU(s) %*pbl: software timeout during scan\n",
+ cpumask_pr_args(cpu_smt_mask(cpu)));
+ } else if (status.error_code == IFS_SW_PARTIAL_COMPLETION) {
+ dev_info(dev, "CPU(s) %*pbl: %s\n",
+ cpumask_pr_args(cpu_smt_mask(cpu)),
+ "Not all scan chunks were executed. Maximum forward progress retries exceeded");
+ } else {
+ dev_info(dev, "CPU(s) %*pbl: SCAN unknown status %llx\n",
+ cpumask_pr_args(cpu_smt_mask(cpu)), status.data);
+ }
+}
+
+static void message_fail(struct device *dev, int cpu, union ifs_status status)
+{
+ struct ifs_data *ifsd = ifs_get_data(dev);
+
+ /*
+ * control_error is set when the microcode runs into a problem
+ * loading the image from the reserved BIOS memory, or it has
+ * been corrupted. Reloading the image may fix this issue.
+ */
+ if (status.control_error) {
+ dev_err(dev, "CPU(s) %*pbl: could not execute from loaded scan image. Batch: %02x version: 0x%x\n",
+ cpumask_pr_args(cpu_smt_mask(cpu)), ifsd->cur_batch, ifsd->loaded_version);
+ }
+
+ /*
+ * signature_error is set when the output from the scan chains does not
+ * match the expected signature. This might be a transient problem (e.g.
+ * due to a bit flip from an alpha particle or neutron). If the problem
+ * repeats on a subsequent test, then it indicates an actual problem in
+ * the core being tested.
+ */
+ if (status.signature_error) {
+ dev_err(dev, "CPU(s) %*pbl: test signature incorrect. Batch: %02x version: 0x%x\n",
+ cpumask_pr_args(cpu_smt_mask(cpu)), ifsd->cur_batch, ifsd->loaded_version);
+ }
+}
+
+static bool can_restart(union ifs_status status)
+{
+ enum ifs_status_err_code err_code = status.error_code;
+
+ /* Signature for chunk is bad, or scan test failed */
+ if (status.signature_error || status.control_error)
+ return false;
+
+ switch (err_code) {
+ case IFS_NO_ERROR:
+ case IFS_OTHER_THREAD_COULD_NOT_JOIN:
+ case IFS_INTERRUPTED_BEFORE_RENDEZVOUS:
+ case IFS_POWER_MGMT_INADEQUATE_FOR_SCAN:
+ case IFS_EXCEED_NUMBER_OF_THREADS_CONCURRENT:
+ case IFS_INTERRUPTED_DURING_EXECUTION:
+ return true;
+ case IFS_INVALID_CHUNK_RANGE:
+ case IFS_MISMATCH_ARGUMENTS_BETWEEN_THREADS:
+ case IFS_CORE_NOT_CAPABLE_CURRENTLY:
+ case IFS_UNASSIGNED_ERROR_CODE:
+ break;
+ }
+ return false;
+}
+
+/*
+ * Execute the scan. Called "simultaneously" on all threads of a core
+ * at high priority using the stop_cpus mechanism.
+ */
+static int doscan(void *data)
+{
+ int cpu = smp_processor_id();
+ u64 *msrs = data;
+ int first;
+
+ /* Only the first logical CPU on a core reports result */
+ first = cpumask_first(cpu_smt_mask(cpu));
+
+ /*
+ * This WRMSR will wait for other HT threads to also write
+ * to this MSR (at most for activate.delay cycles). Then it
+ * starts scan of each requested chunk. The core scan happens
+ * during the "execution" of the WRMSR. This instruction can
+ * take up to 200 milliseconds (in the case where all chunks
+ * are processed in a single pass) before it retires.
+ */
+ wrmsrl(MSR_ACTIVATE_SCAN, msrs[0]);
+
+ if (cpu == first) {
+ /* Pass back the result of the scan */
+ rdmsrl(MSR_SCAN_STATUS, msrs[1]);
+ }
+
+ return 0;
+}
+
+/*
+ * Use stop_core_cpuslocked() to synchronize writing to MSR_ACTIVATE_SCAN
+ * on all threads of the core to be tested. Loop if necessary to complete
+ * run of all chunks. Include some defensive tests to make sure forward
+ * progress is made, and that the whole test completes in a reasonable time.
+ */
+static void ifs_test_core(int cpu, struct device *dev)
+{
+ union ifs_scan activate;
+ union ifs_status status;
+ unsigned long timeout;
+ struct ifs_data *ifsd;
+ u64 msrvals[2];
+ int retries;
+
+ ifsd = ifs_get_data(dev);
+
+ activate.rsvd = 0;
+ activate.delay = IFS_THREAD_WAIT;
+ activate.sigmce = 0;
+ activate.start = 0;
+ activate.stop = ifsd->valid_chunks - 1;
+
+ timeout = jiffies + HZ / 2;
+ retries = MAX_IFS_RETRIES;
+
+ while (activate.start <= activate.stop) {
+ if (time_after(jiffies, timeout)) {
+ status.error_code = IFS_SW_TIMEOUT;
+ break;
+ }
+
+ msrvals[0] = activate.data;
+ stop_core_cpuslocked(cpu, doscan, msrvals);
+
+ status.data = msrvals[1];
+
+ trace_ifs_status(cpu, activate, status);
+
+ /* Some cases can be retried, give up for others */
+ if (!can_restart(status))
+ break;
+
+ if (status.chunk_num == activate.start) {
+ /* Check for forward progress */
+ if (--retries == 0) {
+ if (status.error_code == IFS_NO_ERROR)
+ status.error_code = IFS_SW_PARTIAL_COMPLETION;
+ break;
+ }
+ } else {
+ retries = MAX_IFS_RETRIES;
+ activate.start = status.chunk_num;
+ }
+ }
+
+ /* Update status for this core */
+ ifsd->scan_details = status.data;
+
+ if (status.control_error || status.signature_error) {
+ ifsd->status = SCAN_TEST_FAIL;
+ message_fail(dev, cpu, status);
+ } else if (status.error_code) {
+ ifsd->status = SCAN_NOT_TESTED;
+ message_not_tested(dev, cpu, status);
+ } else {
+ ifsd->status = SCAN_TEST_PASS;
+ }
+}
+
+#define SPINUNIT 100 /* 100 nsec */
+static atomic_t array_cpus_out;
+
+/*
+ * Simplified cpu sibling rendezvous loop based on microcode loader __wait_for_cpus()
+ */
+static void wait_for_sibling_cpu(atomic_t *t, long long timeout)
+{
+ int cpu = smp_processor_id();
+ const struct cpumask *smt_mask = cpu_smt_mask(cpu);
+ int all_cpus = cpumask_weight(smt_mask);
+
+ atomic_inc(t);
+ while (atomic_read(t) < all_cpus) {
+ if (timeout < SPINUNIT)
+ return;
+ ndelay(SPINUNIT);
+ timeout -= SPINUNIT;
+ touch_nmi_watchdog();
+ }
+}
+
+static int do_array_test(void *data)
+{
+ union ifs_array *command = data;
+ int cpu = smp_processor_id();
+ int first;
+
+ /*
+ * Only one logical CPU on a core needs to trigger the Array test via MSR write.
+ */
+ first = cpumask_first(cpu_smt_mask(cpu));
+
+ if (cpu == first) {
+ wrmsrl(MSR_ARRAY_BIST, command->data);
+ /* Pass back the result of the test */
+ rdmsrl(MSR_ARRAY_BIST, command->data);
+ }
+
+ /* Tests complete faster if the sibling is spinning here */
+ wait_for_sibling_cpu(&array_cpus_out, NSEC_PER_SEC);
+
+ return 0;
+}
+
+static void ifs_array_test_core(int cpu, struct device *dev)
+{
+ union ifs_array command = {};
+ bool timed_out = false;
+ struct ifs_data *ifsd;
+ unsigned long timeout;
+
+ ifsd = ifs_get_data(dev);
+
+ command.array_bitmask = ~0U;
+ timeout = jiffies + HZ / 2;
+
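+	/* hardware clears bits for arrays it tested; retry until none remain or an error is flagged */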
+ do {
+ if (time_after(jiffies, timeout)) {
+ timed_out = true;
+ break;
+ }
+ atomic_set(&array_cpus_out, 0);
+ stop_core_cpuslocked(cpu, do_array_test, &command);
+
+ if (command.ctrl_result)
+ break;
+ } while (command.array_bitmask);
+
+ ifsd->scan_details = command.data;
+
+ if (command.ctrl_result)
+ ifsd->status = SCAN_TEST_FAIL;
+ else if (timed_out || command.array_bitmask)
+ ifsd->status = SCAN_NOT_TESTED;
+ else
+ ifsd->status = SCAN_TEST_PASS;
+}
+
+/*
+ * Initiate a per-core test. It wakes up work queue threads on the target CPU
+ * and its sibling CPU. Once all sibling threads wake up, the scan test is
+ * executed and we wait for all sibling threads to finish.
+ */
+int do_core_test(int cpu, struct device *dev)
+{
+ const struct ifs_test_caps *test = ifs_get_test_caps(dev);
+ struct ifs_data *ifsd = ifs_get_data(dev);
+ int ret = 0;
+
+ /* Prevent CPUs from being taken offline during the scan test */
+ cpus_read_lock();
+
+ if (!cpu_online(cpu)) {
+ dev_info(dev, "cannot test on the offline cpu %d\n", cpu);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (test->test_num) {
+ case IFS_TYPE_SAF:
+ if (!ifsd->loaded)
+ ret = -EPERM;
+ else
+ ifs_test_core(cpu, dev);
+ break;
+ case IFS_TYPE_ARRAY_BIST:
+ ifs_array_test_core(cpu, dev);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+out:
+ cpus_read_unlock();
+ return ret;
+}
diff --git a/drivers/platform/x86/intel/ifs/sysfs.c b/drivers/platform/x86/intel/ifs/sysfs.c
new file mode 100644
index 0000000000..01b7502f46
--- /dev/null
+++ b/drivers/platform/x86/intel/ifs/sysfs.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation. */
+
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/semaphore.h>
+#include <linux/slab.h>
+
+#include "ifs.h"
+
+/*
+ * Protects against simultaneous tests on multiple cores, or
+ * reloading the scan file while a test is in progress.
+ */
+static DEFINE_SEMAPHORE(ifs_sem, 1);
+
+/*
+ * The sysfs interface to check additional details of the last test:
+ * cat /sys/devices/system/platform/ifs/details
+ */
+static ssize_t details_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ifs_data *ifsd = ifs_get_data(dev);
+
+ return sysfs_emit(buf, "%#llx\n", ifsd->scan_details);
+}
+
+static DEVICE_ATTR_RO(details);
+
+static const char * const status_msg[] = {
+ [SCAN_NOT_TESTED] = "untested",
+ [SCAN_TEST_PASS] = "pass",
+ [SCAN_TEST_FAIL] = "fail"
+};
+
+/*
+ * The sysfs interface to check the test status:
+ * To check the status of the last test
+ * cat /sys/devices/platform/ifs/status
+ */
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ifs_data *ifsd = ifs_get_data(dev);
+
+ return sysfs_emit(buf, "%s\n", status_msg[ifsd->status]);
+}
+
+static DEVICE_ATTR_RO(status);
+
+/*
+ * The sysfs interface for single core testing.
+ * To start a test on, for example, cpu5:
+ * echo 5 > /sys/devices/platform/ifs/run_test
+ * To check the result:
+ * cat /sys/devices/platform/ifs/result
+ * The sibling core gets tested at the same time.
+ */
+static ssize_t run_test_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int cpu;
+ int rc;
+
+ rc = kstrtouint(buf, 0, &cpu);
+ if (rc < 0 || cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ if (down_interruptible(&ifs_sem))
+ return -EINTR;
+
+ rc = do_core_test(cpu, dev);
+
+ up(&ifs_sem);
+
+ return rc ? rc : count;
+}
+
+static DEVICE_ATTR_WO(run_test);
+
+static ssize_t current_batch_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ifs_data *ifsd = ifs_get_data(dev);
+ unsigned int cur_batch;
+ int rc;
+
+ rc = kstrtouint(buf, 0, &cur_batch);
+ if (rc < 0 || cur_batch > 0xff)
+ return -EINVAL;
+
+ if (down_interruptible(&ifs_sem))
+ return -EINTR;
+
+ ifsd->cur_batch = cur_batch;
+
+ rc = ifs_load_firmware(dev);
+
+ up(&ifs_sem);
+
+ return (rc == 0) ? count : rc;
+}
+
+static ssize_t current_batch_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ifs_data *ifsd = ifs_get_data(dev);
+
+ if (!ifsd->loaded)
+ return sysfs_emit(buf, "none\n");
+ else
+ return sysfs_emit(buf, "0x%02x\n", ifsd->cur_batch);
+}
+
+static DEVICE_ATTR_RW(current_batch);
+
+/*
+ * Display currently loaded IFS image version.
+ */
+static ssize_t image_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ifs_data *ifsd = ifs_get_data(dev);
+
+ if (!ifsd->loaded)
+ return sysfs_emit(buf, "%s\n", "none");
+ else
+ return sysfs_emit(buf, "%#x\n", ifsd->loaded_version);
+}
+
+static DEVICE_ATTR_RO(image_version);
+
+/* global scan sysfs attributes */
+struct attribute *plat_ifs_attrs[] = {
+ &dev_attr_details.attr,
+ &dev_attr_status.attr,
+ &dev_attr_run_test.attr,
+ &dev_attr_current_batch.attr,
+ &dev_attr_image_version.attr,
+ NULL
+};
+
+/* global array sysfs attributes */
+struct attribute *plat_ifs_array_attrs[] = {
+ &dev_attr_details.attr,
+ &dev_attr_status.attr,
+ &dev_attr_run_test.attr,
+ NULL
+};
diff --git a/drivers/platform/x86/intel/int0002_vgpio.c b/drivers/platform/x86/intel/int0002_vgpio.c
new file mode 100644
index 0000000000..b6708bab7c
--- /dev/null
+++ b/drivers/platform/x86/intel/int0002_vgpio.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel INT0002 "Virtual GPIO" driver
+ *
+ * Copyright (C) 2017 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Loosely based on android x86 kernel code which is:
+ *
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * Author: Dyut Kumar Sil <dyut.k.sil@intel.com>
+ *
+ * Some peripherals on Bay Trail and Cherry Trail platforms signal a Power
+ * Management Event (PME) to the Power Management Controller (PMC) to wakeup
+ * the system. When this happens software needs to clear the PME bus 0 status
+ * bit in the GPE0a_STS register to avoid an IRQ storm on IRQ 9.
+ *
+ * This is modelled in ACPI through the INT0002 ACPI device, which is
+ * called a "Virtual GPIO controller" in ACPI because it defines the event
+ * handler to call when the PME triggers through _AEI and _L02 / _E02
+ * methods as would be done for a real GPIO interrupt in ACPI. Note this
+ * is a hack to define an AML event handler for the PME while using existing
+ * ACPI mechanisms, this is not a real GPIO at all.
+ *
+ * This driver will bind to the INT0002 device, and register as a GPIO
+ * controller, letting gpiolib-acpi.c call the _L02 handler as it would
+ * for a real GPIO controller.
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitmap.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_data/x86/soc.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+
+#define DRV_NAME "INT0002 Virtual GPIO"
+
+/* For some reason the virtual GPIO pin tied to the GPE is numbered pin 2 */
+#define GPE0A_PME_B0_VIRT_GPIO_PIN 2
+
+#define GPE0A_PME_B0_STS_BIT BIT(13)
+#define GPE0A_PME_B0_EN_BIT BIT(13)
+#define GPE0A_STS_PORT 0x420
+#define GPE0A_EN_PORT 0x428
+
+struct int0002_data {
+ struct gpio_chip chip;
+ int parent_irq;
+ int wake_enable_count;
+};
+
+/*
+ * As this is not a real GPIO at all, but just a hack to model an event in
+ * ACPI the get / set functions are dummy functions.
+ */
+
+static int int0002_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ return 0;
+}
+
+static void int0002_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+}
+
+static int int0002_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ return 0;
+}
+
+static void int0002_irq_ack(struct irq_data *data)
+{
+ outl(GPE0A_PME_B0_STS_BIT, GPE0A_STS_PORT);
+}
+
+static void int0002_irq_unmask(struct irq_data *data)
+{
+ u32 gpe_en_reg;
+
+ gpe_en_reg = inl(GPE0A_EN_PORT);
+ gpe_en_reg |= GPE0A_PME_B0_EN_BIT;
+ outl(gpe_en_reg, GPE0A_EN_PORT);
+}
+
+static void int0002_irq_mask(struct irq_data *data)
+{
+ u32 gpe_en_reg;
+
+ gpe_en_reg = inl(GPE0A_EN_PORT);
+ gpe_en_reg &= ~GPE0A_PME_B0_EN_BIT;
+ outl(gpe_en_reg, GPE0A_EN_PORT);
+}
+
+static int int0002_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct int0002_data *int0002 = container_of(chip, struct int0002_data, chip);
+
+ /*
+ * Applying the wakeup flag to our parent IRQ is delayed until system
+ * suspend, because we only want to do this when using s2idle.
+ */
+ if (on)
+ int0002->wake_enable_count++;
+ else
+ int0002->wake_enable_count--;
+
+ return 0;
+}
+
+static irqreturn_t int0002_irq(int irq, void *data)
+{
+ struct gpio_chip *chip = data;
+ u32 gpe_sts_reg;
+
+ gpe_sts_reg = inl(GPE0A_STS_PORT);
+ if (!(gpe_sts_reg & GPE0A_PME_B0_STS_BIT))
+ return IRQ_NONE;
+
+ generic_handle_domain_irq_safe(chip->irq.domain, GPE0A_PME_B0_VIRT_GPIO_PIN);
+
+ pm_wakeup_hard_event(chip->parent);
+
+ return IRQ_HANDLED;
+}
+
+static bool int0002_check_wake(void *data)
+{
+ u32 gpe_sts_reg;
+
+ gpe_sts_reg = inl(GPE0A_STS_PORT);
+ return (gpe_sts_reg & GPE0A_PME_B0_STS_BIT);
+}
+
+static struct irq_chip int0002_irqchip = {
+ .name = DRV_NAME,
+ .irq_ack = int0002_irq_ack,
+ .irq_mask = int0002_irq_mask,
+ .irq_unmask = int0002_irq_unmask,
+ .irq_set_wake = int0002_irq_set_wake,
+};
+
+static void int0002_init_irq_valid_mask(struct gpio_chip *chip,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ bitmap_clear(valid_mask, 0, GPE0A_PME_B0_VIRT_GPIO_PIN);
+}
+
+static int int0002_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct int0002_data *int0002;
+ struct gpio_irq_chip *girq;
+ struct gpio_chip *chip;
+ int irq, ret;
+
+ /* Menlow has a different INT0002 device? <sigh> */
+ if (!soc_intel_is_byt() && !soc_intel_is_cht())
+ return -ENODEV;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ int0002 = devm_kzalloc(dev, sizeof(*int0002), GFP_KERNEL);
+ if (!int0002)
+ return -ENOMEM;
+
+ int0002->parent_irq = irq;
+
+ chip = &int0002->chip;
+ chip->label = DRV_NAME;
+ chip->parent = dev;
+ chip->owner = THIS_MODULE;
+ chip->get = int0002_gpio_get;
+ chip->set = int0002_gpio_set;
+ chip->direction_input = int0002_gpio_get;
+ chip->direction_output = int0002_gpio_direction_output;
+ chip->base = -1;
+ chip->ngpio = GPE0A_PME_B0_VIRT_GPIO_PIN + 1;
+ chip->irq.init_valid_mask = int0002_init_irq_valid_mask;
+
+ /*
+ * We directly request the irq here instead of passing a flow-handler
+ * to gpiochip_set_chained_irqchip, because the irq is shared.
+ * FIXME: augment this if we managed to pull handling of shared
+ * IRQs into gpiolib.
+ */
+ ret = devm_request_irq(dev, irq, int0002_irq,
+ IRQF_SHARED, "INT0002", chip);
+ if (ret) {
+ dev_err(dev, "Error requesting IRQ %d: %d\n", irq, ret);
+ return ret;
+ }
+
+ girq = &chip->irq;
+ girq->chip = &int0002_irqchip;
+ /* This lets us handle the parent IRQ in the driver */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_edge_irq;
+
+ ret = devm_gpiochip_add_data(dev, chip, NULL);
+ if (ret) {
+ dev_err(dev, "Error adding gpio chip: %d\n", ret);
+ return ret;
+ }
+
+ acpi_register_wakeup_handler(irq, int0002_check_wake, NULL);
+ device_init_wakeup(dev, true);
+ dev_set_drvdata(dev, int0002);
+ return 0;
+}
+
+static void int0002_remove(struct platform_device *pdev)
+{
+ device_init_wakeup(&pdev->dev, false);
+ acpi_unregister_wakeup_handler(int0002_check_wake, NULL);
+}
+
+static int int0002_suspend(struct device *dev)
+{
+ struct int0002_data *int0002 = dev_get_drvdata(dev);
+
+ /*
+ * The INT0002 parent IRQ is often shared with the ACPI GPE IRQ; don't
+ * muck with it when firmware-based suspend is used, otherwise we may
+ * cause spurious wakeups from firmware-managed suspend.
+ */
+ if (!pm_suspend_via_firmware() && int0002->wake_enable_count)
+ enable_irq_wake(int0002->parent_irq);
+
+ return 0;
+}
+
+static int int0002_resume(struct device *dev)
+{
+ struct int0002_data *int0002 = dev_get_drvdata(dev);
+
+ if (!pm_suspend_via_firmware() && int0002->wake_enable_count)
+ disable_irq_wake(int0002->parent_irq);
+
+ return 0;
+}
+
+static const struct dev_pm_ops int0002_pm_ops = {
+ .suspend = int0002_suspend,
+ .resume = int0002_resume,
+};
+
+static const struct acpi_device_id int0002_acpi_ids[] = {
+ { "INT0002", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, int0002_acpi_ids);
+
+static struct platform_driver int0002_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .acpi_match_table = int0002_acpi_ids,
+ .pm = &int0002_pm_ops,
+ },
+ .probe = int0002_probe,
+ .remove_new = int0002_remove,
+};
+
+module_platform_driver(int0002_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Intel INT0002 Virtual GPIO driver");
+MODULE_LICENSE("GPL v2");
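Since the driver's entire purpose is to show up as a GPIO controller so that gpiolib-acpi hooks the _L02 handler, one quick way to confirm it bound is to look for its chip label through the GPIO character-device uAPI. A sketch, with the assumption that the chip is among the first eight /dev/gpiochipN nodes:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/gpio.h>

int main(void)
{
	struct gpiochip_info info;
	char path[32];
	int i, fd;

	/* Scan the first few gpiochips for the INT0002 label */
	for (i = 0; i < 8; i++) {
		snprintf(path, sizeof(path), "/dev/gpiochip%d", i);
		fd = open(path, O_RDONLY);
		if (fd < 0)
			continue;
		if (!ioctl(fd, GPIO_GET_CHIPINFO_IOCTL, &info) &&
		    !strcmp(info.label, "INT0002 Virtual GPIO"))
			printf("%s: %s, %u lines\n", path, info.label,
			       info.lines);
		close(fd);
	}
	return 0;
}

Note that only offset 2 is a valid line (the init_valid_mask callback clears offsets 0 and 1), and the line itself is normally claimed by the ACPI event handler rather than by userspace.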
diff --git a/drivers/platform/x86/intel/int1092/Kconfig b/drivers/platform/x86/intel/int1092/Kconfig
new file mode 100644
index 0000000000..2e9a177241
--- /dev/null
+++ b/drivers/platform/x86/intel/int1092/Kconfig
@@ -0,0 +1,14 @@
+config INTEL_SAR_INT1092
+ tristate "Intel Specific Absorption Rate Driver"
+ depends on ACPI
+ help
+ This driver helps to limit the exposure of the human body to RF
+ frequencies by providing information to a userspace application, which
+ informs the Intel M.2 modem to regulate RF power based on SAR data
+ obtained from sensors and captured in the BIOS. An ACPI interface
+ exposes this data from the BIOS to the SAR driver. The front-end
+ application in userspace interacts with the SAR driver to obtain
+ information such as the device mode and the antenna, baseband and SAR
+ table indices, and uses an available channel such as the MBIM
+ interface to communicate with the modem for RF power regulation.
+ Enable this config when a given platform needs to support "Dynamic
+ SAR" configuration for a modem available on the platform.
diff --git a/drivers/platform/x86/intel/int1092/Makefile b/drivers/platform/x86/intel/int1092/Makefile
new file mode 100644
index 0000000000..4ab94e541d
--- /dev/null
+++ b/drivers/platform/x86/intel/int1092/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_INTEL_SAR_INT1092) += intel_sar.o
diff --git a/drivers/platform/x86/intel/int1092/intel_sar.c b/drivers/platform/x86/intel/int1092/intel_sar.c
new file mode 100644
index 0000000000..6246c066ad
--- /dev/null
+++ b/drivers/platform/x86/intel/int1092/intel_sar.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, Intel Corporation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/kobject.h>
+#include <linux/platform_device.h>
+#include <linux/sysfs.h>
+#include "intel_sar.h"
+
+/**
+ * get_int_value: Retrieve an integer value from an ACPI object
+ * @obj: acpi_object pointer holding the integer value
+ * @out: output pointer that receives the integer value
+ *
+ * Return:
+ * * 0 on success
+ * * -EIO if the acpi_object passed is not an integer.
+ */
+static int get_int_value(union acpi_object *obj, int *out)
+{
+ if (!obj || obj->type != ACPI_TYPE_INTEGER)
+ return -EIO;
+ *out = (int)obj->integer.value;
+ return 0;
+}
+
+/**
+ * update_sar_data: Update sar_data for the current regulatory mode
+ * @context: pointer to driver context structure
+ *
+ * sar_data is refreshed from the configuration selected by
+ * context->reg_value, which never exceeds MAX_REGULATORY.
+ */
+static void update_sar_data(struct wwan_sar_context *context)
+{
+ struct wwan_device_mode_configuration *config =
+ &context->config_data[context->reg_value];
+
+ if (config->device_mode_info &&
+ context->sar_data.device_mode < config->total_dev_mode) {
+ int itr = 0;
+
+ for (itr = 0; itr < config->total_dev_mode; itr++) {
+ if (context->sar_data.device_mode ==
+ config->device_mode_info[itr].device_mode) {
+ struct wwan_device_mode_info *dev_mode =
+ &config->device_mode_info[itr];
+
+ context->sar_data.antennatable_index = dev_mode->antennatable_index;
+ context->sar_data.bandtable_index = dev_mode->bandtable_index;
+ context->sar_data.sartable_index = dev_mode->sartable_index;
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * parse_package: parse acpi package for retrieving SAR information
+ * @context: pointer to driver context structure
+ * @item: acpi_object pointer
+ *
+ * The given acpi_object is iterated to retrieve information for each device
+ * mode. If the package corresponding to a specific device mode is faulty, it
+ * is skipped and its entry in the context structure keeps the default value
+ * of zero; the for loop simply continues with the next device mode on a
+ * parse error.
+ *
+ * Return:
+ * AE_OK if success
+ * AE_ERROR on error
+ */
+static acpi_status parse_package(struct wwan_sar_context *context, union acpi_object *item)
+{
+ struct wwan_device_mode_configuration *data;
+ int value, itr, reg;
+ union acpi_object *num;
+
+ num = &item->package.elements[0];
+ if (get_int_value(num, &value) || value < 0 || value >= MAX_REGULATORY)
+ return AE_ERROR;
+
+ reg = value;
+
+ data = &context->config_data[reg];
+ if (data->total_dev_mode > MAX_DEV_MODES || data->total_dev_mode == 0 ||
+ item->package.count <= data->total_dev_mode)
+ return AE_ERROR;
+
+ data->device_mode_info = kmalloc_array(data->total_dev_mode,
+ sizeof(struct wwan_device_mode_info), GFP_KERNEL);
+ if (!data->device_mode_info)
+ return AE_ERROR;
+
+ for (itr = 0; itr < data->total_dev_mode; itr++) {
+ struct wwan_device_mode_info temp = { 0 };
+
+ num = &item->package.elements[itr + 1];
+ if (num->type != ACPI_TYPE_PACKAGE || num->package.count < TOTAL_DATA)
+ continue;
+ if (get_int_value(&num->package.elements[0], &temp.device_mode))
+ continue;
+ if (get_int_value(&num->package.elements[1], &temp.bandtable_index))
+ continue;
+ if (get_int_value(&num->package.elements[2], &temp.antennatable_index))
+ continue;
+ if (get_int_value(&num->package.elements[3], &temp.sartable_index))
+ continue;
+ data->device_mode_info[itr] = temp;
+ }
+ return AE_OK;
+}
+
+/**
+ * sar_get_device_mode: Extraction of information from BIOS via DSM calls
+ * @device: ACPI device for which to retrieve the data
+ *
+ * Retrieve the current device mode information from the BIOS.
+ *
+ * Return:
+ * AE_OK on success
+ * AE_ERROR on error
+ */
+static acpi_status sar_get_device_mode(struct platform_device *device)
+{
+ struct wwan_sar_context *context = dev_get_drvdata(&device->dev);
+ acpi_status status = AE_OK;
+ union acpi_object *out;
+ u32 rev = 0;
+
+ out = acpi_evaluate_dsm_typed(context->handle, &context->guid, rev,
+ COMMAND_ID_DEV_MODE, NULL, ACPI_TYPE_INTEGER);
+ if (!out) {
+ dev_err(&device->dev, "DSM cmd:%d Failed to retrieve value\n", COMMAND_ID_DEV_MODE);
+ status = AE_ERROR;
+ goto dev_mode_error;
+ }
+ context->sar_data.device_mode = out->integer.value;
+ update_sar_data(context);
+ sysfs_notify(&device->dev.kobj, NULL, SYSFS_DATANAME);
+
+dev_mode_error:
+ ACPI_FREE(out);
+ return status;
+}
+
+static const struct acpi_device_id sar_device_ids[] = {
+ { "INTC1092", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, sar_device_ids);
+
+static ssize_t intc_data_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct wwan_sar_context *context = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d %d %d %d\n", context->sar_data.device_mode,
+ context->sar_data.bandtable_index,
+ context->sar_data.antennatable_index,
+ context->sar_data.sartable_index);
+}
+static DEVICE_ATTR_RO(intc_data);
+
+static ssize_t intc_reg_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct wwan_sar_context *context = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", context->reg_value);
+}
+
+static ssize_t intc_reg_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct wwan_sar_context *context = dev_get_drvdata(dev);
+ unsigned int value;
+ int read;
+
+ if (!count)
+ return -EINVAL;
+ read = kstrtouint(buf, 10, &value);
+ if (read < 0)
+ return read;
+ if (value >= MAX_REGULATORY)
+ return -EOVERFLOW;
+ context->reg_value = value;
+ update_sar_data(context);
+ sysfs_notify(&dev->kobj, NULL, SYSFS_DATANAME);
+ return count;
+}
+static DEVICE_ATTR_RW(intc_reg);
+
+static struct attribute *intcsar_attrs[] = {
+ &dev_attr_intc_data.attr,
+ &dev_attr_intc_reg.attr,
+ NULL
+};
+
+static struct attribute_group intcsar_group = {
+ .attrs = intcsar_attrs,
+};
+
+static void sar_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct platform_device *device = data;
+
+ if (event == SAR_EVENT) {
+ if (sar_get_device_mode(device) != AE_OK)
+ dev_err(&device->dev, "sar_get_device_mode error");
+ }
+}
+
+static void sar_get_data(int reg, struct wwan_sar_context *context)
+{
+ union acpi_object *out, req;
+ u32 rev = 0;
+
+ req.type = ACPI_TYPE_INTEGER;
+ req.integer.value = reg;
+ out = acpi_evaluate_dsm_typed(context->handle, &context->guid, rev,
+ COMMAND_ID_CONFIG_TABLE, &req, ACPI_TYPE_PACKAGE);
+ if (!out)
+ return;
+ if (out->package.count >= 3 &&
+ out->package.elements[0].type == ACPI_TYPE_INTEGER &&
+ out->package.elements[1].type == ACPI_TYPE_INTEGER &&
+ out->package.elements[2].type == ACPI_TYPE_PACKAGE &&
+ out->package.elements[2].package.count > 0) {
+ context->config_data[reg].version = out->package.elements[0].integer.value;
+ context->config_data[reg].total_dev_mode =
+ out->package.elements[1].integer.value;
+ if (context->config_data[reg].total_dev_mode <= 0 ||
+ context->config_data[reg].total_dev_mode > MAX_DEV_MODES) {
+ ACPI_FREE(out);
+ return;
+ }
+ parse_package(context, &out->package.elements[2]);
+ }
+ ACPI_FREE(out);
+}
+
+static int sar_probe(struct platform_device *device)
+{
+ struct wwan_sar_context *context;
+ int reg;
+ int result;
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
+ context->sar_device = device;
+ context->handle = ACPI_HANDLE(&device->dev);
+ dev_set_drvdata(&device->dev, context);
+
+ result = guid_parse(SAR_DSM_UUID, &context->guid);
+ if (result) {
+ dev_err(&device->dev, "SAR UUID parse error: %d\n", result);
+ goto r_free;
+ }
+
+ for (reg = 0; reg < MAX_REGULATORY; reg++)
+ sar_get_data(reg, context);
+
+ if (sar_get_device_mode(device) != AE_OK) {
+ dev_err(&device->dev, "Failed to get device mode\n");
+ result = -EIO;
+ goto r_free;
+ }
+
+ result = sysfs_create_group(&device->dev.kobj, &intcsar_group);
+ if (result) {
+ dev_err(&device->dev, "sysfs creation failed\n");
+ goto r_free;
+ }
+
+ if (acpi_install_notify_handler(ACPI_HANDLE(&device->dev), ACPI_DEVICE_NOTIFY,
+ sar_notify, (void *)device) != AE_OK) {
+ dev_err(&device->dev, "Failed acpi_install_notify_handler\n");
+ result = -EIO;
+ goto r_sys;
+ }
+ return 0;
+
+r_sys:
+ sysfs_remove_group(&device->dev.kobj, &intcsar_group);
+r_free:
+ kfree(context);
+ return result;
+}
+
+static void sar_remove(struct platform_device *device)
+{
+ struct wwan_sar_context *context = dev_get_drvdata(&device->dev);
+ int reg;
+
+ acpi_remove_notify_handler(ACPI_HANDLE(&device->dev),
+ ACPI_DEVICE_NOTIFY, sar_notify);
+ sysfs_remove_group(&device->dev.kobj, &intcsar_group);
+ for (reg = 0; reg < MAX_REGULATORY; reg++)
+ kfree(context->config_data[reg].device_mode_info);
+
+ kfree(context);
+}
+
+static struct platform_driver sar_driver = {
+ .probe = sar_probe,
+ .remove_new = sar_remove,
+ .driver = {
+ .name = DRVNAME,
+ .acpi_match_table = ACPI_PTR(sar_device_ids)
+ }
+};
+module_platform_driver(sar_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Platform device driver for INTEL MODEM BIOS SAR");
+MODULE_AUTHOR("Shravan Sudhakar <s.shravan@intel.com>");
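The driver's userspace contract is the two attributes above: intc_reg selects the regulatory domain (0 FCC, 1 CE, 2 ISED) and intc_data reports four space-separated integers (device mode, band, antenna and SAR table indices), with sysfs_notify() signalling changes. A sketch of the front-end side follows; the INTC1092:00 path is an assumption based on typical ACPI platform-device naming.

#include <stdio.h>

#define SAR_SYSFS "/sys/bus/platform/devices/INTC1092:00"

int main(void)
{
	int mode, band, antenna, sar;
	FILE *f;

	/* Select regulatory domain 1 (CE) before reading the table data */
	f = fopen(SAR_SYSFS "/intc_reg", "w");
	if (f) {
		fprintf(f, "1\n");
		fclose(f);
	}

	f = fopen(SAR_SYSFS "/intc_data", "r");
	if (!f)
		return 1;
	if (fscanf(f, "%d %d %d %d", &mode, &band, &antenna, &sar) == 4)
		printf("mode=%d band=%d antenna=%d sar=%d\n",
		       mode, band, antenna, sar);
	fclose(f);
	return 0;
}

A real front end would poll() the intc_data file with POLLPRI rather than read it once, since the driver calls sysfs_notify() on SAR events.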
diff --git a/drivers/platform/x86/intel/int1092/intel_sar.h b/drivers/platform/x86/intel/int1092/intel_sar.h
new file mode 100644
index 0000000000..b5310510b8
--- /dev/null
+++ b/drivers/platform/x86/intel/int1092/intel_sar.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, Intel Corporation.
+ */
+#ifndef INTEL_SAR_H
+#define INTEL_SAR_H
+
+#define COMMAND_ID_DEV_MODE 1
+#define COMMAND_ID_CONFIG_TABLE 2
+#define DRVNAME "intc_sar"
+#define MAX_DEV_MODES 50
+#define MAX_REGULATORY 3
+#define SAR_DSM_UUID "82737E72-3A33-4C45-A9C7-57C0411A5F13"
+#define SAR_EVENT 0x80
+#define SYSFS_DATANAME "intc_data"
+#define TOTAL_DATA 4
+
+/**
+ * Structure wwan_device_mode_info - device mode information
+ * Holds the data that needs to be passed to userspace.
+ * The data is updated from the BIOS sensor information.
+ * @device_mode: Specific mode of the device
+ * @bandtable_index: Index of RF band
+ * @antennatable_index: Index of antenna
+ * @sartable_index: Index of SAR
+ */
+struct wwan_device_mode_info {
+ int device_mode;
+ int bandtable_index;
+ int antennatable_index;
+ int sartable_index;
+};
+
+/**
+ * Structure wwan_device_mode_configuration - device configuration
+ * Holds the data that is configured and obtained on probe event.
+ * The data is updated from the BIOS sensor information.
+ * @version: Mode configuration version
+ * @total_dev_mode: Total number of device modes
+ * @device_mode_info: pointer to structure wwan_device_mode_info
+ */
+struct wwan_device_mode_configuration {
+ int version;
+ int total_dev_mode;
+ struct wwan_device_mode_info *device_mode_info;
+};
+
+/**
+ * Structure wwan_supported_info - userspace datastore
+ * Holds the data that is obtained from userspace
+ * The data is updated from userspace and sent back in the
+ * structure format described here.
+ * @reg_mode_needed: regulatory mode set by user for tests
+ * @bios_table_revision: Version of SAR table
+ * @num_supported_modes: Total supported modes based on reg_mode
+ */
+struct wwan_supported_info {
+ int reg_mode_needed;
+ int bios_table_revision;
+ int num_supported_modes;
+};
+
+/**
+ * Structure wwan_sar_context - context of SAR
+ * Holds the complete context for the lifetime of the driver.
+ * The context holds instances of the data used for different cases.
+ * @guid: _DSM GUID
+ * @handle: store acpi handle
+ * @reg_value: regulatory value
+ * Regulatory 0: FCC, 1: CE, 2: ISED
+ * @sar_device: platform_device type
+ * @supported_data: wwan_supported_info struct
+ * @sar_data: wwan_device_mode_info struct
+ * @config_data: wwan_device_mode_configuration array struct
+ */
+struct wwan_sar_context {
+ guid_t guid;
+ acpi_handle handle;
+ int reg_value;
+ struct platform_device *sar_device;
+ struct wwan_supported_info supported_data;
+ struct wwan_device_mode_info sar_data;
+ struct wwan_device_mode_configuration config_data[MAX_REGULATORY];
+};
+#endif /* INTEL_SAR_H */
diff --git a/drivers/platform/x86/intel/int3472/Kconfig b/drivers/platform/x86/intel/int3472/Kconfig
new file mode 100644
index 0000000000..17ae997f93
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/Kconfig
@@ -0,0 +1,31 @@
+config INTEL_SKL_INT3472
+ tristate "Intel SkyLake ACPI INT3472 Driver"
+ depends on ACPI
+ depends on COMMON_CLK
+ depends on I2C
+ depends on GPIOLIB
+ depends on LEDS_CLASS
+ depends on REGULATOR
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ This driver adds power controller support for the Intel SkyCam
+ devices found on the Intel SkyLake platforms.
+
+ The INT3472 is a camera power controller, a logical device found on
+ Intel Skylake-based systems that can map to different hardware
+ devices depending on the platform. On machines designed for Chrome OS,
+ it maps to a TPS68470 camera PMIC. On machines designed for Windows,
+ it maps to either a TPS68470 camera PMIC, a uP6641Q sensor PMIC, or a
+ set of discrete GPIOs and power gates.
+
+ If your device was designed for Chrome OS, this driver will provide
+ an ACPI OpRegion, which must be available before any of the devices
+ using it are probed. For this reason, you should select Y if your
+ device was designed for Chrome OS. For the same reason the
+ I2C_DESIGNWARE_PLATFORM option must be set to Y too.
+
+ Say Y or M here if you have a SkyLake device designed for use
+ with Windows or Chrome OS. Say N here if you are not sure.
+
+ The module will be named "intel-skl-int3472".
diff --git a/drivers/platform/x86/intel/int3472/Makefile b/drivers/platform/x86/intel/int3472/Makefile
new file mode 100644
index 0000000000..9f16cb5143
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_INTEL_SKL_INT3472) += intel_skl_int3472_discrete.o \
+ intel_skl_int3472_tps68470.o
+intel_skl_int3472_discrete-y := discrete.o clk_and_regulator.o led.o common.o
+intel_skl_int3472_tps68470-y := tps68470.o tps68470_board_data.o common.o
diff --git a/drivers/platform/x86/intel/int3472/clk_and_regulator.c b/drivers/platform/x86/intel/int3472/clk_and_regulator.c
new file mode 100644
index 0000000000..ef4b3141ef
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/clk_and_regulator.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Author: Dan Scally <djrscally@gmail.com> */
+
+#include <linux/acpi.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/dmi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/slab.h>
+
+#include "common.h"
+
+/*
+ * 82c0d13a-78c5-4244-9bb1-eb8b539a8d11
+ * This _DSM GUID allows controlling the sensor clk when it is not controlled
+ * through a GPIO.
+ */
+static const guid_t img_clk_guid =
+ GUID_INIT(0x82c0d13a, 0x78c5, 0x4244,
+ 0x9b, 0xb1, 0xeb, 0x8b, 0x53, 0x9a, 0x8d, 0x11);
+
+static void skl_int3472_enable_clk(struct int3472_clock *clk, int enable)
+{
+ struct int3472_discrete_device *int3472 = to_int3472_device(clk);
+ union acpi_object args[3];
+ union acpi_object argv4;
+
+ if (clk->ena_gpio) {
+ gpiod_set_value_cansleep(clk->ena_gpio, enable);
+ return;
+ }
+
+ args[0].integer.type = ACPI_TYPE_INTEGER;
+ args[0].integer.value = clk->imgclk_index;
+ args[1].integer.type = ACPI_TYPE_INTEGER;
+ args[1].integer.value = enable;
+ args[2].integer.type = ACPI_TYPE_INTEGER;
+ args[2].integer.value = 1;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 3;
+ argv4.package.elements = args;
+
+ acpi_evaluate_dsm(acpi_device_handle(int3472->adev), &img_clk_guid,
+ 0, 1, &argv4);
+}
+
+/*
+ * The regulators have to have .ops to be valid, but the only ops we actually
+ * support are .enable and .disable which are handled via .ena_gpiod. Pass an
+ * empty struct to clear the check without lying about capabilities.
+ */
+static const struct regulator_ops int3472_gpio_regulator_ops;
+
+static int skl_int3472_clk_prepare(struct clk_hw *hw)
+{
+ skl_int3472_enable_clk(to_int3472_clk(hw), 1);
+ return 0;
+}
+
+static void skl_int3472_clk_unprepare(struct clk_hw *hw)
+{
+ skl_int3472_enable_clk(to_int3472_clk(hw), 0);
+}
+
+static int skl_int3472_clk_enable(struct clk_hw *hw)
+{
+ /*
+ * We're just turning a GPIO on to enable the clock, an operation that
+ * has the potential to sleep. Given .enable() cannot sleep, but
+ * .prepare() can, we toggle the GPIO in .prepare() instead. Thus,
+ * nothing to do here.
+ */
+ return 0;
+}
+
+static void skl_int3472_clk_disable(struct clk_hw *hw)
+{
+ /* Likewise, nothing to do here... */
+}
+
+static unsigned int skl_int3472_get_clk_frequency(struct int3472_discrete_device *int3472)
+{
+ union acpi_object *obj;
+ unsigned int freq;
+
+ obj = skl_int3472_get_acpi_buffer(int3472->sensor, "SSDB");
+ if (IS_ERR(obj))
+ return 0; /* report rate as 0 on error */
+
+ if (obj->buffer.length < CIO2_SENSOR_SSDB_MCLKSPEED_OFFSET + sizeof(u32)) {
+ dev_err(int3472->dev, "The buffer is too small\n");
+ kfree(obj);
+ return 0;
+ }
+
+ freq = *(u32 *)(obj->buffer.pointer + CIO2_SENSOR_SSDB_MCLKSPEED_OFFSET);
+
+ kfree(obj);
+ return freq;
+}
+
+static unsigned long skl_int3472_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct int3472_clock *clk = to_int3472_clk(hw);
+
+ return clk->frequency;
+}
+
+static const struct clk_ops skl_int3472_clock_ops = {
+ .prepare = skl_int3472_clk_prepare,
+ .unprepare = skl_int3472_clk_unprepare,
+ .enable = skl_int3472_clk_enable,
+ .disable = skl_int3472_clk_disable,
+ .recalc_rate = skl_int3472_clk_recalc_rate,
+};
+
+int skl_int3472_register_dsm_clock(struct int3472_discrete_device *int3472)
+{
+ struct acpi_device *adev = int3472->adev;
+ struct clk_init_data init = {
+ .ops = &skl_int3472_clock_ops,
+ .flags = CLK_GET_RATE_NOCACHE,
+ };
+ int ret;
+
+ if (int3472->clock.cl)
+ return 0; /* A GPIO controlled clk has already been registered */
+
+ if (!acpi_check_dsm(adev->handle, &img_clk_guid, 0, BIT(1)))
+ return 0; /* DSM clock control is not available */
+
+ init.name = kasprintf(GFP_KERNEL, "%s-clk", acpi_dev_name(adev));
+ if (!init.name)
+ return -ENOMEM;
+
+ int3472->clock.frequency = skl_int3472_get_clk_frequency(int3472);
+ int3472->clock.clk_hw.init = &init;
+ int3472->clock.clk = clk_register(&adev->dev, &int3472->clock.clk_hw);
+ if (IS_ERR(int3472->clock.clk)) {
+ ret = PTR_ERR(int3472->clock.clk);
+ goto out_free_init_name;
+ }
+
+ int3472->clock.cl = clkdev_create(int3472->clock.clk, NULL, int3472->sensor_name);
+ if (!int3472->clock.cl) {
+ ret = -ENOMEM;
+ goto err_unregister_clk;
+ }
+
+ kfree(init.name);
+ return 0;
+
+err_unregister_clk:
+ clk_unregister(int3472->clock.clk);
+out_free_init_name:
+ kfree(init.name);
+ return ret;
+}
+
+int skl_int3472_register_gpio_clock(struct int3472_discrete_device *int3472,
+ struct acpi_resource_gpio *agpio, u32 polarity)
+{
+ char *path = agpio->resource_source.string_ptr;
+ struct clk_init_data init = {
+ .ops = &skl_int3472_clock_ops,
+ .flags = CLK_GET_RATE_NOCACHE,
+ };
+ int ret;
+
+ if (int3472->clock.cl)
+ return -EBUSY;
+
+ int3472->clock.ena_gpio = acpi_get_and_request_gpiod(path, agpio->pin_table[0],
+ "int3472,clk-enable");
+ if (IS_ERR(int3472->clock.ena_gpio)) {
+ ret = PTR_ERR(int3472->clock.ena_gpio);
+ int3472->clock.ena_gpio = NULL;
+ return dev_err_probe(int3472->dev, ret, "getting clk-enable GPIO\n");
+ }
+
+ if (polarity == GPIO_ACTIVE_LOW)
+ gpiod_toggle_active_low(int3472->clock.ena_gpio);
+
+ /* Ensure the pin is in output mode and non-active state */
+ gpiod_direction_output(int3472->clock.ena_gpio, 0);
+
+ init.name = kasprintf(GFP_KERNEL, "%s-clk",
+ acpi_dev_name(int3472->adev));
+ if (!init.name) {
+ ret = -ENOMEM;
+ goto out_put_gpio;
+ }
+
+ int3472->clock.frequency = skl_int3472_get_clk_frequency(int3472);
+
+ int3472->clock.clk_hw.init = &init;
+ int3472->clock.clk = clk_register(&int3472->adev->dev,
+ &int3472->clock.clk_hw);
+ if (IS_ERR(int3472->clock.clk)) {
+ ret = PTR_ERR(int3472->clock.clk);
+ goto out_free_init_name;
+ }
+
+ int3472->clock.cl = clkdev_create(int3472->clock.clk, NULL,
+ int3472->sensor_name);
+ if (!int3472->clock.cl) {
+ ret = -ENOMEM;
+ goto err_unregister_clk;
+ }
+
+ kfree(init.name);
+ return 0;
+
+err_unregister_clk:
+ clk_unregister(int3472->clock.clk);
+out_free_init_name:
+ kfree(init.name);
+out_put_gpio:
+ gpiod_put(int3472->clock.ena_gpio);
+
+ return ret;
+}
+
+void skl_int3472_unregister_clock(struct int3472_discrete_device *int3472)
+{
+ if (!int3472->clock.cl)
+ return;
+
+ clkdev_drop(int3472->clock.cl);
+ clk_unregister(int3472->clock.clk);
+ gpiod_put(int3472->clock.ena_gpio);
+}
+
+/*
+ * The INT3472 device is going to be the only supplier of a regulator for
+ * the sensor device. But unlike the clk framework the regulator framework
+ * does not allow matching by consumer-device-name only.
+ *
+ * Ideally all sensor drivers would use "avdd" as supply-id. But for drivers
+ * where this cannot be changed because another supply-id is already used in
+ * e.g. DeviceTree files an alias for the other supply-id can be added here.
+ *
+ * Do not forget to update GPIO_REGULATOR_SUPPLY_MAP_COUNT when changing this.
+ */
+static const char * const skl_int3472_regulator_map_supplies[] = {
+ "avdd",
+ "AVDD",
+};
+
+static_assert(ARRAY_SIZE(skl_int3472_regulator_map_supplies) ==
+ GPIO_REGULATOR_SUPPLY_MAP_COUNT);
+
+/*
+ * On some models there is a single GPIO regulator which is shared between
+ * sensors and only listed in the ACPI resources of one sensor.
+ * This DMI table contains the name of the second sensor. This is used to add
+ * entries for the second sensor to the supply_map.
+ */
+static const struct dmi_system_id skl_int3472_regulator_second_sensor[] = {
+ {
+ /* Lenovo Miix 510-12IKB */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "MIIX 510-12IKB"),
+ },
+ .driver_data = "i2c-OVTI2680:00",
+ },
+ { }
+};
+
+int skl_int3472_register_regulator(struct int3472_discrete_device *int3472,
+ struct acpi_resource_gpio *agpio)
+{
+ char *path = agpio->resource_source.string_ptr;
+ struct regulator_init_data init_data = { };
+ struct regulator_config cfg = { };
+ const char *second_sensor = NULL;
+ const struct dmi_system_id *id;
+ int i, j, ret;
+
+ id = dmi_first_match(skl_int3472_regulator_second_sensor);
+ if (id)
+ second_sensor = id->driver_data;
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(skl_int3472_regulator_map_supplies); i++) {
+ int3472->regulator.supply_map[j].supply = skl_int3472_regulator_map_supplies[i];
+ int3472->regulator.supply_map[j].dev_name = int3472->sensor_name;
+ j++;
+
+ if (second_sensor) {
+ int3472->regulator.supply_map[j].supply =
+ skl_int3472_regulator_map_supplies[i];
+ int3472->regulator.supply_map[j].dev_name = second_sensor;
+ j++;
+ }
+ }
+
+ init_data.constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS;
+ init_data.consumer_supplies = int3472->regulator.supply_map;
+ init_data.num_consumer_supplies = j;
+
+ snprintf(int3472->regulator.regulator_name,
+ sizeof(int3472->regulator.regulator_name), "%s-regulator",
+ acpi_dev_name(int3472->adev));
+ snprintf(int3472->regulator.supply_name,
+ GPIO_REGULATOR_SUPPLY_NAME_LENGTH, "supply-0");
+
+ int3472->regulator.rdesc = INT3472_REGULATOR(
+ int3472->regulator.regulator_name,
+ int3472->regulator.supply_name,
+ &int3472_gpio_regulator_ops);
+
+ int3472->regulator.gpio = acpi_get_and_request_gpiod(path, agpio->pin_table[0],
+ "int3472,regulator");
+ if (IS_ERR(int3472->regulator.gpio)) {
+ ret = PTR_ERR(int3472->regulator.gpio);
+ int3472->regulator.gpio = NULL;
+ return dev_err_probe(int3472->dev, ret, "getting regulator GPIO\n");
+ }
+
+ /* Ensure the pin is in output mode and non-active state */
+ gpiod_direction_output(int3472->regulator.gpio, 0);
+
+ cfg.dev = &int3472->adev->dev;
+ cfg.init_data = &init_data;
+ cfg.ena_gpiod = int3472->regulator.gpio;
+
+ int3472->regulator.rdev = regulator_register(int3472->dev,
+ &int3472->regulator.rdesc,
+ &cfg);
+ if (IS_ERR(int3472->regulator.rdev)) {
+ ret = PTR_ERR(int3472->regulator.rdev);
+ goto err_free_gpio;
+ }
+
+ return 0;
+
+err_free_gpio:
+ gpiod_put(int3472->regulator.gpio);
+
+ return ret;
+}
+
+void skl_int3472_unregister_regulator(struct int3472_discrete_device *int3472)
+{
+ regulator_unregister(int3472->regulator.rdev);
+ gpiod_put(int3472->regulator.gpio);
+}
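Because the clock is registered through clkdev with a NULL con_id keyed to the sensor's device name, and the regulator supply map uses the "avdd"/"AVDD" ids, the consumer side needs no board code. What follows is a sketch of what a sensor driver would do; the function is hypothetical and not part of this patch.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/regulator/consumer.h>

static int sensor_power_on(struct device *dev)
{
	struct regulator *avdd;
	struct clk *mclk;
	int ret;

	/* Matches the clkdev_create(..., NULL, int3472->sensor_name) above */
	mclk = devm_clk_get(dev, NULL);
	if (IS_ERR(mclk))
		return PTR_ERR(mclk);

	/* Matches the "avdd" entry in skl_int3472_regulator_map_supplies */
	avdd = devm_regulator_get(dev, "avdd");
	if (IS_ERR(avdd))
		return PTR_ERR(avdd);

	ret = regulator_enable(avdd);
	if (ret)
		return ret;

	/* clk_prepare_enable() toggles the clk-enable GPIO in .prepare() */
	ret = clk_prepare_enable(mclk);
	if (ret)
		regulator_disable(avdd);

	return ret;
}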
diff --git a/drivers/platform/x86/intel/int3472/common.c b/drivers/platform/x86/intel/int3472/common.c
new file mode 100644
index 0000000000..9db2bb0bbb
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/common.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Author: Dan Scally <djrscally@gmail.com> */
+
+#include <linux/acpi.h>
+#include <linux/slab.h>
+
+#include "common.h"
+
+union acpi_object *skl_int3472_get_acpi_buffer(struct acpi_device *adev, char *id)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_handle handle = adev->handle;
+ union acpi_object *obj;
+ acpi_status status;
+
+ status = acpi_evaluate_object(handle, id, NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return ERR_PTR(-ENODEV);
+
+ obj = buffer.pointer;
+ if (!obj)
+ return ERR_PTR(-ENODEV);
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ acpi_handle_err(handle, "%s object is not an ACPI buffer\n", id);
+ kfree(obj);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return obj;
+}
+
+int skl_int3472_fill_cldb(struct acpi_device *adev, struct int3472_cldb *cldb)
+{
+ union acpi_object *obj;
+ int ret;
+
+ obj = skl_int3472_get_acpi_buffer(adev, "CLDB");
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ if (obj->buffer.length > sizeof(*cldb)) {
+ acpi_handle_err(adev->handle, "The CLDB buffer is too large\n");
+ ret = -EINVAL;
+ goto out_free_obj;
+ }
+
+ memcpy(cldb, obj->buffer.pointer, obj->buffer.length);
+ ret = 0;
+
+out_free_obj:
+ kfree(obj);
+ return ret;
+}
+
+/* sensor_adev_ret may be NULL, name_ret must not be NULL */
+int skl_int3472_get_sensor_adev_and_name(struct device *dev,
+ struct acpi_device **sensor_adev_ret,
+ const char **name_ret)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ struct acpi_device *sensor;
+ int ret = 0;
+
+ sensor = acpi_dev_get_next_consumer_dev(adev, NULL);
+ if (!sensor) {
+ dev_err(dev, "INT3472 seems to have no dependents.\n");
+ return -ENODEV;
+ }
+
+ *name_ret = devm_kasprintf(dev, GFP_KERNEL, I2C_DEV_NAME_FORMAT,
+ acpi_dev_name(sensor));
+ if (!*name_ret)
+ ret = -ENOMEM;
+
+ if (ret == 0 && sensor_adev_ret)
+ *sensor_adev_ret = sensor;
+ else
+ acpi_dev_put(sensor);
+
+ return ret;
+}
diff --git a/drivers/platform/x86/intel/int3472/common.h b/drivers/platform/x86/intel/int3472/common.h
new file mode 100644
index 0000000000..9f29baa138
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/common.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Author: Dan Scally <djrscally@gmail.com> */
+
+#ifndef _INTEL_SKL_INT3472_H
+#define _INTEL_SKL_INT3472_H
+
+#include <linux/clk-provider.h>
+#include <linux/gpio/machine.h>
+#include <linux/leds.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/types.h>
+
+/* FIXME drop this once the I2C_DEV_NAME_FORMAT macro has been added to include/linux/i2c.h */
+#ifndef I2C_DEV_NAME_FORMAT
+#define I2C_DEV_NAME_FORMAT "i2c-%s"
+#endif
+
+/* PMIC GPIO Types */
+#define INT3472_GPIO_TYPE_RESET 0x00
+#define INT3472_GPIO_TYPE_POWERDOWN 0x01
+#define INT3472_GPIO_TYPE_POWER_ENABLE 0x0b
+#define INT3472_GPIO_TYPE_CLK_ENABLE 0x0c
+#define INT3472_GPIO_TYPE_PRIVACY_LED 0x0d
+
+#define INT3472_PDEV_MAX_NAME_LEN 23
+#define INT3472_MAX_SENSOR_GPIOS 3
+
+#define GPIO_REGULATOR_NAME_LENGTH 21
+#define GPIO_REGULATOR_SUPPLY_NAME_LENGTH 9
+#define GPIO_REGULATOR_SUPPLY_MAP_COUNT 2
+
+#define INT3472_LED_MAX_NAME_LEN 32
+
+#define CIO2_SENSOR_SSDB_MCLKSPEED_OFFSET 86
+
+#define INT3472_REGULATOR(_name, _supply, _ops) \
+ (const struct regulator_desc) { \
+ .name = _name, \
+ .supply_name = _supply, \
+ .type = REGULATOR_VOLTAGE, \
+ .ops = _ops, \
+ .owner = THIS_MODULE, \
+ }
+
+#define to_int3472_clk(hw) \
+ container_of(hw, struct int3472_clock, clk_hw)
+
+#define to_int3472_device(clk) \
+ container_of(clk, struct int3472_discrete_device, clock)
+
+struct acpi_device;
+struct i2c_client;
+struct platform_device;
+
+struct int3472_cldb {
+ u8 version;
+ /*
+ * control logic type
+ * 0: UNKNOWN
+ * 1: DISCRETE(CRD-D)
+ * 2: PMIC TPS68470
+ * 3: PMIC uP6641
+ */
+ u8 control_logic_type;
+ u8 control_logic_id;
+ u8 sensor_card_sku;
+ u8 reserved[10];
+ u8 clock_source;
+ u8 reserved2[17];
+};
+
+struct int3472_discrete_device {
+ struct acpi_device *adev;
+ struct device *dev;
+ struct acpi_device *sensor;
+ const char *sensor_name;
+
+ const struct int3472_sensor_config *sensor_config;
+
+ struct int3472_gpio_regulator {
+ /* SUPPLY_MAP_COUNT * 2 to make room for second sensor mappings */
+ struct regulator_consumer_supply supply_map[GPIO_REGULATOR_SUPPLY_MAP_COUNT * 2];
+ char regulator_name[GPIO_REGULATOR_NAME_LENGTH];
+ char supply_name[GPIO_REGULATOR_SUPPLY_NAME_LENGTH];
+ struct gpio_desc *gpio;
+ struct regulator_dev *rdev;
+ struct regulator_desc rdesc;
+ } regulator;
+
+ struct int3472_clock {
+ struct clk *clk;
+ struct clk_hw clk_hw;
+ struct clk_lookup *cl;
+ struct gpio_desc *ena_gpio;
+ u32 frequency;
+ u8 imgclk_index;
+ } clock;
+
+ struct int3472_pled {
+ struct led_classdev classdev;
+ struct led_lookup_data lookup;
+ char name[INT3472_LED_MAX_NAME_LEN];
+ struct gpio_desc *gpio;
+ } pled;
+
+ unsigned int ngpios; /* how many GPIOs have we seen */
+ unsigned int n_sensor_gpios; /* how many have we mapped to sensor */
+ struct gpiod_lookup_table gpios;
+};
+
+union acpi_object *skl_int3472_get_acpi_buffer(struct acpi_device *adev,
+ char *id);
+int skl_int3472_fill_cldb(struct acpi_device *adev, struct int3472_cldb *cldb);
+int skl_int3472_get_sensor_adev_and_name(struct device *dev,
+ struct acpi_device **sensor_adev_ret,
+ const char **name_ret);
+
+int skl_int3472_register_gpio_clock(struct int3472_discrete_device *int3472,
+ struct acpi_resource_gpio *agpio, u32 polarity);
+int skl_int3472_register_dsm_clock(struct int3472_discrete_device *int3472);
+void skl_int3472_unregister_clock(struct int3472_discrete_device *int3472);
+
+int skl_int3472_register_regulator(struct int3472_discrete_device *int3472,
+ struct acpi_resource_gpio *agpio);
+void skl_int3472_unregister_regulator(struct int3472_discrete_device *int3472);
+
+int skl_int3472_register_pled(struct int3472_discrete_device *int3472,
+ struct acpi_resource_gpio *agpio, u32 polarity);
+void skl_int3472_unregister_pled(struct int3472_discrete_device *int3472);
+
+#endif
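Since all members of struct int3472_cldb are u8, the struct maps directly onto the raw 32-byte ACPI buffer copied in by skl_int3472_fill_cldb(). The byte layout below is an editorial reading of the struct definition above, for reference:

/*
 * CLDB buffer layout (32 bytes total):
 *
 *   offset 0       version
 *   offset 1       control_logic_type (0 unknown, 1 discrete,
 *                                      2 TPS68470, 3 uP6641)
 *   offset 2       control_logic_id
 *   offset 3       sensor_card_sku
 *   offsets 4-13   reserved
 *   offset 14      clock_source
 *   offsets 15-31  reserved2
 */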
diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c
new file mode 100644
index 0000000000..e33c2d7597
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/discrete.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Author: Dan Scally <djrscally@gmail.com> */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/platform_device.h>
+#include <linux/uuid.h>
+
+#include "common.h"
+
+/*
+ * 79234640-9e10-4fea-a5c1-b5aa8b19756f
+ * This _DSM GUID returns information about the GPIO lines mapped to a
+ * discrete INT3472 device. Function number 1 returns a count of the GPIO
+ * lines that are mapped. Subsequent functions return 32-bit integers encoding
+ * information about the GPIO line, including its purpose.
+ */
+static const guid_t int3472_gpio_guid =
+ GUID_INIT(0x79234640, 0x9e10, 0x4fea,
+ 0xa5, 0xc1, 0xb5, 0xaa, 0x8b, 0x19, 0x75, 0x6f);
+
+#define INT3472_GPIO_DSM_TYPE GENMASK(7, 0)
+#define INT3472_GPIO_DSM_PIN GENMASK(15, 8)
+#define INT3472_GPIO_DSM_SENSOR_ON_VAL GENMASK(31, 24)
+
+/*
+ * 822ace8f-2814-4174-a56b-5f029fe079ee
+ * This _DSM GUID returns a string from the sensor device, which acts as a
+ * module identifier.
+ */
+static const guid_t cio2_sensor_module_guid =
+ GUID_INIT(0x822ace8f, 0x2814, 0x4174,
+ 0xa5, 0x6b, 0x5f, 0x02, 0x9f, 0xe0, 0x79, 0xee);
+
+static void skl_int3472_log_sensor_module_name(struct int3472_discrete_device *int3472)
+{
+ union acpi_object *obj;
+
+ obj = acpi_evaluate_dsm_typed(int3472->sensor->handle,
+ &cio2_sensor_module_guid, 0x00,
+ 0x01, NULL, ACPI_TYPE_STRING);
+ if (obj) {
+ dev_dbg(int3472->dev, "Sensor module id: '%s'\n", obj->string.pointer);
+ ACPI_FREE(obj);
+ }
+}
+
+static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int3472,
+ struct acpi_resource_gpio *agpio,
+ const char *func, u32 polarity)
+{
+ char *path = agpio->resource_source.string_ptr;
+ struct gpiod_lookup *table_entry;
+ struct acpi_device *adev;
+ acpi_handle handle;
+ acpi_status status;
+
+ if (int3472->n_sensor_gpios >= INT3472_MAX_SENSOR_GPIOS) {
+ dev_warn(int3472->dev, "Too many GPIOs mapped\n");
+ return -EINVAL;
+ }
+
+ status = acpi_get_handle(NULL, path, &handle);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+
+ adev = acpi_fetch_acpi_dev(handle);
+ if (!adev)
+ return -ENODEV;
+
+ table_entry = &int3472->gpios.table[int3472->n_sensor_gpios];
+ table_entry->key = acpi_dev_name(adev);
+ table_entry->chip_hwnum = agpio->pin_table[0];
+ table_entry->con_id = func;
+ table_entry->idx = 0;
+ table_entry->flags = polarity;
+
+ int3472->n_sensor_gpios++;
+
+ return 0;
+}
+
+static void int3472_get_func_and_polarity(u8 type, const char **func, u32 *polarity)
+{
+ switch (type) {
+ case INT3472_GPIO_TYPE_RESET:
+ *func = "reset";
+ *polarity = GPIO_ACTIVE_LOW;
+ break;
+ case INT3472_GPIO_TYPE_POWERDOWN:
+ *func = "powerdown";
+ *polarity = GPIO_ACTIVE_LOW;
+ break;
+ case INT3472_GPIO_TYPE_CLK_ENABLE:
+ *func = "clk-enable";
+ *polarity = GPIO_ACTIVE_HIGH;
+ break;
+ case INT3472_GPIO_TYPE_PRIVACY_LED:
+ *func = "privacy-led";
+ *polarity = GPIO_ACTIVE_HIGH;
+ break;
+ case INT3472_GPIO_TYPE_POWER_ENABLE:
+ *func = "power-enable";
+ *polarity = GPIO_ACTIVE_HIGH;
+ break;
+ default:
+ *func = "unknown";
+ *polarity = GPIO_ACTIVE_HIGH;
+ break;
+ }
+}
+
+/**
+ * skl_int3472_handle_gpio_resources: Map PMIC resources to consuming sensor
+ * @ares: A pointer to a &struct acpi_resource
+ * @data: A pointer to a &struct int3472_discrete_device
+ *
+ * This function handles GPIO resources declared against an INT3472
+ * ACPI device, by checking the value of the corresponding _DSM entry.
+ * The _DSM returns a 32-bit int, where the lowest byte represents the
+ * function of the GPIO pin:
+ *
+ * 0x00 Reset
+ * 0x01 Power down
+ * 0x0b Power enable
+ * 0x0c Clock enable
+ * 0x0d Privacy LED
+ *
+ * There are some known platform specific quirks where that does not quite
+ * hold up; for example where a pin with type 0x01 (Power down) is mapped to
+ * a sensor pin that performs a reset function or entries in _CRS and _DSM that
+ * do not actually correspond to a physical connection. These will be handled
+ * by the mapping sub-functions.
+ *
+ * GPIOs will either be mapped directly to the sensor device or else used
+ * to create clocks and regulators via the usual frameworks.
+ *
+ * Return:
+ * * 1 - To continue the loop
+ * * 0 - When all resources found are handled properly.
+ * * -EINVAL - If the resource is not a GPIO IO resource
+ * * -ENODEV - If the resource has no corresponding _DSM entry
+ * * -Other - Errors propagated from one of the sub-functions.
+ */
+static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
+ void *data)
+{
+ struct int3472_discrete_device *int3472 = data;
+ struct acpi_resource_gpio *agpio;
+ u8 active_value, pin, type;
+ union acpi_object *obj;
+ const char *err_msg;
+ const char *func;
+ u32 polarity;
+ int ret;
+
+ if (!acpi_gpio_get_io_resource(ares, &agpio))
+ return 1;
+
+ /*
+ * ngpios + 2 because the index of this _DSM function is 1-based and
+ * the first function is just a count.
+ */
+ obj = acpi_evaluate_dsm_typed(int3472->adev->handle,
+ &int3472_gpio_guid, 0x00,
+ int3472->ngpios + 2,
+ NULL, ACPI_TYPE_INTEGER);
+
+ if (!obj) {
+ dev_warn(int3472->dev, "No _DSM entry for GPIO pin %u\n",
+ agpio->pin_table[0]);
+ return 1;
+ }
+
+ type = FIELD_GET(INT3472_GPIO_DSM_TYPE, obj->integer.value);
+
+ int3472_get_func_and_polarity(type, &func, &polarity);
+
+ pin = FIELD_GET(INT3472_GPIO_DSM_PIN, obj->integer.value);
+ if (pin != agpio->pin_table[0])
+ dev_warn(int3472->dev, "%s %s pin number mismatch _DSM %d resource %d\n",
+ func, agpio->resource_source.string_ptr, pin,
+ agpio->pin_table[0]);
+
+ active_value = FIELD_GET(INT3472_GPIO_DSM_SENSOR_ON_VAL, obj->integer.value);
+ if (!active_value)
+ polarity ^= GPIO_ACTIVE_LOW;
+
+ dev_dbg(int3472->dev, "%s %s pin %d active-%s\n", func,
+ agpio->resource_source.string_ptr, agpio->pin_table[0],
+ (polarity == GPIO_ACTIVE_HIGH) ? "high" : "low");
+
+ switch (type) {
+ case INT3472_GPIO_TYPE_RESET:
+ case INT3472_GPIO_TYPE_POWERDOWN:
+ ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, func, polarity);
+ if (ret)
+ err_msg = "Failed to map GPIO pin to sensor\n";
+
+ break;
+ case INT3472_GPIO_TYPE_CLK_ENABLE:
+ ret = skl_int3472_register_gpio_clock(int3472, agpio, polarity);
+ if (ret)
+ err_msg = "Failed to register clock\n";
+
+ break;
+ case INT3472_GPIO_TYPE_PRIVACY_LED:
+ ret = skl_int3472_register_pled(int3472, agpio, polarity);
+ if (ret)
+ err_msg = "Failed to register LED\n";
+
+ break;
+ case INT3472_GPIO_TYPE_POWER_ENABLE:
+ ret = skl_int3472_register_regulator(int3472, agpio);
+ if (ret)
+ err_msg = "Failed to map regulator to sensor\n";
+
+ break;
+ default:
+ dev_warn(int3472->dev,
+ "GPIO type 0x%02x unknown; the sensor may not work\n",
+ type);
+ ret = 1;
+ break;
+ }
+
+ int3472->ngpios++;
+ ACPI_FREE(obj);
+
+ if (ret < 0)
+ return dev_err_probe(int3472->dev, ret, err_msg);
+
+ return ret;
+}
+
+static int skl_int3472_parse_crs(struct int3472_discrete_device *int3472)
+{
+ LIST_HEAD(resource_list);
+ int ret;
+
+ skl_int3472_log_sensor_module_name(int3472);
+
+ ret = acpi_dev_get_resources(int3472->adev, &resource_list,
+ skl_int3472_handle_gpio_resources,
+ int3472);
+ if (ret < 0)
+ return ret;
+
+ acpi_dev_free_resource_list(&resource_list);
+
+ /* Register _DSM based clock (no-op if a GPIO clock was already registered) */
+ ret = skl_int3472_register_dsm_clock(int3472);
+ if (ret < 0)
+ return ret;
+
+ int3472->gpios.dev_id = int3472->sensor_name;
+ gpiod_add_lookup_table(&int3472->gpios);
+
+ return 0;
+}
+
+static void skl_int3472_discrete_remove(struct platform_device *pdev)
+{
+ struct int3472_discrete_device *int3472 = platform_get_drvdata(pdev);
+
+ gpiod_remove_lookup_table(&int3472->gpios);
+
+ skl_int3472_unregister_clock(int3472);
+ skl_int3472_unregister_pled(int3472);
+ skl_int3472_unregister_regulator(int3472);
+}
+
+static int skl_int3472_discrete_probe(struct platform_device *pdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ struct int3472_discrete_device *int3472;
+ struct int3472_cldb cldb;
+ int ret;
+
+ ret = skl_int3472_fill_cldb(adev, &cldb);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't fill CLDB structure\n");
+ return ret;
+ }
+
+ if (cldb.control_logic_type != 1) {
+ dev_err(&pdev->dev, "Unsupported control logic type %u\n",
+ cldb.control_logic_type);
+ return -EINVAL;
+ }
+
+ /* Max num GPIOs we've seen plus a terminator */
+ int3472 = devm_kzalloc(&pdev->dev, struct_size(int3472, gpios.table,
+ INT3472_MAX_SENSOR_GPIOS + 1), GFP_KERNEL);
+ if (!int3472)
+ return -ENOMEM;
+
+ int3472->adev = adev;
+ int3472->dev = &pdev->dev;
+ platform_set_drvdata(pdev, int3472);
+ int3472->clock.imgclk_index = cldb.clock_source;
+
+ ret = skl_int3472_get_sensor_adev_and_name(&pdev->dev, &int3472->sensor,
+ &int3472->sensor_name);
+ if (ret)
+ return ret;
+
+ /*
+ * Initialising this list means we can call gpiod_remove_lookup_table()
+ * in failure paths without issue.
+ */
+ INIT_LIST_HEAD(&int3472->gpios.list);
+
+ ret = skl_int3472_parse_crs(int3472);
+ if (ret) {
+ skl_int3472_discrete_remove(pdev);
+ return ret;
+ }
+
+ acpi_dev_clear_dependencies(adev);
+ return 0;
+}
+
+static const struct acpi_device_id int3472_device_id[] = {
+ { "INT3472", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, int3472_device_id);
+
+static struct platform_driver int3472_discrete = {
+ .driver = {
+ .name = "int3472-discrete",
+ .acpi_match_table = int3472_device_id,
+ },
+ .probe = skl_int3472_discrete_probe,
+ .remove_new = skl_int3472_discrete_remove,
+};
+module_platform_driver(int3472_discrete);
+
+MODULE_DESCRIPTION("Intel SkyLake INT3472 ACPI Discrete Device Driver");
+MODULE_AUTHOR("Daniel Scally <djrscally@gmail.com>");
+MODULE_LICENSE("GPL v2");
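To make the bitfield handling above concrete, here is a worked decode of a hypothetical _DSM return value using the masks defined at the top of the file:

/*
 * Hypothetical _DSM value: val = 0x0100120c
 *
 *   FIELD_GET(INT3472_GPIO_DSM_TYPE, val)          = 0x0c -> "clk-enable"
 *   FIELD_GET(INT3472_GPIO_DSM_PIN, val)           = 0x12 -> pin 18
 *   FIELD_GET(INT3472_GPIO_DSM_SENSOR_ON_VAL, val) = 0x01 -> sensor on = 1
 *
 * The non-zero sensor-on value leaves the clk-enable polarity at its
 * default GPIO_ACTIVE_HIGH; a value of 0 would have XORed it to
 * GPIO_ACTIVE_LOW before the GPIO is registered as a clock.
 */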
diff --git a/drivers/platform/x86/intel/int3472/led.c b/drivers/platform/x86/intel/int3472/led.c
new file mode 100644
index 0000000000..bca1ce7d0d
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/led.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Author: Hans de Goede <hdegoede@redhat.com> */
+
+#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/leds.h>
+#include "common.h"
+
+static int int3472_pled_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct int3472_discrete_device *int3472 =
+ container_of(led_cdev, struct int3472_discrete_device, pled.classdev);
+
+ gpiod_set_value_cansleep(int3472->pled.gpio, brightness);
+ return 0;
+}
+
+int skl_int3472_register_pled(struct int3472_discrete_device *int3472,
+ struct acpi_resource_gpio *agpio, u32 polarity)
+{
+ char *p, *path = agpio->resource_source.string_ptr;
+ int ret;
+
+ if (int3472->pled.classdev.dev)
+ return -EBUSY;
+
+ int3472->pled.gpio = acpi_get_and_request_gpiod(path, agpio->pin_table[0],
+ "int3472,privacy-led");
+ if (IS_ERR(int3472->pled.gpio))
+ return dev_err_probe(int3472->dev, PTR_ERR(int3472->pled.gpio),
+ "getting privacy LED GPIO\n");
+
+ if (polarity == GPIO_ACTIVE_LOW)
+ gpiod_toggle_active_low(int3472->pled.gpio);
+
+ /* Ensure the pin is in output mode and non-active state */
+ gpiod_direction_output(int3472->pled.gpio, 0);
+
+ /* Generate the name, replacing the ':' in the ACPI devname with '_' */
+ snprintf(int3472->pled.name, sizeof(int3472->pled.name),
+ "%s::privacy_led", acpi_dev_name(int3472->sensor));
+ p = strchr(int3472->pled.name, ':');
+ if (p)
+ *p = '_';
+
+ int3472->pled.classdev.name = int3472->pled.name;
+ int3472->pled.classdev.max_brightness = 1;
+ int3472->pled.classdev.brightness_set_blocking = int3472_pled_set;
+
+ ret = led_classdev_register(int3472->dev, &int3472->pled.classdev);
+ if (ret)
+ goto err_free_gpio;
+
+ int3472->pled.lookup.provider = int3472->pled.name;
+ int3472->pled.lookup.dev_id = int3472->sensor_name;
+ int3472->pled.lookup.con_id = "privacy-led";
+ led_add_lookup(&int3472->pled.lookup);
+
+ return 0;
+
+err_free_gpio:
+ gpiod_put(int3472->pled.gpio);
+ return ret;
+}
+
+void skl_int3472_unregister_pled(struct int3472_discrete_device *int3472)
+{
+ if (IS_ERR_OR_NULL(int3472->pled.classdev.dev))
+ return;
+
+ led_remove_lookup(&int3472->pled.lookup);
+ led_classdev_unregister(&int3472->pled.classdev);
+ gpiod_put(int3472->pled.gpio);
+}
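The led_add_lookup() call above is what lets the sensor driver find the LED without any board file. A consumer-side sketch follows; the function is hypothetical sensor-driver code, assuming the kernel's led_get()/devm_led_get() lookup API:

#include <linux/device.h>
#include <linux/leds.h>

static int sensor_attach_privacy_led(struct device *dev)
{
	/* Resolved via the "privacy-led" con_id registered in the lookup */
	struct led_classdev *led = devm_led_get(dev, "privacy-led");

	if (IS_ERR(led))
		return PTR_ERR(led);

	/* e.g. turn the LED on while streaming, off when done */
	led_set_brightness(led, LED_ON);
	return 0;
}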
diff --git a/drivers/platform/x86/intel/int3472/tps68470.c b/drivers/platform/x86/intel/int3472/tps68470.c
new file mode 100644
index 0000000000..1e107fd49f
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/tps68470.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Author: Dan Scally <djrscally@gmail.com> */
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps68470.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/tps68470.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+
+#include "common.h"
+#include "tps68470.h"
+
+#define DESIGNED_FOR_CHROMEOS 1
+#define DESIGNED_FOR_WINDOWS 2
+
+#define TPS68470_WIN_MFD_CELL_COUNT 3
+
+static const struct mfd_cell tps68470_cros[] = {
+ { .name = "tps68470-gpio" },
+ { .name = "tps68470_pmic_opregion" },
+};
+
+static const struct regmap_config tps68470_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = TPS68470_REG_MAX,
+};
+
+static int tps68470_chip_init(struct device *dev, struct regmap *regmap)
+{
+ unsigned int version;
+ int ret;
+
+ /* Force software reset */
+ ret = regmap_write(regmap, TPS68470_REG_RESET, TPS68470_REG_RESET_MASK);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(regmap, TPS68470_REG_REVID, &version);
+ if (ret) {
+ dev_err(dev, "Failed to read revision register: %d\n", ret);
+ return ret;
+ }
+
+ dev_info(dev, "TPS68470 REVID: 0x%02x\n", version);
+
+ return 0;
+}
+
+/**
+ * skl_int3472_tps68470_calc_type: Check what platform a device is designed for
+ * @adev: A pointer to a &struct acpi_device
+ *
+ * Check for a CLDB buffer on the PMIC's adev. If present, we check the
+ * value of the control_logic_type field and follow one of the
+ * following scenarios:
+ *
+ * 1. No CLDB - likely ACPI tables designed for ChromeOS. We
+ * create platform devices for the GPIOs and OpRegion drivers.
+ *
+ * 2. CLDB, with control_logic_type = 2 - probably ACPI tables
+ * made for Windows 2-in-1 platforms. Register pdevs for GPIO,
+ * Clock and Regulator drivers to bind to.
+ *
+ * 3. Any other value in control_logic_type, we should never have
+ * gotten to this point; fail probe and return.
+ *
+ * Return:
+ * * 1 Device intended for ChromeOS
+ * * 2 Device intended for Windows
+ * * -EINVAL Where @adev has an object named CLDB but it does not conform to
+ * our expectations
+ */
+static int skl_int3472_tps68470_calc_type(struct acpi_device *adev)
+{
+ struct int3472_cldb cldb = { 0 };
+ int ret;
+
+ /*
+ * A CLDB buffer that exists, but which does not match our expectations
+ * should trigger an error so we don't blindly continue.
+ */
+ ret = skl_int3472_fill_cldb(adev, &cldb);
+ if (ret && ret != -ENODEV)
+ return ret;
+
+ if (ret)
+ return DESIGNED_FOR_CHROMEOS;
+
+ if (cldb.control_logic_type != 2)
+ return -EINVAL;
+
+ return DESIGNED_FOR_WINDOWS;
+}
+
+/*
+ * Return the number of consumers, because we'll need it later on to
+ * compute the .pdata_size of the clk cell via struct_size().
+ */
+static int
+skl_int3472_fill_clk_pdata(struct device *dev, struct tps68470_clk_platform_data **clk_pdata)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ struct acpi_device *consumer;
+ unsigned int n_consumers = 0;
+ const char *sensor_name;
+ unsigned int i = 0;
+
+ for_each_acpi_consumer_dev(adev, consumer)
+ n_consumers++;
+
+ if (!n_consumers) {
+ dev_err(dev, "INT3472 seems to have no dependents\n");
+ return -ENODEV;
+ }
+
+ *clk_pdata = devm_kzalloc(dev, struct_size(*clk_pdata, consumers, n_consumers),
+ GFP_KERNEL);
+ if (!*clk_pdata)
+ return -ENOMEM;
+
+ (*clk_pdata)->n_consumers = n_consumers;
+ i = 0;
+
+ for_each_acpi_consumer_dev(adev, consumer) {
+ sensor_name = devm_kasprintf(dev, GFP_KERNEL, I2C_DEV_NAME_FORMAT,
+ acpi_dev_name(consumer));
+ if (!sensor_name) {
+ acpi_dev_put(consumer);
+ return -ENOMEM;
+ }
+
+ (*clk_pdata)->consumers[i].consumer_dev_name = sensor_name;
+ i++;
+ }
+
+ return n_consumers;
+}
+
+static int skl_int3472_tps68470_probe(struct i2c_client *client)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&client->dev);
+ const struct int3472_tps68470_board_data *board_data;
+ struct tps68470_clk_platform_data *clk_pdata;
+ struct mfd_cell *cells;
+ struct regmap *regmap;
+ int n_consumers;
+ int device_type;
+ int ret;
+ int i;
+
+ n_consumers = skl_int3472_fill_clk_pdata(&client->dev, &clk_pdata);
+ if (n_consumers < 0)
+ return n_consumers;
+
+ regmap = devm_regmap_init_i2c(client, &tps68470_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Failed to create regmap: %ld\n", PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ i2c_set_clientdata(client, regmap);
+
+ ret = tps68470_chip_init(&client->dev, regmap);
+ if (ret < 0) {
+ dev_err(&client->dev, "TPS68470 init error %d\n", ret);
+ return ret;
+ }
+
+ device_type = skl_int3472_tps68470_calc_type(adev);
+ switch (device_type) {
+ case DESIGNED_FOR_WINDOWS:
+ board_data = int3472_tps68470_get_board_data(dev_name(&client->dev));
+ if (!board_data)
+ return dev_err_probe(&client->dev, -ENODEV, "No board-data found for this model\n");
+
+ cells = kcalloc(TPS68470_WIN_MFD_CELL_COUNT, sizeof(*cells), GFP_KERNEL);
+ if (!cells)
+ return -ENOMEM;
+
+ /*
+ * The order of the cells matters here! The clk must be first
+ * because the regulator depends on it. The gpios must be last,
+ * acpi_gpiochip_add() calls acpi_dev_clear_dependencies() and
+ * the clk + regulators must be ready when this happens.
+ */
+ cells[0].name = "tps68470-clk";
+ cells[0].platform_data = clk_pdata;
+ cells[0].pdata_size = struct_size(clk_pdata, consumers, n_consumers);
+ cells[1].name = "tps68470-regulator";
+ cells[1].platform_data = (void *)board_data->tps68470_regulator_pdata;
+ cells[1].pdata_size = sizeof(struct tps68470_regulator_platform_data);
+ cells[2].name = "tps68470-gpio";
+
+ for (i = 0; i < board_data->n_gpiod_lookups; i++)
+ gpiod_add_lookup_table(board_data->tps68470_gpio_lookup_tables[i]);
+
+ ret = devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_NONE,
+ cells, TPS68470_WIN_MFD_CELL_COUNT,
+ NULL, 0, NULL);
+ kfree(cells);
+
+ if (ret) {
+ for (i = 0; i < board_data->n_gpiod_lookups; i++)
+ gpiod_remove_lookup_table(board_data->tps68470_gpio_lookup_tables[i]);
+ }
+
+ break;
+ case DESIGNED_FOR_CHROMEOS:
+ ret = devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_NONE,
+ tps68470_cros, ARRAY_SIZE(tps68470_cros),
+ NULL, 0, NULL);
+ break;
+ default:
+		dev_err(&client->dev, "Failed to determine device type: %d\n", device_type);
+ return device_type;
+ }
+
+ /*
+ * No acpi_dev_clear_dependencies() here, since the acpi_gpiochip_add()
+ * for the GPIO cell already does this.
+ */
+
+ return ret;
+}
+
+static void skl_int3472_tps68470_remove(struct i2c_client *client)
+{
+ const struct int3472_tps68470_board_data *board_data;
+ int i;
+
+ board_data = int3472_tps68470_get_board_data(dev_name(&client->dev));
+ if (board_data) {
+ for (i = 0; i < board_data->n_gpiod_lookups; i++)
+ gpiod_remove_lookup_table(board_data->tps68470_gpio_lookup_tables[i]);
+ }
+}
+
+static const struct acpi_device_id int3472_device_id[] = {
+ { "INT3472", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, int3472_device_id);
+
+static struct i2c_driver int3472_tps68470 = {
+ .driver = {
+ .name = "int3472-tps68470",
+ .acpi_match_table = int3472_device_id,
+ },
+ .probe = skl_int3472_tps68470_probe,
+ .remove = skl_int3472_tps68470_remove,
+};
+module_i2c_driver(int3472_tps68470);
+
+MODULE_DESCRIPTION("Intel SkyLake INT3472 ACPI TPS68470 Device Driver");
+MODULE_AUTHOR("Daniel Scally <djrscally@gmail.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_SOFTDEP("pre: clk-tps68470 tps68470-regulator");
diff --git a/drivers/platform/x86/intel/int3472/tps68470.h b/drivers/platform/x86/intel/int3472/tps68470.h
new file mode 100644
index 0000000000..35915e7015
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/tps68470.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * TI TPS68470 PMIC platform data definition.
+ *
+ * Copyright (c) 2021 Red Hat Inc.
+ *
+ * Red Hat authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+
+#ifndef _INTEL_SKL_INT3472_TPS68470_H
+#define _INTEL_SKL_INT3472_TPS68470_H
+
+struct gpiod_lookup_table;
+struct tps68470_regulator_platform_data;
+
+struct int3472_tps68470_board_data {
+ const char *dev_name;
+ const struct tps68470_regulator_platform_data *tps68470_regulator_pdata;
+ unsigned int n_gpiod_lookups;
+ struct gpiod_lookup_table *tps68470_gpio_lookup_tables[];
+};
+
+const struct int3472_tps68470_board_data *int3472_tps68470_get_board_data(const char *dev_name);
+
+#endif
diff --git a/drivers/platform/x86/intel/int3472/tps68470_board_data.c b/drivers/platform/x86/intel/int3472/tps68470_board_data.c
new file mode 100644
index 0000000000..322237e056
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/tps68470_board_data.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI TPS68470 PMIC platform data definition.
+ *
+ * Copyright (c) 2021 Dan Scally <djrscally@gmail.com>
+ * Copyright (c) 2021 Red Hat Inc.
+ *
+ * Red Hat authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/dmi.h>
+#include <linux/gpio/machine.h>
+#include <linux/platform_data/tps68470.h>
+#include <linux/regulator/machine.h>
+#include "tps68470.h"
+
+static struct regulator_consumer_supply int347a_core_consumer_supplies[] = {
+ REGULATOR_SUPPLY("dvdd", "i2c-INT347A:00"),
+};
+
+static struct regulator_consumer_supply int347a_ana_consumer_supplies[] = {
+ REGULATOR_SUPPLY("avdd", "i2c-INT347A:00"),
+};
+
+static struct regulator_consumer_supply int347a_vcm_consumer_supplies[] = {
+ REGULATOR_SUPPLY("vdd", "i2c-INT347A:00-VCM"),
+};
+
+static struct regulator_consumer_supply int347a_vsio_consumer_supplies[] = {
+ REGULATOR_SUPPLY("dovdd", "i2c-INT347A:00"),
+ REGULATOR_SUPPLY("vsio", "i2c-INT347A:00-VCM"),
+ REGULATOR_SUPPLY("vddd", "i2c-INT347E:00"),
+};
+
+static struct regulator_consumer_supply int347a_aux1_consumer_supplies[] = {
+ REGULATOR_SUPPLY("vdda", "i2c-INT347E:00"),
+};
+
+static struct regulator_consumer_supply int347a_aux2_consumer_supplies[] = {
+ REGULATOR_SUPPLY("vdddo", "i2c-INT347E:00"),
+};
+
+static const struct regulator_init_data surface_go_tps68470_core_reg_init_data = {
+ .constraints = {
+ .min_uV = 1200000,
+ .max_uV = 1200000,
+ .apply_uV = true,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(int347a_core_consumer_supplies),
+ .consumer_supplies = int347a_core_consumer_supplies,
+};
+
+static const struct regulator_init_data surface_go_tps68470_ana_reg_init_data = {
+ .constraints = {
+ .min_uV = 2815200,
+ .max_uV = 2815200,
+ .apply_uV = true,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(int347a_ana_consumer_supplies),
+ .consumer_supplies = int347a_ana_consumer_supplies,
+};
+
+static const struct regulator_init_data surface_go_tps68470_vcm_reg_init_data = {
+ .constraints = {
+ .min_uV = 2815200,
+ .max_uV = 2815200,
+ .apply_uV = true,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(int347a_vcm_consumer_supplies),
+ .consumer_supplies = int347a_vcm_consumer_supplies,
+};
+
+/* Ensure the always-on VIO regulator has the same voltage as VSIO */
+static const struct regulator_init_data surface_go_tps68470_vio_reg_init_data = {
+ .constraints = {
+ .min_uV = 1800600,
+ .max_uV = 1800600,
+ .apply_uV = true,
+ .always_on = true,
+ },
+};
+
+static const struct regulator_init_data surface_go_tps68470_vsio_reg_init_data = {
+ .constraints = {
+ .min_uV = 1800600,
+ .max_uV = 1800600,
+ .apply_uV = true,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(int347a_vsio_consumer_supplies),
+ .consumer_supplies = int347a_vsio_consumer_supplies,
+};
+
+static const struct regulator_init_data surface_go_tps68470_aux1_reg_init_data = {
+ .constraints = {
+ .min_uV = 2815200,
+ .max_uV = 2815200,
+ .apply_uV = 1,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(int347a_aux1_consumer_supplies),
+ .consumer_supplies = int347a_aux1_consumer_supplies,
+};
+
+static const struct regulator_init_data surface_go_tps68470_aux2_reg_init_data = {
+ .constraints = {
+ .min_uV = 1800600,
+ .max_uV = 1800600,
+ .apply_uV = 1,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(int347a_aux2_consumer_supplies),
+ .consumer_supplies = int347a_aux2_consumer_supplies,
+};
+
+static const struct tps68470_regulator_platform_data surface_go_tps68470_pdata = {
+ .reg_init_data = {
+ [TPS68470_CORE] = &surface_go_tps68470_core_reg_init_data,
+ [TPS68470_ANA] = &surface_go_tps68470_ana_reg_init_data,
+ [TPS68470_VCM] = &surface_go_tps68470_vcm_reg_init_data,
+ [TPS68470_VIO] = &surface_go_tps68470_vio_reg_init_data,
+ [TPS68470_VSIO] = &surface_go_tps68470_vsio_reg_init_data,
+ [TPS68470_AUX1] = &surface_go_tps68470_aux1_reg_init_data,
+ [TPS68470_AUX2] = &surface_go_tps68470_aux2_reg_init_data,
+ },
+};
+
+static struct gpiod_lookup_table surface_go_int347a_gpios = {
+ .dev_id = "i2c-INT347A:00",
+ .table = {
+ GPIO_LOOKUP("tps68470-gpio", 9, "reset", GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP("tps68470-gpio", 7, "powerdown", GPIO_ACTIVE_LOW),
+ { }
+ }
+};
+
+static struct gpiod_lookup_table surface_go_int347e_gpios = {
+ .dev_id = "i2c-INT347E:00",
+ .table = {
+ GPIO_LOOKUP("tps68470-gpio", 5, "enable", GPIO_ACTIVE_HIGH),
+ { }
+ }
+};
+
+static const struct int3472_tps68470_board_data surface_go_tps68470_board_data = {
+ .dev_name = "i2c-INT3472:05",
+ .tps68470_regulator_pdata = &surface_go_tps68470_pdata,
+ .n_gpiod_lookups = 2,
+ .tps68470_gpio_lookup_tables = {
+ &surface_go_int347a_gpios,
+ &surface_go_int347e_gpios,
+ },
+};
+
+static const struct int3472_tps68470_board_data surface_go3_tps68470_board_data = {
+ .dev_name = "i2c-INT3472:01",
+ .tps68470_regulator_pdata = &surface_go_tps68470_pdata,
+ .n_gpiod_lookups = 2,
+ .tps68470_gpio_lookup_tables = {
+ &surface_go_int347a_gpios,
+ &surface_go_int347e_gpios,
+ },
+};
+
+static const struct dmi_system_id int3472_tps68470_board_data_table[] = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Go"),
+ },
+ .driver_data = (void *)&surface_go_tps68470_board_data,
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Go 2"),
+ },
+ .driver_data = (void *)&surface_go_tps68470_board_data,
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"),
+ },
+ .driver_data = (void *)&surface_go3_tps68470_board_data,
+ },
+ { }
+};
+
+const struct int3472_tps68470_board_data *int3472_tps68470_get_board_data(const char *dev_name)
+{
+ const struct int3472_tps68470_board_data *board_data;
+ const struct dmi_system_id *match;
+
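+	/*
+	 * dmi_first_match() returns the first matching entry at or after the
+	 * given table position, so restarting the scan at match + 1 visits
+	 * every DMI match; dev_name then selects the right board data when
+	 * models share DMI strings but use different INT3472 device names.
+	 */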
+ for (match = dmi_first_match(int3472_tps68470_board_data_table);
+ match;
+ match = dmi_first_match(match + 1)) {
+ board_data = match->driver_data;
+ if (strcmp(board_data->dev_name, dev_name) == 0)
+ return board_data;
+ }
+
+ return NULL;
+}
diff --git a/drivers/platform/x86/intel/ishtp_eclite.c b/drivers/platform/x86/intel/ishtp_eclite.c
new file mode 100644
index 0000000000..93ac8b2dbf
--- /dev/null
+++ b/drivers/platform/x86/intel/ishtp_eclite.c
@@ -0,0 +1,703 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel ECLite opregion driver for talking to ECLite firmware running on
+ * Intel Integrated Sensor Hub (ISH) using ISH Transport Protocol (ISHTP)
+ *
+ * Copyright (c) 2021, Intel Corporation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/intel-ish-client-if.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/types.h>
+#include <linux/uuid.h>
+#include <linux/uaccess.h>
+
+#define ECLITE_DATA_OPREGION_ID 0x9E
+#define ECLITE_CMD_OPREGION_ID 0x9F
+
+#define ECL_MSG_DATA 0x1
+#define ECL_MSG_EVENT 0x2
+
+#define ECL_ISH_READ 0x1
+#define ECL_ISH_WRITE 0x2
+#define ECL_ISH_HEADER_VERSION 0
+
+#define ECL_CL_RX_RING_SIZE 16
+#define ECL_CL_TX_RING_SIZE 8
+
+#define ECL_DATA_OPR_BUFLEN 384
+#define ECL_EVENTS_NOTIFY 333
+
+#define cmd_opr_offsetof(element) offsetof(struct opregion_cmd, element)
+#define cl_data_to_dev(opr_dev) ishtp_device((opr_dev)->cl_device)
+
+#ifndef BITS_TO_BYTES
+#define BITS_TO_BYTES(x) ((x) / 8)
+#endif
+
+struct opregion_cmd {
+ unsigned int command;
+ unsigned int offset;
+ unsigned int length;
+ unsigned int event_id;
+};
+
+struct opregion_data {
+ char data[ECL_DATA_OPR_BUFLEN];
+};
+
+struct opregion_context {
+ struct opregion_cmd cmd_area;
+ struct opregion_data data_area;
+};
+
+struct ecl_message_header {
+ unsigned int version:2;
+ unsigned int data_type:2;
+ unsigned int request_type:2;
+ unsigned int offset:9;
+ unsigned int data_len:9;
+ unsigned int event:8;
+};
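+
+/*
+ * The bitfields above pack into a single 32-bit word (2 + 2 + 2 + 9 + 9 + 8
+ * bits); the 9-bit offset and data_len fields cover the 384-byte data area.
+ */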
+
+struct ecl_message {
+ struct ecl_message_header header;
+ char payload[ECL_DATA_OPR_BUFLEN];
+};
+
+struct ishtp_opregion_dev {
+ struct opregion_context opr_context;
+ struct ishtp_cl *ecl_ishtp_cl;
+ struct ishtp_cl_device *cl_device;
+ struct ishtp_fw_client *fw_client;
+ struct ishtp_cl_rb *rb;
+ struct acpi_device *adev;
+ unsigned int dsm_event_id;
+ unsigned int ish_link_ready;
+ unsigned int ish_read_done;
+ unsigned int acpi_init_done;
+ wait_queue_head_t read_wait;
+ struct work_struct event_work;
+ struct work_struct reset_work;
+ /* lock for opregion context */
+ struct mutex lock;
+};
+
+/* eclite ishtp client UUID: 6a19cc4b-d760-4de3-b14d-f25ebd0fbcd9 */
+static const struct ishtp_device_id ecl_ishtp_id_table[] = {
+ { .guid = GUID_INIT(0x6a19cc4b, 0xd760, 0x4de3,
+ 0xb1, 0x4d, 0xf2, 0x5e, 0xbd, 0xf, 0xbc, 0xd9), },
+ { }
+};
+MODULE_DEVICE_TABLE(ishtp, ecl_ishtp_id_table);
+
+/* ACPI DSM UUID: 91d936a7-1f01-49c6-a6b4-72f00ad8d8a5 */
+static const guid_t ecl_acpi_guid =
+ GUID_INIT(0x91d936a7, 0x1f01, 0x49c6, 0xa6,
+ 0xb4, 0x72, 0xf0, 0x0a, 0xd8, 0xd8, 0xa5);
+
+/**
+ * ecl_ish_cl_read() - Read data from eclite FW
+ *
+ * @opr_dev: pointer to opregion device
+ *
+ * This function issues a read request to eclite FW and waits until it
+ * receives a response. When a response is received, the read data is
+ * copied to the opregion buffer.
+ */
+static int ecl_ish_cl_read(struct ishtp_opregion_dev *opr_dev)
+{
+ struct ecl_message_header header;
+ int len, rv;
+
+ if (!opr_dev->ish_link_ready)
+ return -EIO;
+
+ if ((opr_dev->opr_context.cmd_area.offset +
+ opr_dev->opr_context.cmd_area.length) > ECL_DATA_OPR_BUFLEN) {
+ return -EINVAL;
+ }
+
+ header.version = ECL_ISH_HEADER_VERSION;
+ header.data_type = ECL_MSG_DATA;
+ header.request_type = ECL_ISH_READ;
+ header.offset = opr_dev->opr_context.cmd_area.offset;
+ header.data_len = opr_dev->opr_context.cmd_area.length;
+ header.event = opr_dev->opr_context.cmd_area.event_id;
+ len = sizeof(header);
+
+ opr_dev->ish_read_done = false;
+ rv = ishtp_cl_send(opr_dev->ecl_ishtp_cl, (uint8_t *)&header, len);
+ if (rv) {
+ dev_err(cl_data_to_dev(opr_dev), "ish-read : send failed\n");
+ return -EIO;
+ }
+
+ dev_dbg(cl_data_to_dev(opr_dev),
+ "[ish_rd] Req: off : %x, len : %x\n",
+ header.offset,
+ header.data_len);
+
+ rv = wait_event_interruptible_timeout(opr_dev->read_wait,
+ opr_dev->ish_read_done,
+ 2 * HZ);
+ if (!rv) {
+ dev_err(cl_data_to_dev(opr_dev),
+ "[ish_rd] No response from firmware\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * ecl_ish_cl_write() - Write data to eclite FW
+ *
+ * @opr_dev: pointer to opregion device
+ *
+ * This function sends a write request to eclite FW, copying the payload
+ * from the opregion data buffer into the outgoing message.
+ */
+static int ecl_ish_cl_write(struct ishtp_opregion_dev *opr_dev)
+{
+ struct ecl_message message;
+ int len;
+
+ if (!opr_dev->ish_link_ready)
+ return -EIO;
+
+ if ((opr_dev->opr_context.cmd_area.offset +
+ opr_dev->opr_context.cmd_area.length) > ECL_DATA_OPR_BUFLEN) {
+ return -EINVAL;
+ }
+
+ message.header.version = ECL_ISH_HEADER_VERSION;
+ message.header.data_type = ECL_MSG_DATA;
+ message.header.request_type = ECL_ISH_WRITE;
+ message.header.offset = opr_dev->opr_context.cmd_area.offset;
+ message.header.data_len = opr_dev->opr_context.cmd_area.length;
+ message.header.event = opr_dev->opr_context.cmd_area.event_id;
+ len = sizeof(struct ecl_message_header) + message.header.data_len;
+
+ memcpy(message.payload,
+ opr_dev->opr_context.data_area.data + message.header.offset,
+ message.header.data_len);
+
+ dev_dbg(cl_data_to_dev(opr_dev),
+ "[ish_wr] off : %x, len : %x\n",
+ message.header.offset,
+ message.header.data_len);
+
+ return ishtp_cl_send(opr_dev->ecl_ishtp_cl, (uint8_t *)&message, len);
+}
+
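+/*
+ * Command opregion handler. As seen from AML, the implied protocol is:
+ * write the offset, length and event_id fields first, then write the
+ * command field last, which triggers the actual ISHTP transfer.
+ */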
+static acpi_status
+ecl_opregion_cmd_handler(u32 function, acpi_physical_address address,
+ u32 bits, u64 *value64,
+ void *handler_context, void *region_context)
+{
+ struct ishtp_opregion_dev *opr_dev;
+ struct opregion_cmd *cmd;
+ acpi_status status = AE_OK;
+
+ if (!region_context || !value64)
+ return AE_BAD_PARAMETER;
+
+ if (function == ACPI_READ)
+ return AE_ERROR;
+
+ opr_dev = (struct ishtp_opregion_dev *)region_context;
+
+ mutex_lock(&opr_dev->lock);
+
+ cmd = &opr_dev->opr_context.cmd_area;
+
+ switch (address) {
+ case cmd_opr_offsetof(command):
+ cmd->command = (u32)*value64;
+
+ if (cmd->command == ECL_ISH_READ)
+ status = ecl_ish_cl_read(opr_dev);
+ else if (cmd->command == ECL_ISH_WRITE)
+ status = ecl_ish_cl_write(opr_dev);
+ else
+ status = AE_ERROR;
+ break;
+ case cmd_opr_offsetof(offset):
+ cmd->offset = (u32)*value64;
+ break;
+ case cmd_opr_offsetof(length):
+ cmd->length = (u32)*value64;
+ break;
+ case cmd_opr_offsetof(event_id):
+ cmd->event_id = (u32)*value64;
+ break;
+ default:
+ status = AE_ERROR;
+ }
+
+ mutex_unlock(&opr_dev->lock);
+
+ return status;
+}
+
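+/*
+ * Data opregion handler. ACPI field accesses arrive 8, 16, 32 or 64 bits
+ * wide, so BITS_TO_BYTES() is exact for every access seen here.
+ */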
+static acpi_status
+ecl_opregion_data_handler(u32 function, acpi_physical_address address,
+ u32 bits, u64 *value64,
+ void *handler_context, void *region_context)
+{
+ struct ishtp_opregion_dev *opr_dev;
+ unsigned int bytes = BITS_TO_BYTES(bits);
+ void *data_addr;
+
+ if (!region_context || !value64)
+ return AE_BAD_PARAMETER;
+
+ if (address + bytes > ECL_DATA_OPR_BUFLEN)
+ return AE_BAD_PARAMETER;
+
+ opr_dev = (struct ishtp_opregion_dev *)region_context;
+
+ mutex_lock(&opr_dev->lock);
+
+ data_addr = &opr_dev->opr_context.data_area.data[address];
+
+ if (function == ACPI_READ) {
+ memcpy(value64, data_addr, bytes);
+ } else if (function == ACPI_WRITE) {
+ memcpy(data_addr, value64, bytes);
+ } else {
+ mutex_unlock(&opr_dev->lock);
+ return AE_BAD_PARAMETER;
+ }
+
+ mutex_unlock(&opr_dev->lock);
+
+ return AE_OK;
+}
+
+static int acpi_find_eclite_device(struct ishtp_opregion_dev *opr_dev)
+{
+ struct acpi_device *adev;
+
+ /* Find ECLite device and save reference */
+ adev = acpi_dev_get_first_match_dev("INTC1035", NULL, -1);
+ if (!adev) {
+ dev_err(cl_data_to_dev(opr_dev), "eclite ACPI device not found\n");
+ return -ENODEV;
+ }
+
+ opr_dev->adev = adev;
+
+ return 0;
+}
+
+static int acpi_opregion_init(struct ishtp_opregion_dev *opr_dev)
+{
+ acpi_status status;
+
+ status = acpi_install_address_space_handler(opr_dev->adev->handle,
+ ECLITE_CMD_OPREGION_ID,
+ ecl_opregion_cmd_handler,
+ NULL, opr_dev);
+ if (ACPI_FAILURE(status)) {
+ dev_err(cl_data_to_dev(opr_dev),
+ "cmd space handler install failed\n");
+ return -ENODEV;
+ }
+
+ status = acpi_install_address_space_handler(opr_dev->adev->handle,
+ ECLITE_DATA_OPREGION_ID,
+ ecl_opregion_data_handler,
+ NULL, opr_dev);
+ if (ACPI_FAILURE(status)) {
+ dev_err(cl_data_to_dev(opr_dev),
+ "data space handler install failed\n");
+
+ acpi_remove_address_space_handler(opr_dev->adev->handle,
+ ECLITE_CMD_OPREGION_ID,
+ ecl_opregion_cmd_handler);
+ return -ENODEV;
+ }
+ opr_dev->acpi_init_done = true;
+
+ dev_dbg(cl_data_to_dev(opr_dev), "Opregion handlers are installed\n");
+
+ return 0;
+}
+
+static void acpi_opregion_deinit(struct ishtp_opregion_dev *opr_dev)
+{
+ acpi_remove_address_space_handler(opr_dev->adev->handle,
+ ECLITE_CMD_OPREGION_ID,
+ ecl_opregion_cmd_handler);
+
+ acpi_remove_address_space_handler(opr_dev->adev->handle,
+ ECLITE_DATA_OPREGION_ID,
+ ecl_opregion_data_handler);
+ opr_dev->acpi_init_done = false;
+}
+
+static void ecl_acpi_invoke_dsm(struct work_struct *work)
+{
+ struct ishtp_opregion_dev *opr_dev;
+ union acpi_object *obj;
+
+ opr_dev = container_of(work, struct ishtp_opregion_dev, event_work);
+ if (!opr_dev->acpi_init_done)
+ return;
+
+ obj = acpi_evaluate_dsm(opr_dev->adev->handle, &ecl_acpi_guid, 0,
+ opr_dev->dsm_event_id, NULL);
+ if (!obj) {
+ dev_warn(cl_data_to_dev(opr_dev), "_DSM fn call failed\n");
+ return;
+ }
+
+ dev_dbg(cl_data_to_dev(opr_dev), "Exec DSM function code: %d success\n",
+ opr_dev->dsm_event_id);
+
+ ACPI_FREE(obj);
+}
+
+static void ecl_ish_process_rx_data(struct ishtp_opregion_dev *opr_dev)
+{
+ struct ecl_message *message =
+ (struct ecl_message *)opr_dev->rb->buffer.data;
+
+ dev_dbg(cl_data_to_dev(opr_dev),
+ "[ish_rd] Resp: off : %x, len : %x\n",
+ message->header.offset,
+ message->header.data_len);
+
+ if ((message->header.offset + message->header.data_len) >
+ ECL_DATA_OPR_BUFLEN) {
+ return;
+ }
+
+ memcpy(opr_dev->opr_context.data_area.data + message->header.offset,
+ message->payload, message->header.data_len);
+
+ opr_dev->ish_read_done = true;
+ wake_up_interruptible(&opr_dev->read_wait);
+}
+
+static void ecl_ish_process_rx_event(struct ishtp_opregion_dev *opr_dev)
+{
+ struct ecl_message_header *header =
+ (struct ecl_message_header *)opr_dev->rb->buffer.data;
+
+ dev_dbg(cl_data_to_dev(opr_dev),
+ "[ish_ev] Evt received: %8x\n", header->event);
+
+ opr_dev->dsm_event_id = header->event;
+ schedule_work(&opr_dev->event_work);
+}
+
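+/*
+ * Event notification is toggled by writing a one-byte flag at offset
+ * ECL_EVENTS_NOTIFY (333) of the ECLite data area, reusing the normal
+ * write message format.
+ */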
+static int ecl_ish_cl_enable_events(struct ishtp_opregion_dev *opr_dev,
+ bool config_enable)
+{
+ struct ecl_message message;
+ int len;
+
+ message.header.version = ECL_ISH_HEADER_VERSION;
+ message.header.data_type = ECL_MSG_DATA;
+ message.header.request_type = ECL_ISH_WRITE;
+ message.header.offset = ECL_EVENTS_NOTIFY;
+ message.header.data_len = 1;
+ message.payload[0] = config_enable;
+
+ len = sizeof(struct ecl_message_header) + message.header.data_len;
+
+ return ishtp_cl_send(opr_dev->ecl_ishtp_cl, (uint8_t *)&message, len);
+}
+
+static void ecl_ishtp_cl_event_cb(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *ecl_ishtp_cl = ishtp_get_drvdata(cl_device);
+ struct ishtp_opregion_dev *opr_dev;
+ struct ecl_message_header *header;
+ struct ishtp_cl_rb *rb;
+
+ opr_dev = ishtp_get_client_data(ecl_ishtp_cl);
+ while ((rb = ishtp_cl_rx_get_rb(opr_dev->ecl_ishtp_cl)) != NULL) {
+ opr_dev->rb = rb;
+ header = (struct ecl_message_header *)rb->buffer.data;
+
+ if (header->data_type == ECL_MSG_DATA)
+ ecl_ish_process_rx_data(opr_dev);
+ else if (header->data_type == ECL_MSG_EVENT)
+ ecl_ish_process_rx_event(opr_dev);
+ else
+ /* Got an event with wrong data_type, ignore it */
+ dev_err(cl_data_to_dev(opr_dev),
+ "[ish_cb] Received wrong data_type\n");
+
+ ishtp_cl_io_rb_recycle(rb);
+ }
+}
+
+static int ecl_ishtp_cl_init(struct ishtp_cl *ecl_ishtp_cl)
+{
+ struct ishtp_opregion_dev *opr_dev =
+ ishtp_get_client_data(ecl_ishtp_cl);
+ struct ishtp_fw_client *fw_client;
+ struct ishtp_device *dev;
+ int rv;
+
+ rv = ishtp_cl_link(ecl_ishtp_cl);
+ if (rv) {
+ dev_err(cl_data_to_dev(opr_dev), "ishtp_cl_link failed\n");
+ return rv;
+ }
+
+ dev = ishtp_get_ishtp_device(ecl_ishtp_cl);
+
+ /* Connect to FW client */
+ ishtp_set_tx_ring_size(ecl_ishtp_cl, ECL_CL_TX_RING_SIZE);
+ ishtp_set_rx_ring_size(ecl_ishtp_cl, ECL_CL_RX_RING_SIZE);
+
+ fw_client = ishtp_fw_cl_get_client(dev, &ecl_ishtp_id_table[0].guid);
+ if (!fw_client) {
+ dev_err(cl_data_to_dev(opr_dev), "fw client not found\n");
+ return -ENOENT;
+ }
+
+ ishtp_cl_set_fw_client_id(ecl_ishtp_cl,
+ ishtp_get_fw_client_id(fw_client));
+
+ ishtp_set_connection_state(ecl_ishtp_cl, ISHTP_CL_CONNECTING);
+
+ rv = ishtp_cl_connect(ecl_ishtp_cl);
+ if (rv) {
+ dev_err(cl_data_to_dev(opr_dev), "client connect failed\n");
+
+ ishtp_cl_unlink(ecl_ishtp_cl);
+ return rv;
+ }
+
+ dev_dbg(cl_data_to_dev(opr_dev), "Host connected to fw client\n");
+
+ return 0;
+}
+
+static void ecl_ishtp_cl_deinit(struct ishtp_cl *ecl_ishtp_cl)
+{
+ ishtp_cl_unlink(ecl_ishtp_cl);
+ ishtp_cl_flush_queues(ecl_ishtp_cl);
+ ishtp_cl_free(ecl_ishtp_cl);
+}
+
+static void ecl_ishtp_cl_reset_handler(struct work_struct *work)
+{
+ struct ishtp_opregion_dev *opr_dev;
+ struct ishtp_cl_device *cl_device;
+ struct ishtp_cl *ecl_ishtp_cl;
+ int rv;
+ int retry;
+
+ opr_dev = container_of(work, struct ishtp_opregion_dev, reset_work);
+
+ opr_dev->ish_link_ready = false;
+
+ cl_device = opr_dev->cl_device;
+ ecl_ishtp_cl = opr_dev->ecl_ishtp_cl;
+
+ ecl_ishtp_cl_deinit(ecl_ishtp_cl);
+
+ ecl_ishtp_cl = ishtp_cl_allocate(cl_device);
+ if (!ecl_ishtp_cl)
+ return;
+
+ ishtp_set_drvdata(cl_device, ecl_ishtp_cl);
+ ishtp_set_client_data(ecl_ishtp_cl, opr_dev);
+
+ opr_dev->ecl_ishtp_cl = ecl_ishtp_cl;
+
+ for (retry = 0; retry < 3; ++retry) {
+ rv = ecl_ishtp_cl_init(ecl_ishtp_cl);
+ if (!rv)
+ break;
+ }
+ if (rv) {
+ ishtp_cl_free(ecl_ishtp_cl);
+ opr_dev->ecl_ishtp_cl = NULL;
+ dev_err(cl_data_to_dev(opr_dev),
+ "[ish_rst] Reset failed. Link not ready.\n");
+ return;
+ }
+
+ ishtp_register_event_cb(cl_device, ecl_ishtp_cl_event_cb);
+ dev_info(cl_data_to_dev(opr_dev),
+ "[ish_rst] Reset Success. Link ready.\n");
+
+ opr_dev->ish_link_ready = true;
+
+ if (opr_dev->acpi_init_done)
+ return;
+
+ rv = acpi_opregion_init(opr_dev);
+ if (rv) {
+ dev_err(cl_data_to_dev(opr_dev),
+ "ACPI opregion init failed\n");
+ }
+}
+
+static int ecl_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *ecl_ishtp_cl;
+ struct ishtp_opregion_dev *opr_dev;
+ int rv;
+
+ opr_dev = devm_kzalloc(ishtp_device(cl_device), sizeof(*opr_dev),
+ GFP_KERNEL);
+ if (!opr_dev)
+ return -ENOMEM;
+
+ ecl_ishtp_cl = ishtp_cl_allocate(cl_device);
+ if (!ecl_ishtp_cl)
+ return -ENOMEM;
+
+ ishtp_set_drvdata(cl_device, ecl_ishtp_cl);
+ ishtp_set_client_data(ecl_ishtp_cl, opr_dev);
+ opr_dev->ecl_ishtp_cl = ecl_ishtp_cl;
+ opr_dev->cl_device = cl_device;
+
+ init_waitqueue_head(&opr_dev->read_wait);
+ INIT_WORK(&opr_dev->event_work, ecl_acpi_invoke_dsm);
+ INIT_WORK(&opr_dev->reset_work, ecl_ishtp_cl_reset_handler);
+
+ /* Initialize ish client device */
+ rv = ecl_ishtp_cl_init(ecl_ishtp_cl);
+ if (rv) {
+ dev_err(cl_data_to_dev(opr_dev), "Client init failed\n");
+ goto err_exit;
+ }
+
+ dev_dbg(cl_data_to_dev(opr_dev), "eclite-ishtp client initialised\n");
+
+ opr_dev->ish_link_ready = true;
+ mutex_init(&opr_dev->lock);
+
+ rv = acpi_find_eclite_device(opr_dev);
+ if (rv) {
+ dev_err(cl_data_to_dev(opr_dev), "ECLite ACPI ID not found\n");
+ goto err_exit;
+ }
+
+ /* Register a handler for eclite fw events */
+ ishtp_register_event_cb(cl_device, ecl_ishtp_cl_event_cb);
+
+ /* Now init opregion handlers */
+ rv = acpi_opregion_init(opr_dev);
+ if (rv) {
+ dev_err(cl_data_to_dev(opr_dev), "ACPI opregion init failed\n");
+ goto err_exit;
+ }
+
+ /* Reprobe devices depending on ECLite - battery, fan, etc. */
+ acpi_dev_clear_dependencies(opr_dev->adev);
+
+ return 0;
+err_exit:
+ ishtp_set_connection_state(ecl_ishtp_cl, ISHTP_CL_DISCONNECTING);
+ ishtp_cl_disconnect(ecl_ishtp_cl);
+ ecl_ishtp_cl_deinit(ecl_ishtp_cl);
+
+ return rv;
+}
+
+static void ecl_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *ecl_ishtp_cl = ishtp_get_drvdata(cl_device);
+ struct ishtp_opregion_dev *opr_dev =
+ ishtp_get_client_data(ecl_ishtp_cl);
+
+ if (opr_dev->acpi_init_done)
+ acpi_opregion_deinit(opr_dev);
+
+ acpi_dev_put(opr_dev->adev);
+
+ ishtp_set_connection_state(ecl_ishtp_cl, ISHTP_CL_DISCONNECTING);
+ ishtp_cl_disconnect(ecl_ishtp_cl);
+ ecl_ishtp_cl_deinit(ecl_ishtp_cl);
+
+ cancel_work_sync(&opr_dev->reset_work);
+ cancel_work_sync(&opr_dev->event_work);
+}
+
+static int ecl_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *ecl_ishtp_cl = ishtp_get_drvdata(cl_device);
+ struct ishtp_opregion_dev *opr_dev =
+ ishtp_get_client_data(ecl_ishtp_cl);
+
+ schedule_work(&opr_dev->reset_work);
+
+ return 0;
+}
+
+static int ecl_ishtp_cl_suspend(struct device *device)
+{
+ struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
+ struct ishtp_cl *ecl_ishtp_cl = ishtp_get_drvdata(cl_device);
+ struct ishtp_opregion_dev *opr_dev =
+ ishtp_get_client_data(ecl_ishtp_cl);
+
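+	/*
+	 * An S0 target state means suspend-to-idle; the ECLite link stays up
+	 * in that case, so keep the opregion handlers and event notifications
+	 * in place.
+	 */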
+ if (acpi_target_system_state() == ACPI_STATE_S0)
+ return 0;
+
+ acpi_opregion_deinit(opr_dev);
+ ecl_ish_cl_enable_events(opr_dev, false);
+
+ return 0;
+}
+
+static int ecl_ishtp_cl_resume(struct device *device)
+{
+	/*
+	 * A reset is expected to follow an Sx transition. At this point we
+	 * cannot be sure whether the link is up, so there is nothing to
+	 * restore; do nothing in the resume path.
+	 */
+ return 0;
+}
+
+static const struct dev_pm_ops ecl_ishtp_pm_ops = {
+ .suspend = ecl_ishtp_cl_suspend,
+ .resume = ecl_ishtp_cl_resume,
+};
+
+static struct ishtp_cl_driver ecl_ishtp_cl_driver = {
+ .name = "ishtp-eclite",
+ .id = ecl_ishtp_id_table,
+ .probe = ecl_ishtp_cl_probe,
+ .remove = ecl_ishtp_cl_remove,
+ .reset = ecl_ishtp_cl_reset,
+ .driver.pm = &ecl_ishtp_pm_ops,
+};
+
+static int __init ecl_ishtp_init(void)
+{
+ return ishtp_cl_driver_register(&ecl_ishtp_cl_driver, THIS_MODULE);
+}
+
+static void __exit ecl_ishtp_exit(void)
+{
+	ishtp_cl_driver_unregister(&ecl_ishtp_cl_driver);
+}
+
+late_initcall(ecl_ishtp_init);
+module_exit(ecl_ishtp_exit);
+
+MODULE_DESCRIPTION("ISH ISHTP eclite client opregion driver");
+MODULE_AUTHOR("K Naduvalath, Sumesh <sumesh.k.naduvalath@intel.com>");
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/mrfld_pwrbtn.c b/drivers/platform/x86/intel/mrfld_pwrbtn.c
new file mode 100644
index 0000000000..549a3f586f
--- /dev/null
+++ b/drivers/platform/x86/intel/mrfld_pwrbtn.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Power-button driver for Basin Cove PMIC
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/mfd/intel_soc_pmic_mrfld.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/slab.h>
+
+#define BCOVE_PBSTATUS 0x27
+#define BCOVE_PBSTATUS_PBLVL BIT(4) /* 1 - release, 0 - press */
+
+static irqreturn_t mrfld_pwrbtn_interrupt(int irq, void *dev_id)
+{
+ struct input_dev *input = dev_id;
+ struct device *dev = input->dev.parent;
+ struct regmap *regmap = dev_get_drvdata(dev);
+ unsigned int state;
+ int ret;
+
+ ret = regmap_read(regmap, BCOVE_PBSTATUS, &state);
+ if (ret)
+ return IRQ_NONE;
+
+ dev_dbg(dev, "PBSTATUS=0x%x\n", state);
+ input_report_key(input, KEY_POWER, !(state & BCOVE_PBSTATUS_PBLVL));
+ input_sync(input);
+
+ regmap_update_bits(regmap, BCOVE_MIRQLVL1, BCOVE_LVL1_PWRBTN, 0);
+ return IRQ_HANDLED;
+}
+
+static int mrfld_pwrbtn_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev->parent);
+ struct input_dev *input;
+ int irq, ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
+ input->name = pdev->name;
+ input->phys = "power-button/input0";
+ input->id.bustype = BUS_HOST;
+ input->dev.parent = dev;
+ input_set_capability(input, EV_KEY, KEY_POWER);
+ ret = input_register_device(input);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dev, pmic->regmap);
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, mrfld_pwrbtn_interrupt,
+ IRQF_ONESHOT | IRQF_SHARED, pdev->name,
+ input);
+ if (ret)
+ return ret;
+
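+	/*
+	 * Writing 0 clears the mask bits, i.e. unmasks the power-button
+	 * interrupt at both the first-level (MIRQLVL1) and the dedicated
+	 * power-button (MPBIRQ) mask registers.
+	 */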
+ regmap_update_bits(pmic->regmap, BCOVE_MIRQLVL1, BCOVE_LVL1_PWRBTN, 0);
+ regmap_update_bits(pmic->regmap, BCOVE_MPBIRQ, BCOVE_PBIRQ_PBTN, 0);
+
+ device_init_wakeup(dev, true);
+ dev_pm_set_wake_irq(dev, irq);
+ return 0;
+}
+
+static void mrfld_pwrbtn_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ dev_pm_clear_wake_irq(dev);
+ device_init_wakeup(dev, false);
+}
+
+static const struct platform_device_id mrfld_pwrbtn_id_table[] = {
+ { .name = "mrfld_bcove_pwrbtn" },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, mrfld_pwrbtn_id_table);
+
+static struct platform_driver mrfld_pwrbtn_driver = {
+ .driver = {
+ .name = "mrfld_bcove_pwrbtn",
+ },
+ .probe = mrfld_pwrbtn_probe,
+ .remove_new = mrfld_pwrbtn_remove,
+ .id_table = mrfld_pwrbtn_id_table,
+};
+module_platform_driver(mrfld_pwrbtn_driver);
+
+MODULE_DESCRIPTION("Power-button driver for Basin Cove PMIC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/oaktrail.c b/drivers/platform/x86/intel/oaktrail.c
new file mode 100644
index 0000000000..fa720967e6
--- /dev/null
+++ b/drivers/platform/x86/intel/oaktrail.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Intel OakTrail Platform support
+ *
+ * Copyright (C) 2010-2011 Intel Corporation
+ * Author: Yin Kangkai (kangkai.yin@intel.com)
+ *
+ * based on Compal driver, Copyright (C) 2008 Cezary Jackiewicz
+ * <cezary.jackiewicz (at) gmail.com>, based on MSI driver
+ * Copyright (C) 2006 Lennart Poettering <mzxreary (at) 0pointer (dot) de>
+ *
+ * This driver does the following:
+ * 1. registers itself in the Linux backlight control in
+ * /sys/class/backlight/intel_oaktrail/
+ *
+ * 2. registers in the rfkill subsystem here: /sys/class/rfkill/rfkillX/
+ * for these components: wifi, bluetooth, wwan (3g), gps
+ *
+ * This driver might work on other products based on Oaktrail. If you
+ * want to try it you can pass force=1 as argument to the module which
+ * will force it to load even when the DMI data doesn't identify the
+ * product as compatible.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/backlight.h>
+#include <linux/dmi.h>
+#include <linux/err.h>
+#include <linux/fb.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/rfkill.h>
+
+#include <acpi/video.h>
+
+#define DRIVER_NAME "intel_oaktrail"
+#define DRIVER_VERSION "0.4ac1"
+
+/*
+ * This is the devices status address in EC space, and the control bits
+ * definition:
+ *
+ * (1 << 0): Camera enable/disable, RW.
+ * (1 << 1): Bluetooth enable/disable, RW.
+ * (1 << 2): GPS enable/disable, RW.
+ * (1 << 3): WiFi enable/disable, RW.
+ * (1 << 4): WWAN (3G) enable/disable, RW.
+ * (1 << 5): Touchscreen enable/disable, Read Only.
+ */
+#define OT_EC_DEVICE_STATE_ADDRESS 0xD6
+
+#define OT_EC_CAMERA_MASK (1 << 0)
+#define OT_EC_BT_MASK (1 << 1)
+#define OT_EC_GPS_MASK (1 << 2)
+#define OT_EC_WIFI_MASK (1 << 3)
+#define OT_EC_WWAN_MASK (1 << 4)
+#define OT_EC_TS_MASK (1 << 5)
+
+/*
+ * This is the address in EC space and commands used to control LCD backlight:
+ *
+ * Two steps are needed to change the LCD backlight:
+ * 1. write the backlight percentage into OT_EC_BL_BRIGHTNESS_ADDRESS;
+ * 2. write OT_EC_BL_CONTROL_ON_DATA into OT_EC_BL_CONTROL_ADDRESS.
+ *
+ * To read the LCD backlight, just read out the value from
+ * OT_EC_BL_BRIGHTNESS_ADDRESS.
+ *
+ * LCD backlight brightness range: 0 - 100 (OT_EC_BL_BRIGHTNESS_MAX)
+ */
+#define OT_EC_BL_BRIGHTNESS_ADDRESS 0x44
+#define OT_EC_BL_BRIGHTNESS_MAX 100
+#define OT_EC_BL_CONTROL_ADDRESS 0x3A
+#define OT_EC_BL_CONTROL_ON_DATA 0x1A
+
+
+static bool force;
+module_param(force, bool, 0);
+MODULE_PARM_DESC(force, "Force driver load, ignore DMI data");
+
+static struct platform_device *oaktrail_device;
+static struct backlight_device *oaktrail_bl_device;
+static struct rfkill *bt_rfkill;
+static struct rfkill *gps_rfkill;
+static struct rfkill *wifi_rfkill;
+static struct rfkill *wwan_rfkill;
+
+
+/* rfkill */
+static int oaktrail_rfkill_set(void *data, bool blocked)
+{
+ u8 value;
+ u8 result;
+ unsigned long radio = (unsigned long) data;
+
+ ec_read(OT_EC_DEVICE_STATE_ADDRESS, &result);
+
+ if (!blocked)
+ value = (u8) (result | radio);
+ else
+ value = (u8) (result & ~radio);
+
+ ec_write(OT_EC_DEVICE_STATE_ADDRESS, value);
+
+ return 0;
+}
+
+static const struct rfkill_ops oaktrail_rfkill_ops = {
+ .set_block = oaktrail_rfkill_set,
+};
+
+static struct rfkill *oaktrail_rfkill_new(char *name, enum rfkill_type type,
+ unsigned long mask)
+{
+ struct rfkill *rfkill_dev;
+ u8 value;
+ int err;
+
+ rfkill_dev = rfkill_alloc(name, &oaktrail_device->dev, type,
+ &oaktrail_rfkill_ops, (void *)mask);
+ if (!rfkill_dev)
+ return ERR_PTR(-ENOMEM);
+
+ ec_read(OT_EC_DEVICE_STATE_ADDRESS, &value);
+	/* A set bit in the EC status register means the radio is enabled */
+	rfkill_init_sw_state(rfkill_dev, !(value & mask));
+
+ err = rfkill_register(rfkill_dev);
+ if (err) {
+ rfkill_destroy(rfkill_dev);
+ return ERR_PTR(err);
+ }
+
+ return rfkill_dev;
+}
+
+static inline void __oaktrail_rfkill_cleanup(struct rfkill *rf)
+{
+ if (rf) {
+ rfkill_unregister(rf);
+ rfkill_destroy(rf);
+ }
+}
+
+static void oaktrail_rfkill_cleanup(void)
+{
+ __oaktrail_rfkill_cleanup(wifi_rfkill);
+ __oaktrail_rfkill_cleanup(bt_rfkill);
+ __oaktrail_rfkill_cleanup(gps_rfkill);
+ __oaktrail_rfkill_cleanup(wwan_rfkill);
+}
+
+static int oaktrail_rfkill_init(void)
+{
+ int ret;
+
+ wifi_rfkill = oaktrail_rfkill_new("oaktrail-wifi",
+ RFKILL_TYPE_WLAN,
+ OT_EC_WIFI_MASK);
+ if (IS_ERR(wifi_rfkill)) {
+ ret = PTR_ERR(wifi_rfkill);
+ wifi_rfkill = NULL;
+ goto cleanup;
+ }
+
+ bt_rfkill = oaktrail_rfkill_new("oaktrail-bluetooth",
+ RFKILL_TYPE_BLUETOOTH,
+ OT_EC_BT_MASK);
+ if (IS_ERR(bt_rfkill)) {
+ ret = PTR_ERR(bt_rfkill);
+ bt_rfkill = NULL;
+ goto cleanup;
+ }
+
+ gps_rfkill = oaktrail_rfkill_new("oaktrail-gps",
+ RFKILL_TYPE_GPS,
+ OT_EC_GPS_MASK);
+ if (IS_ERR(gps_rfkill)) {
+ ret = PTR_ERR(gps_rfkill);
+ gps_rfkill = NULL;
+ goto cleanup;
+ }
+
+ wwan_rfkill = oaktrail_rfkill_new("oaktrail-wwan",
+ RFKILL_TYPE_WWAN,
+ OT_EC_WWAN_MASK);
+ if (IS_ERR(wwan_rfkill)) {
+ ret = PTR_ERR(wwan_rfkill);
+ wwan_rfkill = NULL;
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ oaktrail_rfkill_cleanup();
+ return ret;
+}
+
+
+/* backlight */
+static int get_backlight_brightness(struct backlight_device *b)
+{
+	u8 value;
+
+	ec_read(OT_EC_BL_BRIGHTNESS_ADDRESS, &value);
+
+ return value;
+}
+
+static int set_backlight_brightness(struct backlight_device *b)
+{
+	u8 percent = (u8) b->props.brightness;
+
+	/* percent is unsigned, so only the upper bound needs checking */
+	if (percent > OT_EC_BL_BRIGHTNESS_MAX)
+		return -EINVAL;
+
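+	/*
+	 * The two-step sequence described in the comment above: write the
+	 * brightness percentage, then the control command to apply it.
+	 */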
+ ec_write(OT_EC_BL_BRIGHTNESS_ADDRESS, percent);
+ ec_write(OT_EC_BL_CONTROL_ADDRESS, OT_EC_BL_CONTROL_ON_DATA);
+
+ return 0;
+}
+
+static const struct backlight_ops oaktrail_bl_ops = {
+ .get_brightness = get_backlight_brightness,
+ .update_status = set_backlight_brightness,
+};
+
+static int oaktrail_backlight_init(void)
+{
+ struct backlight_device *bd;
+ struct backlight_properties props;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = OT_EC_BL_BRIGHTNESS_MAX;
+ bd = backlight_device_register(DRIVER_NAME,
+ &oaktrail_device->dev, NULL,
+ &oaktrail_bl_ops,
+ &props);
+
+ if (IS_ERR(bd)) {
+ oaktrail_bl_device = NULL;
+ pr_warn("Unable to register backlight device\n");
+ return PTR_ERR(bd);
+ }
+
+ oaktrail_bl_device = bd;
+
+ bd->props.brightness = get_backlight_brightness(bd);
+ bd->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(bd);
+
+ return 0;
+}
+
+static void oaktrail_backlight_exit(void)
+{
+ backlight_device_unregister(oaktrail_bl_device);
+}
+
+static int oaktrail_probe(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver oaktrail_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = oaktrail_probe,
+};
+
+static int dmi_check_cb(const struct dmi_system_id *id)
+{
+ pr_info("Identified model '%s'\n", id->ident);
+ return 0;
+}
+
+static const struct dmi_system_id oaktrail_dmi_table[] __initconst = {
+ {
+ .ident = "OakTrail platform",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "OakTrail platform"),
+ },
+ .callback = dmi_check_cb
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(dmi, oaktrail_dmi_table);
+
+static int __init oaktrail_init(void)
+{
+ int ret;
+
+ if (acpi_disabled) {
+ pr_err("ACPI needs to be enabled for this driver to work!\n");
+ return -ENODEV;
+ }
+
+ if (!force && !dmi_check_system(oaktrail_dmi_table)) {
+		pr_err("Platform not recognized (you could try the module's force parameter)\n");
+ return -ENODEV;
+ }
+
+ ret = platform_driver_register(&oaktrail_driver);
+ if (ret) {
+ pr_warn("Unable to register platform driver\n");
+ goto err_driver_reg;
+ }
+
+ oaktrail_device = platform_device_alloc(DRIVER_NAME, PLATFORM_DEVID_NONE);
+ if (!oaktrail_device) {
+ pr_warn("Unable to allocate platform device\n");
+ ret = -ENOMEM;
+ goto err_device_alloc;
+ }
+
+ ret = platform_device_add(oaktrail_device);
+ if (ret) {
+ pr_warn("Unable to add platform device\n");
+ goto err_device_add;
+ }
+
+ if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
+ ret = oaktrail_backlight_init();
+ if (ret)
+ goto err_backlight;
+ }
+
+ ret = oaktrail_rfkill_init();
+ if (ret) {
+ pr_warn("Setup rfkill failed\n");
+ goto err_rfkill;
+ }
+
+ pr_info("Driver "DRIVER_VERSION" successfully loaded\n");
+ return 0;
+
+err_rfkill:
+ oaktrail_backlight_exit();
+err_backlight:
+ platform_device_del(oaktrail_device);
+err_device_add:
+ platform_device_put(oaktrail_device);
+err_device_alloc:
+ platform_driver_unregister(&oaktrail_driver);
+err_driver_reg:
+
+ return ret;
+}
+
+static void __exit oaktrail_cleanup(void)
+{
+ oaktrail_backlight_exit();
+ oaktrail_rfkill_cleanup();
+ platform_device_unregister(oaktrail_device);
+ platform_driver_unregister(&oaktrail_driver);
+
+ pr_info("Driver unloaded\n");
+}
+
+module_init(oaktrail_init);
+module_exit(oaktrail_cleanup);
+
+MODULE_AUTHOR("Yin Kangkai (kangkai.yin@intel.com)");
+MODULE_DESCRIPTION("Intel Oaktrail Platform ACPI Extras");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/pmc/Kconfig b/drivers/platform/x86/intel/pmc/Kconfig
new file mode 100644
index 0000000000..b526597e4d
--- /dev/null
+++ b/drivers/platform/x86/intel/pmc/Kconfig
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Intel x86 Platform-Specific Drivers
+#
+
+config INTEL_PMC_CORE
+ tristate "Intel PMC Core driver"
+ depends on PCI
+ depends on ACPI
+ help
+ The Intel Platform Controller Hub for Intel Core SoCs provides access
+ to Power Management Controller registers via various interfaces. This
+ driver can utilize debugging capabilities and supported features as
+	  exposed by the Power Management Controller. It may also perform some
+	  tasks in the PMC in order to enable a transition into the SLPS0 state.
+ It should be selected on all Intel platforms supported by the driver.
+
+ Supported features:
+ - SLP_S0_RESIDENCY counter
+ - PCH IP Power Gating status
+ - LTR Ignore / LTR Show
+ - MPHY/PLL gating status (Sunrisepoint PCH only)
+ - SLPS0 Debug registers (Cannonlake/Icelake PCH)
+ - Low Power Mode registers (Tigerlake and beyond)
+ - PMC quirks as needed to enable SLPS0/S0ix
diff --git a/drivers/platform/x86/intel/pmc/Makefile b/drivers/platform/x86/intel/pmc/Makefile
new file mode 100644
index 0000000000..3a4cf1cbc1
--- /dev/null
+++ b/drivers/platform/x86/intel/pmc/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Intel x86 Platform-Specific Drivers
+#
+
+intel_pmc_core-y := core.o core_ssram.o spt.o cnp.o \
+ icl.o tgl.o adl.o mtl.o
+obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core.o
+intel_pmc_core_pltdrv-y := pltdrv.o
+obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core_pltdrv.o
diff --git a/drivers/platform/x86/intel/pmc/adl.c b/drivers/platform/x86/intel/pmc/adl.c
new file mode 100644
index 0000000000..606f7678bc
--- /dev/null
+++ b/drivers/platform/x86/intel/pmc/adl.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file contains platform specific structure definitions
+ * and init function used by Alder Lake PCH.
+ *
+ * Copyright (c) 2022, Intel Corporation.
+ * All Rights Reserved.
+ *
+ */
+
+#include "core.h"
+
+/* Alder Lake: PGD PFET Enable Ack Status Register(s) bitmap */
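+/*
+ * Each blank-line-separated group below corresponds to one 8-bit PPFEAR
+ * register (bucket); BIT() numbering restarts at 0 for every bucket.
+ */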
+const struct pmc_bit_map adl_pfear_map[] = {
+ {"SPI/eSPI", BIT(2)},
+ {"XHCI", BIT(3)},
+ {"SPA", BIT(4)},
+ {"SPB", BIT(5)},
+ {"SPC", BIT(6)},
+ {"GBE", BIT(7)},
+
+ {"SATA", BIT(0)},
+ {"HDA_PGD0", BIT(1)},
+ {"HDA_PGD1", BIT(2)},
+ {"HDA_PGD2", BIT(3)},
+ {"HDA_PGD3", BIT(4)},
+ {"SPD", BIT(5)},
+ {"LPSS", BIT(6)},
+
+ {"SMB", BIT(0)},
+ {"ISH", BIT(1)},
+ {"ITH", BIT(3)},
+
+ {"XDCI", BIT(1)},
+ {"DCI", BIT(2)},
+ {"CSE", BIT(3)},
+ {"CSME_KVM", BIT(4)},
+ {"CSME_PMT", BIT(5)},
+ {"CSME_CLINK", BIT(6)},
+ {"CSME_PTIO", BIT(7)},
+
+ {"CSME_USBR", BIT(0)},
+ {"CSME_SUSRAM", BIT(1)},
+ {"CSME_SMT1", BIT(2)},
+ {"CSME_SMS2", BIT(4)},
+ {"CSME_SMS1", BIT(5)},
+ {"CSME_RTC", BIT(6)},
+ {"CSME_PSF", BIT(7)},
+
+ {"CNVI", BIT(3)},
+ {"HDA_PGD4", BIT(2)},
+ {"HDA_PGD5", BIT(3)},
+ {"HDA_PGD6", BIT(4)},
+ {}
+};
+
+const struct pmc_bit_map *ext_adl_pfear_map[] = {
+ /*
+ * Check intel_pmc_core_ids[] users of cnp_reg_map for
+ * a list of core SoCs using this.
+ */
+ adl_pfear_map,
+ NULL
+};
+
+const struct pmc_bit_map adl_ltr_show_map[] = {
+ {"SOUTHPORT_A", CNP_PMC_LTR_SPA},
+ {"SOUTHPORT_B", CNP_PMC_LTR_SPB},
+ {"SATA", CNP_PMC_LTR_SATA},
+ {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE},
+ {"XHCI", CNP_PMC_LTR_XHCI},
+ {"SOUTHPORT_F", ADL_PMC_LTR_SPF},
+ {"ME", CNP_PMC_LTR_ME},
+ /* EVA is Enterprise Value Add, doesn't really exist on PCH */
+ {"SATA1", CNP_PMC_LTR_EVA},
+ {"SOUTHPORT_C", CNP_PMC_LTR_SPC},
+ {"HD_AUDIO", CNP_PMC_LTR_AZ},
+ {"CNV", CNP_PMC_LTR_CNV},
+ {"LPSS", CNP_PMC_LTR_LPSS},
+ {"SOUTHPORT_D", CNP_PMC_LTR_SPD},
+ {"SOUTHPORT_E", CNP_PMC_LTR_SPE},
+ {"SATA2", CNP_PMC_LTR_CAM},
+ {"ESPI", CNP_PMC_LTR_ESPI},
+ {"SCC", CNP_PMC_LTR_SCC},
+ {"ISH", CNP_PMC_LTR_ISH},
+ {"UFSX2", CNP_PMC_LTR_UFSX2},
+ {"EMMC", CNP_PMC_LTR_EMMC},
+ /*
+ * Check intel_pmc_core_ids[] users of cnp_reg_map for
+ * a list of core SoCs using this.
+ */
+ {"WIGIG", ICL_PMC_LTR_WIGIG},
+ {"THC0", TGL_PMC_LTR_THC0},
+ {"THC1", TGL_PMC_LTR_THC1},
+ {"SOUTHPORT_G", CNP_PMC_LTR_RESERVED},
+
+ /* Below two cannot be used for LTR_IGNORE */
+ {"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT},
+ {"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT},
+ {}
+};
+
+const struct pmc_bit_map adl_clocksource_status_map[] = {
+ {"CLKPART1_OFF_STS", BIT(0)},
+ {"CLKPART2_OFF_STS", BIT(1)},
+ {"CLKPART3_OFF_STS", BIT(2)},
+ {"CLKPART4_OFF_STS", BIT(3)},
+ {"CLKPART5_OFF_STS", BIT(4)},
+ {"CLKPART6_OFF_STS", BIT(5)},
+ {"CLKPART7_OFF_STS", BIT(6)},
+ {"CLKPART8_OFF_STS", BIT(7)},
+ {"PCIE0PLL_OFF_STS", BIT(10)},
+ {"PCIE1PLL_OFF_STS", BIT(11)},
+ {"PCIE2PLL_OFF_STS", BIT(12)},
+ {"PCIE3PLL_OFF_STS", BIT(13)},
+ {"PCIE4PLL_OFF_STS", BIT(14)},
+ {"PCIE5PLL_OFF_STS", BIT(15)},
+ {"PCIE6PLL_OFF_STS", BIT(16)},
+ {"USB2PLL_OFF_STS", BIT(18)},
+ {"OCPLL_OFF_STS", BIT(22)},
+ {"AUDIOPLL_OFF_STS", BIT(23)},
+ {"GBEPLL_OFF_STS", BIT(24)},
+ {"Fast_XTAL_Osc_OFF_STS", BIT(25)},
+ {"AC_Ring_Osc_OFF_STS", BIT(26)},
+ {"MC_Ring_Osc_OFF_STS", BIT(27)},
+ {"SATAPLL_OFF_STS", BIT(29)},
+ {"USB3PLL_OFF_STS", BIT(31)},
+ {}
+};
+
+const struct pmc_bit_map adl_power_gating_status_0_map[] = {
+ {"PMC_PGD0_PG_STS", BIT(0)},
+ {"DMI_PGD0_PG_STS", BIT(1)},
+ {"ESPISPI_PGD0_PG_STS", BIT(2)},
+ {"XHCI_PGD0_PG_STS", BIT(3)},
+ {"SPA_PGD0_PG_STS", BIT(4)},
+ {"SPB_PGD0_PG_STS", BIT(5)},
+ {"SPC_PGD0_PG_STS", BIT(6)},
+ {"GBE_PGD0_PG_STS", BIT(7)},
+ {"SATA_PGD0_PG_STS", BIT(8)},
+ {"DSP_PGD0_PG_STS", BIT(9)},
+ {"DSP_PGD1_PG_STS", BIT(10)},
+ {"DSP_PGD2_PG_STS", BIT(11)},
+ {"DSP_PGD3_PG_STS", BIT(12)},
+ {"SPD_PGD0_PG_STS", BIT(13)},
+ {"LPSS_PGD0_PG_STS", BIT(14)},
+ {"SMB_PGD0_PG_STS", BIT(16)},
+ {"ISH_PGD0_PG_STS", BIT(17)},
+ {"NPK_PGD0_PG_STS", BIT(19)},
+ {"PECI_PGD0_PG_STS", BIT(21)},
+ {"XDCI_PGD0_PG_STS", BIT(25)},
+ {"EXI_PGD0_PG_STS", BIT(26)},
+ {"CSE_PGD0_PG_STS", BIT(27)},
+ {"KVMCC_PGD0_PG_STS", BIT(28)},
+ {"PMT_PGD0_PG_STS", BIT(29)},
+ {"CLINK_PGD0_PG_STS", BIT(30)},
+ {"PTIO_PGD0_PG_STS", BIT(31)},
+ {}
+};
+
+const struct pmc_bit_map adl_power_gating_status_1_map[] = {
+ {"USBR0_PGD0_PG_STS", BIT(0)},
+ {"SMT1_PGD0_PG_STS", BIT(2)},
+ {"CSMERTC_PGD0_PG_STS", BIT(6)},
+ {"CSMEPSF_PGD0_PG_STS", BIT(7)},
+ {"CNVI_PGD0_PG_STS", BIT(19)},
+ {"DSP_PGD4_PG_STS", BIT(26)},
+ {"SPG_PGD0_PG_STS", BIT(27)},
+ {"SPE_PGD0_PG_STS", BIT(28)},
+ {}
+};
+
+const struct pmc_bit_map adl_power_gating_status_2_map[] = {
+ {"THC0_PGD0_PG_STS", BIT(7)},
+ {"THC1_PGD0_PG_STS", BIT(8)},
+ {"SPF_PGD0_PG_STS", BIT(14)},
+ {}
+};
+
+const struct pmc_bit_map adl_d3_status_0_map[] = {
+ {"ISH_D3_STS", BIT(2)},
+ {"LPSS_D3_STS", BIT(3)},
+ {"XDCI_D3_STS", BIT(4)},
+ {"XHCI_D3_STS", BIT(5)},
+ {"SPA_D3_STS", BIT(12)},
+ {"SPB_D3_STS", BIT(13)},
+ {"SPC_D3_STS", BIT(14)},
+ {"SPD_D3_STS", BIT(15)},
+ {"SPE_D3_STS", BIT(16)},
+ {"DSP_D3_STS", BIT(19)},
+ {"SATA_D3_STS", BIT(20)},
+ {"DMI_D3_STS", BIT(22)},
+ {}
+};
+
+const struct pmc_bit_map adl_d3_status_1_map[] = {
+ {"GBE_D3_STS", BIT(19)},
+ {"CNVI_D3_STS", BIT(27)},
+ {}
+};
+
+const struct pmc_bit_map adl_d3_status_2_map[] = {
+ {"CSMERTC_D3_STS", BIT(1)},
+ {"CSE_D3_STS", BIT(4)},
+ {"KVMCC_D3_STS", BIT(5)},
+ {"USBR0_D3_STS", BIT(6)},
+ {"SMT1_D3_STS", BIT(8)},
+ {"PTIO_D3_STS", BIT(16)},
+ {"PMT_D3_STS", BIT(17)},
+ {}
+};
+
+const struct pmc_bit_map adl_d3_status_3_map[] = {
+ {"THC0_D3_STS", BIT(14)},
+ {"THC1_D3_STS", BIT(15)},
+ {}
+};
+
+const struct pmc_bit_map adl_vnn_req_status_0_map[] = {
+ {"ISH_VNN_REQ_STS", BIT(2)},
+ {"ESPISPI_VNN_REQ_STS", BIT(18)},
+ {"DSP_VNN_REQ_STS", BIT(19)},
+ {}
+};
+
+const struct pmc_bit_map adl_vnn_req_status_1_map[] = {
+ {"NPK_VNN_REQ_STS", BIT(4)},
+ {"EXI_VNN_REQ_STS", BIT(9)},
+ {"GBE_VNN_REQ_STS", BIT(19)},
+ {"SMB_VNN_REQ_STS", BIT(25)},
+ {"CNVI_VNN_REQ_STS", BIT(27)},
+ {}
+};
+
+const struct pmc_bit_map adl_vnn_req_status_2_map[] = {
+ {"CSMERTC_VNN_REQ_STS", BIT(1)},
+ {"CSE_VNN_REQ_STS", BIT(4)},
+ {"SMT1_VNN_REQ_STS", BIT(8)},
+ {"CLINK_VNN_REQ_STS", BIT(14)},
+ {"GPIOCOM4_VNN_REQ_STS", BIT(20)},
+ {"GPIOCOM3_VNN_REQ_STS", BIT(21)},
+ {"GPIOCOM2_VNN_REQ_STS", BIT(22)},
+ {"GPIOCOM1_VNN_REQ_STS", BIT(23)},
+ {"GPIOCOM0_VNN_REQ_STS", BIT(24)},
+ {}
+};
+
+const struct pmc_bit_map adl_vnn_req_status_3_map[] = {
+ {"GPIOCOM5_VNN_REQ_STS", BIT(11)},
+ {}
+};
+
+const struct pmc_bit_map adl_vnn_misc_status_map[] = {
+ {"CPU_C10_REQ_STS", BIT(0)},
+ {"PCIe_LPM_En_REQ_STS", BIT(3)},
+ {"ITH_REQ_STS", BIT(5)},
+ {"CNVI_REQ_STS", BIT(6)},
+ {"ISH_REQ_STS", BIT(7)},
+ {"USB2_SUS_PG_Sys_REQ_STS", BIT(10)},
+ {"PCIe_Clk_REQ_STS", BIT(12)},
+ {"MPHY_Core_DL_REQ_STS", BIT(16)},
+ {"Break-even_En_REQ_STS", BIT(17)},
+ {"MPHY_SUS_REQ_STS", BIT(22)},
+ {"xDCI_attached_REQ_STS", BIT(24)},
+ {}
+};
+
+const struct pmc_bit_map *adl_lpm_maps[] = {
+ adl_clocksource_status_map,
+ adl_power_gating_status_0_map,
+ adl_power_gating_status_1_map,
+ adl_power_gating_status_2_map,
+ adl_d3_status_0_map,
+ adl_d3_status_1_map,
+ adl_d3_status_2_map,
+ adl_d3_status_3_map,
+ adl_vnn_req_status_0_map,
+ adl_vnn_req_status_1_map,
+ adl_vnn_req_status_2_map,
+ adl_vnn_req_status_3_map,
+ adl_vnn_misc_status_map,
+ tgl_signal_status_map,
+ NULL
+};
+
+const struct pmc_reg_map adl_reg_map = {
+ .pfear_sts = ext_adl_pfear_map,
+ .slp_s0_offset = ADL_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP,
+ .ltr_show_sts = adl_ltr_show_map,
+ .msr_sts = msr_map,
+ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+ .regmap_length = CNP_PMC_MMIO_REG_LEN,
+ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+ .ppfear_buckets = CNP_PPFEAR_NUM_ENTRIES,
+ .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
+ .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .ltr_ignore_max = ADL_NUM_IP_IGN_ALLOWED,
+ .lpm_num_modes = ADL_LPM_NUM_MODES,
+ .lpm_num_maps = ADL_LPM_NUM_MAPS,
+ .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
+ .etr3_offset = ETR3_OFFSET,
+ .lpm_sts_latch_en_offset = ADL_LPM_STATUS_LATCH_EN_OFFSET,
+ .lpm_priority_offset = ADL_LPM_PRI_OFFSET,
+ .lpm_en_offset = ADL_LPM_EN_OFFSET,
+ .lpm_residency_offset = ADL_LPM_RESIDENCY_OFFSET,
+ .lpm_sts = adl_lpm_maps,
+ .lpm_status_offset = ADL_LPM_STATUS_OFFSET,
+ .lpm_live_status_offset = ADL_LPM_LIVE_STATUS_OFFSET,
+};
+
+int adl_core_init(struct pmc_dev *pmcdev)
+{
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ int ret;
+
+ pmcdev->suspend = cnl_suspend;
+ pmcdev->resume = cnl_resume;
+
+ pmc->map = &adl_reg_map;
+ ret = get_primary_reg_base(pmc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/platform/x86/intel/pmc/cnp.c b/drivers/platform/x86/intel/pmc/cnp.c
new file mode 100644
index 0000000000..98b3665120
--- /dev/null
+++ b/drivers/platform/x86/intel/pmc/cnp.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file contains platform specific structure definitions
+ * and init function used by Cannon Lake Point PCH.
+ *
+ * Copyright (c) 2022, Intel Corporation.
+ * All Rights Reserved.
+ *
+ */
+
+#include "core.h"
+
+/* Cannon Lake: PGD PFET Enable Ack Status Register(s) bitmap */
+const struct pmc_bit_map cnp_pfear_map[] = {
+ {"PMC", BIT(0)},
+ {"OPI-DMI", BIT(1)},
+ {"SPI/eSPI", BIT(2)},
+ {"XHCI", BIT(3)},
+ {"SPA", BIT(4)},
+ {"SPB", BIT(5)},
+ {"SPC", BIT(6)},
+ {"GBE", BIT(7)},
+
+ {"SATA", BIT(0)},
+ {"HDA_PGD0", BIT(1)},
+ {"HDA_PGD1", BIT(2)},
+ {"HDA_PGD2", BIT(3)},
+ {"HDA_PGD3", BIT(4)},
+ {"SPD", BIT(5)},
+ {"LPSS", BIT(6)},
+ {"LPC", BIT(7)},
+
+ {"SMB", BIT(0)},
+ {"ISH", BIT(1)},
+ {"P2SB", BIT(2)},
+ {"NPK_VNN", BIT(3)},
+ {"SDX", BIT(4)},
+ {"SPE", BIT(5)},
+ {"Fuse", BIT(6)},
+ {"SBR8", BIT(7)},
+
+ {"CSME_FSC", BIT(0)},
+ {"USB3_OTG", BIT(1)},
+ {"EXI", BIT(2)},
+ {"CSE", BIT(3)},
+ {"CSME_KVM", BIT(4)},
+ {"CSME_PMT", BIT(5)},
+ {"CSME_CLINK", BIT(6)},
+ {"CSME_PTIO", BIT(7)},
+
+ {"CSME_USBR", BIT(0)},
+ {"CSME_SUSRAM", BIT(1)},
+ {"CSME_SMT1", BIT(2)},
+ {"CSME_SMT4", BIT(3)},
+ {"CSME_SMS2", BIT(4)},
+ {"CSME_SMS1", BIT(5)},
+ {"CSME_RTC", BIT(6)},
+ {"CSME_PSF", BIT(7)},
+
+ {"SBR0", BIT(0)},
+ {"SBR1", BIT(1)},
+ {"SBR2", BIT(2)},
+ {"SBR3", BIT(3)},
+ {"SBR4", BIT(4)},
+ {"SBR5", BIT(5)},
+ {"CSME_PECI", BIT(6)},
+ {"PSF1", BIT(7)},
+
+ {"PSF2", BIT(0)},
+ {"PSF3", BIT(1)},
+ {"PSF4", BIT(2)},
+ {"CNVI", BIT(3)},
+ {"UFS0", BIT(4)},
+ {"EMMC", BIT(5)},
+ {"SPF", BIT(6)},
+ {"SBR6", BIT(7)},
+
+ {"SBR7", BIT(0)},
+ {"NPK_AON", BIT(1)},
+ {"HDA_PGD4", BIT(2)},
+ {"HDA_PGD5", BIT(3)},
+ {"HDA_PGD6", BIT(4)},
+ {"PSF6", BIT(5)},
+ {"PSF7", BIT(6)},
+ {"PSF8", BIT(7)},
+ {}
+};
+
+const struct pmc_bit_map *ext_cnp_pfear_map[] = {
+ /*
+ * Check intel_pmc_core_ids[] users of cnp_reg_map for
+ * a list of core SoCs using this.
+ */
+ cnp_pfear_map,
+ NULL
+};
+
+const struct pmc_bit_map cnp_slps0_dbg0_map[] = {
+ {"AUDIO_D3", BIT(0)},
+ {"OTG_D3", BIT(1)},
+ {"XHCI_D3", BIT(2)},
+ {"LPIO_D3", BIT(3)},
+ {"SDX_D3", BIT(4)},
+ {"SATA_D3", BIT(5)},
+ {"UFS0_D3", BIT(6)},
+ {"UFS1_D3", BIT(7)},
+ {"EMMC_D3", BIT(8)},
+ {}
+};
+
+const struct pmc_bit_map cnp_slps0_dbg1_map[] = {
+ {"SDIO_PLL_OFF", BIT(0)},
+ {"USB2_PLL_OFF", BIT(1)},
+ {"AUDIO_PLL_OFF", BIT(2)},
+ {"OC_PLL_OFF", BIT(3)},
+ {"MAIN_PLL_OFF", BIT(4)},
+ {"XOSC_OFF", BIT(5)},
+ {"LPC_CLKS_GATED", BIT(6)},
+ {"PCIE_CLKREQS_IDLE", BIT(7)},
+ {"AUDIO_ROSC_OFF", BIT(8)},
+ {"HPET_XOSC_CLK_REQ", BIT(9)},
+ {"PMC_ROSC_SLOW_CLK", BIT(10)},
+ {"AON2_ROSC_GATED", BIT(11)},
+ {"CLKACKS_DEASSERTED", BIT(12)},
+ {}
+};
+
+const struct pmc_bit_map cnp_slps0_dbg2_map[] = {
+ {"MPHY_CORE_GATED", BIT(0)},
+ {"CSME_GATED", BIT(1)},
+ {"USB2_SUS_GATED", BIT(2)},
+ {"DYN_FLEX_IO_IDLE", BIT(3)},
+ {"GBE_NO_LINK", BIT(4)},
+ {"THERM_SEN_DISABLED", BIT(5)},
+ {"PCIE_LOW_POWER", BIT(6)},
+ {"ISH_VNNAON_REQ_ACT", BIT(7)},
+ {"ISH_VNN_REQ_ACT", BIT(8)},
+ {"CNV_VNNAON_REQ_ACT", BIT(9)},
+ {"CNV_VNN_REQ_ACT", BIT(10)},
+ {"NPK_VNNON_REQ_ACT", BIT(11)},
+ {"PMSYNC_STATE_IDLE", BIT(12)},
+ {"ALST_GT_THRES", BIT(13)},
+ {"PMC_ARC_PG_READY", BIT(14)},
+ {}
+};
+
+const struct pmc_bit_map *cnp_slps0_dbg_maps[] = {
+ cnp_slps0_dbg0_map,
+ cnp_slps0_dbg1_map,
+ cnp_slps0_dbg2_map,
+ NULL
+};
+
+const struct pmc_bit_map cnp_ltr_show_map[] = {
+ {"SOUTHPORT_A", CNP_PMC_LTR_SPA},
+ {"SOUTHPORT_B", CNP_PMC_LTR_SPB},
+ {"SATA", CNP_PMC_LTR_SATA},
+ {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE},
+ {"XHCI", CNP_PMC_LTR_XHCI},
+ {"Reserved", CNP_PMC_LTR_RESERVED},
+ {"ME", CNP_PMC_LTR_ME},
+ /* EVA is Enterprise Value Add, doesn't really exist on PCH */
+ {"EVA", CNP_PMC_LTR_EVA},
+ {"SOUTHPORT_C", CNP_PMC_LTR_SPC},
+ {"HD_AUDIO", CNP_PMC_LTR_AZ},
+ {"CNV", CNP_PMC_LTR_CNV},
+ {"LPSS", CNP_PMC_LTR_LPSS},
+ {"SOUTHPORT_D", CNP_PMC_LTR_SPD},
+ {"SOUTHPORT_E", CNP_PMC_LTR_SPE},
+ {"CAMERA", CNP_PMC_LTR_CAM},
+ {"ESPI", CNP_PMC_LTR_ESPI},
+ {"SCC", CNP_PMC_LTR_SCC},
+ {"ISH", CNP_PMC_LTR_ISH},
+ {"UFSX2", CNP_PMC_LTR_UFSX2},
+ {"EMMC", CNP_PMC_LTR_EMMC},
+ /*
+ * Check intel_pmc_core_ids[] users of cnp_reg_map for
+ * a list of core SoCs using this.
+ */
+ {"WIGIG", ICL_PMC_LTR_WIGIG},
+ {"THC0", TGL_PMC_LTR_THC0},
+ {"THC1", TGL_PMC_LTR_THC1},
+ /* Below two cannot be used for LTR_IGNORE */
+ {"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT},
+ {"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT},
+ {}
+};
+
+const struct pmc_reg_map cnp_reg_map = {
+ .pfear_sts = ext_cnp_pfear_map,
+ .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = SPT_PMC_SLP_S0_RES_COUNTER_STEP,
+ .slps0_dbg_maps = cnp_slps0_dbg_maps,
+ .ltr_show_sts = cnp_ltr_show_map,
+ .msr_sts = msr_map,
+ .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
+ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+ .regmap_length = CNP_PMC_MMIO_REG_LEN,
+ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+ .ppfear_buckets = CNP_PPFEAR_NUM_ENTRIES,
+ .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
+ .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .ltr_ignore_max = CNP_NUM_IP_IGN_ALLOWED,
+ .etr3_offset = ETR3_OFFSET,
+};
+
+void cnl_suspend(struct pmc_dev *pmcdev)
+{
+ /*
+ * Due to a hardware limitation, the GBE LTR blocks PC10
+ * when a cable is attached. To unblock PC10 during suspend,
+ * tell the PMC to ignore it.
+ */
+ pmc_core_send_ltr_ignore(pmcdev, 3, 1);
+}
+
+int cnl_resume(struct pmc_dev *pmcdev)
+{
+ pmc_core_send_ltr_ignore(pmcdev, 3, 0);
+
+ return pmc_core_resume_common(pmcdev);
+}
+
+int cnp_core_init(struct pmc_dev *pmcdev)
+{
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ int ret;
+
+ pmcdev->suspend = cnl_suspend;
+ pmcdev->resume = cnl_resume;
+
+ pmc->map = &cnp_reg_map;
+ ret = get_primary_reg_base(pmc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
new file mode 100644
index 0000000000..022afb97d5
--- /dev/null
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -0,0 +1,1409 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Core SoC Power Management Controller Driver
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
+ * Vishwanath Somayaji <vishwanath.somayaji@intel.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/dmi.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+#include <asm/msr.h>
+#include <asm/tsc.h>
+
+#include "core.h"
+
+/* Maximum number of modes supported by platforms that have low power mode capability */
+const char *pmc_lpm_modes[] = {
+ "S0i2.0",
+ "S0i2.1",
+ "S0i2.2",
+ "S0i3.0",
+ "S0i3.1",
+ "S0i3.2",
+ "S0i3.3",
+ "S0i3.4",
+ NULL
+};
+
+/* PKGC MSRs are common across Intel Core SoCs */
+const struct pmc_bit_map msr_map[] = {
+ {"Package C2", MSR_PKG_C2_RESIDENCY},
+ {"Package C3", MSR_PKG_C3_RESIDENCY},
+ {"Package C6", MSR_PKG_C6_RESIDENCY},
+ {"Package C7", MSR_PKG_C7_RESIDENCY},
+ {"Package C8", MSR_PKG_C8_RESIDENCY},
+ {"Package C9", MSR_PKG_C9_RESIDENCY},
+ {"Package C10", MSR_PKG_C10_RESIDENCY},
+ {}
+};
+
+static inline u32 pmc_core_reg_read(struct pmc *pmc, int reg_offset)
+{
+ return readl(pmc->regbase + reg_offset);
+}
+
+static inline void pmc_core_reg_write(struct pmc *pmc, int reg_offset,
+ u32 val)
+{
+ writel(val, pmc->regbase + reg_offset);
+}
+
+static inline u64 pmc_core_adjust_slp_s0_step(struct pmc *pmc, u32 value)
+{
+ /*
+ * The ADL PCH does not have the SLP_S0 counter; the LPM residency
+ * counters, which use a 30.5 usec tick, are used as a workaround.
+ * All other client platforms have the legacy SLP_S0 residency
+ * counter, which uses a 122 usec tick.
+ */
+ const int lpm_adj_x2 = pmc->map->lpm_res_counter_step_x2;
+
+ if (pmc->map == &adl_reg_map)
+ return (u64)value * GET_X2_COUNTER((u64)lpm_adj_x2);
+ else
+ return (u64)value * pmc->map->slp_s0_res_counter_step;
+}
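+
+/*
+ * Worked example for the conversion above (illustrative numbers only):
+ * with the TGL step of 0x7A (122 usec), a raw counter value of 1000
+ * yields 1000 * 122 = 122000 usec. On the ADL path the x2 step is
+ * halved before the multiply (61 >> 1 == 30), so the half microsecond
+ * of each tick is truncated and 1000 ticks report as 30000 usec.
+ */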
+
+static int set_etr3(struct pmc_dev *pmcdev)
+{
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ const struct pmc_reg_map *map = pmc->map;
+ u32 reg;
+ int err;
+
+ if (!map->etr3_offset)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&pmcdev->lock);
+
+ /* check if CF9 is locked */
+ reg = pmc_core_reg_read(pmc, map->etr3_offset);
+ if (reg & ETR3_CF9LOCK) {
+ err = -EACCES;
+ goto out_unlock;
+ }
+
+ /* write CF9 global reset bit */
+ reg |= ETR3_CF9GR;
+ pmc_core_reg_write(pmc, map->etr3_offset, reg);
+
+ reg = pmc_core_reg_read(pmc, map->etr3_offset);
+ if (!(reg & ETR3_CF9GR)) {
+ err = -EIO;
+ goto out_unlock;
+ }
+
+ err = 0;
+
+out_unlock:
+ mutex_unlock(&pmcdev->lock);
+ return err;
+}
+
+static umode_t etr3_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ const struct pmc_reg_map *map = pmc->map;
+ u32 reg;
+
+ mutex_lock(&pmcdev->lock);
+ reg = pmc_core_reg_read(pmc, map->etr3_offset);
+ mutex_unlock(&pmcdev->lock);
+
+ return reg & ETR3_CF9LOCK ? attr->mode & (SYSFS_PREALLOC | 0444) : attr->mode;
+}
+
+static ssize_t etr3_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ const struct pmc_reg_map *map = pmc->map;
+ u32 reg;
+
+ if (!map->etr3_offset)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&pmcdev->lock);
+
+ reg = pmc_core_reg_read(pmc, map->etr3_offset);
+ reg &= ETR3_CF9GR | ETR3_CF9LOCK;
+
+ mutex_unlock(&pmcdev->lock);
+
+ return sysfs_emit(buf, "0x%08x", reg);
+}
+
+static ssize_t etr3_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+ int err;
+ u32 reg;
+
+ err = kstrtouint(buf, 16, &reg);
+ if (err)
+ return err;
+
+ /* allow only CF9 writes */
+ if (reg != ETR3_CF9GR)
+ return -EINVAL;
+
+ err = set_etr3(pmcdev);
+ if (err)
+ return err;
+
+ return len;
+}
+
+static DEVICE_ATTR_RW(etr3);
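+
+/*
+ * Example use from user space (sketch; the exact sysfs path depends on
+ * how the platform device is enumerated):
+ *
+ *   # cat /sys/devices/platform/INT33A1:00/etr3
+ *   0x00000000
+ *   # echo 0x100000 > /sys/devices/platform/INT33A1:00/etr3
+ *
+ * The store handler accepts only the ETR3_CF9GR value (BIT(20), i.e.
+ * 0x100000); writing it triggers a CF9 global reset unless CF9 is locked.
+ */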
+
+static struct attribute *pmc_attrs[] = {
+ &dev_attr_etr3.attr,
+ NULL
+};
+
+static const struct attribute_group pmc_attr_group = {
+ .attrs = pmc_attrs,
+ .is_visible = etr3_is_visible,
+};
+
+static const struct attribute_group *pmc_dev_groups[] = {
+ &pmc_attr_group,
+ NULL
+};
+
+static int pmc_core_dev_state_get(void *data, u64 *val)
+{
+ struct pmc *pmc = data;
+ const struct pmc_reg_map *map = pmc->map;
+ u32 value;
+
+ value = pmc_core_reg_read(pmc, map->slp_s0_offset);
+ *val = pmc_core_adjust_slp_s0_step(pmc, value);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n");
+
+static int pmc_core_check_read_lock_bit(struct pmc *pmc)
+{
+ u32 value;
+
+ value = pmc_core_reg_read(pmc, pmc->map->pm_cfg_offset);
+ return value & BIT(pmc->map->pm_read_disable_bit);
+}
+
+static void pmc_core_slps0_display(struct pmc *pmc, struct device *dev,
+ struct seq_file *s)
+{
+ const struct pmc_bit_map **maps = pmc->map->slps0_dbg_maps;
+ const struct pmc_bit_map *map;
+ int offset = pmc->map->slps0_dbg_offset;
+ u32 data;
+
+ while (*maps) {
+ map = *maps;
+ data = pmc_core_reg_read(pmc, offset);
+ offset += 4;
+ while (map->name) {
+ if (dev)
+ dev_info(dev, "SLP_S0_DBG: %-32s\tState: %s\n",
+ map->name,
+ data & map->bit_mask ? "Yes" : "No");
+ if (s)
+ seq_printf(s, "SLP_S0_DBG: %-32s\tState: %s\n",
+ map->name,
+ data & map->bit_mask ? "Yes" : "No");
+ ++map;
+ }
+ ++maps;
+ }
+}
+
+static int pmc_core_lpm_get_arr_size(const struct pmc_bit_map **maps)
+{
+ int idx;
+
+ for (idx = 0; maps[idx]; idx++)
+ ; /* Nothing */
+
+ return idx;
+}
+
+static void pmc_core_lpm_display(struct pmc *pmc, struct device *dev,
+ struct seq_file *s, u32 offset, int pmc_index,
+ const char *str,
+ const struct pmc_bit_map **maps)
+{
+ int index, idx, len = 32, bit_mask, arr_size;
+ u32 *lpm_regs;
+
+ arr_size = pmc_core_lpm_get_arr_size(maps);
+ lpm_regs = kmalloc_array(arr_size, sizeof(*lpm_regs), GFP_KERNEL);
+ if (!lpm_regs)
+ return;
+
+ for (index = 0; index < arr_size; index++) {
+ lpm_regs[index] = pmc_core_reg_read(pmc, offset);
+ offset += 4;
+ }
+
+ for (idx = 0; idx < arr_size; idx++) {
+ if (dev)
+ dev_info(dev, "\nPMC%d:LPM_%s_%d:\t0x%x\n", pmc_index, str, idx,
+ lpm_regs[idx]);
+ if (s)
+ seq_printf(s, "\nPMC%d:LPM_%s_%d:\t0x%x\n", pmc_index, str, idx,
+ lpm_regs[idx]);
+ for (index = 0; index < len && maps[idx][index].name; index++) {
+ bit_mask = maps[idx][index].bit_mask;
+ if (dev)
+ dev_info(dev, "PMC%d:%-30s %-30d\n", pmc_index,
+ maps[idx][index].name,
+ lpm_regs[idx] & bit_mask ? 1 : 0);
+ if (s)
+ seq_printf(s, "PMC%d:%-30s %-30d\n", pmc_index,
+ maps[idx][index].name,
+ lpm_regs[idx] & bit_mask ? 1 : 0);
+ }
+ }
+
+ kfree(lpm_regs);
+}
+
+static bool slps0_dbg_latch;
+
+static inline u8 pmc_core_reg_read_byte(struct pmc *pmc, int offset)
+{
+ return readb(pmc->regbase + offset);
+}
+
+static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip,
+ int pmc_index, u8 pf_reg, const struct pmc_bit_map **pf_map)
+{
+ seq_printf(s, "PMC%d:PCH IP: %-2d - %-32s\tState: %s\n",
+ pmc_index, ip, pf_map[idx][index].name,
+ pf_map[idx][index].bit_mask & pf_reg ? "Off" : "On");
+}
+
+static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
+ struct pmc *pmc = pmcdev->pmcs[i];
+ const struct pmc_bit_map **maps;
+ u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES];
+ int index, iter, idx, ip = 0;
+
+ if (!pmc)
+ continue;
+
+ maps = pmc->map->pfear_sts;
+ iter = pmc->map->ppfear0_offset;
+
+ for (index = 0; index < pmc->map->ppfear_buckets &&
+ index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
+ pf_regs[index] = pmc_core_reg_read_byte(pmc, iter);
+
+ for (idx = 0; maps[idx]; idx++) {
+ for (index = 0; maps[idx][index].name &&
+ index < pmc->map->ppfear_buckets * 8; ip++, index++)
+ pmc_core_display_map(s, index, idx, ip, i,
+ pf_regs[index / 8], maps);
+ }
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_ppfear);
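+
+/*
+ * Sample pch_ip_power_gating_status output line (illustrative):
+ *
+ *   PMC0:PCH IP:  0 - PMC                              State: On
+ *
+ * "Off" means the corresponding PPFEAR power-gating ack bit is set.
+ */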
+
+/* Return the MTPMC link status; 0 means the mailbox is ready */
+static int pmc_core_mtpmc_link_status(struct pmc *pmc)
+{
+ u32 value;
+
+ value = pmc_core_reg_read(pmc, SPT_PMC_PM_STS_OFFSET);
+ return value & BIT(SPT_PMC_MSG_FULL_STS_BIT);
+}
+
+static int pmc_core_send_msg(struct pmc *pmc, u32 *addr_xram)
+{
+ u32 dest;
+ int timeout;
+
+ for (timeout = NUM_RETRIES; timeout > 0; timeout--) {
+ if (pmc_core_mtpmc_link_status(pmc) == 0)
+ break;
+ msleep(5);
+ }
+
+ if (timeout <= 0 && pmc_core_mtpmc_link_status(pmc))
+ return -EBUSY;
+
+ dest = (*addr_xram & MTPMC_MASK) | (1U << 1);
+ pmc_core_reg_write(pmc, SPT_PMC_MTPMC_OFFSET, dest);
+ return 0;
+}
+
+static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ const struct pmc_bit_map *map = pmc->map->mphy_sts;
+ u32 mphy_core_reg_low, mphy_core_reg_high;
+ u32 val_low, val_high;
+ int index, err = 0;
+
+ if (pmcdev->pmc_xram_read_bit) {
+ seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
+ return 0;
+ }
+
+ mphy_core_reg_low = (SPT_PMC_MPHY_CORE_STS_0 << 16);
+ mphy_core_reg_high = (SPT_PMC_MPHY_CORE_STS_1 << 16);
+
+ mutex_lock(&pmcdev->lock);
+
+ if (pmc_core_send_msg(pmc, &mphy_core_reg_low) != 0) {
+ err = -EBUSY;
+ goto out_unlock;
+ }
+
+ msleep(10);
+ val_low = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET);
+
+ if (pmc_core_send_msg(pmc, &mphy_core_reg_high) != 0) {
+ err = -EBUSY;
+ goto out_unlock;
+ }
+
+ msleep(10);
+ val_high = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET);
+
+ for (index = 0; index < 8 && map[index].name; index++) {
+ seq_printf(s, "%-32s\tState: %s\n",
+ map[index].name,
+ map[index].bit_mask & val_low ? "Not power gated" :
+ "Power gated");
+ }
+
+ for (index = 8; map[index].name; index++) {
+ seq_printf(s, "%-32s\tState: %s\n",
+ map[index].name,
+ map[index].bit_mask & val_high ? "Not power gated" :
+ "Power gated");
+ }
+
+out_unlock:
+ mutex_unlock(&pmcdev->lock);
+ return err;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_mphy_pg);
+
+static int pmc_core_pll_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ const struct pmc_bit_map *map = pmc->map->pll_sts;
+ u32 mphy_common_reg, val;
+ int index, err = 0;
+
+ if (pmcdev->pmc_xram_read_bit) {
+ seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
+ return 0;
+ }
+
+ mphy_common_reg = (SPT_PMC_MPHY_COM_STS_0 << 16);
+ mutex_lock(&pmcdev->lock);
+
+ if (pmc_core_send_msg(pmc, &mphy_common_reg) != 0) {
+ err = -EBUSY;
+ goto out_unlock;
+ }
+
+ /* Observed PMC HW response latency for MTPMC-MFPMC is ~10 ms */
+ msleep(10);
+ val = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET);
+
+ for (index = 0; map[index].name ; index++) {
+ seq_printf(s, "%-32s\tState: %s\n",
+ map[index].name,
+ map[index].bit_mask & val ? "Active" : "Idle");
+ }
+
+out_unlock:
+ mutex_unlock(&pmcdev->lock);
+ return err;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
+
+int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore)
+{
+ struct pmc *pmc;
+ const struct pmc_reg_map *map;
+ u32 reg;
+ int pmc_index, ltr_index;
+
+ ltr_index = value;
+ /*
+ * For platforms with multiple PMCs, the LTR index value given by the
+ * user is based on the contiguous indexes from the ltr_show output.
+ * The PMC index and the LTR index need to be calculated from it.
+ */
+ for (pmc_index = 0; pmc_index < ARRAY_SIZE(pmcdev->pmcs) && ltr_index >= 0; pmc_index++) {
+ pmc = pmcdev->pmcs[pmc_index];
+
+ if (!pmc)
+ continue;
+
+ map = pmc->map;
+ if (ltr_index <= map->ltr_ignore_max)
+ break;
+
+ /*
+ * Along with the IP names, the ltr_show map includes the
+ * CURRENT_PLATFORM and AGGREGATED_SYSTEM values for each PMC.
+ * Account for these two extra indexes in the ltr_index
+ * calculation, and subtract 1 so that the LTR index for the
+ * next PMC starts from zero.
+ */
+ ltr_index = ltr_index - (map->ltr_ignore_max + 2) - 1;
+ }
+
+ if (pmc_index >= ARRAY_SIZE(pmcdev->pmcs) || ltr_index < 0)
+ return -EINVAL;
+
+ pr_debug("ltr_ignore for pmc%d: ltr_index:%d\n", pmc_index, ltr_index);
+
+ mutex_lock(&pmcdev->lock);
+
+ reg = pmc_core_reg_read(pmc, map->ltr_ignore_offset);
+ if (ignore)
+ reg |= BIT(ltr_index);
+ else
+ reg &= ~BIT(ltr_index);
+ pmc_core_reg_write(pmc, map->ltr_ignore_offset, reg);
+
+ mutex_unlock(&pmcdev->lock);
+
+ return 0;
+}
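+
+/*
+ * Worked example for the index translation above (hypothetical two-PMC
+ * system where both PMCs have ltr_ignore_max == 23): a user value of 28
+ * first fails the check on PMC0 (28 > 23), is reduced to
+ * 28 - (23 + 2) - 1 == 2, and then matches PMC1, so bit 2 of PMC1's
+ * LTR_IGNORE register is set.
+ */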
+
+static ssize_t pmc_core_ltr_ignore_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct pmc_dev *pmcdev = s->private;
+ u32 buf_size, value;
+ int err;
+
+ buf_size = min_t(u32, count, 64);
+
+ err = kstrtou32_from_user(userbuf, buf_size, 10, &value);
+ if (err)
+ return err;
+
+ err = pmc_core_send_ltr_ignore(pmcdev, value, 1);
+
+ return err == 0 ? count : err;
+}
+
+static int pmc_core_ltr_ignore_show(struct seq_file *s, void *unused)
+{
+ return 0;
+}
+
+static int pmc_core_ltr_ignore_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pmc_core_ltr_ignore_show, inode->i_private);
+}
+
+static const struct file_operations pmc_core_ltr_ignore_ops = {
+ .open = pmc_core_ltr_ignore_open,
+ .read = seq_read,
+ .write = pmc_core_ltr_ignore_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void pmc_core_slps0_dbg_latch(struct pmc_dev *pmcdev, bool reset)
+{
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ const struct pmc_reg_map *map = pmc->map;
+ u32 fd;
+
+ mutex_lock(&pmcdev->lock);
+
+ if (!reset && !slps0_dbg_latch)
+ goto out_unlock;
+
+ fd = pmc_core_reg_read(pmc, map->slps0_dbg_offset);
+ if (reset)
+ fd &= ~CNP_PMC_LATCH_SLPS0_EVENTS;
+ else
+ fd |= CNP_PMC_LATCH_SLPS0_EVENTS;
+ pmc_core_reg_write(pmc, map->slps0_dbg_offset, fd);
+
+ slps0_dbg_latch = false;
+
+out_unlock:
+ mutex_unlock(&pmcdev->lock);
+}
+
+static int pmc_core_slps0_dbg_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+
+ pmc_core_slps0_dbg_latch(pmcdev, false);
+ pmc_core_slps0_display(pmcdev->pmcs[PMC_IDX_MAIN], NULL, s);
+ pmc_core_slps0_dbg_latch(pmcdev, true);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_slps0_dbg);
+
+static u32 convert_ltr_scale(u32 val)
+{
+ /*
+ * As per PCIE specification supporting document
+ * ECN_LatencyTolnReporting_14Aug08.pdf the Latency
+ * Tolerance Reporting data payload is encoded in a
+ * 3 bit scale and 10 bit value fields. Values are
+ * multiplied by the indicated scale to yield an absolute time
+ * value, expressible in a range from 1 nanosecond to
+ * 2^25*(2^10-1) = 34,326,183,936 nanoseconds.
+ *
+ * scale encoding is as follows:
+ *
+ * ----------------------------------------------
+ * |scale factor | Multiplier (ns) |
+ * ----------------------------------------------
+ * | 0 | 1 |
+ * | 1 | 32 |
+ * | 2 | 1024 |
+ * | 3 | 32768 |
+ * | 4 | 1048576 |
+ * | 5 | 33554432 |
+ * | 6 | Invalid |
+ * | 7 | Invalid |
+ * ----------------------------------------------
+ */
+ if (val > 5) {
+ pr_warn("Invalid LTR scale factor.\n");
+ return 0;
+ }
+
+ return 1U << (5 * val);
+}
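+
+/*
+ * Example: scale 2 returns 1 << 10 == 1024, so a payload value of 100 at
+ * scale 2 represents 100 * 1024 = 102400 ns.
+ */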
+
+static int pmc_core_ltr_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ u64 decoded_snoop_ltr, decoded_non_snoop_ltr;
+ u32 ltr_raw_data, scale, val;
+ u16 snoop_ltr, nonsnoop_ltr;
+ int i, index, ltr_index = 0;
+
+ for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
+ struct pmc *pmc = pmcdev->pmcs[i];
+ const struct pmc_bit_map *map;
+
+ if (!pmc)
+ continue;
+
+ map = pmc->map->ltr_show_sts;
+ for (index = 0; map[index].name; index++) {
+ decoded_snoop_ltr = decoded_non_snoop_ltr = 0;
+ ltr_raw_data = pmc_core_reg_read(pmc,
+ map[index].bit_mask);
+ snoop_ltr = ltr_raw_data & ~MTPMC_MASK;
+ nonsnoop_ltr = (ltr_raw_data >> 0x10) & ~MTPMC_MASK;
+
+ if (FIELD_GET(LTR_REQ_NONSNOOP, ltr_raw_data)) {
+ scale = FIELD_GET(LTR_DECODED_SCALE, nonsnoop_ltr);
+ val = FIELD_GET(LTR_DECODED_VAL, nonsnoop_ltr);
+ decoded_non_snoop_ltr = val * convert_ltr_scale(scale);
+ }
+ if (FIELD_GET(LTR_REQ_SNOOP, ltr_raw_data)) {
+ scale = FIELD_GET(LTR_DECODED_SCALE, snoop_ltr);
+ val = FIELD_GET(LTR_DECODED_VAL, snoop_ltr);
+ decoded_snoop_ltr = val * convert_ltr_scale(scale);
+ }
+
+ seq_printf(s, "%d\tPMC%d:%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\n",
+ ltr_index, i, map[index].name, ltr_raw_data,
+ decoded_non_snoop_ltr,
+ decoded_snoop_ltr);
+ ltr_index++;
+ }
+ }
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr);
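+
+/*
+ * Decode example for the loop above (made-up register value): a raw LTR
+ * value of 0x88648864 has both LTR_REQ_SNOOP (bit 15) and LTR_REQ_NONSNOOP
+ * (bit 31) set; each 16-bit half carries val == 100 (bits 9:0) and
+ * scale == 2 (bits 12:10), so both requirements decode to 102400 ns.
+ */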
+
+static inline u64 adjust_lpm_residency(struct pmc *pmc, u32 offset,
+ const int lpm_adj_x2)
+{
+ u64 lpm_res = pmc_core_reg_read(pmc, offset);
+
+ return GET_X2_COUNTER((u64)lpm_adj_x2 * lpm_res);
+}
+
+static int pmc_core_substate_res_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ const int lpm_adj_x2 = pmc->map->lpm_res_counter_step_x2;
+ u32 offset = pmc->map->lpm_residency_offset;
+ int i, mode;
+
+ seq_printf(s, "%-10s %-15s\n", "Substate", "Residency");
+
+ pmc_for_each_mode(i, mode, pmcdev) {
+ seq_printf(s, "%-10s %-15llu\n", pmc_lpm_modes[mode],
+ adjust_lpm_residency(pmc, offset + (4 * mode), lpm_adj_x2));
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_res);
+
+static int pmc_core_substate_sts_regs_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
+ struct pmc *pmc = pmcdev->pmcs[i];
+ const struct pmc_bit_map **maps;
+ u32 offset;
+
+ if (!pmc)
+ continue;
+ maps = pmc->map->lpm_sts;
+ offset = pmc->map->lpm_status_offset;
+ pmc_core_lpm_display(pmc, NULL, s, offset, i, "STATUS", maps);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_sts_regs);
+
+static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
+ struct pmc *pmc = pmcdev->pmcs[i];
+ const struct pmc_bit_map **maps;
+ u32 offset;
+
+ if (!pmc)
+ continue;
+ maps = pmc->map->lpm_sts;
+ offset = pmc->map->lpm_live_status_offset;
+ pmc_core_lpm_display(pmc, NULL, s, offset, i, "LIVE_STATUS", maps);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_l_sts_regs);
+
+static void pmc_core_substate_req_header_show(struct seq_file *s)
+{
+ struct pmc_dev *pmcdev = s->private;
+ int i, mode;
+
+ seq_printf(s, "%30s |", "Element");
+ pmc_for_each_mode(i, mode, pmcdev)
+ seq_printf(s, " %9s |", pmc_lpm_modes[mode]);
+
+ seq_printf(s, " %9s |\n", "Status");
+}
+
+static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ const struct pmc_bit_map **maps = pmc->map->lpm_sts;
+ const struct pmc_bit_map *map;
+ const int num_maps = pmc->map->lpm_num_maps;
+ u32 sts_offset = pmc->map->lpm_status_offset;
+ u32 *lpm_req_regs = pmc->lpm_req_regs;
+ int mp;
+
+ /* Display the header */
+ pmc_core_substate_req_header_show(s);
+
+ /* Loop over maps */
+ for (mp = 0; mp < num_maps; mp++) {
+ u32 req_mask = 0;
+ u32 lpm_status;
+ int mode, idx, i, len = 32;
+
+ /*
+ * Capture the requirements and create a mask so that we only
+ * show an element if it's required for at least one of the
+ * enabled low power modes
+ */
+ pmc_for_each_mode(idx, mode, pmcdev)
+ req_mask |= lpm_req_regs[mp + (mode * num_maps)];
+
+ /* Get the last latched status for this map */
+ lpm_status = pmc_core_reg_read(pmc, sts_offset + (mp * 4));
+
+ /* Loop over elements in this map */
+ map = maps[mp];
+ for (i = 0; i < len && map[i].name; i++) {
+ u32 bit_mask = map[i].bit_mask;
+
+ if (!(bit_mask & req_mask))
+ /*
+ * Not required for any enabled states
+ * so don't display
+ */
+ continue;
+
+ /* Display the element name in the first column */
+ seq_printf(s, "%30s |", map[i].name);
+
+ /* Loop over the enabled states and display if required */
+ pmc_for_each_mode(idx, mode, pmcdev) {
+ if (lpm_req_regs[mp + (mode * num_maps)] & bit_mask)
+ seq_printf(s, " %9s |",
+ "Required");
+ else
+ seq_printf(s, " %9s |", " ");
+ }
+
+ /* In Status column, show the last captured state of this agent */
+ if (lpm_status & bit_mask)
+ seq_printf(s, " %9s |", "Yes");
+ else
+ seq_printf(s, " %9s |", " ");
+
+ seq_puts(s, "\n");
+ }
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_req_regs);
+
+static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ bool c10;
+ u32 reg;
+ int idx, mode;
+
+ reg = pmc_core_reg_read(pmc, pmc->map->lpm_sts_latch_en_offset);
+ if (reg & LPM_STS_LATCH_MODE) {
+ seq_puts(s, "c10");
+ c10 = false;
+ } else {
+ seq_puts(s, "[c10]");
+ c10 = true;
+ }
+
+ pmc_for_each_mode(idx, mode, pmcdev) {
+ if ((BIT(mode) & reg) && !c10)
+ seq_printf(s, " [%s]", pmc_lpm_modes[mode]);
+ else
+ seq_printf(s, " %s", pmc_lpm_modes[mode]);
+ }
+
+ seq_puts(s, " clear\n");
+
+ return 0;
+}
+
+static ssize_t pmc_core_lpm_latch_mode_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct pmc_dev *pmcdev = s->private;
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ bool clear = false, c10 = false;
+ unsigned char buf[8];
+ int idx, m, mode;
+ u32 reg;
+
+ if (count > sizeof(buf) - 1)
+ return -EINVAL;
+ if (copy_from_user(buf, userbuf, count))
+ return -EFAULT;
+ buf[count] = '\0';
+
+ /*
+ * Allowed strings are:
+ * Any enabled substate, e.g. 'S0i2.0'
+ * 'c10'
+ * 'clear'
+ */
+ mode = sysfs_match_string(pmc_lpm_modes, buf);
+
+ /* Check string matches enabled mode */
+ pmc_for_each_mode(idx, m, pmcdev)
+ if (mode == m)
+ break;
+
+ if (mode != m || mode < 0) {
+ if (sysfs_streq(buf, "clear"))
+ clear = true;
+ else if (sysfs_streq(buf, "c10"))
+ c10 = true;
+ else
+ return -EINVAL;
+ }
+
+ if (clear) {
+ mutex_lock(&pmcdev->lock);
+
+ reg = pmc_core_reg_read(pmc, pmc->map->etr3_offset);
+ reg |= ETR3_CLEAR_LPM_EVENTS;
+ pmc_core_reg_write(pmc, pmc->map->etr3_offset, reg);
+
+ mutex_unlock(&pmcdev->lock);
+
+ return count;
+ }
+
+ if (c10) {
+ mutex_lock(&pmcdev->lock);
+
+ reg = pmc_core_reg_read(pmc, pmc->map->lpm_sts_latch_en_offset);
+ reg &= ~LPM_STS_LATCH_MODE;
+ pmc_core_reg_write(pmc, pmc->map->lpm_sts_latch_en_offset, reg);
+
+ mutex_unlock(&pmcdev->lock);
+
+ return count;
+ }
+
+ /*
+ * For LPM mode latching we set the latch enable bit and selected mode
+ * and clear everything else.
+ */
+ reg = LPM_STS_LATCH_MODE | BIT(mode);
+ mutex_lock(&pmcdev->lock);
+ pmc_core_reg_write(pmc, pmc->map->lpm_sts_latch_en_offset, reg);
+ mutex_unlock(&pmcdev->lock);
+
+ return count;
+}
+DEFINE_PMC_CORE_ATTR_WRITE(pmc_core_lpm_latch_mode);
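+
+/*
+ * Example debugfs interaction (sketch; the substate names depend on what
+ * the platform has enabled):
+ *
+ *   # cat /sys/kernel/debug/pmc_core/lpm_latch_mode
+ *   [c10] S0i2.0 S0i3.0 clear
+ *   # echo S0i3.0 > /sys/kernel/debug/pmc_core/lpm_latch_mode
+ *
+ * After the write, the latched status registers reflect the last entry
+ * into S0i3.0 rather than the last C10 entry.
+ */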
+
+static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
+{
+ struct pmc *pmc = s->private;
+ const struct pmc_bit_map *map = pmc->map->msr_sts;
+ u64 pcstate_count;
+ int index;
+
+ for (index = 0; map[index].name ; index++) {
+ if (rdmsrl_safe(map[index].bit_mask, &pcstate_count))
+ continue;
+
+ pcstate_count *= 1000;
+ do_div(pcstate_count, tsc_khz);
+ seq_printf(s, "%-8s : %llu\n", map[index].name,
+ pcstate_count);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_pkgc);
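+
+/*
+ * The package C-state residency MSRs tick at the TSC rate, so the
+ * conversion above yields microseconds. Example (illustrative numbers):
+ * with tsc_khz == 2000000 (a 2 GHz TSC), a residency count of
+ * 4000000000 prints as 4000000000 * 1000 / 2000000 = 2000000 us.
+ */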
+
+static bool pmc_core_pri_verify(u32 lpm_pri, u8 *mode_order)
+{
+ int i, j;
+
+ if (!lpm_pri)
+ return false;
+ /*
+ * Each byte contains the priority level for 2 modes (7:4 and 3:0).
+ * In a 32 bit register this allows for describing 8 modes. Store the
+ * levels and look for values out of range.
+ */
+ for (i = 0; i < 8; i++) {
+ int level = lpm_pri & GENMASK(3, 0);
+
+ if (level >= LPM_MAX_NUM_MODES)
+ return false;
+
+ mode_order[i] = level;
+ lpm_pri >>= 4;
+ }
+
+ /* Check that we have unique values */
+ for (i = 0; i < LPM_MAX_NUM_MODES - 1; i++)
+ for (j = i + 1; j < LPM_MAX_NUM_MODES; j++)
+ if (mode_order[i] == mode_order[j])
+ return false;
+
+ return true;
+}
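+
+/*
+ * Worked example (made-up register value): lpm_pri == 0x76543210 yields,
+ * reading nibbles from bits 3:0 upward, mode_order = {0, 1, 2, 3, 4, 5,
+ * 6, 7}; all levels are in range and unique, so the function returns
+ * true. A value such as 0x76543211 would fail the uniqueness check
+ * because two modes share level 1.
+ */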
+
+static void pmc_core_get_low_power_modes(struct platform_device *pdev)
+{
+ struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ u8 pri_order[LPM_MAX_NUM_MODES] = LPM_DEFAULT_PRI;
+ u8 mode_order[LPM_MAX_NUM_MODES];
+ u32 lpm_pri;
+ u32 lpm_en;
+ int mode, i, p;
+
+ /* Use LPM Maps to indicate support for substates */
+ if (!pmc->map->lpm_num_maps)
+ return;
+
+ lpm_en = pmc_core_reg_read(pmc, pmc->map->lpm_en_offset);
+ /*
+ * For MTL, BIT 31 is not an LPM mode but an enable bit. The lower
+ * byte is enough to cover the number of LPM modes on all platforms,
+ * hence mask off the upper 3 bytes.
+ */
+ pmcdev->num_lpm_modes = hweight32(lpm_en & 0xFF);
+
+ /* Read 32 bit LPM_PRI register */
+ lpm_pri = pmc_core_reg_read(pmc, pmc->map->lpm_priority_offset);
+
+ /*
+ * If lpm_pri value passes verification, then override the default
+ * modes here. Otherwise stick with the default.
+ */
+ if (pmc_core_pri_verify(lpm_pri, mode_order))
+ /* Get list of modes in priority order */
+ for (mode = 0; mode < LPM_MAX_NUM_MODES; mode++)
+ pri_order[mode_order[mode]] = mode;
+ else
+ dev_warn(&pdev->dev, "Assuming a default substate order for this platform\n");
+
+ /*
+ * Loop through all modes from lowest to highest priority,
+ * and capture all enabled modes in order
+ */
+ i = 0;
+ for (p = LPM_MAX_NUM_MODES - 1; p >= 0; p--) {
+ int mode = pri_order[p];
+
+ if (!(BIT(mode) & lpm_en))
+ continue;
+
+ pmcdev->lpm_en_modes[i++] = mode;
+ }
+}
+
+int get_primary_reg_base(struct pmc *pmc)
+{
+ u64 slp_s0_addr;
+
+ if (lpit_read_residency_count_address(&slp_s0_addr)) {
+ pmc->base_addr = PMC_BASE_ADDR_DEFAULT;
+
+ if (page_is_ram(PHYS_PFN(pmc->base_addr)))
+ return -ENODEV;
+ } else {
+ pmc->base_addr = slp_s0_addr - pmc->map->slp_s0_offset;
+ }
+
+ pmc->regbase = ioremap(pmc->base_addr, pmc->map->regmap_length);
+ if (!pmc->regbase)
+ return -ENOMEM;
+ return 0;
+}
+
+static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
+{
+ debugfs_remove_recursive(pmcdev->dbgfs_dir);
+}
+
+static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
+{
+ struct pmc *primary_pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("pmc_core", NULL);
+ pmcdev->dbgfs_dir = dir;
+
+ debugfs_create_file("slp_s0_residency_usec", 0444, dir, primary_pmc,
+ &pmc_core_dev_state);
+
+ if (primary_pmc->map->pfear_sts)
+ debugfs_create_file("pch_ip_power_gating_status", 0444, dir,
+ pmcdev, &pmc_core_ppfear_fops);
+
+ debugfs_create_file("ltr_ignore", 0644, dir, pmcdev,
+ &pmc_core_ltr_ignore_ops);
+
+ debugfs_create_file("ltr_show", 0444, dir, pmcdev, &pmc_core_ltr_fops);
+
+ debugfs_create_file("package_cstate_show", 0444, dir, primary_pmc,
+ &pmc_core_pkgc_fops);
+
+ if (primary_pmc->map->pll_sts)
+ debugfs_create_file("pll_status", 0444, dir, pmcdev,
+ &pmc_core_pll_fops);
+
+ if (primary_pmc->map->mphy_sts)
+ debugfs_create_file("mphy_core_lanes_power_gating_status",
+ 0444, dir, pmcdev,
+ &pmc_core_mphy_pg_fops);
+
+ if (primary_pmc->map->slps0_dbg_maps) {
+ debugfs_create_file("slp_s0_debug_status", 0444,
+ dir, pmcdev,
+ &pmc_core_slps0_dbg_fops);
+
+ debugfs_create_bool("slp_s0_dbg_latch", 0644,
+ dir, &slps0_dbg_latch);
+ }
+
+ if (primary_pmc->map->lpm_en_offset) {
+ debugfs_create_file("substate_residencies", 0444,
+ pmcdev->dbgfs_dir, pmcdev,
+ &pmc_core_substate_res_fops);
+ }
+
+ if (primary_pmc->map->lpm_status_offset) {
+ debugfs_create_file("substate_status_registers", 0444,
+ pmcdev->dbgfs_dir, pmcdev,
+ &pmc_core_substate_sts_regs_fops);
+ debugfs_create_file("substate_live_status_registers", 0444,
+ pmcdev->dbgfs_dir, pmcdev,
+ &pmc_core_substate_l_sts_regs_fops);
+ debugfs_create_file("lpm_latch_mode", 0644,
+ pmcdev->dbgfs_dir, pmcdev,
+ &pmc_core_lpm_latch_mode_fops);
+ }
+
+ if (primary_pmc->lpm_req_regs) {
+ debugfs_create_file("substate_requirements", 0444,
+ pmcdev->dbgfs_dir, pmcdev,
+ &pmc_core_substate_req_regs_fops);
+ }
+}
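+
+/*
+ * Typical read-only use of the interface registered above (sketch):
+ *
+ *   # cat /sys/kernel/debug/pmc_core/slp_s0_residency_usec
+ *   123456
+ *
+ * The value is the step-adjusted SLP_S0 residency in microseconds.
+ */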
+
+static const struct x86_cpu_id intel_pmc_core_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, spt_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, spt_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, spt_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, spt_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, cnp_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, icl_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, icl_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, cnp_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, cnp_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, tgl_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, tgl_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, tgl_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, icl_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, tgl_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, tgl_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, tgl_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, adl_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, tgl_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, adl_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, adl_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, mtl_core_init),
+ {}
+};
+
+MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids);
+
+static const struct pci_device_id pmc_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) },
+ { }
+};
+
+/*
+ * This quirk can be used on those platforms where the platform BIOS
+ * forces the 24 MHz crystal to shut down before the PMC can assert
+ * SLP_S0#.
+ */
+static bool xtal_ignore;
+static int quirk_xtal_ignore(const struct dmi_system_id *id)
+{
+ xtal_ignore = true;
+ return 0;
+}
+
+static void pmc_core_xtal_ignore(struct pmc *pmc)
+{
+ u32 value;
+
+ value = pmc_core_reg_read(pmc, pmc->map->pm_vric1_offset);
+ /* 24MHz Crystal Shutdown Qualification Disable */
+ value |= SPT_PMC_VRIC1_XTALSDQDIS;
+ /* Low Voltage Mode Enable */
+ value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
+ pmc_core_reg_write(pmc, pmc->map->pm_vric1_offset, value);
+}
+
+static const struct dmi_system_id pmc_core_dmi_table[] = {
+ {
+ .callback = quirk_xtal_ignore,
+ .ident = "HP Elite x2 1013 G3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite x2 1013 G3"),
+ },
+ },
+ {}
+};
+
+static void pmc_core_do_dmi_quirks(struct pmc *pmc)
+{
+ dmi_check_system(pmc_core_dmi_table);
+
+ if (xtal_ignore)
+ pmc_core_xtal_ignore(pmc);
+}
+
+static void pmc_core_clean_structure(struct platform_device *pdev)
+{
+ struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
+ struct pmc *pmc = pmcdev->pmcs[i];
+
+ if (pmc)
+ iounmap(pmc->regbase);
+ }
+
+ if (pmcdev->ssram_pcidev) {
+ pci_dev_put(pmcdev->ssram_pcidev);
+ pci_disable_device(pmcdev->ssram_pcidev);
+ }
+ platform_set_drvdata(pdev, NULL);
+ mutex_destroy(&pmcdev->lock);
+}
+
+static int pmc_core_probe(struct platform_device *pdev)
+{
+ static bool device_initialized;
+ struct pmc_dev *pmcdev;
+ const struct x86_cpu_id *cpu_id;
+ int (*core_init)(struct pmc_dev *pmcdev);
+ struct pmc *primary_pmc;
+ int ret;
+
+ if (device_initialized)
+ return -ENODEV;
+
+ pmcdev = devm_kzalloc(&pdev->dev, sizeof(*pmcdev), GFP_KERNEL);
+ if (!pmcdev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, pmcdev);
+ pmcdev->pdev = pdev;
+
+ cpu_id = x86_match_cpu(intel_pmc_core_ids);
+ if (!cpu_id)
+ return -ENODEV;
+
+ core_init = (int (*)(struct pmc_dev *))cpu_id->driver_data;
+
+ /* Primary PMC */
+ primary_pmc = devm_kzalloc(&pdev->dev, sizeof(*primary_pmc), GFP_KERNEL);
+ if (!primary_pmc)
+ return -ENOMEM;
+ pmcdev->pmcs[PMC_IDX_MAIN] = primary_pmc;
+
+ /*
+ * Coffee Lake has CPU ID of Kaby Lake and Cannon Lake PCH. So here
+ * Sunrisepoint PCH regmap can't be used. Use Cannon Lake PCH regmap
+ * in this case.
+ */
+ if (core_init == spt_core_init && !pci_dev_present(pmc_pci_ids))
+ core_init = cnp_core_init;
+
+ mutex_init(&pmcdev->lock);
+ ret = core_init(pmcdev);
+ if (ret) {
+ pmc_core_clean_structure(pdev);
+ return ret;
+ }
+
+ pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(primary_pmc);
+ pmc_core_get_low_power_modes(pdev);
+ pmc_core_do_dmi_quirks(primary_pmc);
+
+ pmc_core_dbgfs_register(pmcdev);
+ pm_report_max_hw_sleep(FIELD_MAX(SLP_S0_RES_COUNTER_MASK) *
+ pmc_core_adjust_slp_s0_step(primary_pmc, 1));
+
+ device_initialized = true;
+ dev_info(&pdev->dev, "initialized\n");
+
+ return 0;
+}
+
+static void pmc_core_remove(struct platform_device *pdev)
+{
+ struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
+
+ pmc_core_dbgfs_unregister(pmcdev);
+ pmc_core_clean_structure(pdev);
+}
+
+static bool warn_on_s0ix_failures;
+module_param(warn_on_s0ix_failures, bool, 0644);
+MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures");
+
+static __maybe_unused int pmc_core_suspend(struct device *dev)
+{
+ struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+
+ if (pmcdev->suspend)
+ pmcdev->suspend(pmcdev);
+
+ /* Check if the suspend will actually use S0ix */
+ if (pm_suspend_via_firmware())
+ return 0;
+
+ /* Save PC10 residency for checking later */
+ if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pmcdev->pc10_counter))
+ return -EIO;
+
+ /* Save S0ix residency for checking later */
+ if (pmc_core_dev_state_get(pmc, &pmcdev->s0ix_counter))
+ return -EIO;
+
+ return 0;
+}
+
+static inline bool pmc_core_is_pc10_failed(struct pmc_dev *pmcdev)
+{
+ u64 pc10_counter;
+
+ if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pc10_counter))
+ return false;
+
+ if (pc10_counter == pmcdev->pc10_counter)
+ return true;
+
+ return false;
+}
+
+static inline bool pmc_core_is_s0ix_failed(struct pmc_dev *pmcdev)
+{
+ u64 s0ix_counter;
+
+ if (pmc_core_dev_state_get(pmcdev->pmcs[PMC_IDX_MAIN], &s0ix_counter))
+ return false;
+
+ pm_report_hw_sleep_time((u32)(s0ix_counter - pmcdev->s0ix_counter));
+
+ if (s0ix_counter == pmcdev->s0ix_counter)
+ return true;
+
+ return false;
+}
+
+int pmc_core_resume_common(struct pmc_dev *pmcdev)
+{
+ struct device *dev = &pmcdev->pdev->dev;
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ const struct pmc_bit_map **maps = pmc->map->lpm_sts;
+ int offset = pmc->map->lpm_status_offset;
+ int i;
+
+ /* Check if the suspend actually used S0ix */
+ if (pm_suspend_via_firmware())
+ return 0;
+
+ if (!pmc_core_is_s0ix_failed(pmcdev))
+ return 0;
+
+ if (!warn_on_s0ix_failures)
+ return 0;
+
+ if (pmc_core_is_pc10_failed(pmcdev)) {
+ /* S0ix failed because of PC10 entry failure */
+ dev_info(dev, "CPU did not enter PC10!!! (PC10 cnt=0x%llx)\n",
+ pmcdev->pc10_counter);
+ return 0;
+ }
+
+ /* The really interesting case - S0ix failed - let's ask the PMC why. */
+ dev_warn(dev, "CPU did not enter SLP_S0!!! (S0ix cnt=%llu)\n",
+ pmcdev->s0ix_counter);
+
+ if (pmc->map->slps0_dbg_maps)
+ pmc_core_slps0_display(pmc, dev, NULL);
+
+ for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
+ struct pmc *pmc = pmcdev->pmcs[i];
+
+ if (!pmc)
+ continue;
+ if (pmc->map->lpm_sts)
+ pmc_core_lpm_display(pmc, dev, NULL, offset, i, "STATUS", maps);
+ }
+
+ return 0;
+}
+
+static __maybe_unused int pmc_core_resume(struct device *dev)
+{
+ struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+
+ if (pmcdev->resume)
+ return pmcdev->resume(pmcdev);
+
+ return pmc_core_resume_common(pmcdev);
+}
+
+static const struct dev_pm_ops pmc_core_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pmc_core_suspend, pmc_core_resume)
+};
+
+static const struct acpi_device_id pmc_core_acpi_ids[] = {
+ {"INT33A1", 0}, /* _HID for Intel Power Engine, _CID PNP0D80*/
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, pmc_core_acpi_ids);
+
+static struct platform_driver pmc_core_driver = {
+ .driver = {
+ .name = "intel_pmc_core",
+ .acpi_match_table = ACPI_PTR(pmc_core_acpi_ids),
+ .pm = &pmc_core_pm_ops,
+ .dev_groups = pmc_dev_groups,
+ },
+ .probe = pmc_core_probe,
+ .remove_new = pmc_core_remove,
+};
+
+module_platform_driver(pmc_core_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel PMC Core Driver");
diff --git a/drivers/platform/x86/intel/pmc/core.h b/drivers/platform/x86/intel/pmc/core.h
new file mode 100644
index 0000000000..b66dacbfb9
--- /dev/null
+++ b/drivers/platform/x86/intel/pmc/core.h
@@ -0,0 +1,527 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel Core SoC Power Management Controller Header File
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
+ * Vishwanath Somayaji <vishwanath.somayaji@intel.com>
+ */
+
+#ifndef PMC_CORE_H
+#define PMC_CORE_H
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/platform_device.h>
+
+#define SLP_S0_RES_COUNTER_MASK GENMASK(31, 0)
+
+#define PMC_BASE_ADDR_DEFAULT 0xFE000000
+#define MAX_NUM_PMC 3
+
+/* Sunrise Point Power Management Controller PCI Device ID */
+#define SPT_PMC_PCI_DEVICE_ID 0x9d21
+#define SPT_PMC_BASE_ADDR_OFFSET 0x48
+#define SPT_PMC_SLP_S0_RES_COUNTER_OFFSET 0x13c
+#define SPT_PMC_PM_CFG_OFFSET 0x18
+#define SPT_PMC_PM_STS_OFFSET 0x1c
+#define SPT_PMC_MTPMC_OFFSET 0x20
+#define SPT_PMC_MFPMC_OFFSET 0x38
+#define SPT_PMC_LTR_IGNORE_OFFSET 0x30C
+#define SPT_PMC_VRIC1_OFFSET 0x31c
+#define SPT_PMC_MPHY_CORE_STS_0 0x1143
+#define SPT_PMC_MPHY_CORE_STS_1 0x1142
+#define SPT_PMC_MPHY_COM_STS_0 0x1155
+#define SPT_PMC_MMIO_REG_LEN 0x1000
+#define SPT_PMC_SLP_S0_RES_COUNTER_STEP 0x68
+#define PMC_BASE_ADDR_MASK ~(SPT_PMC_MMIO_REG_LEN - 1)
+#define MTPMC_MASK 0xffff0000
+#define PPFEAR_MAX_NUM_ENTRIES 12
+#define SPT_PPFEAR_NUM_ENTRIES 5
+#define SPT_PMC_READ_DISABLE_BIT 0x16
+#define SPT_PMC_MSG_FULL_STS_BIT 0x18
+#define NUM_RETRIES 100
+#define SPT_NUM_IP_IGN_ALLOWED 17
+
+#define SPT_PMC_LTR_CUR_PLT 0x350
+#define SPT_PMC_LTR_CUR_ASLT 0x354
+#define SPT_PMC_LTR_SPA 0x360
+#define SPT_PMC_LTR_SPB 0x364
+#define SPT_PMC_LTR_SATA 0x368
+#define SPT_PMC_LTR_GBE 0x36C
+#define SPT_PMC_LTR_XHCI 0x370
+#define SPT_PMC_LTR_RESERVED 0x374
+#define SPT_PMC_LTR_ME 0x378
+#define SPT_PMC_LTR_EVA 0x37C
+#define SPT_PMC_LTR_SPC 0x380
+#define SPT_PMC_LTR_AZ 0x384
+#define SPT_PMC_LTR_LPSS 0x38C
+#define SPT_PMC_LTR_CAM 0x390
+#define SPT_PMC_LTR_SPD 0x394
+#define SPT_PMC_LTR_SPE 0x398
+#define SPT_PMC_LTR_ESPI 0x39C
+#define SPT_PMC_LTR_SCC 0x3A0
+#define SPT_PMC_LTR_ISH 0x3A4
+
+/* Sunrise Point: PGD PFET Enable Ack Status Registers */
+enum ppfear_regs {
+ SPT_PMC_XRAM_PPFEAR0A = 0x590,
+ SPT_PMC_XRAM_PPFEAR0B,
+ SPT_PMC_XRAM_PPFEAR0C,
+ SPT_PMC_XRAM_PPFEAR0D,
+ SPT_PMC_XRAM_PPFEAR1A,
+};
+
+#define SPT_PMC_BIT_PMC BIT(0)
+#define SPT_PMC_BIT_OPI BIT(1)
+#define SPT_PMC_BIT_SPI BIT(2)
+#define SPT_PMC_BIT_XHCI BIT(3)
+#define SPT_PMC_BIT_SPA BIT(4)
+#define SPT_PMC_BIT_SPB BIT(5)
+#define SPT_PMC_BIT_SPC BIT(6)
+#define SPT_PMC_BIT_GBE BIT(7)
+
+#define SPT_PMC_BIT_SATA BIT(0)
+#define SPT_PMC_BIT_HDA_PGD0 BIT(1)
+#define SPT_PMC_BIT_HDA_PGD1 BIT(2)
+#define SPT_PMC_BIT_HDA_PGD2 BIT(3)
+#define SPT_PMC_BIT_HDA_PGD3 BIT(4)
+#define SPT_PMC_BIT_RSVD_0B BIT(5)
+#define SPT_PMC_BIT_LPSS BIT(6)
+#define SPT_PMC_BIT_LPC BIT(7)
+
+#define SPT_PMC_BIT_SMB BIT(0)
+#define SPT_PMC_BIT_ISH BIT(1)
+#define SPT_PMC_BIT_P2SB BIT(2)
+#define SPT_PMC_BIT_DFX BIT(3)
+#define SPT_PMC_BIT_SCC BIT(4)
+#define SPT_PMC_BIT_RSVD_0C BIT(5)
+#define SPT_PMC_BIT_FUSE BIT(6)
+#define SPT_PMC_BIT_CAMREA BIT(7)
+
+#define SPT_PMC_BIT_RSVD_0D BIT(0)
+#define SPT_PMC_BIT_USB3_OTG BIT(1)
+#define SPT_PMC_BIT_EXI BIT(2)
+#define SPT_PMC_BIT_CSE BIT(3)
+#define SPT_PMC_BIT_CSME_KVM BIT(4)
+#define SPT_PMC_BIT_CSME_PMT BIT(5)
+#define SPT_PMC_BIT_CSME_CLINK BIT(6)
+#define SPT_PMC_BIT_CSME_PTIO BIT(7)
+
+#define SPT_PMC_BIT_CSME_USBR BIT(0)
+#define SPT_PMC_BIT_CSME_SUSRAM BIT(1)
+#define SPT_PMC_BIT_CSME_SMT BIT(2)
+#define SPT_PMC_BIT_RSVD_1A BIT(3)
+#define SPT_PMC_BIT_CSME_SMS2 BIT(4)
+#define SPT_PMC_BIT_CSME_SMS1 BIT(5)
+#define SPT_PMC_BIT_CSME_RTC BIT(6)
+#define SPT_PMC_BIT_CSME_PSF BIT(7)
+
+#define SPT_PMC_BIT_MPHY_LANE0 BIT(0)
+#define SPT_PMC_BIT_MPHY_LANE1 BIT(1)
+#define SPT_PMC_BIT_MPHY_LANE2 BIT(2)
+#define SPT_PMC_BIT_MPHY_LANE3 BIT(3)
+#define SPT_PMC_BIT_MPHY_LANE4 BIT(4)
+#define SPT_PMC_BIT_MPHY_LANE5 BIT(5)
+#define SPT_PMC_BIT_MPHY_LANE6 BIT(6)
+#define SPT_PMC_BIT_MPHY_LANE7 BIT(7)
+
+#define SPT_PMC_BIT_MPHY_LANE8 BIT(0)
+#define SPT_PMC_BIT_MPHY_LANE9 BIT(1)
+#define SPT_PMC_BIT_MPHY_LANE10 BIT(2)
+#define SPT_PMC_BIT_MPHY_LANE11 BIT(3)
+#define SPT_PMC_BIT_MPHY_LANE12 BIT(4)
+#define SPT_PMC_BIT_MPHY_LANE13 BIT(5)
+#define SPT_PMC_BIT_MPHY_LANE14 BIT(6)
+#define SPT_PMC_BIT_MPHY_LANE15 BIT(7)
+
+#define SPT_PMC_BIT_MPHY_CMN_LANE0 BIT(0)
+#define SPT_PMC_BIT_MPHY_CMN_LANE1 BIT(1)
+#define SPT_PMC_BIT_MPHY_CMN_LANE2 BIT(2)
+#define SPT_PMC_BIT_MPHY_CMN_LANE3 BIT(3)
+
+#define SPT_PMC_VRIC1_SLPS0LVEN BIT(13)
+#define SPT_PMC_VRIC1_XTALSDQDIS BIT(22)
+
+/* Cannonlake Power Management Controller register offsets */
+#define CNP_PMC_SLPS0_DBG_OFFSET 0x10B4
+#define CNP_PMC_PM_CFG_OFFSET 0x1818
+#define CNP_PMC_SLP_S0_RES_COUNTER_OFFSET 0x193C
+#define CNP_PMC_LTR_IGNORE_OFFSET 0x1B0C
+/* Cannonlake: PGD PFET Enable Ack Status Register(s) start */
+#define CNP_PMC_HOST_PPFEAR0A 0x1D90
+
+#define CNP_PMC_LATCH_SLPS0_EVENTS BIT(31)
+
+#define CNP_PMC_MMIO_REG_LEN 0x2000
+#define CNP_PPFEAR_NUM_ENTRIES 8
+#define CNP_PMC_READ_DISABLE_BIT 22
+#define CNP_NUM_IP_IGN_ALLOWED 19
+#define CNP_PMC_LTR_CUR_PLT 0x1B50
+#define CNP_PMC_LTR_CUR_ASLT 0x1B54
+#define CNP_PMC_LTR_SPA 0x1B60
+#define CNP_PMC_LTR_SPB 0x1B64
+#define CNP_PMC_LTR_SATA 0x1B68
+#define CNP_PMC_LTR_GBE 0x1B6C
+#define CNP_PMC_LTR_XHCI 0x1B70
+#define CNP_PMC_LTR_RESERVED 0x1B74
+#define CNP_PMC_LTR_ME 0x1B78
+#define CNP_PMC_LTR_EVA 0x1B7C
+#define CNP_PMC_LTR_SPC 0x1B80
+#define CNP_PMC_LTR_AZ 0x1B84
+#define CNP_PMC_LTR_LPSS 0x1B8C
+#define CNP_PMC_LTR_CAM 0x1B90
+#define CNP_PMC_LTR_SPD 0x1B94
+#define CNP_PMC_LTR_SPE 0x1B98
+#define CNP_PMC_LTR_ESPI 0x1B9C
+#define CNP_PMC_LTR_SCC 0x1BA0
+#define CNP_PMC_LTR_ISH 0x1BA4
+#define CNP_PMC_LTR_CNV 0x1BF0
+#define CNP_PMC_LTR_EMMC 0x1BF4
+#define CNP_PMC_LTR_UFSX2 0x1BF8
+
+#define LTR_DECODED_VAL GENMASK(9, 0)
+#define LTR_DECODED_SCALE GENMASK(12, 10)
+#define LTR_REQ_SNOOP BIT(15)
+#define LTR_REQ_NONSNOOP BIT(31)
+
+#define ICL_PPFEAR_NUM_ENTRIES 9
+#define ICL_NUM_IP_IGN_ALLOWED 20
+#define ICL_PMC_LTR_WIGIG 0x1BFC
+#define ICL_PMC_SLP_S0_RES_COUNTER_STEP 0x64
+
+#define LPM_MAX_NUM_MODES 8
+#define LPM_DEFAULT_PRI { 7, 6, 2, 5, 4, 1, 3, 0 }
+
+#define GET_X2_COUNTER(v) ((v) >> 1)
+#define LPM_STS_LATCH_MODE BIT(31)
+
+#define TGL_PMC_SLP_S0_RES_COUNTER_STEP 0x7A
+#define TGL_PMC_LTR_THC0 0x1C04
+#define TGL_PMC_LTR_THC1 0x1C08
+#define TGL_NUM_IP_IGN_ALLOWED 23
+#define TGL_PMC_LPM_RES_COUNTER_STEP_X2 61 /* 30.5us * 2 */
+
+#define ADL_PMC_LTR_SPF 0x1C00
+#define ADL_NUM_IP_IGN_ALLOWED 23
+#define ADL_PMC_SLP_S0_RES_COUNTER_OFFSET 0x1098
+
+/*
+ * Tigerlake Power Management Controller register offsets
+ */
+#define TGL_LPM_STS_LATCH_EN_OFFSET 0x1C34
+#define TGL_LPM_EN_OFFSET 0x1C78
+#define TGL_LPM_RESIDENCY_OFFSET 0x1C80
+
+/* Tigerlake Low Power Mode debug registers */
+#define TGL_LPM_STATUS_OFFSET 0x1C3C
+#define TGL_LPM_LIVE_STATUS_OFFSET 0x1C5C
+#define TGL_LPM_PRI_OFFSET 0x1C7C
+#define TGL_LPM_NUM_MAPS 6
+
+/* Extended Test Mode Register 3 (CNL and later) */
+#define ETR3_OFFSET 0x1048
+#define ETR3_CF9GR BIT(20)
+#define ETR3_CF9LOCK BIT(31)
+
+/* Extended Test Mode Register LPM bits (TGL and later) */
+#define ETR3_CLEAR_LPM_EVENTS BIT(28)
+
+/* Alder Lake Power Management Controller register offsets */
+#define ADL_LPM_EN_OFFSET 0x179C
+#define ADL_LPM_RESIDENCY_OFFSET 0x17A4
+#define ADL_LPM_NUM_MODES 2
+#define ADL_LPM_NUM_MAPS 14
+
+/* Alder Lake Low Power Mode debug registers */
+#define ADL_LPM_STATUS_OFFSET 0x170C
+#define ADL_LPM_PRI_OFFSET 0x17A0
+#define ADL_LPM_STATUS_LATCH_EN_OFFSET 0x1704
+#define ADL_LPM_LIVE_STATUS_OFFSET 0x1764
+
+/* Meteor Lake Power Management Controller register offsets */
+#define MTL_LPM_EN_OFFSET 0x1798
+#define MTL_LPM_RESIDENCY_OFFSET 0x17A0
+
+/* Meteor Lake Low Power Mode debug registers */
+#define MTL_LPM_PRI_OFFSET 0x179C
+#define MTL_LPM_STATUS_LATCH_EN_OFFSET 0x16F8
+#define MTL_LPM_STATUS_OFFSET 0x1700
+#define MTL_LPM_LIVE_STATUS_OFFSET 0x175C
+#define MTL_PMC_LTR_IOE_PMC 0x1C0C
+#define MTL_PMC_LTR_ESE 0x1BAC
+#define MTL_PMC_LTR_RESERVED 0x1BA4
+#define MTL_IOE_PMC_MMIO_REG_LEN 0x23A4
+#define MTL_SOCM_NUM_IP_IGN_ALLOWED 25
+#define MTL_SOC_PMC_MMIO_REG_LEN 0x2708
+#define MTL_PMC_LTR_SPG 0x1B74
+
+/* Meteor Lake PGD PFET Enable Ack Status */
+#define MTL_SOCM_PPFEAR_NUM_ENTRIES 8
+#define MTL_IOE_PPFEAR_NUM_ENTRIES 10
+
+extern const char *pmc_lpm_modes[];
+
+struct pmc_bit_map {
+ const char *name;
+ u32 bit_mask;
+};
+
+/**
+ * struct pmc_reg_map - Structure used to define the parameters unique to a
+ *			PCH family
+ * @pfear_sts: Maps name of IP block to PPFEAR* bit
+ * @mphy_sts: Maps name of MPHY lane to MPHY status lane status bit
+ * @pll_sts: Maps name of PLL to corresponding bit status
+ * @slps0_dbg_maps: Array of SLP_S0_DBG* registers containing debug info
+ * @ltr_show_sts: Maps PCH IP Names to their MMIO register offsets
+ * @slp_s0_offset: PWRMBASE offset to read SLP_S0 residency
+ * @ltr_ignore_offset: PWRMBASE offset to read/write LTR ignore bit
+ * @regmap_length: Length of memory to map from PWRMBASE address to access
+ * @ppfear0_offset: PWRMBASE offset to read PPFEAR*
+ * @ppfear_buckets: Number of 8 bits blocks to read all IP blocks from
+ * PPFEAR
+ * @pm_cfg_offset: PWRMBASE offset to PM_CFG register
+ * @pm_read_disable_bit: Bit index to read PMC_READ_DISABLE
+ * @slps0_dbg_offset: PWRMBASE offset to SLP_S0_DEBUG_REG*
+ *
+ * Each PCH has unique set of register offsets and bit indexes. This structure
+ * captures them to have a common implementation.
+ */
+struct pmc_reg_map {
+ const struct pmc_bit_map **pfear_sts;
+ const struct pmc_bit_map *mphy_sts;
+ const struct pmc_bit_map *pll_sts;
+ const struct pmc_bit_map **slps0_dbg_maps;
+ const struct pmc_bit_map *ltr_show_sts;
+ const struct pmc_bit_map *msr_sts;
+ const struct pmc_bit_map **lpm_sts;
+ const u32 slp_s0_offset;
+ const int slp_s0_res_counter_step;
+ const u32 ltr_ignore_offset;
+ const int regmap_length;
+ const u32 ppfear0_offset;
+ const int ppfear_buckets;
+ const u32 pm_cfg_offset;
+ const int pm_read_disable_bit;
+ const u32 slps0_dbg_offset;
+ const u32 ltr_ignore_max;
+ const u32 pm_vric1_offset;
+ /* Low Power Mode registers */
+ const int lpm_num_maps;
+ const int lpm_num_modes;
+ const int lpm_res_counter_step_x2;
+ const u32 lpm_sts_latch_en_offset;
+ const u32 lpm_en_offset;
+ const u32 lpm_priority_offset;
+ const u32 lpm_residency_offset;
+ const u32 lpm_status_offset;
+ const u32 lpm_live_status_offset;
+ const u32 etr3_offset;
+};
+
+/**
+ * struct pmc_info - Structure to keep pmc info
+ * @devid: device id of the pmc device
+ * @map: pointer to a pmc_reg_map struct that contains platform
+ * specific attributes
+ */
+struct pmc_info {
+ u16 devid;
+ const struct pmc_reg_map *map;
+};
+
+/**
+ * struct pmc - pmc private info structure
+ * @base_addr: contains pmc base address
+ * @regbase: pointer to io-remapped memory location
+ * @map: pointer to pmc_reg_map struct that contains platform
+ * specific attributes
+ * @lpm_req_regs: List of substate requirements
+ *
+ * pmc contains info about one power management controller device.
+ */
+struct pmc {
+ u64 base_addr;
+ void __iomem *regbase;
+ const struct pmc_reg_map *map;
+ u32 *lpm_req_regs;
+};
+
+/**
+ * struct pmc_dev - pmc device structure
+ * @devs: pointer to an array of pmc pointers
+ * @pdev: pointer to platform_device struct
+ * @ssram_pcidev: pointer to pci device struct for the PMC SSRAM
+ * @dbgfs_dir: path to debugfs interface
+ * @pmc_xram_read_bit: flag to indicate whether PMC XRAM shadow registers
+ * used to read MPHY PG and PLL status are available
+ * @lock:		mutex to complete one transaction
+ * @pc10_counter: PC10 residency counter
+ * @s0ix_counter: S0ix residency (step adjusted)
+ * @num_lpm_modes: Count of enabled modes
+ * @lpm_en_modes: Array of enabled modes from lowest to highest priority
+ * @suspend: Function to perform platform specific suspend
+ * @resume: Function to perform platform specific resume
+ *
+ * pmc_dev contains info about the power management controller device.
+ */
+struct pmc_dev {
+ struct pmc *pmcs[MAX_NUM_PMC];
+ struct dentry *dbgfs_dir;
+ struct platform_device *pdev;
+ struct pci_dev *ssram_pcidev;
+ int pmc_xram_read_bit;
+ struct mutex lock; /* generic mutex lock for PMC Core */
+
+ u64 pc10_counter;
+ u64 s0ix_counter;
+ int num_lpm_modes;
+ int lpm_en_modes[LPM_MAX_NUM_MODES];
+ void (*suspend)(struct pmc_dev *pmcdev);
+ int (*resume)(struct pmc_dev *pmcdev);
+
+ bool has_die_c6;
+ u32 die_c6_offset;
+ struct telem_endpoint *punit_ep;
+ struct pmc_info *regmap_list;
+};
+
+enum pmc_index {
+ PMC_IDX_MAIN,
+ PMC_IDX_SOC = PMC_IDX_MAIN,
+ PMC_IDX_IOE,
+ PMC_IDX_PCH,
+ PMC_IDX_MAX
+};
+
+extern const struct pmc_bit_map msr_map[];
+extern const struct pmc_bit_map spt_pll_map[];
+extern const struct pmc_bit_map spt_mphy_map[];
+extern const struct pmc_bit_map spt_pfear_map[];
+extern const struct pmc_bit_map *ext_spt_pfear_map[];
+extern const struct pmc_bit_map spt_ltr_show_map[];
+extern const struct pmc_reg_map spt_reg_map;
+extern const struct pmc_bit_map cnp_pfear_map[];
+extern const struct pmc_bit_map *ext_cnp_pfear_map[];
+extern const struct pmc_bit_map cnp_slps0_dbg0_map[];
+extern const struct pmc_bit_map cnp_slps0_dbg1_map[];
+extern const struct pmc_bit_map cnp_slps0_dbg2_map[];
+extern const struct pmc_bit_map *cnp_slps0_dbg_maps[];
+extern const struct pmc_bit_map cnp_ltr_show_map[];
+extern const struct pmc_reg_map cnp_reg_map;
+extern const struct pmc_bit_map icl_pfear_map[];
+extern const struct pmc_bit_map *ext_icl_pfear_map[];
+extern const struct pmc_reg_map icl_reg_map;
+extern const struct pmc_bit_map tgl_pfear_map[];
+extern const struct pmc_bit_map *ext_tgl_pfear_map[];
+extern const struct pmc_bit_map tgl_clocksource_status_map[];
+extern const struct pmc_bit_map tgl_power_gating_status_map[];
+extern const struct pmc_bit_map tgl_d3_status_map[];
+extern const struct pmc_bit_map tgl_vnn_req_status_map[];
+extern const struct pmc_bit_map tgl_vnn_misc_status_map[];
+extern const struct pmc_bit_map tgl_signal_status_map[];
+extern const struct pmc_bit_map *tgl_lpm_maps[];
+extern const struct pmc_reg_map tgl_reg_map;
+extern const struct pmc_bit_map adl_pfear_map[];
+extern const struct pmc_bit_map *ext_adl_pfear_map[];
+extern const struct pmc_bit_map adl_ltr_show_map[];
+extern const struct pmc_bit_map adl_clocksource_status_map[];
+extern const struct pmc_bit_map adl_power_gating_status_0_map[];
+extern const struct pmc_bit_map adl_power_gating_status_1_map[];
+extern const struct pmc_bit_map adl_power_gating_status_2_map[];
+extern const struct pmc_bit_map adl_d3_status_0_map[];
+extern const struct pmc_bit_map adl_d3_status_1_map[];
+extern const struct pmc_bit_map adl_d3_status_2_map[];
+extern const struct pmc_bit_map adl_d3_status_3_map[];
+extern const struct pmc_bit_map adl_vnn_req_status_0_map[];
+extern const struct pmc_bit_map adl_vnn_req_status_1_map[];
+extern const struct pmc_bit_map adl_vnn_req_status_2_map[];
+extern const struct pmc_bit_map adl_vnn_req_status_3_map[];
+extern const struct pmc_bit_map adl_vnn_misc_status_map[];
+extern const struct pmc_bit_map *adl_lpm_maps[];
+extern const struct pmc_reg_map adl_reg_map;
+extern const struct pmc_bit_map mtl_socm_pfear_map[];
+extern const struct pmc_bit_map *ext_mtl_socm_pfear_map[];
+extern const struct pmc_bit_map mtl_socm_ltr_show_map[];
+extern const struct pmc_bit_map mtl_socm_clocksource_status_map[];
+extern const struct pmc_bit_map mtl_socm_power_gating_status_0_map[];
+extern const struct pmc_bit_map mtl_socm_power_gating_status_1_map[];
+extern const struct pmc_bit_map mtl_socm_power_gating_status_2_map[];
+extern const struct pmc_bit_map mtl_socm_d3_status_0_map[];
+extern const struct pmc_bit_map mtl_socm_d3_status_1_map[];
+extern const struct pmc_bit_map mtl_socm_d3_status_2_map[];
+extern const struct pmc_bit_map mtl_socm_d3_status_3_map[];
+extern const struct pmc_bit_map mtl_socm_vnn_req_status_0_map[];
+extern const struct pmc_bit_map mtl_socm_vnn_req_status_1_map[];
+extern const struct pmc_bit_map mtl_socm_vnn_req_status_2_map[];
+extern const struct pmc_bit_map mtl_socm_vnn_req_status_3_map[];
+extern const struct pmc_bit_map mtl_socm_vnn_misc_status_map[];
+extern const struct pmc_bit_map mtl_socm_signal_status_map[];
+extern const struct pmc_bit_map *mtl_socm_lpm_maps[];
+extern const struct pmc_reg_map mtl_socm_reg_map;
+extern const struct pmc_bit_map mtl_ioep_pfear_map[];
+extern const struct pmc_bit_map *ext_mtl_ioep_pfear_map[];
+extern const struct pmc_bit_map mtl_ioep_ltr_show_map[];
+extern const struct pmc_bit_map mtl_ioep_clocksource_status_map[];
+extern const struct pmc_bit_map mtl_ioep_power_gating_status_0_map[];
+extern const struct pmc_bit_map mtl_ioep_power_gating_status_1_map[];
+extern const struct pmc_bit_map mtl_ioep_power_gating_status_2_map[];
+extern const struct pmc_bit_map mtl_ioep_d3_status_0_map[];
+extern const struct pmc_bit_map mtl_ioep_d3_status_1_map[];
+extern const struct pmc_bit_map mtl_ioep_d3_status_2_map[];
+extern const struct pmc_bit_map mtl_ioep_d3_status_3_map[];
+extern const struct pmc_bit_map mtl_ioep_vnn_req_status_0_map[];
+extern const struct pmc_bit_map mtl_ioep_vnn_req_status_1_map[];
+extern const struct pmc_bit_map mtl_ioep_vnn_req_status_2_map[];
+extern const struct pmc_bit_map mtl_ioep_vnn_req_status_3_map[];
+extern const struct pmc_bit_map mtl_ioep_vnn_misc_status_map[];
+extern const struct pmc_bit_map *mtl_ioep_lpm_maps[];
+extern const struct pmc_reg_map mtl_ioep_reg_map;
+extern const struct pmc_bit_map mtl_ioem_pfear_map[];
+extern const struct pmc_bit_map *ext_mtl_ioem_pfear_map[];
+extern const struct pmc_bit_map mtl_ioem_power_gating_status_1_map[];
+extern const struct pmc_bit_map mtl_ioem_vnn_req_status_1_map[];
+extern const struct pmc_bit_map *mtl_ioem_lpm_maps[];
+extern const struct pmc_reg_map mtl_ioem_reg_map;
+
+extern void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev);
+int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore);
+
+int pmc_core_resume_common(struct pmc_dev *pmcdev);
+int get_primary_reg_base(struct pmc *pmc);
+
+extern void pmc_core_ssram_init(struct pmc_dev *pmcdev);
+
+int spt_core_init(struct pmc_dev *pmcdev);
+int cnp_core_init(struct pmc_dev *pmcdev);
+int icl_core_init(struct pmc_dev *pmcdev);
+int tgl_core_init(struct pmc_dev *pmcdev);
+int adl_core_init(struct pmc_dev *pmcdev);
+int mtl_core_init(struct pmc_dev *pmcdev);
+
+void cnl_suspend(struct pmc_dev *pmcdev);
+int cnl_resume(struct pmc_dev *pmcdev);
+
+#define pmc_for_each_mode(i, mode, pmcdev) \
+ for (i = 0, mode = pmcdev->lpm_en_modes[i]; \
+ i < pmcdev->num_lpm_modes; \
+ i++, mode = pmcdev->lpm_en_modes[i])
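+
+/*
+ * Usage sketch for the iterator above (illustrative; real callers live
+ * in core.c):
+ *
+ *	int i, mode;
+ *
+ *	pmc_for_each_mode(i, mode, pmcdev)
+ *		pr_info("enabled mode %d: %s\n", mode, pmc_lpm_modes[mode]);
+ */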
+
+#define DEFINE_PMC_CORE_ATTR_WRITE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __name ## _show, inode->i_private); \
+} \
+ \
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .write = __name ## _write, \
+ .release = single_release, \
+}
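+
+/*
+ * Usage sketch for a hypothetical attribute "foo" (illustrative only):
+ * given foo_show() and foo_write() implementations, the line
+ *
+ *	DEFINE_PMC_CORE_ATTR_WRITE(foo);
+ *
+ * generates foo_open() and foo_fops for a writable debugfs seq_file.
+ */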
+
+#endif /* PMC_CORE_H */
diff --git a/drivers/platform/x86/intel/pmc/core_ssram.c b/drivers/platform/x86/intel/pmc/core_ssram.c
new file mode 100644
index 0000000000..13fa16f0d5
--- /dev/null
+++ b/drivers/platform/x86/intel/pmc/core_ssram.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file contains functions to handle discovery of PMC metrics located
+ * in the PMC SSRAM PCI device.
+ *
+ * Copyright (c) 2023, Intel Corporation.
+ * All Rights Reserved.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+#include "core.h"
+
+#define SSRAM_HDR_SIZE 0x100
+#define SSRAM_PWRM_OFFSET 0x14
+#define SSRAM_DVSEC_OFFSET 0x1C
+#define SSRAM_DVSEC_SIZE 0x10
+#define SSRAM_PCH_OFFSET 0x60
+#define SSRAM_IOE_OFFSET 0x68
+#define SSRAM_DEVID_OFFSET 0x70
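+
+/*
+ * SSRAM header layout implied by the offsets above and the accesses
+ * below (informational sketch, not an authoritative register map):
+ *
+ *	0x14: PWRM base address (64-bit; low 3 bits masked off)
+ *	0x1C: DVSEC offset
+ *	0x60: base of the secondary (PCH) PMC SSRAM header
+ *	0x68: base of the secondary (IOE) PMC SSRAM header
+ *	0x70: PMC PCI device ID (16-bit)
+ */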
+
+static const struct pmc_reg_map *pmc_core_find_regmap(struct pmc_info *list, u16 devid)
+{
+ for (; list->map; ++list)
+ if (devid == list->devid)
+ return list->map;
+
+ return NULL;
+}
+
+static inline u64 get_base(void __iomem *addr, u32 offset)
+{
+ return lo_hi_readq(addr + offset) & GENMASK_ULL(63, 3);
+}
+
+static void
+pmc_core_pmc_add(struct pmc_dev *pmcdev, u64 pwrm_base,
+ const struct pmc_reg_map *reg_map, int pmc_index)
+{
+ struct pmc *pmc = pmcdev->pmcs[pmc_index];
+
+ if (!pwrm_base)
+ return;
+
+ /* Memory for primary PMC has been allocated in core.c */
+ if (!pmc) {
+ pmc = devm_kzalloc(&pmcdev->pdev->dev, sizeof(*pmc), GFP_KERNEL);
+ if (!pmc)
+ return;
+ }
+
+ pmc->map = reg_map;
+ pmc->base_addr = pwrm_base;
+ pmc->regbase = ioremap(pmc->base_addr, pmc->map->regmap_length);
+
+ if (!pmc->regbase) {
+ devm_kfree(&pmcdev->pdev->dev, pmc);
+ return;
+ }
+
+ pmcdev->pmcs[pmc_index] = pmc;
+}
+
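+/*
+ * Map the SSRAM header for the given PMC and register it if a matching
+ * regmap exists. The SoC PMC header is already mapped by the caller;
+ * secondary PMCs are located via the base pointer stored at @offset.
+ */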
+static void
+pmc_core_ssram_get_pmc(struct pmc_dev *pmcdev, void __iomem *ssram, u32 offset,
+ int pmc_idx)
+{
+ u64 pwrm_base;
+ u16 devid;
+
+ if (pmc_idx != PMC_IDX_SOC) {
+ u64 ssram_base = get_base(ssram, offset);
+
+ if (!ssram_base)
+ return;
+
+ ssram = ioremap(ssram_base, SSRAM_HDR_SIZE);
+ if (!ssram)
+ return;
+ }
+
+ pwrm_base = get_base(ssram, SSRAM_PWRM_OFFSET);
+ devid = readw(ssram + SSRAM_DEVID_OFFSET);
+
+ if (pmcdev->regmap_list) {
+ const struct pmc_reg_map *map;
+
+ map = pmc_core_find_regmap(pmcdev->regmap_list, devid);
+ if (map)
+ pmc_core_pmc_add(pmcdev, pwrm_base, map, pmc_idx);
+ }
+
+ if (pmc_idx != PMC_IDX_SOC)
+ iounmap(ssram);
+}
+
+void pmc_core_ssram_init(struct pmc_dev *pmcdev)
+{
+ void __iomem *ssram;
+ struct pci_dev *pcidev;
+ u64 ssram_base;
+ int ret;
+
+ pcidev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(20, 2));
+ if (!pcidev)
+ goto out;
+
+ ret = pcim_enable_device(pcidev);
+ if (ret)
+ goto release_dev;
+
+ ssram_base = pcidev->resource[0].start;
+ ssram = ioremap(ssram_base, SSRAM_HDR_SIZE);
+ if (!ssram)
+ goto disable_dev;
+
+ pmcdev->ssram_pcidev = pcidev;
+
+ pmc_core_ssram_get_pmc(pmcdev, ssram, 0, PMC_IDX_SOC);
+ pmc_core_ssram_get_pmc(pmcdev, ssram, SSRAM_IOE_OFFSET, PMC_IDX_IOE);
+ pmc_core_ssram_get_pmc(pmcdev, ssram, SSRAM_PCH_OFFSET, PMC_IDX_PCH);
+
+ iounmap(ssram);
+out:
+ return;
+
+disable_dev:
+ pci_disable_device(pcidev);
+release_dev:
+ pci_dev_put(pcidev);
+}
diff --git a/drivers/platform/x86/intel/pmc/icl.c b/drivers/platform/x86/intel/pmc/icl.c
new file mode 100644
index 0000000000..d08e317423
--- /dev/null
+++ b/drivers/platform/x86/intel/pmc/icl.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file contains platform specific structure definitions
+ * and init function used by Ice Lake PCH.
+ *
+ * Copyright (c) 2022, Intel Corporation.
+ * All Rights Reserved.
+ *
+ */
+
+#include "core.h"
+
+const struct pmc_bit_map icl_pfear_map[] = {
+ {"RES_65", BIT(0)},
+ {"RES_66", BIT(1)},
+ {"RES_67", BIT(2)},
+ {"TAM", BIT(3)},
+ {"GBETSN", BIT(4)},
+ {"TBTLSX", BIT(5)},
+ {"RES_71", BIT(6)},
+ {"RES_72", BIT(7)},
+ {}
+};
+
+const struct pmc_bit_map *ext_icl_pfear_map[] = {
+ /*
+ * Check intel_pmc_core_ids[] users of icl_reg_map for
+ * a list of core SoCs using this.
+ */
+ cnp_pfear_map,
+ icl_pfear_map,
+ NULL
+};
+
+const struct pmc_reg_map icl_reg_map = {
+ .pfear_sts = ext_icl_pfear_map,
+ .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = ICL_PMC_SLP_S0_RES_COUNTER_STEP,
+ .slps0_dbg_maps = cnp_slps0_dbg_maps,
+ .ltr_show_sts = cnp_ltr_show_map,
+ .msr_sts = msr_map,
+ .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
+ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+ .regmap_length = CNP_PMC_MMIO_REG_LEN,
+ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+ .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES,
+ .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
+ .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED,
+ .etr3_offset = ETR3_OFFSET,
+};
+
+int icl_core_init(struct pmc_dev *pmcdev)
+{
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+
+ pmc->map = &icl_reg_map;
+ return get_primary_reg_base(pmc);
+}
diff --git a/drivers/platform/x86/intel/pmc/mtl.c b/drivers/platform/x86/intel/pmc/mtl.c
new file mode 100644
index 0000000000..504e3e273c
--- /dev/null
+++ b/drivers/platform/x86/intel/pmc/mtl.c
@@ -0,0 +1,1009 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file contains platform specific structure definitions
+ * and init function used by Meteor Lake PCH.
+ *
+ * Copyright (c) 2022, Intel Corporation.
+ * All Rights Reserved.
+ *
+ */
+
+#include <linux/pci.h>
+#include "core.h"
+
+/*
+ * Die Mapping to Product.
+ * Product SOCDie IOEDie PCHDie
+ * MTL-M SOC-M IOE-M None
+ * MTL-P SOC-M IOE-P None
+ * MTL-S SOC-S IOE-P PCH-S
+ */
+
+const struct pmc_bit_map mtl_socm_pfear_map[] = {
+ {"PMC", BIT(0)},
+ {"OPI", BIT(1)},
+ {"SPI", BIT(2)},
+ {"XHCI", BIT(3)},
+ {"SPA", BIT(4)},
+ {"SPB", BIT(5)},
+ {"SPC", BIT(6)},
+ {"GBE", BIT(7)},
+
+ {"SATA", BIT(0)},
+ {"DSP0", BIT(1)},
+ {"DSP1", BIT(2)},
+ {"DSP2", BIT(3)},
+ {"DSP3", BIT(4)},
+ {"SPD", BIT(5)},
+ {"LPSS", BIT(6)},
+ {"LPC", BIT(7)},
+
+ {"SMB", BIT(0)},
+ {"ISH", BIT(1)},
+ {"P2SB", BIT(2)},
+ {"NPK_VNN", BIT(3)},
+ {"SDX", BIT(4)},
+ {"SPE", BIT(5)},
+ {"FUSE", BIT(6)},
+ {"SBR8", BIT(7)},
+
+ {"RSVD24", BIT(0)},
+ {"OTG", BIT(1)},
+ {"EXI", BIT(2)},
+ {"CSE", BIT(3)},
+ {"CSME_KVM", BIT(4)},
+ {"CSME_PMT", BIT(5)},
+ {"CSME_CLINK", BIT(6)},
+ {"CSME_PTIO", BIT(7)},
+
+ {"CSME_USBR", BIT(0)},
+ {"CSME_SUSRAM", BIT(1)},
+ {"CSME_SMT1", BIT(2)},
+ {"RSVD35", BIT(3)},
+ {"CSME_SMS2", BIT(4)},
+ {"CSME_SMS", BIT(5)},
+ {"CSME_RTC", BIT(6)},
+ {"CSME_PSF", BIT(7)},
+
+ {"SBR0", BIT(0)},
+ {"SBR1", BIT(1)},
+ {"SBR2", BIT(2)},
+ {"SBR3", BIT(3)},
+ {"SBR4", BIT(4)},
+ {"SBR5", BIT(5)},
+ {"RSVD46", BIT(6)},
+ {"PSF1", BIT(7)},
+
+ {"PSF2", BIT(0)},
+ {"PSF3", BIT(1)},
+ {"PSF4", BIT(2)},
+ {"CNVI", BIT(3)},
+ {"UFSX2", BIT(4)},
+ {"EMMC", BIT(5)},
+ {"SPF", BIT(6)},
+ {"SBR6", BIT(7)},
+
+ {"SBR7", BIT(0)},
+ {"NPK_AON", BIT(1)},
+ {"HDA4", BIT(2)},
+ {"HDA5", BIT(3)},
+ {"HDA6", BIT(4)},
+ {"PSF6", BIT(5)},
+ {"RSVD62", BIT(6)},
+ {"RSVD63", BIT(7)},
+ {}
+};
+
+const struct pmc_bit_map *ext_mtl_socm_pfear_map[] = {
+ mtl_socm_pfear_map,
+ NULL
+};
+
+const struct pmc_bit_map mtl_socm_ltr_show_map[] = {
+ {"SOUTHPORT_A", CNP_PMC_LTR_SPA},
+ {"SOUTHPORT_B", CNP_PMC_LTR_SPB},
+ {"SATA", CNP_PMC_LTR_SATA},
+ {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE},
+ {"XHCI", CNP_PMC_LTR_XHCI},
+ {"SOUTHPORT_F", ADL_PMC_LTR_SPF},
+ {"ME", CNP_PMC_LTR_ME},
+ {"SATA1", CNP_PMC_LTR_EVA},
+ {"SOUTHPORT_C", CNP_PMC_LTR_SPC},
+ {"HD_AUDIO", CNP_PMC_LTR_AZ},
+ {"CNV", CNP_PMC_LTR_CNV},
+ {"LPSS", CNP_PMC_LTR_LPSS},
+ {"SOUTHPORT_D", CNP_PMC_LTR_SPD},
+ {"SOUTHPORT_E", CNP_PMC_LTR_SPE},
+ {"SATA2", CNP_PMC_LTR_CAM},
+ {"ESPI", CNP_PMC_LTR_ESPI},
+ {"SCC", CNP_PMC_LTR_SCC},
+ {"ISH", CNP_PMC_LTR_ISH},
+ {"UFSX2", CNP_PMC_LTR_UFSX2},
+ {"EMMC", CNP_PMC_LTR_EMMC},
+ {"WIGIG", ICL_PMC_LTR_WIGIG},
+ {"THC0", TGL_PMC_LTR_THC0},
+ {"THC1", TGL_PMC_LTR_THC1},
+ {"SOUTHPORT_G", MTL_PMC_LTR_SPG},
+ {"ESE", MTL_PMC_LTR_ESE},
+ {"IOE_PMC", MTL_PMC_LTR_IOE_PMC},
+
+ /* Below two cannot be used for LTR_IGNORE */
+ {"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT},
+ {"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT},
+ {}
+};
+
+const struct pmc_bit_map mtl_socm_clocksource_status_map[] = {
+ {"AON2_OFF_STS", BIT(0)},
+ {"AON3_OFF_STS", BIT(1)},
+ {"AON4_OFF_STS", BIT(2)},
+ {"AON5_OFF_STS", BIT(3)},
+ {"AON1_OFF_STS", BIT(4)},
+ {"XTAL_LVM_OFF_STS", BIT(5)},
+ {"MPFPW1_0_PLL_OFF_STS", BIT(6)},
+ {"MPFPW1_1_PLL_OFF_STS", BIT(7)},
+ {"USB3_PLL_OFF_STS", BIT(8)},
+ {"AON3_SPL_OFF_STS", BIT(9)},
+ {"MPFPW2_0_PLL_OFF_STS", BIT(12)},
+ {"MPFPW3_0_PLL_OFF_STS", BIT(13)},
+ {"XTAL_AGGR_OFF_STS", BIT(17)},
+ {"USB2_PLL_OFF_STS", BIT(18)},
+ {"FILTER_PLL_OFF_STS", BIT(22)},
+ {"ACE_PLL_OFF_STS", BIT(24)},
+ {"FABRIC_PLL_OFF_STS", BIT(25)},
+ {"SOC_PLL_OFF_STS", BIT(26)},
+ {"PCIFAB_PLL_OFF_STS", BIT(27)},
+ {"REF_PLL_OFF_STS", BIT(28)},
+ {"IMG_PLL_OFF_STS", BIT(29)},
+ {"RTC_PLL_OFF_STS", BIT(31)},
+ {}
+};
+
+const struct pmc_bit_map mtl_socm_power_gating_status_0_map[] = {
+ {"PMC_PGD0_PG_STS", BIT(0)},
+ {"DMI_PGD0_PG_STS", BIT(1)},
+ {"ESPISPI_PGD0_PG_STS", BIT(2)},
+ {"XHCI_PGD0_PG_STS", BIT(3)},
+ {"SPA_PGD0_PG_STS", BIT(4)},
+ {"SPB_PGD0_PG_STS", BIT(5)},
+ {"SPC_PGD0_PG_STS", BIT(6)},
+ {"GBE_PGD0_PG_STS", BIT(7)},
+ {"SATA_PGD0_PG_STS", BIT(8)},
+ {"PSF13_PGD0_PG_STS", BIT(9)},
+ {"SOC_D2D_PGD3_PG_STS", BIT(10)},
+ {"MPFPW3_PGD0_PG_STS", BIT(11)},
+ {"ESE_PGD0_PG_STS", BIT(12)},
+ {"SPD_PGD0_PG_STS", BIT(13)},
+ {"LPSS_PGD0_PG_STS", BIT(14)},
+ {"LPC_PGD0_PG_STS", BIT(15)},
+ {"SMB_PGD0_PG_STS", BIT(16)},
+ {"ISH_PGD0_PG_STS", BIT(17)},
+ {"P2S_PGD0_PG_STS", BIT(18)},
+ {"NPK_PGD0_PG_STS", BIT(19)},
+ {"DBG_SBR_PGD0_PG_STS", BIT(20)},
+ {"SBRG_PGD0_PG_STS", BIT(21)},
+ {"FUSE_PGD0_PG_STS", BIT(22)},
+ {"SBR8_PGD0_PG_STS", BIT(23)},
+ {"SOC_D2D_PGD2_PG_STS", BIT(24)},
+ {"XDCI_PGD0_PG_STS", BIT(25)},
+ {"EXI_PGD0_PG_STS", BIT(26)},
+ {"CSE_PGD0_PG_STS", BIT(27)},
+ {"KVMCC_PGD0_PG_STS", BIT(28)},
+ {"PMT_PGD0_PG_STS", BIT(29)},
+ {"CLINK_PGD0_PG_STS", BIT(30)},
+ {"PTIO_PGD0_PG_STS", BIT(31)},
+ {}
+};
+
+const struct pmc_bit_map mtl_socm_power_gating_status_1_map[] = {
+ {"USBR0_PGD0_PG_STS", BIT(0)},
+ {"SUSRAM_PGD0_PG_STS", BIT(1)},
+ {"SMT1_PGD0_PG_STS", BIT(2)},
+ {"FIACPCB_U_PGD0_PG_STS", BIT(3)},
+ {"SMS2_PGD0_PG_STS", BIT(4)},
+ {"SMS1_PGD0_PG_STS", BIT(5)},
+ {"CSMERTC_PGD0_PG_STS", BIT(6)},
+ {"CSMEPSF_PGD0_PG_STS", BIT(7)},
+ {"SBR0_PGD0_PG_STS", BIT(8)},
+ {"SBR1_PGD0_PG_STS", BIT(9)},
+ {"SBR2_PGD0_PG_STS", BIT(10)},
+ {"SBR3_PGD0_PG_STS", BIT(11)},
+ {"U3FPW1_PGD0_PG_STS", BIT(12)},
+ {"SBR5_PGD0_PG_STS", BIT(13)},
+ {"MPFPW1_PGD0_PG_STS", BIT(14)},
+ {"UFSPW1_PGD0_PG_STS", BIT(15)},
+ {"FIA_X_PGD0_PG_STS", BIT(16)},
+ {"SOC_D2D_PGD0_PG_STS", BIT(17)},
+ {"MPFPW2_PGD0_PG_STS", BIT(18)},
+ {"CNVI_PGD0_PG_STS", BIT(19)},
+ {"UFSX2_PGD0_PG_STS", BIT(20)},
+ {"ENDBG_PGD0_PG_STS", BIT(21)},
+ {"DBG_PSF_PGD0_PG_STS", BIT(22)},
+ {"SBR6_PGD0_PG_STS", BIT(23)},
+ {"SBR7_PGD0_PG_STS", BIT(24)},
+ {"NPK_PGD1_PG_STS", BIT(25)},
+ {"FIACPCB_X_PGD0_PG_STS", BIT(26)},
+ {"DBC_PGD0_PG_STS", BIT(27)},
+ {"FUSEGPSB_PGD0_PG_STS", BIT(28)},
+ {"PSF6_PGD0_PG_STS", BIT(29)},
+ {"PSF7_PGD0_PG_STS", BIT(30)},
+ {"GBETSN1_PGD0_PG_STS", BIT(31)},
+ {}
+};
+
+const struct pmc_bit_map mtl_socm_power_gating_status_2_map[] = {
+ {"PSF8_PGD0_PG_STS", BIT(0)},
+ {"FIA_PGD0_PG_STS", BIT(1)},
+ {"SOC_D2D_PGD1_PG_STS", BIT(2)},
+ {"FIA_U_PGD0_PG_STS", BIT(3)},
+ {"TAM_PGD0_PG_STS", BIT(4)},
+ {"GBETSN_PGD0_PG_STS", BIT(5)},
+ {"TBTLSX_PGD0_PG_STS", BIT(6)},
+ {"THC0_PGD0_PG_STS", BIT(7)},
+ {"THC1_PGD0_PG_STS", BIT(8)},
+ {"PMC_PGD1_PG_STS", BIT(9)},
+ {"GNA_PGD0_PG_STS", BIT(10)},
+ {"ACE_PGD0_PG_STS", BIT(11)},
+ {"ACE_PGD1_PG_STS", BIT(12)},
+ {"ACE_PGD2_PG_STS", BIT(13)},
+ {"ACE_PGD3_PG_STS", BIT(14)},
+ {"ACE_PGD4_PG_STS", BIT(15)},
+ {"ACE_PGD5_PG_STS", BIT(16)},
+ {"ACE_PGD6_PG_STS", BIT(17)},
+ {"ACE_PGD7_PG_STS", BIT(18)},
+ {"ACE_PGD8_PG_STS", BIT(19)},
+ {"FIA_PGS_PGD0_PG_STS", BIT(20)},
+ {"FIACPCB_PGS_PGD0_PG_STS", BIT(21)},
+ {"FUSEPMSB_PGD0_PG_STS", BIT(22)},
+ {}
+};
+
+const struct pmc_bit_map mtl_socm_d3_status_0_map[] = {
+ {"LPSS_D3_STS", BIT(3)},
+ {"XDCI_D3_STS", BIT(4)},
+ {"XHCI_D3_STS", BIT(5)},
+ {"SPA_D3_STS", BIT(12)},
+ {"SPB_D3_STS", BIT(13)},
+ {"SPC_D3_STS", BIT(14)},
+ {"SPD_D3_STS", BIT(15)},
+ {"ESPISPI_D3_STS", BIT(18)},
+ {"SATA_D3_STS", BIT(20)},
+ {"PSTH_D3_STS", BIT(21)},
+ {"DMI_D3_STS", BIT(22)},
+ {}
+};
+
+const struct pmc_bit_map mtl_socm_d3_status_1_map[] = {
+ {"GBETSN1_D3_STS", BIT(14)},
+ {"GBE_D3_STS", BIT(19)},
+ {"ITSS_D3_STS", BIT(23)},
+ {"P2S_D3_STS", BIT(24)},
+ {"CNVI_D3_STS", BIT(27)},
+ {"UFSX2_D3_STS", BIT(28)},
+ {}
+};
+
+const struct pmc_bit_map mtl_socm_d3_status_2_map[] = {
+ {"GNA_D3_STS", BIT(0)},
+ {"CSMERTC_D3_STS", BIT(1)},
+ {"SUSRAM_D3_STS", BIT(2)},
+ {"CSE_D3_STS", BIT(4)},
+ {"KVMCC_D3_STS", BIT(5)},
+ {"USBR0_D3_STS", BIT(6)},
+ {"ISH_D3_STS", BIT(7)},
+ {"SMT1_D3_STS", BIT(8)},
+ {"SMT2_D3_STS", BIT(9)},
+ {"SMT3_D3_STS", BIT(10)},
+ {"CLINK_D3_STS", BIT(14)},
+ {"PTIO_D3_STS", BIT(16)},
+ {"PMT_D3_STS", BIT(17)},
+ {"SMS1_D3_STS", BIT(18)},
+ {"SMS2_D3_STS", BIT(19)},
+ {}
+};
+
+const struct pmc_bit_map mtl_socm_d3_status_3_map[] = {
+ {"ESE_D3_STS", BIT(2)},
+ {"GBETSN_D3_STS", BIT(13)},
+ {"THC0_D3_STS", BIT(14)},
+ {"THC1_D3_STS", BIT(15)},
+ {"ACE_D3_STS", BIT(23)},
+ {}
+};
+
+const struct pmc_bit_map mtl_socm_vnn_req_status_0_map[] = {
+ {"LPSS_VNN_REQ_STS", BIT(3)},
+ {"FIA_VNN_REQ_STS", BIT(17)},
+ {"ESPISPI_VNN_REQ_STS", BIT(18)},
+ {}
+};
+
+const struct pmc_bit_map mtl_socm_vnn_req_status_1_map[] = {
+ {"NPK_VNN_REQ_STS", BIT(4)},
+ {"DFXAGG_VNN_REQ_STS", BIT(8)},
+ {"EXI_VNN_REQ_STS", BIT(9)},
+ {"P2D_VNN_REQ_STS", BIT(18)},
+ {"GBE_VNN_REQ_STS", BIT(19)},
+ {"SMB_VNN_REQ_STS", BIT(25)},
+ {"LPC_VNN_REQ_STS", BIT(26)},
+ {}
+};
+
+const struct pmc_bit_map mtl_socm_vnn_req_status_2_map[] = {
+ {"CSMERTC_VNN_REQ_STS", BIT(1)},
+ {"CSE_VNN_REQ_STS", BIT(4)},
+ {"ISH_VNN_REQ_STS", BIT(7)},
+ {"SMT1_VNN_REQ_STS", BIT(8)},
+ {"CLINK_VNN_REQ_STS", BIT(14)},
+ {"SMS1_VNN_REQ_STS", BIT(18)},
+ {"SMS2_VNN_REQ_STS", BIT(19)},
+ {"GPIOCOM4_VNN_REQ_STS", BIT(20)},
+ {"GPIOCOM3_VNN_REQ_STS", BIT(21)},
+ {"GPIOCOM2_VNN_REQ_STS", BIT(22)},
+ {"GPIOCOM1_VNN_REQ_STS", BIT(23)},
+ {"GPIOCOM0_VNN_REQ_STS", BIT(24)},
+ {}
+};
+
+const struct pmc_bit_map mtl_socm_vnn_req_status_3_map[] = {
+ {"ESE_VNN_REQ_STS", BIT(2)},
+ {"DTS0_VNN_REQ_STS", BIT(7)},
+ {"GPIOCOM5_VNN_REQ_STS", BIT(11)},
+ {}
+};
+
+const struct pmc_bit_map mtl_socm_vnn_misc_status_map[] = {
+ {"CPU_C10_REQ_STS", BIT(0)},
+ {"TS_OFF_REQ_STS", BIT(1)},
+ {"PNDE_MET_REQ_STS", BIT(2)},
+ {"PCIE_DEEP_PM_REQ_STS", BIT(3)},
+ {"PMC_CLK_THROTTLE_EN_REQ_STS", BIT(4)},
+ {"NPK_VNNAON_REQ_STS", BIT(5)},
+ {"VNN_SOC_REQ_STS", BIT(6)},
+ {"ISH_VNNAON_REQ_STS", BIT(7)},
+ {"IOE_COND_MET_S02I2_0_REQ_STS", BIT(8)},
+ {"IOE_COND_MET_S02I2_1_REQ_STS", BIT(9)},
+ {"IOE_COND_MET_S02I2_2_REQ_STS", BIT(10)},
+ {"PLT_GREATER_REQ_STS", BIT(11)},
+ {"PCIE_CLKREQ_REQ_STS", BIT(12)},
+ {"PMC_IDLE_FB_OCP_REQ_STS", BIT(13)},
+ {"PM_SYNC_STATES_REQ_STS", BIT(14)},
+ {"EA_REQ_STS", BIT(15)},
+ {"MPHY_CORE_OFF_REQ_STS", BIT(16)},
+ {"BRK_EV_EN_REQ_STS", BIT(17)},
+ {"AUTO_DEMO_EN_REQ_STS", BIT(18)},
+ {"ITSS_CLK_SRC_REQ_STS", BIT(19)},
+ {"LPC_CLK_SRC_REQ_STS", BIT(20)},
+ {"ARC_IDLE_REQ_STS", BIT(21)},
+ {"MPHY_SUS_REQ_STS", BIT(22)},
+ {"FIA_DEEP_PM_REQ_STS", BIT(23)},
+ {"UXD_CONNECTED_REQ_STS", BIT(24)},
+ {"ARC_INTERRUPT_WAKE_REQ_STS", BIT(25)},
+ {"USB2_VNNAON_ACT_REQ_STS", BIT(26)},
+ {"PRE_WAKE0_REQ_STS", BIT(27)},
+ {"PRE_WAKE1_REQ_STS", BIT(28)},
+ {"PRE_WAKE2_EN_REQ_STS", BIT(29)},
+ {"WOV_REQ_STS", BIT(30)},
+ {"CNVI_V1P05_REQ_STS", BIT(31)},
+ {}
+};
+
+const struct pmc_bit_map mtl_socm_signal_status_map[] = {
+ {"LSX_Wake0_En_STS", BIT(0)},
+ {"LSX_Wake0_Pol_STS", BIT(1)},
+ {"LSX_Wake1_En_STS", BIT(2)},
+ {"LSX_Wake1_Pol_STS", BIT(3)},
+ {"LSX_Wake2_En_STS", BIT(4)},
+ {"LSX_Wake2_Pol_STS", BIT(5)},
+ {"LSX_Wake3_En_STS", BIT(6)},
+ {"LSX_Wake3_Pol_STS", BIT(7)},
+ {"LSX_Wake4_En_STS", BIT(8)},
+ {"LSX_Wake4_Pol_STS", BIT(9)},
+ {"LSX_Wake5_En_STS", BIT(10)},
+ {"LSX_Wake5_Pol_STS", BIT(11)},
+ {"LSX_Wake6_En_STS", BIT(12)},
+ {"LSX_Wake6_Pol_STS", BIT(13)},
+ {"LSX_Wake7_En_STS", BIT(14)},
+ {"LSX_Wake7_Pol_STS", BIT(15)},
+ {"LPSS_Wake0_En_STS", BIT(16)},
+ {"LPSS_Wake0_Pol_STS", BIT(17)},
+ {"LPSS_Wake1_En_STS", BIT(18)},
+ {"LPSS_Wake1_Pol_STS", BIT(19)},
+ {"Int_Timer_SS_Wake0_En_STS", BIT(20)},
+ {"Int_Timer_SS_Wake0_Pol_STS", BIT(21)},
+ {"Int_Timer_SS_Wake1_En_STS", BIT(22)},
+ {"Int_Timer_SS_Wake1_Pol_STS", BIT(23)},
+ {"Int_Timer_SS_Wake2_En_STS", BIT(24)},
+ {"Int_Timer_SS_Wake2_Pol_STS", BIT(25)},
+ {"Int_Timer_SS_Wake3_En_STS", BIT(26)},
+ {"Int_Timer_SS_Wake3_Pol_STS", BIT(27)},
+ {"Int_Timer_SS_Wake4_En_STS", BIT(28)},
+ {"Int_Timer_SS_Wake4_Pol_STS", BIT(29)},
+ {"Int_Timer_SS_Wake5_En_STS", BIT(30)},
+ {"Int_Timer_SS_Wake5_Pol_STS", BIT(31)},
+ {}
+};
+
+const struct pmc_bit_map *mtl_socm_lpm_maps[] = {
+ mtl_socm_clocksource_status_map,
+ mtl_socm_power_gating_status_0_map,
+ mtl_socm_power_gating_status_1_map,
+ mtl_socm_power_gating_status_2_map,
+ mtl_socm_d3_status_0_map,
+ mtl_socm_d3_status_1_map,
+ mtl_socm_d3_status_2_map,
+ mtl_socm_d3_status_3_map,
+ mtl_socm_vnn_req_status_0_map,
+ mtl_socm_vnn_req_status_1_map,
+ mtl_socm_vnn_req_status_2_map,
+ mtl_socm_vnn_req_status_3_map,
+ mtl_socm_vnn_misc_status_map,
+ mtl_socm_signal_status_map,
+ NULL
+};
+
+const struct pmc_reg_map mtl_socm_reg_map = {
+ .pfear_sts = ext_mtl_socm_pfear_map,
+ .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP,
+ .ltr_show_sts = mtl_socm_ltr_show_map,
+ .msr_sts = msr_map,
+ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+ .regmap_length = MTL_SOC_PMC_MMIO_REG_LEN,
+ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+ .ppfear_buckets = MTL_SOCM_PPFEAR_NUM_ENTRIES,
+ .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
+ .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .lpm_num_maps = ADL_LPM_NUM_MAPS,
+ .ltr_ignore_max = MTL_SOCM_NUM_IP_IGN_ALLOWED,
+ .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
+ .etr3_offset = ETR3_OFFSET,
+ .lpm_sts_latch_en_offset = MTL_LPM_STATUS_LATCH_EN_OFFSET,
+ .lpm_priority_offset = MTL_LPM_PRI_OFFSET,
+ .lpm_en_offset = MTL_LPM_EN_OFFSET,
+ .lpm_residency_offset = MTL_LPM_RESIDENCY_OFFSET,
+ .lpm_sts = mtl_socm_lpm_maps,
+ .lpm_status_offset = MTL_LPM_STATUS_OFFSET,
+ .lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET,
+};
+
+const struct pmc_bit_map mtl_ioep_pfear_map[] = {
+ {"PMC_0", BIT(0)},
+ {"OPI", BIT(1)},
+ {"TCSS", BIT(2)},
+ {"RSVD3", BIT(3)},
+ {"SPA", BIT(4)},
+ {"SPB", BIT(5)},
+ {"SPC", BIT(6)},
+ {"IOE_D2D_3", BIT(7)},
+
+ {"RSVD8", BIT(0)},
+ {"RSVD9", BIT(1)},
+ {"SPE", BIT(2)},
+ {"RSVD11", BIT(3)},
+ {"RSVD12", BIT(4)},
+ {"SPD", BIT(5)},
+ {"ACE_7", BIT(6)},
+ {"RSVD15", BIT(7)},
+
+ {"ACE_0", BIT(0)},
+ {"FIACPCB_P", BIT(1)},
+ {"P2S", BIT(2)},
+ {"RSVD19", BIT(3)},
+ {"ACE_8", BIT(4)},
+ {"IOE_D2D_0", BIT(5)},
+ {"FUSE", BIT(6)},
+ {"RSVD23", BIT(7)},
+
+ {"FIACPCB_P5", BIT(0)},
+ {"ACE_3", BIT(1)},
+ {"RSF5", BIT(2)},
+ {"ACE_2", BIT(3)},
+ {"ACE_4", BIT(4)},
+ {"RSVD29", BIT(5)},
+ {"RSF10", BIT(6)},
+ {"MPFPW5", BIT(7)},
+
+ {"PSF9", BIT(0)},
+ {"MPFPW4", BIT(1)},
+ {"RSVD34", BIT(2)},
+ {"RSVD35", BIT(3)},
+ {"RSVD36", BIT(4)},
+ {"RSVD37", BIT(5)},
+ {"RSVD38", BIT(6)},
+ {"RSVD39", BIT(7)},
+
+ {"SBR0", BIT(0)},
+ {"SBR1", BIT(1)},
+ {"SBR2", BIT(2)},
+ {"SBR3", BIT(3)},
+ {"SBR4", BIT(4)},
+ {"SBR5", BIT(5)},
+ {"RSVD46", BIT(6)},
+ {"RSVD47", BIT(7)},
+
+ {"RSVD48", BIT(0)},
+ {"FIA_P5", BIT(1)},
+ {"RSVD50", BIT(2)},
+ {"RSVD51", BIT(3)},
+ {"RSVD52", BIT(4)},
+ {"RSVD53", BIT(5)},
+ {"RSVD54", BIT(6)},
+ {"ACE_1", BIT(7)},
+
+ {"RSVD56", BIT(0)},
+ {"ACE_5", BIT(1)},
+ {"RSVD58", BIT(2)},
+ {"G5FPW1", BIT(3)},
+ {"RSVD60", BIT(4)},
+ {"ACE_6", BIT(5)},
+ {"RSVD62", BIT(6)},
+ {"GBETSN1", BIT(7)},
+
+ {"RSVD64", BIT(0)},
+ {"FIA", BIT(1)},
+ {"RSVD66", BIT(2)},
+ {"FIA_P", BIT(3)},
+ {"TAM", BIT(4)},
+ {"GBETSN", BIT(5)},
+ {"IOE_D2D_2", BIT(6)},
+ {"IOE_D2D_1", BIT(7)},
+
+ {"SPF", BIT(0)},
+ {"PMC_1", BIT(1)},
+ {}
+};
+
+const struct pmc_bit_map *ext_mtl_ioep_pfear_map[] = {
+ mtl_ioep_pfear_map,
+ NULL
+};
+
+const struct pmc_bit_map mtl_ioep_ltr_show_map[] = {
+ {"SOUTHPORT_A", CNP_PMC_LTR_SPA},
+ {"SOUTHPORT_B", CNP_PMC_LTR_SPB},
+ {"SATA", CNP_PMC_LTR_SATA},
+ {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE},
+ {"XHCI", CNP_PMC_LTR_XHCI},
+ {"SOUTHPORT_F", ADL_PMC_LTR_SPF},
+ {"ME", CNP_PMC_LTR_ME},
+ {"SATA1", CNP_PMC_LTR_EVA},
+ {"SOUTHPORT_C", CNP_PMC_LTR_SPC},
+ {"HD_AUDIO", CNP_PMC_LTR_AZ},
+ {"CNV", CNP_PMC_LTR_CNV},
+ {"LPSS", CNP_PMC_LTR_LPSS},
+ {"SOUTHPORT_D", CNP_PMC_LTR_SPD},
+ {"SOUTHPORT_E", CNP_PMC_LTR_SPE},
+ {"SATA2", CNP_PMC_LTR_CAM},
+ {"ESPI", CNP_PMC_LTR_ESPI},
+ {"SCC", CNP_PMC_LTR_SCC},
+ {"Reserved", MTL_PMC_LTR_RESERVED},
+ {"UFSX2", CNP_PMC_LTR_UFSX2},
+ {"EMMC", CNP_PMC_LTR_EMMC},
+ {"WIGIG", ICL_PMC_LTR_WIGIG},
+ {"THC0", TGL_PMC_LTR_THC0},
+ {"THC1", TGL_PMC_LTR_THC1},
+ {"SOUTHPORT_G", MTL_PMC_LTR_SPG},
+
+ /* Below two cannot be used for LTR_IGNORE */
+ {"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT},
+ {"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT},
+ {}
+};
+
+const struct pmc_bit_map mtl_ioep_clocksource_status_map[] = {
+ {"AON2_OFF_STS", BIT(0)},
+ {"AON3_OFF_STS", BIT(1)},
+ {"AON4_OFF_STS", BIT(2)},
+ {"AON5_OFF_STS", BIT(3)},
+ {"AON1_OFF_STS", BIT(4)},
+ {"TBT_PLL_OFF_STS", BIT(5)},
+ {"TMU_PLL_OFF_STS", BIT(6)},
+ {"BCLK_PLL_OFF_STS", BIT(7)},
+ {"D2D_PLL_OFF_STS", BIT(8)},
+ {"AON3_SPL_OFF_STS", BIT(9)},
+ {"MPFPW4_0_PLL_OFF_STS", BIT(12)},
+ {"MPFPW5_0_PLL_OFF_STS", BIT(13)},
+ {"G5FPW_0_PLL_OFF_STS", BIT(14)},
+ {"G5FPW_1_PLL_OFF_STS", BIT(15)},
+ {"XTAL_AGGR_OFF_STS", BIT(17)},
+ {"FABRIC_PLL_OFF_STS", BIT(25)},
+ {"SOC_PLL_OFF_STS", BIT(26)},
+ {"REF_PLL_OFF_STS", BIT(28)},
+ {"RTC_PLL_OFF_STS", BIT(31)},
+ {}
+};
+
+const struct pmc_bit_map mtl_ioep_power_gating_status_0_map[] = {
+ {"PMC_PGD0_PG_STS", BIT(0)},
+ {"DMI_PGD0_PG_STS", BIT(1)},
+ {"TCSS_PGD0_PG_STS", BIT(2)},
+ {"SPA_PGD0_PG_STS", BIT(4)},
+ {"SPB_PGD0_PG_STS", BIT(5)},
+ {"SPC_PGD0_PG_STS", BIT(6)},
+ {"IOE_D2D_PGD3_PG_STS", BIT(7)},
+ {"SPE_PGD0_PG_STS", BIT(10)},
+ {"SPD_PGD0_PG_STS", BIT(13)},
+ {"ACE_PGD7_PG_STS", BIT(14)},
+ {"ACE_PGD0_PG_STS", BIT(16)},
+ {"FIACPCB_P_PGD0_PG_STS", BIT(17)},
+ {"P2S_PGD0_PG_STS", BIT(18)},
+ {"ACE_PGD8_PG_STS", BIT(20)},
+ {"IOE_D2D_PGD0_PG_STS", BIT(21)},
+ {"FUSE_PGD0_PG_STS", BIT(22)},
+ {"FIACPCB_P5_PGD0_PG_STS", BIT(24)},
+ {"ACE_PGD3_PG_STS", BIT(25)},
+ {"PSF5_PGD0_PG_STS", BIT(26)},
+ {"ACE_PGD2_PG_STS", BIT(27)},
+ {"ACE_PGD4_PG_STS", BIT(28)},
+ {"PSF10_PGD0_PG_STS", BIT(30)},
+ {"MPFPW5_PGD0_PG_STS", BIT(31)},
+ {}
+};
+
+const struct pmc_bit_map mtl_ioep_power_gating_status_1_map[] = {
+ {"PSF9_PGD0_PG_STS", BIT(0)},
+ {"MPFPW4_PGD0_PG_STS", BIT(1)},
+ {"SBR0_PGD0_PG_STS", BIT(8)},
+ {"SBR1_PGD0_PG_STS", BIT(9)},
+ {"SBR2_PGD0_PG_STS", BIT(10)},
+ {"SBR3_PGD0_PG_STS", BIT(11)},
+ {"SBR4_PGD0_PG_STS", BIT(12)},
+ {"SBR5_PGD0_PG_STS", BIT(13)},
+ {"FIA_P5_PGD0_PG_STS", BIT(17)},
+ {"ACE_PGD1_PGD0_PG_STS", BIT(23)},
+ {"ACE_PGD5_PGD1_PG_STS", BIT(25)},
+ {"G5FPW1_PGD0_PG_STS", BIT(27)},
+ {"ACE_PGD6_PG_STS", BIT(29)},
+ {"GBETSN1_PGD0_PG_STS", BIT(31)},
+ {}
+};
+
+const struct pmc_bit_map mtl_ioep_power_gating_status_2_map[] = {
+ {"FIA_PGD0_PG_STS", BIT(1)},
+ {"FIA_P_PGD0_PG_STS", BIT(3)},
+ {"TAM_PGD0_PG_STS", BIT(4)},
+ {"GBETSN_PGD0_PG_STS", BIT(5)},
+ {"IOE_D2D_PGD2_PG_STS", BIT(6)},
+ {"IOE_D2D_PGD1_PG_STS", BIT(7)},
+ {"SPF_PGD0_PG_STS", BIT(8)},
+ {"PMC_PGD1_PG_STS", BIT(9)},
+ {}
+};
+
+const struct pmc_bit_map mtl_ioep_d3_status_0_map[] = {
+ {"SPF_D3_STS", BIT(0)},
+ {"SPA_D3_STS", BIT(12)},
+ {"SPB_D3_STS", BIT(13)},
+ {"SPC_D3_STS", BIT(14)},
+ {"SPD_D3_STS", BIT(15)},
+ {"SPE_D3_STS", BIT(16)},
+ {"DMI_D3_STS", BIT(22)},
+ {}
+};
+
+const struct pmc_bit_map mtl_ioep_d3_status_1_map[] = {
+ {"GBETSN1_D3_STS", BIT(14)},
+ {"P2S_D3_STS", BIT(24)},
+ {}
+};
+
+const struct pmc_bit_map mtl_ioep_d3_status_2_map[] = {
+ {}
+};
+
+const struct pmc_bit_map mtl_ioep_d3_status_3_map[] = {
+ {"GBETSN_D3_STS", BIT(13)},
+ {"ACE_D3_STS", BIT(23)},
+ {}
+};
+
+const struct pmc_bit_map mtl_ioep_vnn_req_status_0_map[] = {
+ {"FIA_VNN_REQ_STS", BIT(17)},
+ {}
+};
+
+const struct pmc_bit_map mtl_ioep_vnn_req_status_1_map[] = {
+ {"DFXAGG_VNN_REQ_STS", BIT(8)},
+ {}
+};
+
+const struct pmc_bit_map mtl_ioep_vnn_req_status_2_map[] = {
+ {}
+};
+
+const struct pmc_bit_map mtl_ioep_vnn_req_status_3_map[] = {
+ {"DTS0_VNN_REQ_STS", BIT(7)},
+ {"DISP_VNN_REQ_STS", BIT(19)},
+ {}
+};
+
+const struct pmc_bit_map mtl_ioep_vnn_misc_status_map[] = {
+ {"CPU_C10_REQ_STS", BIT(0)},
+ {"TS_OFF_REQ_STS", BIT(1)},
+ {"PNDE_MET_REQ_STS", BIT(2)},
+ {"PCIE_DEEP_PM_REQ_STS", BIT(3)},
+ {"PMC_CLK_THROTTLE_EN_REQ_STS", BIT(4)},
+ {"NPK_VNNAON_REQ_STS", BIT(5)},
+ {"VNN_SOC_REQ_STS", BIT(6)},
+ {"USB_DEVICE_ATTACHED_REQ_STS", BIT(8)},
+ {"FIA_EXIT_REQ_STS", BIT(9)},
+ {"USB2_SUS_PG_REQ_STS", BIT(10)},
+ {"PLT_GREATER_REQ_STS", BIT(11)},
+ {"PCIE_CLKREQ_REQ_STS", BIT(12)},
+ {"PMC_IDLE_FB_OCP_REQ_STS", BIT(13)},
+ {"PM_SYNC_STATES_REQ_STS", BIT(14)},
+ {"EA_REQ_STS", BIT(15)},
+ {"MPHY_CORE_OFF_REQ_STS", BIT(16)},
+ {"BRK_EV_EN_REQ_STS", BIT(17)},
+ {"AUTO_DEMO_EN_REQ_STS", BIT(18)},
+ {"ITSS_CLK_SRC_REQ_STS", BIT(19)},
+ {"LPC_CLK_SRC_REQ_STS", BIT(20)},
+ {"ARC_IDLE_REQ_STS", BIT(21)},
+ {"MPHY_SUS_REQ_STS", BIT(22)},
+ {"FIA_DEEP_PM_REQ_STS", BIT(23)},
+ {"UXD_CONNECTED_REQ_STS", BIT(24)},
+ {"ARC_INTERRUPT_WAKE_REQ_STS", BIT(25)},
+ {"USB2_VNNAON_ACT_REQ_STS", BIT(26)},
+ {"PRE_WAKE0_REQ_STS", BIT(27)},
+ {"PRE_WAKE1_REQ_STS", BIT(28)},
+ {"PRE_WAKE2_EN_REQ_STS", BIT(29)},
+ {"WOV_REQ_STS", BIT(30)},
+ {"CNVI_V1P05_REQ_STS", BIT(31)},
+ {}
+};
+
+const struct pmc_bit_map *mtl_ioep_lpm_maps[] = {
+ mtl_ioep_clocksource_status_map,
+ mtl_ioep_power_gating_status_0_map,
+ mtl_ioep_power_gating_status_1_map,
+ mtl_ioep_power_gating_status_2_map,
+ mtl_ioep_d3_status_0_map,
+ mtl_ioep_d3_status_1_map,
+ mtl_ioep_d3_status_2_map,
+ mtl_ioep_d3_status_3_map,
+ mtl_ioep_vnn_req_status_0_map,
+ mtl_ioep_vnn_req_status_1_map,
+ mtl_ioep_vnn_req_status_2_map,
+ mtl_ioep_vnn_req_status_3_map,
+ mtl_ioep_vnn_misc_status_map,
+ mtl_socm_signal_status_map,
+ NULL
+};
+
+const struct pmc_reg_map mtl_ioep_reg_map = {
+ .regmap_length = MTL_IOE_PMC_MMIO_REG_LEN,
+ .pfear_sts = ext_mtl_ioep_pfear_map,
+ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+ .ppfear_buckets = MTL_IOE_PPFEAR_NUM_ENTRIES,
+ .lpm_status_offset = MTL_LPM_STATUS_OFFSET,
+ .lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET,
+ .lpm_sts = mtl_ioep_lpm_maps,
+ .ltr_show_sts = mtl_ioep_ltr_show_map,
+ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+ .ltr_ignore_max = ADL_NUM_IP_IGN_ALLOWED,
+};
+
+const struct pmc_bit_map mtl_ioem_pfear_map[] = {
+ {"PMC_0", BIT(0)},
+ {"OPI", BIT(1)},
+ {"TCSS", BIT(2)},
+ {"RSVD3", BIT(3)},
+ {"SPA", BIT(4)},
+ {"SPB", BIT(5)},
+ {"SPC", BIT(6)},
+ {"IOE_D2D_3", BIT(7)},
+
+ {"RSVD8", BIT(0)},
+ {"RSVD9", BIT(1)},
+ {"SPE", BIT(2)},
+ {"RSVD11", BIT(3)},
+ {"RSVD12", BIT(4)},
+ {"SPD", BIT(5)},
+ {"ACE_7", BIT(6)},
+ {"RSVD15", BIT(7)},
+
+ {"ACE_0", BIT(0)},
+ {"FIACPCB_P", BIT(1)},
+ {"P2S", BIT(2)},
+ {"RSVD19", BIT(3)},
+ {"ACE_8", BIT(4)},
+ {"IOE_D2D_0", BIT(5)},
+ {"FUSE", BIT(6)},
+ {"RSVD23", BIT(7)},
+
+ {"FIACPCB_P5", BIT(0)},
+ {"ACE_3", BIT(1)},
+ {"RSF5", BIT(2)},
+ {"ACE_2", BIT(3)},
+ {"ACE_4", BIT(4)},
+ {"RSVD29", BIT(5)},
+ {"RSF10", BIT(6)},
+ {"MPFPW5", BIT(7)},
+
+ {"PSF9", BIT(0)},
+ {"MPFPW4", BIT(1)},
+ {"RSVD34", BIT(2)},
+ {"RSVD35", BIT(3)},
+ {"RSVD36", BIT(4)},
+ {"RSVD37", BIT(5)},
+ {"RSVD38", BIT(6)},
+ {"RSVD39", BIT(7)},
+
+ {"SBR0", BIT(0)},
+ {"SBR1", BIT(1)},
+ {"SBR2", BIT(2)},
+ {"SBR3", BIT(3)},
+ {"SBR4", BIT(4)},
+ {"RSVD45", BIT(5)},
+ {"RSVD46", BIT(6)},
+ {"RSVD47", BIT(7)},
+
+ {"RSVD48", BIT(0)},
+ {"FIA_P5", BIT(1)},
+ {"RSVD50", BIT(2)},
+ {"RSVD51", BIT(3)},
+ {"RSVD52", BIT(4)},
+ {"RSVD53", BIT(5)},
+ {"RSVD54", BIT(6)},
+ {"ACE_1", BIT(7)},
+
+ {"RSVD56", BIT(0)},
+ {"ACE_5", BIT(1)},
+ {"RSVD58", BIT(2)},
+ {"G5FPW1", BIT(3)},
+ {"RSVD60", BIT(4)},
+ {"ACE_6", BIT(5)},
+ {"RSVD62", BIT(6)},
+ {"GBETSN1", BIT(7)},
+
+ {"RSVD64", BIT(0)},
+ {"FIA", BIT(1)},
+ {"RSVD66", BIT(2)},
+ {"FIA_P", BIT(3)},
+ {"TAM", BIT(4)},
+ {"GBETSN", BIT(5)},
+ {"IOE_D2D_2", BIT(6)},
+ {"IOE_D2D_1", BIT(7)},
+
+ {"SPF", BIT(0)},
+ {"PMC_1", BIT(1)},
+ {}
+};
+
+const struct pmc_bit_map *ext_mtl_ioem_pfear_map[] = {
+ mtl_ioem_pfear_map,
+ NULL
+};
+
+const struct pmc_bit_map mtl_ioem_power_gating_status_1_map[] = {
+ {"PSF9_PGD0_PG_STS", BIT(0)},
+ {"MPFPW4_PGD0_PG_STS", BIT(1)},
+ {"SBR0_PGD0_PG_STS", BIT(8)},
+ {"SBR1_PGD0_PG_STS", BIT(9)},
+ {"SBR2_PGD0_PG_STS", BIT(10)},
+ {"SBR3_PGD0_PG_STS", BIT(11)},
+ {"SBR4_PGD0_PG_STS", BIT(12)},
+ {"FIA_P5_PGD0_PG_STS", BIT(17)},
+ {"ACE_PGD1_PGD0_PG_STS", BIT(23)},
+ {"ACE_PGD5_PGD1_PG_STS", BIT(25)},
+ {"G5FPW1_PGD0_PG_STS", BIT(27)},
+ {"ACE_PGD6_PG_STS", BIT(29)},
+ {"GBETSN1_PGD0_PG_STS", BIT(31)},
+ {}
+};
+
+const struct pmc_bit_map *mtl_ioem_lpm_maps[] = {
+ mtl_ioep_clocksource_status_map,
+ mtl_ioep_power_gating_status_0_map,
+ mtl_ioem_power_gating_status_1_map,
+ mtl_ioep_power_gating_status_2_map,
+ mtl_ioep_d3_status_0_map,
+ mtl_ioep_d3_status_1_map,
+ mtl_ioep_d3_status_2_map,
+ mtl_ioep_d3_status_3_map,
+ mtl_ioep_vnn_req_status_0_map,
+ mtl_ioep_vnn_req_status_1_map,
+ mtl_ioep_vnn_req_status_2_map,
+ mtl_ioep_vnn_req_status_3_map,
+ mtl_ioep_vnn_misc_status_map,
+ mtl_socm_signal_status_map,
+ NULL
+};
+
+const struct pmc_reg_map mtl_ioem_reg_map = {
+ .regmap_length = MTL_IOE_PMC_MMIO_REG_LEN,
+ .pfear_sts = ext_mtl_ioem_pfear_map,
+ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+ .ppfear_buckets = MTL_IOE_PPFEAR_NUM_ENTRIES,
+ .lpm_status_offset = MTL_LPM_STATUS_OFFSET,
+ .lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET,
+ .lpm_sts = mtl_ioem_lpm_maps,
+ .ltr_show_sts = mtl_ioep_ltr_show_map,
+ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+ .ltr_ignore_max = ADL_NUM_IP_IGN_ALLOWED,
+};
+
+#define PMC_DEVID_SOCM 0x7e7f
+#define PMC_DEVID_IOEP 0x7ecf
+#define PMC_DEVID_IOEM 0x7ebf
+static struct pmc_info mtl_pmc_info_list[] = {
+ {
+ .devid = PMC_DEVID_SOCM,
+ .map = &mtl_socm_reg_map,
+ },
+ {
+ .devid = PMC_DEVID_IOEP,
+ .map = &mtl_ioep_reg_map,
+ },
+ {
+ .devid = PMC_DEVID_IOEM,
+ .map = &mtl_ioem_reg_map
+ },
+ {}
+};
+
+#define MTL_GNA_PCI_DEV 0x7e4c
+#define MTL_IPU_PCI_DEV 0x7d19
+#define MTL_VPU_PCI_DEV 0x7d1d
+static void mtl_set_device_d3(unsigned int device)
+{
+ struct pci_dev *pcidev;
+
+ pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
+ if (pcidev) {
+ if (!device_trylock(&pcidev->dev)) {
+ pci_dev_put(pcidev);
+ return;
+ }
+ if (!pcidev->dev.driver) {
+ dev_info(&pcidev->dev, "Setting to D3hot\n");
+ pci_set_power_state(pcidev, PCI_D3hot);
+ }
+ device_unlock(&pcidev->dev);
+ pci_dev_put(pcidev);
+ }
+}
+
+/*
+ * Put selected devices that have no driver bound into D3 so that they
+ * do not block Package C entry.
+ */
+static void mtl_d3_fixup(void)
+{
+ mtl_set_device_d3(MTL_GNA_PCI_DEV);
+ mtl_set_device_d3(MTL_IPU_PCI_DEV);
+ mtl_set_device_d3(MTL_VPU_PCI_DEV);
+}
+
+static int mtl_resume(struct pmc_dev *pmcdev)
+{
+ mtl_d3_fixup();
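+
+	/*
+	 * Clear the LTR-ignore bit for LTR index 3 (GIGABIT_ETHERNET in
+	 * mtl_socm_ltr_show_map); an assumption inferred from the
+	 * pmc_core_send_ltr_ignore(pmcdev, value, ignore) prototype.
+	 */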
+ pmc_core_send_ltr_ignore(pmcdev, 3, 0);
+
+ return pmc_core_resume_common(pmcdev);
+}
+
+int mtl_core_init(struct pmc_dev *pmcdev)
+{
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_SOC];
+ int ret = 0;
+
+ mtl_d3_fixup();
+
+ pmcdev->suspend = cnl_suspend;
+ pmcdev->resume = mtl_resume;
+
+ pmcdev->regmap_list = mtl_pmc_info_list;
+ pmc_core_ssram_init(pmcdev);
+
+ /* If regbase not assigned, set map and discover using legacy method */
+ if (!pmc->regbase) {
+ pmc->map = &mtl_socm_reg_map;
+ ret = get_primary_reg_base(pmc);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/platform/x86/intel/pmc/pltdrv.c b/drivers/platform/x86/intel/pmc/pltdrv.c
new file mode 100644
index 0000000000..ddfba38c21
--- /dev/null
+++ b/drivers/platform/x86/intel/pmc/pltdrv.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Intel PMC Core platform init
+ * Copyright (c) 2019, Google Inc.
+ * Author - Rajat Jain
+ *
+ * This code instantiates platform devices for the intel_pmc_core driver
+ * only on supported platforms that may lack the ACPI device in their
+ * ACPI tables. No new platforms should be added here: new platforms are
+ * expected to have the ACPI device, which is the preferred way of
+ * enumeration.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+#include <xen/xen.h>
+
+static void intel_pmc_core_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+static struct platform_device *pmc_core_device;
+
+/*
+ * intel_pmc_core_platform_ids is the list of platforms where we want to
+ * instantiate the platform_device if not already instantiated. This is
+ * different from intel_pmc_core_ids in intel_pmc_core.c, which is the
+ * list of platforms that the driver supports for the pmc_core device. The
+ * other list may grow, but this list should not.
+ */
+static const struct x86_cpu_id intel_pmc_core_platform_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &pmc_core_device),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &pmc_core_device),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &pmc_core_device),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &pmc_core_device),
+ X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &pmc_core_device),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &pmc_core_device),
+ X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &pmc_core_device),
+ X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &pmc_core_device),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_platform_ids);
+
+static int __init pmc_core_platform_init(void)
+{
+ int retval;
+
+ /* Skip creating the platform device if ACPI already has a device */
+ if (acpi_dev_present("INT33A1", NULL, -1))
+ return -ENODEV;
+
+ /*
+ * Skip forcefully attaching the device for VMs. Make an exception for
+ * Xen dom0, which does have full hardware access.
+ */
+ if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR) && !xen_initial_domain())
+ return -ENODEV;
+
+ if (!x86_match_cpu(intel_pmc_core_platform_ids))
+ return -ENODEV;
+
+ pmc_core_device = kzalloc(sizeof(*pmc_core_device), GFP_KERNEL);
+ if (!pmc_core_device)
+ return -ENOMEM;
+
+ pmc_core_device->name = "intel_pmc_core";
+ pmc_core_device->dev.release = intel_pmc_core_release;
+
+ retval = platform_device_register(pmc_core_device);
+ if (retval)
+ platform_device_put(pmc_core_device);
+
+ return retval;
+}
+
+static void __exit pmc_core_platform_exit(void)
+{
+ platform_device_unregister(pmc_core_device);
+}
+
+module_init(pmc_core_platform_init);
+module_exit(pmc_core_platform_exit);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/pmc/spt.c b/drivers/platform/x86/intel/pmc/spt.c
new file mode 100644
index 0000000000..4b6f5cbda1
--- /dev/null
+++ b/drivers/platform/x86/intel/pmc/spt.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file contains platform specific structure definitions
+ * and init function used by Sunrise Point PCH.
+ *
+ * Copyright (c) 2022, Intel Corporation.
+ * All Rights Reserved.
+ *
+ */
+
+#include "core.h"
+
+const struct pmc_bit_map spt_pll_map[] = {
+ {"MIPI PLL", SPT_PMC_BIT_MPHY_CMN_LANE0},
+ {"GEN2 USB2PCIE2 PLL", SPT_PMC_BIT_MPHY_CMN_LANE1},
+ {"DMIPCIE3 PLL", SPT_PMC_BIT_MPHY_CMN_LANE2},
+ {"SATA PLL", SPT_PMC_BIT_MPHY_CMN_LANE3},
+ {}
+};
+
+const struct pmc_bit_map spt_mphy_map[] = {
+ {"MPHY CORE LANE 0", SPT_PMC_BIT_MPHY_LANE0},
+ {"MPHY CORE LANE 1", SPT_PMC_BIT_MPHY_LANE1},
+ {"MPHY CORE LANE 2", SPT_PMC_BIT_MPHY_LANE2},
+ {"MPHY CORE LANE 3", SPT_PMC_BIT_MPHY_LANE3},
+ {"MPHY CORE LANE 4", SPT_PMC_BIT_MPHY_LANE4},
+ {"MPHY CORE LANE 5", SPT_PMC_BIT_MPHY_LANE5},
+ {"MPHY CORE LANE 6", SPT_PMC_BIT_MPHY_LANE6},
+ {"MPHY CORE LANE 7", SPT_PMC_BIT_MPHY_LANE7},
+ {"MPHY CORE LANE 8", SPT_PMC_BIT_MPHY_LANE8},
+ {"MPHY CORE LANE 9", SPT_PMC_BIT_MPHY_LANE9},
+ {"MPHY CORE LANE 10", SPT_PMC_BIT_MPHY_LANE10},
+ {"MPHY CORE LANE 11", SPT_PMC_BIT_MPHY_LANE11},
+ {"MPHY CORE LANE 12", SPT_PMC_BIT_MPHY_LANE12},
+ {"MPHY CORE LANE 13", SPT_PMC_BIT_MPHY_LANE13},
+ {"MPHY CORE LANE 14", SPT_PMC_BIT_MPHY_LANE14},
+ {"MPHY CORE LANE 15", SPT_PMC_BIT_MPHY_LANE15},
+ {}
+};
+
+const struct pmc_bit_map spt_pfear_map[] = {
+ {"PMC", SPT_PMC_BIT_PMC},
+ {"OPI-DMI", SPT_PMC_BIT_OPI},
+ {"SPI / eSPI", SPT_PMC_BIT_SPI},
+ {"XHCI", SPT_PMC_BIT_XHCI},
+ {"SPA", SPT_PMC_BIT_SPA},
+ {"SPB", SPT_PMC_BIT_SPB},
+ {"SPC", SPT_PMC_BIT_SPC},
+ {"GBE", SPT_PMC_BIT_GBE},
+ {"SATA", SPT_PMC_BIT_SATA},
+ {"HDA-PGD0", SPT_PMC_BIT_HDA_PGD0},
+ {"HDA-PGD1", SPT_PMC_BIT_HDA_PGD1},
+ {"HDA-PGD2", SPT_PMC_BIT_HDA_PGD2},
+ {"HDA-PGD3", SPT_PMC_BIT_HDA_PGD3},
+ {"RSVD", SPT_PMC_BIT_RSVD_0B},
+ {"LPSS", SPT_PMC_BIT_LPSS},
+ {"LPC", SPT_PMC_BIT_LPC},
+ {"SMB", SPT_PMC_BIT_SMB},
+ {"ISH", SPT_PMC_BIT_ISH},
+ {"P2SB", SPT_PMC_BIT_P2SB},
+ {"DFX", SPT_PMC_BIT_DFX},
+ {"SCC", SPT_PMC_BIT_SCC},
+ {"RSVD", SPT_PMC_BIT_RSVD_0C},
+ {"FUSE", SPT_PMC_BIT_FUSE},
+ {"CAMERA", SPT_PMC_BIT_CAMREA},
+ {"RSVD", SPT_PMC_BIT_RSVD_0D},
+ {"USB3-OTG", SPT_PMC_BIT_USB3_OTG},
+ {"EXI", SPT_PMC_BIT_EXI},
+ {"CSE", SPT_PMC_BIT_CSE},
+ {"CSME_KVM", SPT_PMC_BIT_CSME_KVM},
+ {"CSME_PMT", SPT_PMC_BIT_CSME_PMT},
+ {"CSME_CLINK", SPT_PMC_BIT_CSME_CLINK},
+ {"CSME_PTIO", SPT_PMC_BIT_CSME_PTIO},
+ {"CSME_USBR", SPT_PMC_BIT_CSME_USBR},
+ {"CSME_SUSRAM", SPT_PMC_BIT_CSME_SUSRAM},
+ {"CSME_SMT", SPT_PMC_BIT_CSME_SMT},
+ {"RSVD", SPT_PMC_BIT_RSVD_1A},
+ {"CSME_SMS2", SPT_PMC_BIT_CSME_SMS2},
+ {"CSME_SMS1", SPT_PMC_BIT_CSME_SMS1},
+ {"CSME_RTC", SPT_PMC_BIT_CSME_RTC},
+ {"CSME_PSF", SPT_PMC_BIT_CSME_PSF},
+ {}
+};
+
+const struct pmc_bit_map *ext_spt_pfear_map[] = {
+ /*
+ * Check intel_pmc_core_ids[] users of spt_reg_map for
+ * a list of core SoCs using this.
+ */
+ spt_pfear_map,
+ NULL
+};
+
+const struct pmc_bit_map spt_ltr_show_map[] = {
+ {"SOUTHPORT_A", SPT_PMC_LTR_SPA},
+ {"SOUTHPORT_B", SPT_PMC_LTR_SPB},
+ {"SATA", SPT_PMC_LTR_SATA},
+ {"GIGABIT_ETHERNET", SPT_PMC_LTR_GBE},
+ {"XHCI", SPT_PMC_LTR_XHCI},
+ {"Reserved", SPT_PMC_LTR_RESERVED},
+ {"ME", SPT_PMC_LTR_ME},
+ /* EVA is Enterprise Value Add, doesn't really exist on PCH */
+ {"EVA", SPT_PMC_LTR_EVA},
+ {"SOUTHPORT_C", SPT_PMC_LTR_SPC},
+ {"HD_AUDIO", SPT_PMC_LTR_AZ},
+ {"LPSS", SPT_PMC_LTR_LPSS},
+ {"SOUTHPORT_D", SPT_PMC_LTR_SPD},
+ {"SOUTHPORT_E", SPT_PMC_LTR_SPE},
+ {"CAMERA", SPT_PMC_LTR_CAM},
+ {"ESPI", SPT_PMC_LTR_ESPI},
+ {"SCC", SPT_PMC_LTR_SCC},
+ {"ISH", SPT_PMC_LTR_ISH},
+ /* Below two cannot be used for LTR_IGNORE */
+ {"CURRENT_PLATFORM", SPT_PMC_LTR_CUR_PLT},
+ {"AGGREGATED_SYSTEM", SPT_PMC_LTR_CUR_ASLT},
+ {}
+};
+
+const struct pmc_reg_map spt_reg_map = {
+ .pfear_sts = ext_spt_pfear_map,
+ .mphy_sts = spt_mphy_map,
+ .pll_sts = spt_pll_map,
+ .ltr_show_sts = spt_ltr_show_map,
+ .msr_sts = msr_map,
+ .slp_s0_offset = SPT_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = SPT_PMC_SLP_S0_RES_COUNTER_STEP,
+ .ltr_ignore_offset = SPT_PMC_LTR_IGNORE_OFFSET,
+ .regmap_length = SPT_PMC_MMIO_REG_LEN,
+ .ppfear0_offset = SPT_PMC_XRAM_PPFEAR0A,
+ .ppfear_buckets = SPT_PPFEAR_NUM_ENTRIES,
+ .pm_cfg_offset = SPT_PMC_PM_CFG_OFFSET,
+ .pm_read_disable_bit = SPT_PMC_READ_DISABLE_BIT,
+ .ltr_ignore_max = SPT_NUM_IP_IGN_ALLOWED,
+ .pm_vric1_offset = SPT_PMC_VRIC1_OFFSET,
+};
+
+int spt_core_init(struct pmc_dev *pmcdev)
+{
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+
+ pmc->map = &spt_reg_map;
+ return get_primary_reg_base(pmc);
+}
diff --git a/drivers/platform/x86/intel/pmc/tgl.c b/drivers/platform/x86/intel/pmc/tgl.c
new file mode 100644
index 0000000000..e88d3d00c8
--- /dev/null
+++ b/drivers/platform/x86/intel/pmc/tgl.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file contains platform specific structure definitions
+ * and init function used by Tiger Lake PCH.
+ *
+ * Copyright (c) 2022, Intel Corporation.
+ * All Rights Reserved.
+ *
+ */
+
+#include "core.h"
+
+#define ACPI_S0IX_DSM_UUID "57a6512e-3979-4e9d-9708-ff13b2508972"
+#define ACPI_GET_LOW_MODE_REGISTERS 1
+
+const struct pmc_bit_map tgl_pfear_map[] = {
+ {"PSF9", BIT(0)},
+ {"RES_66", BIT(1)},
+ {"RES_67", BIT(2)},
+ {"RES_68", BIT(3)},
+ {"RES_69", BIT(4)},
+ {"RES_70", BIT(5)},
+ {"TBTLSX", BIT(6)},
+ {}
+};
+
+const struct pmc_bit_map *ext_tgl_pfear_map[] = {
+ /*
+ * Check intel_pmc_core_ids[] users of tgl_reg_map for
+ * a list of core SoCs using this.
+ */
+ cnp_pfear_map,
+ tgl_pfear_map,
+ NULL
+};
+
+const struct pmc_bit_map tgl_clocksource_status_map[] = {
+ {"USB2PLL_OFF_STS", BIT(18)},
+ {"PCIe/USB3.1_Gen2PLL_OFF_STS", BIT(19)},
+ {"PCIe_Gen3PLL_OFF_STS", BIT(20)},
+ {"OPIOPLL_OFF_STS", BIT(21)},
+ {"OCPLL_OFF_STS", BIT(22)},
+ {"MainPLL_OFF_STS", BIT(23)},
+ {"MIPIPLL_OFF_STS", BIT(24)},
+ {"Fast_XTAL_Osc_OFF_STS", BIT(25)},
+ {"AC_Ring_Osc_OFF_STS", BIT(26)},
+ {"MC_Ring_Osc_OFF_STS", BIT(27)},
+ {"SATAPLL_OFF_STS", BIT(29)},
+ {"XTAL_USB2PLL_OFF_STS", BIT(31)},
+ {}
+};
+
+const struct pmc_bit_map tgl_power_gating_status_map[] = {
+ {"CSME_PG_STS", BIT(0)},
+ {"SATA_PG_STS", BIT(1)},
+ {"xHCI_PG_STS", BIT(2)},
+ {"UFSX2_PG_STS", BIT(3)},
+ {"OTG_PG_STS", BIT(5)},
+ {"SPA_PG_STS", BIT(6)},
+ {"SPB_PG_STS", BIT(7)},
+ {"SPC_PG_STS", BIT(8)},
+ {"SPD_PG_STS", BIT(9)},
+ {"SPE_PG_STS", BIT(10)},
+ {"SPF_PG_STS", BIT(11)},
+ {"LSX_PG_STS", BIT(13)},
+ {"P2SB_PG_STS", BIT(14)},
+ {"PSF_PG_STS", BIT(15)},
+ {"SBR_PG_STS", BIT(16)},
+ {"OPIDMI_PG_STS", BIT(17)},
+ {"THC0_PG_STS", BIT(18)},
+ {"THC1_PG_STS", BIT(19)},
+ {"GBETSN_PG_STS", BIT(20)},
+ {"GBE_PG_STS", BIT(21)},
+ {"LPSS_PG_STS", BIT(22)},
+ {"MMP_UFSX2_PG_STS", BIT(23)},
+ {"MMP_UFSX2B_PG_STS", BIT(24)},
+ {"FIA_PG_STS", BIT(25)},
+ {}
+};
+
+const struct pmc_bit_map tgl_d3_status_map[] = {
+ {"ADSP_D3_STS", BIT(0)},
+ {"SATA_D3_STS", BIT(1)},
+ {"xHCI0_D3_STS", BIT(2)},
+ {"xDCI1_D3_STS", BIT(5)},
+ {"SDX_D3_STS", BIT(6)},
+ {"EMMC_D3_STS", BIT(7)},
+ {"IS_D3_STS", BIT(8)},
+ {"THC0_D3_STS", BIT(9)},
+ {"THC1_D3_STS", BIT(10)},
+ {"GBE_D3_STS", BIT(11)},
+ {"GBE_TSN_D3_STS", BIT(12)},
+ {}
+};
+
+const struct pmc_bit_map tgl_vnn_req_status_map[] = {
+ {"GPIO_COM0_VNN_REQ_STS", BIT(1)},
+ {"GPIO_COM1_VNN_REQ_STS", BIT(2)},
+ {"GPIO_COM2_VNN_REQ_STS", BIT(3)},
+ {"GPIO_COM3_VNN_REQ_STS", BIT(4)},
+ {"GPIO_COM4_VNN_REQ_STS", BIT(5)},
+ {"GPIO_COM5_VNN_REQ_STS", BIT(6)},
+ {"Audio_VNN_REQ_STS", BIT(7)},
+ {"ISH_VNN_REQ_STS", BIT(8)},
+ {"CNVI_VNN_REQ_STS", BIT(9)},
+ {"eSPI_VNN_REQ_STS", BIT(10)},
+ {"Display_VNN_REQ_STS", BIT(11)},
+ {"DTS_VNN_REQ_STS", BIT(12)},
+ {"SMBUS_VNN_REQ_STS", BIT(14)},
+ {"CSME_VNN_REQ_STS", BIT(15)},
+ {"SMLINK0_VNN_REQ_STS", BIT(16)},
+ {"SMLINK1_VNN_REQ_STS", BIT(17)},
+ {"CLINK_VNN_REQ_STS", BIT(20)},
+ {"DCI_VNN_REQ_STS", BIT(21)},
+ {"ITH_VNN_REQ_STS", BIT(22)},
+ {"CSME_VNN_REQ_STS", BIT(24)},
+ {"GBE_VNN_REQ_STS", BIT(25)},
+ {}
+};
+
+const struct pmc_bit_map tgl_vnn_misc_status_map[] = {
+ {"CPU_C10_REQ_STS_0", BIT(0)},
+ {"PCIe_LPM_En_REQ_STS_3", BIT(3)},
+ {"ITH_REQ_STS_5", BIT(5)},
+ {"CNVI_REQ_STS_6", BIT(6)},
+ {"ISH_REQ_STS_7", BIT(7)},
+ {"USB2_SUS_PG_Sys_REQ_STS_10", BIT(10)},
+ {"PCIe_Clk_REQ_STS_12", BIT(12)},
+ {"MPHY_Core_DL_REQ_STS_16", BIT(16)},
+ {"Break-even_En_REQ_STS_17", BIT(17)},
+ {"Auto-demo_En_REQ_STS_18", BIT(18)},
+ {"MPHY_SUS_REQ_STS_22", BIT(22)},
+ {"xDCI_attached_REQ_STS_24", BIT(24)},
+ {}
+};
+
+const struct pmc_bit_map tgl_signal_status_map[] = {
+ {"LSX_Wake0_En_STS", BIT(0)},
+ {"LSX_Wake0_Pol_STS", BIT(1)},
+ {"LSX_Wake1_En_STS", BIT(2)},
+ {"LSX_Wake1_Pol_STS", BIT(3)},
+ {"LSX_Wake2_En_STS", BIT(4)},
+ {"LSX_Wake2_Pol_STS", BIT(5)},
+ {"LSX_Wake3_En_STS", BIT(6)},
+ {"LSX_Wake3_Pol_STS", BIT(7)},
+ {"LSX_Wake4_En_STS", BIT(8)},
+ {"LSX_Wake4_Pol_STS", BIT(9)},
+ {"LSX_Wake5_En_STS", BIT(10)},
+ {"LSX_Wake5_Pol_STS", BIT(11)},
+ {"LSX_Wake6_En_STS", BIT(12)},
+ {"LSX_Wake6_Pol_STS", BIT(13)},
+ {"LSX_Wake7_En_STS", BIT(14)},
+ {"LSX_Wake7_Pol_STS", BIT(15)},
+ {"Intel_Se_IO_Wake0_En_STS", BIT(16)},
+ {"Intel_Se_IO_Wake0_Pol_STS", BIT(17)},
+ {"Intel_Se_IO_Wake1_En_STS", BIT(18)},
+ {"Intel_Se_IO_Wake1_Pol_STS", BIT(19)},
+ {"Int_Timer_SS_Wake0_En_STS", BIT(20)},
+ {"Int_Timer_SS_Wake0_Pol_STS", BIT(21)},
+ {"Int_Timer_SS_Wake1_En_STS", BIT(22)},
+ {"Int_Timer_SS_Wake1_Pol_STS", BIT(23)},
+ {"Int_Timer_SS_Wake2_En_STS", BIT(24)},
+ {"Int_Timer_SS_Wake2_Pol_STS", BIT(25)},
+ {"Int_Timer_SS_Wake3_En_STS", BIT(26)},
+ {"Int_Timer_SS_Wake3_Pol_STS", BIT(27)},
+ {"Int_Timer_SS_Wake4_En_STS", BIT(28)},
+ {"Int_Timer_SS_Wake4_Pol_STS", BIT(29)},
+ {"Int_Timer_SS_Wake5_En_STS", BIT(30)},
+ {"Int_Timer_SS_Wake5_Pol_STS", BIT(31)},
+ {}
+};
+
+const struct pmc_bit_map *tgl_lpm_maps[] = {
+ tgl_clocksource_status_map,
+ tgl_power_gating_status_map,
+ tgl_d3_status_map,
+ tgl_vnn_req_status_map,
+ tgl_vnn_misc_status_map,
+ tgl_signal_status_map,
+ NULL
+};
+
+const struct pmc_reg_map tgl_reg_map = {
+ .pfear_sts = ext_tgl_pfear_map,
+ .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP,
+ .ltr_show_sts = cnp_ltr_show_map,
+ .msr_sts = msr_map,
+ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+ .regmap_length = CNP_PMC_MMIO_REG_LEN,
+ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+ .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES,
+ .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
+ .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED,
+ .lpm_num_maps = TGL_LPM_NUM_MAPS,
+ .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
+ .lpm_sts_latch_en_offset = TGL_LPM_STS_LATCH_EN_OFFSET,
+ .lpm_en_offset = TGL_LPM_EN_OFFSET,
+ .lpm_priority_offset = TGL_LPM_PRI_OFFSET,
+ .lpm_residency_offset = TGL_LPM_RESIDENCY_OFFSET,
+ .lpm_sts = tgl_lpm_maps,
+ .lpm_status_offset = TGL_LPM_STATUS_OFFSET,
+ .lpm_live_status_offset = TGL_LPM_LIVE_STATUS_OFFSET,
+ .etr3_offset = ETR3_OFFSET,
+};
+
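+/*
+ * Fetch the low power mode requirement registers via the ACPI _DSM
+ * function ACPI_GET_LOW_MODE_REGISTERS. Judging from the size check
+ * below, the firmware returns LPM_MAX_NUM_MODES * lpm_num_maps 32-bit
+ * registers; the exact ordering is firmware-defined (informational note).
+ */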
+void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev)
+{
+ struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ const int num_maps = pmc->map->lpm_num_maps;
+ u32 lpm_size = LPM_MAX_NUM_MODES * num_maps * 4;
+ union acpi_object *out_obj;
+ struct acpi_device *adev;
+ guid_t s0ix_dsm_guid;
+ u32 *lpm_req_regs, *addr;
+
+ adev = ACPI_COMPANION(&pdev->dev);
+ if (!adev)
+ return;
+
+ guid_parse(ACPI_S0IX_DSM_UUID, &s0ix_dsm_guid);
+
+ out_obj = acpi_evaluate_dsm_typed(adev->handle, &s0ix_dsm_guid, 0,
+ ACPI_GET_LOW_MODE_REGISTERS, NULL, ACPI_TYPE_BUFFER);
+ if (out_obj) {
+ u32 size = out_obj->buffer.length;
+
+ if (size != lpm_size) {
+ acpi_handle_debug(adev->handle,
+ "_DSM returned unexpected buffer size, have %u, expect %u\n",
+ size, lpm_size);
+ goto free_acpi_obj;
+ }
+ } else {
+ acpi_handle_debug(adev->handle,
+ "_DSM function 0 evaluation failed\n");
+ goto free_acpi_obj;
+ }
+
+ addr = (u32 *)out_obj->buffer.pointer;
+
+ lpm_req_regs = devm_kzalloc(&pdev->dev, lpm_size * sizeof(u32),
+ GFP_KERNEL);
+ if (!lpm_req_regs)
+ goto free_acpi_obj;
+
+ memcpy(lpm_req_regs, addr, lpm_size);
+ pmc->lpm_req_regs = lpm_req_regs;
+
+free_acpi_obj:
+ ACPI_FREE(out_obj);
+}
+
+int tgl_core_init(struct pmc_dev *pmcdev)
+{
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ int ret;
+
+ pmc->map = &tgl_reg_map;
+
+ pmcdev->suspend = cnl_suspend;
+ pmcdev->resume = cnl_resume;
+
+ ret = get_primary_reg_base(pmc);
+ if (ret)
+ return ret;
+
+ pmc_core_get_tgl_lpm_reqs(pmcdev->pdev);
+
+ return 0;
+}
diff --git a/drivers/platform/x86/intel/pmt/Kconfig b/drivers/platform/x86/intel/pmt/Kconfig
new file mode 100644
index 0000000000..e916fc9662
--- /dev/null
+++ b/drivers/platform/x86/intel/pmt/Kconfig
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Intel Platform Monitoring Technology drivers
+#
+
+config INTEL_PMT_CLASS
+ tristate
+ help
+ The Intel Platform Monitoring Technology (PMT) class driver provides
+ the basic sysfs interface and file hierarchy used by PMT devices.
+
+ For more information, see:
+ <file:Documentation/ABI/testing/sysfs-class-intel_pmt>
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_pmt_class.
+
+config INTEL_PMT_TELEMETRY
+ tristate "Intel Platform Monitoring Technology (PMT) Telemetry driver"
+ depends on INTEL_VSEC
+ select INTEL_PMT_CLASS
+ help
+	The Intel Platform Monitoring Technology (PMT) Telemetry driver provides
+ access to hardware telemetry metrics on devices that support the
+ feature.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_pmt_telemetry.
+
+config INTEL_PMT_CRASHLOG
+ tristate "Intel Platform Monitoring Technology (PMT) Crashlog driver"
+ depends on INTEL_VSEC
+ select INTEL_PMT_CLASS
+ help
+ The Intel Platform Monitoring Technology (PMT) crashlog driver provides
+ access to hardware crashlog capabilities on devices that support the
+ feature.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_pmt_crashlog.
diff --git a/drivers/platform/x86/intel/pmt/Makefile b/drivers/platform/x86/intel/pmt/Makefile
new file mode 100644
index 0000000000..279e158c7c
--- /dev/null
+++ b/drivers/platform/x86/intel/pmt/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for linux/drivers/platform/x86/intel/pmt
+# Intel Platform Monitoring Technology Drivers
+#
+
+obj-$(CONFIG_INTEL_PMT_CLASS) += pmt_class.o
+pmt_class-y := class.o
+obj-$(CONFIG_INTEL_PMT_TELEMETRY) += pmt_telemetry.o
+pmt_telemetry-y := telemetry.o
+obj-$(CONFIG_INTEL_PMT_CRASHLOG) += pmt_crashlog.o
+pmt_crashlog-y := crashlog.o
diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c
new file mode 100644
index 0000000000..f32a233470
--- /dev/null
+++ b/drivers/platform/x86/intel/pmt/class.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Platform Monitoring Technology (PMT) class driver
+ *
+ * Copyright (c) 2020, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Author: "Alexander Duyck" <alexander.h.duyck@linux.intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+
+#include "../vsec.h"
+#include "class.h"
+
+#define PMT_XA_START 0
+#define PMT_XA_MAX INT_MAX
+#define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX)
+#define GUID_SPR_PUNIT 0x9956f43f
+
+bool intel_pmt_is_early_client_hw(struct device *dev)
+{
+ struct intel_vsec_device *ivdev = dev_to_ivdev(dev);
+
+ /*
+ * Early implementations of PMT on client platforms have some
+ * differences from the server platforms (which use the Out Of Band
+	 * Management Services Module, OOBMSM).
+ */
+ return !!(ivdev->info->quirks & VSEC_QUIRK_EARLY_HW);
+}
+EXPORT_SYMBOL_NS_GPL(intel_pmt_is_early_client_hw, INTEL_PMT);
+
+static inline int
+pmt_memcpy64_fromio(void *to, const u64 __iomem *from, size_t count)
+{
+ int i, remain;
+ u64 *buf = to;
+
+ if (!IS_ALIGNED((unsigned long)from, 8))
+ return -EFAULT;
+
+ for (i = 0; i < count/8; i++)
+ buf[i] = readq(&from[i]);
+
+ /* Copy any remaining bytes */
+ remain = count % 8;
+ if (remain) {
+ u64 tmp = readq(&from[i]);
+
+ memcpy(&buf[i], &tmp, remain);
+ }
+
+ return count;
+}
+
+/*
+ * sysfs
+ */
+static ssize_t
+intel_pmt_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off,
+ size_t count)
+{
+ struct intel_pmt_entry *entry = container_of(attr,
+ struct intel_pmt_entry,
+ pmt_bin_attr);
+
+ if (off < 0)
+ return -EINVAL;
+
+ if (off >= entry->size)
+ return 0;
+
+ if (count > entry->size - off)
+ count = entry->size - off;
+
+ if (entry->guid == GUID_SPR_PUNIT)
+ /* PUNIT on SPR only supports aligned 64-bit read */
+ count = pmt_memcpy64_fromio(buf, entry->base + off, count);
+ else
+ memcpy_fromio(buf, entry->base + off, count);
+
+ return count;
+}
+
+static int
+intel_pmt_mmap(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, struct vm_area_struct *vma)
+{
+ struct intel_pmt_entry *entry = container_of(attr,
+ struct intel_pmt_entry,
+ pmt_bin_attr);
+ unsigned long vsize = vma->vm_end - vma->vm_start;
+ struct device *dev = kobj_to_dev(kobj);
+ unsigned long phys = entry->base_addr;
+ unsigned long pfn = PFN_DOWN(phys);
+ unsigned long psize;
+
+ if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
+ return -EROFS;
+
+ psize = (PFN_UP(entry->base_addr + entry->size) - pfn) * PAGE_SIZE;
+ if (vsize > psize) {
+ dev_err(dev, "Requested mmap size is too large\n");
+ return -EINVAL;
+ }
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ if (io_remap_pfn_range(vma, vma->vm_start, pfn,
+ vsize, vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
+
+static ssize_t
+guid_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct intel_pmt_entry *entry = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%x\n", entry->guid);
+}
+static DEVICE_ATTR_RO(guid);
+
+static ssize_t size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct intel_pmt_entry *entry = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%zu\n", entry->size);
+}
+static DEVICE_ATTR_RO(size);
+
+static ssize_t
+offset_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct intel_pmt_entry *entry = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%lu\n", offset_in_page(entry->base_addr));
+}
+static DEVICE_ATTR_RO(offset);
+
+static struct attribute *intel_pmt_attrs[] = {
+ &dev_attr_guid.attr,
+ &dev_attr_size.attr,
+ &dev_attr_offset.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(intel_pmt);
+
+static struct class intel_pmt_class = {
+ .name = "intel_pmt",
+ .dev_groups = intel_pmt_groups,
+};
+
+static int intel_pmt_populate_entry(struct intel_pmt_entry *entry,
+ struct intel_pmt_header *header,
+ struct device *dev,
+ struct resource *disc_res)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev->parent);
+ u8 bir;
+
+ /*
+ * The base offset should always be 8 byte aligned.
+ *
+ * For non-local access types the lower 3 bits of base offset
+	 * contain the index of the base address register where the
+ * telemetry can be found.
+ */
+ bir = GET_BIR(header->base_offset);
+
+ /* Local access and BARID only for now */
+ switch (header->access_type) {
+ case ACCESS_LOCAL:
+ if (bir) {
+ dev_err(dev,
+ "Unsupported BAR index %d for access type %d\n",
+ bir, header->access_type);
+ return -EINVAL;
+ }
+ /*
+ * For access_type LOCAL, the base address is as follows:
+ * base address = end of discovery region + base offset
+ */
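+		/*
+		 * Worked example with illustrative numbers only: a discovery
+		 * region ending at 0x4fff with base_offset 0x100 yields
+		 * 0x5000 + 0x100 = 0x5100.
+		 */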
+ entry->base_addr = disc_res->end + 1 + header->base_offset;
+
+ /*
+		 * Some hardware uses a different calculation for the base address
+		 * when access_type == ACCESS_LOCAL. On these systems
+		 * ACCESS_LOCAL refers to an address in the same BAR as the
+		 * header but at a fixed offset. As the header address was
+		 * supplied to the driver, we don't know which BAR it was in,
+		 * so search for the BAR whose range includes the header address.
+ */
+ if (intel_pmt_is_early_client_hw(dev)) {
+ int i;
+
+ entry->base_addr = 0;
+ for (i = 0; i < 6; i++)
+ if (disc_res->start >= pci_resource_start(pci_dev, i) &&
+ (disc_res->start <= pci_resource_end(pci_dev, i))) {
+ entry->base_addr = pci_resource_start(pci_dev, i) +
+ header->base_offset;
+ break;
+ }
+ if (!entry->base_addr)
+ return -EINVAL;
+ }
+
+ break;
+ case ACCESS_BARID:
+ /*
+ * If another BAR was specified then the base offset
+		 * represents the offset within that BAR. So retrieve the
+		 * address from the parent PCI device and add the offset.
+ */
+ entry->base_addr = pci_resource_start(pci_dev, bir) +
+ GET_ADDRESS(header->base_offset);
+ break;
+ default:
+ dev_err(dev, "Unsupported access type %d\n",
+ header->access_type);
+ return -EINVAL;
+ }
+
+ entry->guid = header->guid;
+ entry->size = header->size;
+
+ return 0;
+}
+
+static int intel_pmt_dev_register(struct intel_pmt_entry *entry,
+ struct intel_pmt_namespace *ns,
+ struct device *parent)
+{
+ struct resource res = {0};
+ struct device *dev;
+ int ret;
+
+ ret = xa_alloc(ns->xa, &entry->devid, entry, PMT_XA_LIMIT, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ dev = device_create(&intel_pmt_class, parent, MKDEV(0, 0), entry,
+ "%s%d", ns->name, entry->devid);
+
+ if (IS_ERR(dev)) {
+ dev_err(parent, "Could not create %s%d device node\n",
+ ns->name, entry->devid);
+ ret = PTR_ERR(dev);
+ goto fail_dev_create;
+ }
+
+ entry->kobj = &dev->kobj;
+
+ if (ns->attr_grp) {
+ ret = sysfs_create_group(entry->kobj, ns->attr_grp);
+ if (ret)
+ goto fail_sysfs;
+ }
+
+ /* if size is 0 assume no data buffer, so no file needed */
+ if (!entry->size)
+ return 0;
+
+ res.start = entry->base_addr;
+ res.end = res.start + entry->size - 1;
+ res.flags = IORESOURCE_MEM;
+
+ entry->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(entry->base)) {
+ ret = PTR_ERR(entry->base);
+ goto fail_ioremap;
+ }
+
+ sysfs_bin_attr_init(&entry->pmt_bin_attr);
+ entry->pmt_bin_attr.attr.name = ns->name;
+ entry->pmt_bin_attr.attr.mode = 0440;
+ entry->pmt_bin_attr.mmap = intel_pmt_mmap;
+ entry->pmt_bin_attr.read = intel_pmt_read;
+ entry->pmt_bin_attr.size = entry->size;
+
+ ret = sysfs_create_bin_file(&dev->kobj, &entry->pmt_bin_attr);
+ if (!ret)
+ return 0;
+
+fail_ioremap:
+ if (ns->attr_grp)
+ sysfs_remove_group(entry->kobj, ns->attr_grp);
+fail_sysfs:
+ device_unregister(dev);
+fail_dev_create:
+ xa_erase(ns->xa, entry->devid);
+
+ return ret;
+}
+
+int intel_pmt_dev_create(struct intel_pmt_entry *entry, struct intel_pmt_namespace *ns,
+ struct intel_vsec_device *intel_vsec_dev, int idx)
+{
+ struct device *dev = &intel_vsec_dev->auxdev.dev;
+ struct intel_pmt_header header;
+ struct resource *disc_res;
+ int ret;
+
+ disc_res = &intel_vsec_dev->resource[idx];
+
+ entry->disc_table = devm_ioremap_resource(dev, disc_res);
+ if (IS_ERR(entry->disc_table))
+ return PTR_ERR(entry->disc_table);
+
+ ret = ns->pmt_header_decode(entry, &header, dev);
+ if (ret)
+ return ret;
+
+ ret = intel_pmt_populate_entry(entry, &header, dev, disc_res);
+ if (ret)
+ return ret;
+
+ return intel_pmt_dev_register(entry, ns, dev);
+}
+EXPORT_SYMBOL_NS_GPL(intel_pmt_dev_create, INTEL_PMT);
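+
+/*
+ * Usage sketch (illustrative; &my_ns is a placeholder namespace): feature
+ * drivers call this once per discovery resource and must handle the
+ * tri-state return value:
+ *
+ *	ret = intel_pmt_dev_create(entry, &my_ns, intel_vsec_dev, i);
+ *	if (ret < 0)
+ *		return ret;	// hard failure
+ *	if (ret > 0)
+ *		continue;	// entry unsupported, skip it
+ */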
+
+void intel_pmt_dev_destroy(struct intel_pmt_entry *entry,
+ struct intel_pmt_namespace *ns)
+{
+ struct device *dev = kobj_to_dev(entry->kobj);
+
+ if (entry->size)
+ sysfs_remove_bin_file(entry->kobj, &entry->pmt_bin_attr);
+
+ if (ns->attr_grp)
+ sysfs_remove_group(entry->kobj, ns->attr_grp);
+
+ device_unregister(dev);
+ xa_erase(ns->xa, entry->devid);
+}
+EXPORT_SYMBOL_NS_GPL(intel_pmt_dev_destroy, INTEL_PMT);
+
+static int __init pmt_class_init(void)
+{
+ return class_register(&intel_pmt_class);
+}
+
+static void __exit pmt_class_exit(void)
+{
+ class_unregister(&intel_pmt_class);
+}
+
+module_init(pmt_class_init);
+module_exit(pmt_class_exit);
+
+MODULE_AUTHOR("Alexander Duyck <alexander.h.duyck@linux.intel.com>");
+MODULE_DESCRIPTION("Intel PMT Class driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/pmt/class.h b/drivers/platform/x86/intel/pmt/class.h
new file mode 100644
index 0000000000..db11d58867
--- /dev/null
+++ b/drivers/platform/x86/intel/pmt/class.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _INTEL_PMT_CLASS_H
+#define _INTEL_PMT_CLASS_H
+
+#include <linux/xarray.h>
+#include <linux/types.h>
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include "../vsec.h"
+
+/* PMT access types */
+#define ACCESS_BARID 2
+#define ACCESS_LOCAL 3
+
+/* PMT discovery base address/offset register layout */
+#define GET_BIR(v) ((v) & GENMASK(2, 0))
+#define GET_ADDRESS(v) ((v) & GENMASK(31, 3))
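+/*
+ * Worked example (illustrative): a base_offset value of 0x2003 decodes to
+ * GET_BIR() == 3 and GET_ADDRESS() == 0x2000, i.e. the telemetry region
+ * starts at offset 0x2000 within BAR 3.
+ */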
+
+struct intel_pmt_entry {
+ struct bin_attribute pmt_bin_attr;
+ struct kobject *kobj;
+ void __iomem *disc_table;
+ void __iomem *base;
+ unsigned long base_addr;
+ size_t size;
+ u32 guid;
+ int devid;
+};
+
+struct intel_pmt_header {
+ u32 base_offset;
+ u32 size;
+ u32 guid;
+ u8 access_type;
+};
+
+struct intel_pmt_namespace {
+ const char *name;
+ struct xarray *xa;
+ const struct attribute_group *attr_grp;
+ int (*pmt_header_decode)(struct intel_pmt_entry *entry,
+ struct intel_pmt_header *header,
+ struct device *dev);
+};
+
+bool intel_pmt_is_early_client_hw(struct device *dev);
+int intel_pmt_dev_create(struct intel_pmt_entry *entry,
+ struct intel_pmt_namespace *ns,
+ struct intel_vsec_device *dev, int idx);
+void intel_pmt_dev_destroy(struct intel_pmt_entry *entry,
+ struct intel_pmt_namespace *ns);
+#endif
diff --git a/drivers/platform/x86/intel/pmt/crashlog.c b/drivers/platform/x86/intel/pmt/crashlog.c
new file mode 100644
index 0000000000..bbb3d61d09
--- /dev/null
+++ b/drivers/platform/x86/intel/pmt/crashlog.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Platform Monitoring Technology Crashlog driver
+ *
+ * Copyright (c) 2020, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Author: "Alexander Duyck" <alexander.h.duyck@linux.intel.com>
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/overflow.h>
+
+#include "../vsec.h"
+#include "class.h"
+
+/* Crashlog discovery header types */
+#define CRASH_TYPE_OOBMSM 1
+
+/* Control Flags */
+#define CRASHLOG_FLAG_DISABLE BIT(28)
+
+/*
+ * Bits 29 and 30 control the state of bit 31.
+ *
+ * Bit 29 will clear bit 31, if set, allowing a new crashlog to be captured.
+ * Bit 30 will immediately trigger a crashlog to be generated, setting bit 31.
+ * Bit 31 is the read-only status with a 1 indicating log is complete.
+ */
+#define CRASHLOG_FLAG_TRIGGER_CLEAR BIT(29)
+#define CRASHLOG_FLAG_TRIGGER_EXECUTE BIT(30)
+#define CRASHLOG_FLAG_TRIGGER_COMPLETE BIT(31)
+#define CRASHLOG_FLAG_TRIGGER_MASK GENMASK(31, 28)
+
+/* Crashlog Discovery Header */
+#define CONTROL_OFFSET 0x0
+#define GUID_OFFSET 0x4
+#define BASE_OFFSET 0x8
+#define SIZE_OFFSET 0xC
+#define GET_ACCESS(v) ((v) & GENMASK(3, 0))
+#define GET_TYPE(v) (((v) & GENMASK(7, 4)) >> 4)
+#define GET_VERSION(v) (((v) & GENMASK(19, 16)) >> 16)
+/* size is in bytes */
+#define GET_SIZE(v) ((v) * sizeof(u32))
+
+struct crashlog_entry {
+ /* entry must be first member of struct */
+ struct intel_pmt_entry entry;
+ struct mutex control_mutex;
+};
+
+struct pmt_crashlog_priv {
+ int num_entries;
+ struct crashlog_entry entry[];
+};
+
+/*
+ * I/O
+ */
+static bool pmt_crashlog_complete(struct intel_pmt_entry *entry)
+{
+ u32 control = readl(entry->disc_table + CONTROL_OFFSET);
+
+ /* return current value of the crashlog complete flag */
+ return !!(control & CRASHLOG_FLAG_TRIGGER_COMPLETE);
+}
+
+static bool pmt_crashlog_disabled(struct intel_pmt_entry *entry)
+{
+ u32 control = readl(entry->disc_table + CONTROL_OFFSET);
+
+ /* return current value of the crashlog disabled flag */
+ return !!(control & CRASHLOG_FLAG_DISABLE);
+}
+
+static bool pmt_crashlog_supported(struct intel_pmt_entry *entry)
+{
+ u32 discovery_header = readl(entry->disc_table + CONTROL_OFFSET);
+ u32 crash_type, version;
+
+ crash_type = GET_TYPE(discovery_header);
+ version = GET_VERSION(discovery_header);
+
+ /*
+ * Currently we only recognize OOBMSM version 0 devices.
+ * We can ignore all other crashlog devices in the system.
+ */
+ return crash_type == CRASH_TYPE_OOBMSM && version == 0;
+}
+
+static void pmt_crashlog_set_disable(struct intel_pmt_entry *entry,
+ bool disable)
+{
+ u32 control = readl(entry->disc_table + CONTROL_OFFSET);
+
+ /* clear trigger bits so we are only modifying disable flag */
+ control &= ~CRASHLOG_FLAG_TRIGGER_MASK;
+
+ if (disable)
+ control |= CRASHLOG_FLAG_DISABLE;
+ else
+ control &= ~CRASHLOG_FLAG_DISABLE;
+
+ writel(control, entry->disc_table + CONTROL_OFFSET);
+}
+
+static void pmt_crashlog_set_clear(struct intel_pmt_entry *entry)
+{
+ u32 control = readl(entry->disc_table + CONTROL_OFFSET);
+
+ control &= ~CRASHLOG_FLAG_TRIGGER_MASK;
+ control |= CRASHLOG_FLAG_TRIGGER_CLEAR;
+
+ writel(control, entry->disc_table + CONTROL_OFFSET);
+}
+
+static void pmt_crashlog_set_execute(struct intel_pmt_entry *entry)
+{
+ u32 control = readl(entry->disc_table + CONTROL_OFFSET);
+
+ control &= ~CRASHLOG_FLAG_TRIGGER_MASK;
+ control |= CRASHLOG_FLAG_TRIGGER_EXECUTE;
+
+ writel(control, entry->disc_table + CONTROL_OFFSET);
+}
+
+/*
+ * sysfs
+ */
+static ssize_t
+enable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct intel_pmt_entry *entry = dev_get_drvdata(dev);
+ int enabled = !pmt_crashlog_disabled(entry);
+
+ return sprintf(buf, "%d\n", enabled);
+}
+
+static ssize_t
+enable_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct crashlog_entry *entry;
+ bool enabled;
+ int result;
+
+ entry = dev_get_drvdata(dev);
+
+ result = kstrtobool(buf, &enabled);
+ if (result)
+ return result;
+
+ mutex_lock(&entry->control_mutex);
+ pmt_crashlog_set_disable(&entry->entry, !enabled);
+ mutex_unlock(&entry->control_mutex);
+
+ return count;
+}
+static DEVICE_ATTR_RW(enable);
+
+static ssize_t
+trigger_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct intel_pmt_entry *entry;
+ int trigger;
+
+ entry = dev_get_drvdata(dev);
+ trigger = pmt_crashlog_complete(entry);
+
+ return sprintf(buf, "%d\n", trigger);
+}
+
+static ssize_t
+trigger_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct crashlog_entry *entry;
+ bool trigger;
+ int result;
+
+ entry = dev_get_drvdata(dev);
+
+ result = kstrtobool(buf, &trigger);
+ if (result)
+ return result;
+
+ mutex_lock(&entry->control_mutex);
+
+ if (!trigger) {
+ pmt_crashlog_set_clear(&entry->entry);
+ } else if (pmt_crashlog_complete(&entry->entry)) {
+ /* we cannot trigger a new crash if one is still pending */
+ result = -EEXIST;
+ goto err;
+ } else if (pmt_crashlog_disabled(&entry->entry)) {
+ /* if device is currently disabled, return busy */
+ result = -EBUSY;
+ goto err;
+ } else {
+ pmt_crashlog_set_execute(&entry->entry);
+ }
+
+ result = count;
+err:
+ mutex_unlock(&entry->control_mutex);
+ return result;
+}
+static DEVICE_ATTR_RW(trigger);
+
+static struct attribute *pmt_crashlog_attrs[] = {
+ &dev_attr_enable.attr,
+ &dev_attr_trigger.attr,
+ NULL
+};
+
+static const struct attribute_group pmt_crashlog_group = {
+ .attrs = pmt_crashlog_attrs,
+};
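+
+/*
+ * Illustrative sysfs usage (the device index is an example):
+ *   echo 1 > /sys/class/intel_pmt/crashlog0/trigger	# capture a log
+ *   cat /sys/class/intel_pmt/crashlog0/crashlog	# read the binary log
+ *   echo 0 > /sys/class/intel_pmt/crashlog0/trigger	# clear and re-arm
+ */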
+
+static int pmt_crashlog_header_decode(struct intel_pmt_entry *entry,
+ struct intel_pmt_header *header,
+ struct device *dev)
+{
+ void __iomem *disc_table = entry->disc_table;
+ struct crashlog_entry *crashlog;
+
+ if (!pmt_crashlog_supported(entry))
+ return 1;
+
+ /* initialize control mutex */
+ crashlog = container_of(entry, struct crashlog_entry, entry);
+ mutex_init(&crashlog->control_mutex);
+
+ header->access_type = GET_ACCESS(readl(disc_table));
+ header->guid = readl(disc_table + GUID_OFFSET);
+ header->base_offset = readl(disc_table + BASE_OFFSET);
+
+ /* Size is measured in DWORDS, but accessor returns bytes */
+ header->size = GET_SIZE(readl(disc_table + SIZE_OFFSET));
+
+ return 0;
+}
+
+static DEFINE_XARRAY_ALLOC(crashlog_array);
+static struct intel_pmt_namespace pmt_crashlog_ns = {
+ .name = "crashlog",
+ .xa = &crashlog_array,
+ .attr_grp = &pmt_crashlog_group,
+ .pmt_header_decode = pmt_crashlog_header_decode,
+};
+
+/*
+ * initialization
+ */
+static void pmt_crashlog_remove(struct auxiliary_device *auxdev)
+{
+ struct pmt_crashlog_priv *priv = auxiliary_get_drvdata(auxdev);
+ int i;
+
+ for (i = 0; i < priv->num_entries; i++)
+ intel_pmt_dev_destroy(&priv->entry[i].entry, &pmt_crashlog_ns);
+}
+
+static int pmt_crashlog_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct intel_vsec_device *intel_vsec_dev = auxdev_to_ivdev(auxdev);
+ struct pmt_crashlog_priv *priv;
+ size_t size;
+ int i, ret;
+
+ size = struct_size(priv, entry, intel_vsec_dev->num_resources);
+ priv = devm_kzalloc(&auxdev->dev, size, GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ auxiliary_set_drvdata(auxdev, priv);
+
+ for (i = 0; i < intel_vsec_dev->num_resources; i++) {
+ struct intel_pmt_entry *entry = &priv->entry[priv->num_entries].entry;
+
+ ret = intel_pmt_dev_create(entry, &pmt_crashlog_ns, intel_vsec_dev, i);
+ if (ret < 0)
+ goto abort_probe;
+ if (ret)
+ continue;
+
+ priv->num_entries++;
+ }
+
+ return 0;
+abort_probe:
+ pmt_crashlog_remove(auxdev);
+ return ret;
+}
+
+static const struct auxiliary_device_id pmt_crashlog_id_table[] = {
+ { .name = "intel_vsec.crashlog" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, pmt_crashlog_id_table);
+
+static struct auxiliary_driver pmt_crashlog_aux_driver = {
+ .id_table = pmt_crashlog_id_table,
+ .remove = pmt_crashlog_remove,
+ .probe = pmt_crashlog_probe,
+};
+
+static int __init pmt_crashlog_init(void)
+{
+ return auxiliary_driver_register(&pmt_crashlog_aux_driver);
+}
+
+static void __exit pmt_crashlog_exit(void)
+{
+ auxiliary_driver_unregister(&pmt_crashlog_aux_driver);
+ xa_destroy(&crashlog_array);
+}
+
+module_init(pmt_crashlog_init);
+module_exit(pmt_crashlog_exit);
+
+MODULE_AUTHOR("Alexander Duyck <alexander.h.duyck@linux.intel.com>");
+MODULE_DESCRIPTION("Intel PMT Crashlog driver");
+MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(INTEL_PMT);
diff --git a/drivers/platform/x86/intel/pmt/telemetry.c b/drivers/platform/x86/intel/pmt/telemetry.c
new file mode 100644
index 0000000000..39cbc87cc2
--- /dev/null
+++ b/drivers/platform/x86/intel/pmt/telemetry.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Platform Monitoring Technology Telemetry driver
+ *
+ * Copyright (c) 2020, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Author: "David E. Box" <david.e.box@linux.intel.com>
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/overflow.h>
+
+#include "../vsec.h"
+#include "class.h"
+
+#define TELEM_SIZE_OFFSET 0x0
+#define TELEM_GUID_OFFSET 0x4
+#define TELEM_BASE_OFFSET 0x8
+#define TELEM_ACCESS(v) ((v) & GENMASK(3, 0))
+#define TELEM_TYPE(v) (((v) & GENMASK(7, 4)) >> 4)
+/* size is in bytes */
+#define TELEM_SIZE(v) (((v) & GENMASK(27, 12)) >> 10)
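+/*
+ * Note: the size field occupies bits 27:12 and counts dwords; shifting
+ * right by 10 rather than 12 folds in the dword-to-byte (*4) conversion.
+ */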
+
+/* Used by client hardware to identify a fixed telemetry entry */
+#define TELEM_CLIENT_FIXED_BLOCK_GUID 0x10000000
+
+enum telem_type {
+ TELEM_TYPE_PUNIT = 0,
+ TELEM_TYPE_CRASHLOG,
+ TELEM_TYPE_PUNIT_FIXED,
+};
+
+struct pmt_telem_priv {
+ int num_entries;
+ struct intel_pmt_entry entry[];
+};
+
+static bool pmt_telem_region_overlaps(struct intel_pmt_entry *entry,
+ struct device *dev)
+{
+ u32 guid = readl(entry->disc_table + TELEM_GUID_OFFSET);
+
+ if (intel_pmt_is_early_client_hw(dev)) {
+ u32 type = TELEM_TYPE(readl(entry->disc_table));
+
+ if ((type == TELEM_TYPE_PUNIT_FIXED) ||
+ (guid == TELEM_CLIENT_FIXED_BLOCK_GUID))
+ return true;
+ }
+
+ return false;
+}
+
+static int pmt_telem_header_decode(struct intel_pmt_entry *entry,
+ struct intel_pmt_header *header,
+ struct device *dev)
+{
+ void __iomem *disc_table = entry->disc_table;
+
+ if (pmt_telem_region_overlaps(entry, dev))
+ return 1;
+
+ header->access_type = TELEM_ACCESS(readl(disc_table));
+ header->guid = readl(disc_table + TELEM_GUID_OFFSET);
+ header->base_offset = readl(disc_table + TELEM_BASE_OFFSET);
+
+ /* Size is measured in DWORDS, but accessor returns bytes */
+ header->size = TELEM_SIZE(readl(disc_table));
+
+ /*
+ * Some devices may expose non-functioning entries that are
+ * reserved for future use. They have zero size. Do not fail
+ * probe for these. Just ignore them.
+ */
+ if (header->size == 0 || header->access_type == 0xF)
+ return 1;
+
+ return 0;
+}
+
+static DEFINE_XARRAY_ALLOC(telem_array);
+static struct intel_pmt_namespace pmt_telem_ns = {
+ .name = "telem",
+ .xa = &telem_array,
+ .pmt_header_decode = pmt_telem_header_decode,
+};
+
+static void pmt_telem_remove(struct auxiliary_device *auxdev)
+{
+ struct pmt_telem_priv *priv = auxiliary_get_drvdata(auxdev);
+ int i;
+
+ for (i = 0; i < priv->num_entries; i++)
+ intel_pmt_dev_destroy(&priv->entry[i], &pmt_telem_ns);
+}
+
+static int pmt_telem_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
+{
+ struct intel_vsec_device *intel_vsec_dev = auxdev_to_ivdev(auxdev);
+ struct pmt_telem_priv *priv;
+ size_t size;
+ int i, ret;
+
+ size = struct_size(priv, entry, intel_vsec_dev->num_resources);
+ priv = devm_kzalloc(&auxdev->dev, size, GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ auxiliary_set_drvdata(auxdev, priv);
+
+ for (i = 0; i < intel_vsec_dev->num_resources; i++) {
+ struct intel_pmt_entry *entry = &priv->entry[priv->num_entries];
+
+ ret = intel_pmt_dev_create(entry, &pmt_telem_ns, intel_vsec_dev, i);
+ if (ret < 0)
+ goto abort_probe;
+ if (ret)
+ continue;
+
+ priv->num_entries++;
+ }
+
+ return 0;
+abort_probe:
+ pmt_telem_remove(auxdev);
+ return ret;
+}
+
+static const struct auxiliary_device_id pmt_telem_id_table[] = {
+ { .name = "intel_vsec.telemetry" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, pmt_telem_id_table);
+
+static struct auxiliary_driver pmt_telem_aux_driver = {
+ .id_table = pmt_telem_id_table,
+ .remove = pmt_telem_remove,
+ .probe = pmt_telem_probe,
+};
+
+static int __init pmt_telem_init(void)
+{
+ return auxiliary_driver_register(&pmt_telem_aux_driver);
+}
+module_init(pmt_telem_init);
+
+static void __exit pmt_telem_exit(void)
+{
+ auxiliary_driver_unregister(&pmt_telem_aux_driver);
+ xa_destroy(&telem_array);
+}
+module_exit(pmt_telem_exit);
+
+MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
+MODULE_DESCRIPTION("Intel PMT Telemetry driver");
+MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(INTEL_PMT);
diff --git a/drivers/platform/x86/intel/punit_ipc.c b/drivers/platform/x86/intel/punit_ipc.c
new file mode 100644
index 0000000000..cd0ba84cc8
--- /dev/null
+++ b/drivers/platform/x86/intel/punit_ipc.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Intel P-Unit Mailbox IPC mechanism
+ *
+ * (C) Copyright 2015 Intel Corporation
+ *
+ * The heart of the P-Unit is the Foxton microcontroller and its firmware,
+ * which provide mailbox interface for power management usage.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/intel_punit_ipc.h>
+
+/* IPC Mailbox registers */
+#define OFFSET_DATA_LOW 0x0
+#define OFFSET_DATA_HIGH 0x4
+/* bit field of interface register */
+#define CMD_RUN BIT(31)
+#define CMD_ERRCODE_MASK GENMASK(7, 0)
+#define CMD_PARA1_SHIFT 8
+#define CMD_PARA2_SHIFT 16
+
+#define CMD_TIMEOUT_SECONDS 1
+
+enum {
+ BASE_DATA = 0,
+ BASE_IFACE,
+ BASE_MAX,
+};
+
+typedef struct {
+ struct device *dev;
+ struct mutex lock;
+ int irq;
+ struct completion cmd_complete;
+ /* base of interface and data registers */
+ void __iomem *base[RESERVED_IPC][BASE_MAX];
+ IPC_TYPE type;
+} IPC_DEV;
+
+static IPC_DEV *punit_ipcdev;
+
+static inline u32 ipc_read_status(IPC_DEV *ipcdev, IPC_TYPE type)
+{
+ return readl(ipcdev->base[type][BASE_IFACE]);
+}
+
+static inline void ipc_write_cmd(IPC_DEV *ipcdev, IPC_TYPE type, u32 cmd)
+{
+ writel(cmd, ipcdev->base[type][BASE_IFACE]);
+}
+
+static inline u32 ipc_read_data_low(IPC_DEV *ipcdev, IPC_TYPE type)
+{
+ return readl(ipcdev->base[type][BASE_DATA] + OFFSET_DATA_LOW);
+}
+
+static inline u32 ipc_read_data_high(IPC_DEV *ipcdev, IPC_TYPE type)
+{
+ return readl(ipcdev->base[type][BASE_DATA] + OFFSET_DATA_HIGH);
+}
+
+static inline void ipc_write_data_low(IPC_DEV *ipcdev, IPC_TYPE type, u32 data)
+{
+ writel(data, ipcdev->base[type][BASE_DATA] + OFFSET_DATA_LOW);
+}
+
+static inline void ipc_write_data_high(IPC_DEV *ipcdev, IPC_TYPE type, u32 data)
+{
+ writel(data, ipcdev->base[type][BASE_DATA] + OFFSET_DATA_HIGH);
+}
+
+static const char *ipc_err_string(int error)
+{
+ if (error == IPC_PUNIT_ERR_SUCCESS)
+ return "no error";
+ else if (error == IPC_PUNIT_ERR_INVALID_CMD)
+ return "invalid command";
+ else if (error == IPC_PUNIT_ERR_INVALID_PARAMETER)
+ return "invalid parameter";
+ else if (error == IPC_PUNIT_ERR_CMD_TIMEOUT)
+ return "command timeout";
+ else if (error == IPC_PUNIT_ERR_CMD_LOCKED)
+ return "command locked";
+ else if (error == IPC_PUNIT_ERR_INVALID_VR_ID)
+ return "invalid vr id";
+ else if (error == IPC_PUNIT_ERR_VR_ERR)
+ return "vr error";
+ else
+ return "unknown error";
+}
+
+static int intel_punit_ipc_check_status(IPC_DEV *ipcdev, IPC_TYPE type)
+{
+ int loops = CMD_TIMEOUT_SECONDS * USEC_PER_SEC;
+ int errcode;
+ int status;
+
+ if (ipcdev->irq) {
+ if (!wait_for_completion_timeout(&ipcdev->cmd_complete,
+ CMD_TIMEOUT_SECONDS * HZ)) {
+ dev_err(ipcdev->dev, "IPC timed out\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ while ((ipc_read_status(ipcdev, type) & CMD_RUN) && --loops)
+ udelay(1);
+ if (!loops) {
+ dev_err(ipcdev->dev, "IPC timed out\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ status = ipc_read_status(ipcdev, type);
+ errcode = status & CMD_ERRCODE_MASK;
+ if (errcode) {
+ dev_err(ipcdev->dev, "IPC failed: %s, IPC_STS=0x%x\n",
+ ipc_err_string(errcode), status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * intel_punit_ipc_simple_command() - Simple IPC command
+ * @cmd: IPC command code.
+ * @para1: First 8bit parameter, set 0 if not used.
+ * @para2: Second 8bit parameter, set 0 if not used.
+ *
+ * Send an IPC command to the P-Unit when there is no data transaction
+ *
+ * Return: IPC error code or 0 on success.
+ */
+int intel_punit_ipc_simple_command(int cmd, int para1, int para2)
+{
+ IPC_DEV *ipcdev = punit_ipcdev;
+ IPC_TYPE type;
+ u32 val;
+ int ret;
+
+ mutex_lock(&ipcdev->lock);
+
+ reinit_completion(&ipcdev->cmd_complete);
+ type = (cmd & IPC_PUNIT_CMD_TYPE_MASK) >> IPC_TYPE_OFFSET;
+
+ val = cmd & ~IPC_PUNIT_CMD_TYPE_MASK;
+ val |= CMD_RUN | para2 << CMD_PARA2_SHIFT | para1 << CMD_PARA1_SHIFT;
+ ipc_write_cmd(ipcdev, type, val);
+ ret = intel_punit_ipc_check_status(ipcdev, type);
+
+ mutex_unlock(&ipcdev->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(intel_punit_ipc_simple_command);
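+
+/*
+ * Usage sketch (illustrative; PUNIT_BIOS_CMD is a placeholder for a
+ * command code built from the definitions in <asm/intel_punit_ipc.h>):
+ *
+ *	ret = intel_punit_ipc_simple_command(PUNIT_BIOS_CMD, 0, 0);
+ *	if (ret)
+ *		return ret;
+ */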
+
+/**
+ * intel_punit_ipc_command() - IPC command with data and pointers
+ * @cmd: IPC command code.
+ * @para1: First 8bit parameter, set 0 if not used.
+ * @para2: Second 8bit parameter, set 0 if not used.
+ * @in: Input data, 32bit for BIOS cmd, two 32bit for GTD and ISPD.
+ * @out: Output data.
+ *
+ * Send an IPC command to the P-Unit with a data transaction
+ *
+ * Return: IPC error code or 0 on success.
+ */
+int intel_punit_ipc_command(u32 cmd, u32 para1, u32 para2, u32 *in, u32 *out)
+{
+ IPC_DEV *ipcdev = punit_ipcdev;
+ IPC_TYPE type;
+ u32 val;
+ int ret;
+
+ mutex_lock(&ipcdev->lock);
+
+ reinit_completion(&ipcdev->cmd_complete);
+ type = (cmd & IPC_PUNIT_CMD_TYPE_MASK) >> IPC_TYPE_OFFSET;
+
+ if (in) {
+ ipc_write_data_low(ipcdev, type, *in);
+ if (type == GTDRIVER_IPC || type == ISPDRIVER_IPC)
+ ipc_write_data_high(ipcdev, type, *++in);
+ }
+
+ val = cmd & ~IPC_PUNIT_CMD_TYPE_MASK;
+ val |= CMD_RUN | para2 << CMD_PARA2_SHIFT | para1 << CMD_PARA1_SHIFT;
+ ipc_write_cmd(ipcdev, type, val);
+
+ ret = intel_punit_ipc_check_status(ipcdev, type);
+ if (ret)
+ goto out;
+
+ if (out) {
+ *out = ipc_read_data_low(ipcdev, type);
+ if (type == GTDRIVER_IPC || type == ISPDRIVER_IPC)
+ *++out = ipc_read_data_high(ipcdev, type);
+ }
+
+out:
+ mutex_unlock(&ipcdev->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(intel_punit_ipc_command);
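+
+/*
+ * Usage sketch (illustrative; PUNIT_BIOS_CMD is a placeholder command
+ * code):
+ *
+ *	u32 in = 0, out = 0;
+ *
+ *	ret = intel_punit_ipc_command(PUNIT_BIOS_CMD, 0, 0, &in, &out);
+ *	if (!ret)
+ *		pr_debug("P-Unit response: 0x%x\n", out);
+ */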
+
+static irqreturn_t intel_punit_ioc(int irq, void *dev_id)
+{
+ IPC_DEV *ipcdev = dev_id;
+
+ complete(&ipcdev->cmd_complete);
+ return IRQ_HANDLED;
+}
+
+static int intel_punit_get_bars(struct platform_device *pdev)
+{
+ void __iomem *addr;
+
+ /*
+ * The following resources are required
+ * - BIOS_IPC BASE_DATA
+ * - BIOS_IPC BASE_IFACE
+ */
+ addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
+ punit_ipcdev->base[BIOS_IPC][BASE_DATA] = addr;
+
+ addr = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
+ punit_ipcdev->base[BIOS_IPC][BASE_IFACE] = addr;
+
+ /*
+ * The following resources are optional
+ * - ISPDRIVER_IPC BASE_DATA
+ * - ISPDRIVER_IPC BASE_IFACE
+ * - GTDRIVER_IPC BASE_DATA
+ * - GTDRIVER_IPC BASE_IFACE
+ */
+ addr = devm_platform_ioremap_resource(pdev, 2);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
+
+ addr = devm_platform_ioremap_resource(pdev, 3);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
+
+ addr = devm_platform_ioremap_resource(pdev, 4);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
+
+ addr = devm_platform_ioremap_resource(pdev, 5);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
+
+ return 0;
+}
+
+static int intel_punit_ipc_probe(struct platform_device *pdev)
+{
+ int irq, ret;
+
+ punit_ipcdev = devm_kzalloc(&pdev->dev,
+ sizeof(*punit_ipcdev), GFP_KERNEL);
+ if (!punit_ipcdev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, punit_ipcdev);
+
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq < 0) {
+ dev_warn(&pdev->dev, "Invalid IRQ, using polling mode\n");
+ } else {
+ ret = devm_request_irq(&pdev->dev, irq, intel_punit_ioc,
+ IRQF_NO_SUSPEND, "intel_punit_ipc",
+ punit_ipcdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq: %d\n", irq);
+ return ret;
+ }
+ punit_ipcdev->irq = irq;
+ }
+
+ ret = intel_punit_get_bars(pdev);
+ if (ret)
+ return ret;
+
+ punit_ipcdev->dev = &pdev->dev;
+ mutex_init(&punit_ipcdev->lock);
+ init_completion(&punit_ipcdev->cmd_complete);
+
+ return 0;
+}
+
+static const struct acpi_device_id punit_ipc_acpi_ids[] = {
+ { "INT34D4", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, punit_ipc_acpi_ids);
+
+static struct platform_driver intel_punit_ipc_driver = {
+ .probe = intel_punit_ipc_probe,
+ .driver = {
+ .name = "intel_punit_ipc",
+ .acpi_match_table = punit_ipc_acpi_ids,
+ },
+};
+
+static int __init intel_punit_ipc_init(void)
+{
+ return platform_driver_register(&intel_punit_ipc_driver);
+}
+
+static void __exit intel_punit_ipc_exit(void)
+{
+ platform_driver_unregister(&intel_punit_ipc_driver);
+}
+
+MODULE_AUTHOR("Zha Qipeng <qipeng.zha@intel.com>");
+MODULE_DESCRIPTION("Intel P-Unit IPC driver");
+MODULE_LICENSE("GPL v2");
+
+/* Some modules are dependent on this, so init earlier */
+fs_initcall(intel_punit_ipc_init);
+module_exit(intel_punit_ipc_exit);
diff --git a/drivers/platform/x86/intel/rst.c b/drivers/platform/x86/intel/rst.c
new file mode 100644
index 0000000000..35814a7707
--- /dev/null
+++ b/drivers/platform/x86/intel/rst.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2013 Matthew Garrett <mjg59@srcf.ucam.org>
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+MODULE_LICENSE("GPL");
+
+static ssize_t irst_show_wakeup_events(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_device *acpi;
+ unsigned long long value;
+ acpi_status status;
+
+ acpi = to_acpi_device(dev);
+
+ status = acpi_evaluate_integer(acpi->handle, "GFFS", NULL, &value);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+
+ return sprintf(buf, "%lld\n", value);
+}
+
+static ssize_t irst_store_wakeup_events(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_device *acpi;
+ acpi_status status;
+ unsigned long value;
+ int error;
+
+ acpi = to_acpi_device(dev);
+
+ error = kstrtoul(buf, 0, &value);
+ if (error)
+ return error;
+
+ status = acpi_execute_simple_method(acpi->handle, "SFFS", value);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+
+ return count;
+}
+
+static struct device_attribute irst_wakeup_attr = {
+ .attr = { .name = "wakeup_events", .mode = 0600 },
+ .show = irst_show_wakeup_events,
+ .store = irst_store_wakeup_events
+};
+
+static ssize_t irst_show_wakeup_time(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_device *acpi;
+ unsigned long long value;
+ acpi_status status;
+
+ acpi = to_acpi_device(dev);
+
+ status = acpi_evaluate_integer(acpi->handle, "GFTV", NULL, &value);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+
+ return sprintf(buf, "%lld\n", value);
+}
+
+static ssize_t irst_store_wakeup_time(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_device *acpi;
+ acpi_status status;
+ unsigned long value;
+ int error;
+
+ acpi = to_acpi_device(dev);
+
+ error = kstrtoul(buf, 0, &value);
+ if (error)
+ return error;
+
+ status = acpi_execute_simple_method(acpi->handle, "SFTV", value);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+
+ return count;
+}
+
+static struct device_attribute irst_timeout_attr = {
+ .attr = { .name = "wakeup_time", .mode = 0600 },
+ .show = irst_show_wakeup_time,
+ .store = irst_store_wakeup_time
+};
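+
+/*
+ * Illustrative usage (the ACPI device path is an example):
+ *   echo 3600 > /sys/bus/acpi/devices/INT3392:00/wakeup_time
+ *   cat /sys/bus/acpi/devices/INT3392:00/wakeup_events
+ */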
+
+static int irst_add(struct acpi_device *acpi)
+{
+ int error;
+
+ error = device_create_file(&acpi->dev, &irst_timeout_attr);
+ if (unlikely(error))
+ return error;
+
+ error = device_create_file(&acpi->dev, &irst_wakeup_attr);
+ if (unlikely(error))
+ device_remove_file(&acpi->dev, &irst_timeout_attr);
+
+ return error;
+}
+
+static void irst_remove(struct acpi_device *acpi)
+{
+ device_remove_file(&acpi->dev, &irst_wakeup_attr);
+ device_remove_file(&acpi->dev, &irst_timeout_attr);
+}
+
+static const struct acpi_device_id irst_ids[] = {
+ {"INT3392", 0},
+ {"", 0}
+};
+
+static struct acpi_driver irst_driver = {
+ .owner = THIS_MODULE,
+ .name = "intel_rapid_start",
+ .class = "intel_rapid_start",
+ .ids = irst_ids,
+ .ops = {
+ .add = irst_add,
+ .remove = irst_remove,
+ },
+};
+
+module_acpi_driver(irst_driver);
+
+MODULE_DEVICE_TABLE(acpi, irst_ids);
diff --git a/drivers/platform/x86/intel/sdsi.c b/drivers/platform/x86/intel/sdsi.c
new file mode 100644
index 0000000000..556e7c6dbb
--- /dev/null
+++ b/drivers/platform/x86/intel/sdsi.c
@@ -0,0 +1,672 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel On Demand (Software Defined Silicon) driver
+ *
+ * Copyright (c) 2022, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Author: "David E. Box" <david.e.box@linux.intel.com>
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include "vsec.h"
+
+#define ACCESS_TYPE_BARID 2
+#define ACCESS_TYPE_LOCAL 3
+
+#define SDSI_MIN_SIZE_DWORDS 276
+#define SDSI_SIZE_MAILBOX 1024
+#define SDSI_SIZE_REGS 80
+#define SDSI_SIZE_CMD sizeof(u64)
+
+/*
+ * Write messages are currently up to the size of the mailbox,
+ * while read messages are up to 4 times the size of the
+ * mailbox, sent in packets.
+ */
+#define SDSI_SIZE_WRITE_MSG SDSI_SIZE_MAILBOX
+#define SDSI_SIZE_READ_MSG (SDSI_SIZE_MAILBOX * 4)
+
+#define SDSI_ENABLED_FEATURES_OFFSET 16
+#define SDSI_FEATURE_SDSI BIT(3)
+#define SDSI_FEATURE_METERING BIT(26)
+
+#define SDSI_SOCKET_ID_OFFSET 64
+#define SDSI_SOCKET_ID GENMASK(3, 0)
+
+#define SDSI_MBOX_CMD_SUCCESS 0x40
+#define SDSI_MBOX_CMD_TIMEOUT 0x80
+
+#define MBOX_TIMEOUT_US 500000
+#define MBOX_TIMEOUT_ACQUIRE_US 1000
+#define MBOX_POLLING_PERIOD_US 100
+#define MBOX_ACQUIRE_NUM_RETRIES 5
+#define MBOX_ACQUIRE_RETRY_DELAY_MS 500
+#define MBOX_MAX_PACKETS 4
+
+#define MBOX_OWNER_NONE 0x00
+#define MBOX_OWNER_INBAND 0x01
+
+#define CTRL_RUN_BUSY BIT(0)
+#define CTRL_READ_WRITE BIT(1)
+#define CTRL_SOM BIT(2)
+#define CTRL_EOM BIT(3)
+#define CTRL_OWNER GENMASK(5, 4)
+#define CTRL_COMPLETE BIT(6)
+#define CTRL_READY BIT(7)
+#define CTRL_STATUS GENMASK(15, 8)
+#define CTRL_PACKET_SIZE GENMASK(31, 16)
+#define CTRL_MSG_SIZE GENMASK(63, 48)
+
+#define DISC_TABLE_SIZE 12
+#define DT_ACCESS_TYPE GENMASK(3, 0)
+#define DT_SIZE GENMASK(27, 12)
+#define DT_TBIR GENMASK(2, 0)
+#define DT_OFFSET(v) ((v) & GENMASK(31, 3))
+
+#define SDSI_GUID_V1 0x006DD191
+#define GUID_V1_CNTRL_SIZE 8
+#define GUID_V1_REGS_SIZE 72
+#define SDSI_GUID_V2 0xF210D9EF
+#define GUID_V2_CNTRL_SIZE 16
+#define GUID_V2_REGS_SIZE 80
+
+enum sdsi_command {
+ SDSI_CMD_PROVISION_AKC = 0x0004,
+ SDSI_CMD_PROVISION_CAP = 0x0008,
+ SDSI_CMD_READ_STATE = 0x0010,
+ SDSI_CMD_READ_METER = 0x0014,
+};
+
+struct sdsi_mbox_info {
+ u64 *payload;
+ void *buffer;
+ int size;
+};
+
+struct disc_table {
+ u32 access_info;
+ u32 guid;
+ u32 offset;
+};
+
+struct sdsi_priv {
+ struct mutex mb_lock; /* Mailbox access lock */
+ struct device *dev;
+ void __iomem *control_addr;
+ void __iomem *mbox_addr;
+ void __iomem *regs_addr;
+ int control_size;
+ int mailbox_size;
+ int registers_size;
+ u32 guid;
+ u32 features;
+};
+
+/* SDSi mailbox operations must be performed using 64bit mov instructions */
+static __always_inline void
+sdsi_memcpy64_toio(u64 __iomem *to, const u64 *from, size_t count_bytes)
+{
+ size_t count = count_bytes / sizeof(*to);
+ int i;
+
+ for (i = 0; i < count; i++)
+ writeq(from[i], &to[i]);
+}
+
+static __always_inline void
+sdsi_memcpy64_fromio(u64 *to, const u64 __iomem *from, size_t count_bytes)
+{
+ size_t count = count_bytes / sizeof(*to);
+ int i;
+
+ for (i = 0; i < count; i++)
+ to[i] = readq(&from[i]);
+}
+
+static inline void sdsi_complete_transaction(struct sdsi_priv *priv)
+{
+ u64 control = FIELD_PREP(CTRL_COMPLETE, 1);
+
+ lockdep_assert_held(&priv->mb_lock);
+ writeq(control, priv->control_addr);
+}
+
+static int sdsi_status_to_errno(u32 status)
+{
+ switch (status) {
+ case SDSI_MBOX_CMD_SUCCESS:
+ return 0;
+ case SDSI_MBOX_CMD_TIMEOUT:
+ return -ETIMEDOUT;
+ default:
+ return -EIO;
+ }
+}
+
+static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *info,
+ size_t *data_size)
+{
+ struct device *dev = priv->dev;
+ u32 total, loop, eom, status, message_size;
+ u64 control;
+ int ret;
+
+ lockdep_assert_held(&priv->mb_lock);
+
+ /* Format and send the read command */
+ control = FIELD_PREP(CTRL_EOM, 1) |
+ FIELD_PREP(CTRL_SOM, 1) |
+ FIELD_PREP(CTRL_RUN_BUSY, 1) |
+ FIELD_PREP(CTRL_PACKET_SIZE, info->size);
+ writeq(control, priv->control_addr);
+
+ /* For reads, data sizes that are larger than the mailbox size are read in packets. */
+ total = 0;
+ loop = 0;
+ do {
+ void *buf = info->buffer + (SDSI_SIZE_MAILBOX * loop);
+ u32 packet_size;
+
+ /* Poll on ready bit */
+ ret = readq_poll_timeout(priv->control_addr, control, control & CTRL_READY,
+ MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_US);
+ if (ret)
+ break;
+
+ eom = FIELD_GET(CTRL_EOM, control);
+ status = FIELD_GET(CTRL_STATUS, control);
+ packet_size = FIELD_GET(CTRL_PACKET_SIZE, control);
+ message_size = FIELD_GET(CTRL_MSG_SIZE, control);
+
+ ret = sdsi_status_to_errno(status);
+ if (ret)
+ break;
+
+ /* Only the last packet can be less than the mailbox size. */
+ if (!eom && packet_size != SDSI_SIZE_MAILBOX) {
+ dev_err(dev, "Invalid packet size\n");
+ ret = -EPROTO;
+ break;
+ }
+
+ if (packet_size > SDSI_SIZE_MAILBOX) {
+ dev_err(dev, "Packet size too large\n");
+ ret = -EPROTO;
+ break;
+ }
+
+ sdsi_memcpy64_fromio(buf, priv->mbox_addr, round_up(packet_size, SDSI_SIZE_CMD));
+
+ total += packet_size;
+
+ sdsi_complete_transaction(priv);
+ } while (!eom && ++loop < MBOX_MAX_PACKETS);
+
+ if (ret) {
+ sdsi_complete_transaction(priv);
+ return ret;
+ }
+
+ if (!eom) {
+ dev_err(dev, "Exceeded read attempts\n");
+ return -EPROTO;
+ }
+
+ /* Message size check is only valid for multi-packet transfers */
+ if (loop && total != message_size)
+ dev_warn(dev, "Read count %u differs from expected count %u\n",
+ total, message_size);
+
+ *data_size = total;
+
+ return 0;
+}
+
+static int sdsi_mbox_cmd_write(struct sdsi_priv *priv, struct sdsi_mbox_info *info)
+{
+ u64 control;
+ u32 status;
+ int ret;
+
+ lockdep_assert_held(&priv->mb_lock);
+
+ /* Write rest of the payload */
+ sdsi_memcpy64_toio(priv->mbox_addr + SDSI_SIZE_CMD, info->payload + 1,
+ info->size - SDSI_SIZE_CMD);
+
+ /* Format and send the write command */
+ control = FIELD_PREP(CTRL_EOM, 1) |
+ FIELD_PREP(CTRL_SOM, 1) |
+ FIELD_PREP(CTRL_RUN_BUSY, 1) |
+ FIELD_PREP(CTRL_READ_WRITE, 1) |
+ FIELD_PREP(CTRL_PACKET_SIZE, info->size);
+ writeq(control, priv->control_addr);
+
+ /* Poll on ready bit */
+ ret = readq_poll_timeout(priv->control_addr, control, control & CTRL_READY,
+ MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_US);
+
+ if (ret)
+ goto release_mbox;
+
+ status = FIELD_GET(CTRL_STATUS, control);
+ ret = sdsi_status_to_errno(status);
+
+release_mbox:
+ sdsi_complete_transaction(priv);
+
+ return ret;
+}
+
+static int sdsi_mbox_acquire(struct sdsi_priv *priv, struct sdsi_mbox_info *info)
+{
+ u64 control;
+ u32 owner;
+ int ret, retries = 0;
+
+ lockdep_assert_held(&priv->mb_lock);
+
+ /* Check mailbox is available */
+ control = readq(priv->control_addr);
+ owner = FIELD_GET(CTRL_OWNER, control);
+ if (owner != MBOX_OWNER_NONE)
+ return -EBUSY;
+
+ /*
+ * If there has been no recent transaction and no one owns the mailbox,
+ * we should acquire it in under 1ms. However, if we've accessed it
+ * recently it may take up to 2.1 seconds to acquire it again.
+ */
+ do {
+ /* Write first qword of payload */
+ writeq(info->payload[0], priv->mbox_addr);
+
+ /* Check for ownership */
+ ret = readq_poll_timeout(priv->control_addr, control,
+ FIELD_GET(CTRL_OWNER, control) == MBOX_OWNER_INBAND,
+ MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_ACQUIRE_US);
+
+ if (FIELD_GET(CTRL_OWNER, control) == MBOX_OWNER_NONE &&
+ retries++ < MBOX_ACQUIRE_NUM_RETRIES) {
+ msleep(MBOX_ACQUIRE_RETRY_DELAY_MS);
+ continue;
+ }
+
+ /* Either we got it or someone else did. */
+ break;
+ } while (true);
+
+ return ret;
+}
+
+static int sdsi_mbox_write(struct sdsi_priv *priv, struct sdsi_mbox_info *info)
+{
+ int ret;
+
+ lockdep_assert_held(&priv->mb_lock);
+
+ ret = sdsi_mbox_acquire(priv, info);
+ if (ret)
+ return ret;
+
+ return sdsi_mbox_cmd_write(priv, info);
+}
+
+static int sdsi_mbox_read(struct sdsi_priv *priv, struct sdsi_mbox_info *info, size_t *data_size)
+{
+ int ret;
+
+ lockdep_assert_held(&priv->mb_lock);
+
+ ret = sdsi_mbox_acquire(priv, info);
+ if (ret)
+ return ret;
+
+ return sdsi_mbox_cmd_read(priv, info, data_size);
+}
+
+static ssize_t sdsi_provision(struct sdsi_priv *priv, char *buf, size_t count,
+ enum sdsi_command command)
+{
+ struct sdsi_mbox_info info;
+ int ret;
+
+ if (count > (SDSI_SIZE_WRITE_MSG - SDSI_SIZE_CMD))
+ return -EOVERFLOW;
+
+ /* Qword aligned message + command qword */
+ info.size = round_up(count, SDSI_SIZE_CMD) + SDSI_SIZE_CMD;
+
+ info.payload = kzalloc(info.size, GFP_KERNEL);
+ if (!info.payload)
+ return -ENOMEM;
+
+ /* Copy message to payload buffer */
+ memcpy(info.payload, buf, count);
+
+ /* Command is last qword of payload buffer */
+ info.payload[(info.size - SDSI_SIZE_CMD) / SDSI_SIZE_CMD] = command;
+
+ ret = mutex_lock_interruptible(&priv->mb_lock);
+ if (ret)
+ goto free_payload;
+ ret = sdsi_mbox_write(priv, &info);
+ mutex_unlock(&priv->mb_lock);
+
+free_payload:
+ kfree(info.payload);
+
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t provision_akc_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off,
+ size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct sdsi_priv *priv = dev_get_drvdata(dev);
+
+ if (off)
+ return -ESPIPE;
+
+ return sdsi_provision(priv, buf, count, SDSI_CMD_PROVISION_AKC);
+}
+static BIN_ATTR_WO(provision_akc, SDSI_SIZE_WRITE_MSG);
+
+static ssize_t provision_cap_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off,
+ size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct sdsi_priv *priv = dev_get_drvdata(dev);
+
+ if (off)
+ return -ESPIPE;
+
+ return sdsi_provision(priv, buf, count, SDSI_CMD_PROVISION_CAP);
+}
+static BIN_ATTR_WO(provision_cap, SDSI_SIZE_WRITE_MSG);
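+
+/*
+ * Illustrative provisioning from user space (the device path is an
+ * example; the payload must land in a single write at offset 0):
+ *   dd if=license.cap bs=4096 count=1 \
+ *      of=/sys/bus/auxiliary/devices/intel_vsec.sdsi.0/provision_cap
+ */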
+
+static ssize_t
+certificate_read(u64 command, struct sdsi_priv *priv, char *buf, loff_t off,
+ size_t count)
+{
+ struct sdsi_mbox_info info;
+ size_t size;
+ int ret;
+
+ if (off)
+ return 0;
+
+ /* Buffer for return data */
+ info.buffer = kmalloc(SDSI_SIZE_READ_MSG, GFP_KERNEL);
+ if (!info.buffer)
+ return -ENOMEM;
+
+ info.payload = &command;
+ info.size = sizeof(command);
+
+ ret = mutex_lock_interruptible(&priv->mb_lock);
+ if (ret)
+ goto free_buffer;
+ ret = sdsi_mbox_read(priv, &info, &size);
+ mutex_unlock(&priv->mb_lock);
+ if (ret < 0)
+ goto free_buffer;
+
+ if (size > count)
+ size = count;
+
+ memcpy(buf, info.buffer, size);
+
+free_buffer:
+ kfree(info.buffer);
+
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static ssize_t
+state_certificate_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off,
+ size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct sdsi_priv *priv = dev_get_drvdata(dev);
+
+ return certificate_read(SDSI_CMD_READ_STATE, priv, buf, off, count);
+}
+static BIN_ATTR_ADMIN_RO(state_certificate, SDSI_SIZE_READ_MSG);
+
+static ssize_t
+meter_certificate_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off,
+ size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct sdsi_priv *priv = dev_get_drvdata(dev);
+
+ return certificate_read(SDSI_CMD_READ_METER, priv, buf, off, count);
+}
+static BIN_ATTR_ADMIN_RO(meter_certificate, SDSI_SIZE_READ_MSG);
+
+static ssize_t registers_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off,
+ size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct sdsi_priv *priv = dev_get_drvdata(dev);
+ void __iomem *addr = priv->regs_addr;
+ int size = priv->registers_size;
+
+ /*
+ * The check below is performed by the sysfs caller based on the static
+ * file size. But this may be greater than the actual size, which is
+ * based on the GUID. So check again here against the actual size before
+ * reading.
+ */
+ if (off >= size)
+ return 0;
+
+ if (off + count > size)
+ count = size - off;
+
+ memcpy_fromio(buf, addr + off, count);
+
+ return count;
+}
+static BIN_ATTR_ADMIN_RO(registers, SDSI_SIZE_REGS);
+
+static struct bin_attribute *sdsi_bin_attrs[] = {
+ &bin_attr_registers,
+ &bin_attr_state_certificate,
+ &bin_attr_meter_certificate,
+ &bin_attr_provision_akc,
+ &bin_attr_provision_cap,
+ NULL
+};
+
+static umode_t
+sdsi_battr_is_visible(struct kobject *kobj, struct bin_attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct sdsi_priv *priv = dev_get_drvdata(dev);
+
+ /* Registers file is always readable if the device is present */
+ if (attr == &bin_attr_registers)
+ return attr->attr.mode;
+
+ /* All other attributes are not visible if the BIOS has not enabled On Demand */
+ if (!(priv->features & SDSI_FEATURE_SDSI))
+ return 0;
+
+ if (attr == &bin_attr_meter_certificate)
+ return (priv->features & SDSI_FEATURE_METERING) ?
+ attr->attr.mode : 0;
+
+ return attr->attr.mode;
+}
+
+static ssize_t guid_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct sdsi_priv *priv = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "0x%x\n", priv->guid);
+}
+static DEVICE_ATTR_RO(guid);
+
+static struct attribute *sdsi_attrs[] = {
+ &dev_attr_guid.attr,
+ NULL
+};
+
+static const struct attribute_group sdsi_group = {
+ .attrs = sdsi_attrs,
+ .bin_attrs = sdsi_bin_attrs,
+ .is_bin_visible = sdsi_battr_is_visible,
+};
+__ATTRIBUTE_GROUPS(sdsi);
+
+static int sdsi_get_layout(struct sdsi_priv *priv, struct disc_table *table)
+{
+ switch (table->guid) {
+ case SDSI_GUID_V1:
+ priv->control_size = GUID_V1_CNTRL_SIZE;
+ priv->registers_size = GUID_V1_REGS_SIZE;
+ break;
+ case SDSI_GUID_V2:
+ priv->control_size = GUID_V2_CNTRL_SIZE;
+ priv->registers_size = GUID_V2_REGS_SIZE;
+ break;
+ default:
+ dev_err(priv->dev, "Unrecognized GUID 0x%x\n", table->guid);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int sdsi_map_mbox_registers(struct sdsi_priv *priv, struct pci_dev *parent,
+ struct disc_table *disc_table, struct resource *disc_res)
+{
+ u32 access_type = FIELD_GET(DT_ACCESS_TYPE, disc_table->access_info);
+ u32 size = FIELD_GET(DT_SIZE, disc_table->access_info);
+ u32 tbir = FIELD_GET(DT_TBIR, disc_table->offset);
+ u32 offset = DT_OFFSET(disc_table->offset);
+ struct resource res = {};
+
+ /* Starting location of SDSi MMIO region based on access type */
+ switch (access_type) {
+ case ACCESS_TYPE_LOCAL:
+ if (tbir) {
+ dev_err(priv->dev, "Unsupported BAR index %u for access type %u\n",
+ tbir, access_type);
+ return -EINVAL;
+ }
+
+ /*
+ * For access_type LOCAL, the base address is as follows:
+ * base address = end of discovery region + base offset + 1
+ */
+ res.start = disc_res->end + offset + 1;
+ break;
+
+ case ACCESS_TYPE_BARID:
+ res.start = pci_resource_start(parent, tbir) + offset;
+ break;
+
+ default:
+ dev_err(priv->dev, "Unrecognized access_type %u\n", access_type);
+ return -EINVAL;
+ }
+
+ res.end = res.start + size * sizeof(u32) - 1;
+ res.flags = IORESOURCE_MEM;
+
+ priv->control_addr = devm_ioremap_resource(priv->dev, &res);
+ if (IS_ERR(priv->control_addr))
+ return PTR_ERR(priv->control_addr);
+
+ priv->mbox_addr = priv->control_addr + priv->control_size;
+ priv->regs_addr = priv->mbox_addr + SDSI_SIZE_MAILBOX;
+
+ priv->features = readq(priv->regs_addr + SDSI_ENABLED_FEATURES_OFFSET);
+
+ return 0;
+}
+
+static int sdsi_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
+{
+ struct intel_vsec_device *intel_cap_dev = auxdev_to_ivdev(auxdev);
+ struct disc_table disc_table;
+ struct resource *disc_res;
+ void __iomem *disc_addr;
+ struct sdsi_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&auxdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &auxdev->dev;
+ mutex_init(&priv->mb_lock);
+ auxiliary_set_drvdata(auxdev, priv);
+
+ /* Get the SDSi discovery table */
+ disc_res = &intel_cap_dev->resource[0];
+ disc_addr = devm_ioremap_resource(&auxdev->dev, disc_res);
+ if (IS_ERR(disc_addr))
+ return PTR_ERR(disc_addr);
+
+ memcpy_fromio(&disc_table, disc_addr, DISC_TABLE_SIZE);
+
+ priv->guid = disc_table.guid;
+
+ /* Get guid based layout info */
+ ret = sdsi_get_layout(priv, &disc_table);
+ if (ret)
+ return ret;
+
+ /* Map the SDSi mailbox registers */
+ ret = sdsi_map_mbox_registers(priv, intel_cap_dev->pcidev, &disc_table, disc_res);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct auxiliary_device_id sdsi_aux_id_table[] = {
+ { .name = "intel_vsec.sdsi" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, sdsi_aux_id_table);
+
+static struct auxiliary_driver sdsi_aux_driver = {
+ .driver = {
+ .dev_groups = sdsi_groups,
+ },
+ .id_table = sdsi_aux_id_table,
+ .probe = sdsi_probe,
+ /* No remove. All resources are handled under devm */
+};
+module_auxiliary_driver(sdsi_aux_driver);
+
+MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
+MODULE_DESCRIPTION("Intel On Demand (SDSi) driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/smartconnect.c b/drivers/platform/x86/intel/smartconnect.c
new file mode 100644
index 0000000000..64c2dc9347
--- /dev/null
+++ b/drivers/platform/x86/intel/smartconnect.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2013 Matthew Garrett <mjg59@srcf.ucam.org>
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+
+MODULE_LICENSE("GPL");
+
+static int smartconnect_acpi_init(struct acpi_device *acpi)
+{
+ unsigned long long value;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(acpi->handle, "GAOS", NULL, &value);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+
+ if (value & 0x1) {
+ dev_info(&acpi->dev, "Disabling Intel Smart Connect\n");
+ status = acpi_execute_simple_method(acpi->handle, "SAOS", 0);
+ }
+
+ return 0;
+}
+
+static const struct acpi_device_id smartconnect_ids[] = {
+ {"INT33A0", 0},
+ {"", 0}
+};
+MODULE_DEVICE_TABLE(acpi, smartconnect_ids);
+
+static struct acpi_driver smartconnect_driver = {
+ .owner = THIS_MODULE,
+ .name = "intel_smart_connect",
+ .class = "intel_smart_connect",
+ .ids = smartconnect_ids,
+ .ops = {
+ .add = smartconnect_acpi_init,
+ },
+};
+
+module_acpi_driver(smartconnect_driver);
diff --git a/drivers/platform/x86/intel/speed_select_if/Kconfig b/drivers/platform/x86/intel/speed_select_if/Kconfig
new file mode 100644
index 0000000000..4eb3ad299d
--- /dev/null
+++ b/drivers/platform/x86/intel/speed_select_if/Kconfig
@@ -0,0 +1,21 @@
+menu "Intel Speed Select Technology interface support"
+ depends on PCI
+ depends on X86_64 || COMPILE_TEST
+
+config INTEL_SPEED_SELECT_TPMI
+ tristate
+
+config INTEL_SPEED_SELECT_INTERFACE
+ tristate "Intel(R) Speed Select Technology interface drivers"
+ select INTEL_SPEED_SELECT_TPMI if INTEL_TPMI
+ help
+ This config enables the Intel(R) Speed Select Technology interface
+ drivers. The Intel(R) Speed Select Technology features are
+ non-architectural and are only supported on specific Xeon(R) servers.
+ These drivers provide an interface to communicate directly with the
+ hardware via MMIO and mailboxes to enumerate and control all the
+ speed select features.
+
+ Enable this config if there is a need to enable and control the
+ Intel(R) Speed Select Technology features from user space.
+endmenu
diff --git a/drivers/platform/x86/intel/speed_select_if/Makefile b/drivers/platform/x86/intel/speed_select_if/Makefile
new file mode 100644
index 0000000000..1d878a36d0
--- /dev/null
+++ b/drivers/platform/x86/intel/speed_select_if/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile - Intel Speed Select Interface drivers
+# Copyright (c) 2019, Intel Corporation.
+#
+
+obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_common.o
+obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_mmio.o
+obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_mbox_pci.o
+obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_mbox_msr.o
+obj-$(CONFIG_INTEL_SPEED_SELECT_TPMI) += isst_tpmi_core.o
+obj-$(CONFIG_INTEL_SPEED_SELECT_TPMI) += isst_tpmi.o
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
new file mode 100644
index 0000000000..08df949460
--- /dev/null
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -0,0 +1,840 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Speed Select Interface: Common functions
+ * Copyright (c) 2019, Intel Corporation.
+ * All rights reserved.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#include <linux/cpufeature.h>
+#include <linux/cpuhotplug.h>
+#include <linux/fs.h>
+#include <linux/hashtable.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/isst_if.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+#include "isst_if_common.h"
+
+#define MSR_THREAD_ID_INFO 0x53
+#define MSR_PM_LOGICAL_ID 0x54
+#define MSR_CPU_BUS_NUMBER 0x128
+
+static struct isst_if_cmd_cb punit_callbacks[ISST_IF_DEV_MAX];
+
+static int punit_msr_white_list[] = {
+ MSR_TURBO_RATIO_LIMIT,
+ MSR_CONFIG_TDP_CONTROL,
+ MSR_TURBO_RATIO_LIMIT1,
+ MSR_TURBO_RATIO_LIMIT2,
+ MSR_PM_LOGICAL_ID,
+};
+
+struct isst_valid_cmd_ranges {
+ u16 cmd;
+ u16 sub_cmd_beg;
+ u16 sub_cmd_end;
+};
+
+struct isst_cmd_set_req_type {
+ u16 cmd;
+ u16 sub_cmd;
+ u16 param;
+};
+
+static const struct isst_valid_cmd_ranges isst_valid_cmds[] = {
+ {0xD0, 0x00, 0x03},
+ {0x7F, 0x00, 0x0C},
+ {0x7F, 0x10, 0x12},
+ {0x7F, 0x20, 0x23},
+ {0x94, 0x03, 0x03},
+ {0x95, 0x03, 0x03},
+};
+
+static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = {
+ {0xD0, 0x00, 0x08},
+ {0xD0, 0x01, 0x08},
+ {0xD0, 0x02, 0x08},
+ {0xD0, 0x03, 0x08},
+ {0x7F, 0x02, 0x00},
+ {0x7F, 0x08, 0x00},
+ {0x95, 0x03, 0x03},
+};
+
+struct isst_cmd {
+ struct hlist_node hnode;
+ u64 data;
+ u32 cmd;
+ int cpu;
+ int mbox_cmd_type;
+ u32 param;
+};
+
+static bool isst_hpm_support;
+
+static DECLARE_HASHTABLE(isst_hash, 8);
+static DEFINE_MUTEX(isst_hash_lock);
+
+static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param,
+ u32 data)
+{
+ struct isst_cmd *sst_cmd;
+
+ sst_cmd = kmalloc(sizeof(*sst_cmd), GFP_KERNEL);
+ if (!sst_cmd)
+ return -ENOMEM;
+
+ sst_cmd->cpu = cpu;
+ sst_cmd->cmd = cmd;
+ sst_cmd->mbox_cmd_type = mbox_cmd_type;
+ sst_cmd->param = param;
+ sst_cmd->data = data;
+
+ hash_add(isst_hash, &sst_cmd->hnode, sst_cmd->cmd);
+
+ return 0;
+}
+
+static void isst_delete_hash(void)
+{
+ struct isst_cmd *sst_cmd;
+ struct hlist_node *tmp;
+ int i;
+
+ hash_for_each_safe(isst_hash, i, tmp, sst_cmd, hnode) {
+ hash_del(&sst_cmd->hnode);
+ kfree(sst_cmd);
+ }
+}
+
+/**
+ * isst_store_cmd() - Store command to a hash table
+ * @cmd: Mailbox command.
+ * @sub_cmd: Mailbox sub-command or MSR id.
+ * @cpu: Target CPU for the command
+ * @mbox_cmd_type: Mailbox or MSR command.
+ * @param: Mailbox parameter.
+ * @data: Mailbox request data or MSR data.
+ *
+ * Stores the command in a hash table if there is no such command already
+ * stored. If already stored, update the latest parameter and data for
+ * the command.
+ *
+ * Return: Return result of store to hash table, 0 for success, others for
+ * failure.
+ */
+int isst_store_cmd(int cmd, int sub_cmd, u32 cpu, int mbox_cmd_type,
+ u32 param, u64 data)
+{
+ struct isst_cmd *sst_cmd;
+ int full_cmd, ret;
+
+ full_cmd = (cmd & GENMASK_ULL(15, 0)) << 16;
+ full_cmd |= (sub_cmd & GENMASK_ULL(15, 0));
+ mutex_lock(&isst_hash_lock);
+ hash_for_each_possible(isst_hash, sst_cmd, hnode, full_cmd) {
+ if (sst_cmd->cmd == full_cmd && sst_cmd->cpu == cpu &&
+ sst_cmd->mbox_cmd_type == mbox_cmd_type) {
+ sst_cmd->param = param;
+ sst_cmd->data = data;
+ mutex_unlock(&isst_hash_lock);
+ return 0;
+ }
+ }
+
+ ret = isst_store_new_cmd(full_cmd, cpu, mbox_cmd_type, param, data);
+ mutex_unlock(&isst_hash_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(isst_store_cmd);
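+
+/*
+ * Usage sketch (illustrative; mbox_cmd is a hypothetical request): a
+ * mailbox front end stores each successful set request so it can be
+ * replayed on resume:
+ *
+ *	ret = isst_store_cmd(mbox_cmd->command, mbox_cmd->sub_command,
+ *			     mbox_cmd->logical_cpu, 1, mbox_cmd->parameter,
+ *			     mbox_cmd->req_data);
+ */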
+
+static void isst_mbox_resume_command(struct isst_if_cmd_cb *cb,
+ struct isst_cmd *sst_cmd)
+{
+ struct isst_if_mbox_cmd mbox_cmd;
+ int wr_only;
+
+ mbox_cmd.command = (sst_cmd->cmd & GENMASK_ULL(31, 16)) >> 16;
+ mbox_cmd.sub_command = sst_cmd->cmd & GENMASK_ULL(15, 0);
+ mbox_cmd.parameter = sst_cmd->param;
+ mbox_cmd.req_data = sst_cmd->data;
+ mbox_cmd.logical_cpu = sst_cmd->cpu;
+ (cb->cmd_callback)((u8 *)&mbox_cmd, &wr_only, 1);
+}
+
+/**
+ * isst_resume_common() - Process Resume request
+ *
+ * On resume replay all mailbox commands and MSRs.
+ *
+ * Return: None.
+ */
+void isst_resume_common(void)
+{
+ struct isst_cmd *sst_cmd;
+ int i;
+
+ hash_for_each(isst_hash, i, sst_cmd, hnode) {
+ struct isst_if_cmd_cb *cb;
+
+ if (sst_cmd->mbox_cmd_type) {
+ cb = &punit_callbacks[ISST_IF_DEV_MBOX];
+ if (cb->registered)
+ isst_mbox_resume_command(cb, sst_cmd);
+ } else {
+ wrmsrl_safe_on_cpu(sst_cmd->cpu, sst_cmd->cmd,
+ sst_cmd->data);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(isst_resume_common);
+
+static void isst_restore_msr_local(int cpu)
+{
+ struct isst_cmd *sst_cmd;
+ int i;
+
+ mutex_lock(&isst_hash_lock);
+ for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
+ if (!punit_msr_white_list[i])
+ break;
+
+ hash_for_each_possible(isst_hash, sst_cmd, hnode,
+ punit_msr_white_list[i]) {
+ if (!sst_cmd->mbox_cmd_type && sst_cmd->cpu == cpu)
+ wrmsrl_safe(sst_cmd->cmd, sst_cmd->data);
+ }
+ }
+ mutex_unlock(&isst_hash_lock);
+}
+
+/**
+ * isst_if_mbox_cmd_invalid() - Check invalid mailbox commands
+ * @cmd: Pointer to the command structure to verify.
+ *
+ * An invalid command to the PUNIT may result in platform instability.
+ * This function checks the command against a whitelist of allowed commands.
+ *
+ * Return: Return true if the command is invalid, else false.
+ */
+bool isst_if_mbox_cmd_invalid(struct isst_if_mbox_cmd *cmd)
+{
+ int i;
+
+ if (cmd->logical_cpu >= nr_cpu_ids)
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(isst_valid_cmds); ++i) {
+ if (cmd->command == isst_valid_cmds[i].cmd &&
+ (cmd->sub_command >= isst_valid_cmds[i].sub_cmd_beg &&
+ cmd->sub_command <= isst_valid_cmds[i].sub_cmd_end)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_invalid);
+
+/**
+ * isst_if_mbox_cmd_set_req() - Check mailbox command is a set request
+ * @cmd: Pointer to the command structure to verify.
+ *
+ * Check if the given mailbox command is a set request rather than a get request.
+ *
+ * Return: Return true if the command is set_req, else false.
+ */
+bool isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd *cmd)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(isst_cmd_set_reqs); ++i) {
+ if (cmd->command == isst_cmd_set_reqs[i].cmd &&
+ cmd->sub_command == isst_cmd_set_reqs[i].sub_cmd &&
+ cmd->parameter == isst_cmd_set_reqs[i].param) {
+ return true;
+ }
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_set_req);
+
+static int isst_if_api_version;
+
+static int isst_if_get_platform_info(void __user *argp)
+{
+ struct isst_if_platform_info info;
+
+ info.api_version = isst_if_api_version;
+ info.driver_version = ISST_IF_DRIVER_VERSION;
+ info.max_cmds_per_ioctl = ISST_IF_CMD_LIMIT;
+ info.mbox_supported = punit_callbacks[ISST_IF_DEV_MBOX].registered;
+ info.mmio_supported = punit_callbacks[ISST_IF_DEV_MMIO].registered;
+
+ if (copy_to_user(argp, &info, sizeof(info)))
+ return -EFAULT;
+
+ return 0;
+}
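+
+/*
+ * A minimal user-space sketch of the platform-info query above, assuming
+ * the misc device registered below appears as /dev/isst_interface; error
+ * handling is omitted:
+ *
+ *	#include <stdio.h>
+ *	#include <fcntl.h>
+ *	#include <sys/ioctl.h>
+ *	#include <linux/isst_if.h>
+ *
+ *	struct isst_if_platform_info info;
+ *	int fd = open("/dev/isst_interface", O_RDWR);
+ *
+ *	if (fd >= 0 && !ioctl(fd, ISST_IF_GET_PLATFORM_INFO, &info))
+ *		printf("api:%d mbox:%d mmio:%d\n", info.api_version,
+ *		       info.mbox_supported, info.mmio_supported);
+ */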
+
+#define ISST_MAX_BUS_NUMBER 2
+
+struct isst_if_cpu_info {
+ /* For BUS 0 and BUS 1 only, which we need for the PUNIT interface */
+ int bus_info[ISST_MAX_BUS_NUMBER];
+ struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
+ int punit_cpu_id;
+ int numa_node;
+};
+
+struct isst_if_pkg_info {
+ struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
+};
+
+static struct isst_if_cpu_info *isst_cpu_info;
+static struct isst_if_pkg_info *isst_pkg_info;
+
+static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
+{
+ struct pci_dev *matched_pci_dev = NULL;
+ struct pci_dev *pci_dev = NULL;
+ struct pci_dev *_pci_dev = NULL;
+ int no_matches = 0, pkg_id;
+ int bus_number;
+
+ if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
+ cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
+ return NULL;
+
+ pkg_id = topology_physical_package_id(cpu);
+
+ bus_number = isst_cpu_info[cpu].bus_info[bus_no];
+ if (bus_number < 0)
+ return NULL;
+
+ for_each_pci_dev(_pci_dev) {
+ int node;
+
+ if (_pci_dev->bus->number != bus_number ||
+ _pci_dev->devfn != PCI_DEVFN(dev, fn))
+ continue;
+
+ ++no_matches;
+ if (!matched_pci_dev)
+ matched_pci_dev = _pci_dev;
+
+ node = dev_to_node(&_pci_dev->dev);
+ if (node == NUMA_NO_NODE) {
+ pr_info_once("Fail to get numa node for CPU:%d bus:%d dev:%d fn:%d\n",
+ cpu, bus_no, dev, fn);
+ continue;
+ }
+
+ if (node == isst_cpu_info[cpu].numa_node) {
+ isst_pkg_info[pkg_id].pci_dev[bus_no] = _pci_dev;
+
+ pci_dev = _pci_dev;
+ break;
+ }
+ }
+
+ /*
+ * If there is no NUMA-matched pci_dev, then there are the following cases:
+ * 1. CONFIG_NUMA is not defined: In this case, if there is only a single
+ * device match, then we don't need NUMA information. Simply return the
+ * last match. Otherwise return NULL.
+ * 2. NUMA information is not exposed via the _SEG method. This case is
+ * similar to case 1.
+ * 3. The NUMA information doesn't match the CPU's NUMA node and there is
+ * more than one match: return NULL.
+ */
+ if (!pci_dev && no_matches == 1)
+ pci_dev = matched_pci_dev;
+
+ /* Return pci_dev pointer for any matched CPU in the package */
+ if (!pci_dev)
+ pci_dev = isst_pkg_info[pkg_id].pci_dev[bus_no];
+
+ return pci_dev;
+}
+
+/**
+ * isst_if_get_pci_dev() - Get the PCI device instance for a CPU
+ * @cpu: Logical CPU number.
+ * @bus_no: The bus number assigned by the hardware.
+ * @dev: The device number assigned by the hardware.
+ * @fn: The function number assigned by the hardware.
+ *
+ * Using cached bus information, find out the PCI device for a bus number,
+ * device and function.
+ *
+ * Return: Return pci_dev pointer or NULL.
+ */
+struct pci_dev *isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
+{
+ struct pci_dev *pci_dev;
+
+ if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
+ cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
+ return NULL;
+
+ pci_dev = isst_cpu_info[cpu].pci_dev[bus_no];
+
+ if (pci_dev && pci_dev->devfn == PCI_DEVFN(dev, fn))
+ return pci_dev;
+
+ return _isst_if_get_pci_dev(cpu, bus_no, dev, fn);
+}
+EXPORT_SYMBOL_GPL(isst_if_get_pci_dev);
+
+static int isst_if_cpu_online(unsigned int cpu)
+{
+ u64 data;
+ int ret;
+
+ isst_cpu_info[cpu].numa_node = cpu_to_node(cpu);
+
+ ret = rdmsrl_safe(MSR_CPU_BUS_NUMBER, &data);
+ if (ret) {
+ /* This is not a fatal error on MSR mailbox only I/F */
+ isst_cpu_info[cpu].bus_info[0] = -1;
+ isst_cpu_info[cpu].bus_info[1] = -1;
+ } else {
+ isst_cpu_info[cpu].bus_info[0] = data & 0xff;
+ isst_cpu_info[cpu].bus_info[1] = (data >> 8) & 0xff;
+ isst_cpu_info[cpu].pci_dev[0] = _isst_if_get_pci_dev(cpu, 0, 0, 1);
+ isst_cpu_info[cpu].pci_dev[1] = _isst_if_get_pci_dev(cpu, 1, 30, 1);
+ }
+
+ if (isst_hpm_support) {
+ ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data);
+ if (!ret)
+ goto set_punit_id;
+ }
+
+ ret = rdmsrl_safe(MSR_THREAD_ID_INFO, &data);
+ if (ret) {
+ isst_cpu_info[cpu].punit_cpu_id = -1;
+ return ret;
+ }
+
+set_punit_id:
+ isst_cpu_info[cpu].punit_cpu_id = data;
+
+ isst_restore_msr_local(cpu);
+
+ return 0;
+}
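+
+/*
+ * For illustration, with a hypothetical MSR value: if MSR_CPU_BUS_NUMBER
+ * reads 0xfe7e, the decode above yields bus_info[0] = 0x7e (bits 7:0) and
+ * bus_info[1] = 0xfe (bits 15:8).
+ */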
+
+static int isst_if_online_id;
+
+static int isst_if_cpu_info_init(void)
+{
+ int ret;
+
+ isst_cpu_info = kcalloc(num_possible_cpus(),
+ sizeof(*isst_cpu_info),
+ GFP_KERNEL);
+ if (!isst_cpu_info)
+ return -ENOMEM;
+
+ isst_pkg_info = kcalloc(topology_max_packages(),
+ sizeof(*isst_pkg_info),
+ GFP_KERNEL);
+ if (!isst_pkg_info) {
+ kfree(isst_cpu_info);
+ return -ENOMEM;
+ }
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "platform/x86/isst-if:online",
+ isst_if_cpu_online, NULL);
+ if (ret < 0) {
+ kfree(isst_pkg_info);
+ kfree(isst_cpu_info);
+ return ret;
+ }
+
+ isst_if_online_id = ret;
+
+ return 0;
+}
+
+static void isst_if_cpu_info_exit(void)
+{
+ cpuhp_remove_state(isst_if_online_id);
+ kfree(isst_pkg_info);
+ kfree(isst_cpu_info);
+}
+
+static long isst_if_proc_phyid_req(u8 *cmd_ptr, int *write_only, int resume)
+{
+ struct isst_if_cpu_map *cpu_map;
+
+ cpu_map = (struct isst_if_cpu_map *)cmd_ptr;
+ if (cpu_map->logical_cpu >= nr_cpu_ids ||
+ cpu_map->logical_cpu >= num_possible_cpus())
+ return -EINVAL;
+
+ *write_only = 0;
+ cpu_map->physical_cpu = isst_cpu_info[cpu_map->logical_cpu].punit_cpu_id;
+
+ return 0;
+}
+
+static bool match_punit_msr_white_list(int msr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
+ if (punit_msr_white_list[i] == msr)
+ return true;
+ }
+
+ return false;
+}
+
+static long isst_if_msr_cmd_req(u8 *cmd_ptr, int *write_only, int resume)
+{
+ struct isst_if_msr_cmd *msr_cmd;
+ int ret;
+
+ msr_cmd = (struct isst_if_msr_cmd *)cmd_ptr;
+
+ if (!match_punit_msr_white_list(msr_cmd->msr))
+ return -EINVAL;
+
+ if (msr_cmd->logical_cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ if (msr_cmd->read_write) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = wrmsrl_safe_on_cpu(msr_cmd->logical_cpu,
+ msr_cmd->msr,
+ msr_cmd->data);
+ *write_only = 1;
+ if (!ret && !resume)
+ ret = isst_store_cmd(0, msr_cmd->msr,
+ msr_cmd->logical_cpu,
+ 0, 0, msr_cmd->data);
+ } else {
+ u64 data;
+
+ ret = rdmsrl_safe_on_cpu(msr_cmd->logical_cpu,
+ msr_cmd->msr, &data);
+ if (!ret) {
+ msr_cmd->data = data;
+ *write_only = 0;
+ }
+ }
+
+ return ret;
+}
+
+static long isst_if_exec_multi_cmd(void __user *argp, struct isst_if_cmd_cb *cb)
+{
+ unsigned char __user *ptr;
+ u32 cmd_count;
+ u8 *cmd_ptr;
+ long ret;
+ int i;
+
+ /* Each multi-command request has a u32 command count as the first field */
+ if (copy_from_user(&cmd_count, argp, sizeof(cmd_count)))
+ return -EFAULT;
+
+ if (!cmd_count || cmd_count > ISST_IF_CMD_LIMIT)
+ return -EINVAL;
+
+ cmd_ptr = kmalloc(cb->cmd_size, GFP_KERNEL);
+ if (!cmd_ptr)
+ return -ENOMEM;
+
+ /* cb->offset points to start of the command after the command count */
+ ptr = argp + cb->offset;
+
+ for (i = 0; i < cmd_count; ++i) {
+ int wr_only;
+
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+
+ if (copy_from_user(cmd_ptr, ptr, cb->cmd_size)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = cb->cmd_callback(cmd_ptr, &wr_only, 0);
+ if (ret)
+ break;
+
+ if (!wr_only && copy_to_user(ptr, cmd_ptr, cb->cmd_size)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ptr += cb->cmd_size;
+ }
+
+ kfree(cmd_ptr);
+
+ return i ? i : ret;
+}
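+
+/*
+ * Sketch of the buffer layout this helper expects, using the MSR command as
+ * an example (see ISST_IF_MSR_COMMAND below): user space passes a u32
+ * command count followed by an array of fixed-size commands, and cb->offset
+ * locates the first command after the count, e.g.:
+ *
+ *	struct isst_if_msr_cmds {
+ *		__u32 cmd_count;
+ *		struct isst_if_msr_cmd msr_cmd[1];	<- cb->offset points here
+ *	};
+ */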
+
+static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ struct isst_if_cmd_cb cmd_cb;
+ struct isst_if_cmd_cb *cb;
+ long ret = -ENOTTY;
+ int i;
+
+ switch (cmd) {
+ case ISST_IF_GET_PLATFORM_INFO:
+ ret = isst_if_get_platform_info(argp);
+ break;
+ case ISST_IF_GET_PHY_ID:
+ cmd_cb.cmd_size = sizeof(struct isst_if_cpu_map);
+ cmd_cb.offset = offsetof(struct isst_if_cpu_maps, cpu_map);
+ cmd_cb.cmd_callback = isst_if_proc_phyid_req;
+ ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
+ break;
+ case ISST_IF_IO_CMD:
+ cb = &punit_callbacks[ISST_IF_DEV_MMIO];
+ if (cb->registered)
+ ret = isst_if_exec_multi_cmd(argp, cb);
+ break;
+ case ISST_IF_MBOX_COMMAND:
+ cb = &punit_callbacks[ISST_IF_DEV_MBOX];
+ if (cb->registered)
+ ret = isst_if_exec_multi_cmd(argp, cb);
+ break;
+ case ISST_IF_MSR_COMMAND:
+ cmd_cb.cmd_size = sizeof(struct isst_if_msr_cmd);
+ cmd_cb.offset = offsetof(struct isst_if_msr_cmds, msr_cmd);
+ cmd_cb.cmd_callback = isst_if_msr_cmd_req;
+ ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
+ break;
+ default:
+ for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
+ struct isst_if_cmd_cb *cb = &punit_callbacks[i];
+ int ret;
+
+ if (cb->def_ioctl) {
+ ret = cb->def_ioctl(file, cmd, arg);
+ if (!ret)
+ return ret;
+ }
+ }
+ break;
+ }
+
+ return ret;
+}
+
+/* Lock to prevent module registration when already opened by user space */
+static DEFINE_MUTEX(punit_misc_dev_open_lock);
+/* Lock to allow one shared misc device for all ISST interfaces */
+static DEFINE_MUTEX(punit_misc_dev_reg_lock);
+static int misc_usage_count;
+static int misc_device_ret;
+static int misc_device_open;
+
+static int isst_if_open(struct inode *inode, struct file *file)
+{
+ int i, ret = 0;
+
+ /* Fail the open if a module is going away */
+ mutex_lock(&punit_misc_dev_open_lock);
+ for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
+ struct isst_if_cmd_cb *cb = &punit_callbacks[i];
+
+ if (cb->registered && !try_module_get(cb->owner)) {
+ ret = -ENODEV;
+ break;
+ }
+ }
+ if (ret) {
+ int j;
+
+ for (j = 0; j < i; ++j) {
+ struct isst_if_cmd_cb *cb;
+
+ cb = &punit_callbacks[j];
+ if (cb->registered)
+ module_put(cb->owner);
+ }
+ } else {
+ misc_device_open++;
+ }
+ mutex_unlock(&punit_misc_dev_open_lock);
+
+ return ret;
+}
+
+static int isst_if_release(struct inode *inode, struct file *f)
+{
+ int i;
+
+ mutex_lock(&punit_misc_dev_open_lock);
+ misc_device_open--;
+ for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
+ struct isst_if_cmd_cb *cb = &punit_callbacks[i];
+
+ if (cb->registered)
+ module_put(cb->owner);
+ }
+ mutex_unlock(&punit_misc_dev_open_lock);
+
+ return 0;
+}
+
+static const struct file_operations isst_if_char_driver_ops = {
+ .open = isst_if_open,
+ .unlocked_ioctl = isst_if_def_ioctl,
+ .release = isst_if_release,
+};
+
+static struct miscdevice isst_if_char_driver = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "isst_interface",
+ .fops = &isst_if_char_driver_ops,
+};
+
+static const struct x86_cpu_id hpm_cpu_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, NULL),
+ {}
+};
+
+static int isst_misc_reg(void)
+{
+ mutex_lock(&punit_misc_dev_reg_lock);
+ if (misc_device_ret)
+ goto unlock_exit;
+
+ if (!misc_usage_count) {
+ const struct x86_cpu_id *id;
+
+ id = x86_match_cpu(hpm_cpu_ids);
+ if (id)
+ isst_hpm_support = true;
+
+ misc_device_ret = isst_if_cpu_info_init();
+ if (misc_device_ret)
+ goto unlock_exit;
+
+ misc_device_ret = misc_register(&isst_if_char_driver);
+ if (misc_device_ret) {
+ isst_if_cpu_info_exit();
+ goto unlock_exit;
+ }
+ }
+ misc_usage_count++;
+
+unlock_exit:
+ mutex_unlock(&punit_misc_dev_reg_lock);
+
+ return misc_device_ret;
+}
+
+static void isst_misc_unreg(void)
+{
+ mutex_lock(&punit_misc_dev_reg_lock);
+ if (misc_usage_count)
+ misc_usage_count--;
+ if (!misc_usage_count && !misc_device_ret) {
+ misc_deregister(&isst_if_char_driver);
+ isst_if_cpu_info_exit();
+ }
+ mutex_unlock(&punit_misc_dev_reg_lock);
+}
+
+/**
+ * isst_if_cdev_register() - Register callback for IOCTL
+ * @device_type: The device type this callback handles.
+ * @cb: Callback structure.
+ *
+ * This function registers a callback for a device type. On the very first
+ * call it will register a misc device, which is used as the user/kernel
+ * interface. Subsequent calls simply increment the reference count.
+ * Registration will fail if the user has already opened the misc device
+ * for operation. Also, if misc device creation failed, it will not be
+ * retried and all callers will get the failure code.
+ *
+ * Return: Return the return value from misc device creation or -EINVAL
+ * for an unsupported device type.
+ */
+int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
+{
+ int ret;
+
+ if (device_type >= ISST_IF_DEV_MAX)
+ return -EINVAL;
+
+ mutex_lock(&punit_misc_dev_open_lock);
+ /* Device is already open, we don't want to add new callbacks */
+ if (misc_device_open) {
+ mutex_unlock(&punit_misc_dev_open_lock);
+ return -EAGAIN;
+ }
+ if (!cb->api_version)
+ cb->api_version = ISST_IF_API_VERSION;
+ if (cb->api_version > isst_if_api_version)
+ isst_if_api_version = cb->api_version;
+ memcpy(&punit_callbacks[device_type], cb, sizeof(*cb));
+ punit_callbacks[device_type].registered = 1;
+ mutex_unlock(&punit_misc_dev_open_lock);
+
+ ret = isst_misc_reg();
+ if (ret) {
+ /*
+ * No need for the mutex, as misc device registration failed and
+ * no one can open the device yet. Hence no contention.
+ */
+ punit_callbacks[device_type].registered = 0;
+ return ret;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(isst_if_cdev_register);
+
+/**
+ * isst_if_cdev_unregister() - Unregister callback for IOCTL
+ * @device_type: The device type to unregister.
+ *
+ * This function unregisters the previously registered callback. If this
+ * is the last callback being unregistered, then the misc device is removed.
+ *
+ * Return: None.
+ */
+void isst_if_cdev_unregister(int device_type)
+{
+ isst_misc_unreg();
+ mutex_lock(&punit_misc_dev_open_lock);
+ punit_callbacks[device_type].def_ioctl = NULL;
+ punit_callbacks[device_type].registered = 0;
+ if (device_type == ISST_IF_DEV_MBOX)
+ isst_delete_hash();
+ mutex_unlock(&punit_misc_dev_open_lock);
+}
+EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.h b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h
new file mode 100644
index 0000000000..1004f2c9cc
--- /dev/null
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel Speed Select Interface: Driver internal defines
+ * Copyright (c) 2019, Intel Corporation.
+ * All rights reserved.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#ifndef __ISST_IF_COMMON_H
+#define __ISST_IF_COMMON_H
+
+#define PCI_DEVICE_ID_INTEL_RAPL_PRIO_DEVID_0 0x3451
+#define PCI_DEVICE_ID_INTEL_CFG_MBOX_DEVID_0 0x3459
+
+#define PCI_DEVICE_ID_INTEL_RAPL_PRIO_DEVID_1 0x3251
+#define PCI_DEVICE_ID_INTEL_CFG_MBOX_DEVID_1 0x3259
+
+/*
+ * Validate the maximum number of commands in a single request.
+ * This is enough to handle a command to every core in one ioctl, or all
+ * possible message ids to one CPU. The limit also helps bound the response
+ * time per IOCTL request, as the PUNIT may take different times to process
+ * each request and may stall for too long when given too many commands.
+ */
+#define ISST_IF_CMD_LIMIT 64
+
+#define ISST_IF_API_VERSION 0x01
+#define ISST_IF_DRIVER_VERSION 0x01
+
+#define ISST_IF_DEV_MBOX 0
+#define ISST_IF_DEV_MMIO 1
+#define ISST_IF_DEV_TPMI 2
+#define ISST_IF_DEV_MAX 3
+
+/**
+ * struct isst_if_cmd_cb - Used to register a IOCTL handler
+ * @registered: Used by the common code to store registration state.
+ * Callers don't need to touch this field
+ * @cmd_size: The command size of the individual command in IOCTL
+ * @offset: Offset to the first valid member in command structure.
+ * This will be the offset of the start of the command
+ * after command count field
+ * @api_version: API version supported for this target. 0, if none.
+ * @owner: Registered module owner
+ * @cmd_callback: Callback function to handle IOCTL. The callback has the
+ * command pointer with data for command. There is a pointer
+ * called write_only, which when set, will not copy the
+ * response to user ioctl buffer. The "resume" argument
+ * can be used to avoid storing the command for replay
+ * during system resume
+ * @def_ioctl: Default IOCTL handler callback, if there is no match in
+ * the existing list of IOCTL handled by the common handler.
+ *
+ * This structure is used to register a handler for IOCTL. To avoid
+ * code duplication, common code handles all the IOCTL command read/write,
+ * including handling multiple commands in a single IOCTL. The caller just
+ * needs to execute a command via the registered callback.
+ */
+struct isst_if_cmd_cb {
+ int registered;
+ int cmd_size;
+ int offset;
+ int api_version;
+ struct module *owner;
+ long (*cmd_callback)(u8 *ptr, int *write_only, int resume);
+ long (*def_ioctl)(struct file *file, unsigned int cmd, unsigned long arg);
+};
+
+/* Internal interface functions */
+int isst_if_cdev_register(int type, struct isst_if_cmd_cb *cb);
+void isst_if_cdev_unregister(int type);
+struct pci_dev *isst_if_get_pci_dev(int cpu, int bus, int dev, int fn);
+bool isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd *mbox_cmd);
+bool isst_if_mbox_cmd_invalid(struct isst_if_mbox_cmd *cmd);
+int isst_store_cmd(int cmd, int sub_command, u32 cpu, int mbox_cmd,
+ u32 param, u64 data);
+void isst_resume_common(void);
+#endif
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
new file mode 100644
index 0000000000..1b6eab0710
--- /dev/null
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Speed Select Interface: Mbox via MSR Interface
+ * Copyright (c) 2019, Intel Corporation.
+ * All rights reserved.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/cpuhotplug.h>
+#include <linux/pci.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/topology.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/isst_if.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+#include "isst_if_common.h"
+
+#define MSR_OS_MAILBOX_INTERFACE 0xB0
+#define MSR_OS_MAILBOX_DATA 0xB1
+#define MSR_OS_MAILBOX_BUSY_BIT 31
+
+/*
+ * Based on experiments, the count is never more than 1, as the MSR overhead
+ * is enough for the command to finish. So this is the worst-case number.
+ */
+#define OS_MAILBOX_RETRY_COUNT 3
+
+static int isst_if_send_mbox_cmd(u8 command, u8 sub_command, u32 parameter,
+ u32 command_data, u32 *response_data)
+{
+ u32 retries;
+ u64 data;
+ int ret;
+
+ /* Poll for rb bit == 0 */
+ retries = OS_MAILBOX_RETRY_COUNT;
+ do {
+ rdmsrl(MSR_OS_MAILBOX_INTERFACE, data);
+ if (data & BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT)) {
+ ret = -EBUSY;
+ continue;
+ }
+ ret = 0;
+ break;
+ } while (--retries);
+
+ if (ret)
+ return ret;
+
+ /* Write DATA register */
+ wrmsrl(MSR_OS_MAILBOX_DATA, command_data);
+
+ /* Write command register */
+ data = BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT) |
+ (parameter & GENMASK_ULL(13, 0)) << 16 |
+ (sub_command << 8) |
+ command;
+ wrmsrl(MSR_OS_MAILBOX_INTERFACE, data);
+
+ /* Poll for rb bit == 0 */
+ retries = OS_MAILBOX_RETRY_COUNT;
+ do {
+ rdmsrl(MSR_OS_MAILBOX_INTERFACE, data);
+ if (data & BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT)) {
+ ret = -EBUSY;
+ continue;
+ }
+
+ if (data & 0xff)
+ return -ENXIO;
+
+ if (response_data) {
+ rdmsrl(MSR_OS_MAILBOX_DATA, data);
+ *response_data = data;
+ }
+ ret = 0;
+ break;
+ } while (--retries);
+
+ return ret;
+}
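+
+/*
+ * Layout of the interface MSR word composed above, for reference:
+ *
+ *	bit  31    : busy (set by software, cleared by firmware on completion)
+ *	bits 29:16 : parameter (14 bits)
+ *	bits 15:8  : sub-command
+ *	bits 7:0   : command; after completion this byte holds the status,
+ *		     where a non-zero value means failure (-ENXIO above)
+ */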
+
+struct msrl_action {
+ int err;
+ struct isst_if_mbox_cmd *mbox_cmd;
+};
+
+/* revisit, smp_call_function_single should be enough for atomic mailbox! */
+static void msrl_update_func(void *info)
+{
+ struct msrl_action *act = info;
+
+ act->err = isst_if_send_mbox_cmd(act->mbox_cmd->command,
+ act->mbox_cmd->sub_command,
+ act->mbox_cmd->parameter,
+ act->mbox_cmd->req_data,
+ &act->mbox_cmd->resp_data);
+}
+
+static long isst_if_mbox_proc_cmd(u8 *cmd_ptr, int *write_only, int resume)
+{
+ struct msrl_action action;
+ int ret;
+
+ action.mbox_cmd = (struct isst_if_mbox_cmd *)cmd_ptr;
+
+ if (isst_if_mbox_cmd_invalid(action.mbox_cmd))
+ return -EINVAL;
+
+ if (isst_if_mbox_cmd_set_req(action.mbox_cmd) &&
+ !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ /*
+ * To complete a mailbox command, we need to access two MSRs.
+ * So we don't want a race while completing a mailbox transaction.
+ * Here smp_call ensures that msrl_update_func() has no race,
+ * and with the wait flag set, it waits for completion.
+ * smp_call_function_single() uses get_cpu() and put_cpu().
+ */
+ ret = smp_call_function_single(action.mbox_cmd->logical_cpu,
+ msrl_update_func, &action, 1);
+ if (ret)
+ return ret;
+
+ if (!action.err && !resume && isst_if_mbox_cmd_set_req(action.mbox_cmd))
+ action.err = isst_store_cmd(action.mbox_cmd->command,
+ action.mbox_cmd->sub_command,
+ action.mbox_cmd->logical_cpu, 1,
+ action.mbox_cmd->parameter,
+ action.mbox_cmd->req_data);
+ *write_only = 0;
+
+ return action.err;
+}
+
+static int isst_pm_notify(struct notifier_block *nb,
+ unsigned long mode, void *_unused)
+{
+ switch (mode) {
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+ isst_resume_common();
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block isst_pm_nb = {
+ .notifier_call = isst_pm_notify,
+};
+
+static const struct x86_cpu_id isst_if_cpu_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, isst_if_cpu_ids);
+
+static int __init isst_if_mbox_init(void)
+{
+ struct isst_if_cmd_cb cb;
+ const struct x86_cpu_id *id;
+ u64 data;
+ int ret;
+
+ id = x86_match_cpu(isst_if_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ /* Check presence of mailbox MSRs */
+ ret = rdmsrl_safe(MSR_OS_MAILBOX_INTERFACE, &data);
+ if (ret)
+ return ret;
+
+ ret = rdmsrl_safe(MSR_OS_MAILBOX_DATA, &data);
+ if (ret)
+ return ret;
+
+ memset(&cb, 0, sizeof(cb));
+ cb.cmd_size = sizeof(struct isst_if_mbox_cmd);
+ cb.offset = offsetof(struct isst_if_mbox_cmds, mbox_cmd);
+ cb.cmd_callback = isst_if_mbox_proc_cmd;
+ cb.owner = THIS_MODULE;
+ ret = isst_if_cdev_register(ISST_IF_DEV_MBOX, &cb);
+ if (ret)
+ return ret;
+
+ ret = register_pm_notifier(&isst_pm_nb);
+ if (ret)
+ isst_if_cdev_unregister(ISST_IF_DEV_MBOX);
+
+ return ret;
+}
+module_init(isst_if_mbox_init)
+
+static void __exit isst_if_mbox_exit(void)
+{
+ unregister_pm_notifier(&isst_pm_nb);
+ isst_if_cdev_unregister(ISST_IF_DEV_MBOX);
+}
+module_exit(isst_if_mbox_exit)
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel speed select interface mailbox driver");
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_pci.c b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_pci.c
new file mode 100644
index 0000000000..df1fc6c719
--- /dev/null
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_pci.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Speed Select Interface: Mbox via PCI Interface
+ * Copyright (c) 2019, Intel Corporation.
+ * All rights reserved.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#include <linux/cpufeature.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/isst_if.h>
+
+#include "isst_if_common.h"
+
+#define PUNIT_MAILBOX_DATA 0xA0
+#define PUNIT_MAILBOX_INTERFACE 0xA4
+#define PUNIT_MAILBOX_BUSY_BIT 31
+
+/*
+ * The average time to complete mailbox commands is less than 40us. Most of
+ * the commands complete in a few microseconds. But the same firmware handles
+ * requests from all power management features.
+ * We can create a scenario where we flood the firmware with requests and
+ * the mailbox response is then delayed by hundreds of microseconds. So define
+ * two timeouts: one for the average case and one for the long case.
+ * If the firmware is taking longer than average, just call cond_resched().
+ */
+#define OS_MAILBOX_TIMEOUT_AVG_US 40
+#define OS_MAILBOX_TIMEOUT_MAX_US 1000
+
+struct isst_if_device {
+ struct mutex mutex;
+};
+
+static int isst_if_mbox_cmd(struct pci_dev *pdev,
+ struct isst_if_mbox_cmd *mbox_cmd)
+{
+ s64 tm_delta = 0;
+ ktime_t tm;
+ u32 data;
+ int ret;
+
+ /* Poll for rb bit == 0 */
+ tm = ktime_get();
+ do {
+ ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
+ &data);
+ if (ret)
+ return ret;
+
+ if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
+ ret = -EBUSY;
+ tm_delta = ktime_us_delta(ktime_get(), tm);
+ if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
+ cond_resched();
+ continue;
+ }
+ ret = 0;
+ break;
+ } while (tm_delta < OS_MAILBOX_TIMEOUT_MAX_US);
+
+ if (ret)
+ return ret;
+
+ /* Write DATA register */
+ ret = pci_write_config_dword(pdev, PUNIT_MAILBOX_DATA,
+ mbox_cmd->req_data);
+ if (ret)
+ return ret;
+
+ /* Write command register */
+ data = BIT_ULL(PUNIT_MAILBOX_BUSY_BIT) |
+ (mbox_cmd->parameter & GENMASK_ULL(13, 0)) << 16 |
+ (mbox_cmd->sub_command << 8) |
+ mbox_cmd->command;
+
+ ret = pci_write_config_dword(pdev, PUNIT_MAILBOX_INTERFACE, data);
+ if (ret)
+ return ret;
+
+ /* Poll for rb bit == 0 */
+ tm_delta = 0;
+ tm = ktime_get();
+ do {
+ ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
+ &data);
+ if (ret)
+ return ret;
+
+ if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
+ ret = -EBUSY;
+ tm_delta = ktime_us_delta(ktime_get(), tm);
+ if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
+ cond_resched();
+ continue;
+ }
+
+ if (data & 0xff)
+ return -ENXIO;
+
+ ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_DATA, &data);
+ if (ret)
+ return ret;
+
+ mbox_cmd->resp_data = data;
+ ret = 0;
+ break;
+ } while (tm_delta < OS_MAILBOX_TIMEOUT_MAX_US);
+
+ return ret;
+}
+
+static long isst_if_mbox_proc_cmd(u8 *cmd_ptr, int *write_only, int resume)
+{
+ struct isst_if_mbox_cmd *mbox_cmd;
+ struct isst_if_device *punit_dev;
+ struct pci_dev *pdev;
+ int ret;
+
+ mbox_cmd = (struct isst_if_mbox_cmd *)cmd_ptr;
+
+ if (isst_if_mbox_cmd_invalid(mbox_cmd))
+ return -EINVAL;
+
+ if (isst_if_mbox_cmd_set_req(mbox_cmd) && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ pdev = isst_if_get_pci_dev(mbox_cmd->logical_cpu, 1, 30, 1);
+ if (!pdev)
+ return -EINVAL;
+
+ punit_dev = pci_get_drvdata(pdev);
+ if (!punit_dev)
+ return -EINVAL;
+
+ /*
+ * Basically we are allowing one complete mailbox transaction on
+ * a mapped PCI device at a time.
+ */
+ mutex_lock(&punit_dev->mutex);
+ ret = isst_if_mbox_cmd(pdev, mbox_cmd);
+ if (!ret && !resume && isst_if_mbox_cmd_set_req(mbox_cmd))
+ ret = isst_store_cmd(mbox_cmd->command,
+ mbox_cmd->sub_command,
+ mbox_cmd->logical_cpu, 1,
+ mbox_cmd->parameter,
+ mbox_cmd->req_data);
+ mutex_unlock(&punit_dev->mutex);
+ if (ret)
+ return ret;
+
+ *write_only = 0;
+
+ return 0;
+}
+
+static const struct pci_device_id isst_if_mbox_ids[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CFG_MBOX_DEVID_0)},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CFG_MBOX_DEVID_1)},
+ { 0 },
+};
+MODULE_DEVICE_TABLE(pci, isst_if_mbox_ids);
+
+static int isst_if_mbox_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct isst_if_device *punit_dev;
+ struct isst_if_cmd_cb cb;
+ int ret;
+
+ punit_dev = devm_kzalloc(&pdev->dev, sizeof(*punit_dev), GFP_KERNEL);
+ if (!punit_dev)
+ return -ENOMEM;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ mutex_init(&punit_dev->mutex);
+ pci_set_drvdata(pdev, punit_dev);
+
+ memset(&cb, 0, sizeof(cb));
+ cb.cmd_size = sizeof(struct isst_if_mbox_cmd);
+ cb.offset = offsetof(struct isst_if_mbox_cmds, mbox_cmd);
+ cb.cmd_callback = isst_if_mbox_proc_cmd;
+ cb.owner = THIS_MODULE;
+ ret = isst_if_cdev_register(ISST_IF_DEV_MBOX, &cb);
+ if (ret)
+ mutex_destroy(&punit_dev->mutex);
+
+ return ret;
+}
+
+static void isst_if_mbox_remove(struct pci_dev *pdev)
+{
+ struct isst_if_device *punit_dev;
+
+ punit_dev = pci_get_drvdata(pdev);
+ isst_if_cdev_unregister(ISST_IF_DEV_MBOX);
+ mutex_destroy(&punit_dev->mutex);
+}
+
+static int __maybe_unused isst_if_resume(struct device *device)
+{
+ isst_resume_common();
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(isst_if_pm_ops, NULL, isst_if_resume);
+
+static struct pci_driver isst_if_pci_driver = {
+ .name = "isst_if_mbox_pci",
+ .id_table = isst_if_mbox_ids,
+ .probe = isst_if_mbox_probe,
+ .remove = isst_if_mbox_remove,
+ .driver.pm = &isst_if_pm_ops,
+};
+
+module_pci_driver(isst_if_pci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel speed select interface pci mailbox driver");
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c b/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c
new file mode 100644
index 0000000000..ff49025ec0
--- /dev/null
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Speed Select Interface: MMIO Interface
+ * Copyright (c) 2019, Intel Corporation.
+ * All rights reserved.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/isst_if.h>
+
+#include "isst_if_common.h"
+
+struct isst_mmio_range {
+ int beg;
+ int end;
+};
+
+static struct isst_mmio_range mmio_range_devid_0[] = {
+ {0x04, 0x14},
+ {0x20, 0xD0},
+};
+
+static struct isst_mmio_range mmio_range_devid_1[] = {
+ {0x04, 0x14},
+ {0x20, 0x11C},
+};
+
+struct isst_if_device {
+ void __iomem *punit_mmio;
+ u32 range_0[5];
+ u32 range_1[64];
+ struct isst_mmio_range *mmio_range;
+ struct mutex mutex;
+};
+
+static long isst_if_mmio_rd_wr(u8 *cmd_ptr, int *write_only, int resume)
+{
+ struct isst_if_device *punit_dev;
+ struct isst_if_io_reg *io_reg;
+ struct pci_dev *pdev;
+
+ io_reg = (struct isst_if_io_reg *)cmd_ptr;
+
+ if (io_reg->reg % 4)
+ return -EINVAL;
+
+ if (io_reg->read_write && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ pdev = isst_if_get_pci_dev(io_reg->logical_cpu, 0, 0, 1);
+ if (!pdev)
+ return -EINVAL;
+
+ punit_dev = pci_get_drvdata(pdev);
+ if (!punit_dev)
+ return -EINVAL;
+
+ if (io_reg->reg < punit_dev->mmio_range[0].beg ||
+ io_reg->reg > punit_dev->mmio_range[1].end)
+ return -EINVAL;
+
+ /*
+ * Ensure that an operation completes fully on a PCI device to avoid a
+ * read/write race, by using a per PCI device mutex.
+ */
+ mutex_lock(&punit_dev->mutex);
+ if (io_reg->read_write) {
+ writel(io_reg->value, punit_dev->punit_mmio+io_reg->reg);
+ *write_only = 1;
+ } else {
+ io_reg->value = readl(punit_dev->punit_mmio+io_reg->reg);
+ *write_only = 0;
+ }
+ mutex_unlock(&punit_dev->mutex);
+
+ return 0;
+}
+
+static const struct pci_device_id isst_if_ids[] = {
+ { PCI_DEVICE_DATA(INTEL, RAPL_PRIO_DEVID_0, &mmio_range_devid_0)},
+ { PCI_DEVICE_DATA(INTEL, RAPL_PRIO_DEVID_1, &mmio_range_devid_1)},
+ { 0 },
+};
+MODULE_DEVICE_TABLE(pci, isst_if_ids);
+
+static int isst_if_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct isst_if_device *punit_dev;
+ struct isst_if_cmd_cb cb;
+ u32 mmio_base, pcu_base;
+ u64 base_addr;
+ int ret;
+
+ punit_dev = devm_kzalloc(&pdev->dev, sizeof(*punit_dev), GFP_KERNEL);
+ if (!punit_dev)
+ return -ENOMEM;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pci_read_config_dword(pdev, 0xD0, &mmio_base);
+ if (ret)
+ return ret;
+
+ ret = pci_read_config_dword(pdev, 0xFC, &pcu_base);
+ if (ret)
+ return ret;
+
+ pcu_base &= GENMASK(10, 0);
+ base_addr = (u64)mmio_base << 23 | (u64) pcu_base << 12;
+ punit_dev->punit_mmio = devm_ioremap(&pdev->dev, base_addr, 256);
+ if (!punit_dev->punit_mmio)
+ return -ENOMEM;
+
+ mutex_init(&punit_dev->mutex);
+ pci_set_drvdata(pdev, punit_dev);
+ punit_dev->mmio_range = (struct isst_mmio_range *) ent->driver_data;
+
+ memset(&cb, 0, sizeof(cb));
+ cb.cmd_size = sizeof(struct isst_if_io_reg);
+ cb.offset = offsetof(struct isst_if_io_regs, io_reg);
+ cb.cmd_callback = isst_if_mmio_rd_wr;
+ cb.owner = THIS_MODULE;
+ ret = isst_if_cdev_register(ISST_IF_DEV_MMIO, &cb);
+ if (ret)
+ mutex_destroy(&punit_dev->mutex);
+
+ return ret;
+}
+
+static void isst_if_remove(struct pci_dev *pdev)
+{
+ struct isst_if_device *punit_dev;
+
+ punit_dev = pci_get_drvdata(pdev);
+ isst_if_cdev_unregister(ISST_IF_DEV_MMIO);
+ mutex_destroy(&punit_dev->mutex);
+}
+
+static int __maybe_unused isst_if_suspend(struct device *device)
+{
+ struct isst_if_device *punit_dev = dev_get_drvdata(device);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(punit_dev->range_0); ++i)
+ punit_dev->range_0[i] = readl(punit_dev->punit_mmio +
+ punit_dev->mmio_range[0].beg + 4 * i);
+ for (i = 0; i < ARRAY_SIZE(punit_dev->range_1); ++i) {
+ u32 addr;
+
+ addr = punit_dev->mmio_range[1].beg + 4 * i;
+ if (addr > punit_dev->mmio_range[1].end)
+ break;
+ punit_dev->range_1[i] = readl(punit_dev->punit_mmio + addr);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused isst_if_resume(struct device *device)
+{
+ struct isst_if_device *punit_dev = dev_get_drvdata(device);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(punit_dev->range_0); ++i)
+ writel(punit_dev->range_0[i], punit_dev->punit_mmio +
+ punit_dev->mmio_range[0].beg + 4 * i);
+ for (i = 0; i < ARRAY_SIZE(punit_dev->range_1); ++i) {
+ u32 addr;
+
+ addr = punit_dev->mmio_range[1].beg + 4 * i;
+ if (addr > punit_dev->mmio_range[1].end)
+ break;
+
+ writel(punit_dev->range_1[i], punit_dev->punit_mmio + addr);
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(isst_if_pm_ops, isst_if_suspend, isst_if_resume);
+
+static struct pci_driver isst_if_pci_driver = {
+ .name = "isst_if_pci",
+ .id_table = isst_if_ids,
+ .probe = isst_if_probe,
+ .remove = isst_if_remove,
+ .driver.pm = &isst_if_pm_ops,
+};
+
+module_pci_driver(isst_if_pci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel speed select interface mmio driver");
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi.c
new file mode 100644
index 0000000000..1797219153
--- /dev/null
+++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * isst_tpmi.c: SST TPMI interface
+ *
+ * Copyright (c) 2023, Intel Corporation.
+ * All Rights Reserved.
+ *
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/module.h>
+#include <linux/intel_tpmi.h>
+
+#include "isst_tpmi_core.h"
+
+static int intel_sst_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
+{
+ int ret;
+
+ ret = tpmi_sst_init();
+ if (ret)
+ return ret;
+
+ ret = tpmi_sst_dev_add(auxdev);
+ if (ret)
+ tpmi_sst_exit();
+
+ return ret;
+}
+
+static void intel_sst_remove(struct auxiliary_device *auxdev)
+{
+ tpmi_sst_dev_remove(auxdev);
+ tpmi_sst_exit();
+}
+
+static int intel_sst_suspend(struct device *dev)
+{
+ tpmi_sst_dev_suspend(to_auxiliary_dev(dev));
+
+ return 0;
+}
+
+static int intel_sst_resume(struct device *dev)
+{
+ tpmi_sst_dev_resume(to_auxiliary_dev(dev));
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(intel_sst_pm, intel_sst_suspend, intel_sst_resume);
+
+static const struct auxiliary_device_id intel_sst_id_table[] = {
+ { .name = "intel_vsec.tpmi-sst" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, intel_sst_id_table);
+
+static struct auxiliary_driver intel_sst_aux_driver = {
+ .id_table = intel_sst_id_table,
+ .remove = intel_sst_remove,
+ .probe = intel_sst_probe,
+ .driver = {
+ .pm = pm_sleep_ptr(&intel_sst_pm),
+ },
+};
+
+module_auxiliary_driver(intel_sst_aux_driver);
+
+MODULE_IMPORT_NS(INTEL_TPMI_SST);
+MODULE_DESCRIPTION("Intel TPMI SST Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
new file mode 100644
index 0000000000..63faa2ea83
--- /dev/null
+++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
@@ -0,0 +1,1442 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * isst_tpmi.c: SST TPMI interface core
+ *
+ * Copyright (c) 2023, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This information is useful for understanding the flows:
+ * In the current generation of platforms, TPMI is supported via an OOB
+ * PCI device. This PCI device has one instance per CPU package.
+ * There is a unique TPMI ID for SST. Each TPMI ID also has multiple
+ * entries, representing per power domain information.
+ *
+ * There is one dev file for complete SST information and control, the same
+ * as on the prior generation of hardware. User space doesn't need to know
+ * how the information is presented by the hardware. The TPMI core module
+ * implements the hardware mapping.
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/delay.h>
+#include <linux/intel_tpmi.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <uapi/linux/isst_if.h>
+
+#include "isst_tpmi_core.h"
+#include "isst_if_common.h"
+
+/* Supported SST hardware version by this driver */
+#define ISST_HEADER_VERSION 1
+
+/*
+ * Used to indicate whether a value read from MMIO needs to be multiplied
+ * to get to a standard unit or not.
+ */
+#define SST_MUL_FACTOR_NONE 1
+
+/* Define 100 as the scaling factor for frequency ratio to frequency conversion */
+#define SST_MUL_FACTOR_FREQ 100
+
+/* All SST regs are 64 bits in size */
+#define SST_REG_SIZE 8
+
+/**
+ * struct sst_header - SST main header
+ * @interface_version: Version number for this interface
+ * @cap_mask: Bitmask of the supported sub features. 1=the sub feature is enabled.
+ * 0=disabled.
+ * Bit[8]= SST_CP enable (1), disable (0)
+ * bit[9]= SST_PP enable (1), disable (0)
+ * other bits are reserved for future use
+ * @cp_offset: Qword (8 bytes) offset to the SST_CP register bank
+ * @pp_offset: Qword (8 bytes) offset to the SST_PP register bank
+ * @reserved: Reserved for future use
+ *
+ * This register allows SW to discover SST capability and the offsets to SST-CP
+ * and SST-PP register banks.
+ */
+struct sst_header {
+ u8 interface_version;
+ u8 cap_mask;
+ u8 cp_offset;
+ u8 pp_offset;
+ u32 reserved;
+} __packed;
+
+/**
+ * struct cp_header - SST-CP (core-power) header
+ * @feature_id: 0=SST-CP, 1=SST-PP, 2=SST-BF, 3=SST-TF
+ * @feature_rev: Interface Version number for this SST feature
+ * @ratio_unit: Frequency ratio unit. 00: 100MHz. All others are reserved
+ * @reserved: Reserved for future use
+ *
+ * This structure is used to store the SST-CP header. It is packed in the
+ * same format as defined in the specifications.
+ */
+struct cp_header {
+ u64 feature_id :4;
+ u64 feature_rev :8;
+ u64 ratio_unit :2;
+ u64 reserved :50;
+} __packed;
+
+/**
+ * struct pp_header - SST-PP (Perf profile) header
+ * @feature_id: 0=SST-CP, 1=SST-PP, 2=SST-BF, 3=SST-TF
+ * @feature_rev: Interface Version number for this SST feature
+ * @level_en_mask: SST-PP level enable/disable fuse mask
+ * @allowed_level_mask: Allowed level mask used for dynamic config level switching
+ * @reserved0: Reserved for future use
+ * @ratio_unit: Frequency ratio unit. 00: 100MHz. All others are reserved
+ * @block_size: Size of PP block in Qword unit (8 bytes)
+ * @dynamic_switch: If set (1), dynamic switching of SST PP is supported
+ * @memory_ratio_unit: Memory Controller frequency ratio unit. 00: 100MHz, others reserved
+ * @reserved1: Reserved for future use
+ *
+ * This structure is used to store the SST-PP header. It is packed in the
+ * same format as defined in the specifications.
+ */
+struct pp_header {
+ u64 feature_id :4;
+ u64 feature_rev :8;
+ u64 level_en_mask :8;
+ u64 allowed_level_mask :8;
+ u64 reserved0 :4;
+ u64 ratio_unit :2;
+ u64 block_size :8;
+ u64 dynamic_switch :1;
+ u64 memory_ratio_unit :2;
+ u64 reserved1 :19;
+} __packed;
+
+/**
+ * struct feature_offset - Offsets to SST-PP features
+ * @pp_offset: Qword offset within PP level for the SST_PP register bank
+ * @bf_offset: Qword offset within PP level for the SST_BF register bank
+ * @tf_offset: Qword offset within PP level for the SST_TF register bank
+ * @reserved: Reserved for future use
+ *
+ * This structure is used to store offsets for SST features in the register
+ * bank. It is packed in the same format as defined in the specifications.
+ */
+struct feature_offset {
+ u64 pp_offset :8;
+ u64 bf_offset :8;
+ u64 tf_offset :8;
+ u64 reserved :40;
+} __packed;
+
+/**
+ * struct levels_offset - Offsets to each SST PP level
+ * @sst_pp_level0_offset: Qword offset to the register block of PP level 0
+ * @sst_pp_level1_offset: Qword offset to the register block of PP level 1
+ * @sst_pp_level2_offset: Qword offset to the register block of PP level 2
+ * @sst_pp_level3_offset: Qword offset to the register block of PP level 3
+ * @sst_pp_level4_offset: Qword offset to the register block of PP level 4
+ * @reserved: Reserved for future use
+ *
+ * This structure is used to store offsets of SST PP levels in the register
+ * bank. It is packed in the same format as defined in the specifications.
+ */
+struct levels_offset {
+ u64 sst_pp_level0_offset :8;
+ u64 sst_pp_level1_offset :8;
+ u64 sst_pp_level2_offset :8;
+ u64 sst_pp_level3_offset :8;
+ u64 sst_pp_level4_offset :8;
+ u64 reserved :24;
+} __packed;
+
+/**
+ * struct pp_control_offset - Offsets for SST PP controls
+ * @perf_level: A SST-PP level that SW intends to switch to
+ * @perf_level_lock: SST-PP level select lock. 0 - unlocked. 1 - locked till next reset
+ * @resvd0: Reserved for future use
+ * @current_state: Bit mask to control the enable(1)/disable(0) state of each feature
+ * of the current PP level, bit 0 = BF, bit 1 = TF, bit 2-7 = reserved
+ * @reserved: Reserved for future use
+ *
+ * This structure is used to store offsets of SST PP controls in the register
+ * bank. It is packed in the same format as defined in the specifications.
+ */
+struct pp_control_offset {
+ u64 perf_level :3;
+ u64 perf_level_lock :1;
+ u64 resvd0 :4;
+ u64 current_state :8;
+ u64 reserved :48;
+} __packed;
+
+/**
+ * struct pp_status_offset - Offsets for SST PP status fields
+ * @sst_pp_level: Returns the current SST-PP level
+ * @sst_pp_lock: Returns the lock bit setting of perf_level_lock in pp_control_offset
+ * @error_type: Returns last error of SST-PP level change request. 0: no error,
+ * 1: level change not allowed, others: reserved
+ * @feature_state: Bit mask to indicate the enable(1)/disable(0) state of each feature of the
+ * current PP level. bit 0 = BF, bit 1 = TF, bit 2-7 reserved
+ * @reserved0: Reserved for future use
+ * @feature_error_type: Returns last error of the specific feature. Three error_type bits per
+ * feature. i.e. ERROR_TYPE[2:0] for BF, ERROR_TYPE[5:3] for TF, etc.
+ * 0x0: no error, 0x1: The specific feature is not supported by the hardware.
+ * 0x2-0x6: Reserved. 0x7: feature state change is not allowed.
+ * @reserved1: Reserved for future use
+ *
+ * This structure is used to store offsets of SST PP status in the register
+ * bank. It is packed in the same format as defined in the specifications.
+ */
+struct pp_status_offset {
+ u64 sst_pp_level :3;
+ u64 sst_pp_lock :1;
+ u64 error_type :4;
+ u64 feature_state :8;
+ u64 reserved0 :16;
+ u64 feature_error_type : 24;
+ u64 reserved1 :8;
+} __packed;
+
+/**
+ * struct perf_level - Used to store perf level and mmio offset
+ * @mmio_offset: mmio offset for a perf level
+ * @level: perf level for this offset
+ *
+ * This structure is used to store the final mmio offset of each perf level
+ * from the SST base mmio offset.
+ */
+struct perf_level {
+ int mmio_offset;
+ int level;
+};
+
+/**
+ * struct tpmi_per_power_domain_info - Store per power_domain SST info
+ * @package_id: Package id for this power_domain
+ * @power_domain_id: Power domain id. Each entry from the SST-TPMI instance is a power_domain.
+ * @max_level: Max PP level possible for this power_domain
+ * @ratio_unit: Ratio unit for converting to MHz
+ * @avx_levels: Number of AVX levels
+ * @pp_block_size: Block size from PP header
+ * @sst_header: Store SST header for this power_domain
+ * @cp_header: Store SST-CP header for this power_domain
+ * @pp_header: Store SST-PP header for this power_domain
+ * @perf_levels: Pointer to each perf level to map level to mmio offset
+ * @feature_offsets: Store feature offsets for each PP-level
+ * @control_offset: Store the control offset for each PP-level
+ * @status_offset: Store the status offset for each PP-level
+ * @sst_base: Mapped SST base IO memory
+ * @auxdev: Auxiliary device instance that enumerated this instance
+ * @saved_sst_cp_control: Saved SST-CP control configuration to restore across suspend/resume
+ * @saved_clos_configs: Saved SST-CP CLOS configuration to restore across suspend/resume
+ * @saved_clos_assocs: Saved SST-CP CLOS associations to restore across suspend/resume
+ * @saved_pp_control: Saved SST-PP control information to restore across suspend/resume
+ *
+ * This structure is used to store complete SST information for a power_domain. This
+ * information is used to serve read/write requests for any SST IOCTL. Each physical CPU
+ * package can have multiple power_domains. Each power domain describes its own SST
+ * information and has its own controls.
+ */
+struct tpmi_per_power_domain_info {
+ int package_id;
+ int power_domain_id;
+ int max_level;
+ int ratio_unit;
+ int avx_levels;
+ int pp_block_size;
+ struct sst_header sst_header;
+ struct cp_header cp_header;
+ struct pp_header pp_header;
+ struct perf_level *perf_levels;
+ struct feature_offset feature_offsets;
+ struct pp_control_offset control_offset;
+ struct pp_status_offset status_offset;
+ void __iomem *sst_base;
+ struct auxiliary_device *auxdev;
+ u64 saved_sst_cp_control;
+ u64 saved_clos_configs[4];
+ u64 saved_clos_assocs[4];
+ u64 saved_pp_control;
+};
+
+/**
+ * struct tpmi_sst_struct - Store sst info for a package
+ * @package_id: Package id for this aux device instance
+ * @number_of_power_domains: Number of power_domains pointed to by the power_domain_info pointer
+ * @power_domain_info: Pointer to power domains information
+ *
+ * This structure is used to store full SST information for a package.
+ * Each package has a unique OOB PCI device, which enumerates TPMI.
+ * Each package will have multiple power_domains.
+ */
+struct tpmi_sst_struct {
+ int package_id;
+ int number_of_power_domains;
+ struct tpmi_per_power_domain_info *power_domain_info;
+};
+
+/**
+ * struct tpmi_sst_common_struct - Store all SST instances
+ * @max_index: Maximum instances currently present
+ * @sst_inst: Pointer to per package instance
+ *
+ * Stores every SST Package instance.
+ */
+struct tpmi_sst_common_struct {
+ int max_index;
+ struct tpmi_sst_struct **sst_inst;
+};
+
+/*
+ * Each IOCTL request is processed under this lock. Also used to protect
+ * registration functions and common data structures.
+ */
+static DEFINE_MUTEX(isst_tpmi_dev_lock);
+
+/* Usage count to track the number of TPMI SST instances registered to this core. */
+static int isst_core_usage_count;
+
+/* Stores complete SST information for every package and power_domain */
+static struct tpmi_sst_common_struct isst_common;
+
+#define SST_MAX_AVX_LEVELS 3
+
+#define SST_PP_OFFSET_0 8
+#define SST_PP_OFFSET_1 16
+#define SST_PP_OFFSET_SIZE 8
+
+static int sst_add_perf_profiles(struct auxiliary_device *auxdev,
+ struct tpmi_per_power_domain_info *pd_info,
+ int levels)
+{
+ u64 perf_level_offsets;
+ int i;
+
+ pd_info->perf_levels = devm_kcalloc(&auxdev->dev, levels,
+ sizeof(struct perf_level),
+ GFP_KERNEL);
+ if (!pd_info->perf_levels)
+ return 0;
+
+ pd_info->ratio_unit = pd_info->pp_header.ratio_unit;
+ pd_info->avx_levels = SST_MAX_AVX_LEVELS;
+ pd_info->pp_block_size = pd_info->pp_header.block_size;
+
+ /* Read PP Offset 0: Get feature offset with PP level */
+ *((u64 *)&pd_info->feature_offsets) = readq(pd_info->sst_base +
+ pd_info->sst_header.pp_offset +
+ SST_PP_OFFSET_0);
+
+ perf_level_offsets = readq(pd_info->sst_base + pd_info->sst_header.pp_offset +
+ SST_PP_OFFSET_1);
+
+ for (i = 0; i < levels; ++i) {
+ u64 offset;
+
+ offset = perf_level_offsets & (0xffULL << (i * SST_PP_OFFSET_SIZE));
+ offset >>= (i * 8);
+ offset &= 0xff;
+ offset *= 8; /* Convert from QWORD offset to byte offset */
+ pd_info->perf_levels[i].mmio_offset = pd_info->sst_header.pp_offset + offset;
+ }
+
+ return 0;
+}
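+
+/*
+ * The per-level offset extraction above is equivalent to this simpler form:
+ * each level occupies one byte of perf_level_offsets, holding a QWORD
+ * offset, so for level i:
+ *
+ *	offset = ((perf_level_offsets >> (i * 8)) & 0xff) * 8;
+ */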
+
+static int sst_main(struct auxiliary_device *auxdev, struct tpmi_per_power_domain_info *pd_info)
+{
+ int i, mask, levels;
+
+ *((u64 *)&pd_info->sst_header) = readq(pd_info->sst_base);
+ pd_info->sst_header.cp_offset *= 8;
+ pd_info->sst_header.pp_offset *= 8;
+
+ if (pd_info->sst_header.interface_version != ISST_HEADER_VERSION) {
+ dev_err(&auxdev->dev, "SST: Unsupported version:%x\n",
+ pd_info->sst_header.interface_version);
+ return -ENODEV;
+ }
+
+ /* Read SST CP Header */
+ *((u64 *)&pd_info->cp_header) = readq(pd_info->sst_base + pd_info->sst_header.cp_offset);
+
+ /* Read PP header */
+ *((u64 *)&pd_info->pp_header) = readq(pd_info->sst_base + pd_info->sst_header.pp_offset);
+
+ /* Force level_en_mask level 0 */
+ pd_info->pp_header.level_en_mask |= 0x01;
+
+ mask = 0x01;
+ levels = 0;
+ for (i = 0; i < 8; ++i) {
+ if (pd_info->pp_header.level_en_mask & mask)
+ levels = i;
+ mask <<= 1;
+ }
+ pd_info->max_level = levels;
+ sst_add_perf_profiles(auxdev, pd_info, levels + 1);
+
+ return 0;
+}
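+
+/*
+ * A worked example with a hypothetical fuse value: if level_en_mask reads
+ * 0x12, forcing level 0 gives 0x13 (levels 0, 1 and 4 enabled). The loop
+ * above then sets max_level to 4, the highest enabled level, and
+ * sst_add_perf_profiles() is asked for max_level + 1 = 5 perf_level entries.
+ */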
+
+/*
+ * Map a package and power_domain id to the SST information structure unique to a power_domain.
+ * The caller must hold isst_tpmi_dev_lock.
+ */
+static struct tpmi_per_power_domain_info *get_instance(int pkg_id, int power_domain_id)
+{
+ struct tpmi_per_power_domain_info *power_domain_info;
+ struct tpmi_sst_struct *sst_inst;
+
+ if (pkg_id < 0 || pkg_id > isst_common.max_index ||
+ pkg_id >= topology_max_packages())
+ return NULL;
+
+ sst_inst = isst_common.sst_inst[pkg_id];
+ if (!sst_inst)
+ return NULL;
+
+ if (power_domain_id < 0 || power_domain_id >= sst_inst->number_of_power_domains)
+ return NULL;
+
+ power_domain_info = &sst_inst->power_domain_info[power_domain_id];
+
+ if (power_domain_info && !power_domain_info->sst_base)
+ return NULL;
+
+ return power_domain_info;
+}
+
+static bool disable_dynamic_sst_features(void)
+{
+ u64 value;
+
+ rdmsrl(MSR_PM_ENABLE, value);
+ return !(value & 0x1);
+}
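+
+/*
+ * Note: MSR_PM_ENABLE bit 0 is the HWP enable bit, so the helper above
+ * reports dynamic SST features as disabled whenever HWP is not enabled.
+ */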
+
+#define _read_cp_info(name_str, name, offset, start, width, mult_factor)\
+{\
+ u64 val, mask;\
+ \
+ val = readq(power_domain_info->sst_base + power_domain_info->sst_header.cp_offset +\
+ (offset));\
+ mask = GENMASK_ULL((start + width - 1), start);\
+ val &= mask; \
+ val >>= start;\
+ name = (val * mult_factor);\
+}
+
+#define _write_cp_info(name_str, name, offset, start, width, div_factor)\
+{\
+ u64 val, mask;\
+ \
+ val = readq(power_domain_info->sst_base +\
+ power_domain_info->sst_header.cp_offset + (offset));\
+ mask = GENMASK_ULL((start + width - 1), start);\
+ val &= ~mask;\
+ val |= (name / div_factor) << start;\
+ writeq(val, power_domain_info->sst_base + power_domain_info->sst_header.cp_offset +\
+ (offset));\
+}
+
+#define SST_CP_CONTROL_OFFSET 8
+#define SST_CP_STATUS_OFFSET 16
+
+#define SST_CP_ENABLE_START 0
+#define SST_CP_ENABLE_WIDTH 1
+
+#define SST_CP_PRIORITY_TYPE_START 1
+#define SST_CP_PRIORITY_TYPE_WIDTH 1
+
+static long isst_if_core_power_state(void __user *argp)
+{
+ struct tpmi_per_power_domain_info *power_domain_info;
+ struct isst_core_power core_power;
+
+ if (disable_dynamic_sst_features())
+ return -EFAULT;
+
+ if (copy_from_user(&core_power, argp, sizeof(core_power)))
+ return -EFAULT;
+
+ power_domain_info = get_instance(core_power.socket_id, core_power.power_domain_id);
+ if (!power_domain_info)
+ return -EINVAL;
+
+ if (core_power.get_set) {
+ _write_cp_info("cp_enable", core_power.enable, SST_CP_CONTROL_OFFSET,
+ SST_CP_ENABLE_START, SST_CP_ENABLE_WIDTH, SST_MUL_FACTOR_NONE)
+ _write_cp_info("cp_prio_type", core_power.priority_type, SST_CP_CONTROL_OFFSET,
+ SST_CP_PRIORITY_TYPE_START, SST_CP_PRIORITY_TYPE_WIDTH,
+ SST_MUL_FACTOR_NONE)
+ } else {
+ /* get */
+ _read_cp_info("cp_enable", core_power.enable, SST_CP_STATUS_OFFSET,
+ SST_CP_ENABLE_START, SST_CP_ENABLE_WIDTH, SST_MUL_FACTOR_NONE)
+ _read_cp_info("cp_prio_type", core_power.priority_type, SST_CP_STATUS_OFFSET,
+ SST_CP_PRIORITY_TYPE_START, SST_CP_PRIORITY_TYPE_WIDTH,
+ SST_MUL_FACTOR_NONE)
+ core_power.supported = !!(power_domain_info->sst_header.cap_mask & BIT(0));
+ if (copy_to_user(argp, &core_power, sizeof(core_power)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+#define SST_CLOS_CONFIG_0_OFFSET 24
+
+#define SST_CLOS_CONFIG_PRIO_START 4
+#define SST_CLOS_CONFIG_PRIO_WIDTH 4
+
+#define SST_CLOS_CONFIG_MIN_START 8
+#define SST_CLOS_CONFIG_MIN_WIDTH 8
+
+#define SST_CLOS_CONFIG_MAX_START 16
+#define SST_CLOS_CONFIG_MAX_WIDTH 8
+
+static long isst_if_clos_param(void __user *argp)
+{
+ struct tpmi_per_power_domain_info *power_domain_info;
+ struct isst_clos_param clos_param;
+
+ if (copy_from_user(&clos_param, argp, sizeof(clos_param)))
+ return -EFAULT;
+
+ power_domain_info = get_instance(clos_param.socket_id, clos_param.power_domain_id);
+ if (!power_domain_info)
+ return -EINVAL;
+
+ if (clos_param.get_set) {
+ _write_cp_info("clos.min_freq", clos_param.min_freq_mhz,
+ (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
+ SST_CLOS_CONFIG_MIN_START, SST_CLOS_CONFIG_MIN_WIDTH,
+ SST_MUL_FACTOR_FREQ);
+ _write_cp_info("clos.max_freq", clos_param.max_freq_mhz,
+ (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
+ SST_CLOS_CONFIG_MAX_START, SST_CLOS_CONFIG_MAX_WIDTH,
+ SST_MUL_FACTOR_FREQ);
+ _write_cp_info("clos.prio", clos_param.prop_prio,
+ (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
+ SST_CLOS_CONFIG_PRIO_START, SST_CLOS_CONFIG_PRIO_WIDTH,
+ SST_MUL_FACTOR_NONE);
+ } else {
+ /* get */
+ _read_cp_info("clos.min_freq", clos_param.min_freq_mhz,
+ (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
+ SST_CLOS_CONFIG_MIN_START, SST_CLOS_CONFIG_MIN_WIDTH,
+ SST_MUL_FACTOR_FREQ)
+ _read_cp_info("clos.max_freq", clos_param.max_freq_mhz,
+ (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
+ SST_CLOS_CONFIG_MAX_START, SST_CLOS_CONFIG_MAX_WIDTH,
+ SST_MUL_FACTOR_FREQ)
+ _read_cp_info("clos.prio", clos_param.prop_prio,
+ (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
+ SST_CLOS_CONFIG_PRIO_START, SST_CLOS_CONFIG_PRIO_WIDTH,
+ SST_MUL_FACTOR_NONE)
+
+ if (copy_to_user(argp, &clos_param, sizeof(clos_param)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
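+
+/*
+ * For illustration: each CLOS config register is SST_REG_SIZE (8) bytes,
+ * starting at offset 24, and frequencies are stored as ratio units of
+ * 100 MHz (SST_MUL_FACTOR_FREQ). A requested min_freq_mhz of 800 is thus
+ * written as ratio 8, and a ratio of 8 read back is reported as 800 MHz.
+ */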
+
+#define SST_CLOS_ASSOC_0_OFFSET 56
+#define SST_CLOS_ASSOC_CPUS_PER_REG 16
+#define SST_CLOS_ASSOC_BITS_PER_CPU 4
+
+static long isst_if_clos_assoc(void __user *argp)
+{
+ struct isst_if_clos_assoc_cmds assoc_cmds;
+ unsigned char __user *ptr;
+ int i;
+
+ /* Each multi-command request has a u16 command count as the first field */
+ if (copy_from_user(&assoc_cmds, argp, sizeof(assoc_cmds)))
+ return -EFAULT;
+
+ if (!assoc_cmds.cmd_count || assoc_cmds.cmd_count > ISST_IF_CMD_LIMIT)
+ return -EINVAL;
+
+ ptr = argp + offsetof(struct isst_if_clos_assoc_cmds, assoc_info);
+ for (i = 0; i < assoc_cmds.cmd_count; ++i) {
+ struct tpmi_per_power_domain_info *power_domain_info;
+ struct isst_if_clos_assoc clos_assoc;
+ int punit_id, punit_cpu_no, pkg_id;
+ struct tpmi_sst_struct *sst_inst;
+ int offset, shift, cpu;
+ u64 val, mask, clos;
+
+ if (copy_from_user(&clos_assoc, ptr, sizeof(clos_assoc)))
+ return -EFAULT;
+
+ if (clos_assoc.socket_id >= topology_max_packages())
+ return -EINVAL;
+
+ cpu = clos_assoc.logical_cpu;
+ clos = clos_assoc.clos;
+
+ if (assoc_cmds.punit_cpu_map)
+ punit_cpu_no = cpu;
+ else
+ return -EOPNOTSUPP;
+
+ if (punit_cpu_no < 0)
+ return -EINVAL;
+
+ punit_id = clos_assoc.power_domain_id;
+ pkg_id = clos_assoc.socket_id;
+
+ sst_inst = isst_common.sst_inst[pkg_id];
+
+ if (clos_assoc.power_domain_id >= sst_inst->number_of_power_domains)
+ return -EINVAL;
+
+ power_domain_info = &sst_inst->power_domain_info[punit_id];
+
+ offset = SST_CLOS_ASSOC_0_OFFSET +
+ (punit_cpu_no / SST_CLOS_ASSOC_CPUS_PER_REG) * SST_REG_SIZE;
+ shift = punit_cpu_no % SST_CLOS_ASSOC_CPUS_PER_REG;
+ shift *= SST_CLOS_ASSOC_BITS_PER_CPU;
+
+ val = readq(power_domain_info->sst_base +
+ power_domain_info->sst_header.cp_offset + offset);
+ if (assoc_cmds.get_set) {
+ mask = GENMASK_ULL((shift + SST_CLOS_ASSOC_BITS_PER_CPU - 1), shift);
+ val &= ~mask;
+ val |= (clos << shift);
+ writeq(val, power_domain_info->sst_base +
+ power_domain_info->sst_header.cp_offset + offset);
+ } else {
+ val >>= shift;
+ clos_assoc.clos = val & GENMASK(SST_CLOS_ASSOC_BITS_PER_CPU - 1, 0);
+ if (copy_to_user(ptr, &clos_assoc, sizeof(clos_assoc)))
+ return -EFAULT;
+ }
+
+ ptr += sizeof(clos_assoc);
+ }
+
+ return 0;
+}
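+
+/*
+ * A worked example of the association math above, for a hypothetical
+ * punit_cpu_no of 21: with 16 CPUs per register and 4 bits per CPU,
+ *
+ *	offset = 56 + (21 / 16) * 8 = 64
+ *	shift  = (21 % 16) * 4      = 20
+ *
+ * so CPU 21's CLOS id occupies bits 23:20 of the second association register.
+ */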
+
+#define _read_pp_info(name_str, name, offset, start, width, mult_factor)\
+{\
+ u64 val, _mask;\
+ \
+ val = readq(power_domain_info->sst_base + power_domain_info->sst_header.pp_offset +\
+ (offset));\
+ _mask = GENMASK_ULL((start + width - 1), start);\
+ val &= _mask;\
+ val >>= start;\
+ name = (val * mult_factor);\
+}
+
+#define _write_pp_info(name_str, name, offset, start, width, div_factor)\
+{\
+ u64 val, _mask;\
+ \
+ val = readq(power_domain_info->sst_base + power_domain_info->sst_header.pp_offset +\
+ (offset));\
+	_mask = GENMASK_ULL((start + width - 1), start);\
+ val &= ~_mask;\
+ val |= (name / div_factor) << start;\
+ writeq(val, power_domain_info->sst_base + power_domain_info->sst_header.pp_offset +\
+ (offset));\
+}
+
+#define _read_bf_level_info(name_str, name, level, offset, start, width, mult_factor)\
+{\
+ u64 val, _mask;\
+ \
+ val = readq(power_domain_info->sst_base +\
+ power_domain_info->perf_levels[level].mmio_offset +\
+ (power_domain_info->feature_offsets.bf_offset * 8) + (offset));\
+ _mask = GENMASK_ULL((start + width - 1), start);\
+ val &= _mask; \
+ val >>= start;\
+ name = (val * mult_factor);\
+}
+
+#define _read_tf_level_info(name_str, name, level, offset, start, width, mult_factor)\
+{\
+ u64 val, _mask;\
+ \
+ val = readq(power_domain_info->sst_base +\
+ power_domain_info->perf_levels[level].mmio_offset +\
+ (power_domain_info->feature_offsets.tf_offset * 8) + (offset));\
+ _mask = GENMASK_ULL((start + width - 1), start);\
+ val &= _mask; \
+ val >>= start;\
+ name = (val * mult_factor);\
+}
+
+#define SST_PP_STATUS_OFFSET 32
+
+#define SST_PP_LEVEL_START 0
+#define SST_PP_LEVEL_WIDTH 3
+
+#define SST_PP_LOCK_START 3
+#define SST_PP_LOCK_WIDTH 1
+
+#define SST_PP_FEATURE_STATE_START 8
+#define SST_PP_FEATURE_STATE_WIDTH 8
+
+#define SST_BF_FEATURE_SUPPORTED_START 12
+#define SST_BF_FEATURE_SUPPORTED_WIDTH 1
+
+#define SST_TF_FEATURE_SUPPORTED_START 12
+#define SST_TF_FEATURE_SUPPORTED_WIDTH 1
+
+static int isst_if_get_perf_level(void __user *argp)
+{
+ struct isst_perf_level_info perf_level;
+ struct tpmi_per_power_domain_info *power_domain_info;
+
+ if (copy_from_user(&perf_level, argp, sizeof(perf_level)))
+ return -EFAULT;
+
+ power_domain_info = get_instance(perf_level.socket_id, perf_level.power_domain_id);
+ if (!power_domain_info)
+ return -EINVAL;
+
+ perf_level.max_level = power_domain_info->max_level;
+ perf_level.level_mask = power_domain_info->pp_header.allowed_level_mask;
+ perf_level.feature_rev = power_domain_info->pp_header.feature_rev;
+ _read_pp_info("current_level", perf_level.current_level, SST_PP_STATUS_OFFSET,
+ SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)
+ _read_pp_info("locked", perf_level.locked, SST_PP_STATUS_OFFSET,
+		      SST_PP_LOCK_START, SST_PP_LOCK_WIDTH, SST_MUL_FACTOR_NONE)
+ _read_pp_info("feature_state", perf_level.feature_state, SST_PP_STATUS_OFFSET,
+ SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH, SST_MUL_FACTOR_NONE)
+ perf_level.enabled = !!(power_domain_info->sst_header.cap_mask & BIT(1));
+
+ _read_bf_level_info("bf_support", perf_level.sst_bf_support, 0, 0,
+ SST_BF_FEATURE_SUPPORTED_START, SST_BF_FEATURE_SUPPORTED_WIDTH,
+ SST_MUL_FACTOR_NONE);
+ _read_tf_level_info("tf_support", perf_level.sst_tf_support, 0, 0,
+ SST_TF_FEATURE_SUPPORTED_START, SST_TF_FEATURE_SUPPORTED_WIDTH,
+ SST_MUL_FACTOR_NONE);
+
+ if (copy_to_user(argp, &perf_level, sizeof(perf_level)))
+ return -EFAULT;
+
+ return 0;
+}
+
+#define SST_PP_CONTROL_OFFSET 24
+#define SST_PP_LEVEL_CHANGE_TIME_MS 5
+#define SST_PP_LEVEL_CHANGE_RETRY_COUNT 3
+
+static int isst_if_set_perf_level(void __user *argp)
+{
+ struct isst_perf_level_control perf_level;
+ struct tpmi_per_power_domain_info *power_domain_info;
+ int level, retry = 0;
+
+ if (disable_dynamic_sst_features())
+ return -EFAULT;
+
+ if (copy_from_user(&perf_level, argp, sizeof(perf_level)))
+ return -EFAULT;
+
+ power_domain_info = get_instance(perf_level.socket_id, perf_level.power_domain_id);
+ if (!power_domain_info)
+ return -EINVAL;
+
+ if (!(power_domain_info->pp_header.allowed_level_mask & BIT(perf_level.level)))
+ return -EINVAL;
+
+ _read_pp_info("current_level", level, SST_PP_STATUS_OFFSET,
+ SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)
+
+	/* If the requested new level is the same as the current level, reject */
+ if (perf_level.level == level)
+ return -EINVAL;
+
+ _write_pp_info("perf_level", perf_level.level, SST_PP_CONTROL_OFFSET,
+ SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)
+
+ /* It is possible that firmware is busy (although unlikely), so retry */
+ do {
+ /* Give time to FW to process */
+ msleep(SST_PP_LEVEL_CHANGE_TIME_MS);
+
+ _read_pp_info("current_level", level, SST_PP_STATUS_OFFSET,
+ SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)
+
+ /* Check if the new level is active */
+ if (perf_level.level == level)
+ break;
+
+ } while (retry++ < SST_PP_LEVEL_CHANGE_RETRY_COUNT);
+
+ /* If the level change didn't happen, return fault */
+ if (perf_level.level != level)
+ return -EFAULT;
+
+ /* Reset the feature state on level change */
+ _write_pp_info("perf_feature", 0, SST_PP_CONTROL_OFFSET,
+ SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH,
+ SST_MUL_FACTOR_NONE)
+
+ /* Give time to FW to process */
+ msleep(SST_PP_LEVEL_CHANGE_TIME_MS);
+
+ return 0;
+}
+
+static int isst_if_set_perf_feature(void __user *argp)
+{
+ struct isst_perf_feature_control perf_feature;
+ struct tpmi_per_power_domain_info *power_domain_info;
+
+ if (disable_dynamic_sst_features())
+ return -EFAULT;
+
+ if (copy_from_user(&perf_feature, argp, sizeof(perf_feature)))
+ return -EFAULT;
+
+ power_domain_info = get_instance(perf_feature.socket_id, perf_feature.power_domain_id);
+ if (!power_domain_info)
+ return -EINVAL;
+
+ _write_pp_info("perf_feature", perf_feature.feature, SST_PP_CONTROL_OFFSET,
+ SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH,
+ SST_MUL_FACTOR_NONE)
+
+ return 0;
+}
+
+#define _read_pp_level_info(name_str, name, level, offset, start, width, mult_factor)\
+{\
+ u64 val, _mask;\
+ \
+ val = readq(power_domain_info->sst_base +\
+ power_domain_info->perf_levels[level].mmio_offset +\
+ (power_domain_info->feature_offsets.pp_offset * 8) + (offset));\
+ _mask = GENMASK_ULL((start + width - 1), start);\
+ val &= _mask; \
+ val >>= start;\
+ name = (val * mult_factor);\
+}
+
+#define SST_PP_INFO_0_OFFSET 0
+#define SST_PP_INFO_1_OFFSET 8
+#define SST_PP_INFO_2_OFFSET 16
+#define SST_PP_INFO_3_OFFSET 24
+
+/* SST_PP_INFO_4_OFFSET to SST_PP_INFO_9_OFFSET are trl levels */
+#define SST_PP_INFO_4_OFFSET 32
+
+#define SST_PP_INFO_10_OFFSET 80
+#define SST_PP_INFO_11_OFFSET 88
+
+#define SST_PP_P1_SSE_START 0
+#define SST_PP_P1_SSE_WIDTH 8
+
+#define SST_PP_P1_AVX2_START 8
+#define SST_PP_P1_AVX2_WIDTH 8
+
+#define SST_PP_P1_AVX512_START 16
+#define SST_PP_P1_AVX512_WIDTH 8
+
+#define SST_PP_P1_AMX_START 24
+#define SST_PP_P1_AMX_WIDTH 8
+
+#define SST_PP_TDP_START 32
+#define SST_PP_TDP_WIDTH 15
+
+#define SST_PP_T_PROCHOT_START 47
+#define SST_PP_T_PROCHOT_WIDTH 8
+
+#define SST_PP_MAX_MEMORY_FREQ_START 55
+#define SST_PP_MAX_MEMORY_FREQ_WIDTH 7
+
+#define SST_PP_COOLING_TYPE_START 62
+#define SST_PP_COOLING_TYPE_WIDTH 2
+
+#define SST_PP_TRL_0_RATIO_0_START 0
+#define SST_PP_TRL_0_RATIO_0_WIDTH 8
+
+#define SST_PP_TRL_CORES_BUCKET_0_START 0
+#define SST_PP_TRL_CORES_BUCKET_0_WIDTH 8
+
+#define SST_PP_CORE_RATIO_P0_START 0
+#define SST_PP_CORE_RATIO_P0_WIDTH 8
+
+#define SST_PP_CORE_RATIO_P1_START 8
+#define SST_PP_CORE_RATIO_P1_WIDTH 8
+
+#define SST_PP_CORE_RATIO_PN_START 16
+#define SST_PP_CORE_RATIO_PN_WIDTH 8
+
+#define SST_PP_CORE_RATIO_PM_START 24
+#define SST_PP_CORE_RATIO_PM_WIDTH 8
+
+#define SST_PP_CORE_RATIO_P0_FABRIC_START 32
+#define SST_PP_CORE_RATIO_P0_FABRIC_WIDTH 8
+
+#define SST_PP_CORE_RATIO_P1_FABRIC_START 40
+#define SST_PP_CORE_RATIO_P1_FABRIC_WIDTH 8
+
+#define SST_PP_CORE_RATIO_PM_FABRIC_START 48
+#define SST_PP_CORE_RATIO_PM_FABRIC_WIDTH 8
+
+static int isst_if_get_perf_level_info(void __user *argp)
+{
+ struct isst_perf_level_data_info perf_level;
+ struct tpmi_per_power_domain_info *power_domain_info;
+ int i, j;
+
+ if (copy_from_user(&perf_level, argp, sizeof(perf_level)))
+ return -EFAULT;
+
+ power_domain_info = get_instance(perf_level.socket_id, perf_level.power_domain_id);
+ if (!power_domain_info)
+ return -EINVAL;
+
+ if (perf_level.level > power_domain_info->max_level)
+ return -EINVAL;
+
+ if (!(power_domain_info->pp_header.level_en_mask & BIT(perf_level.level)))
+ return -EINVAL;
+
+ _read_pp_level_info("tdp_ratio", perf_level.tdp_ratio, perf_level.level,
+ SST_PP_INFO_0_OFFSET, SST_PP_P1_SSE_START, SST_PP_P1_SSE_WIDTH,
+ SST_MUL_FACTOR_NONE)
+ _read_pp_level_info("base_freq_mhz", perf_level.base_freq_mhz, perf_level.level,
+ SST_PP_INFO_0_OFFSET, SST_PP_P1_SSE_START, SST_PP_P1_SSE_WIDTH,
+ SST_MUL_FACTOR_FREQ)
+ _read_pp_level_info("base_freq_avx2_mhz", perf_level.base_freq_avx2_mhz, perf_level.level,
+ SST_PP_INFO_0_OFFSET, SST_PP_P1_AVX2_START, SST_PP_P1_AVX2_WIDTH,
+ SST_MUL_FACTOR_FREQ)
+ _read_pp_level_info("base_freq_avx512_mhz", perf_level.base_freq_avx512_mhz,
+ perf_level.level, SST_PP_INFO_0_OFFSET, SST_PP_P1_AVX512_START,
+ SST_PP_P1_AVX512_WIDTH, SST_MUL_FACTOR_FREQ)
+ _read_pp_level_info("base_freq_amx_mhz", perf_level.base_freq_amx_mhz, perf_level.level,
+ SST_PP_INFO_0_OFFSET, SST_PP_P1_AMX_START, SST_PP_P1_AMX_WIDTH,
+ SST_MUL_FACTOR_FREQ)
+
+ _read_pp_level_info("thermal_design_power_w", perf_level.thermal_design_power_w,
+ perf_level.level, SST_PP_INFO_1_OFFSET, SST_PP_TDP_START,
+ SST_PP_TDP_WIDTH, SST_MUL_FACTOR_NONE)
+ perf_level.thermal_design_power_w /= 8; /* units are in 1/8th watt */
+ _read_pp_level_info("tjunction_max_c", perf_level.tjunction_max_c, perf_level.level,
+ SST_PP_INFO_1_OFFSET, SST_PP_T_PROCHOT_START, SST_PP_T_PROCHOT_WIDTH,
+ SST_MUL_FACTOR_NONE)
+ _read_pp_level_info("max_memory_freq_mhz", perf_level.max_memory_freq_mhz,
+ perf_level.level, SST_PP_INFO_1_OFFSET, SST_PP_MAX_MEMORY_FREQ_START,
+ SST_PP_MAX_MEMORY_FREQ_WIDTH, SST_MUL_FACTOR_FREQ)
+ _read_pp_level_info("cooling_type", perf_level.cooling_type, perf_level.level,
+ SST_PP_INFO_1_OFFSET, SST_PP_COOLING_TYPE_START,
+ SST_PP_COOLING_TYPE_WIDTH, SST_MUL_FACTOR_NONE)
+
+ for (i = 0; i < TRL_MAX_LEVELS; ++i) {
+ for (j = 0; j < TRL_MAX_BUCKETS; ++j)
+ _read_pp_level_info("trl*_bucket*_freq_mhz",
+ perf_level.trl_freq_mhz[i][j], perf_level.level,
+ SST_PP_INFO_4_OFFSET + (i * SST_PP_TRL_0_RATIO_0_WIDTH),
+ j * SST_PP_TRL_0_RATIO_0_WIDTH,
+ SST_PP_TRL_0_RATIO_0_WIDTH,
+ SST_MUL_FACTOR_FREQ);
+ }
+
+ for (i = 0; i < TRL_MAX_BUCKETS; ++i)
+ _read_pp_level_info("bucket*_core_count", perf_level.bucket_core_counts[i],
+ perf_level.level, SST_PP_INFO_10_OFFSET,
+ SST_PP_TRL_CORES_BUCKET_0_WIDTH * i,
+ SST_PP_TRL_CORES_BUCKET_0_WIDTH, SST_MUL_FACTOR_NONE)
+
+ perf_level.max_buckets = TRL_MAX_BUCKETS;
+ perf_level.max_trl_levels = TRL_MAX_LEVELS;
+
+ _read_pp_level_info("p0_freq_mhz", perf_level.p0_freq_mhz, perf_level.level,
+ SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_P0_START,
+ SST_PP_CORE_RATIO_P0_WIDTH, SST_MUL_FACTOR_FREQ)
+ _read_pp_level_info("p1_freq_mhz", perf_level.p1_freq_mhz, perf_level.level,
+ SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_P1_START,
+ SST_PP_CORE_RATIO_P1_WIDTH, SST_MUL_FACTOR_FREQ)
+ _read_pp_level_info("pn_freq_mhz", perf_level.pn_freq_mhz, perf_level.level,
+ SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_PN_START,
+ SST_PP_CORE_RATIO_PN_WIDTH, SST_MUL_FACTOR_FREQ)
+ _read_pp_level_info("pm_freq_mhz", perf_level.pm_freq_mhz, perf_level.level,
+ SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_PM_START,
+ SST_PP_CORE_RATIO_PM_WIDTH, SST_MUL_FACTOR_FREQ)
+ _read_pp_level_info("p0_fabric_freq_mhz", perf_level.p0_fabric_freq_mhz,
+ perf_level.level, SST_PP_INFO_11_OFFSET,
+ SST_PP_CORE_RATIO_P0_FABRIC_START,
+ SST_PP_CORE_RATIO_P0_FABRIC_WIDTH, SST_MUL_FACTOR_FREQ)
+ _read_pp_level_info("p1_fabric_freq_mhz", perf_level.p1_fabric_freq_mhz,
+ perf_level.level, SST_PP_INFO_11_OFFSET,
+ SST_PP_CORE_RATIO_P1_FABRIC_START,
+ SST_PP_CORE_RATIO_P1_FABRIC_WIDTH, SST_MUL_FACTOR_FREQ)
+ _read_pp_level_info("pm_fabric_freq_mhz", perf_level.pm_fabric_freq_mhz,
+ perf_level.level, SST_PP_INFO_11_OFFSET,
+ SST_PP_CORE_RATIO_PM_FABRIC_START,
+ SST_PP_CORE_RATIO_PM_FABRIC_WIDTH, SST_MUL_FACTOR_FREQ)
+
+ if (copy_to_user(argp, &perf_level, sizeof(perf_level)))
+ return -EFAULT;
+
+ return 0;
+}
+
+#define SST_PP_FUSED_CORE_COUNT_START 0
+#define SST_PP_FUSED_CORE_COUNT_WIDTH 8
+
+#define SST_PP_RSLVD_CORE_COUNT_START 8
+#define SST_PP_RSLVD_CORE_COUNT_WIDTH 8
+
+#define SST_PP_RSLVD_CORE_MASK_START 0
+#define SST_PP_RSLVD_CORE_MASK_WIDTH 64
+
+static int isst_if_get_perf_level_mask(void __user *argp)
+{
+ static struct isst_perf_level_cpu_mask cpumask;
+ struct tpmi_per_power_domain_info *power_domain_info;
+ u64 mask;
+
+ if (copy_from_user(&cpumask, argp, sizeof(cpumask)))
+ return -EFAULT;
+
+ power_domain_info = get_instance(cpumask.socket_id, cpumask.power_domain_id);
+ if (!power_domain_info)
+ return -EINVAL;
+
+ _read_pp_level_info("mask", mask, cpumask.level, SST_PP_INFO_2_OFFSET,
+ SST_PP_RSLVD_CORE_MASK_START, SST_PP_RSLVD_CORE_MASK_WIDTH,
+ SST_MUL_FACTOR_NONE)
+
+ cpumask.mask = mask;
+
+ if (!cpumask.punit_cpu_map)
+ return -EOPNOTSUPP;
+
+ if (copy_to_user(argp, &cpumask, sizeof(cpumask)))
+ return -EFAULT;
+
+ return 0;
+}
+
+#define SST_BF_INFO_0_OFFSET 0
+#define SST_BF_INFO_1_OFFSET 8
+
+#define SST_BF_P1_HIGH_START 13
+#define SST_BF_P1_HIGH_WIDTH 8
+
+#define SST_BF_P1_LOW_START 21
+#define SST_BF_P1_LOW_WIDTH 8
+
+#define SST_BF_T_PROHOT_START 38
+#define SST_BF_T_PROHOT_WIDTH 8
+
+#define SST_BF_TDP_START 46
+#define SST_BF_TDP_WIDTH 15
+
+static int isst_if_get_base_freq_info(void __user *argp)
+{
+ static struct isst_base_freq_info base_freq;
+ struct tpmi_per_power_domain_info *power_domain_info;
+
+ if (copy_from_user(&base_freq, argp, sizeof(base_freq)))
+ return -EFAULT;
+
+ power_domain_info = get_instance(base_freq.socket_id, base_freq.power_domain_id);
+ if (!power_domain_info)
+ return -EINVAL;
+
+ if (base_freq.level > power_domain_info->max_level)
+ return -EINVAL;
+
+ _read_bf_level_info("p1_high", base_freq.high_base_freq_mhz, base_freq.level,
+ SST_BF_INFO_0_OFFSET, SST_BF_P1_HIGH_START, SST_BF_P1_HIGH_WIDTH,
+ SST_MUL_FACTOR_FREQ)
+ _read_bf_level_info("p1_low", base_freq.low_base_freq_mhz, base_freq.level,
+ SST_BF_INFO_0_OFFSET, SST_BF_P1_LOW_START, SST_BF_P1_LOW_WIDTH,
+ SST_MUL_FACTOR_FREQ)
+ _read_bf_level_info("BF-TJ", base_freq.tjunction_max_c, base_freq.level,
+ SST_BF_INFO_0_OFFSET, SST_BF_T_PROHOT_START, SST_BF_T_PROHOT_WIDTH,
+ SST_MUL_FACTOR_NONE)
+ _read_bf_level_info("BF-tdp", base_freq.thermal_design_power_w, base_freq.level,
+ SST_BF_INFO_0_OFFSET, SST_BF_TDP_START, SST_BF_TDP_WIDTH,
+ SST_MUL_FACTOR_NONE)
+	base_freq.thermal_design_power_w /= 8; /* units are in 1/8th watt */
+
+ if (copy_to_user(argp, &base_freq, sizeof(base_freq)))
+ return -EFAULT;
+
+ return 0;
+}
+
+#define P1_HI_CORE_MASK_START 0
+#define P1_HI_CORE_MASK_WIDTH 64
+
+static int isst_if_get_base_freq_mask(void __user *argp)
+{
+ static struct isst_perf_level_cpu_mask cpumask;
+ struct tpmi_per_power_domain_info *power_domain_info;
+ u64 mask;
+
+ if (copy_from_user(&cpumask, argp, sizeof(cpumask)))
+ return -EFAULT;
+
+ power_domain_info = get_instance(cpumask.socket_id, cpumask.power_domain_id);
+ if (!power_domain_info)
+ return -EINVAL;
+
+ _read_bf_level_info("BF-cpumask", mask, cpumask.level, SST_BF_INFO_1_OFFSET,
+ P1_HI_CORE_MASK_START, P1_HI_CORE_MASK_WIDTH,
+ SST_MUL_FACTOR_NONE)
+
+ cpumask.mask = mask;
+
+ if (!cpumask.punit_cpu_map)
+ return -EOPNOTSUPP;
+
+ if (copy_to_user(argp, &cpumask, sizeof(cpumask)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int isst_if_get_tpmi_instance_count(void __user *argp)
+{
+ struct isst_tpmi_instance_count tpmi_inst;
+ struct tpmi_sst_struct *sst_inst;
+ int i;
+
+ if (copy_from_user(&tpmi_inst, argp, sizeof(tpmi_inst)))
+ return -EFAULT;
+
+ if (tpmi_inst.socket_id >= topology_max_packages())
+ return -EINVAL;
+
+ tpmi_inst.count = isst_common.sst_inst[tpmi_inst.socket_id]->number_of_power_domains;
+
+ sst_inst = isst_common.sst_inst[tpmi_inst.socket_id];
+ tpmi_inst.valid_mask = 0;
+ for (i = 0; i < sst_inst->number_of_power_domains; ++i) {
+ struct tpmi_per_power_domain_info *pd_info;
+
+ pd_info = &sst_inst->power_domain_info[i];
+ if (pd_info->sst_base)
+ tpmi_inst.valid_mask |= BIT(i);
+ }
+
+ if (copy_to_user(argp, &tpmi_inst, sizeof(tpmi_inst)))
+ return -EFAULT;
+
+ return 0;
+}
+
+#define SST_TF_INFO_0_OFFSET 0
+#define SST_TF_INFO_1_OFFSET 8
+#define SST_TF_INFO_2_OFFSET 16
+
+#define SST_TF_MAX_LP_CLIP_RATIOS TRL_MAX_LEVELS
+
+#define SST_TF_LP_CLIP_RATIO_0_START 16
+#define SST_TF_LP_CLIP_RATIO_0_WIDTH 8
+
+#define SST_TF_RATIO_0_START 0
+#define SST_TF_RATIO_0_WIDTH 8
+
+#define SST_TF_NUM_CORE_0_START 0
+#define SST_TF_NUM_CORE_0_WIDTH 8
+
+static int isst_if_get_turbo_freq_info(void __user *argp)
+{
+ static struct isst_turbo_freq_info turbo_freq;
+ struct tpmi_per_power_domain_info *power_domain_info;
+ int i, j;
+
+ if (copy_from_user(&turbo_freq, argp, sizeof(turbo_freq)))
+ return -EFAULT;
+
+ power_domain_info = get_instance(turbo_freq.socket_id, turbo_freq.power_domain_id);
+ if (!power_domain_info)
+ return -EINVAL;
+
+ if (turbo_freq.level > power_domain_info->max_level)
+ return -EINVAL;
+
+ turbo_freq.max_buckets = TRL_MAX_BUCKETS;
+ turbo_freq.max_trl_levels = TRL_MAX_LEVELS;
+ turbo_freq.max_clip_freqs = SST_TF_MAX_LP_CLIP_RATIOS;
+
+ for (i = 0; i < turbo_freq.max_clip_freqs; ++i)
+ _read_tf_level_info("lp_clip*", turbo_freq.lp_clip_freq_mhz[i],
+ turbo_freq.level, SST_TF_INFO_0_OFFSET,
+ SST_TF_LP_CLIP_RATIO_0_START +
+ (i * SST_TF_LP_CLIP_RATIO_0_WIDTH),
+ SST_TF_LP_CLIP_RATIO_0_WIDTH, SST_MUL_FACTOR_FREQ)
+
+ for (i = 0; i < TRL_MAX_LEVELS; ++i) {
+ for (j = 0; j < TRL_MAX_BUCKETS; ++j)
+ _read_tf_level_info("cydn*_bucket_*_trl",
+ turbo_freq.trl_freq_mhz[i][j], turbo_freq.level,
+ SST_TF_INFO_2_OFFSET + (i * SST_TF_RATIO_0_WIDTH),
+ j * SST_TF_RATIO_0_WIDTH, SST_TF_RATIO_0_WIDTH,
+ SST_MUL_FACTOR_FREQ)
+ }
+
+ for (i = 0; i < TRL_MAX_BUCKETS; ++i)
+ _read_tf_level_info("bucket_*_core_count", turbo_freq.bucket_core_counts[i],
+ turbo_freq.level, SST_TF_INFO_1_OFFSET,
+ SST_TF_NUM_CORE_0_WIDTH * i, SST_TF_NUM_CORE_0_WIDTH,
+ SST_MUL_FACTOR_NONE)
+
+ if (copy_to_user(argp, &turbo_freq, sizeof(turbo_freq)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ long ret = -ENOTTY;
+
+ mutex_lock(&isst_tpmi_dev_lock);
+ switch (cmd) {
+ case ISST_IF_COUNT_TPMI_INSTANCES:
+ ret = isst_if_get_tpmi_instance_count(argp);
+ break;
+ case ISST_IF_CORE_POWER_STATE:
+ ret = isst_if_core_power_state(argp);
+ break;
+ case ISST_IF_CLOS_PARAM:
+ ret = isst_if_clos_param(argp);
+ break;
+ case ISST_IF_CLOS_ASSOC:
+ ret = isst_if_clos_assoc(argp);
+ break;
+ case ISST_IF_PERF_LEVELS:
+ ret = isst_if_get_perf_level(argp);
+ break;
+ case ISST_IF_PERF_SET_LEVEL:
+ ret = isst_if_set_perf_level(argp);
+ break;
+ case ISST_IF_PERF_SET_FEATURE:
+ ret = isst_if_set_perf_feature(argp);
+ break;
+ case ISST_IF_GET_PERF_LEVEL_INFO:
+ ret = isst_if_get_perf_level_info(argp);
+ break;
+ case ISST_IF_GET_PERF_LEVEL_CPU_MASK:
+ ret = isst_if_get_perf_level_mask(argp);
+ break;
+ case ISST_IF_GET_BASE_FREQ_INFO:
+ ret = isst_if_get_base_freq_info(argp);
+ break;
+ case ISST_IF_GET_BASE_FREQ_CPU_MASK:
+ ret = isst_if_get_base_freq_mask(argp);
+ break;
+ case ISST_IF_GET_TURBO_FREQ_INFO:
+ ret = isst_if_get_turbo_freq_info(argp);
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&isst_tpmi_dev_lock);
+
+ return ret;
+}
+
+#define TPMI_SST_AUTO_SUSPEND_DELAY_MS 2000
+
+int tpmi_sst_dev_add(struct auxiliary_device *auxdev)
+{
+ struct intel_tpmi_plat_info *plat_info;
+ struct tpmi_sst_struct *tpmi_sst;
+ int i, ret, pkg = 0, inst = 0;
+ int num_resources;
+
+ plat_info = tpmi_get_platform_data(auxdev);
+ if (!plat_info) {
+ dev_err(&auxdev->dev, "No platform info\n");
+ return -EINVAL;
+ }
+
+ pkg = plat_info->package_id;
+ if (pkg >= topology_max_packages()) {
+		dev_err(&auxdev->dev, "Invalid package id: %x\n", pkg);
+ return -EINVAL;
+ }
+
+ if (isst_common.sst_inst[pkg])
+ return -EEXIST;
+
+ num_resources = tpmi_get_resource_count(auxdev);
+
+ if (!num_resources)
+ return -EINVAL;
+
+ tpmi_sst = devm_kzalloc(&auxdev->dev, sizeof(*tpmi_sst), GFP_KERNEL);
+ if (!tpmi_sst)
+ return -ENOMEM;
+
+ tpmi_sst->power_domain_info = devm_kcalloc(&auxdev->dev, num_resources,
+ sizeof(*tpmi_sst->power_domain_info),
+ GFP_KERNEL);
+ if (!tpmi_sst->power_domain_info)
+ return -ENOMEM;
+
+ tpmi_sst->number_of_power_domains = num_resources;
+
+ for (i = 0; i < num_resources; ++i) {
+ struct resource *res;
+
+ res = tpmi_get_resource_at_index(auxdev, i);
+ if (!res) {
+ tpmi_sst->power_domain_info[i].sst_base = NULL;
+ continue;
+ }
+
+ tpmi_sst->power_domain_info[i].package_id = pkg;
+ tpmi_sst->power_domain_info[i].power_domain_id = i;
+ tpmi_sst->power_domain_info[i].auxdev = auxdev;
+ tpmi_sst->power_domain_info[i].sst_base = devm_ioremap_resource(&auxdev->dev, res);
+ if (IS_ERR(tpmi_sst->power_domain_info[i].sst_base))
+ return PTR_ERR(tpmi_sst->power_domain_info[i].sst_base);
+
+ ret = sst_main(auxdev, &tpmi_sst->power_domain_info[i]);
+ if (ret) {
+ devm_iounmap(&auxdev->dev, tpmi_sst->power_domain_info[i].sst_base);
+ tpmi_sst->power_domain_info[i].sst_base = NULL;
+ continue;
+ }
+
+ ++inst;
+ }
+
+ if (!inst)
+ return -ENODEV;
+
+ tpmi_sst->package_id = pkg;
+ auxiliary_set_drvdata(auxdev, tpmi_sst);
+
+ mutex_lock(&isst_tpmi_dev_lock);
+ if (isst_common.max_index < pkg)
+ isst_common.max_index = pkg;
+ isst_common.sst_inst[pkg] = tpmi_sst;
+ mutex_unlock(&isst_tpmi_dev_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_add, INTEL_TPMI_SST);
+
+void tpmi_sst_dev_remove(struct auxiliary_device *auxdev)
+{
+ struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
+
+ mutex_lock(&isst_tpmi_dev_lock);
+ isst_common.sst_inst[tpmi_sst->package_id] = NULL;
+ mutex_unlock(&isst_tpmi_dev_lock);
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_remove, INTEL_TPMI_SST);
+
+void tpmi_sst_dev_suspend(struct auxiliary_device *auxdev)
+{
+ struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
+ struct tpmi_per_power_domain_info *power_domain_info = tpmi_sst->power_domain_info;
+ void __iomem *cp_base;
+
+ cp_base = power_domain_info->sst_base + power_domain_info->sst_header.cp_offset;
+ power_domain_info->saved_sst_cp_control = readq(cp_base + SST_CP_CONTROL_OFFSET);
+
+ memcpy_fromio(power_domain_info->saved_clos_configs, cp_base + SST_CLOS_CONFIG_0_OFFSET,
+ sizeof(power_domain_info->saved_clos_configs));
+
+ memcpy_fromio(power_domain_info->saved_clos_assocs, cp_base + SST_CLOS_ASSOC_0_OFFSET,
+ sizeof(power_domain_info->saved_clos_assocs));
+
+ power_domain_info->saved_pp_control = readq(power_domain_info->sst_base +
+ power_domain_info->sst_header.pp_offset +
+ SST_PP_CONTROL_OFFSET);
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_suspend, INTEL_TPMI_SST);
+
+void tpmi_sst_dev_resume(struct auxiliary_device *auxdev)
+{
+ struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
+ struct tpmi_per_power_domain_info *power_domain_info = tpmi_sst->power_domain_info;
+ void __iomem *cp_base;
+
+ cp_base = power_domain_info->sst_base + power_domain_info->sst_header.cp_offset;
+ writeq(power_domain_info->saved_sst_cp_control, cp_base + SST_CP_CONTROL_OFFSET);
+
+ memcpy_toio(cp_base + SST_CLOS_CONFIG_0_OFFSET, power_domain_info->saved_clos_configs,
+ sizeof(power_domain_info->saved_clos_configs));
+
+ memcpy_toio(cp_base + SST_CLOS_ASSOC_0_OFFSET, power_domain_info->saved_clos_assocs,
+ sizeof(power_domain_info->saved_clos_assocs));
+
+ writeq(power_domain_info->saved_pp_control, power_domain_info->sst_base +
+ power_domain_info->sst_header.pp_offset + SST_PP_CONTROL_OFFSET);
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_resume, INTEL_TPMI_SST);
+
+#define ISST_TPMI_API_VERSION 0x02
+
+int tpmi_sst_init(void)
+{
+ struct isst_if_cmd_cb cb;
+ int ret = 0;
+
+ mutex_lock(&isst_tpmi_dev_lock);
+
+ if (isst_core_usage_count) {
+ ++isst_core_usage_count;
+ goto init_done;
+ }
+
+ isst_common.sst_inst = kcalloc(topology_max_packages(),
+ sizeof(*isst_common.sst_inst),
+ GFP_KERNEL);
+ if (!isst_common.sst_inst) {
+ ret = -ENOMEM;
+ goto init_done;
+ }
+
+ memset(&cb, 0, sizeof(cb));
+ cb.cmd_size = sizeof(struct isst_if_io_reg);
+ cb.offset = offsetof(struct isst_if_io_regs, io_reg);
+ cb.cmd_callback = NULL;
+ cb.api_version = ISST_TPMI_API_VERSION;
+ cb.def_ioctl = isst_if_def_ioctl;
+ cb.owner = THIS_MODULE;
+ ret = isst_if_cdev_register(ISST_IF_DEV_TPMI, &cb);
+ if (ret)
+ kfree(isst_common.sst_inst);
+ else
+ ++isst_core_usage_count;
+init_done:
+ mutex_unlock(&isst_tpmi_dev_lock);
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_sst_init, INTEL_TPMI_SST);
+
+void tpmi_sst_exit(void)
+{
+ mutex_lock(&isst_tpmi_dev_lock);
+ if (isst_core_usage_count)
+ --isst_core_usage_count;
+
+ if (!isst_core_usage_count) {
+ isst_if_cdev_unregister(ISST_IF_DEV_TPMI);
+ kfree(isst_common.sst_inst);
+ }
+ mutex_unlock(&isst_tpmi_dev_lock);
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_sst_exit, INTEL_TPMI_SST);
+
+MODULE_IMPORT_NS(INTEL_TPMI);
+MODULE_IMPORT_NS(INTEL_TPMI_POWER_DOMAIN);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.h b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.h
new file mode 100644
index 0000000000..900b483703
--- /dev/null
+++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel Speed Select Interface: Drivers Internal defines
+ * Copyright (c) 2023, Intel Corporation.
+ * All rights reserved.
+ *
+ */
+
+#ifndef _ISST_TPMI_CORE_H
+#define _ISST_TPMI_CORE_H
+
+int tpmi_sst_init(void);
+void tpmi_sst_exit(void);
+int tpmi_sst_dev_add(struct auxiliary_device *auxdev);
+void tpmi_sst_dev_remove(struct auxiliary_device *auxdev);
+void tpmi_sst_dev_suspend(struct auxiliary_device *auxdev);
+void tpmi_sst_dev_resume(struct auxiliary_device *auxdev);
+#endif
diff --git a/drivers/platform/x86/intel/telemetry/Kconfig b/drivers/platform/x86/intel/telemetry/Kconfig
new file mode 100644
index 0000000000..da887bd037
--- /dev/null
+++ b/drivers/platform/x86/intel/telemetry/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Intel x86 Platform Specific Drivers
+#
+
+config INTEL_TELEMETRY
+ tristate "Intel SoC Telemetry driver"
+ depends on X86_64
+ depends on MFD_INTEL_PMC_BXT
+ depends on INTEL_PUNIT_IPC
+ help
+ This driver provides interfaces to configure and use
+	  telemetry for Intel SoCs from Apollo Lake onwards.
+ It is also used to get various SoC events and parameters
+ directly via debugfs files. Various tools may use
+ this interface for SoC state monitoring.
diff --git a/drivers/platform/x86/intel/telemetry/Makefile b/drivers/platform/x86/intel/telemetry/Makefile
new file mode 100644
index 0000000000..bfdba5b6c5
--- /dev/null
+++ b/drivers/platform/x86/intel/telemetry/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Intel x86 Platform Specific Drivers
+#
+
+intel_telemetry_core-y := core.o
+obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_core.o
+intel_telemetry_pltdrv-y := pltdrv.o
+obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_pltdrv.o
+intel_telemetry_debugfs-y := debugfs.o
+obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_debugfs.o
diff --git a/drivers/platform/x86/intel/telemetry/core.c b/drivers/platform/x86/intel/telemetry/core.c
new file mode 100644
index 0000000000..e4be40f73e
--- /dev/null
+++ b/drivers/platform/x86/intel/telemetry/core.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel SoC Core Telemetry Driver
+ * Copyright (C) 2015, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Telemetry Framework provides platform related PM and performance statistics.
+ * This file provides the core telemetry API implementation.
+ */
+#include <linux/device.h>
+#include <linux/module.h>
+
+#include <asm/intel_telemetry.h>
+
+#define DRIVER_NAME "intel_telemetry_core"
+
+struct telemetry_core_config {
+ struct telemetry_plt_config *plt_config;
+ const struct telemetry_core_ops *telem_ops;
+};
+
+static struct telemetry_core_config telm_core_conf;
+
+static int telemetry_def_update_events(struct telemetry_evtconfig pss_evtconfig,
+ struct telemetry_evtconfig ioss_evtconfig)
+{
+ return 0;
+}
+
+static int telemetry_def_set_sampling_period(u8 pss_period, u8 ioss_period)
+{
+ return 0;
+}
+
+static int telemetry_def_get_sampling_period(u8 *pss_min_period,
+ u8 *pss_max_period,
+ u8 *ioss_min_period,
+ u8 *ioss_max_period)
+{
+ return 0;
+}
+
+static int telemetry_def_get_eventconfig(
+ struct telemetry_evtconfig *pss_evtconfig,
+ struct telemetry_evtconfig *ioss_evtconfig,
+ int pss_len, int ioss_len)
+{
+ return 0;
+}
+
+static int telemetry_def_get_trace_verbosity(enum telemetry_unit telem_unit,
+ u32 *verbosity)
+{
+ return 0;
+}
+
+
+static int telemetry_def_set_trace_verbosity(enum telemetry_unit telem_unit,
+ u32 verbosity)
+{
+ return 0;
+}
+
+static int telemetry_def_raw_read_eventlog(enum telemetry_unit telem_unit,
+ struct telemetry_evtlog *evtlog,
+ int len, int log_all_evts)
+{
+ return 0;
+}
+
+static int telemetry_def_read_eventlog(enum telemetry_unit telem_unit,
+ struct telemetry_evtlog *evtlog,
+ int len, int log_all_evts)
+{
+ return 0;
+}
+
+static int telemetry_def_add_events(u8 num_pss_evts, u8 num_ioss_evts,
+ u32 *pss_evtmap, u32 *ioss_evtmap)
+{
+ return 0;
+}
+
+static int telemetry_def_reset_events(void)
+{
+ return 0;
+}
+
+static const struct telemetry_core_ops telm_defpltops = {
+ .set_sampling_period = telemetry_def_set_sampling_period,
+ .get_sampling_period = telemetry_def_get_sampling_period,
+ .get_trace_verbosity = telemetry_def_get_trace_verbosity,
+ .set_trace_verbosity = telemetry_def_set_trace_verbosity,
+ .raw_read_eventlog = telemetry_def_raw_read_eventlog,
+ .get_eventconfig = telemetry_def_get_eventconfig,
+ .read_eventlog = telemetry_def_read_eventlog,
+ .update_events = telemetry_def_update_events,
+ .reset_events = telemetry_def_reset_events,
+ .add_events = telemetry_def_add_events,
+};
+
+/**
+ * telemetry_update_events() - Update telemetry Configuration
+ * @pss_evtconfig: PSS related config. No change if num_evts = 0.
+ * @ioss_evtconfig: IOSS related config. No change if num_evts = 0.
+ *
+ * This API updates the IOSS & PSS Telemetry configuration. Old config
+ * is overwritten. Call telemetry_reset_events() when logging is over.
+ * All sample period values should be in the form:
+ * bits [6:3] -> Value; bits [2:0] -> Exponent; Period = Value * 16^Exponent
+ *
+ * Return: 0 success, < 0 for failure
+ */
+int telemetry_update_events(struct telemetry_evtconfig pss_evtconfig,
+ struct telemetry_evtconfig ioss_evtconfig)
+{
+ return telm_core_conf.telem_ops->update_events(pss_evtconfig,
+ ioss_evtconfig);
+}
+EXPORT_SYMBOL_GPL(telemetry_update_events);
+
+
+/**
+ * telemetry_set_sampling_period() - Sets the IOSS & PSS sampling period
+ * @pss_period: PSS sampling period to be set.
+ *              Pass 0 to leave it unchanged
+ * @ioss_period: IOSS sampling period to be set.
+ *              Pass 0 to leave it unchanged
+ *
+ * All values should be in the form of:
+ * bits [6:3] -> Value; bits [2:0] -> Exponent; Period = Value * 16^Exponent
+ *
+ * Return: 0 success, < 0 for failure
+ */
+int telemetry_set_sampling_period(u8 pss_period, u8 ioss_period)
+{
+ return telm_core_conf.telem_ops->set_sampling_period(pss_period,
+ ioss_period);
+}
+EXPORT_SYMBOL_GPL(telemetry_set_sampling_period);
+
+/**
+ * telemetry_get_sampling_period() - Get IOSS & PSS min & max sampling period
+ * @pss_min_period: placeholder for PSS Min Period supported
+ * @pss_max_period: placeholder for PSS Max Period supported
+ * @ioss_min_period: placeholder for IOSS Min Period supported
+ * @ioss_max_period: placeholder for IOSS Max Period supported
+ *
+ * All values should be in the form of:
+ * bits [6:3] -> Value; bits [2:0] -> Exponent; Period = Value * 16^Exponent
+ *
+ * Return: 0 success, < 0 for failure
+ */
+int telemetry_get_sampling_period(u8 *pss_min_period, u8 *pss_max_period,
+ u8 *ioss_min_period, u8 *ioss_max_period)
+{
+ return telm_core_conf.telem_ops->get_sampling_period(pss_min_period,
+ pss_max_period,
+ ioss_min_period,
+ ioss_max_period);
+}
+EXPORT_SYMBOL_GPL(telemetry_get_sampling_period);
+
+
+/**
+ * telemetry_reset_events() - Restore the IOSS & PSS configuration to default
+ *
+ * Return: 0 success, < 0 for failure
+ */
+int telemetry_reset_events(void)
+{
+ return telm_core_conf.telem_ops->reset_events();
+}
+EXPORT_SYMBOL_GPL(telemetry_reset_events);
+
+/**
+ * telemetry_get_eventconfig() - Returns the pss and ioss events enabled
+ * @pss_evtconfig: Pointer to PSS related configuration.
+ * @ioss_evtconfig: Pointer to IOSS related configuration.
+ * @pss_len: Number of u32 elements allocated for pss_evtconfig array
+ * @ioss_len: Number of u32 elements allocated for ioss_evtconfig array
+ *
+ * Return: 0 success, < 0 for failure
+ */
+int telemetry_get_eventconfig(struct telemetry_evtconfig *pss_evtconfig,
+ struct telemetry_evtconfig *ioss_evtconfig,
+ int pss_len, int ioss_len)
+{
+ return telm_core_conf.telem_ops->get_eventconfig(pss_evtconfig,
+ ioss_evtconfig,
+ pss_len, ioss_len);
+}
+EXPORT_SYMBOL_GPL(telemetry_get_eventconfig);
+
+/**
+ * telemetry_add_events() - Add IOSS & PSS configuration to existing settings.
+ * @num_pss_evts: Number of PSS Events (<29) in pss_evtmap. Can be 0.
+ * @num_ioss_evts: Number of IOSS Events (<29) in ioss_evtmap. Can be 0.
+ * @pss_evtmap: Array of PSS Event-IDs to Enable
+ * @ioss_evtmap: Array of IOSS Event-IDs to Enable
+ *
+ * Events are appended to the old configuration. If the total number of
+ * events would exceed 28, an error is returned. Call telemetry_reset_events()
+ * once event logging is done.
+ *
+ * Return: 0 success, < 0 for failure
+ */
+int telemetry_add_events(u8 num_pss_evts, u8 num_ioss_evts,
+ u32 *pss_evtmap, u32 *ioss_evtmap)
+{
+ return telm_core_conf.telem_ops->add_events(num_pss_evts,
+ num_ioss_evts, pss_evtmap,
+ ioss_evtmap);
+}
+EXPORT_SYMBOL_GPL(telemetry_add_events);
+
+/**
+ * telemetry_read_events() - Fetches samples as specified by evtlog.telem_evt_id
+ * @telem_unit: Specify whether IOSS or PSS Read
+ * @evtlog: Array of telemetry_evtlog structs to fill data
+ * evtlog.telem_evt_id specifies the ids to read
+ * @len: Length of array of evtlog
+ *
+ * Return: number of eventlogs read for success, < 0 for failure
+ */
+int telemetry_read_events(enum telemetry_unit telem_unit,
+ struct telemetry_evtlog *evtlog, int len)
+{
+ return telm_core_conf.telem_ops->read_eventlog(telem_unit, evtlog,
+ len, 0);
+}
+EXPORT_SYMBOL_GPL(telemetry_read_events);
+
+/**
+ * telemetry_raw_read_events() - Fetch samples specified by evtlog.telem_evt_id
+ * @telem_unit: Specify whether IOSS or PSS Read
+ * @evtlog: Array of telemetry_evtlog structs to fill data
+ * evtlog.telem_evt_id specifies the ids to read
+ * @len: Length of array of evtlog
+ *
+ * The caller must take care of locking in this case.
+ *
+ * Return: number of eventlogs read for success, < 0 for failure
+ */
+int telemetry_raw_read_events(enum telemetry_unit telem_unit,
+ struct telemetry_evtlog *evtlog, int len)
+{
+ return telm_core_conf.telem_ops->raw_read_eventlog(telem_unit, evtlog,
+ len, 0);
+}
+EXPORT_SYMBOL_GPL(telemetry_raw_read_events);
+
+/**
+ * telemetry_read_eventlog() - Fetch the Telemetry log from PSS or IOSS
+ * @telem_unit: Specify whether IOSS or PSS Read
+ * @evtlog: Array of telemetry_evtlog structs to fill data
+ * @len: Length of array of evtlog
+ *
+ * Return: number of eventlogs read for success, < 0 for failure
+ */
+int telemetry_read_eventlog(enum telemetry_unit telem_unit,
+ struct telemetry_evtlog *evtlog, int len)
+{
+ return telm_core_conf.telem_ops->read_eventlog(telem_unit, evtlog,
+ len, 1);
+}
+EXPORT_SYMBOL_GPL(telemetry_read_eventlog);
+
+/**
+ * telemetry_raw_read_eventlog() - Fetch the Telemetry log from PSS or IOSS
+ * @telem_unit: Specify whether IOSS or PSS Read
+ * @evtlog: Array of telemetry_evtlog structs to fill data
+ * @len: Length of array of evtlog
+ *
+ * The caller must take care of locking in this case.
+ *
+ * Return: number of eventlogs read for success, < 0 for failure
+ */
+int telemetry_raw_read_eventlog(enum telemetry_unit telem_unit,
+ struct telemetry_evtlog *evtlog, int len)
+{
+ return telm_core_conf.telem_ops->raw_read_eventlog(telem_unit, evtlog,
+ len, 1);
+}
+EXPORT_SYMBOL_GPL(telemetry_raw_read_eventlog);
+
+
+/**
+ * telemetry_get_trace_verbosity() - Get the IOSS & PSS Trace verbosity
+ * @telem_unit: Specify whether IOSS or PSS Read
+ * @verbosity: Pointer to return Verbosity
+ *
+ * Return: 0 success, < 0 for failure
+ */
+int telemetry_get_trace_verbosity(enum telemetry_unit telem_unit,
+ u32 *verbosity)
+{
+ return telm_core_conf.telem_ops->get_trace_verbosity(telem_unit,
+ verbosity);
+}
+EXPORT_SYMBOL_GPL(telemetry_get_trace_verbosity);
+
+
+/**
+ * telemetry_set_trace_verbosity() - Update the IOSS & PSS Trace verbosity
+ * @telem_unit: Specify whether IOSS or PSS Read
+ * @verbosity: Verbosity to set
+ *
+ * Return: 0 success, < 0 for failure
+ */
+int telemetry_set_trace_verbosity(enum telemetry_unit telem_unit, u32 verbosity)
+{
+ return telm_core_conf.telem_ops->set_trace_verbosity(telem_unit,
+ verbosity);
+}
+EXPORT_SYMBOL_GPL(telemetry_set_trace_verbosity);
+
+/**
+ * telemetry_set_pltdata() - Set the platform specific Data
+ * @ops: Pointer to ops structure
+ * @pltconfig: Platform config data
+ *
+ * Use by modules other than the telemetry pltdrv module is invalid
+ *
+ * Return: 0 success, < 0 for failure
+ */
+int telemetry_set_pltdata(const struct telemetry_core_ops *ops,
+ struct telemetry_plt_config *pltconfig)
+{
+ if (ops)
+ telm_core_conf.telem_ops = ops;
+
+ if (pltconfig)
+ telm_core_conf.plt_config = pltconfig;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(telemetry_set_pltdata);
+
+/**
+ * telemetry_clear_pltdata() - Clear the platform specific Data
+ *
+ * Use by modules other than the telemetry pltdrv module is invalid
+ *
+ * Return: 0 success, < 0 for failure
+ */
+int telemetry_clear_pltdata(void)
+{
+ telm_core_conf.telem_ops = &telm_defpltops;
+ telm_core_conf.plt_config = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(telemetry_clear_pltdata);
+
+/**
+ * telemetry_get_pltdata() - Return telemetry platform config
+ *
+ * May be used by other telemetry modules to get platform specific
+ * configuration.
+ */
+struct telemetry_plt_config *telemetry_get_pltdata(void)
+{
+ return telm_core_conf.plt_config;
+}
+EXPORT_SYMBOL_GPL(telemetry_get_pltdata);
+
+static inline int telemetry_get_pssevtname(enum telemetry_unit telem_unit,
+ const char **name, int len)
+{
+ struct telemetry_unit_config psscfg;
+ int i;
+
+ if (!telm_core_conf.plt_config)
+ return -EINVAL;
+
+ psscfg = telm_core_conf.plt_config->pss_config;
+
+ if (len > psscfg.ssram_evts_used)
+ len = psscfg.ssram_evts_used;
+
+ for (i = 0; i < len; i++)
+ name[i] = psscfg.telem_evts[i].name;
+
+ return 0;
+}
+
+static inline int telemetry_get_iossevtname(enum telemetry_unit telem_unit,
+ const char **name, int len)
+{
+ struct telemetry_unit_config iosscfg;
+ int i;
+
+ if (!(telm_core_conf.plt_config))
+ return -EINVAL;
+
+ iosscfg = telm_core_conf.plt_config->ioss_config;
+
+ if (len > iosscfg.ssram_evts_used)
+ len = iosscfg.ssram_evts_used;
+
+ for (i = 0; i < len; i++)
+ name[i] = iosscfg.telem_evts[i].name;
+
+ return 0;
+}
+
+/**
+ * telemetry_get_evtname() - Fetch the event names of a telemetry unit
+ * @telem_unit: Telemetry Unit to check
+ * @name: Array of character pointers to contain name
+ * @len: length of array name provided by user
+ *
+ * Use by modules other than the telemetry debugfs module is invalid
+ *
+ * Return: 0 success, < 0 for failure
+ */
+int telemetry_get_evtname(enum telemetry_unit telem_unit,
+ const char **name, int len)
+{
+ int ret = -EINVAL;
+
+	if (telem_unit == TELEM_PSS)
+		ret = telemetry_get_pssevtname(telem_unit, name, len);
+	else if (telem_unit == TELEM_IOSS)
+		ret = telemetry_get_iossevtname(telem_unit, name, len);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(telemetry_get_evtname);
+
+static int __init telemetry_module_init(void)
+{
+ pr_info(pr_fmt(DRIVER_NAME) " Init\n");
+
+ telm_core_conf.telem_ops = &telm_defpltops;
+ return 0;
+}
+
+static void __exit telemetry_module_exit(void)
+{
+}
+
+module_init(telemetry_module_init);
+module_exit(telemetry_module_exit);
+
+MODULE_AUTHOR("Souvik Kumar Chakravarty <souvik.k.chakravarty@intel.com>");
+MODULE_DESCRIPTION("Intel SoC Telemetry Interface");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/telemetry/debugfs.c b/drivers/platform/x86/intel/telemetry/debugfs.c
new file mode 100644
index 0000000000..1d4d0fbfd6
--- /dev/null
+++ b/drivers/platform/x86/intel/telemetry/debugfs.c
@@ -0,0 +1,961 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel SOC Telemetry debugfs Driver: Currently supports APL
+ * Copyright (c) 2015, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This file provides the debugfs interfaces for telemetry.
+ * /sys/kernel/debug/telemetry/pss_info: Shows Primary Control Sub-Sys Counters
+ * /sys/kernel/debug/telemetry/ioss_info: Shows IO Sub-System Counters
+ * /sys/kernel/debug/telemetry/soc_states: Shows SoC State
+ * /sys/kernel/debug/telemetry/pss_trace_verbosity: Read and Change Tracing
+ * Verbosity via firmware
+ * /sys/kernel/debug/telemetry/ioss_trace_verbosity: Read and Change Tracing
+ * Verbosity via firmware
+ */
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/mfd/intel_pmc_bxt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/seq_file.h>
+#include <linux/suspend.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+#include <asm/intel_telemetry.h>
+
+#define DRIVER_NAME "telemetry_soc_debugfs"
+#define DRIVER_VERSION "1.0.0"
+
+/* ApolloLake SoC Event-IDs */
+#define TELEM_APL_PSS_PSTATES_ID 0x2802
+#define TELEM_APL_PSS_IDLE_ID 0x2806
+#define TELEM_APL_PCS_IDLE_BLOCKED_ID 0x2C00
+#define TELEM_APL_PCS_S0IX_BLOCKED_ID 0x2C01
+#define TELEM_APL_PSS_WAKEUP_ID 0x2C02
+#define TELEM_APL_PSS_LTR_BLOCKING_ID 0x2C03
+
+#define TELEM_APL_S0IX_TOTAL_OCC_ID 0x4000
+#define TELEM_APL_S0IX_SHLW_OCC_ID 0x4001
+#define TELEM_APL_S0IX_DEEP_OCC_ID 0x4002
+#define TELEM_APL_S0IX_TOTAL_RES_ID 0x4800
+#define TELEM_APL_S0IX_SHLW_RES_ID 0x4801
+#define TELEM_APL_S0IX_DEEP_RES_ID 0x4802
+#define TELEM_APL_D0IX_ID 0x581A
+#define TELEM_APL_D3_ID 0x5819
+#define TELEM_APL_PG_ID 0x5818
+
+#define TELEM_INFO_SRAMEVTS_MASK 0xFF00
+#define TELEM_INFO_SRAMEVTS_SHIFT 0x8
+#define TELEM_SSRAM_READ_TIMEOUT 10
+
+#define TELEM_MASK_BIT 1
+#define TELEM_MASK_BYTE 0xFF
+#define BYTES_PER_LONG 8
+#define TELEM_APL_MASK_PCS_STATE 0xF
+
+/* Max events in bitmap to check for */
+#define TELEM_PSS_IDLE_EVTS 25
+#define TELEM_PSS_IDLE_BLOCKED_EVTS 20
+#define TELEM_PSS_S0IX_BLOCKED_EVTS 20
+#define TELEM_PSS_S0IX_WAKEUP_EVTS 20
+#define TELEM_PSS_LTR_BLOCKING_EVTS 20
+#define TELEM_IOSS_DX_D0IX_EVTS 25
+#define TELEM_IOSS_PG_EVTS 30
+
+#define TELEM_CHECK_AND_PARSE_EVTS(EVTID, EVTNUM, BUF, EVTLOG, EVTDAT, MASK) { \
+ if (evtlog[index].telem_evtid == (EVTID)) { \
+ for (idx = 0; idx < (EVTNUM); idx++) \
+ (BUF)[idx] = ((EVTLOG) >> (EVTDAT)[idx].bit_pos) & \
+ (MASK); \
+ continue; \
+ } \
+}
+
+#define TELEM_CHECK_AND_PARSE_CTRS(EVTID, CTR) { \
+ if (evtlog[index].telem_evtid == (EVTID)) { \
+ (CTR) = evtlog[index].telem_evtlog; \
+ continue; \
+ } \
+}
+
+static u8 suspend_prep_ok;
+static u32 suspend_shlw_ctr_temp, suspend_deep_ctr_temp;
+static u64 suspend_shlw_res_temp, suspend_deep_res_temp;
+
+struct telemetry_susp_stats {
+ u32 shlw_ctr;
+ u32 deep_ctr;
+ u64 shlw_res;
+ u64 deep_res;
+};
+
+/* Bitmap definitions for default counters in APL */
+struct telem_pss_idle_stateinfo {
+ const char *name;
+ u32 bit_pos;
+};
+
+static struct telem_pss_idle_stateinfo telem_apl_pss_idle_data[] = {
+ {"IA_CORE0_C1E", 0},
+ {"IA_CORE1_C1E", 1},
+ {"IA_CORE2_C1E", 2},
+ {"IA_CORE3_C1E", 3},
+ {"IA_CORE0_C6", 16},
+ {"IA_CORE1_C6", 17},
+ {"IA_CORE2_C6", 18},
+ {"IA_CORE3_C6", 19},
+ {"IA_MODULE0_C7", 32},
+ {"IA_MODULE1_C7", 33},
+ {"GT_RC6", 40},
+ {"IUNIT_PROCESSING_IDLE", 41},
+ {"FAR_MEM_IDLE", 43},
+ {"DISPLAY_IDLE", 44},
+ {"IUNIT_INPUT_SYSTEM_IDLE", 45},
+ {"PCS_STATUS", 60},
+};
+
+struct telem_pcs_blkd_info {
+ const char *name;
+ u32 bit_pos;
+};
+
+static struct telem_pcs_blkd_info telem_apl_pcs_idle_blkd_data[] = {
+ {"COMPUTE", 0},
+ {"MISC", 8},
+ {"MODULE_ACTIONS_PENDING", 16},
+ {"LTR", 24},
+ {"DISPLAY_WAKE", 32},
+ {"ISP_WAKE", 40},
+ {"PSF0_ACTIVE", 48},
+};
+
+static struct telem_pcs_blkd_info telem_apl_pcs_s0ix_blkd_data[] = {
+ {"LTR", 0},
+ {"IRTL", 8},
+ {"WAKE_DEADLINE_PENDING", 16},
+ {"DISPLAY", 24},
+ {"ISP", 32},
+ {"CORE", 40},
+ {"PMC", 48},
+ {"MISC", 56},
+};
+
+struct telem_pss_ltr_info {
+ const char *name;
+ u32 bit_pos;
+};
+
+static struct telem_pss_ltr_info telem_apl_pss_ltr_data[] = {
+ {"CORE_ACTIVE", 0},
+ {"MEM_UP", 8},
+ {"DFX", 16},
+ {"DFX_FORCE_LTR", 24},
+ {"DISPLAY", 32},
+ {"ISP", 40},
+ {"SOUTH", 48},
+};
+
+struct telem_pss_wakeup_info {
+ const char *name;
+ u32 bit_pos;
+};
+
+static struct telem_pss_wakeup_info telem_apl_pss_wakeup[] = {
+ {"IP_IDLE", 0},
+ {"DISPLAY_WAKE", 8},
+ {"VOLTAGE_REG_INT", 16},
+ {"DROWSY_TIMER (HOTPLUG)", 24},
+ {"CORE_WAKE", 32},
+ {"MISC_S0IX", 40},
+ {"MISC_ABORT", 56},
+};
+
+struct telem_ioss_d0ix_stateinfo {
+ const char *name;
+ u32 bit_pos;
+};
+
+static struct telem_ioss_d0ix_stateinfo telem_apl_ioss_d0ix_data[] = {
+ {"CSE", 0},
+ {"SCC2", 1},
+ {"GMM", 2},
+ {"XDCI", 3},
+ {"XHCI", 4},
+ {"ISH", 5},
+ {"AVS", 6},
+ {"PCIE0P1", 7},
+ {"PECI0P0", 8},
+ {"LPSS", 9},
+ {"SCC", 10},
+ {"PWM", 11},
+ {"PCIE1_P3", 12},
+ {"PCIE1_P2", 13},
+ {"PCIE1_P1", 14},
+ {"PCIE1_P0", 15},
+ {"CNV", 16},
+ {"SATA", 17},
+ {"PRTC", 18},
+};
+
+struct telem_ioss_pg_info {
+ const char *name;
+ u32 bit_pos;
+};
+
+static struct telem_ioss_pg_info telem_apl_ioss_pg_data[] = {
+ {"LPSS", 0},
+ {"SCC", 1},
+ {"P2SB", 2},
+ {"SCC2", 3},
+ {"GMM", 4},
+ {"PCIE0", 5},
+ {"XDCI", 6},
+ {"xHCI", 7},
+ {"CSE", 8},
+ {"SPI", 9},
+ {"AVSPGD4", 10},
+ {"AVSPGD3", 11},
+ {"AVSPGD2", 12},
+ {"AVSPGD1", 13},
+ {"ISH", 14},
+ {"EXI", 15},
+ {"NPKVRC", 16},
+ {"NPKVNN", 17},
+ {"CUNIT", 18},
+ {"FUSE_CTRL", 19},
+ {"PCIE1", 20},
+ {"CNV", 21},
+ {"LPC", 22},
+ {"SATA", 23},
+ {"SMB", 24},
+ {"PRTC", 25},
+};
+
+struct telemetry_debugfs_conf {
+ struct telemetry_susp_stats suspend_stats;
+ struct dentry *telemetry_dbg_dir;
+
+ /* Bitmap Data */
+ struct telem_ioss_d0ix_stateinfo *ioss_d0ix_data;
+ struct telem_pss_idle_stateinfo *pss_idle_data;
+ struct telem_pcs_blkd_info *pcs_idle_blkd_data;
+ struct telem_pcs_blkd_info *pcs_s0ix_blkd_data;
+ struct telem_pss_wakeup_info *pss_wakeup;
+ struct telem_pss_ltr_info *pss_ltr_data;
+ struct telem_ioss_pg_info *ioss_pg_data;
+ u8 pcs_idle_blkd_evts;
+ u8 pcs_s0ix_blkd_evts;
+ u8 pss_wakeup_evts;
+ u8 pss_idle_evts;
+ u8 pss_ltr_evts;
+ u8 ioss_d0ix_evts;
+ u8 ioss_pg_evts;
+
+ /* IDs */
+ u16 pss_ltr_blocking_id;
+ u16 pcs_idle_blkd_id;
+ u16 pcs_s0ix_blkd_id;
+ u16 s0ix_total_occ_id;
+ u16 s0ix_shlw_occ_id;
+ u16 s0ix_deep_occ_id;
+ u16 s0ix_total_res_id;
+ u16 s0ix_shlw_res_id;
+ u16 s0ix_deep_res_id;
+ u16 pss_wakeup_id;
+ u16 ioss_d0ix_id;
+ u16 pstates_id;
+ u16 pss_idle_id;
+ u16 ioss_d3_id;
+ u16 ioss_pg_id;
+};
+
+static struct telemetry_debugfs_conf *debugfs_conf;
+
+static struct telemetry_debugfs_conf telem_apl_debugfs_conf = {
+ .pss_idle_data = telem_apl_pss_idle_data,
+ .pcs_idle_blkd_data = telem_apl_pcs_idle_blkd_data,
+ .pcs_s0ix_blkd_data = telem_apl_pcs_s0ix_blkd_data,
+ .pss_ltr_data = telem_apl_pss_ltr_data,
+ .pss_wakeup = telem_apl_pss_wakeup,
+ .ioss_d0ix_data = telem_apl_ioss_d0ix_data,
+ .ioss_pg_data = telem_apl_ioss_pg_data,
+
+ .pss_idle_evts = ARRAY_SIZE(telem_apl_pss_idle_data),
+ .pcs_idle_blkd_evts = ARRAY_SIZE(telem_apl_pcs_idle_blkd_data),
+ .pcs_s0ix_blkd_evts = ARRAY_SIZE(telem_apl_pcs_s0ix_blkd_data),
+ .pss_ltr_evts = ARRAY_SIZE(telem_apl_pss_ltr_data),
+ .pss_wakeup_evts = ARRAY_SIZE(telem_apl_pss_wakeup),
+ .ioss_d0ix_evts = ARRAY_SIZE(telem_apl_ioss_d0ix_data),
+ .ioss_pg_evts = ARRAY_SIZE(telem_apl_ioss_pg_data),
+
+ .pstates_id = TELEM_APL_PSS_PSTATES_ID,
+ .pss_idle_id = TELEM_APL_PSS_IDLE_ID,
+ .pcs_idle_blkd_id = TELEM_APL_PCS_IDLE_BLOCKED_ID,
+ .pcs_s0ix_blkd_id = TELEM_APL_PCS_S0IX_BLOCKED_ID,
+ .pss_wakeup_id = TELEM_APL_PSS_WAKEUP_ID,
+ .pss_ltr_blocking_id = TELEM_APL_PSS_LTR_BLOCKING_ID,
+ .s0ix_total_occ_id = TELEM_APL_S0IX_TOTAL_OCC_ID,
+ .s0ix_shlw_occ_id = TELEM_APL_S0IX_SHLW_OCC_ID,
+ .s0ix_deep_occ_id = TELEM_APL_S0IX_DEEP_OCC_ID,
+ .s0ix_total_res_id = TELEM_APL_S0IX_TOTAL_RES_ID,
+ .s0ix_shlw_res_id = TELEM_APL_S0IX_SHLW_RES_ID,
+ .s0ix_deep_res_id = TELEM_APL_S0IX_DEEP_RES_ID,
+ .ioss_d0ix_id = TELEM_APL_D0IX_ID,
+ .ioss_d3_id = TELEM_APL_D3_ID,
+ .ioss_pg_id = TELEM_APL_PG_ID,
+};
+
+static const struct x86_cpu_id telemetry_debugfs_cpu_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &telem_apl_debugfs_conf),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &telem_apl_debugfs_conf),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, telemetry_debugfs_cpu_ids);
+
+static int telemetry_debugfs_check_evts(void)
+{
+ if ((debugfs_conf->pss_idle_evts > TELEM_PSS_IDLE_EVTS) ||
+ (debugfs_conf->pcs_idle_blkd_evts > TELEM_PSS_IDLE_BLOCKED_EVTS) ||
+ (debugfs_conf->pcs_s0ix_blkd_evts > TELEM_PSS_S0IX_BLOCKED_EVTS) ||
+ (debugfs_conf->pss_ltr_evts > TELEM_PSS_LTR_BLOCKING_EVTS) ||
+ (debugfs_conf->pss_wakeup_evts > TELEM_PSS_S0IX_WAKEUP_EVTS) ||
+ (debugfs_conf->ioss_d0ix_evts > TELEM_IOSS_DX_D0IX_EVTS) ||
+ (debugfs_conf->ioss_pg_evts > TELEM_IOSS_PG_EVTS))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int telem_pss_states_show(struct seq_file *s, void *unused)
+{
+ struct telemetry_evtlog evtlog[TELEM_MAX_OS_ALLOCATED_EVENTS];
+ struct telemetry_debugfs_conf *conf = debugfs_conf;
+ const char *name[TELEM_MAX_OS_ALLOCATED_EVENTS];
+ u32 pcs_idle_blkd[TELEM_PSS_IDLE_BLOCKED_EVTS],
+ pcs_s0ix_blkd[TELEM_PSS_S0IX_BLOCKED_EVTS],
+ pss_s0ix_wakeup[TELEM_PSS_S0IX_WAKEUP_EVTS],
+ pss_ltr_blkd[TELEM_PSS_LTR_BLOCKING_EVTS],
+ pss_idle[TELEM_PSS_IDLE_EVTS];
+ int index, idx, ret, err = 0;
+ u64 pstates = 0;
+
+ ret = telemetry_read_eventlog(TELEM_PSS, evtlog,
+ TELEM_MAX_OS_ALLOCATED_EVENTS);
+ if (ret < 0)
+ return ret;
+
+ err = telemetry_get_evtname(TELEM_PSS, name,
+ TELEM_MAX_OS_ALLOCATED_EVENTS);
+ if (err < 0)
+ return err;
+
+ seq_puts(s, "\n----------------------------------------------------\n");
+	seq_puts(s, "\tPSS TELEM EVENTLOG (Residency = field/19.2 us)\n");
+ seq_puts(s, "----------------------------------------------------\n");
+ for (index = 0; index < ret; index++) {
+ seq_printf(s, "%-32s %llu\n",
+ name[index], evtlog[index].telem_evtlog);
+
+ /* Fetch PSS IDLE State */
+ if (evtlog[index].telem_evtid == conf->pss_idle_id) {
+ pss_idle[conf->pss_idle_evts - 1] =
+ (evtlog[index].telem_evtlog >>
+ conf->pss_idle_data[conf->pss_idle_evts - 1].bit_pos) &
+ TELEM_APL_MASK_PCS_STATE;
+ }
+
+ TELEM_CHECK_AND_PARSE_EVTS(conf->pss_idle_id,
+ conf->pss_idle_evts - 1,
+ pss_idle, evtlog[index].telem_evtlog,
+ conf->pss_idle_data, TELEM_MASK_BIT);
+
+ TELEM_CHECK_AND_PARSE_EVTS(conf->pcs_idle_blkd_id,
+ conf->pcs_idle_blkd_evts,
+ pcs_idle_blkd,
+ evtlog[index].telem_evtlog,
+ conf->pcs_idle_blkd_data,
+ TELEM_MASK_BYTE);
+
+ TELEM_CHECK_AND_PARSE_EVTS(conf->pcs_s0ix_blkd_id,
+ conf->pcs_s0ix_blkd_evts,
+ pcs_s0ix_blkd,
+ evtlog[index].telem_evtlog,
+ conf->pcs_s0ix_blkd_data,
+ TELEM_MASK_BYTE);
+
+ TELEM_CHECK_AND_PARSE_EVTS(conf->pss_wakeup_id,
+ conf->pss_wakeup_evts,
+ pss_s0ix_wakeup,
+ evtlog[index].telem_evtlog,
+ conf->pss_wakeup, TELEM_MASK_BYTE);
+
+ TELEM_CHECK_AND_PARSE_EVTS(conf->pss_ltr_blocking_id,
+ conf->pss_ltr_evts, pss_ltr_blkd,
+ evtlog[index].telem_evtlog,
+ conf->pss_ltr_data, TELEM_MASK_BYTE);
+
+ if (evtlog[index].telem_evtid == debugfs_conf->pstates_id)
+ pstates = evtlog[index].telem_evtlog;
+ }
+
+ seq_puts(s, "\n--------------------------------------\n");
+ seq_puts(s, "PStates\n");
+ seq_puts(s, "--------------------------------------\n");
+ seq_puts(s, "Domain\t\t\t\tFreq(Mhz)\n");
+ seq_printf(s, " IA\t\t\t\t %llu\n GT\t\t\t\t %llu\n",
+ (pstates & TELEM_MASK_BYTE)*100,
+ ((pstates >> 8) & TELEM_MASK_BYTE)*50/3);
+
+ seq_printf(s, " IUNIT\t\t\t\t %llu\n SA\t\t\t\t %llu\n",
+ ((pstates >> 16) & TELEM_MASK_BYTE)*25,
+ ((pstates >> 24) & TELEM_MASK_BYTE)*50/3);
+
+ seq_puts(s, "\n--------------------------------------\n");
+ seq_puts(s, "PSS IDLE Status\n");
+ seq_puts(s, "--------------------------------------\n");
+ seq_puts(s, "Device\t\t\t\t\tIDLE\n");
+ for (index = 0; index < debugfs_conf->pss_idle_evts; index++) {
+ seq_printf(s, "%-32s\t%u\n",
+ debugfs_conf->pss_idle_data[index].name,
+ pss_idle[index]);
+ }
+
+ seq_puts(s, "\n--------------------------------------\n");
+ seq_puts(s, "PSS Idle blkd Status (~1ms saturating bucket)\n");
+ seq_puts(s, "--------------------------------------\n");
+ seq_puts(s, "Blocker\t\t\t\t\tCount\n");
+ for (index = 0; index < debugfs_conf->pcs_idle_blkd_evts; index++) {
+ seq_printf(s, "%-32s\t%u\n",
+ debugfs_conf->pcs_idle_blkd_data[index].name,
+ pcs_idle_blkd[index]);
+ }
+
+ seq_puts(s, "\n--------------------------------------\n");
+ seq_puts(s, "PSS S0ix blkd Status (~1ms saturating bucket)\n");
+ seq_puts(s, "--------------------------------------\n");
+ seq_puts(s, "Blocker\t\t\t\t\tCount\n");
+ for (index = 0; index < debugfs_conf->pcs_s0ix_blkd_evts; index++) {
+ seq_printf(s, "%-32s\t%u\n",
+ debugfs_conf->pcs_s0ix_blkd_data[index].name,
+ pcs_s0ix_blkd[index]);
+ }
+
+ seq_puts(s, "\n--------------------------------------\n");
+ seq_puts(s, "LTR Blocking Status (~1ms saturating bucket)\n");
+ seq_puts(s, "--------------------------------------\n");
+ seq_puts(s, "Blocker\t\t\t\t\tCount\n");
+ for (index = 0; index < debugfs_conf->pss_ltr_evts; index++) {
+ seq_printf(s, "%-32s\t%u\n",
+ debugfs_conf->pss_ltr_data[index].name,
+			   pss_ltr_blkd[index]);
+ }
+
+ seq_puts(s, "\n--------------------------------------\n");
+ seq_puts(s, "Wakes Status (~1ms saturating bucket)\n");
+ seq_puts(s, "--------------------------------------\n");
+ seq_puts(s, "Wakes\t\t\t\t\tCount\n");
+ for (index = 0; index < debugfs_conf->pss_wakeup_evts; index++) {
+ seq_printf(s, "%-32s\t%u\n",
+ debugfs_conf->pss_wakeup[index].name,
+			   pss_s0ix_wakeup[index]);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(telem_pss_states);
+
+static int telem_ioss_states_show(struct seq_file *s, void *unused)
+{
+ struct telemetry_evtlog evtlog[TELEM_MAX_OS_ALLOCATED_EVENTS];
+ const char *name[TELEM_MAX_OS_ALLOCATED_EVENTS];
+ int index, ret, err;
+
+ ret = telemetry_read_eventlog(TELEM_IOSS, evtlog,
+ TELEM_MAX_OS_ALLOCATED_EVENTS);
+ if (ret < 0)
+ return ret;
+
+ err = telemetry_get_evtname(TELEM_IOSS, name,
+ TELEM_MAX_OS_ALLOCATED_EVENTS);
+ if (err < 0)
+ return err;
+
+ seq_puts(s, "--------------------------------------\n");
+	seq_puts(s, "\tIOSS TELEMETRY EVENTLOG\n");
+ seq_puts(s, "--------------------------------------\n");
+ for (index = 0; index < ret; index++) {
+ seq_printf(s, "%-32s 0x%llx\n",
+ name[index], evtlog[index].telem_evtlog);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(telem_ioss_states);
+
+static int telem_soc_states_show(struct seq_file *s, void *unused)
+{
+ u32 d3_sts[TELEM_IOSS_DX_D0IX_EVTS], d0ix_sts[TELEM_IOSS_DX_D0IX_EVTS];
+ u32 pg_sts[TELEM_IOSS_PG_EVTS], pss_idle[TELEM_PSS_IDLE_EVTS];
+ struct telemetry_evtlog evtlog[TELEM_MAX_OS_ALLOCATED_EVENTS];
+ u32 s0ix_total_ctr = 0, s0ix_shlw_ctr = 0, s0ix_deep_ctr = 0;
+ u64 s0ix_total_res = 0, s0ix_shlw_res = 0, s0ix_deep_res = 0;
+ struct telemetry_debugfs_conf *conf = debugfs_conf;
+ struct pci_dev *dev = NULL;
+ int index, idx, ret;
+ u32 d3_state;
+ u16 pmcsr;
+
+ ret = telemetry_read_eventlog(TELEM_IOSS, evtlog,
+ TELEM_MAX_OS_ALLOCATED_EVENTS);
+ if (ret < 0)
+ return ret;
+
+ for (index = 0; index < ret; index++) {
+ TELEM_CHECK_AND_PARSE_EVTS(conf->ioss_d3_id,
+ conf->ioss_d0ix_evts,
+ d3_sts, evtlog[index].telem_evtlog,
+ conf->ioss_d0ix_data,
+ TELEM_MASK_BIT);
+
+ TELEM_CHECK_AND_PARSE_EVTS(conf->ioss_pg_id, conf->ioss_pg_evts,
+ pg_sts, evtlog[index].telem_evtlog,
+ conf->ioss_pg_data, TELEM_MASK_BIT);
+
+ TELEM_CHECK_AND_PARSE_EVTS(conf->ioss_d0ix_id,
+ conf->ioss_d0ix_evts,
+ d0ix_sts, evtlog[index].telem_evtlog,
+ conf->ioss_d0ix_data,
+ TELEM_MASK_BIT);
+
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_total_occ_id,
+ s0ix_total_ctr);
+
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_shlw_occ_id,
+ s0ix_shlw_ctr);
+
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_deep_occ_id,
+ s0ix_deep_ctr);
+
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_total_res_id,
+ s0ix_total_res);
+
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_shlw_res_id,
+ s0ix_shlw_res);
+
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_deep_res_id,
+ s0ix_deep_res);
+ }
+
+ seq_puts(s, "\n---------------------------------------------------\n");
+ seq_puts(s, "S0IX Type\t\t\t Occurrence\t\t Residency(us)\n");
+ seq_puts(s, "---------------------------------------------------\n");
+
+ seq_printf(s, "S0IX Shallow\t\t\t %10u\t %10llu\n",
+ s0ix_shlw_ctr -
+ conf->suspend_stats.shlw_ctr,
+ (u64)((s0ix_shlw_res -
+ conf->suspend_stats.shlw_res)*10/192));
+
+ seq_printf(s, "S0IX Deep\t\t\t %10u\t %10llu\n",
+ s0ix_deep_ctr -
+ conf->suspend_stats.deep_ctr,
+ (u64)((s0ix_deep_res -
+ conf->suspend_stats.deep_res)*10/192));
+
+ seq_printf(s, "Suspend(With S0ixShallow)\t %10u\t %10llu\n",
+ conf->suspend_stats.shlw_ctr,
+ (u64)(conf->suspend_stats.shlw_res*10)/192);
+
+ seq_printf(s, "Suspend(With S0ixDeep)\t\t %10u\t %10llu\n",
+ conf->suspend_stats.deep_ctr,
+ (u64)(conf->suspend_stats.deep_res*10)/192);
+
+ seq_printf(s, "TOTAL S0IX\t\t\t %10u\t %10llu\n", s0ix_total_ctr,
+ (u64)(s0ix_total_res*10/192));
+ seq_puts(s, "\n-------------------------------------------------\n");
+ seq_puts(s, "\t\tDEVICE STATES\n");
+ seq_puts(s, "-------------------------------------------------\n");
+
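+	/*
+	 * Walk all PCI devices and report whether each one is in D3hot,
+	 * based on the power state field of its PM control/status register.
+	 */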
+ for_each_pci_dev(dev) {
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ d3_state = ((pmcsr & PCI_PM_CTRL_STATE_MASK) ==
+ (__force int)PCI_D3hot) ? 1 : 0;
+
+ seq_printf(s, "pci %04x %04X %s %20.20s: ",
+ dev->vendor, dev->device, dev_name(&dev->dev),
+ dev_driver_string(&dev->dev));
+ seq_printf(s, " d3:%x\n", d3_state);
+ }
+
+ seq_puts(s, "\n--------------------------------------\n");
+ seq_puts(s, "D3/D0i3 Status\n");
+ seq_puts(s, "--------------------------------------\n");
+ seq_puts(s, "Block\t\t D3\t D0i3\n");
+ for (index = 0; index < conf->ioss_d0ix_evts; index++) {
+ seq_printf(s, "%-10s\t %u\t %u\n",
+ conf->ioss_d0ix_data[index].name,
+ d3_sts[index], d0ix_sts[index]);
+ }
+
+ seq_puts(s, "\n--------------------------------------\n");
+ seq_puts(s, "South Complex PowerGate Status\n");
+ seq_puts(s, "--------------------------------------\n");
+ seq_puts(s, "Device\t\t PG\n");
+ for (index = 0; index < conf->ioss_pg_evts; index++) {
+ seq_printf(s, "%-10s\t %u\n",
+ conf->ioss_pg_data[index].name,
+ pg_sts[index]);
+ }
+
+ evtlog->telem_evtid = conf->pss_idle_id;
+ ret = telemetry_read_events(TELEM_PSS, evtlog, 1);
+ if (ret < 0)
+ return ret;
+
+ seq_puts(s, "\n-----------------------------------------\n");
+ seq_puts(s, "North Idle Status\n");
+ seq_puts(s, "-----------------------------------------\n");
+ for (idx = 0; idx < conf->pss_idle_evts - 1; idx++) {
+ pss_idle[idx] = (evtlog->telem_evtlog >>
+ conf->pss_idle_data[idx].bit_pos) &
+ TELEM_MASK_BIT;
+ }
+
+ pss_idle[idx] = (evtlog->telem_evtlog >>
+ conf->pss_idle_data[idx].bit_pos) &
+ TELEM_APL_MASK_PCS_STATE;
+
+ for (index = 0; index < conf->pss_idle_evts; index++) {
+ seq_printf(s, "%-30s %u\n",
+ conf->pss_idle_data[index].name,
+ pss_idle[index]);
+ }
+
+ seq_puts(s, "\nPCS_STATUS Code\n");
+ seq_puts(s, "0:C0 1:C1 2:C1_DN_WT_DEV 3:C2 4:C2_WT_DE_MEM_UP\n");
+ seq_puts(s, "5:C2_WT_DE_MEM_DOWN 6:C2_UP_WT_DEV 7:C2_DN 8:C2_VOA\n");
+ seq_puts(s, "9:C2_VOA_UP 10:S0IX_PRE 11:S0IX\n");
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(telem_soc_states);
+
+static int telem_s0ix_res_get(void *data, u64 *val)
+{
+ struct telemetry_plt_config *plt_config = telemetry_get_pltdata();
+ u64 s0ix_total_res;
+ int ret;
+
+ ret = intel_pmc_s0ix_counter_read(plt_config->pmc, &s0ix_total_res);
+ if (ret) {
+		pr_err("Failed to read S0ix residency\n");
+ return ret;
+ }
+
+ *val = s0ix_total_res;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(telem_s0ix_fops, telem_s0ix_res_get, NULL, "%llu\n");
+
+static int telem_pss_trc_verb_show(struct seq_file *s, void *unused)
+{
+ u32 verbosity;
+ int err;
+
+ err = telemetry_get_trace_verbosity(TELEM_PSS, &verbosity);
+ if (err) {
+ pr_err("Get PSS Trace Verbosity Failed with Error %d\n", err);
+ return -EFAULT;
+ }
+
+ seq_printf(s, "PSS Trace Verbosity %u\n", verbosity);
+ return 0;
+}
+
+static ssize_t telem_pss_trc_verb_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ u32 verbosity;
+ int err;
+
+ err = kstrtou32_from_user(userbuf, count, 0, &verbosity);
+ if (err)
+ return err;
+
+ err = telemetry_set_trace_verbosity(TELEM_PSS, verbosity);
+ if (err) {
+ pr_err("Changing PSS Trace Verbosity Failed. Error %d\n", err);
+ return err;
+ }
+
+ return count;
+}
+
+static int telem_pss_trc_verb_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, telem_pss_trc_verb_show, inode->i_private);
+}
+
+static const struct file_operations telem_pss_trc_verb_ops = {
+ .open = telem_pss_trc_verb_open,
+ .read = seq_read,
+ .write = telem_pss_trc_verb_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int telem_ioss_trc_verb_show(struct seq_file *s, void *unused)
+{
+ u32 verbosity;
+ int err;
+
+ err = telemetry_get_trace_verbosity(TELEM_IOSS, &verbosity);
+ if (err) {
+ pr_err("Get IOSS Trace Verbosity Failed with Error %d\n", err);
+ return -EFAULT;
+ }
+
+ seq_printf(s, "IOSS Trace Verbosity %u\n", verbosity);
+ return 0;
+}
+
+static ssize_t telem_ioss_trc_verb_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ u32 verbosity;
+ int err;
+
+ err = kstrtou32_from_user(userbuf, count, 0, &verbosity);
+ if (err)
+ return err;
+
+ err = telemetry_set_trace_verbosity(TELEM_IOSS, verbosity);
+ if (err) {
+ pr_err("Changing IOSS Trace Verbosity Failed. Error %d\n", err);
+ return err;
+ }
+
+ return count;
+}
+
+static int telem_ioss_trc_verb_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, telem_ioss_trc_verb_show, inode->i_private);
+}
+
+static const struct file_operations telem_ioss_trc_verb_ops = {
+ .open = telem_ioss_trc_verb_open,
+ .read = seq_read,
+ .write = telem_ioss_trc_verb_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
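+/*
+ * Snapshot the shallow/deep S0ix counters on PM_SUSPEND_PREPARE so that
+ * pm_suspend_exit_cb() can attribute the delta across suspend to the
+ * suspend path rather than to runtime idle.
+ */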
+static int pm_suspend_prep_cb(void)
+{
+ struct telemetry_evtlog evtlog[TELEM_MAX_OS_ALLOCATED_EVENTS];
+ struct telemetry_debugfs_conf *conf = debugfs_conf;
+ int ret, index;
+
+ ret = telemetry_raw_read_eventlog(TELEM_IOSS, evtlog,
+ TELEM_MAX_OS_ALLOCATED_EVENTS);
+ if (ret < 0) {
+ suspend_prep_ok = 0;
+ goto out;
+ }
+
+ for (index = 0; index < ret; index++) {
+
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_shlw_occ_id,
+ suspend_shlw_ctr_temp);
+
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_deep_occ_id,
+ suspend_deep_ctr_temp);
+
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_shlw_res_id,
+ suspend_shlw_res_temp);
+
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_deep_res_id,
+ suspend_deep_res_temp);
+ }
+ suspend_prep_ok = 1;
+out:
+ return NOTIFY_OK;
+}
+
+static int pm_suspend_exit_cb(void)
+{
+ struct telemetry_evtlog evtlog[TELEM_MAX_OS_ALLOCATED_EVENTS];
+ static u32 suspend_shlw_ctr_exit, suspend_deep_ctr_exit;
+ static u64 suspend_shlw_res_exit, suspend_deep_res_exit;
+ struct telemetry_debugfs_conf *conf = debugfs_conf;
+ int ret, index;
+
+ if (!suspend_prep_ok)
+ goto out;
+
+ ret = telemetry_raw_read_eventlog(TELEM_IOSS, evtlog,
+ TELEM_MAX_OS_ALLOCATED_EVENTS);
+ if (ret < 0)
+ goto out;
+
+ for (index = 0; index < ret; index++) {
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_shlw_occ_id,
+ suspend_shlw_ctr_exit);
+
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_deep_occ_id,
+ suspend_deep_ctr_exit);
+
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_shlw_res_id,
+ suspend_shlw_res_exit);
+
+ TELEM_CHECK_AND_PARSE_CTRS(conf->s0ix_deep_res_id,
+ suspend_deep_res_exit);
+ }
+
+ if ((suspend_shlw_ctr_exit < suspend_shlw_ctr_temp) ||
+ (suspend_deep_ctr_exit < suspend_deep_ctr_temp) ||
+ (suspend_shlw_res_exit < suspend_shlw_res_temp) ||
+ (suspend_deep_res_exit < suspend_deep_res_temp)) {
+ pr_err("Wrong s0ix counters detected\n");
+ goto out;
+ }
+
+ /*
+ * Due to some design limitations in the firmware, sometimes the
+ * counters do not get updated by the time we reach here. As a
+ * workaround, we try to see if this was a genuine case of sleep
+ * failure or not by cross-checking from PMC GCR registers directly.
+ */
+ if (suspend_shlw_ctr_exit == suspend_shlw_ctr_temp &&
+ suspend_deep_ctr_exit == suspend_deep_ctr_temp) {
+ struct telemetry_plt_config *plt_config = telemetry_get_pltdata();
+ struct intel_pmc_dev *pmc = plt_config->pmc;
+
+ ret = intel_pmc_gcr_read64(pmc, PMC_GCR_TELEM_SHLW_S0IX_REG,
+ &suspend_shlw_res_exit);
+ if (ret < 0)
+ goto out;
+
+ ret = intel_pmc_gcr_read64(pmc, PMC_GCR_TELEM_DEEP_S0IX_REG,
+ &suspend_deep_res_exit);
+ if (ret < 0)
+ goto out;
+
+ if (suspend_shlw_res_exit > suspend_shlw_res_temp)
+ suspend_shlw_ctr_exit++;
+
+ if (suspend_deep_res_exit > suspend_deep_res_temp)
+ suspend_deep_ctr_exit++;
+ }
+
+ suspend_shlw_ctr_exit -= suspend_shlw_ctr_temp;
+ suspend_deep_ctr_exit -= suspend_deep_ctr_temp;
+ suspend_shlw_res_exit -= suspend_shlw_res_temp;
+ suspend_deep_res_exit -= suspend_deep_res_temp;
+
+ if (suspend_shlw_ctr_exit != 0) {
+ conf->suspend_stats.shlw_ctr +=
+ suspend_shlw_ctr_exit;
+
+ conf->suspend_stats.shlw_res +=
+ suspend_shlw_res_exit;
+ }
+
+ if (suspend_deep_ctr_exit != 0) {
+ conf->suspend_stats.deep_ctr +=
+ suspend_deep_ctr_exit;
+
+ conf->suspend_stats.deep_res +=
+ suspend_deep_res_exit;
+ }
+
+out:
+ suspend_prep_ok = 0;
+ return NOTIFY_OK;
+}
+
+static int pm_notification(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ return pm_suspend_prep_cb();
+ case PM_POST_SUSPEND:
+ return pm_suspend_exit_cb();
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block pm_notifier = {
+ .notifier_call = pm_notification,
+};
+
+static int __init telemetry_debugfs_init(void)
+{
+ const struct x86_cpu_id *id;
+ int err;
+ struct dentry *dir;
+
+ /* Only APL supported for now */
+ id = x86_match_cpu(telemetry_debugfs_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ debugfs_conf = (struct telemetry_debugfs_conf *)id->driver_data;
+
+ if (!telemetry_get_pltdata()) {
+ pr_info("Invalid pltconfig, ensure IPC1 device is enabled in BIOS\n");
+ return -ENODEV;
+ }
+
+ err = telemetry_debugfs_check_evts();
+ if (err < 0) {
+ pr_info("telemetry_debugfs_check_evts failed\n");
+ return -EINVAL;
+ }
+
+ register_pm_notifier(&pm_notifier);
+
+ dir = debugfs_create_dir("telemetry", NULL);
+ debugfs_conf->telemetry_dbg_dir = dir;
+
+ debugfs_create_file("pss_info", S_IFREG | S_IRUGO, dir, NULL,
+ &telem_pss_states_fops);
+ debugfs_create_file("ioss_info", S_IFREG | S_IRUGO, dir, NULL,
+ &telem_ioss_states_fops);
+ debugfs_create_file("soc_states", S_IFREG | S_IRUGO, dir, NULL,
+ &telem_soc_states_fops);
+ debugfs_create_file("s0ix_residency_usec", S_IFREG | S_IRUGO, dir, NULL,
+ &telem_s0ix_fops);
+ debugfs_create_file("pss_trace_verbosity", S_IFREG | S_IRUGO, dir, NULL,
+ &telem_pss_trc_verb_ops);
+ debugfs_create_file("ioss_trace_verbosity", S_IFREG | S_IRUGO, dir,
+ NULL, &telem_ioss_trc_verb_ops);
+ return 0;
+}
+
+static void __exit telemetry_debugfs_exit(void)
+{
+ debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir);
+ debugfs_conf->telemetry_dbg_dir = NULL;
+ unregister_pm_notifier(&pm_notifier);
+}
+
+late_initcall(telemetry_debugfs_init);
+module_exit(telemetry_debugfs_exit);
+
+MODULE_AUTHOR("Souvik Kumar Chakravarty <souvik.k.chakravarty@intel.com>");
+MODULE_DESCRIPTION("Intel SoC Telemetry debugfs Interface");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/telemetry/pltdrv.c b/drivers/platform/x86/intel/telemetry/pltdrv.c
new file mode 100644
index 0000000000..06311d0e94
--- /dev/null
+++ b/drivers/platform/x86/intel/telemetry/pltdrv.c
@@ -0,0 +1,1188 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel SOC Telemetry Platform Driver: Currently supports APL
+ * Copyright (c) 2015, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This file provides the platform-specific telemetry implementation for APL.
+ * It uses the PUNIT and PMC IPC interfaces for configuring the counters.
+ * The accumulated results are fetched from SRAM.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+#include <asm/intel_punit_ipc.h>
+#include <asm/intel_telemetry.h>
+
+#define DRIVER_NAME "intel_telemetry"
+#define DRIVER_VERSION "1.0.0"
+
+#define TELEM_TRC_VERBOSITY_MASK 0x3
+
+#define TELEM_MIN_PERIOD(x) ((x) & 0x7F0000)
+#define TELEM_MAX_PERIOD(x) ((x) & 0x7F000000)
+#define TELEM_SAMPLE_PERIOD_INVALID(x) ((x) & (BIT(7)))
+#define TELEM_CLEAR_SAMPLE_PERIOD(x) ((x) &= ~0x7F)
+
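+/*
+ * The sampling period occupies the low 7 bits of the event control
+ * register; bit 7 flags an invalid period. The supported minimum and
+ * maximum periods are advertised in the TELEM_INFO word at bits 16-22
+ * and 24-30 respectively.
+ */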
+#define TELEM_SAMPLING_DEFAULT_PERIOD 0xD
+
+#define TELEM_MAX_EVENTS_SRAM 28
+#define TELEM_SSRAM_STARTTIME_OFFSET 8
+#define TELEM_SSRAM_EVTLOG_OFFSET 16
+
+#define IOSS_TELEM 0xeb
+#define IOSS_TELEM_EVENT_READ 0x0
+#define IOSS_TELEM_EVENT_WRITE 0x1
+#define IOSS_TELEM_INFO_READ 0x2
+#define IOSS_TELEM_TRACE_CTL_READ 0x5
+#define IOSS_TELEM_TRACE_CTL_WRITE 0x6
+#define IOSS_TELEM_EVENT_CTL_READ 0x7
+#define IOSS_TELEM_EVENT_CTL_WRITE 0x8
+#define IOSS_TELEM_EVT_WRITE_SIZE 0x3
+
+#define TELEM_INFO_SRAMEVTS_MASK 0xFF00
+#define TELEM_INFO_SRAMEVTS_SHIFT 0x8
+#define TELEM_SSRAM_READ_TIMEOUT 10
+
+#define TELEM_INFO_NENABLES_MASK 0xFF
+#define TELEM_EVENT_ENABLE 0x8000
+
+#define TELEM_MASK_BIT 1
+#define TELEM_MASK_BYTE 0xFF
+#define BYTES_PER_LONG 8
+#define TELEM_MASK_PCS_STATE 0xF
+
+#define TELEM_DISABLE(x) ((x) &= ~(BIT(31)))
+#define TELEM_CLEAR_EVENTS(x) ((x) |= (BIT(30)))
+#define TELEM_ENABLE_SRAM_EVT_TRACE(x) ((x) &= ~(BIT(30) | BIT(24)))
+#define TELEM_ENABLE_PERIODIC(x) ((x) |= (BIT(23) | BIT(31) | BIT(7)))
+#define TELEM_EXTRACT_VERBOSITY(x, y) ((y) = (((x) >> 27) & 0x3))
+#define TELEM_CLEAR_VERBOSITY_BITS(x) ((x) &= ~(BIT(27) | BIT(28)))
+#define TELEM_SET_VERBOSITY_BITS(x, y) ((x) |= ((y) << 27))
+
+enum telemetry_action {
+ TELEM_UPDATE = 0,
+ TELEM_ADD,
+ TELEM_RESET,
+ TELEM_ACTION_NONE
+};
+
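+/*
+ * Mirrors the layout of the event log in SSRAM: a qword timestamp at
+ * offset 0, the trace start time at TELEM_SSRAM_STARTTIME_OFFSET and
+ * up to TELEM_MAX_EVENTS_SRAM qword counters starting at
+ * TELEM_SSRAM_EVTLOG_OFFSET.
+ */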
+struct telem_ssram_region {
+ u64 timestamp;
+ u64 start_time;
+ u64 events[TELEM_MAX_EVENTS_SRAM];
+};
+
+static struct telemetry_plt_config *telm_conf;
+
+/*
+ * The following counters are programmed by default during setup.
+ * Only 20 of the SRAM event slots are allocated to the kernel driver.
+ */
+static struct telemetry_evtmap
+ telemetry_apl_ioss_default_events[TELEM_MAX_OS_ALLOCATED_EVENTS] = {
+ {"SOC_S0IX_TOTAL_RES", 0x4800},
+ {"SOC_S0IX_TOTAL_OCC", 0x4000},
+ {"SOC_S0IX_SHALLOW_RES", 0x4801},
+ {"SOC_S0IX_SHALLOW_OCC", 0x4001},
+ {"SOC_S0IX_DEEP_RES", 0x4802},
+ {"SOC_S0IX_DEEP_OCC", 0x4002},
+ {"PMC_POWER_GATE", 0x5818},
+ {"PMC_D3_STATES", 0x5819},
+ {"PMC_D0I3_STATES", 0x581A},
+ {"PMC_S0IX_WAKE_REASON_GPIO", 0x6000},
+ {"PMC_S0IX_WAKE_REASON_TIMER", 0x6001},
+ {"PMC_S0IX_WAKE_REASON_VNNREQ", 0x6002},
+ {"PMC_S0IX_WAKE_REASON_LOWPOWER", 0x6003},
+ {"PMC_S0IX_WAKE_REASON_EXTERNAL", 0x6004},
+ {"PMC_S0IX_WAKE_REASON_MISC", 0x6005},
+ {"PMC_S0IX_BLOCKING_IPS_D3_D0I3", 0x6006},
+ {"PMC_S0IX_BLOCKING_IPS_PG", 0x6007},
+ {"PMC_S0IX_BLOCKING_MISC_IPS_PG", 0x6008},
+ {"PMC_S0IX_BLOCK_IPS_VNN_REQ", 0x6009},
+ {"PMC_S0IX_BLOCK_IPS_CLOCKS", 0x600B},
+};
+
+
+static struct telemetry_evtmap
+ telemetry_apl_pss_default_events[TELEM_MAX_OS_ALLOCATED_EVENTS] = {
+ {"IA_CORE0_C6_RES", 0x0400},
+ {"IA_CORE0_C6_CTR", 0x0000},
+ {"IA_MODULE0_C7_RES", 0x0410},
+ {"IA_MODULE0_C7_CTR", 0x000E},
+ {"IA_C0_RES", 0x0805},
+ {"PCS_LTR", 0x2801},
+ {"PSTATES", 0x2802},
+ {"SOC_S0I3_RES", 0x0409},
+ {"SOC_S0I3_CTR", 0x000A},
+ {"PCS_S0I3_CTR", 0x0009},
+ {"PCS_C1E_RES", 0x041A},
+ {"PCS_IDLE_STATUS", 0x2806},
+ {"IA_PERF_LIMITS", 0x280B},
+ {"GT_PERF_LIMITS", 0x280C},
+ {"PCS_WAKEUP_S0IX_CTR", 0x0030},
+ {"PCS_IDLE_BLOCKED", 0x2C00},
+ {"PCS_S0IX_BLOCKED", 0x2C01},
+ {"PCS_S0IX_WAKE_REASONS", 0x2C02},
+ {"PCS_LTR_BLOCKING", 0x2C03},
+ {"PC2_AND_MEM_SHALLOW_IDLE_RES", 0x1D40},
+};
+
+static struct telemetry_evtmap
+ telemetry_glk_pss_default_events[TELEM_MAX_OS_ALLOCATED_EVENTS] = {
+ {"IA_CORE0_C6_RES", 0x0400},
+ {"IA_CORE0_C6_CTR", 0x0000},
+ {"IA_MODULE0_C7_RES", 0x0410},
+ {"IA_MODULE0_C7_CTR", 0x000C},
+ {"IA_C0_RES", 0x0805},
+ {"PCS_LTR", 0x2801},
+ {"PSTATES", 0x2802},
+ {"SOC_S0I3_RES", 0x0407},
+ {"SOC_S0I3_CTR", 0x0008},
+ {"PCS_S0I3_CTR", 0x0007},
+ {"PCS_C1E_RES", 0x0414},
+ {"PCS_IDLE_STATUS", 0x2806},
+ {"IA_PERF_LIMITS", 0x280B},
+ {"GT_PERF_LIMITS", 0x280C},
+ {"PCS_WAKEUP_S0IX_CTR", 0x0025},
+ {"PCS_IDLE_BLOCKED", 0x2C00},
+ {"PCS_S0IX_BLOCKED", 0x2C01},
+ {"PCS_S0IX_WAKE_REASONS", 0x2C02},
+ {"PCS_LTR_BLOCKING", 0x2C03},
+ {"PC2_AND_MEM_SHALLOW_IDLE_RES", 0x1D40},
+};
+
+/* APL specific Data */
+static struct telemetry_plt_config telem_apl_config = {
+ .pss_config = {
+ .telem_evts = telemetry_apl_pss_default_events,
+ },
+ .ioss_config = {
+ .telem_evts = telemetry_apl_ioss_default_events,
+ },
+};
+
+/* GLK specific Data */
+static struct telemetry_plt_config telem_glk_config = {
+ .pss_config = {
+ .telem_evts = telemetry_glk_pss_default_events,
+ },
+ .ioss_config = {
+ .telem_evts = telemetry_apl_ioss_default_events,
+ },
+};
+
+static const struct x86_cpu_id telemetry_cpu_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &telem_apl_config),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &telem_glk_config),
+ {}
+};
+
+MODULE_DEVICE_TABLE(x86cpu, telemetry_cpu_ids);
+
+static inline int telem_get_unitconfig(enum telemetry_unit telem_unit,
+ struct telemetry_unit_config **unit_config)
+{
+ if (telem_unit == TELEM_PSS)
+ *unit_config = &(telm_conf->pss_config);
+ else if (telem_unit == TELEM_IOSS)
+ *unit_config = &(telm_conf->ioss_config);
+ else
+ return -EINVAL;
+
+ return 0;
+
+}
+
+static int telemetry_check_evtid(enum telemetry_unit telem_unit,
+ u32 *evtmap, u8 len,
+ enum telemetry_action action)
+{
+ struct telemetry_unit_config *unit_config;
+ int ret;
+
+ ret = telem_get_unitconfig(telem_unit, &unit_config);
+ if (ret < 0)
+ return ret;
+
+ switch (action) {
+ case TELEM_RESET:
+ if (len > TELEM_MAX_EVENTS_SRAM)
+ return -EINVAL;
+
+ break;
+
+ case TELEM_UPDATE:
+ if (len > TELEM_MAX_EVENTS_SRAM)
+ return -EINVAL;
+
+ if ((len > 0) && (evtmap == NULL))
+ return -EINVAL;
+
+ break;
+
+ case TELEM_ADD:
+ if ((len + unit_config->ssram_evts_used) >
+ TELEM_MAX_EVENTS_SRAM)
+ return -EINVAL;
+
+ if ((len > 0) && (evtmap == NULL))
+ return -EINVAL;
+
+ break;
+
+ default:
+ pr_err("Unknown Telemetry action specified %d\n", action);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
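+/*
+ * IOSS events are programmed through SCU IPC: the event id (with the
+ * enable bit set) is packed above the 8-bit SRAM slot index, forming
+ * the 3-byte payload sent with IOSS_TELEM_EVENT_WRITE.
+ */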
+static inline int telemetry_plt_config_ioss_event(u32 evt_id, int index)
+{
+ u32 write_buf;
+
+ write_buf = evt_id | TELEM_EVENT_ENABLE;
+ write_buf <<= BITS_PER_BYTE;
+ write_buf |= index;
+
+ return intel_scu_ipc_dev_command(telm_conf->scu, IOSS_TELEM,
+ IOSS_TELEM_EVENT_WRITE, &write_buf,
+ IOSS_TELEM_EVT_WRITE_SIZE, NULL, 0);
+}
+
+static inline int telemetry_plt_config_pss_event(u32 evt_id, int index)
+{
+ u32 write_buf;
+ int ret;
+
+ write_buf = evt_id | TELEM_EVENT_ENABLE;
+ ret = intel_punit_ipc_command(IPC_PUNIT_BIOS_WRITE_TELE_EVENT,
+ index, 0, &write_buf, NULL);
+
+ return ret;
+}
+
+static int telemetry_setup_iossevtconfig(struct telemetry_evtconfig evtconfig,
+ enum telemetry_action action)
+{
+ struct intel_scu_ipc_dev *scu = telm_conf->scu;
+ u8 num_ioss_evts, ioss_period;
+ int ret, index, idx;
+ u32 *ioss_evtmap;
+ u32 telem_ctrl;
+
+ num_ioss_evts = evtconfig.num_evts;
+ ioss_period = evtconfig.period;
+ ioss_evtmap = evtconfig.evtmap;
+
+ /* Get telemetry EVENT CTL */
+ ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM,
+ IOSS_TELEM_EVENT_CTL_READ, NULL, 0,
+ &telem_ctrl, sizeof(telem_ctrl));
+ if (ret) {
+ pr_err("IOSS TELEM_CTRL Read Failed\n");
+ return ret;
+ }
+
+ /* Disable Telemetry */
+ TELEM_DISABLE(telem_ctrl);
+
+ ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM,
+ IOSS_TELEM_EVENT_CTL_WRITE, &telem_ctrl,
+ sizeof(telem_ctrl), NULL, 0);
+ if (ret) {
+ pr_err("IOSS TELEM_CTRL Event Disable Write Failed\n");
+ return ret;
+ }
+
+
+ /* Reset Everything */
+ if (action == TELEM_RESET) {
+ /* Clear All Events */
+ TELEM_CLEAR_EVENTS(telem_ctrl);
+
+ ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM,
+ IOSS_TELEM_EVENT_CTL_WRITE,
+ &telem_ctrl, sizeof(telem_ctrl),
+ NULL, 0);
+ if (ret) {
+ pr_err("IOSS TELEM_CTRL Event Disable Write Failed\n");
+ return ret;
+ }
+ telm_conf->ioss_config.ssram_evts_used = 0;
+
+ /* Configure Events */
+ for (idx = 0; idx < num_ioss_evts; idx++) {
+ if (telemetry_plt_config_ioss_event(
+ telm_conf->ioss_config.telem_evts[idx].evt_id,
+ idx)) {
+				pr_err("IOSS TELEM_RESET Fail for Event %x\n",
+ telm_conf->ioss_config.telem_evts[idx].evt_id);
+ continue;
+ }
+ telm_conf->ioss_config.ssram_evts_used++;
+ }
+ }
+
+ /* Re-Configure Everything */
+ if (action == TELEM_UPDATE) {
+ /* Clear All Events */
+ TELEM_CLEAR_EVENTS(telem_ctrl);
+
+ ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM,
+ IOSS_TELEM_EVENT_CTL_WRITE,
+ &telem_ctrl, sizeof(telem_ctrl),
+ NULL, 0);
+ if (ret) {
+ pr_err("IOSS TELEM_CTRL Event Disable Write Failed\n");
+ return ret;
+ }
+ telm_conf->ioss_config.ssram_evts_used = 0;
+
+ /* Configure Events */
+ for (index = 0; index < num_ioss_evts; index++) {
+ telm_conf->ioss_config.telem_evts[index].evt_id =
+ ioss_evtmap[index];
+
+ if (telemetry_plt_config_ioss_event(
+ telm_conf->ioss_config.telem_evts[index].evt_id,
+ index)) {
+				pr_err("IOSS TELEM_UPDATE Fail for Event %x\n",
+ ioss_evtmap[index]);
+ continue;
+ }
+ telm_conf->ioss_config.ssram_evts_used++;
+ }
+ }
+
+ /* Add some Events */
+ if (action == TELEM_ADD) {
+ /* Configure Events */
+ for (index = telm_conf->ioss_config.ssram_evts_used, idx = 0;
+ idx < num_ioss_evts; index++, idx++) {
+ telm_conf->ioss_config.telem_evts[index].evt_id =
+ ioss_evtmap[idx];
+
+ if (telemetry_plt_config_ioss_event(
+ telm_conf->ioss_config.telem_evts[index].evt_id,
+ index)) {
+ pr_err("IOSS TELEM_ADD Fail for Event %x\n",
+ ioss_evtmap[idx]);
+ continue;
+ }
+ telm_conf->ioss_config.ssram_evts_used++;
+ }
+ }
+
+ /* Enable Periodic Telemetry Events and enable SRAM trace */
+ TELEM_CLEAR_SAMPLE_PERIOD(telem_ctrl);
+ TELEM_ENABLE_SRAM_EVT_TRACE(telem_ctrl);
+ TELEM_ENABLE_PERIODIC(telem_ctrl);
+ telem_ctrl |= ioss_period;
+
+ ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM,
+ IOSS_TELEM_EVENT_CTL_WRITE,
+ &telem_ctrl, sizeof(telem_ctrl), NULL, 0);
+ if (ret) {
+ pr_err("IOSS TELEM_CTRL Event Enable Write Failed\n");
+ return ret;
+ }
+
+ telm_conf->ioss_config.curr_period = ioss_period;
+
+ return 0;
+}
+
+
+static int telemetry_setup_pssevtconfig(struct telemetry_evtconfig evtconfig,
+ enum telemetry_action action)
+{
+ u8 num_pss_evts, pss_period;
+ int ret, index, idx;
+ u32 *pss_evtmap;
+ u32 telem_ctrl;
+
+ num_pss_evts = evtconfig.num_evts;
+ pss_period = evtconfig.period;
+ pss_evtmap = evtconfig.evtmap;
+
+ /* PSS Config */
+ /* Get telemetry EVENT CTL */
+ ret = intel_punit_ipc_command(IPC_PUNIT_BIOS_READ_TELE_EVENT_CTRL,
+ 0, 0, NULL, &telem_ctrl);
+ if (ret) {
+ pr_err("PSS TELEM_CTRL Read Failed\n");
+ return ret;
+ }
+
+ /* Disable Telemetry */
+ TELEM_DISABLE(telem_ctrl);
+ ret = intel_punit_ipc_command(IPC_PUNIT_BIOS_WRITE_TELE_EVENT_CTRL,
+ 0, 0, &telem_ctrl, NULL);
+ if (ret) {
+ pr_err("PSS TELEM_CTRL Event Disable Write Failed\n");
+ return ret;
+ }
+
+ /* Reset Everything */
+ if (action == TELEM_RESET) {
+ /* Clear All Events */
+ TELEM_CLEAR_EVENTS(telem_ctrl);
+
+ ret = intel_punit_ipc_command(
+ IPC_PUNIT_BIOS_WRITE_TELE_EVENT_CTRL,
+ 0, 0, &telem_ctrl, NULL);
+ if (ret) {
+ pr_err("PSS TELEM_CTRL Event Disable Write Failed\n");
+ return ret;
+ }
+ telm_conf->pss_config.ssram_evts_used = 0;
+ /* Configure Events */
+ for (idx = 0; idx < num_pss_evts; idx++) {
+ if (telemetry_plt_config_pss_event(
+ telm_conf->pss_config.telem_evts[idx].evt_id,
+ idx)) {
+ pr_err("PSS TELEM_RESET Fail for Event %x\n",
+ telm_conf->pss_config.telem_evts[idx].evt_id);
+ continue;
+ }
+ telm_conf->pss_config.ssram_evts_used++;
+ }
+ }
+
+ /* Re-Configure Everything */
+ if (action == TELEM_UPDATE) {
+ /* Clear All Events */
+ TELEM_CLEAR_EVENTS(telem_ctrl);
+
+ ret = intel_punit_ipc_command(
+ IPC_PUNIT_BIOS_WRITE_TELE_EVENT_CTRL,
+ 0, 0, &telem_ctrl, NULL);
+ if (ret) {
+ pr_err("PSS TELEM_CTRL Event Disable Write Failed\n");
+ return ret;
+ }
+ telm_conf->pss_config.ssram_evts_used = 0;
+
+ /* Configure Events */
+ for (index = 0; index < num_pss_evts; index++) {
+ telm_conf->pss_config.telem_evts[index].evt_id =
+ pss_evtmap[index];
+
+ if (telemetry_plt_config_pss_event(
+ telm_conf->pss_config.telem_evts[index].evt_id,
+ index)) {
+ pr_err("PSS TELEM_UPDATE Fail for Event %x\n",
+ pss_evtmap[index]);
+ continue;
+ }
+ telm_conf->pss_config.ssram_evts_used++;
+ }
+ }
+
+ /* Add some Events */
+ if (action == TELEM_ADD) {
+ /* Configure Events */
+ for (index = telm_conf->pss_config.ssram_evts_used, idx = 0;
+ idx < num_pss_evts; index++, idx++) {
+
+ telm_conf->pss_config.telem_evts[index].evt_id =
+ pss_evtmap[idx];
+
+ if (telemetry_plt_config_pss_event(
+ telm_conf->pss_config.telem_evts[index].evt_id,
+ index)) {
+ pr_err("PSS TELEM_ADD Fail for Event %x\n",
+ pss_evtmap[idx]);
+ continue;
+ }
+ telm_conf->pss_config.ssram_evts_used++;
+ }
+ }
+
+ /* Enable Periodic Telemetry Events and enable SRAM trace */
+ TELEM_CLEAR_SAMPLE_PERIOD(telem_ctrl);
+ TELEM_ENABLE_SRAM_EVT_TRACE(telem_ctrl);
+ TELEM_ENABLE_PERIODIC(telem_ctrl);
+ telem_ctrl |= pss_period;
+
+ ret = intel_punit_ipc_command(IPC_PUNIT_BIOS_WRITE_TELE_EVENT_CTRL,
+ 0, 0, &telem_ctrl, NULL);
+ if (ret) {
+ pr_err("PSS TELEM_CTRL Event Enable Write Failed\n");
+ return ret;
+ }
+
+ telm_conf->pss_config.curr_period = pss_period;
+
+ return 0;
+}
+
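+/*
+ * Validate and apply an event configuration for both units. TELEM_RESET
+ * reprograms the default event set, TELEM_UPDATE replaces the currently
+ * configured events (rejected with -EBUSY while telemetry is in use) and
+ * TELEM_ADD appends to the events already programmed into SSRAM.
+ */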
+static int telemetry_setup_evtconfig(struct telemetry_evtconfig pss_evtconfig,
+ struct telemetry_evtconfig ioss_evtconfig,
+ enum telemetry_action action)
+{
+ int ret;
+
+ mutex_lock(&(telm_conf->telem_lock));
+
+ if ((action == TELEM_UPDATE) && (telm_conf->telem_in_use)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = telemetry_check_evtid(TELEM_PSS, pss_evtconfig.evtmap,
+ pss_evtconfig.num_evts, action);
+ if (ret)
+ goto out;
+
+ ret = telemetry_check_evtid(TELEM_IOSS, ioss_evtconfig.evtmap,
+ ioss_evtconfig.num_evts, action);
+ if (ret)
+ goto out;
+
+ if (ioss_evtconfig.num_evts) {
+ ret = telemetry_setup_iossevtconfig(ioss_evtconfig, action);
+ if (ret)
+ goto out;
+ }
+
+ if (pss_evtconfig.num_evts) {
+ ret = telemetry_setup_pssevtconfig(pss_evtconfig, action);
+ if (ret)
+ goto out;
+ }
+
+ if ((action == TELEM_UPDATE) || (action == TELEM_ADD))
+ telm_conf->telem_in_use = true;
+ else
+ telm_conf->telem_in_use = false;
+
+out:
+ mutex_unlock(&(telm_conf->telem_lock));
+ return ret;
+}
+
+static int telemetry_setup(struct platform_device *pdev)
+{
+ struct telemetry_evtconfig pss_evtconfig, ioss_evtconfig;
+ u32 read_buf, events, event_regs;
+ int ret;
+
+ ret = intel_scu_ipc_dev_command(telm_conf->scu, IOSS_TELEM,
+ IOSS_TELEM_INFO_READ, NULL, 0,
+ &read_buf, sizeof(read_buf));
+ if (ret) {
+ dev_err(&pdev->dev, "IOSS TELEM_INFO Read Failed\n");
+ return ret;
+ }
+
+ /* Get telemetry Info */
+ events = (read_buf & TELEM_INFO_SRAMEVTS_MASK) >>
+ TELEM_INFO_SRAMEVTS_SHIFT;
+ event_regs = read_buf & TELEM_INFO_NENABLES_MASK;
+ if ((events < TELEM_MAX_EVENTS_SRAM) ||
+ (event_regs < TELEM_MAX_EVENTS_SRAM)) {
+ dev_err(&pdev->dev, "IOSS:Insufficient Space for SRAM Trace\n");
+ dev_err(&pdev->dev, "SRAM Events %d; Event Regs %d\n",
+ events, event_regs);
+ return -ENOMEM;
+ }
+
+ telm_conf->ioss_config.min_period = TELEM_MIN_PERIOD(read_buf);
+ telm_conf->ioss_config.max_period = TELEM_MAX_PERIOD(read_buf);
+
+ /* PUNIT Mailbox Setup */
+ ret = intel_punit_ipc_command(IPC_PUNIT_BIOS_READ_TELE_INFO, 0, 0,
+ NULL, &read_buf);
+ if (ret) {
+ dev_err(&pdev->dev, "PSS TELEM_INFO Read Failed\n");
+ return ret;
+ }
+
+ /* Get telemetry Info */
+ events = (read_buf & TELEM_INFO_SRAMEVTS_MASK) >>
+ TELEM_INFO_SRAMEVTS_SHIFT;
+	event_regs = read_buf & TELEM_INFO_NENABLES_MASK;
+ if ((events < TELEM_MAX_EVENTS_SRAM) ||
+ (event_regs < TELEM_MAX_EVENTS_SRAM)) {
+ dev_err(&pdev->dev, "PSS:Insufficient Space for SRAM Trace\n");
+ dev_err(&pdev->dev, "SRAM Events %d; Event Regs %d\n",
+ events, event_regs);
+ return -ENOMEM;
+ }
+
+ telm_conf->pss_config.min_period = TELEM_MIN_PERIOD(read_buf);
+ telm_conf->pss_config.max_period = TELEM_MAX_PERIOD(read_buf);
+
+ pss_evtconfig.evtmap = NULL;
+ pss_evtconfig.num_evts = TELEM_MAX_OS_ALLOCATED_EVENTS;
+ pss_evtconfig.period = TELEM_SAMPLING_DEFAULT_PERIOD;
+
+ ioss_evtconfig.evtmap = NULL;
+ ioss_evtconfig.num_evts = TELEM_MAX_OS_ALLOCATED_EVENTS;
+ ioss_evtconfig.period = TELEM_SAMPLING_DEFAULT_PERIOD;
+
+ ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig,
+ TELEM_RESET);
+ if (ret) {
+ dev_err(&pdev->dev, "TELEMETRY Setup Failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int telemetry_plt_update_events(struct telemetry_evtconfig pss_evtconfig,
+ struct telemetry_evtconfig ioss_evtconfig)
+{
+ int ret;
+
+ if ((pss_evtconfig.num_evts > 0) &&
+ (TELEM_SAMPLE_PERIOD_INVALID(pss_evtconfig.period))) {
+ pr_err("PSS Sampling Period Out of Range\n");
+ return -EINVAL;
+ }
+
+ if ((ioss_evtconfig.num_evts > 0) &&
+ (TELEM_SAMPLE_PERIOD_INVALID(ioss_evtconfig.period))) {
+ pr_err("IOSS Sampling Period Out of Range\n");
+ return -EINVAL;
+ }
+
+ ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig,
+ TELEM_UPDATE);
+ if (ret)
+ pr_err("TELEMETRY Config Failed\n");
+
+ return ret;
+}
+
+
+static int telemetry_plt_set_sampling_period(u8 pss_period, u8 ioss_period)
+{
+ u32 telem_ctrl = 0;
+ int ret = 0;
+
+ mutex_lock(&(telm_conf->telem_lock));
+ if (ioss_period) {
+ struct intel_scu_ipc_dev *scu = telm_conf->scu;
+
+ if (TELEM_SAMPLE_PERIOD_INVALID(ioss_period)) {
+ pr_err("IOSS Sampling Period Out of Range\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Get telemetry EVENT CTL */
+ ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM,
+ IOSS_TELEM_EVENT_CTL_READ, NULL, 0,
+ &telem_ctrl, sizeof(telem_ctrl));
+ if (ret) {
+ pr_err("IOSS TELEM_CTRL Read Failed\n");
+ goto out;
+ }
+
+ /* Disable Telemetry */
+ TELEM_DISABLE(telem_ctrl);
+
+ ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM,
+ IOSS_TELEM_EVENT_CTL_WRITE,
+ &telem_ctrl, sizeof(telem_ctrl),
+ NULL, 0);
+ if (ret) {
+ pr_err("IOSS TELEM_CTRL Event Disable Write Failed\n");
+ goto out;
+ }
+
+ /* Enable Periodic Telemetry Events and enable SRAM trace */
+ TELEM_CLEAR_SAMPLE_PERIOD(telem_ctrl);
+ TELEM_ENABLE_SRAM_EVT_TRACE(telem_ctrl);
+ TELEM_ENABLE_PERIODIC(telem_ctrl);
+ telem_ctrl |= ioss_period;
+
+ ret = intel_scu_ipc_dev_command(scu, IOSS_TELEM,
+ IOSS_TELEM_EVENT_CTL_WRITE,
+ &telem_ctrl, sizeof(telem_ctrl),
+ NULL, 0);
+ if (ret) {
+ pr_err("IOSS TELEM_CTRL Event Enable Write Failed\n");
+ goto out;
+ }
+ telm_conf->ioss_config.curr_period = ioss_period;
+ }
+
+ if (pss_period) {
+ if (TELEM_SAMPLE_PERIOD_INVALID(pss_period)) {
+ pr_err("PSS Sampling Period Out of Range\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Get telemetry EVENT CTL */
+ ret = intel_punit_ipc_command(
+ IPC_PUNIT_BIOS_READ_TELE_EVENT_CTRL,
+ 0, 0, NULL, &telem_ctrl);
+ if (ret) {
+ pr_err("PSS TELEM_CTRL Read Failed\n");
+ goto out;
+ }
+
+ /* Disable Telemetry */
+ TELEM_DISABLE(telem_ctrl);
+ ret = intel_punit_ipc_command(
+ IPC_PUNIT_BIOS_WRITE_TELE_EVENT_CTRL,
+ 0, 0, &telem_ctrl, NULL);
+ if (ret) {
+ pr_err("PSS TELEM_CTRL Event Disable Write Failed\n");
+ goto out;
+ }
+
+ /* Enable Periodic Telemetry Events and enable SRAM trace */
+ TELEM_CLEAR_SAMPLE_PERIOD(telem_ctrl);
+ TELEM_ENABLE_SRAM_EVT_TRACE(telem_ctrl);
+ TELEM_ENABLE_PERIODIC(telem_ctrl);
+ telem_ctrl |= pss_period;
+
+ ret = intel_punit_ipc_command(
+ IPC_PUNIT_BIOS_WRITE_TELE_EVENT_CTRL,
+ 0, 0, &telem_ctrl, NULL);
+ if (ret) {
+ pr_err("PSS TELEM_CTRL Event Enable Write Failed\n");
+ goto out;
+ }
+ telm_conf->pss_config.curr_period = pss_period;
+ }
+
+out:
+ mutex_unlock(&(telm_conf->telem_lock));
+ return ret;
+}
+
+
+static int telemetry_plt_get_sampling_period(u8 *pss_min_period,
+ u8 *pss_max_period,
+ u8 *ioss_min_period,
+ u8 *ioss_max_period)
+{
+ *pss_min_period = telm_conf->pss_config.min_period;
+ *pss_max_period = telm_conf->pss_config.max_period;
+ *ioss_min_period = telm_conf->ioss_config.min_period;
+ *ioss_max_period = telm_conf->ioss_config.max_period;
+
+ return 0;
+}
+
+
+static int telemetry_plt_reset_events(void)
+{
+ struct telemetry_evtconfig pss_evtconfig, ioss_evtconfig;
+ int ret;
+
+ pss_evtconfig.evtmap = NULL;
+ pss_evtconfig.num_evts = TELEM_MAX_OS_ALLOCATED_EVENTS;
+ pss_evtconfig.period = TELEM_SAMPLING_DEFAULT_PERIOD;
+
+ ioss_evtconfig.evtmap = NULL;
+ ioss_evtconfig.num_evts = TELEM_MAX_OS_ALLOCATED_EVENTS;
+ ioss_evtconfig.period = TELEM_SAMPLING_DEFAULT_PERIOD;
+
+ ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig,
+ TELEM_RESET);
+ if (ret)
+ pr_err("TELEMETRY Reset Failed\n");
+
+ return ret;
+}
+
+
+static int telemetry_plt_get_eventconfig(struct telemetry_evtconfig *pss_config,
+ struct telemetry_evtconfig *ioss_config,
+ int pss_len, int ioss_len)
+{
+ u32 *pss_evtmap, *ioss_evtmap;
+ u32 index;
+
+ pss_evtmap = pss_config->evtmap;
+ ioss_evtmap = ioss_config->evtmap;
+
+ mutex_lock(&(telm_conf->telem_lock));
+ pss_config->num_evts = telm_conf->pss_config.ssram_evts_used;
+ ioss_config->num_evts = telm_conf->ioss_config.ssram_evts_used;
+
+ pss_config->period = telm_conf->pss_config.curr_period;
+ ioss_config->period = telm_conf->ioss_config.curr_period;
+
+ if ((pss_len < telm_conf->pss_config.ssram_evts_used) ||
+ (ioss_len < telm_conf->ioss_config.ssram_evts_used)) {
+ mutex_unlock(&(telm_conf->telem_lock));
+ return -EINVAL;
+ }
+
+ for (index = 0; index < telm_conf->pss_config.ssram_evts_used;
+ index++) {
+ pss_evtmap[index] =
+ telm_conf->pss_config.telem_evts[index].evt_id;
+ }
+
+ for (index = 0; index < telm_conf->ioss_config.ssram_evts_used;
+ index++) {
+ ioss_evtmap[index] =
+ telm_conf->ioss_config.telem_evts[index].evt_id;
+ }
+
+ mutex_unlock(&(telm_conf->telem_lock));
+ return 0;
+}
+
+
+static int telemetry_plt_add_events(u8 num_pss_evts, u8 num_ioss_evts,
+ u32 *pss_evtmap, u32 *ioss_evtmap)
+{
+ struct telemetry_evtconfig pss_evtconfig, ioss_evtconfig;
+ int ret;
+
+ pss_evtconfig.evtmap = pss_evtmap;
+ pss_evtconfig.num_evts = num_pss_evts;
+ pss_evtconfig.period = telm_conf->pss_config.curr_period;
+
+ ioss_evtconfig.evtmap = ioss_evtmap;
+ ioss_evtconfig.num_evts = num_ioss_evts;
+ ioss_evtconfig.period = telm_conf->ioss_config.curr_period;
+
+ ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig,
+ TELEM_ADD);
+ if (ret)
+ pr_err("TELEMETRY ADD Failed\n");
+
+ return ret;
+}
+
+static int telem_evtlog_read(enum telemetry_unit telem_unit,
+ struct telem_ssram_region *ssram_region, u8 len)
+{
+ struct telemetry_unit_config *unit_config;
+ u64 timestamp_prev, timestamp_next;
+ int ret, index, timeout = 0;
+
+ ret = telem_get_unitconfig(telem_unit, &unit_config);
+ if (ret < 0)
+ return ret;
+
+ if (len > unit_config->ssram_evts_used)
+ len = unit_config->ssram_evts_used;
+
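+	/*
+	 * Firmware updates the SSRAM region concurrently, so take a
+	 * seqlock-style snapshot: read the timestamp before and after
+	 * copying the event log and retry until both reads match, bounded
+	 * by TELEM_SSRAM_READ_TIMEOUT iterations.
+	 */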
+ do {
+ timestamp_prev = readq(unit_config->regmap);
+ if (!timestamp_prev) {
+			pr_err("SSRAM under update, please try later\n");
+ return -EBUSY;
+ }
+
+ ssram_region->start_time = readq(unit_config->regmap +
+ TELEM_SSRAM_STARTTIME_OFFSET);
+
+ for (index = 0; index < len; index++) {
+ ssram_region->events[index] =
+ readq(unit_config->regmap + TELEM_SSRAM_EVTLOG_OFFSET +
+ BYTES_PER_LONG*index);
+ }
+
+ timestamp_next = readq(unit_config->regmap);
+ if (!timestamp_next) {
+			pr_err("SSRAM under update, please try later\n");
+ return -EBUSY;
+ }
+
+ if (timeout++ > TELEM_SSRAM_READ_TIMEOUT) {
+ pr_err("Timeout while reading Events\n");
+ return -EBUSY;
+ }
+
+ } while (timestamp_prev != timestamp_next);
+
+ ssram_region->timestamp = timestamp_next;
+
+ return len;
+}
+
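+/*
+ * When log_all_evts is set, the whole SSRAM log is returned in SSRAM
+ * order. Otherwise the caller pre-populates evtlog[].telem_evtid with
+ * the events of interest and only the matching entries are filled in.
+ */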
+static int telemetry_plt_raw_read_eventlog(enum telemetry_unit telem_unit,
+ struct telemetry_evtlog *evtlog,
+ int len, int log_all_evts)
+{
+ int index, idx1, ret, readlen = len;
+ struct telem_ssram_region ssram_region;
+ struct telemetry_evtmap *evtmap;
+
+ switch (telem_unit) {
+ case TELEM_PSS:
+ evtmap = telm_conf->pss_config.telem_evts;
+ break;
+
+ case TELEM_IOSS:
+ evtmap = telm_conf->ioss_config.telem_evts;
+ break;
+
+ default:
+ pr_err("Unknown Telemetry Unit Specified %d\n", telem_unit);
+ return -EINVAL;
+ }
+
+ if (!log_all_evts)
+ readlen = TELEM_MAX_EVENTS_SRAM;
+
+ ret = telem_evtlog_read(telem_unit, &ssram_region, readlen);
+ if (ret < 0)
+ return ret;
+
+ /* Invalid evt-id array specified via length mismatch */
+ if ((!log_all_evts) && (len > ret))
+ return -EINVAL;
+
+ if (log_all_evts)
+ for (index = 0; index < ret; index++) {
+ evtlog[index].telem_evtlog = ssram_region.events[index];
+ evtlog[index].telem_evtid = evtmap[index].evt_id;
+ }
+ else
+ for (index = 0, readlen = 0; (index < ret) && (readlen < len);
+ index++) {
+ for (idx1 = 0; idx1 < len; idx1++) {
+ /* Elements matched */
+ if (evtmap[index].evt_id ==
+ evtlog[idx1].telem_evtid) {
+ evtlog[idx1].telem_evtlog =
+ ssram_region.events[index];
+ readlen++;
+
+ break;
+ }
+ }
+ }
+
+ return readlen;
+}
+
+static int telemetry_plt_read_eventlog(enum telemetry_unit telem_unit,
+ struct telemetry_evtlog *evtlog, int len, int log_all_evts)
+{
+ int ret;
+
+ mutex_lock(&(telm_conf->telem_lock));
+ ret = telemetry_plt_raw_read_eventlog(telem_unit, evtlog,
+ len, log_all_evts);
+ mutex_unlock(&(telm_conf->telem_lock));
+
+ return ret;
+}
+
+static int telemetry_plt_get_trace_verbosity(enum telemetry_unit telem_unit,
+ u32 *verbosity)
+{
+ u32 temp = 0;
+ int ret;
+
+ if (verbosity == NULL)
+ return -EINVAL;
+
+ mutex_lock(&(telm_conf->telem_trace_lock));
+ switch (telem_unit) {
+ case TELEM_PSS:
+ ret = intel_punit_ipc_command(
+ IPC_PUNIT_BIOS_READ_TELE_TRACE_CTRL,
+ 0, 0, NULL, &temp);
+ if (ret) {
+ pr_err("PSS TRACE_CTRL Read Failed\n");
+ goto out;
+ }
+
+ break;
+
+ case TELEM_IOSS:
+ ret = intel_scu_ipc_dev_command(telm_conf->scu,
+ IOSS_TELEM, IOSS_TELEM_TRACE_CTL_READ,
+ NULL, 0, &temp, sizeof(temp));
+ if (ret) {
+ pr_err("IOSS TRACE_CTL Read Failed\n");
+ goto out;
+ }
+
+ break;
+
+ default:
+ pr_err("Unknown Telemetry Unit Specified %d\n", telem_unit);
+ ret = -EINVAL;
+ break;
+ }
+ TELEM_EXTRACT_VERBOSITY(temp, *verbosity);
+
+out:
+ mutex_unlock(&(telm_conf->telem_trace_lock));
+ return ret;
+}
+
+static int telemetry_plt_set_trace_verbosity(enum telemetry_unit telem_unit,
+ u32 verbosity)
+{
+ u32 temp = 0;
+ int ret;
+
+ verbosity &= TELEM_TRC_VERBOSITY_MASK;
+
+ mutex_lock(&(telm_conf->telem_trace_lock));
+ switch (telem_unit) {
+ case TELEM_PSS:
+ ret = intel_punit_ipc_command(
+ IPC_PUNIT_BIOS_READ_TELE_TRACE_CTRL,
+ 0, 0, NULL, &temp);
+ if (ret) {
+ pr_err("PSS TRACE_CTRL Read Failed\n");
+ goto out;
+ }
+
+ TELEM_CLEAR_VERBOSITY_BITS(temp);
+ TELEM_SET_VERBOSITY_BITS(temp, verbosity);
+
+ ret = intel_punit_ipc_command(
+ IPC_PUNIT_BIOS_WRITE_TELE_TRACE_CTRL,
+ 0, 0, &temp, NULL);
+ if (ret) {
+ pr_err("PSS TRACE_CTRL Verbosity Set Failed\n");
+ goto out;
+ }
+ break;
+
+ case TELEM_IOSS:
+ ret = intel_scu_ipc_dev_command(telm_conf->scu, IOSS_TELEM,
+ IOSS_TELEM_TRACE_CTL_READ,
+ NULL, 0, &temp, sizeof(temp));
+ if (ret) {
+ pr_err("IOSS TRACE_CTL Read Failed\n");
+ goto out;
+ }
+
+ TELEM_CLEAR_VERBOSITY_BITS(temp);
+ TELEM_SET_VERBOSITY_BITS(temp, verbosity);
+
+ ret = intel_scu_ipc_dev_command(telm_conf->scu, IOSS_TELEM,
+ IOSS_TELEM_TRACE_CTL_WRITE,
+ &temp, sizeof(temp), NULL, 0);
+ if (ret) {
+ pr_err("IOSS TRACE_CTL Verbosity Set Failed\n");
+ goto out;
+ }
+ break;
+
+ default:
+ pr_err("Unknown Telemetry Unit Specified %d\n", telem_unit);
+ ret = -EINVAL;
+ break;
+ }
+
+out:
+ mutex_unlock(&(telm_conf->telem_trace_lock));
+ return ret;
+}
+
+static const struct telemetry_core_ops telm_pltops = {
+ .get_trace_verbosity = telemetry_plt_get_trace_verbosity,
+ .set_trace_verbosity = telemetry_plt_set_trace_verbosity,
+ .set_sampling_period = telemetry_plt_set_sampling_period,
+ .get_sampling_period = telemetry_plt_get_sampling_period,
+ .raw_read_eventlog = telemetry_plt_raw_read_eventlog,
+ .get_eventconfig = telemetry_plt_get_eventconfig,
+ .update_events = telemetry_plt_update_events,
+ .read_eventlog = telemetry_plt_read_eventlog,
+ .reset_events = telemetry_plt_reset_events,
+ .add_events = telemetry_plt_add_events,
+};
+
+static int telemetry_pltdrv_probe(struct platform_device *pdev)
+{
+ const struct x86_cpu_id *id;
+ void __iomem *mem;
+ int ret;
+
+ id = x86_match_cpu(telemetry_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ telm_conf = (struct telemetry_plt_config *)id->driver_data;
+
+ telm_conf->pmc = dev_get_drvdata(pdev->dev.parent);
+
+ mem = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ telm_conf->pss_config.regmap = mem;
+
+ mem = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ telm_conf->ioss_config.regmap = mem;
+
+ telm_conf->scu = devm_intel_scu_ipc_dev_get(&pdev->dev);
+ if (!telm_conf->scu) {
+ ret = -EPROBE_DEFER;
+ goto out;
+ }
+
+ mutex_init(&telm_conf->telem_lock);
+ mutex_init(&telm_conf->telem_trace_lock);
+
+ ret = telemetry_setup(pdev);
+ if (ret)
+ goto out;
+
+ ret = telemetry_set_pltdata(&telm_pltops, telm_conf);
+ if (ret) {
+ dev_err(&pdev->dev, "TELEMETRY Set Pltops Failed.\n");
+ goto out;
+ }
+
+ return 0;
+
+out:
+ dev_err(&pdev->dev, "TELEMETRY Setup Failed.\n");
+
+ return ret;
+}
+
+static void telemetry_pltdrv_remove(struct platform_device *pdev)
+{
+ telemetry_clear_pltdata();
+}
+
+static struct platform_driver telemetry_soc_driver = {
+ .probe = telemetry_pltdrv_probe,
+ .remove_new = telemetry_pltdrv_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init telemetry_module_init(void)
+{
+ return platform_driver_register(&telemetry_soc_driver);
+}
+
+static void __exit telemetry_module_exit(void)
+{
+ platform_driver_unregister(&telemetry_soc_driver);
+}
+
+device_initcall(telemetry_module_init);
+module_exit(telemetry_module_exit);
+
+MODULE_AUTHOR("Souvik Kumar Chakravarty <souvik.k.chakravarty@intel.com>");
+MODULE_DESCRIPTION("Intel SoC Telemetry Platform Driver");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c
new file mode 100644
index 0000000000..0a95736d97
--- /dev/null
+++ b/drivers/platform/x86/intel/tpmi.c
@@ -0,0 +1,791 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * intel-tpmi : Driver to enumerate TPMI features and create devices
+ *
+ * Copyright (c) 2023, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * The TPMI (Topology Aware Register and PM Capsule Interface) provides a
+ * flexible, extendable and PCIe enumerable MMIO interface for PM features.
+ *
+ * For example, Intel RAPL (Running Average Power Limit) provides an MMIO
+ * interface using TPMI. This has an advantage over the traditional MSR
+ * (Model Specific Register) interface, where a thread needs to be scheduled
+ * on the target CPU to read or write. Also, the RAPL features vary between
+ * CPU models, which otherwise requires a lot of model-specific code. TPMI
+ * instead provides an architectural interface of hierarchical tables and
+ * fields, which does not need any model-specific implementation.
+ *
+ * The TPMI interface uses a PCI VSEC structure to expose the location of
+ * MMIO region.
+ *
+ * This VSEC structure is present in the PCI configuration space of the
+ * Intel Out-of-Band (OOB) device, which is handled by the Intel VSEC
+ * driver. The Intel VSEC driver parses VSEC structures present in the PCI
+ * configuration space of the given device and creates an auxiliary device
+ * object for each of them. In particular, it creates an auxiliary device
+ * object representing TPMI that can be bound by an auxiliary driver.
+ *
+ * This TPMI driver will bind to the TPMI auxiliary device object created
+ * by the Intel VSEC driver.
+ *
+ * The TPMI specification defines a PFS (PM Feature Structure) table.
+ * This table is present in the TPMI MMIO region. The starting address
+ * of PFS is derived from the tBIR (Bar Indicator Register) and "Address"
+ * field from the VSEC header.
+ *
+ * Each TPMI PM feature has one entry in the PFS with a unique TPMI
+ * ID and its access details. The TPMI driver creates device nodes
+ * for the supported PM features.
+ *
+ * The names of the devices created by the TPMI driver start with the
+ * "intel_vsec.tpmi-" prefix which is followed by a specific name of the
+ * given PM feature (for example, "intel_vsec.tpmi-rapl.0").
+ *
+ * The device nodes are created using the interface "intel_vsec_add_aux()"
+ * provided by the Intel VSEC driver.
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/intel_tpmi.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/security.h>
+#include <linux/sizes.h>
+#include <linux/string_helpers.h>
+
+#include "vsec.h"
+
+/**
+ * struct intel_tpmi_pfs_entry - TPMI PM Feature Structure (PFS) entry
+ * @tpmi_id: TPMI feature identifier (what the feature is and its data format).
+ * @num_entries: Number of feature interface instances present in the PFS.
+ * This represents the maximum number of Power domains in the SoC.
+ * @entry_size: Interface instance entry size in 32-bit words.
+ * @cap_offset: Offset from the PM_Features base address to the base of the PM VSEC
+ * register bank in KB.
+ * @attribute: Feature attribute: 0=BIOS. 1=OS. 2-3=Reserved.
+ * @reserved: Bits for use in the future.
+ *
+ * Represents one TPMI feature entry data in the PFS retrieved as is
+ * from the hardware.
+ */
+struct intel_tpmi_pfs_entry {
+ u64 tpmi_id:8;
+ u64 num_entries:8;
+ u64 entry_size:16;
+ u64 cap_offset:16;
+ u64 attribute:2;
+ u64 reserved:14;
+} __packed;
+
+/**
+ * struct intel_tpmi_pm_feature - TPMI PM Feature information for a TPMI ID
+ * @pfs_header:	PFS header retrieved from the hardware.
+ * @vsec_offset: Starting MMIO address for this feature in bytes. Essentially
+ * this offset = "Address" from VSEC header + PFS Capability
+ * offset for this feature entry.
+ * @vsec_dev: Pointer to intel_vsec_device structure for this TPMI device
+ *
+ * Represents TPMI instance information for one TPMI ID.
+ */
+struct intel_tpmi_pm_feature {
+ struct intel_tpmi_pfs_entry pfs_header;
+ unsigned int vsec_offset;
+ struct intel_vsec_device *vsec_dev;
+};
+
+/**
+ * struct intel_tpmi_info - TPMI information for all IDs in an instance
+ * @tpmi_features: Pointer to a list of TPMI feature instances
+ * @vsec_dev: Pointer to intel_vsec_device structure for this TPMI device
+ * @feature_count:	Number of TPMI instances pointed to by tpmi_features
+ * @pfs_start: Start of PFS offset for the TPMI instances in this device
+ * @plat_info: Stores platform info which can be used by the client drivers
+ * @tpmi_control_mem: Memory mapped IO for getting control information
+ * @dbgfs_dir: debugfs entry pointer
+ *
+ * Stores the information for all TPMI devices enumerated from a single PCI device.
+ */
+struct intel_tpmi_info {
+ struct intel_tpmi_pm_feature *tpmi_features;
+ struct intel_vsec_device *vsec_dev;
+ int feature_count;
+ u64 pfs_start;
+ struct intel_tpmi_plat_info plat_info;
+ void __iomem *tpmi_control_mem;
+ struct dentry *dbgfs_dir;
+};
+
+/**
+ * struct tpmi_info_header - CPU package ID to PCI device mapping information
+ * @fn: PCI function number
+ * @dev: PCI device number
+ * @bus: PCI bus number
+ * @pkg: CPU Package id
+ * @reserved: Reserved for future use
+ * @lock: When set to 1 the register is locked and becomes read-only
+ * until next reset. Not for use by the OS driver.
+ *
+ * The structure to read hardware provided mapping information.
+ */
+struct tpmi_info_header {
+ u64 fn:3;
+ u64 dev:5;
+ u64 bus:8;
+ u64 pkg:8;
+ u64 reserved:39;
+ u64 lock:1;
+} __packed;
+
+/*
+ * List of supported TPMI IDs.
+ * Some TPMI IDs are not used by Linux, so the numbers are not consecutive.
+ */
+enum intel_tpmi_id {
+ TPMI_ID_RAPL = 0, /* Running Average Power Limit */
+ TPMI_ID_PEM = 1, /* Power and Perf excursion Monitor */
+ TPMI_ID_UNCORE = 2, /* Uncore Frequency Scaling */
+ TPMI_ID_SST = 5, /* Speed Select Technology */
+ TPMI_CONTROL_ID = 0x80, /* Special ID for getting feature status */
+ TPMI_INFO_ID = 0x81, /* Special ID for PCI BDF and Package ID information */
+};
+
+/*
+ * The size from hardware is in u32 units. The size comes from trusted
+ * hardware, but it is still worth verifying on pre-silicon platforms.
+ * Set the size to 0 when it is invalid.
+ */
+#define TPMI_GET_SINGLE_ENTRY_SIZE(pfs) \
+({ \
+ pfs->pfs_header.entry_size > SZ_1K ? 0 : pfs->pfs_header.entry_size << 2; \
+})
+
+/* Used during auxbus device creation */
+static DEFINE_IDA(intel_vsec_tpmi_ida);
+
+struct intel_tpmi_plat_info *tpmi_get_platform_data(struct auxiliary_device *auxdev)
+{
+ struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev);
+
+ return vsec_dev->priv_data;
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_get_platform_data, INTEL_TPMI);
+
+int tpmi_get_resource_count(struct auxiliary_device *auxdev)
+{
+ struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev);
+
+ if (vsec_dev)
+ return vsec_dev->num_resources;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_get_resource_count, INTEL_TPMI);
+
+struct resource *tpmi_get_resource_at_index(struct auxiliary_device *auxdev, int index)
+{
+ struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev);
+
+ if (vsec_dev && index < vsec_dev->num_resources)
+ return &vsec_dev->resource[index];
+
+ return NULL;
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_get_resource_at_index, INTEL_TPMI);
+
+/* TPMI Control Interface */
+
+#define TPMI_CONTROL_STATUS_OFFSET 0x00
+#define TPMI_COMMAND_OFFSET 0x08
+
+/*
+ * The spec calls for at most 1 second to get ownership in the worst
+ * case. Poll at 10 ms intervals and repeat for up to 1 second.
+ */
+#define TPMI_CONTROL_TIMEOUT_US (10 * USEC_PER_MSEC)
+#define TPMI_CONTROL_TIMEOUT_MAX_US (1 * USEC_PER_SEC)
+
+#define TPMI_RB_TIMEOUT_US (10 * USEC_PER_MSEC)
+#define TPMI_RB_TIMEOUT_MAX_US USEC_PER_SEC
+
+/* TPMI Control status register defines */
+
+#define TPMI_CONTROL_STATUS_RB BIT_ULL(0)
+
+#define TPMI_CONTROL_STATUS_OWNER GENMASK_ULL(5, 4)
+#define TPMI_OWNER_NONE 0
+#define TPMI_OWNER_IN_BAND 1
+
+#define TPMI_CONTROL_STATUS_CPL BIT_ULL(6)
+#define TPMI_CONTROL_STATUS_RESULT GENMASK_ULL(15, 8)
+#define TPMI_CONTROL_STATUS_LEN GENMASK_ULL(31, 16)
+
+#define TPMI_CMD_PKT_LEN 2
+#define TPMI_CMD_STATUS_SUCCESS 0x40
+
+/* TPMI command data registers */
+#define TMPI_CONTROL_DATA_CMD GENMASK_ULL(7, 0)
+#define TMPI_CONTROL_DATA_VAL GENMASK_ULL(63, 32)
+#define TPMI_CONTROL_DATA_VAL_FEATURE GENMASK_ULL(48, 40)
+
+/* Command to send via control interface */
+#define TPMI_CONTROL_GET_STATE_CMD 0x10
+
+#define TPMI_CONTROL_CMD_MASK GENMASK_ULL(48, 40)
+
+#define TPMI_CMD_LEN_MASK GENMASK_ULL(18, 16)
+
+#define TPMI_STATE_DISABLED BIT_ULL(0)
+#define TPMI_STATE_LOCKED BIT_ULL(31)
+
+/* Mutex to complete get feature status without interruption */
+static DEFINE_MUTEX(tpmi_dev_lock);
+
+static int tpmi_wait_for_owner(struct intel_tpmi_info *tpmi_info, u8 owner)
+{
+ u64 control;
+
+ return readq_poll_timeout(tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET,
+ control, owner == FIELD_GET(TPMI_CONTROL_STATUS_OWNER, control),
+ TPMI_CONTROL_TIMEOUT_US, TPMI_CONTROL_TIMEOUT_MAX_US);
+}
+
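+/*
+ * Read a feature's state through the TPMI control mailbox: wait for the
+ * owner field to read "none", write the GET_STATE command and feature ID
+ * to the command register, wait for in-band ownership, set Run Busy with
+ * the packet length, poll for Run Busy to clear, check the result code,
+ * read back the data word and finally write the completion (CPL) bit.
+ */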
+static int tpmi_read_feature_status(struct intel_tpmi_info *tpmi_info, int feature_id,
+ int *locked, int *disabled)
+{
+ u64 control, data;
+ int ret;
+
+ if (!tpmi_info->tpmi_control_mem)
+ return -EFAULT;
+
+ mutex_lock(&tpmi_dev_lock);
+
+ /* Wait for owner bit set to 0 (none) */
+ ret = tpmi_wait_for_owner(tpmi_info, TPMI_OWNER_NONE);
+ if (ret)
+ goto err_unlock;
+
+ /* set command id to 0x10 for TPMI_GET_STATE */
+ data = FIELD_PREP(TMPI_CONTROL_DATA_CMD, TPMI_CONTROL_GET_STATE_CMD);
+
+	/* The feature ID field starts at bit 40: 32-bit data offset plus 8 */
+ data |= FIELD_PREP(TPMI_CONTROL_DATA_VAL_FEATURE, feature_id);
+
+ /* Write at command offset for qword access */
+ writeq(data, tpmi_info->tpmi_control_mem + TPMI_COMMAND_OFFSET);
+
+ /* Wait for owner bit set to in-band */
+ ret = tpmi_wait_for_owner(tpmi_info, TPMI_OWNER_IN_BAND);
+ if (ret)
+ goto err_unlock;
+
+ /* Set Run Busy and packet length of 2 dwords */
+ control = TPMI_CONTROL_STATUS_RB;
+ control |= FIELD_PREP(TPMI_CONTROL_STATUS_LEN, TPMI_CMD_PKT_LEN);
+
+ /* Write at status offset for qword access */
+ writeq(control, tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET);
+
+ /* Wait for Run Busy clear */
+ ret = readq_poll_timeout(tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET,
+ control, !(control & TPMI_CONTROL_STATUS_RB),
+ TPMI_RB_TIMEOUT_US, TPMI_RB_TIMEOUT_MAX_US);
+ if (ret)
+ goto done_proc;
+
+ control = FIELD_GET(TPMI_CONTROL_STATUS_RESULT, control);
+ if (control != TPMI_CMD_STATUS_SUCCESS) {
+ ret = -EBUSY;
+ goto done_proc;
+ }
+
+ /* Response is ready */
+ data = readq(tpmi_info->tpmi_control_mem + TPMI_COMMAND_OFFSET);
+ data = FIELD_GET(TMPI_CONTROL_DATA_VAL, data);
+
+ *disabled = 0;
+ *locked = 0;
+
+	if (data & TPMI_STATE_DISABLED)
+ *disabled = 1;
+
+ if (data & TPMI_STATE_LOCKED)
+ *locked = 1;
+
+ ret = 0;
+
+done_proc:
+ /* Set CPL "completion" bit */
+ writeq(TPMI_CONTROL_STATUS_CPL, tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET);
+
+err_unlock:
+ mutex_unlock(&tpmi_dev_lock);
+
+ return ret;
+}
+
+int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id,
+ int *locked, int *disabled)
+{
+ struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(auxdev->dev.parent);
+ struct intel_tpmi_info *tpmi_info = auxiliary_get_drvdata(&intel_vsec_dev->auxdev);
+
+ return tpmi_read_feature_status(tpmi_info, feature_id, locked, disabled);
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_get_feature_status, INTEL_TPMI);
+
+static int tpmi_pfs_dbg_show(struct seq_file *s, void *unused)
+{
+ struct intel_tpmi_info *tpmi_info = s->private;
+ struct intel_tpmi_pm_feature *pfs;
+ int locked, disabled, ret, i;
+
+	seq_printf(s, "tpmi PFS start offset 0x%llx\n", tpmi_info->pfs_start);
+ seq_puts(s, "tpmi_id\t\tentries\t\tsize\t\tcap_offset\tattribute\tvsec_offset\tlocked\tdisabled\n");
+ for (i = 0; i < tpmi_info->feature_count; ++i) {
+ pfs = &tpmi_info->tpmi_features[i];
+ ret = tpmi_read_feature_status(tpmi_info, pfs->pfs_header.tpmi_id, &locked,
+ &disabled);
+ if (ret) {
+ locked = 'U';
+ disabled = 'U';
+ } else {
+ disabled = disabled ? 'Y' : 'N';
+ locked = locked ? 'Y' : 'N';
+ }
+ seq_printf(s, "0x%02x\t\t0x%02x\t\t0x%04x\t\t0x%04x\t\t0x%02x\t\t0x%08x\t%c\t%c\n",
+ pfs->pfs_header.tpmi_id, pfs->pfs_header.num_entries,
+ pfs->pfs_header.entry_size, pfs->pfs_header.cap_offset,
+ pfs->pfs_header.attribute, pfs->vsec_offset, locked, disabled);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(tpmi_pfs_dbg);
+
+#define MEM_DUMP_COLUMN_COUNT 8
+
+static int tpmi_mem_dump_show(struct seq_file *s, void *unused)
+{
+ size_t row_size = MEM_DUMP_COLUMN_COUNT * sizeof(u32);
+ struct intel_tpmi_pm_feature *pfs = s->private;
+ int count, ret = 0;
+ void __iomem *mem;
+ u32 off, size;
+ u8 *buffer;
+
+ size = TPMI_GET_SINGLE_ENTRY_SIZE(pfs);
+ if (!size)
+ return -EIO;
+
+ buffer = kmalloc(size, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ off = pfs->vsec_offset;
+
+ mutex_lock(&tpmi_dev_lock);
+
+ for (count = 0; count < pfs->pfs_header.num_entries; ++count) {
+ seq_printf(s, "TPMI Instance:%d offset:0x%x\n", count, off);
+
+ mem = ioremap(off, size);
+ if (!mem) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ memcpy_fromio(buffer, mem, size);
+
+ seq_hex_dump(s, " ", DUMP_PREFIX_OFFSET, row_size, sizeof(u32), buffer, size,
+ false);
+
+ iounmap(mem);
+
+ off += size;
+ }
+
+ mutex_unlock(&tpmi_dev_lock);
+
+ kfree(buffer);
+
+ return ret;
+}
+DEFINE_SHOW_ATTRIBUTE(tpmi_mem_dump);
+
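+/*
+ * Debugfs write handler. The input is three integers: the TPMI instance
+ * (punit), the register offset within that instance and the 32-bit value
+ * to write. parse_int_array_user() stores the element count in array[0],
+ * so the payload lives in array[1..3].
+ */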
+static ssize_t mem_write(struct file *file, const char __user *userbuf, size_t len, loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct intel_tpmi_pm_feature *pfs = m->private;
+ u32 addr, value, punit, size;
+ u32 num_elems, *array;
+ void __iomem *mem;
+ int ret;
+
+ size = TPMI_GET_SINGLE_ENTRY_SIZE(pfs);
+ if (!size)
+ return -EIO;
+
+ ret = parse_int_array_user(userbuf, len, (int **)&array);
+ if (ret < 0)
+ return ret;
+
+ num_elems = *array;
+ if (num_elems != 3) {
+ ret = -EINVAL;
+ goto exit_write;
+ }
+
+ punit = array[1];
+ addr = array[2];
+ value = array[3];
+
+ if (punit >= pfs->pfs_header.num_entries) {
+ ret = -EINVAL;
+ goto exit_write;
+ }
+
+ if (addr >= size) {
+ ret = -EINVAL;
+ goto exit_write;
+ }
+
+ mutex_lock(&tpmi_dev_lock);
+
+ mem = ioremap(pfs->vsec_offset + punit * size, size);
+ if (!mem) {
+ ret = -ENOMEM;
+ goto unlock_mem_write;
+ }
+
+ writel(value, mem + addr);
+
+ iounmap(mem);
+
+ ret = len;
+
+unlock_mem_write:
+ mutex_unlock(&tpmi_dev_lock);
+
+exit_write:
+ kfree(array);
+
+ return ret;
+}
+
+static int mem_write_show(struct seq_file *s, void *unused)
+{
+ return 0;
+}
+
+static int mem_write_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mem_write_show, inode->i_private);
+}
+
+static const struct file_operations mem_write_ops = {
+ .open = mem_write_open,
+ .read = seq_read,
+ .write = mem_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#define tpmi_to_dev(info) (&info->vsec_dev->pcidev->dev)
+
+static void tpmi_dbgfs_register(struct intel_tpmi_info *tpmi_info)
+{
+ char name[64];
+ int i;
+
+ snprintf(name, sizeof(name), "tpmi-%s", dev_name(tpmi_to_dev(tpmi_info)));
+ tpmi_info->dbgfs_dir = debugfs_create_dir(name, NULL);
+
+ debugfs_create_file("pfs_dump", 0444, tpmi_info->dbgfs_dir, tpmi_info, &tpmi_pfs_dbg_fops);
+
+ for (i = 0; i < tpmi_info->feature_count; ++i) {
+ struct intel_tpmi_pm_feature *pfs;
+ struct dentry *dir;
+
+ pfs = &tpmi_info->tpmi_features[i];
+ snprintf(name, sizeof(name), "tpmi-id-%02x", pfs->pfs_header.tpmi_id);
+ dir = debugfs_create_dir(name, tpmi_info->dbgfs_dir);
+
+ debugfs_create_file("mem_dump", 0444, dir, pfs, &tpmi_mem_dump_fops);
+ debugfs_create_file("mem_write", 0644, dir, pfs, &mem_write_ops);
+ }
+}
+
+static void tpmi_set_control_base(struct auxiliary_device *auxdev,
+ struct intel_tpmi_info *tpmi_info,
+ struct intel_tpmi_pm_feature *pfs)
+{
+ void __iomem *mem;
+ u32 size;
+
+ size = TPMI_GET_SINGLE_ENTRY_SIZE(pfs);
+ if (!size)
+ return;
+
+ mem = devm_ioremap(&auxdev->dev, pfs->vsec_offset, size);
+ if (!mem)
+ return;
+
+ /* mem is pointing to TPMI CONTROL base */
+ tpmi_info->tpmi_control_mem = mem;
+}
+
+static const char *intel_tpmi_name(enum intel_tpmi_id id)
+{
+ switch (id) {
+ case TPMI_ID_RAPL:
+ return "rapl";
+ case TPMI_ID_PEM:
+ return "pem";
+ case TPMI_ID_UNCORE:
+ return "uncore";
+ case TPMI_ID_SST:
+ return "sst";
+ default:
+ return NULL;
+ }
+}
+
+/* String length for "tpmi-" prefix (5 bytes) + feature name (up to 8 bytes) + NUL */
+#define TPMI_FEATURE_NAME_LEN 14
+
+static int tpmi_create_device(struct intel_tpmi_info *tpmi_info,
+ struct intel_tpmi_pm_feature *pfs,
+ u64 pfs_start)
+{
+ struct intel_vsec_device *vsec_dev = tpmi_info->vsec_dev;
+ char feature_id_name[TPMI_FEATURE_NAME_LEN];
+ struct intel_vsec_device *feature_vsec_dev;
+ struct resource *res, *tmp;
+ const char *name;
+ int i;
+
+ name = intel_tpmi_name(pfs->pfs_header.tpmi_id);
+ if (!name)
+ return -EOPNOTSUPP;
+
+ res = kcalloc(pfs->pfs_header.num_entries, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ feature_vsec_dev = kzalloc(sizeof(*feature_vsec_dev), GFP_KERNEL);
+ if (!feature_vsec_dev) {
+ kfree(res);
+ return -ENOMEM;
+ }
+
+ snprintf(feature_id_name, sizeof(feature_id_name), "tpmi-%s", name);
+
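+ /* Build one memory resource per TPMI entry; entry_size is in 32-bit words */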
+ for (i = 0, tmp = res; i < pfs->pfs_header.num_entries; i++, tmp++) {
+ u64 entry_size_bytes = pfs->pfs_header.entry_size * sizeof(u32);
+
+ tmp->start = pfs->vsec_offset + entry_size_bytes * i;
+ tmp->end = tmp->start + entry_size_bytes - 1;
+ tmp->flags = IORESOURCE_MEM;
+ }
+
+ feature_vsec_dev->pcidev = vsec_dev->pcidev;
+ feature_vsec_dev->resource = res;
+ feature_vsec_dev->num_resources = pfs->pfs_header.num_entries;
+ feature_vsec_dev->priv_data = &tpmi_info->plat_info;
+ feature_vsec_dev->priv_data_size = sizeof(tpmi_info->plat_info);
+ feature_vsec_dev->ida = &intel_vsec_tpmi_ida;
+
+ /*
+ * intel_vsec_add_aux() is resource managed, no explicit
+ * delete is required on error or on module unload.
+ * feature_vsec_dev and res memory are also freed as part of
+ * device deletion.
+ */
+ return intel_vsec_add_aux(vsec_dev->pcidev, &vsec_dev->auxdev.dev,
+ feature_vsec_dev, feature_id_name);
+}
+
+static int tpmi_create_devices(struct intel_tpmi_info *tpmi_info)
+{
+ struct intel_vsec_device *vsec_dev = tpmi_info->vsec_dev;
+ int ret, i;
+
+ for (i = 0; i < vsec_dev->num_resources; i++) {
+ ret = tpmi_create_device(tpmi_info, &tpmi_info->tpmi_features[i],
+ tpmi_info->pfs_start);
+ /*
+ * Fail driver load if any supported feature fails to create its
+ * device; unsupported features (-EOPNOTSUPP) are simply skipped.
+ * Since intel_vsec_add_aux() is resource managed, no clean up is
+ * required for the successfully created devices.
+ */
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+ }
+
+ return 0;
+}
+
+#define TPMI_INFO_BUS_INFO_OFFSET 0x08
+
+static int tpmi_process_info(struct intel_tpmi_info *tpmi_info,
+ struct intel_tpmi_pm_feature *pfs)
+{
+ struct tpmi_info_header header;
+ void __iomem *info_mem;
+
+ info_mem = ioremap(pfs->vsec_offset + TPMI_INFO_BUS_INFO_OFFSET,
+ pfs->pfs_header.entry_size * sizeof(u32) - TPMI_INFO_BUS_INFO_OFFSET);
+ if (!info_mem)
+ return -ENOMEM;
+
+ memcpy_fromio(&header, info_mem, sizeof(header));
+
+ tpmi_info->plat_info.package_id = header.pkg;
+ tpmi_info->plat_info.bus_number = header.bus;
+ tpmi_info->plat_info.device_number = header.dev;
+ tpmi_info->plat_info.function_number = header.fn;
+
+ iounmap(info_mem);
+
+ return 0;
+}
+
+static int tpmi_fetch_pfs_header(struct intel_tpmi_pm_feature *pfs, u64 start, int size)
+{
+ void __iomem *pfs_mem;
+
+ pfs_mem = ioremap(start, size);
+ if (!pfs_mem)
+ return -ENOMEM;
+
+ memcpy_fromio(&pfs->pfs_header, pfs_mem, sizeof(pfs->pfs_header));
+
+ iounmap(pfs_mem);
+
+ return 0;
+}
+
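+/* The PFS cap_offset field is expressed in units of 1024 bytes from the PFS start */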
+#define TPMI_CAP_OFFSET_UNIT 1024
+
+static int intel_vsec_tpmi_init(struct auxiliary_device *auxdev)
+{
+ struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev);
+ struct pci_dev *pci_dev = vsec_dev->pcidev;
+ struct intel_tpmi_info *tpmi_info;
+ u64 pfs_start = 0;
+ int ret, i;
+
+ tpmi_info = devm_kzalloc(&auxdev->dev, sizeof(*tpmi_info), GFP_KERNEL);
+ if (!tpmi_info)
+ return -ENOMEM;
+
+ tpmi_info->vsec_dev = vsec_dev;
+ tpmi_info->feature_count = vsec_dev->num_resources;
+ tpmi_info->plat_info.bus_number = pci_dev->bus->number;
+
+ tpmi_info->tpmi_features = devm_kcalloc(&auxdev->dev, vsec_dev->num_resources,
+ sizeof(*tpmi_info->tpmi_features),
+ GFP_KERNEL);
+ if (!tpmi_info->tpmi_features)
+ return -ENOMEM;
+
+ for (i = 0; i < vsec_dev->num_resources; i++) {
+ struct intel_tpmi_pm_feature *pfs;
+ struct resource *res;
+ u64 res_start;
+ int size, ret;
+
+ pfs = &tpmi_info->tpmi_features[i];
+ pfs->vsec_dev = vsec_dev;
+
+ res = &vsec_dev->resource[i];
+ if (!res)
+ continue;
+
+ res_start = res->start;
+ size = resource_size(res);
+ if (size < 0)
+ continue;
+
+ ret = tpmi_fetch_pfs_header(pfs, res_start, size);
+ if (ret)
+ continue;
+
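+ /* cap_offset is relative to the start of the first PFS entry, so record it once */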
+ if (!pfs_start)
+ pfs_start = res_start;
+
+ pfs->vsec_offset = pfs_start + pfs->pfs_header.cap_offset * TPMI_CAP_OFFSET_UNIT;
+
+ /*
+ * Process TPMI_INFO to get the PCI device to CPU package ID
+ * mapping. Device nodes for TPMI features are not created in
+ * this loop, so the mapping is already available when the
+ * device nodes are created later via tpmi_create_devices().
+ */
+ if (pfs->pfs_header.tpmi_id == TPMI_INFO_ID)
+ tpmi_process_info(tpmi_info, pfs);
+
+ if (pfs->pfs_header.tpmi_id == TPMI_CONTROL_ID)
+ tpmi_set_control_base(auxdev, tpmi_info, pfs);
+ }
+
+ tpmi_info->pfs_start = pfs_start;
+
+ auxiliary_set_drvdata(auxdev, tpmi_info);
+
+ ret = tpmi_create_devices(tpmi_info);
+ if (ret)
+ return ret;
+
+ /*
+ * Allow debugfs only when the security policy allows it. Everything
+ * this debugfs interface provides can also be done via /dev/mem
+ * access, so if the /dev/mem interface is locked down, don't let
+ * debugfs present any information either. Also require
+ * CAP_SYS_RAWIO, as the /dev/mem interface does.
+ */
+ if (!security_locked_down(LOCKDOWN_DEV_MEM) && capable(CAP_SYS_RAWIO))
+ tpmi_dbgfs_register(tpmi_info);
+
+ return 0;
+}
+
+static int tpmi_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ return intel_vsec_tpmi_init(auxdev);
+}
+
+static void tpmi_remove(struct auxiliary_device *auxdev)
+{
+ struct intel_tpmi_info *tpmi_info = auxiliary_get_drvdata(auxdev);
+
+ debugfs_remove_recursive(tpmi_info->dbgfs_dir);
+}
+
+static const struct auxiliary_device_id tpmi_id_table[] = {
+ { .name = "intel_vsec.tpmi" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, tpmi_id_table);
+
+static struct auxiliary_driver tpmi_aux_driver = {
+ .id_table = tpmi_id_table,
+ .probe = tpmi_probe,
+ .remove = tpmi_remove,
+};
+
+module_auxiliary_driver(tpmi_aux_driver);
+
+MODULE_IMPORT_NS(INTEL_VSEC);
+MODULE_DESCRIPTION("Intel TPMI enumeration module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/turbo_max_3.c b/drivers/platform/x86/intel/turbo_max_3.c
new file mode 100644
index 0000000000..892140b628
--- /dev/null
+++ b/drivers/platform/x86/intel/turbo_max_3.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Turbo Boost Max Technology 3.0 legacy (non HWP) enumeration driver
+ * Copyright (c) 2017, Intel Corporation.
+ * All rights reserved.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufeature.h>
+#include <linux/cpuhotplug.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/topology.h>
+#include <linux/workqueue.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+#define MSR_OC_MAILBOX 0x150
+#define MSR_OC_MAILBOX_CMD_OFFSET 32
+#define MSR_OC_MAILBOX_RSP_OFFSET 32
+#define MSR_OC_MAILBOX_BUSY_BIT 63
+#define OC_MAILBOX_FC_CONTROL_CMD 0x1C
+
+/*
+ * The typical latency for a mailbox response is ~3 us, and it takes another
+ * ~3 us to process the mailbox read after issuing the mailbox write on a
+ * Broadwell 3.4 GHz system. So most of the time the first mailbox read
+ * already has the response, but retry twice to cover boundary cases.
+ */
+#define OC_MAILBOX_RETRY_COUNT 2
+
+static int get_oc_core_priority(unsigned int cpu)
+{
+ u64 value, cmd = OC_MAILBOX_FC_CONTROL_CMD;
+ int ret, i;
+
+ /* Issue favored core read command */
+ value = cmd << MSR_OC_MAILBOX_CMD_OFFSET;
+ /* Set the busy bit to indicate OS is trying to issue command */
+ value |= BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT);
+ ret = wrmsrl_safe(MSR_OC_MAILBOX, value);
+ if (ret) {
+ pr_debug("cpu %d OC mailbox write failed\n", cpu);
+ return ret;
+ }
+
+ for (i = 0; i < OC_MAILBOX_RETRY_COUNT; ++i) {
+ ret = rdmsrl_safe(MSR_OC_MAILBOX, &value);
+ if (ret) {
+ pr_debug("cpu %d OC mailbox read failed\n", cpu);
+ break;
+ }
+
+ if (value & BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT)) {
+ pr_debug("cpu %d OC mailbox still processing\n", cpu);
+ ret = -EBUSY;
+ continue;
+ }
+
+ if ((value >> MSR_OC_MAILBOX_RSP_OFFSET) & 0xff) {
+ pr_debug("cpu %d OC mailbox cmd failed\n", cpu);
+ ret = -ENXIO;
+ break;
+ }
+
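+ /* Bits 7:0 of the mailbox response hold this core's priority (max ratio) */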
+ ret = value & 0xff;
+ pr_debug("cpu %d max_ratio %d\n", cpu, ret);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * The work item is needed to avoid CPU hotplug locking issues. The CPU
+ * online callback itmt_legacy_cpu_online() can't call
+ * sched_set_itmt_support() directly, as that function acquires hotplug
+ * locks in its path.
+ */
+static void itmt_legacy_work_fn(struct work_struct *work)
+{
+ sched_set_itmt_support();
+}
+
+static DECLARE_WORK(sched_itmt_work, itmt_legacy_work_fn);
+
+static int itmt_legacy_cpu_online(unsigned int cpu)
+{
+ static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
+ int priority;
+
+ priority = get_oc_core_priority(cpu);
+ if (priority < 0)
+ return 0;
+
+ sched_set_itmt_core_prio(priority, cpu);
+
+ /* Enable ITMT feature when a core with different priority is found */
+ if (max_highest_perf <= min_highest_perf) {
+ if (priority > max_highest_perf)
+ max_highest_perf = priority;
+
+ if (priority < min_highest_perf)
+ min_highest_perf = priority;
+
+ if (max_highest_perf > min_highest_perf)
+ schedule_work(&sched_itmt_work);
+ }
+
+ return 0;
+}
+
+static const struct x86_cpu_id itmt_legacy_cpu_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
+ {}
+};
+
+static int __init itmt_legacy_init(void)
+{
+ const struct x86_cpu_id *id;
+ int ret;
+
+ id = x86_match_cpu(itmt_legacy_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "platform/x86/turbo_max_3:online",
+ itmt_legacy_cpu_online, NULL);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+late_initcall(itmt_legacy_init)
diff --git a/drivers/platform/x86/intel/uncore-frequency/Kconfig b/drivers/platform/x86/intel/uncore-frequency/Kconfig
new file mode 100644
index 0000000000..a56d550569
--- /dev/null
+++ b/drivers/platform/x86/intel/uncore-frequency/Kconfig
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Uncore Frequency control drivers
+#
+
+menu "Intel Uncore Frequency Control"
+ depends on X86_64 || COMPILE_TEST
+
+config INTEL_UNCORE_FREQ_CONTROL_TPMI
+ tristate
+
+config INTEL_UNCORE_FREQ_CONTROL
+ tristate "Intel Uncore frequency control driver"
+ depends on X86_64
+ select INTEL_UNCORE_FREQ_CONTROL_TPMI if INTEL_TPMI
+ help
+ This driver allows control of Uncore frequency limits on
+ supported server platforms.
+
+ Uncore frequency controls RING/LLC (last-level cache) clocks.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel-uncore-frequency.
+
+endmenu
diff --git a/drivers/platform/x86/intel/uncore-frequency/Makefile b/drivers/platform/x86/intel/uncore-frequency/Makefile
new file mode 100644
index 0000000000..08ff57492b
--- /dev/null
+++ b/drivers/platform/x86/intel/uncore-frequency/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for linux/drivers/platform/x86/intel/uncore-frequency
+#
+
+obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL) += intel-uncore-frequency.o
+intel-uncore-frequency-y := uncore-frequency.o
+obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL) += intel-uncore-frequency-common.o
+intel-uncore-frequency-common-y := uncore-frequency-common.o
+obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL_TPMI) += intel-uncore-frequency-tpmi.o
+intel-uncore-frequency-tpmi-y := uncore-frequency-tpmi.o
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
new file mode 100644
index 0000000000..33bb58dc3f
--- /dev/null
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Uncore Frequency Control: Common code implementation
+ * Copyright (c) 2022, Intel Corporation.
+ * All rights reserved.
+ *
+ */
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include "uncore-frequency-common.h"
+
+/* Mutex to control all mutual exclusions */
+static DEFINE_MUTEX(uncore_lock);
+/* Root of all uncore sysfs kobjects */
+static struct kobject *uncore_root_kobj;
+/* uncore instance count */
+static int uncore_instance_count;
+
+static DEFINE_IDA(intel_uncore_ida);
+
+/* callbacks for actual HW read/write */
+static int (*uncore_read)(struct uncore_data *data, unsigned int *min, unsigned int *max);
+static int (*uncore_write)(struct uncore_data *data, unsigned int input, unsigned int min_max);
+static int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq);
+
+static ssize_t show_domain_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct uncore_data *data = container_of(attr, struct uncore_data, domain_id_kobj_attr);
+
+ return sprintf(buf, "%u\n", data->domain_id);
+}
+
+static ssize_t show_fabric_cluster_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct uncore_data *data = container_of(attr, struct uncore_data, fabric_cluster_id_kobj_attr);
+
+ return sprintf(buf, "%u\n", data->cluster_id);
+}
+
+static ssize_t show_package_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct uncore_data *data = container_of(attr, struct uncore_data, package_id_kobj_attr);
+
+ return sprintf(buf, "%u\n", data->package_id);
+}
+
+static ssize_t show_min_max_freq_khz(struct uncore_data *data,
+ char *buf, int min_max)
+{
+ unsigned int min, max;
+ int ret;
+
+ mutex_lock(&uncore_lock);
+ ret = uncore_read(data, &min, &max);
+ mutex_unlock(&uncore_lock);
+ if (ret)
+ return ret;
+
+ if (min_max)
+ return sprintf(buf, "%u\n", max);
+
+ return sprintf(buf, "%u\n", min);
+}
+
+static ssize_t store_min_max_freq_khz(struct uncore_data *data,
+ const char *buf, ssize_t count,
+ int min_max)
+{
+ unsigned int input;
+ int ret;
+
+ if (kstrtouint(buf, 10, &input))
+ return -EINVAL;
+
+ mutex_lock(&uncore_lock);
+ ret = uncore_write(data, input, min_max);
+ mutex_unlock(&uncore_lock);
+
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t show_perf_status_freq_khz(struct uncore_data *data, char *buf)
+{
+ unsigned int freq;
+ int ret;
+
+ mutex_lock(&uncore_lock);
+ ret = uncore_read_freq(data, &freq);
+ mutex_unlock(&uncore_lock);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%u\n", freq);
+}
+
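+/*
+ * Macros generating the kobj_attribute show/store callbacks; "name" must
+ * match the uncore_data member <name>_kobj_attr used by container_of().
+ */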
+#define store_uncore_min_max(name, min_max) \
+ static ssize_t store_##name(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ const char *buf, size_t count) \
+ { \
+ struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
+ \
+ return store_min_max_freq_khz(data, buf, count, \
+ min_max); \
+ }
+
+#define show_uncore_min_max(name, min_max) \
+ static ssize_t show_##name(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf)\
+ { \
+ struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
+ \
+ return show_min_max_freq_khz(data, buf, min_max); \
+ }
+
+#define show_uncore_perf_status(name) \
+ static ssize_t show_##name(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf)\
+ { \
+ struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
+ \
+ return show_perf_status_freq_khz(data, buf); \
+ }
+
+store_uncore_min_max(min_freq_khz, 0);
+store_uncore_min_max(max_freq_khz, 1);
+
+show_uncore_min_max(min_freq_khz, 0);
+show_uncore_min_max(max_freq_khz, 1);
+
+show_uncore_perf_status(current_freq_khz);
+
+#define show_uncore_data(member_name) \
+ static ssize_t show_##member_name(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf)\
+ { \
+ struct uncore_data *data = container_of(attr, struct uncore_data,\
+ member_name##_kobj_attr);\
+ \
+ return sysfs_emit(buf, "%u\n", \
+ data->member_name); \
+ } \
+
+show_uncore_data(initial_min_freq_khz);
+show_uncore_data(initial_max_freq_khz);
+
+#define init_attribute_rw(_name) \
+ do { \
+ sysfs_attr_init(&data->_name##_kobj_attr.attr); \
+ data->_name##_kobj_attr.show = show_##_name; \
+ data->_name##_kobj_attr.store = store_##_name; \
+ data->_name##_kobj_attr.attr.name = #_name; \
+ data->_name##_kobj_attr.attr.mode = 0644; \
+ } while (0)
+
+#define init_attribute_ro(_name) \
+ do { \
+ sysfs_attr_init(&data->_name##_kobj_attr.attr); \
+ data->_name##_kobj_attr.show = show_##_name; \
+ data->_name##_kobj_attr.store = NULL; \
+ data->_name##_kobj_attr.attr.name = #_name; \
+ data->_name##_kobj_attr.attr.mode = 0444; \
+ } while (0)
+
+#define init_attribute_root_ro(_name) \
+ do { \
+ sysfs_attr_init(&data->_name##_kobj_attr.attr); \
+ data->_name##_kobj_attr.show = show_##_name; \
+ data->_name##_kobj_attr.store = NULL; \
+ data->_name##_kobj_attr.attr.name = #_name; \
+ data->_name##_kobj_attr.attr.mode = 0400; \
+ } while (0)
+
+static int create_attr_group(struct uncore_data *data, char *name)
+{
+ int ret, freq, index = 0;
+
+ init_attribute_rw(max_freq_khz);
+ init_attribute_rw(min_freq_khz);
+ init_attribute_ro(initial_min_freq_khz);
+ init_attribute_ro(initial_max_freq_khz);
+ init_attribute_root_ro(current_freq_khz);
+
+ if (data->domain_id != UNCORE_DOMAIN_ID_INVALID) {
+ init_attribute_root_ro(domain_id);
+ data->uncore_attrs[index++] = &data->domain_id_kobj_attr.attr;
+ init_attribute_root_ro(fabric_cluster_id);
+ data->uncore_attrs[index++] = &data->fabric_cluster_id_kobj_attr.attr;
+ init_attribute_root_ro(package_id);
+ data->uncore_attrs[index++] = &data->package_id_kobj_attr.attr;
+ }
+
+ data->uncore_attrs[index++] = &data->max_freq_khz_kobj_attr.attr;
+ data->uncore_attrs[index++] = &data->min_freq_khz_kobj_attr.attr;
+ data->uncore_attrs[index++] = &data->initial_min_freq_khz_kobj_attr.attr;
+ data->uncore_attrs[index++] = &data->initial_max_freq_khz_kobj_attr.attr;
+
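+	/* Expose current_freq_khz only when the current frequency is actually readable */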
+ ret = uncore_read_freq(data, &freq);
+ if (!ret)
+ data->uncore_attrs[index++] = &data->current_freq_khz_kobj_attr.attr;
+
+ data->uncore_attrs[index] = NULL;
+
+ data->uncore_attr_group.name = name;
+ data->uncore_attr_group.attrs = data->uncore_attrs;
+ ret = sysfs_create_group(uncore_root_kobj, &data->uncore_attr_group);
+
+ return ret;
+}
+
+static void delete_attr_group(struct uncore_data *data, char *name)
+{
+ sysfs_remove_group(uncore_root_kobj, &data->uncore_attr_group);
+}
+
+int uncore_freq_add_entry(struct uncore_data *data, int cpu)
+{
+ int ret = 0;
+
+ mutex_lock(&uncore_lock);
+ if (data->valid) {
+ /* control cpu changed */
+ data->control_cpu = cpu;
+ goto uncore_unlock;
+ }
+
+ if (data->domain_id != UNCORE_DOMAIN_ID_INVALID) {
+ ret = ida_alloc(&intel_uncore_ida, GFP_KERNEL);
+ if (ret < 0)
+ goto uncore_unlock;
+
+ data->instance_id = ret;
+ sprintf(data->name, "uncore%02d", ret);
+ } else {
+ sprintf(data->name, "package_%02d_die_%02d", data->package_id, data->die_id);
+ }
+
+ uncore_read(data, &data->initial_min_freq_khz, &data->initial_max_freq_khz);
+
+ ret = create_attr_group(data, data->name);
+ if (ret) {
+ if (data->domain_id != UNCORE_DOMAIN_ID_INVALID)
+ ida_free(&intel_uncore_ida, data->instance_id);
+ } else {
+ data->control_cpu = cpu;
+ data->valid = true;
+ }
+
+uncore_unlock:
+ mutex_unlock(&uncore_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(uncore_freq_add_entry, INTEL_UNCORE_FREQUENCY);
+
+void uncore_freq_remove_die_entry(struct uncore_data *data)
+{
+ mutex_lock(&uncore_lock);
+ delete_attr_group(data, data->name);
+ data->control_cpu = -1;
+ data->valid = false;
+ if (data->domain_id != UNCORE_DOMAIN_ID_INVALID)
+ ida_free(&intel_uncore_ida, data->instance_id);
+
+ mutex_unlock(&uncore_lock);
+}
+EXPORT_SYMBOL_NS_GPL(uncore_freq_remove_die_entry, INTEL_UNCORE_FREQUENCY);
+
+int uncore_freq_common_init(int (*read_control_freq)(struct uncore_data *data, unsigned int *min, unsigned int *max),
+ int (*write_control_freq)(struct uncore_data *data, unsigned int input, unsigned int set_max),
+ int (*read_freq)(struct uncore_data *data, unsigned int *freq))
+{
+ mutex_lock(&uncore_lock);
+
+ uncore_read = read_control_freq;
+ uncore_write = write_control_freq;
+ uncore_read_freq = read_freq;
+
+ if (!uncore_root_kobj) {
+ struct device *dev_root = bus_get_dev_root(&cpu_subsys);
+
+ if (dev_root) {
+ uncore_root_kobj = kobject_create_and_add("intel_uncore_frequency",
+ &dev_root->kobj);
+ put_device(dev_root);
+ }
+ }
+ if (uncore_root_kobj)
+ ++uncore_instance_count;
+ mutex_unlock(&uncore_lock);
+
+ return uncore_root_kobj ? 0 : -ENOMEM;
+}
+EXPORT_SYMBOL_NS_GPL(uncore_freq_common_init, INTEL_UNCORE_FREQUENCY);
+
+void uncore_freq_common_exit(void)
+{
+ mutex_lock(&uncore_lock);
+ --uncore_instance_count;
+ if (!uncore_instance_count) {
+ kobject_put(uncore_root_kobj);
+ uncore_root_kobj = NULL;
+ }
+ mutex_unlock(&uncore_lock);
+}
+EXPORT_SYMBOL_NS_GPL(uncore_freq_common_exit, INTEL_UNCORE_FREQUENCY);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Uncore Frequency Common Module");
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
new file mode 100644
index 0000000000..0e5bf507e5
--- /dev/null
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel Uncore Frequency Control: Common defines and prototypes
+ * Copyright (c) 2022, Intel Corporation.
+ * All rights reserved.
+ *
+ */
+
+#ifndef __INTEL_UNCORE_FREQ_COMMON_H
+#define __INTEL_UNCORE_FREQ_COMMON_H
+
+#include <linux/device.h>
+
+/**
+ * struct uncore_data - Encapsulate all uncore data
+ * @stored_uncore_data: Last user changed MSR 620 value, which will be restored
+ * on system resume.
+ * @initial_min_freq_khz: Sampled minimum uncore frequency at driver init
+ * @initial_max_freq_khz: Sampled maximum uncore frequency at driver init
+ * @control_cpu: Designated CPU for a die to read/write
+ * @valid: Mark the data valid/invalid
+ * @package_id: Package id for this instance
+ * @die_id: Die id for this instance
+ * @domain_id: Power domain id for this instance
+ * @cluster_id: cluster id in a domain
+ * @instance_id: Unique instance id to append to directory name
+ * @name: Sysfs entry name for this instance
+ * @uncore_attr_group: Attribute group storage
+ * @max_freq_khz_kobj_attr: Storage for kobject attribute max_freq_khz
+ * @min_freq_khz_kobj_attr: Storage for kobject attribute min_freq_khz
+ * @initial_max_freq_khz_kobj_attr: Storage for kobject attribute initial_max_freq_khz
+ * @initial_min_freq_khz_kobj_attr: Storage for kobject attribute initial_min_freq_khz
+ * @current_freq_khz_kobj_attr: Storage for kobject attribute current_freq_khz
+ * @domain_id_kobj_attr: Storage for kobject attribute domain_id
+ * @fabric_cluster_id_kobj_attr: Storage for kobject attribute fabric_cluster_id
+ * @package_id_kobj_attr: Storage for kobject attribute package_id
+ * @uncore_attrs: Attribute storage for group creation
+ *
+ * This structure is used to encapsulate all data related to uncore sysfs
+ * settings for a die/package.
+ */
+struct uncore_data {
+ u64 stored_uncore_data;
+ u32 initial_min_freq_khz;
+ u32 initial_max_freq_khz;
+ int control_cpu;
+ bool valid;
+ int package_id;
+ int die_id;
+ int domain_id;
+ int cluster_id;
+ int instance_id;
+ char name[32];
+
+ struct attribute_group uncore_attr_group;
+ struct kobj_attribute max_freq_khz_kobj_attr;
+ struct kobj_attribute min_freq_khz_kobj_attr;
+ struct kobj_attribute initial_max_freq_khz_kobj_attr;
+ struct kobj_attribute initial_min_freq_khz_kobj_attr;
+ struct kobj_attribute current_freq_khz_kobj_attr;
+ struct kobj_attribute domain_id_kobj_attr;
+ struct kobj_attribute fabric_cluster_id_kobj_attr;
+ struct kobj_attribute package_id_kobj_attr;
+ struct attribute *uncore_attrs[9];
+};
+
+#define UNCORE_DOMAIN_ID_INVALID -1
+
+int uncore_freq_common_init(int (*read_control_freq)(struct uncore_data *data, unsigned int *min, unsigned int *max),
+ int (*write_control_freq)(struct uncore_data *data, unsigned int input, unsigned int min_max),
+ int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq));
+void uncore_freq_common_exit(void);
+int uncore_freq_add_entry(struct uncore_data *data, int cpu);
+void uncore_freq_remove_die_entry(struct uncore_data *data);
+
+#endif
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
new file mode 100644
index 0000000000..7d0a67f8b5
--- /dev/null
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * uncore-frequency-tpmi: Uncore frequency scaling using TPMI
+ *
+ * Copyright (c) 2023, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * The hardware interface is essentially an MMIO substitute for
+ * MSR 0x620 and MSR 0x621: specific MMIO offsets and bit fields
+ * get/set the minimum and maximum uncore ratio, similar to the MSRs.
+ * While the uncore MSRs have package scope, TPMI allows new
+ * generation CPUs to expose multiple uncore controls at the
+ * uncore-cluster level. Each package can have multiple power
+ * domains, which in turn can have multiple clusters. The number of
+ * power domains equals the number of resources in this auxiliary
+ * device. Offsets and bit fields allow discovering the number of
+ * clusters and the offset of each cluster's controls.
+ *
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/intel_tpmi.h>
+
+#include "uncore-frequency-common.h"
+
+#define UNCORE_HEADER_VERSION 1
+#define UNCORE_HEADER_INDEX 0
+#define UNCORE_FABRIC_CLUSTER_OFFSET 8
+
+/* status + control + adv_ctl1 + adv_ctl2 */
+#define UNCORE_FABRIC_CLUSTER_SIZE (4 * 8)
+
+#define UNCORE_STATUS_INDEX 0
+#define UNCORE_CONTROL_INDEX 8
+
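+/* Hardware ratio values are in units of 100 MHz, i.e. 100000 kHz */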
+#define UNCORE_FREQ_KHZ_MULTIPLIER 100000
+
+struct tpmi_uncore_struct;
+
+/* Information for each cluster */
+struct tpmi_uncore_cluster_info {
+ bool root_domain;
+ u8 __iomem *cluster_base;
+ struct uncore_data uncore_data;
+ struct tpmi_uncore_struct *uncore_root;
+};
+
+/* Information for each power domain */
+struct tpmi_uncore_power_domain_info {
+ u8 __iomem *uncore_base;
+ int ufs_header_ver;
+ int cluster_count;
+ struct tpmi_uncore_cluster_info *cluster_infos;
+};
+
+/* Information for all power domains in a package */
+struct tpmi_uncore_struct {
+ int power_domain_count;
+ int max_ratio;
+ int min_ratio;
+ struct tpmi_uncore_power_domain_info *pd_info;
+ struct tpmi_uncore_cluster_info root_cluster;
+};
+
+#define UNCORE_GENMASK_MIN_RATIO GENMASK_ULL(21, 15)
+#define UNCORE_GENMASK_MAX_RATIO GENMASK_ULL(14, 8)
+#define UNCORE_GENMASK_CURRENT_RATIO GENMASK_ULL(6, 0)
+
+/* Helper function to read MMIO offset for max/min control frequency */
+static void read_control_freq(struct tpmi_uncore_cluster_info *cluster_info,
+ unsigned int *min, unsigned int *max)
+{
+ u64 control;
+
+ control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);
+ *max = FIELD_GET(UNCORE_GENMASK_MAX_RATIO, control) * UNCORE_FREQ_KHZ_MULTIPLIER;
+ *min = FIELD_GET(UNCORE_GENMASK_MIN_RATIO, control) * UNCORE_FREQ_KHZ_MULTIPLIER;
+}
+
+#define UNCORE_MAX_RATIO FIELD_MAX(UNCORE_GENMASK_MAX_RATIO)
+
+/* Callback for sysfs read for max/min frequencies. Called under mutex locks */
+static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min,
+ unsigned int *max)
+{
+ struct tpmi_uncore_cluster_info *cluster_info;
+
+ cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
+
+ if (cluster_info->root_domain) {
+ struct tpmi_uncore_struct *uncore_root = cluster_info->uncore_root;
+ int i, _min = 0, _max = 0;
+
+ *min = UNCORE_MAX_RATIO * UNCORE_FREQ_KHZ_MULTIPLIER;
+ *max = 0;
+
+ /*
+ * Get the max/min by looking at each cluster. Get the lowest
+ * min and highest max.
+ */
+ for (i = 0; i < uncore_root->power_domain_count; ++i) {
+ int j;
+
+ for (j = 0; j < uncore_root->pd_info[i].cluster_count; ++j) {
+ read_control_freq(&uncore_root->pd_info[i].cluster_infos[j],
+ &_min, &_max);
+ if (*min > _min)
+ *min = _min;
+ if (*max < _max)
+ *max = _max;
+ }
+ }
+ return 0;
+ }
+
+ read_control_freq(cluster_info, min, max);
+
+ return 0;
+}
+
+/* Helper function to write MMIO offset for max/min control frequency */
+static void write_control_freq(struct tpmi_uncore_cluster_info *cluster_info, unsigned int input,
+ unsigned int min_max)
+{
+ u64 control;
+
+ control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);
+
+ if (min_max) {
+ control &= ~UNCORE_GENMASK_MAX_RATIO;
+ control |= FIELD_PREP(UNCORE_GENMASK_MAX_RATIO, input);
+ } else {
+ control &= ~UNCORE_GENMASK_MIN_RATIO;
+ control |= FIELD_PREP(UNCORE_GENMASK_MIN_RATIO, input);
+ }
+
+ writeq(control, (cluster_info->cluster_base + UNCORE_CONTROL_INDEX));
+}
+
+/* Callback for sysfs write for max/min frequencies. Called under mutex locks */
+static int uncore_write_control_freq(struct uncore_data *data, unsigned int input,
+ unsigned int min_max)
+{
+ struct tpmi_uncore_cluster_info *cluster_info;
+ struct tpmi_uncore_struct *uncore_root;
+
+ input /= UNCORE_FREQ_KHZ_MULTIPLIER;
+ if (!input || input > UNCORE_MAX_RATIO)
+ return -EINVAL;
+
+ cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
+ uncore_root = cluster_info->uncore_root;
+
+ /* Update each cluster in a package */
+ if (cluster_info->root_domain) {
+ struct tpmi_uncore_struct *uncore_root = cluster_info->uncore_root;
+ int i;
+
+ for (i = 0; i < uncore_root->power_domain_count; ++i) {
+ int j;
+
+ for (j = 0; j < uncore_root->pd_info[i].cluster_count; ++j)
+ write_control_freq(&uncore_root->pd_info[i].cluster_infos[j],
+ input, min_max);
+ }
+
+ if (min_max)
+ uncore_root->max_ratio = input;
+ else
+ uncore_root->min_ratio = input;
+
+ return 0;
+ }
+
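+ /* Per-cluster limits must stay within the package-wide limits set via the root domain */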
+ if (min_max && uncore_root->max_ratio && uncore_root->max_ratio < input)
+ return -EINVAL;
+
+ if (!min_max && uncore_root->min_ratio && uncore_root->min_ratio > input)
+ return -EINVAL;
+
+ write_control_freq(cluster_info, input, min_max);
+
+ return 0;
+}
+
+/* Callback for sysfs read for the current uncore frequency. Called under mutex locks */
+static int uncore_read_freq(struct uncore_data *data, unsigned int *freq)
+{
+ struct tpmi_uncore_cluster_info *cluster_info;
+ u64 status;
+
+ cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
+ if (cluster_info->root_domain)
+ return -ENODATA;
+
+ status = readq((u8 __iomem *)cluster_info->cluster_base + UNCORE_STATUS_INDEX);
+ *freq = FIELD_GET(UNCORE_GENMASK_CURRENT_RATIO, status) * UNCORE_FREQ_KHZ_MULTIPLIER;
+
+ return 0;
+}
+
+static void remove_cluster_entries(struct tpmi_uncore_struct *tpmi_uncore)
+{
+ int i;
+
+ for (i = 0; i < tpmi_uncore->power_domain_count; ++i) {
+ struct tpmi_uncore_power_domain_info *pd_info;
+ int j;
+
+ pd_info = &tpmi_uncore->pd_info[i];
+ if (!pd_info->uncore_base)
+ continue;
+
+ for (j = 0; j < pd_info->cluster_count; ++j) {
+ struct tpmi_uncore_cluster_info *cluster_info;
+
+ cluster_info = &pd_info->cluster_infos[j];
+ uncore_freq_remove_die_entry(&cluster_info->uncore_data);
+ }
+ }
+}
+
+#define UNCORE_VERSION_MASK GENMASK_ULL(7, 0)
+#define UNCORE_LOCAL_FABRIC_CLUSTER_ID_MASK GENMASK_ULL(15, 8)
+#define UNCORE_CLUSTER_OFF_MASK GENMASK_ULL(7, 0)
+#define UNCORE_MAX_CLUSTER_PER_DOMAIN 8
+
+static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
+{
+ struct intel_tpmi_plat_info *plat_info;
+ struct tpmi_uncore_struct *tpmi_uncore;
+ int ret, i, pkg = 0;
+ int num_resources;
+
+ /* Get number of power domains, which is equal to number of resources */
+ num_resources = tpmi_get_resource_count(auxdev);
+ if (!num_resources)
+ return -EINVAL;
+
+ /* Register callbacks to uncore core */
+ ret = uncore_freq_common_init(uncore_read_control_freq, uncore_write_control_freq,
+ uncore_read_freq);
+ if (ret)
+ return ret;
+
+ /* Allocate uncore instance per package */
+ tpmi_uncore = devm_kzalloc(&auxdev->dev, sizeof(*tpmi_uncore), GFP_KERNEL);
+ if (!tpmi_uncore) {
+ ret = -ENOMEM;
+ goto err_rem_common;
+ }
+
+ /* Allocate memory for all power domains in a package */
+ tpmi_uncore->pd_info = devm_kcalloc(&auxdev->dev, num_resources,
+ sizeof(*tpmi_uncore->pd_info),
+ GFP_KERNEL);
+ if (!tpmi_uncore->pd_info) {
+ ret = -ENOMEM;
+ goto err_rem_common;
+ }
+
+ tpmi_uncore->power_domain_count = num_resources;
+
+ /* Get the package ID from the TPMI core */
+ plat_info = tpmi_get_platform_data(auxdev);
+ if (plat_info)
+ pkg = plat_info->package_id;
+ else
+ dev_info(&auxdev->dev, "Platform information is NULL\n");
+
+ for (i = 0; i < num_resources; ++i) {
+ struct tpmi_uncore_power_domain_info *pd_info;
+ struct resource *res;
+ u64 cluster_offset;
+ u8 cluster_mask;
+ int mask, j;
+ u64 header;
+
+ res = tpmi_get_resource_at_index(auxdev, i);
+ if (!res)
+ continue;
+
+ pd_info = &tpmi_uncore->pd_info[i];
+
+ pd_info->uncore_base = devm_ioremap_resource(&auxdev->dev, res);
+ if (IS_ERR(pd_info->uncore_base)) {
+ ret = PTR_ERR(pd_info->uncore_base);
+ /*
+ * Set to NULL so that clean up can still remove other
+ * entries already created if any by
+ * remove_cluster_entries()
+ */
+ pd_info->uncore_base = NULL;
+ goto remove_clusters;
+ }
+
+ /* Check the version and skip this resource if there is a mismatch */
+ header = readq(pd_info->uncore_base);
+ pd_info->ufs_header_ver = header & UNCORE_VERSION_MASK;
+ if (pd_info->ufs_header_ver != UNCORE_HEADER_VERSION) {
+ dev_info(&auxdev->dev, "Uncore: Unsupported version:%d\n",
+ pd_info->ufs_header_ver);
+ continue;
+ }
+
+ /* Get Cluster ID Mask */
+ cluster_mask = FIELD_GET(UNCORE_LOCAL_FABRIC_CLUSTER_ID_MASK, header);
+ if (!cluster_mask) {
+ dev_info(&auxdev->dev, "Uncore: Invalid cluster mask:%x\n", cluster_mask);
+ continue;
+ }
+
+ /* Find out number of clusters in this resource */
+ pd_info->cluster_count = hweight8(cluster_mask);
+
+ pd_info->cluster_infos = devm_kcalloc(&auxdev->dev, pd_info->cluster_count,
+ sizeof(struct tpmi_uncore_cluster_info),
+ GFP_KERNEL);
+ if (!pd_info->cluster_infos) {
+ ret = -ENOMEM;
+ goto remove_clusters;
+ }
+ /*
+ * Each byte in this register points to the status and control
+ * registers of one cluster (cluster ids 0-7).
+ */
+ cluster_offset = readq(pd_info->uncore_base +
+ UNCORE_FABRIC_CLUSTER_OFFSET);
+
+ for (j = 0; j < pd_info->cluster_count; ++j) {
+ struct tpmi_uncore_cluster_info *cluster_info;
+
+ /* Get the offset for this cluster */
+ mask = (cluster_offset & UNCORE_CLUSTER_OFF_MASK);
+ /* Offset in QWORD, so change to bytes */
+ mask <<= 3;
+
+ cluster_info = &pd_info->cluster_infos[j];
+
+ cluster_info->cluster_base = pd_info->uncore_base + mask;
+
+ cluster_info->uncore_data.package_id = pkg;
+ /* There are no dies like Cascade Lake */
+ cluster_info->uncore_data.die_id = 0;
+ cluster_info->uncore_data.domain_id = i;
+ cluster_info->uncore_data.cluster_id = j;
+
+ cluster_info->uncore_root = tpmi_uncore;
+
+ ret = uncore_freq_add_entry(&cluster_info->uncore_data, 0);
+ if (ret) {
+ cluster_info->cluster_base = NULL;
+ goto remove_clusters;
+ }
+ /* Point to next cluster offset */
+ cluster_offset >>= UNCORE_MAX_CLUSTER_PER_DOMAIN;
+ }
+ }
+
+ auxiliary_set_drvdata(auxdev, tpmi_uncore);
+
+ tpmi_uncore->root_cluster.root_domain = true;
+ tpmi_uncore->root_cluster.uncore_root = tpmi_uncore;
+
+ tpmi_uncore->root_cluster.uncore_data.package_id = pkg;
+ tpmi_uncore->root_cluster.uncore_data.domain_id = UNCORE_DOMAIN_ID_INVALID;
+ ret = uncore_freq_add_entry(&tpmi_uncore->root_cluster.uncore_data, 0);
+ if (ret)
+ goto remove_clusters;
+
+ return 0;
+
+remove_clusters:
+ remove_cluster_entries(tpmi_uncore);
+err_rem_common:
+ uncore_freq_common_exit();
+
+ return ret;
+}
+
+static void uncore_remove(struct auxiliary_device *auxdev)
+{
+ struct tpmi_uncore_struct *tpmi_uncore = auxiliary_get_drvdata(auxdev);
+
+ uncore_freq_remove_die_entry(&tpmi_uncore->root_cluster.uncore_data);
+ remove_cluster_entries(tpmi_uncore);
+
+ uncore_freq_common_exit();
+}
+
+static const struct auxiliary_device_id intel_uncore_id_table[] = {
+ { .name = "intel_vsec.tpmi-uncore" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, intel_uncore_id_table);
+
+static struct auxiliary_driver intel_uncore_aux_driver = {
+ .id_table = intel_uncore_id_table,
+ .remove = uncore_remove,
+ .probe = uncore_probe,
+};
+
+module_auxiliary_driver(intel_uncore_aux_driver);
+
+MODULE_IMPORT_NS(INTEL_TPMI);
+MODULE_IMPORT_NS(INTEL_UNCORE_FREQUENCY);
+MODULE_DESCRIPTION("Intel TPMI UFS Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
new file mode 100644
index 0000000000..a3b25253b6
--- /dev/null
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Uncore Frequency Setting
+ * Copyright (c) 2022, Intel Corporation.
+ * All rights reserved.
+ *
+ * Provides an interface to set MSR 0x620 at per-die granularity. On CPU
+ * online, one control CPU is identified per die to read/write the limits.
+ * This control CPU is replaced if it goes offline, and when the last CPU
+ * in a die goes offline the sysfs object for that die is removed. The
+ * majority of the code deals with sysfs creation and the read/write
+ * attribute callbacks.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+#include "uncore-frequency-common.h"
+
+/* Max instances for uncore data, one for each die */
+static int uncore_max_entries __read_mostly;
+/* Storage for uncore data for all instances */
+static struct uncore_data *uncore_instances;
+/* Stores the CPU mask of the target CPUs to use during uncore read/write */
+static cpumask_t uncore_cpu_mask;
+/* CPU online callback register instance */
+static enum cpuhp_state uncore_hp_state __read_mostly;
+
+#define MSR_UNCORE_RATIO_LIMIT 0x620
+#define MSR_UNCORE_PERF_STATUS 0x621
+#define UNCORE_FREQ_KHZ_MULTIPLIER 100000
+
+static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min,
+ unsigned int *max)
+{
+ u64 cap;
+ int ret;
+
+ if (data->control_cpu < 0)
+ return -ENXIO;
+
+ ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
+ if (ret)
+ return ret;
+
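+ /* MSR_UNCORE_RATIO_LIMIT: bits 6:0 hold the max ratio, bits 14:8 the min ratio */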
+ *max = (cap & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER;
+ *min = ((cap & GENMASK(14, 8)) >> 8) * UNCORE_FREQ_KHZ_MULTIPLIER;
+
+ return 0;
+}
+
+static int uncore_write_control_freq(struct uncore_data *data, unsigned int input,
+ unsigned int min_max)
+{
+ int ret;
+ u64 cap;
+
+ input /= UNCORE_FREQ_KHZ_MULTIPLIER;
+ if (!input || input > 0x7F)
+ return -EINVAL;
+
+ if (data->control_cpu < 0)
+ return -ENXIO;
+
+ ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
+ if (ret)
+ return ret;
+
+ if (min_max) {
+ cap &= ~0x7F;
+ cap |= input;
+ } else {
+ cap &= ~GENMASK(14, 8);
+ cap |= (input << 8);
+ }
+
+ ret = wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap);
+ if (ret)
+ return ret;
+
+ data->stored_uncore_data = cap;
+
+ return 0;
+}
+
+static int uncore_read_freq(struct uncore_data *data, unsigned int *freq)
+{
+ u64 ratio;
+ int ret;
+
+ if (data->control_cpu < 0)
+ return -ENXIO;
+
+ ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_PERF_STATUS, &ratio);
+ if (ret)
+ return ret;
+
+ *freq = (ratio & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER;
+
+ return 0;
+}
+
+/* Caller provides protection */
+static struct uncore_data *uncore_get_instance(unsigned int cpu)
+{
+ int id = topology_logical_die_id(cpu);
+
+ if (id >= 0 && id < uncore_max_entries)
+ return &uncore_instances[id];
+
+ return NULL;
+}
+
+static int uncore_event_cpu_online(unsigned int cpu)
+{
+ struct uncore_data *data;
+ int target;
+
+ /* Check if there is an online cpu in the package for uncore MSR */
+ target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
+ if (target < nr_cpu_ids)
+ return 0;
+
+ /* Use this CPU on this die as a control CPU */
+ cpumask_set_cpu(cpu, &uncore_cpu_mask);
+
+ data = uncore_get_instance(cpu);
+ if (!data)
+ return 0;
+
+ data->package_id = topology_physical_package_id(cpu);
+ data->die_id = topology_die_id(cpu);
+ data->domain_id = UNCORE_DOMAIN_ID_INVALID;
+
+ return uncore_freq_add_entry(data, cpu);
+}
+
+static int uncore_event_cpu_offline(unsigned int cpu)
+{
+ struct uncore_data *data;
+ int target;
+
+ data = uncore_get_instance(cpu);
+ if (!data)
+ return 0;
+
+ /* Check if existing cpu is used for uncore MSRs */
+ if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
+ return 0;
+
+ /* Find a new cpu to set uncore MSR */
+ target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
+
+ if (target < nr_cpu_ids) {
+ cpumask_set_cpu(target, &uncore_cpu_mask);
+ uncore_freq_add_entry(data, target);
+ } else {
+ uncore_freq_remove_die_entry(data);
+ }
+
+ return 0;
+}
+
+static int uncore_pm_notify(struct notifier_block *nb, unsigned long mode,
+ void *_unused)
+{
+ int i;
+
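+	/* On resume, restore the last user-programmed MSR_UNCORE_RATIO_LIMIT values */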
+ switch (mode) {
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+ for (i = 0; i < uncore_max_entries; ++i) {
+ struct uncore_data *data = &uncore_instances[i];
+
+ if (!data || !data->valid || !data->stored_uncore_data)
+ return 0;
+
+ wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT,
+ data->stored_uncore_data);
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block uncore_pm_nb = {
+ .notifier_call = uncore_pm_notify,
+};
+
+static const struct x86_cpu_id intel_uncore_cpu_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, NULL),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, intel_uncore_cpu_ids);
+
+static int __init intel_uncore_init(void)
+{
+ const struct x86_cpu_id *id;
+ int ret;
+
+ if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
+ return -ENODEV;
+
+ id = x86_match_cpu(intel_uncore_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ uncore_max_entries = topology_max_packages() *
+ topology_max_die_per_package();
+ uncore_instances = kcalloc(uncore_max_entries,
+ sizeof(*uncore_instances), GFP_KERNEL);
+ if (!uncore_instances)
+ return -ENOMEM;
+
+ ret = uncore_freq_common_init(uncore_read_control_freq, uncore_write_control_freq,
+ uncore_read_freq);
+ if (ret)
+ goto err_free;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "platform/x86/uncore-freq:online",
+ uncore_event_cpu_online,
+ uncore_event_cpu_offline);
+ if (ret < 0)
+ goto err_rem_kobj;
+
+ uncore_hp_state = ret;
+
+ ret = register_pm_notifier(&uncore_pm_nb);
+ if (ret)
+ goto err_rem_state;
+
+ return 0;
+
+err_rem_state:
+ cpuhp_remove_state(uncore_hp_state);
+err_rem_kobj:
+ uncore_freq_common_exit();
+err_free:
+ kfree(uncore_instances);
+
+ return ret;
+}
+module_init(intel_uncore_init)
+
+static void __exit intel_uncore_exit(void)
+{
+ int i;
+
+ unregister_pm_notifier(&uncore_pm_nb);
+ cpuhp_remove_state(uncore_hp_state);
+ for (i = 0; i < uncore_max_entries; ++i)
+ uncore_freq_remove_die_entry(&uncore_instances[i]);
+ uncore_freq_common_exit();
+ kfree(uncore_instances);
+}
+module_exit(intel_uncore_exit)
+
+MODULE_IMPORT_NS(INTEL_UNCORE_FREQUENCY);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Uncore Frequency Limits Driver");
diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c
new file mode 100644
index 0000000000..210b0a81b7
--- /dev/null
+++ b/drivers/platform/x86/intel/vbtn.c
@@ -0,0 +1,416 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Intel Virtual Button driver for Windows 8.1+
+ *
+ * Copyright (C) 2016 AceLan Kao <acelan.kao@canonical.com>
+ * Copyright (C) 2016 Alex Hung <alex.hung@canonical.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/suspend.h>
+#include "../dual_accel_detect.h"
+
+/* Returned when NOT in tablet mode on some HP Stream x360 11 models */
+#define VGBS_TABLET_MODE_FLAG_ALT 0x10
+/* When NOT in tablet mode, VGBS returns with the flag 0x40 */
+#define VGBS_TABLET_MODE_FLAG 0x40
+#define VGBS_DOCK_MODE_FLAG 0x80
+
+#define VGBS_TABLET_MODE_FLAGS (VGBS_TABLET_MODE_FLAG | VGBS_TABLET_MODE_FLAG_ALT)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("AceLan Kao");
+
+static const struct acpi_device_id intel_vbtn_ids[] = {
+ {"INT33D6", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, intel_vbtn_ids);
+
+/* In theory, these are HID usages. */
+static const struct key_entry intel_vbtn_keymap[] = {
+ { KE_KEY, 0xC0, { KEY_POWER } }, /* power key press */
+ { KE_IGNORE, 0xC1, { KEY_POWER } }, /* power key release */
+ { KE_KEY, 0xC2, { KEY_LEFTMETA } }, /* 'Windows' key press */
+ { KE_KEY, 0xC3, { KEY_LEFTMETA } }, /* 'Windows' key release */
+ { KE_KEY, 0xC4, { KEY_VOLUMEUP } }, /* volume-up key press */
+ { KE_IGNORE, 0xC5, { KEY_VOLUMEUP } }, /* volume-up key release */
+ { KE_KEY, 0xC6, { KEY_VOLUMEDOWN } }, /* volume-down key press */
+ { KE_IGNORE, 0xC7, { KEY_VOLUMEDOWN } }, /* volume-down key release */
+ { KE_KEY, 0xC8, { KEY_ROTATE_LOCK_TOGGLE } }, /* rotate-lock key press */
+ { KE_KEY, 0xC9, { KEY_ROTATE_LOCK_TOGGLE } }, /* rotate-lock key release */
+ { KE_END }
+};
+
+static const struct key_entry intel_vbtn_switchmap[] = {
+ /*
+ * SW_DOCK should only be reported for docking stations, but DSDTs using the
+ * intel-vbtn code always seem to use this for 2-in-1s / convertibles and set
+ * SW_DOCK=1 when in laptop-mode (in tandem with setting SW_TABLET_MODE=0).
+ * This causes userspace to think the laptop is docked to a port-replicator
+ * and to disable suspend-on-lid-close, which is undesirable.
+ * Map the dock events to KEY_IGNORE to avoid this broken SW_DOCK reporting.
+ */
+ { KE_IGNORE, 0xCA, { .sw = { SW_DOCK, 1 } } }, /* Docked */
+ { KE_IGNORE, 0xCB, { .sw = { SW_DOCK, 0 } } }, /* Undocked */
+ { KE_SW, 0xCC, { .sw = { SW_TABLET_MODE, 1 } } }, /* Tablet */
+ { KE_SW, 0xCD, { .sw = { SW_TABLET_MODE, 0 } } }, /* Laptop */
+ { KE_END }
+};
+
+struct intel_vbtn_priv {
+ struct input_dev *buttons_dev;
+ struct input_dev *switches_dev;
+ bool dual_accel;
+ bool has_buttons;
+ bool has_switches;
+ bool wakeup_mode;
+};
+
+static void detect_tablet_mode(struct device *dev)
+{
+ struct intel_vbtn_priv *priv = dev_get_drvdata(dev);
+ acpi_handle handle = ACPI_HANDLE(dev);
+ unsigned long long vgbs;
+ acpi_status status;
+ int m;
+
+ status = acpi_evaluate_integer(handle, "VGBS", NULL, &vgbs);
+ if (ACPI_FAILURE(status))
+ return;
+
+ m = !(vgbs & VGBS_TABLET_MODE_FLAGS);
+ input_report_switch(priv->switches_dev, SW_TABLET_MODE, m);
+ m = (vgbs & VGBS_DOCK_MODE_FLAG) ? 1 : 0;
+ input_report_switch(priv->switches_dev, SW_DOCK, m);
+
+ input_sync(priv->switches_dev);
+}
+
+/*
+ * Note this unconditionally creates the 2 input_dev-s and sets up
+ * the sparse-keymaps. Only the registration is conditional on
+ * has_buttons / has_switches. This is done so that the notify
+ * handler can always call sparse_keymap_entry_from_scancode()
+ * on the input_dev-s to determine the event type.
+ */
+static int intel_vbtn_input_setup(struct platform_device *device)
+{
+ struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
+ int ret;
+
+ priv->buttons_dev = devm_input_allocate_device(&device->dev);
+ if (!priv->buttons_dev)
+ return -ENOMEM;
+
+ ret = sparse_keymap_setup(priv->buttons_dev, intel_vbtn_keymap, NULL);
+ if (ret)
+ return ret;
+
+ priv->buttons_dev->dev.parent = &device->dev;
+ priv->buttons_dev->name = "Intel Virtual Buttons";
+ priv->buttons_dev->id.bustype = BUS_HOST;
+
+ if (priv->has_buttons) {
+ ret = input_register_device(priv->buttons_dev);
+ if (ret)
+ return ret;
+ }
+
+ priv->switches_dev = devm_input_allocate_device(&device->dev);
+ if (!priv->switches_dev)
+ return -ENOMEM;
+
+ ret = sparse_keymap_setup(priv->switches_dev, intel_vbtn_switchmap, NULL);
+ if (ret)
+ return ret;
+
+ priv->switches_dev->dev.parent = &device->dev;
+ priv->switches_dev->name = "Intel Virtual Switches";
+ priv->switches_dev->id.bustype = BUS_HOST;
+
+ if (priv->has_switches) {
+ detect_tablet_mode(&device->dev);
+
+ ret = input_register_device(priv->switches_dev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void notify_handler(acpi_handle handle, u32 event, void *context)
+{
+ struct platform_device *device = context;
+ struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
+ unsigned int val = !(event & 1); /* Even=press, Odd=release */
+ const struct key_entry *ke, *ke_rel;
+ struct input_dev *input_dev;
+ bool autorelease;
+ int ret;
+
+ if ((ke = sparse_keymap_entry_from_scancode(priv->buttons_dev, event))) {
+ if (!priv->has_buttons) {
+ dev_warn(&device->dev, "Warning: received a button event on a device without buttons, please report this.\n");
+ return;
+ }
+ input_dev = priv->buttons_dev;
+ } else if ((ke = sparse_keymap_entry_from_scancode(priv->switches_dev, event))) {
+ if (!priv->has_switches) {
+ /* See dual_accel_detect.h for more info */
+ if (priv->dual_accel)
+ return;
+
+ dev_info(&device->dev, "Registering Intel Virtual Switches input-dev after receiving a switch event\n");
+ ret = input_register_device(priv->switches_dev);
+ if (ret)
+ return;
+
+ priv->has_switches = true;
+ }
+ input_dev = priv->switches_dev;
+ } else {
+ dev_dbg(&device->dev, "unknown event index 0x%x\n", event);
+ return;
+ }
+
+ if (priv->wakeup_mode) {
+ pm_wakeup_hard_event(&device->dev);
+
+ /*
+ * Skip reporting an evdev event for button wake events,
+ * mirroring how the drivers/acpi/button.c code skips this too.
+ */
+ if (ke->type == KE_KEY)
+ return;
+ }
+
+ /*
+ * Even press events are autorelease if there is no corresponding odd
+ * release event, or if the odd event is KE_IGNORE.
+ */
+ ke_rel = sparse_keymap_entry_from_scancode(input_dev, event | 1);
+ autorelease = val && (!ke_rel || ke_rel->type == KE_IGNORE);
+
+ sparse_keymap_report_event(input_dev, event, val, autorelease);
+
+ /* Some devices need this to report further events */
+ acpi_evaluate_object(handle, "VBDL", NULL, NULL);
+}
+
+/*
+ * There are several laptops (non 2-in-1) models out there which support VGBS,
+ * but simply always return 0, which we translate to SW_TABLET_MODE=1. This in
+ * turn causes userspace (libinput) to suppress events from the builtin
+ * keyboard and touchpad, making the laptop essentially unusable.
+ *
+ * Since wrongly reporting SW_TABLET_MODE=1 in combination with libinput
+ * leads to an unusable system, whereas many people will not even notice
+ * when SW_TABLET_MODE is not being reported, a DMI based allow list is
+ * used here. This list mainly matches on the chassis-type of 2-in-1s.
+ *
+ * There are also some 2-in-1s which use the intel-vbtn ACPI interface to report
+ * SW_TABLET_MODE with a chassis-type of 8 ("Portable") or 10 ("Notebook"),
+ * these are matched on a per model basis, since many normal laptops with a
+ * possible broken VGBS ACPI-method also use these chassis-types.
+ */
+static const struct dmi_system_id dmi_switches_allow_list[] = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "31" /* Convertible */),
+ },
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "32" /* Detachable */),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7352"),
+ },
+ },
+ {} /* Array terminator */
+};
+
+static bool intel_vbtn_has_switches(acpi_handle handle, bool dual_accel)
+{
+ unsigned long long vgbs;
+ acpi_status status;
+
+ /* See dual_accel_detect.h for more info */
+ if (dual_accel)
+ return false;
+
+ if (!dmi_check_system(dmi_switches_allow_list))
+ return false;
+
+ status = acpi_evaluate_integer(handle, "VGBS", NULL, &vgbs);
+ return ACPI_SUCCESS(status);
+}
+
+static int intel_vbtn_probe(struct platform_device *device)
+{
+ acpi_handle handle = ACPI_HANDLE(&device->dev);
+ bool dual_accel, has_buttons, has_switches;
+ struct intel_vbtn_priv *priv;
+ acpi_status status;
+ int err;
+
+ dual_accel = dual_accel_detect();
+ has_buttons = acpi_has_method(handle, "VBDL");
+ has_switches = intel_vbtn_has_switches(handle, dual_accel);
+
+ if (!has_buttons && !has_switches) {
+ dev_warn(&device->dev, "failed to read Intel Virtual Button driver\n");
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(&device->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ dev_set_drvdata(&device->dev, priv);
+
+ priv->dual_accel = dual_accel;
+ priv->has_buttons = has_buttons;
+ priv->has_switches = has_switches;
+
+ err = intel_vbtn_input_setup(device);
+ if (err) {
+ pr_err("Failed to setup Intel Virtual Button\n");
+ return err;
+ }
+
+ status = acpi_install_notify_handler(handle,
+ ACPI_DEVICE_NOTIFY,
+ notify_handler,
+ device);
+ if (ACPI_FAILURE(status))
+ return -EBUSY;
+
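+	/* Evaluate VBDL once at probe; some devices need this before they report events */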
+ if (has_buttons) {
+ status = acpi_evaluate_object(handle, "VBDL", NULL, NULL);
+ if (ACPI_FAILURE(status))
+ dev_err(&device->dev, "Error VBDL failed with ACPI status %d\n", status);
+ }
+
+ device_init_wakeup(&device->dev, true);
+ /*
+ * In order for system wakeup to work, the EC GPE has to be marked as
+ * a wakeup one, so do that here (this setting will persist, but it has
+ * no effect until the wakeup mask is set for the EC GPE).
+ */
+ acpi_ec_mark_gpe_for_wake();
+ return 0;
+}
+
+static void intel_vbtn_remove(struct platform_device *device)
+{
+ acpi_handle handle = ACPI_HANDLE(&device->dev);
+
+ device_init_wakeup(&device->dev, false);
+ acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler);
+}
+
+static int intel_vbtn_pm_prepare(struct device *dev)
+{
+ if (device_may_wakeup(dev)) {
+ struct intel_vbtn_priv *priv = dev_get_drvdata(dev);
+
+ priv->wakeup_mode = true;
+ }
+ return 0;
+}
+
+static void intel_vbtn_pm_complete(struct device *dev)
+{
+ struct intel_vbtn_priv *priv = dev_get_drvdata(dev);
+
+ priv->wakeup_mode = false;
+}
+
+static int intel_vbtn_pm_resume(struct device *dev)
+{
+ struct intel_vbtn_priv *priv = dev_get_drvdata(dev);
+
+ intel_vbtn_pm_complete(dev);
+
+ if (priv->has_switches)
+ detect_tablet_mode(dev);
+
+ return 0;
+}
+
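+/*
+ * resume, thaw and restore share one handler: the tablet-mode switch may
+ * have been flipped while the system was suspended or powered down, so its
+ * state is re-read and re-reported after any of these transitions.
+ */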
+static const struct dev_pm_ops intel_vbtn_pm_ops = {
+ .prepare = intel_vbtn_pm_prepare,
+ .complete = intel_vbtn_pm_complete,
+ .resume = intel_vbtn_pm_resume,
+ .restore = intel_vbtn_pm_resume,
+ .thaw = intel_vbtn_pm_resume,
+};
+
+static struct platform_driver intel_vbtn_pl_driver = {
+ .driver = {
+ .name = "intel-vbtn",
+ .acpi_match_table = intel_vbtn_ids,
+ .pm = &intel_vbtn_pm_ops,
+ },
+ .probe = intel_vbtn_probe,
+ .remove_new = intel_vbtn_remove,
+};
+
+static acpi_status __init
+check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv)
+{
+ const struct acpi_device_id *ids = context;
+ struct acpi_device *dev = acpi_fetch_acpi_dev(handle);
+
+ if (dev && acpi_match_device_ids(dev, ids) == 0)
+ if (!IS_ERR_OR_NULL(acpi_create_platform_device(dev, NULL)))
+ dev_info(&dev->dev,
+ "intel-vbtn: created platform device\n");
+
+ return AE_OK;
+}
+
+static int __init intel_vbtn_init(void)
+{
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, check_acpi_dev, NULL,
+ (void *)intel_vbtn_ids, NULL);
+
+ return platform_driver_register(&intel_vbtn_pl_driver);
+}
+module_init(intel_vbtn_init);
+
+static void __exit intel_vbtn_exit(void)
+{
+ platform_driver_unregister(&intel_vbtn_pl_driver);
+}
+module_exit(intel_vbtn_exit);
diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
new file mode 100644
index 0000000000..343ab6a82c
--- /dev/null
+++ b/drivers/platform/x86/intel/vsec.c
@@ -0,0 +1,518 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Vendor Specific Extended Capabilities auxiliary bus driver
+ *
+ * Copyright (c) 2021, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Author: David E. Box <david.e.box@linux.intel.com>
+ *
+ * This driver discovers and creates auxiliary devices for Intel defined PCIe
+ * "Vendor Specific" and "Designated Vendor Specific" Extended Capabilities,
+ * VSEC and DVSEC respectively. The driver supports features on specific PCIe
+ * endpoints that exist primarily to expose them.
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include "vsec.h"
+
+/* Intel DVSEC offsets */
+#define INTEL_DVSEC_ENTRIES 0xA
+#define INTEL_DVSEC_SIZE 0xB
+#define INTEL_DVSEC_TABLE 0xC
+#define INTEL_DVSEC_TABLE_BAR(x) ((x) & GENMASK(2, 0))
+#define INTEL_DVSEC_TABLE_OFFSET(x) ((x) & GENMASK(31, 3))
+#define TABLE_OFFSET_SHIFT 3
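+
+/*
+ * The Intel-defined fields parsed below sit at the same byte offsets from
+ * the capability start in both the VSEC and DVSEC layouts:
+ *
+ *	0xA: u8  number of discovery table instances
+ *	0xB: u8  size of each discovery table, in dwords
+ *	0xC: u32 bits [2:0] = table BAR index, bits [31:3] = table offset
+ */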
+#define PMT_XA_START 0
+#define PMT_XA_MAX INT_MAX
+#define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX)
+
+static DEFINE_IDA(intel_vsec_ida);
+static DEFINE_IDA(intel_vsec_sdsi_ida);
+static DEFINE_XARRAY_ALLOC(auxdev_array);
+
+/**
+ * struct intel_vsec_header - Common fields of Intel VSEC and DVSEC registers.
+ * @rev: Revision ID of the VSEC/DVSEC register space
+ * @length: Length of the VSEC/DVSEC register space
+ * @id: ID of the feature
+ * @num_entries: Number of instances of the feature
+ * @entry_size: Size of the discovery table for each feature
+ * @tbir: BAR containing the discovery tables
+ * @offset: BAR offset of start of the first discovery table
+ */
+struct intel_vsec_header {
+ u8 rev;
+ u16 length;
+ u16 id;
+ u8 num_entries;
+ u8 entry_size;
+ u8 tbir;
+ u32 offset;
+};
+
+enum intel_vsec_id {
+ VSEC_ID_TELEMETRY = 2,
+ VSEC_ID_WATCHER = 3,
+ VSEC_ID_CRASHLOG = 4,
+ VSEC_ID_SDSI = 65,
+ VSEC_ID_TPMI = 66,
+};
+
+static const char *intel_vsec_name(enum intel_vsec_id id)
+{
+ switch (id) {
+ case VSEC_ID_TELEMETRY:
+ return "telemetry";
+
+ case VSEC_ID_WATCHER:
+ return "watcher";
+
+ case VSEC_ID_CRASHLOG:
+ return "crashlog";
+
+ case VSEC_ID_SDSI:
+ return "sdsi";
+
+ case VSEC_ID_TPMI:
+ return "tpmi";
+
+ default:
+ return NULL;
+ }
+}
+
+static bool intel_vsec_supported(u16 id, unsigned long caps)
+{
+ switch (id) {
+ case VSEC_ID_TELEMETRY:
+ return !!(caps & VSEC_CAP_TELEMETRY);
+ case VSEC_ID_WATCHER:
+ return !!(caps & VSEC_CAP_WATCHER);
+ case VSEC_ID_CRASHLOG:
+ return !!(caps & VSEC_CAP_CRASHLOG);
+ case VSEC_ID_SDSI:
+ return !!(caps & VSEC_CAP_SDSI);
+ case VSEC_ID_TPMI:
+ return !!(caps & VSEC_CAP_TPMI);
+ default:
+ return false;
+ }
+}
+
+static void intel_vsec_remove_aux(void *data)
+{
+ auxiliary_device_delete(data);
+ auxiliary_device_uninit(data);
+}
+
+static DEFINE_MUTEX(vsec_ida_lock);
+
+static void intel_vsec_dev_release(struct device *dev)
+{
+ struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(dev);
+
+ xa_erase(&auxdev_array, intel_vsec_dev->id);
+
+ mutex_lock(&vsec_ida_lock);
+ ida_free(intel_vsec_dev->ida, intel_vsec_dev->auxdev.id);
+ mutex_unlock(&vsec_ida_lock);
+
+ kfree(intel_vsec_dev->resource);
+ kfree(intel_vsec_dev);
+}
+
+int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent,
+ struct intel_vsec_device *intel_vsec_dev,
+ const char *name)
+{
+ struct auxiliary_device *auxdev = &intel_vsec_dev->auxdev;
+ int ret, id;
+
+ ret = xa_alloc(&auxdev_array, &intel_vsec_dev->id, intel_vsec_dev,
+ PMT_XA_LIMIT, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(intel_vsec_dev->resource);
+ kfree(intel_vsec_dev);
+ return ret;
+ }
+
+ mutex_lock(&vsec_ida_lock);
+ id = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL);
+ mutex_unlock(&vsec_ida_lock);
+ if (id < 0) {
+ xa_erase(&auxdev_array, intel_vsec_dev->id);
+ kfree(intel_vsec_dev->resource);
+ kfree(intel_vsec_dev);
+ return id;
+ }
+
+ if (!parent)
+ parent = &pdev->dev;
+
+ auxdev->id = id;
+ auxdev->name = name;
+ auxdev->dev.parent = parent;
+ auxdev->dev.release = intel_vsec_dev_release;
+
+ ret = auxiliary_device_init(auxdev);
+ if (ret < 0) {
+ intel_vsec_dev_release(&auxdev->dev);
+ return ret;
+ }
+
+ ret = auxiliary_device_add(auxdev);
+ if (ret < 0) {
+ auxiliary_device_uninit(auxdev);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(parent, intel_vsec_remove_aux,
+ auxdev);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(intel_vsec_add_aux, INTEL_VSEC);
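+
+/*
+ * A minimal caller sketch (illustrative only, not part of this driver; the
+ * foo_ names are hypothetical). intel_vsec_add_aux() takes ownership of the
+ * kzalloc'ed device: it is freed via the release callback on success and
+ * freed internally on every error path, so the caller must never free it.
+ * The resource array describing the discovery tables would be filled in
+ * before the call:
+ *
+ *	static int foo_add_telemetry(struct pci_dev *pdev, struct ida *ida)
+ *	{
+ *		struct intel_vsec_device *ivdev;
+ *
+ *		ivdev = kzalloc(sizeof(*ivdev), GFP_KERNEL);
+ *		if (!ivdev)
+ *			return -ENOMEM;
+ *
+ *		ivdev->pcidev = pdev;
+ *		ivdev->ida = ida;
+ *		return intel_vsec_add_aux(pdev, NULL, ivdev, "telemetry");
+ *	}
+ */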
+
+static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *header,
+ struct intel_vsec_platform_info *info)
+{
+ struct intel_vsec_device *intel_vsec_dev;
+ struct resource *res, *tmp;
+ unsigned long quirks = info->quirks;
+ int i;
+
+ if (!intel_vsec_supported(header->id, info->caps))
+ return -EINVAL;
+
+ if (!header->num_entries) {
+ dev_dbg(&pdev->dev, "Invalid 0 entry count for header id %d\n", header->id);
+ return -EINVAL;
+ }
+
+ if (!header->entry_size) {
+ dev_dbg(&pdev->dev, "Invalid 0 entry size for header id %d\n", header->id);
+ return -EINVAL;
+ }
+
+ intel_vsec_dev = kzalloc(sizeof(*intel_vsec_dev), GFP_KERNEL);
+ if (!intel_vsec_dev)
+ return -ENOMEM;
+
+ res = kcalloc(header->num_entries, sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ kfree(intel_vsec_dev);
+ return -ENOMEM;
+ }
+
+ if (quirks & VSEC_QUIRK_TABLE_SHIFT)
+ header->offset >>= TABLE_OFFSET_SHIFT;
+
+ /*
+ * The DVSEC/VSEC contains the starting offset and count for a block of
+ * discovery tables. Create a resource array describing these tables for
+ * the auxiliary device driver. Each table occupies entry_size dwords, so
+ * e.g. entry_size = 3 yields 12-byte tables laid out back to back.
+ */
+ for (i = 0, tmp = res; i < header->num_entries; i++, tmp++) {
+ tmp->start = pdev->resource[header->tbir].start +
+ header->offset + i * (header->entry_size * sizeof(u32));
+ tmp->end = tmp->start + (header->entry_size * sizeof(u32)) - 1;
+ tmp->flags = IORESOURCE_MEM;
+ }
+
+ intel_vsec_dev->pcidev = pdev;
+ intel_vsec_dev->resource = res;
+ intel_vsec_dev->num_resources = header->num_entries;
+ intel_vsec_dev->info = info;
+
+ if (header->id == VSEC_ID_SDSI)
+ intel_vsec_dev->ida = &intel_vsec_sdsi_ida;
+ else
+ intel_vsec_dev->ida = &intel_vsec_ida;
+
+ return intel_vsec_add_aux(pdev, NULL, intel_vsec_dev,
+ intel_vsec_name(header->id));
+}
+
+static bool intel_vsec_walk_header(struct pci_dev *pdev,
+ struct intel_vsec_platform_info *info)
+{
+ struct intel_vsec_header **header = info->headers;
+ bool have_devices = false;
+ int ret;
+
+ for ( ; *header; header++) {
+ ret = intel_vsec_add_dev(pdev, *header, info);
+ if (ret)
+ dev_info(&pdev->dev, "Could not add device for VSEC id %d\n",
+ (*header)->id);
+ else
+ have_devices = true;
+ }
+
+ return have_devices;
+}
+
+static bool intel_vsec_walk_dvsec(struct pci_dev *pdev,
+ struct intel_vsec_platform_info *info)
+{
+ bool have_devices = false;
+ int pos = 0;
+
+ do {
+ struct intel_vsec_header header;
+ u32 table, hdr;
+ u16 vid;
+ int ret;
+
+ pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_DVSEC);
+ if (!pos)
+ break;
+
+ pci_read_config_dword(pdev, pos + PCI_DVSEC_HEADER1, &hdr);
+ vid = PCI_DVSEC_HEADER1_VID(hdr);
+ if (vid != PCI_VENDOR_ID_INTEL)
+ continue;
+
+ /* Support only revision 1 */
+ header.rev = PCI_DVSEC_HEADER1_REV(hdr);
+ if (header.rev != 1) {
+ dev_info(&pdev->dev, "Unsupported DVSEC revision %d\n", header.rev);
+ continue;
+ }
+
+ header.length = PCI_DVSEC_HEADER1_LEN(hdr);
+
+ pci_read_config_byte(pdev, pos + INTEL_DVSEC_ENTRIES, &header.num_entries);
+ pci_read_config_byte(pdev, pos + INTEL_DVSEC_SIZE, &header.entry_size);
+ pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE, &table);
+
+ header.tbir = INTEL_DVSEC_TABLE_BAR(table);
+ header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
+
+ pci_read_config_dword(pdev, pos + PCI_DVSEC_HEADER2, &hdr);
+ header.id = PCI_DVSEC_HEADER2_ID(hdr);
+
+ ret = intel_vsec_add_dev(pdev, &header, info);
+ if (ret)
+ continue;
+
+ have_devices = true;
+ } while (true);
+
+ return have_devices;
+}
+
+static bool intel_vsec_walk_vsec(struct pci_dev *pdev,
+ struct intel_vsec_platform_info *info)
+{
+ bool have_devices = false;
+ int pos = 0;
+
+ do {
+ struct intel_vsec_header header;
+ u32 table, hdr;
+ int ret;
+
+ pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_VNDR);
+ if (!pos)
+ break;
+
+ pci_read_config_dword(pdev, pos + PCI_VNDR_HEADER, &hdr);
+
+ /* Support only revision 1 */
+ header.rev = PCI_VNDR_HEADER_REV(hdr);
+ if (header.rev != 1) {
+ dev_info(&pdev->dev, "Unsupported VSEC revision %d\n", header.rev);
+ continue;
+ }
+
+ header.id = PCI_VNDR_HEADER_ID(hdr);
+ header.length = PCI_VNDR_HEADER_LEN(hdr);
+
+ /* entry, size, and table offset are the same as DVSEC */
+ pci_read_config_byte(pdev, pos + INTEL_DVSEC_ENTRIES, &header.num_entries);
+ pci_read_config_byte(pdev, pos + INTEL_DVSEC_SIZE, &header.entry_size);
+ pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE, &table);
+
+ header.tbir = INTEL_DVSEC_TABLE_BAR(table);
+ header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
+
+ ret = intel_vsec_add_dev(pdev, &header, info);
+ if (ret)
+ continue;
+
+ have_devices = true;
+ } while (true);
+
+ return have_devices;
+}
+
+static int intel_vsec_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct intel_vsec_platform_info *info;
+ bool have_devices = false;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_save_state(pdev);
+ info = (struct intel_vsec_platform_info *)id->driver_data;
+ if (!info)
+ return -EINVAL;
+
+ if (intel_vsec_walk_dvsec(pdev, info))
+ have_devices = true;
+
+ if (intel_vsec_walk_vsec(pdev, info))
+ have_devices = true;
+
+ if ((info->quirks & VSEC_QUIRK_NO_DVSEC) &&
+ intel_vsec_walk_header(pdev, info))
+ have_devices = true;
+
+ if (!have_devices)
+ return -ENODEV;
+
+ return 0;
+}
+
+/* DG1 info */
+static struct intel_vsec_header dg1_header = {
+ .length = 0x10,
+ .id = 2,
+ .num_entries = 1,
+ .entry_size = 3,
+ .tbir = 0,
+ .offset = 0x466000,
+};
+
+static struct intel_vsec_header *dg1_headers[] = {
+ &dg1_header,
+ NULL
+};
+
+static const struct intel_vsec_platform_info dg1_info = {
+ .caps = VSEC_CAP_TELEMETRY,
+ .headers = dg1_headers,
+ .quirks = VSEC_QUIRK_NO_DVSEC | VSEC_QUIRK_EARLY_HW,
+};
+
+/* MTL info */
+static const struct intel_vsec_platform_info mtl_info = {
+ .caps = VSEC_CAP_TELEMETRY,
+};
+
+/* OOBMSM info */
+static const struct intel_vsec_platform_info oobmsm_info = {
+ .caps = VSEC_CAP_TELEMETRY | VSEC_CAP_SDSI | VSEC_CAP_TPMI,
+};
+
+/* TGL info */
+static const struct intel_vsec_platform_info tgl_info = {
+ .caps = VSEC_CAP_TELEMETRY,
+ .quirks = VSEC_QUIRK_TABLE_SHIFT | VSEC_QUIRK_EARLY_HW,
+};
+
+#define PCI_DEVICE_ID_INTEL_VSEC_ADL 0x467d
+#define PCI_DEVICE_ID_INTEL_VSEC_DG1 0x490e
+#define PCI_DEVICE_ID_INTEL_VSEC_MTL_M 0x7d0d
+#define PCI_DEVICE_ID_INTEL_VSEC_MTL_S 0xad0d
+#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM 0x09a7
+#define PCI_DEVICE_ID_INTEL_VSEC_RPL 0xa77d
+#define PCI_DEVICE_ID_INTEL_VSEC_TGL 0x9a0d
+static const struct pci_device_id intel_vsec_pci_ids[] = {
+ { PCI_DEVICE_DATA(INTEL, VSEC_ADL, &tgl_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_DG1, &dg1_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_MTL_M, &mtl_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_MTL_S, &mtl_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, &oobmsm_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_RPL, &tgl_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, intel_vsec_pci_ids);
+
+static pci_ers_result_t intel_vsec_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ pci_ers_result_t status = PCI_ERS_RESULT_NEED_RESET;
+
+ dev_info(&pdev->dev, "PCI error detected, state %d\n", state);
+
+ if (state == pci_channel_io_perm_failure)
+ status = PCI_ERS_RESULT_DISCONNECT;
+ else
+ pci_disable_device(pdev);
+
+ return status;
+}
+
+static pci_ers_result_t intel_vsec_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct intel_vsec_device *intel_vsec_dev;
+ pci_ers_result_t status = PCI_ERS_RESULT_DISCONNECT;
+ const struct pci_device_id *pci_dev_id;
+ unsigned long index;
+
+ dev_info(&pdev->dev, "Resetting PCI slot\n");
+
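+ /* Allow the device time to settle after the slot reset before re-enabling it */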
+ msleep(2000);
+ if (pci_enable_device(pdev)) {
+ dev_info(&pdev->dev,
+ "Failed to re-enable PCI device after reset.\n");
+ goto out;
+ }
+
+ status = PCI_ERS_RESULT_RECOVERED;
+
+ xa_for_each(&auxdev_array, index, intel_vsec_dev) {
+ /* Skip auxiliary devices that do not belong to this PCI device */
+ if (pdev != intel_vsec_dev->pcidev)
+ continue;
+ devm_release_action(&pdev->dev, intel_vsec_remove_aux,
+ &intel_vsec_dev->auxdev);
+ }
+ pci_disable_device(pdev);
+ pci_restore_state(pdev);
+ pci_dev_id = pci_match_id(intel_vsec_pci_ids, pdev);
+ intel_vsec_pci_probe(pdev, pci_dev_id);
+
+out:
+ return status;
+}
+
+static void intel_vsec_pci_resume(struct pci_dev *pdev)
+{
+ dev_info(&pdev->dev, "Done resuming PCI device\n");
+}
+
+static const struct pci_error_handlers intel_vsec_pci_err_handlers = {
+ .error_detected = intel_vsec_pci_error_detected,
+ .slot_reset = intel_vsec_pci_slot_reset,
+ .resume = intel_vsec_pci_resume,
+};
+
+static struct pci_driver intel_vsec_pci_driver = {
+ .name = "intel_vsec",
+ .id_table = intel_vsec_pci_ids,
+ .probe = intel_vsec_pci_probe,
+ .err_handler = &intel_vsec_pci_err_handlers,
+};
+module_pci_driver(intel_vsec_pci_driver);
+
+MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Extended Capabilities auxiliary bus driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/vsec.h b/drivers/platform/x86/intel/vsec.h
new file mode 100644
index 0000000000..0a6201b4a0
--- /dev/null
+++ b/drivers/platform/x86/intel/vsec.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _VSEC_H
+#define _VSEC_H
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bits.h>
+
+#define VSEC_CAP_TELEMETRY BIT(0)
+#define VSEC_CAP_WATCHER BIT(1)
+#define VSEC_CAP_CRASHLOG BIT(2)
+#define VSEC_CAP_SDSI BIT(3)
+#define VSEC_CAP_TPMI BIT(4)
+
+struct pci_dev;
+struct resource;
+
+enum intel_vsec_quirks {
+ /* Watcher feature not supported */
+ VSEC_QUIRK_NO_WATCHER = BIT(0),
+
+ /* Crashlog feature not supported */
+ VSEC_QUIRK_NO_CRASHLOG = BIT(1),
+
+ /* Use shift instead of mask to read discovery table offset */
+ VSEC_QUIRK_TABLE_SHIFT = BIT(2),
+
+ /* DVSEC not present (provided in driver data) */
+ VSEC_QUIRK_NO_DVSEC = BIT(3),
+
+ /* Platforms requiring quirk in the auxiliary driver */
+ VSEC_QUIRK_EARLY_HW = BIT(4),
+};
+
+/* Platform specific data */
+struct intel_vsec_platform_info {
+ struct intel_vsec_header **headers;
+ unsigned long caps;
+ unsigned long quirks;
+};
+
+struct intel_vsec_device {
+ struct auxiliary_device auxdev;
+ struct pci_dev *pcidev;
+ struct resource *resource;
+ struct ida *ida;
+ struct intel_vsec_platform_info *info;
+ int num_resources;
+ int id; /* index allocated in auxdev_array */
+ void *priv_data;
+ size_t priv_data_size;
+};
+
+int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent,
+ struct intel_vsec_device *intel_vsec_dev,
+ const char *name);
+
+static inline struct intel_vsec_device *dev_to_ivdev(struct device *dev)
+{
+ return container_of(dev, struct intel_vsec_device, auxdev.dev);
+}
+
+static inline struct intel_vsec_device *auxdev_to_ivdev(struct auxiliary_device *auxdev)
+{
+ return container_of(auxdev, struct intel_vsec_device, auxdev);
+}
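+
+/*
+ * Consumer-side sketch (illustrative only; foo_probe and foo_parse_table
+ * are hypothetical): an auxiliary driver bound to one of these devices can
+ * recover the intel_vsec_device, and with it the discovery-table resources,
+ * from the generic auxiliary_device it is handed:
+ *
+ *	static int foo_probe(struct auxiliary_device *auxdev,
+ *			     const struct auxiliary_device_id *id)
+ *	{
+ *		struct intel_vsec_device *ivdev = auxdev_to_ivdev(auxdev);
+ *		struct resource *res = &ivdev->resource[0];
+ *
+ *		return foo_parse_table(ivdev->pcidev, res);
+ *	}
+ */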
+#endif
diff --git a/drivers/platform/x86/intel/wmi/Kconfig b/drivers/platform/x86/intel/wmi/Kconfig
new file mode 100644
index 0000000000..8e159f7121
--- /dev/null
+++ b/drivers/platform/x86/intel/wmi/Kconfig
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Intel x86 Platform Specific Drivers
+#
+
+config INTEL_WMI
+ bool
+
+config INTEL_WMI_SBL_FW_UPDATE
+ tristate "Intel WMI Slim Bootloader firmware update signaling driver"
+ depends on ACPI_WMI
+ select INTEL_WMI
+ help
+ Say Y here if you want to be able to use the WMI interface to signal
+ Slim Bootloader to trigger an update on the next reboot.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel-wmi-sbl-fw-update.
+
+config INTEL_WMI_THUNDERBOLT
+ tristate "Intel WMI thunderbolt force power driver"
+ depends on ACPI_WMI
+ select INTEL_WMI
+ help
+ Say Y here if you want to be able to use the WMI interface on select
+ systems to force the power control of Intel Thunderbolt controllers.
+ This is useful for updating the firmware when devices are not plugged
+ into the controller.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel-wmi-thunderbolt.
diff --git a/drivers/platform/x86/intel/wmi/Makefile b/drivers/platform/x86/intel/wmi/Makefile
new file mode 100644
index 0000000000..c2d56d25de
--- /dev/null
+++ b/drivers/platform/x86/intel/wmi/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Intel x86 Platform Specific Drivers
+#
+
+intel-wmi-sbl-fw-update-y := sbl-fw-update.o
+obj-$(CONFIG_INTEL_WMI_SBL_FW_UPDATE) += intel-wmi-sbl-fw-update.o
+intel-wmi-thunderbolt-y := thunderbolt.o
+obj-$(CONFIG_INTEL_WMI_THUNDERBOLT) += intel-wmi-thunderbolt.o
diff --git a/drivers/platform/x86/intel/wmi/sbl-fw-update.c b/drivers/platform/x86/intel/wmi/sbl-fw-update.c
new file mode 100644
index 0000000000..3c86e0108a
--- /dev/null
+++ b/drivers/platform/x86/intel/wmi/sbl-fw-update.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Slim Bootloader (SBL) firmware update signaling driver
+ *
+ * Slim Bootloader is a small, open-source, non-UEFI-compliant boot firmware
+ * optimized for running on certain Intel platforms.
+ *
+ * SBL exposes an ACPI-WMI device via /sys/bus/wmi/devices/<INTEL_WMI_SBL_GUID>.
+ * This driver adds a "firmware_update_request" device attribute to it.
+ * The attribute normally reads as 0; userspace can signal SBL to update
+ * the firmware on the next reboot by writing 1.
+ *
+ * More details of the SBL firmware update process are available at:
+ * https://slimbootloader.github.io/security/firmware-update.html
+ */
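+
+/*
+ * Minimal userspace sketch (illustrative only, not part of this driver) of
+ * the signaling described above, assuming the default sysfs mount point:
+ *
+ *	#include <fcntl.h>
+ *	#include <unistd.h>
+ *
+ *	int request_sbl_update(void)
+ *	{
+ *		int fd = open("/sys/bus/wmi/devices/"
+ *			      "44FADEB1-B204-40F2-8581-394BBDC1B651/"
+ *			      "firmware_update_request", O_WRONLY);
+ *
+ *		if (fd < 0)
+ *			return -1;
+ *		if (write(fd, "1", 1) != 1) {
+ *			close(fd);
+ *			return -1;
+ *		}
+ *		return close(fd);
+ *	}
+ */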
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/wmi.h>
+
+#define INTEL_WMI_SBL_GUID "44FADEB1-B204-40F2-8581-394BBDC1B651"
+
+static int get_fwu_request(struct device *dev, u32 *out)
+{
+ struct acpi_buffer result = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *obj;
+ acpi_status status;
+
+ status = wmi_query_block(INTEL_WMI_SBL_GUID, 0, &result);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "wmi_query_block failed\n");
+ return -ENODEV;
+ }
+
+ obj = (union acpi_object *)result.pointer;
+ if (!obj || obj->type != ACPI_TYPE_INTEGER) {
+ dev_warn(dev, "wmi_query_block returned an invalid value\n");
+ kfree(obj);
+ return -EINVAL;
+ }
+
+ *out = obj->integer.value;
+ kfree(obj);
+
+ return 0;
+}
+
+static int set_fwu_request(struct device *dev, u32 in)
+{
+ struct acpi_buffer input;
+ acpi_status status;
+ u32 value;
+
+ value = in;
+ input.length = sizeof(u32);
+ input.pointer = &value;
+
+ status = wmi_set_block(INTEL_WMI_SBL_GUID, 0, &input);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "wmi_set_block failed\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static ssize_t firmware_update_request_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 val;
+ int ret;
+
+ ret = get_fwu_request(dev, &val);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%u\n", val);
+}
+
+static ssize_t firmware_update_request_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int val;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ /* May later be extended to support values other than 0 and 1 */
+ if (val > 1)
+ return -ERANGE;
+
+ ret = set_fwu_request(dev, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR_RW(firmware_update_request);
+
+static struct attribute *firmware_update_attrs[] = {
+ &dev_attr_firmware_update_request.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(firmware_update);
+
+static int intel_wmi_sbl_fw_update_probe(struct wmi_device *wdev,
+ const void *context)
+{
+ dev_info(&wdev->dev, "Slim Bootloader signaling driver attached\n");
+ return 0;
+}
+
+static void intel_wmi_sbl_fw_update_remove(struct wmi_device *wdev)
+{
+ dev_info(&wdev->dev, "Slim Bootloader signaling driver removed\n");
+}
+
+static const struct wmi_device_id intel_wmi_sbl_id_table[] = {
+ { .guid_string = INTEL_WMI_SBL_GUID },
+ {}
+};
+MODULE_DEVICE_TABLE(wmi, intel_wmi_sbl_id_table);
+
+static struct wmi_driver intel_wmi_sbl_fw_update_driver = {
+ .driver = {
+ .name = "intel-wmi-sbl-fw-update",
+ .dev_groups = firmware_update_groups,
+ },
+ .probe = intel_wmi_sbl_fw_update_probe,
+ .remove = intel_wmi_sbl_fw_update_remove,
+ .id_table = intel_wmi_sbl_id_table,
+};
+module_wmi_driver(intel_wmi_sbl_fw_update_driver);
+
+MODULE_AUTHOR("Jithu Joseph <jithu.joseph@intel.com>");
+MODULE_DESCRIPTION("Slim Bootloader firmware update signaling driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/wmi/thunderbolt.c b/drivers/platform/x86/intel/wmi/thunderbolt.c
new file mode 100644
index 0000000000..fc333ff82d
--- /dev/null
+++ b/drivers/platform/x86/intel/wmi/thunderbolt.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * WMI Thunderbolt driver
+ *
+ * Copyright (C) 2017 Dell Inc. All Rights Reserved.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/wmi.h>
+
+#define INTEL_WMI_THUNDERBOLT_GUID "86CCFD48-205E-4A77-9C48-2021CBEDE341"
+
+static ssize_t force_power_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_buffer input;
+ acpi_status status;
+ u8 mode;
+
+ input.length = sizeof(u8);
+ input.pointer = &mode;
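+ /*
+ * hex_to_bin() returns -1 on a non-hex digit; stored into the u8 it
+ * wraps to 0xff and is rejected by the 0/1 check below.
+ */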
+ mode = hex_to_bin(buf[0]);
+ dev_dbg(dev, "force_power: storing %#x\n", mode);
+ if (mode == 0 || mode == 1) {
+ status = wmi_evaluate_method(INTEL_WMI_THUNDERBOLT_GUID, 0, 1,
+ &input, NULL);
+ if (ACPI_FAILURE(status)) {
+ dev_dbg(dev, "force_power: failed to evaluate ACPI method\n");
+ return -ENODEV;
+ }
+ } else {
+ dev_dbg(dev, "force_power: unsupported mode\n");
+ return -EINVAL;
+ }
+ return count;
+}
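+
+/*
+ * Illustrative userspace sketch (not part of this driver): controller power
+ * is forced by writing '1' or '0' to the write-only attribute created
+ * below, assuming the default sysfs mount point:
+ *
+ *	int fd = open("/sys/bus/wmi/devices/"
+ *		      "86CCFD48-205E-4A77-9C48-2021CBEDE341/"
+ *		      "force_power", O_WRONLY);
+ *	if (fd >= 0) {
+ *		write(fd, "1", 1);
+ *		close(fd);
+ *	}
+ */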
+
+static DEVICE_ATTR_WO(force_power);
+
+static struct attribute *tbt_attrs[] = {
+ &dev_attr_force_power.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(tbt);
+
+static const struct wmi_device_id intel_wmi_thunderbolt_id_table[] = {
+ { .guid_string = INTEL_WMI_THUNDERBOLT_GUID },
+ { },
+};
+
+static struct wmi_driver intel_wmi_thunderbolt_driver = {
+ .driver = {
+ .name = "intel-wmi-thunderbolt",
+ .dev_groups = tbt_groups,
+ },
+ .id_table = intel_wmi_thunderbolt_id_table,
+};
+
+module_wmi_driver(intel_wmi_thunderbolt_driver);
+
+MODULE_DEVICE_TABLE(wmi, intel_wmi_thunderbolt_id_table);
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
+MODULE_DESCRIPTION("Intel WMI Thunderbolt force power driver");
+MODULE_LICENSE("GPL v2");