author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/platform/x86/amd
parent    Initial commit. (diff)
Adding upstream version 6.6.15. (tag: upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/platform/x86/amd')
-rw-r--r--  drivers/platform/x86/amd/Kconfig          |   20
-rw-r--r--  drivers/platform/x86/amd/Makefile         |   10
-rw-r--r--  drivers/platform/x86/amd/hsmp.c           |  423
-rw-r--r--  drivers/platform/x86/amd/pmc/Kconfig      |   20
-rw-r--r--  drivers/platform/x86/amd/pmc/Makefile     |    8
-rw-r--r--  drivers/platform/x86/amd/pmc/pmc-quirks.c |  265
-rw-r--r--  drivers/platform/x86/amd/pmc/pmc.c        | 1089
-rw-r--r--  drivers/platform/x86/amd/pmc/pmc.h        |   56
-rw-r--r--  drivers/platform/x86/amd/pmf/Kconfig      |   29
-rw-r--r--  drivers/platform/x86/amd/pmf/Makefile     |    9
-rw-r--r--  drivers/platform/x86/amd/pmf/acpi.c       |  325
-rw-r--r--  drivers/platform/x86/amd/pmf/auto-mode.c  |  441
-rw-r--r--  drivers/platform/x86/amd/pmf/cnqf.c       |  467
-rw-r--r--  drivers/platform/x86/amd/pmf/core.c       |  450
-rw-r--r--  drivers/platform/x86/amd/pmf/pmf.h        |  436
-rw-r--r--  drivers/platform/x86/amd/pmf/sps.c        |  284
16 files changed, 4332 insertions, 0 deletions
diff --git a/drivers/platform/x86/amd/Kconfig b/drivers/platform/x86/amd/Kconfig
new file mode 100644
index 0000000000..55f3a2fc6a
--- /dev/null
+++ b/drivers/platform/x86/amd/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# AMD x86 Platform Specific Drivers
+#
+
+source "drivers/platform/x86/amd/pmf/Kconfig"
+source "drivers/platform/x86/amd/pmc/Kconfig"
+
+config AMD_HSMP
+ tristate "AMD HSMP Driver"
+ depends on AMD_NB && X86_64
+ help
+ The driver provides a way for user space tools to monitor and manage
+ system management functionality on EPYC server CPUs from AMD.
+
+	  The Host System Management Port (HSMP) interface is a mailbox interface
+ between the x86 core and the System Management Unit (SMU) firmware.
+
+	  If you choose to compile this driver as a module, the module will be
+	  called amd_hsmp.
diff --git a/drivers/platform/x86/amd/Makefile b/drivers/platform/x86/amd/Makefile
new file mode 100644
index 0000000000..f04932b7a7
--- /dev/null
+++ b/drivers/platform/x86/amd/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for drivers/platform/x86/amd
+# AMD x86 Platform-Specific Drivers
+#
+
+obj-$(CONFIG_AMD_PMC) += pmc/
+amd_hsmp-y := hsmp.o
+obj-$(CONFIG_AMD_HSMP) += amd_hsmp.o
+obj-$(CONFIG_AMD_PMF) += pmf/
diff --git a/drivers/platform/x86/amd/hsmp.c b/drivers/platform/x86/amd/hsmp.c
new file mode 100644
index 0000000000..31382ef52e
--- /dev/null
+++ b/drivers/platform/x86/amd/hsmp.c
@@ -0,0 +1,423 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD HSMP Platform Driver
+ * Copyright (c) 2022, AMD.
+ * All Rights Reserved.
+ *
+ * This file provides a device implementation for HSMP interface
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/amd_hsmp.h>
+#include <asm/amd_nb.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/semaphore.h>
+
+#define DRIVER_NAME "amd_hsmp"
+#define DRIVER_VERSION "1.0"
+
+/* HSMP Status / Error codes */
+#define HSMP_STATUS_NOT_READY 0x00
+#define HSMP_STATUS_OK 0x01
+#define HSMP_ERR_INVALID_MSG 0xFE
+#define HSMP_ERR_INVALID_INPUT 0xFF
+
+/* Timeout in milliseconds */
+#define HSMP_MSG_TIMEOUT 100
+#define HSMP_SHORT_SLEEP 1
+
+#define HSMP_WR true
+#define HSMP_RD false
+
+/*
+ * To access a specific HSMP mailbox register, s/w writes the SMN address of the HSMP
+ * mailbox register into the SMN_INDEX register, and reads/writes the SMN_DATA reg.
+ * Below are the required SMN addresses for the HSMP mailbox register offsets in the
+ * SMU address space.
+ */
+#define SMN_HSMP_MSG_ID 0x3B10534
+#define SMN_HSMP_MSG_RESP 0x3B10980
+#define SMN_HSMP_MSG_DATA 0x3B109E0
+
+#define HSMP_INDEX_REG 0xc4
+#define HSMP_DATA_REG 0xc8
+
+static struct semaphore *hsmp_sem;
+
+static struct miscdevice hsmp_device;
+
+static int amd_hsmp_rdwr(struct pci_dev *root, u32 address,
+ u32 *value, bool write)
+{
+ int ret;
+
+ ret = pci_write_config_dword(root, HSMP_INDEX_REG, address);
+ if (ret)
+ return ret;
+
+ ret = (write ? pci_write_config_dword(root, HSMP_DATA_REG, *value)
+ : pci_read_config_dword(root, HSMP_DATA_REG, value));
+
+ return ret;
+}
+
+/*
+ * Send a message to the HSMP port via PCI-e config space registers.
+ *
+ * The caller is expected to zero out any unused arguments.
+ * If a response is expected, the number of response words should be greater than 0.
+ *
+ * Returns 0 for success and populates the requested number of arguments.
+ * Returns a negative error code for failure.
+ */
+static int __hsmp_send_message(struct pci_dev *root, struct hsmp_message *msg)
+{
+ unsigned long timeout, short_sleep;
+ u32 mbox_status;
+ u32 index;
+ int ret;
+
+ /* Clear the status register */
+ mbox_status = HSMP_STATUS_NOT_READY;
+ ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_RESP, &mbox_status, HSMP_WR);
+ if (ret) {
+ pr_err("Error %d clearing mailbox status register\n", ret);
+ return ret;
+ }
+
+ index = 0;
+ /* Write any message arguments */
+ while (index < msg->num_args) {
+ ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_DATA + (index << 2),
+ &msg->args[index], HSMP_WR);
+ if (ret) {
+ pr_err("Error %d writing message argument %d\n", ret, index);
+ return ret;
+ }
+ index++;
+ }
+
+ /* Write the message ID which starts the operation */
+ ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_ID, &msg->msg_id, HSMP_WR);
+ if (ret) {
+ pr_err("Error %d writing message ID %u\n", ret, msg->msg_id);
+ return ret;
+ }
+
+ /*
+ * Depending on when the trigger write completes relative to the SMU
+ * firmware's 1 ms cycle, the operation may take from tens of us to 1 ms
+ * to complete. Some operations may take more. Therefore we will try
+ * a few short duration sleeps and switch to long sleeps if we don't
+ * succeed quickly.
+ */
+ short_sleep = jiffies + msecs_to_jiffies(HSMP_SHORT_SLEEP);
+ timeout = jiffies + msecs_to_jiffies(HSMP_MSG_TIMEOUT);
+
+ while (time_before(jiffies, timeout)) {
+ ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_RESP, &mbox_status, HSMP_RD);
+ if (ret) {
+ pr_err("Error %d reading mailbox status\n", ret);
+ return ret;
+ }
+
+ if (mbox_status != HSMP_STATUS_NOT_READY)
+ break;
+ if (time_before(jiffies, short_sleep))
+ usleep_range(50, 100);
+ else
+ usleep_range(1000, 2000);
+ }
+
+ if (unlikely(mbox_status == HSMP_STATUS_NOT_READY)) {
+ return -ETIMEDOUT;
+ } else if (unlikely(mbox_status == HSMP_ERR_INVALID_MSG)) {
+ return -ENOMSG;
+ } else if (unlikely(mbox_status == HSMP_ERR_INVALID_INPUT)) {
+ return -EINVAL;
+ } else if (unlikely(mbox_status != HSMP_STATUS_OK)) {
+ pr_err("Message ID %u unknown failure (status = 0x%X)\n",
+ msg->msg_id, mbox_status);
+ return -EIO;
+ }
+
+ /*
+ * SMU has responded OK. Read response data.
+ * SMU reads the input arguments from eight 32 bit registers starting
+ * from SMN_HSMP_MSG_DATA and writes the response data to the same
+ * SMN_HSMP_MSG_DATA address.
+ * We copy the response data, if any, back to args[].
+ */
+ index = 0;
+ while (index < msg->response_sz) {
+ ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_DATA + (index << 2),
+ &msg->args[index], HSMP_RD);
+ if (ret) {
+ pr_err("Error %d reading response %u for message ID:%u\n",
+ ret, index, msg->msg_id);
+ break;
+ }
+ index++;
+ }
+
+ return ret;
+}
+
+static int validate_message(struct hsmp_message *msg)
+{
+ /* msg_id against valid range of message IDs */
+ if (msg->msg_id < HSMP_TEST || msg->msg_id >= HSMP_MSG_ID_MAX)
+ return -ENOMSG;
+
+ /* msg_id is a reserved message ID */
+ if (hsmp_msg_desc_table[msg->msg_id].type == HSMP_RSVD)
+ return -ENOMSG;
+
+ /* num_args and response_sz against the HSMP spec */
+ if (msg->num_args != hsmp_msg_desc_table[msg->msg_id].num_args ||
+ msg->response_sz != hsmp_msg_desc_table[msg->msg_id].response_sz)
+ return -EINVAL;
+
+ return 0;
+}
+
+int hsmp_send_message(struct hsmp_message *msg)
+{
+ struct amd_northbridge *nb;
+ int ret;
+
+ if (!msg)
+ return -EINVAL;
+
+ nb = node_to_amd_nb(msg->sock_ind);
+ if (!nb || !nb->root)
+ return -ENODEV;
+
+ ret = validate_message(msg);
+ if (ret)
+ return ret;
+
+	/*
+	 * An SMU operation typically completes within 10 us to 1 ms, though it
+	 * may occasionally take longer. On an SMP system, a timeout of 100 ms
+	 * should be enough for the previous thread to finish the operation.
+	 */
+ ret = down_timeout(&hsmp_sem[msg->sock_ind],
+ msecs_to_jiffies(HSMP_MSG_TIMEOUT));
+ if (ret < 0)
+ return ret;
+
+ ret = __hsmp_send_message(nb->root, msg);
+
+ up(&hsmp_sem[msg->sock_ind]);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hsmp_send_message);
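For reference, a minimal sketch of an in-kernel consumer of the exported hsmp_send_message() above. HSMP_GET_SOCKET_POWER and its argument/response counts are assumptions taken from the uapi <asm/amd_hsmp.h> message table rather than from this patch; hsmp_msg_desc_table holds the authoritative values.

	/* Illustrative only: read socket 0 power (in mW) over HSMP. */
	static int example_read_socket_power(u32 *power_mw)
	{
		struct hsmp_message msg = { 0 };
		int ret;

		msg.msg_id = HSMP_GET_SOCKET_POWER;	/* assumed: 0 args, 1 response word */
		msg.response_sz = 1;
		msg.sock_ind = 0;			/* socket 0 */

		ret = hsmp_send_message(&msg);
		if (ret)
			return ret;

		*power_mw = msg.args[0];		/* response is returned in args[0] */
		return 0;
	}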
+
+static int hsmp_test(u16 sock_ind, u32 value)
+{
+ struct hsmp_message msg = { 0 };
+ struct amd_northbridge *nb;
+ int ret = -ENODEV;
+
+ nb = node_to_amd_nb(sock_ind);
+ if (!nb || !nb->root)
+ return ret;
+
+ /*
+ * Test the hsmp port by performing TEST command. The test message
+ * takes one argument and returns the value of that argument + 1.
+ */
+ msg.msg_id = HSMP_TEST;
+ msg.num_args = 1;
+ msg.response_sz = 1;
+ msg.args[0] = value;
+ msg.sock_ind = sock_ind;
+
+ ret = __hsmp_send_message(nb->root, &msg);
+ if (ret)
+ return ret;
+
+ /* Check the response value */
+ if (msg.args[0] != (value + 1)) {
+ pr_err("Socket %d test message failed, Expected 0x%08X, received 0x%08X\n",
+ sock_ind, (value + 1), msg.args[0]);
+ return -EBADE;
+ }
+
+ return ret;
+}
+
+static long hsmp_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+ int __user *arguser = (int __user *)arg;
+ struct hsmp_message msg = { 0 };
+ int ret;
+
+ if (copy_struct_from_user(&msg, sizeof(msg), arguser, sizeof(struct hsmp_message)))
+ return -EFAULT;
+
+	/*
+	 * Check that msg_id is within the range of supported message IDs,
+	 * i.e. within the array bounds of hsmp_msg_desc_table.
+	 */
+ if (msg.msg_id < HSMP_TEST || msg.msg_id >= HSMP_MSG_ID_MAX)
+ return -ENOMSG;
+
+ switch (fp->f_mode & (FMODE_WRITE | FMODE_READ)) {
+ case FMODE_WRITE:
+ /*
+ * Device is opened in O_WRONLY mode
+ * Execute only set/configure commands
+ */
+ if (hsmp_msg_desc_table[msg.msg_id].type != HSMP_SET)
+ return -EINVAL;
+ break;
+ case FMODE_READ:
+ /*
+ * Device is opened in O_RDONLY mode
+ * Execute only get/monitor commands
+ */
+ if (hsmp_msg_desc_table[msg.msg_id].type != HSMP_GET)
+ return -EINVAL;
+ break;
+ case FMODE_READ | FMODE_WRITE:
+ /*
+ * Device is opened in O_RDWR mode
+ * Execute both get/monitor and set/configure commands
+ */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = hsmp_send_message(&msg);
+ if (ret)
+ return ret;
+
+ if (hsmp_msg_desc_table[msg.msg_id].response_sz > 0) {
+ /* Copy results back to user for get/monitor commands */
+ if (copy_to_user(arguser, &msg, sizeof(struct hsmp_message)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static const struct file_operations hsmp_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = hsmp_ioctl,
+ .compat_ioctl = hsmp_ioctl,
+};
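For reference, a minimal userspace sketch that drives the /dev/hsmp character device registered below, exercising the same path as hsmp_test(). It assumes the uapi header <asm/amd_hsmp.h>, which provides struct hsmp_message, the HSMP_TEST message ID, and the HSMP_IOCTL_CMD ioctl number; the device is opened O_RDWR so both get and set commands pass the mode checks above.

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <asm/amd_hsmp.h>

	int main(void)
	{
		struct hsmp_message msg = { 0 };
		int fd = open("/dev/hsmp", O_RDWR);

		if (fd < 0)
			return 1;

		msg.msg_id = HSMP_TEST;		/* firmware echoes back args[0] + 1 */
		msg.num_args = 1;
		msg.response_sz = 1;
		msg.args[0] = 0xDEAD;
		msg.sock_ind = 0;

		if (ioctl(fd, HSMP_IOCTL_CMD, &msg) == 0)
			printf("HSMP_TEST response: 0x%x\n", msg.args[0]);

		close(fd);
		return 0;
	}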
+
+static int hsmp_pltdrv_probe(struct platform_device *pdev)
+{
+ int i;
+
+ hsmp_sem = devm_kzalloc(&pdev->dev,
+ (amd_nb_num() * sizeof(struct semaphore)),
+ GFP_KERNEL);
+ if (!hsmp_sem)
+ return -ENOMEM;
+
+ for (i = 0; i < amd_nb_num(); i++)
+ sema_init(&hsmp_sem[i], 1);
+
+ hsmp_device.name = "hsmp_cdev";
+ hsmp_device.minor = MISC_DYNAMIC_MINOR;
+ hsmp_device.fops = &hsmp_fops;
+ hsmp_device.parent = &pdev->dev;
+ hsmp_device.nodename = "hsmp";
+ hsmp_device.mode = 0644;
+
+ return misc_register(&hsmp_device);
+}
+
+static void hsmp_pltdrv_remove(struct platform_device *pdev)
+{
+ misc_deregister(&hsmp_device);
+}
+
+static struct platform_driver amd_hsmp_driver = {
+ .probe = hsmp_pltdrv_probe,
+ .remove_new = hsmp_pltdrv_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static struct platform_device *amd_hsmp_platdev;
+
+static int __init hsmp_plt_init(void)
+{
+ int ret = -ENODEV;
+ u16 num_sockets;
+ int i;
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD || boot_cpu_data.x86 < 0x19) {
+ pr_err("HSMP is not supported on Family:%x model:%x\n",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+ return ret;
+ }
+
+	/*
+	 * amd_nb_num() returns the number of SMN/DF interfaces present in the
+	 * system; if we have N SMN/DF interfaces, that ideally means N sockets.
+	 */
+ num_sockets = amd_nb_num();
+ if (num_sockets == 0)
+ return ret;
+
+ /* Test the hsmp interface on each socket */
+ for (i = 0; i < num_sockets; i++) {
+ ret = hsmp_test(i, 0xDEADBEEF);
+ if (ret) {
+ pr_err("HSMP is not supported on Fam:%x model:%x\n",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+ pr_err("Or Is HSMP disabled in BIOS ?\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ ret = platform_driver_register(&amd_hsmp_driver);
+ if (ret)
+ return ret;
+
+ amd_hsmp_platdev = platform_device_alloc(DRIVER_NAME, PLATFORM_DEVID_NONE);
+ if (!amd_hsmp_platdev) {
+ ret = -ENOMEM;
+ goto drv_unregister;
+ }
+
+ ret = platform_device_add(amd_hsmp_platdev);
+ if (ret) {
+ platform_device_put(amd_hsmp_platdev);
+ goto drv_unregister;
+ }
+
+ return 0;
+
+drv_unregister:
+ platform_driver_unregister(&amd_hsmp_driver);
+ return ret;
+}
+
+static void __exit hsmp_plt_exit(void)
+{
+ platform_device_unregister(amd_hsmp_platdev);
+ platform_driver_unregister(&amd_hsmp_driver);
+}
+
+device_initcall(hsmp_plt_init);
+module_exit(hsmp_plt_exit);
+
+MODULE_DESCRIPTION("AMD HSMP Platform Interface Driver");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/amd/pmc/Kconfig b/drivers/platform/x86/amd/pmc/Kconfig
new file mode 100644
index 0000000000..883c0a95ac
--- /dev/null
+++ b/drivers/platform/x86/amd/pmc/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# AMD PMC Driver
+#
+
+config AMD_PMC
+ tristate "AMD SoC PMC driver"
+ depends on ACPI && PCI && RTC_CLASS && AMD_NB
+ depends on SUSPEND
+ select SERIO
+ help
+ The driver provides support for AMD Power Management Controller
+ primarily responsible for S2Idle transactions that are driven from
+ a platform firmware running on SMU. This driver also provides a debug
+ mechanism to investigate the S2Idle transactions and failures.
+
+	  Say Y or M here if you have a notebook powered by an AMD Ryzen CPU/APU.
+
+	  If you choose to compile this driver as a module, the module will be
+	  called amd-pmc.
diff --git a/drivers/platform/x86/amd/pmc/Makefile b/drivers/platform/x86/amd/pmc/Makefile
new file mode 100644
index 0000000000..4aaa29d351
--- /dev/null
+++ b/drivers/platform/x86/amd/pmc/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for linux/drivers/platform/x86/amd/pmc
+# AMD Power Management Controller Driver
+#
+
+amd-pmc-objs := pmc.o pmc-quirks.o
+obj-$(CONFIG_AMD_PMC) += amd-pmc.o
diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c
new file mode 100644
index 0000000000..b456370166
--- /dev/null
+++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD SoC Power Management Controller Driver Quirks
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#include <linux/dmi.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+
+#include "pmc.h"
+
+struct quirk_entry {
+ u32 s2idle_bug_mmio;
+ bool spurious_8042;
+};
+
+static struct quirk_entry quirk_s2idle_bug = {
+ .s2idle_bug_mmio = 0xfed80380,
+};
+
+static struct quirk_entry quirk_spurious_8042 = {
+ .spurious_8042 = true,
+};
+
+static const struct dmi_system_id fwbug_list[] = {
+ {
+ .ident = "L14 Gen2 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20X5"),
+ }
+ },
+ {
+ .ident = "T14s Gen2 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20XF"),
+ }
+ },
+ {
+ .ident = "X13 Gen2 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20XH"),
+ }
+ },
+ {
+ .ident = "T14 Gen2 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20XK"),
+ }
+ },
+ {
+ .ident = "T14 Gen1 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20UD"),
+ }
+ },
+ {
+ .ident = "T14 Gen1 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20UE"),
+ }
+ },
+ {
+ .ident = "T14s Gen1 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20UH"),
+ }
+ },
+ {
+ .ident = "T14s Gen1 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20UJ"),
+ }
+ },
+ {
+ .ident = "P14s Gen1 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20Y1"),
+ }
+ },
+ {
+ .ident = "P14s Gen2 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21A0"),
+ }
+ },
+ {
+ .ident = "P14s Gen2 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21A1"),
+ }
+ },
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=218024 */
+ {
+ .ident = "V14 G4 AMN",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82YT"),
+ }
+ },
+ {
+ .ident = "V14 G4 AMN",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83GE"),
+ }
+ },
+ {
+ .ident = "V15 G4 AMN",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82YU"),
+ }
+ },
+ {
+ .ident = "V15 G4 AMN",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83CQ"),
+ }
+ },
+ {
+ .ident = "IdeaPad 1 14AMN7",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82VF"),
+ }
+ },
+ {
+ .ident = "IdeaPad 1 15AMN7",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82VG"),
+ }
+ },
+ {
+ .ident = "IdeaPad 1 15AMN7",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82X5"),
+ }
+ },
+ {
+ .ident = "IdeaPad Slim 3 14AMN8",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82XN"),
+ }
+ },
+ {
+ .ident = "IdeaPad Slim 3 15AMN8",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82XQ"),
+ }
+ },
+ /* https://gitlab.freedesktop.org/drm/amd/-/issues/2684 */
+ {
+ .ident = "HP Laptop 15s-eq2xxx",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Laptop 15s-eq2xxx"),
+ }
+ },
+ /* https://community.frame.work/t/tracking-framework-amd-ryzen-7040-series-lid-wakeup-behavior-feedback/39128 */
+ {
+ .ident = "Framework Laptop 13 (Phoenix)",
+ .driver_data = &quirk_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Laptop 13 (AMD Ryzen 7040Series)"),
+ DMI_MATCH(DMI_BIOS_VERSION, "03.03"),
+ }
+ },
+ {}
+};
+
+/*
+ * Some laptops run an SMI handler during the D3->D0 transition that occurs
+ * specifically when exiting suspend to idle. When the IOMMU translation layer
+ * is enabled (the default behavior), this can cause large delays during resume
+ * for NVMe devices.
+ *
+ * To avoid this firmware problem, skip the SMI handler on these machines
+ * before the D0 transition occurs.
+ */
+static void amd_pmc_skip_nvme_smi_handler(u32 s2idle_bug_mmio)
+{
+ void __iomem *addr;
+ u8 val;
+
+ if (!request_mem_region_muxed(s2idle_bug_mmio, 1, "amd_pmc_pm80"))
+ return;
+
+ addr = ioremap(s2idle_bug_mmio, 1);
+ if (!addr)
+ goto cleanup_resource;
+
+ val = ioread8(addr);
+ iowrite8(val & ~BIT(0), addr);
+
+ iounmap(addr);
+cleanup_resource:
+ release_mem_region(s2idle_bug_mmio, 1);
+}
+
+void amd_pmc_process_restore_quirks(struct amd_pmc_dev *dev)
+{
+ if (dev->quirks && dev->quirks->s2idle_bug_mmio)
+ amd_pmc_skip_nvme_smi_handler(dev->quirks->s2idle_bug_mmio);
+}
+
+void amd_pmc_quirks_init(struct amd_pmc_dev *dev)
+{
+ const struct dmi_system_id *dmi_id;
+
+ if (dev->cpu_id == AMD_CPU_ID_CZN)
+ dev->disable_8042_wakeup = true;
+
+ dmi_id = dmi_first_match(fwbug_list);
+ if (!dmi_id)
+ return;
+ dev->quirks = dmi_id->driver_data;
+ if (dev->quirks->s2idle_bug_mmio)
+ pr_info("Using s2idle quirk to avoid %s platform firmware bug\n",
+ dmi_id->ident);
+ if (dev->quirks->spurious_8042)
+ dev->disable_8042_wakeup = true;
+}
diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
new file mode 100644
index 0000000000..96caf2221d
--- /dev/null
+++ b/drivers/platform/x86/amd/pmc/pmc.c
@@ -0,0 +1,1089 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD SoC Power Management Controller Driver
+ *
+ * Copyright (c) 2020, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/amd_nb.h>
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/serio.h>
+#include <linux/suspend.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+
+#include "pmc.h"
+
+/* SMU communication registers */
+#define AMD_PMC_REGISTER_MESSAGE 0x538
+#define AMD_PMC_REGISTER_RESPONSE 0x980
+#define AMD_PMC_REGISTER_ARGUMENT 0x9BC
+
+/* PMC Scratch Registers */
+#define AMD_PMC_SCRATCH_REG_CZN 0x94
+#define AMD_PMC_SCRATCH_REG_YC 0xD14
+
+/* STB Registers */
+#define AMD_PMC_STB_PMI_0 0x03E30600
+#define AMD_PMC_STB_S2IDLE_PREPARE 0xC6000001
+#define AMD_PMC_STB_S2IDLE_RESTORE 0xC6000002
+#define AMD_PMC_STB_S2IDLE_CHECK 0xC6000003
+#define AMD_PMC_STB_DUMMY_PC 0xC6000007
+
+/* STB S2D (Spill to DRAM) has a different message port offset */
+#define AMD_S2D_REGISTER_MESSAGE 0xA20
+#define AMD_S2D_REGISTER_RESPONSE 0xA80
+#define AMD_S2D_REGISTER_ARGUMENT 0xA88
+
+/* STB Spill to DRAM Parameters */
+#define S2D_TELEMETRY_BYTES_MAX 0x100000
+#define S2D_TELEMETRY_DRAMBYTES_MAX 0x1000000
+
+/* Base address of SMU for mapping physical address to virtual address */
+#define AMD_PMC_MAPPING_SIZE 0x01000
+#define AMD_PMC_BASE_ADDR_OFFSET 0x10000
+#define AMD_PMC_BASE_ADDR_LO 0x13B102E8
+#define AMD_PMC_BASE_ADDR_HI 0x13B102EC
+#define AMD_PMC_BASE_ADDR_LO_MASK GENMASK(15, 0)
+#define AMD_PMC_BASE_ADDR_HI_MASK GENMASK(31, 20)
+
+/* SMU Response Codes */
+#define AMD_PMC_RESULT_OK 0x01
+#define AMD_PMC_RESULT_CMD_REJECT_BUSY 0xFC
+#define AMD_PMC_RESULT_CMD_REJECT_PREREQ 0xFD
+#define AMD_PMC_RESULT_CMD_UNKNOWN 0xFE
+#define AMD_PMC_RESULT_FAILED 0xFF
+
+/* FCH SSC Registers */
+#define FCH_S0I3_ENTRY_TIME_L_OFFSET 0x30
+#define FCH_S0I3_ENTRY_TIME_H_OFFSET 0x34
+#define FCH_S0I3_EXIT_TIME_L_OFFSET 0x38
+#define FCH_S0I3_EXIT_TIME_H_OFFSET 0x3C
+#define FCH_SSC_MAPPING_SIZE 0x800
+#define FCH_BASE_PHY_ADDR_LOW 0xFED81100
+#define FCH_BASE_PHY_ADDR_HIGH 0x00000000
+
+/* SMU Message Definitions */
+#define SMU_MSG_GETSMUVERSION 0x02
+#define SMU_MSG_LOG_GETDRAM_ADDR_HI 0x04
+#define SMU_MSG_LOG_GETDRAM_ADDR_LO 0x05
+#define SMU_MSG_LOG_START 0x06
+#define SMU_MSG_LOG_RESET 0x07
+#define SMU_MSG_LOG_DUMP_DATA 0x08
+#define SMU_MSG_GET_SUP_CONSTRAINTS 0x09
+
+#define PMC_MSG_DELAY_MIN_US 50
+#define RESPONSE_REGISTER_LOOP_MAX 20000
+
+#define DELAY_MIN_US 2000
+#define DELAY_MAX_US 3000
+#define FIFO_SIZE 4096
+
+enum amd_pmc_def {
+ MSG_TEST = 0x01,
+ MSG_OS_HINT_PCO,
+ MSG_OS_HINT_RN,
+};
+
+enum s2d_arg {
+ S2D_TELEMETRY_SIZE = 0x01,
+ S2D_PHYS_ADDR_LOW,
+ S2D_PHYS_ADDR_HIGH,
+ S2D_NUM_SAMPLES,
+ S2D_DRAM_SIZE,
+};
+
+struct amd_pmc_bit_map {
+ const char *name;
+ u32 bit_mask;
+};
+
+static const struct amd_pmc_bit_map soc15_ip_blk[] = {
+ {"DISPLAY", BIT(0)},
+ {"CPU", BIT(1)},
+ {"GFX", BIT(2)},
+ {"VDD", BIT(3)},
+ {"ACP", BIT(4)},
+ {"VCN", BIT(5)},
+ {"ISP", BIT(6)},
+ {"NBIO", BIT(7)},
+ {"DF", BIT(8)},
+ {"USB3_0", BIT(9)},
+ {"USB3_1", BIT(10)},
+ {"LAPIC", BIT(11)},
+ {"USB3_2", BIT(12)},
+ {"USB3_3", BIT(13)},
+ {"USB3_4", BIT(14)},
+ {"USB4_0", BIT(15)},
+ {"USB4_1", BIT(16)},
+ {"MPM", BIT(17)},
+ {"JPEG", BIT(18)},
+ {"IPU", BIT(19)},
+ {"UMSCH", BIT(20)},
+ {}
+};
+
+static bool enable_stb;
+module_param(enable_stb, bool, 0644);
+MODULE_PARM_DESC(enable_stb, "Enable the STB debug mechanism");
+
+static bool disable_workarounds;
+module_param(disable_workarounds, bool, 0644);
+MODULE_PARM_DESC(disable_workarounds, "Disable workarounds for platform bugs");
+
+static struct amd_pmc_dev pmc;
+static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret);
+static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf);
+static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data);
+
+static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
+{
+ return ioread32(dev->regbase + reg_offset);
+}
+
+static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u32 val)
+{
+ iowrite32(val, dev->regbase + reg_offset);
+}
+
+struct smu_metrics {
+ u32 table_version;
+ u32 hint_count;
+ u32 s0i3_last_entry_status;
+ u32 timein_s0i2;
+ u64 timeentering_s0i3_lastcapture;
+ u64 timeentering_s0i3_totaltime;
+ u64 timeto_resume_to_os_lastcapture;
+ u64 timeto_resume_to_os_totaltime;
+ u64 timein_s0i3_lastcapture;
+ u64 timein_s0i3_totaltime;
+ u64 timein_swdrips_lastcapture;
+ u64 timein_swdrips_totaltime;
+ u64 timecondition_notmet_lastcapture[32];
+ u64 timecondition_notmet_totaltime[32];
+} __packed;
+
+static int amd_pmc_stb_debugfs_open(struct inode *inode, struct file *filp)
+{
+ struct amd_pmc_dev *dev = filp->f_inode->i_private;
+ u32 size = FIFO_SIZE * sizeof(u32);
+ u32 *buf;
+ int rc;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ rc = amd_pmc_read_stb(dev, buf);
+ if (rc) {
+ kfree(buf);
+ return rc;
+ }
+
+ filp->private_data = buf;
+ return rc;
+}
+
+static ssize_t amd_pmc_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
+ loff_t *pos)
+{
+ if (!filp->private_data)
+ return -EINVAL;
+
+ return simple_read_from_buffer(buf, size, pos, filp->private_data,
+ FIFO_SIZE * sizeof(u32));
+}
+
+static int amd_pmc_stb_debugfs_release(struct inode *inode, struct file *filp)
+{
+ kfree(filp->private_data);
+ return 0;
+}
+
+static const struct file_operations amd_pmc_stb_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = amd_pmc_stb_debugfs_open,
+ .read = amd_pmc_stb_debugfs_read,
+ .release = amd_pmc_stb_debugfs_release,
+};
+
+static int amd_pmc_stb_debugfs_open_v2(struct inode *inode, struct file *filp)
+{
+ struct amd_pmc_dev *dev = filp->f_inode->i_private;
+ u32 *buf, fsize, num_samples, stb_rdptr_offset = 0;
+ int ret;
+
+ /* Write dummy postcode while reading the STB buffer */
+ ret = amd_pmc_write_stb(dev, AMD_PMC_STB_DUMMY_PC);
+ if (ret)
+ dev_err(dev->dev, "error writing to STB: %d\n", ret);
+
+ buf = kzalloc(S2D_TELEMETRY_BYTES_MAX, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+	/* Spill to DRAM num_samples uses a separate SMU message port */
+ dev->msg_port = 1;
+
+ /* Get the num_samples to calculate the last push location */
+ ret = amd_pmc_send_cmd(dev, S2D_NUM_SAMPLES, &num_samples, dev->s2d_msg_id, true);
+ /* Clear msg_port for other SMU operation */
+ dev->msg_port = 0;
+ if (ret) {
+ dev_err(dev->dev, "error: S2D_NUM_SAMPLES not supported : %d\n", ret);
+ kfree(buf);
+ return ret;
+ }
+
+ /* Start capturing data from the last push location */
+ if (num_samples > S2D_TELEMETRY_BYTES_MAX) {
+ fsize = S2D_TELEMETRY_BYTES_MAX;
+ stb_rdptr_offset = num_samples - fsize;
+ } else {
+ fsize = num_samples;
+ stb_rdptr_offset = 0;
+ }
+
+ memcpy_fromio(buf, dev->stb_virt_addr + stb_rdptr_offset, fsize);
+ filp->private_data = buf;
+
+ return 0;
+}
+
+static ssize_t amd_pmc_stb_debugfs_read_v2(struct file *filp, char __user *buf, size_t size,
+ loff_t *pos)
+{
+ if (!filp->private_data)
+ return -EINVAL;
+
+ return simple_read_from_buffer(buf, size, pos, filp->private_data,
+ S2D_TELEMETRY_BYTES_MAX);
+}
+
+static int amd_pmc_stb_debugfs_release_v2(struct inode *inode, struct file *filp)
+{
+ kfree(filp->private_data);
+ return 0;
+}
+
+static const struct file_operations amd_pmc_stb_debugfs_fops_v2 = {
+ .owner = THIS_MODULE,
+ .open = amd_pmc_stb_debugfs_open_v2,
+ .read = amd_pmc_stb_debugfs_read_v2,
+ .release = amd_pmc_stb_debugfs_release_v2,
+};
+
+static void amd_pmc_get_ip_info(struct amd_pmc_dev *dev)
+{
+ switch (dev->cpu_id) {
+ case AMD_CPU_ID_PCO:
+ case AMD_CPU_ID_RN:
+ case AMD_CPU_ID_YC:
+ case AMD_CPU_ID_CB:
+ dev->num_ips = 12;
+ dev->s2d_msg_id = 0xBE;
+ break;
+ case AMD_CPU_ID_PS:
+ dev->num_ips = 21;
+ dev->s2d_msg_id = 0x85;
+ break;
+ }
+}
+
+static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev)
+{
+ if (dev->cpu_id == AMD_CPU_ID_PCO) {
+ dev_warn_once(dev->dev, "SMU debugging info not supported on this platform\n");
+ return -EINVAL;
+ }
+
+ /* Get Active devices list from SMU */
+ if (!dev->active_ips)
+ amd_pmc_send_cmd(dev, 0, &dev->active_ips, SMU_MSG_GET_SUP_CONSTRAINTS, true);
+
+ /* Get dram address */
+ if (!dev->smu_virt_addr) {
+ u32 phys_addr_low, phys_addr_hi;
+ u64 smu_phys_addr;
+
+ amd_pmc_send_cmd(dev, 0, &phys_addr_low, SMU_MSG_LOG_GETDRAM_ADDR_LO, true);
+ amd_pmc_send_cmd(dev, 0, &phys_addr_hi, SMU_MSG_LOG_GETDRAM_ADDR_HI, true);
+ smu_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);
+
+ dev->smu_virt_addr = devm_ioremap(dev->dev, smu_phys_addr,
+ sizeof(struct smu_metrics));
+ if (!dev->smu_virt_addr)
+ return -ENOMEM;
+ }
+
+ /* Start the logging */
+ amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_RESET, false);
+ amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_START, false);
+
+ return 0;
+}
+
+static int get_metrics_table(struct amd_pmc_dev *pdev, struct smu_metrics *table)
+{
+ if (!pdev->smu_virt_addr) {
+ int ret = amd_pmc_setup_smu_logging(pdev);
+
+ if (ret)
+ return ret;
+ }
+
+ if (pdev->cpu_id == AMD_CPU_ID_PCO)
+ return -ENODEV;
+ memcpy_fromio(table, pdev->smu_virt_addr, sizeof(struct smu_metrics));
+ return 0;
+}
+
+static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev)
+{
+ struct smu_metrics table;
+
+ if (get_metrics_table(pdev, &table))
+ return;
+
+ if (!table.s0i3_last_entry_status)
+ dev_warn(pdev->dev, "Last suspend didn't reach deepest state\n");
+ pm_report_hw_sleep_time(table.s0i3_last_entry_status ?
+ table.timein_s0i3_lastcapture : 0);
+}
+
+static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev)
+{
+ int rc;
+ u32 val;
+
+ if (dev->cpu_id == AMD_CPU_ID_PCO)
+ return -ENODEV;
+
+ rc = amd_pmc_send_cmd(dev, 0, &val, SMU_MSG_GETSMUVERSION, true);
+ if (rc)
+ return rc;
+
+ dev->smu_program = (val >> 24) & GENMASK(7, 0);
+ dev->major = (val >> 16) & GENMASK(7, 0);
+ dev->minor = (val >> 8) & GENMASK(7, 0);
+ dev->rev = (val >> 0) & GENMASK(7, 0);
+
+ dev_dbg(dev->dev, "SMU program %u version is %u.%u.%u\n",
+ dev->smu_program, dev->major, dev->minor, dev->rev);
+
+ return 0;
+}
+
+static ssize_t smu_fw_version_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct amd_pmc_dev *dev = dev_get_drvdata(d);
+
+ if (!dev->major) {
+ int rc = amd_pmc_get_smu_version(dev);
+
+ if (rc)
+ return rc;
+ }
+ return sysfs_emit(buf, "%u.%u.%u\n", dev->major, dev->minor, dev->rev);
+}
+
+static ssize_t smu_program_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct amd_pmc_dev *dev = dev_get_drvdata(d);
+
+ if (!dev->major) {
+ int rc = amd_pmc_get_smu_version(dev);
+
+ if (rc)
+ return rc;
+ }
+ return sysfs_emit(buf, "%u\n", dev->smu_program);
+}
+
+static DEVICE_ATTR_RO(smu_fw_version);
+static DEVICE_ATTR_RO(smu_program);
+
+static umode_t pmc_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
+
+ if (pdev->cpu_id == AMD_CPU_ID_PCO)
+ return 0;
+ return 0444;
+}
+
+static struct attribute *pmc_attrs[] = {
+ &dev_attr_smu_fw_version.attr,
+ &dev_attr_smu_program.attr,
+ NULL,
+};
+
+static struct attribute_group pmc_attr_group = {
+ .attrs = pmc_attrs,
+ .is_visible = pmc_attr_is_visible,
+};
+
+static const struct attribute_group *pmc_groups[] = {
+ &pmc_attr_group,
+ NULL,
+};
+
+static int smu_fw_info_show(struct seq_file *s, void *unused)
+{
+ struct amd_pmc_dev *dev = s->private;
+ struct smu_metrics table;
+ int idx;
+
+ if (get_metrics_table(dev, &table))
+ return -EINVAL;
+
+ seq_puts(s, "\n=== SMU Statistics ===\n");
+ seq_printf(s, "Table Version: %d\n", table.table_version);
+ seq_printf(s, "Hint Count: %d\n", table.hint_count);
+ seq_printf(s, "Last S0i3 Status: %s\n", table.s0i3_last_entry_status ? "Success" :
+ "Unknown/Fail");
+ seq_printf(s, "Time (in us) to S0i3: %lld\n", table.timeentering_s0i3_lastcapture);
+ seq_printf(s, "Time (in us) in S0i3: %lld\n", table.timein_s0i3_lastcapture);
+ seq_printf(s, "Time (in us) to resume from S0i3: %lld\n",
+ table.timeto_resume_to_os_lastcapture);
+
+ seq_puts(s, "\n=== Active time (in us) ===\n");
+ for (idx = 0 ; idx < dev->num_ips ; idx++) {
+ if (soc15_ip_blk[idx].bit_mask & dev->active_ips)
+ seq_printf(s, "%-8s : %lld\n", soc15_ip_blk[idx].name,
+ table.timecondition_notmet_lastcapture[idx]);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(smu_fw_info);
+
+static int s0ix_stats_show(struct seq_file *s, void *unused)
+{
+ struct amd_pmc_dev *dev = s->private;
+ u64 entry_time, exit_time, residency;
+
+ /* Use FCH registers to get the S0ix stats */
+ if (!dev->fch_virt_addr) {
+ u32 base_addr_lo = FCH_BASE_PHY_ADDR_LOW;
+ u32 base_addr_hi = FCH_BASE_PHY_ADDR_HIGH;
+ u64 fch_phys_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
+
+ dev->fch_virt_addr = devm_ioremap(dev->dev, fch_phys_addr, FCH_SSC_MAPPING_SIZE);
+ if (!dev->fch_virt_addr)
+ return -ENOMEM;
+ }
+
+ entry_time = ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_H_OFFSET);
+ entry_time = entry_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_L_OFFSET);
+
+ exit_time = ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_H_OFFSET);
+ exit_time = exit_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_L_OFFSET);
+
+	/*
+	 * The counters run at 48 MHz; divide the tick delta by 48 to convert it
+	 * to microseconds (e.g. 48,000,000 ticks = 1 s = 1,000,000 us).
+	 */
+ residency = exit_time - entry_time;
+ do_div(residency, 48);
+
+ seq_puts(s, "=== S0ix statistics ===\n");
+ seq_printf(s, "S0ix Entry Time: %lld\n", entry_time);
+ seq_printf(s, "S0ix Exit Time: %lld\n", exit_time);
+ seq_printf(s, "Residency Time: %lld\n", residency);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(s0ix_stats);
+
+static int amd_pmc_idlemask_read(struct amd_pmc_dev *pdev, struct device *dev,
+ struct seq_file *s)
+{
+ u32 val;
+ int rc;
+
+ switch (pdev->cpu_id) {
+ case AMD_CPU_ID_CZN:
+ /* we haven't yet read SMU version */
+ if (!pdev->major) {
+ rc = amd_pmc_get_smu_version(pdev);
+ if (rc)
+ return rc;
+ }
+ if (pdev->major > 56 || (pdev->major >= 55 && pdev->minor >= 37))
+ val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_CZN);
+ else
+ return -EINVAL;
+ break;
+ case AMD_CPU_ID_YC:
+ case AMD_CPU_ID_CB:
+ case AMD_CPU_ID_PS:
+ val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_YC);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (dev)
+ pm_pr_dbg("SMU idlemask s0i3: 0x%x\n", val);
+
+ if (s)
+ seq_printf(s, "SMU idlemask : 0x%x\n", val);
+
+ return 0;
+}
+
+static int amd_pmc_idlemask_show(struct seq_file *s, void *unused)
+{
+ return amd_pmc_idlemask_read(s->private, NULL, s);
+}
+DEFINE_SHOW_ATTRIBUTE(amd_pmc_idlemask);
+
+static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
+{
+ debugfs_remove_recursive(dev->dbgfs_dir);
+}
+
+static bool amd_pmc_is_stb_supported(struct amd_pmc_dev *dev)
+{
+ switch (dev->cpu_id) {
+ case AMD_CPU_ID_YC:
+ case AMD_CPU_ID_CB:
+ case AMD_CPU_ID_PS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
+{
+ dev->dbgfs_dir = debugfs_create_dir("amd_pmc", NULL);
+ debugfs_create_file("smu_fw_info", 0644, dev->dbgfs_dir, dev,
+ &smu_fw_info_fops);
+ debugfs_create_file("s0ix_stats", 0644, dev->dbgfs_dir, dev,
+ &s0ix_stats_fops);
+ debugfs_create_file("amd_pmc_idlemask", 0644, dev->dbgfs_dir, dev,
+ &amd_pmc_idlemask_fops);
+ /* Enable STB only when the module_param is set */
+ if (enable_stb) {
+ if (amd_pmc_is_stb_supported(dev))
+ debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev,
+ &amd_pmc_stb_debugfs_fops_v2);
+ else
+ debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev,
+ &amd_pmc_stb_debugfs_fops);
+ }
+}
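For reference, a minimal userspace sketch that dumps the STB buffer through the stb_read debugfs file created above; it assumes debugfs is mounted at /sys/kernel/debug and that the driver was loaded with enable_stb=1.

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/kernel/debug/amd_pmc/stb_read", "rb");
		unsigned char buf[4096];
		size_t n;

		if (!f)
			return 1;

		/* Stream the raw STB contents to stdout for offline decoding */
		while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
			fwrite(buf, 1, n, stdout);

		fclose(f);
		return 0;
	}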
+
+static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
+{
+ u32 value, message, argument, response;
+
+ if (dev->msg_port) {
+ message = AMD_S2D_REGISTER_MESSAGE;
+ argument = AMD_S2D_REGISTER_ARGUMENT;
+ response = AMD_S2D_REGISTER_RESPONSE;
+ } else {
+ message = AMD_PMC_REGISTER_MESSAGE;
+ argument = AMD_PMC_REGISTER_ARGUMENT;
+ response = AMD_PMC_REGISTER_RESPONSE;
+ }
+
+ value = amd_pmc_reg_read(dev, response);
+ dev_dbg(dev->dev, "AMD_%s_REGISTER_RESPONSE:%x\n", dev->msg_port ? "S2D" : "PMC", value);
+
+ value = amd_pmc_reg_read(dev, argument);
+ dev_dbg(dev->dev, "AMD_%s_REGISTER_ARGUMENT:%x\n", dev->msg_port ? "S2D" : "PMC", value);
+
+ value = amd_pmc_reg_read(dev, message);
+ dev_dbg(dev->dev, "AMD_%s_REGISTER_MESSAGE:%x\n", dev->msg_port ? "S2D" : "PMC", value);
+}
+
+static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret)
+{
+ int rc;
+ u32 val, message, argument, response;
+
+ mutex_lock(&dev->lock);
+
+ if (dev->msg_port) {
+ message = AMD_S2D_REGISTER_MESSAGE;
+ argument = AMD_S2D_REGISTER_ARGUMENT;
+ response = AMD_S2D_REGISTER_RESPONSE;
+ } else {
+ message = AMD_PMC_REGISTER_MESSAGE;
+ argument = AMD_PMC_REGISTER_ARGUMENT;
+ response = AMD_PMC_REGISTER_RESPONSE;
+ }
+
+ /* Wait until we get a valid response */
+ rc = readx_poll_timeout(ioread32, dev->regbase + response,
+ val, val != 0, PMC_MSG_DELAY_MIN_US,
+ PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
+ if (rc) {
+ dev_err(dev->dev, "failed to talk to SMU\n");
+ goto out_unlock;
+ }
+
+ /* Write zero to response register */
+ amd_pmc_reg_write(dev, response, 0);
+
+ /* Write argument into response register */
+ amd_pmc_reg_write(dev, argument, arg);
+
+ /* Write message ID to message ID register */
+ amd_pmc_reg_write(dev, message, msg);
+
+ /* Wait until we get a valid response */
+ rc = readx_poll_timeout(ioread32, dev->regbase + response,
+ val, val != 0, PMC_MSG_DELAY_MIN_US,
+ PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
+ if (rc) {
+ dev_err(dev->dev, "SMU response timed out\n");
+ goto out_unlock;
+ }
+
+ switch (val) {
+ case AMD_PMC_RESULT_OK:
+ if (ret) {
+			/* PMFW may take longer to return the data */
+ usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
+ *data = amd_pmc_reg_read(dev, argument);
+ }
+ break;
+ case AMD_PMC_RESULT_CMD_REJECT_BUSY:
+ dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
+ rc = -EBUSY;
+ goto out_unlock;
+ case AMD_PMC_RESULT_CMD_UNKNOWN:
+ dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
+ rc = -EINVAL;
+ goto out_unlock;
+ case AMD_PMC_RESULT_CMD_REJECT_PREREQ:
+ case AMD_PMC_RESULT_FAILED:
+ default:
+ dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
+ rc = -EIO;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&dev->lock);
+ amd_pmc_dump_registers(dev);
+ return rc;
+}
+
+static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
+{
+ switch (dev->cpu_id) {
+ case AMD_CPU_ID_PCO:
+ return MSG_OS_HINT_PCO;
+ case AMD_CPU_ID_RN:
+ case AMD_CPU_ID_YC:
+ case AMD_CPU_ID_CB:
+ case AMD_CPU_ID_PS:
+ return MSG_OS_HINT_RN;
+ }
+ return -EINVAL;
+}
+
+static int amd_pmc_wa_irq1(struct amd_pmc_dev *pdev)
+{
+ struct device *d;
+ int rc;
+
+	/* Cezanne platform firmware has a fix in 64.66.0 */
+ if (pdev->cpu_id == AMD_CPU_ID_CZN) {
+ if (!pdev->major) {
+ rc = amd_pmc_get_smu_version(pdev);
+ if (rc)
+ return rc;
+ }
+
+ if (pdev->major > 64 || (pdev->major == 64 && pdev->minor > 65))
+ return 0;
+ }
+
+ d = bus_find_device_by_name(&serio_bus, NULL, "serio0");
+ if (!d)
+ return 0;
+ if (device_may_wakeup(d)) {
+ dev_info_once(d, "Disabling IRQ1 wakeup source to avoid platform firmware bug\n");
+ disable_irq_wake(1);
+ device_set_wakeup_enable(d, false);
+ }
+ put_device(d);
+
+ return 0;
+}
+
+static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg)
+{
+ struct rtc_device *rtc_device;
+ time64_t then, now, duration;
+ struct rtc_wkalrm alarm;
+ struct rtc_time tm;
+ int rc;
+
+ /* we haven't yet read SMU version */
+ if (!pdev->major) {
+ rc = amd_pmc_get_smu_version(pdev);
+ if (rc)
+ return rc;
+ }
+
+ if (pdev->major < 64 || (pdev->major == 64 && pdev->minor < 53))
+ return 0;
+
+ rtc_device = rtc_class_open("rtc0");
+ if (!rtc_device)
+ return 0;
+ rc = rtc_read_alarm(rtc_device, &alarm);
+ if (rc)
+ return rc;
+ if (!alarm.enabled) {
+ dev_dbg(pdev->dev, "alarm not enabled\n");
+ return 0;
+ }
+ rc = rtc_read_time(rtc_device, &tm);
+ if (rc)
+ return rc;
+ then = rtc_tm_to_time64(&alarm.time);
+ now = rtc_tm_to_time64(&tm);
+	duration = then - now;
+
+ /* in the past */
+ if (then < now)
+ return 0;
+
+	/*
+	 * The duration will be stored in the upper 16 bits of the s0i3 hint
+	 * argument, so timer wakeup from s0i3 is limited to ~18 hours or less.
+	 */
+ if (duration <= 4 || duration > U16_MAX)
+ return -EINVAL;
+
+ *arg |= (duration << 16);
+ rc = rtc_alarm_irq_enable(rtc_device, 0);
+ pm_pr_dbg("wakeup timer programmed for %lld seconds\n", duration);
+
+ return rc;
+}
+
+static void amd_pmc_s2idle_prepare(void)
+{
+ struct amd_pmc_dev *pdev = &pmc;
+ int rc;
+ u8 msg;
+ u32 arg = 1;
+
+ /* Reset and Start SMU logging - to monitor the s0i3 stats */
+ amd_pmc_setup_smu_logging(pdev);
+
+ /* Activate CZN specific platform bug workarounds */
+ if (pdev->cpu_id == AMD_CPU_ID_CZN && !disable_workarounds) {
+ rc = amd_pmc_verify_czn_rtc(pdev, &arg);
+ if (rc) {
+ dev_err(pdev->dev, "failed to set RTC: %d\n", rc);
+ return;
+ }
+ }
+
+ msg = amd_pmc_get_os_hint(pdev);
+ rc = amd_pmc_send_cmd(pdev, arg, NULL, msg, false);
+ if (rc) {
+ dev_err(pdev->dev, "suspend failed: %d\n", rc);
+ return;
+ }
+
+ rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_PREPARE);
+ if (rc)
+ dev_err(pdev->dev, "error writing to STB: %d\n", rc);
+}
+
+static void amd_pmc_s2idle_check(void)
+{
+ struct amd_pmc_dev *pdev = &pmc;
+ struct smu_metrics table;
+ int rc;
+
+	/* CZN: Ensure that at least 10 ms have passed before any future s0i3 entry attempt */
+ if (pdev->cpu_id == AMD_CPU_ID_CZN && !get_metrics_table(pdev, &table) &&
+ table.s0i3_last_entry_status)
+ usleep_range(10000, 20000);
+
+ /* Dump the IdleMask before we add to the STB */
+ amd_pmc_idlemask_read(pdev, pdev->dev, NULL);
+
+ rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_CHECK);
+ if (rc)
+ dev_err(pdev->dev, "error writing to STB: %d\n", rc);
+}
+
+static int amd_pmc_dump_data(struct amd_pmc_dev *pdev)
+{
+ if (pdev->cpu_id == AMD_CPU_ID_PCO)
+ return -ENODEV;
+
+ return amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, false);
+}
+
+static void amd_pmc_s2idle_restore(void)
+{
+ struct amd_pmc_dev *pdev = &pmc;
+ int rc;
+ u8 msg;
+
+ msg = amd_pmc_get_os_hint(pdev);
+ rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, false);
+ if (rc)
+ dev_err(pdev->dev, "resume failed: %d\n", rc);
+
+ /* Let SMU know that we are looking for stats */
+ amd_pmc_dump_data(pdev);
+
+ rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_RESTORE);
+ if (rc)
+ dev_err(pdev->dev, "error writing to STB: %d\n", rc);
+
+ /* Notify on failed entry */
+ amd_pmc_validate_deepest(pdev);
+
+ amd_pmc_process_restore_quirks(pdev);
+}
+
+static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = {
+ .prepare = amd_pmc_s2idle_prepare,
+ .check = amd_pmc_s2idle_check,
+ .restore = amd_pmc_s2idle_restore,
+};
+
+static int amd_pmc_suspend_handler(struct device *dev)
+{
+ struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
+
+ if (pdev->disable_8042_wakeup && !disable_workarounds) {
+ int rc = amd_pmc_wa_irq1(pdev);
+
+ if (rc) {
+ dev_err(pdev->dev, "failed to adjust keyboard wakeup: %d\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmc_pm, amd_pmc_suspend_handler, NULL);
+
+static const struct pci_device_id pmc_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CB) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RN) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PCO) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RV) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_SP) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
+ { }
+};
+
+static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
+{
+ u32 phys_addr_low, phys_addr_hi;
+ u64 stb_phys_addr;
+ u32 size = 0;
+ int ret;
+
+	/* The Spill to DRAM feature uses a separate SMU message port */
+ dev->msg_port = 1;
+
+ /* Get num of IP blocks within the SoC */
+ amd_pmc_get_ip_info(dev);
+
+ amd_pmc_send_cmd(dev, S2D_TELEMETRY_SIZE, &size, dev->s2d_msg_id, true);
+ if (size != S2D_TELEMETRY_BYTES_MAX)
+ return -EIO;
+
+ /* Get DRAM size */
+ ret = amd_pmc_send_cmd(dev, S2D_DRAM_SIZE, &dev->dram_size, dev->s2d_msg_id, true);
+ if (ret || !dev->dram_size)
+ dev->dram_size = S2D_TELEMETRY_DRAMBYTES_MAX;
+
+ /* Get STB DRAM address */
+ amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_LOW, &phys_addr_low, dev->s2d_msg_id, true);
+ amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_HIGH, &phys_addr_hi, dev->s2d_msg_id, true);
+
+ stb_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);
+
+ /* Clear msg_port for other SMU operation */
+ dev->msg_port = 0;
+
+ dev->stb_virt_addr = devm_ioremap(dev->dev, stb_phys_addr, dev->dram_size);
+ if (!dev->stb_virt_addr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
+{
+ int err;
+
+ err = amd_smn_write(0, AMD_PMC_STB_PMI_0, data);
+ if (err) {
+ dev_err(dev->dev, "failed to write data in stb: 0x%X\n", AMD_PMC_STB_PMI_0);
+ return pcibios_err_to_errno(err);
+ }
+
+ return 0;
+}
+
+static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf)
+{
+ int i, err;
+
+ for (i = 0; i < FIFO_SIZE; i++) {
+ err = amd_smn_read(0, AMD_PMC_STB_PMI_0, buf++);
+ if (err) {
+ dev_err(dev->dev, "error reading data from stb: 0x%X\n", AMD_PMC_STB_PMI_0);
+ return pcibios_err_to_errno(err);
+ }
+ }
+
+ return 0;
+}
+
+static int amd_pmc_probe(struct platform_device *pdev)
+{
+ struct amd_pmc_dev *dev = &pmc;
+ struct pci_dev *rdev;
+ u32 base_addr_lo, base_addr_hi;
+ u64 base_addr;
+ int err;
+ u32 val;
+
+ dev->dev = &pdev->dev;
+
+ rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
+ if (!rdev || !pci_match_id(pmc_pci_ids, rdev)) {
+ err = -ENODEV;
+ goto err_pci_dev_put;
+ }
+
+ dev->cpu_id = rdev->device;
+
+ if (dev->cpu_id == AMD_CPU_ID_SP) {
+ dev_warn_once(dev->dev, "S0i3 is not supported on this hardware\n");
+ err = -ENODEV;
+ goto err_pci_dev_put;
+ }
+
+ dev->rdev = rdev;
+ err = amd_smn_read(0, AMD_PMC_BASE_ADDR_LO, &val);
+ if (err) {
+ dev_err(dev->dev, "error reading 0x%x\n", AMD_PMC_BASE_ADDR_LO);
+ err = pcibios_err_to_errno(err);
+ goto err_pci_dev_put;
+ }
+
+ base_addr_lo = val & AMD_PMC_BASE_ADDR_HI_MASK;
+
+ err = amd_smn_read(0, AMD_PMC_BASE_ADDR_HI, &val);
+ if (err) {
+ dev_err(dev->dev, "error reading 0x%x\n", AMD_PMC_BASE_ADDR_HI);
+ err = pcibios_err_to_errno(err);
+ goto err_pci_dev_put;
+ }
+
+ base_addr_hi = val & AMD_PMC_BASE_ADDR_LO_MASK;
+ base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
+
+ dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMC_BASE_ADDR_OFFSET,
+ AMD_PMC_MAPPING_SIZE);
+ if (!dev->regbase) {
+ err = -ENOMEM;
+ goto err_pci_dev_put;
+ }
+
+ mutex_init(&dev->lock);
+
+ if (enable_stb && amd_pmc_is_stb_supported(dev)) {
+ err = amd_pmc_s2d_init(dev);
+ if (err)
+ goto err_pci_dev_put;
+ }
+
+ platform_set_drvdata(pdev, dev);
+ if (IS_ENABLED(CONFIG_SUSPEND)) {
+ err = acpi_register_lps0_dev(&amd_pmc_s2idle_dev_ops);
+ if (err)
+ dev_warn(dev->dev, "failed to register LPS0 sleep handler, expect increased power consumption\n");
+ if (!disable_workarounds)
+ amd_pmc_quirks_init(dev);
+ }
+
+ amd_pmc_dbgfs_register(dev);
+ pm_report_max_hw_sleep(U64_MAX);
+ return 0;
+
+err_pci_dev_put:
+ pci_dev_put(rdev);
+ return err;
+}
+
+static void amd_pmc_remove(struct platform_device *pdev)
+{
+ struct amd_pmc_dev *dev = platform_get_drvdata(pdev);
+
+ if (IS_ENABLED(CONFIG_SUSPEND))
+ acpi_unregister_lps0_dev(&amd_pmc_s2idle_dev_ops);
+ amd_pmc_dbgfs_unregister(dev);
+ pci_dev_put(dev->rdev);
+ mutex_destroy(&dev->lock);
+}
+
+static const struct acpi_device_id amd_pmc_acpi_ids[] = {
+ {"AMDI0005", 0},
+ {"AMDI0006", 0},
+ {"AMDI0007", 0},
+ {"AMDI0008", 0},
+ {"AMDI0009", 0},
+ {"AMDI000A", 0},
+ {"AMD0004", 0},
+ {"AMD0005", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, amd_pmc_acpi_ids);
+
+static struct platform_driver amd_pmc_driver = {
+ .driver = {
+ .name = "amd_pmc",
+ .acpi_match_table = amd_pmc_acpi_ids,
+ .dev_groups = pmc_groups,
+ .pm = pm_sleep_ptr(&amd_pmc_pm),
+ },
+ .probe = amd_pmc_probe,
+ .remove_new = amd_pmc_remove,
+};
+module_platform_driver(amd_pmc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("AMD PMC Driver");
diff --git a/drivers/platform/x86/amd/pmc/pmc.h b/drivers/platform/x86/amd/pmc/pmc.h
new file mode 100644
index 0000000000..b4794f1187
--- /dev/null
+++ b/drivers/platform/x86/amd/pmc/pmc.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * AMD SoC Power Management Controller Driver
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#ifndef PMC_H
+#define PMC_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+
+struct amd_pmc_dev {
+ void __iomem *regbase;
+ void __iomem *smu_virt_addr;
+ void __iomem *stb_virt_addr;
+ void __iomem *fch_virt_addr;
+ bool msg_port;
+ u32 base_addr;
+ u32 cpu_id;
+ u32 active_ips;
+ u32 dram_size;
+ u32 num_ips;
+ u32 s2d_msg_id;
+/* SMU version information */
+ u8 smu_program;
+ u8 major;
+ u8 minor;
+ u8 rev;
+ struct device *dev;
+ struct pci_dev *rdev;
+ struct mutex lock; /* generic mutex lock */
+ struct dentry *dbgfs_dir;
+ struct quirk_entry *quirks;
+ bool disable_8042_wakeup;
+};
+
+void amd_pmc_process_restore_quirks(struct amd_pmc_dev *dev);
+void amd_pmc_quirks_init(struct amd_pmc_dev *dev);
+
+/* List of supported CPU ids */
+#define AMD_CPU_ID_RV 0x15D0
+#define AMD_CPU_ID_RN 0x1630
+#define AMD_CPU_ID_PCO AMD_CPU_ID_RV
+#define AMD_CPU_ID_CZN AMD_CPU_ID_RN
+#define AMD_CPU_ID_YC 0x14B5
+#define AMD_CPU_ID_CB 0x14D8
+#define AMD_CPU_ID_PS 0x14E8
+#define AMD_CPU_ID_SP 0x14A4
+#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507
+
+#endif /* PMC_H */
diff --git a/drivers/platform/x86/amd/pmf/Kconfig b/drivers/platform/x86/amd/pmf/Kconfig
new file mode 100644
index 0000000000..3064bc8ea1
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/Kconfig
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# AMD PMF Driver
+#
+
+config AMD_PMF
+ tristate "AMD Platform Management Framework"
+ depends on ACPI && PCI
+ depends on POWER_SUPPLY
+ depends on AMD_NB
+ select ACPI_PLATFORM_PROFILE
+ help
+	  This driver provides support for the AMD Platform Management Framework.
+	  The goal is to enhance the end user experience by making AMD PCs smarter,
+	  quieter, and more power efficient by adapting to user behavior and environment.
+
+ To compile this driver as a module, choose M here: the module will
+ be called amd_pmf.
+
+config AMD_PMF_DEBUG
+ bool "PMF debug information"
+ depends on AMD_PMF
+ help
+	  Enabling this option gives more debug information on the OEM-fed power
+	  setting values for each PMF feature. The PMF driver gets this information
+	  after evaluating an ACPI method, and the information is stored in the PMF
+	  config store.
+
+	  Say Y here to enable more debug logs and say N here if you are not sure.
diff --git a/drivers/platform/x86/amd/pmf/Makefile b/drivers/platform/x86/amd/pmf/Makefile
new file mode 100644
index 0000000000..fdededf543
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for linux/drivers/platform/x86/amd/pmf
+# AMD Platform Management Framework
+#
+
+obj-$(CONFIG_AMD_PMF) += amd-pmf.o
+amd-pmf-objs := core.o acpi.o sps.o \
+ auto-mode.o cnqf.o
diff --git a/drivers/platform/x86/amd/pmf/acpi.c b/drivers/platform/x86/amd/pmf/acpi.c
new file mode 100644
index 0000000000..3fc5e4547d
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/acpi.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Management Framework Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#include <linux/acpi.h>
+#include "pmf.h"
+
+#define APMF_CQL_NOTIFICATION 2
+#define APMF_AMT_NOTIFICATION 3
+
+static union acpi_object *apmf_if_call(struct amd_pmf_dev *pdev, int fn, struct acpi_buffer *param)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_handle ahandle = ACPI_HANDLE(pdev->dev);
+ struct acpi_object_list apmf_if_arg_list;
+ union acpi_object apmf_if_args[2];
+ acpi_status status;
+
+ apmf_if_arg_list.count = 2;
+ apmf_if_arg_list.pointer = &apmf_if_args[0];
+
+ apmf_if_args[0].type = ACPI_TYPE_INTEGER;
+ apmf_if_args[0].integer.value = fn;
+
+ if (param) {
+ apmf_if_args[1].type = ACPI_TYPE_BUFFER;
+ apmf_if_args[1].buffer.length = param->length;
+ apmf_if_args[1].buffer.pointer = param->pointer;
+ } else {
+ apmf_if_args[1].type = ACPI_TYPE_INTEGER;
+ apmf_if_args[1].integer.value = 0;
+ }
+
+ status = acpi_evaluate_object(ahandle, "APMF", &apmf_if_arg_list, &buffer);
+ if (ACPI_FAILURE(status)) {
+ dev_err(pdev->dev, "APMF method:%d call failed\n", fn);
+ kfree(buffer.pointer);
+ return NULL;
+ }
+
+ return buffer.pointer;
+}
+
+static int apmf_if_call_store_buffer(struct amd_pmf_dev *pdev, int fn, void *dest, size_t out_sz)
+{
+ union acpi_object *info;
+ size_t size;
+ int err = 0;
+
+ info = apmf_if_call(pdev, fn, NULL);
+ if (!info)
+ return -EIO;
+
+ if (info->type != ACPI_TYPE_BUFFER) {
+ dev_err(pdev->dev, "object is not a buffer\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (info->buffer.length < 2) {
+ dev_err(pdev->dev, "buffer too small\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ size = *(u16 *)info->buffer.pointer;
+ if (info->buffer.length < size) {
+ dev_err(pdev->dev, "buffer smaller then headersize %u < %zu\n",
+ info->buffer.length, size);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (size < out_sz) {
+ dev_err(pdev->dev, "buffer too small %zu\n", size);
+ err = -EINVAL;
+ goto out;
+ }
+
+ memcpy(dest, info->buffer.pointer, out_sz);
+
+out:
+ kfree(info);
+ return err;
+}
+
+int is_apmf_func_supported(struct amd_pmf_dev *pdev, unsigned long index)
+{
+ /* If bit-n is set, that indicates function n+1 is supported */
+ return !!(pdev->supported_func & BIT(index - 1));
+}
+
+int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
+ struct apmf_static_slider_granular_output *data)
+{
+ if (!is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
+ return -EINVAL;
+
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR,
+ data, sizeof(*data));
+}
+
+int apmf_os_power_slider_update(struct amd_pmf_dev *pdev, u8 event)
+{
+ struct os_power_slider args;
+ struct acpi_buffer params;
+ union acpi_object *info;
+ int err = 0;
+
+ args.size = sizeof(args);
+ args.slider_event = event;
+
+ params.length = sizeof(args);
+ params.pointer = (void *)&args;
+
+ info = apmf_if_call(pdev, APMF_FUNC_OS_POWER_SLIDER_UPDATE, &params);
+ if (!info)
+ err = -EIO;
+
+ kfree(info);
+ return err;
+}
+
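+/*
+ * Watchdog-style heartbeat: as long as the APMF call succeeds, the work
+ * re-queues itself every hb_interval seconds so that SBIOS knows the
+ * driver is alive; on the first failure the chain simply stops.
+ */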
+static void apmf_sbios_heartbeat_notify(struct work_struct *work)
+{
+ struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, heart_beat.work);
+ union acpi_object *info;
+
+ dev_dbg(dev->dev, "Sending heartbeat to SBIOS\n");
+ info = apmf_if_call(dev, APMF_FUNC_SBIOS_HEARTBEAT, NULL);
+ if (!info)
+ goto out;
+
+ schedule_delayed_work(&dev->heart_beat, msecs_to_jiffies(dev->hb_interval * 1000));
+
+out:
+ kfree(info);
+}
+
+int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx)
+{
+ union acpi_object *info;
+ struct apmf_fan_idx args;
+ struct acpi_buffer params;
+ int err = 0;
+
+ args.size = sizeof(args);
+ args.fan_ctl_mode = manual;
+ args.fan_ctl_idx = idx;
+
+ params.length = sizeof(args);
+ params.pointer = (void *)&args;
+
+ info = apmf_if_call(pdev, APMF_FUNC_SET_FAN_IDX, &params);
+ if (!info) {
+ err = -EIO;
+ goto out;
+ }
+
+out:
+ kfree(info);
+ return err;
+}
+
+int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data)
+{
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_AUTO_MODE, data, sizeof(*data));
+}
+
+int apmf_get_sbios_requests(struct amd_pmf_dev *pdev, struct apmf_sbios_req *req)
+{
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_SBIOS_REQUESTS,
+ req, sizeof(*req));
+}
+
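+/*
+ * ACPI notify handler: fetch the pending-request word from SBIOS and
+ * service the AMT and CQL bits. update_mutex is taken so that the
+ * handler cannot race with the periodic metrics worker, which applies
+ * the same mode tables.
+ */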
+static void apmf_event_handler(acpi_handle handle, u32 event, void *data)
+{
+ struct amd_pmf_dev *pmf_dev = data;
+ struct apmf_sbios_req req;
+ int ret;
+
+ mutex_lock(&pmf_dev->update_mutex);
+ ret = apmf_get_sbios_requests(pmf_dev, &req);
+ if (ret) {
+ dev_err(pmf_dev->dev, "Failed to get SBIOS requests:%d\n", ret);
+ goto out;
+ }
+
+ if (req.pending_req & BIT(APMF_AMT_NOTIFICATION)) {
+ dev_dbg(pmf_dev->dev, "AMT is supported and notifications %s\n",
+ req.amt_event ? "Enabled" : "Disabled");
+ pmf_dev->amt_enabled = !!req.amt_event;
+
+ if (pmf_dev->amt_enabled)
+ amd_pmf_handle_amt(pmf_dev);
+ else
+ amd_pmf_reset_amt(pmf_dev);
+ }
+
+ if (req.pending_req & BIT(APMF_CQL_NOTIFICATION)) {
+ dev_dbg(pmf_dev->dev, "CQL is supported and notifications %s\n",
+ req.cql_event ? "Enabled" : "Disabled");
+
+ /* update the target mode information */
+ if (pmf_dev->amt_enabled)
+ amd_pmf_update_2_cql(pmf_dev, req.cql_event);
+ }
+out:
+ mutex_unlock(&pmf_dev->update_mutex);
+}
+
+static int apmf_if_verify_interface(struct amd_pmf_dev *pdev)
+{
+ struct apmf_verify_interface output;
+ int err;
+
+ err = apmf_if_call_store_buffer(pdev, APMF_FUNC_VERIFY_INTERFACE, &output, sizeof(output));
+ if (err)
+ return err;
+
+ pdev->supported_func = output.supported_functions;
+ dev_dbg(pdev->dev, "supported functions:0x%x notifications:0x%x\n",
+ output.supported_functions, output.notification_mask);
+
+ return 0;
+}
+
+static int apmf_get_system_params(struct amd_pmf_dev *dev)
+{
+ struct apmf_system_params params;
+ int err;
+
+ if (!is_apmf_func_supported(dev, APMF_FUNC_GET_SYS_PARAMS))
+ return -EINVAL;
+
+ err = apmf_if_call_store_buffer(dev, APMF_FUNC_GET_SYS_PARAMS, &params, sizeof(params));
+ if (err)
+ return err;
+
+ dev_dbg(dev->dev, "system params mask:0x%x flags:0x%x cmd_code:0x%x heartbeat:%d\n",
+ params.valid_mask,
+ params.flags,
+ params.command_code,
+ params.heartbeat_int);
+ params.flags = params.flags & params.valid_mask;
+ dev->hb_interval = params.heartbeat_int;
+
+ return 0;
+}
+
+int apmf_get_dyn_slider_def_ac(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_output *data)
+{
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_DYN_SLIDER_AC, data, sizeof(*data));
+}
+
+int apmf_get_dyn_slider_def_dc(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_output *data)
+{
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_DYN_SLIDER_DC, data, sizeof(*data));
+}
+
+int apmf_install_handler(struct amd_pmf_dev *pmf_dev)
+{
+ acpi_handle ahandle = ACPI_HANDLE(pmf_dev->dev);
+ acpi_status status;
+
+ /* Install the APMF Notify handler */
+ if (is_apmf_func_supported(pmf_dev, APMF_FUNC_AUTO_MODE) &&
+ is_apmf_func_supported(pmf_dev, APMF_FUNC_SBIOS_REQUESTS)) {
+ status = acpi_install_notify_handler(ahandle, ACPI_ALL_NOTIFY,
+ apmf_event_handler, pmf_dev);
+ if (ACPI_FAILURE(status)) {
+ dev_err(pmf_dev->dev, "failed to install notify handler\n");
+ return -ENODEV;
+ }
+
+ /* Call the handler once manually to catch up with possibly missed notifies. */
+ apmf_event_handler(ahandle, 0, pmf_dev);
+ }
+
+ return 0;
+}
+
+void apmf_acpi_deinit(struct amd_pmf_dev *pmf_dev)
+{
+ acpi_handle ahandle = ACPI_HANDLE(pmf_dev->dev);
+
+ if (pmf_dev->hb_interval)
+ cancel_delayed_work_sync(&pmf_dev->heart_beat);
+
+ if (is_apmf_func_supported(pmf_dev, APMF_FUNC_AUTO_MODE) &&
+ is_apmf_func_supported(pmf_dev, APMF_FUNC_SBIOS_REQUESTS))
+ acpi_remove_notify_handler(ahandle, ACPI_ALL_NOTIFY, apmf_event_handler);
+}
+
+int apmf_acpi_init(struct amd_pmf_dev *pmf_dev)
+{
+ int ret;
+
+ ret = apmf_if_verify_interface(pmf_dev);
+ if (ret) {
+ dev_err(pmf_dev->dev, "APMF verify interface failed :%d\n", ret);
+ goto out;
+ }
+
+ ret = apmf_get_system_params(pmf_dev);
+ if (ret) {
+ dev_dbg(pmf_dev->dev, "APMF apmf_get_system_params failed :%d\n", ret);
+ goto out;
+ }
+
+ if (pmf_dev->hb_interval) {
+ /* send heartbeats only if the interval is not zero */
+ INIT_DELAYED_WORK(&pmf_dev->heart_beat, apmf_sbios_heartbeat_notify);
+ schedule_delayed_work(&pmf_dev->heart_beat, 0);
+ }
+
+out:
+ return ret;
+}
diff --git a/drivers/platform/x86/amd/pmf/auto-mode.c b/drivers/platform/x86/amd/pmf/auto-mode.c
new file mode 100644
index 0000000000..02ff68be10
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/auto-mode.c
@@ -0,0 +1,441 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Management Framework Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/workqueue.h>
+#include "pmf.h"
+
+static struct auto_mode_mode_config config_store;
+static const char *state_as_str(unsigned int state);
+
+#ifdef CONFIG_AMD_PMF_DEBUG
+static void amd_pmf_dump_auto_mode_defaults(struct auto_mode_mode_config *data)
+{
+ struct auto_mode_mode_settings *its_mode;
+
+ pr_debug("Auto Mode Data - BEGIN\n");
+
+ /* time constant */
+ pr_debug("balanced_to_perf: %u ms\n",
+ data->transition[AUTO_TRANSITION_TO_PERFORMANCE].time_constant);
+ pr_debug("perf_to_balanced: %u ms\n",
+ data->transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].time_constant);
+ pr_debug("quiet_to_balanced: %u ms\n",
+ data->transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].time_constant);
+ pr_debug("balanced_to_quiet: %u ms\n",
+ data->transition[AUTO_TRANSITION_TO_QUIET].time_constant);
+
+ /* power floor */
+ pr_debug("pfloor_perf: %u mW\n", data->mode_set[AUTO_PERFORMANCE].power_floor);
+ pr_debug("pfloor_balanced: %u mW\n", data->mode_set[AUTO_BALANCE].power_floor);
+ pr_debug("pfloor_quiet: %u mW\n", data->mode_set[AUTO_QUIET].power_floor);
+
+ /* Power delta for mode change */
+ pr_debug("pd_balanced_to_perf: %u mW\n",
+ data->transition[AUTO_TRANSITION_TO_PERFORMANCE].power_delta);
+ pr_debug("pd_perf_to_balanced: %u mW\n",
+ data->transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_delta);
+ pr_debug("pd_quiet_to_balanced: %u mW\n",
+ data->transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_delta);
+ pr_debug("pd_balanced_to_quiet: %u mW\n",
+ data->transition[AUTO_TRANSITION_TO_QUIET].power_delta);
+
+ /* skin temperature limits */
+ its_mode = &data->mode_set[AUTO_PERFORMANCE_ON_LAP];
+ pr_debug("stt_apu_perf_on_lap: %u C\n",
+ its_mode->power_control.stt_skin_temp[STT_TEMP_APU]);
+ pr_debug("stt_hs2_perf_on_lap: %u C\n",
+ its_mode->power_control.stt_skin_temp[STT_TEMP_HS2]);
+ pr_debug("stt_min_limit_perf_on_lap: %u mW\n", its_mode->power_control.stt_min);
+
+ its_mode = &data->mode_set[AUTO_PERFORMANCE];
+ pr_debug("stt_apu_perf: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_APU]);
+ pr_debug("stt_hs2_perf: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_HS2]);
+ pr_debug("stt_min_limit_perf: %u mW\n", its_mode->power_control.stt_min);
+
+ its_mode = &data->mode_set[AUTO_BALANCE];
+ pr_debug("stt_apu_balanced: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_APU]);
+ pr_debug("stt_hs2_balanced: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_HS2]);
+ pr_debug("stt_min_limit_balanced: %u mW\n", its_mode->power_control.stt_min);
+
+ its_mode = &data->mode_set[AUTO_QUIET];
+ pr_debug("stt_apu_quiet: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_APU]);
+ pr_debug("stt_hs2_quiet: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_HS2]);
+ pr_debug("stt_min_limit_quiet: %u mW\n", its_mode->power_control.stt_min);
+
+ /* SPL based power limits */
+ its_mode = &data->mode_set[AUTO_PERFORMANCE_ON_LAP];
+ pr_debug("fppt_perf_on_lap: %u mW\n", its_mode->power_control.fppt);
+ pr_debug("sppt_perf_on_lap: %u mW\n", its_mode->power_control.sppt);
+ pr_debug("spl_perf_on_lap: %u mW\n", its_mode->power_control.spl);
+ pr_debug("sppt_apu_only_perf_on_lap: %u mW\n", its_mode->power_control.sppt_apu_only);
+
+ its_mode = &data->mode_set[AUTO_PERFORMANCE];
+ pr_debug("fppt_perf: %u mW\n", its_mode->power_control.fppt);
+ pr_debug("sppt_perf: %u mW\n", its_mode->power_control.sppt);
+ pr_debug("spl_perf: %u mW\n", its_mode->power_control.spl);
+ pr_debug("sppt_apu_only_perf: %u mW\n", its_mode->power_control.sppt_apu_only);
+
+ its_mode = &data->mode_set[AUTO_BALANCE];
+ pr_debug("fppt_balanced: %u mW\n", its_mode->power_control.fppt);
+ pr_debug("sppt_balanced: %u mW\n", its_mode->power_control.sppt);
+ pr_debug("spl_balanced: %u mW\n", its_mode->power_control.spl);
+ pr_debug("sppt_apu_only_balanced: %u mW\n", its_mode->power_control.sppt_apu_only);
+
+ its_mode = &data->mode_set[AUTO_QUIET];
+ pr_debug("fppt_quiet: %u mW\n", its_mode->power_control.fppt);
+ pr_debug("sppt_quiet: %u mW\n", its_mode->power_control.sppt);
+ pr_debug("spl_quiet: %u mW\n", its_mode->power_control.spl);
+ pr_debug("sppt_apu_only_quiet: %u mW\n", its_mode->power_control.sppt_apu_only);
+
+ /* Fan ID */
+ pr_debug("fan_id_perf: %lu\n",
+ data->mode_set[AUTO_PERFORMANCE].fan_control.fan_id);
+ pr_debug("fan_id_balanced: %lu\n",
+ data->mode_set[AUTO_BALANCE].fan_control.fan_id);
+ pr_debug("fan_id_quiet: %lu\n",
+ data->mode_set[AUTO_QUIET].fan_control.fan_id);
+
+ pr_debug("Auto Mode Data - END\n");
+}
+#else
+static void amd_pmf_dump_auto_mode_defaults(struct auto_mode_mode_config *data) {}
+#endif
+
+static void amd_pmf_set_automode(struct amd_pmf_dev *dev, int idx,
+ struct auto_mode_mode_config *table)
+{
+ struct power_table_control *pwr_ctrl = &config_store.mode_set[idx].power_control;
+
+ amd_pmf_send_cmd(dev, SET_SPL, false, pwr_ctrl->spl, NULL);
+ amd_pmf_send_cmd(dev, SET_FPPT, false, pwr_ctrl->fppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT, false, pwr_ctrl->sppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pwr_ctrl->sppt_apu_only, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pwr_ctrl->stt_min, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
+ pwr_ctrl->stt_skin_temp[STT_TEMP_APU], NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
+ pwr_ctrl->stt_skin_temp[STT_TEMP_HS2], NULL);
+
+ if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX))
+ apmf_update_fan_idx(dev, config_store.mode_set[idx].fan_control.manual,
+ config_store.mode_set[idx].fan_control.fan_id);
+}
+
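+/*
+ * Socket power is smoothed with a simple moving average over
+ * AVG_SAMPLE_SIZE samples kept in a ring buffer. On the very first
+ * sample (history index still -1) the whole history is seeded with the
+ * current reading, so the average starts at the real power level
+ * instead of ramping up from zero.
+ */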
+static int amd_pmf_get_moving_avg(struct amd_pmf_dev *pdev, int socket_power)
+{
+ int i, total = 0;
+
+ if (pdev->socket_power_history_idx == -1) {
+ for (i = 0; i < AVG_SAMPLE_SIZE; i++)
+ pdev->socket_power_history[i] = socket_power;
+ }
+
+ pdev->socket_power_history_idx = (pdev->socket_power_history_idx + 1) % AVG_SAMPLE_SIZE;
+ pdev->socket_power_history[pdev->socket_power_history_idx] = socket_power;
+
+ for (i = 0; i < AVG_SAMPLE_SIZE; i++)
+ total += pdev->socket_power_history[i];
+
+ return total / AVG_SAMPLE_SIZE;
+}
+
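+/*
+ * A transition is applied only after the averaged power has stayed on
+ * the qualifying side of its threshold for the whole time_constant; any
+ * sample on the wrong side resets the timer. For example (illustrative
+ * numbers), with a 2000 ms time constant and 1000 ms sampling, two
+ * consecutive qualifying samples are needed before the mode changes.
+ */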
+void amd_pmf_trans_automode(struct amd_pmf_dev *dev, int socket_power, ktime_t time_elapsed_ms)
+{
+ int avg_power = 0;
+ bool update = false;
+ int i, j;
+
+	/* Get the moving average computed by the auto mode algorithm */
+ avg_power = amd_pmf_get_moving_avg(dev, socket_power);
+
+ for (i = 0; i < AUTO_TRANSITION_MAX; i++) {
+ if ((config_store.transition[i].shifting_up && avg_power >=
+ config_store.transition[i].power_threshold) ||
+ (!config_store.transition[i].shifting_up && avg_power <=
+ config_store.transition[i].power_threshold)) {
+ if (config_store.transition[i].timer <
+ config_store.transition[i].time_constant)
+ config_store.transition[i].timer += time_elapsed_ms;
+ } else {
+ config_store.transition[i].timer = 0;
+ }
+
+ if (config_store.transition[i].timer >=
+ config_store.transition[i].time_constant &&
+ !config_store.transition[i].applied) {
+ config_store.transition[i].applied = true;
+ update = true;
+ } else if (config_store.transition[i].timer <=
+ config_store.transition[i].time_constant &&
+ config_store.transition[i].applied) {
+ config_store.transition[i].applied = false;
+ update = true;
+ }
+
+#ifdef CONFIG_AMD_PMF_DEBUG
+ dev_dbg(dev->dev, "[AUTO MODE] average_power : %d mW mode: %s\n", avg_power,
+ state_as_str(config_store.current_mode));
+
+ dev_dbg(dev->dev, "[AUTO MODE] time: %lld ms timer: %u ms tc: %u ms\n",
+ time_elapsed_ms, config_store.transition[i].timer,
+ config_store.transition[i].time_constant);
+
+ dev_dbg(dev->dev, "[AUTO MODE] shiftup: %u pt: %u mW pf: %u mW pd: %u mW\n",
+ config_store.transition[i].shifting_up,
+ config_store.transition[i].power_threshold,
+ config_store.mode_set[i].power_floor,
+ config_store.transition[i].power_delta);
+#endif
+ }
+
+ dev_dbg(dev->dev, "[AUTO_MODE] avg power: %u mW mode: %s\n", avg_power,
+ state_as_str(config_store.current_mode));
+
+#ifdef CONFIG_AMD_PMF_DEBUG
+ dev_dbg(dev->dev, "[AUTO MODE] priority1: %u priority2: %u priority3: %u priority4: %u\n",
+ config_store.transition[0].applied,
+ config_store.transition[1].applied,
+ config_store.transition[2].applied,
+ config_store.transition[3].applied);
+#endif
+
+ if (update) {
+ for (j = 0; j < AUTO_TRANSITION_MAX; j++) {
+			/* Apply the mode with the highest priority identified */
+ if (config_store.transition[j].applied) {
+ if (config_store.current_mode !=
+ config_store.transition[j].target_mode) {
+ config_store.current_mode =
+ config_store.transition[j].target_mode;
+ dev_dbg(dev->dev, "[AUTO_MODE] moving to mode:%s\n",
+ state_as_str(config_store.current_mode));
+ amd_pmf_set_automode(dev, config_store.current_mode, NULL);
+ }
+ break;
+ }
+ }
+ }
+}
+
+void amd_pmf_update_2_cql(struct amd_pmf_dev *dev, bool is_cql_event)
+{
+ int mode = config_store.current_mode;
+
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode =
+ is_cql_event ? AUTO_PERFORMANCE_ON_LAP : AUTO_PERFORMANCE;
+
+ if ((mode == AUTO_PERFORMANCE || mode == AUTO_PERFORMANCE_ON_LAP) &&
+ mode != config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode) {
+ mode = config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode;
+ amd_pmf_set_automode(dev, mode, NULL);
+ }
+ dev_dbg(dev->dev, "updated CQL thermals\n");
+}
+
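+/*
+ * Each transition threshold is the source mode's power floor minus that
+ * transition's power delta. With an illustrative balanced floor of
+ * 8000 mW and a to-quiet delta of 1000 mW, the quiet transition starts
+ * arming once the moving average drops to 7000 mW or below.
+ */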
+static void amd_pmf_get_power_threshold(void)
+{
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].power_threshold =
+ config_store.mode_set[AUTO_BALANCE].power_floor -
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].power_delta;
+
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_threshold =
+ config_store.mode_set[AUTO_BALANCE].power_floor -
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_delta;
+
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_threshold =
+ config_store.mode_set[AUTO_QUIET].power_floor -
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_delta;
+
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_threshold =
+ config_store.mode_set[AUTO_PERFORMANCE].power_floor -
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_delta;
+
+#ifdef CONFIG_AMD_PMF_DEBUG
+ pr_debug("[AUTO MODE TO_QUIET] pt: %u mW pf: %u mW pd: %u mW\n",
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].power_threshold,
+ config_store.mode_set[AUTO_BALANCE].power_floor,
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].power_delta);
+
+ pr_debug("[AUTO MODE TO_PERFORMANCE] pt: %u mW pf: %u mW pd: %u mW\n",
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_threshold,
+ config_store.mode_set[AUTO_BALANCE].power_floor,
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_delta);
+
+ pr_debug("[AUTO MODE QUIET_TO_BALANCE] pt: %u mW pf: %u mW pd: %u mW\n",
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE]
+ .power_threshold,
+ config_store.mode_set[AUTO_QUIET].power_floor,
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_delta);
+
+ pr_debug("[AUTO MODE PERFORMANCE_TO_BALANCE] pt: %u mW pf: %u mW pd: %u mW\n",
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE]
+ .power_threshold,
+ config_store.mode_set[AUTO_PERFORMANCE].power_floor,
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_delta);
+#endif
+}
+
+static const char *state_as_str(unsigned int state)
+{
+ switch (state) {
+ case AUTO_QUIET:
+ return "QUIET";
+ case AUTO_BALANCE:
+ return "BALANCED";
+ case AUTO_PERFORMANCE_ON_LAP:
+ return "ON_LAP";
+ case AUTO_PERFORMANCE:
+ return "PERFORMANCE";
+ default:
+ return "Unknown Auto Mode State";
+ }
+}
+
+static void amd_pmf_load_defaults_auto_mode(struct amd_pmf_dev *dev)
+{
+ struct apmf_auto_mode output;
+ struct power_table_control *pwr_ctrl;
+ int i;
+
+ apmf_get_auto_mode_def(dev, &output);
+ /* time constant */
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].time_constant =
+ output.balanced_to_quiet;
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].time_constant =
+ output.balanced_to_perf;
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].time_constant =
+ output.quiet_to_balanced;
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].time_constant =
+ output.perf_to_balanced;
+
+ /* power floor */
+ config_store.mode_set[AUTO_QUIET].power_floor = output.pfloor_quiet;
+ config_store.mode_set[AUTO_BALANCE].power_floor = output.pfloor_balanced;
+ config_store.mode_set[AUTO_PERFORMANCE].power_floor = output.pfloor_perf;
+ config_store.mode_set[AUTO_PERFORMANCE_ON_LAP].power_floor = output.pfloor_perf;
+
+ /* Power delta for mode change */
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].power_delta =
+ output.pd_balanced_to_quiet;
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_delta =
+ output.pd_balanced_to_perf;
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_delta =
+ output.pd_quiet_to_balanced;
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_delta =
+ output.pd_perf_to_balanced;
+
+ /* Power threshold */
+ amd_pmf_get_power_threshold();
+
+ /* skin temperature limits */
+ pwr_ctrl = &config_store.mode_set[AUTO_QUIET].power_control;
+ pwr_ctrl->spl = output.spl_quiet;
+ pwr_ctrl->sppt = output.sppt_quiet;
+ pwr_ctrl->fppt = output.fppt_quiet;
+ pwr_ctrl->sppt_apu_only = output.sppt_apu_only_quiet;
+ pwr_ctrl->stt_min = output.stt_min_limit_quiet;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_quiet;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_quiet;
+
+ pwr_ctrl = &config_store.mode_set[AUTO_BALANCE].power_control;
+ pwr_ctrl->spl = output.spl_balanced;
+ pwr_ctrl->sppt = output.sppt_balanced;
+ pwr_ctrl->fppt = output.fppt_balanced;
+ pwr_ctrl->sppt_apu_only = output.sppt_apu_only_balanced;
+ pwr_ctrl->stt_min = output.stt_min_limit_balanced;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_balanced;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_balanced;
+
+ pwr_ctrl = &config_store.mode_set[AUTO_PERFORMANCE].power_control;
+ pwr_ctrl->spl = output.spl_perf;
+ pwr_ctrl->sppt = output.sppt_perf;
+ pwr_ctrl->fppt = output.fppt_perf;
+ pwr_ctrl->sppt_apu_only = output.sppt_apu_only_perf;
+ pwr_ctrl->stt_min = output.stt_min_limit_perf;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_perf;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_perf;
+
+ pwr_ctrl = &config_store.mode_set[AUTO_PERFORMANCE_ON_LAP].power_control;
+ pwr_ctrl->spl = output.spl_perf_on_lap;
+ pwr_ctrl->sppt = output.sppt_perf_on_lap;
+ pwr_ctrl->fppt = output.fppt_perf_on_lap;
+ pwr_ctrl->sppt_apu_only = output.sppt_apu_only_perf_on_lap;
+ pwr_ctrl->stt_min = output.stt_min_limit_perf_on_lap;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_perf_on_lap;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_perf_on_lap;
+
+ /* Fan ID */
+ config_store.mode_set[AUTO_QUIET].fan_control.fan_id = output.fan_id_quiet;
+ config_store.mode_set[AUTO_BALANCE].fan_control.fan_id = output.fan_id_balanced;
+ config_store.mode_set[AUTO_PERFORMANCE].fan_control.fan_id = output.fan_id_perf;
+ config_store.mode_set[AUTO_PERFORMANCE_ON_LAP].fan_control.fan_id =
+ output.fan_id_perf;
+
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].target_mode = AUTO_QUIET;
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode =
+ AUTO_PERFORMANCE;
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].target_mode =
+ AUTO_BALANCE;
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].target_mode =
+ AUTO_BALANCE;
+
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].shifting_up = false;
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].shifting_up = true;
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].shifting_up = true;
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].shifting_up =
+ false;
+
+ for (i = 0 ; i < AUTO_MODE_MAX ; i++) {
+ if (config_store.mode_set[i].fan_control.fan_id == FAN_INDEX_AUTO)
+ config_store.mode_set[i].fan_control.manual = false;
+ else
+ config_store.mode_set[i].fan_control.manual = true;
+ }
+
+ /* set to initial default values */
+ config_store.current_mode = AUTO_BALANCE;
+ dev->socket_power_history_idx = -1;
+
+ amd_pmf_dump_auto_mode_defaults(&config_store);
+}
+
+int amd_pmf_reset_amt(struct amd_pmf_dev *dev)
+{
+ /*
+ * The OEM BIOS implementation guide says that if auto mode is enabled,
+ * the platform_profile registration shall be done by the OEM driver.
+ * Both the static slider and auto mode BIOS functions may be enabled
+ * at the same time; in that case, enable static slider updates only
+ * if the function is advertised as supported.
+ */
+
+ if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
+ dev_dbg(dev->dev, "resetting AMT thermals\n");
+ amd_pmf_set_sps_power_limits(dev);
+ }
+ return 0;
+}
+
+void amd_pmf_handle_amt(struct amd_pmf_dev *dev)
+{
+ amd_pmf_set_automode(dev, config_store.current_mode, NULL);
+}
+
+void amd_pmf_deinit_auto_mode(struct amd_pmf_dev *dev)
+{
+ cancel_delayed_work_sync(&dev->work_buffer);
+}
+
+void amd_pmf_init_auto_mode(struct amd_pmf_dev *dev)
+{
+ amd_pmf_load_defaults_auto_mode(dev);
+ amd_pmf_init_metrics_table(dev);
+}
diff --git a/drivers/platform/x86/amd/pmf/cnqf.c b/drivers/platform/x86/amd/pmf/cnqf.c
new file mode 100644
index 0000000000..bc8899e15c
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/cnqf.c
@@ -0,0 +1,467 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Management Framework Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#include <linux/string_choices.h>
+#include <linux/workqueue.h>
+#include "pmf.h"
+
+static struct cnqf_config config_store;
+
+#ifdef CONFIG_AMD_PMF_DEBUG
+static const char *state_as_str_cnqf(unsigned int state)
+{
+ switch (state) {
+ case APMF_CNQF_TURBO:
+ return "turbo";
+ case APMF_CNQF_PERFORMANCE:
+ return "performance";
+ case APMF_CNQF_BALANCE:
+ return "balance";
+ case APMF_CNQF_QUIET:
+ return "quiet";
+ default:
+ return "Unknown CnQF State";
+ }
+}
+
+static void amd_pmf_cnqf_dump_defaults(struct apmf_dyn_slider_output *data, int idx)
+{
+ int i;
+
+ pr_debug("Dynamic Slider %s Defaults - BEGIN\n", idx ? "DC" : "AC");
+ pr_debug("size: %u\n", data->size);
+ pr_debug("flags: 0x%x\n", data->flags);
+
+ /* Time constants */
+ pr_debug("t_perf_to_turbo: %u ms\n", data->t_perf_to_turbo);
+ pr_debug("t_balanced_to_perf: %u ms\n", data->t_balanced_to_perf);
+ pr_debug("t_quiet_to_balanced: %u ms\n", data->t_quiet_to_balanced);
+ pr_debug("t_balanced_to_quiet: %u ms\n", data->t_balanced_to_quiet);
+ pr_debug("t_perf_to_balanced: %u ms\n", data->t_perf_to_balanced);
+ pr_debug("t_turbo_to_perf: %u ms\n", data->t_turbo_to_perf);
+
+ for (i = 0 ; i < CNQF_MODE_MAX ; i++) {
+ pr_debug("pfloor_%s: %u mW\n", state_as_str_cnqf(i), data->ps[i].pfloor);
+ pr_debug("fppt_%s: %u mW\n", state_as_str_cnqf(i), data->ps[i].fppt);
+ pr_debug("sppt_%s: %u mW\n", state_as_str_cnqf(i), data->ps[i].sppt);
+ pr_debug("sppt_apuonly_%s: %u mW\n",
+ state_as_str_cnqf(i), data->ps[i].sppt_apu_only);
+ pr_debug("spl_%s: %u mW\n", state_as_str_cnqf(i), data->ps[i].spl);
+ pr_debug("stt_minlimit_%s: %u mW\n",
+ state_as_str_cnqf(i), data->ps[i].stt_min_limit);
+ pr_debug("stt_skintemp_apu_%s: %u C\n", state_as_str_cnqf(i),
+ data->ps[i].stt_skintemp[STT_TEMP_APU]);
+ pr_debug("stt_skintemp_hs2_%s: %u C\n", state_as_str_cnqf(i),
+ data->ps[i].stt_skintemp[STT_TEMP_HS2]);
+ pr_debug("fan_id_%s: %u\n", state_as_str_cnqf(i), data->ps[i].fan_id);
+ }
+
+ pr_debug("Dynamic Slider %s Defaults - END\n", idx ? "DC" : "AC");
+}
+#else
+static void amd_pmf_cnqf_dump_defaults(struct apmf_dyn_slider_output *data, int idx) {}
+#endif
+
+static int amd_pmf_set_cnqf(struct amd_pmf_dev *dev, int src, int idx,
+ struct cnqf_config *table)
+{
+ struct power_table_control *pc;
+
+ pc = &config_store.mode_set[src][idx].power_control;
+
+ amd_pmf_send_cmd(dev, SET_SPL, false, pc->spl, NULL);
+ amd_pmf_send_cmd(dev, SET_FPPT, false, pc->fppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT, false, pc->sppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pc->sppt_apu_only, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pc->stt_min, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, pc->stt_skin_temp[STT_TEMP_APU],
+ NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, pc->stt_skin_temp[STT_TEMP_HS2],
+ NULL);
+
+ if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX))
+ apmf_update_fan_idx(dev,
+ config_store.mode_set[src][idx].fan_control.manual,
+ config_store.mode_set[src][idx].fan_control.fan_id);
+
+ return 0;
+}
+
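+/*
+ * For CnQF the transition threshold is simply the power floor of the
+ * mode being left; unlike auto mode, no power delta is subtracted.
+ */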
+static void amd_pmf_update_power_threshold(int src)
+{
+ struct cnqf_mode_settings *ts;
+ struct cnqf_tran_params *tp;
+
+ tp = &config_store.trans_param[src][CNQF_TRANSITION_TO_QUIET];
+ ts = &config_store.mode_set[src][CNQF_MODE_BALANCE];
+ tp->power_threshold = ts->power_floor;
+
+ tp = &config_store.trans_param[src][CNQF_TRANSITION_TO_TURBO];
+ ts = &config_store.mode_set[src][CNQF_MODE_PERFORMANCE];
+ tp->power_threshold = ts->power_floor;
+
+ tp = &config_store.trans_param[src][CNQF_TRANSITION_FROM_BALANCE_TO_PERFORMANCE];
+ ts = &config_store.mode_set[src][CNQF_MODE_BALANCE];
+ tp->power_threshold = ts->power_floor;
+
+ tp = &config_store.trans_param[src][CNQF_TRANSITION_FROM_PERFORMANCE_TO_BALANCE];
+ ts = &config_store.mode_set[src][CNQF_MODE_PERFORMANCE];
+ tp->power_threshold = ts->power_floor;
+
+ tp = &config_store.trans_param[src][CNQF_TRANSITION_FROM_QUIET_TO_BALANCE];
+ ts = &config_store.mode_set[src][CNQF_MODE_QUIET];
+ tp->power_threshold = ts->power_floor;
+
+ tp = &config_store.trans_param[src][CNQF_TRANSITION_FROM_TURBO_TO_PERFORMANCE];
+ ts = &config_store.mode_set[src][CNQF_MODE_TURBO];
+ tp->power_threshold = ts->power_floor;
+}
+
+static const char *state_as_str(unsigned int state)
+{
+ switch (state) {
+ case CNQF_MODE_QUIET:
+ return "QUIET";
+ case CNQF_MODE_BALANCE:
+ return "BALANCED";
+ case CNQF_MODE_TURBO:
+ return "TURBO";
+ case CNQF_MODE_PERFORMANCE:
+ return "PERFORMANCE";
+ default:
+ return "Unknown CnQF mode";
+ }
+}
+
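+/*
+ * If the BIOS advertises only one dynamic slider table (AC or DC), that
+ * table is used regardless of the actual supply; the real power source
+ * is consulted only when both tables exist.
+ */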
+static int amd_pmf_cnqf_get_power_source(struct amd_pmf_dev *dev)
+{
+ if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) &&
+ is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC))
+ return amd_pmf_get_power_source();
+ else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC))
+ return POWER_SOURCE_DC;
+ else
+ return POWER_SOURCE_AC;
+}
+
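+/*
+ * Unlike auto mode, CnQF averages power over each transition's own
+ * time_constant window: samples accumulate in total_power/count and are
+ * compared against the threshold only once the window expires, after
+ * which the accumulators reset for the next window.
+ */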
+int amd_pmf_trans_cnqf(struct amd_pmf_dev *dev, int socket_power, ktime_t time_lapsed_ms)
+{
+ struct cnqf_tran_params *tp;
+ int src, i, j;
+ u32 avg_power = 0;
+
+ src = amd_pmf_cnqf_get_power_source(dev);
+
+ if (is_pprof_balanced(dev)) {
+ amd_pmf_set_cnqf(dev, src, config_store.current_mode, NULL);
+ } else {
+ /*
+		 * Return early when the platform_profile is not balanced,
+		 * so that preference is given to the user's mode selection
+		 * rather than forcing CnQF to run all the time (if enabled).
+ */
+ return -EINVAL;
+ }
+
+ for (i = 0; i < CNQF_TRANSITION_MAX; i++) {
+ config_store.trans_param[src][i].timer += time_lapsed_ms;
+ config_store.trans_param[src][i].total_power += socket_power;
+ config_store.trans_param[src][i].count++;
+
+ tp = &config_store.trans_param[src][i];
+
+#ifdef CONFIG_AMD_PMF_DEBUG
+ dev_dbg(dev->dev, "avg_power: %u mW total_power: %u mW count: %u timer: %u ms\n",
+ avg_power, config_store.trans_param[src][i].total_power,
+ config_store.trans_param[src][i].count,
+ config_store.trans_param[src][i].timer);
+#endif
+ if (tp->timer >= tp->time_constant && tp->count) {
+ avg_power = tp->total_power / tp->count;
+
+ /* Reset the indices */
+ tp->timer = 0;
+ tp->total_power = 0;
+ tp->count = 0;
+
+ if ((tp->shifting_up && avg_power >= tp->power_threshold) ||
+ (!tp->shifting_up && avg_power <= tp->power_threshold)) {
+ tp->priority = true;
+ } else {
+ tp->priority = false;
+ }
+ }
+ }
+
+ dev_dbg(dev->dev, "[CNQF] Avg power: %u mW socket power: %u mW mode:%s\n",
+ avg_power, socket_power, state_as_str(config_store.current_mode));
+
+#ifdef CONFIG_AMD_PMF_DEBUG
+ dev_dbg(dev->dev, "[CNQF] priority1: %u priority2: %u priority3: %u\n",
+ config_store.trans_param[src][0].priority,
+ config_store.trans_param[src][1].priority,
+ config_store.trans_param[src][2].priority);
+
+ dev_dbg(dev->dev, "[CNQF] priority4: %u priority5: %u priority6: %u\n",
+ config_store.trans_param[src][3].priority,
+ config_store.trans_param[src][4].priority,
+ config_store.trans_param[src][5].priority);
+#endif
+
+ for (j = 0; j < CNQF_TRANSITION_MAX; j++) {
+ /* apply the highest priority */
+ if (config_store.trans_param[src][j].priority) {
+ if (config_store.current_mode !=
+ config_store.trans_param[src][j].target_mode) {
+ config_store.current_mode =
+ config_store.trans_param[src][j].target_mode;
+ dev_dbg(dev->dev, "Moving to Mode :%s\n",
+ state_as_str(config_store.current_mode));
+ amd_pmf_set_cnqf(dev, src,
+ config_store.current_mode, NULL);
+ }
+ break;
+ }
+ }
+ return 0;
+}
+
+static void amd_pmf_update_trans_data(int idx, struct apmf_dyn_slider_output *out)
+{
+ struct cnqf_tran_params *tp;
+
+ tp = &config_store.trans_param[idx][CNQF_TRANSITION_TO_QUIET];
+ tp->time_constant = out->t_balanced_to_quiet;
+ tp->target_mode = CNQF_MODE_QUIET;
+ tp->shifting_up = false;
+
+ tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_BALANCE_TO_PERFORMANCE];
+ tp->time_constant = out->t_balanced_to_perf;
+ tp->target_mode = CNQF_MODE_PERFORMANCE;
+ tp->shifting_up = true;
+
+ tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_QUIET_TO_BALANCE];
+ tp->time_constant = out->t_quiet_to_balanced;
+ tp->target_mode = CNQF_MODE_BALANCE;
+ tp->shifting_up = true;
+
+ tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_PERFORMANCE_TO_BALANCE];
+ tp->time_constant = out->t_perf_to_balanced;
+ tp->target_mode = CNQF_MODE_BALANCE;
+ tp->shifting_up = false;
+
+ tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_TURBO_TO_PERFORMANCE];
+ tp->time_constant = out->t_turbo_to_perf;
+ tp->target_mode = CNQF_MODE_PERFORMANCE;
+ tp->shifting_up = false;
+
+ tp = &config_store.trans_param[idx][CNQF_TRANSITION_TO_TURBO];
+ tp->time_constant = out->t_perf_to_turbo;
+ tp->target_mode = CNQF_MODE_TURBO;
+ tp->shifting_up = true;
+}
+
+static void amd_pmf_update_mode_set(int idx, struct apmf_dyn_slider_output *out)
+{
+ struct cnqf_mode_settings *ms;
+
+ /* Quiet Mode */
+ ms = &config_store.mode_set[idx][CNQF_MODE_QUIET];
+ ms->power_floor = out->ps[APMF_CNQF_QUIET].pfloor;
+ ms->power_control.fppt = out->ps[APMF_CNQF_QUIET].fppt;
+ ms->power_control.sppt = out->ps[APMF_CNQF_QUIET].sppt;
+ ms->power_control.sppt_apu_only = out->ps[APMF_CNQF_QUIET].sppt_apu_only;
+ ms->power_control.spl = out->ps[APMF_CNQF_QUIET].spl;
+ ms->power_control.stt_min = out->ps[APMF_CNQF_QUIET].stt_min_limit;
+ ms->power_control.stt_skin_temp[STT_TEMP_APU] =
+ out->ps[APMF_CNQF_QUIET].stt_skintemp[STT_TEMP_APU];
+ ms->power_control.stt_skin_temp[STT_TEMP_HS2] =
+ out->ps[APMF_CNQF_QUIET].stt_skintemp[STT_TEMP_HS2];
+ ms->fan_control.fan_id = out->ps[APMF_CNQF_QUIET].fan_id;
+
+ /* Balance Mode */
+ ms = &config_store.mode_set[idx][CNQF_MODE_BALANCE];
+ ms->power_floor = out->ps[APMF_CNQF_BALANCE].pfloor;
+ ms->power_control.fppt = out->ps[APMF_CNQF_BALANCE].fppt;
+ ms->power_control.sppt = out->ps[APMF_CNQF_BALANCE].sppt;
+ ms->power_control.sppt_apu_only = out->ps[APMF_CNQF_BALANCE].sppt_apu_only;
+ ms->power_control.spl = out->ps[APMF_CNQF_BALANCE].spl;
+ ms->power_control.stt_min = out->ps[APMF_CNQF_BALANCE].stt_min_limit;
+ ms->power_control.stt_skin_temp[STT_TEMP_APU] =
+ out->ps[APMF_CNQF_BALANCE].stt_skintemp[STT_TEMP_APU];
+ ms->power_control.stt_skin_temp[STT_TEMP_HS2] =
+ out->ps[APMF_CNQF_BALANCE].stt_skintemp[STT_TEMP_HS2];
+ ms->fan_control.fan_id = out->ps[APMF_CNQF_BALANCE].fan_id;
+
+ /* Performance Mode */
+ ms = &config_store.mode_set[idx][CNQF_MODE_PERFORMANCE];
+ ms->power_floor = out->ps[APMF_CNQF_PERFORMANCE].pfloor;
+ ms->power_control.fppt = out->ps[APMF_CNQF_PERFORMANCE].fppt;
+ ms->power_control.sppt = out->ps[APMF_CNQF_PERFORMANCE].sppt;
+ ms->power_control.sppt_apu_only = out->ps[APMF_CNQF_PERFORMANCE].sppt_apu_only;
+ ms->power_control.spl = out->ps[APMF_CNQF_PERFORMANCE].spl;
+ ms->power_control.stt_min = out->ps[APMF_CNQF_PERFORMANCE].stt_min_limit;
+ ms->power_control.stt_skin_temp[STT_TEMP_APU] =
+ out->ps[APMF_CNQF_PERFORMANCE].stt_skintemp[STT_TEMP_APU];
+ ms->power_control.stt_skin_temp[STT_TEMP_HS2] =
+ out->ps[APMF_CNQF_PERFORMANCE].stt_skintemp[STT_TEMP_HS2];
+ ms->fan_control.fan_id = out->ps[APMF_CNQF_PERFORMANCE].fan_id;
+
+ /* Turbo Mode */
+ ms = &config_store.mode_set[idx][CNQF_MODE_TURBO];
+ ms->power_floor = out->ps[APMF_CNQF_TURBO].pfloor;
+ ms->power_control.fppt = out->ps[APMF_CNQF_TURBO].fppt;
+ ms->power_control.sppt = out->ps[APMF_CNQF_TURBO].sppt;
+ ms->power_control.sppt_apu_only = out->ps[APMF_CNQF_TURBO].sppt_apu_only;
+ ms->power_control.spl = out->ps[APMF_CNQF_TURBO].spl;
+ ms->power_control.stt_min = out->ps[APMF_CNQF_TURBO].stt_min_limit;
+ ms->power_control.stt_skin_temp[STT_TEMP_APU] =
+ out->ps[APMF_CNQF_TURBO].stt_skintemp[STT_TEMP_APU];
+ ms->power_control.stt_skin_temp[STT_TEMP_HS2] =
+ out->ps[APMF_CNQF_TURBO].stt_skintemp[STT_TEMP_HS2];
+ ms->fan_control.fan_id = out->ps[APMF_CNQF_TURBO].fan_id;
+}
+
+static int amd_pmf_check_flags(struct amd_pmf_dev *dev)
+{
+ struct apmf_dyn_slider_output out = {};
+
+ if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC))
+ apmf_get_dyn_slider_def_ac(dev, &out);
+ else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC))
+ apmf_get_dyn_slider_def_dc(dev, &out);
+
+ return out.flags;
+}
+
+static int amd_pmf_load_defaults_cnqf(struct amd_pmf_dev *dev)
+{
+ struct apmf_dyn_slider_output out;
+ int i, j, ret;
+
+ for (i = 0; i < POWER_SOURCE_MAX; i++) {
+ if (!is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC + i))
+ continue;
+
+ if (i == POWER_SOURCE_AC)
+ ret = apmf_get_dyn_slider_def_ac(dev, &out);
+ else
+ ret = apmf_get_dyn_slider_def_dc(dev, &out);
+ if (ret) {
+ dev_err(dev->dev, "APMF apmf_get_dyn_slider_def_dc failed :%d\n", ret);
+ return ret;
+ }
+
+ amd_pmf_cnqf_dump_defaults(&out, i);
+ amd_pmf_update_mode_set(i, &out);
+ amd_pmf_update_trans_data(i, &out);
+ amd_pmf_update_power_threshold(i);
+
+ for (j = 0; j < CNQF_MODE_MAX; j++) {
+ if (config_store.mode_set[i][j].fan_control.fan_id == FAN_INDEX_AUTO)
+ config_store.mode_set[i][j].fan_control.manual = false;
+ else
+ config_store.mode_set[i][j].fan_control.manual = true;
+ }
+ }
+
+ /* set to initial default values */
+ config_store.current_mode = CNQF_MODE_BALANCE;
+
+ return 0;
+}
+
+static ssize_t cnqf_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
+ int result, src;
+ bool input;
+
+ result = kstrtobool(buf, &input);
+ if (result)
+ return result;
+
+ src = amd_pmf_cnqf_get_power_source(pdev);
+ pdev->cnqf_enabled = input;
+
+ if (pdev->cnqf_enabled && is_pprof_balanced(pdev)) {
+ amd_pmf_set_cnqf(pdev, src, config_store.current_mode, NULL);
+ } else {
+ if (is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
+ amd_pmf_set_sps_power_limits(pdev);
+ }
+
+ dev_dbg(pdev->dev, "Received CnQF %s\n", str_on_off(input));
+ return count;
+}
+
+static ssize_t cnqf_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", str_on_off(pdev->cnqf_enabled));
+}
+
+static DEVICE_ATTR_RW(cnqf_enable);
+
+static umode_t cnqf_feature_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
+
+ return pdev->cnqf_supported ? attr->mode : 0;
+}
+
+static struct attribute *cnqf_feature_attrs[] = {
+ &dev_attr_cnqf_enable.attr,
+ NULL
+};
+
+const struct attribute_group cnqf_feature_attribute_group = {
+ .is_visible = cnqf_feature_is_visible,
+ .attrs = cnqf_feature_attrs,
+};
+
+void amd_pmf_deinit_cnqf(struct amd_pmf_dev *dev)
+{
+ cancel_delayed_work_sync(&dev->work_buffer);
+}
+
+int amd_pmf_init_cnqf(struct amd_pmf_dev *dev)
+{
+ int ret, src;
+
+ /*
+ * Note the caller of this function has already checked that both
+ * APMF_FUNC_DYN_SLIDER_AC and APMF_FUNC_DYN_SLIDER_DC are supported.
+ */
+
+ ret = amd_pmf_load_defaults_cnqf(dev);
+ if (ret < 0)
+ return ret;
+
+ amd_pmf_init_metrics_table(dev);
+
+ dev->cnqf_supported = true;
+ dev->cnqf_enabled = amd_pmf_check_flags(dev);
+
+ /* update the thermal for CnQF */
+ if (dev->cnqf_enabled && is_pprof_balanced(dev)) {
+ src = amd_pmf_cnqf_get_power_source(dev);
+ amd_pmf_set_cnqf(dev, src, config_store.current_mode, NULL);
+ }
+
+ return 0;
+}
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
new file mode 100644
index 0000000000..78ed3ee225
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD Platform Management Framework Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#include <asm/amd_nb.h>
+#include <linux/debugfs.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include "pmf.h"
+
+/* PMF-SMU communication registers */
+#define AMD_PMF_REGISTER_MESSAGE 0xA18
+#define AMD_PMF_REGISTER_RESPONSE 0xA78
+#define AMD_PMF_REGISTER_ARGUMENT 0xA58
+
+/* Base address of SMU for mapping physical address to virtual address */
+#define AMD_PMF_MAPPING_SIZE 0x01000
+#define AMD_PMF_BASE_ADDR_OFFSET 0x10000
+#define AMD_PMF_BASE_ADDR_LO 0x13B102E8
+#define AMD_PMF_BASE_ADDR_HI 0x13B102EC
+#define AMD_PMF_BASE_ADDR_LO_MASK GENMASK(15, 0)
+#define AMD_PMF_BASE_ADDR_HI_MASK GENMASK(31, 20)
+
+/* SMU Response Codes */
+#define AMD_PMF_RESULT_OK 0x01
+#define AMD_PMF_RESULT_CMD_REJECT_BUSY 0xFC
+#define AMD_PMF_RESULT_CMD_REJECT_PREREQ 0xFD
+#define AMD_PMF_RESULT_CMD_UNKNOWN 0xFE
+#define AMD_PMF_RESULT_FAILED 0xFF
+
+/* List of supported CPU ids */
+#define AMD_CPU_ID_RMB 0x14b5
+#define AMD_CPU_ID_PS 0x14e8
+#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507
+
+#define PMF_MSG_DELAY_MIN_US 50
+#define RESPONSE_REGISTER_LOOP_MAX 20000
+
+#define DELAY_MIN_US 2000
+#define DELAY_MAX_US 3000
+
+/* Metrics Table sampling interval in ms (overridable via module parameter) */
+static int metrics_table_loop_ms = 1000;
+module_param(metrics_table_loop_ms, int, 0644);
+MODULE_PARM_DESC(metrics_table_loop_ms, "Metrics Table sampling interval in ms (default = 1000)");
+
+/* Force load on supported older platforms */
+static bool force_load;
+module_param(force_load, bool, 0444);
+MODULE_PARM_DESC(force_load, "Force load this driver on supported older platforms (experimental)");
+
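+/*
+ * Power-source change hook: when auto mode or CnQF currently owns the
+ * thermal limits (feature enabled and the platform profile balanced),
+ * slider updates are skipped so that a supply switch does not override
+ * them.
+ */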
+static int amd_pmf_pwr_src_notify_call(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct amd_pmf_dev *pmf = container_of(nb, struct amd_pmf_dev, pwr_src_notifier);
+
+ if (event != PSY_EVENT_PROP_CHANGED)
+ return NOTIFY_OK;
+
+ if (is_apmf_func_supported(pmf, APMF_FUNC_AUTO_MODE) ||
+ is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_DC) ||
+ is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_AC)) {
+ if ((pmf->amt_enabled || pmf->cnqf_enabled) && is_pprof_balanced(pmf))
+ return NOTIFY_DONE;
+ }
+
+ if (is_apmf_func_supported(pmf, APMF_FUNC_STATIC_SLIDER_GRANULAR))
+ amd_pmf_set_sps_power_limits(pmf);
+
+ if (is_apmf_func_supported(pmf, APMF_FUNC_OS_POWER_SLIDER_UPDATE))
+ amd_pmf_power_slider_update_event(pmf);
+
+ return NOTIFY_OK;
+}
+
+static int current_power_limits_show(struct seq_file *seq, void *unused)
+{
+ struct amd_pmf_dev *dev = seq->private;
+ struct amd_pmf_static_slider_granular table;
+ int mode, src = 0;
+
+ mode = amd_pmf_get_pprof_modes(dev);
+ if (mode < 0)
+ return mode;
+
+ src = amd_pmf_get_power_source();
+ amd_pmf_update_slider(dev, SLIDER_OP_GET, mode, &table);
+ seq_printf(seq, "spl:%u fppt:%u sppt:%u sppt_apu_only:%u stt_min:%u stt[APU]:%u stt[HS2]: %u\n",
+ table.prop[src][mode].spl,
+ table.prop[src][mode].fppt,
+ table.prop[src][mode].sppt,
+ table.prop[src][mode].sppt_apu_only,
+ table.prop[src][mode].stt_min,
+ table.prop[src][mode].stt_skin_temp[STT_TEMP_APU],
+ table.prop[src][mode].stt_skin_temp[STT_TEMP_HS2]);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(current_power_limits);
+
+static void amd_pmf_dbgfs_unregister(struct amd_pmf_dev *dev)
+{
+ debugfs_remove_recursive(dev->dbgfs_dir);
+}
+
+static void amd_pmf_dbgfs_register(struct amd_pmf_dev *dev)
+{
+ dev->dbgfs_dir = debugfs_create_dir("amd_pmf", NULL);
+ debugfs_create_file("current_power_limits", 0644, dev->dbgfs_dir, dev,
+ &current_power_limits_fops);
+}
+
+int amd_pmf_get_power_source(void)
+{
+ if (power_supply_is_system_supplied() > 0)
+ return POWER_SOURCE_AC;
+ else
+ return POWER_SOURCE_DC;
+}
+
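+/*
+ * Periodic worker: ask PMFW to copy the metrics table into dev->buf
+ * (SET_TRANSFER_TABLE with argument 7, presumably the table id),
+ * snapshot it into m_table, and feed the derived socket power into
+ * whichever of auto mode or CnQF is active before re-arming itself.
+ */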
+static void amd_pmf_get_metrics(struct work_struct *work)
+{
+ struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, work_buffer.work);
+ ktime_t time_elapsed_ms;
+ int socket_power;
+
+ mutex_lock(&dev->update_mutex);
+ /* Transfer table contents */
+ memset(dev->buf, 0, sizeof(dev->m_table));
+ amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL);
+ memcpy(&dev->m_table, dev->buf, sizeof(dev->m_table));
+
+ time_elapsed_ms = ktime_to_ms(ktime_get()) - dev->start_time;
+ /* Calculate the avg SoC power consumption */
+ socket_power = dev->m_table.apu_power + dev->m_table.dgpu_power;
+
+ if (dev->amt_enabled) {
+ /* Apply the Auto Mode transition */
+ amd_pmf_trans_automode(dev, socket_power, time_elapsed_ms);
+ }
+
+ if (dev->cnqf_enabled) {
+ /* Apply the CnQF transition */
+ amd_pmf_trans_cnqf(dev, socket_power, time_elapsed_ms);
+ }
+
+ dev->start_time = ktime_to_ms(ktime_get());
+ schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms));
+ mutex_unlock(&dev->update_mutex);
+}
+
+static inline u32 amd_pmf_reg_read(struct amd_pmf_dev *dev, int reg_offset)
+{
+ return ioread32(dev->regbase + reg_offset);
+}
+
+static inline void amd_pmf_reg_write(struct amd_pmf_dev *dev, int reg_offset, u32 val)
+{
+ iowrite32(val, dev->regbase + reg_offset);
+}
+
+static void __maybe_unused amd_pmf_dump_registers(struct amd_pmf_dev *dev)
+{
+ u32 value;
+
+ value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_RESPONSE);
+ dev_dbg(dev->dev, "AMD_PMF_REGISTER_RESPONSE:%x\n", value);
+
+ value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
+ dev_dbg(dev->dev, "AMD_PMF_REGISTER_ARGUMENT:%d\n", value);
+
+ value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_MESSAGE);
+ dev_dbg(dev->dev, "AMD_PMF_REGISTER_MESSAGE:%x\n", value);
+}
+
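+/*
+ * Mailbox handshake with the SMU: wait for the response register to go
+ * non-zero (the previous command has completed), clear it, write the
+ * argument and then the message ID (writing the message triggers
+ * execution), and poll the response register again for the result code.
+ */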
+int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32 *data)
+{
+ int rc;
+ u32 val;
+
+ mutex_lock(&dev->lock);
+
+ /* Wait until we get a valid response */
+ rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
+ val, val != 0, PMF_MSG_DELAY_MIN_US,
+ PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
+ if (rc) {
+ dev_err(dev->dev, "failed to talk to SMU\n");
+ goto out_unlock;
+ }
+
+ /* Write zero to response register */
+ amd_pmf_reg_write(dev, AMD_PMF_REGISTER_RESPONSE, 0);
+
+ /* Write argument into argument register */
+ amd_pmf_reg_write(dev, AMD_PMF_REGISTER_ARGUMENT, arg);
+
+ /* Write message ID to message ID register */
+ amd_pmf_reg_write(dev, AMD_PMF_REGISTER_MESSAGE, message);
+
+ /* Wait until we get a valid response */
+ rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
+ val, val != 0, PMF_MSG_DELAY_MIN_US,
+ PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
+ if (rc) {
+ dev_err(dev->dev, "SMU response timed out\n");
+ goto out_unlock;
+ }
+
+ switch (val) {
+ case AMD_PMF_RESULT_OK:
+ if (get) {
+			/* PMFW may take longer to return the data */
+ usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
+ *data = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
+ }
+ break;
+ case AMD_PMF_RESULT_CMD_REJECT_BUSY:
+ dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
+ rc = -EBUSY;
+ goto out_unlock;
+ case AMD_PMF_RESULT_CMD_UNKNOWN:
+ dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
+ rc = -EINVAL;
+ goto out_unlock;
+ case AMD_PMF_RESULT_CMD_REJECT_PREREQ:
+ case AMD_PMF_RESULT_FAILED:
+ default:
+ dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
+ rc = -EIO;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&dev->lock);
+ amd_pmf_dump_registers(dev);
+ return rc;
+}
+
+static const struct pci_device_id pmf_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RMB) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
+ { }
+};
+
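+/*
+ * The mailbox only carries 32-bit arguments, so the 64-bit physical
+ * address of the metrics buffer is handed to PMFW as two messages
+ * carrying the high and low halves.
+ */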
+static void amd_pmf_set_dram_addr(struct amd_pmf_dev *dev)
+{
+ u64 phys_addr;
+ u32 hi, low;
+
+ phys_addr = virt_to_phys(dev->buf);
+ hi = phys_addr >> 32;
+ low = phys_addr & GENMASK(31, 0);
+
+ amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL);
+ amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL);
+}
+
+int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
+{
+ /* Get Metrics Table Address */
+ dev->buf = kzalloc(sizeof(dev->m_table), GFP_KERNEL);
+ if (!dev->buf)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics);
+
+ amd_pmf_set_dram_addr(dev);
+
+ /*
+	 * Start collecting the metrics data after a small delay;
+	 * otherwise, we might end up getting stale values from PMFW.
+ */
+ schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms * 3));
+
+ return 0;
+}
+
+static int amd_pmf_resume_handler(struct device *dev)
+{
+ struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
+
+ if (pdev->buf)
+ amd_pmf_set_dram_addr(pdev);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmf_pm, NULL, amd_pmf_resume_handler);
+
+static void amd_pmf_init_features(struct amd_pmf_dev *dev)
+{
+ int ret;
+
+ /* Enable Static Slider */
+ if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) ||
+ is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
+ amd_pmf_init_sps(dev);
+ dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call;
+ power_supply_reg_notifier(&dev->pwr_src_notifier);
+ dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n");
+ }
+
+ /* Enable Auto Mode */
+ if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
+ amd_pmf_init_auto_mode(dev);
+ dev_dbg(dev->dev, "Auto Mode Init done\n");
+ } else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
+ is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
+ /* Enable Cool n Quiet Framework (CnQF) */
+ ret = amd_pmf_init_cnqf(dev);
+ if (ret)
+ dev_warn(dev->dev, "CnQF Init failed\n");
+ }
+}
+
+static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
+{
+ if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) ||
+ is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
+ power_supply_unreg_notifier(&dev->pwr_src_notifier);
+ amd_pmf_deinit_sps(dev);
+ }
+
+ if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
+ amd_pmf_deinit_auto_mode(dev);
+ } else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
+ is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
+ amd_pmf_deinit_cnqf(dev);
+ }
+}
+
+static const struct acpi_device_id amd_pmf_acpi_ids[] = {
+ {"AMDI0100", 0x100},
+ {"AMDI0102", 0},
+ {"AMDI0103", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, amd_pmf_acpi_ids);
+
+static int amd_pmf_probe(struct platform_device *pdev)
+{
+ const struct acpi_device_id *id;
+ struct amd_pmf_dev *dev;
+ struct pci_dev *rdev;
+ u32 base_addr_lo;
+ u32 base_addr_hi;
+ u64 base_addr;
+ u32 val;
+ int err;
+
+ id = acpi_match_device(amd_pmf_acpi_ids, &pdev->dev);
+ if (!id)
+ return -ENODEV;
+
+ if (id->driver_data == 0x100 && !force_load)
+ return -ENODEV;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->dev = &pdev->dev;
+
+ rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
+ if (!rdev || !pci_match_id(pmf_pci_ids, rdev)) {
+ pci_dev_put(rdev);
+ return -ENODEV;
+ }
+
+ dev->cpu_id = rdev->device;
+
+ err = amd_smn_read(0, AMD_PMF_BASE_ADDR_LO, &val);
+ if (err) {
+ dev_err(dev->dev, "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_LO);
+ pci_dev_put(rdev);
+ return pcibios_err_to_errno(err);
+ }
+
+ base_addr_lo = val & AMD_PMF_BASE_ADDR_HI_MASK;
+
+ err = amd_smn_read(0, AMD_PMF_BASE_ADDR_HI, &val);
+ if (err) {
+ dev_err(dev->dev, "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_HI);
+ pci_dev_put(rdev);
+ return pcibios_err_to_errno(err);
+ }
+
+ base_addr_hi = val & AMD_PMF_BASE_ADDR_LO_MASK;
+ pci_dev_put(rdev);
+ base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
+
+ dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMF_BASE_ADDR_OFFSET,
+ AMD_PMF_MAPPING_SIZE);
+ if (!dev->regbase)
+ return -ENOMEM;
+
+ mutex_init(&dev->lock);
+ mutex_init(&dev->update_mutex);
+
+ apmf_acpi_init(dev);
+ platform_set_drvdata(pdev, dev);
+ amd_pmf_init_features(dev);
+ apmf_install_handler(dev);
+ amd_pmf_dbgfs_register(dev);
+
+ dev_info(dev->dev, "registered PMF device successfully\n");
+
+ return 0;
+}
+
+static void amd_pmf_remove(struct platform_device *pdev)
+{
+ struct amd_pmf_dev *dev = platform_get_drvdata(pdev);
+
+ amd_pmf_deinit_features(dev);
+ apmf_acpi_deinit(dev);
+ amd_pmf_dbgfs_unregister(dev);
+ mutex_destroy(&dev->lock);
+ mutex_destroy(&dev->update_mutex);
+ kfree(dev->buf);
+}
+
+static const struct attribute_group *amd_pmf_driver_groups[] = {
+ &cnqf_feature_attribute_group,
+ NULL,
+};
+
+static struct platform_driver amd_pmf_driver = {
+ .driver = {
+ .name = "amd-pmf",
+ .acpi_match_table = amd_pmf_acpi_ids,
+ .dev_groups = amd_pmf_driver_groups,
+ .pm = pm_sleep_ptr(&amd_pmf_pm),
+ },
+ .probe = amd_pmf_probe,
+ .remove_new = amd_pmf_remove,
+};
+module_platform_driver(amd_pmf_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AMD Platform Management Framework Driver");
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
new file mode 100644
index 0000000000..deba88e6e4
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -0,0 +1,436 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD Platform Management Framework Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#ifndef PMF_H
+#define PMF_H
+
+#include <linux/acpi.h>
+#include <linux/platform_profile.h>
+
+/* APMF Functions */
+#define APMF_FUNC_VERIFY_INTERFACE 0
+#define APMF_FUNC_GET_SYS_PARAMS 1
+#define APMF_FUNC_SBIOS_REQUESTS 2
+#define APMF_FUNC_SBIOS_HEARTBEAT 4
+#define APMF_FUNC_AUTO_MODE 5
+#define APMF_FUNC_SET_FAN_IDX 7
+#define APMF_FUNC_OS_POWER_SLIDER_UPDATE 8
+#define APMF_FUNC_STATIC_SLIDER_GRANULAR 9
+#define APMF_FUNC_DYN_SLIDER_AC 11
+#define APMF_FUNC_DYN_SLIDER_DC 12
+
+/* Message Definitions */
+#define SET_SPL 0x03 /* SPL: Sustained Power Limit */
+#define SET_SPPT 0x05 /* SPPT: Slow Package Power Tracking */
+#define SET_FPPT 0x07 /* FPPT: Fast Package Power Tracking */
+#define GET_SPL 0x0B
+#define GET_SPPT 0x0D
+#define GET_FPPT 0x0F
+#define SET_DRAM_ADDR_HIGH 0x14
+#define SET_DRAM_ADDR_LOW 0x15
+#define SET_TRANSFER_TABLE 0x16
+#define SET_STT_MIN_LIMIT 0x18 /* STT: Skin Temperature Tracking */
+#define SET_STT_LIMIT_APU 0x19
+#define SET_STT_LIMIT_HS2 0x1A
+#define SET_SPPT_APU_ONLY 0x1D
+#define GET_SPPT_APU_ONLY 0x1E
+#define GET_STT_MIN_LIMIT 0x1F
+#define GET_STT_LIMIT_APU 0x20
+#define GET_STT_LIMIT_HS2 0x21
+
+/* OS slider update notification */
+#define DC_BEST_PERF 0
+#define DC_BETTER_PERF 1
+#define DC_BATTERY_SAVER 3
+#define AC_BEST_PERF 4
+#define AC_BETTER_PERF 5
+#define AC_BETTER_BATTERY 6
+
+/* Fan Index for Auto Mode */
+#define FAN_INDEX_AUTO 0xFFFFFFFF
+
+#define ARG_NONE 0
+#define AVG_SAMPLE_SIZE 3
+
+/* AMD PMF BIOS interfaces */
+struct apmf_verify_interface {
+ u16 size;
+ u16 version;
+ u32 notification_mask;
+ u32 supported_functions;
+} __packed;
+
+struct apmf_system_params {
+ u16 size;
+ u32 valid_mask;
+ u32 flags;
+ u8 command_code;
+ u32 heartbeat_int;
+} __packed;
+
+struct apmf_sbios_req {
+ u16 size;
+ u32 pending_req;
+ u8 rsd;
+ u8 cql_event;
+ u8 amt_event;
+ u32 fppt;
+ u32 sppt;
+ u32 fppt_apu_only;
+ u32 spl;
+ u32 stt_min_limit;
+ u8 skin_temp_apu;
+ u8 skin_temp_hs2;
+} __packed;
+
+struct apmf_fan_idx {
+ u16 size;
+ u8 fan_ctl_mode;
+ u32 fan_ctl_idx;
+} __packed;
+
+struct smu_pmf_metrics {
+ u16 gfxclk_freq; /* in MHz */
+ u16 socclk_freq; /* in MHz */
+ u16 vclk_freq; /* in MHz */
+ u16 dclk_freq; /* in MHz */
+ u16 memclk_freq; /* in MHz */
+ u16 spare;
+ u16 gfx_activity; /* in Centi */
+ u16 uvd_activity; /* in Centi */
+ u16 voltage[2]; /* in mV */
+ u16 currents[2]; /* in mA */
+ u16 power[2];/* in mW */
+ u16 core_freq[8]; /* in MHz */
+ u16 core_power[8]; /* in mW */
+ u16 core_temp[8]; /* in centi-Celsius */
+ u16 l3_freq; /* in MHz */
+ u16 l3_temp; /* in centi-Celsius */
+ u16 gfx_temp; /* in centi-Celsius */
+ u16 soc_temp; /* in centi-Celsius */
+ u16 throttler_status;
+ u16 current_socketpower; /* in mW */
+ u16 stapm_orig_limit; /* in W */
+ u16 stapm_cur_limit; /* in W */
+ u32 apu_power; /* in mW */
+ u32 dgpu_power; /* in mW */
+ u16 vdd_tdc_val; /* in mA */
+ u16 soc_tdc_val; /* in mA */
+ u16 vdd_edc_val; /* in mA */
+ u16 soc_edcv_al; /* in mA */
+ u16 infra_cpu_maxfreq; /* in MHz */
+ u16 infra_gfx_maxfreq; /* in MHz */
+ u16 skin_temp; /* in centi-Celsius */
+ u16 device_state;
+} __packed;
+
+enum amd_stt_skin_temp {
+ STT_TEMP_APU,
+ STT_TEMP_HS2,
+ STT_TEMP_COUNT,
+};
+
+enum amd_slider_op {
+ SLIDER_OP_GET,
+ SLIDER_OP_SET,
+};
+
+enum power_source {
+ POWER_SOURCE_AC,
+ POWER_SOURCE_DC,
+ POWER_SOURCE_MAX,
+};
+
+enum power_modes {
+ POWER_MODE_PERFORMANCE,
+ POWER_MODE_BALANCED_POWER,
+ POWER_MODE_POWER_SAVER,
+ POWER_MODE_MAX,
+};
+
+struct amd_pmf_dev {
+ void __iomem *regbase;
+ void __iomem *smu_virt_addr;
+ void *buf;
+ u32 base_addr;
+ u32 cpu_id;
+ struct device *dev;
+ struct mutex lock; /* protects the PMF interface */
+ u32 supported_func;
+ enum platform_profile_option current_profile;
+ struct platform_profile_handler pprof;
+ struct dentry *dbgfs_dir;
+ int hb_interval; /* SBIOS heartbeat interval */
+ struct delayed_work heart_beat;
+ struct smu_pmf_metrics m_table;
+ struct delayed_work work_buffer;
+ ktime_t start_time;
+ int socket_power_history[AVG_SAMPLE_SIZE];
+ int socket_power_history_idx;
+ bool amt_enabled;
+ struct mutex update_mutex; /* protects race between ACPI handler and metrics thread */
+ bool cnqf_enabled;
+ bool cnqf_supported;
+ struct notifier_block pwr_src_notifier;
+};
+
+struct apmf_sps_prop_granular {
+ u32 fppt;
+ u32 sppt;
+ u32 sppt_apu_only;
+ u32 spl;
+ u32 stt_min;
+ u8 stt_skin_temp[STT_TEMP_COUNT];
+ u32 fan_id;
+} __packed;
+
+/* Static Slider */
+struct apmf_static_slider_granular_output {
+ u16 size;
+ struct apmf_sps_prop_granular prop[POWER_SOURCE_MAX * POWER_MODE_MAX];
+} __packed;
+
+struct amd_pmf_static_slider_granular {
+ u16 size;
+ struct apmf_sps_prop_granular prop[POWER_SOURCE_MAX][POWER_MODE_MAX];
+};
+
+struct os_power_slider {
+ u16 size;
+ u8 slider_event;
+} __packed;
+
+struct fan_table_control {
+ bool manual;
+ unsigned long fan_id;
+};
+
+struct power_table_control {
+ u32 spl;
+ u32 sppt;
+ u32 fppt;
+ u32 sppt_apu_only;
+ u32 stt_min;
+ u32 stt_skin_temp[STT_TEMP_COUNT];
+ u32 reserved[16];
+};
+
+/* Auto Mode Layer */
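+/*
+ * Transitions are evaluated in enum order, so entries that appear
+ * earlier here take priority when several conditions are met at once.
+ */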
+enum auto_mode_transition_priority {
+ AUTO_TRANSITION_TO_PERFORMANCE, /* Any other mode to Performance Mode */
+ AUTO_TRANSITION_FROM_QUIET_TO_BALANCE, /* Quiet Mode to Balance Mode */
+ AUTO_TRANSITION_TO_QUIET, /* Any other mode to Quiet Mode */
+ AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE, /* Performance Mode to Balance Mode */
+ AUTO_TRANSITION_MAX,
+};
+
+enum auto_mode_mode {
+ AUTO_QUIET,
+ AUTO_BALANCE,
+ AUTO_PERFORMANCE_ON_LAP,
+ AUTO_PERFORMANCE,
+ AUTO_MODE_MAX,
+};
+
+struct auto_mode_trans_params {
+ u32 time_constant; /* minimum time required to switch to next mode */
+ u32 power_delta; /* delta power to shift mode */
+ u32 power_threshold;
+ u32 timer; /* elapsed time; the transition to target_mode may fire once this exceeds time_constant */
+ u32 applied;
+ enum auto_mode_mode target_mode;
+ u32 shifting_up;
+};
+
+struct auto_mode_mode_settings {
+ struct power_table_control power_control;
+ struct fan_table_control fan_control;
+ u32 power_floor;
+};
+
+struct auto_mode_mode_config {
+ struct auto_mode_trans_params transition[AUTO_TRANSITION_MAX];
+ struct auto_mode_mode_settings mode_set[AUTO_MODE_MAX];
+ enum auto_mode_mode current_mode;
+};
+
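+/*
+ * Auto Mode defaults as returned by the BIOS; at init time these flat
+ * fields are regrouped into the auto_mode_mode_config layout above.
+ */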
+struct apmf_auto_mode {
+ u16 size;
+ /* time constant */
+ u32 balanced_to_perf;
+ u32 perf_to_balanced;
+ u32 quiet_to_balanced;
+ u32 balanced_to_quiet;
+ /* power floor */
+ u32 pfloor_perf;
+ u32 pfloor_balanced;
+ u32 pfloor_quiet;
+ /* Power delta for mode change */
+ u32 pd_balanced_to_perf;
+ u32 pd_perf_to_balanced;
+ u32 pd_quiet_to_balanced;
+ u32 pd_balanced_to_quiet;
+ /* skin temperature limits */
+ u8 stt_apu_perf_on_lap; /* CQL ON */
+ u8 stt_hs2_perf_on_lap; /* CQL ON */
+ u8 stt_apu_perf;
+ u8 stt_hs2_perf;
+ u8 stt_apu_balanced;
+ u8 stt_hs2_balanced;
+ u8 stt_apu_quiet;
+ u8 stt_hs2_quiet;
+ u32 stt_min_limit_perf_on_lap; /* CQL ON */
+ u32 stt_min_limit_perf;
+ u32 stt_min_limit_balanced;
+ u32 stt_min_limit_quiet;
+ /* SPL based */
+ u32 fppt_perf_on_lap; /* CQL ON */
+ u32 sppt_perf_on_lap; /* CQL ON */
+ u32 spl_perf_on_lap; /* CQL ON */
+ u32 sppt_apu_only_perf_on_lap; /* CQL ON */
+ u32 fppt_perf;
+ u32 sppt_perf;
+ u32 spl_perf;
+ u32 sppt_apu_only_perf;
+ u32 fppt_balanced;
+ u32 sppt_balanced;
+ u32 spl_balanced;
+ u32 sppt_apu_only_balanced;
+ u32 fppt_quiet;
+ u32 sppt_quiet;
+ u32 spl_quiet;
+ u32 sppt_apu_only_quiet;
+ /* Fan ID */
+ u32 fan_id_perf;
+ u32 fan_id_balanced;
+ u32 fan_id_quiet;
+} __packed;
+
+/* CnQF Layer */
+enum cnqf_trans_priority {
+ CNQF_TRANSITION_TO_TURBO, /* Any other mode to Turbo Mode */
+ CNQF_TRANSITION_FROM_BALANCE_TO_PERFORMANCE, /* Quiet/Balance Mode to Performance Mode */
+ CNQF_TRANSITION_FROM_QUIET_TO_BALANCE, /* Quiet Mode to Balance Mode */
+ CNQF_TRANSITION_TO_QUIET, /* Any other mode to Quiet Mode */
+ CNQF_TRANSITION_FROM_PERFORMANCE_TO_BALANCE, /* Performance/Turbo Mode to Balance Mode */
+ CNQF_TRANSITION_FROM_TURBO_TO_PERFORMANCE, /* Turbo Mode to Performance Mode */
+ CNQF_TRANSITION_MAX,
+};
+
+enum cnqf_mode {
+ CNQF_MODE_QUIET,
+ CNQF_MODE_BALANCE,
+ CNQF_MODE_PERFORMANCE,
+ CNQF_MODE_TURBO,
+ CNQF_MODE_MAX,
+};
+
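+/*
+ * Index order of the power sets in apmf_dyn_slider_output.ps[]; note
+ * that this BIOS ordering is the reverse of enum cnqf_mode above.
+ */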
+enum apmf_cnqf_pos {
+ APMF_CNQF_TURBO,
+ APMF_CNQF_PERFORMANCE,
+ APMF_CNQF_BALANCE,
+ APMF_CNQF_QUIET,
+ APMF_CNQF_MAX,
+};
+
+struct cnqf_mode_settings {
+ struct power_table_control power_control;
+ struct fan_table_control fan_control;
+ u32 power_floor;
+};
+
+struct cnqf_tran_params {
+ u32 time_constant; /* minimum time required to switch to next mode */
+ u32 power_threshold;
+ u32 timer; /* elapsed time; the transition to target_mode may fire once this exceeds time_constant */
+ u32 total_power;
+ u32 count;
+ bool priority;
+ bool shifting_up;
+ enum cnqf_mode target_mode;
+};
+
+struct cnqf_config {
+ struct cnqf_tran_params trans_param[POWER_SOURCE_MAX][CNQF_TRANSITION_MAX];
+ struct cnqf_mode_settings mode_set[POWER_SOURCE_MAX][CNQF_MODE_MAX];
+ struct power_table_control defaults;
+ enum cnqf_mode current_mode;
+ u32 power_src;
+ u32 avg_power;
+};
+
+struct apmf_cnqf_power_set {
+ u32 pfloor;
+ u32 fppt;
+ u32 sppt;
+ u32 sppt_apu_only;
+ u32 spl;
+ u32 stt_min_limit;
+ u8 stt_skintemp[STT_TEMP_COUNT];
+ u32 fan_id;
+} __packed;
+
+struct apmf_dyn_slider_output {
+ u16 size;
+ u16 flags;
+ u32 t_perf_to_turbo;
+ u32 t_balanced_to_perf;
+ u32 t_quiet_to_balanced;
+ u32 t_balanced_to_quiet;
+ u32 t_perf_to_balanced;
+ u32 t_turbo_to_perf;
+ struct apmf_cnqf_power_set ps[APMF_CNQF_MAX];
+} __packed;
+
+/* Core Layer */
+int apmf_acpi_init(struct amd_pmf_dev *pmf_dev);
+void apmf_acpi_deinit(struct amd_pmf_dev *pmf_dev);
+int is_apmf_func_supported(struct amd_pmf_dev *pdev, unsigned long index);
+int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32 *data);
+int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev);
+int amd_pmf_get_power_source(void);
+int apmf_install_handler(struct amd_pmf_dev *pmf_dev);
+int apmf_os_power_slider_update(struct amd_pmf_dev *dev, u8 flag);
+
+/* SPS Layer */
+int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf);
+void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
+ struct amd_pmf_static_slider_granular *table);
+int amd_pmf_init_sps(struct amd_pmf_dev *dev);
+void amd_pmf_deinit_sps(struct amd_pmf_dev *dev);
+int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
+ struct apmf_static_slider_granular_output *output);
+bool is_pprof_balanced(struct amd_pmf_dev *pmf);
+int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev);
+
+int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx);
+int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf);
+
+/* Auto Mode Layer */
+int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data);
+void amd_pmf_init_auto_mode(struct amd_pmf_dev *dev);
+void amd_pmf_deinit_auto_mode(struct amd_pmf_dev *dev);
+void amd_pmf_trans_automode(struct amd_pmf_dev *dev, int socket_power, ktime_t time_elapsed_ms);
+int apmf_get_sbios_requests(struct amd_pmf_dev *pdev, struct apmf_sbios_req *req);
+
+void amd_pmf_update_2_cql(struct amd_pmf_dev *dev, bool is_cql_event);
+int amd_pmf_reset_amt(struct amd_pmf_dev *dev);
+void amd_pmf_handle_amt(struct amd_pmf_dev *dev);
+
+/* CnQF Layer */
+int apmf_get_dyn_slider_def_ac(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_output *data);
+int apmf_get_dyn_slider_def_dc(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_output *data);
+int amd_pmf_init_cnqf(struct amd_pmf_dev *dev);
+void amd_pmf_deinit_cnqf(struct amd_pmf_dev *dev);
+int amd_pmf_trans_cnqf(struct amd_pmf_dev *dev, int socket_power, ktime_t time_elapsed_ms);
+extern const struct attribute_group cnqf_feature_attribute_group;
+
+#endif /* PMF_H */
diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
new file mode 100644
index 0000000000..a70e67749b
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/sps.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Management Framework (PMF) Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#include "pmf.h"
+
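+/* Per-source, per-mode limits cached from the BIOS static slider table */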
+static struct amd_pmf_static_slider_granular config_store;
+
+#ifdef CONFIG_AMD_PMF_DEBUG
+static const char *slider_as_str(unsigned int state)
+{
+ switch (state) {
+ case POWER_MODE_PERFORMANCE:
+ return "PERFORMANCE";
+ case POWER_MODE_BALANCED_POWER:
+ return "BALANCED_POWER";
+ case POWER_MODE_POWER_SAVER:
+ return "POWER_SAVER";
+ default:
+ return "Unknown Slider State";
+ }
+}
+
+static const char *source_as_str(unsigned int state)
+{
+ switch (state) {
+ case POWER_SOURCE_AC:
+ return "AC";
+ case POWER_SOURCE_DC:
+ return "DC";
+ default:
+ return "Unknown Power State";
+ }
+}
+
+static void amd_pmf_dump_sps_defaults(struct amd_pmf_static_slider_granular *data)
+{
+ int i, j;
+
+ pr_debug("Static Slider Data - BEGIN\n");
+
+ for (i = 0; i < POWER_SOURCE_MAX; i++) {
+ for (j = 0; j < POWER_MODE_MAX; j++) {
+ pr_debug("--- Source:%s Mode:%s ---\n", source_as_str(i), slider_as_str(j));
+ pr_debug("SPL: %u mW\n", data->prop[i][j].spl);
+ pr_debug("SPPT: %u mW\n", data->prop[i][j].sppt);
+ pr_debug("SPPT_ApuOnly: %u mW\n", data->prop[i][j].sppt_apu_only);
+ pr_debug("FPPT: %u mW\n", data->prop[i][j].fppt);
+ pr_debug("STTMinLimit: %u mW\n", data->prop[i][j].stt_min);
+ pr_debug("STT_SkinTempLimit_APU: %u C\n",
+ data->prop[i][j].stt_skin_temp[STT_TEMP_APU]);
+ pr_debug("STT_SkinTempLimit_HS2: %u C\n",
+ data->prop[i][j].stt_skin_temp[STT_TEMP_HS2]);
+ }
+ }
+
+ pr_debug("Static Slider Data - END\n");
+}
+#else
+static void amd_pmf_dump_sps_defaults(struct amd_pmf_static_slider_granular *data) {}
+#endif
+
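+/*
+ * Pull the static slider defaults from the BIOS and unpack the flat
+ * source x mode array into config_store's two-dimensional layout.
+ */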
+static void amd_pmf_load_defaults_sps(struct amd_pmf_dev *dev)
+{
+ struct apmf_static_slider_granular_output output;
+ int i, j, idx = 0;
+
+ memset(&config_store, 0, sizeof(config_store));
+ apmf_get_static_slider_granular(dev, &output);
+
+ for (i = 0; i < POWER_SOURCE_MAX; i++) {
+ for (j = 0; j < POWER_MODE_MAX; j++) {
+ config_store.prop[i][j].spl = output.prop[idx].spl;
+ config_store.prop[i][j].sppt = output.prop[idx].sppt;
+ config_store.prop[i][j].sppt_apu_only =
+ output.prop[idx].sppt_apu_only;
+ config_store.prop[i][j].fppt = output.prop[idx].fppt;
+ config_store.prop[i][j].stt_min = output.prop[idx].stt_min;
+ config_store.prop[i][j].stt_skin_temp[STT_TEMP_APU] =
+ output.prop[idx].stt_skin_temp[STT_TEMP_APU];
+ config_store.prop[i][j].stt_skin_temp[STT_TEMP_HS2] =
+ output.prop[idx].stt_skin_temp[STT_TEMP_HS2];
+ config_store.prop[i][j].fan_id = output.prop[idx].fan_id;
+ idx++;
+ }
+ }
+ amd_pmf_dump_sps_defaults(&config_store);
+}
+
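+/*
+ * SLIDER_OP_SET pushes the cached limits for the current power source
+ * down to the SMU; SLIDER_OP_GET reads the live values back into @table.
+ */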
+void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
+ struct amd_pmf_static_slider_granular *table)
+{
+ int src = amd_pmf_get_power_source();
+ u32 tmp;
+
+ if (op == SLIDER_OP_SET) {
+ amd_pmf_send_cmd(dev, SET_SPL, false, config_store.prop[src][idx].spl, NULL);
+ amd_pmf_send_cmd(dev, SET_FPPT, false, config_store.prop[src][idx].fppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT, false, config_store.prop[src][idx].sppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false,
+ config_store.prop[src][idx].sppt_apu_only, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false,
+ config_store.prop[src][idx].stt_min, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
+ config_store.prop[src][idx].stt_skin_temp[STT_TEMP_APU], NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
+ config_store.prop[src][idx].stt_skin_temp[STT_TEMP_HS2], NULL);
+ } else if (op == SLIDER_OP_GET) {
+ amd_pmf_send_cmd(dev, GET_SPL, true, ARG_NONE, &table->prop[src][idx].spl);
+ amd_pmf_send_cmd(dev, GET_FPPT, true, ARG_NONE, &table->prop[src][idx].fppt);
+ amd_pmf_send_cmd(dev, GET_SPPT, true, ARG_NONE, &table->prop[src][idx].sppt);
+ amd_pmf_send_cmd(dev, GET_SPPT_APU_ONLY, true, ARG_NONE,
+ &table->prop[src][idx].sppt_apu_only);
+ amd_pmf_send_cmd(dev, GET_STT_MIN_LIMIT, true, ARG_NONE,
+ &table->prop[src][idx].stt_min);
+ /* the skin temp limits are u8 wide; read into a u32 and truncate */
+ amd_pmf_send_cmd(dev, GET_STT_LIMIT_APU, true, ARG_NONE, &tmp);
+ table->prop[src][idx].stt_skin_temp[STT_TEMP_APU] = tmp;
+ amd_pmf_send_cmd(dev, GET_STT_LIMIT_HS2, true, ARG_NONE, &tmp);
+ table->prop[src][idx].stt_skin_temp[STT_TEMP_HS2] = tmp;
+ }
+}
+
+int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf)
+{
+ int mode;
+
+ mode = amd_pmf_get_pprof_modes(pmf);
+ if (mode < 0)
+ return mode;
+
+ amd_pmf_update_slider(pmf, SLIDER_OP_SET, mode, NULL);
+
+ return 0;
+}
+
+bool is_pprof_balanced(struct amd_pmf_dev *pmf)
+{
+ return pmf->current_profile == PLATFORM_PROFILE_BALANCED;
+}
+
+static int amd_pmf_profile_get(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile)
+{
+ struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
+
+ *profile = pmf->current_profile;
+ return 0;
+}
+
+int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf)
+{
+ int mode;
+
+ switch (pmf->current_profile) {
+ case PLATFORM_PROFILE_PERFORMANCE:
+ mode = POWER_MODE_PERFORMANCE;
+ break;
+ case PLATFORM_PROFILE_BALANCED:
+ mode = POWER_MODE_BALANCED_POWER;
+ break;
+ case PLATFORM_PROFILE_LOW_POWER:
+ mode = POWER_MODE_POWER_SAVER;
+ break;
+ default:
+ dev_err(pmf->dev, "Unknown Platform Profile.\n");
+ return -EOPNOTSUPP;
+ }
+
+ return mode;
+}
+
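+/*
+ * Convert the current platform profile and power source into the
+ * one-hot slider-position mask consumed by the APMF OS slider method.
+ */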
+int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev)
+{
+ u8 flag = 0;
+ int mode;
+ int src;
+
+ mode = amd_pmf_get_pprof_modes(dev);
+ if (mode < 0)
+ return mode;
+
+ src = amd_pmf_get_power_source();
+
+ if (src == POWER_SOURCE_AC) {
+ switch (mode) {
+ case POWER_MODE_PERFORMANCE:
+ flag |= BIT(AC_BEST_PERF);
+ break;
+ case POWER_MODE_BALANCED_POWER:
+ flag |= BIT(AC_BETTER_PERF);
+ break;
+ case POWER_MODE_POWER_SAVER:
+ flag |= BIT(AC_BETTER_BATTERY);
+ break;
+ default:
+ dev_err(dev->dev, "unsupported platform profile\n");
+ return -EOPNOTSUPP;
+ }
+
+ } else if (src == POWER_SOURCE_DC) {
+ switch (mode) {
+ case POWER_MODE_PERFORMANCE:
+ flag |= BIT(DC_BEST_PERF);
+ break;
+ case POWER_MODE_BALANCED_POWER:
+ flag |= BIT(DC_BETTER_PERF);
+ break;
+ case POWER_MODE_POWER_SAVER:
+ flag |= BIT(DC_BATTERY_SAVER);
+ break;
+ default:
+ dev_err(dev->dev, "unsupported platform profile\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ apmf_os_power_slider_update(dev, flag);
+
+ return 0;
+}
+
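+/*
+ * platform_profile ->profile_set() hook: cache the requested profile,
+ * notify the BIOS via APMF when supported, then refresh the SMU limits.
+ */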
+static int amd_pmf_profile_set(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile)
+{
+ struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
+ int ret = 0;
+
+ pmf->current_profile = profile;
+
+ /* Notify EC about the slider position change */
+ if (is_apmf_func_supported(pmf, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
+ ret = amd_pmf_power_slider_update_event(pmf);
+ if (ret)
+ return ret;
+ }
+
+ if (is_apmf_func_supported(pmf, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
+ ret = amd_pmf_set_sps_power_limits(pmf);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
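+/*
+ * Register the PMF profiles with the platform_profile subsystem, making
+ * them selectable through /sys/firmware/acpi/platform_profile; when the
+ * BIOS exposes the static slider table, the balanced-mode limits are
+ * applied here as the boot-time default.
+ */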
+int amd_pmf_init_sps(struct amd_pmf_dev *dev)
+{
+ int err;
+
+ dev->current_profile = PLATFORM_PROFILE_BALANCED;
+
+ if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
+ amd_pmf_load_defaults_sps(dev);
+
+ /* update SPS balanced power mode thermals */
+ amd_pmf_set_sps_power_limits(dev);
+ }
+
+ dev->pprof.profile_get = amd_pmf_profile_get;
+ dev->pprof.profile_set = amd_pmf_profile_set;
+
+ /* Setup supported modes */
+ set_bit(PLATFORM_PROFILE_LOW_POWER, dev->pprof.choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, dev->pprof.choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, dev->pprof.choices);
+
+ /* Create platform_profile structure and register */
+ err = platform_profile_register(&dev->pprof);
+ if (err)
+ dev_err(dev->dev, "Failed to register SPS support, this is most likely an SBIOS bug: %d\n",
+ err);
+
+ return err;
+}
+
+void amd_pmf_deinit_sps(struct amd_pmf_dev *dev)
+{
+ platform_profile_remove();
+}