Diffstat (limited to 'drivers/platform/x86/amd')
-rw-r--r--  drivers/platform/x86/amd/Kconfig           |   2
-rw-r--r--  drivers/platform/x86/amd/hsmp.c            | 584
-rw-r--r--  drivers/platform/x86/amd/pmf/Makefile      |   2
-rw-r--r--  drivers/platform/x86/amd/pmf/acpi.c        | 141
-rw-r--r--  drivers/platform/x86/amd/pmf/core.c        |  16
-rw-r--r--  drivers/platform/x86/amd/pmf/pmf-quirks.c  |  51
-rw-r--r--  drivers/platform/x86/amd/pmf/pmf.h         |  96
-rw-r--r--  drivers/platform/x86/amd/pmf/sps.c         | 145
-rw-r--r--  drivers/platform/x86/amd/pmf/tee-if.c      |  23
9 files changed, 919 insertions, 141 deletions
diff --git a/drivers/platform/x86/amd/Kconfig b/drivers/platform/x86/amd/Kconfig
index 54753213c..f88682d36 100644
--- a/drivers/platform/x86/amd/Kconfig
+++ b/drivers/platform/x86/amd/Kconfig
@@ -8,7 +8,7 @@ source "drivers/platform/x86/amd/pmc/Kconfig"
config AMD_HSMP
tristate "AMD HSMP Driver"
- depends on AMD_NB && X86_64
+ depends on AMD_NB && X86_64 && ACPI
help
The driver provides a way for user space tools to monitor and manage
system management functionality on EPYC server CPUs from AMD.
diff --git a/drivers/platform/x86/amd/hsmp.c b/drivers/platform/x86/amd/hsmp.c
index b55d80e29..1927be901 100644
--- a/drivers/platform/x86/amd/hsmp.c
+++ b/drivers/platform/x86/amd/hsmp.c
@@ -18,9 +18,11 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/semaphore.h>
+#include <linux/acpi.h>
#define DRIVER_NAME "amd_hsmp"
-#define DRIVER_VERSION "2.0"
+#define DRIVER_VERSION "2.2"
+#define ACPI_HSMP_DEVICE_HID "AMDI0097"
/* HSMP Status / Error codes */
#define HSMP_STATUS_NOT_READY 0x00
@@ -40,9 +42,11 @@
* register into the SMN_INDEX register, and reads/writes the SMN_DATA reg.
* Below are required SMN address for HSMP Mailbox register offsets in SMU address space
*/
-#define SMN_HSMP_MSG_ID 0x3B10534
-#define SMN_HSMP_MSG_RESP 0x3B10980
-#define SMN_HSMP_MSG_DATA 0x3B109E0
+#define SMN_HSMP_BASE 0x3B00000
+#define SMN_HSMP_MSG_ID 0x0010534
+#define SMN_HSMP_MSG_ID_F1A_M0H 0x0010934
+#define SMN_HSMP_MSG_RESP 0x0010980
+#define SMN_HSMP_MSG_DATA 0x00109E0
#define HSMP_INDEX_REG 0xc4
#define HSMP_DATA_REG 0xc8
@@ -53,41 +57,86 @@
#define HSMP_ATTR_GRP_NAME_SIZE 10
+/* These are the strings specified in the ACPI table */
+#define MSG_IDOFF_STR "MsgIdOffset"
+#define MSG_ARGOFF_STR "MsgArgOffset"
+#define MSG_RESPOFF_STR "MsgRspOffset"
+
+#define MAX_AMD_SOCKETS 8
+
+struct hsmp_mbaddr_info {
+ u32 base_addr;
+ u32 msg_id_off;
+ u32 msg_resp_off;
+ u32 msg_arg_off;
+ u32 size;
+};
+
struct hsmp_socket {
struct bin_attribute hsmp_attr;
+ struct hsmp_mbaddr_info mbinfo;
void __iomem *metric_tbl_addr;
+ void __iomem *virt_base_addr;
struct semaphore hsmp_sem;
char name[HSMP_ATTR_GRP_NAME_SIZE];
+ struct pci_dev *root;
+ struct device *dev;
u16 sock_ind;
};
struct hsmp_plat_device {
struct miscdevice hsmp_device;
struct hsmp_socket *sock;
- struct device *dev;
u32 proto_ver;
u16 num_sockets;
+ bool is_acpi_device;
+ bool is_probed;
};
static struct hsmp_plat_device plat_dev;
-static int amd_hsmp_rdwr(struct pci_dev *root, u32 address,
- u32 *value, bool write)
+static int amd_hsmp_pci_rdwr(struct hsmp_socket *sock, u32 offset,
+ u32 *value, bool write)
{
int ret;
- ret = pci_write_config_dword(root, HSMP_INDEX_REG, address);
+ if (!sock->root)
+ return -ENODEV;
+
+ ret = pci_write_config_dword(sock->root, HSMP_INDEX_REG,
+ sock->mbinfo.base_addr + offset);
if (ret)
return ret;
- ret = (write ? pci_write_config_dword(root, HSMP_DATA_REG, *value)
- : pci_read_config_dword(root, HSMP_DATA_REG, value));
+ ret = (write ? pci_write_config_dword(sock->root, HSMP_DATA_REG, *value)
+ : pci_read_config_dword(sock->root, HSMP_DATA_REG, value));
return ret;
}
+static void amd_hsmp_acpi_rdwr(struct hsmp_socket *sock, u32 offset,
+ u32 *value, bool write)
+{
+ if (write)
+ iowrite32(*value, sock->virt_base_addr + offset);
+ else
+ *value = ioread32(sock->virt_base_addr + offset);
+}
+
+static int amd_hsmp_rdwr(struct hsmp_socket *sock, u32 offset,
+ u32 *value, bool write)
+{
+ if (plat_dev.is_acpi_device)
+ amd_hsmp_acpi_rdwr(sock, offset, value, write);
+ else
+ return amd_hsmp_pci_rdwr(sock, offset, value, write);
+
+ return 0;
+}
+
/*
- * Send a message to the HSMP port via PCI-e config space registers.
+ * Send a message to the HSMP port via PCI-e config space registers
+ * or by writing to MMIO space.
*
* The caller is expected to zero out any unused arguments.
* If a response is expected, the number of response words should be greater than 0.
@@ -95,16 +144,19 @@ static int amd_hsmp_rdwr(struct pci_dev *root, u32 address,
* Returns 0 for success and populates the requested number of arguments.
* Returns a negative error code for failure.
*/
-static int __hsmp_send_message(struct pci_dev *root, struct hsmp_message *msg)
+static int __hsmp_send_message(struct hsmp_socket *sock, struct hsmp_message *msg)
{
+ struct hsmp_mbaddr_info *mbinfo;
unsigned long timeout, short_sleep;
u32 mbox_status;
u32 index;
int ret;
+ mbinfo = &sock->mbinfo;
+
/* Clear the status register */
mbox_status = HSMP_STATUS_NOT_READY;
- ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_RESP, &mbox_status, HSMP_WR);
+ ret = amd_hsmp_rdwr(sock, mbinfo->msg_resp_off, &mbox_status, HSMP_WR);
if (ret) {
pr_err("Error %d clearing mailbox status register\n", ret);
return ret;
@@ -113,7 +165,7 @@ static int __hsmp_send_message(struct pci_dev *root, struct hsmp_message *msg)
index = 0;
/* Write any message arguments */
while (index < msg->num_args) {
- ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_DATA + (index << 2),
+ ret = amd_hsmp_rdwr(sock, mbinfo->msg_arg_off + (index << 2),
&msg->args[index], HSMP_WR);
if (ret) {
pr_err("Error %d writing message argument %d\n", ret, index);
@@ -123,7 +175,7 @@ static int __hsmp_send_message(struct pci_dev *root, struct hsmp_message *msg)
}
/* Write the message ID which starts the operation */
- ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_ID, &msg->msg_id, HSMP_WR);
+ ret = amd_hsmp_rdwr(sock, mbinfo->msg_id_off, &msg->msg_id, HSMP_WR);
if (ret) {
pr_err("Error %d writing message ID %u\n", ret, msg->msg_id);
return ret;
@@ -140,7 +192,7 @@ static int __hsmp_send_message(struct pci_dev *root, struct hsmp_message *msg)
timeout = jiffies + msecs_to_jiffies(HSMP_MSG_TIMEOUT);
while (time_before(jiffies, timeout)) {
- ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_RESP, &mbox_status, HSMP_RD);
+ ret = amd_hsmp_rdwr(sock, mbinfo->msg_resp_off, &mbox_status, HSMP_RD);
if (ret) {
pr_err("Error %d reading mailbox status\n", ret);
return ret;
@@ -175,7 +227,7 @@ static int __hsmp_send_message(struct pci_dev *root, struct hsmp_message *msg)
*/
index = 0;
while (index < msg->response_sz) {
- ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_DATA + (index << 2),
+ ret = amd_hsmp_rdwr(sock, mbinfo->msg_arg_off + (index << 2),
&msg->args[index], HSMP_RD);
if (ret) {
pr_err("Error %d reading response %u for message ID:%u\n",
@@ -208,21 +260,19 @@ static int validate_message(struct hsmp_message *msg)
int hsmp_send_message(struct hsmp_message *msg)
{
- struct hsmp_socket *sock = &plat_dev.sock[msg->sock_ind];
- struct amd_northbridge *nb;
+ struct hsmp_socket *sock;
int ret;
if (!msg)
return -EINVAL;
-
- nb = node_to_amd_nb(msg->sock_ind);
- if (!nb || !nb->root)
- return -ENODEV;
-
ret = validate_message(msg);
if (ret)
return ret;
+ if (!plat_dev.sock || msg->sock_ind >= plat_dev.num_sockets)
+ return -ENODEV;
+ sock = &plat_dev.sock[msg->sock_ind];
+
/*
* The time taken by smu operation to complete is between
* 10us to 1ms. Sometime it may take more time.
@@ -233,7 +283,7 @@ int hsmp_send_message(struct hsmp_message *msg)
if (ret < 0)
return ret;
- ret = __hsmp_send_message(nb->root, msg);
+ ret = __hsmp_send_message(sock, msg);
up(&sock->hsmp_sem);
@@ -244,12 +294,7 @@ EXPORT_SYMBOL_GPL(hsmp_send_message);
static int hsmp_test(u16 sock_ind, u32 value)
{
struct hsmp_message msg = { 0 };
- struct amd_northbridge *nb;
- int ret = -ENODEV;
-
- nb = node_to_amd_nb(sock_ind);
- if (!nb || !nb->root)
- return ret;
+ int ret;
/*
* Test the hsmp port by performing TEST command. The test message
@@ -261,14 +306,15 @@ static int hsmp_test(u16 sock_ind, u32 value)
msg.args[0] = value;
msg.sock_ind = sock_ind;
- ret = __hsmp_send_message(nb->root, &msg);
+ ret = hsmp_send_message(&msg);
if (ret)
return ret;
/* Check the response value */
if (msg.args[0] != (value + 1)) {
- pr_err("Socket %d test message failed, Expected 0x%08X, received 0x%08X\n",
- sock_ind, (value + 1), msg.args[0]);
+ dev_err(plat_dev.sock[sock_ind].dev,
+ "Socket %d test message failed, Expected 0x%08X, received 0x%08X\n",
+ sock_ind, (value + 1), msg.args[0]);
return -EBADE;
}
@@ -337,6 +383,181 @@ static const struct file_operations hsmp_fops = {
.compat_ioctl = hsmp_ioctl,
};
+/* This is the UUID used for HSMP */
+static const guid_t acpi_hsmp_uuid = GUID_INIT(0xb74d619d, 0x5707, 0x48bd,
+ 0xa6, 0x9f, 0x4e, 0xa2,
+ 0x87, 0x1f, 0xc2, 0xf6);
+
+static inline bool is_acpi_hsmp_uuid(union acpi_object *obj)
+{
+ if (obj->type == ACPI_TYPE_BUFFER && obj->buffer.length == UUID_SIZE)
+ return guid_equal((guid_t *)obj->buffer.pointer, &acpi_hsmp_uuid);
+
+ return false;
+}
+
+static inline int hsmp_get_uid(struct device *dev, u16 *sock_ind)
+{
+ char *uid;
+
+ /*
+ * The UID (ID00, ID01..IDXX) is used to differentiate sockets:
+ * strip the "ID" prefix and convert the remaining characters
+ * to an integer.
+ */
+ uid = acpi_device_uid(ACPI_COMPANION(dev));
+
+ return kstrtou16(uid + 2, 10, sock_ind);
+}
+
+static acpi_status hsmp_resource(struct acpi_resource *res, void *data)
+{
+ struct hsmp_socket *sock = data;
+ struct resource r;
+
+ switch (res->type) {
+ case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
+ if (!acpi_dev_resource_memory(res, &r))
+ return AE_ERROR;
+ if (!r.start || r.end < r.start || !(r.flags & IORESOURCE_MEM_WRITEABLE))
+ return AE_ERROR;
+ sock->mbinfo.base_addr = r.start;
+ sock->mbinfo.size = resource_size(&r);
+ break;
+ case ACPI_RESOURCE_TYPE_END_TAG:
+ break;
+ default:
+ return AE_ERROR;
+ }
+
+ return AE_OK;
+}
+
+static int hsmp_read_acpi_dsd(struct hsmp_socket *sock)
+{
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *guid, *mailbox_package;
+ union acpi_object *dsd;
+ acpi_status status;
+ int ret = 0;
+ int j;
+
+ status = acpi_evaluate_object_typed(ACPI_HANDLE(sock->dev), "_DSD", NULL,
+ &buf, ACPI_TYPE_PACKAGE);
+ if (ACPI_FAILURE(status)) {
+ dev_err(sock->dev, "Failed to read mailbox reg offsets from DSD table, err: %s\n",
+ acpi_format_exception(status));
+ return -ENODEV;
+ }
+
+ dsd = buf.pointer;
+
+ /* The HSMP _DSD property should contain two objects:
+ * 1. a GUID, an ACPI object of type ACPI_TYPE_BUFFER
+ * 2. a mailbox, an ACPI object of type ACPI_TYPE_PACKAGE
+ * The mailbox object contains three more ACPI_TYPE_PACKAGE objects
+ * holding the msgid, msgresp and msgarg offsets; each of these
+ * packages in turn contains two objects of type ACPI_TYPE_STRING
+ * and ACPI_TYPE_INTEGER.
+ */
+ if (!dsd || dsd->type != ACPI_TYPE_PACKAGE || dsd->package.count != 2) {
+ ret = -EINVAL;
+ goto free_buf;
+ }
+
+ guid = &dsd->package.elements[0];
+ mailbox_package = &dsd->package.elements[1];
+ if (!is_acpi_hsmp_uuid(guid) || mailbox_package->type != ACPI_TYPE_PACKAGE) {
+ dev_err(sock->dev, "Invalid hsmp _DSD table data\n");
+ ret = -EINVAL;
+ goto free_buf;
+ }
+
+ for (j = 0; j < mailbox_package->package.count; j++) {
+ union acpi_object *msgobj, *msgstr, *msgint;
+
+ msgobj = &mailbox_package->package.elements[j];
+ msgstr = &msgobj->package.elements[0];
+ msgint = &msgobj->package.elements[1];
+
+ /* package should have 1 string and 1 integer object */
+ if (msgobj->type != ACPI_TYPE_PACKAGE ||
+ msgstr->type != ACPI_TYPE_STRING ||
+ msgint->type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto free_buf;
+ }
+
+ if (!strncmp(msgstr->string.pointer, MSG_IDOFF_STR,
+ msgstr->string.length)) {
+ sock->mbinfo.msg_id_off = msgint->integer.value;
+ } else if (!strncmp(msgstr->string.pointer, MSG_RESPOFF_STR,
+ msgstr->string.length)) {
+ sock->mbinfo.msg_resp_off = msgint->integer.value;
+ } else if (!strncmp(msgstr->string.pointer, MSG_ARGOFF_STR,
+ msgstr->string.length)) {
+ sock->mbinfo.msg_arg_off = msgint->integer.value;
+ } else {
+ ret = -ENOENT;
+ goto free_buf;
+ }
+ }
+
+ if (!sock->mbinfo.msg_id_off || !sock->mbinfo.msg_resp_off ||
+ !sock->mbinfo.msg_arg_off)
+ ret = -EINVAL;
+
+free_buf:
+ ACPI_FREE(buf.pointer);
+ return ret;
+}
+
+static int hsmp_read_acpi_crs(struct hsmp_socket *sock)
+{
+ acpi_status status;
+
+ status = acpi_walk_resources(ACPI_HANDLE(sock->dev), METHOD_NAME__CRS,
+ hsmp_resource, sock);
+ if (ACPI_FAILURE(status)) {
+ dev_err(sock->dev, "Failed to look up MP1 base address from CRS method, err: %s\n",
+ acpi_format_exception(status));
+ return -EINVAL;
+ }
+ if (!sock->mbinfo.base_addr || !sock->mbinfo.size)
+ return -EINVAL;
+
+ /* The mapped region should be uncached */
+ sock->virt_base_addr = devm_ioremap_uc(sock->dev, sock->mbinfo.base_addr,
+ sock->mbinfo.size);
+ if (!sock->virt_base_addr) {
+ dev_err(sock->dev, "Failed to ioremap MP1 base address\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* Parse the ACPI table to read the data */
+static int hsmp_parse_acpi_table(struct device *dev, u16 sock_ind)
+{
+ struct hsmp_socket *sock = &plat_dev.sock[sock_ind];
+ int ret;
+
+ sock->sock_ind = sock_ind;
+ sock->dev = dev;
+ plat_dev.is_acpi_device = true;
+
+ sema_init(&sock->hsmp_sem, 1);
+
+ /* Read MP1 base address from CRS method */
+ ret = hsmp_read_acpi_crs(sock);
+ if (ret)
+ return ret;
+
+ /* Read mailbox offsets from DSD table */
+ return hsmp_read_acpi_dsd(sock);
+}
+
static ssize_t hsmp_metric_tbl_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
@@ -345,14 +566,12 @@ static ssize_t hsmp_metric_tbl_read(struct file *filp, struct kobject *kobj,
struct hsmp_message msg = { 0 };
int ret;
- /* Do not support lseek(), reads entire metric table */
- if (count < bin_attr->size) {
- dev_err(plat_dev.dev, "Wrong buffer size\n");
+ if (!sock)
return -EINVAL;
- }
- if (!sock) {
- dev_err(plat_dev.dev, "Failed to read attribute private data\n");
+ /* Do not support lseek(), reads entire metric table */
+ if (count < bin_attr->size) {
+ dev_err(sock->dev, "Wrong buffer size\n");
return -EINVAL;
}
@@ -388,13 +607,13 @@ static int hsmp_get_tbl_dram_base(u16 sock_ind)
*/
dram_addr = msg.args[0] | ((u64)(msg.args[1]) << 32);
if (!dram_addr) {
- dev_err(plat_dev.dev, "Invalid DRAM address for metric table\n");
+ dev_err(sock->dev, "Invalid DRAM address for metric table\n");
return -ENOMEM;
}
- sock->metric_tbl_addr = devm_ioremap(plat_dev.dev, dram_addr,
+ sock->metric_tbl_addr = devm_ioremap(sock->dev, dram_addr,
sizeof(struct hsmp_metric_table));
if (!sock->metric_tbl_addr) {
- dev_err(plat_dev.dev, "Failed to ioremap metric table addr\n");
+ dev_err(sock->dev, "Failed to ioremap metric table addr\n");
return -ENOMEM;
}
return 0;
@@ -422,65 +641,91 @@ static int hsmp_init_metric_tbl_bin_attr(struct bin_attribute **hattrs, u16 sock
hattrs[0] = hattr;
if (plat_dev.proto_ver == HSMP_PROTO_VER6)
- return (hsmp_get_tbl_dram_base(sock_ind));
+ return hsmp_get_tbl_dram_base(sock_ind);
else
return 0;
}
-/* One bin sysfs for metrics table*/
+/* One bin sysfs for metrics table */
#define NUM_HSMP_ATTRS 1
-static int hsmp_create_sysfs_interface(void)
+static int hsmp_create_attr_list(struct attribute_group *attr_grp,
+ struct device *dev, u16 sock_ind)
{
- const struct attribute_group **hsmp_attr_grps;
struct bin_attribute **hsmp_bin_attrs;
+
+ /* Null terminated list of attributes */
+ hsmp_bin_attrs = devm_kcalloc(dev, NUM_HSMP_ATTRS + 1,
+ sizeof(*hsmp_bin_attrs),
+ GFP_KERNEL);
+ if (!hsmp_bin_attrs)
+ return -ENOMEM;
+
+ attr_grp->bin_attrs = hsmp_bin_attrs;
+
+ return hsmp_init_metric_tbl_bin_attr(hsmp_bin_attrs, sock_ind);
+}
+
+static int hsmp_create_non_acpi_sysfs_if(struct device *dev)
+{
+ const struct attribute_group **hsmp_attr_grps;
struct attribute_group *attr_grp;
- int ret;
u16 i;
- /* String formatting is currently limited to u8 sockets */
- if (WARN_ON(plat_dev.num_sockets > U8_MAX))
- return -ERANGE;
-
- hsmp_attr_grps = devm_kzalloc(plat_dev.dev, sizeof(struct attribute_group *) *
- (plat_dev.num_sockets + 1), GFP_KERNEL);
+ hsmp_attr_grps = devm_kcalloc(dev, plat_dev.num_sockets + 1,
+ sizeof(*hsmp_attr_grps),
+ GFP_KERNEL);
if (!hsmp_attr_grps)
return -ENOMEM;
/* Create a sysfs directory for each socket */
for (i = 0; i < plat_dev.num_sockets; i++) {
- attr_grp = devm_kzalloc(plat_dev.dev, sizeof(struct attribute_group), GFP_KERNEL);
+ attr_grp = devm_kzalloc(dev, sizeof(struct attribute_group),
+ GFP_KERNEL);
if (!attr_grp)
return -ENOMEM;
snprintf(plat_dev.sock[i].name, HSMP_ATTR_GRP_NAME_SIZE, "socket%u", (u8)i);
- attr_grp->name = plat_dev.sock[i].name;
-
- /* Null terminated list of attributes */
- hsmp_bin_attrs = devm_kzalloc(plat_dev.dev, sizeof(struct bin_attribute *) *
- (NUM_HSMP_ATTRS + 1), GFP_KERNEL);
- if (!hsmp_bin_attrs)
- return -ENOMEM;
-
- attr_grp->bin_attrs = hsmp_bin_attrs;
+ attr_grp->name = plat_dev.sock[i].name;
attr_grp->is_bin_visible = hsmp_is_sock_attr_visible;
hsmp_attr_grps[i] = attr_grp;
- /* Now create the leaf nodes */
- ret = hsmp_init_metric_tbl_bin_attr(hsmp_bin_attrs, i);
- if (ret)
- return ret;
+ hsmp_create_attr_list(attr_grp, dev, i);
}
- return devm_device_add_groups(plat_dev.dev, hsmp_attr_grps);
+
+ return devm_device_add_groups(dev, hsmp_attr_grps);
+}
+
+static int hsmp_create_acpi_sysfs_if(struct device *dev)
+{
+ struct attribute_group *attr_grp;
+ u16 sock_ind;
+ int ret;
+
+ attr_grp = devm_kzalloc(dev, sizeof(struct attribute_group), GFP_KERNEL);
+ if (!attr_grp)
+ return -ENOMEM;
+
+ attr_grp->is_bin_visible = hsmp_is_sock_attr_visible;
+
+ ret = hsmp_get_uid(dev, &sock_ind);
+ if (ret)
+ return ret;
+
+ ret = hsmp_create_attr_list(attr_grp, dev, sock_ind);
+ if (ret)
+ return ret;
+
+ return devm_device_add_group(dev, attr_grp);
}
-static int hsmp_cache_proto_ver(void)
+static int hsmp_cache_proto_ver(u16 sock_ind)
{
struct hsmp_message msg = { 0 };
int ret;
msg.msg_id = HSMP_GET_PROTO_VER;
- msg.sock_ind = 0;
+ msg.sock_ind = sock_ind;
msg.response_sz = hsmp_msg_desc_table[HSMP_GET_PROTO_VER].response_sz;
ret = hsmp_send_message(&msg);
@@ -490,45 +735,150 @@ static int hsmp_cache_proto_ver(void)
return ret;
}
-static int hsmp_pltdrv_probe(struct platform_device *pdev)
+static inline bool is_f1a_m0h(void)
{
- int ret, i;
+ if (boot_cpu_data.x86 == 0x1A && boot_cpu_data.x86_model <= 0x0F)
+ return true;
- plat_dev.sock = devm_kzalloc(&pdev->dev,
- (plat_dev.num_sockets * sizeof(struct hsmp_socket)),
- GFP_KERNEL);
- if (!plat_dev.sock)
- return -ENOMEM;
- plat_dev.dev = &pdev->dev;
+ return false;
+}
+
+static int init_platform_device(struct device *dev)
+{
+ struct hsmp_socket *sock;
+ int ret, i;
for (i = 0; i < plat_dev.num_sockets; i++) {
- sema_init(&plat_dev.sock[i].hsmp_sem, 1);
- plat_dev.sock[i].sock_ind = i;
+ if (!node_to_amd_nb(i))
+ return -ENODEV;
+ sock = &plat_dev.sock[i];
+ sock->root = node_to_amd_nb(i)->root;
+ sock->sock_ind = i;
+ sock->dev = dev;
+ sock->mbinfo.base_addr = SMN_HSMP_BASE;
+
+ /*
+ * This is a transitional change from non-ACPI to ACPI, only
+ * family 0x1A, model 0x00 platform is supported for both ACPI and non-ACPI.
+ */
+ if (is_f1a_m0h())
+ sock->mbinfo.msg_id_off = SMN_HSMP_MSG_ID_F1A_M0H;
+ else
+ sock->mbinfo.msg_id_off = SMN_HSMP_MSG_ID;
+
+ sock->mbinfo.msg_resp_off = SMN_HSMP_MSG_RESP;
+ sock->mbinfo.msg_arg_off = SMN_HSMP_MSG_DATA;
+ sema_init(&sock->hsmp_sem, 1);
+
+ /* Test the hsmp interface on each socket */
+ ret = hsmp_test(i, 0xDEADBEEF);
+ if (ret) {
+ dev_err(dev, "HSMP test message failed on Fam:%x model:%x\n",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+ dev_err(dev, "Is HSMP disabled in BIOS ?\n");
+ return ret;
+ }
}
- plat_dev.hsmp_device.name = HSMP_CDEV_NAME;
- plat_dev.hsmp_device.minor = MISC_DYNAMIC_MINOR;
- plat_dev.hsmp_device.fops = &hsmp_fops;
- plat_dev.hsmp_device.parent = &pdev->dev;
- plat_dev.hsmp_device.nodename = HSMP_DEVNODE_NAME;
- plat_dev.hsmp_device.mode = 0644;
+ return 0;
+}
+
+static const struct acpi_device_id amd_hsmp_acpi_ids[] = {
+ {ACPI_HSMP_DEVICE_HID, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, amd_hsmp_acpi_ids);
+
+static int hsmp_pltdrv_probe(struct platform_device *pdev)
+{
+ struct acpi_device *adev;
+ u16 sock_ind = 0;
+ int ret;
+
+ /*
+ * On an ACPI-enabled BIOS, an ACPI HSMP device is added for each
+ * socket, so probing happens per socket. However, the socket data
+ * must be contiguous so it can be accessed as an array, hence
+ * memory for all the sockets is allocated on the first probe
+ * instead of on each probe.
+ */
+ if (!plat_dev.is_probed) {
+ plat_dev.sock = devm_kcalloc(&pdev->dev, plat_dev.num_sockets,
+ sizeof(*plat_dev.sock),
+ GFP_KERNEL);
+ if (!plat_dev.sock)
+ return -ENOMEM;
+ }
+ adev = ACPI_COMPANION(&pdev->dev);
+ if (adev && !acpi_match_device_ids(adev, amd_hsmp_acpi_ids)) {
+ ret = hsmp_get_uid(&pdev->dev, &sock_ind);
+ if (ret)
+ return ret;
+ if (sock_ind >= plat_dev.num_sockets)
+ return -EINVAL;
+ ret = hsmp_parse_acpi_table(&pdev->dev, sock_ind);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to parse ACPI table\n");
+ return ret;
+ }
+ /* Test the hsmp interface */
+ ret = hsmp_test(sock_ind, 0xDEADBEEF);
+ if (ret) {
+ dev_err(&pdev->dev, "HSMP test message failed on Fam:%x model:%x\n",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+ dev_err(&pdev->dev, "Is HSMP disabled in BIOS ?\n");
+ return ret;
+ }
+ } else {
+ ret = init_platform_device(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to init HSMP mailbox\n");
+ return ret;
+ }
+ }
- ret = hsmp_cache_proto_ver();
+ ret = hsmp_cache_proto_ver(sock_ind);
if (ret) {
- dev_err(plat_dev.dev, "Failed to read HSMP protocol version\n");
+ dev_err(&pdev->dev, "Failed to read HSMP protocol version\n");
return ret;
}
- ret = hsmp_create_sysfs_interface();
+ if (plat_dev.is_acpi_device)
+ ret = hsmp_create_acpi_sysfs_if(&pdev->dev);
+ else
+ ret = hsmp_create_non_acpi_sysfs_if(&pdev->dev);
if (ret)
- dev_err(plat_dev.dev, "Failed to create HSMP sysfs interface\n");
+ dev_err(&pdev->dev, "Failed to create HSMP sysfs interface\n");
+
+ if (!plat_dev.is_probed) {
+ plat_dev.hsmp_device.name = HSMP_CDEV_NAME;
+ plat_dev.hsmp_device.minor = MISC_DYNAMIC_MINOR;
+ plat_dev.hsmp_device.fops = &hsmp_fops;
+ plat_dev.hsmp_device.parent = &pdev->dev;
+ plat_dev.hsmp_device.nodename = HSMP_DEVNODE_NAME;
+ plat_dev.hsmp_device.mode = 0644;
+
+ ret = misc_register(&plat_dev.hsmp_device);
+ if (ret)
+ return ret;
+
+ plat_dev.is_probed = true;
+ }
+
+ return 0;
- return misc_register(&plat_dev.hsmp_device);
}
static void hsmp_pltdrv_remove(struct platform_device *pdev)
{
- misc_deregister(&plat_dev.hsmp_device);
+ /*
+ * Only one misc_device is registered even on a multi-socket system,
+ * so deregister it only once.
+ */
+ if (plat_dev.is_probed) {
+ misc_deregister(&plat_dev.hsmp_device);
+ plat_dev.is_probed = false;
+ }
}
static struct platform_driver amd_hsmp_driver = {
@@ -536,15 +886,30 @@ static struct platform_driver amd_hsmp_driver = {
.remove_new = hsmp_pltdrv_remove,
.driver = {
.name = DRIVER_NAME,
+ .acpi_match_table = amd_hsmp_acpi_ids,
},
};
static struct platform_device *amd_hsmp_platdev;
+static int hsmp_plat_dev_register(void)
+{
+ int ret;
+
+ amd_hsmp_platdev = platform_device_alloc(DRIVER_NAME, PLATFORM_DEVID_NONE);
+ if (!amd_hsmp_platdev)
+ return -ENOMEM;
+
+ ret = platform_device_add(amd_hsmp_platdev);
+ if (ret)
+ platform_device_put(amd_hsmp_platdev);
+
+ return ret;
+}
+
static int __init hsmp_plt_init(void)
{
int ret = -ENODEV;
- int i;
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD || boot_cpu_data.x86 < 0x19) {
pr_err("HSMP is not supported on Family:%x model:%x\n",
@@ -557,40 +922,19 @@ static int __init hsmp_plt_init(void)
* if we have N SMN/DF interfaces that ideally means N sockets
*/
plat_dev.num_sockets = amd_nb_num();
- if (plat_dev.num_sockets == 0)
+ if (plat_dev.num_sockets == 0 || plat_dev.num_sockets > MAX_AMD_SOCKETS)
return ret;
- /* Test the hsmp interface on each socket */
- for (i = 0; i < plat_dev.num_sockets; i++) {
- ret = hsmp_test(i, 0xDEADBEEF);
- if (ret) {
- pr_err("HSMP test message failed on Fam:%x model:%x\n",
- boot_cpu_data.x86, boot_cpu_data.x86_model);
- pr_err("Is HSMP disabled in BIOS ?\n");
- return ret;
- }
- }
-
ret = platform_driver_register(&amd_hsmp_driver);
if (ret)
return ret;
- amd_hsmp_platdev = platform_device_alloc(DRIVER_NAME, PLATFORM_DEVID_NONE);
- if (!amd_hsmp_platdev) {
- ret = -ENOMEM;
- goto drv_unregister;
- }
-
- ret = platform_device_add(amd_hsmp_platdev);
- if (ret) {
- platform_device_put(amd_hsmp_platdev);
- goto drv_unregister;
+ if (!plat_dev.is_acpi_device) {
+ ret = hsmp_plat_dev_register();
+ if (ret)
+ platform_driver_unregister(&amd_hsmp_driver);
}
- return 0;
-
-drv_unregister:
- platform_driver_unregister(&amd_hsmp_driver);
return ret;
}
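For reference, user space reaches this mailbox through the /dev/hsmp misc device registered above. A minimal sketch of querying the protocol version follows; it assumes the kernel's uapi header <asm/amd_hsmp.h> provides struct hsmp_message, HSMP_IOCTL_CMD and HSMP_GET_PROTO_VER, so treat those exact names as assumptions rather than something this diff guarantees.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/amd_hsmp.h>	/* assumed uapi: struct hsmp_message, HSMP_IOCTL_CMD */

int main(void)
{
	struct hsmp_message msg;
	int fd;

	fd = open("/dev/hsmp", O_RDWR);	/* misc device registered by this driver */
	if (fd < 0) {
		perror("open /dev/hsmp");
		return 1;
	}

	memset(&msg, 0, sizeof(msg));
	msg.msg_id = HSMP_GET_PROTO_VER;	/* same query hsmp_cache_proto_ver() issues */
	msg.response_sz = 1;			/* one response word expected */
	msg.sock_ind = 0;			/* socket 0 */

	if (ioctl(fd, HSMP_IOCTL_CMD, &msg) < 0) {
		perror("HSMP_IOCTL_CMD");
		close(fd);
		return 1;
	}

	printf("HSMP protocol version: %u\n", msg.args[0]);
	close(fd);
	return 0;
}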
diff --git a/drivers/platform/x86/amd/pmf/Makefile b/drivers/platform/x86/amd/pmf/Makefile
index 6b26e48ce..7d6079b02 100644
--- a/drivers/platform/x86/amd/pmf/Makefile
+++ b/drivers/platform/x86/amd/pmf/Makefile
@@ -7,4 +7,4 @@
obj-$(CONFIG_AMD_PMF) += amd-pmf.o
amd-pmf-objs := core.o acpi.o sps.o \
auto-mode.o cnqf.o \
- tee-if.o spc.o
+ tee-if.o spc.o pmf-quirks.o
diff --git a/drivers/platform/x86/amd/pmf/acpi.c b/drivers/platform/x86/amd/pmf/acpi.c
index 1c0d2bbc1..1157ec148 100644
--- a/drivers/platform/x86/amd/pmf/acpi.c
+++ b/drivers/platform/x86/amd/pmf/acpi.c
@@ -90,12 +90,96 @@ out:
return err;
}
+static union acpi_object *apts_if_call(struct amd_pmf_dev *pdev, u32 state_index)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_handle ahandle = ACPI_HANDLE(pdev->dev);
+ struct acpi_object_list apts_if_arg_list;
+ union acpi_object apts_if_args[3];
+ acpi_status status;
+
+ apts_if_arg_list.count = 3;
+ apts_if_arg_list.pointer = &apts_if_args[0];
+
+ apts_if_args[0].type = ACPI_TYPE_INTEGER;
+ apts_if_args[0].integer.value = 1;
+ apts_if_args[1].type = ACPI_TYPE_INTEGER;
+ apts_if_args[1].integer.value = state_index;
+ apts_if_args[2].type = ACPI_TYPE_INTEGER;
+ apts_if_args[2].integer.value = 0;
+
+ status = acpi_evaluate_object(ahandle, "APTS", &apts_if_arg_list, &buffer);
+ if (ACPI_FAILURE(status)) {
+ dev_err(pdev->dev, "APTS state_idx:%u call failed\n", state_index);
+ kfree(buffer.pointer);
+ return NULL;
+ }
+
+ return buffer.pointer;
+}
+
+static int apts_if_call_store_buffer(struct amd_pmf_dev *pdev,
+ u32 index, void *data, size_t out_sz)
+{
+ union acpi_object *info;
+ size_t size;
+ int err = 0;
+
+ info = apts_if_call(pdev, index);
+ if (!info)
+ return -EIO;
+
+ if (info->type != ACPI_TYPE_BUFFER) {
+ dev_err(pdev->dev, "object is not a buffer\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ size = *(u16 *)info->buffer.pointer;
+ if (info->buffer.length < size) {
+ dev_err(pdev->dev, "buffer smaller than header size %u < %zu\n",
+ info->buffer.length, size);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (size < out_sz) {
+ dev_err(pdev->dev, "buffer too small %zu\n", size);
+ err = -EINVAL;
+ goto out;
+ }
+
+ memcpy(data, info->buffer.pointer, out_sz);
+out:
+ kfree(info);
+ return err;
+}
+
int is_apmf_func_supported(struct amd_pmf_dev *pdev, unsigned long index)
{
/* If bit-n is set, that indicates function n+1 is supported */
return !!(pdev->supported_func & BIT(index - 1));
}
+int apts_get_static_slider_granular_v2(struct amd_pmf_dev *pdev,
+ struct amd_pmf_apts_granular_output *data, u32 apts_idx)
+{
+ if (!is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
+ return -EINVAL;
+
+ return apts_if_call_store_buffer(pdev, apts_idx, data, sizeof(*data));
+}
+
+int apmf_get_static_slider_granular_v2(struct amd_pmf_dev *pdev,
+ struct apmf_static_slider_granular_output_v2 *data)
+{
+ if (!is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
+ return -EINVAL;
+
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR,
+ data, sizeof(*data));
+}
+
int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
struct apmf_static_slider_granular_output *data)
{
@@ -140,6 +224,43 @@ static void apmf_sbios_heartbeat_notify(struct work_struct *work)
kfree(info);
}
+int amd_pmf_notify_sbios_heartbeat_event_v2(struct amd_pmf_dev *dev, u8 flag)
+{
+ struct sbios_hb_event_v2 args = { };
+ struct acpi_buffer params;
+ union acpi_object *info;
+
+ args.size = sizeof(args);
+
+ switch (flag) {
+ case ON_LOAD:
+ args.load = 1;
+ break;
+ case ON_UNLOAD:
+ args.unload = 1;
+ break;
+ case ON_SUSPEND:
+ args.suspend = 1;
+ break;
+ case ON_RESUME:
+ args.resume = 1;
+ break;
+ default:
+ dev_dbg(dev->dev, "Failed to send v2 heartbeat event, flag:0x%x\n", flag);
+ return -EINVAL;
+ }
+
+ params.length = sizeof(args);
+ params.pointer = &args;
+
+ info = apmf_if_call(dev, APMF_FUNC_SBIOS_HEARTBEAT_V2, &params);
+ if (!info)
+ return -EIO;
+
+ kfree(info);
+ return 0;
+}
+
int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx)
{
union acpi_object *info;
@@ -166,6 +287,11 @@ int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data
return apmf_if_call_store_buffer(pdev, APMF_FUNC_AUTO_MODE, data, sizeof(*data));
}
+int apmf_get_sbios_requests_v2(struct amd_pmf_dev *pdev, struct apmf_sbios_req_v2 *req)
+{
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_SBIOS_REQUESTS, req, sizeof(*req));
+}
+
int apmf_get_sbios_requests(struct amd_pmf_dev *pdev, struct apmf_sbios_req *req)
{
return apmf_if_call_store_buffer(pdev, APMF_FUNC_SBIOS_REQUESTS,
@@ -217,9 +343,14 @@ static int apmf_if_verify_interface(struct amd_pmf_dev *pdev)
if (err)
return err;
- pdev->supported_func = output.supported_functions;
- dev_dbg(pdev->dev, "supported functions:0x%x notifications:0x%x\n",
- output.supported_functions, output.notification_mask);
+ /* only set if not already set by a quirk */
+ if (!pdev->supported_func)
+ pdev->supported_func = output.supported_functions;
+
+ dev_dbg(pdev->dev, "supported functions:0x%x notifications:0x%x version:%u\n",
+ output.supported_functions, output.notification_mask, output.version);
+
+ pdev->pmf_if_version = output.version;
return 0;
}
@@ -320,7 +451,7 @@ void apmf_acpi_deinit(struct amd_pmf_dev *pmf_dev)
{
acpi_handle ahandle = ACPI_HANDLE(pmf_dev->dev);
- if (pmf_dev->hb_interval)
+ if (pmf_dev->hb_interval && pmf_dev->pmf_if_version == PMF_IF_V1)
cancel_delayed_work_sync(&pmf_dev->heart_beat);
if (is_apmf_func_supported(pmf_dev, APMF_FUNC_AUTO_MODE) &&
@@ -344,7 +475,7 @@ int apmf_acpi_init(struct amd_pmf_dev *pmf_dev)
goto out;
}
- if (pmf_dev->hb_interval) {
+ if (pmf_dev->hb_interval && pmf_dev->pmf_if_version == PMF_IF_V1) {
/* send heartbeats only if the interval is not zero */
INIT_DELAYED_WORK(&pmf_dev->heart_beat, apmf_sbios_heartbeat_notify);
schedule_delayed_work(&pmf_dev->heart_beat, 0);
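The v2 heartbeat event is handed to APMF function 16 as a packed buffer whose length is carried in its own leading size field. A host-side sketch that re-declares the structure locally (only to illustrate the assumed wire layout, not the driver's header) and checks its packing:

#include <stddef.h>
#include <stdint.h>

/* Local mirror of struct sbios_hb_event_v2 from pmf.h, re-declared here
 * purely for illustration of the packed layout that
 * amd_pmf_notify_sbios_heartbeat_event_v2() passes to APMF function 16. */
struct hb_event_v2 {
	uint16_t size;
	uint8_t load;
	uint8_t unload;
	uint8_t suspend;
	uint8_t resume;
} __attribute__((packed));

/* One 16-bit size field followed by four one-byte flags: 6 bytes total. */
_Static_assert(sizeof(struct hb_event_v2) == 6, "unexpected size");
_Static_assert(offsetof(struct hb_event_v2, load) == 2, "unexpected offset");
_Static_assert(offsetof(struct hb_event_v2, resume) == 5, "unexpected offset");

int main(void)
{
	return 0;	/* the _Static_asserts above are the whole point */
}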
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index 4f734e049..64e6e34a2 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -113,8 +113,9 @@ static void amd_pmf_dbgfs_unregister(struct amd_pmf_dev *dev)
static void amd_pmf_dbgfs_register(struct amd_pmf_dev *dev)
{
dev->dbgfs_dir = debugfs_create_dir("amd_pmf", NULL);
- debugfs_create_file("current_power_limits", 0644, dev->dbgfs_dir, dev,
- &current_power_limits_fops);
+ if (dev->pmf_if_version == PMF_IF_V1)
+ debugfs_create_file("current_power_limits", 0644, dev->dbgfs_dir, dev,
+ &current_power_limits_fops);
}
int amd_pmf_get_power_source(void)
@@ -299,6 +300,9 @@ static int amd_pmf_suspend_handler(struct device *dev)
if (pdev->smart_pc_enabled)
cancel_delayed_work_sync(&pdev->pb_work);
+ if (is_apmf_func_supported(pdev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
+ amd_pmf_notify_sbios_heartbeat_event_v2(pdev, ON_SUSPEND);
+
return 0;
}
@@ -313,6 +317,9 @@ static int amd_pmf_resume_handler(struct device *dev)
return ret;
}
+ if (is_apmf_func_supported(pdev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
+ amd_pmf_notify_sbios_heartbeat_event_v2(pdev, ON_RESUME);
+
if (pdev->smart_pc_enabled)
schedule_delayed_work(&pdev->pb_work, msecs_to_jiffies(2000));
@@ -438,11 +445,14 @@ static int amd_pmf_probe(struct platform_device *pdev)
mutex_init(&dev->lock);
mutex_init(&dev->update_mutex);
+ amd_pmf_quirks_init(dev);
apmf_acpi_init(dev);
platform_set_drvdata(pdev, dev);
amd_pmf_dbgfs_register(dev);
amd_pmf_init_features(dev);
apmf_install_handler(dev);
+ if (is_apmf_func_supported(dev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
+ amd_pmf_notify_sbios_heartbeat_event_v2(dev, ON_LOAD);
dev_info(dev->dev, "registered PMF device successfully\n");
@@ -454,6 +464,8 @@ static void amd_pmf_remove(struct platform_device *pdev)
struct amd_pmf_dev *dev = platform_get_drvdata(pdev);
amd_pmf_deinit_features(dev);
+ if (is_apmf_func_supported(dev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
+ amd_pmf_notify_sbios_heartbeat_event_v2(dev, ON_UNLOAD);
apmf_acpi_deinit(dev);
amd_pmf_dbgfs_unregister(dev);
mutex_destroy(&dev->lock);
diff --git a/drivers/platform/x86/amd/pmf/pmf-quirks.c b/drivers/platform/x86/amd/pmf/pmf-quirks.c
new file mode 100644
index 000000000..0b2eb0ae8
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/pmf-quirks.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD Platform Management Framework Driver Quirks
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#include <linux/dmi.h>
+
+#include "pmf.h"
+
+struct quirk_entry {
+ u32 supported_func;
+};
+
+static struct quirk_entry quirk_no_sps_bug = {
+ .supported_func = 0x4003,
+};
+
+static const struct dmi_system_id fwbug_list[] = {
+ {
+ .ident = "ROG Zephyrus G14",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GA403UV"),
+ },
+ .driver_data = &quirk_no_sps_bug,
+ },
+ {}
+};
+
+void amd_pmf_quirks_init(struct amd_pmf_dev *dev)
+{
+ const struct dmi_system_id *dmi_id;
+ struct quirk_entry *quirks;
+
+ dmi_id = dmi_first_match(fwbug_list);
+ if (!dmi_id)
+ return;
+
+ quirks = dmi_id->driver_data;
+ if (quirks->supported_func) {
+ dev->supported_func = quirks->supported_func;
+ pr_info("Using supported funcs quirk to avoid %s platform firmware bug\n",
+ dmi_id->ident);
+ }
+}
+
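The quirk overrides the supported-functions mask that apmf_if_verify_interface() would otherwise take from the BIOS. Following the bit-n-means-function-n+1 rule documented in acpi.c, a small host-side sketch decodes which APMF function indices a mask such as 0x4003 enables:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the rule in is_apmf_func_supported(): bit n set in the
 * supported-functions mask means APMF function n + 1 is implemented. */
static int func_supported(uint32_t mask, unsigned int index)
{
	return !!(mask & (1u << (index - 1)));
}

int main(void)
{
	uint32_t mask = 0x4003;	/* value installed by quirk_no_sps_bug */
	unsigned int i;

	for (i = 1; i <= 32; i++)
		if (func_supported(mask, i))
			printf("APMF function %u reported as supported\n", i);
	return 0;
}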
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
index 66cae1cca..eeedd0c03 100644
--- a/drivers/platform/x86/amd/pmf/pmf.h
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -17,7 +17,11 @@
#define POLICY_BUF_MAX_SZ 0x4b000
#define POLICY_SIGN_COOKIE 0x31535024
#define POLICY_COOKIE_OFFSET 0x10
-#define POLICY_COOKIE_LEN 0x14
+
+struct cookie_header {
+ u32 sign;
+ u32 length;
+} __packed;
/* APMF Functions */
#define APMF_FUNC_VERIFY_INTERFACE 0
@@ -30,6 +34,7 @@
#define APMF_FUNC_STATIC_SLIDER_GRANULAR 9
#define APMF_FUNC_DYN_SLIDER_AC 11
#define APMF_FUNC_DYN_SLIDER_DC 12
+#define APMF_FUNC_SBIOS_HEARTBEAT_V2 16
/* Message Definitions */
#define SET_SPL 0x03 /* SPL: Sustained Power Limit */
@@ -50,6 +55,8 @@
#define GET_STT_LIMIT_APU 0x20
#define GET_STT_LIMIT_HS2 0x21
#define SET_P3T 0x23 /* P3T: Peak Package Power Limit */
+#define SET_PMF_PPT 0x25
+#define SET_PMF_PPT_APU_ONLY 0x26
/* OS slider update notification */
#define DC_BEST_PERF 0
@@ -83,6 +90,47 @@
#define TA_OUTPUT_RESERVED_MEM 906
#define MAX_OPERATION_PARAMS 4
+#define PMF_IF_V1 1
+#define PMF_IF_V2 2
+
+#define APTS_MAX_STATES 16
+
+/* APTS PMF BIOS Interface */
+struct amd_pmf_apts_output {
+ u16 table_version;
+ u32 fan_table_idx;
+ u32 pmf_ppt;
+ u32 ppt_pmf_apu_only;
+ u32 stt_min_limit;
+ u8 stt_skin_temp_limit_apu;
+ u8 stt_skin_temp_limit_hs2;
+} __packed;
+
+struct amd_pmf_apts_granular_output {
+ u16 size;
+ struct amd_pmf_apts_output val;
+} __packed;
+
+struct amd_pmf_apts_granular {
+ u16 size;
+ struct amd_pmf_apts_output val[APTS_MAX_STATES];
+};
+
+struct sbios_hb_event_v2 {
+ u16 size;
+ u8 load;
+ u8 unload;
+ u8 suspend;
+ u8 resume;
+} __packed;
+
+enum sbios_hb_v2 {
+ ON_LOAD,
+ ON_UNLOAD,
+ ON_SUSPEND,
+ ON_RESUME,
+};
+
/* AMD PMF BIOS interfaces */
struct apmf_verify_interface {
u16 size;
@@ -114,6 +162,18 @@ struct apmf_sbios_req {
u8 skin_temp_hs2;
} __packed;
+struct apmf_sbios_req_v2 {
+ u16 size;
+ u32 pending_req;
+ u8 rsd;
+ u32 ppt_pmf;
+ u32 ppt_pmf_apu_only;
+ u32 stt_min_limit;
+ u8 skin_temp_apu;
+ u8 skin_temp_hs2;
+ u32 custom_policy[10];
+} __packed;
+
struct apmf_fan_idx {
u16 size;
u8 fan_ctl_mode;
@@ -194,6 +254,14 @@ enum power_modes {
POWER_MODE_MAX,
};
+enum power_modes_v2 {
+ POWER_MODE_BEST_PERFORMANCE,
+ POWER_MODE_BALANCED,
+ POWER_MODE_BEST_POWER_EFFICIENCY,
+ POWER_MODE_ENERGY_SAVE,
+ POWER_MODE_V2_MAX,
+};
+
struct amd_pmf_dev {
void __iomem *regbase;
void __iomem *smu_virt_addr;
@@ -229,10 +297,15 @@ struct amd_pmf_dev {
struct delayed_work pb_work;
struct pmf_action_table *prev_data;
u64 policy_addr;
- void *policy_base;
+ void __iomem *policy_base;
bool smart_pc_enabled;
+ u16 pmf_if_version;
};
+struct apmf_sps_prop_granular_v2 {
+ u8 power_states[POWER_SOURCE_MAX][POWER_MODE_V2_MAX];
+} __packed;
+
struct apmf_sps_prop_granular {
u32 fppt;
u32 sppt;
@@ -254,6 +327,16 @@ struct amd_pmf_static_slider_granular {
struct apmf_sps_prop_granular prop[POWER_SOURCE_MAX][POWER_MODE_MAX];
};
+struct apmf_static_slider_granular_output_v2 {
+ u16 size;
+ struct apmf_sps_prop_granular_v2 sps_idx;
+} __packed;
+
+struct amd_pmf_static_slider_granular_v2 {
+ u16 size;
+ struct apmf_sps_prop_granular_v2 sps_idx;
+};
+
struct os_power_slider {
u16 size;
u8 slider_event;
@@ -585,6 +668,7 @@ int amd_pmf_get_power_source(void);
int apmf_install_handler(struct amd_pmf_dev *pmf_dev);
int apmf_os_power_slider_update(struct amd_pmf_dev *dev, u8 flag);
int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer);
+int amd_pmf_notify_sbios_heartbeat_event_v2(struct amd_pmf_dev *dev, u8 flag);
/* SPS Layer */
int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf);
@@ -602,6 +686,10 @@ const char *amd_pmf_source_as_str(unsigned int state);
int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx);
int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf);
+int apmf_get_static_slider_granular_v2(struct amd_pmf_dev *dev,
+ struct apmf_static_slider_granular_output_v2 *data);
+int apts_get_static_slider_granular_v2(struct amd_pmf_dev *pdev,
+ struct amd_pmf_apts_granular_output *data, u32 apts_idx);
/* Auto Mode Layer */
int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data);
@@ -609,6 +697,7 @@ void amd_pmf_init_auto_mode(struct amd_pmf_dev *dev);
void amd_pmf_deinit_auto_mode(struct amd_pmf_dev *dev);
void amd_pmf_trans_automode(struct amd_pmf_dev *dev, int socket_power, ktime_t time_elapsed_ms);
int apmf_get_sbios_requests(struct amd_pmf_dev *pdev, struct apmf_sbios_req *req);
+int apmf_get_sbios_requests_v2(struct amd_pmf_dev *pdev, struct apmf_sbios_req_v2 *req);
void amd_pmf_update_2_cql(struct amd_pmf_dev *dev, bool is_cql_event);
int amd_pmf_reset_amt(struct amd_pmf_dev *dev);
@@ -631,4 +720,7 @@ int apmf_check_smart_pc(struct amd_pmf_dev *pmf_dev);
void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
+/* Quirk infrastructure */
+void amd_pmf_quirks_init(struct amd_pmf_dev *dev);
+
#endif /* PMF_H */
diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
index 33e23e25c..92f7fb222 100644
--- a/drivers/platform/x86/amd/pmf/sps.c
+++ b/drivers/platform/x86/amd/pmf/sps.c
@@ -10,9 +10,27 @@
#include "pmf.h"
+static struct amd_pmf_static_slider_granular_v2 config_store_v2;
static struct amd_pmf_static_slider_granular config_store;
+static struct amd_pmf_apts_granular apts_config_store;
#ifdef CONFIG_AMD_PMF_DEBUG
+static const char *slider_v2_as_str(unsigned int state)
+{
+ switch (state) {
+ case POWER_MODE_BEST_PERFORMANCE:
+ return "Best Performance";
+ case POWER_MODE_BALANCED:
+ return "Balanced";
+ case POWER_MODE_BEST_POWER_EFFICIENCY:
+ return "Best Power Efficiency";
+ case POWER_MODE_ENERGY_SAVE:
+ return "Energy Save";
+ default:
+ return "Unknown Power Mode";
+ }
+}
+
static const char *slider_as_str(unsigned int state)
{
switch (state) {
@@ -63,10 +81,88 @@ static void amd_pmf_dump_sps_defaults(struct amd_pmf_static_slider_granular *dat
pr_debug("Static Slider Data - END\n");
}
+
+static void amd_pmf_dump_sps_defaults_v2(struct amd_pmf_static_slider_granular_v2 *data)
+{
+ unsigned int i, j;
+
+ pr_debug("Static Slider APTS state index data - BEGIN");
+ pr_debug("size: %u\n", data->size);
+
+ for (i = 0; i < POWER_SOURCE_MAX; i++)
+ for (j = 0; j < POWER_MODE_V2_MAX; j++)
+ pr_debug("%s %s: %u\n", amd_pmf_source_as_str(i), slider_v2_as_str(j),
+ data->sps_idx.power_states[i][j]);
+
+ pr_debug("Static Slider APTS state index data - END\n");
+}
+
+static void amd_pmf_dump_apts_sps_defaults(struct amd_pmf_apts_granular *info)
+{
+ int i;
+
+ pr_debug("Static Slider APTS index default values data - BEGIN");
+
+ for (i = 0; i < APTS_MAX_STATES; i++) {
+ pr_debug("Table Version[%d] = %u\n", i, info->val[i].table_version);
+ pr_debug("Fan Index[%d] = %u\n", i, info->val[i].fan_table_idx);
+ pr_debug("PPT[%d] = %u\n", i, info->val[i].pmf_ppt);
+ pr_debug("PPT APU[%d] = %u\n", i, info->val[i].ppt_pmf_apu_only);
+ pr_debug("STT Min[%d] = %u\n", i, info->val[i].stt_min_limit);
+ pr_debug("STT APU[%d] = %u\n", i, info->val[i].stt_skin_temp_limit_apu);
+ pr_debug("STT HS2[%d] = %u\n", i, info->val[i].stt_skin_temp_limit_hs2);
+ }
+
+ pr_debug("Static Slider APTS index default values data - END");
+}
#else
static void amd_pmf_dump_sps_defaults(struct amd_pmf_static_slider_granular *data) {}
+static void amd_pmf_dump_sps_defaults_v2(struct amd_pmf_static_slider_granular_v2 *data) {}
+static void amd_pmf_dump_apts_sps_defaults(struct amd_pmf_apts_granular *info) {}
#endif
+static void amd_pmf_load_apts_defaults_sps_v2(struct amd_pmf_dev *pdev)
+{
+ struct amd_pmf_apts_granular_output output;
+ struct amd_pmf_apts_output *ps;
+ int i;
+
+ memset(&apts_config_store, 0, sizeof(apts_config_store));
+
+ ps = apts_config_store.val;
+
+ for (i = 0; i < APTS_MAX_STATES; i++) {
+ apts_get_static_slider_granular_v2(pdev, &output, i);
+ ps[i].table_version = output.val.table_version;
+ ps[i].fan_table_idx = output.val.fan_table_idx;
+ ps[i].pmf_ppt = output.val.pmf_ppt;
+ ps[i].ppt_pmf_apu_only = output.val.ppt_pmf_apu_only;
+ ps[i].stt_min_limit = output.val.stt_min_limit;
+ ps[i].stt_skin_temp_limit_apu = output.val.stt_skin_temp_limit_apu;
+ ps[i].stt_skin_temp_limit_hs2 = output.val.stt_skin_temp_limit_hs2;
+ }
+
+ amd_pmf_dump_apts_sps_defaults(&apts_config_store);
+}
+
+static void amd_pmf_load_defaults_sps_v2(struct amd_pmf_dev *dev)
+{
+ struct apmf_static_slider_granular_output_v2 output;
+ unsigned int i, j;
+
+ memset(&config_store_v2, 0, sizeof(config_store_v2));
+ apmf_get_static_slider_granular_v2(dev, &output);
+
+ config_store_v2.size = output.size;
+
+ for (i = 0; i < POWER_SOURCE_MAX; i++)
+ for (j = 0; j < POWER_MODE_V2_MAX; j++)
+ config_store_v2.sps_idx.power_states[i][j] =
+ output.sps_idx.power_states[i][j];
+
+ amd_pmf_dump_sps_defaults_v2(&config_store_v2);
+}
+
static void amd_pmf_load_defaults_sps(struct amd_pmf_dev *dev)
{
struct apmf_static_slider_granular_output output;
@@ -94,6 +190,19 @@ static void amd_pmf_load_defaults_sps(struct amd_pmf_dev *dev)
amd_pmf_dump_sps_defaults(&config_store);
}
+static void amd_pmf_update_slider_v2(struct amd_pmf_dev *dev, int idx)
+{
+ amd_pmf_send_cmd(dev, SET_PMF_PPT, false, apts_config_store.val[idx].pmf_ppt, NULL);
+ amd_pmf_send_cmd(dev, SET_PMF_PPT_APU_ONLY, false,
+ apts_config_store.val[idx].ppt_pmf_apu_only, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false,
+ apts_config_store.val[idx].stt_min_limit, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
+ apts_config_store.val[idx].stt_skin_temp_limit_apu, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
+ apts_config_store.val[idx].stt_skin_temp_limit_hs2, NULL);
+}
+
void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
struct amd_pmf_static_slider_granular *table)
{
@@ -126,6 +235,32 @@ void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
}
}
+static int amd_pmf_update_sps_power_limits_v2(struct amd_pmf_dev *pdev, int pwr_mode)
+{
+ int src, index;
+
+ src = amd_pmf_get_power_source();
+
+ switch (pwr_mode) {
+ case POWER_MODE_PERFORMANCE:
+ index = config_store_v2.sps_idx.power_states[src][POWER_MODE_BEST_PERFORMANCE];
+ amd_pmf_update_slider_v2(pdev, index);
+ break;
+ case POWER_MODE_BALANCED_POWER:
+ index = config_store_v2.sps_idx.power_states[src][POWER_MODE_BALANCED];
+ amd_pmf_update_slider_v2(pdev, index);
+ break;
+ case POWER_MODE_POWER_SAVER:
+ index = config_store_v2.sps_idx.power_states[src][POWER_MODE_BEST_POWER_EFFICIENCY];
+ amd_pmf_update_slider_v2(pdev, index);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf)
{
int mode;
@@ -134,6 +269,9 @@ int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf)
if (mode < 0)
return mode;
+ if (pmf->pmf_if_version == PMF_IF_V2)
+ return amd_pmf_update_sps_power_limits_v2(pmf, mode);
+
amd_pmf_update_slider(pmf, SLIDER_OP_SET, mode, NULL);
return 0;
@@ -256,7 +394,12 @@ int amd_pmf_init_sps(struct amd_pmf_dev *dev)
dev->current_profile = PLATFORM_PROFILE_BALANCED;
if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
- amd_pmf_load_defaults_sps(dev);
+ if (dev->pmf_if_version == PMF_IF_V2) {
+ amd_pmf_load_defaults_sps_v2(dev);
+ amd_pmf_load_apts_defaults_sps_v2(dev);
+ } else {
+ amd_pmf_load_defaults_sps(dev);
+ }
/* update SPS balanced power mode thermals */
amd_pmf_set_sps_power_limits(dev);
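In the v2 path the slider table and the APTS table are consulted in sequence: the power source and platform-profile mode select an APTS state index, and that index selects the limits sent to the SMU. A simplified, self-contained sketch of that two-level lookup (all names and values below are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define NUM_SOURCES 2	/* AC / DC, stands in for POWER_SOURCE_MAX */
#define NUM_MODES   4	/* stands in for POWER_MODE_V2_MAX */
#define NUM_STATES 16	/* stands in for APTS_MAX_STATES */

struct apts_limits {
	uint32_t ppt;
	uint32_t stt_min;
};

static uint8_t power_states[NUM_SOURCES][NUM_MODES];	/* filled from the v2 slider data */
static struct apts_limits apts_table[NUM_STATES];	/* filled from the APTS data */

static void apply_mode(int src, int mode)
{
	uint8_t idx = power_states[src][mode];		/* first lookup: mode -> APTS index */
	struct apts_limits *l = &apts_table[idx];	/* second lookup: index -> limits */

	printf("mode %d on source %d -> APTS state %u: PPT=%u STT_MIN=%u\n",
	       mode, src, idx, l->ppt, l->stt_min);
}

int main(void)
{
	power_states[0][1] = 5;		/* e.g. "balanced" on AC maps to state 5 */
	apts_table[5].ppt = 30000;	/* fabricated limits for the example */
	apts_table[5].stt_min = 10000;
	apply_mode(0, 1);
	return 0;
}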
diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c
index 4ebfe0f5a..b438de4d6 100644
--- a/drivers/platform/x86/amd/pmf/tee-if.c
+++ b/drivers/platform/x86/amd/pmf/tee-if.c
@@ -246,19 +246,24 @@ static void amd_pmf_invoke_cmd(struct work_struct *work)
static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev)
{
- u32 cookie, length;
+ struct cookie_header *header;
int res;
- cookie = *(u32 *)(dev->policy_buf + POLICY_COOKIE_OFFSET);
- length = *(u32 *)(dev->policy_buf + POLICY_COOKIE_LEN);
+ if (dev->policy_sz < POLICY_COOKIE_OFFSET + sizeof(*header))
+ return -EINVAL;
+
+ header = (struct cookie_header *)(dev->policy_buf + POLICY_COOKIE_OFFSET);
- if (cookie != POLICY_SIGN_COOKIE || !length) {
+ if (header->sign != POLICY_SIGN_COOKIE || !header->length) {
dev_dbg(dev->dev, "cookie doesn't match\n");
return -EINVAL;
}
+ if (dev->policy_sz < header->length + 512)
+ return -EINVAL;
+
/* Update the actual length */
- dev->policy_sz = length + 512;
+ dev->policy_sz = header->length + 512;
res = amd_pmf_invoke_cmd_init(dev);
if (res == TA_PMF_TYPE_SUCCESS) {
/* Now its safe to announce that smart pc is enabled */
@@ -271,7 +276,7 @@ static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev)
} else {
dev_err(dev->dev, "ta invoke cmd init failed err: %x\n", res);
dev->smart_pc_enabled = false;
- return res;
+ return -EIO;
}
return 0;
@@ -311,8 +316,8 @@ static ssize_t amd_pmf_get_pb_data(struct file *filp, const char __user *buf,
amd_pmf_hex_dump_pb(dev);
ret = amd_pmf_start_policy_engine(dev);
- if (ret)
- return -EINVAL;
+ if (ret < 0)
+ return ret;
return length;
}
@@ -453,7 +458,7 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
goto error;
}
- memcpy(dev->policy_buf, dev->policy_base, dev->policy_sz);
+ memcpy_fromio(dev->policy_buf, dev->policy_base, dev->policy_sz);
amd_pmf_hex_dump_pb(dev);
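The cookie_header change replaces the old fixed-offset cookie and length reads with a bounds-checked structure, the effective policy size being the advertised length plus 512 bytes. A minimal host-side sketch of the same validation against a fabricated blob, reusing the constants from pmf.h:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define POLICY_SIGN_COOKIE   0x31535024
#define POLICY_COOKIE_OFFSET 0x10

struct cookie_header {
	uint32_t sign;
	uint32_t length;
} __attribute__((packed));

/* Returns the usable policy size, or -1 if the blob fails the same checks
 * amd_pmf_start_policy_engine() now performs. */
static int policy_size(const uint8_t *buf, size_t buf_sz)
{
	struct cookie_header hdr;

	if (buf_sz < POLICY_COOKIE_OFFSET + sizeof(hdr))
		return -1;
	memcpy(&hdr, buf + POLICY_COOKIE_OFFSET, sizeof(hdr));
	if (hdr.sign != POLICY_SIGN_COOKIE || !hdr.length)
		return -1;
	if (buf_sz < hdr.length + 512)
		return -1;
	return (int)(hdr.length + 512);
}

int main(void)
{
	uint8_t blob[4096] = { 0 };
	struct cookie_header hdr = { POLICY_SIGN_COOKIE, 1024 };

	memcpy(blob + POLICY_COOKIE_OFFSET, &hdr, sizeof(hdr));
	printf("effective policy size: %d\n", policy_size(blob, sizeof(blob)));
	return 0;
}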