Diffstat (limited to 'drivers/platform/x86/amd/pmf')
-rw-r--r--  drivers/platform/x86/amd/pmf/Kconfig     |  18
-rw-r--r--  drivers/platform/x86/amd/pmf/Makefile    |   9
-rw-r--r--  drivers/platform/x86/amd/pmf/acpi.c      | 325
-rw-r--r--  drivers/platform/x86/amd/pmf/auto-mode.c | 298
-rw-r--r--  drivers/platform/x86/amd/pmf/cnqf.c      | 391
-rw-r--r--  drivers/platform/x86/amd/pmf/core.c      | 448
-rw-r--r--  drivers/platform/x86/amd/pmf/pmf.h       | 436
-rw-r--r--  drivers/platform/x86/amd/pmf/sps.c       | 229
8 files changed, 2154 insertions(+), 0 deletions(-)
diff --git a/drivers/platform/x86/amd/pmf/Kconfig b/drivers/platform/x86/amd/pmf/Kconfig new file mode 100644 index 000000000..d87986adf --- /dev/null +++ b/drivers/platform/x86/amd/pmf/Kconfig @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# AMD PMF Driver +# + +config AMD_PMF + tristate "AMD Platform Management Framework" + depends on ACPI && PCI + depends on POWER_SUPPLY + depends on AMD_NB + select ACPI_PLATFORM_PROFILE + help + This driver provides support for the AMD Platform Management Framework. + The goal is to enhance end user experience by making AMD PCs smarter, + quieter, and more power efficient by adapting to user behavior and environment. + + To compile this driver as a module, choose M here: the module will + be called amd_pmf. diff --git a/drivers/platform/x86/amd/pmf/Makefile b/drivers/platform/x86/amd/pmf/Makefile new file mode 100644 index 000000000..fdededf54 --- /dev/null +++ b/drivers/platform/x86/amd/pmf/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for linux/drivers/platform/x86/amd/pmf +# AMD Platform Management Framework +# + +obj-$(CONFIG_AMD_PMF) += amd-pmf.o +amd-pmf-objs := core.o acpi.o sps.o \ + auto-mode.o cnqf.o diff --git a/drivers/platform/x86/amd/pmf/acpi.c b/drivers/platform/x86/amd/pmf/acpi.c new file mode 100644 index 000000000..3fc5e4547 --- /dev/null +++ b/drivers/platform/x86/amd/pmf/acpi.c @@ -0,0 +1,325 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * AMD Platform Management Framework Driver + * + * Copyright (c) 2022, Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com> + */ + +#include <linux/acpi.h> + #include "pmf.h" + +#define APMF_CQL_NOTIFICATION 2 +#define APMF_AMT_NOTIFICATION 3 + +static union acpi_object *apmf_if_call(struct amd_pmf_dev *pdev, int fn, struct acpi_buffer *param) +{ + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + acpi_handle ahandle = ACPI_HANDLE(pdev->dev); + struct acpi_object_list apmf_if_arg_list; + union acpi_object apmf_if_args[2]; + acpi_status status; + + apmf_if_arg_list.count = 2; + apmf_if_arg_list.pointer = &apmf_if_args[0]; + + apmf_if_args[0].type = ACPI_TYPE_INTEGER; + apmf_if_args[0].integer.value = fn; + + if (param) { + apmf_if_args[1].type = ACPI_TYPE_BUFFER; + apmf_if_args[1].buffer.length = param->length; + apmf_if_args[1].buffer.pointer = param->pointer; + } else { + apmf_if_args[1].type = ACPI_TYPE_INTEGER; + apmf_if_args[1].integer.value = 0; + } + + status = acpi_evaluate_object(ahandle, "APMF", &apmf_if_arg_list, &buffer); + if (ACPI_FAILURE(status)) { + dev_err(pdev->dev, "APMF method:%d call failed\n", fn); + kfree(buffer.pointer); + return NULL; + } + + return buffer.pointer; +} + +static int apmf_if_call_store_buffer(struct amd_pmf_dev *pdev, int fn, void *dest, size_t out_sz) +{ + union acpi_object *info; + size_t size; + int err = 0; + + info = apmf_if_call(pdev, fn, NULL); + if (!info) + return -EIO; + + if (info->type != ACPI_TYPE_BUFFER) { + dev_err(pdev->dev, "object is not a buffer\n"); + err = -EINVAL; + goto out; + } + + if (info->buffer.length < 2) { + dev_err(pdev->dev, "buffer too small\n"); + err = -EINVAL; + goto out; + } + + size = *(u16 *)info->buffer.pointer; + if (info->buffer.length < size) { + dev_err(pdev->dev, "buffer smaller than header size %u < %zu\n", + info->buffer.length, size); + err = -EINVAL; + goto out; + } + + if (size < out_sz) { + dev_err(pdev->dev, "buffer too small %zu\n", size); + err = -EINVAL; + goto out; + } + + memcpy(dest, 
info->buffer.pointer, out_sz); + +out: + kfree(info); + return err; +} + +int is_apmf_func_supported(struct amd_pmf_dev *pdev, unsigned long index) +{ + /* If bit-n is set, that indicates function n+1 is supported */ + return !!(pdev->supported_func & BIT(index - 1)); +} + +int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev, + struct apmf_static_slider_granular_output *data) +{ + if (!is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) + return -EINVAL; + + return apmf_if_call_store_buffer(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR, + data, sizeof(*data)); +} + +int apmf_os_power_slider_update(struct amd_pmf_dev *pdev, u8 event) +{ + struct os_power_slider args; + struct acpi_buffer params; + union acpi_object *info; + int err = 0; + + args.size = sizeof(args); + args.slider_event = event; + + params.length = sizeof(args); + params.pointer = (void *)&args; + + info = apmf_if_call(pdev, APMF_FUNC_OS_POWER_SLIDER_UPDATE, &params); + if (!info) + err = -EIO; + + kfree(info); + return err; +} + +static void apmf_sbios_heartbeat_notify(struct work_struct *work) +{ + struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, heart_beat.work); + union acpi_object *info; + + dev_dbg(dev->dev, "Sending heartbeat to SBIOS\n"); + info = apmf_if_call(dev, APMF_FUNC_SBIOS_HEARTBEAT, NULL); + if (!info) + goto out; + + schedule_delayed_work(&dev->heart_beat, msecs_to_jiffies(dev->hb_interval * 1000)); + +out: + kfree(info); +} + +int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx) +{ + union acpi_object *info; + struct apmf_fan_idx args; + struct acpi_buffer params; + int err = 0; + + args.size = sizeof(args); + args.fan_ctl_mode = manual; + args.fan_ctl_idx = idx; + + params.length = sizeof(args); + params.pointer = (void *)&args; + + info = apmf_if_call(pdev, APMF_FUNC_SET_FAN_IDX, &params); + if (!info) { + err = -EIO; + goto out; + } + +out: + kfree(info); + return err; +} + +int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data) +{ + return apmf_if_call_store_buffer(pdev, APMF_FUNC_AUTO_MODE, data, sizeof(*data)); +} + +int apmf_get_sbios_requests(struct amd_pmf_dev *pdev, struct apmf_sbios_req *req) +{ + return apmf_if_call_store_buffer(pdev, APMF_FUNC_SBIOS_REQUESTS, + req, sizeof(*req)); +} + +static void apmf_event_handler(acpi_handle handle, u32 event, void *data) +{ + struct amd_pmf_dev *pmf_dev = data; + struct apmf_sbios_req req; + int ret; + + mutex_lock(&pmf_dev->update_mutex); + ret = apmf_get_sbios_requests(pmf_dev, &req); + if (ret) { + dev_err(pmf_dev->dev, "Failed to get SBIOS requests:%d\n", ret); + goto out; + } + + if (req.pending_req & BIT(APMF_AMT_NOTIFICATION)) { + dev_dbg(pmf_dev->dev, "AMT is supported and notifications %s\n", + req.amt_event ? "Enabled" : "Disabled"); + pmf_dev->amt_enabled = !!req.amt_event; + + if (pmf_dev->amt_enabled) + amd_pmf_handle_amt(pmf_dev); + else + amd_pmf_reset_amt(pmf_dev); + } + + if (req.pending_req & BIT(APMF_CQL_NOTIFICATION)) { + dev_dbg(pmf_dev->dev, "CQL is supported and notifications %s\n", + req.cql_event ? 
"Enabled" : "Disabled"); + + /* update the target mode information */ + if (pmf_dev->amt_enabled) + amd_pmf_update_2_cql(pmf_dev, req.cql_event); + } +out: + mutex_unlock(&pmf_dev->update_mutex); +} + +static int apmf_if_verify_interface(struct amd_pmf_dev *pdev) +{ + struct apmf_verify_interface output; + int err; + + err = apmf_if_call_store_buffer(pdev, APMF_FUNC_VERIFY_INTERFACE, &output, sizeof(output)); + if (err) + return err; + + pdev->supported_func = output.supported_functions; + dev_dbg(pdev->dev, "supported functions:0x%x notifications:0x%x\n", + output.supported_functions, output.notification_mask); + + return 0; +} + +static int apmf_get_system_params(struct amd_pmf_dev *dev) +{ + struct apmf_system_params params; + int err; + + if (!is_apmf_func_supported(dev, APMF_FUNC_GET_SYS_PARAMS)) + return -EINVAL; + + err = apmf_if_call_store_buffer(dev, APMF_FUNC_GET_SYS_PARAMS, ¶ms, sizeof(params)); + if (err) + return err; + + dev_dbg(dev->dev, "system params mask:0x%x flags:0x%x cmd_code:0x%x heartbeat:%d\n", + params.valid_mask, + params.flags, + params.command_code, + params.heartbeat_int); + params.flags = params.flags & params.valid_mask; + dev->hb_interval = params.heartbeat_int; + + return 0; +} + +int apmf_get_dyn_slider_def_ac(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_output *data) +{ + return apmf_if_call_store_buffer(pdev, APMF_FUNC_DYN_SLIDER_AC, data, sizeof(*data)); +} + +int apmf_get_dyn_slider_def_dc(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_output *data) +{ + return apmf_if_call_store_buffer(pdev, APMF_FUNC_DYN_SLIDER_DC, data, sizeof(*data)); +} + +int apmf_install_handler(struct amd_pmf_dev *pmf_dev) +{ + acpi_handle ahandle = ACPI_HANDLE(pmf_dev->dev); + acpi_status status; + + /* Install the APMF Notify handler */ + if (is_apmf_func_supported(pmf_dev, APMF_FUNC_AUTO_MODE) && + is_apmf_func_supported(pmf_dev, APMF_FUNC_SBIOS_REQUESTS)) { + status = acpi_install_notify_handler(ahandle, ACPI_ALL_NOTIFY, + apmf_event_handler, pmf_dev); + if (ACPI_FAILURE(status)) { + dev_err(pmf_dev->dev, "failed to install notify handler\n"); + return -ENODEV; + } + + /* Call the handler once manually to catch up with possibly missed notifies. 
*/ + apmf_event_handler(ahandle, 0, pmf_dev); + } + + return 0; +} + +void apmf_acpi_deinit(struct amd_pmf_dev *pmf_dev) +{ + acpi_handle ahandle = ACPI_HANDLE(pmf_dev->dev); + + if (pmf_dev->hb_interval) + cancel_delayed_work_sync(&pmf_dev->heart_beat); + + if (is_apmf_func_supported(pmf_dev, APMF_FUNC_AUTO_MODE) && + is_apmf_func_supported(pmf_dev, APMF_FUNC_SBIOS_REQUESTS)) + acpi_remove_notify_handler(ahandle, ACPI_ALL_NOTIFY, apmf_event_handler); +} + +int apmf_acpi_init(struct amd_pmf_dev *pmf_dev) +{ + int ret; + + ret = apmf_if_verify_interface(pmf_dev); + if (ret) { + dev_err(pmf_dev->dev, "APMF verify interface failed :%d\n", ret); + goto out; + } + + ret = apmf_get_system_params(pmf_dev); + if (ret) { + dev_dbg(pmf_dev->dev, "APMF apmf_get_system_params failed :%d\n", ret); + goto out; + } + + if (pmf_dev->hb_interval) { + /* send heartbeats only if the interval is not zero */ + INIT_DELAYED_WORK(&pmf_dev->heart_beat, apmf_sbios_heartbeat_notify); + schedule_delayed_work(&pmf_dev->heart_beat, 0); + } + +out: + return ret; +} diff --git a/drivers/platform/x86/amd/pmf/auto-mode.c b/drivers/platform/x86/amd/pmf/auto-mode.c new file mode 100644 index 000000000..96a8e1832 --- /dev/null +++ b/drivers/platform/x86/amd/pmf/auto-mode.c @@ -0,0 +1,298 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * AMD Platform Management Framework Driver + * + * Copyright (c) 2022, Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com> + */ + +#include <linux/acpi.h> +#include <linux/workqueue.h> +#include "pmf.h" + +static struct auto_mode_mode_config config_store; +static const char *state_as_str(unsigned int state); + +static void amd_pmf_set_automode(struct amd_pmf_dev *dev, int idx, + struct auto_mode_mode_config *table) +{ + struct power_table_control *pwr_ctrl = &config_store.mode_set[idx].power_control; + + amd_pmf_send_cmd(dev, SET_SPL, false, pwr_ctrl->spl, NULL); + amd_pmf_send_cmd(dev, SET_FPPT, false, pwr_ctrl->fppt, NULL); + amd_pmf_send_cmd(dev, SET_SPPT, false, pwr_ctrl->sppt, NULL); + amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pwr_ctrl->sppt_apu_only, NULL); + amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pwr_ctrl->stt_min, NULL); + amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, + pwr_ctrl->stt_skin_temp[STT_TEMP_APU], NULL); + amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, + pwr_ctrl->stt_skin_temp[STT_TEMP_HS2], NULL); + + if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX)) + apmf_update_fan_idx(dev, config_store.mode_set[idx].fan_control.manual, + config_store.mode_set[idx].fan_control.fan_id); +} + +static int amd_pmf_get_moving_avg(struct amd_pmf_dev *pdev, int socket_power) +{ + int i, total = 0; + + if (pdev->socket_power_history_idx == -1) { + for (i = 0; i < AVG_SAMPLE_SIZE; i++) + pdev->socket_power_history[i] = socket_power; + } + + pdev->socket_power_history_idx = (pdev->socket_power_history_idx + 1) % AVG_SAMPLE_SIZE; + pdev->socket_power_history[pdev->socket_power_history_idx] = socket_power; + + for (i = 0; i < AVG_SAMPLE_SIZE; i++) + total += pdev->socket_power_history[i]; + + return total / AVG_SAMPLE_SIZE; +} + +void amd_pmf_trans_automode(struct amd_pmf_dev *dev, int socket_power, ktime_t time_elapsed_ms) +{ + int avg_power = 0; + bool update = false; + int i, j; + + /* Get the moving average computed by the auto mode algorithm */ + avg_power = amd_pmf_get_moving_avg(dev, socket_power); + + for (i = 0; i < AUTO_TRANSITION_MAX; i++) { + if ((config_store.transition[i].shifting_up && avg_power >= 
+ config_store.transition[i].power_threshold) || + (!config_store.transition[i].shifting_up && avg_power <= + config_store.transition[i].power_threshold)) { + if (config_store.transition[i].timer < + config_store.transition[i].time_constant) + config_store.transition[i].timer += time_elapsed_ms; + } else { + config_store.transition[i].timer = 0; + } + + if (config_store.transition[i].timer >= + config_store.transition[i].time_constant && + !config_store.transition[i].applied) { + config_store.transition[i].applied = true; + update = true; + } else if (config_store.transition[i].timer <= + config_store.transition[i].time_constant && + config_store.transition[i].applied) { + config_store.transition[i].applied = false; + update = true; + } + } + + dev_dbg(dev->dev, "[AUTO_MODE] avg power: %u mW mode: %s\n", avg_power, + state_as_str(config_store.current_mode)); + + if (update) { + for (j = 0; j < AUTO_TRANSITION_MAX; j++) { + /* Apply the mode with highest priority identified */ + if (config_store.transition[j].applied) { + if (config_store.current_mode != + config_store.transition[j].target_mode) { + config_store.current_mode = + config_store.transition[j].target_mode; + dev_dbg(dev->dev, "[AUTO_MODE] moving to mode:%s\n", + state_as_str(config_store.current_mode)); + amd_pmf_set_automode(dev, config_store.current_mode, NULL); + } + break; + } + } + } +} + +void amd_pmf_update_2_cql(struct amd_pmf_dev *dev, bool is_cql_event) +{ + int mode = config_store.current_mode; + + config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode = + is_cql_event ? AUTO_PERFORMANCE_ON_LAP : AUTO_PERFORMANCE; + + if ((mode == AUTO_PERFORMANCE || mode == AUTO_PERFORMANCE_ON_LAP) && + mode != config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode) { + mode = config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode; + amd_pmf_set_automode(dev, mode, NULL); + } + dev_dbg(dev->dev, "updated CQL thermals\n"); +} + +static void amd_pmf_get_power_threshold(void) +{ + config_store.transition[AUTO_TRANSITION_TO_QUIET].power_threshold = + config_store.mode_set[AUTO_BALANCE].power_floor - + config_store.transition[AUTO_TRANSITION_TO_QUIET].power_delta; + + config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_threshold = + config_store.mode_set[AUTO_BALANCE].power_floor - + config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_delta; + + config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_threshold = + config_store.mode_set[AUTO_QUIET].power_floor - + config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_delta; + + config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_threshold = + config_store.mode_set[AUTO_PERFORMANCE].power_floor - + config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_delta; +} + +static const char *state_as_str(unsigned int state) +{ + switch (state) { + case AUTO_QUIET: + return "QUIET"; + case AUTO_BALANCE: + return "BALANCED"; + case AUTO_PERFORMANCE_ON_LAP: + return "ON_LAP"; + case AUTO_PERFORMANCE: + return "PERFORMANCE"; + default: + return "Unknown Auto Mode State"; + } +} + +static void amd_pmf_load_defaults_auto_mode(struct amd_pmf_dev *dev) +{ + struct apmf_auto_mode output; + struct power_table_control *pwr_ctrl; + int i; + + apmf_get_auto_mode_def(dev, &output); + /* time constant */ + config_store.transition[AUTO_TRANSITION_TO_QUIET].time_constant = + output.balanced_to_quiet; + config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].time_constant = + 
output.balanced_to_perf; + config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].time_constant = + output.quiet_to_balanced; + config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].time_constant = + output.perf_to_balanced; + + /* power floor */ + config_store.mode_set[AUTO_QUIET].power_floor = output.pfloor_quiet; + config_store.mode_set[AUTO_BALANCE].power_floor = output.pfloor_balanced; + config_store.mode_set[AUTO_PERFORMANCE].power_floor = output.pfloor_perf; + config_store.mode_set[AUTO_PERFORMANCE_ON_LAP].power_floor = output.pfloor_perf; + + /* Power delta for mode change */ + config_store.transition[AUTO_TRANSITION_TO_QUIET].power_delta = + output.pd_balanced_to_quiet; + config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_delta = + output.pd_balanced_to_perf; + config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_delta = + output.pd_quiet_to_balanced; + config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_delta = + output.pd_perf_to_balanced; + + /* Power threshold */ + amd_pmf_get_power_threshold(); + + /* skin temperature limits */ + pwr_ctrl = &config_store.mode_set[AUTO_QUIET].power_control; + pwr_ctrl->spl = output.spl_quiet; + pwr_ctrl->sppt = output.sppt_quiet; + pwr_ctrl->fppt = output.fppt_quiet; + pwr_ctrl->sppt_apu_only = output.sppt_apu_only_quiet; + pwr_ctrl->stt_min = output.stt_min_limit_quiet; + pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_quiet; + pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_quiet; + + pwr_ctrl = &config_store.mode_set[AUTO_BALANCE].power_control; + pwr_ctrl->spl = output.spl_balanced; + pwr_ctrl->sppt = output.sppt_balanced; + pwr_ctrl->fppt = output.fppt_balanced; + pwr_ctrl->sppt_apu_only = output.sppt_apu_only_balanced; + pwr_ctrl->stt_min = output.stt_min_limit_balanced; + pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_balanced; + pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_balanced; + + pwr_ctrl = &config_store.mode_set[AUTO_PERFORMANCE].power_control; + pwr_ctrl->spl = output.spl_perf; + pwr_ctrl->sppt = output.sppt_perf; + pwr_ctrl->fppt = output.fppt_perf; + pwr_ctrl->sppt_apu_only = output.sppt_apu_only_perf; + pwr_ctrl->stt_min = output.stt_min_limit_perf; + pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_perf; + pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_perf; + + pwr_ctrl = &config_store.mode_set[AUTO_PERFORMANCE_ON_LAP].power_control; + pwr_ctrl->spl = output.spl_perf_on_lap; + pwr_ctrl->sppt = output.sppt_perf_on_lap; + pwr_ctrl->fppt = output.fppt_perf_on_lap; + pwr_ctrl->sppt_apu_only = output.sppt_apu_only_perf_on_lap; + pwr_ctrl->stt_min = output.stt_min_limit_perf_on_lap; + pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_perf_on_lap; + pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_perf_on_lap; + + /* Fan ID */ + config_store.mode_set[AUTO_QUIET].fan_control.fan_id = output.fan_id_quiet; + config_store.mode_set[AUTO_BALANCE].fan_control.fan_id = output.fan_id_balanced; + config_store.mode_set[AUTO_PERFORMANCE].fan_control.fan_id = output.fan_id_perf; + config_store.mode_set[AUTO_PERFORMANCE_ON_LAP].fan_control.fan_id = + output.fan_id_perf; + + config_store.transition[AUTO_TRANSITION_TO_QUIET].target_mode = AUTO_QUIET; + config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode = + AUTO_PERFORMANCE; + config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].target_mode = + AUTO_BALANCE; + config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].target_mode = 
+ AUTO_BALANCE; + + config_store.transition[AUTO_TRANSITION_TO_QUIET].shifting_up = false; + config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].shifting_up = true; + config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].shifting_up = true; + config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].shifting_up = + false; + + for (i = 0 ; i < AUTO_MODE_MAX ; i++) { + if (config_store.mode_set[i].fan_control.fan_id == FAN_INDEX_AUTO) + config_store.mode_set[i].fan_control.manual = false; + else + config_store.mode_set[i].fan_control.manual = true; + } + + /* set to initial default values */ + config_store.current_mode = AUTO_BALANCE; + dev->socket_power_history_idx = -1; +} + +int amd_pmf_reset_amt(struct amd_pmf_dev *dev) +{ + /* + * OEM BIOS implementation guide says that if the auto mode is enabled + * the platform_profile registration shall be done by the OEM driver. + * There could be cases where both static slider and auto mode BIOS + * functions are enabled, in that case enable static slider updates + * only if it is advertised as supported. + */ + + if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) { + dev_dbg(dev->dev, "resetting AMT thermals\n"); + amd_pmf_set_sps_power_limits(dev); + } + return 0; +} + +void amd_pmf_handle_amt(struct amd_pmf_dev *dev) +{ + amd_pmf_set_automode(dev, config_store.current_mode, NULL); +} + +void amd_pmf_deinit_auto_mode(struct amd_pmf_dev *dev) +{ + cancel_delayed_work_sync(&dev->work_buffer); +} + +void amd_pmf_init_auto_mode(struct amd_pmf_dev *dev) +{ + amd_pmf_load_defaults_auto_mode(dev); + amd_pmf_init_metrics_table(dev); +} diff --git a/drivers/platform/x86/amd/pmf/cnqf.c b/drivers/platform/x86/amd/pmf/cnqf.c new file mode 100644 index 000000000..f39275ec5 --- /dev/null +++ b/drivers/platform/x86/amd/pmf/cnqf.c @@ -0,0 +1,391 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * AMD Platform Management Framework Driver + * + * Copyright (c) 2022, Advanced Micro Devices, Inc. + * All Rights Reserved. 
+ * + * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com> + */ + +#include <linux/workqueue.h> +#include "pmf.h" + +static struct cnqf_config config_store; + +static int amd_pmf_set_cnqf(struct amd_pmf_dev *dev, int src, int idx, + struct cnqf_config *table) +{ + struct power_table_control *pc; + + pc = &config_store.mode_set[src][idx].power_control; + + amd_pmf_send_cmd(dev, SET_SPL, false, pc->spl, NULL); + amd_pmf_send_cmd(dev, SET_FPPT, false, pc->fppt, NULL); + amd_pmf_send_cmd(dev, SET_SPPT, false, pc->sppt, NULL); + amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pc->sppt_apu_only, NULL); + amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pc->stt_min, NULL); + amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, pc->stt_skin_temp[STT_TEMP_APU], + NULL); + amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, pc->stt_skin_temp[STT_TEMP_HS2], + NULL); + + if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX)) + apmf_update_fan_idx(dev, + config_store.mode_set[src][idx].fan_control.manual, + config_store.mode_set[src][idx].fan_control.fan_id); + + return 0; +} + +static void amd_pmf_update_power_threshold(int src) +{ + struct cnqf_mode_settings *ts; + struct cnqf_tran_params *tp; + + tp = &config_store.trans_param[src][CNQF_TRANSITION_TO_QUIET]; + ts = &config_store.mode_set[src][CNQF_MODE_BALANCE]; + tp->power_threshold = ts->power_floor; + + tp = &config_store.trans_param[src][CNQF_TRANSITION_TO_TURBO]; + ts = &config_store.mode_set[src][CNQF_MODE_PERFORMANCE]; + tp->power_threshold = ts->power_floor; + + tp = &config_store.trans_param[src][CNQF_TRANSITION_FROM_BALANCE_TO_PERFORMANCE]; + ts = &config_store.mode_set[src][CNQF_MODE_BALANCE]; + tp->power_threshold = ts->power_floor; + + tp = &config_store.trans_param[src][CNQF_TRANSITION_FROM_PERFORMANCE_TO_BALANCE]; + ts = &config_store.mode_set[src][CNQF_MODE_PERFORMANCE]; + tp->power_threshold = ts->power_floor; + + tp = &config_store.trans_param[src][CNQF_TRANSITION_FROM_QUIET_TO_BALANCE]; + ts = &config_store.mode_set[src][CNQF_MODE_QUIET]; + tp->power_threshold = ts->power_floor; + + tp = &config_store.trans_param[src][CNQF_TRANSITION_FROM_TURBO_TO_PERFORMANCE]; + ts = &config_store.mode_set[src][CNQF_MODE_TURBO]; + tp->power_threshold = ts->power_floor; +} + +static const char *state_as_str(unsigned int state) +{ + switch (state) { + case CNQF_MODE_QUIET: + return "QUIET"; + case CNQF_MODE_BALANCE: + return "BALANCED"; + case CNQF_MODE_TURBO: + return "TURBO"; + case CNQF_MODE_PERFORMANCE: + return "PERFORMANCE"; + default: + return "Unknown CnQF mode"; + } +} + +static int amd_pmf_cnqf_get_power_source(struct amd_pmf_dev *dev) +{ + if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) && + is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) + return amd_pmf_get_power_source(); + else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) + return POWER_SOURCE_DC; + else + return POWER_SOURCE_AC; +} + +int amd_pmf_trans_cnqf(struct amd_pmf_dev *dev, int socket_power, ktime_t time_lapsed_ms) +{ + struct cnqf_tran_params *tp; + int src, i, j; + u32 avg_power = 0; + + src = amd_pmf_cnqf_get_power_source(dev); + + if (is_pprof_balanced(dev)) { + amd_pmf_set_cnqf(dev, src, config_store.current_mode, NULL); + } else { + /* + * Return from here if the platform_profile is not balanced + * so that preference is given to user mode selection, rather + * than enforcing CnQF to run all the time (if enabled) + */ + return -EINVAL; + } + + for (i = 0; i < CNQF_TRANSITION_MAX; i++) { + config_store.trans_param[src][i].timer += time_lapsed_ms; + 
config_store.trans_param[src][i].total_power += socket_power; + config_store.trans_param[src][i].count++; + + tp = &config_store.trans_param[src][i]; + if (tp->timer >= tp->time_constant && tp->count) { + avg_power = tp->total_power / tp->count; + + /* Reset the indices */ + tp->timer = 0; + tp->total_power = 0; + tp->count = 0; + + if ((tp->shifting_up && avg_power >= tp->power_threshold) || + (!tp->shifting_up && avg_power <= tp->power_threshold)) { + tp->priority = true; + } else { + tp->priority = false; + } + } + } + + dev_dbg(dev->dev, "[CNQF] Avg power: %u mW socket power: %u mW mode:%s\n", + avg_power, socket_power, state_as_str(config_store.current_mode)); + + for (j = 0; j < CNQF_TRANSITION_MAX; j++) { + /* apply the highest priority */ + if (config_store.trans_param[src][j].priority) { + if (config_store.current_mode != + config_store.trans_param[src][j].target_mode) { + config_store.current_mode = + config_store.trans_param[src][j].target_mode; + dev_dbg(dev->dev, "Moving to Mode :%s\n", + state_as_str(config_store.current_mode)); + amd_pmf_set_cnqf(dev, src, + config_store.current_mode, NULL); + } + break; + } + } + return 0; +} + +static void amd_pmf_update_trans_data(int idx, struct apmf_dyn_slider_output out) +{ + struct cnqf_tran_params *tp; + + tp = &config_store.trans_param[idx][CNQF_TRANSITION_TO_QUIET]; + tp->time_constant = out.t_balanced_to_quiet; + tp->target_mode = CNQF_MODE_QUIET; + tp->shifting_up = false; + + tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_BALANCE_TO_PERFORMANCE]; + tp->time_constant = out.t_balanced_to_perf; + tp->target_mode = CNQF_MODE_PERFORMANCE; + tp->shifting_up = true; + + tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_QUIET_TO_BALANCE]; + tp->time_constant = out.t_quiet_to_balanced; + tp->target_mode = CNQF_MODE_BALANCE; + tp->shifting_up = true; + + tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_PERFORMANCE_TO_BALANCE]; + tp->time_constant = out.t_perf_to_balanced; + tp->target_mode = CNQF_MODE_BALANCE; + tp->shifting_up = false; + + tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_TURBO_TO_PERFORMANCE]; + tp->time_constant = out.t_turbo_to_perf; + tp->target_mode = CNQF_MODE_PERFORMANCE; + tp->shifting_up = false; + + tp = &config_store.trans_param[idx][CNQF_TRANSITION_TO_TURBO]; + tp->time_constant = out.t_perf_to_turbo; + tp->target_mode = CNQF_MODE_TURBO; + tp->shifting_up = true; +} + +static void amd_pmf_update_mode_set(int idx, struct apmf_dyn_slider_output out) +{ + struct cnqf_mode_settings *ms; + + /* Quiet Mode */ + ms = &config_store.mode_set[idx][CNQF_MODE_QUIET]; + ms->power_floor = out.ps[APMF_CNQF_QUIET].pfloor; + ms->power_control.fppt = out.ps[APMF_CNQF_QUIET].fppt; + ms->power_control.sppt = out.ps[APMF_CNQF_QUIET].sppt; + ms->power_control.sppt_apu_only = out.ps[APMF_CNQF_QUIET].sppt_apu_only; + ms->power_control.spl = out.ps[APMF_CNQF_QUIET].spl; + ms->power_control.stt_min = out.ps[APMF_CNQF_QUIET].stt_min_limit; + ms->power_control.stt_skin_temp[STT_TEMP_APU] = + out.ps[APMF_CNQF_QUIET].stt_skintemp[STT_TEMP_APU]; + ms->power_control.stt_skin_temp[STT_TEMP_HS2] = + out.ps[APMF_CNQF_QUIET].stt_skintemp[STT_TEMP_HS2]; + ms->fan_control.fan_id = out.ps[APMF_CNQF_QUIET].fan_id; + + /* Balance Mode */ + ms = &config_store.mode_set[idx][CNQF_MODE_BALANCE]; + ms->power_floor = out.ps[APMF_CNQF_BALANCE].pfloor; + ms->power_control.fppt = out.ps[APMF_CNQF_BALANCE].fppt; + ms->power_control.sppt = out.ps[APMF_CNQF_BALANCE].sppt; + ms->power_control.sppt_apu_only = 
out.ps[APMF_CNQF_BALANCE].sppt_apu_only; + ms->power_control.spl = out.ps[APMF_CNQF_BALANCE].spl; + ms->power_control.stt_min = out.ps[APMF_CNQF_BALANCE].stt_min_limit; + ms->power_control.stt_skin_temp[STT_TEMP_APU] = + out.ps[APMF_CNQF_BALANCE].stt_skintemp[STT_TEMP_APU]; + ms->power_control.stt_skin_temp[STT_TEMP_HS2] = + out.ps[APMF_CNQF_BALANCE].stt_skintemp[STT_TEMP_HS2]; + ms->fan_control.fan_id = out.ps[APMF_CNQF_BALANCE].fan_id; + + /* Performance Mode */ + ms = &config_store.mode_set[idx][CNQF_MODE_PERFORMANCE]; + ms->power_floor = out.ps[APMF_CNQF_PERFORMANCE].pfloor; + ms->power_control.fppt = out.ps[APMF_CNQF_PERFORMANCE].fppt; + ms->power_control.sppt = out.ps[APMF_CNQF_PERFORMANCE].sppt; + ms->power_control.sppt_apu_only = out.ps[APMF_CNQF_PERFORMANCE].sppt_apu_only; + ms->power_control.spl = out.ps[APMF_CNQF_PERFORMANCE].spl; + ms->power_control.stt_min = out.ps[APMF_CNQF_PERFORMANCE].stt_min_limit; + ms->power_control.stt_skin_temp[STT_TEMP_APU] = + out.ps[APMF_CNQF_PERFORMANCE].stt_skintemp[STT_TEMP_APU]; + ms->power_control.stt_skin_temp[STT_TEMP_HS2] = + out.ps[APMF_CNQF_PERFORMANCE].stt_skintemp[STT_TEMP_HS2]; + ms->fan_control.fan_id = out.ps[APMF_CNQF_PERFORMANCE].fan_id; + + /* Turbo Mode */ + ms = &config_store.mode_set[idx][CNQF_MODE_TURBO]; + ms->power_floor = out.ps[APMF_CNQF_TURBO].pfloor; + ms->power_control.fppt = out.ps[APMF_CNQF_TURBO].fppt; + ms->power_control.sppt = out.ps[APMF_CNQF_TURBO].sppt; + ms->power_control.sppt_apu_only = out.ps[APMF_CNQF_TURBO].sppt_apu_only; + ms->power_control.spl = out.ps[APMF_CNQF_TURBO].spl; + ms->power_control.stt_min = out.ps[APMF_CNQF_TURBO].stt_min_limit; + ms->power_control.stt_skin_temp[STT_TEMP_APU] = + out.ps[APMF_CNQF_TURBO].stt_skintemp[STT_TEMP_APU]; + ms->power_control.stt_skin_temp[STT_TEMP_HS2] = + out.ps[APMF_CNQF_TURBO].stt_skintemp[STT_TEMP_HS2]; + ms->fan_control.fan_id = out.ps[APMF_CNQF_TURBO].fan_id; +} + +static int amd_pmf_check_flags(struct amd_pmf_dev *dev) +{ + struct apmf_dyn_slider_output out = {}; + + if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC)) + apmf_get_dyn_slider_def_ac(dev, &out); + else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) + apmf_get_dyn_slider_def_dc(dev, &out); + + return out.flags; +} + +static int amd_pmf_load_defaults_cnqf(struct amd_pmf_dev *dev) +{ + struct apmf_dyn_slider_output out; + int i, j, ret; + + for (i = 0; i < POWER_SOURCE_MAX; i++) { + if (!is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC + i)) + continue; + + if (i == POWER_SOURCE_AC) + ret = apmf_get_dyn_slider_def_ac(dev, &out); + else + ret = apmf_get_dyn_slider_def_dc(dev, &out); + if (ret) { + dev_err(dev->dev, "APMF apmf_get_dyn_slider_def_dc failed :%d\n", ret); + return ret; + } + + amd_pmf_update_mode_set(i, out); + amd_pmf_update_trans_data(i, out); + amd_pmf_update_power_threshold(i); + + for (j = 0; j < CNQF_MODE_MAX; j++) { + if (config_store.mode_set[i][j].fan_control.fan_id == FAN_INDEX_AUTO) + config_store.mode_set[i][j].fan_control.manual = false; + else + config_store.mode_set[i][j].fan_control.manual = true; + } + } + + /* set to initial default values */ + config_store.current_mode = CNQF_MODE_BALANCE; + + return 0; +} + +static ssize_t cnqf_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct amd_pmf_dev *pdev = dev_get_drvdata(dev); + int result, src; + bool input; + + result = kstrtobool(buf, &input); + if (result) + return result; + + src = amd_pmf_cnqf_get_power_source(pdev); + pdev->cnqf_enabled = 
input; + + if (pdev->cnqf_enabled && is_pprof_balanced(pdev)) { + amd_pmf_set_cnqf(pdev, src, config_store.current_mode, NULL); + } else { + if (is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) + amd_pmf_set_sps_power_limits(pdev); + } + + dev_dbg(pdev->dev, "Received CnQF %s\n", input ? "on" : "off"); + return count; +} + +static ssize_t cnqf_enable_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct amd_pmf_dev *pdev = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%s\n", pdev->cnqf_enabled ? "on" : "off"); +} + +static DEVICE_ATTR_RW(cnqf_enable); + +static umode_t cnqf_feature_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct amd_pmf_dev *pdev = dev_get_drvdata(dev); + + return pdev->cnqf_supported ? attr->mode : 0; +} + +static struct attribute *cnqf_feature_attrs[] = { + &dev_attr_cnqf_enable.attr, + NULL +}; + +const struct attribute_group cnqf_feature_attribute_group = { + .is_visible = cnqf_feature_is_visible, + .attrs = cnqf_feature_attrs, +}; + +void amd_pmf_deinit_cnqf(struct amd_pmf_dev *dev) +{ + cancel_delayed_work_sync(&dev->work_buffer); +} + +int amd_pmf_init_cnqf(struct amd_pmf_dev *dev) +{ + int ret, src; + + /* + * Note the caller of this function has already checked that both + * APMF_FUNC_DYN_SLIDER_AC and APMF_FUNC_DYN_SLIDER_DC are supported. + */ + + ret = amd_pmf_load_defaults_cnqf(dev); + if (ret < 0) + return ret; + + amd_pmf_init_metrics_table(dev); + + dev->cnqf_supported = true; + dev->cnqf_enabled = amd_pmf_check_flags(dev); + + /* update the thermal for CnQF */ + if (dev->cnqf_enabled && is_pprof_balanced(dev)) { + src = amd_pmf_cnqf_get_power_source(dev); + amd_pmf_set_cnqf(dev, src, config_store.current_mode, NULL); + } + + return 0; +} diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c new file mode 100644 index 000000000..d10c09738 --- /dev/null +++ b/drivers/platform/x86/amd/pmf/core.c @@ -0,0 +1,448 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * AMD Platform Management Framework Driver + * + * Copyright (c) 2022, Advanced Micro Devices, Inc. + * All Rights Reserved. 
+ * + * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com> + */ + +#include <asm/amd_nb.h> +#include <linux/debugfs.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/power_supply.h> +#include "pmf.h" + +/* PMF-SMU communication registers */ +#define AMD_PMF_REGISTER_MESSAGE 0xA18 +#define AMD_PMF_REGISTER_RESPONSE 0xA78 +#define AMD_PMF_REGISTER_ARGUMENT 0xA58 + +/* Base address of SMU for mapping physical address to virtual address */ +#define AMD_PMF_MAPPING_SIZE 0x01000 +#define AMD_PMF_BASE_ADDR_OFFSET 0x10000 +#define AMD_PMF_BASE_ADDR_LO 0x13B102E8 +#define AMD_PMF_BASE_ADDR_HI 0x13B102EC +#define AMD_PMF_BASE_ADDR_LO_MASK GENMASK(15, 0) +#define AMD_PMF_BASE_ADDR_HI_MASK GENMASK(31, 20) + +/* SMU Response Codes */ +#define AMD_PMF_RESULT_OK 0x01 +#define AMD_PMF_RESULT_CMD_REJECT_BUSY 0xFC +#define AMD_PMF_RESULT_CMD_REJECT_PREREQ 0xFD +#define AMD_PMF_RESULT_CMD_UNKNOWN 0xFE +#define AMD_PMF_RESULT_FAILED 0xFF + +/* List of supported CPU ids */ +#define AMD_CPU_ID_RMB 0x14b5 +#define AMD_CPU_ID_PS 0x14e8 + +#define PMF_MSG_DELAY_MIN_US 50 +#define RESPONSE_REGISTER_LOOP_MAX 20000 + +#define DELAY_MIN_US 2000 +#define DELAY_MAX_US 3000 + +/* override Metrics Table sample size time (in ms) */ +static int metrics_table_loop_ms = 1000; +module_param(metrics_table_loop_ms, int, 0644); +MODULE_PARM_DESC(metrics_table_loop_ms, "Metrics Table sample size time (default = 1000ms)"); + +/* Force load on supported older platforms */ +static bool force_load; +module_param(force_load, bool, 0444); +MODULE_PARM_DESC(force_load, "Force load this driver on supported older platforms (experimental)"); + +static int amd_pmf_pwr_src_notify_call(struct notifier_block *nb, unsigned long event, void *data) +{ + struct amd_pmf_dev *pmf = container_of(nb, struct amd_pmf_dev, pwr_src_notifier); + + if (event != PSY_EVENT_PROP_CHANGED) + return NOTIFY_OK; + + if (is_apmf_func_supported(pmf, APMF_FUNC_AUTO_MODE) || + is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_DC) || + is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_AC)) { + if ((pmf->amt_enabled || pmf->cnqf_enabled) && is_pprof_balanced(pmf)) + return NOTIFY_DONE; + } + + if (is_apmf_func_supported(pmf, APMF_FUNC_STATIC_SLIDER_GRANULAR)) + amd_pmf_set_sps_power_limits(pmf); + + if (is_apmf_func_supported(pmf, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) + amd_pmf_power_slider_update_event(pmf); + + return NOTIFY_OK; +} + +static int current_power_limits_show(struct seq_file *seq, void *unused) +{ + struct amd_pmf_dev *dev = seq->private; + struct amd_pmf_static_slider_granular table; + int mode, src = 0; + + mode = amd_pmf_get_pprof_modes(dev); + if (mode < 0) + return mode; + + src = amd_pmf_get_power_source(); + amd_pmf_update_slider(dev, SLIDER_OP_GET, mode, &table); + seq_printf(seq, "spl:%u fppt:%u sppt:%u sppt_apu_only:%u stt_min:%u stt[APU]:%u stt[HS2]: %u\n", + table.prop[src][mode].spl, + table.prop[src][mode].fppt, + table.prop[src][mode].sppt, + table.prop[src][mode].sppt_apu_only, + table.prop[src][mode].stt_min, + table.prop[src][mode].stt_skin_temp[STT_TEMP_APU], + table.prop[src][mode].stt_skin_temp[STT_TEMP_HS2]); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(current_power_limits); + +static void amd_pmf_dbgfs_unregister(struct amd_pmf_dev *dev) +{ + debugfs_remove_recursive(dev->dbgfs_dir); +} + +static void amd_pmf_dbgfs_register(struct amd_pmf_dev *dev) +{ + dev->dbgfs_dir = debugfs_create_dir("amd_pmf", NULL); + debugfs_create_file("current_power_limits", 0644, 
dev->dbgfs_dir, dev, + &current_power_limits_fops); +} + +int amd_pmf_get_power_source(void) +{ + if (power_supply_is_system_supplied() > 0) + return POWER_SOURCE_AC; + else + return POWER_SOURCE_DC; +} + +static void amd_pmf_get_metrics(struct work_struct *work) +{ + struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, work_buffer.work); + ktime_t time_elapsed_ms; + int socket_power; + + mutex_lock(&dev->update_mutex); + /* Transfer table contents */ + memset(dev->buf, 0, sizeof(dev->m_table)); + amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL); + memcpy(&dev->m_table, dev->buf, sizeof(dev->m_table)); + + time_elapsed_ms = ktime_to_ms(ktime_get()) - dev->start_time; + /* Calculate the avg SoC power consumption */ + socket_power = dev->m_table.apu_power + dev->m_table.dgpu_power; + + if (dev->amt_enabled) { + /* Apply the Auto Mode transition */ + amd_pmf_trans_automode(dev, socket_power, time_elapsed_ms); + } + + if (dev->cnqf_enabled) { + /* Apply the CnQF transition */ + amd_pmf_trans_cnqf(dev, socket_power, time_elapsed_ms); + } + + dev->start_time = ktime_to_ms(ktime_get()); + schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms)); + mutex_unlock(&dev->update_mutex); +} + +static inline u32 amd_pmf_reg_read(struct amd_pmf_dev *dev, int reg_offset) +{ + return ioread32(dev->regbase + reg_offset); +} + +static inline void amd_pmf_reg_write(struct amd_pmf_dev *dev, int reg_offset, u32 val) +{ + iowrite32(val, dev->regbase + reg_offset); +} + +static void __maybe_unused amd_pmf_dump_registers(struct amd_pmf_dev *dev) +{ + u32 value; + + value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_RESPONSE); + dev_dbg(dev->dev, "AMD_PMF_REGISTER_RESPONSE:%x\n", value); + + value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT); + dev_dbg(dev->dev, "AMD_PMF_REGISTER_ARGUMENT:%d\n", value); + + value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_MESSAGE); + dev_dbg(dev->dev, "AMD_PMF_REGISTER_MESSAGE:%x\n", value); +} + +int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32 *data) +{ + int rc; + u32 val; + + mutex_lock(&dev->lock); + + /* Wait until we get a valid response */ + rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE, + val, val != 0, PMF_MSG_DELAY_MIN_US, + PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX); + if (rc) { + dev_err(dev->dev, "failed to talk to SMU\n"); + goto out_unlock; + } + + /* Write zero to response register */ + amd_pmf_reg_write(dev, AMD_PMF_REGISTER_RESPONSE, 0); + + /* Write argument into argument register */ + amd_pmf_reg_write(dev, AMD_PMF_REGISTER_ARGUMENT, arg); + + /* Write message ID to message ID register */ + amd_pmf_reg_write(dev, AMD_PMF_REGISTER_MESSAGE, message); + + /* Wait until we get a valid response */ + rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE, + val, val != 0, PMF_MSG_DELAY_MIN_US, + PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX); + if (rc) { + dev_err(dev->dev, "SMU response timed out\n"); + goto out_unlock; + } + + switch (val) { + case AMD_PMF_RESULT_OK: + if (get) { + /* PMFW may take a longer time to return the data */ + usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US); + *data = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT); + } + break; + case AMD_PMF_RESULT_CMD_REJECT_BUSY: + dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val); + rc = -EBUSY; + goto out_unlock; + case AMD_PMF_RESULT_CMD_UNKNOWN: + dev_err(dev->dev, "SMU cmd unknown. 
err: 0x%x\n", val); + rc = -EINVAL; + goto out_unlock; + case AMD_PMF_RESULT_CMD_REJECT_PREREQ: + case AMD_PMF_RESULT_FAILED: + default: + dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val); + rc = -EIO; + goto out_unlock; + } + +out_unlock: + mutex_unlock(&dev->lock); + amd_pmf_dump_registers(dev); + return rc; +} + +static const struct pci_device_id pmf_pci_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RMB) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) }, + { } +}; + +static void amd_pmf_set_dram_addr(struct amd_pmf_dev *dev) +{ + u64 phys_addr; + u32 hi, low; + + phys_addr = virt_to_phys(dev->buf); + hi = phys_addr >> 32; + low = phys_addr & GENMASK(31, 0); + + amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL); + amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL); +} + +int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev) +{ + /* Get Metrics Table Address */ + dev->buf = kzalloc(sizeof(dev->m_table), GFP_KERNEL); + if (!dev->buf) + return -ENOMEM; + + INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics); + + amd_pmf_set_dram_addr(dev); + + /* + * Start collecting the metrics data after a small delay + * or else, we might end up getting stale values from PMFW. + */ + schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms * 3)); + + return 0; +} + +static int amd_pmf_resume_handler(struct device *dev) +{ + struct amd_pmf_dev *pdev = dev_get_drvdata(dev); + + if (pdev->buf) + amd_pmf_set_dram_addr(pdev); + + return 0; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmf_pm, NULL, amd_pmf_resume_handler); + +static void amd_pmf_init_features(struct amd_pmf_dev *dev) +{ + int ret; + + /* Enable Static Slider */ + if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) || + is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) { + amd_pmf_init_sps(dev); + dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call; + power_supply_reg_notifier(&dev->pwr_src_notifier); + dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n"); + } + + /* Enable Auto Mode */ + if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) { + amd_pmf_init_auto_mode(dev); + dev_dbg(dev->dev, "Auto Mode Init done\n"); + } else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) || + is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) { + /* Enable Cool n Quiet Framework (CnQF) */ + ret = amd_pmf_init_cnqf(dev); + if (ret) + dev_warn(dev->dev, "CnQF Init failed\n"); + } +} + +static void amd_pmf_deinit_features(struct amd_pmf_dev *dev) +{ + if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) || + is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) { + power_supply_unreg_notifier(&dev->pwr_src_notifier); + amd_pmf_deinit_sps(dev); + } + + if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) { + amd_pmf_deinit_auto_mode(dev); + } else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) || + is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) { + amd_pmf_deinit_cnqf(dev); + } +} + +static const struct acpi_device_id amd_pmf_acpi_ids[] = { + {"AMDI0100", 0x100}, + {"AMDI0102", 0}, + { } +}; +MODULE_DEVICE_TABLE(acpi, amd_pmf_acpi_ids); + +static int amd_pmf_probe(struct platform_device *pdev) +{ + const struct acpi_device_id *id; + struct amd_pmf_dev *dev; + struct pci_dev *rdev; + u32 base_addr_lo; + u32 base_addr_hi; + u64 base_addr; + u32 val; + int err; + + id = acpi_match_device(amd_pmf_acpi_ids, &pdev->dev); + if (!id) + return -ENODEV; + + if (id->driver_data == 0x100 && !force_load) + return 
-ENODEV; + + dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + dev->dev = &pdev->dev; + + rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0)); + if (!rdev || !pci_match_id(pmf_pci_ids, rdev)) { + pci_dev_put(rdev); + return -ENODEV; + } + + dev->cpu_id = rdev->device; + + err = amd_smn_read(0, AMD_PMF_BASE_ADDR_LO, &val); + if (err) { + dev_err(dev->dev, "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_LO); + pci_dev_put(rdev); + return pcibios_err_to_errno(err); + } + + base_addr_lo = val & AMD_PMF_BASE_ADDR_HI_MASK; + + err = amd_smn_read(0, AMD_PMF_BASE_ADDR_HI, &val); + if (err) { + dev_err(dev->dev, "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_HI); + pci_dev_put(rdev); + return pcibios_err_to_errno(err); + } + + base_addr_hi = val & AMD_PMF_BASE_ADDR_LO_MASK; + pci_dev_put(rdev); + base_addr = ((u64)base_addr_hi << 32 | base_addr_lo); + + dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMF_BASE_ADDR_OFFSET, + AMD_PMF_MAPPING_SIZE); + if (!dev->regbase) + return -ENOMEM; + + mutex_init(&dev->lock); + mutex_init(&dev->update_mutex); + + apmf_acpi_init(dev); + platform_set_drvdata(pdev, dev); + amd_pmf_init_features(dev); + apmf_install_handler(dev); + amd_pmf_dbgfs_register(dev); + + dev_info(dev->dev, "registered PMF device successfully\n"); + + return 0; +} + +static int amd_pmf_remove(struct platform_device *pdev) +{ + struct amd_pmf_dev *dev = platform_get_drvdata(pdev); + + amd_pmf_deinit_features(dev); + apmf_acpi_deinit(dev); + amd_pmf_dbgfs_unregister(dev); + mutex_destroy(&dev->lock); + mutex_destroy(&dev->update_mutex); + kfree(dev->buf); + return 0; +} + +static const struct attribute_group *amd_pmf_driver_groups[] = { + &cnqf_feature_attribute_group, + NULL, +}; + +static struct platform_driver amd_pmf_driver = { + .driver = { + .name = "amd-pmf", + .acpi_match_table = amd_pmf_acpi_ids, + .dev_groups = amd_pmf_driver_groups, + .pm = pm_sleep_ptr(&amd_pmf_pm), + }, + .probe = amd_pmf_probe, + .remove = amd_pmf_remove, +}; +module_platform_driver(amd_pmf_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("AMD Platform Management Framework Driver"); diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h new file mode 100644 index 000000000..deba88e6e --- /dev/null +++ b/drivers/platform/x86/amd/pmf/pmf.h @@ -0,0 +1,436 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * AMD Platform Management Framework Driver + * + * Copyright (c) 2022, Advanced Micro Devices, Inc. + * All Rights Reserved. 
+ * + * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com> + */ + +#ifndef PMF_H +#define PMF_H + +#include <linux/acpi.h> +#include <linux/platform_profile.h> + +/* APMF Functions */ +#define APMF_FUNC_VERIFY_INTERFACE 0 +#define APMF_FUNC_GET_SYS_PARAMS 1 +#define APMF_FUNC_SBIOS_REQUESTS 2 +#define APMF_FUNC_SBIOS_HEARTBEAT 4 +#define APMF_FUNC_AUTO_MODE 5 +#define APMF_FUNC_SET_FAN_IDX 7 +#define APMF_FUNC_OS_POWER_SLIDER_UPDATE 8 +#define APMF_FUNC_STATIC_SLIDER_GRANULAR 9 +#define APMF_FUNC_DYN_SLIDER_AC 11 +#define APMF_FUNC_DYN_SLIDER_DC 12 + +/* Message Definitions */ +#define SET_SPL 0x03 /* SPL: Sustained Power Limit */ +#define SET_SPPT 0x05 /* SPPT: Slow Package Power Tracking */ +#define SET_FPPT 0x07 /* FPPT: Fast Package Power Tracking */ +#define GET_SPL 0x0B +#define GET_SPPT 0x0D +#define GET_FPPT 0x0F +#define SET_DRAM_ADDR_HIGH 0x14 +#define SET_DRAM_ADDR_LOW 0x15 +#define SET_TRANSFER_TABLE 0x16 +#define SET_STT_MIN_LIMIT 0x18 /* STT: Skin Temperature Tracking */ +#define SET_STT_LIMIT_APU 0x19 +#define SET_STT_LIMIT_HS2 0x1A +#define SET_SPPT_APU_ONLY 0x1D +#define GET_SPPT_APU_ONLY 0x1E +#define GET_STT_MIN_LIMIT 0x1F +#define GET_STT_LIMIT_APU 0x20 +#define GET_STT_LIMIT_HS2 0x21 + +/* OS slider update notification */ +#define DC_BEST_PERF 0 +#define DC_BETTER_PERF 1 +#define DC_BATTERY_SAVER 3 +#define AC_BEST_PERF 4 +#define AC_BETTER_PERF 5 +#define AC_BETTER_BATTERY 6 + +/* Fan Index for Auto Mode */ +#define FAN_INDEX_AUTO 0xFFFFFFFF + +#define ARG_NONE 0 +#define AVG_SAMPLE_SIZE 3 + +/* AMD PMF BIOS interfaces */ +struct apmf_verify_interface { + u16 size; + u16 version; + u32 notification_mask; + u32 supported_functions; +} __packed; + +struct apmf_system_params { + u16 size; + u32 valid_mask; + u32 flags; + u8 command_code; + u32 heartbeat_int; +} __packed; + +struct apmf_sbios_req { + u16 size; + u32 pending_req; + u8 rsd; + u8 cql_event; + u8 amt_event; + u32 fppt; + u32 sppt; + u32 fppt_apu_only; + u32 spl; + u32 stt_min_limit; + u8 skin_temp_apu; + u8 skin_temp_hs2; +} __packed; + +struct apmf_fan_idx { + u16 size; + u8 fan_ctl_mode; + u32 fan_ctl_idx; +} __packed; + +struct smu_pmf_metrics { + u16 gfxclk_freq; /* in MHz */ + u16 socclk_freq; /* in MHz */ + u16 vclk_freq; /* in MHz */ + u16 dclk_freq; /* in MHz */ + u16 memclk_freq; /* in MHz */ + u16 spare; + u16 gfx_activity; /* in Centi */ + u16 uvd_activity; /* in Centi */ + u16 voltage[2]; /* in mV */ + u16 currents[2]; /* in mA */ + u16 power[2];/* in mW */ + u16 core_freq[8]; /* in MHz */ + u16 core_power[8]; /* in mW */ + u16 core_temp[8]; /* in centi-Celsius */ + u16 l3_freq; /* in MHz */ + u16 l3_temp; /* in centi-Celsius */ + u16 gfx_temp; /* in centi-Celsius */ + u16 soc_temp; /* in centi-Celsius */ + u16 throttler_status; + u16 current_socketpower; /* in mW */ + u16 stapm_orig_limit; /* in W */ + u16 stapm_cur_limit; /* in W */ + u32 apu_power; /* in mW */ + u32 dgpu_power; /* in mW */ + u16 vdd_tdc_val; /* in mA */ + u16 soc_tdc_val; /* in mA */ + u16 vdd_edc_val; /* in mA */ + u16 soc_edcv_al; /* in mA */ + u16 infra_cpu_maxfreq; /* in MHz */ + u16 infra_gfx_maxfreq; /* in MHz */ + u16 skin_temp; /* in centi-Celsius */ + u16 device_state; +} __packed; + +enum amd_stt_skin_temp { + STT_TEMP_APU, + STT_TEMP_HS2, + STT_TEMP_COUNT, +}; + +enum amd_slider_op { + SLIDER_OP_GET, + SLIDER_OP_SET, +}; + +enum power_source { + POWER_SOURCE_AC, + POWER_SOURCE_DC, + POWER_SOURCE_MAX, +}; + +enum power_modes { + POWER_MODE_PERFORMANCE, + POWER_MODE_BALANCED_POWER, + POWER_MODE_POWER_SAVER, + 
POWER_MODE_MAX, +}; + +struct amd_pmf_dev { + void __iomem *regbase; + void __iomem *smu_virt_addr; + void *buf; + u32 base_addr; + u32 cpu_id; + struct device *dev; + struct mutex lock; /* protects the PMF interface */ + u32 supported_func; + enum platform_profile_option current_profile; + struct platform_profile_handler pprof; + struct dentry *dbgfs_dir; + int hb_interval; /* SBIOS heartbeat interval */ + struct delayed_work heart_beat; + struct smu_pmf_metrics m_table; + struct delayed_work work_buffer; + ktime_t start_time; + int socket_power_history[AVG_SAMPLE_SIZE]; + int socket_power_history_idx; + bool amt_enabled; + struct mutex update_mutex; /* protects race between ACPI handler and metrics thread */ + bool cnqf_enabled; + bool cnqf_supported; + struct notifier_block pwr_src_notifier; +}; + +struct apmf_sps_prop_granular { + u32 fppt; + u32 sppt; + u32 sppt_apu_only; + u32 spl; + u32 stt_min; + u8 stt_skin_temp[STT_TEMP_COUNT]; + u32 fan_id; +} __packed; + +/* Static Slider */ +struct apmf_static_slider_granular_output { + u16 size; + struct apmf_sps_prop_granular prop[POWER_SOURCE_MAX * POWER_MODE_MAX]; +} __packed; + +struct amd_pmf_static_slider_granular { + u16 size; + struct apmf_sps_prop_granular prop[POWER_SOURCE_MAX][POWER_MODE_MAX]; +}; + +struct os_power_slider { + u16 size; + u8 slider_event; +} __packed; + +struct fan_table_control { + bool manual; + unsigned long fan_id; +}; + +struct power_table_control { + u32 spl; + u32 sppt; + u32 fppt; + u32 sppt_apu_only; + u32 stt_min; + u32 stt_skin_temp[STT_TEMP_COUNT]; + u32 reserved[16]; +}; + +/* Auto Mode Layer */ +enum auto_mode_transition_priority { + AUTO_TRANSITION_TO_PERFORMANCE, /* Any other mode to Performance Mode */ + AUTO_TRANSITION_FROM_QUIET_TO_BALANCE, /* Quiet Mode to Balance Mode */ + AUTO_TRANSITION_TO_QUIET, /* Any other mode to Quiet Mode */ + AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE, /* Performance Mode to Balance Mode */ + AUTO_TRANSITION_MAX, +}; + +enum auto_mode_mode { + AUTO_QUIET, + AUTO_BALANCE, + AUTO_PERFORMANCE_ON_LAP, + AUTO_PERFORMANCE, + AUTO_MODE_MAX, +}; + +struct auto_mode_trans_params { + u32 time_constant; /* minimum time required to switch to next mode */ + u32 power_delta; /* delta power to shift mode */ + u32 power_threshold; + u32 timer; /* elapsed time. 
if timer > TimeThreshold, it will move to next mode */
+        u32 applied;
+        enum auto_mode_mode target_mode;
+        u32 shifting_up;
+};
+
+struct auto_mode_mode_settings {
+        struct power_table_control power_control;
+        struct fan_table_control fan_control;
+        u32 power_floor;
+};
+
+struct auto_mode_mode_config {
+        struct auto_mode_trans_params transition[AUTO_TRANSITION_MAX];
+        struct auto_mode_mode_settings mode_set[AUTO_MODE_MAX];
+        enum auto_mode_mode current_mode;
+};
+
+struct apmf_auto_mode {
+        u16 size;
+        /* time constant */
+        u32 balanced_to_perf;
+        u32 perf_to_balanced;
+        u32 quiet_to_balanced;
+        u32 balanced_to_quiet;
+        /* power floor */
+        u32 pfloor_perf;
+        u32 pfloor_balanced;
+        u32 pfloor_quiet;
+        /* Power delta for mode change */
+        u32 pd_balanced_to_perf;
+        u32 pd_perf_to_balanced;
+        u32 pd_quiet_to_balanced;
+        u32 pd_balanced_to_quiet;
+        /* skin temperature limits */
+        u8 stt_apu_perf_on_lap; /* CQL ON */
+        u8 stt_hs2_perf_on_lap; /* CQL ON */
+        u8 stt_apu_perf;
+        u8 stt_hs2_perf;
+        u8 stt_apu_balanced;
+        u8 stt_hs2_balanced;
+        u8 stt_apu_quiet;
+        u8 stt_hs2_quiet;
+        u32 stt_min_limit_perf_on_lap; /* CQL ON */
+        u32 stt_min_limit_perf;
+        u32 stt_min_limit_balanced;
+        u32 stt_min_limit_quiet;
+        /* SPL based */
+        u32 fppt_perf_on_lap; /* CQL ON */
+        u32 sppt_perf_on_lap; /* CQL ON */
+        u32 spl_perf_on_lap; /* CQL ON */
+        u32 sppt_apu_only_perf_on_lap; /* CQL ON */
+        u32 fppt_perf;
+        u32 sppt_perf;
+        u32 spl_perf;
+        u32 sppt_apu_only_perf;
+        u32 fppt_balanced;
+        u32 sppt_balanced;
+        u32 spl_balanced;
+        u32 sppt_apu_only_balanced;
+        u32 fppt_quiet;
+        u32 sppt_quiet;
+        u32 spl_quiet;
+        u32 sppt_apu_only_quiet;
+        /* Fan ID */
+        u32 fan_id_perf;
+        u32 fan_id_balanced;
+        u32 fan_id_quiet;
+} __packed;
+
+/* CnQF Layer */
+enum cnqf_trans_priority {
+        CNQF_TRANSITION_TO_TURBO,                    /* Any other mode to Turbo Mode */
+        CNQF_TRANSITION_FROM_BALANCE_TO_PERFORMANCE, /* quiet/balance to Performance Mode */
+        CNQF_TRANSITION_FROM_QUIET_TO_BALANCE,       /* Quiet Mode to Balance Mode */
+        CNQF_TRANSITION_TO_QUIET,                    /* Any other mode to Quiet Mode */
+        CNQF_TRANSITION_FROM_PERFORMANCE_TO_BALANCE, /* Performance/Turbo to Balance Mode */
+        CNQF_TRANSITION_FROM_TURBO_TO_PERFORMANCE,   /* Turbo mode to Performance Mode */
+        CNQF_TRANSITION_MAX,
+};
+
+enum cnqf_mode {
+        CNQF_MODE_QUIET,
+        CNQF_MODE_BALANCE,
+        CNQF_MODE_PERFORMANCE,
+        CNQF_MODE_TURBO,
+        CNQF_MODE_MAX,
+};
+
+enum apmf_cnqf_pos {
+        APMF_CNQF_TURBO,
+        APMF_CNQF_PERFORMANCE,
+        APMF_CNQF_BALANCE,
+        APMF_CNQF_QUIET,
+        APMF_CNQF_MAX,
+};
+
+struct cnqf_mode_settings {
+        struct power_table_control power_control;
+        struct fan_table_control fan_control;
+        u32 power_floor;
+};
+
+struct cnqf_tran_params {
+        u32 time_constant; /* minimum time required to switch to next mode */
+        u32 power_threshold;
+        u32 timer; /* elapsed time. if timer > TimeThreshold, it will move to next mode */
+        u32 total_power;
+        u32 count;
+        bool priority;
+        bool shifting_up;
+        enum cnqf_mode target_mode;
+};
+
+struct cnqf_config {
+        struct cnqf_tran_params trans_param[POWER_SOURCE_MAX][CNQF_TRANSITION_MAX];
+        struct cnqf_mode_settings mode_set[POWER_SOURCE_MAX][CNQF_MODE_MAX];
+        struct power_table_control defaults;
+        enum cnqf_mode current_mode;
+        u32 power_src;
+        u32 avg_power;
+};
+
+struct apmf_cnqf_power_set {
+        u32 pfloor;
+        u32 fppt;
+        u32 sppt;
+        u32 sppt_apu_only;
+        u32 spl;
+        u32 stt_min_limit;
+        u8 stt_skintemp[STT_TEMP_COUNT];
+        u32 fan_id;
+} __packed;
+
+struct apmf_dyn_slider_output {
+        u16 size;
+        u16 flags;
+        u32 t_perf_to_turbo;
+        u32 t_balanced_to_perf;
+        u32 t_quiet_to_balanced;
+        u32 t_balanced_to_quiet;
+        u32 t_perf_to_balanced;
+        u32 t_turbo_to_perf;
+        struct apmf_cnqf_power_set ps[APMF_CNQF_MAX];
+} __packed;
+
+/* Core Layer */
+int apmf_acpi_init(struct amd_pmf_dev *pmf_dev);
+void apmf_acpi_deinit(struct amd_pmf_dev *pmf_dev);
+int is_apmf_func_supported(struct amd_pmf_dev *pdev, unsigned long index);
+int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32 *data);
+int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev);
+int amd_pmf_get_power_source(void);
+int apmf_install_handler(struct amd_pmf_dev *pmf_dev);
+int apmf_os_power_slider_update(struct amd_pmf_dev *dev, u8 flag);
+
+/* SPS Layer */
+int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf);
+void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
+                           struct amd_pmf_static_slider_granular *table);
+int amd_pmf_init_sps(struct amd_pmf_dev *dev);
+void amd_pmf_deinit_sps(struct amd_pmf_dev *dev);
+int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
+                                    struct apmf_static_slider_granular_output *output);
+bool is_pprof_balanced(struct amd_pmf_dev *pmf);
+int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev);
+
+
+int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx);
+int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf);
+
+/* Auto Mode Layer */
+int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data);
+void amd_pmf_init_auto_mode(struct amd_pmf_dev *dev);
+void amd_pmf_deinit_auto_mode(struct amd_pmf_dev *dev);
+void amd_pmf_trans_automode(struct amd_pmf_dev *dev, int socket_power, ktime_t time_elapsed_ms);
+int apmf_get_sbios_requests(struct amd_pmf_dev *pdev, struct apmf_sbios_req *req);
+
+void amd_pmf_update_2_cql(struct amd_pmf_dev *dev, bool is_cql_event);
+int amd_pmf_reset_amt(struct amd_pmf_dev *dev);
+void amd_pmf_handle_amt(struct amd_pmf_dev *dev);
+
+/* CnQF Layer */
+int apmf_get_dyn_slider_def_ac(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_output *data);
+int apmf_get_dyn_slider_def_dc(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_output *data);
+int amd_pmf_init_cnqf(struct amd_pmf_dev *dev);
+void amd_pmf_deinit_cnqf(struct amd_pmf_dev *dev);
+int amd_pmf_trans_cnqf(struct amd_pmf_dev *dev, int socket_power, ktime_t time_lapsed_ms);
+extern const struct attribute_group cnqf_feature_attribute_group;
+
+#endif /* PMF_H */
diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
new file mode 100644
index 000000000..b2cf62937
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/sps.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Management Framework (PMF) Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#include "pmf.h"
+
+static struct amd_pmf_static_slider_granular config_store;
+
+static void amd_pmf_load_defaults_sps(struct amd_pmf_dev *dev)
+{
+        struct apmf_static_slider_granular_output output;
+        int i, j, idx = 0;
+
+        memset(&config_store, 0, sizeof(config_store));
+        apmf_get_static_slider_granular(dev, &output);
+
+        for (i = 0; i < POWER_SOURCE_MAX; i++) {
+                for (j = 0; j < POWER_MODE_MAX; j++) {
+                        config_store.prop[i][j].spl = output.prop[idx].spl;
+                        config_store.prop[i][j].sppt = output.prop[idx].sppt;
+                        config_store.prop[i][j].sppt_apu_only =
+                                output.prop[idx].sppt_apu_only;
+                        config_store.prop[i][j].fppt = output.prop[idx].fppt;
+                        config_store.prop[i][j].stt_min = output.prop[idx].stt_min;
+                        config_store.prop[i][j].stt_skin_temp[STT_TEMP_APU] =
+                                output.prop[idx].stt_skin_temp[STT_TEMP_APU];
+                        config_store.prop[i][j].stt_skin_temp[STT_TEMP_HS2] =
+                                output.prop[idx].stt_skin_temp[STT_TEMP_HS2];
+                        config_store.prop[i][j].fan_id = output.prop[idx].fan_id;
+                        idx++;
+                }
+        }
+}
+
+void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
+                           struct amd_pmf_static_slider_granular *table)
+{
+        int src = amd_pmf_get_power_source();
+
+        if (op == SLIDER_OP_SET) {
+                amd_pmf_send_cmd(dev, SET_SPL, false, config_store.prop[src][idx].spl, NULL);
+                amd_pmf_send_cmd(dev, SET_FPPT, false, config_store.prop[src][idx].fppt, NULL);
+                amd_pmf_send_cmd(dev, SET_SPPT, false, config_store.prop[src][idx].sppt, NULL);
+                amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false,
+                                 config_store.prop[src][idx].sppt_apu_only, NULL);
+                amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false,
+                                 config_store.prop[src][idx].stt_min, NULL);
+                amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
+                                 config_store.prop[src][idx].stt_skin_temp[STT_TEMP_APU], NULL);
+                amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
+                                 config_store.prop[src][idx].stt_skin_temp[STT_TEMP_HS2], NULL);
+        } else if (op == SLIDER_OP_GET) {
+                amd_pmf_send_cmd(dev, GET_SPL, true, ARG_NONE, &table->prop[src][idx].spl);
+                amd_pmf_send_cmd(dev, GET_FPPT, true, ARG_NONE, &table->prop[src][idx].fppt);
+                amd_pmf_send_cmd(dev, GET_SPPT, true, ARG_NONE, &table->prop[src][idx].sppt);
+                amd_pmf_send_cmd(dev, GET_SPPT_APU_ONLY, true, ARG_NONE,
+                                 &table->prop[src][idx].sppt_apu_only);
+                amd_pmf_send_cmd(dev, GET_STT_MIN_LIMIT, true, ARG_NONE,
+                                 &table->prop[src][idx].stt_min);
+                amd_pmf_send_cmd(dev, GET_STT_LIMIT_APU, true, ARG_NONE,
+                                 (u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_APU]);
+                amd_pmf_send_cmd(dev, GET_STT_LIMIT_HS2, true, ARG_NONE,
+                                 (u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_HS2]);
+        }
+}
+
+int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf)
+{
+        int mode;
+
+        mode = amd_pmf_get_pprof_modes(pmf);
+        if (mode < 0)
+                return mode;
+
+        amd_pmf_update_slider(pmf, SLIDER_OP_SET, mode, NULL);
+
+        return 0;
+}
+
+bool is_pprof_balanced(struct amd_pmf_dev *pmf)
+{
+        return (pmf->current_profile == PLATFORM_PROFILE_BALANCED) ? true : false;
+}
+
+static int amd_pmf_profile_get(struct platform_profile_handler *pprof,
+                               enum platform_profile_option *profile)
+{
+        struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
+
+        *profile = pmf->current_profile;
+        return 0;
+}
+
+int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf)
+{
+        int mode;
+
+        switch (pmf->current_profile) {
+        case PLATFORM_PROFILE_PERFORMANCE:
+                mode = POWER_MODE_PERFORMANCE;
+                break;
+        case PLATFORM_PROFILE_BALANCED:
+                mode = POWER_MODE_BALANCED_POWER;
+                break;
+        case PLATFORM_PROFILE_LOW_POWER:
+                mode = POWER_MODE_POWER_SAVER;
+                break;
+        default:
+                dev_err(pmf->dev, "Unknown Platform Profile.\n");
+                return -EOPNOTSUPP;
+        }
+
+        return mode;
+}
+
+int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev)
+{
+        u8 flag = 0;
+        int mode;
+        int src;
+
+        mode = amd_pmf_get_pprof_modes(dev);
+        if (mode < 0)
+                return mode;
+
+        src = amd_pmf_get_power_source();
+
+        if (src == POWER_SOURCE_AC) {
+                switch (mode) {
+                case POWER_MODE_PERFORMANCE:
+                        flag |= BIT(AC_BEST_PERF);
+                        break;
+                case POWER_MODE_BALANCED_POWER:
+                        flag |= BIT(AC_BETTER_PERF);
+                        break;
+                case POWER_MODE_POWER_SAVER:
+                        flag |= BIT(AC_BETTER_BATTERY);
+                        break;
+                default:
+                        dev_err(dev->dev, "unsupported platform profile\n");
+                        return -EOPNOTSUPP;
+                }
+
+        } else if (src == POWER_SOURCE_DC) {
+                switch (mode) {
+                case POWER_MODE_PERFORMANCE:
+                        flag |= BIT(DC_BEST_PERF);
+                        break;
+                case POWER_MODE_BALANCED_POWER:
+                        flag |= BIT(DC_BETTER_PERF);
+                        break;
+                case POWER_MODE_POWER_SAVER:
+                        flag |= BIT(DC_BATTERY_SAVER);
+                        break;
+                default:
+                        dev_err(dev->dev, "unsupported platform profile\n");
+                        return -EOPNOTSUPP;
+                }
+        }
+
+        apmf_os_power_slider_update(dev, flag);
+
+        return 0;
+}
+
+static int amd_pmf_profile_set(struct platform_profile_handler *pprof,
+                               enum platform_profile_option profile)
+{
+        struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
+        int ret = 0;
+
+        pmf->current_profile = profile;
+
+        /* Notify EC about the slider position change */
+        if (is_apmf_func_supported(pmf, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
+                ret = amd_pmf_power_slider_update_event(pmf);
+                if (ret)
+                        return ret;
+        }
+
+        if (is_apmf_func_supported(pmf, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
+                ret = amd_pmf_set_sps_power_limits(pmf);
+                if (ret)
+                        return ret;
+        }
+
+        return 0;
+}
+
+int amd_pmf_init_sps(struct amd_pmf_dev *dev)
+{
+        int err;
+
+        dev->current_profile = PLATFORM_PROFILE_BALANCED;
+
+        if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
+                amd_pmf_load_defaults_sps(dev);
+
+                /* update SPS balanced power mode thermals */
+                amd_pmf_set_sps_power_limits(dev);
+        }
+
+        dev->pprof.profile_get = amd_pmf_profile_get;
+        dev->pprof.profile_set = amd_pmf_profile_set;
+
+        /* Setup supported modes */
+        set_bit(PLATFORM_PROFILE_LOW_POWER, dev->pprof.choices);
+        set_bit(PLATFORM_PROFILE_BALANCED, dev->pprof.choices);
+        set_bit(PLATFORM_PROFILE_PERFORMANCE, dev->pprof.choices);
+
+        /* Create platform_profile structure and register */
+        err = platform_profile_register(&dev->pprof);
+        if (err)
+                dev_err(dev->dev, "Failed to register SPS support, this is most likely an SBIOS bug: %d\n",
+                        err);
+
+        return err;
+}
+
+void amd_pmf_deinit_sps(struct amd_pmf_dev *dev)
+{
+        platform_profile_remove();
+}
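
Both struct auto_mode_trans_params and struct cnqf_tran_params in pmf.h carry the same bookkeeping: a time constant, a power threshold, an elapsed-time counter and a running power sum. The following is a minimal standalone model of how those fields could fit together; the names tran_params/tran_sample and the decision rule (average power compared against power_threshold once timer reaches time_constant) are assumptions for illustration, not the actual logic of auto-mode.c or cnqf.c.

/* Standalone model of the transition bookkeeping; illustration only. */
#include <stdbool.h>
#include <stdio.h>

struct tran_params {
        unsigned int time_constant;   /* ms the condition must hold (assumed) */
        unsigned int power_threshold; /* mW boundary for the average (assumed) */
        unsigned int timer;           /* ms accumulated so far */
        unsigned int total_power;     /* running sum of power samples */
        unsigned int count;           /* number of samples taken */
        bool shifting_up;             /* true when moving to a faster mode */
};

/* Feed one power sample; return true once the transition should fire. */
static bool tran_sample(struct tran_params *t, unsigned int power_mw,
                        unsigned int elapsed_ms)
{
        unsigned int avg;

        t->timer += elapsed_ms;
        t->total_power += power_mw;
        t->count++;

        if (t->timer < t->time_constant)
                return false;

        avg = t->total_power / t->count;

        /* Shift up on sustained high power, down on sustained low power. */
        return t->shifting_up ? avg >= t->power_threshold
                              : avg < t->power_threshold;
}

int main(void)
{
        struct tran_params up = {
                .time_constant = 3000,
                .power_threshold = 20000,
                .shifting_up = true,
        };
        int i;

        /* 25 W samples every second: fires once 3 s have accumulated. */
        for (i = 0; i < 5; i++)
                printf("sample %d -> fire=%d\n", i, tran_sample(&up, 25000, 1000));
        return 0;
}

The driver additionally tracks a target_mode and, for CnQF, a priority flag so that competing transitions (see enum cnqf_trans_priority) can be arbitrated in a fixed order; the model above deliberately leaves that arbitration out.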
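
In amd_pmf_load_defaults_sps() the single running idx walks the flat APMF-supplied table in power-source-major order, so flat slot = source * POWER_MODE_MAX + mode. Below is a small self-contained check of that layout; the enum constants mirror the driver's names, but their numeric ordering here is an assumption made for illustration.

/* Sketch of the flat-to-2D index mapping used by the defaults copy. */
#include <stdio.h>

enum { POWER_SOURCE_AC, POWER_SOURCE_DC, POWER_SOURCE_MAX };  /* assumed order */
enum { POWER_MODE_PERFORMANCE, POWER_MODE_BALANCED_POWER,
       POWER_MODE_POWER_SAVER, POWER_MODE_MAX };              /* assumed order */

int main(void)
{
        int src, mode, idx = 0;

        /* Same loop structure as amd_pmf_load_defaults_sps(). */
        for (src = 0; src < POWER_SOURCE_MAX; src++)
                for (mode = 0; mode < POWER_MODE_MAX; mode++, idx++)
                        printf("config_store.prop[%d][%d] <- output.prop[%d]\n",
                               src, mode, idx);
        return 0;
}

Keeping the copy order identical to the BIOS table layout is what lets the driver fill config_store with one running index instead of a per-slot lookup table.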
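
Once platform_profile_register() succeeds in amd_pmf_init_sps(), profile selection flows through the generic ACPI platform-profile sysfs ABI rather than any driver-private interface. A minimal (root-only) userspace sketch follows; writing "low-power" lands in amd_pmf_profile_set(), which amd_pmf_get_pprof_modes() maps to POWER_MODE_POWER_SAVER before the SPS limits are re-applied.

/* Select the low-power profile via the standard sysfs ABI. */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/firmware/acpi/platform_profile", "w");

        if (!f) {
                perror("platform_profile");
                return 1;
        }
        fputs("low-power", f); /* or "balanced" / "performance" */
        fclose(f);
        return 0;
}

Reading the same file reports the current profile via amd_pmf_profile_get(); the accepted strings correspond to the bits amd_pmf_init_sps() sets in dev->pprof.choices.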