Diffstat (limited to 'drivers/gpu/drm/amd/pm/swsmu/smu13')
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c    |  291
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c        |   45
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c  |  583
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c  |    2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c  |    2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c  | 1103
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c  |  530
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c  |   18
8 files changed, 2038 insertions(+), 536 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 08fff9600..f1440869d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -470,18 +470,12 @@ static bool aldebaran_is_primary(struct smu_context *smu)
static int aldebaran_run_board_btc(struct smu_context *smu)
{
- u32 smu_version;
int ret;
if (!aldebaran_is_primary(smu))
return 0;
- ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if (ret) {
- dev_err(smu->adev->dev, "Failed to get smu version!\n");
- return ret;
- }
- if (smu_version <= 0x00441d00)
+ if (smu->smc_fw_version <= 0x00441d00)
return 0;
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BoardPowerCalibration, NULL);
@@ -553,9 +547,9 @@ static int aldebaran_populate_umd_state_clk(struct smu_context *smu)
return 0;
}
-static int aldebaran_get_clk_table(struct smu_context *smu,
- struct pp_clock_levels_with_latency *clocks,
- struct smu_13_0_dpm_table *dpm_table)
+static void aldebaran_get_clk_table(struct smu_context *smu,
+ struct pp_clock_levels_with_latency *clocks,
+ struct smu_13_0_dpm_table *dpm_table)
{
uint32_t i;
@@ -569,7 +563,6 @@ static int aldebaran_get_clk_table(struct smu_context *smu,
clocks->data[i].latency_in_us = 0;
}
- return 0;
}
static int aldebaran_freqs_in_same_level(int32_t frequency1,
@@ -739,25 +732,26 @@ static int aldebaran_get_current_clk_freq_by_table(struct smu_context *smu,
value);
}
-static int aldebaran_print_clk_levels(struct smu_context *smu,
- enum smu_clk_type type, char *buf)
+static int aldebaran_emit_clk_levels(struct smu_context *smu,
+ enum smu_clk_type type, char *buf, int *offset)
{
- int i, now, size = 0;
int ret = 0;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
struct pp_clock_levels_with_latency clocks;
struct smu_13_0_dpm_table *single_dpm_table;
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_13_0_dpm_context *dpm_context = NULL;
+ uint32_t i;
int display_levels;
uint32_t freq_values[3] = {0};
- uint32_t min_clk, max_clk;
-
- smu_cmn_get_sysfs_buf(&buf, &size);
+ uint32_t min_clk, max_clk, cur_value = 0;
+ bool freq_match;
+ unsigned int clock_mhz;
+ static const char attempt_string[] = "Attempt to get current";
if (amdgpu_ras_intr_triggered()) {
- size += sysfs_emit_at(buf, size, "unavailable\n");
- return size;
+ *offset += sysfs_emit_at(buf, *offset, "unavailable\n");
+ return -EBUSY;
}
dpm_context = smu_dpm->dpm_context;
@@ -765,21 +759,17 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
switch (type) {
case SMU_OD_SCLK:
- size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK");
+ *offset += sysfs_emit_at(buf, *offset, "%s:\n", "GFXCLK");
fallthrough;
case SMU_SCLK:
- ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now);
+ ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &cur_value);
if (ret) {
- dev_err(smu->adev->dev, "Attempt to get current gfx clk Failed!");
+ dev_err(smu->adev->dev, "%s gfx clk Failed!", attempt_string);
return ret;
}
single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
- ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev, "Attempt to get gfx clk levels Failed!");
- return ret;
- }
+ aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
display_levels = (clocks.num_levels == 1) ? 1 : 2;
@@ -790,147 +780,110 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
freq_values[1] = max_clk;
/* fine-grained dpm has only 2 levels */
- if (now > min_clk && now < max_clk) {
+ if (cur_value > min_clk && cur_value < max_clk) {
display_levels++;
freq_values[2] = max_clk;
- freq_values[1] = now;
+ freq_values[1] = cur_value;
}
-
- for (i = 0; i < display_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i,
- freq_values[i],
- (display_levels == 1) ?
- "*" :
- (aldebaran_freqs_in_same_level(
- freq_values[i], now) ?
- "*" :
- ""));
-
break;
case SMU_OD_MCLK:
- size += sysfs_emit_at(buf, size, "%s:\n", "MCLK");
+ *offset += sysfs_emit_at(buf, *offset, "%s:\n", "MCLK");
fallthrough;
case SMU_MCLK:
- ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &now);
+ ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &cur_value);
if (ret) {
- dev_err(smu->adev->dev, "Attempt to get current mclk Failed!");
+ dev_err(smu->adev->dev, "%s mclk Failed!", attempt_string);
return ret;
}
single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
- ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev, "Attempt to get memory clk levels Failed!");
- return ret;
- }
-
- for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
- i, clocks.data[i].clocks_in_khz / 1000,
- (clocks.num_levels == 1) ? "*" :
- (aldebaran_freqs_in_same_level(
- clocks.data[i].clocks_in_khz / 1000,
- now) ? "*" : ""));
+ aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
break;
case SMU_SOCCLK:
- ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_SOCCLK, &now);
+ ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_SOCCLK, &cur_value);
if (ret) {
- dev_err(smu->adev->dev, "Attempt to get current socclk Failed!");
+ dev_err(smu->adev->dev, "%s socclk Failed!", attempt_string);
return ret;
}
single_dpm_table = &(dpm_context->dpm_tables.soc_table);
- ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev, "Attempt to get socclk levels Failed!");
- return ret;
- }
-
- for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
- i, clocks.data[i].clocks_in_khz / 1000,
- (clocks.num_levels == 1) ? "*" :
- (aldebaran_freqs_in_same_level(
- clocks.data[i].clocks_in_khz / 1000,
- now) ? "*" : ""));
+ aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
break;
case SMU_FCLK:
- ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_FCLK, &now);
+ ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_FCLK, &cur_value);
if (ret) {
- dev_err(smu->adev->dev, "Attempt to get current fclk Failed!");
+ dev_err(smu->adev->dev, "%s fclk Failed!", attempt_string);
return ret;
}
single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
- ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev, "Attempt to get fclk levels Failed!");
- return ret;
- }
-
- for (i = 0; i < single_dpm_table->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
- i, single_dpm_table->dpm_levels[i].value,
- (clocks.num_levels == 1) ? "*" :
- (aldebaran_freqs_in_same_level(
- clocks.data[i].clocks_in_khz / 1000,
- now) ? "*" : ""));
+ aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
break;
case SMU_VCLK:
- ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_VCLK, &now);
+ ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_VCLK, &cur_value);
if (ret) {
- dev_err(smu->adev->dev, "Attempt to get current vclk Failed!");
+ dev_err(smu->adev->dev, "%s vclk Failed!", attempt_string);
return ret;
}
single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
- ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev, "Attempt to get vclk levels Failed!");
- return ret;
- }
-
- for (i = 0; i < single_dpm_table->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
- i, single_dpm_table->dpm_levels[i].value,
- (clocks.num_levels == 1) ? "*" :
- (aldebaran_freqs_in_same_level(
- clocks.data[i].clocks_in_khz / 1000,
- now) ? "*" : ""));
+ aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
break;
case SMU_DCLK:
- ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_DCLK, &now);
+ ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_DCLK, &cur_value);
if (ret) {
- dev_err(smu->adev->dev, "Attempt to get current dclk Failed!");
+ dev_err(smu->adev->dev, "%s dclk Failed!", attempt_string);
return ret;
}
single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
- ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev, "Attempt to get dclk levels Failed!");
- return ret;
- }
-
- for (i = 0; i < single_dpm_table->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
- i, single_dpm_table->dpm_levels[i].value,
- (clocks.num_levels == 1) ? "*" :
- (aldebaran_freqs_in_same_level(
- clocks.data[i].clocks_in_khz / 1000,
- now) ? "*" : ""));
+ aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
break;
default:
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case SMU_OD_SCLK:
+ case SMU_SCLK:
+ for (i = 0; i < display_levels; i++) {
+ clock_mhz = freq_values[i];
+ freq_match = aldebaran_freqs_in_same_level(clock_mhz, cur_value);
+ freq_match |= (display_levels == 1);
+
+ *offset += sysfs_emit_at(buf, *offset, "%d: %uMhz %s\n", i,
+ clock_mhz,
+ (freq_match) ? "*" : "");
+ }
break;
+
+ case SMU_OD_MCLK:
+ case SMU_MCLK:
+ case SMU_SOCCLK:
+ case SMU_FCLK:
+ case SMU_VCLK:
+ case SMU_DCLK:
+ for (i = 0; i < clocks.num_levels; i++) {
+ clock_mhz = clocks.data[i].clocks_in_khz / 1000;
+ freq_match = aldebaran_freqs_in_same_level(clock_mhz, cur_value);
+ freq_match |= (clocks.num_levels == 1);
+
+ *offset += sysfs_emit_at(buf, *offset, "%d: %uMhz %s\n",
+ i, clock_mhz,
+ (freq_match) ? "*" : "");
+ }
+ break;
+ default:
+ return -EINVAL;
}
- return size;
+ return 0;
}
static int aldebaran_upload_dpm_level(struct smu_context *smu,
@@ -1189,9 +1142,10 @@ static int aldebaran_read_sensor(struct smu_context *smu,
}
static int aldebaran_get_power_limit(struct smu_context *smu,
- uint32_t *current_power_limit,
- uint32_t *default_power_limit,
- uint32_t *max_power_limit)
+ uint32_t *current_power_limit,
+ uint32_t *default_power_limit,
+ uint32_t *max_power_limit,
+ uint32_t *min_power_limit)
{
PPTable_t *pptable = smu->smu_table.driver_pptable;
uint32_t power_limit = 0;
@@ -1204,7 +1158,8 @@ static int aldebaran_get_power_limit(struct smu_context *smu,
*default_power_limit = 0;
if (max_power_limit)
*max_power_limit = 0;
-
+ if (min_power_limit)
+ *min_power_limit = 0;
dev_warn(smu->adev->dev,
"PPT feature is not enabled, power values can't be fetched.");
@@ -1239,6 +1194,9 @@ static int aldebaran_get_power_limit(struct smu_context *smu,
*max_power_limit = pptable->PptLimit;
}
+ if (min_power_limit)
+ *min_power_limit = 0;
+
return 0;
}
@@ -1622,8 +1580,6 @@ static void aldebaran_get_unique_id(struct smu_context *smu)
out:
adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
- if (adev->serial[0] == '\0')
- sprintf(adev->serial, "%016llx", adev->unique_id);
}
static bool aldebaran_is_baco_supported(struct smu_context *smu)
@@ -1648,20 +1604,27 @@ static int aldebaran_set_df_cstate(struct smu_context *smu,
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
}
-static int aldebaran_allow_xgmi_power_down(struct smu_context *smu, bool en)
+static int aldebaran_select_xgmi_plpd_policy(struct smu_context *smu,
+ enum pp_xgmi_plpd_mode mode)
{
struct amdgpu_device *adev = smu->adev;
/* The message only works on master die and NACK will be sent
back for other dies, only send it on master die */
- if (!adev->smuio.funcs->get_socket_id(adev) &&
- !adev->smuio.funcs->get_die_id(adev))
+ if (adev->smuio.funcs->get_socket_id(adev) ||
+ adev->smuio.funcs->get_die_id(adev))
+ return 0;
+
+ if (mode == XGMI_PLPD_DEFAULT)
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GmiPwrDnControl,
+ 0, NULL);
+ else if (mode == XGMI_PLPD_DISALLOW)
return smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_GmiPwrDnControl,
- en ? 0 : 1,
- NULL);
+ SMU_MSG_GmiPwrDnControl,
+ 1, NULL);
else
- return 0;
+ return -EINVAL;
}
static const struct throttling_logging_label {
@@ -1677,7 +1640,7 @@ static const struct throttling_logging_label {
static void aldebaran_log_thermal_throttling_event(struct smu_context *smu)
{
int ret;
- int throttler_idx, throtting_events = 0, buf_idx = 0;
+ int throttler_idx, throttling_events = 0, buf_idx = 0;
struct amdgpu_device *adev = smu->adev;
uint32_t throttler_status;
char log_buf[256];
@@ -1692,11 +1655,11 @@ static void aldebaran_log_thermal_throttling_event(struct smu_context *smu)
for (throttler_idx = 0; throttler_idx < ARRAY_SIZE(logging_label);
throttler_idx++) {
if (throttler_status & logging_label[throttler_idx].feature_mask) {
- throtting_events++;
+ throttling_events++;
buf_idx += snprintf(log_buf + buf_idx,
sizeof(log_buf) - buf_idx,
"%s%s",
- throtting_events > 1 ? " and " : "",
+ throttling_events > 1 ? " and " : "",
logging_label[throttler_idx].label);
if (buf_idx >= sizeof(log_buf)) {
dev_err(adev->dev, "buffer overflow!\n");
@@ -1808,24 +1771,15 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
static int aldebaran_check_ecc_table_support(struct smu_context *smu,
int *ecctable_version)
{
- uint32_t if_version = 0xff, smu_version = 0xff;
- int ret = 0;
-
- ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
- if (ret) {
- /* return not support if failed get smu_version */
- ret = -EOPNOTSUPP;
- }
-
- if (smu_version < SUPPORT_ECCTABLE_SMU_VERSION)
- ret = -EOPNOTSUPP;
- else if (smu_version >= SUPPORT_ECCTABLE_SMU_VERSION &&
- smu_version < SUPPORT_ECCTABLE_V2_SMU_VERSION)
+ if (smu->smc_fw_version < SUPPORT_ECCTABLE_SMU_VERSION)
+ return -EOPNOTSUPP;
+ else if (smu->smc_fw_version >= SUPPORT_ECCTABLE_SMU_VERSION &&
+ smu->smc_fw_version < SUPPORT_ECCTABLE_V2_SMU_VERSION)
*ecctable_version = 1;
else
*ecctable_version = 2;
- return ret;
+ return 0;
}
static ssize_t aldebaran_get_ecc_info(struct smu_context *smu,
@@ -1888,7 +1842,7 @@ static ssize_t aldebaran_get_ecc_info(struct smu_context *smu,
static int aldebaran_mode1_reset(struct smu_context *smu)
{
- u32 smu_version, fatal_err, param;
+ u32 fatal_err, param;
int ret = 0;
struct amdgpu_device *adev = smu->adev;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
@@ -1899,13 +1853,12 @@ static int aldebaran_mode1_reset(struct smu_context *smu)
/*
* PM FW support SMU_MSG_GfxDeviceDriverReset from 68.07
*/
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if (smu_version < 0x00440700) {
+ if (smu->smc_fw_version < 0x00440700) {
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
} else {
/* fatal error triggered by ras, PMFW supports the flag
from 68.44.0 */
- if ((smu_version >= 0x00442c00) && ras &&
+ if ((smu->smc_fw_version >= 0x00442c00) && ras &&
atomic_read(&ras->in_recovery))
fatal_err = 1;
@@ -1922,18 +1875,15 @@ static int aldebaran_mode1_reset(struct smu_context *smu)
static int aldebaran_mode2_reset(struct smu_context *smu)
{
- u32 smu_version;
int ret = 0, index;
struct amdgpu_device *adev = smu->adev;
int timeout = 10;
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
-
index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
SMU_MSG_GfxDeviceDriverReset);
mutex_lock(&smu->message_lock);
- if (smu_version >= 0x00441400) {
+ if (smu->smc_fw_version >= 0x00441400) {
ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, SMU_RESET_MODE_2);
/* This is similar to FLR, wait till max FLR timeout */
msleep(100);
@@ -1960,7 +1910,7 @@ static int aldebaran_mode2_reset(struct smu_context *smu)
} else {
dev_err(adev->dev, "smu fw 0x%x does not support MSG_GfxDeviceDriverReset MSG\n",
- smu_version);
+ smu->smc_fw_version);
}
if (ret == 1)
@@ -1983,14 +1933,20 @@ static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu)
{
#if 0
struct amdgpu_device *adev = smu->adev;
- u32 smu_version;
uint32_t val;
+ uint32_t smu_version;
+ int ret;
+
/**
* PM FW version support mode1 reset from 68.07
*/
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if (ret)
+ return false;
+
if ((smu_version < 0x00440700))
return false;
+
/**
* mode1 reset relies on PSP, so we should check if
* PSP is alive.
@@ -2034,19 +1990,10 @@ static int aldebaran_smu_send_hbm_bad_page_num(struct smu_context *smu,
static int aldebaran_check_bad_channel_info_support(struct smu_context *smu)
{
- uint32_t if_version = 0xff, smu_version = 0xff;
- int ret = 0;
-
- ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
- if (ret) {
- /* return not support if failed get smu_version */
- ret = -EOPNOTSUPP;
- }
-
- if (smu_version < SUPPORT_BAD_CHANNEL_INFO_MSG_VERSION)
- ret = -EOPNOTSUPP;
+ if (smu->smc_fw_version < SUPPORT_BAD_CHANNEL_INFO_MSG_VERSION)
+ return -EOPNOTSUPP;
- return ret;
+ return 0;
}
static int aldebaran_send_hbm_bad_channel_flag(struct smu_context *smu,
@@ -2074,7 +2021,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
.set_default_dpm_table = aldebaran_set_default_dpm_table,
.populate_umd_state_clk = aldebaran_populate_umd_state_clk,
.get_thermal_temperature_range = aldebaran_get_thermal_temperature_range,
- .print_clk_levels = aldebaran_print_clk_levels,
+ .emit_clk_levels = aldebaran_emit_clk_levels,
.force_clk_levels = aldebaran_force_clk_levels,
.read_sensor = aldebaran_read_sensor,
.set_performance_level = aldebaran_set_performance_level,
@@ -2116,7 +2063,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
.set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range,
.od_edit_dpm_table = aldebaran_usr_edit_dpm_table,
.set_df_cstate = aldebaran_set_df_cstate,
- .allow_xgmi_power_down = aldebaran_allow_xgmi_power_down,
+ .select_xgmi_plpd_policy = aldebaran_select_xgmi_plpd_policy,
.log_thermal_throttling_event = aldebaran_log_thermal_throttling_event,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index c097aed47..68981086b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -82,6 +82,8 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin");
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE
+#define ENABLE_IMU_ARG_GFXOFF_ENABLE 1
+
static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
const int pmfw_decoded_link_speed[5] = {1, 2, 3, 4, 5};
@@ -196,9 +198,9 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
if (!adev->scpm_enabled)
return 0;
- if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7)) ||
- (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) ||
- (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)))
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 7)) ||
+ (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0)) ||
+ (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10)))
return 0;
/* override pptable_id from driver parameter */
@@ -234,7 +236,7 @@ int smu_v13_0_check_fw_status(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
uint32_t mp1_fw_flags;
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 4):
case IP_VERSION(13, 0, 11):
mp1_fw_flags = RREG32_PCIE(MP1_Public |
@@ -269,7 +271,7 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
smu_minor = (smu_version >> 8) & 0xff;
smu_debug = (smu_version >> 0) & 0xff;
if (smu->is_apu ||
- adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 6))
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6))
adev->pm.fw_version = smu_version;
/* only for dGPU w/ SMU13*/
@@ -802,7 +804,7 @@ int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
int ret = 0;
struct amdgpu_device *adev = smu->adev;
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 3):
@@ -1170,7 +1172,7 @@ int smu_v13_0_set_fan_speed_pwm(struct smu_context *smu,
uint32_t duty100, duty;
uint64_t tmp64;
- speed = MIN(speed, 255);
+ speed = min_t(uint32_t, speed, 255);
if (smu_v13_0_auto_fan_control(smu, 0))
return -EINVAL;
@@ -1782,7 +1784,7 @@ int smu_v13_0_set_performance_level(struct smu_context *smu,
* Unset those settings for SMU 13.0.2. As soft limits settings
* for those clock domains are not supported.
*/
- if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)) {
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2)) {
mclk_min = mclk_max = 0;
socclk_min = socclk_max = 0;
vclk_min = vclk_max = 0;
@@ -1929,7 +1931,7 @@ static int smu_v13_0_get_dpm_level_count(struct smu_context *smu,
ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
/* SMU v13.0.2 FW returns 0 based max level, increment by one for it */
- if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)) && (!ret && value))
+ if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2)) && (!ret && value))
++(*value);
return ret;
@@ -1989,7 +1991,7 @@ int smu_v13_0_set_single_dpm_table(struct smu_context *smu,
return ret;
}
- if (smu->adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 2)) {
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 2)) {
ret = smu_v13_0_get_fine_grained_status(smu,
clk_type,
&single_dpm_table->is_fine_grained);
@@ -2304,11 +2306,17 @@ int smu_v13_0_baco_exit(struct smu_context *smu)
int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu)
{
uint16_t index;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableGfxImu,
+ ENABLE_IMU_ARG_GFXOFF_ENABLE, NULL);
+ }
index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
SMU_MSG_EnableGfxImu);
- /* Param 1 to tell PMFW to enable GFXOFF feature */
- return smu_cmn_send_msg_without_waiting(smu, index, 1);
+ return smu_cmn_send_msg_without_waiting(smu, index,
+ ENABLE_IMU_ARG_GFXOFF_ENABLE);
}
int smu_v13_0_od_edit_dpm_table(struct smu_context *smu,
@@ -2471,3 +2479,16 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
return 0;
}
+
+int smu_v13_0_disable_pmfw_state(struct smu_context *smu)
+{
+ int ret;
+ struct amdgpu_device *adev = smu->adev;
+
+ WREG32_PCIE(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff), 0);
+
+ ret = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+
+ return ret == 0 ? 0 : -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 4022dd44e..5625a6e57 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -101,6 +101,12 @@
#define PP_OD_FEATURE_UCLK_FMIN 2
#define PP_OD_FEATURE_UCLK_FMAX 3
#define PP_OD_FEATURE_GFX_VF_CURVE 4
+#define PP_OD_FEATURE_FAN_CURVE_TEMP 5
+#define PP_OD_FEATURE_FAN_CURVE_PWM 6
+#define PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT 7
+#define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8
+#define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9
+#define PP_OD_FEATURE_FAN_MINIMUM_PWM 10
#define LINK_SPEED_MAX 3
@@ -176,6 +182,7 @@ static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(VCLK1, PPCLK_VCLK_1),
CLK_MAP(DCLK, PPCLK_DCLK_0),
CLK_MAP(DCLK1, PPCLK_DCLK_1),
+ CLK_MAP(DCEFCLK, PPCLK_DCFCLK),
};
static struct cmn2asic_mapping smu_v13_0_0_feature_mask_map[SMU_FEATURE_COUNT] = {
@@ -289,7 +296,6 @@ smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu,
uint32_t *feature_mask, uint32_t num)
{
struct amdgpu_device *adev = smu->adev;
- u32 smu_version;
if (num > 2)
return -EINVAL;
@@ -309,8 +315,7 @@ smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu,
*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
/* PMFW 78.58 contains a critical fix for gfxoff feature */
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if ((smu_version < 0x004e3a00) ||
+ if ((smu->smc_fw_version < 0x004e3a00) ||
!(adev->pm.pp_feature & PP_GFXOFF_MASK))
*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT);
@@ -341,13 +346,10 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
table_context->power_play_table;
struct smu_baco_context *smu_baco = &smu->smu_baco;
PPTable_t *pptable = smu->smu_table.driver_pptable;
-#if 0
- PPTable_t *pptable = smu->smu_table.driver_pptable;
const OverDriveLimits_t * const overdrive_upperlimits =
&pptable->SkuTable.OverDriveLimitsBasicMax;
const OverDriveLimits_t * const overdrive_lowerlimits =
&pptable->SkuTable.OverDriveLimitsMin;
-#endif
if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC)
smu->dc_controlled_by_gpio = true;
@@ -359,27 +361,18 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
smu_baco->maco_support = true;
}
- /*
- * We are in the transition to a new OD mechanism.
- * Disable the OD feature support for SMU13 temporarily.
- * TODO: get this reverted when new OD mechanism online
- */
-#if 0
if (!overdrive_lowerlimits->FeatureCtrlMask ||
!overdrive_upperlimits->FeatureCtrlMask)
smu->od_enabled = false;
+ table_context->thermal_controller_type =
+ powerplay_table->thermal_controller_type;
+
/*
* Instead of having its own buffer space and get overdrive_table copied,
* smu->od_settings just points to the actual overdrive_table
*/
smu->od_settings = &powerplay_table->overdrive_table;
-#else
- smu->od_enabled = false;
-#endif
-
- table_context->thermal_controller_type =
- powerplay_table->thermal_controller_type;
smu->adev->pm.no_fan =
!(pptable->SkuTable.FeaturesToRun[0] & (1 << FEATURE_FAN_CONTROL_BIT));
@@ -707,6 +700,22 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu)
pcie_table->num_of_link_levels++;
}
+ /* dcefclk dpm table setup */
+ dpm_table = &dpm_context->dpm_tables.dcef_table;
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
+ ret = smu_v13_0_set_single_dpm_table(smu,
+ SMU_DCEFCLK,
+ dpm_table);
+ if (ret)
+ return ret;
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
+ dpm_table->dpm_levels[0].enabled = true;
+ dpm_table->min = dpm_table->dpm_levels[0].value;
+ dpm_table->max = dpm_table->dpm_levels[0].value;
+ }
+
return 0;
}
@@ -794,6 +803,9 @@ static int smu_v13_0_0_get_smu_metrics_data(struct smu_context *smu,
case METRICS_CURR_FCLK:
*value = metrics->CurrClock[PPCLK_FCLK];
break;
+ case METRICS_CURR_DCEFCLK:
+ *value = metrics->CurrClock[PPCLK_DCFCLK];
+ break;
case METRICS_AVERAGE_GFXCLK:
if (metrics->AverageGfxActivity <= SMU_13_0_0_BUSY_THRESHOLD)
*value = metrics->AverageGfxclkFrequencyPostDs;
@@ -1047,6 +1059,9 @@ static int smu_v13_0_0_get_current_clk_freq_by_table(struct smu_context *smu,
case PPCLK_DCLK_1:
member_type = METRICS_AVERAGE_DCLK1;
break;
+ case PPCLK_DCFCLK:
+ member_type = METRICS_CURR_DCEFCLK;
+ break;
default:
return -EINVAL;
}
@@ -1099,6 +1114,30 @@ static void smu_v13_0_0_get_od_setting_limits(struct smu_context *smu,
od_min_setting = overdrive_lowerlimits->VoltageOffsetPerZoneBoundary;
od_max_setting = overdrive_upperlimits->VoltageOffsetPerZoneBoundary;
break;
+ case PP_OD_FEATURE_FAN_CURVE_TEMP:
+ od_min_setting = overdrive_lowerlimits->FanLinearTempPoints;
+ od_max_setting = overdrive_upperlimits->FanLinearTempPoints;
+ break;
+ case PP_OD_FEATURE_FAN_CURVE_PWM:
+ od_min_setting = overdrive_lowerlimits->FanLinearPwmPoints;
+ od_max_setting = overdrive_upperlimits->FanLinearPwmPoints;
+ break;
+ case PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT:
+ od_min_setting = overdrive_lowerlimits->AcousticLimitRpmThreshold;
+ od_max_setting = overdrive_upperlimits->AcousticLimitRpmThreshold;
+ break;
+ case PP_OD_FEATURE_FAN_ACOUSTIC_TARGET:
+ od_min_setting = overdrive_lowerlimits->AcousticTargetRpmThreshold;
+ od_max_setting = overdrive_upperlimits->AcousticTargetRpmThreshold;
+ break;
+ case PP_OD_FEATURE_FAN_TARGET_TEMPERATURE:
+ od_min_setting = overdrive_lowerlimits->FanTargetTemperature;
+ od_max_setting = overdrive_upperlimits->FanTargetTemperature;
+ break;
+ case PP_OD_FEATURE_FAN_MINIMUM_PWM:
+ od_min_setting = overdrive_lowerlimits->FanMinimumPwm;
+ od_max_setting = overdrive_upperlimits->FanMinimumPwm;
+ break;
default:
od_min_setting = od_max_setting = INT_MAX;
break;
@@ -1196,6 +1235,9 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
case SMU_DCLK1:
single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
break;
+ case SMU_DCEFCLK:
+ single_dpm_table = &(dpm_context->dpm_tables.dcef_table);
+ break;
default:
break;
}
@@ -1209,6 +1251,7 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
case SMU_VCLK1:
case SMU_DCLK:
case SMU_DCLK1:
+ case SMU_DCEFCLK:
ret = smu_v13_0_0_get_current_clk_freq_by_table(smu, clk_type, &curr_freq);
if (ret) {
dev_err(smu->adev->dev, "Failed to get current clock freq!");
@@ -1304,16 +1347,115 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
od_table->OverDriveTable.UclkFmax);
break;
- case SMU_OD_VDDC_CURVE:
+ case SMU_OD_VDDGFX_OFFSET:
if (!smu_v13_0_0_is_od_feature_supported(smu,
PP_OD_FEATURE_GFX_VF_CURVE_BIT))
break;
- size += sysfs_emit_at(buf, size, "OD_VDDC_CURVE:\n");
- for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
- size += sysfs_emit_at(buf, size, "%d: %dmv\n",
+ size += sysfs_emit_at(buf, size, "OD_VDDGFX_OFFSET:\n");
+ size += sysfs_emit_at(buf, size, "%dmV\n",
+ od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[0]);
+ break;
+
+ case SMU_OD_FAN_CURVE:
+ if (!smu_v13_0_0_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_FAN_CURVE:\n");
+ for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++)
+ size += sysfs_emit_at(buf, size, "%d: %dC %d%%\n",
i,
- od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i]);
+ (int)od_table->OverDriveTable.FanLinearTempPoints[i],
+ (int)od_table->OverDriveTable.FanLinearPwmPoints[i]);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_CURVE_TEMP,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "FAN_CURVE(hotspot temp): %uC %uC\n",
+ min_value, max_value);
+
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_CURVE_PWM,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "FAN_CURVE(fan speed): %u%% %u%%\n",
+ min_value, max_value);
+
+ break;
+
+ case SMU_OD_ACOUSTIC_LIMIT:
+ if (!smu_v13_0_0_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_LIMIT:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.AcousticLimitRpmThreshold);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "ACOUSTIC_LIMIT: %u %u\n",
+ min_value, max_value);
+ break;
+
+ case SMU_OD_ACOUSTIC_TARGET:
+ if (!smu_v13_0_0_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_TARGET:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.AcousticTargetRpmThreshold);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ACOUSTIC_TARGET,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "ACOUSTIC_TARGET: %u %u\n",
+ min_value, max_value);
+ break;
+
+ case SMU_OD_FAN_TARGET_TEMPERATURE:
+ if (!smu_v13_0_0_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "FAN_TARGET_TEMPERATURE:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.FanTargetTemperature);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_TARGET_TEMPERATURE,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "TARGET_TEMPERATURE: %u %u\n",
+ min_value, max_value);
+ break;
+
+ case SMU_OD_FAN_MINIMUM_PWM:
+ if (!smu_v13_0_0_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "FAN_MINIMUM_PWM:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.FanMinimumPwm);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_MINIMUM_PWM,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "MINIMUM_PWM: %u %u\n",
+ min_value, max_value);
break;
case SMU_OD_RANGE:
@@ -1355,7 +1497,7 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
PP_OD_FEATURE_GFX_VF_CURVE,
&min_value,
&max_value);
- size += sysfs_emit_at(buf, size, "VDDC_CURVE: %7dmv %10dmv\n",
+ size += sysfs_emit_at(buf, size, "VDDGFX_OFFSET: %7dmv %10dmv\n",
min_value, max_value);
}
break;
@@ -1367,6 +1509,60 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
return size;
}
+
+static int smu_v13_0_0_od_restore_table_single(struct smu_context *smu, long input)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTableExternal_t *boot_overdrive_table =
+ (OverDriveTableExternal_t *)table_context->boot_overdrive_table;
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)table_context->overdrive_table;
+ struct amdgpu_device *adev = smu->adev;
+ int i;
+
+ switch (input) {
+ case PP_OD_EDIT_FAN_CURVE:
+ for (i = 0; i < NUM_OD_FAN_MAX_POINTS; i++) {
+ od_table->OverDriveTable.FanLinearTempPoints[i] =
+ boot_overdrive_table->OverDriveTable.FanLinearTempPoints[i];
+ od_table->OverDriveTable.FanLinearPwmPoints[i] =
+ boot_overdrive_table->OverDriveTable.FanLinearPwmPoints[i];
+ }
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ case PP_OD_EDIT_ACOUSTIC_LIMIT:
+ od_table->OverDriveTable.AcousticLimitRpmThreshold =
+ boot_overdrive_table->OverDriveTable.AcousticLimitRpmThreshold;
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ case PP_OD_EDIT_ACOUSTIC_TARGET:
+ od_table->OverDriveTable.AcousticTargetRpmThreshold =
+ boot_overdrive_table->OverDriveTable.AcousticTargetRpmThreshold;
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ case PP_OD_EDIT_FAN_TARGET_TEMPERATURE:
+ od_table->OverDriveTable.FanTargetTemperature =
+ boot_overdrive_table->OverDriveTable.FanTargetTemperature;
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ case PP_OD_EDIT_FAN_MINIMUM_PWM:
+ od_table->OverDriveTable.FanMinimumPwm =
+ boot_overdrive_table->OverDriveTable.FanMinimumPwm;
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ default:
+ dev_info(adev->dev, "Invalid table index: %ld\n", input);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int smu_v13_0_0_od_edit_dpm_table(struct smu_context *smu,
enum PP_OD_DPM_TABLE_COMMAND type,
long input[],
@@ -1504,39 +1700,167 @@ static int smu_v13_0_0_od_edit_dpm_table(struct smu_context *smu,
}
break;
- case PP_OD_EDIT_VDDC_CURVE:
+ case PP_OD_EDIT_VDDGFX_OFFSET:
if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
- dev_warn(adev->dev, "VF curve setting not supported!\n");
+ dev_warn(adev->dev, "Gfx offset setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFX_VF_CURVE,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
+ od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] = input[0];
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_FAN_CURVE:
+ if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
return -ENOTSUPP;
}
- if (input[0] >= PP_NUM_OD_VF_CURVE_POINTS ||
+ if (input[0] >= NUM_OD_FAN_MAX_POINTS - 1 ||
input[0] < 0)
return -EINVAL;
smu_v13_0_0_get_od_setting_limits(smu,
- PP_OD_FEATURE_GFX_VF_CURVE,
+ PP_OD_FEATURE_FAN_CURVE_TEMP,
&minimum,
&maximum);
if (input[1] < minimum ||
input[1] > maximum) {
- dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n",
+ dev_info(adev->dev, "Fan curve temp setting(%ld) must be within [%d, %d]!\n",
input[1], minimum, maximum);
return -EINVAL;
}
- od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[input[0]] = input[1];
- od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFX_VF_CURVE_BIT;
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_CURVE_PWM,
+ &minimum,
+ &maximum);
+ if (input[2] < minimum ||
+ input[2] > maximum) {
+ dev_info(adev->dev, "Fan curve pwm setting(%ld) must be within [%d, %d]!\n",
+ input[2], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.FanLinearTempPoints[input[0]] = input[1];
+ od_table->OverDriveTable.FanLinearPwmPoints[input[0]] = input[2];
+ od_table->OverDriveTable.FanMode = FAN_MODE_MANUAL_LINEAR;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_ACOUSTIC_LIMIT:
+ if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "acoustic limit threshold setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.AcousticLimitRpmThreshold = input[0];
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_ACOUSTIC_TARGET:
+ if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ACOUSTIC_TARGET,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "acoustic target threshold setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.AcousticTargetRpmThreshold = input[0];
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_FAN_TARGET_TEMPERATURE:
+ if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_TARGET_TEMPERATURE,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "fan target temperature setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.FanTargetTemperature = input[0];
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_FAN_MINIMUM_PWM:
+ if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_MINIMUM_PWM,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "fan minimum pwm setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.FanMinimumPwm = input[0];
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
break;
case PP_OD_RESTORE_DEFAULT_TABLE:
- feature_ctrlmask = od_table->OverDriveTable.FeatureCtrlMask;
- memcpy(od_table,
+ if (size == 1) {
+ ret = smu_v13_0_0_od_restore_table_single(smu, input[0]);
+ if (ret)
+ return ret;
+ } else {
+ feature_ctrlmask = od_table->OverDriveTable.FeatureCtrlMask;
+ memcpy(od_table,
table_context->boot_overdrive_table,
sizeof(OverDriveTableExternal_t));
- od_table->OverDriveTable.FeatureCtrlMask = feature_ctrlmask;
+ od_table->OverDriveTable.FeatureCtrlMask = feature_ctrlmask;
+ }
fallthrough;
-
case PP_OD_COMMIT_DPM_TABLE:
/*
* The member below instructs PMFW the settings focused in
@@ -1696,7 +2020,6 @@ static int smu_v13_0_0_get_thermal_temperature_range(struct smu_context *smu,
return 0;
}
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu,
void **table)
{
@@ -1720,12 +2043,12 @@ static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM];
gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX];
gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC];
- gpu_metrics->temperature_vrmem = MAX(metrics->AvgTemperature[TEMP_VR_MEM0],
+ gpu_metrics->temperature_vrmem = max(metrics->AvgTemperature[TEMP_VR_MEM0],
metrics->AvgTemperature[TEMP_VR_MEM1]);
gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
- gpu_metrics->average_mm_activity = MAX(metrics->Vcn0ActivityPercentage,
+ gpu_metrics->average_mm_activity = max(metrics->Vcn0ActivityPercentage,
metrics->Vcn1ActivityPercentage);
gpu_metrics->average_socket_power = metrics->AverageSocketPower;
@@ -1779,6 +2102,24 @@ static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v1_3);
}
+static void smu_v13_0_0_set_supported_od_feature_mask(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (smu_v13_0_0_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ adev->pm.od_feature_mask |= OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE |
+ OD_OPS_SUPPORT_FAN_CURVE_SET |
+ OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE |
+ OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET |
+ OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE |
+ OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET |
+ OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE |
+ OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET |
+ OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE |
+ OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET;
+}
+
static int smu_v13_0_0_set_default_od_settings(struct smu_context *smu)
{
OverDriveTableExternal_t *od_table =
@@ -1828,8 +2169,24 @@ static int smu_v13_0_0_set_default_od_settings(struct smu_context *smu)
for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
user_od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] =
user_od_table_bak.OverDriveTable.VoltageOffsetPerZoneBoundary[i];
+ for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++) {
+ user_od_table->OverDriveTable.FanLinearTempPoints[i] =
+ user_od_table_bak.OverDriveTable.FanLinearTempPoints[i];
+ user_od_table->OverDriveTable.FanLinearPwmPoints[i] =
+ user_od_table_bak.OverDriveTable.FanLinearPwmPoints[i];
+ }
+ user_od_table->OverDriveTable.AcousticLimitRpmThreshold =
+ user_od_table_bak.OverDriveTable.AcousticLimitRpmThreshold;
+ user_od_table->OverDriveTable.AcousticTargetRpmThreshold =
+ user_od_table_bak.OverDriveTable.AcousticTargetRpmThreshold;
+ user_od_table->OverDriveTable.FanTargetTemperature =
+ user_od_table_bak.OverDriveTable.FanTargetTemperature;
+ user_od_table->OverDriveTable.FanMinimumPwm =
+ user_od_table_bak.OverDriveTable.FanMinimumPwm;
}
+ smu_v13_0_0_set_supported_od_feature_mask(smu);
+
return 0;
}
@@ -1840,9 +2197,10 @@ static int smu_v13_0_0_restore_user_od_settings(struct smu_context *smu)
OverDriveTableExternal_t *user_od_table = table_context->user_overdrive_table;
int res;
- user_od_table->OverDriveTable.FeatureCtrlMask = 1U << PP_OD_FEATURE_GFXCLK_BIT |
- 1U << PP_OD_FEATURE_UCLK_BIT |
- 1U << PP_OD_FEATURE_GFX_VF_CURVE_BIT;
+ user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) |
+ BIT(PP_OD_FEATURE_UCLK_BIT) |
+ BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) |
+ BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
res = smu_v13_0_0_upload_overdrive_table(smu, user_od_table);
user_od_table->OverDriveTable.FeatureCtrlMask = 0;
if (res == 0)
@@ -1928,8 +2286,6 @@ static void smu_v13_0_0_get_unique_id(struct smu_context *smu)
out:
adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
- if (adev->serial[0] == '\0')
- sprintf(adev->serial, "%016llx", adev->unique_id);
}
static int smu_v13_0_0_get_fan_speed_pwm(struct smu_context *smu,
@@ -1949,7 +2305,7 @@ static int smu_v13_0_0_get_fan_speed_pwm(struct smu_context *smu,
}
/* Convert the PMFW output which is in percent to pwm(255) based */
- *speed = MIN(*speed * 255 / 100, 255);
+ *speed = min(*speed * 255 / 100, (uint32_t)255);
return 0;
}
@@ -1985,16 +2341,18 @@ static int smu_v13_0_0_enable_mgpu_fan_boost(struct smu_context *smu)
}
static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
- uint32_t *current_power_limit,
- uint32_t *default_power_limit,
- uint32_t *max_power_limit)
+ uint32_t *current_power_limit,
+ uint32_t *default_power_limit,
+ uint32_t *max_power_limit,
+ uint32_t *min_power_limit)
{
struct smu_table_context *table_context = &smu->smu_table;
struct smu_13_0_0_powerplay_table *powerplay_table =
(struct smu_13_0_0_powerplay_table *)table_context->power_play_table;
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
- uint32_t power_limit, od_percent;
+ uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
if (smu_v13_0_get_current_power_limit(smu, &power_limit))
power_limit = smu->adev->pm.ac_power ?
@@ -2006,16 +2364,25 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (max_power_limit) {
- if (smu->od_enabled) {
- od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+ if (smu->od_enabled) {
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+ } else {
+ od_percent_upper = 0;
+ od_percent_lower = 100;
+ }
- dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);
+ dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
+ od_percent_upper, od_percent_lower, power_limit);
- power_limit *= (100 + od_percent);
- power_limit /= 100;
- }
- *max_power_limit = power_limit;
+ if (max_power_limit) {
+ *max_power_limit = msg_limit * (100 + od_percent_upper);
+ *max_power_limit /= 100;
+ }
+
+ if (min_power_limit) {
+ *min_power_limit = power_limit * (100 - od_percent_lower);
+ *min_power_limit /= 100;
}
return 0;
@@ -2110,6 +2477,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
DpmActivityMonitorCoeffInt_t *activity_monitor =
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
int workload_type, ret = 0;
+ u32 workload_mask;
smu->power_profile_mode = input[size];
@@ -2171,9 +2539,23 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
if (workload_type < 0)
return -EINVAL;
+ workload_mask = 1 << workload_type;
+
+ /* Add optimizations for SMU13.0.0. Reuse the power saving profile */
+ if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE &&
+ (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0)) &&
+ ((smu->adev->pm.fw_version == 0x004e6601) ||
+ (smu->adev->pm.fw_version >= 0x004e7400))) {
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ PP_SMC_POWER_PROFILE_POWERSAVING);
+ if (workload_type >= 0)
+ workload_mask |= 1 << workload_type;
+ }
+
return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetWorkloadMask,
- 1 << workload_type,
+ workload_mask,
NULL);
}
@@ -2193,27 +2575,37 @@ static int smu_v13_0_0_baco_enter(struct smu_context *smu)
static int smu_v13_0_0_baco_exit(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
+ int ret;
if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
/* Wait for PMFW handling for the Dstate change */
usleep_range(10000, 11000);
- return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+ ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
} else {
- return smu_v13_0_baco_exit(smu);
+ ret = smu_v13_0_baco_exit(smu);
}
+
+ if (!ret)
+ adev->gfx.is_poweron = false;
+
+ return ret;
}
static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
u32 smu_version;
+ int ret;
/* SRIOV does not support SMU mode1 reset */
if (amdgpu_sriov_vf(adev))
return false;
/* PMFW support is available since 78.41 */
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if (ret)
+ return false;
+
if (smu_version < 0x004e2900)
return false;
@@ -2404,13 +2796,10 @@ static void smu_v13_0_0_set_mode1_reset_param(struct smu_context *smu,
uint32_t supported_version,
uint32_t *param)
{
- uint32_t smu_version;
struct amdgpu_device *adev = smu->adev;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
-
- if ((smu_version >= supported_version) &&
+ if ((smu->smc_fw_version >= supported_version) &&
ras && atomic_read(&ras->in_recovery))
/* Set RAS fatal error reset flag */
*param = 1 << 16;
@@ -2424,7 +2813,7 @@ static int smu_v13_0_0_mode1_reset(struct smu_context *smu)
uint32_t param;
struct amdgpu_device *adev = smu->adev;
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 0):
/* SMU 13_0_0 PMFW supports RAS fatal error reset from 78.77 */
smu_v13_0_0_set_mode1_reset_param(smu, 0x004e4d00, &param);
@@ -2457,7 +2846,7 @@ static int smu_v13_0_0_mode2_reset(struct smu_context *smu)
int ret;
struct amdgpu_device *adev = smu->adev;
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10))
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode2Reset, NULL);
else
return -EOPNOTSUPP;
@@ -2469,7 +2858,7 @@ static int smu_v13_0_0_enable_gfx_features(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10))
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures,
FEATURE_PWR_GFX, NULL);
else
@@ -2526,15 +2915,10 @@ static int smu_v13_0_0_send_bad_mem_channel_flag(struct smu_context *smu,
static int smu_v13_0_0_check_ecc_table_support(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- uint32_t if_version = 0xff, smu_version = 0xff;
int ret = 0;
- ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
- if (ret)
- return -EOPNOTSUPP;
-
- if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)) &&
- (smu_version >= SUPPORT_ECCTABLE_SMU_13_0_10_VERSION))
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10)) &&
+ (smu->smc_fw_version >= SUPPORT_ECCTABLE_SMU_13_0_10_VERSION))
return ret;
else
return -EOPNOTSUPP;
@@ -2581,6 +2965,55 @@ static ssize_t smu_v13_0_0_get_ecc_info(struct smu_context *smu,
return ret;
}
+static int smu_v13_0_0_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ SkuTable_t *skutable = &pptable->SkuTable;
+ uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)table_context->overdrive_table;
+ int ret = 0;
+
+ if (limit_type != SMU_DEFAULT_PPT_LIMIT)
+ return -EINVAL;
+
+ if (limit <= msg_limit) {
+ if (smu->current_power_limit > msg_limit) {
+ od_table->OverDriveTable.Ppt = 0;
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
+
+ ret = smu_v13_0_0_upload_overdrive_table(smu, od_table);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+ return ret;
+ }
+ }
+ return smu_v13_0_set_power_limit(smu, limit_type, limit);
+ } else if (smu->od_enabled) {
+ ret = smu_v13_0_set_power_limit(smu, limit_type, msg_limit);
+ if (ret)
+ return ret;
+
+ od_table->OverDriveTable.Ppt = (limit * 100) / msg_limit - 100;
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
+
+ ret = smu_v13_0_0_upload_overdrive_table(smu, od_table);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+ return ret;
+ }
+
+ smu->current_power_limit = limit;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
@@ -2635,7 +3068,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.set_fan_control_mode = smu_v13_0_set_fan_control_mode,
.enable_mgpu_fan_boost = smu_v13_0_0_enable_mgpu_fan_boost,
.get_power_limit = smu_v13_0_0_get_power_limit,
- .set_power_limit = smu_v13_0_set_power_limit,
+ .set_power_limit = smu_v13_0_0_set_power_limit,
.set_power_source = smu_v13_0_set_power_source,
.get_power_profile_mode = smu_v13_0_0_get_power_profile_mode,
.set_power_profile_mode = smu_v13_0_0_set_power_profile_mode,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index 626591f54..bb98156b2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -1144,7 +1144,7 @@ void smu_v13_0_4_set_ppt_funcs(struct smu_context *smu)
smu->smc_driver_if_version = SMU13_0_4_DRIVER_IF_VERSION;
smu->is_apu = true;
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 4))
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 4))
smu_v13_0_4_set_smu_mailbox_registers(smu);
else
smu_v13_0_set_smu_mailbox_registers(smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
index c6e7c2115..0dce672ac 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
@@ -1060,7 +1060,7 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu,
return -EINVAL;
}
- if (sclk_min && sclk_max && smu_v13_0_5_clk_dpm_is_enabled(smu, SMU_SCLK)) {
+ if (sclk_min && sclk_max) {
ret = smu_v13_0_5_set_soft_freq_limited_range(smu,
SMU_SCLK,
sclk_min,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 24d681143..8cd2b8cc3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -44,9 +44,11 @@
#include "amdgpu_xgmi.h"
#include <linux/pci.h>
#include "amdgpu_ras.h"
+#include "amdgpu_mca.h"
#include "smu_cmn.h"
#include "mp/mp_13_0_6_offset.h"
#include "mp/mp_13_0_6_sh_mask.h"
+#include "umc_v12_0.h"
#undef MP1_Public
#undef smnMP1_FIRMWARE_FLAGS
@@ -64,6 +66,8 @@
#undef pr_info
#undef pr_debug
+MODULE_FIRMWARE("amdgpu/smu_13_0_6.bin");
+
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
#define SMU_13_0_6_FEA_MAP(smu_feature, smu_13_0_6_feature) \
@@ -91,6 +95,31 @@
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0x5
#define LINK_SPEED_MAX 4
+#define SMU_13_0_6_DSCLK_THRESHOLD 140
+
+#define MCA_BANK_IPID(_ip, _hwid, _type) \
+ [AMDGPU_MCA_IP_##_ip] = { .hwid = _hwid, .mcatype = _type, }
+
+struct mca_bank_ipid {
+ enum amdgpu_mca_ip ip;
+ uint16_t hwid;
+ uint16_t mcatype;
+};
+
+struct mca_ras_info {
+ enum amdgpu_ras_block blkid;
+ enum amdgpu_mca_ip ip;
+ int *err_code_array;
+ int err_code_count;
+ int (*get_err_count)(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
+ enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count);
+ bool (*bank_is_valid)(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
+ enum amdgpu_mca_error_type type, struct mca_bank_entry *entry);
+};
+
+#define P2S_TABLE_ID_A 0x50325341
+#define P2S_TABLE_ID_X 0x50325358
+
static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
@@ -133,6 +162,13 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareForDriverUnload, 0),
MSG_MAP(GetCTFLimit, PPSMC_MSG_GetCTFLimit, 0),
+ MSG_MAP(GetThermalLimit, PPSMC_MSG_ReadThrottlerLimit, 0),
+ MSG_MAP(ClearMcaOnRead, PPSMC_MSG_ClearMcaOnRead, 0),
+ MSG_MAP(QueryValidMcaCount, PPSMC_MSG_QueryValidMcaCount, 0),
+ MSG_MAP(QueryValidMcaCeCount, PPSMC_MSG_QueryValidMcaCeCount, 0),
+ MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, 0),
+ MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, 0),
+ MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0),
};
static const struct cmn2asic_mapping smu_v13_0_6_clk_map[SMU_CLK_COUNT] = {
@@ -207,6 +243,10 @@ struct PPTable_t {
};
#define SMUQ10_TO_UINT(x) ((x) >> 10)
+#define SMUQ10_FRAC(x) ((x) & 0x3ff)
+#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))
+#define GET_METRIC_FIELD(field) ((adev->flags & AMD_IS_APU) ?\
+ (metrics_a->field) : (metrics_x->field))
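SMUQ10_ROUND converts a Q10 fixed-point value (10 fractional bits) to the nearest integer, while GET_METRIC_FIELD selects the APU (MetricsTableA_t) or dGPU (MetricsTableX_t) view of the shared metrics buffer. A worked example of the rounding:

	/* Q10: one unit = 1/1024. 0x5A00 is 22.5 in Q10. */
	uint32_t q = 0x5A00;
	uint32_t t = SMUQ10_TO_UINT(q);	/* 22 (truncates) */
	uint32_t r = SMUQ10_ROUND(q);	/* frac 0x200 >= 0x200, so 23 */
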
struct smu_v13_0_6_dpm_map {
enum smu_clk_type clk_type;
@@ -215,6 +255,70 @@ struct smu_v13_0_6_dpm_map {
uint32_t *freq_table;
};
+static int smu_v13_0_6_init_microcode(struct smu_context *smu)
+{
+ const struct smc_firmware_header_v2_1 *v2_1;
+ const struct common_firmware_header *hdr;
+ struct amdgpu_firmware_info *ucode = NULL;
+ struct smc_soft_pptable_entry *entries;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t p2s_table_id = P2S_TABLE_ID_A;
+ int ret = 0, i, p2stable_count;
+ char ucode_prefix[15];
+ char fw_name[30];
+
+ /* No need to load P2S tables in IOV mode */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ if (!(adev->flags & AMD_IS_APU))
+ p2s_table_id = P2S_TABLE_ID_X;
+
+ amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix,
+ sizeof(ucode_prefix));
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
+
+ ret = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
+ if (ret)
+ goto out;
+
+ hdr = (const struct common_firmware_header *)adev->pm.fw->data;
+ amdgpu_ucode_print_smc_hdr(hdr);
+
+	/* The SMU v13.0.6 binary file doesn't carry pptables; instead, the
+	 * entries are used to carry p2s tables.
+	 */
+ v2_1 = (const struct smc_firmware_header_v2_1 *)adev->pm.fw->data;
+ entries = (struct smc_soft_pptable_entry
+ *)((uint8_t *)v2_1 +
+ le32_to_cpu(v2_1->pptable_entry_offset));
+ p2stable_count = le32_to_cpu(v2_1->pptable_count);
+ for (i = 0; i < p2stable_count; i++) {
+ if (le32_to_cpu(entries[i].id) == p2s_table_id) {
+ smu->pptable_firmware.data =
+ ((uint8_t *)v2_1 +
+ le32_to_cpu(entries[i].ppt_offset_bytes));
+ smu->pptable_firmware.size =
+ le32_to_cpu(entries[i].ppt_size_bytes);
+ break;
+ }
+ }
+
+ if (smu->pptable_firmware.data && smu->pptable_firmware.size) {
+ ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
+ ucode->ucode_id = AMDGPU_UCODE_ID_P2S_TABLE;
+ ucode->fw = &smu->pptable_firmware;
+ adev->firmware.fw_size += ALIGN(ucode->fw->size, PAGE_SIZE);
+ }
+
+ return 0;
+out:
+ amdgpu_ucode_release(&adev->pm.fw);
+
+ return ret;
+}
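The two P2S table IDs defined earlier are plain ASCII tags, read most-significant byte first: 0x50325341 is "P2SA" (the APU table) and 0x50325358 is "P2SX". A minimal, illustrative decode:

	/* Illustrative: unpack the big-endian ASCII tag. */
	uint32_t id = 0x50325341;
	char tag[5] = { (char)(id >> 24), (char)(id >> 16),
			(char)(id >> 8), (char)id, '\0' };
	/* tag == "P2SA"; 0x50325358 yields "P2SX" */
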
+
static int smu_v13_0_6_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
@@ -225,7 +329,8 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(MetricsTable_t),
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS,
+ max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)),
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
@@ -233,12 +338,13 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
- smu_table->metrics_table = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
+ smu_table->metrics_table = kzalloc(max(sizeof(MetricsTableX_t),
+ sizeof(MetricsTableA_t)), GFP_KERNEL);
if (!smu_table->metrics_table)
return -ENOMEM;
smu_table->metrics_time = 0;
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_5);
smu_table->gpu_metrics_table =
kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table) {
@@ -329,9 +435,11 @@ static int smu_v13_0_6_get_metrics_table(struct smu_context *smu,
static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+ MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
+ MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
+ struct amdgpu_device *adev = smu->adev;
int ret, i, retry = 100;
/* Store one-time values in driver PPTable */
@@ -342,7 +450,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
return ret;
/* Ensure that metrics have been updated */
- if (metrics->AccumulationCounter)
+ if (GET_METRIC_FIELD(AccumulationCounter))
break;
usleep_range(1000, 1100);
@@ -352,29 +460,29 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
return -ETIME;
pptable->MaxSocketPowerLimit =
- SMUQ10_TO_UINT(metrics->MaxSocketPowerLimit);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit));
pptable->MaxGfxclkFrequency =
- SMUQ10_TO_UINT(metrics->MaxGfxclkFrequency);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency));
pptable->MinGfxclkFrequency =
- SMUQ10_TO_UINT(metrics->MinGfxclkFrequency);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency));
for (i = 0; i < 4; ++i) {
pptable->FclkFrequencyTable[i] =
- SMUQ10_TO_UINT(metrics->FclkFrequencyTable[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable)[i]);
pptable->UclkFrequencyTable[i] =
- SMUQ10_TO_UINT(metrics->UclkFrequencyTable[i]);
- pptable->SocclkFrequencyTable[i] = SMUQ10_TO_UINT(
- metrics->SocclkFrequencyTable[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable)[i]);
+ pptable->SocclkFrequencyTable[i] = SMUQ10_ROUND(
+ GET_METRIC_FIELD(SocclkFrequencyTable)[i]);
pptable->VclkFrequencyTable[i] =
- SMUQ10_TO_UINT(metrics->VclkFrequencyTable[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable)[i]);
pptable->DclkFrequencyTable[i] =
- SMUQ10_TO_UINT(metrics->DclkFrequencyTable[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable)[i]);
pptable->LclkFrequencyTable[i] =
- SMUQ10_TO_UINT(metrics->LclkFrequencyTable[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable)[i]);
}
/* use AID0 serial number by default */
- pptable->PublicSerialNumber_AID = metrics->PublicSerialNumber_AID[0];
+ pptable->PublicSerialNumber_AID = GET_METRIC_FIELD(PublicSerialNumber_AID)[0];
pptable->Init = true;
}
@@ -676,9 +784,9 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
uint32_t *value)
{
struct smu_table_context *smu_table = &smu->smu_table;
- MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+ MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
+ MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
struct amdgpu_device *adev = smu->adev;
- uint32_t smu_version;
int ret = 0;
int xcc_id;
@@ -690,53 +798,52 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
switch (member) {
case METRICS_CURR_GFXCLK:
case METRICS_AVERAGE_GFXCLK:
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if (smu_version >= 0x552F00) {
+ if (smu->smc_fw_version >= 0x552F00) {
xcc_id = GET_INST(GC, 0);
- *value = SMUQ10_TO_UINT(metrics->GfxclkFrequency[xcc_id]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]);
} else {
*value = 0;
}
break;
case METRICS_CURR_SOCCLK:
case METRICS_AVERAGE_SOCCLK:
- *value = SMUQ10_TO_UINT(metrics->SocclkFrequency[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[0]);
break;
case METRICS_CURR_UCLK:
case METRICS_AVERAGE_UCLK:
- *value = SMUQ10_TO_UINT(metrics->UclkFrequency);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency));
break;
case METRICS_CURR_VCLK:
- *value = SMUQ10_TO_UINT(metrics->VclkFrequency[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[0]);
break;
case METRICS_CURR_DCLK:
- *value = SMUQ10_TO_UINT(metrics->DclkFrequency[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[0]);
break;
case METRICS_CURR_FCLK:
- *value = SMUQ10_TO_UINT(metrics->FclkFrequency);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency));
break;
case METRICS_AVERAGE_GFXACTIVITY:
- *value = SMUQ10_TO_UINT(metrics->SocketGfxBusy);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy));
break;
case METRICS_AVERAGE_MEMACTIVITY:
- *value = SMUQ10_TO_UINT(metrics->DramBandwidthUtilization);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization));
break;
case METRICS_CURR_SOCKETPOWER:
- *value = SMUQ10_TO_UINT(metrics->SocketPower) << 8;
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower)) << 8;
break;
case METRICS_TEMPERATURE_HOTSPOT:
- *value = SMUQ10_TO_UINT(metrics->MaxSocketTemperature) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_TEMPERATURE_MEM:
- *value = SMUQ10_TO_UINT(metrics->MaxHbmTemperature) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
/* This is the max of all VRs and not just SOC VR.
* No need to define another data type for the same.
*/
case METRICS_TEMPERATURE_VRSOC:
- *value = SMUQ10_TO_UINT(metrics->MaxVrTemperature) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
default:
@@ -782,13 +889,63 @@ static int smu_v13_0_6_get_current_clk_freq_by_table(struct smu_context *smu,
return smu_v13_0_6_get_smu_metrics_data(smu, member_type, value);
}
+static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, int size,
+ struct smu_13_0_dpm_table *single_dpm_table,
+ uint32_t curr_clk, const char *clk_name)
+{
+ struct pp_clock_levels_with_latency clocks;
+ int i, ret, level = -1;
+ uint32_t clk1, clk2;
+
+ ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ dev_err(smu->adev->dev, "Attempt to get %s clk levels failed!",
+ clk_name);
+ return ret;
+ }
+
+ if (!clocks.num_levels)
+ return -EINVAL;
+
+ if (curr_clk < SMU_13_0_6_DSCLK_THRESHOLD) {
+		size += sysfs_emit_at(buf, size, "S: %uMhz *\n", curr_clk);
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sysfs_emit_at(buf, size, "%d: %uMhz\n", i,
+ clocks.data[i].clocks_in_khz /
+ 1000);
+
+ } else {
+ if ((clocks.num_levels == 1) ||
+ (curr_clk < (clocks.data[0].clocks_in_khz / 1000)))
+ level = 0;
+ for (i = 0; i < clocks.num_levels; i++) {
+ clk1 = clocks.data[i].clocks_in_khz / 1000;
+
+ if (i < (clocks.num_levels - 1))
+ clk2 = clocks.data[i + 1].clocks_in_khz / 1000;
+
+ if (curr_clk == clk1) {
+ level = i;
+ } else if (curr_clk >= clk1 && curr_clk < clk2) {
+ level = (curr_clk - clk1) <= (clk2 - curr_clk) ?
+ i :
+ i + 1;
+ }
+
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i,
+ clk1, (level == i) ? "*" : "");
+ }
+ }
+
+ return size;
+}
+
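When the current clock sits between two DPM levels, smu_v13_0_6_print_clks stars whichever level is closer; below SMU_13_0_6_DSCLK_THRESHOLD (140 MHz) the clock is reported as a deep-sleep state instead. A worked example with made-up levels:

	/* levels = {900, 1200} MHz, curr_clk = 1000 MHz:
	 * (1000 - 900) <= (1200 - 1000), so level 0 is starred.
	 * curr_clk = 100 MHz would print "S: 100Mhz *" plus the level list.
	 */
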
static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
enum smu_clk_type type, char *buf)
{
- int i, now, size = 0;
+ int now, size = 0;
int ret = 0;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
- struct pp_clock_levels_with_latency clocks;
struct smu_13_0_dpm_table *single_dpm_table;
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_13_0_dpm_context *dpm_context = NULL;
@@ -819,7 +976,15 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
min_clk = pstate_table->gfxclk_pstate.curr.min;
max_clk = pstate_table->gfxclk_pstate.curr.max;
- if (!smu_v13_0_6_freqs_in_same_level(now, min_clk) &&
+ if (now < SMU_13_0_6_DSCLK_THRESHOLD) {
+ size += sysfs_emit_at(buf, size, "S: %uMhz *\n",
+ now);
+ size += sysfs_emit_at(buf, size, "0: %uMhz\n",
+ min_clk);
+ size += sysfs_emit_at(buf, size, "1: %uMhz\n",
+ max_clk);
+
+ } else if (!smu_v13_0_6_freqs_in_same_level(now, min_clk) &&
!smu_v13_0_6_freqs_in_same_level(now, max_clk)) {
size += sysfs_emit_at(buf, size, "0: %uMhz\n",
min_clk);
@@ -851,26 +1016,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
}
single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
- ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev,
- "Attempt to get memory clk levels Failed!");
- return ret;
- }
- for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(
- buf, size, "%d: %uMhz %s\n", i,
- clocks.data[i].clocks_in_khz / 1000,
- (clocks.num_levels == 1) ?
- "*" :
- (smu_v13_0_6_freqs_in_same_level(
- clocks.data[i].clocks_in_khz /
- 1000,
- now) ?
- "*" :
- ""));
- break;
+ return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
+ now, "mclk");
case SMU_SOCCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_SOCCLK,
@@ -882,26 +1030,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
}
single_dpm_table = &(dpm_context->dpm_tables.soc_table);
- ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev,
- "Attempt to get socclk levels Failed!");
- return ret;
- }
- for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(
- buf, size, "%d: %uMhz %s\n", i,
- clocks.data[i].clocks_in_khz / 1000,
- (clocks.num_levels == 1) ?
- "*" :
- (smu_v13_0_6_freqs_in_same_level(
- clocks.data[i].clocks_in_khz /
- 1000,
- now) ?
- "*" :
- ""));
- break;
+ return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
+ now, "socclk");
case SMU_FCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_FCLK,
@@ -913,26 +1044,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
}
single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
- ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev,
- "Attempt to get fclk levels Failed!");
- return ret;
- }
- for (i = 0; i < single_dpm_table->count; i++)
- size += sysfs_emit_at(
- buf, size, "%d: %uMhz %s\n", i,
- single_dpm_table->dpm_levels[i].value,
- (clocks.num_levels == 1) ?
- "*" :
- (smu_v13_0_6_freqs_in_same_level(
- clocks.data[i].clocks_in_khz /
- 1000,
- now) ?
- "*" :
- ""));
- break;
+ return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
+ now, "fclk");
case SMU_VCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_VCLK,
@@ -944,26 +1058,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
}
single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
- ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev,
- "Attempt to get vclk levels Failed!");
- return ret;
- }
- for (i = 0; i < single_dpm_table->count; i++)
- size += sysfs_emit_at(
- buf, size, "%d: %uMhz %s\n", i,
- single_dpm_table->dpm_levels[i].value,
- (clocks.num_levels == 1) ?
- "*" :
- (smu_v13_0_6_freqs_in_same_level(
- clocks.data[i].clocks_in_khz /
- 1000,
- now) ?
- "*" :
- ""));
- break;
+ return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
+ now, "vclk");
case SMU_DCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_DCLK,
@@ -975,26 +1072,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
}
single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
- ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev,
- "Attempt to get dclk levels Failed!");
- return ret;
- }
- for (i = 0; i < single_dpm_table->count; i++)
- size += sysfs_emit_at(
- buf, size, "%d: %uMhz %s\n", i,
- single_dpm_table->dpm_levels[i].value,
- (clocks.num_levels == 1) ?
- "*" :
- (smu_v13_0_6_freqs_in_same_level(
- clocks.data[i].clocks_in_khz /
- 1000,
- now) ?
- "*" :
- ""));
- break;
+ return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
+ now, "dclk");
default:
break;
@@ -1230,9 +1310,10 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu,
}
static int smu_v13_0_6_get_power_limit(struct smu_context *smu,
- uint32_t *current_power_limit,
- uint32_t *default_power_limit,
- uint32_t *max_power_limit)
+ uint32_t *current_power_limit,
+ uint32_t *default_power_limit,
+ uint32_t *max_power_limit,
+ uint32_t *min_power_limit)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct PPTable_t *pptable =
@@ -1256,6 +1337,8 @@ static int smu_v13_0_6_get_power_limit(struct smu_context *smu,
*max_power_limit = pptable->MaxSocketPowerLimit;
}
+ if (min_power_limit)
+ *min_power_limit = 0;
return 0;
}
@@ -1380,10 +1463,7 @@ static int smu_v13_0_6_register_irq_handler(struct smu_context *smu)
static int smu_v13_0_6_notify_unload(struct smu_context *smu)
{
- uint32_t smu_version;
-
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if (smu_version <= 0x553500)
+ if (amdgpu_in_reset(smu->adev))
return 0;
dev_dbg(smu->adev->dev, "Notify PMFW about driver unload");
@@ -1393,6 +1473,18 @@ static int smu_v13_0_6_notify_unload(struct smu_context *smu)
return 0;
}
+static int smu_v13_0_6_mca_set_debug_mode(struct smu_context *smu, bool enable)
+{
+ /* NOTE: this ClearMcaOnRead message is only supported for smu version 85.72.0 or higher */
+ if (smu->smc_fw_version < 0x554800)
+ return 0;
+
+ amdgpu_ras_set_mca_debug_mode(smu->adev, enable);
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ClearMcaOnRead,
+ enable ? 0 : ClearMcaOnRead_UE_FLAG_MASK | ClearMcaOnRead_CE_POLL_MASK,
+ NULL);
+}
+
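The smc_fw_version thresholds used throughout this file pack the firmware release one byte per component, so 0x554800 is 85.72.0 (matching the comment above) and 0x554500 is 85.69.0. A sketch of the decode, with the layout inferred from those comments rather than from a header:

	uint32_t fw = 0x554800;
	unsigned int major = (fw >> 16) & 0xff;	/* 0x55 = 85 */
	unsigned int minor = (fw >> 8) & 0xff;	/* 0x48 = 72 */
	unsigned int patch = fw & 0xff;		/* 0x00 = 0  */
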
static int smu_v13_0_6_system_features_control(struct smu_context *smu,
bool enable)
{
@@ -1644,13 +1736,11 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
static int smu_v13_0_6_get_enabled_mask(struct smu_context *smu,
uint64_t *feature_mask)
{
- uint32_t smu_version;
int ret;
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
ret = smu_cmn_get_enabled_mask(smu, feature_mask);
- if (ret == -EIO && smu_version < 0x552F00) {
+ if (ret == -EIO && smu->smc_fw_version < 0x552F00) {
*feature_mask = 0;
ret = 0;
}
@@ -1854,8 +1944,6 @@ static void smu_v13_0_6_get_unique_id(struct smu_context *smu)
(struct PPTable_t *)smu_table->driver_pptable;
adev->unique_id = pptable->PublicSerialNumber_AID;
- if (adev->serial[0] == '\0')
- sprintf(adev->serial, "%016llx", adev->unique_id);
}
static bool smu_v13_0_6_is_baco_supported(struct smu_context *smu)
@@ -1865,19 +1953,6 @@ static bool smu_v13_0_6_is_baco_supported(struct smu_context *smu)
return false;
}
-static int smu_v13_0_6_set_df_cstate(struct smu_context *smu,
- enum pp_df_cstate state)
-{
- return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl,
- state, NULL);
-}
-
-static int smu_v13_0_6_allow_xgmi_power_down(struct smu_context *smu, bool en)
-{
- return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GmiPwrDnControl,
- en ? 0 : 1, NULL);
-}
-
static const char *const throttling_logging_label[] = {
[THROTTLER_PROCHOT_BIT] = "Prochot",
[THROTTLER_PPT_BIT] = "PPT",
@@ -1888,7 +1963,7 @@ static const char *const throttling_logging_label[] = {
static void smu_v13_0_6_log_thermal_throttling_event(struct smu_context *smu)
{
- int throttler_idx, throtting_events = 0, buf_idx = 0;
+ int throttler_idx, throttling_events = 0, buf_idx = 0;
struct amdgpu_device *adev = smu->adev;
uint32_t throttler_status;
char log_buf[256];
@@ -1902,10 +1977,10 @@ static void smu_v13_0_6_log_thermal_throttling_event(struct smu_context *smu)
throttler_idx < ARRAY_SIZE(throttling_logging_label);
throttler_idx++) {
if (throttler_status & (1U << throttler_idx)) {
- throtting_events++;
+ throttling_events++;
buf_idx += snprintf(
log_buf + buf_idx, sizeof(log_buf) - buf_idx,
- "%s%s", throtting_events > 1 ? " and " : "",
+ "%s%s", throttling_events > 1 ? " and " : "",
throttling_logging_label[throttler_idx]);
if (buf_idx >= sizeof(log_buf)) {
dev_err(adev->dev, "buffer overflow!\n");
@@ -1956,63 +2031,71 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_3 *gpu_metrics =
- (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
+ struct gpu_metrics_v1_5 *gpu_metrics =
+ (struct gpu_metrics_v1_5 *)smu_table->gpu_metrics_table;
struct amdgpu_device *adev = smu->adev;
- int ret = 0, inst0, xcc0;
- MetricsTable_t *metrics;
+ int ret = 0, xcc_id, inst, i, j;
+ MetricsTableX_t *metrics_x;
+ MetricsTableA_t *metrics_a;
u16 link_width_level;
- inst0 = adev->sdma.instance[0].aid_id;
- xcc0 = GET_INST(GC, 0);
-
- metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
- ret = smu_v13_0_6_get_metrics_table(smu, metrics, true);
+ metrics_x = kzalloc(max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), GFP_KERNEL);
+ ret = smu_v13_0_6_get_metrics_table(smu, metrics_x, true);
if (ret) {
- kfree(metrics);
+ kfree(metrics_x);
return ret;
}
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
+ metrics_a = (MetricsTableA_t *)metrics_x;
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 5);
gpu_metrics->temperature_hotspot =
- SMUQ10_TO_UINT(metrics->MaxSocketTemperature);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature));
/* Individual HBM stack temperature is not reported */
gpu_metrics->temperature_mem =
- SMUQ10_TO_UINT(metrics->MaxHbmTemperature);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature));
/* Reports max temperature of all voltage rails */
gpu_metrics->temperature_vrsoc =
- SMUQ10_TO_UINT(metrics->MaxVrTemperature);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature));
gpu_metrics->average_gfx_activity =
- SMUQ10_TO_UINT(metrics->SocketGfxBusy);
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy));
gpu_metrics->average_umc_activity =
- SMUQ10_TO_UINT(metrics->DramBandwidthUtilization);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization));
- gpu_metrics->average_socket_power =
- SMUQ10_TO_UINT(metrics->SocketPower);
+ gpu_metrics->curr_socket_power =
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower));
/* Energy counter reported in 15.259uJ (2^-16) units */
- gpu_metrics->energy_accumulator = metrics->SocketEnergyAcc;
-
- gpu_metrics->current_gfxclk =
- SMUQ10_TO_UINT(metrics->GfxclkFrequency[xcc0]);
- gpu_metrics->current_socclk =
- SMUQ10_TO_UINT(metrics->SocclkFrequency[inst0]);
- gpu_metrics->current_uclk = SMUQ10_TO_UINT(metrics->UclkFrequency);
- gpu_metrics->current_vclk0 =
- SMUQ10_TO_UINT(metrics->VclkFrequency[inst0]);
- gpu_metrics->current_dclk0 =
- SMUQ10_TO_UINT(metrics->DclkFrequency[inst0]);
-
- gpu_metrics->average_gfxclk_frequency = gpu_metrics->current_gfxclk;
- gpu_metrics->average_socclk_frequency = gpu_metrics->current_socclk;
- gpu_metrics->average_uclk_frequency = gpu_metrics->current_uclk;
- gpu_metrics->average_vclk0_frequency = gpu_metrics->current_vclk0;
- gpu_metrics->average_dclk0_frequency = gpu_metrics->current_dclk0;
+ gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc);
+
+ for (i = 0; i < MAX_GFX_CLKS; i++) {
+ xcc_id = GET_INST(GC, i);
+ if (xcc_id >= 0)
+ gpu_metrics->current_gfxclk[i] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]);
+
+ if (i < MAX_CLKS) {
+ gpu_metrics->current_socclk[i] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[i]);
+ inst = GET_INST(VCN, i);
+ if (inst >= 0) {
+ gpu_metrics->current_vclk0[i] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[inst]);
+ gpu_metrics->current_dclk0[i] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[inst]);
+ }
+ }
+ }
+
+ gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency));
/* Throttle status is not reported through metrics now */
gpu_metrics->throttle_status = 0;
+	/* Clock lock status. Each bit corresponds to one GFXCLK instance */
+ gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak) >> GET_INST(GC, 0);
+
if (!(adev->flags & AMD_IS_APU)) {
link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu);
if (link_width_level > MAX_LINK_WIDTH)
@@ -2022,21 +2105,60 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
DECODE_LANE_WIDTH(link_width_level);
gpu_metrics->pcie_link_speed =
smu_v13_0_6_get_current_pcie_link_speed(smu);
+ gpu_metrics->pcie_bandwidth_acc =
+ SMUQ10_ROUND(metrics_x->PcieBandwidthAcc[0]);
+ gpu_metrics->pcie_bandwidth_inst =
+ SMUQ10_ROUND(metrics_x->PcieBandwidth[0]);
+ gpu_metrics->pcie_l0_to_recov_count_acc =
+ metrics_x->PCIeL0ToRecoveryCountAcc;
+ gpu_metrics->pcie_replay_count_acc =
+ metrics_x->PCIenReplayAAcc;
+ gpu_metrics->pcie_replay_rover_count_acc =
+ metrics_x->PCIenReplayARolloverCountAcc;
+ gpu_metrics->pcie_nak_sent_count_acc =
+ metrics_x->PCIeNAKSentCountAcc;
+ gpu_metrics->pcie_nak_rcvd_count_acc =
+ metrics_x->PCIeNAKReceivedCountAcc;
}
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
gpu_metrics->gfx_activity_acc =
- SMUQ10_TO_UINT(metrics->SocketGfxBusyAcc);
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc));
gpu_metrics->mem_activity_acc =
- SMUQ10_TO_UINT(metrics->DramBandwidthUtilizationAcc);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc));
+
+ for (i = 0; i < NUM_XGMI_LINKS; i++) {
+ gpu_metrics->xgmi_read_data_acc[i] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc)[i]);
+ gpu_metrics->xgmi_write_data_acc[i] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc)[i]);
+ }
- gpu_metrics->firmware_timestamp = metrics->Timestamp;
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ inst = GET_INST(JPEG, i);
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ gpu_metrics->jpeg_activity[(i * adev->jpeg.num_jpeg_rings) + j] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy)
+ [(inst * adev->jpeg.num_jpeg_rings) + j]);
+ }
+ }
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ inst = GET_INST(VCN, i);
+ gpu_metrics->vcn_activity[i] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy)[inst]);
+ }
+
+ gpu_metrics->xgmi_link_width = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWidth));
+ gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiBitrate));
+
+ gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp);
*table = (void *)gpu_metrics;
- kfree(metrics);
+ kfree(metrics_x);
- return sizeof(struct gpu_metrics_v1_3);
+ return sizeof(*gpu_metrics);
}
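get_gpu_metrics allocates a single buffer sized for the larger of the two metrics layouts and reads it through both typed pointers; GET_METRIC_FIELD then picks the right view per access. A condensed sketch of the pattern:

	/* One allocation, two typed views; AMD_IS_APU selects the layout. */
	void *raw = kzalloc(max(sizeof(MetricsTableX_t),
				sizeof(MetricsTableA_t)), GFP_KERNEL);
	MetricsTableX_t *metrics_x = raw;
	MetricsTableA_t *metrics_a = raw;
	/* GET_METRIC_FIELD(Timestamp) expands to
	 * (adev->flags & AMD_IS_APU) ? metrics_a->Timestamp : metrics_x->Timestamp
	 */
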
static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
@@ -2070,17 +2192,18 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
continue;
}
- if (ret) {
- dev_err(adev->dev,
- "failed to send mode2 message \tparam: 0x%08x error code %d\n",
- SMU_RESET_MODE_2, ret);
+ if (ret)
goto out;
- }
+
} while (ret == -ETIME && timeout);
out:
mutex_unlock(&smu->message_lock);
+ if (ret)
+ dev_err(adev->dev, "failed to send mode2 reset, error code %d",
+ ret);
+
return ret;
}
@@ -2088,8 +2211,7 @@ static int smu_v13_0_6_get_thermal_temperature_range(struct smu_context *smu,
struct smu_temperature_range *range)
{
struct amdgpu_device *adev = smu->adev;
- u32 aid_temp, xcd_temp, mem_temp;
- uint32_t smu_version;
+ u32 aid_temp, xcd_temp, max_temp;
u32 ccd_temp = 0;
int ret;
@@ -2100,35 +2222,53 @@ static int smu_v13_0_6_get_thermal_temperature_range(struct smu_context *smu,
return -EINVAL;
/* Check SMU version; the GetCtfLimit message is only supported for SMU version 85.69 or higher */
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if (smu_version < 0x554500)
+ if (smu->smc_fw_version < 0x554500)
return 0;
+ /* Get SOC Max operating temperature */
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
PPSMC_AID_THM_TYPE, &aid_temp);
if (ret)
goto failed;
-
if (adev->flags & AMD_IS_APU) {
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
PPSMC_CCD_THM_TYPE, &ccd_temp);
if (ret)
goto failed;
}
-
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
PPSMC_XCD_THM_TYPE, &xcd_temp);
if (ret)
goto failed;
-
- range->hotspot_crit_max = max3(aid_temp, xcd_temp, ccd_temp) *
+ range->hotspot_emergency_max = max3(aid_temp, xcd_temp, ccd_temp) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ /* Get HBM Max operating temperature */
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
- PPSMC_HBM_THM_TYPE, &mem_temp);
+ PPSMC_HBM_THM_TYPE, &max_temp);
+ if (ret)
+ goto failed;
+ range->mem_emergency_max =
+ max_temp * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ /* Get SOC thermal throttle limit */
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetThermalLimit,
+ PPSMC_THROTTLING_LIMIT_TYPE_SOCKET,
+ &max_temp);
+ if (ret)
+ goto failed;
+ range->hotspot_crit_max =
+ max_temp * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ /* Get HBM thermal throttle limit */
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetThermalLimit,
+ PPSMC_THROTTLING_LIMIT_TYPE_HBM,
+ &max_temp);
if (ret)
goto failed;
- range->mem_crit_max = mem_temp * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ range->mem_crit_max = max_temp * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
failed:
return ret;
}
@@ -2136,16 +2276,24 @@ failed:
static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
+ struct amdgpu_hive_info *hive = NULL;
+ u32 hive_ras_recovery = 0;
struct amdgpu_ras *ras;
u32 fatal_err, param;
int ret = 0;
+ hive = amdgpu_get_xgmi_hive(adev);
ras = amdgpu_ras_get_context(adev);
fatal_err = 0;
param = SMU_RESET_MODE_1;
+ if (hive) {
+ hive_ras_recovery = atomic_read(&hive->ras_recovery);
+ amdgpu_put_xgmi_hive(hive);
+ }
+
/* fatal error triggered by ras, PMFW supports the flag */
- if (ras && atomic_read(&ras->in_recovery))
+ if (ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
fatal_err = 1;
param |= (fatal_err << 16);
@@ -2184,6 +2332,525 @@ static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
return ret;
}
+static int smu_v13_0_6_post_init(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (!amdgpu_sriov_vf(adev) && adev->ras_enabled)
+ return smu_v13_0_6_mca_set_debug_mode(smu, false);
+
+ return 0;
+}
+
+static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ return smu_v13_0_6_mca_set_debug_mode(smu, enable);
+}
+
+static int smu_v13_0_6_get_valid_mca_count(struct smu_context *smu, enum amdgpu_mca_error_type type, uint32_t *count)
+{
+ uint32_t msg;
+ int ret;
+
+ if (!count)
+ return -EINVAL;
+
+ switch (type) {
+ case AMDGPU_MCA_ERROR_TYPE_UE:
+ msg = SMU_MSG_QueryValidMcaCount;
+ break;
+ case AMDGPU_MCA_ERROR_TYPE_CE:
+ msg = SMU_MSG_QueryValidMcaCeCount;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = smu_cmn_send_smc_msg(smu, msg, count);
+ if (ret) {
+ *count = 0;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __smu_v13_0_6_mca_dump_bank(struct smu_context *smu, enum amdgpu_mca_error_type type,
+ int idx, int offset, uint32_t *val)
+{
+ uint32_t msg, param;
+
+ switch (type) {
+ case AMDGPU_MCA_ERROR_TYPE_UE:
+ msg = SMU_MSG_McaBankDumpDW;
+ break;
+ case AMDGPU_MCA_ERROR_TYPE_CE:
+ msg = SMU_MSG_McaBankCeDumpDW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ param = ((idx & 0xffff) << 16) | (offset & 0xfffc);
+
+ return smu_cmn_send_smc_msg_with_param(smu, msg, param, val);
+}
+
+static int smu_v13_0_6_mca_dump_bank(struct smu_context *smu, enum amdgpu_mca_error_type type,
+ int idx, int offset, uint32_t *val, int count)
+{
+ int ret, i;
+
+ if (!val)
+ return -EINVAL;
+
+ for (i = 0; i < count; i++) {
+ ret = __smu_v13_0_6_mca_dump_bank(smu, type, idx, offset + (i << 2), &val[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct mca_bank_ipid smu_v13_0_6_mca_ipid_table[AMDGPU_MCA_IP_COUNT] = {
+ MCA_BANK_IPID(UMC, 0x96, 0x0),
+ MCA_BANK_IPID(SMU, 0x01, 0x1),
+ MCA_BANK_IPID(MP5, 0x01, 0x2),
+ MCA_BANK_IPID(PCS_XGMI, 0x50, 0x0),
+};
+
+static void mca_bank_entry_info_decode(struct mca_bank_entry *entry, struct mca_bank_info *info)
+{
+ uint64_t ipid = entry->regs[MCA_REG_IDX_IPID];
+ uint32_t insthi;
+
+	/* NOTE: All MCA IPID registers share the same format,
+	 * so the driver can share the MCMP1 register header file.
+	 */
+
+ info->hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
+ info->mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType);
+
+ insthi = REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi);
+ info->aid = ((insthi >> 2) & 0x03);
+ info->socket_id = insthi & 0x03;
+}
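The AID and socket come from the low nibble of InstanceIdHi, two bits each. With an illustrative value:

	uint32_t insthi = 0xD;			/* 0b1101, made-up */
	uint32_t aid = (insthi >> 2) & 0x03;	/* 3 */
	uint32_t socket = insthi & 0x03;	/* 1 */
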
+
+static int mca_bank_read_reg(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
+ int idx, int reg_idx, uint64_t *val)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ uint32_t data[2] = {0, 0};
+ int ret;
+
+ if (!val || reg_idx >= MCA_REG_IDX_COUNT)
+ return -EINVAL;
+
+ ret = smu_v13_0_6_mca_dump_bank(smu, type, idx, reg_idx * 8, data, ARRAY_SIZE(data));
+ if (ret)
+ return ret;
+
+ *val = (uint64_t)data[1] << 32 | data[0];
+
+ dev_dbg(adev->dev, "mca read bank reg: type:%s, index: %d, reg_idx: %d, val: 0x%016llx\n",
+ type == AMDGPU_MCA_ERROR_TYPE_UE ? "UE" : "CE", idx, reg_idx, *val);
+
+ return 0;
+}
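Each 64-bit MCA register is fetched as two dword dumps: the message parameter carries the bank index in its high half and a dword-aligned byte offset in its low half, and the two halves are stitched back together above. Illustrative values:

	/* Bank 3, register index 2 (byte offset 2 * 8 = 16): */
	uint32_t lo = ((3 & 0xffff) << 16) | (16 & 0xfffc);	/* 0x00030010 */
	uint32_t hi = ((3 & 0xffff) << 16) | (20 & 0xfffc);	/* 0x00030014 */
	/* reg = (uint64_t)dump_hi << 32 | dump_lo */
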
+
+static int mca_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
+ int idx, struct mca_bank_entry *entry)
+{
+ int i, ret;
+
+	/* NOTE: populate all mca registers by default */
+ for (i = 0; i < ARRAY_SIZE(entry->regs); i++) {
+ ret = mca_bank_read_reg(adev, type, idx, i, &entry->regs[i]);
+ if (ret)
+ return ret;
+ }
+
+ entry->idx = idx;
+ entry->type = type;
+
+ mca_bank_entry_info_decode(entry, &entry->info);
+
+ return 0;
+}
+
+static int mca_decode_ipid_to_hwip(uint64_t val)
+{
+ const struct mca_bank_ipid *ipid;
+ uint16_t hwid, mcatype;
+ int i;
+
+ hwid = REG_GET_FIELD(val, MCMP1_IPIDT0, HardwareID);
+ mcatype = REG_GET_FIELD(val, MCMP1_IPIDT0, McaType);
+
+ for (i = 0; i < ARRAY_SIZE(smu_v13_0_6_mca_ipid_table); i++) {
+ ipid = &smu_v13_0_6_mca_ipid_table[i];
+
+ if (!ipid->hwid)
+ continue;
+
+ if (ipid->hwid == hwid && ipid->mcatype == mcatype)
+ return i;
+ }
+
+ return AMDGPU_MCA_IP_UNKNOW;
+}
+
+static int mca_umc_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
+ enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
+{
+ uint64_t status0;
+
+ status0 = entry->regs[MCA_REG_IDX_STATUS];
+
+ if (!REG_GET_FIELD(status0, MCMP1_STATUST0, Val)) {
+ *count = 0;
+ return 0;
+ }
+
+ if (type == AMDGPU_MCA_ERROR_TYPE_UE && umc_v12_0_is_uncorrectable_error(status0))
+ *count = 1;
+ else if (type == AMDGPU_MCA_ERROR_TYPE_CE && umc_v12_0_is_correctable_error(status0))
+ *count = 1;
+
+ return 0;
+}
+
+static int mca_pcs_xgmi_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
+ enum amdgpu_mca_error_type type, struct mca_bank_entry *entry,
+ uint32_t *count)
+{
+ u32 ext_error_code;
+
+ ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(entry->regs[MCA_REG_IDX_STATUS]);
+
+ if (type == AMDGPU_MCA_ERROR_TYPE_UE && ext_error_code == 0)
+ *count = 1;
+ else if (type == AMDGPU_MCA_ERROR_TYPE_CE && ext_error_code == 6)
+ *count = 1;
+
+ return 0;
+}
+
+static bool mca_smu_check_error_code(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras,
+ uint32_t errcode)
+{
+ int i;
+
+ if (!mca_ras->err_code_count || !mca_ras->err_code_array)
+ return true;
+
+ for (i = 0; i < mca_ras->err_code_count; i++) {
+ if (errcode == mca_ras->err_code_array[i])
+ return true;
+ }
+
+ return false;
+}
+
+static int mca_gfx_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
+ enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
+{
+ uint64_t status0, misc0;
+
+ status0 = entry->regs[MCA_REG_IDX_STATUS];
+ if (!REG_GET_FIELD(status0, MCMP1_STATUST0, Val)) {
+ *count = 0;
+ return 0;
+ }
+
+ if (type == AMDGPU_MCA_ERROR_TYPE_UE &&
+ REG_GET_FIELD(status0, MCMP1_STATUST0, UC) == 1 &&
+ REG_GET_FIELD(status0, MCMP1_STATUST0, PCC) == 1) {
+ *count = 1;
+ return 0;
+ } else {
+ misc0 = entry->regs[MCA_REG_IDX_MISC0];
+ *count = REG_GET_FIELD(misc0, MCMP1_MISC0T0, ErrCnt);
+ }
+
+ return 0;
+}
+
+static int mca_smu_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
+ enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
+{
+ uint64_t status0, misc0;
+
+ status0 = entry->regs[MCA_REG_IDX_STATUS];
+ if (!REG_GET_FIELD(status0, MCMP1_STATUST0, Val)) {
+ *count = 0;
+ return 0;
+ }
+
+ if (type == AMDGPU_MCA_ERROR_TYPE_UE &&
+ REG_GET_FIELD(status0, MCMP1_STATUST0, UC) == 1 &&
+ REG_GET_FIELD(status0, MCMP1_STATUST0, PCC) == 1) {
+ if (count)
+ *count = 1;
+ return 0;
+ }
+
+ misc0 = entry->regs[MCA_REG_IDX_MISC0];
+ *count = REG_GET_FIELD(misc0, MCMP1_MISC0T0, ErrCnt);
+
+ return 0;
+}
+
+static bool mca_gfx_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
+ enum amdgpu_mca_error_type type, struct mca_bank_entry *entry)
+{
+ uint32_t instlo;
+
+ instlo = REG_GET_FIELD(entry->regs[MCA_REG_IDX_IPID], MCMP1_IPIDT0, InstanceIdLo);
+ switch (instlo) {
+ case 0x36430400: /* SMNAID XCD 0 */
+ case 0x38430400: /* SMNAID XCD 1 */
+ case 0x40430400: /* SMNXCD XCD 0, NOTE: FIXME: fix this error later */
+ return true;
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+static bool mca_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
+ enum amdgpu_mca_error_type type, struct mca_bank_entry *entry)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ uint32_t errcode, instlo;
+
+ instlo = REG_GET_FIELD(entry->regs[MCA_REG_IDX_IPID], MCMP1_IPIDT0, InstanceIdLo);
+ if (instlo != 0x03b30400)
+ return false;
+
+ if (!(adev->flags & AMD_IS_APU) && smu->smc_fw_version >= 0x00555600) {
+ errcode = MCA_REG__SYND__ERRORINFORMATION(entry->regs[MCA_REG_IDX_SYND]);
+ errcode &= 0xff;
+ } else {
+ errcode = REG_GET_FIELD(entry->regs[MCA_REG_IDX_STATUS], MCMP1_STATUST0, ErrorCode);
+ }
+
+ return mca_smu_check_error_code(adev, mca_ras, errcode);
+}
+
+static int sdma_err_codes[] = { CODE_SDMA0, CODE_SDMA1, CODE_SDMA2, CODE_SDMA3 };
+static int mmhub_err_codes[] = {
+ CODE_DAGB0, CODE_DAGB0 + 1, CODE_DAGB0 + 2, CODE_DAGB0 + 3, CODE_DAGB0 + 4, /* DAGB0-4 */
+ CODE_EA0, CODE_EA0 + 1, CODE_EA0 + 2, CODE_EA0 + 3, CODE_EA0 + 4, /* MMEA0-4*/
+ CODE_VML2, CODE_VML2_WALKER, CODE_MMCANE,
+};
+
+static const struct mca_ras_info mca_ras_table[] = {
+ {
+ .blkid = AMDGPU_RAS_BLOCK__UMC,
+ .ip = AMDGPU_MCA_IP_UMC,
+ .get_err_count = mca_umc_mca_get_err_count,
+ }, {
+ .blkid = AMDGPU_RAS_BLOCK__GFX,
+ .ip = AMDGPU_MCA_IP_SMU,
+ .get_err_count = mca_gfx_mca_get_err_count,
+ .bank_is_valid = mca_gfx_smu_bank_is_valid,
+ }, {
+ .blkid = AMDGPU_RAS_BLOCK__SDMA,
+ .ip = AMDGPU_MCA_IP_SMU,
+ .err_code_array = sdma_err_codes,
+ .err_code_count = ARRAY_SIZE(sdma_err_codes),
+ .get_err_count = mca_smu_mca_get_err_count,
+ .bank_is_valid = mca_smu_bank_is_valid,
+ }, {
+ .blkid = AMDGPU_RAS_BLOCK__MMHUB,
+ .ip = AMDGPU_MCA_IP_SMU,
+ .err_code_array = mmhub_err_codes,
+ .err_code_count = ARRAY_SIZE(mmhub_err_codes),
+ .get_err_count = mca_smu_mca_get_err_count,
+ .bank_is_valid = mca_smu_bank_is_valid,
+ }, {
+ .blkid = AMDGPU_RAS_BLOCK__XGMI_WAFL,
+ .ip = AMDGPU_MCA_IP_PCS_XGMI,
+ .get_err_count = mca_pcs_xgmi_mca_get_err_count,
+ },
+};
+
+static const struct mca_ras_info *mca_get_mca_ras_info(struct amdgpu_device *adev, enum amdgpu_ras_block blkid)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mca_ras_table); i++) {
+ if (mca_ras_table[i].blkid == blkid)
+ return &mca_ras_table[i];
+ }
+
+ return NULL;
+}
+
+static int mca_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret;
+
+ switch (type) {
+ case AMDGPU_MCA_ERROR_TYPE_UE:
+ case AMDGPU_MCA_ERROR_TYPE_CE:
+ ret = smu_v13_0_6_get_valid_mca_count(smu, type, count);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static bool mca_bank_is_valid(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras,
+ enum amdgpu_mca_error_type type, struct mca_bank_entry *entry)
+{
+ if (mca_decode_ipid_to_hwip(entry->regs[MCA_REG_IDX_IPID]) != mca_ras->ip)
+ return false;
+
+ if (mca_ras->bank_is_valid)
+ return mca_ras->bank_is_valid(mca_ras, adev, type, entry);
+
+ return true;
+}
+
+static int __mca_smu_get_ras_mca_set(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras,
+ enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set)
+{
+ struct mca_bank_entry entry;
+ uint32_t mca_cnt;
+ int i, ret;
+
+ ret = mca_get_valid_mca_count(adev, type, &mca_cnt);
+ if (ret)
+ return ret;
+
+ /* if valid mca bank count is 0, the driver can return 0 directly */
+ if (!mca_cnt)
+ return 0;
+
+ for (i = 0; i < mca_cnt; i++) {
+ memset(&entry, 0, sizeof(entry));
+ ret = mca_get_mca_entry(adev, type, i, &entry);
+ if (ret)
+ return ret;
+
+ if (mca_ras && !mca_bank_is_valid(adev, mca_ras, type, &entry))
+ continue;
+
+ ret = amdgpu_mca_bank_set_add_entry(mca_set, &entry);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mca_smu_get_ras_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
+ enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set)
+{
+ const struct mca_ras_info *mca_ras = NULL;
+
+ if (!mca_set)
+ return -EINVAL;
+
+ if (blk != AMDGPU_RAS_BLOCK_COUNT) {
+ mca_ras = mca_get_mca_ras_info(adev, blk);
+ if (!mca_ras)
+ return -EOPNOTSUPP;
+ }
+
+ return __mca_smu_get_ras_mca_set(adev, mca_ras, type, mca_set);
+}
+
+static int mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
+ struct mca_bank_entry *entry, uint32_t *count)
+{
+ const struct mca_ras_info *mca_ras;
+
+ if (!entry || !count)
+ return -EINVAL;
+
+ mca_ras = mca_get_mca_ras_info(adev, blk);
+ if (!mca_ras)
+ return -EOPNOTSUPP;
+
+ if (!mca_bank_is_valid(adev, mca_ras, type, entry)) {
+ *count = 0;
+ return 0;
+ }
+
+ return mca_ras->get_err_count(mca_ras, adev, type, entry, count);
+}
+
+static int mca_smu_get_mca_entry(struct amdgpu_device *adev,
+ enum amdgpu_mca_error_type type, int idx, struct mca_bank_entry *entry)
+{
+ return mca_get_mca_entry(adev, type, idx, entry);
+}
+
+static int mca_smu_get_valid_mca_count(struct amdgpu_device *adev,
+ enum amdgpu_mca_error_type type, uint32_t *count)
+{
+ return mca_get_valid_mca_count(adev, type, count);
+}
+
+static const struct amdgpu_mca_smu_funcs smu_v13_0_6_mca_smu_funcs = {
+ .max_ue_count = 12,
+ .max_ce_count = 12,
+ .mca_set_debug_mode = mca_smu_set_debug_mode,
+ .mca_get_ras_mca_set = mca_smu_get_ras_mca_set,
+ .mca_parse_mca_error_count = mca_smu_parse_mca_error_count,
+ .mca_get_mca_entry = mca_smu_get_mca_entry,
+ .mca_get_valid_mca_count = mca_smu_get_valid_mca_count,
+};
+
+static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu,
+ enum pp_xgmi_plpd_mode mode)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret, param;
+
+ switch (mode) {
+ case XGMI_PLPD_DEFAULT:
+ param = PPSMC_PLPD_MODE_DEFAULT;
+ break;
+ case XGMI_PLPD_OPTIMIZED:
+ param = PPSMC_PLPD_MODE_OPTIMIZED;
+ break;
+ case XGMI_PLPD_DISALLOW:
+ param = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (mode == XGMI_PLPD_DISALLOW)
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GmiPwrDnControl,
+ param, NULL);
+ else
+ /* change xgmi per-link power down policy */
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SelectPLPDMode,
+ param, NULL);
+
+ if (ret)
+ dev_err(adev->dev,
+ "select xgmi per-link power down policy %d failed\n",
+ mode);
+
+ return ret;
+}
+
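Note the message routing above: only XGMI_PLPD_DISALLOW goes through the legacy GmiPwrDnControl message; the named policies use the new SelectPLPDMode message.

	/* mode -> message(param), as wired above:
	 * XGMI_PLPD_DEFAULT   -> SelectPLPDMode(PPSMC_PLPD_MODE_DEFAULT)
	 * XGMI_PLPD_OPTIMIZED -> SelectPLPDMode(PPSMC_PLPD_MODE_OPTIMIZED)
	 * XGMI_PLPD_DISALLOW  -> GmiPwrDnControl(0)
	 */
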
static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
/* init dpm */
.get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask,
@@ -2197,6 +2864,8 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.get_power_limit = smu_v13_0_6_get_power_limit,
.is_dpm_running = smu_v13_0_6_is_dpm_running,
.get_unique_id = smu_v13_0_6_get_unique_id,
+ .init_microcode = smu_v13_0_6_init_microcode,
+ .fini_microcode = smu_v13_0_fini_microcode,
.init_smc_tables = smu_v13_0_6_init_smc_tables,
.fini_smc_tables = smu_v13_0_fini_smc_tables,
.init_power = smu_v13_0_init_power,
@@ -2222,11 +2891,9 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.get_dpm_ultimate_freq = smu_v13_0_6_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v13_0_6_set_soft_freq_limited_range,
.od_edit_dpm_table = smu_v13_0_6_usr_edit_dpm_table,
- .set_df_cstate = smu_v13_0_6_set_df_cstate,
- .allow_xgmi_power_down = smu_v13_0_6_allow_xgmi_power_down,
+ .select_xgmi_plpd_policy = smu_v13_0_6_select_xgmi_plpd_policy,
.log_thermal_throttling_event = smu_v13_0_6_log_thermal_throttling_event,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
- .set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.get_gpu_metrics = smu_v13_0_6_get_gpu_metrics,
.get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range,
.mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported,
@@ -2237,6 +2904,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.i2c_init = smu_v13_0_6_i2c_control_init,
.i2c_fini = smu_v13_0_6_i2c_control_fini,
.send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
+ .post_init = smu_v13_0_6_post_init,
};
void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
@@ -2248,4 +2916,5 @@ void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
smu->table_map = smu_v13_0_6_table_map;
smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION;
smu_v13_0_set_smu_mailbox_registers(smu);
+ amdgpu_mca_smu_init_funcs(smu->adev, &smu_v13_0_6_mca_smu_funcs);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 51ae41cb4..bc5891c3f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -77,6 +77,12 @@
#define PP_OD_FEATURE_UCLK_FMIN 2
#define PP_OD_FEATURE_UCLK_FMAX 3
#define PP_OD_FEATURE_GFX_VF_CURVE 4
+#define PP_OD_FEATURE_FAN_CURVE_TEMP 5
+#define PP_OD_FEATURE_FAN_CURVE_PWM 6
+#define PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT 7
+#define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8
+#define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9
+#define PP_OD_FEATURE_FAN_MINIMUM_PWM 10
#define LINK_SPEED_MAX 3
@@ -147,6 +153,7 @@ static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(VCLK1, PPCLK_VCLK_1),
CLK_MAP(DCLK, PPCLK_DCLK_0),
CLK_MAP(DCLK1, PPCLK_DCLK_1),
+ CLK_MAP(DCEFCLK, PPCLK_DCFCLK),
};
static struct cmn2asic_mapping smu_v13_0_7_feature_mask_map[SMU_FEATURE_COUNT] = {
@@ -331,12 +338,10 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
struct smu_baco_context *smu_baco = &smu->smu_baco;
PPTable_t *smc_pptable = table_context->driver_pptable;
BoardTable_t *BoardTable = &smc_pptable->BoardTable;
-#if 0
const OverDriveLimits_t * const overdrive_upperlimits =
&smc_pptable->SkuTable.OverDriveLimitsBasicMax;
const OverDriveLimits_t * const overdrive_lowerlimits =
&smc_pptable->SkuTable.OverDriveLimitsMin;
-#endif
if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC)
smu->dc_controlled_by_gpio = true;
@@ -349,22 +354,18 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
smu_baco->maco_support = true;
}
-#if 0
if (!overdrive_lowerlimits->FeatureCtrlMask ||
!overdrive_upperlimits->FeatureCtrlMask)
smu->od_enabled = false;
+ table_context->thermal_controller_type =
+ powerplay_table->thermal_controller_type;
+
/*
* Instead of having its own buffer space and get overdrive_table copied,
* smu->od_settings just points to the actual overdrive_table
*/
smu->od_settings = &powerplay_table->overdrive_table;
-#else
- smu->od_enabled = false;
-#endif
-
- table_context->thermal_controller_type =
- powerplay_table->thermal_controller_type;
return 0;
}
@@ -697,6 +698,22 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
pcie_table->num_of_link_levels++;
}
+ /* dcefclk dpm table setup */
+ dpm_table = &dpm_context->dpm_tables.dcef_table;
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
+ ret = smu_v13_0_set_single_dpm_table(smu,
+ SMU_DCEFCLK,
+ dpm_table);
+ if (ret)
+ return ret;
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
+ dpm_table->dpm_levels[0].enabled = true;
+ dpm_table->min = dpm_table->dpm_levels[0].value;
+ dpm_table->max = dpm_table->dpm_levels[0].value;
+ }
+
return 0;
}
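When DCN DPM is disabled, the table is seeded with a single boot-clock level; the divide by 100 converts the VBIOS boot value to MHz, assuming the usual 10 kHz encoding of boot clocks:

	/* e.g. boot_values.dcefclk = 60000 (10 kHz units) -> 600 MHz */
	uint32_t dcefclk_mhz = 60000 / 100;
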
@@ -778,6 +795,9 @@ static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu,
case METRICS_CURR_FCLK:
*value = metrics->CurrClock[PPCLK_FCLK];
break;
+ case METRICS_CURR_DCEFCLK:
+ *value = metrics->CurrClock[PPCLK_DCFCLK];
+ break;
case METRICS_AVERAGE_GFXCLK:
*value = metrics->AverageGfxclkFrequencyPreDs;
break;
@@ -1028,6 +1048,9 @@ static int smu_v13_0_7_get_current_clk_freq_by_table(struct smu_context *smu,
case PPCLK_DCLK_1:
member_type = METRICS_CURR_DCLK1;
break;
+ case PPCLK_DCFCLK:
+ member_type = METRICS_CURR_DCEFCLK;
+ break;
default:
return -EINVAL;
}
@@ -1080,6 +1103,30 @@ static void smu_v13_0_7_get_od_setting_limits(struct smu_context *smu,
od_min_setting = overdrive_lowerlimits->VoltageOffsetPerZoneBoundary;
od_max_setting = overdrive_upperlimits->VoltageOffsetPerZoneBoundary;
break;
+ case PP_OD_FEATURE_FAN_CURVE_TEMP:
+ od_min_setting = overdrive_lowerlimits->FanLinearTempPoints;
+ od_max_setting = overdrive_upperlimits->FanLinearTempPoints;
+ break;
+ case PP_OD_FEATURE_FAN_CURVE_PWM:
+ od_min_setting = overdrive_lowerlimits->FanLinearPwmPoints;
+ od_max_setting = overdrive_upperlimits->FanLinearPwmPoints;
+ break;
+ case PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT:
+ od_min_setting = overdrive_lowerlimits->AcousticLimitRpmThreshold;
+ od_max_setting = overdrive_upperlimits->AcousticLimitRpmThreshold;
+ break;
+ case PP_OD_FEATURE_FAN_ACOUSTIC_TARGET:
+ od_min_setting = overdrive_lowerlimits->AcousticTargetRpmThreshold;
+ od_max_setting = overdrive_upperlimits->AcousticTargetRpmThreshold;
+ break;
+ case PP_OD_FEATURE_FAN_TARGET_TEMPERATURE:
+ od_min_setting = overdrive_lowerlimits->FanTargetTemperature;
+ od_max_setting = overdrive_upperlimits->FanTargetTemperature;
+ break;
+ case PP_OD_FEATURE_FAN_MINIMUM_PWM:
+ od_min_setting = overdrive_lowerlimits->FanMinimumPwm;
+ od_max_setting = overdrive_upperlimits->FanMinimumPwm;
+ break;
default:
od_min_setting = od_max_setting = INT_MAX;
break;
@@ -1177,6 +1224,9 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
case SMU_DCLK1:
single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
break;
+ case SMU_DCEFCLK:
+ single_dpm_table = &(dpm_context->dpm_tables.dcef_table);
+ break;
default:
break;
}
@@ -1190,6 +1240,7 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
case SMU_VCLK1:
case SMU_DCLK:
case SMU_DCLK1:
+ case SMU_DCEFCLK:
ret = smu_v13_0_7_get_current_clk_freq_by_table(smu, clk_type, &curr_freq);
if (ret) {
dev_err(smu->adev->dev, "Failed to get current clock freq!");
@@ -1285,16 +1336,115 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
od_table->OverDriveTable.UclkFmax);
break;
- case SMU_OD_VDDC_CURVE:
+ case SMU_OD_VDDGFX_OFFSET:
if (!smu_v13_0_7_is_od_feature_supported(smu,
PP_OD_FEATURE_GFX_VF_CURVE_BIT))
break;
- size += sysfs_emit_at(buf, size, "OD_VDDC_CURVE:\n");
- for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
- size += sysfs_emit_at(buf, size, "%d: %dmv\n",
+ size += sysfs_emit_at(buf, size, "OD_VDDGFX_OFFSET:\n");
+ size += sysfs_emit_at(buf, size, "%dmV\n",
+ od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[0]);
+ break;
+
+ case SMU_OD_FAN_CURVE:
+ if (!smu_v13_0_7_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_FAN_CURVE:\n");
+ for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++)
+ size += sysfs_emit_at(buf, size, "%d: %dC %d%%\n",
i,
- od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i]);
+ (int)od_table->OverDriveTable.FanLinearTempPoints[i],
+ (int)od_table->OverDriveTable.FanLinearPwmPoints[i]);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_CURVE_TEMP,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "FAN_CURVE(hotspot temp): %uC %uC\n",
+ min_value, max_value);
+
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_CURVE_PWM,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "FAN_CURVE(fan speed): %u%% %u%%\n",
+ min_value, max_value);
+
+ break;
+
+ case SMU_OD_ACOUSTIC_LIMIT:
+ if (!smu_v13_0_7_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_LIMIT:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.AcousticLimitRpmThreshold);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "ACOUSTIC_LIMIT: %u %u\n",
+ min_value, max_value);
+ break;
+
+ case SMU_OD_ACOUSTIC_TARGET:
+ if (!smu_v13_0_7_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_TARGET:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.AcousticTargetRpmThreshold);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ACOUSTIC_TARGET,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "ACOUSTIC_TARGET: %u %u\n",
+ min_value, max_value);
+ break;
+
+ case SMU_OD_FAN_TARGET_TEMPERATURE:
+ if (!smu_v13_0_7_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "FAN_TARGET_TEMPERATURE:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.FanTargetTemperature);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_TARGET_TEMPERATURE,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "TARGET_TEMPERATURE: %u %u\n",
+ min_value, max_value);
+ break;
+
+ case SMU_OD_FAN_MINIMUM_PWM:
+ if (!smu_v13_0_7_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "FAN_MINIMUM_PWM:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.FanMinimumPwm);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_MINIMUM_PWM,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "MINIMUM_PWM: %u %u\n",
+ min_value, max_value);
break;
case SMU_OD_RANGE:
@@ -1336,7 +1486,7 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
PP_OD_FEATURE_GFX_VF_CURVE,
&min_value,
&max_value);
- size += sysfs_emit_at(buf, size, "VDDC_CURVE: %7dmv %10dmv\n",
+ size += sysfs_emit_at(buf, size, "VDDGFX_OFFSET: %7dmv %10dmv\n",
min_value, max_value);
}
break;
@@ -1348,6 +1498,59 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
return size;
}
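For reference, the fan-curve branch added above emits output in the following shape (values are illustrative, per the sysfs_emit_at format strings):

	/* OD_FAN_CURVE:
	 * 0: 45C 20%
	 * 1: 55C 40%
	 * ...
	 * OD_RANGE:
	 * FAN_CURVE(hotspot temp): 25C 100C
	 * FAN_CURVE(fan speed): 20% 100%
	 */
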
+static int smu_v13_0_7_od_restore_table_single(struct smu_context *smu, long input)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTableExternal_t *boot_overdrive_table =
+ (OverDriveTableExternal_t *)table_context->boot_overdrive_table;
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)table_context->overdrive_table;
+ struct amdgpu_device *adev = smu->adev;
+ int i;
+
+ switch (input) {
+ case PP_OD_EDIT_FAN_CURVE:
+ for (i = 0; i < NUM_OD_FAN_MAX_POINTS; i++) {
+ od_table->OverDriveTable.FanLinearTempPoints[i] =
+ boot_overdrive_table->OverDriveTable.FanLinearTempPoints[i];
+ od_table->OverDriveTable.FanLinearPwmPoints[i] =
+ boot_overdrive_table->OverDriveTable.FanLinearPwmPoints[i];
+ }
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ case PP_OD_EDIT_ACOUSTIC_LIMIT:
+ od_table->OverDriveTable.AcousticLimitRpmThreshold =
+ boot_overdrive_table->OverDriveTable.AcousticLimitRpmThreshold;
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ case PP_OD_EDIT_ACOUSTIC_TARGET:
+ od_table->OverDriveTable.AcousticTargetRpmThreshold =
+ boot_overdrive_table->OverDriveTable.AcousticTargetRpmThreshold;
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ case PP_OD_EDIT_FAN_TARGET_TEMPERATURE:
+ od_table->OverDriveTable.FanTargetTemperature =
+ boot_overdrive_table->OverDriveTable.FanTargetTemperature;
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ case PP_OD_EDIT_FAN_MINIMUM_PWM:
+ od_table->OverDriveTable.FanMinimumPwm =
+ boot_overdrive_table->OverDriveTable.FanMinimumPwm;
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ default:
+ dev_info(adev->dev, "Invalid table index: %ld\n", input);
+ return -EINVAL;
+ }
+
+ return 0;
+}
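
A minimal sketch of how this helper and the new fan edits are reached, assuming the smu_v13_0_7_od_edit_dpm_table() signature below and illustrative values only (call-site fragment, not driver code):

	int ret;

	/* one-argument restore: resets a single fan setting (size == 1 path) */
	long restore[1] = { PP_OD_EDIT_FAN_MINIMUM_PWM };
	ret = smu_v13_0_7_od_edit_dpm_table(smu, PP_OD_RESTORE_DEFAULT_TABLE,
					    restore, 1);

	/* fan-curve edit: point index, temperature (C), PWM (%) */
	long curve[3] = { 2, 60, 40 };
	ret = smu_v13_0_7_od_edit_dpm_table(smu, PP_OD_EDIT_FAN_CURVE,
					    curve, 3);

Note the asymmetry visible in the switch below: the restore command falls through to PP_OD_COMMIT_DPM_TABLE immediately, while the edit cases stay pending until a separate commit request uploads the table.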
+
static int smu_v13_0_7_od_edit_dpm_table(struct smu_context *smu,
enum PP_OD_DPM_TABLE_COMMAND type,
long input[],
@@ -1485,37 +1688,166 @@ static int smu_v13_0_7_od_edit_dpm_table(struct smu_context *smu,
}
break;
- case PP_OD_EDIT_VDDC_CURVE:
+ case PP_OD_EDIT_VDDGFX_OFFSET:
if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
- dev_warn(adev->dev, "VF curve setting not supported!\n");
+ dev_warn(adev->dev, "Gfx offset setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFX_VF_CURVE,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
+ od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] = input[0];
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_FAN_CURVE:
+ if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
return -ENOTSUPP;
}
- if (input[0] >= PP_NUM_OD_VF_CURVE_POINTS ||
+ if (input[0] >= NUM_OD_FAN_MAX_POINTS - 1 ||
input[0] < 0)
return -EINVAL;
smu_v13_0_7_get_od_setting_limits(smu,
- PP_OD_FEATURE_GFX_VF_CURVE,
+ PP_OD_FEATURE_FAN_CURVE_TEMP,
&minimum,
&maximum);
if (input[1] < minimum ||
input[1] > maximum) {
- dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n",
+ dev_info(adev->dev, "Fan curve temp setting(%ld) must be within [%d, %d]!\n",
input[1], minimum, maximum);
return -EINVAL;
}
- od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[input[0]] = input[1];
- od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFX_VF_CURVE_BIT;
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_CURVE_PWM,
+ &minimum,
+ &maximum);
+ if (input[2] < minimum ||
+ input[2] > maximum) {
+ dev_info(adev->dev, "Fan curve pwm setting(%ld) must be within [%d, %d]!\n",
+ input[2], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.FanLinearTempPoints[input[0]] = input[1];
+ od_table->OverDriveTable.FanLinearPwmPoints[input[0]] = input[2];
+ od_table->OverDriveTable.FanMode = FAN_MODE_MANUAL_LINEAR;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_ACOUSTIC_LIMIT:
+ if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "acoustic limit threshold setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.AcousticLimitRpmThreshold = input[0];
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_ACOUSTIC_TARGET:
+ if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ACOUSTIC_TARGET,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "acoustic target threshold setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.AcousticTargetRpmThreshold = input[0];
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_FAN_TARGET_TEMPERATURE:
+ if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_TARGET_TEMPERATURE,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "fan target temperature setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.FanTargetTemperature = input[0];
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_FAN_MINIMUM_PWM:
+ if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_MINIMUM_PWM,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "fan minimum pwm setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.FanMinimumPwm = input[0];
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
break;
case PP_OD_RESTORE_DEFAULT_TABLE:
- feature_ctrlmask = od_table->OverDriveTable.FeatureCtrlMask;
- memcpy(od_table,
- table_context->boot_overdrive_table,
- sizeof(OverDriveTableExternal_t));
- od_table->OverDriveTable.FeatureCtrlMask = feature_ctrlmask;
+ if (size == 1) {
+ ret = smu_v13_0_7_od_restore_table_single(smu, input[0]);
+ if (ret)
+ return ret;
+ } else {
+ feature_ctrlmask = od_table->OverDriveTable.FeatureCtrlMask;
+ memcpy(od_table,
+ table_context->boot_overdrive_table,
+ sizeof(OverDriveTableExternal_t));
+ od_table->OverDriveTable.FeatureCtrlMask = feature_ctrlmask;
+ }
fallthrough;
case PP_OD_COMMIT_DPM_TABLE:
@@ -1674,7 +2006,6 @@ static int smu_v13_0_7_get_thermal_temperature_range(struct smu_context *smu,
return 0;
}
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
static ssize_t smu_v13_0_7_get_gpu_metrics(struct smu_context *smu,
void **table)
{
@@ -1698,12 +2029,12 @@ static ssize_t smu_v13_0_7_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM];
gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX];
gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC];
- gpu_metrics->temperature_vrmem = MAX(metrics->AvgTemperature[TEMP_VR_MEM0],
+ gpu_metrics->temperature_vrmem = max(metrics->AvgTemperature[TEMP_VR_MEM0],
metrics->AvgTemperature[TEMP_VR_MEM1]);
gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
- gpu_metrics->average_mm_activity = MAX(metrics->Vcn0ActivityPercentage,
+ gpu_metrics->average_mm_activity = max(metrics->Vcn0ActivityPercentage,
metrics->Vcn1ActivityPercentage);
gpu_metrics->average_socket_power = metrics->AverageSocketPower;
@@ -1755,6 +2086,24 @@ static ssize_t smu_v13_0_7_get_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v1_3);
}
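
The driver-local MAX() macro removed above is superseded by the kernel's max() from include/linux/minmax.h, which evaluates each argument exactly once and type-checks the pair. A standalone illustration of the hazard the old macro carried (sample code, not driver code):

	#include <stdio.h>
	#define MAX(a, b) ((a) > (b) ? (a) : (b))

	int main(void)
	{
		int y = 5;
		int r = MAX(3, y++);	/* y++ expands twice: r == 6, y == 7 */
		printf("r=%d y=%d\n", r, y);
		return 0;
	}

With the kernel's max() the side effect would occur once, giving r == 5 and y == 6.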
+static void smu_v13_0_7_set_supported_od_feature_mask(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (smu_v13_0_7_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ adev->pm.od_feature_mask |= OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE |
+ OD_OPS_SUPPORT_FAN_CURVE_SET |
+ OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE |
+ OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET |
+ OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE |
+ OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET |
+ OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE |
+ OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET |
+ OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE |
+ OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET;
+}
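
These OD_OPS_SUPPORT_* bits advertise the fan-control capabilities to the generic amdgpu power layer, which presumably keys the creation of the matching sysfs nodes off them. A minimal consumer sketch, with expose_fan_curve_node() as a hypothetical placeholder:

	/* hypothetical: only create the attribute when the SKU supports it */
	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_SET)
		expose_fan_curve_node(adev);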
+
static int smu_v13_0_7_set_default_od_settings(struct smu_context *smu)
{
OverDriveTableExternal_t *od_table =
@@ -1804,8 +2153,24 @@ static int smu_v13_0_7_set_default_od_settings(struct smu_context *smu)
for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
user_od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] =
user_od_table_bak.OverDriveTable.VoltageOffsetPerZoneBoundary[i];
+ for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++) {
+ user_od_table->OverDriveTable.FanLinearTempPoints[i] =
+ user_od_table_bak.OverDriveTable.FanLinearTempPoints[i];
+ user_od_table->OverDriveTable.FanLinearPwmPoints[i] =
+ user_od_table_bak.OverDriveTable.FanLinearPwmPoints[i];
+ }
+ user_od_table->OverDriveTable.AcousticLimitRpmThreshold =
+ user_od_table_bak.OverDriveTable.AcousticLimitRpmThreshold;
+ user_od_table->OverDriveTable.AcousticTargetRpmThreshold =
+ user_od_table_bak.OverDriveTable.AcousticTargetRpmThreshold;
+ user_od_table->OverDriveTable.FanTargetTemperature =
+ user_od_table_bak.OverDriveTable.FanTargetTemperature;
+ user_od_table->OverDriveTable.FanMinimumPwm =
+ user_od_table_bak.OverDriveTable.FanMinimumPwm;
}
+ smu_v13_0_7_set_supported_od_feature_mask(smu);
+
return 0;
}
@@ -1816,9 +2181,10 @@ static int smu_v13_0_7_restore_user_od_settings(struct smu_context *smu)
OverDriveTableExternal_t *user_od_table = table_context->user_overdrive_table;
int res;
- user_od_table->OverDriveTable.FeatureCtrlMask = 1U << PP_OD_FEATURE_GFXCLK_BIT |
- 1U << PP_OD_FEATURE_UCLK_BIT |
- 1U << PP_OD_FEATURE_GFX_VF_CURVE_BIT;
+ user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) |
+ BIT(PP_OD_FEATURE_UCLK_BIT) |
+ BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) |
+ BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
res = smu_v13_0_7_upload_overdrive_table(smu, user_od_table);
user_od_table->OverDriveTable.FeatureCtrlMask = 0;
if (res == 0)
@@ -1903,7 +2269,7 @@ static int smu_v13_0_7_get_fan_speed_pwm(struct smu_context *smu,
}
/* Convert the PMFW output, which is in percent, to a 0-255 PWM value */
- *speed = MIN(*speed * 255 / 100, 255);
+ *speed = min(*speed * 255 / 100, (uint32_t)255);
return 0;
}
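
Checking the conversion with a hypothetical PMFW reading: 45% maps to 45 * 255 / 100 = 114 under integer division, and 100% maps to exactly 255, so the min() clamp only matters if the firmware ever reported more than 100%.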
@@ -1939,16 +2305,18 @@ static int smu_v13_0_7_enable_mgpu_fan_boost(struct smu_context *smu)
}
static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
- uint32_t *current_power_limit,
- uint32_t *default_power_limit,
- uint32_t *max_power_limit)
+ uint32_t *current_power_limit,
+ uint32_t *default_power_limit,
+ uint32_t *max_power_limit,
+ uint32_t *min_power_limit)
{
struct smu_table_context *table_context = &smu->smu_table;
struct smu_13_0_7_powerplay_table *powerplay_table =
(struct smu_13_0_7_powerplay_table *)table_context->power_play_table;
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
- uint32_t power_limit, od_percent;
+ uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
if (smu_v13_0_get_current_power_limit(smu, &power_limit))
power_limit = smu->adev->pm.ac_power ?
@@ -1960,16 +2328,25 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (max_power_limit) {
- if (smu->od_enabled) {
- od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+ if (smu->od_enabled) {
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+ } else {
+ od_percent_upper = 0;
+ od_percent_lower = 100;
+ }
- dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);
+ dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
+ od_percent_upper, od_percent_lower, power_limit);
- power_limit *= (100 + od_percent);
- power_limit /= 100;
- }
- *max_power_limit = power_limit;
+ if (max_power_limit) {
+ *max_power_limit = msg_limit * (100 + od_percent_upper);
+ *max_power_limit /= 100;
+ }
+
+ if (min_power_limit) {
+ *min_power_limit = power_limit * (100 - od_percent_lower);
+ *min_power_limit /= 100;
}
return 0;
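
Worked through with illustrative numbers: a default power_limit of 300 W, a firmware message cap (msg_limit) of 340 W, and an OD window of +15%/-30% (od_percent_upper = 15, od_percent_lower = 30) give max_power_limit = 340 * 115 / 100 = 391 W and min_power_limit = 300 * 70 / 100 = 210 W. With overdrive disabled, the fallbacks (upper = 0, lower = 100) pin the range to [0, msg_limit].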
@@ -2149,14 +2526,20 @@ static int smu_v13_0_7_baco_enter(struct smu_context *smu)
static int smu_v13_0_7_baco_exit(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
+ int ret;
if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
/* Wait for PMFW handling for the Dstate change */
usleep_range(10000, 11000);
- return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+ ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
} else {
- return smu_v13_0_baco_exit(smu);
+ ret = smu_v13_0_baco_exit(smu);
}
+
+ if (!ret)
+ adev->gfx.is_poweron = false;
+
+ return ret;
}
static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu)
@@ -2179,6 +2562,55 @@ static int smu_v13_0_7_set_df_cstate(struct smu_context *smu,
NULL);
}
+static int smu_v13_0_7_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ SkuTable_t *skutable = &pptable->SkuTable;
+ uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)table_context->overdrive_table;
+ int ret = 0;
+
+ if (limit_type != SMU_DEFAULT_PPT_LIMIT)
+ return -EINVAL;
+
+ if (limit <= msg_limit) {
+ if (smu->current_power_limit > msg_limit) {
+ od_table->OverDriveTable.Ppt = 0;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_PPT_BIT);
+
+ ret = smu_v13_0_7_upload_overdrive_table(smu, od_table);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+ return ret;
+ }
+ }
+ return smu_v13_0_set_power_limit(smu, limit_type, limit);
+ } else if (smu->od_enabled) {
+ ret = smu_v13_0_set_power_limit(smu, limit_type, msg_limit);
+ if (ret)
+ return ret;
+
+ od_table->OverDriveTable.Ppt = (limit * 100) / msg_limit - 100;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_PPT_BIT);
+
+ ret = smu_v13_0_7_upload_overdrive_table(smu, od_table);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+ return ret;
+ }
+
+ smu->current_power_limit = limit;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
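
The two branches split at the firmware message cap: requests at or below msg_limit go through the stock smu_v13_0_set_power_limit() (after clearing any stale OD Ppt boost), while a higher request pins the firmware limit at msg_limit and expresses the excess as an overdrive percentage. With a hypothetical msg_limit of 340 W, asking for 374 W yields Ppt = 374 * 100 / 340 - 100 = 10, i.e. a +10% boost, with current_power_limit recorded as 374 W.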
+
static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
@@ -2230,7 +2662,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.set_fan_control_mode = smu_v13_0_set_fan_control_mode,
.enable_mgpu_fan_boost = smu_v13_0_7_enable_mgpu_fan_boost,
.get_power_limit = smu_v13_0_7_get_power_limit,
- .set_power_limit = smu_v13_0_set_power_limit,
+ .set_power_limit = smu_v13_0_7_set_power_limit,
.set_power_source = smu_v13_0_set_power_source,
.get_power_profile_mode = smu_v13_0_7_get_power_profile_mode,
.set_power_profile_mode = smu_v13_0_7_set_power_profile_mode,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 2e74d749e..2d1736234 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -1024,24 +1024,24 @@ static uint32_t yellow_carp_get_umd_pstate_clk_default(struct smu_context *smu,
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
- if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 8))
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 8))
clk_limit = SMU_13_0_8_UMD_PSTATE_GFXCLK;
- if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 1) ||
- (adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 3))
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 1) ||
+ (amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 3))
clk_limit = SMU_13_0_1_UMD_PSTATE_GFXCLK;
break;
case SMU_SOCCLK:
- if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 8))
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 8))
clk_limit = SMU_13_0_8_UMD_PSTATE_SOCCLK;
- if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 1) ||
- (adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 3))
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 1) ||
+ (amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 3))
clk_limit = SMU_13_0_1_UMD_PSTATE_SOCCLK;
break;
case SMU_FCLK:
- if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 8))
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 8))
clk_limit = SMU_13_0_8_UMD_PSTATE_FCLK;
- if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 1) ||
- (adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 3))
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 1) ||
+ (amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 3))
clk_limit = SMU_13_0_1_UMD_PSTATE_FCLK;
break;
default: