author		Daniel Baumann <daniel.baumann@progress-linux.org>	2024-08-07 13:11:27 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-08-07 13:11:27 +0000
commit		34996e42f82bfd60bc2c191e5cae3c6ab233ec6c
tree		62db60558cbf089714b48daeabca82bf2b20b20e /drivers/gpu/drm/amd/pm
parent		Adding debian version 6.8.12-1.
Merging upstream version 6.9.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/amd/pm')
26 files changed, 1003 insertions(+), 226 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 6627ee07d5..f84bfed506 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -693,6 +693,21 @@ int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t si
 	return ret;
 }
 
+int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
+{
+	struct smu_context *smu = adev->powerplay.pp_handle;
+	int ret;
+
+	if (!is_support_sw_smu(adev))
+		return -EOPNOTSUPP;
+
+	mutex_lock(&adev->pm.mutex);
+	ret = smu_send_rma_reason(smu);
+	mutex_unlock(&adev->pm.mutex);
+
+	return ret;
+}
+
 int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
 				  enum pp_clock_type type,
 				  uint32_t *min,
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index b4698f9856..bbd0169010 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -2034,6 +2034,63 @@ static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 	return 0;
 }
 
+static int pp_od_clk_voltage_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+					 uint32_t mask, enum amdgpu_device_attr_states *states)
+{
+	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
+
+	*states = ATTR_STATE_SUPPORTED;
+
+	if (!amdgpu_dpm_is_overdrive_supported(adev)) {
+		*states = ATTR_STATE_UNSUPPORTED;
+		return 0;
+	}
+
+	/* Enable pp_od_clk_voltage node for gc 9.4.3 SRIOV/BM support */
+	if (gc_ver == IP_VERSION(9, 4, 3)) {
+		if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+			*states = ATTR_STATE_UNSUPPORTED;
+		return 0;
+	}
+
+	if (!(attr->flags & mask))
+		*states = ATTR_STATE_UNSUPPORTED;
+
+	return 0;
+}
+
+static int pp_dpm_dcefclk_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+				      uint32_t mask, enum amdgpu_device_attr_states *states)
+{
+	struct device_attribute *dev_attr = &attr->dev_attr;
+	uint32_t gc_ver;
+
+	*states = ATTR_STATE_SUPPORTED;
+
+	if (!(attr->flags & mask)) {
+		*states = ATTR_STATE_UNSUPPORTED;
+		return 0;
+	}
+
+	gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
+	/* dcefclk node is not available on gfx 11.0.3 sriov */
+	if ((gc_ver == IP_VERSION(11, 0, 3) && amdgpu_sriov_is_pp_one_vf(adev)) ||
+	    gc_ver < IP_VERSION(9, 0, 0) ||
+	    !amdgpu_device_has_display_hardware(adev))
+		*states = ATTR_STATE_UNSUPPORTED;
+
+	/* SMU MP1 does not support dcefclk level setting,
+	 * setting should not be allowed from VF if not in one VF mode.
+	 */
+	if (gc_ver >= IP_VERSION(10, 0, 0) ||
+	    (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))) {
+		dev_attr->attr.mode &= ~S_IWUGO;
+		dev_attr->store = NULL;
+	}
+
+	return 0;
+}
+
 /* Following items will be read out to indicate current plpd policy:
  *  - -1: none
  *  - 0: disallow
@@ -2113,12 +2170,14 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1,		ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk,		ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1,		ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk,		ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk,		ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_dcefclk_attr_update),
 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie,		ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
 	AMDGPU_DEVICE_ATTR_RW(pp_sclk_od,		ATTR_FLAG_BASIC),
 	AMDGPU_DEVICE_ATTR_RW(pp_mclk_od,		ATTR_FLAG_BASIC),
 	AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode,	ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage,	ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage,	ATTR_FLAG_BASIC,
+			      .attr_update = pp_od_clk_voltage_attr_update),
 	AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent,		ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
 	AMDGPU_DEVICE_ATTR_RO(mem_busy_percent,		ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
 	AMDGPU_DEVICE_ATTR_RO(pcie_bw,			ATTR_FLAG_BASIC),
@@ -2156,17 +2215,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
 		if (gc_ver < IP_VERSION(9, 0, 0))
 			*states = ATTR_STATE_UNSUPPORTED;
-	} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
-		if (gc_ver < IP_VERSION(9, 0, 0) ||
-		    !amdgpu_device_has_display_hardware(adev))
-			*states = ATTR_STATE_UNSUPPORTED;
 	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
 		if (mp1_ver < IP_VERSION(10, 0, 0))
 			*states = ATTR_STATE_UNSUPPORTED;
-	} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
-		*states = ATTR_STATE_UNSUPPORTED;
-		if (amdgpu_dpm_is_overdrive_supported(adev))
-			*states = ATTR_STATE_SUPPORTED;
 	} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
 		if ((adev->flags & AMD_IS_APU &&
 		     gc_ver != IP_VERSION(9, 4, 3)) ||
@@ -2174,7 +2225,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 			*states = ATTR_STATE_UNSUPPORTED;
 	} else if (DEVICE_ATTR_IS(pcie_bw)) {
 		/* PCIe Perf counters won't work on APU nodes */
-		if (adev->flags & AMD_IS_APU)
+		if (adev->flags & AMD_IS_APU ||
+		    !adev->asic_funcs->get_pcie_usage)
 			*states = ATTR_STATE_UNSUPPORTED;
 	} else if (DEVICE_ATTR_IS(unique_id)) {
 		switch (gc_ver) {
@@ -2280,14 +2332,6 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 		break;
 	}
 
-	if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
-		/* SMU MP1 does not support dcefclk level setting */
-		if (gc_ver >= IP_VERSION(10, 0, 0)) {
-			dev_attr->attr.mode &= ~S_IWUGO;
-			dev_attr->store = NULL;
-		}
-	}
-
 	/* setting should not be allowed from VF if not in one VF mode */
 	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
 		dev_attr->attr.mode &= ~S_IWUGO;
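Both new callbacks hook into the .attr_update member of struct amdgpu_device_attr, letting each sysfs node carry its own visibility logic instead of funneling everything through default_attr_update(). A minimal sketch of the pattern, assuming the structures shown in the hunks above (the registration path itself is not part of this diff):

	/* Sketch only: a per-attribute update hook can hide the node entirely
	 * (ATTR_STATE_UNSUPPORTED) or keep it visible but read-only. */
	static int example_attr_update(struct amdgpu_device *adev,
				       struct amdgpu_device_attr *attr,
				       uint32_t mask,
				       enum amdgpu_device_attr_states *states)
	{
		*states = ATTR_STATE_SUPPORTED;

		if (!(attr->flags & mask)) {
			*states = ATTR_STATE_UNSUPPORTED; /* node is not created */
			return 0;
		}

		/* keep the node but strip write permission */
		attr->dev_attr.attr.mode &= ~S_IWUGO;
		attr->dev_attr.store = NULL;
		return 0;
	}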
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 3047ffe7f2..621200e082 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -450,6 +450,7 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio
 int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable);
 int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size);
 int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size);
+int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev);
 int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
 				  enum pp_clock_type type,
 				  uint32_t *min,
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 5cb4725c77..c8586cb7d0 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -164,6 +164,8 @@ static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
 
 	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
 		if (table[i].ulSupportedSCLK != 0) {
+			if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES)
+				continue;
 			vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
 				table[i].usVoltageID;
 			vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
index f503e61faa..b1b4c09c34 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
@@ -226,7 +226,7 @@ int atomctrl_set_engine_dram_timings_rv770(
 
 	return amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
-			(uint32_t *)&engine_clock_parameters);
+			(uint32_t *)&engine_clock_parameters, sizeof(engine_clock_parameters));
 }
 
 /*
@@ -297,7 +297,7 @@ int atomctrl_get_memory_pll_dividers_si(
 
 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
-			(uint32_t *)&mpll_parameters);
+			(uint32_t *)&mpll_parameters, sizeof(mpll_parameters));
 
 	if (0 == result) {
 		mpll_param->mpll_fb_divider.clk_frac =
@@ -345,7 +345,7 @@ int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
 
 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
-			(uint32_t *)&mpll_parameters);
+			(uint32_t *)&mpll_parameters, sizeof(mpll_parameters));
 
 	if (!result)
 		mpll_param->mpll_post_divider =
@@ -366,7 +366,7 @@ int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr,
 
 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
-			(uint32_t *)&mpll_parameters);
+			(uint32_t *)&mpll_parameters, sizeof(mpll_parameters));
 
 	/* VEGAM's mpll takes sometime to finish computing */
 	udelay(10);
@@ -396,7 +396,7 @@ int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
 
 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
-			(uint32_t *)&pll_parameters);
+			(uint32_t *)&pll_parameters, sizeof(pll_parameters));
 
 	if (0 == result) {
 		dividers->pll_post_divider = pll_parameters.ucPostDiv;
@@ -420,7 +420,7 @@ int atomctrl_get_engine_pll_dividers_vi(
 
 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
-			(uint32_t *)&pll_patameters);
+			(uint32_t *)&pll_patameters, sizeof(pll_patameters));
 
 	if (0 == result) {
 		dividers->pll_post_divider =
@@ -457,7 +457,7 @@ int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr,
 
 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
-			(uint32_t *)&pll_patameters);
+			(uint32_t *)&pll_patameters, sizeof(pll_patameters));
 
 	if (0 == result) {
 		dividers->usSclk_fcw_frac =
 			le16_to_cpu(pll_patameters.usSclk_fcw_frac);
@@ -490,7 +490,7 @@ int atomctrl_get_dfs_pll_dividers_vi(
 
 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
-			(uint32_t *)&pll_patameters);
+			(uint32_t *)&pll_patameters, sizeof(pll_patameters));
 
 	if (0 == result) {
 		dividers->pll_post_divider =
@@ -773,7 +773,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
 
 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
-			(uint32_t *)&sOutput_FuseValues);
+			(uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
 	if (result)
 		return result;
@@ -794,7 +794,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
 
 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
-			(uint32_t *)&sOutput_FuseValues);
+			(uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
 	if (result)
 		return result;
@@ -814,7 +814,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
 
 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
-			(uint32_t *)&sOutput_FuseValues);
+			(uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
 	if (result)
 		return result;
@@ -835,7 +835,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
 
 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
-			(uint32_t *)&sOutput_FuseValues);
+			(uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
 	if (result)
 		return result;
@@ -857,7 +857,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
 
 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
-			(uint32_t *)&sOutput_FuseValues);
+			(uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
 	if (result)
 		return result;
@@ -878,7 +878,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
 
 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
-			(uint32_t *)&sOutput_FuseValues);
+			(uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
 	if (result)
 		return result;
@@ -909,7 +909,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
 
 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
-			(uint32_t *)&sOutput_FuseValues);
+			(uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
 	if (result)
 		return result;
@@ -1134,7 +1134,7 @@ int atomctrl_get_voltage_evv_on_sclk(
 
 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
-			(uint32_t *)&get_voltage_info_param_space);
+			(uint32_t *)&get_voltage_info_param_space, sizeof(get_voltage_info_param_space));
 
 	*voltage = result ? 0 :
 			le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
@@ -1179,7 +1179,7 @@ int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr,
 
 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
-			(uint32_t *)&get_voltage_info_param_space);
+			(uint32_t *)&get_voltage_info_param_space, sizeof(get_voltage_info_param_space));
 
 	if (0 != result)
 		return result;
@@ -1359,7 +1359,7 @@ int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index,
 
 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
-			(uint32_t *)&efuse_param);
+			(uint32_t *)&efuse_param, sizeof(efuse_param));
 
 	*efuse = result ? 0 : le32_to_cpu(efuse_param.ulEfuseValue) & mask;
 
 	return result;
@@ -1380,7 +1380,7 @@ int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
 
 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
-			(uint32_t *)&memory_clock_parameters);
+			(uint32_t *)&memory_clock_parameters, sizeof(memory_clock_parameters));
 
 	return result;
 }
@@ -1399,7 +1399,7 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
 
 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
-			(uint32_t *)&get_voltage_info_param_space);
+			(uint32_t *)&get_voltage_info_param_space, sizeof(get_voltage_info_param_space));
 
 	*voltage = result ? 0 :
 		le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel);
@@ -1526,7 +1526,7 @@ int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual
 
 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, SetVoltage),
-			(uint32_t *)voltage_parameters);
+			(uint32_t *)voltage_parameters, sizeof(*voltage_parameters));
 
 	*virtual_voltage_id = voltage_parameters->usVoltageLevel;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c
index a47a47238e..82d5403343 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c
@@ -258,7 +258,7 @@ int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
 	idx = GetIndexIntoMasterCmdTable(computegpuclockparam);
 
 	if (amdgpu_atom_execute_table(
-		adev->mode_info.atom_context, idx, (uint32_t *)&pll_parameters))
+		adev->mode_info.atom_context, idx, (uint32_t *)&pll_parameters, sizeof(pll_parameters)))
 		return -EINVAL;
 
 	pll_output = (struct compute_gpu_clock_output_parameter_v1_8 *)
@@ -505,7 +505,7 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr,
 	ix = GetIndexIntoMasterCmdTable(getsmuclockinfo);
 
 	if (amdgpu_atom_execute_table(
-		adev->mode_info.atom_context, ix, (uint32_t *)&parameters))
+		adev->mode_info.atom_context, ix, (uint32_t *)&parameters, sizeof(parameters)))
 		return -EINVAL;
 
 	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&parameters;
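Every amdgpu_atom_execute_table() call in these two files now passes the size of its parameter block alongside the pointer, which lets the ATOM BIOS interpreter bounds-check accesses to the caller's buffer. The call-site pattern, reduced to a hedged sketch (the wrapper below is illustrative and not part of the diff):

	/* Sketch: the added sizeof() argument tells the interpreter how
	 * large the caller-supplied parameter buffer really is. */
	static int execute_checked(struct atom_context *ctx, int index,
				   void *params, int params_size)
	{
		return amdgpu_atom_execute_table(ctx, index,
						 (uint32_t *)params, params_size);
	}
	/* typical caller: execute_checked(ctx, idx, &mpll_parameters,
	 *                                sizeof(mpll_parameters)); */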
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index ba1597b01a..65333141b1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -712,6 +712,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
 		smu_v13_0_7_set_ppt_funcs(smu);
 		break;
 	case IP_VERSION(14, 0, 0):
+	case IP_VERSION(14, 0, 1):
 		smu_v14_0_0_set_ppt_funcs(smu);
 		break;
 	default:
@@ -751,6 +752,7 @@ static int smu_early_init(void *handle)
 
 static int smu_set_default_dpm_table(struct smu_context *smu)
 {
+	struct amdgpu_device *adev = smu->adev;
 	struct smu_power_context *smu_power = &smu->smu_power;
 	struct smu_power_gate *power_gate = &smu_power->power_gate;
 	int vcn_gate, jpeg_gate;
@@ -759,25 +761,34 @@ static int smu_set_default_dpm_table(struct smu_context *smu)
 	if (!smu->ppt_funcs->set_default_dpm_table)
 		return 0;
 
-	vcn_gate = atomic_read(&power_gate->vcn_gated);
-	jpeg_gate = atomic_read(&power_gate->jpeg_gated);
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
+		vcn_gate = atomic_read(&power_gate->vcn_gated);
+	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
+		jpeg_gate = atomic_read(&power_gate->jpeg_gated);
 
-	ret = smu_dpm_set_vcn_enable(smu, true);
-	if (ret)
-		return ret;
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
+		ret = smu_dpm_set_vcn_enable(smu, true);
+		if (ret)
+			return ret;
+	}
 
-	ret = smu_dpm_set_jpeg_enable(smu, true);
-	if (ret)
-		goto err_out;
+	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
+		ret = smu_dpm_set_jpeg_enable(smu, true);
+		if (ret)
+			goto err_out;
+	}
 
 	ret = smu->ppt_funcs->set_default_dpm_table(smu);
 	if (ret)
 		dev_err(smu->adev->dev, "Failed to setup default dpm clock tables!\n");
 
-	smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
+	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
+		smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
 err_out:
-	smu_dpm_set_vcn_enable(smu, !vcn_gate);
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
+		smu_dpm_set_vcn_enable(smu, !vcn_gate);
+
 	return ret;
 }
@@ -1885,6 +1896,7 @@ static int smu_disable_dpms(struct smu_context *smu)
 	case IP_VERSION(13, 0, 4):
 	case IP_VERSION(13, 0, 11):
 	case IP_VERSION(14, 0, 0):
+	case IP_VERSION(14, 0, 1):
 		return 0;
 	default:
 		break;
@@ -3692,3 +3704,13 @@ int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
 
 	return ret;
 }
+
+int smu_send_rma_reason(struct smu_context *smu)
+{
+	int ret = 0;
+
+	if (smu->ppt_funcs && smu->ppt_funcs->send_rma_reason)
+		ret = smu->ppt_funcs->send_rma_reason(smu);
+
+	return ret;
+}
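Note the two layers of the new RMA path: smu_send_rma_reason() treats a missing ->send_rma_reason backend callback as success, while the amdgpu_dpm_send_rma_reason() wrapper from the first hunk returns -EOPNOTSUPP when software SMU is unavailable. A hedged usage sketch (the calling function is hypothetical; both callees appear in this diff):

	/* Hypothetical RAS-side caller of the new API. */
	static int report_rma(struct amdgpu_device *adev)
	{
		int ret = amdgpu_dpm_send_rma_reason(adev); /* takes adev->pm.mutex */

		if (ret == -EOPNOTSUPP)
			return 0; /* no sw SMU on this ASIC: nothing to report */
		return ret;
	}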
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 2aa4fea873..1fa8157578 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -1343,6 +1343,11 @@ struct pptable_funcs {
 	int (*send_hbm_bad_pages_num)(struct smu_context *smu, uint32_t size);
 
 	/**
+	 * @send_rma_reason: message rma reason event to SMU.
+	 */
+	int (*send_rma_reason)(struct smu_context *smu);
+
+	/**
 	 * @get_ecc_table: message SMU to get ECC INFO table.
 	 */
 	ssize_t (*get_ecc_info)(struct smu_context *smu, void *table);
@@ -1589,5 +1594,6 @@ int smu_stb_collect_info(struct smu_context *smu, void *buff, uint32_t size);
 void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev);
 int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size);
 int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size);
+int smu_send_rma_reason(struct smu_context *smu);
 #endif
 #endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
index 5bb7a63c06..97522c0852 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
@@ -144,6 +144,37 @@ typedef struct {
 	uint32_t MaxGfxClk;
 } DpmClocks_t;
 
+//Freq in MHz
+//Voltage in milli volts with 2 fractional bits
+typedef struct {
+	uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
+	uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
+	uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
+	uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
+	uint32_t VClocks0[NUM_VCN_DPM_LEVELS];
+	uint32_t VClocks1[NUM_VCN_DPM_LEVELS];
+	uint32_t DClocks0[NUM_VCN_DPM_LEVELS];
+	uint32_t DClocks1[NUM_VCN_DPM_LEVELS];
+	uint32_t VPEClocks[NUM_VPE_DPM_LEVELS];
+	uint32_t FclkClocks_Freq[NUM_FCLK_DPM_LEVELS];
+	uint32_t FclkClocks_Voltage[NUM_FCLK_DPM_LEVELS];
+	uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
+	MemPstateTable_t MemPstateTable[NUM_MEM_PSTATE_LEVELS];
+
+	uint8_t NumDcfClkLevelsEnabled;
+	uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
+	uint8_t NumSocClkLevelsEnabled;
+	uint8_t Vcn0ClkLevelsEnabled;    //Applies to both Vclk0 and Dclk0
+	uint8_t Vcn1ClkLevelsEnabled;    //Applies to both Vclk1 and Dclk1
+	uint8_t VpeClkLevelsEnabled;
+	uint8_t NumMemPstatesEnabled;
+	uint8_t NumFclkLevelsEnabled;
+	uint8_t spare;
+
+	uint32_t MinGfxClk;
+	uint32_t MaxGfxClk;
+} DpmClocks_t_v14_0_1;
+
 typedef struct {
 	uint16_t CoreFrequency[16];   //Target core frequency [MHz]
 	uint16_t CorePower[16];       //CAC calculated core power [mW]
@@ -224,7 +255,7 @@ typedef enum {
 #define TABLE_CUSTOM_DPM        2 // Called by Driver
 #define TABLE_BIOS_GPIO_CONFIG  3 // Called by BIOS
 #define TABLE_DPMCLOCKS         4 // Called by Driver and VBIOS
-#define TABLE_SPARE0            5 // Unused
+#define TABLE_MOMENTARY_PM      5 // Called by Tools
 #define TABLE_MODERN_STDBY      6 // Called by Tools for Modern Standby Log
 #define TABLE_SMU_METRICS       7 // Called by Driver and SMF/PMF
 #define TABLE_COUNT             8
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
index 509e3cd483..86758051cb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -91,7 +91,8 @@
 #define PPSMC_MSG_QueryValidMcaCeCount      0x3A
 #define PPSMC_MSG_McaBankCeDumpDW           0x3B
 #define PPSMC_MSG_SelectPLPDMode            0x40
-#define PPSMC_Message_Count                 0x41
+#define PPSMC_MSG_RmaDueToBadPageThreshold  0x43
+#define PPSMC_Message_Count                 0x44
 
 //PPSMC Reset Types for driver msg argument
 #define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET  0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h
index 356e0f57a4..ddb6258600 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h
@@ -42,7 +42,7 @@
 #define FEATURE_EDC_BIT                 7
 #define FEATURE_PLL_POWER_DOWN_BIT      8
 #define FEATURE_VDDOFF_BIT              9
-#define FEATURE_VCN_DPM_BIT             10
+#define FEATURE_VCN_DPM_BIT             10   /* this is for both VCN0 and VCN1 */
 #define FEATURE_DS_MPM_BIT              11
 #define FEATURE_FCLK_DPM_BIT            12
 #define FEATURE_SOCCLK_DPM_BIT          13
@@ -56,9 +56,9 @@
 #define FEATURE_DS_GFXCLK_BIT           21
 #define FEATURE_DS_SOCCLK_BIT           22
 #define FEATURE_DS_LCLK_BIT             23
-#define FEATURE_LOW_POWER_DCNCLKS_BIT   24  // for all DISP clks
+#define FEATURE_LOW_POWER_DCNCLKS_BIT   24
 #define FEATURE_DS_SHUBCLK_BIT          25
-#define FEATURE_SPARE0_BIT              26  //SPARE
+#define FEATURE_RESERVED0_BIT           26
 #define FEATURE_ZSTATES_BIT             27
 #define FEATURE_IOMMUL2_PG_BIT          28
 #define FEATURE_DS_FCLK_BIT             29
@@ -66,8 +66,8 @@
 #define FEATURE_DS_MP1CLK_BIT           31
 #define FEATURE_WHISPER_MODE_BIT        32
 #define FEATURE_SMU_LOW_POWER_BIT       33
-#define FEATURE_SMART_L3_RINSER_BIT     34
-#define FEATURE_SPARE1_BIT              35  //SPARE
+#define FEATURE_RESERVED1_BIT           34  /* v14_0_0 SMART_L3_RINSER; v14_0_1 RESERVED1 */
+#define FEATURE_GFX_DEM_BIT             35  /* v14_0_0 SPARE; v14_0_1 GFX_DEM */
 #define FEATURE_PSI_BIT                 36
 #define FEATURE_PROCHOT_BIT             37
 #define FEATURE_CPUOFF_BIT              38
@@ -77,11 +77,11 @@
 #define FEATURE_PERF_LIMIT_BIT          42
 #define FEATURE_CORE_DLDO_BIT           43
 #define FEATURE_DVO_BIT                 44
-#define FEATURE_DS_VCN_BIT              45
+#define FEATURE_DS_VCN_BIT              45  /* v14_0_1 this is for both VCN0 and VCN1 */
 #define FEATURE_CPPC_BIT                46
 #define FEATURE_CPPC_PREFERRED_CORES    47
 #define FEATURE_DF_CSTATES_BIT          48
-#define FEATURE_SPARE2_BIT              49  //SPARE
+#define FEATURE_FAST_PSTATE_CLDO_BIT    49  /* v14_0_0 SPARE */
 #define FEATURE_ATHUB_PG_BIT            50
 #define FEATURE_VDDOFF_ECO_BIT          51
 #define FEATURE_ZSTATES_ECO_BIT         52
@@ -93,8 +93,8 @@
 #define FEATURE_DS_IPUCLK_BIT           58
 #define FEATURE_DS_VPECLK_BIT           59
 #define FEATURE_VPE_DPM_BIT             60
-#define FEATURE_SPARE_61                61
-#define FEATURE_FP_DIDT                 62
+#define FEATURE_SMART_L3_RINSER_BIT     61  /* v14_0_0 SPARE */
+#define FEATURE_PCC_BIT                 62  /* v14_0_0 FP_DIDT v14_0_1 PCC_BIT */
 #define NUM_FEATURES                    63
 
 // Firmware Header/Footer
@@ -151,6 +151,43 @@ typedef struct {
 	// MP1_EXT_SCRATCH7 = RTOS Current Job
 } FwStatus_t;
 
+typedef struct {
+	// MP1_EXT_SCRATCH0
+	uint32_t DpmHandlerID         : 8;
+	uint32_t ActivityMonitorID    : 8;
+	uint32_t DpmTimerID           : 8;
+	uint32_t DpmHubID             : 4;
+	uint32_t DpmHubTask           : 4;
+	// MP1_EXT_SCRATCH1
+	uint32_t CclkSyncStatus       : 8;
+	uint32_t ZstateStatus         : 4;
+	uint32_t Cpu1VddOff           : 4;
+	uint32_t DstateFun            : 4;
+	uint32_t DstateDev            : 4;
+	uint32_t GfxOffStatus         : 2;
+	uint32_t Cpu0Off              : 2;
+	uint32_t Cpu1Off              : 2;
+	uint32_t Cpu0VddOff           : 2;
+	// MP1_EXT_SCRATCH2
+	uint32_t P2JobHandler         :32;
+	// MP1_EXT_SCRATCH3
+	uint32_t PostCode             :32;
+	// MP1_EXT_SCRATCH4
+	uint32_t MsgPortBusy          :15;
+	uint32_t RsmuPmiP1Pending     : 1;
+	uint32_t RsmuPmiP2PendingCnt  : 8;
+	uint32_t DfCstateExitPending  : 1;
+	uint32_t Pc6EntryPending      : 1;
+	uint32_t Pc6ExitPending       : 1;
+	uint32_t WarmResetPending     : 1;
+	uint32_t Mp0ClkPending        : 1;
+	uint32_t InWhisperMode        : 1;
+	uint32_t spare2               : 2;
+	// MP1_EXT_SCRATCH5
+	uint32_t IdleMask             :32;
+	// MP1_EXT_SCRATCH6 = RTOS threads' status
+	// MP1_EXT_SCRATCH7 = RTOS Current Job
+} FwStatus_t_v14_0_1;
+
 #pragma pack(pop)
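These FEATURE_*_BIT indices are consumed through FEATURE_MASK(), defined as (1ULL << feature) in smu_v14_0.h later in this diff. A self-contained sketch of testing one bit against a 64-bit enabled-features word (the helper name is illustrative):

	#include <stdint.h>

	#define FEATURE_VCN_DPM_BIT   10              /* from the header above */
	#define FEATURE_MASK(feature) (1ULL << (feature))

	/* nonzero when VCN DPM (covering both VCN0 and VCN1) is enabled */
	static inline int vcn_dpm_enabled(uint64_t enabled_features)
	{
		return (enabled_features & FEATURE_MASK(FEATURE_VCN_DPM_BIT)) != 0;
	}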
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
index 8a8a57c56b..c4dc5881d8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
@@ -54,14 +54,14 @@
 #define PPSMC_MSG_TestMessage            0x01  ///< To check if PMFW is alive and responding. Requirement specified by PMFW team
 #define PPSMC_MSG_GetPmfwVersion         0x02  ///< Get PMFW version
 #define PPSMC_MSG_GetDriverIfVersion     0x03  ///< Get PMFW_DRIVER_IF version
-#define PPSMC_MSG_SPARE0                 0x04  ///< SPARE
-#define PPSMC_MSG_SPARE1                 0x05  ///< SPARE
-#define PPSMC_MSG_PowerDownVcn           0x06  ///< Power down VCN
-#define PPSMC_MSG_PowerUpVcn             0x07  ///< Power up VCN; VCN is power gated by default
-#define PPSMC_MSG_SetHardMinVcn          0x08  ///< For wireless display
+#define PPSMC_MSG_PowerDownVcn1          0x04  ///< Power down VCN1
+#define PPSMC_MSG_PowerUpVcn1            0x05  ///< Power up VCN1; VCN1 is power gated by default
+#define PPSMC_MSG_PowerDownVcn0          0x06  ///< Power down VCN0
+#define PPSMC_MSG_PowerUpVcn0            0x07  ///< Power up VCN0; VCN0 is power gated by default
+#define PPSMC_MSG_SetHardMinVcn0         0x08  ///< For wireless display
 #define PPSMC_MSG_SetSoftMinGfxclk       0x09  ///< Set SoftMin for GFXCLK, argument is frequency in MHz
-#define PPSMC_MSG_SPARE2                 0x0A  ///< SPARE
-#define PPSMC_MSG_SPARE3                 0x0B  ///< SPARE
+#define PPSMC_MSG_SetHardMinVcn1         0x0A  ///< For wireless display
+#define PPSMC_MSG_SetSoftMinVcn1         0x0B  ///< Set soft min for VCN1 clocks (VCLK1 and DCLK1)
 #define PPSMC_MSG_PrepareMp1ForUnload    0x0C  ///< Prepare PMFW for GFX driver unload
 #define PPSMC_MSG_SetDriverDramAddrHigh  0x0D  ///< Set high 32 bits of DRAM address for Driver table transfer
 #define PPSMC_MSG_SetDriverDramAddrLow   0x0E  ///< Set low 32 bits of DRAM address for Driver table transfer
@@ -71,36 +71,32 @@
 #define PPSMC_MSG_GetEnabledSmuFeatures  0x12  ///< Get enabled features in PMFW
 #define PPSMC_MSG_SetHardMinSocclkByFreq 0x13  ///< Set hard min for SOC CLK
 #define PPSMC_MSG_SetSoftMinFclk         0x14  ///< Set hard min for FCLK
-#define PPSMC_MSG_SetSoftMinVcn          0x15  ///< Set soft min for VCN clocks (VCLK and DCLK)
-
+#define PPSMC_MSG_SetSoftMinVcn0         0x15  ///< Set soft min for VCN0 clocks (VCLK0 and DCLK0)
 #define PPSMC_MSG_EnableGfxImu           0x16  ///< Enable GFX IMU
-
-#define PPSMC_MSG_spare_0x17             0x17
-#define PPSMC_MSG_spare_0x18             0x18
+#define PPSMC_MSG_spare_0x17             0x17  ///< Get GFX clock frequency
+#define PPSMC_MSG_spare_0x18             0x18  ///< Get FCLK frequency
 #define PPSMC_MSG_AllowGfxOff            0x19  ///< Inform PMFW of allowing GFXOFF entry
 #define PPSMC_MSG_DisallowGfxOff         0x1A  ///< Inform PMFW of disallowing GFXOFF entry
 #define PPSMC_MSG_SetSoftMaxGfxClk       0x1B  ///< Set soft max for GFX CLK
 #define PPSMC_MSG_SetHardMinGfxClk       0x1C  ///< Set hard min for GFX CLK
-
 #define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x1D  ///< Set soft max for SOC CLK
 #define PPSMC_MSG_SetSoftMaxFclkByFreq   0x1E  ///< Set soft max for FCLK
-#define PPSMC_MSG_SetSoftMaxVcn          0x1F  ///< Set soft max for VCN clocks (VCLK and DCLK)
-#define PPSMC_MSG_spare_0x20             0x20
-#define PPSMC_MSG_PowerDownJpeg          0x21  ///< Power down Jpeg
-#define PPSMC_MSG_PowerUpJpeg            0x22  ///< Power up Jpeg; VCN is power gated by default
-
+#define PPSMC_MSG_SetSoftMaxVcn0         0x1F  ///< Set soft max for VCN0 clocks (VCLK0 and DCLK0)
+#define PPSMC_MSG_spare_0x20             0x20  ///< Set power limit percentage
+#define PPSMC_MSG_PowerDownJpeg0         0x21  ///< Power down Jpeg of VCN0
+#define PPSMC_MSG_PowerUpJpeg0           0x22  ///< Power up Jpeg of VCN0; VCN0 is power gated by default
 #define PPSMC_MSG_SetHardMinFclkByFreq   0x23  ///< Set hard min for FCLK
 #define PPSMC_MSG_SetSoftMinSocclkByFreq 0x24  ///< Set soft min for SOC CLK
 #define PPSMC_MSG_AllowZstates           0x25  ///< Inform PMFM of allowing Zstate entry, i.e. no Miracast activity
-#define PPSMC_MSG_Reserved               0x26  ///< Not used
-#define PPSMC_MSG_Reserved1              0x27  ///< Not used, previously PPSMC_MSG_RequestActiveWgp
-#define PPSMC_MSG_Reserved2              0x28  ///< Not used, previously PPSMC_MSG_QueryActiveWgp
+#define PPSMC_MSG_PowerDownJpeg1         0x26  ///< Power down Jpeg of VCN1
+#define PPSMC_MSG_PowerUpJpeg1           0x27  ///< Power up Jpeg of VCN1; VCN1 is power gated by default
+#define PPSMC_MSG_SetSoftMaxVcn1         0x28  ///< Set soft max for VCN1 clocks (VCLK1 and DCLK1)
 #define PPSMC_MSG_PowerDownIspByTile     0x29  ///< ISP is power gated by default
 #define PPSMC_MSG_PowerUpIspByTile       0x2A  ///< This message is used to power up ISP tiles and enable the ISP DPM
 #define PPSMC_MSG_SetHardMinIspiclkByFreq 0x2B ///< Set HardMin by frequency for ISPICLK
 #define PPSMC_MSG_SetHardMinIspxclkByFreq 0x2C ///< Set HardMin by frequency for ISPXCLK
-#define PPSMC_MSG_PowerDownUmsch         0x2D  ///< Power down VCN.UMSCH (aka VSCH) scheduler
-#define PPSMC_MSG_PowerUpUmsch           0x2E  ///< Power up VCN.UMSCH (aka VSCH) scheduler
+#define PPSMC_MSG_PowerDownUmsch         0x2D  ///< Power down VCN0.UMSCH (aka VSCH) scheduler
+#define PPSMC_MSG_PowerUpUmsch           0x2E  ///< Power up VCN0.UMSCH (aka VSCH) scheduler
 #define PPSMC_Message_IspStutterOn_MmhubPgDis  0x2F  ///< ISP StutterOn mmHub PgDis
 #define PPSMC_Message_IspStutterOff_MmhubPgEn  0x30  ///< ISP StufferOff mmHub PgEn
 #define PPSMC_MSG_PowerUpVpe             0x31  ///< Power up VPE
@@ -110,7 +106,9 @@
 #define PPSMC_MSG_DisableLSdma           0x35  ///< Disable LSDMA
 #define PPSMC_MSG_SetSoftMaxVpe          0x36  ///<
 #define PPSMC_MSG_SetSoftMinVpe          0x37  ///<
-#define PPSMC_Message_Count              0x38  ///< Total number of PPSMC messages
+#define PPSMC_MSG_AllocMALLCache         0x38  ///< Allocating MALL Cache
+#define PPSMC_MSG_ReleaseMALLCache       0x39  ///< Releasing MALL Cache
+#define PPSMC_Message_Count              0x3A  ///< Total number of PPSMC messages
 /** @}*/
 
 /**
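A new PPSMC_MSG_* opcode only becomes reachable from the driver once a matching SMU_MSG_* enum entry exists and the per-ASIC message table maps one onto the other; that is what the smu_types.h and *_ppt.c hunks below add. The core of the pattern, heavily simplified (the real MSG_MAP macro also records a valid-in-virtualization flag):

	/* Simplified sketch of the enum-to-opcode mapping used below. */
	enum smu_message_type {
		SMU_MSG_PowerUpVcn0, /* generated via __SMU_DUMMY_MAP(PowerUpVcn0) */
		SMU_MSG_PowerUpVcn1,
		SMU_MSG_MAX_COUNT,
	};

	struct msg_mapping { int valid; int asic_opcode; };

	static const struct msg_mapping v14_0_0_map[SMU_MSG_MAX_COUNT] = {
		[SMU_MSG_PowerUpVcn0] = { 1, 0x07 }, /* PPSMC_MSG_PowerUpVcn0 */
		[SMU_MSG_PowerUpVcn1] = { 1, 0x05 }, /* PPSMC_MSG_PowerUpVcn1 */
	};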
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index 953a767613..af427cc7db 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -115,6 +115,10 @@
 	__SMU_DUMMY_MAP(PowerDownVcn),                \
 	__SMU_DUMMY_MAP(PowerUpJpeg),                 \
 	__SMU_DUMMY_MAP(PowerDownJpeg),               \
+	__SMU_DUMMY_MAP(PowerUpJpeg0),                \
+	__SMU_DUMMY_MAP(PowerDownJpeg0),              \
+	__SMU_DUMMY_MAP(PowerUpJpeg1),                \
+	__SMU_DUMMY_MAP(PowerDownJpeg1),              \
 	__SMU_DUMMY_MAP(BacoAudioD3PME),              \
 	__SMU_DUMMY_MAP(ArmD3),                       \
 	__SMU_DUMMY_MAP(RunDcBtc),                    \
@@ -135,6 +139,8 @@
 	__SMU_DUMMY_MAP(PowerUpSdma),                 \
 	__SMU_DUMMY_MAP(SetHardMinIspclkByFreq),      \
 	__SMU_DUMMY_MAP(SetHardMinVcn),               \
+	__SMU_DUMMY_MAP(SetHardMinVcn0),              \
+	__SMU_DUMMY_MAP(SetHardMinVcn1),              \
 	__SMU_DUMMY_MAP(SetAllowFclkSwitch),          \
 	__SMU_DUMMY_MAP(SetMinVideoGfxclkFreq),       \
 	__SMU_DUMMY_MAP(ActiveProcessNotify),         \
@@ -150,6 +156,8 @@
 	__SMU_DUMMY_MAP(SetPhyclkVoltageByFreq),      \
 	__SMU_DUMMY_MAP(SetDppclkVoltageByFreq),      \
 	__SMU_DUMMY_MAP(SetSoftMinVcn),               \
+	__SMU_DUMMY_MAP(SetSoftMinVcn0),              \
+	__SMU_DUMMY_MAP(SetSoftMinVcn1),              \
 	__SMU_DUMMY_MAP(EnablePostCode),              \
 	__SMU_DUMMY_MAP(GetGfxclkFrequency),          \
 	__SMU_DUMMY_MAP(GetFclkFrequency),            \
@@ -161,6 +169,8 @@
 	__SMU_DUMMY_MAP(SetSoftMaxSocclkByFreq),      \
 	__SMU_DUMMY_MAP(SetSoftMaxFclkByFreq),        \
 	__SMU_DUMMY_MAP(SetSoftMaxVcn),               \
+	__SMU_DUMMY_MAP(SetSoftMaxVcn0),              \
+	__SMU_DUMMY_MAP(SetSoftMaxVcn1),              \
 	__SMU_DUMMY_MAP(PowerGateMmHub),              \
 	__SMU_DUMMY_MAP(UpdatePmeRestore),            \
 	__SMU_DUMMY_MAP(GpuChangeState),              \
@@ -261,7 +271,8 @@
 	__SMU_DUMMY_MAP(SetSoftMaxVpe),               \
 	__SMU_DUMMY_MAP(SetSoftMinVpe),               \
 	__SMU_DUMMY_MAP(GetMetricsVersion),           \
-	__SMU_DUMMY_MAP(EnableUCLKShadow),
+	__SMU_DUMMY_MAP(EnableUCLKShadow),            \
+	__SMU_DUMMY_MAP(RmaDueToBadPageThreshold),
 
 #undef __SMU_DUMMY_MAP
 #define __SMU_DUMMY_MAP(type)	SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
index 3f7463c1c1..4af1985ae4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
@@ -27,6 +27,7 @@
 
 #define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF
 #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6
 #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x1
 
 #define FEATURE_MASK(feature) (1ULL << feature)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 40ba7227cc..0c2d04f978 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1283,11 +1283,8 @@ static int arcturus_get_power_limit(struct smu_context *smu,
 					uint32_t *max_power_limit,
 					uint32_t *min_power_limit)
 {
-	struct smu_11_0_powerplay_table *powerplay_table =
-		(struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
-	struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
 	PPTable_t *pptable = smu->smu_table.driver_pptable;
-	uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
+	uint32_t power_limit;
 
 	if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
 		/* the last hope to figure out the ppt limit */
@@ -1303,30 +1300,10 @@ static int arcturus_get_power_limit(struct smu_context *smu,
 		*current_power_limit = power_limit;
 	if (default_power_limit)
 		*default_power_limit = power_limit;
-
-	if (powerplay_table) {
-		if (smu->od_enabled &&
-		    od_settings->cap[SMU_11_0_ODCAP_POWER_LIMIT]) {
-			od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
-			od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
-		} else if (od_settings->cap[SMU_11_0_ODCAP_POWER_LIMIT]) {
-			od_percent_upper = 0;
-			od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
-		}
-	}
-
-	dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
-		od_percent_upper, od_percent_lower, power_limit);
-
-	if (max_power_limit) {
-		*max_power_limit = power_limit * (100 + od_percent_upper);
-		*max_power_limit /= 100;
-	}
-
-	if (min_power_limit) {
-		*min_power_limit = power_limit * (100 - od_percent_lower);
-		*min_power_limit /= 100;
-	}
+	if (max_power_limit)
+		*max_power_limit = power_limit;
+	if (min_power_limit)
+		*min_power_limit = power_limit;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index c7bfa68bf0..f6545093bf 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -514,7 +514,7 @@ static int smu_v11_0_atom_get_smu_clockinfo(struct amdgpu_device *adev,
 					    getsmuclockinfo);
 
 	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
-					(uint32_t *)&input);
+					(uint32_t *)&input, sizeof(input));
 	if (ret)
 		return -EINVAL;
 
@@ -1432,24 +1432,24 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
 			dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
 			orderly_poweroff(true);
 	} else if (client_id == SOC15_IH_CLIENTID_MP1) {
-		if (src_id == 0xfe) {
+		if (src_id == SMU_IH_INTERRUPT_ID_TO_DRIVER) {
 			/* ACK SMUToHost interrupt */
 			data = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
 			data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
 			WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, data);
 
 			switch (ctxid) {
-			case 0x3:
+			case SMU_IH_INTERRUPT_CONTEXT_ID_AC:
 				dev_dbg(adev->dev, "Switched to AC mode!\n");
 				schedule_work(&smu->interrupt_work);
 				adev->pm.ac_power = true;
 				break;
-			case 0x4:
+			case SMU_IH_INTERRUPT_CONTEXT_ID_DC:
 				dev_dbg(adev->dev, "Switched to DC mode!\n");
 				schedule_work(&smu->interrupt_work);
 				adev->pm.ac_power = false;
 				break;
-			case 0x7:
+			case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
 				/*
 				 * Increment the throttle interrupt counter
 				 */
@@ -1462,6 +1462,10 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
 					schedule_work(&smu->throttling_logging_work);
 
 				break;
+			default:
+				dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n",
+					ctxid, client_id);
+				break;
 			}
 		}
 	}
@@ -1504,7 +1508,7 @@ int smu_v11_0_register_irq_handler(struct smu_context *smu)
 		return ret;
 
 	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
-				0xfe,
+				SMU_IH_INTERRUPT_ID_TO_DRIVER,
 				irq_src);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 2ff6deedef..da1f43999d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -451,7 +451,7 @@ static int vangogh_init_smc_tables(struct smu_context *smu)
 
 #ifdef CONFIG_X86
 	/* AMD x86 APU only */
-	smu->cpu_core_num = boot_cpu_data.x86_max_cores;
+	smu->cpu_core_num = topology_num_cores_per_package();
 #else
 	smu->cpu_core_num = 4;
 #endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index 5e408a1958..ed15f5a0fd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -301,7 +301,7 @@ static int smu_v12_0_atom_get_smu_clockinfo(struct amdgpu_device *adev,
 					    getsmuclockinfo);
 
 	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
-					(uint32_t *)&input);
+					(uint32_t *)&input, sizeof(input));
 	if (ret)
 		return -EINVAL;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index ca0d5a5b20..f41ac6465f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1746,10 +1746,12 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
 
 	gpu_metrics->current_fan_speed = 0;
 
-	gpu_metrics->pcie_link_width =
-		smu_v13_0_get_current_pcie_link_width(smu);
-	gpu_metrics->pcie_link_speed =
-		aldebaran_get_current_pcie_link_speed(smu);
+	if (!amdgpu_sriov_vf(smu->adev)) {
+		gpu_metrics->pcie_link_width =
+			smu_v13_0_get_current_pcie_link_width(smu);
+		gpu_metrics->pcie_link_speed =
+			aldebaran_get_current_pcie_link_speed(smu);
+	}
 
 	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index c486182ff2..48170bb511 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -1369,24 +1369,24 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
 			dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
 			orderly_poweroff(true);
 	} else if (client_id == SOC15_IH_CLIENTID_MP1) {
-		if (src_id == 0xfe) {
+		if (src_id == SMU_IH_INTERRUPT_ID_TO_DRIVER) {
 			/* ACK SMUToHost interrupt */
 			data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
 			data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
 			WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data);
 
 			switch (ctxid) {
-			case 0x3:
+			case SMU_IH_INTERRUPT_CONTEXT_ID_AC:
 				dev_dbg(adev->dev, "Switched to AC mode!\n");
 				smu_v13_0_ack_ac_dc_interrupt(smu);
 				adev->pm.ac_power = true;
 				break;
-			case 0x4:
+			case SMU_IH_INTERRUPT_CONTEXT_ID_DC:
 				dev_dbg(adev->dev, "Switched to DC mode!\n");
 				smu_v13_0_ack_ac_dc_interrupt(smu);
 				adev->pm.ac_power = false;
 				break;
-			case 0x7:
+			case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
 				/*
 				 * Increment the throttle interrupt counter
 				 */
@@ -1399,7 +1399,7 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
 					schedule_work(&smu->throttling_logging_work);
 
 				break;
-			case 0x8:
+			case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL:
 				high = smu->thermal_range.software_shutdown_temp +
 					smu->thermal_range.software_shutdown_temp_offset;
 				high = min_t(typeof(high),
@@ -1416,7 +1416,7 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
 				data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
 				WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
 				break;
-			case 0x9:
+			case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY:
 				high = min_t(typeof(high),
 					     SMU_THERMAL_MAXIMUM_ALERT_TEMP,
 					     smu->thermal_range.software_shutdown_temp);
@@ -1429,6 +1429,10 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
 				data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
 				WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
 				break;
+			default:
+				dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n",
+					ctxid, client_id);
+				break;
 			}
 		}
 	}
@@ -1473,7 +1477,7 @@ int smu_v13_0_register_irq_handler(struct smu_context *smu)
 		return ret;
 
 	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
-				0xfe,
+				SMU_IH_INTERRUPT_ID_TO_DRIVER,
 				irq_src);
 	if (ret)
 		return ret;
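The replaced literals map one-to-one onto the new names, so the header that defines them (not shown in this diff) presumably reads roughly as follows; the values below are taken directly from the magic numbers removed above:

	#define SMU_IH_INTERRUPT_ID_TO_DRIVER                  0xfe
	#define SMU_IH_INTERRUPT_CONTEXT_ID_AC                 0x3
	#define SMU_IH_INTERRUPT_CONTEXT_ID_DC                 0x4
	#define SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
	#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL       0x8
	#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY       0x9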
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index 4abfcd3274..2fb6c9cb0f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -226,15 +226,17 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
 	struct amdgpu_device *adev = smu->adev;
 	int ret = 0;
 
-	if (!en && adev->in_s4) {
-		/* Adds a GFX reset as workaround just before sending the
-		 * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
-		 * an invalid state.
-		 */
-		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
-						      SMU_RESET_MODE_2, NULL);
-		if (ret)
-			return ret;
+	if (!en && !adev->in_s0ix) {
+		if (adev->in_s4) {
+			/* Adds a GFX reset as workaround just before sending the
+			 * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
+			 * an invalid state.
+			 */
+			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+							      SMU_RESET_MODE_2, NULL);
+			if (ret)
+				return ret;
+		}
 
 		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
 	}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index ddb11eb8c3..c977ebe880 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -45,6 +45,7 @@
 #include <linux/pci.h>
 #include "amdgpu_ras.h"
 #include "amdgpu_mca.h"
+#include "amdgpu_aca.h"
 #include "smu_cmn.h"
 #include "mp/mp_13_0_6_offset.h"
 #include "mp/mp_13_0_6_sh_mask.h"
@@ -171,6 +172,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
 	MSG_MAP(McaBankDumpDW,             PPSMC_MSG_McaBankDumpDW,             0),
 	MSG_MAP(McaBankCeDumpDW,           PPSMC_MSG_McaBankCeDumpDW,           0),
 	MSG_MAP(SelectPLPDMode,            PPSMC_MSG_SelectPLPDMode,            0),
+	MSG_MAP(RmaDueToBadPageThreshold,  PPSMC_MSG_RmaDueToBadPageThreshold,  0),
 };
 
 // clang-format on
@@ -1438,7 +1440,10 @@ static int smu_v13_0_6_irq_process(struct amdgpu_device *adev,
 						entry->src_data[1]);
 					schedule_work(&smu->throttling_logging_work);
 				}
-
+				break;
+			default:
+				dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n",
+					ctxid, client_id);
 				break;
 			}
 		}
@@ -1574,6 +1579,8 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
 	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
 	struct smu_13_0_dpm_table *gfx_table =
 		&dpm_context->dpm_tables.gfx_table;
+	struct smu_13_0_dpm_table *uclk_table =
+		&dpm_context->dpm_tables.uclk_table;
 	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
 	int ret;
 
@@ -1589,17 +1596,27 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
 		return 0;
 
 	case AMD_DPM_FORCED_LEVEL_AUTO:
-		if ((gfx_table->min == pstate_table->gfxclk_pstate.curr.min) &&
-		    (gfx_table->max == pstate_table->gfxclk_pstate.curr.max))
-			return 0;
+		if ((gfx_table->min != pstate_table->gfxclk_pstate.curr.min) ||
+		    (gfx_table->max != pstate_table->gfxclk_pstate.curr.max)) {
+			ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(
+				smu, gfx_table->min, gfx_table->max);
+			if (ret)
+				return ret;
 
-		ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(
-			smu, gfx_table->min, gfx_table->max);
-		if (ret)
-			return ret;
+			pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
+			pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
+		}
+
+		if (uclk_table->max != pstate_table->uclk_pstate.curr.max) {
+			/* Min UCLK is not expected to be changed */
+			ret = smu_v13_0_set_soft_freq_limited_range(
+				smu, SMU_UCLK, 0, uclk_table->max);
+			if (ret)
+				return ret;
+			pstate_table->uclk_pstate.curr.max = uclk_table->max;
+		}
+		pstate_table->uclk_pstate.custom.max = 0;
 
-		pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
-		pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
 		return 0;
 
 	case AMD_DPM_FORCED_LEVEL_MANUAL:
 		return 0;
@@ -1622,7 +1639,8 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
 	uint32_t max_clk;
 	int ret = 0;
 
-	if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK)
+	if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK &&
+	    clk_type != SMU_UCLK)
 		return -EINVAL;
 
 	if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) &&
@@ -1632,18 +1650,31 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
 	if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
 		if (min >= max) {
 			dev_err(smu->adev->dev,
-				"Minimum GFX clk should be less than the maximum allowed clock\n");
+				"Minimum clk should be less than the maximum allowed clock\n");
 			return -EINVAL;
 		}
 
-		if ((min == pstate_table->gfxclk_pstate.curr.min) &&
-		    (max == pstate_table->gfxclk_pstate.curr.max))
-			return 0;
+		if (clk_type == SMU_GFXCLK) {
+			if ((min == pstate_table->gfxclk_pstate.curr.min) &&
+			    (max == pstate_table->gfxclk_pstate.curr.max))
+				return 0;
 
-		ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(smu, min, max);
-		if (!ret) {
-			pstate_table->gfxclk_pstate.curr.min = min;
-			pstate_table->gfxclk_pstate.curr.max = max;
+			ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(
+				smu, min, max);
+			if (!ret) {
+				pstate_table->gfxclk_pstate.curr.min = min;
+				pstate_table->gfxclk_pstate.curr.max = max;
+			}
+		}
+
+		if (clk_type == SMU_UCLK) {
+			if (max == pstate_table->uclk_pstate.curr.max)
+				return 0;
+			/* Only max clock limiting is allowed for UCLK */
+			ret = smu_v13_0_set_soft_freq_limited_range(
+				smu, SMU_UCLK, 0, max);
+			if (!ret)
+				pstate_table->uclk_pstate.curr.max = max;
 		}
 
 		return ret;
clock\n"); + "Minimum clk should be less than the maximum allowed clock\n"); return -EINVAL; } - if ((min == pstate_table->gfxclk_pstate.curr.min) && - (max == pstate_table->gfxclk_pstate.curr.max)) - return 0; + if (clk_type == SMU_GFXCLK) { + if ((min == pstate_table->gfxclk_pstate.curr.min) && + (max == pstate_table->gfxclk_pstate.curr.max)) + return 0; - ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(smu, min, max); - if (!ret) { - pstate_table->gfxclk_pstate.curr.min = min; - pstate_table->gfxclk_pstate.curr.max = max; + ret = smu_v13_0_6_set_gfx_soft_freq_limited_range( + smu, min, max); + if (!ret) { + pstate_table->gfxclk_pstate.curr.min = min; + pstate_table->gfxclk_pstate.curr.max = max; + } + } + + if (clk_type == SMU_UCLK) { + if (max == pstate_table->uclk_pstate.curr.max) + return 0; + /* Only max clock limiting is allowed for UCLK */ + ret = smu_v13_0_set_soft_freq_limited_range( + smu, SMU_UCLK, 0, max); + if (!ret) + pstate_table->uclk_pstate.curr.max = max; } return ret; @@ -1736,6 +1767,40 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, return -EINVAL; } break; + case PP_OD_EDIT_MCLK_VDDC_TABLE: + if (size != 2) { + dev_err(smu->adev->dev, + "Input parameter number not correct\n"); + return -EINVAL; + } + + if (!smu_cmn_feature_is_enabled(smu, + SMU_FEATURE_DPM_UCLK_BIT)) { + dev_warn(smu->adev->dev, + "UCLK_LIMITS setting not supported!\n"); + return -EOPNOTSUPP; + } + + if (input[0] == 0) { + dev_info(smu->adev->dev, + "Setting min UCLK level is not supported"); + return -EINVAL; + } else if (input[0] == 1) { + if (input[1] > dpm_context->dpm_tables.uclk_table.max) { + dev_warn( + smu->adev->dev, + "Maximum UCLK (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n", + input[1], + dpm_context->dpm_tables.uclk_table.max); + pstate_table->uclk_pstate.custom.max = + pstate_table->uclk_pstate.curr.max; + return -EINVAL; + } + + pstate_table->uclk_pstate.custom.max = input[1]; + } + break; + case PP_OD_RESTORE_DEFAULT_TABLE: if (size != 0) { dev_err(smu->adev->dev, @@ -1746,8 +1811,19 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, min_clk = dpm_context->dpm_tables.gfx_table.min; max_clk = dpm_context->dpm_tables.gfx_table.max; - return smu_v13_0_6_set_soft_freq_limited_range( + ret = smu_v13_0_6_set_soft_freq_limited_range( smu, SMU_GFXCLK, min_clk, max_clk); + + if (ret) + return ret; + + min_clk = dpm_context->dpm_tables.uclk_table.min; + max_clk = dpm_context->dpm_tables.uclk_table.max; + ret = smu_v13_0_6_set_soft_freq_limited_range( + smu, SMU_UCLK, min_clk, max_clk); + if (ret) + return ret; + pstate_table->uclk_pstate.custom.max = 0; } break; case PP_OD_COMMIT_DPM_TABLE: @@ -1767,8 +1843,19 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, min_clk = pstate_table->gfxclk_pstate.custom.min; max_clk = pstate_table->gfxclk_pstate.custom.max; - return smu_v13_0_6_set_soft_freq_limited_range( + ret = smu_v13_0_6_set_soft_freq_limited_range( smu, SMU_GFXCLK, min_clk, max_clk); + + if (ret) + return ret; + + if (!pstate_table->uclk_pstate.custom.max) + return 0; + + min_clk = pstate_table->uclk_pstate.curr.min; + max_clk = pstate_table->uclk_pstate.custom.max; + return smu_v13_0_6_set_soft_freq_limited_range( + smu, SMU_UCLK, min_clk, max_clk); } break; default: @@ -2141,14 +2228,16 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak) >> GET_INST(GC, 0); if (!(adev->flags & AMD_IS_APU)) { - 
@@ -2230,8 +2319,8 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
 
 	ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index,
 					       SMU_RESET_MODE_2);
 
-	/* This is similar to FLR, wait till max FLR timeout */
-	msleep(100);
+	/* Reset takes a bit longer, wait for 200ms. */
+	msleep(200);
 
 	dev_dbg(smu->adev->dev, "restore config space...\n");
 	/* Restore the config space saved during init */
@@ -2401,6 +2490,24 @@ static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
 	return ret;
 }
 
+static int smu_v13_0_6_send_rma_reason(struct smu_context *smu)
+{
+	struct amdgpu_device *adev = smu->adev;
+	int ret;
+
+	/* NOTE: the message is only valid on dGPU with pmfw 85.90.0 and above */
+	if ((adev->flags & AMD_IS_APU) || smu->smc_fw_version < 0x00555a00)
+		return 0;
+
+	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RmaDueToBadPageThreshold, NULL);
+	if (ret)
+		dev_err(smu->adev->dev,
+			"[%s] failed to send BadPageThreshold event to SMU\n",
+			__func__);
+
+	return ret;
+}
+
 static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
 {
 	struct smu_context *smu = adev->powerplay.pp_handle;
@@ -2572,18 +2679,22 @@ static int mca_umc_mca_get_err_count(const struct mca_ras_info *mca_ras, struct
 				     enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
 {
 	uint64_t status0;
+	uint32_t ext_error_code;
+	uint32_t odecc_err_cnt;
 
 	status0 = entry->regs[MCA_REG_IDX_STATUS];
+	ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(status0);
+	odecc_err_cnt = MCA_REG__MISC0__ERRCNT(entry->regs[MCA_REG_IDX_MISC0]);
 
 	if (!REG_GET_FIELD(status0, MCMP1_STATUST0, Val)) {
 		*count = 0;
 		return 0;
 	}
 
-	if (type == AMDGPU_MCA_ERROR_TYPE_UE && umc_v12_0_is_uncorrectable_error(adev, status0))
-		*count = 1;
-	else if (type == AMDGPU_MCA_ERROR_TYPE_CE && umc_v12_0_is_correctable_error(adev, status0))
-		*count = 1;
+	if (umc_v12_0_is_deferred_error(adev, status0) ||
+	    umc_v12_0_is_uncorrectable_error(adev, status0) ||
+	    umc_v12_0_is_correctable_error(adev, status0))
+		*count = (ext_error_code == 0) ? odecc_err_cnt : 1;
 
 	return 0;
 }
@@ -2882,6 +2993,143 @@ static const struct amdgpu_mca_smu_funcs smu_v13_0_6_mca_smu_funcs = {
 	.mca_get_valid_mca_count = mca_smu_get_valid_mca_count,
 };
 
+static int aca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
+{
+	struct smu_context *smu = adev->powerplay.pp_handle;
+
+	return smu_v13_0_6_mca_set_debug_mode(smu, enable);
+}
+
+static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_error_type type, u32 *count)
+{
+	uint32_t msg;
+	int ret;
+
+	if (!count)
+		return -EINVAL;
+
+	switch (type) {
+	case ACA_ERROR_TYPE_UE:
+		msg = SMU_MSG_QueryValidMcaCount;
+		break;
+	case ACA_ERROR_TYPE_CE:
+		msg = SMU_MSG_QueryValidMcaCeCount;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = smu_cmn_send_smc_msg(smu, msg, count);
+	if (ret) {
+		*count = 0;
+		return ret;
+	}
+
+	return 0;
+}
+
+static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev,
+				       enum aca_error_type type, u32 *count)
+{
+	struct smu_context *smu = adev->powerplay.pp_handle;
+	int ret;
+
+	switch (type) {
+	case ACA_ERROR_TYPE_UE:
+	case ACA_ERROR_TYPE_CE:
+		ret = smu_v13_0_6_get_valid_aca_count(smu, type, count);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int __smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_type type,
+				       int idx, int offset, u32 *val)
+{
+	uint32_t msg, param;
+
+	switch (type) {
+	case ACA_ERROR_TYPE_UE:
+		msg = SMU_MSG_McaBankDumpDW;
+		break;
+	case ACA_ERROR_TYPE_CE:
+		msg = SMU_MSG_McaBankCeDumpDW;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	param = ((idx & 0xffff) << 16) | (offset & 0xfffc);
+
+	return smu_cmn_send_smc_msg_with_param(smu, msg, param, (uint32_t *)val);
+}
+
+static int smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_type type,
+				     int idx, int offset, u32 *val, int count)
+{
+	int ret, i;
+
+	if (!val)
+		return -EINVAL;
+
+	for (i = 0; i < count; i++) {
+		ret = __smu_v13_0_6_aca_bank_dump(smu, type, idx, offset + (i << 2), &val[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int aca_bank_read_reg(struct amdgpu_device *adev, enum aca_error_type type,
+			     int idx, int reg_idx, u64 *val)
+{
+	struct smu_context *smu = adev->powerplay.pp_handle;
+	u32 data[2] = {0, 0};
+	int ret;
+
+	if (!val || reg_idx >= ACA_REG_IDX_COUNT)
+		return -EINVAL;
+
+	ret = smu_v13_0_6_aca_bank_dump(smu, type, idx, reg_idx * 8, data, ARRAY_SIZE(data));
+	if (ret)
+		return ret;
+
+	*val = (u64)data[1] << 32 | data[0];
+
+	dev_dbg(adev->dev, "mca read bank reg: type:%s, index: %d, reg_idx: %d, val: 0x%016llx\n",
		type == ACA_ERROR_TYPE_UE ? "UE" : "CE", idx, reg_idx, *val);
"UE" : "CE", idx, reg_idx, *val); + + return 0; +} + +static int aca_smu_get_valid_aca_bank(struct amdgpu_device *adev, + enum aca_error_type type, int idx, struct aca_bank *bank) +{ + int i, ret, count; + + count = min_t(int, 16, ARRAY_SIZE(bank->regs)); + for (i = 0; i < count; i++) { + ret = aca_bank_read_reg(adev, type, idx, i, &bank->regs[i]); + if (ret) + return ret; + } + + return 0; +} + +static const struct aca_smu_funcs smu_v13_0_6_aca_smu_funcs = { + .max_ue_bank_count = 12, + .max_ce_bank_count = 12, + .set_debug_mode = aca_smu_set_debug_mode, + .get_valid_aca_count = aca_smu_get_valid_aca_count, + .get_valid_aca_bank = aca_smu_get_valid_aca_bank, +}; + static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu, enum pp_xgmi_plpd_mode mode) { @@ -2920,13 +3168,6 @@ static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu, return ret; } -static ssize_t smu_v13_0_6_get_ecc_info(struct smu_context *smu, - void *table) -{ - /* Support ecc info by default */ - return 0; -} - static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { /* init dpm */ .get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask, @@ -2981,7 +3222,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .i2c_init = smu_v13_0_6_i2c_control_init, .i2c_fini = smu_v13_0_6_i2c_control_fini, .send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num, - .get_ecc_info = smu_v13_0_6_get_ecc_info, + .send_rma_reason = smu_v13_0_6_send_rma_reason, }; void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu) @@ -2994,4 +3235,5 @@ void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu) smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION; smu_v13_0_set_smu_mailbox_registers(smu); amdgpu_mca_smu_init_funcs(smu->adev, &smu_v13_0_6_mca_smu_funcs); + amdgpu_aca_set_smu_funcs(smu->adev, &smu_v13_0_6_aca_smu_funcs); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 6dae5ad74f..07a65e0057 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -53,6 +53,8 @@ MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin"); +#define ENABLE_IMU_ARG_GFXOFF_ENABLE 1 + int smu_v14_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; @@ -231,6 +233,10 @@ int smu_v14_0_check_fw_version(struct smu_context *smu) case IP_VERSION(14, 0, 0): smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0; break; + case IP_VERSION(14, 0, 1): + smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1; + break; + default: dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n", amdgpu_ip_version(adev, MP1_HWIP, 0)); @@ -734,6 +740,7 @@ int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable) switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(14, 0, 2): case IP_VERSION(14, 0, 0): + case IP_VERSION(14, 0, 1): if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) return 0; if (enable) @@ -890,7 +897,7 @@ int smu_v14_0_register_irq_handler(struct smu_context *smu) // TODO: THM related ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1, - 0xfe, + SMU_IH_INTERRUPT_ID_TO_DRIVER, irq_src); if (ret) return ret; @@ -1395,9 +1402,22 @@ int smu_v14_0_set_vcn_enable(struct smu_context *smu, if (adev->vcn.harvest_config & (1 << i)) continue; - ret = smu_cmn_send_smc_msg_with_param(smu, enable ? 
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index 9310c4758e..63399c00cc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -70,9 +70,12 @@ static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] =
 	MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
 	MSG_MAP(GetSmuVersion, PPSMC_MSG_GetPmfwVersion, 1),
 	MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
-	MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1),
-	MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 1),
-	MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 1),
+	MSG_MAP(PowerDownVcn0, PPSMC_MSG_PowerDownVcn0, 1),
+	MSG_MAP(PowerUpVcn0, PPSMC_MSG_PowerUpVcn0, 1),
+	MSG_MAP(SetHardMinVcn0, PPSMC_MSG_SetHardMinVcn0, 1),
+	MSG_MAP(PowerDownVcn1, PPSMC_MSG_PowerDownVcn1, 1),
+	MSG_MAP(PowerUpVcn1, PPSMC_MSG_PowerUpVcn1, 1),
+	MSG_MAP(SetHardMinVcn1, PPSMC_MSG_SetHardMinVcn1, 1),
 	MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxclk, 1),
 	MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1),
 	MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
@@ -83,7 +86,8 @@ static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] =
 	MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 1),
 	MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq, 1),
 	MSG_MAP(SetSoftMinFclk, PPSMC_MSG_SetSoftMinFclk, 1),
-	MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn, 1),
+	MSG_MAP(SetSoftMinVcn0, PPSMC_MSG_SetSoftMinVcn0, 1),
+	MSG_MAP(SetSoftMinVcn1, PPSMC_MSG_SetSoftMinVcn1, 1),
 	MSG_MAP(EnableGfxImu, PPSMC_MSG_EnableGfxImu, 1),
 	MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 1),
 	MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 1),
@@ -91,9 +95,12 @@ static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] =
 	MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk, 1),
 	MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq, 1),
 	MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq, 1),
-	MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 1),
-	MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 1),
-	MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 1),
+	MSG_MAP(SetSoftMaxVcn0, PPSMC_MSG_SetSoftMaxVcn0, 1),
+	MSG_MAP(SetSoftMaxVcn1, PPSMC_MSG_SetSoftMaxVcn1, 1),
+	MSG_MAP(PowerDownJpeg0, PPSMC_MSG_PowerDownJpeg0, 1),
+	MSG_MAP(PowerUpJpeg0, PPSMC_MSG_PowerUpJpeg0, 1),
+	MSG_MAP(PowerDownJpeg1, PPSMC_MSG_PowerDownJpeg1, 1),
+	MSG_MAP(PowerUpJpeg1, PPSMC_MSG_PowerUpJpeg1, 1),
 	MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq, 1),
 	MSG_MAP(SetSoftMinSocclkByFreq, PPSMC_MSG_SetSoftMinSocclkByFreq, 1),
 	MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile, 1),
@@ -154,7 +161,7 @@ static int smu_v14_0_0_init_smc_tables(struct smu_context *smu)
 	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
-	SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
+	SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, max(sizeof(DpmClocks_t), sizeof(DpmClocks_t_v14_0_1)),
 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
 	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
@@ -164,7 +171,7 @@
 		goto err0_out;
 	smu_table->metrics_time = 0;
 
-	smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
+	smu_table->clocks_table = kzalloc(max(sizeof(DpmClocks_t), sizeof(DpmClocks_t_v14_0_1)), GFP_KERNEL);
 	if (!smu_table->clocks_table)
 		goto err1_out;
 
@@ -586,6 +593,60 @@ static int smu_v14_0_0_mode2_reset(struct smu_context *smu)
 	return ret;
 }
 
+static int smu_v14_0_1_get_dpm_freq_by_index(struct smu_context *smu,
+					     enum smu_clk_type clk_type,
+					     uint32_t dpm_level,
+					     uint32_t *freq)
+{
+	DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+
+	if (!clk_table || clk_type >= SMU_CLK_COUNT)
+		return -EINVAL;
+
+	switch (clk_type) {
+	case SMU_SOCCLK:
+		if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
+			return -EINVAL;
+		*freq = clk_table->SocClocks[dpm_level];
+		break;
+	case SMU_VCLK:
+		if (dpm_level >= clk_table->Vcn0ClkLevelsEnabled)
+			return -EINVAL;
+		*freq = clk_table->VClocks0[dpm_level];
+		break;
+	case SMU_DCLK:
+		if (dpm_level >= clk_table->Vcn0ClkLevelsEnabled)
+			return -EINVAL;
+		*freq = clk_table->DClocks0[dpm_level];
+		break;
+	case SMU_VCLK1:
+		if (dpm_level >= clk_table->Vcn1ClkLevelsEnabled)
+			return -EINVAL;
+		*freq = clk_table->VClocks1[dpm_level];
+		break;
+	case SMU_DCLK1:
+		if (dpm_level >= clk_table->Vcn1ClkLevelsEnabled)
+			return -EINVAL;
+		*freq = clk_table->DClocks1[dpm_level];
+		break;
+	case SMU_UCLK:
+	case SMU_MCLK:
+		if (dpm_level >= clk_table->NumMemPstatesEnabled)
+			return -EINVAL;
+		*freq = clk_table->MemPstateTable[dpm_level].MemClk;
+		break;
+	case SMU_FCLK:
+		if (dpm_level >= clk_table->NumFclkLevelsEnabled)
+			return -EINVAL;
+		*freq = clk_table->FclkClocks_Freq[dpm_level];
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int smu_v14_0_0_get_dpm_freq_by_index(struct smu_context *smu,
 					     enum smu_clk_type clk_type,
 					     uint32_t dpm_level,
@@ -630,6 +691,19 @@ static int smu_v14_0_0_get_dpm_freq_by_index(struct smu_context *smu,
 	return 0;
 }
 
+static int smu_v14_0_common_get_dpm_freq_by_index(struct smu_context *smu,
+						  enum smu_clk_type clk_type,
+						  uint32_t dpm_level,
+						  uint32_t *freq)
+{
+	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+		smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq);
+	else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+		smu_v14_0_1_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq);
+
+	return 0;
+}
+
 static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu,
 					   enum smu_clk_type clk_type)
 {
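One detail to note in the smu_v14_0_common_get_dpm_freq_by_index() wrapper just added: it dispatches on the MP1 IP version but discards the status of the helper it calls and returns 0 unconditionally, so a failed lookup is reported to its callers as success; the other smu_v14_0_common_* wrappers later in this file follow the same pattern. A status-propagating variant would look like the sketch below (an editorial suggestion only; the -EOPNOTSUPP default for unhandled versions is an assumption, not something this commit does):

    /* Sketch: same dispatch as smu_v14_0_common_get_dpm_freq_by_index(),
     * but the callee's return code is passed through to the caller. */
    static int smu_v14_0_common_get_dpm_freq_by_index(struct smu_context *smu,
                                                      enum smu_clk_type clk_type,
                                                      uint32_t dpm_level,
                                                      uint32_t *freq)
    {
        switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
        case IP_VERSION(14, 0, 0):
            return smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq);
        case IP_VERSION(14, 0, 1):
            return smu_v14_0_1_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq);
        default:
            return -EOPNOTSUPP; /* assumed policy for unhandled versions */
        }
    }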
@@ -650,6 +724,8 @@ static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu,
 		break;
 	case SMU_VCLK:
 	case SMU_DCLK:
+	case SMU_VCLK1:
+	case SMU_DCLK1:
 		feature_id = SMU_FEATURE_VCN_DPM_BIT;
 		break;
 	default:
@@ -659,6 +735,126 @@
 	return smu_cmn_feature_is_enabled(smu, feature_id);
 }
 
+static int smu_v14_0_1_get_dpm_ultimate_freq(struct smu_context *smu,
+					     enum smu_clk_type clk_type,
+					     uint32_t *min,
+					     uint32_t *max)
+{
+	DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+	uint32_t clock_limit;
+	uint32_t max_dpm_level, min_dpm_level;
+	int ret = 0;
+
+	if (!smu_v14_0_0_clk_dpm_is_enabled(smu, clk_type)) {
+		switch (clk_type) {
+		case SMU_MCLK:
+		case SMU_UCLK:
+			clock_limit = smu->smu_table.boot_values.uclk;
+			break;
+		case SMU_FCLK:
+			clock_limit = smu->smu_table.boot_values.fclk;
+			break;
+		case SMU_GFXCLK:
+		case SMU_SCLK:
+			clock_limit = smu->smu_table.boot_values.gfxclk;
+			break;
+		case SMU_SOCCLK:
+			clock_limit = smu->smu_table.boot_values.socclk;
+			break;
+		case SMU_VCLK:
+		case SMU_VCLK1:
+			clock_limit = smu->smu_table.boot_values.vclk;
+			break;
+		case SMU_DCLK:
+		case SMU_DCLK1:
+			clock_limit = smu->smu_table.boot_values.dclk;
+			break;
+		default:
+			clock_limit = 0;
+			break;
+		}
+
+		/* clock in Mhz unit */
+		if (min)
+			*min = clock_limit / 100;
+		if (max)
+			*max = clock_limit / 100;
+
+		return 0;
+	}
+
+	if (max) {
+		switch (clk_type) {
+		case SMU_GFXCLK:
+		case SMU_SCLK:
+			*max = clk_table->MaxGfxClk;
+			break;
+		case SMU_MCLK:
+		case SMU_UCLK:
+		case SMU_FCLK:
+			max_dpm_level = 0;
+			break;
+		case SMU_SOCCLK:
+			max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1;
+			break;
+		case SMU_VCLK:
+		case SMU_DCLK:
+			max_dpm_level = clk_table->Vcn0ClkLevelsEnabled - 1;
+			break;
+		case SMU_VCLK1:
+		case SMU_DCLK1:
+			max_dpm_level = clk_table->Vcn1ClkLevelsEnabled - 1;
+			break;
+		default:
+			ret = -EINVAL;
+			goto failed;
+		}
+
+		if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
+			ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
+			if (ret)
+				goto failed;
+		}
+	}
+
+	if (min) {
+		switch (clk_type) {
+		case SMU_GFXCLK:
+		case SMU_SCLK:
+			*min = clk_table->MinGfxClk;
+			break;
+		case SMU_MCLK:
+		case SMU_UCLK:
+			min_dpm_level = clk_table->NumMemPstatesEnabled - 1;
+			break;
+		case SMU_FCLK:
+			min_dpm_level = clk_table->NumFclkLevelsEnabled - 1;
+			break;
+		case SMU_SOCCLK:
+			min_dpm_level = 0;
+			break;
+		case SMU_VCLK:
+		case SMU_DCLK:
+		case SMU_VCLK1:
+		case SMU_DCLK1:
+			min_dpm_level = 0;
+			break;
+		default:
+			ret = -EINVAL;
+			goto failed;
+		}
+
+		if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
+			ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
+			if (ret)
+				goto failed;
+		}
+	}
+
+failed:
+	return ret;
+}
+
 static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
 					     enum smu_clk_type clk_type,
 					     uint32_t *min,
 					     uint32_t *max)
@@ -729,7 +925,7 @@ static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
 		}
 
 		if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
-			ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
+			ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
 			if (ret)
 				goto failed;
 		}
@@ -761,7 +957,7 @@ static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
 		}
 
 		if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
-			ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
+			ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
 			if (ret)
 				goto failed;
 		}
@@ -771,6 +967,19 @@
 failed:
 	return ret;
 }
 
+static int smu_v14_0_common_get_dpm_ultimate_freq(struct smu_context *smu,
+						  enum smu_clk_type clk_type,
+						  uint32_t *min,
+						  uint32_t *max)
+{
+	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+		smu_v14_0_0_get_dpm_ultimate_freq(smu, clk_type, min, max);
+	else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+		smu_v14_0_1_get_dpm_ultimate_freq(smu, clk_type, min, max);
+
+	return 0;
+}
+
 static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu,
 					    enum smu_clk_type clk_type,
 					    uint32_t *value)
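A side note on the DPM-disabled fallback in smu_v14_0_1_get_dpm_ultimate_freq() above: the boot clocks cached in smu_table.boot_values are kept in 10 kHz units (as the division implies), so dividing by 100 yields the MHz value the "/* clock in Mhz unit */" comment promises. The conversion in isolation:

    #include <stdint.h>
    #include <stdio.h>

    /* Boot clocks are stored in 10 kHz units; the DPM interfaces speak
     * MHz, hence the divide-by-100 in the fallback path. */
    static uint32_t boot_clock_to_mhz(uint32_t clk_10khz)
    {
        return clk_10khz / 100;
    }

    int main(void)
    {
        uint32_t boot_gfxclk = 60000; /* 600 MHz expressed in 10 kHz units */

        printf("gfxclk fallback: %u MHz\n", boot_clock_to_mhz(boot_gfxclk));
        return 0;
    }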
@@ -804,6 +1013,37 @@
 	return smu_v14_0_0_get_smu_metrics_data(smu, member_type, value);
 }
 
+static int smu_v14_0_1_get_dpm_level_count(struct smu_context *smu,
+					   enum smu_clk_type clk_type,
+					   uint32_t *count)
+{
+	DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+
+	switch (clk_type) {
+	case SMU_SOCCLK:
+		*count = clk_table->NumSocClkLevelsEnabled;
+		break;
+	case SMU_VCLK:
+	case SMU_DCLK:
+		*count = clk_table->Vcn0ClkLevelsEnabled;
+		break;
+	case SMU_VCLK1:
+	case SMU_DCLK1:
+		*count = clk_table->Vcn1ClkLevelsEnabled;
+		break;
+	case SMU_MCLK:
+		*count = clk_table->NumMemPstatesEnabled;
+		break;
+	case SMU_FCLK:
+		*count = clk_table->NumFclkLevelsEnabled;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
 static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu,
 					   enum smu_clk_type clk_type,
 					   uint32_t *count)
@@ -833,6 +1073,18 @@ static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu,
 	return 0;
 }
 
+static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu,
+						enum smu_clk_type clk_type,
+						uint32_t *count)
+{
+	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+		smu_v14_0_0_get_dpm_level_count(smu, clk_type, count);
+	else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+		smu_v14_0_1_get_dpm_level_count(smu, clk_type, count);
+
+	return 0;
+}
+
 static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
 					enum smu_clk_type clk_type, char *buf)
 {
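The count/index callback pair above is consumed by loops like the one in smu_v14_0_0_print_clk_levels() in the next hunk: fetch the number of enabled levels, then query each index in turn, with an out-of-range index rejected as -EINVAL. The shape of that walk, standalone (table contents invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Invented level table standing in for the firmware DPM table. */
    static const uint32_t socclk_mhz[] = { 400, 600, 800, 1000 };

    static int get_dpm_level_count(uint32_t *count)
    {
        *count = sizeof(socclk_mhz) / sizeof(socclk_mhz[0]);
        return 0;
    }

    static int get_dpm_freq_by_index(uint32_t level, uint32_t *freq)
    {
        uint32_t count;

        get_dpm_level_count(&count);
        if (level >= count)
            return -1; /* the driver returns -EINVAL here */
        *freq = socclk_mhz[level];
        return 0;
    }

    int main(void)
    {
        uint32_t count, freq;

        get_dpm_level_count(&count);
        for (uint32_t i = 0; i < count; i++)
            if (!get_dpm_freq_by_index(i, &freq))
                printf("%u: %uMhz\n", i, freq);
        return 0;
    }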
@@ -859,18 +1111,20 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
 	case SMU_SOCCLK:
 	case SMU_VCLK:
 	case SMU_DCLK:
+	case SMU_VCLK1:
+	case SMU_DCLK1:
 	case SMU_MCLK:
 	case SMU_FCLK:
 		ret = smu_v14_0_0_get_current_clk_freq(smu, clk_type, &cur_value);
 		if (ret)
 			break;
 
-		ret = smu_v14_0_0_get_dpm_level_count(smu, clk_type, &count);
+		ret = smu_v14_0_common_get_dpm_level_count(smu, clk_type, &count);
 		if (ret)
 			break;
 
 		for (i = 0; i < count; i++) {
-			ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
+			ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, i, &value);
 			if (ret)
 				break;
 
@@ -933,8 +1187,13 @@ static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu,
 		break;
 	case SMU_VCLK:
 	case SMU_DCLK:
-		msg_set_min = SMU_MSG_SetHardMinVcn;
-		msg_set_max = SMU_MSG_SetSoftMaxVcn;
+		msg_set_min = SMU_MSG_SetHardMinVcn0;
+		msg_set_max = SMU_MSG_SetSoftMaxVcn0;
+		break;
+	case SMU_VCLK1:
+	case SMU_DCLK1:
+		msg_set_min = SMU_MSG_SetHardMinVcn1;
+		msg_set_max = SMU_MSG_SetSoftMaxVcn1;
 		break;
 	default:
 		return -EINVAL;
@@ -964,11 +1223,11 @@ static int smu_v14_0_0_force_clk_levels(struct smu_context *smu,
 	case SMU_FCLK:
 	case SMU_VCLK:
 	case SMU_DCLK:
-		ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
+		ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
 		if (ret)
 			break;
 
-		ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
+		ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
 		if (ret)
 			break;
 
@@ -993,25 +1252,25 @@ static int smu_v14_0_0_set_performance_level(struct smu_context *smu,
 	switch (level) {
 	case AMD_DPM_FORCED_LEVEL_HIGH:
-		smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max);
-		smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max);
-		smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max);
+		smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max);
+		smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max);
+		smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max);
 		sclk_min = sclk_max;
 		fclk_min = fclk_max;
 		socclk_min = socclk_max;
 		break;
 	case AMD_DPM_FORCED_LEVEL_LOW:
-		smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL);
-		smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL);
-		smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL);
+		smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL);
+		smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL);
+		smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL);
 		sclk_max = sclk_min;
 		fclk_max = fclk_min;
 		socclk_max = socclk_min;
 		break;
 	case AMD_DPM_FORCED_LEVEL_AUTO:
-		smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max);
-		smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max);
-		smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max);
+		smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max);
+		smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max);
+		smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max);
 		break;
 	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
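The set_performance_level() changes above are routing changes only; the policy itself stays: FORCED_LEVEL_HIGH collapses the [min, max] window onto the ultimate maximum, FORCED_LEVEL_LOW onto the ultimate minimum, and FORCED_LEVEL_AUTO spans the whole range. Reduced to its core (standalone sketch, names invented):

    #include <stdint.h>
    #include <stdio.h>

    struct freq_range { uint32_t min, max; };

    /* Pick the [min, max] window from the ultimate range the way the
     * HIGH/LOW/AUTO cases in smu_v14_0_0_set_performance_level() do. */
    static struct freq_range pick_window(uint32_t lo, uint32_t hi, char level)
    {
        switch (level) {
        case 'h': return (struct freq_range){ hi, hi }; /* FORCED_LEVEL_HIGH */
        case 'l': return (struct freq_range){ lo, lo }; /* FORCED_LEVEL_LOW */
        default:  return (struct freq_range){ lo, hi }; /* FORCED_LEVEL_AUTO */
        }
    }

    int main(void)
    {
        struct freq_range r = pick_window(400, 1000, 'h');

        printf("sclk window: %u-%u MHz\n", r.min, r.max);
        return 0;
    }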
@@ -1060,6 +1319,18 @@ static int smu_v14_0_0_set_performance_level(struct smu_context *smu,
 	return ret;
 }
 
+static int smu_v14_0_1_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
+{
+	DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+
+	smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
+	smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
+	smu->gfx_actual_hard_min_freq = 0;
+	smu->gfx_actual_soft_max_freq = 0;
+
+	return 0;
+}
+
 static int smu_v14_0_0_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
 {
 	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
@@ -1072,6 +1343,16 @@ static int smu_v14_0_0_set_fine_grain_gfx_freq_parameters(struct smu_context *sm
 	return 0;
 }
 
+static int smu_v14_0_common_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
+{
+	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+		smu_v14_0_0_set_fine_grain_gfx_freq_parameters(smu);
+	else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+		smu_v14_0_1_set_fine_grain_gfx_freq_parameters(smu);
+
+	return 0;
+}
+
 static int smu_v14_0_0_set_vpe_enable(struct smu_context *smu,
 				      bool enable)
 {
@@ -1088,6 +1369,25 @@ static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu,
 			0, NULL);
 }
 
+static int smu_14_0_1_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
+{
+	DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+	uint8_t idx;
+
+	/* Only the clock information of SOC and VPE is copied to provide VPE DPM settings for use. */
+	for (idx = 0; idx < NUM_SOCCLK_DPM_LEVELS; idx++) {
+		clock_table->SocClocks[idx].Freq = (idx < clk_table->NumSocClkLevelsEnabled) ? clk_table->SocClocks[idx] : 0;
+		clock_table->SocClocks[idx].Vol = 0;
+	}
+
+	for (idx = 0; idx < NUM_VPE_DPM_LEVELS; idx++) {
+		clock_table->VPEClocks[idx].Freq = (idx < clk_table->VpeClkLevelsEnabled) ? clk_table->VPEClocks[idx] : 0;
+		clock_table->VPEClocks[idx].Vol = 0;
+	}
+
+	return 0;
+}
+
 static int smu_14_0_0_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
 {
 	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
@@ -1107,6 +1407,16 @@ static int smu_14_0_0_get_dpm_table(struct smu_context *smu, struct dpm_clocks *
 	return 0;
 }
 
+static int smu_v14_0_common_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
+{
+	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+		smu_14_0_0_get_dpm_table(smu, clock_table);
+	else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+		smu_14_0_1_get_dpm_table(smu, clock_table);
+
+	return 0;
+}
+
 static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
 	.check_fw_status = smu_v14_0_check_fw_status,
 	.check_fw_version = smu_v14_0_check_fw_version,
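smu_14_0_1_get_dpm_table() above copies only the SOC and VPE clock levels out of the firmware table, zero-filling slots past the enabled count so consumers always see a fixed-size table with voltages reported as 0. The same bounded-copy idiom in isolation (sizes and values invented):

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_LEVELS 8 /* stands in for NUM_SOCCLK_DPM_LEVELS */

    /* Export a fixed-size level table from a variable-length firmware
     * table, zero-filling entries beyond the enabled count (mirrors the
     * ternary in smu_14_0_1_get_dpm_table()). */
    static void export_levels(uint32_t *dst, const uint32_t *src,
                              uint8_t enabled, uint8_t total)
    {
        for (uint8_t i = 0; i < total; i++)
            dst[i] = (i < enabled) ? src[i] : 0;
    }

    int main(void)
    {
        const uint32_t fw[NUM_LEVELS] = { 400, 600, 800, 1000 };
        uint32_t out[NUM_LEVELS];

        export_levels(out, fw, 4, NUM_LEVELS);
        for (int i = 0; i < NUM_LEVELS; i++)
            printf("%d: %u MHz\n", i, out[i]);
        return 0;
    }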
@@ -1128,16 +1438,16 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
 	.set_driver_table_location = smu_v14_0_set_driver_table_location,
 	.gfx_off_control = smu_v14_0_gfx_off_control,
 	.mode2_reset = smu_v14_0_0_mode2_reset,
-	.get_dpm_ultimate_freq = smu_v14_0_0_get_dpm_ultimate_freq,
+	.get_dpm_ultimate_freq = smu_v14_0_common_get_dpm_ultimate_freq,
 	.od_edit_dpm_table = smu_v14_0_od_edit_dpm_table,
 	.print_clk_levels = smu_v14_0_0_print_clk_levels,
 	.force_clk_levels = smu_v14_0_0_force_clk_levels,
 	.set_performance_level = smu_v14_0_0_set_performance_level,
-	.set_fine_grain_gfx_freq_parameters = smu_v14_0_0_set_fine_grain_gfx_freq_parameters,
+	.set_fine_grain_gfx_freq_parameters = smu_v14_0_common_set_fine_grain_gfx_freq_parameters,
 	.set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu,
 	.dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable,
 	.dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable,
-	.get_dpm_clock_table = smu_14_0_0_get_dpm_table,
+	.get_dpm_clock_table = smu_v14_0_common_get_dpm_table,
 };
 
 static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 00cd615bbc..b8dbd4e253 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -378,8 +378,15 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
 	res = __smu_cmn_reg2errno(smu, reg);
 	if (res != 0)
 		__smu_cmn_reg_print_error(smu, reg, index, param, msg);
-	if (read_arg)
+	if (read_arg) {
 		smu_cmn_read_arg(smu, read_arg);
+		dev_dbg(adev->dev,
+			"smu send message: %s(%d) param: 0x%08x, resp: 0x%08x, readval: 0x%08x\n",
+			smu_get_message_name(smu, msg), index, param, reg, *read_arg);
+	} else {
+		dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x\n",
+			smu_get_message_name(smu, msg), index, param, reg);
+	}
 Out:
 	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) {
 		amdgpu_device_halt(adev);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index cc590e27d8..81bfce1406 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -30,6 +30,16 @@
 #define FDO_PWM_MODE_STATIC  1
 #define FDO_PWM_MODE_STATIC_RPM 5
 
+#define SMU_IH_INTERRUPT_ID_TO_DRIVER			0xFE
+#define SMU_IH_INTERRUPT_CONTEXT_ID_BACO		0x2
+#define SMU_IH_INTERRUPT_CONTEXT_ID_AC			0x3
+#define SMU_IH_INTERRUPT_CONTEXT_ID_DC			0x4
+#define SMU_IH_INTERRUPT_CONTEXT_ID_AUDIO_D0		0x5
+#define SMU_IH_INTERRUPT_CONTEXT_ID_AUDIO_D3		0x6
+#define SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING	0x7
+#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL	0x8
+#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY	0x9
+
 extern const int link_speed[];
 
 /* Helper to convert from PCIE Gen 1/2/3/4/5/6 to 0.1 GT/s speed units */
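Finally, the SMU_IH_INTERRUPT_CONTEXT_ID_* additions give names to the context IDs the PMFW attaches to interrupt SMU_IH_INTERRUPT_ID_TO_DRIVER (0xFE), which smu_v14_0_register_irq_handler() now requests by name instead of as a magic number. A standalone decoder for those IDs (the value-to-event mapping is taken from the header above; the function itself is illustrative, not driver API):

    #include <stdint.h>
    #include <stdio.h>

    /* Context IDs carried by MP1 interrupt 0xFE, per smu_cmn.h. */
    #define SMU_IH_INTERRUPT_CONTEXT_ID_BACO               0x2
    #define SMU_IH_INTERRUPT_CONTEXT_ID_AC                 0x3
    #define SMU_IH_INTERRUPT_CONTEXT_ID_DC                 0x4
    #define SMU_IH_INTERRUPT_CONTEXT_ID_AUDIO_D0           0x5
    #define SMU_IH_INTERRUPT_CONTEXT_ID_AUDIO_D3           0x6
    #define SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
    #define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL       0x8
    #define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY       0x9

    static const char *smu_ih_ctxid_name(uint32_t ctxid)
    {
        switch (ctxid) {
        case SMU_IH_INTERRUPT_CONTEXT_ID_BACO:               return "BACO";
        case SMU_IH_INTERRUPT_CONTEXT_ID_AC:                 return "AC power";
        case SMU_IH_INTERRUPT_CONTEXT_ID_DC:                 return "DC power";
        case SMU_IH_INTERRUPT_CONTEXT_ID_AUDIO_D0:           return "audio D0";
        case SMU_IH_INTERRUPT_CONTEXT_ID_AUDIO_D3:           return "audio D3";
        case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING: return "thermal throttling";
        case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL:       return "fan abnormal";
        case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY:       return "fan recovery";
        default:                                             return "unknown";
        }
    }

    int main(void)
    {
        printf("ctxid 0x7 -> %s\n", smu_ih_ctxid_name(0x7));
        return 0;
    }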