From 01a69402cf9d38ff180345d55c2ee51c7e89fbc7 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sat, 18 May 2024 20:50:03 +0200 Subject: Adding upstream version 6.8.9. Signed-off-by: Daniel Baumann --- drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 1 - drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 129 ++++++++++++++++----- .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 84 ++++++-------- .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c | 12 +- .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 100 ++++++++++++---- .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 51 +++----- 6 files changed, 236 insertions(+), 141 deletions(-) (limited to 'drivers/gpu/drm/amd/pm/swsmu/smu13') diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index e3579a8fd7..ca0d5a5b20 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -1530,7 +1530,6 @@ static int aldebaran_i2c_control_init(struct smu_context *smu) smu_i2c->port = 0; mutex_init(&smu_i2c->mutex); control->owner = THIS_MODULE; - control->class = I2C_CLASS_SPD; control->dev.parent = &adev->pdev->dev; control->algo = &aldebaran_i2c_algo; snprintf(control->name, sizeof(control->name), "AMDGPU SMU 0"); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 68981086b5..c486182ff2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -2201,7 +2201,7 @@ int smu_v13_0_gfx_ulv_control(struct smu_context *smu, return ret; } -int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu, +static int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_baco_seq baco_seq) { struct smu_baco_context *smu_baco = &smu->smu_baco; @@ -2223,33 +2223,14 @@ int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu, return 0; } -bool smu_v13_0_baco_is_support(struct smu_context *smu) -{ - struct smu_baco_context *smu_baco = &smu->smu_baco; - - if (amdgpu_sriov_vf(smu->adev) || - !smu_baco->platform_support) - return false; - - /* return true if ASIC is in BACO state already */ - if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER) - return true; - - if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && - !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) - return false; - - return true; -} - -enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu) +static enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu) { struct smu_baco_context *smu_baco = &smu->smu_baco; return smu_baco->state; } -int smu_v13_0_baco_set_state(struct smu_context *smu, +static int smu_v13_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) { struct smu_baco_context *smu_baco = &smu->smu_baco; @@ -2283,24 +2264,60 @@ int smu_v13_0_baco_set_state(struct smu_context *smu, return ret; } -int smu_v13_0_baco_enter(struct smu_context *smu) +bool smu_v13_0_baco_is_support(struct smu_context *smu) { - int ret = 0; + struct smu_baco_context *smu_baco = &smu->smu_baco; - ret = smu_v13_0_baco_set_state(smu, - SMU_BACO_STATE_ENTER); - if (ret) - return ret; + if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support) + return false; + + /* return true if ASIC is in BACO state already */ + if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER) + return true; - msleep(10); + if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && + 
!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) + return false; - return ret; + return true; +} + +int smu_v13_0_baco_enter(struct smu_context *smu) +{ + struct smu_baco_context *smu_baco = &smu->smu_baco; + struct amdgpu_device *adev = smu->adev; + int ret; + + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { + return smu_v13_0_baco_set_armd3_sequence(smu, + (smu_baco->maco_support && amdgpu_runtime_pm != 1) ? + BACO_SEQ_BAMACO : BACO_SEQ_BACO); + } else { + ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_ENTER); + if (!ret) + usleep_range(10000, 11000); + + return ret; + } } int smu_v13_0_baco_exit(struct smu_context *smu) { - return smu_v13_0_baco_set_state(smu, - SMU_BACO_STATE_EXIT); + struct amdgpu_device *adev = smu->adev; + int ret; + + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { + /* Wait for PMFW handling for the Dstate change */ + usleep_range(10000, 11000); + ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); + } else { + ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_EXIT); + } + + if (!ret) + adev->gfx.is_poweron = false; + + return ret; } int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu) @@ -2492,3 +2509,51 @@ int smu_v13_0_disable_pmfw_state(struct smu_context *smu) return ret == 0 ? 0 : -EINVAL; } + +int smu_v13_0_enable_uclk_shadow(struct smu_context *smu, bool enable) +{ + return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableUCLKShadow, enable, NULL); +} + +int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu, + struct freq_band_range *exclusion_ranges) +{ + WifiBandEntryTable_t wifi_bands; + int valid_entries = 0; + int ret, i; + + memset(&wifi_bands, 0, sizeof(wifi_bands)); + for (i = 0; i < ARRAY_SIZE(wifi_bands.WifiBandEntry); i++) { + if (!exclusion_ranges[i].start && !exclusion_ranges[i].end) + break; + + /* PMFW expects the inputs to be in Mhz unit */ + wifi_bands.WifiBandEntry[valid_entries].LowFreq = + DIV_ROUND_DOWN_ULL(exclusion_ranges[i].start, HZ_PER_MHZ); + wifi_bands.WifiBandEntry[valid_entries++].HighFreq = + DIV_ROUND_UP_ULL(exclusion_ranges[i].end, HZ_PER_MHZ); + } + wifi_bands.WifiBandEntryNum = valid_entries; + + /* + * Per confirm with PMFW team, WifiBandEntryNum = 0 + * is a valid setting. + * + * Considering the scenarios below: + * - At first the wifi device adds an exclusion range e.g. (2400,2500) to + * BIOS and our driver gets notified. We will set WifiBandEntryNum = 1 + * and pass the WifiBandEntry (2400, 2500) to PMFW. + * + * - Later the wifi device removes the wifiband list added above and + * our driver gets notified again. At this time, driver will set + * WifiBandEntryNum = 0 and pass an empty WifiBandEntry list to PMFW. + * + * - PMFW may still need to do some uclk shadow update(e.g. switching + * from shadow clock back to primary clock) on receiving this. 
+ */ + ret = smu_cmn_update_table(smu, SMU_TABLE_WIFIBAND, 0, &wifi_bands, true); + if (ret) + dev_warn(smu->adev->dev, "Failed to set wifiband!"); + + return ret; +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 9ac6c408d2..9c03296f92 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -169,6 +169,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0), MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0), MSG_MAP(DALNotPresent, PPSMC_MSG_DALNotPresent, 0), + MSG_MAP(EnableUCLKShadow, PPSMC_MSG_EnableUCLKShadow, 0), }; static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = { @@ -253,6 +254,7 @@ static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = { TAB_MAP(I2C_COMMANDS), TAB_MAP(ECCINFO), TAB_MAP(OVERDRIVE), + TAB_MAP(WIFIBAND), }; static struct cmn2asic_mapping smu_v13_0_0_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { @@ -498,6 +500,9 @@ static int smu_v13_0_0_tables_init(struct smu_context *smu) PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_WIFIBAND, + sizeof(WifiBandEntryTable_t), PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM); smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL); if (!smu_table->metrics_table) @@ -2544,16 +2549,19 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, workload_mask = 1 << workload_type; - /* Add optimizations for SMU13.0.0. Reuse the power saving profile */ - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE && - (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0)) && - ((smu->adev->pm.fw_version == 0x004e6601) || - (smu->adev->pm.fw_version >= 0x004e7400))) { - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - PP_SMC_POWER_PROFILE_POWERSAVING); - if (workload_type >= 0) - workload_mask |= 1 << workload_type; + /* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */ + if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) { + if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) && + ((smu->adev->pm.fw_version == 0x004e6601) || + (smu->adev->pm.fw_version >= 0x004e7300))) || + (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) && + smu->adev->pm.fw_version >= 0x00504500)) { + workload_type = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_WORKLOAD, + PP_SMC_POWER_PROFILE_POWERSAVING); + if (workload_type >= 0) + workload_mask |= 1 << workload_type; + } } return smu_cmn_send_smc_msg_with_param(smu, @@ -2562,38 +2570,6 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, NULL); } -static int smu_v13_0_0_baco_enter(struct smu_context *smu) -{ - struct smu_baco_context *smu_baco = &smu->smu_baco; - struct amdgpu_device *adev = smu->adev; - - if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) - return smu_v13_0_baco_set_armd3_sequence(smu, - (smu_baco->maco_support && amdgpu_runtime_pm != 1) ? 
- BACO_SEQ_BAMACO : BACO_SEQ_BACO); - else - return smu_v13_0_baco_enter(smu); -} - -static int smu_v13_0_0_baco_exit(struct smu_context *smu) -{ - struct amdgpu_device *adev = smu->adev; - int ret; - - if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { - /* Wait for PMFW handling for the Dstate change */ - usleep_range(10000, 11000); - ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); - } else { - ret = smu_v13_0_baco_exit(smu); - } - - if (!ret) - adev->gfx.is_poweron = false; - - return ret; -} - static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; @@ -2724,7 +2700,6 @@ static int smu_v13_0_0_i2c_control_init(struct smu_context *smu) smu_i2c->port = i; mutex_init(&smu_i2c->mutex); control->owner = THIS_MODULE; - control->class = I2C_CLASS_SPD; control->dev.parent = &adev->pdev->dev; control->algo = &smu_v13_0_0_i2c_algo; snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i); @@ -2968,6 +2943,20 @@ static ssize_t smu_v13_0_0_get_ecc_info(struct smu_context *smu, return ret; } +static bool smu_v13_0_0_wbrf_support_check(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { + case IP_VERSION(13, 0, 0): + return smu->smc_fw_version >= 0x004e6300; + case IP_VERSION(13, 0, 10): + return smu->smc_fw_version >= 0x00503300; + default: + return false; + } +} + static int smu_v13_0_0_set_power_limit(struct smu_context *smu, enum smu_ppt_limit_type limit_type, uint32_t limit) @@ -3082,10 +3071,8 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .deep_sleep_control = smu_v13_0_deep_sleep_control, .gfx_ulv_control = smu_v13_0_gfx_ulv_control, .baco_is_support = smu_v13_0_baco_is_support, - .baco_get_state = smu_v13_0_baco_get_state, - .baco_set_state = smu_v13_0_baco_set_state, - .baco_enter = smu_v13_0_0_baco_enter, - .baco_exit = smu_v13_0_0_baco_exit, + .baco_enter = smu_v13_0_baco_enter, + .baco_exit = smu_v13_0_baco_exit, .mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported, .mode1_reset = smu_v13_0_0_mode1_reset, .mode2_reset = smu_v13_0_0_mode2_reset, @@ -3097,6 +3084,9 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .gpo_control = smu_v13_0_gpo_control, .get_ecc_info = smu_v13_0_0_get_ecc_info, .notify_display_change = smu_v13_0_notify_display_change, + .is_asic_wbrf_supported = smu_v13_0_0_wbrf_support_check, + .enable_uclk_shadow = smu_v13_0_enable_uclk_shadow, + .set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges, }; void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index bb98156b2f..949131bd1e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -226,8 +226,18 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en) struct amdgpu_device *adev = smu->adev; int ret = 0; - if (!en && !adev->in_s0ix) + if (!en && !adev->in_s0ix) { + /* Adds a GFX reset as workaround just before sending the + * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering + * an invalid state. 
+ */ + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, + SMU_RESET_MODE_2, NULL); + if (ret) + return ret; + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL); + } return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 58f63b7a72..78491b04df 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -120,6 +120,7 @@ struct mca_ras_info { #define P2S_TABLE_ID_A 0x50325341 #define P2S_TABLE_ID_X 0x50325358 +// clang-format off static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), @@ -128,6 +129,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0), MSG_MAP(RequestI2cTransaction, PPSMC_MSG_RequestI2cTransaction, 0), MSG_MAP(GetMetricsTable, PPSMC_MSG_GetMetricsTable, 1), + MSG_MAP(GetMetricsVersion, PPSMC_MSG_GetMetricsVersion, 1), MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1), MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1), MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), @@ -158,8 +160,8 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0), MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxDpmFreq, 1), MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxDpmFreq, 1), - MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 0), - MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0), + MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 1), + MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1), MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareForDriverUnload, 0), MSG_MAP(GetCTFLimit, PPSMC_MSG_GetCTFLimit, 0), MSG_MAP(GetThermalLimit, PPSMC_MSG_ReadThrottlerLimit, 0), @@ -171,6 +173,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0), }; +// clang-format on static const struct cmn2asic_mapping smu_v13_0_6_clk_map[SMU_CLK_COUNT] = { CLK_MAP(SOCCLK, PPCLK_SOCCLK), CLK_MAP(FCLK, PPCLK_FCLK), @@ -432,6 +435,41 @@ static int smu_v13_0_6_get_metrics_table(struct smu_context *smu, return 0; } +static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu, + void *metrics, size_t max_size) +{ + struct smu_table_context *smu_tbl_ctxt = &smu->smu_table; + uint32_t table_version = smu_tbl_ctxt->tables[SMU_TABLE_SMU_METRICS].version; + uint32_t table_size = smu_tbl_ctxt->tables[SMU_TABLE_SMU_METRICS].size; + struct amdgpu_pm_metrics *pm_metrics = metrics; + uint32_t pmfw_version; + int ret; + + if (!pm_metrics || !max_size) + return -EINVAL; + + if (max_size < (table_size + sizeof(pm_metrics->common_header))) + return -EOVERFLOW; + + /* Don't use cached metrics data */ + ret = smu_v13_0_6_get_metrics_table(smu, pm_metrics->data, true); + if (ret) + return ret; + + smu_cmn_get_smc_version(smu, NULL, &pmfw_version); + + memset(&pm_metrics->common_header, 0, + sizeof(pm_metrics->common_header)); + pm_metrics->common_header.mp1_ip_discovery_version = + IP_VERSION(13, 0, 6); + pm_metrics->common_header.pmfw_version = pmfw_version; + pm_metrics->common_header.pmmetrics_version = table_version; + 
pm_metrics->common_header.structure_size = + sizeof(pm_metrics->common_header) + table_size; + + return pm_metrics->common_header.structure_size; +} + static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; @@ -441,6 +479,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) (struct PPTable_t *)smu_table->driver_pptable; struct amdgpu_device *adev = smu->adev; int ret, i, retry = 100; + uint32_t table_version; /* Store one-time values in driver PPTable */ if (!pptable->Init) { @@ -459,6 +498,13 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) if (!retry) return -ETIME; + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsVersion, + &table_version); + if (ret) + return ret; + smu_table->tables[SMU_TABLE_SMU_METRICS].version = + table_version; + pptable->MaxSocketPowerLimit = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit)); pptable->MaxGfxclkFrequency = @@ -1479,7 +1525,6 @@ static int smu_v13_0_6_mca_set_debug_mode(struct smu_context *smu, bool enable) if (smu->smc_fw_version < 0x554800) return 0; - amdgpu_ras_set_mca_debug_mode(smu->adev, enable); return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ClearMcaOnRead, enable ? 0 : ClearMcaOnRead_UE_FLAG_MASK | ClearMcaOnRead_CE_POLL_MASK, NULL); @@ -1893,7 +1938,6 @@ static int smu_v13_0_6_i2c_control_init(struct smu_context *smu) smu_i2c->port = i; mutex_init(&smu_i2c->mutex); control->owner = THIS_MODULE; - control->class = I2C_CLASS_SPD; control->dev.parent = &adev->pdev->dev; control->algo = &smu_v13_0_6_i2c_algo; snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i); @@ -2332,16 +2376,6 @@ static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu, return ret; } -static int smu_v13_0_6_post_init(struct smu_context *smu) -{ - struct amdgpu_device *adev = smu->adev; - - if (!amdgpu_sriov_vf(adev) && adev->ras_enabled) - return smu_v13_0_6_mca_set_debug_mode(smu, false); - - return 0; -} - static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable) { struct smu_context *smu = adev->powerplay.pp_handle; @@ -2424,8 +2458,8 @@ static const struct mca_bank_ipid smu_v13_0_6_mca_ipid_table[AMDGPU_MCA_IP_COUNT static void mca_bank_entry_info_decode(struct mca_bank_entry *entry, struct mca_bank_info *info) { - uint64_t ipid = entry->regs[MCA_REG_IDX_IPID]; - uint32_t insthi; + u64 ipid = entry->regs[MCA_REG_IDX_IPID]; + u32 instidhi, instid; /* NOTE: All MCA IPID register share the same format, * so the driver can share the MCMP1 register header file. @@ -2434,9 +2468,15 @@ static void mca_bank_entry_info_decode(struct mca_bank_entry *entry, struct mca_ info->hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID); info->mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType); - insthi = REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi); - info->aid = ((insthi >> 2) & 0x03); - info->socket_id = insthi & 0x03; + /* + * Unfied DieID Format: SAASS. A:AID, S:Socket. 
+ * Unfied DieID[4] = InstanceId[0] + * Unfied DieID[0:3] = InstanceIdHi[0:3] + */ + instidhi = REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi); + instid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo); + info->aid = ((instidhi >> 2) & 0x03); + info->socket_id = ((instid & 0x1) << 2) | (instidhi & 0x03); } static int mca_bank_read_reg(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, @@ -2515,9 +2555,9 @@ static int mca_umc_mca_get_err_count(const struct mca_ras_info *mca_ras, struct return 0; } - if (type == AMDGPU_MCA_ERROR_TYPE_UE && umc_v12_0_is_uncorrectable_error(status0)) + if (type == AMDGPU_MCA_ERROR_TYPE_UE && umc_v12_0_is_uncorrectable_error(adev, status0)) *count = 1; - else if (type == AMDGPU_MCA_ERROR_TYPE_CE && umc_v12_0_is_correctable_error(status0)) + else if (type == AMDGPU_MCA_ERROR_TYPE_CE && umc_v12_0_is_correctable_error(adev, status0)) *count = 1; return 0; @@ -2528,13 +2568,15 @@ static int mca_pcs_xgmi_mca_get_err_count(const struct mca_ras_info *mca_ras, st uint32_t *count) { u32 ext_error_code; + u32 err_cnt; ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(entry->regs[MCA_REG_IDX_STATUS]); + err_cnt = MCA_REG__MISC0__ERRCNT(entry->regs[MCA_REG_IDX_MISC0]); if (type == AMDGPU_MCA_ERROR_TYPE_UE && ext_error_code == 0) - *count = 1; + *count = err_cnt; else if (type == AMDGPU_MCA_ERROR_TYPE_CE && ext_error_code == 6) - *count = 1; + *count = err_cnt; return 0; } @@ -2610,6 +2652,7 @@ static bool mca_gfx_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct uint32_t instlo; instlo = REG_GET_FIELD(entry->regs[MCA_REG_IDX_IPID], MCMP1_IPIDT0, InstanceIdLo); + instlo &= GENMASK(31, 1); switch (instlo) { case 0x36430400: /* SMNAID XCD 0 */ case 0x38430400: /* SMNAID XCD 1 */ @@ -2629,6 +2672,7 @@ static bool mca_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amd uint32_t errcode, instlo; instlo = REG_GET_FIELD(entry->regs[MCA_REG_IDX_IPID], MCMP1_IPIDT0, InstanceIdLo); + instlo &= GENMASK(31, 1); if (instlo != 0x03b30400) return false; @@ -2851,6 +2895,13 @@ static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu, return ret; } +static ssize_t smu_v13_0_6_get_ecc_info(struct smu_context *smu, + void *table) +{ + /* Support ecc info by default */ + return 0; +} + static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { /* init dpm */ .get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask, @@ -2895,6 +2946,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .log_thermal_throttling_event = smu_v13_0_6_log_thermal_throttling_event, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .get_gpu_metrics = smu_v13_0_6_get_gpu_metrics, + .get_pm_metrics = smu_v13_0_6_get_pm_metrics, .get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range, .mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported, .mode2_reset_is_support = smu_v13_0_6_is_mode2_reset_supported, @@ -2904,7 +2956,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .i2c_init = smu_v13_0_6_i2c_control_init, .i2c_fini = smu_v13_0_6_i2c_control_fini, .send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num, - .post_init = smu_v13_0_6_post_init, + .get_ecc_info = smu_v13_0_6_get_ecc_info, }; void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 402e0d184e..7318964f1f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ 
b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -140,6 +140,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0), MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0), MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0), + MSG_MAP(EnableUCLKShadow, PPSMC_MSG_EnableUCLKShadow, 0), }; static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = { @@ -222,6 +223,7 @@ static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = { TAB_MAP(ACTIVITY_MONITOR_COEFF), [SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE}, TAB_MAP(OVERDRIVE), + TAB_MAP(WIFIBAND), }; static struct cmn2asic_mapping smu_v13_0_7_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { @@ -512,6 +514,9 @@ static int smu_v13_0_7_tables_init(struct smu_context *smu) AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_WIFIBAND, + sizeof(WifiBandEntryTable_t), PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM); smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL); if (!smu_table->metrics_table) @@ -2513,38 +2518,6 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu, return ret; } -static int smu_v13_0_7_baco_enter(struct smu_context *smu) -{ - struct smu_baco_context *smu_baco = &smu->smu_baco; - struct amdgpu_device *adev = smu->adev; - - if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) - return smu_v13_0_baco_set_armd3_sequence(smu, - (smu_baco->maco_support && amdgpu_runtime_pm != 1) ? - BACO_SEQ_BAMACO : BACO_SEQ_BACO); - else - return smu_v13_0_baco_enter(smu); -} - -static int smu_v13_0_7_baco_exit(struct smu_context *smu) -{ - struct amdgpu_device *adev = smu->adev; - int ret; - - if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { - /* Wait for PMFW handling for the Dstate change */ - usleep_range(10000, 11000); - ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); - } else { - ret = smu_v13_0_baco_exit(smu); - } - - if (!ret) - adev->gfx.is_poweron = false; - - return ret; -} - static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; @@ -2565,6 +2538,11 @@ static int smu_v13_0_7_set_df_cstate(struct smu_context *smu, NULL); } +static bool smu_v13_0_7_wbrf_support_check(struct smu_context *smu) +{ + return smu->smc_fw_version > 0x00524600; +} + static int smu_v13_0_7_set_power_limit(struct smu_context *smu, enum smu_ppt_limit_type limit_type, uint32_t limit) @@ -2673,15 +2651,16 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, .baco_is_support = smu_v13_0_baco_is_support, - .baco_get_state = smu_v13_0_baco_get_state, - .baco_set_state = smu_v13_0_baco_set_state, - .baco_enter = smu_v13_0_7_baco_enter, - .baco_exit = smu_v13_0_7_baco_exit, + .baco_enter = smu_v13_0_baco_enter, + .baco_exit = smu_v13_0_baco_exit, .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported, .mode1_reset = smu_v13_0_mode1_reset, .set_mp1_state = smu_v13_0_7_set_mp1_state, .set_df_cstate = smu_v13_0_7_set_df_cstate, .gpo_control = smu_v13_0_gpo_control, + .is_asic_wbrf_supported = smu_v13_0_7_wbrf_support_check, + .enable_uclk_shadow = smu_v13_0_enable_uclk_shadow, + .set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges, }; void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu) -- cgit 
v1.2.3
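
For readers following the new WBRF plumbing added in smu_v13_0.c above, the standalone sketch below mirrors how smu_v13_0_set_wbrf_exclusion_ranges() packs Hz-based exclusion ranges into MHz table entries: the list stops at the first all-zero range, a zero-entry table is valid, and each band is rounded outward (low edge down, high edge up) so the excluded band stays fully covered. This is an illustration only, not code from the patch; struct band_range, struct wifi_band_entry, and MAX_ENTRIES are hypothetical stand-ins for the kernel/PMFW types, and the real helper additionally sends the table to PMFW via smu_cmn_update_table().

/*
 * Standalone illustration of the Hz -> MHz packing done by
 * smu_v13_0_set_wbrf_exclusion_ranges() in the patch above.
 * Types and MAX_ENTRIES are stand-ins, not the kernel definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define HZ_PER_MHZ	1000000ULL
#define MAX_ENTRIES	11	/* assumed capacity of the firmware table */

struct band_range { uint64_t start, end; };		/* in Hz; 0/0 ends the list */
struct wifi_band_entry { uint32_t low_mhz, high_mhz; };

static int pack_wifi_bands(const struct band_range *in,
			   struct wifi_band_entry *out)
{
	int n = 0;

	for (int i = 0; i < MAX_ENTRIES; i++) {
		if (!in[i].start && !in[i].end)
			break;				/* empty slot terminates the list */

		/* Round the low edge down and the high edge up, as the patch does
		 * with DIV_ROUND_DOWN_ULL()/DIV_ROUND_UP_ULL().
		 */
		out[n].low_mhz  = (uint32_t)(in[i].start / HZ_PER_MHZ);
		out[n].high_mhz = (uint32_t)((in[i].end + HZ_PER_MHZ - 1) / HZ_PER_MHZ);
		n++;
	}

	return n;	/* zero entries is a valid result, per the patch comment */
}

int main(void)
{
	struct band_range ranges[MAX_ENTRIES] = {
		{ 2400000000ULL, 2483500000ULL },	/* 2.4 GHz Wi-Fi band */
		{ 0, 0 },
	};
	struct wifi_band_entry table[MAX_ENTRIES];
	int n = pack_wifi_bands(ranges, table);

	printf("%d entr%s", n, n == 1 ? "y" : "ies");
	if (n)
		printf(", first: %u-%u MHz", table[0].low_mhz, table[0].high_mhz);
	printf("\n");
	return 0;
}

The outward rounding is the conservative choice: truncating both edges to whole MHz could leave a sliver of the reported exclusion range outside the band handed to PMFW.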