author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:35:05 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:31 +0000
commit     85c675d0d09a45a135bddd15d7b385f8758c32fb
tree       76267dbc9b9a130337be3640948fe397b04ac629
parent     Adding upstream version 6.6.15.
Adding upstream version 6.7.7. (tag: upstream/6.7.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/amd/pm')
 drivers/gpu/drm/amd/pm/Makefile                                     |    1
 drivers/gpu/drm/amd/pm/amdgpu_dpm.c                                 |   66
 drivers/gpu/drm/amd/pm/amdgpu_pm.c                                  |  991
 drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h                             |   21
 drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c                          |    2
 drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c                    |    3
 drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h               |    4
 drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c      |    2
 drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h                |    2
 drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c                 |   17
 drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c               |    4
 drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h             |   24
 drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c             |    4
 drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c             |    4
 drivers/gpu/drm/amd/pm/swsmu/Makefile                               |    2
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c                           |  222
 drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h                       |   48
 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h  |   91
 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h  |  237
 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h         |  112
 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h        |   10
 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h         |  157
 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h        |  143
 drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h                        |   23
 drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h                        |    2
 drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h                        |  230
 drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c                   |  305
 drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c                     |  104
 drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c             |  150
 drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c                      |   36
 drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c                    |   95
 drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c                     |    8
 drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c                  |  291
 drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c                      |   45
 drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c                |  583
 drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c                |    2
 drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c                |    2
 drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c                | 1103
 drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c                |  530
 drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c                |   18
 drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile                         |   30
 drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c                      | 1729
 drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c                | 1139
 drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.h                |   28
 drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c                              |    9
 drivers/gpu/drm/amd/pm/swsmu/smu_internal.h                         |    3
 46 files changed, 7519 insertions(+), 1113 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/Makefile b/drivers/gpu/drm/amd/pm/Makefile
index 51751db43..ebbf188f6 100644
--- a/drivers/gpu/drm/amd/pm/Makefile
+++ b/drivers/gpu/drm/amd/pm/Makefile
@@ -30,6 +30,7 @@ subdir-ccflags-y += \
-I$(FULL_AMD_PATH)/pm/swsmu/smu11 \
-I$(FULL_AMD_PATH)/pm/swsmu/smu12 \
-I$(FULL_AMD_PATH)/pm/swsmu/smu13 \
+ -I$(FULL_AMD_PATH)/pm/swsmu/smu14 \
-I$(FULL_AMD_PATH)/pm/powerplay/inc \
-I$(FULL_AMD_PATH)/pm/powerplay/smumgr\
-I$(FULL_AMD_PATH)/pm/powerplay/hwmgr \
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 078aaaa53..8ec11da03 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -93,6 +93,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
case AMD_IP_BLOCK_TYPE_JPEG:
case AMD_IP_BLOCK_TYPE_GMC:
case AMD_IP_BLOCK_TYPE_ACP:
+ case AMD_IP_BLOCK_TYPE_VPE:
if (pp_funcs && pp_funcs->set_powergating_by_smu)
ret = (pp_funcs->set_powergating_by_smu(
(adev)->powerplay.pp_handle, block_type, gate));
@@ -180,6 +181,24 @@ int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
return ret;
}
+int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
+{
+ int ret = 0;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (pp_funcs && pp_funcs->notify_rlc_state) {
+ mutex_lock(&adev->pm.mutex);
+
+ ret = pp_funcs->notify_rlc_state(
+ adev->powerplay.pp_handle,
+ en);
+
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return ret;
+}
+
bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
@@ -351,14 +370,43 @@ int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
return ret;
}
-int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
+int amdgpu_dpm_get_xgmi_plpd_mode(struct amdgpu_device *adev, char **mode_desc)
{
struct smu_context *smu = adev->powerplay.pp_handle;
- int ret = 0;
+ int mode = XGMI_PLPD_NONE;
+
+ if (is_support_sw_smu(adev)) {
+ mode = smu->plpd_mode;
+ if (mode_desc == NULL)
+ return mode;
+ switch (smu->plpd_mode) {
+ case XGMI_PLPD_DISALLOW:
+ *mode_desc = "disallow";
+ break;
+ case XGMI_PLPD_DEFAULT:
+ *mode_desc = "default";
+ break;
+ case XGMI_PLPD_OPTIMIZED:
+ *mode_desc = "optimized";
+ break;
+ case XGMI_PLPD_NONE:
+ default:
+ *mode_desc = "none";
+ break;
+ }
+ }
+
+ return mode;
+}
+
+int amdgpu_dpm_set_xgmi_plpd_mode(struct amdgpu_device *adev, int mode)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = -EOPNOTSUPP;
if (is_support_sw_smu(adev)) {
mutex_lock(&adev->pm.mutex);
- ret = smu_allow_xgmi_power_down(smu, en);
+ ret = smu_set_xgmi_plpd_mode(smu, mode);
mutex_unlock(&adev->pm.mutex);
}
@@ -461,7 +509,7 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso
int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- int ret = -EINVAL;
+ int ret = -EOPNOTSUPP;
if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
mutex_lock(&adev->pm.mutex);
@@ -475,7 +523,7 @@ int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit
int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- int ret = -EINVAL;
+ int ret = -EOPNOTSUPP;
if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
mutex_lock(&adev->pm.mutex);
@@ -1152,7 +1200,7 @@ int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
int ret = 0;
if (!pp_funcs->get_sclk_od)
- return 0;
+ return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
@@ -1166,7 +1214,7 @@ int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
if (is_support_sw_smu(adev))
- return 0;
+ return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
if (pp_funcs->set_sclk_od)
@@ -1189,7 +1237,7 @@ int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
int ret = 0;
if (!pp_funcs->get_mclk_od)
- return 0;
+ return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
@@ -1203,7 +1251,7 @@ int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
if (is_support_sw_smu(adev))
- return 0;
+ return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
if (pp_funcs->set_mclk_od)
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index b4c9fedaa..20c53eefd 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -35,6 +35,44 @@
#include <linux/pm_runtime.h>
#include <asm/processor.h>
+#define MAX_NUM_OF_FEATURES_PER_SUBSET 8
+#define MAX_NUM_OF_SUBSETS 8
+
+struct od_attribute {
+ struct kobj_attribute attribute;
+ struct list_head entry;
+};
+
+struct od_kobj {
+ struct kobject kobj;
+ struct list_head entry;
+ struct list_head attribute;
+ void *priv;
+};
+
+struct od_feature_ops {
+ umode_t (*is_visible)(struct amdgpu_device *adev);
+ ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count);
+};
+
+struct od_feature_item {
+ const char *name;
+ struct od_feature_ops ops;
+};
+
+struct od_feature_container {
+ char *name;
+ struct od_feature_ops ops;
+ struct od_feature_item sub_feature[MAX_NUM_OF_FEATURES_PER_SUBSET];
+};
+
+struct od_feature_set {
+ struct od_feature_container containers[MAX_NUM_OF_SUBSETS];
+};
+
static const struct hwmon_temp_label {
enum PP_HWMON_TEMP channel;
const char *label;
@@ -643,18 +681,14 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
* They can be used to calibrate the sclk voltage curve. This is
* available for Vega20 and NV1X.
*
- * - voltage offset for the six anchor points of the v/f curve labeled
- * OD_VDDC_CURVE. They can be used to calibrate the v/f curve. This
- * is only availabe for some SMU13 ASICs.
- *
* - voltage offset(in mV) applied on target voltage calculation.
- * This is available for Sienna Cichlid, Navy Flounder and Dimgrey
- * Cavefish. For these ASICs, the target voltage calculation can be
- * illustrated by "voltage = voltage calculated from v/f curve +
- * overdrive vddgfx offset"
+ * This is available for Sienna Cichlid, Navy Flounder, Dimgrey
+ * Cavefish and some later SMU13 ASICs. For these ASICs, the target
+ * voltage calculation can be illustrated by "voltage = voltage
+ * calculated from v/f curve + overdrive vddgfx offset"
*
- * - a list of valid ranges for sclk, mclk, and voltage curve points
- * labeled OD_RANGE
+ * - a list of valid ranges for sclk, mclk, voltage curve points
+ * or voltage offset labeled OD_RANGE
*
* < For APUs >
*
@@ -686,24 +720,17 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
* E.g., "p 2 0 800" would set the minimum core clock on core
* 2 to 800Mhz.
*
- * For sclk voltage curve,
- * - For NV1X, enter the new values by writing a string that
- * contains "vc point clock voltage" to the file. The points
- * are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will update
- * point1 with clock set as 300Mhz and voltage as 600mV. "vc 2
- * 1000 1000" will update point3 with clock set as 1000Mhz and
- * voltage 1000mV.
- * - For SMU13 ASICs, enter the new values by writing a string that
- * contains "vc anchor_point_index voltage_offset" to the file.
- * There are total six anchor points defined on the v/f curve with
- * index as 0 - 5.
- * - "vc 0 10" will update the voltage offset for point1 as 10mv.
- * - "vc 5 -10" will update the voltage offset for point6 as -10mv.
- *
- * To update the voltage offset applied for gfxclk/voltage calculation,
- * enter the new value by writing a string that contains "vo offset".
- * This is supported by Sienna Cichlid, Navy Flounder and Dimgrey Cavefish.
- * And the offset can be a positive or negative value.
+ * For sclk voltage curve supported by Vega20 and NV1X, enter the new
+ * values by writing a string that contains "vc point clock voltage"
+ * to the file. The points are indexed by 0, 1 and 2. E.g., "vc 0 300
+ * 600" will update point1 with clock set as 300Mhz and voltage as 600mV.
+ * "vc 2 1000 1000" will update point3 with clock set as 1000Mhz and
+ * voltage 1000mV.
+ *
+ * For voltage offset supported by Sienna Cichlid, Navy Flounder, Dimgrey
+ * Cavefish and some later SMU13 ASICs, enter the new value by writing a
+ * string that contains "vo offset". E.g., "vo -10" will update the extra
+ * voltage offset applied to the whole v/f curve line as -10mv.
*
* - When you have edited all of the states as needed, write "c" (commit)
* to the file to commit your changes
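(For illustration only, not part of the patch: a minimal user-space sketch of
the "vo" flow described above. The card0 path and the -10 value are assumed,
and each write() plays the role of one echo into the file.)

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *f = "/sys/class/drm/card0/device/pp_od_clk_voltage";
		int fd = open(f, O_WRONLY);

		if (fd < 0)
			return 1;
		write(fd, "vo -10", strlen("vo -10")); /* -10 mV vddgfx offset */
		write(fd, "c", 1);                     /* commit the change */
		close(fd);
		return 0;
	}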
@@ -960,7 +987,16 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
* pp_dpm_fclk interface is only available for Vega20 and later ASICs.
*
* Reading back the files will show you the available power levels within
- * the power state and the clock information for those levels.
+ * the power state and the clock information for those levels. If deep sleep is
+ * applied to a clock, the level will be denoted by a special level 'S:'
+ * E.g., ::
+ *
+ * S: 19Mhz *
+ * 0: 615Mhz
+ * 1: 800Mhz
+ * 2: 888Mhz
+ * 3: 1000Mhz
+ *
*
* To manually adjust these states, first select manual using
* power_dpm_force_performance_level.
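(A hedged companion sketch, not part of the patch: switching to manual mode
and then restricting sclk to two levels. The card0 path is an assumption, as
is the detail that pp_dpm_sclk accepts space-separated level indices, which
matches how these files are documented elsewhere in amdgpu_pm.c.)

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	static void write_str(const char *path, const char *s)
	{
		int fd = open(path, O_WRONLY);

		if (fd >= 0) {
			write(fd, s, strlen(s));
			close(fd);
		}
	}

	int main(void)
	{
		write_str("/sys/class/drm/card0/device/power_dpm_force_performance_level",
			  "manual");
		/* allow only sclk levels 1 and 2 */
		write_str("/sys/class/drm/card0/device/pp_dpm_sclk", "1 2");
		return 0;
	}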
@@ -1471,9 +1507,9 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
return -EINVAL;
}
-static unsigned int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
- enum amd_pp_sensors sensor,
- void *query)
+static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
+ enum amd_pp_sensors sensor,
+ void *query)
{
int r, size = sizeof(uint32_t);
@@ -1960,6 +1996,70 @@ static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
return 0;
}
+/* The following values are read back to indicate the current plpd policy:
+ * - -1: none
+ * - 0: disallow
+ * - 1: default
+ * - 2: optimized
+ */
+static ssize_t amdgpu_get_xgmi_plpd_policy(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ char *mode_desc = "none";
+ int mode;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
+
+ mode = amdgpu_dpm_get_xgmi_plpd_mode(adev, &mode_desc);
+
+ return sysfs_emit(buf, "%d: %s\n", mode, mode_desc);
+}
+
+/* The following argument values are accepted from the user to change the plpd policy:
+ * - arg 0: disallow plpd
+ * - arg 1: default policy
+ * - arg 2: optimized policy
+ */
+static ssize_t amdgpu_set_xgmi_plpd_policy(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ int mode, ret;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
+
+ ret = kstrtos32(buf, 0, &mode);
+ if (ret)
+ return -EINVAL;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+ ret = amdgpu_dpm_set_xgmi_plpd_mode(adev, mode);
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ if (ret)
+ return ret;
+
+ return count;
+}
+
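(An illustrative user-space exercise of the new attribute; editor's sketch,
not part of the patch, and the card0 path is an assumption. Reads return
"index: description" per amdgpu_get_xgmi_plpd_policy() above, and writes
accept 0, 1 or 2 as listed.)

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		const char *f = "/sys/class/drm/card0/device/xgmi_plpd_policy";
		char buf[32] = {0};
		int fd = open(f, O_RDONLY);

		if (fd < 0)
			return 1;
		read(fd, buf, sizeof(buf) - 1);
		printf("current: %s", buf);     /* e.g. "1: default" */
		close(fd);

		fd = open(f, O_WRONLY);
		if (fd < 0)
			return 1;
		write(fd, "2", 1);              /* request "optimized" */
		close(fd);
		return 0;
	}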
static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
@@ -1995,14 +2095,15 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
.attr_update = ss_power_attr_update),
AMDGPU_DEVICE_ATTR_RW(smartshift_bias, ATTR_FLAG_BASIC,
.attr_update = ss_bias_attr_update),
+ AMDGPU_DEVICE_ATTR_RW(xgmi_plpd_policy, ATTR_FLAG_BASIC),
};
static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
uint32_t mask, enum amdgpu_device_attr_states *states)
{
struct device_attribute *dev_attr = &attr->dev_attr;
- uint32_t mp1_ver = adev->ip_versions[MP1_HWIP][0];
- uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
+ uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
+ uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
const char *attr_name = dev_attr->attr.name;
if (!(attr->flags & mask)) {
@@ -2027,7 +2128,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
if (amdgpu_dpm_is_overdrive_supported(adev))
*states = ATTR_STATE_SUPPORTED;
} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
- if (adev->flags & AMD_IS_APU || gc_ver == IP_VERSION(9, 0, 1))
+ if ((adev->flags & AMD_IS_APU &&
+ gc_ver != IP_VERSION(9, 4, 3)) ||
+ gc_ver == IP_VERSION(9, 0, 1))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pcie_bw)) {
/* PCIe Perf counters won't work on APU nodes */
@@ -2091,7 +2194,27 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
*states = ATTR_STATE_UNSUPPORTED;
- else if (gc_ver == IP_VERSION(10, 3, 0) && amdgpu_sriov_vf(adev))
+ else if ((gc_ver == IP_VERSION(10, 3, 0) ||
+ gc_ver == IP_VERSION(11, 0, 3)) && amdgpu_sriov_vf(adev))
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(xgmi_plpd_policy)) {
+ if (amdgpu_dpm_get_xgmi_plpd_mode(adev, NULL) == XGMI_PLPD_NONE)
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_mclk_od)) {
+ if (amdgpu_dpm_get_mclk_od(adev) == -EOPNOTSUPP)
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_sclk_od)) {
+ if (amdgpu_dpm_get_sclk_od(adev) == -EOPNOTSUPP)
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(apu_thermal_cap)) {
+ u32 limit;
+
+ if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) ==
+ -EOPNOTSUPP)
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
+ if (gc_ver == IP_VERSION(9, 4, 2) ||
+ gc_ver == IP_VERSION(9, 4, 3))
*states = ATTR_STATE_UNSUPPORTED;
}
@@ -2777,8 +2900,8 @@ static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
return sysfs_emit(buf, "vddnb\n");
}
-static unsigned int amdgpu_hwmon_get_power(struct device *dev,
- enum amd_pp_sensors sensor)
+static int amdgpu_hwmon_get_power(struct device *dev,
+ enum amd_pp_sensors sensor)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
unsigned int uw;
@@ -2799,36 +2922,28 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- unsigned int val;
+ ssize_t val;
val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_AVG_POWER);
if (val < 0)
return val;
- return sysfs_emit(buf, "%u\n", val);
+ return sysfs_emit(buf, "%zd\n", val);
}
static ssize_t amdgpu_hwmon_show_power_input(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- unsigned int val;
+ ssize_t val;
val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER);
if (val < 0)
return val;
- return sysfs_emit(buf, "%u\n", val);
-}
-
-static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sysfs_emit(buf, "%i\n", 0);
+ return sysfs_emit(buf, "%zd\n", val);
}
-
static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
struct device_attribute *attr,
char *buf,
@@ -2865,6 +2980,12 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
return size;
}
+static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MIN);
+}
static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
struct device_attribute *attr,
@@ -2895,7 +3016,7 @@ static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
+ uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
if (gc_ver == IP_VERSION(10, 3, 1))
return sysfs_emit(buf, "%s\n",
@@ -3183,13 +3304,9 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct amdgpu_device *adev = dev_get_drvdata(dev);
umode_t effective_mode = attr->mode;
- uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
+ uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
uint32_t tmp;
- /* under multi-vf mode, the hwmon attributes are all not supported */
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
-
/* under pp one vf mode manage of hwmon attributes is not supported */
if (amdgpu_sriov_is_pp_one_vf(adev))
effective_mode &= ~S_IWUSR;
@@ -3323,15 +3440,20 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
return 0;
/* hotspot temperature for gc 9,4,3*/
- if ((gc_ver == IP_VERSION(9, 4, 3)) &&
- (attr == &sensor_dev_attr_temp1_input.dev_attr.attr ||
- attr == &sensor_dev_attr_temp1_label.dev_attr.attr))
- return 0;
+ if (gc_ver == IP_VERSION(9, 4, 3)) {
+ if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp1_label.dev_attr.attr)
+ return 0;
+
+ if (attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr)
+ return attr->mode;
+ }
/* only SOC15 dGPUs support hotspot and mem temperatures */
- if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0) ||
- (gc_ver == IP_VERSION(9, 4, 3))) &&
- (attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
+ if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
+ (attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
@@ -3361,10 +3483,703 @@ static const struct attribute_group *hwmon_groups[] = {
NULL
};
-int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
+static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev,
+ enum pp_clock_type od_type,
+ char *buf)
+{
+ int size = 0;
+ int ret;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
+
+ ret = pm_runtime_get_sync(adev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(adev->dev);
+ return ret;
+ }
+
+ size = amdgpu_dpm_print_clock_levels(adev, od_type, buf);
+ if (size == 0)
+ size = sysfs_emit(buf, "\n");
+
+ pm_runtime_mark_last_busy(adev->dev);
+ pm_runtime_put_autosuspend(adev->dev);
+
+ return size;
+}
+
+static int parse_input_od_command_lines(const char *buf,
+ size_t count,
+ u32 *type,
+ long *params,
+ uint32_t *num_of_params)
+{
+ const char delimiter[3] = {' ', '\n', '\0'};
+ uint32_t parameter_size = 0;
+ char buf_cpy[128] = {0};
+ char *tmp_str, *sub_str;
+ int ret;
+
+ if (count > sizeof(buf_cpy) - 1)
+ return -EINVAL;
+
+ memcpy(buf_cpy, buf, count);
+ tmp_str = buf_cpy;
+
+	/* skip leading spaces */
+ while (isspace(*tmp_str))
+ tmp_str++;
+
+ switch (*tmp_str) {
+ case 'c':
+ *type = PP_OD_COMMIT_DPM_TABLE;
+ return 0;
+ case 'r':
+ params[parameter_size] = *type;
+ *num_of_params = 1;
+ *type = PP_OD_RESTORE_DEFAULT_TABLE;
+ return 0;
+ default:
+ break;
+ }
+
+ while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
+ if (strlen(sub_str) == 0)
+ continue;
+
+ ret = kstrtol(sub_str, 0, &params[parameter_size]);
+ if (ret)
+ return -EINVAL;
+ parameter_size++;
+
+ while (isspace(*tmp_str))
+ tmp_str++;
+ }
+
+ *num_of_params = parameter_size;
+
+ return 0;
+}
+
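For reference, the parser above maps raw sysfs input to (type, params) as
follows (illustrative inputs):

	"c"          -> *type = PP_OD_COMMIT_DPM_TABLE, no parameters
	"r"          -> params[0] = previous *type, *num_of_params = 1,
	                *type = PP_OD_RESTORE_DEFAULT_TABLE
	"0 45 153\n" -> params = {0, 45, 153}, *num_of_params = 3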
+static int
+amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
+ enum PP_OD_DPM_TABLE_COMMAND cmd_type,
+ const char *in_buf,
+ size_t count)
+{
+ uint32_t parameter_size = 0;
+ long parameter[64];
+ int ret;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
+
+ ret = parse_input_od_command_lines(in_buf,
+ count,
+ &cmd_type,
+ parameter,
+ &parameter_size);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_get_sync(adev->dev);
+ if (ret < 0)
+ goto err_out0;
+
+ ret = amdgpu_dpm_odn_edit_dpm_table(adev,
+ cmd_type,
+ parameter,
+ parameter_size);
+ if (ret)
+ goto err_out1;
+
+ if (cmd_type == PP_OD_COMMIT_DPM_TABLE) {
+ ret = amdgpu_dpm_dispatch_task(adev,
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ NULL);
+ if (ret)
+ goto err_out1;
+ }
+
+ pm_runtime_mark_last_busy(adev->dev);
+ pm_runtime_put_autosuspend(adev->dev);
+
+ return count;
+
+err_out1:
+ pm_runtime_mark_last_busy(adev->dev);
+err_out0:
+ pm_runtime_put_autosuspend(adev->dev);
+
+ return ret;
+}
+
+/**
+ * DOC: fan_curve
+ *
+ * The amdgpu driver provides a sysfs API for checking and adjusting the fan
+ * control curve line.
+ *
+ * Reading back the file shows you the current settings (temperature in
+ * degrees Celsius and fan speed in PWM) applied to every anchor point of the
+ * curve line and their permitted ranges, if changeable.
+ *
+ * Writing a string in the format "anchor_point_index temperature
+ * fan_speed_in_pwm" to the file changes the settings for the specified
+ * anchor point accordingly.
+ *
+ * When you have finished editing, write "c" (commit) to the file to commit
+ * your changes.
+ *
+ * If you want to reset to the default values, write "r" (reset) to the file
+ * to reset them.
+ *
+ * There are two fan control modes supported: auto and manual. In auto mode,
+ * the PMFW handles fan speed control (how the fan speed reacts to ASIC
+ * temperature), while in manual mode users can set their own fan curve line
+ * as described here. Normally the ASIC boots up in auto mode. Any settings
+ * via this interface will switch the fan control to manual mode implicitly.
+ */
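(A minimal user-space sketch of that flow, not part of the patch; the gpu_od
path, anchor index 0, 45 degrees Celsius and PWM 153 are assumed values.)

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		const char *f =
			"/sys/class/drm/card0/device/gpu_od/fan_ctrl/fan_curve";
		int fd = open(f, O_WRONLY);

		if (fd < 0)
			return 1;
		write(fd, "0 45 153", 8); /* anchor 0: 45 C -> PWM 153 */
		write(fd, "c", 1);        /* commit; switches fan control to manual */
		close(fd);
		return 0;
	}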
+static ssize_t fan_curve_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
+ struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
+
+ return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_CURVE, buf);
+}
+
+static ssize_t fan_curve_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
+ struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
+
+ return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
+ PP_OD_EDIT_FAN_CURVE,
+ buf,
+ count);
+}
+
+static umode_t fan_curve_visible(struct amdgpu_device *adev)
+{
+ umode_t umode = 0000;
+
+ if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE)
+ umode |= S_IRUSR | S_IRGRP | S_IROTH;
+
+ if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_SET)
+ umode |= S_IWUSR;
+
+ return umode;
+}
+
+/**
+ * DOC: acoustic_limit_rpm_threshold
+ *
+ * The amdgpu driver provides a sysfs API for checking and adjusting the
+ * acoustic limit in RPM for fan control.
+ *
+ * Reading back the file shows you the current setting and the permitted
+ * ranges, if changeable.
+ *
+ * Writing an integer to the file changes the setting accordingly.
+ *
+ * When you have finished editing, write "c" (commit) to the file to commit
+ * your changes.
+ *
+ * If you want to reset to the default value, write "r" (reset) to the file
+ * to reset it.
+ *
+ * This setting works under auto fan control mode only. It adjusts the
+ * maximum speed in RPM the PMFW will let the fan spin. Setting via this
+ * interface will switch the fan control to auto mode implicitly.
+ */
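(The same single-integer pattern also applies to acoustic_target_rpm_threshold,
fan_target_temperature and fan_minimum_pwm below. A sketch, not part of the
patch, with an assumed 3000 RPM limit and card0 path.)

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		const char *f = "/sys/class/drm/card0/device/gpu_od/"
				"fan_ctrl/acoustic_limit_rpm_threshold";
		int fd = open(f, O_WRONLY);

		if (fd < 0)
			return 1;
		write(fd, "3000", 4); /* new RPM ceiling */
		write(fd, "c", 1);    /* commit; fan control stays in auto mode */
		close(fd);
		return 0;
	}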
+static ssize_t acoustic_limit_threshold_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
+ struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
+
+ return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_ACOUSTIC_LIMIT, buf);
+}
+
+static ssize_t acoustic_limit_threshold_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
+ struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
+
+ return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
+ PP_OD_EDIT_ACOUSTIC_LIMIT,
+ buf,
+ count);
+}
+
+static umode_t acoustic_limit_threshold_visible(struct amdgpu_device *adev)
+{
+ umode_t umode = 0000;
+
+ if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE)
+ umode |= S_IRUSR | S_IRGRP | S_IROTH;
+
+ if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET)
+ umode |= S_IWUSR;
+
+ return umode;
+}
+
+/**
+ * DOC: acoustic_target_rpm_threshold
+ *
+ * The amdgpu driver provides a sysfs API for checking and adjusting the
+ * acoustic target in RPM for fan control.
+ *
+ * Reading back the file shows you the current setting and the permitted
+ * ranges, if changeable.
+ *
+ * Writing an integer to the file changes the setting accordingly.
+ *
+ * When you have finished editing, write "c" (commit) to the file to commit
+ * your changes.
+ *
+ * If you want to reset to the default value, write "r" (reset) to the file
+ * to reset it.
+ *
+ * This setting works under auto fan control mode only. It can coexist with
+ * other settings which also work under auto mode. It adjusts the maximum
+ * speed in RPM the PMFW will let the fan spin while the ASIC temperature is
+ * not greater than the target temperature. Setting via this interface will
+ * switch the fan control to auto mode implicitly.
+ */
+static ssize_t acoustic_target_threshold_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
+ struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
+
+ return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_ACOUSTIC_TARGET, buf);
+}
+
+static ssize_t acoustic_target_threshold_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
+ struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
+
+ return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
+ PP_OD_EDIT_ACOUSTIC_TARGET,
+ buf,
+ count);
+}
+
+static umode_t acoustic_target_threshold_visible(struct amdgpu_device *adev)
+{
+ umode_t umode = 0000;
+
+ if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE)
+ umode |= S_IRUSR | S_IRGRP | S_IROTH;
+
+ if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET)
+ umode |= S_IWUSR;
+
+ return umode;
+}
+
+/**
+ * DOC: fan_target_temperature
+ *
+ * The amdgpu driver provides a sysfs API for checking and adjusting the
+ * target temperature in degrees Celsius for fan control.
+ *
+ * Reading back the file shows you the current setting and the permitted
+ * ranges, if changeable.
+ *
+ * Writing an integer to the file changes the setting accordingly.
+ *
+ * When you have finished editing, write "c" (commit) to the file to commit
+ * your changes.
+ *
+ * If you want to reset to the default value, write "r" (reset) to the file
+ * to reset it.
+ *
+ * This setting works under auto fan control mode only. It can coexist with
+ * other settings which also work under auto mode. Paired with the
+ * acoustic_target_rpm_threshold setting, it defines the maximum speed in
+ * RPM the fan can spin while the ASIC temperature is not greater than the
+ * target temperature. Setting via this interface will switch the fan
+ * control to auto mode implicitly.
+ */
+static ssize_t fan_target_temperature_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
+ struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
+
+ return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_TARGET_TEMPERATURE, buf);
+}
+
+static ssize_t fan_target_temperature_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
+ struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
+
+ return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
+ PP_OD_EDIT_FAN_TARGET_TEMPERATURE,
+ buf,
+ count);
+}
+
+static umode_t fan_target_temperature_visible(struct amdgpu_device *adev)
{
+ umode_t umode = 0000;
+
+ if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE)
+ umode |= S_IRUSR | S_IRGRP | S_IROTH;
+
+ if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET)
+ umode |= S_IWUSR;
+
+ return umode;
+}
+
+/**
+ * DOC: fan_minimum_pwm
+ *
+ * The amdgpu driver provides a sysfs API for checking and adjusting the
+ * minimum fan speed in PWM.
+ *
+ * Reading back the file shows you the current setting and the permitted
+ * ranges, if changeable.
+ *
+ * Writing an integer to the file changes the setting accordingly.
+ *
+ * When you have finished editing, write "c" (commit) to the file to commit
+ * your changes.
+ *
+ * If you want to reset to the default value, write "r" (reset) to the file
+ * to reset it.
+ *
+ * This setting works under auto fan control mode only. It can coexist with
+ * other settings which also work under auto mode. It adjusts the minimum
+ * fan speed in PWM the PMFW will enforce. Setting via this interface will
+ * switch the fan control to auto mode implicitly.
+ */
+static ssize_t fan_minimum_pwm_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
+ struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
+
+ return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_MINIMUM_PWM, buf);
+}
+
+static ssize_t fan_minimum_pwm_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
+ struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
+
+ return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
+ PP_OD_EDIT_FAN_MINIMUM_PWM,
+ buf,
+ count);
+}
+
+static umode_t fan_minimum_pwm_visible(struct amdgpu_device *adev)
+{
+ umode_t umode = 0000;
+
+ if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE)
+ umode |= S_IRUSR | S_IRGRP | S_IROTH;
+
+ if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET)
+ umode |= S_IWUSR;
+
+ return umode;
+}
+
+static struct od_feature_set amdgpu_od_set = {
+ .containers = {
+ [0] = {
+ .name = "fan_ctrl",
+ .sub_feature = {
+ [0] = {
+ .name = "fan_curve",
+ .ops = {
+ .is_visible = fan_curve_visible,
+ .show = fan_curve_show,
+ .store = fan_curve_store,
+ },
+ },
+ [1] = {
+ .name = "acoustic_limit_rpm_threshold",
+ .ops = {
+ .is_visible = acoustic_limit_threshold_visible,
+ .show = acoustic_limit_threshold_show,
+ .store = acoustic_limit_threshold_store,
+ },
+ },
+ [2] = {
+ .name = "acoustic_target_rpm_threshold",
+ .ops = {
+ .is_visible = acoustic_target_threshold_visible,
+ .show = acoustic_target_threshold_show,
+ .store = acoustic_target_threshold_store,
+ },
+ },
+ [3] = {
+ .name = "fan_target_temperature",
+ .ops = {
+ .is_visible = fan_target_temperature_visible,
+ .show = fan_target_temperature_show,
+ .store = fan_target_temperature_store,
+ },
+ },
+ [4] = {
+ .name = "fan_minimum_pwm",
+ .ops = {
+ .is_visible = fan_minimum_pwm_visible,
+ .show = fan_minimum_pwm_show,
+ .store = fan_minimum_pwm_store,
+ },
+ },
+ },
+ },
+ },
+};
+
+static void od_kobj_release(struct kobject *kobj)
+{
+ struct od_kobj *od_kobj = container_of(kobj, struct od_kobj, kobj);
+
+ kfree(od_kobj);
+}
+
+static const struct kobj_type od_ktype = {
+ .release = od_kobj_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+static void amdgpu_od_set_fini(struct amdgpu_device *adev)
+{
+ struct od_kobj *container, *container_next;
+ struct od_attribute *attribute, *attribute_next;
+
+ if (list_empty(&adev->pm.od_kobj_list))
+ return;
+
+ list_for_each_entry_safe(container, container_next,
+ &adev->pm.od_kobj_list, entry) {
+ list_del(&container->entry);
+
+ list_for_each_entry_safe(attribute, attribute_next,
+ &container->attribute, entry) {
+ list_del(&attribute->entry);
+ sysfs_remove_file(&container->kobj,
+ &attribute->attribute.attr);
+ kfree(attribute);
+ }
+
+ kobject_put(&container->kobj);
+ }
+}
+
+static bool amdgpu_is_od_feature_supported(struct amdgpu_device *adev,
+ struct od_feature_ops *feature_ops)
+{
+ umode_t mode;
+
+ if (!feature_ops->is_visible)
+ return false;
+
+ /*
+	 * If the feature has neither user read nor write mode set, we can
+	 * assume it is actually not supported and the relevant sysfs
+	 * interface should not be exposed.
+ */
+ mode = feature_ops->is_visible(adev);
+ if (mode & (S_IRUSR | S_IWUSR))
+ return true;
+
+ return false;
+}
+
+static bool amdgpu_od_is_self_contained(struct amdgpu_device *adev,
+ struct od_feature_container *container)
+{
+ int i;
+
+ /*
+	 * If there is no valid entry within the container, the container
+	 * is recognized as self-contained. A valid entry here means one
+	 * that has a valid name and is visible/supported by the ASIC.
+ */
+ for (i = 0; i < ARRAY_SIZE(container->sub_feature); i++) {
+ if (container->sub_feature[i].name &&
+ amdgpu_is_od_feature_supported(adev,
+ &container->sub_feature[i].ops))
+ return false;
+ }
+
+ return true;
+}
+
+static int amdgpu_od_set_init(struct amdgpu_device *adev)
+{
+ struct od_kobj *top_set, *sub_set;
+ struct od_attribute *attribute;
+ struct od_feature_container *container;
+ struct od_feature_item *feature;
+ int i, j;
int ret;
+
+	/* Set up the top `gpu_od` directory which holds all other OD interfaces */
+ top_set = kzalloc(sizeof(*top_set), GFP_KERNEL);
+ if (!top_set)
+ return -ENOMEM;
+ list_add(&top_set->entry, &adev->pm.od_kobj_list);
+
+ ret = kobject_init_and_add(&top_set->kobj,
+ &od_ktype,
+ &adev->dev->kobj,
+ "%s",
+ "gpu_od");
+ if (ret)
+ goto err_out;
+ INIT_LIST_HEAD(&top_set->attribute);
+ top_set->priv = adev;
+
+ for (i = 0; i < ARRAY_SIZE(amdgpu_od_set.containers); i++) {
+ container = &amdgpu_od_set.containers[i];
+
+ if (!container->name)
+ continue;
+
+ /*
+		 * If there are valid entries within the container, the container
+		 * will be presented as a sub directory and all the entries it
+		 * holds will be presented as plain files under it. If there is
+		 * no valid entry within the container, the container itself will
+		 * be presented as a plain file under the top `gpu_od` directory.
+ */
+ if (amdgpu_od_is_self_contained(adev, container)) {
+ if (!amdgpu_is_od_feature_supported(adev,
+ &container->ops))
+ continue;
+
+ /*
+ * The container is presented as a plain file under top `gpu_od`
+ * directory.
+ */
+ attribute = kzalloc(sizeof(*attribute), GFP_KERNEL);
+ if (!attribute) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+ list_add(&attribute->entry, &top_set->attribute);
+
+ attribute->attribute.attr.mode =
+ container->ops.is_visible(adev);
+ attribute->attribute.attr.name = container->name;
+ attribute->attribute.show =
+ container->ops.show;
+ attribute->attribute.store =
+ container->ops.store;
+ ret = sysfs_create_file(&top_set->kobj,
+ &attribute->attribute.attr);
+ if (ret)
+ goto err_out;
+ } else {
+ /* The container is presented as a sub directory. */
+ sub_set = kzalloc(sizeof(*sub_set), GFP_KERNEL);
+ if (!sub_set) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+ list_add(&sub_set->entry, &adev->pm.od_kobj_list);
+
+ ret = kobject_init_and_add(&sub_set->kobj,
+ &od_ktype,
+ &top_set->kobj,
+ "%s",
+ container->name);
+ if (ret)
+ goto err_out;
+ INIT_LIST_HEAD(&sub_set->attribute);
+ sub_set->priv = adev;
+
+ for (j = 0; j < ARRAY_SIZE(container->sub_feature); j++) {
+ feature = &container->sub_feature[j];
+ if (!feature->name)
+ continue;
+
+ if (!amdgpu_is_od_feature_supported(adev,
+ &feature->ops))
+ continue;
+
+ /*
+				 * With the container presented as a sub directory, each
+				 * entry within it is presented as a plain file under that
+				 * sub directory.
+ */
+ attribute = kzalloc(sizeof(*attribute), GFP_KERNEL);
+ if (!attribute) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+ list_add(&attribute->entry, &sub_set->attribute);
+
+ attribute->attribute.attr.mode =
+ feature->ops.is_visible(adev);
+ attribute->attribute.attr.name = feature->name;
+ attribute->attribute.show =
+ feature->ops.show;
+ attribute->attribute.store =
+ feature->ops.store;
+ ret = sysfs_create_file(&sub_set->kobj,
+ &attribute->attribute.attr);
+ if (ret)
+ goto err_out;
+ }
+ }
+ }
+
+ return 0;
+
+err_out:
+ amdgpu_od_set_fini(adev);
+
+ return ret;
+}
+
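(With every feature in amdgpu_od_set supported, amdgpu_od_set_init() builds
the following sysfs layout under the device directory; illustrative only. A
container with no valid sub-entries would instead appear as a plain file
directly under gpu_od, per the self-contained logic above.)

	gpu_od/
	└── fan_ctrl/
	    ├── fan_curve
	    ├── acoustic_limit_rpm_threshold
	    ├── acoustic_target_rpm_threshold
	    ├── fan_target_temperature
	    └── fan_minimum_pwm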
+int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
+{
+ enum amdgpu_sriov_vf_mode mode;
uint32_t mask = 0;
+ int ret;
if (adev->pm.sysfs_initialized)
return 0;
@@ -3374,17 +4189,21 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
if (adev->pm.dpm_enabled == 0)
return 0;
- adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
- DRIVER_NAME, adev,
- hwmon_groups);
- if (IS_ERR(adev->pm.int_hwmon_dev)) {
- ret = PTR_ERR(adev->pm.int_hwmon_dev);
- dev_err(adev->dev,
- "Unable to register hwmon device: %d\n", ret);
- return ret;
+ mode = amdgpu_virt_get_sriov_vf_mode(adev);
+
+ /* under multi-vf mode, the hwmon attributes are all not supported */
+ if (mode != SRIOV_VF_MODE_MULTI_VF) {
+ adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
+ DRIVER_NAME, adev,
+ hwmon_groups);
+ if (IS_ERR(adev->pm.int_hwmon_dev)) {
+ ret = PTR_ERR(adev->pm.int_hwmon_dev);
+ dev_err(adev->dev, "Unable to register hwmon device: %d\n", ret);
+ return ret;
+ }
}
- switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
+ switch (mode) {
case SRIOV_VF_MODE_ONE_VF:
mask = ATTR_FLAG_ONEVF;
break;
@@ -3403,15 +4222,31 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
mask,
&adev->pm.pm_attr_list);
if (ret)
- return ret;
+ goto err_out0;
+
+ if (amdgpu_dpm_is_overdrive_supported(adev)) {
+ ret = amdgpu_od_set_init(adev);
+ if (ret)
+ goto err_out1;
+ }
adev->pm.sysfs_initialized = true;
return 0;
+
+err_out1:
+ amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
+err_out0:
+ if (adev->pm.int_hwmon_dev)
+ hwmon_device_unregister(adev->pm.int_hwmon_dev);
+
+ return ret;
}
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
+ amdgpu_od_set_fini(adev);
+
if (adev->pm.int_hwmon_dev)
hwmon_device_unregister(adev->pm.int_hwmon_dev);
@@ -3448,8 +4283,8 @@ static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
- uint32_t mp1_ver = adev->ip_versions[MP1_HWIP][0];
- uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
+ uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
+ uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
uint32_t value;
uint64_t value64 = 0;
uint32_t query = 0;
@@ -3475,10 +4310,10 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
seq_printf(m, "\t%u mV (VDDNB)\n", value);
size = sizeof(uint32_t);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size))
- seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
+ seq_printf(m, "\t%u.%02u W (average GPU)\n", query >> 8, query & 0xff);
size = sizeof(uint32_t);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size))
- seq_printf(m, "\t%u.%u W (current GPU)\n", query >> 8, query & 0xff);
+ seq_printf(m, "\t%u.%02u W (current GPU)\n", query >> 8, query & 0xff);
size = sizeof(value);
seq_printf(m, "\n");
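(The padding fix matters because the low byte carries the fractional watts,
hundredths as the %02u format implies; for example:)

	/* query = (18 << 8) | 5: "%u.%u" printed "18.5 W", "%u.%02u" prints "18.05 W" */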
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 42172b00b..482ea3014 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -314,6 +314,17 @@ struct config_table_setting
uint16_t fclk_average_tau;
};
+#define OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE BIT(0)
+#define OD_OPS_SUPPORT_FAN_CURVE_SET BIT(1)
+#define OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE BIT(2)
+#define OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET BIT(3)
+#define OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE BIT(4)
+#define OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET BIT(5)
+#define OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE BIT(6)
+#define OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET BIT(7)
+#define OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE BIT(8)
+#define OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET BIT(9)
+
struct amdgpu_pm {
struct mutex mutex;
u32 current_sclk;
@@ -366,6 +377,9 @@ struct amdgpu_pm {
struct config_table_setting config_table;
/* runtime mode */
enum amdgpu_runpm_mode rpm_mode;
+
+ struct list_head od_kobj_list;
+ uint32_t od_feature_mask;
};
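(A backend advertises the overdrive knobs it implements by OR-ing these bits
into adev->pm.od_feature_mask; a hypothetical sketch, since the real
assignments live in the SMU ppt code outside this hunk:)

	static void example_advertise_fan_od(struct amdgpu_device *adev)
	{
		adev->pm.od_feature_mask |= OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE |
					    OD_OPS_SUPPORT_FAN_CURVE_SET |
					    OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE;
	}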
int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
@@ -401,6 +415,8 @@ int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev);
int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
enum pp_mp1_state mp1_state);
+int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en);
+
int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev);
int amdgpu_dpm_baco_exit(struct amdgpu_device *adev);
@@ -410,7 +426,10 @@ int amdgpu_dpm_baco_enter(struct amdgpu_device *adev);
int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
uint32_t cstate);
-int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en);
+int amdgpu_dpm_get_xgmi_plpd_mode(struct amdgpu_device *adev,
+ char **mode);
+
+int amdgpu_dpm_set_xgmi_plpd_mode(struct amdgpu_device *adev, int mode);
int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index f81e4bd48..df4f20293 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -6600,7 +6600,7 @@ static int si_dpm_get_fan_speed_pwm(void *handle,
tmp64 = (u64)duty * 255;
do_div(tmp64, duty100);
- *speed = MIN((u32)tmp64, 255);
+ *speed = min_t(u32, tmp64, 255);
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 9e4f8a410..914c15387 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -1022,6 +1022,9 @@ static int pp_get_power_limit(void *handle, uint32_t *limit,
*limit /= 100;
}
break;
+ case PP_PWR_LIMIT_MIN:
+ *limit = 0;
+ break;
default:
ret = -EOPNOTSUPP;
break;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
index 9fcad69a9..2cf2a7b12 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
@@ -367,7 +367,7 @@ typedef struct _ATOM_Tonga_VCE_State_Record {
typedef struct _ATOM_Tonga_VCE_State_Table {
UCHAR ucRevId;
UCHAR ucNumEntries;
- ATOM_Tonga_VCE_State_Record entries[1];
+ ATOM_Tonga_VCE_State_Record entries[];
} ATOM_Tonga_VCE_State_Table;
typedef struct _ATOM_Tonga_PowerTune_Table {
@@ -481,7 +481,7 @@ typedef struct _ATOM_Tonga_Hard_Limit_Record {
typedef struct _ATOM_Tonga_Hard_Limit_Table {
UCHAR ucRevId;
UCHAR ucNumEntries;
- ATOM_Tonga_Hard_Limit_Record entries[1];
+ ATOM_Tonga_Hard_Limit_Record entries[];
} ATOM_Tonga_Hard_Limit_Table;
typedef struct _ATOM_Tonga_GPIO_Table {
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
index f2a55c141..17882f8df 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
@@ -200,7 +200,7 @@ static int get_platform_power_management_table(
struct pp_hwmgr *hwmgr,
ATOM_Tonga_PPM_Table *atom_ppm_table)
{
- struct phm_ppm_table *ptr = kzalloc(sizeof(ATOM_Tonga_PPM_Table), GFP_KERNEL);
+ struct phm_ppm_table *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
struct phm_ppt_v1_information *pp_table_information =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h
index 808e0ecbe..42adc2a3d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h
@@ -192,7 +192,7 @@ struct smu10_clock_voltage_dependency_record {
struct smu10_voltage_dependency_table {
uint32_t count;
- struct smu10_clock_voltage_dependency_record entries[];
+ struct smu10_clock_voltage_dependency_record entries[] __counted_by(count);
};
struct smu10_clock_voltage_information {
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index b1a8799e2..aa91730e4 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -3999,6 +3999,7 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
uint32_t sclk, mclk, activity_percent;
uint32_t offset, val_vid;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct amdgpu_device *adev = hwmgr->adev;
/* size must be at least 4 bytes for all sensors */
if (*size < 4)
@@ -4042,7 +4043,21 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*size = 4;
return 0;
case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
- return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
+ if ((adev->asic_type != CHIP_HAWAII) &&
+ (adev->asic_type != CHIP_BONAIRE) &&
+ (adev->asic_type != CHIP_FIJI) &&
+ (adev->asic_type != CHIP_TONGA))
+ return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
+ else
+ return -EOPNOTSUPP;
+ case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
+ if ((adev->asic_type != CHIP_HAWAII) &&
+ (adev->asic_type != CHIP_BONAIRE) &&
+ (adev->asic_type != CHIP_FIJI) &&
+ (adev->asic_type != CHIP_TONGA))
+ return -EOPNOTSUPP;
+ else
+ return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
case AMDGPU_PP_SENSOR_VDDGFX:
if ((data->vr_config & VRCONF_VDDGFX_MASK) ==
(VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT))
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
index a6c3610db..a8fc0fa44 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
@@ -72,7 +72,7 @@ int smu7_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
tmp64 = (uint64_t)duty * 255;
do_div(tmp64, duty100);
- *speed = MIN((uint32_t)tmp64, 255);
+ *speed = min_t(uint32_t, tmp64, 255);
return 0;
}
@@ -210,7 +210,7 @@ int smu7_fan_ctrl_set_fan_speed_pwm(struct pp_hwmgr *hwmgr,
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0;
- speed = MIN(speed, 255);
+ speed = min_t(uint32_t, speed, 255);
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h
index 8b0590b83..de2926df5 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h
@@ -129,7 +129,7 @@ typedef struct _ATOM_Vega10_State {
typedef struct _ATOM_Vega10_State_Array {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Vega10_State states[1]; /* Dynamically allocate entries. */
+ ATOM_Vega10_State states[]; /* Dynamically allocate entries. */
} ATOM_Vega10_State_Array;
typedef struct _ATOM_Vega10_CLK_Dependency_Record {
@@ -169,37 +169,37 @@ typedef struct _ATOM_Vega10_GFXCLK_Dependency_Table {
typedef struct _ATOM_Vega10_MCLK_Dependency_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Vega10_MCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Vega10_MCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Vega10_MCLK_Dependency_Table;
typedef struct _ATOM_Vega10_SOCCLK_Dependency_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Vega10_CLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Vega10_SOCCLK_Dependency_Table;
typedef struct _ATOM_Vega10_DCEFCLK_Dependency_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Vega10_CLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Vega10_DCEFCLK_Dependency_Table;
typedef struct _ATOM_Vega10_PIXCLK_Dependency_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Vega10_CLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Vega10_PIXCLK_Dependency_Table;
typedef struct _ATOM_Vega10_DISPCLK_Dependency_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries.*/
- ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Vega10_CLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Vega10_DISPCLK_Dependency_Table;
typedef struct _ATOM_Vega10_PHYCLK_Dependency_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Vega10_CLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Vega10_PHYCLK_Dependency_Table;
typedef struct _ATOM_Vega10_MM_Dependency_Record {
@@ -213,7 +213,7 @@ typedef struct _ATOM_Vega10_MM_Dependency_Record {
typedef struct _ATOM_Vega10_MM_Dependency_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries */
- ATOM_Vega10_MM_Dependency_Record entries[1]; /* Dynamically allocate entries */
+ ATOM_Vega10_MM_Dependency_Record entries[]; /* Dynamically allocate entries */
} ATOM_Vega10_MM_Dependency_Table;
typedef struct _ATOM_Vega10_PCIE_Record {
@@ -225,7 +225,7 @@ typedef struct _ATOM_Vega10_PCIE_Record {
typedef struct _ATOM_Vega10_PCIE_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries */
- ATOM_Vega10_PCIE_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Vega10_PCIE_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Vega10_PCIE_Table;
typedef struct _ATOM_Vega10_Voltage_Lookup_Record {
@@ -235,7 +235,7 @@ typedef struct _ATOM_Vega10_Voltage_Lookup_Record {
typedef struct _ATOM_Vega10_Voltage_Lookup_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries */
- ATOM_Vega10_Voltage_Lookup_Record entries[1]; /* Dynamically allocate entries */
+ ATOM_Vega10_Voltage_Lookup_Record entries[]; /* Dynamically allocate entries */
} ATOM_Vega10_Voltage_Lookup_Table;
typedef struct _ATOM_Vega10_Fan_Table {
@@ -327,7 +327,7 @@ typedef struct _ATOM_Vega10_VCE_State_Record {
typedef struct _ATOM_Vega10_VCE_State_Table {
UCHAR ucRevId;
UCHAR ucNumEntries;
- ATOM_Vega10_VCE_State_Record entries[1];
+ ATOM_Vega10_VCE_State_Record entries[];
} ATOM_Vega10_VCE_State_Table;
typedef struct _ATOM_Vega10_PowerTune_Table {
@@ -427,7 +427,7 @@ typedef struct _ATOM_Vega10_Hard_Limit_Record {
typedef struct _ATOM_Vega10_Hard_Limit_Table {
UCHAR ucRevId;
UCHAR ucNumEntries;
- ATOM_Vega10_Hard_Limit_Record entries[1];
+ ATOM_Vega10_Hard_Limit_Record entries[];
} ATOM_Vega10_Hard_Limit_Table;
typedef struct _Vega10_PPTable_Generic_SubTable_Header {
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
index 190af79f3..379012494 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
@@ -81,7 +81,7 @@ int vega10_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
tmp64 = (uint64_t)duty * 255;
do_div(tmp64, duty100);
- *speed = MIN((uint32_t)tmp64, 255);
+ *speed = min_t(uint32_t, tmp64, 255);
return 0;
}
@@ -255,7 +255,7 @@ int vega10_fan_ctrl_set_fan_speed_pwm(struct pp_hwmgr *hwmgr,
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0;
- speed = MIN(speed, 255);
+ speed = min_t(uint32_t, speed, 255);
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
index e9737ca84..a3331ffb2 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
@@ -131,7 +131,7 @@ int vega20_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
tmp64 = (uint64_t)duty * 255;
do_div(tmp64, duty100);
- *speed = MIN((uint32_t)tmp64, 255);
+ *speed = min_t(uint32_t, tmp64, 255);
return 0;
}
@@ -144,7 +144,7 @@ int vega20_fan_ctrl_set_fan_speed_pwm(struct pp_hwmgr *hwmgr,
uint32_t duty;
uint64_t tmp64;
- speed = MIN(speed, 255);
+ speed = min_t(uint32_t, speed, 255);
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
vega20_fan_ctrl_stop_smc_fan_control(hwmgr);
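Editor's note: both thermal files above replace a local MIN() macro with min_t(), which casts both operands to one explicit type before comparing, so the 64-bit intermediate cannot be skewed by implicit promotion against a signed int constant. A standalone sketch of the clamp pattern; duty_to_pwm() is an illustrative name:

#include <linux/math64.h>	/* do_div() */
#include <linux/minmax.h>	/* min_t() */

/* Illustrative helper mirroring the fan-speed computation above: scale
 * duty into the 0-255 PWM range and clamp, with both sides of the
 * comparison pinned to u32 by min_t(). */
static u32 duty_to_pwm(u64 duty, u32 duty100)
{
	u64 tmp64 = duty * 255;

	do_div(tmp64, duty100);		/* tmp64 = duty * 255 / duty100 */
	return min_t(u32, tmp64, 255);	/* never exceeds the PWM max */
}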
diff --git a/drivers/gpu/drm/amd/pm/swsmu/Makefile b/drivers/gpu/drm/amd/pm/swsmu/Makefile
index 7987c6cf8..e5fdda49c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/Makefile
+++ b/drivers/gpu/drm/amd/pm/swsmu/Makefile
@@ -22,7 +22,7 @@
AMD_SWSMU_PATH = ../pm/swsmu
-SWSMU_LIBS = smu11 smu12 smu13
+SWSMU_LIBS = smu11 smu12 smu13 smu14
AMD_SWSMU = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/pm/swsmu/,$(SWSMU_LIBS)))
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 56e4c312c..60dce148b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -44,6 +44,7 @@
#include "smu_v13_0_5_ppt.h"
#include "smu_v13_0_6_ppt.h"
#include "smu_v13_0_7_ppt.h"
+#include "smu_v14_0_0_ppt.h"
#include "amd_pcie.h"
/*
@@ -216,6 +217,20 @@ static int smu_set_gfx_imu_enable(struct smu_context *smu)
return smu_set_gfx_power_up_by_imu(smu);
}
+static bool is_vcn_enabled(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_VCN ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_JPEG) &&
+ !adev->ip_blocks[i].status.valid)
+ return false;
+ }
+
+ return true;
+}
+
static int smu_dpm_set_vcn_enable(struct smu_context *smu,
bool enable)
{
@@ -223,6 +238,12 @@ static int smu_dpm_set_vcn_enable(struct smu_context *smu,
struct smu_power_gate *power_gate = &smu_power->power_gate;
int ret = 0;
+ /*
+ * Don't power on VCN/JPEG when they are skipped.
+ */
+ if (!is_vcn_enabled(smu->adev))
+ return 0;
+
if (!smu->ppt_funcs->dpm_set_vcn_enable)
return 0;
@@ -243,6 +264,9 @@ static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
struct smu_power_gate *power_gate = &smu_power->power_gate;
int ret = 0;
+ if (!is_vcn_enabled(smu->adev))
+ return 0;
+
if (!smu->ppt_funcs->dpm_set_jpeg_enable)
return 0;
@@ -256,6 +280,49 @@ static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
return ret;
}
+static int smu_dpm_set_vpe_enable(struct smu_context *smu,
+ bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret = 0;
+
+ if (!smu->ppt_funcs->dpm_set_vpe_enable)
+ return 0;
+
+ if (atomic_read(&power_gate->vpe_gated) ^ enable)
+ return 0;
+
+ ret = smu->ppt_funcs->dpm_set_vpe_enable(smu, enable);
+ if (!ret)
+ atomic_set(&power_gate->vpe_gated, !enable);
+
+ return ret;
+}
+
+static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
+ bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret = 0;
+
+ if (!smu->adev->enable_umsch_mm)
+ return 0;
+
+ if (!smu->ppt_funcs->dpm_set_umsch_mm_enable)
+ return 0;
+
+ if (atomic_read(&power_gate->umsch_mm_gated) ^ enable)
+ return 0;
+
+ ret = smu->ppt_funcs->dpm_set_umsch_mm_enable(smu, enable);
+ if (!ret)
+ atomic_set(&power_gate->umsch_mm_gated, !enable);
+
+ return ret;
+}
+
/**
* smu_dpm_set_power_gate - power gate/ungate the specific IP block
*
@@ -314,6 +381,12 @@ static int smu_dpm_set_power_gate(void *handle,
dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
gate ? "gate" : "ungate");
break;
+ case AMD_IP_BLOCK_TYPE_VPE:
+ ret = smu_dpm_set_vpe_enable(smu, !gate);
+ if (ret)
+ dev_err(smu->adev->dev, "Failed to power %s VPE!\n",
+ gate ? "gate" : "ungate");
+ break;
default:
dev_err(smu->adev->dev, "Unsupported block type!\n");
return -EINVAL;
@@ -463,7 +536,7 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_VEGA20)
return false;
- if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0))
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0))
return true;
return false;
@@ -581,7 +654,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
smu->od_enabled = true;
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 9):
@@ -638,6 +711,9 @@ static int smu_set_funcs(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 7):
smu_v13_0_7_set_ppt_funcs(smu);
break;
+ case IP_VERSION(14, 0, 0):
+ smu_v14_0_0_set_ppt_funcs(smu);
+ break;
default:
return -EINVAL;
}
@@ -745,8 +821,8 @@ static int smu_late_init(void *handle)
adev->pm.ac_power = power_supply_is_system_supplied() > 0;
smu_set_ac_dc(smu);
- if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) ||
- (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3)))
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 1)) ||
+ (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 3)))
return 0;
if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
@@ -766,7 +842,8 @@ static int smu_late_init(void *handle)
ret = smu_get_asic_power_limits(smu,
&smu->current_power_limit,
&smu->default_power_limit,
- &smu->max_power_limit);
+ &smu->max_power_limit,
+ &smu->min_power_limit);
if (ret) {
dev_err(adev->dev, "Failed to get asic power limits!\n");
return ret;
@@ -1100,6 +1177,21 @@ static void smu_swctf_delayed_work_handler(struct work_struct *work)
orderly_poweroff(true);
}
+static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
+{
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {
+ smu->plpd_mode = XGMI_PLPD_DEFAULT;
+ return;
+ }
+
+ /* PMFW puts PLPD into the default policy after enabling the feature */
+ if (smu_feature_is_enabled(smu,
+ SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT))
+ smu->plpd_mode = XGMI_PLPD_DEFAULT;
+ else
+ smu->plpd_mode = XGMI_PLPD_NONE;
+}
+
static int smu_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1120,6 +1212,8 @@ static int smu_sw_init(void *handle)
atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
+ atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
+ atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
@@ -1229,7 +1323,7 @@ static int smu_smc_hw_setup(struct smu_context *smu)
uint64_t features_supported;
int ret = 0;
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 5, 0):
@@ -1331,6 +1425,8 @@ static int smu_smc_hw_setup(struct smu_context *smu)
return ret;
}
+ smu_init_xgmi_plpd_mode(smu);
+
ret = smu_feature_get_enabled_mask(smu, &features_supported);
if (ret) {
dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
@@ -1419,7 +1515,7 @@ static int smu_start_smc_engine(struct smu_context *smu)
int ret = 0;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
- if (adev->ip_versions[MP1_HWIP][0] < IP_VERSION(11, 0, 0)) {
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) < IP_VERSION(11, 0, 0)) {
if (smu->ppt_funcs->load_microcode) {
ret = smu->ppt_funcs->load_microcode(smu);
if (ret)
@@ -1470,6 +1566,8 @@ static int smu_hw_init(void *handle)
return ret;
smu_dpm_set_vcn_enable(smu, true);
smu_dpm_set_jpeg_enable(smu, true);
+ smu_dpm_set_vpe_enable(smu, true);
+ smu_dpm_set_umsch_mm_enable(smu, true);
smu_set_gfx_cgpg(smu, true);
}
@@ -1519,7 +1617,7 @@ static int smu_disable_dpms(struct smu_context *smu)
* For SMU 13.0.0 and 13.0.7, PMFW will handle the DPM features (disablement or others)
* properly on suspend/reset/unload. Driver involvement may cause some unexpected issues.
*/
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 10):
@@ -1540,7 +1638,7 @@ static int smu_disable_dpms(struct smu_context *smu)
* properly.
*/
if (smu->uploading_custom_pp_table) {
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 9):
@@ -1560,7 +1658,7 @@ static int smu_disable_dpms(struct smu_context *smu)
* on BACO in. Driver involvement is unnecessary.
*/
if (use_baco) {
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 5):
@@ -1573,13 +1671,14 @@ static int smu_disable_dpms(struct smu_context *smu)
}
/*
- * For SMU 13.0.4/11, PMFW will handle the features disablement properly
+ * For SMU 13.0.4/11 and 14.0.0, PMFW will handle the feature disablement properly
* for gpu reset and S0i3 cases. Driver involvement is unnecessary.
*/
if (amdgpu_in_reset(adev) || adev->in_s0ix) {
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 4):
case IP_VERSION(13, 0, 11):
+ case IP_VERSION(14, 0, 0):
return 0;
default:
break;
@@ -1604,7 +1703,18 @@ static int smu_disable_dpms(struct smu_context *smu)
}
}
- if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) &&
+ /* Notify SMU that RLC is going to be off and stop RLC/SMU interaction;
+ * otherwise SMU will hang while interacting with RLC if RLC is halted.
+ * This is a workaround for the SMU hang issue on the Vangogh ASIC.
+ */
+ ret = smu_notify_rlc_state(smu, false);
+ if (ret) {
+ dev_err(adev->dev, "Fail to notify rlc status!\n");
+ return ret;
+ }
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2) &&
+ !((adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs) &&
!amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop)
adev->gfx.rlc.funcs->stop(adev);
@@ -1646,6 +1756,8 @@ static int smu_hw_fini(void *handle)
smu_dpm_set_vcn_enable(smu, false);
smu_dpm_set_jpeg_enable(smu, false);
+ smu_dpm_set_vpe_enable(smu, false);
+ smu_dpm_set_umsch_mm_enable(smu, false);
adev->vcn.cur_state = AMD_PG_STATE_GATE;
adev->jpeg.cur_state = AMD_PG_STATE_GATE;
@@ -2131,23 +2243,6 @@ static int smu_set_df_cstate(void *handle,
return ret;
}
-int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
-{
- int ret = 0;
-
- if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
- return -EOPNOTSUPP;
-
- if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
- return 0;
-
- ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
- if (ret)
- dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");
-
- return ret;
-}
-
int smu_write_watermarks_table(struct smu_context *smu)
{
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2234,6 +2329,14 @@ const struct amdgpu_ip_block_version smu_v13_0_ip_block = {
.funcs = &smu_ip_funcs,
};
+const struct amdgpu_ip_block_version smu_v14_0_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 14,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &smu_ip_funcs,
+};
+
static int smu_load_microcode(void *handle)
{
struct smu_context *smu = handle;
@@ -2336,7 +2439,6 @@ int smu_get_power_limit(void *handle,
break;
default:
return -EOPNOTSUPP;
- break;
}
switch (pp_limit_level) {
@@ -2350,9 +2452,10 @@ int smu_get_power_limit(void *handle,
limit_level = SMU_PPT_LIMIT_MAX;
break;
case PP_PWR_LIMIT_MIN:
+ limit_level = SMU_PPT_LIMIT_MIN;
+ break;
default:
return -EOPNOTSUPP;
- break;
}
if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
@@ -2361,16 +2464,16 @@ int smu_get_power_limit(void *handle,
} else {
switch (limit_level) {
case SMU_PPT_LIMIT_CURRENT:
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 6):
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
ret = smu_get_asic_power_limits(smu,
&smu->current_power_limit,
- NULL,
- NULL);
+ NULL, NULL, NULL);
break;
default:
break;
@@ -2383,8 +2486,11 @@ int smu_get_power_limit(void *handle,
case SMU_PPT_LIMIT_MAX:
*limit = smu->max_power_limit;
break;
- default:
+ case SMU_PPT_LIMIT_MIN:
+ *limit = smu->min_power_limit;
break;
+ default:
+ return -EINVAL;
}
}
@@ -2405,10 +2511,10 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
if (smu->ppt_funcs->set_power_limit)
return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
- if (limit > smu->max_power_limit) {
+ if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) {
dev_err(smu->adev->dev,
- "New power limit (%d) is over the max allowed %d\n",
- limit, smu->max_power_limit);
+ "New power limit (%d) is out of range [%d,%d]\n",
+ limit, smu->min_power_limit, smu->max_power_limit);
return -EINVAL;
}
@@ -2474,6 +2580,16 @@ static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
clk_type = SMU_OD_VDDGFX_OFFSET; break;
case OD_CCLK:
clk_type = SMU_OD_CCLK; break;
+ case OD_FAN_CURVE:
+ clk_type = SMU_OD_FAN_CURVE; break;
+ case OD_ACOUSTIC_LIMIT:
+ clk_type = SMU_OD_ACOUSTIC_LIMIT; break;
+ case OD_ACOUSTIC_TARGET:
+ clk_type = SMU_OD_ACOUSTIC_TARGET; break;
+ case OD_FAN_TARGET_TEMPERATURE:
+ clk_type = SMU_OD_FAN_TARGET_TEMPERATURE; break;
+ case OD_FAN_MINIMUM_PWM:
+ clk_type = SMU_OD_FAN_MINIMUM_PWM; break;
default:
clk_type = SMU_CLK_COUNT; break;
}
@@ -2607,7 +2723,7 @@ unlock:
static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit)
{
- int ret = -EINVAL;
+ int ret = -EOPNOTSUPP;
struct smu_context *smu = handle;
if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit)
@@ -2618,7 +2734,7 @@ static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit)
static int smu_set_apu_thermal_limit(void *handle, uint32_t limit)
{
- int ret = -EINVAL;
+ int ret = -EOPNOTSUPP;
struct smu_context *smu = handle;
if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit)
@@ -3117,6 +3233,30 @@ static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
return 0;
}
+int smu_set_xgmi_plpd_mode(struct smu_context *smu,
+ enum pp_xgmi_plpd_mode mode)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return ret;
+
+ /* PLPD is not supported if the current mode is NONE */
+ if (smu->plpd_mode == XGMI_PLPD_NONE)
+ return ret;
+
+ if (smu->plpd_mode == mode)
+ return 0;
+
+ if (smu->ppt_funcs && smu->ppt_funcs->select_xgmi_plpd_policy)
+ ret = smu->ppt_funcs->select_xgmi_plpd_policy(smu, mode);
+
+ if (!ret)
+ smu->plpd_mode = mode;
+
+ return ret;
+}
+
static const struct amd_pm_funcs swsmu_pm_funcs = {
/* export for sysfs */
.set_fan_control_mode = smu_set_fan_control_mode,
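Editor's note: the new smu_dpm_set_vpe_enable()/smu_dpm_set_umsch_mm_enable() helpers above reuse the gating idiom of the VCN/JPEG paths: the gated flag is 1 while the block is power gated, so atomic_read(gated) ^ enable is nonzero exactly when the block is already in the requested state. A distilled sketch of that idiom; set_block_enable() and hw_toggle are stand-ins for the real structures:

#include <linux/atomic.h>

/* No-op unless the request actually changes state; on success, record
 * the new state (enabled => not gated). */
static int set_block_enable(atomic_t *gated,
			    int (*hw_toggle)(bool enable),
			    bool enable)
{
	int ret;

	if (atomic_read(gated) ^ enable)	/* already in requested state */
		return 0;

	ret = hw_toggle(enable);
	if (!ret)
		atomic_set(gated, !enable);

	return ret;
}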
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 72ed83632..e8329d157 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -374,6 +374,8 @@ struct smu_power_gate {
bool vce_gated;
atomic_t vcn_gated;
atomic_t jpeg_gated;
+ atomic_t vpe_gated;
+ atomic_t umsch_mm_gated;
};
struct smu_power_context {
@@ -498,6 +500,7 @@ struct smu_context {
uint32_t current_power_limit;
uint32_t default_power_limit;
uint32_t max_power_limit;
+ uint32_t min_power_limit;
/* soft pptable */
uint32_t ppt_offset_bytes;
@@ -563,6 +566,8 @@ struct smu_context {
u32 debug_resp_reg;
struct delayed_work swctf_delayed_work;
+
+ enum pp_xgmi_plpd_mode plpd_mode;
};
struct i2c_adapter;
@@ -817,9 +822,10 @@ struct pptable_funcs {
* @get_power_limit: Get the device's power limits.
*/
int (*get_power_limit)(struct smu_context *smu,
- uint32_t *current_power_limit,
- uint32_t *default_power_limit,
- uint32_t *max_power_limit);
+ uint32_t *current_power_limit,
+ uint32_t *default_power_limit,
+ uint32_t *max_power_limit,
+ uint32_t *min_power_limit);
/**
* @get_ppt_limit: Get the device's ppt limits.
@@ -833,10 +839,10 @@ struct pptable_funcs {
int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state);
/**
- * @allow_xgmi_power_down: Enable/disable external global memory
- * interconnect power down.
+ * @select_xgmi_plpd_policy: Select xgmi per-link power down policy.
*/
- int (*allow_xgmi_power_down)(struct smu_context *smu, bool en);
+ int (*select_xgmi_plpd_policy)(struct smu_context *smu,
+ enum pp_xgmi_plpd_mode mode);
/**
* @update_pcie_parameters: Update and upload the system's PCIe
@@ -1341,6 +1347,23 @@ struct pptable_funcs {
* @init_pptable_microcode: Prepare the pptable microcode to upload via PSP
*/
int (*init_pptable_microcode)(struct smu_context *smu);
+
+ /**
+ * @dpm_set_vpe_enable: Enable/disable VPE engine dynamic power
+ * management.
+ */
+ int (*dpm_set_vpe_enable)(struct smu_context *smu, bool enable);
+
+ /**
+ * @dpm_set_umsch_mm_enable: Enable/disable UMSCH engine dynamic power
+ * management.
+ */
+ int (*dpm_set_umsch_mm_enable)(struct smu_context *smu, bool enable);
+
+ /**
+ * @notify_rlc_state: Notify RLC power state to SMU.
+ */
+ int (*notify_rlc_state)(struct smu_context *smu, bool en);
};
typedef enum {
@@ -1384,6 +1407,16 @@ typedef enum {
METRICS_PCIE_WIDTH,
METRICS_CURR_FANPWM,
METRICS_CURR_SOCKETPOWER,
+ METRICS_AVERAGE_VPECLK,
+ METRICS_AVERAGE_IPUCLK,
+ METRICS_AVERAGE_MPIPUCLK,
+ METRICS_THROTTLER_RESIDENCY_PROCHOT,
+ METRICS_THROTTLER_RESIDENCY_SPL,
+ METRICS_THROTTLER_RESIDENCY_FPPT,
+ METRICS_THROTTLER_RESIDENCY_SPPT,
+ METRICS_THROTTLER_RESIDENCY_THM_CORE,
+ METRICS_THROTTLER_RESIDENCY_THM_GFX,
+ METRICS_THROTTLER_RESIDENCY_THM_SOC,
} MetricsMember_t;
enum smu_cmn2asic_mapping_type {
@@ -1483,7 +1516,8 @@ int smu_set_gfx_power_up_by_imu(struct smu_context *smu);
int smu_set_ac_dc(struct smu_context *smu);
-int smu_allow_xgmi_power_down(struct smu_context *smu, bool en);
+int smu_set_xgmi_plpd_mode(struct smu_context *smu,
+ enum pp_xgmi_plpd_mode mode);
int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value);
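Editor's note: with min_power_limit added, ->get_power_limit() now takes four optional output pointers, and callers such as smu_get_power_limit() above pass NULL for values they do not need. A hedged sketch of an implementation against the new signature; the constant limits are placeholders, since real ASIC code derives them from the powerplay table:

static int example_get_power_limit(struct smu_context *smu,
				   uint32_t *current_power_limit,
				   uint32_t *default_power_limit,
				   uint32_t *max_power_limit,
				   uint32_t *min_power_limit)
{
	uint32_t limit = 200;	/* placeholder: bootup PPT limit, watts */

	if (current_power_limit)
		*current_power_limit = limit;
	if (default_power_limit)
		*default_power_limit = limit;
	if (max_power_limit)
		*max_power_limit = limit;
	if (min_power_limit)
		*min_power_limit = 0;	/* 0 => no lower bound enforced */

	return 0;
}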
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h
index ca4a5e99c..ced348d2e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h
@@ -65,6 +65,94 @@ typedef enum {
#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT)
#define CMDCONFIG_READWRITE_MASK (1 << CMDCONFIG_READWRITE_BIT)
+typedef enum {
+ // MMHUB
+ CODE_DAGB0,
+ CODE_EA0 = 5,
+ CODE_UTCL2_ROUTER = 10,
+ CODE_VML2,
+ CODE_VML2_WALKER,
+ CODE_MMCANE,
+
+ // VCN
+ // VCN VCPU
+ CODE_VIDD,
+ CODE_VIDV,
+ // VCN JPEG
+ CODE_JPEG0S,
+ CODE_JPEG0D,
+ CODE_JPEG1S,
+ CODE_JPEG1D,
+ CODE_JPEG2S,
+ CODE_JPEG2D,
+ CODE_JPEG3S,
+ CODE_JPEG3D,
+ CODE_JPEG4S,
+ CODE_JPEG4D,
+ CODE_JPEG5S,
+ CODE_JPEG5D,
+ CODE_JPEG6S,
+ CODE_JPEG6D,
+ CODE_JPEG7S,
+ CODE_JPEG7D,
+ // VCN MMSCH
+ CODE_MMSCHD,
+
+ // SDMA
+ CODE_SDMA0,
+ CODE_SDMA1,
+ CODE_SDMA2,
+ CODE_SDMA3,
+
+ // SOC
+ CODE_HDP,
+ CODE_ATHUB,
+ CODE_IH,
+ CODE_XHUB_POISON,
+ CODE_SMN_SLVERR = 40,
+ CODE_WDT,
+
+ CODE_UNKNOWN = 42,
+ CODE_COUNT,
+} ERR_CODE_e;
+
+typedef enum {
+ // SH POISON FED
+ SH_FED_CODE,
+ // GCEA Pin UE_ERR regs
+ GCEA_CODE,
+ SQ_CODE,
+ LDS_CODE,
+ GDS_CODE,
+ SP0_CODE,
+ SP1_CODE,
+ TCC_CODE,
+ TCA_CODE,
+ TCX_CODE,
+ CPC_CODE,
+ CPF_CODE,
+ CPG_CODE,
+ SPI_CODE,
+ RLC_CODE,
+ // GCEA Pin, UE_EDC regs
+ SQC_CODE,
+ TA_CODE,
+ TD_CODE,
+ TCP_CODE,
+ TCI_CODE,
+ // GC Router
+ GC_ROUTER_CODE,
+ VML2_CODE,
+ VML2_WALKER_CODE,
+ ATCL2_CODE,
+ GC_CANE_CODE,
+
+ // SOC error codes 40-42 are common with ERR_CODE_e
+ MP5_CODE_SMN_SLVERR = 40,
+ MP5_CODE_UNKNOWN = 42,
+} GC_ERROR_CODE_e;
+
+
typedef struct {
uint8_t ReadWriteData; //Return data for read. Data to send for write
uint8_t CmdConfig; //Includes whether associated command should have a stop or restart command, and is a read or write
@@ -132,6 +220,9 @@ typedef struct {
#define THROTTLER_THERMAL_VR_BIT 3//VRHOT
#define THROTTLER_THERMAL_HBM_BIT 4
+#define ClearMcaOnRead_UE_FLAG_MASK 0x1
+#define ClearMcaOnRead_CE_POLL_MASK 0x2
+
// These defines are used with the following messages:
// SMC_MSG_TransferTableDram2Smu
// SMC_MSG_TransferTableSmu2Dram
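Editor's note: the two ClearMcaOnRead_* masks above select which MCA bank classes are cleared on read. A hypothetical sketch of composing them into the argument of the new PPSMC_MSG_ClearMcaOnRead message, using the existing smu_cmn_send_smc_msg_with_param() helper; the wrapper name is illustrative:

static int example_clear_mca_on_read(struct smu_context *smu)
{
	uint32_t arg = ClearMcaOnRead_UE_FLAG_MASK |
		       ClearMcaOnRead_CE_POLL_MASK;

	/* SMU_MSG_ClearMcaOnRead is the driver-side mapping added to
	 * smu_types.h later in this patch. */
	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ClearMcaOnRead,
					       arg, NULL);
}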
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
new file mode 100644
index 000000000..8f42771e1
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
@@ -0,0 +1,237 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU14_DRIVER_IF_V14_0_0_H
+#define SMU14_DRIVER_IF_V14_0_0_H
+
+// *** IMPORTANT ***
+// SMU TEAM: Always increment the interface version if
+// any structure is changed in this file
+#define PMFW_DRIVER_IF_VERSION 7
+
+typedef struct {
+ int32_t value;
+ uint32_t numFractionalBits;
+} FloatInIntFormat_t;
+
+typedef enum {
+ DSPCLK_DCFCLK = 0,
+ DSPCLK_DISPCLK,
+ DSPCLK_PIXCLK,
+ DSPCLK_PHYCLK,
+ DSPCLK_COUNT,
+} DSPCLK_e;
+
+typedef struct {
+ uint16_t Freq; // in MHz
+ uint16_t Vid; // min voltage in SVI3 VID
+} DisplayClockTable_t;
+
+typedef struct {
+ uint16_t MinClock; // This is either DCFCLK or SOCCLK (in MHz)
+ uint16_t MaxClock; // This is either DCFCLK or SOCCLK (in MHz)
+ uint16_t MinMclk;
+ uint16_t MaxMclk;
+
+ uint8_t WmSetting;
+ uint8_t WmType; // Used for normal pstate change or memory retraining
+ uint8_t Padding[2];
+} WatermarkRowGeneric_t;
+
+#define NUM_WM_RANGES 4
+#define WM_PSTATE_CHG 0
+#define WM_RETRAINING 1
+
+typedef enum {
+ WM_SOCCLK = 0,
+ WM_DCFCLK,
+ WM_COUNT,
+} WM_CLOCK_e;
+
+typedef struct {
+ // Watermarks
+ WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES];
+
+ uint32_t MmHubPadding[7]; // SMU internal use
+} Watermarks_t;
+
+typedef enum {
+ CUSTOM_DPM_SETTING_GFXCLK,
+ CUSTOM_DPM_SETTING_CCLK,
+ CUSTOM_DPM_SETTING_FCLK_CCX,
+ CUSTOM_DPM_SETTING_FCLK_GFX,
+ CUSTOM_DPM_SETTING_FCLK_STALLS,
+ CUSTOM_DPM_SETTING_LCLK,
+ CUSTOM_DPM_SETTING_COUNT,
+} CUSTOM_DPM_SETTING_e;
+
+typedef struct {
+ uint8_t ActiveHystLimit;
+ uint8_t IdleHystLimit;
+ uint8_t FPS;
+ uint8_t MinActiveFreqType;
+ FloatInIntFormat_t MinActiveFreq;
+ FloatInIntFormat_t PD_Data_limit;
+ FloatInIntFormat_t PD_Data_time_constant;
+ FloatInIntFormat_t PD_Data_error_coeff;
+ FloatInIntFormat_t PD_Data_error_rate_coeff;
+} DpmActivityMonitorCoeffExt_t;
+
+typedef struct {
+ DpmActivityMonitorCoeffExt_t DpmActivityMonitorCoeff[CUSTOM_DPM_SETTING_COUNT];
+} CustomDpmSettings_t;
+
+#define NUM_DCFCLK_DPM_LEVELS 8
+#define NUM_DISPCLK_DPM_LEVELS 8
+#define NUM_DPPCLK_DPM_LEVELS 8
+#define NUM_SOCCLK_DPM_LEVELS 8
+#define NUM_VCN_DPM_LEVELS 8
+#define NUM_SOC_VOLTAGE_LEVELS 8
+#define NUM_VPE_DPM_LEVELS 8
+#define NUM_FCLK_DPM_LEVELS 8
+#define NUM_MEM_PSTATE_LEVELS 4
+
+
+typedef struct {
+ uint32_t UClk;
+ uint32_t MemClk;
+ uint32_t Voltage;
+ uint8_t WckRatio;
+ uint8_t Spare[3];
+} MemPstateTable_t;
+
+//Freq in MHz
+//Voltage in millivolts with 2 fractional bits
+typedef struct {
+ uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
+ uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
+ uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
+ uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
+ uint32_t VClocks[NUM_VCN_DPM_LEVELS];
+ uint32_t DClocks[NUM_VCN_DPM_LEVELS];
+ uint32_t VPEClocks[NUM_VPE_DPM_LEVELS];
+ uint32_t FclkClocks_Freq[NUM_FCLK_DPM_LEVELS];
+ uint32_t FclkClocks_Voltage[NUM_FCLK_DPM_LEVELS];
+ uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
+ MemPstateTable_t MemPstateTable[NUM_MEM_PSTATE_LEVELS];
+
+ uint8_t NumDcfClkLevelsEnabled;
+ uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
+ uint8_t NumSocClkLevelsEnabled;
+ uint8_t VcnClkLevelsEnabled; //Applies to both Vclk and Dclk
+ uint8_t VpeClkLevelsEnabled;
+
+ uint8_t NumMemPstatesEnabled;
+ uint8_t NumFclkLevelsEnabled;
+ uint8_t spare[2];
+
+ uint32_t MinGfxClk;
+ uint32_t MaxGfxClk;
+} DpmClocks_t;
+
+typedef struct {
+ uint16_t CoreFrequency[16]; //Target core frequency [MHz]
+ uint16_t CorePower[16]; //CAC calculated core power [mW]
+ uint16_t CoreTemperature[16]; //TSEN measured core temperature [centi-C]
+ uint16_t GfxTemperature; //TSEN measured GFX temperature [centi-C]
+ uint16_t SocTemperature; //TSEN measured SOC temperature [centi-C]
+ uint16_t StapmOpnLimit; //Maximum IRM defined STAPM power limit [mW]
+ uint16_t StapmCurrentLimit; //Time filtered STAPM power limit [mW]
+ uint16_t InfrastructureCpuMaxFreq; //CCLK frequency limit enforced on classic cores [MHz]
+ uint16_t InfrastructureGfxMaxFreq; //GFXCLK frequency limit enforced on GFX [MHz]
+ uint16_t SkinTemp; //Maximum skin temperature reported by APU and HS2 chassis sensors [centi-C]
+ uint16_t GfxclkFrequency; //Time filtered target GFXCLK frequency [MHz]
+ uint16_t FclkFrequency; //Time filtered target FCLK frequency [MHz]
+ uint16_t GfxActivity; //Time filtered GFX busy % [0-100]
+ uint16_t SocclkFrequency; //Time filtered target SOCCLK frequency [MHz]
+ uint16_t VclkFrequency; //Time filtered target VCLK frequency [MHz]
+ uint16_t VcnActivity; //Time filtered VCN busy % [0-100]
+ uint16_t VpeclkFrequency; //Time filtered target VPECLK frequency [MHz]
+ uint16_t IpuclkFrequency; //Time filtered target IPUCLK frequency [MHz]
+ uint16_t IpuBusy[8]; //Time filtered IPU per-column busy % [0-100]
+ uint16_t DRAMReads; //Time filtered DRAM read bandwidth [MB/sec]
+ uint16_t DRAMWrites; //Time filtered DRAM write bandwidth [MB/sec]
+ uint16_t CoreC0Residency[16]; //Time filtered per-core C0 residency % [0-100]
+ uint16_t IpuPower; //Time filtered IPU power [mW]
+ uint32_t ApuPower; //Time filtered APU power [mW]
+ uint32_t GfxPower; //Time filtered GFX power [mW]
+ uint32_t dGpuPower; //Time filtered dGPU power [mW]
+ uint32_t SocketPower; //Time filtered power used for PPT/STAPM [APU+dGPU] [mW]
+ uint32_t AllCorePower; //Time filtered sum of core power across all cores in the socket [mW]
+ uint32_t FilterAlphaValue; //Metrics table alpha filter time constant [us]
+ uint32_t MetricsCounter; //Counter that is incremented on every metrics table update [PM_TIMER cycles]
+ uint16_t MemclkFrequency; //Time filtered target MEMCLK frequency [MHz]
+ uint16_t MpipuclkFrequency; //Time filtered target MPIPUCLK frequency [MHz]
+ uint16_t IpuReads; //Time filtered IPU read bandwidth [MB/sec]
+ uint16_t IpuWrites; //Time filtered IPU write bandwidth [MB/sec]
+ uint32_t ThrottleResidency_PROCHOT; //Counter that is incremented on every metrics table update when PROCHOT was engaged [PM_TIMER cycles]
+ uint32_t ThrottleResidency_SPL; //Counter that is incremented on every metrics table update when SPL was engaged [PM_TIMER cycles]
+ uint32_t ThrottleResidency_FPPT; //Counter that is incremented on every metrics table update when fast PPT was engaged [PM_TIMER cycles]
+ uint32_t ThrottleResidency_SPPT; //Counter that is incremented on every metrics table update when slow PPT was engaged [PM_TIMER cycles]
+ uint32_t ThrottleResidency_THM_CORE; //Counter that is incremented on every metrics table update when CORE thermal throttling was engaged [PM_TIMER cycles]
+ uint32_t ThrottleResidency_THM_GFX; //Counter that is incremented on every metrics table update when GFX thermal throttling was engaged [PM_TIMER cycles]
+ uint32_t ThrottleResidency_THM_SOC; //Counter that is incremented on every metrics table update when SOC thermal throttling was engaged [PM_TIMER cycles]
+ uint16_t Psys; //Time filtered Psys power [mW]
+ uint16_t spare1;
+ uint32_t spare[6];
+} SmuMetrics_t;
+
+//ISP tile definitions
+typedef enum {
+ TILE_XTILE = 0, //ONO0
+ TILE_MTILE, //ONO1
+ TILE_PDP, //ONO2
+ TILE_CSTAT, //ONO2
+ TILE_LME, //ONO3
+ TILE_BYRP, //ONO4
+ TILE_GRBP, //ONO4
+ TILE_MCFP, //ONO4
+ TILE_YUVP, //ONO4
+ TILE_MCSC, //ONO4
+ TILE_GDC, //ONO5
+ TILE_MAX
+} TILE_NUM_e;
+
+// Tile Selection (Based on arguments)
+#define ISP_TILE_SEL(tile) (1<<tile)
+#define ISP_TILE_SEL_ALL 0x7FF
+
+// Workload bits
+#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 0
+#define WORKLOAD_PPLIB_VIDEO_BIT 2
+#define WORKLOAD_PPLIB_VR_BIT 3
+#define WORKLOAD_PPLIB_COMPUTE_BIT 4
+#define WORKLOAD_PPLIB_CUSTOM_BIT 5
+#define WORKLOAD_PPLIB_COUNT 6
+
+#define TABLE_BIOS_IF 0 // Called by BIOS
+#define TABLE_WATERMARKS 1 // Called by DAL through VBIOS
+#define TABLE_CUSTOM_DPM 2 // Called by Driver
+#define TABLE_BIOS_GPIO_CONFIG 3 // Called by BIOS
+#define TABLE_DPMCLOCKS 4 // Called by Driver and VBIOS
+#define TABLE_SPARE0 5 // Unused
+#define TABLE_MODERN_STDBY 6 // Called by Tools for Modern Standby Log
+#define TABLE_SMU_METRICS 7 // Called by Driver and SMF/PMF
+#define TABLE_COUNT 8
+
+#endif
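Editor's note: worked example for the ISP tile-select macros above. Each TILE_* enumerator maps to one bit, and with TILE_MAX at 11, ISP_TILE_SEL_ALL (0x7FF) is exactly the eleven low bits set. The helper below is illustrative only:

/* Select the X-tile and M-tile, e.g. as the argument of a
 * PowerUpIspByTile-style message (message IDs live in the PPSMC header). */
static uint32_t select_front_end_tiles(void)
{
	return ISP_TILE_SEL(TILE_XTILE) | ISP_TILE_SEL(TILE_MTILE); /* 0x3 */
}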
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
index 9be4051c0..7b812b999 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -123,7 +123,7 @@ typedef enum {
VOLTAGE_GUARDBAND_COUNT
} GFX_GUARDBAND_e;
-#define SMU_METRICS_TABLE_VERSION 0x7
+#define SMU_METRICS_TABLE_VERSION 0xB
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
@@ -207,7 +207,115 @@ typedef struct __attribute__((packed, aligned(4))) {
uint64_t PublicSerialNumber_AID[4];
uint64_t PublicSerialNumber_XCD[8];
uint64_t PublicSerialNumber_CCD[12];
-} MetricsTable_t;
+
+ //XGMI data transfer size
+ uint64_t XgmiReadDataSizeAcc[8];//in KByte
+ uint64_t XgmiWriteDataSizeAcc[8];//in KByte
+
+ //PCIE BW Data and error count
+ uint32_t PcieBandwidth[4];
+ uint32_t PCIeL0ToRecoveryCountAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIenReplayAAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIenReplayARolloverCountAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIeNAKSentCountAcc; // The Pcie counter itself is accumulated
+ uint32_t PCIeNAKReceivedCountAcc; // The Pcie counter itself is accumulated
+
+ // VCN/JPEG ACTIVITY
+ uint32_t VcnBusy[4];
+ uint32_t JpegBusy[32];
+} MetricsTableX_t;
+
+typedef struct __attribute__((packed, aligned(4))) {
+ uint32_t AccumulationCounter;
+
+ //TEMPERATURE
+ uint32_t MaxSocketTemperature;
+ uint32_t MaxVrTemperature;
+ uint32_t MaxHbmTemperature;
+ uint64_t MaxSocketTemperatureAcc;
+ uint64_t MaxVrTemperatureAcc;
+ uint64_t MaxHbmTemperatureAcc;
+
+ //POWER
+ uint32_t SocketPowerLimit;
+ uint32_t MaxSocketPowerLimit;
+ uint32_t SocketPower;
+
+ //ENERGY
+ uint64_t Timestamp;
+ uint64_t SocketEnergyAcc;
+ uint64_t CcdEnergyAcc;
+ uint64_t XcdEnergyAcc;
+ uint64_t AidEnergyAcc;
+ uint64_t HbmEnergyAcc;
+
+ //FREQUENCY
+ uint32_t CclkFrequencyLimit;
+ uint32_t GfxclkFrequencyLimit;
+ uint32_t FclkFrequency;
+ uint32_t UclkFrequency;
+ uint32_t SocclkFrequency[4];
+ uint32_t VclkFrequency[4];
+ uint32_t DclkFrequency[4];
+ uint32_t LclkFrequency[4];
+ uint64_t GfxclkFrequencyAcc[8];
+ uint64_t CclkFrequencyAcc[96];
+
+ //FREQUENCY RANGE
+ uint32_t MaxCclkFrequency;
+ uint32_t MinCclkFrequency;
+ uint32_t MaxGfxclkFrequency;
+ uint32_t MinGfxclkFrequency;
+ uint32_t FclkFrequencyTable[4];
+ uint32_t UclkFrequencyTable[4];
+ uint32_t SocclkFrequencyTable[4];
+ uint32_t VclkFrequencyTable[4];
+ uint32_t DclkFrequencyTable[4];
+ uint32_t LclkFrequencyTable[4];
+ uint32_t MaxLclkDpmRange;
+ uint32_t MinLclkDpmRange;
+
+ //XGMI
+ uint32_t XgmiWidth;
+ uint32_t XgmiBitrate;
+ uint64_t XgmiReadBandwidthAcc[8];
+ uint64_t XgmiWriteBandwidthAcc[8];
+
+ //ACTIVITY
+ uint32_t SocketC0Residency;
+ uint32_t SocketGfxBusy;
+ uint32_t DramBandwidthUtilization;
+ uint64_t SocketC0ResidencyAcc;
+ uint64_t SocketGfxBusyAcc;
+ uint64_t DramBandwidthAcc;
+ uint32_t MaxDramBandwidth;
+ uint64_t DramBandwidthUtilizationAcc;
+ uint64_t PcieBandwidthAcc[4];
+
+ //THROTTLERS
+ uint32_t ProchotResidencyAcc;
+ uint32_t PptResidencyAcc;
+ uint32_t SocketThmResidencyAcc;
+ uint32_t VrThmResidencyAcc;
+ uint32_t HbmThmResidencyAcc;
+ uint32_t GfxLockXCDMak;
+
+ // New Items at end to maintain driver compatibility
+ uint32_t GfxclkFrequency[8];
+
+ //PSNs
+ uint64_t PublicSerialNumber_AID[4];
+ uint64_t PublicSerialNumber_XCD[8];
+ uint64_t PublicSerialNumber_CCD[12];
+
+ //XGMI data transfer size
+ uint64_t XgmiReadDataSizeAcc[8];//in KByte
+ uint64_t XgmiWriteDataSizeAcc[8];//in KByte
+
+ // VCN/JPEG ACTIVITY
+ uint32_t VcnBusy[4];
+ uint32_t JpegBusy[32];
+} MetricsTableA_t;
#define SMU_VF_METRICS_TABLE_VERSION 0x3
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
index 70a4a717f..509e3cd48 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -87,7 +87,11 @@
#define PPSMC_MSG_QueryValidMcaCount 0x36
#define PPSMC_MSG_McaBankDumpDW 0x37
#define PPSMC_MSG_GetCTFLimit 0x38
-#define PPSMC_Message_Count 0x39
+#define PPSMC_MSG_ClearMcaOnRead 0x39
+#define PPSMC_MSG_QueryValidMcaCeCount 0x3A
+#define PPSMC_MSG_McaBankCeDumpDW 0x3B
+#define PPSMC_MSG_SelectPLPDMode 0x40
+#define PPSMC_Message_Count 0x41
//PPSMC Reset Types for driver msg argument
#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
@@ -104,6 +108,10 @@
#define PPSMC_XCD_THM_TYPE 0x3
#define PPSMC_HBM_THM_TYPE 0x4
+//PLPD modes
+#define PPSMC_PLPD_MODE_DEFAULT 0x1
+#define PPSMC_PLPD_MODE_OPTIMIZED 0x2
+
typedef uint32_t PPSMC_Result;
typedef uint32_t PPSMC_MSG;
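Editor's note: the new PPSMC_MSG_SelectPLPDMode message takes one of the PPSMC_PLPD_MODE_* values above as its argument. A hedged sketch of mapping the driver-level enum pp_xgmi_plpd_mode onto those values; the mapping is an assumption based on the defines, not lifted from the patch:

static int example_select_plpd(struct smu_context *smu,
			       enum pp_xgmi_plpd_mode mode)
{
	uint32_t param;

	switch (mode) {
	case XGMI_PLPD_DEFAULT:
		param = PPSMC_PLPD_MODE_DEFAULT;
		break;
	case XGMI_PLPD_OPTIMIZED:
		param = PPSMC_PLPD_MODE_OPTIMIZED;
		break;
	default:
		return -EINVAL;
	}

	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SelectPLPDMode,
					       param, NULL);
}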
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h
new file mode 100644
index 000000000..356e0f57a
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __SMU_V14_0_0_PMFW_H__
+#define __SMU_V14_0_0_PMFW_H__
+
+#include "smu14_driver_if_v14_0_0.h"
+
+#pragma pack(push, 1)
+
+#define ENABLE_DEBUG_FEATURES
+
+// Firmware features
+// Feature Control Defines
+#define FEATURE_CCLK_DPM_BIT 0
+#define FEATURE_FAN_CONTROLLER_BIT 1
+#define FEATURE_DATA_CALCULATION_BIT 2
+#define FEATURE_PPT_BIT 3
+#define FEATURE_TDC_BIT 4
+#define FEATURE_THERMAL_BIT 5
+#define FEATURE_FIT_BIT 6
+#define FEATURE_EDC_BIT 7
+#define FEATURE_PLL_POWER_DOWN_BIT 8
+#define FEATURE_VDDOFF_BIT 9
+#define FEATURE_VCN_DPM_BIT 10
+#define FEATURE_DS_MPM_BIT 11
+#define FEATURE_FCLK_DPM_BIT 12
+#define FEATURE_SOCCLK_DPM_BIT 13
+#define FEATURE_DS_MPIO_BIT 14
+#define FEATURE_LCLK_DPM_BIT 15
+#define FEATURE_SHUBCLK_DPM_BIT 16
+#define FEATURE_DCFCLK_DPM_BIT 17
+#define FEATURE_ISP_DPM_BIT 18
+#define FEATURE_IPU_DPM_BIT 19
+#define FEATURE_GFX_DPM_BIT 20
+#define FEATURE_DS_GFXCLK_BIT 21
+#define FEATURE_DS_SOCCLK_BIT 22
+#define FEATURE_DS_LCLK_BIT 23
+#define FEATURE_LOW_POWER_DCNCLKS_BIT 24 // for all DISP clks
+#define FEATURE_DS_SHUBCLK_BIT 25
+#define FEATURE_SPARE0_BIT 26 //SPARE
+#define FEATURE_ZSTATES_BIT 27
+#define FEATURE_IOMMUL2_PG_BIT 28
+#define FEATURE_DS_FCLK_BIT 29
+#define FEATURE_DS_SMNCLK_BIT 30
+#define FEATURE_DS_MP1CLK_BIT 31
+#define FEATURE_WHISPER_MODE_BIT 32
+#define FEATURE_SMU_LOW_POWER_BIT 33
+#define FEATURE_SMART_L3_RINSER_BIT 34
+#define FEATURE_SPARE1_BIT 35 //SPARE
+#define FEATURE_PSI_BIT 36
+#define FEATURE_PROCHOT_BIT 37
+#define FEATURE_CPUOFF_BIT 38
+#define FEATURE_STAPM_BIT 39
+#define FEATURE_S0I3_BIT 40
+#define FEATURE_DF_LIGHT_CSTATE 41
+#define FEATURE_PERF_LIMIT_BIT 42
+#define FEATURE_CORE_DLDO_BIT 43
+#define FEATURE_DVO_BIT 44
+#define FEATURE_DS_VCN_BIT 45
+#define FEATURE_CPPC_BIT 46
+#define FEATURE_CPPC_PREFERRED_CORES 47
+#define FEATURE_DF_CSTATES_BIT 48
+#define FEATURE_SPARE2_BIT 49 //SPARE
+#define FEATURE_ATHUB_PG_BIT 50
+#define FEATURE_VDDOFF_ECO_BIT 51
+#define FEATURE_ZSTATES_ECO_BIT 52
+#define FEATURE_CC6_BIT 53
+#define FEATURE_DS_UMCCLK_BIT 54
+#define FEATURE_DS_ISPCLK_BIT 55
+#define FEATURE_DS_HSPCLK_BIT 56
+#define FEATURE_P3T_BIT 57
+#define FEATURE_DS_IPUCLK_BIT 58
+#define FEATURE_DS_VPECLK_BIT 59
+#define FEATURE_VPE_DPM_BIT 60
+#define FEATURE_SPARE_61 61
+#define FEATURE_FP_DIDT 62
+#define NUM_FEATURES 63
+
+// Firmware Header/Footer
+struct SMU14_Firmware_Footer {
+ uint32_t Signature;
+};
+typedef struct SMU14_Firmware_Footer SMU14_Firmware_Footer;
+
+// PSP3.0 Header Definition
+typedef struct {
+ uint32_t ImageVersion;
+ uint32_t ImageVersion2; // This is repeated because DW0 cannot be written in SRAM due to HW bug.
+ uint32_t Padding0[3];
+ uint32_t SizeFWSigned;
+ uint32_t Padding1[25];
+ uint32_t FirmwareType;
+ uint32_t Filler[32];
+} SMU_Firmware_Header;
+
+typedef struct {
+ // MP1_EXT_SCRATCH0
+ uint32_t DpmHandlerID : 8;
+ uint32_t ActivityMonitorID : 8;
+ uint32_t DpmTimerID : 8;
+ uint32_t DpmHubID : 4;
+ uint32_t DpmHubTask : 4;
+ // MP1_EXT_SCRATCH1
+ uint32_t CclkSyncStatus : 8;
+ uint32_t Ccx0CpuOff : 2;
+ uint32_t Ccx1CpuOff : 2;
+ uint32_t GfxOffStatus : 2;
+ uint32_t VddOff : 1;
+ uint32_t InWhisperMode : 1;
+ uint32_t ZstateStatus : 4;
+ uint32_t spare0 : 4;
+ uint32_t DstateFun : 4;
+ uint32_t DstateDev : 4;
+ // MP1_EXT_SCRATCH2
+ uint32_t P2JobHandler :24;
+ uint32_t RsmuPmiP2PendingCnt : 8;
+ // MP1_EXT_SCRATCH3
+ uint32_t PostCode :32;
+ // MP1_EXT_SCRATCH4
+ uint32_t MsgPortBusy :24;
+ uint32_t RsmuPmiP1Pending : 1;
+ uint32_t DfCstateExitPending : 1;
+ uint32_t Ccx0Pc6ExitPending : 1;
+ uint32_t Ccx1Pc6ExitPending : 1;
+ uint32_t WarmResetPending : 1;
+ uint32_t spare1 : 3;
+ // MP1_EXT_SCRATCH5
+ uint32_t IdleMask :32;
+ // MP1_EXT_SCRATCH6 = RTOS threads' status
+ // MP1_EXT_SCRATCH7 = RTOS Current Job
+} FwStatus_t;
+
+
+#pragma pack(pop)
+
+#endif
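Editor's note: with NUM_FEATURES at 63, the whole feature set above fits in a single 64-bit mask as reported by PMFW. A minimal sketch of testing one of the new bits; vpe_dpm_enabled() is an illustrative name:

#include <stdbool.h>
#include <stdint.h>

/* True if PMFW reports VPE DPM enabled in the 64-bit feature mask. */
static bool vpe_dpm_enabled(uint64_t feature_mask)
{
	return feature_mask & (1ULL << FEATURE_VPE_DPM_BIT);
}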
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
new file mode 100644
index 000000000..8a8a57c56
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __SMU_V14_0_0_PPSMC_H__
+#define __SMU_V14_0_0_PPSMC_H__
+
+/*! @mainpage PMFW-PPS (PPLib) Message Interface
+ This documentation contains the subsections:\n\n
+ @ref ResponseCodes\n
+ @ref definitions\n
+ @ref enums\n
+*/
+
+/** @def PPS_PMFW_IF_VER
+* PPS (PPLib) to PMFW IF version 1.0
+*/
+#define PPS_PMFW_IF_VER "1.0" ///< Major.Minor
+
+/** @defgroup ResponseCodes PMFW Response Codes
+* @{
+*/
+// SMU Response Codes:
+#define PPSMC_Result_OK 0x1 ///< Message Response OK
+#define PPSMC_Result_Failed 0xFF ///< Message Response Failed
+#define PPSMC_Result_UnknownCmd 0xFE ///< Message Response Unknown Command
+#define PPSMC_Result_CmdRejectedPrereq 0xFD ///< Message Response Command Failed Prerequisite
+#define PPSMC_Result_CmdRejectedBusy 0xFC ///< Message Response Command Rejected due to PMFW is busy. Sender should retry sending this message
+/** @}*/
+
+/** @defgroup definitions Message definitions
+* @{
+*/
+// Message Definitions:
+#define PPSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. Requirement specified by PMFW team
+#define PPSMC_MSG_GetPmfwVersion 0x02 ///< Get PMFW version
+#define PPSMC_MSG_GetDriverIfVersion 0x03 ///< Get PMFW_DRIVER_IF version
+#define PPSMC_MSG_SPARE0 0x04 ///< SPARE
+#define PPSMC_MSG_SPARE1 0x05 ///< SPARE
+#define PPSMC_MSG_PowerDownVcn 0x06 ///< Power down VCN
+#define PPSMC_MSG_PowerUpVcn 0x07 ///< Power up VCN; VCN is power gated by default
+#define PPSMC_MSG_SetHardMinVcn 0x08 ///< For wireless display
+#define PPSMC_MSG_SetSoftMinGfxclk 0x09 ///< Set SoftMin for GFXCLK, argument is frequency in MHz
+#define PPSMC_MSG_SPARE2 0x0A ///< SPARE
+#define PPSMC_MSG_SPARE3 0x0B ///< SPARE
+#define PPSMC_MSG_PrepareMp1ForUnload 0x0C ///< Prepare PMFW for GFX driver unload
+#define PPSMC_MSG_SetDriverDramAddrHigh 0x0D ///< Set high 32 bits of DRAM address for Driver table transfer
+#define PPSMC_MSG_SetDriverDramAddrLow 0x0E ///< Set low 32 bits of DRAM address for Driver table transfer
+#define PPSMC_MSG_TransferTableSmu2Dram 0x0F ///< Transfer driver interface table from PMFW SRAM to DRAM
+#define PPSMC_MSG_TransferTableDram2Smu 0x10 ///< Transfer driver interface table from DRAM to PMFW SRAM
+#define PPSMC_MSG_GfxDeviceDriverReset 0x11 ///< Request GFX mode 2 reset
+#define PPSMC_MSG_GetEnabledSmuFeatures 0x12 ///< Get enabled features in PMFW
+#define PPSMC_MSG_SetHardMinSocclkByFreq 0x13 ///< Set hard min for SOC CLK
+#define PPSMC_MSG_SetSoftMinFclk 0x14 ///< Set soft min for FCLK
+#define PPSMC_MSG_SetSoftMinVcn 0x15 ///< Set soft min for VCN clocks (VCLK and DCLK)
+
+#define PPSMC_MSG_EnableGfxImu 0x16 ///< Enable GFX IMU
+
+#define PPSMC_MSG_spare_0x17 0x17
+#define PPSMC_MSG_spare_0x18 0x18
+#define PPSMC_MSG_AllowGfxOff 0x19 ///< Inform PMFW of allowing GFXOFF entry
+#define PPSMC_MSG_DisallowGfxOff 0x1A ///< Inform PMFW of disallowing GFXOFF entry
+#define PPSMC_MSG_SetSoftMaxGfxClk 0x1B ///< Set soft max for GFX CLK
+#define PPSMC_MSG_SetHardMinGfxClk 0x1C ///< Set hard min for GFX CLK
+
+#define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x1D ///< Set soft max for SOC CLK
+#define PPSMC_MSG_SetSoftMaxFclkByFreq 0x1E ///< Set soft max for FCLK
+#define PPSMC_MSG_SetSoftMaxVcn 0x1F ///< Set soft max for VCN clocks (VCLK and DCLK)
+#define PPSMC_MSG_spare_0x20 0x20
+#define PPSMC_MSG_PowerDownJpeg 0x21 ///< Power down Jpeg
+#define PPSMC_MSG_PowerUpJpeg 0x22 ///< Power up JPEG; JPEG is power gated by default
+
+#define PPSMC_MSG_SetHardMinFclkByFreq 0x23 ///< Set hard min for FCLK
+#define PPSMC_MSG_SetSoftMinSocclkByFreq 0x24 ///< Set soft min for SOC CLK
+#define PPSMC_MSG_AllowZstates 0x25 ///< Inform PMFW of allowing Zstate entry, i.e. no Miracast activity
+#define PPSMC_MSG_Reserved 0x26 ///< Not used
+#define PPSMC_MSG_Reserved1 0x27 ///< Not used, previously PPSMC_MSG_RequestActiveWgp
+#define PPSMC_MSG_Reserved2 0x28 ///< Not used, previously PPSMC_MSG_QueryActiveWgp
+#define PPSMC_MSG_PowerDownIspByTile 0x29 ///< ISP is power gated by default
+#define PPSMC_MSG_PowerUpIspByTile 0x2A ///< This message is used to power up ISP tiles and enable the ISP DPM
+#define PPSMC_MSG_SetHardMinIspiclkByFreq 0x2B ///< Set HardMin by frequency for ISPICLK
+#define PPSMC_MSG_SetHardMinIspxclkByFreq 0x2C ///< Set HardMin by frequency for ISPXCLK
+#define PPSMC_MSG_PowerDownUmsch 0x2D ///< Power down VCN.UMSCH (aka VSCH) scheduler
+#define PPSMC_MSG_PowerUpUmsch 0x2E ///< Power up VCN.UMSCH (aka VSCH) scheduler
+#define PPSMC_Message_IspStutterOn_MmhubPgDis 0x2F ///< ISP StutterOn mmHub PgDis
+#define PPSMC_Message_IspStutterOff_MmhubPgEn 0x30 ///< ISP StutterOff mmHub PgEn
+#define PPSMC_MSG_PowerUpVpe 0x31 ///< Power up VPE
+#define PPSMC_MSG_PowerDownVpe 0x32 ///< Power down VPE
+#define PPSMC_MSG_GetVpeDpmTable 0x33 ///< Get VPE DPM table
+#define PPSMC_MSG_EnableLSdma 0x34 ///< Enable LSDMA
+#define PPSMC_MSG_DisableLSdma 0x35 ///< Disable LSDMA
+#define PPSMC_MSG_SetSoftMaxVpe 0x36 ///< Set soft max for VPE clock
+#define PPSMC_MSG_SetSoftMinVpe 0x37 ///< Set soft min for VPE clock
+#define PPSMC_Message_Count 0x38 ///< Total number of PPSMC messages
+/** @}*/
+
+/**
+* @defgroup enums Enum Definitions
+* @{
+*/
+
+/** @enum Mode_Reset_e
+* Mode reset type, argument for PPSMC_MSG_GfxDeviceDriverReset
+*/
+//argument for PPSMC_MSG_GfxDeviceDriverReset
+typedef enum {
+ MODE1_RESET = 1, ///< Mode reset type 1
+ MODE2_RESET = 2 ///< Mode reset type 2
+} Mode_Reset_e;
+
+/** @}*/
+
+/** @enum ZStates_e
+* Zstate types, argument for PPSMC_MSG_AllowZstates
+*/
+//Argument for PPSMC_MSG_AllowZstates
+typedef enum {
+ DISALLOW_ZSTATES = 0, ///< Disallow Zstates
+ ALLOW_ZSTATES_Z8 = 8, ///< Allows Z8 only
+ ALLOW_ZSTATES_Z9 = 9, ///< Allows Z9 and Z8
+} ZStates_e;
+
+/** @}*/
+#endif
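Editor's note: the Zstate encoding above is cumulative, so allowing Z9 implicitly allows Z8 as well. A hypothetical sketch of sending the message, assuming the SMU_MSG_AllowZstates mapping exists for this ASIC as it does on earlier APUs:

static int example_allow_zstates(struct smu_context *smu)
{
	/* ALLOW_ZSTATES_Z9 permits both Z9 and Z8 entry. */
	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_AllowZstates,
					       ALLOW_ZSTATES_Z9, NULL);
}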
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index e57265cf6..9dd47d910 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -247,7 +247,19 @@
__SMU_DUMMY_MAP(Mode2Reset), \
__SMU_DUMMY_MAP(RequestI2cTransaction), \
__SMU_DUMMY_MAP(GetMetricsTable), \
- __SMU_DUMMY_MAP(DALNotPresent),
+ __SMU_DUMMY_MAP(DALNotPresent), \
+ __SMU_DUMMY_MAP(ClearMcaOnRead), \
+ __SMU_DUMMY_MAP(QueryValidMcaCount), \
+ __SMU_DUMMY_MAP(QueryValidMcaCeCount), \
+ __SMU_DUMMY_MAP(McaBankDumpDW), \
+ __SMU_DUMMY_MAP(McaBankCeDumpDW), \
+ __SMU_DUMMY_MAP(SelectPLPDMode), \
+ __SMU_DUMMY_MAP(PowerUpVpe), \
+ __SMU_DUMMY_MAP(PowerDownVpe), \
+ __SMU_DUMMY_MAP(PowerUpUmsch), \
+ __SMU_DUMMY_MAP(PowerDownUmsch), \
+ __SMU_DUMMY_MAP(SetSoftMaxVpe), \
+ __SMU_DUMMY_MAP(SetSoftMinVpe),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
@@ -280,6 +292,11 @@ enum smu_clk_type {
SMU_OD_VDDC_CURVE,
SMU_OD_RANGE,
SMU_OD_VDDGFX_OFFSET,
+ SMU_OD_FAN_CURVE,
+ SMU_OD_ACOUSTIC_LIMIT,
+ SMU_OD_ACOUSTIC_TARGET,
+ SMU_OD_FAN_TARGET_TEMPERATURE,
+ SMU_OD_FAN_MINIMUM_PWM,
SMU_CLK_COUNT,
};
@@ -404,7 +421,9 @@ enum smu_clk_type {
__SMU_DUMMY_MAP(MEM_TEMP_READ), \
__SMU_DUMMY_MAP(ATHUB_MMHUB_PG), \
__SMU_DUMMY_MAP(BACO_CG), \
- __SMU_DUMMY_MAP(SOC_CG),
+ __SMU_DUMMY_MAP(SOC_CG), \
+ __SMU_DUMMY_MAP(LOW_POWER_DCNCLKS), \
+ __SMU_DUMMY_MAP(WHISPER_MODE),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(feature) SMU_FEATURE_##feature##_BIT
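Editor's note: the __SMU_DUMMY_MAP lists touched above are X-macros: the same list is expanded several times, each time with __SMU_DUMMY_MAP redefined, to generate enums and name tables that stay in sync automatically. A self-contained sketch of the pattern with illustrative EXAMPLE_* names:

#define EXAMPLE_MESSAGES(MAP)	\
	MAP(PowerUpVpe)		\
	MAP(PowerDownVpe)

/* First expansion: an enum of message IDs. */
#define AS_ENUM(type) EXAMPLE_MSG_##type,
enum example_message { EXAMPLE_MESSAGES(AS_ENUM) EXAMPLE_MSG_COUNT };
#undef AS_ENUM

/* Second expansion: a matching table of message names for debugging. */
#define AS_NAME(type) #type,
static const char * const example_names[] = { EXAMPLE_MESSAGES(AS_NAME) };
#undef AS_NAME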
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index cc02f979e..95cb91971 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -299,5 +299,7 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
uint8_t pcie_gen_cap,
uint8_t pcie_width_cap);
+int smu_v13_0_disable_pmfw_state(struct smu_context *smu);
+
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
new file mode 100644
index 000000000..a5b569976
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU_V14_0_H__
+#define __SMU_V14_0_H__
+
+#include "amdgpu_smu.h"
+
+#define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x1
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x6
+
+#define FEATURE_MASK(feature) (1ULL << feature)
+
+/* MP Apertures */
+#define MP0_Public 0x03800000
+#define MP0_SRAM 0x03900000
+#define MP1_Public 0x03b00000
+#define MP1_SRAM 0x03c00004
+
+/* address block */
+#define smnMP1_FIRMWARE_FLAGS 0x3010028
+#define smnMP1_PUB_CTRL 0x3010d10
+
+#define MAX_DPM_LEVELS 16
+#define MAX_PCIE_CONF 3
+
+struct smu_14_0_max_sustainable_clocks {
+ uint32_t display_clock;
+ uint32_t phy_clock;
+ uint32_t pixel_clock;
+ uint32_t uclock;
+ uint32_t dcef_clock;
+ uint32_t soc_clock;
+};
+
+struct smu_14_0_dpm_clk_level {
+ bool enabled;
+ uint32_t value;
+};
+
+struct smu_14_0_dpm_table {
+ uint32_t min; /* MHz */
+ uint32_t max; /* MHz */
+ uint32_t count;
+ bool is_fine_grained;
+ struct smu_14_0_dpm_clk_level dpm_levels[MAX_DPM_LEVELS];
+};
+
+struct smu_14_0_pcie_table {
+ uint8_t pcie_gen[MAX_PCIE_CONF];
+ uint8_t pcie_lane[MAX_PCIE_CONF];
+ uint16_t clk_freq[MAX_PCIE_CONF];
+ uint32_t num_of_link_levels;
+};
+
+struct smu_14_0_dpm_tables {
+ struct smu_14_0_dpm_table soc_table;
+ struct smu_14_0_dpm_table gfx_table;
+ struct smu_14_0_dpm_table uclk_table;
+ struct smu_14_0_dpm_table eclk_table;
+ struct smu_14_0_dpm_table vclk_table;
+ struct smu_14_0_dpm_table dclk_table;
+ struct smu_14_0_dpm_table dcef_table;
+ struct smu_14_0_dpm_table pixel_table;
+ struct smu_14_0_dpm_table display_table;
+ struct smu_14_0_dpm_table phy_table;
+ struct smu_14_0_dpm_table fclk_table;
+ struct smu_14_0_pcie_table pcie_table;
+};
+
+struct smu_14_0_dpm_context {
+ struct smu_14_0_dpm_tables dpm_tables;
+ uint32_t workload_policy_mask;
+ uint32_t dcef_min_ds_clk;
+};
+
+enum smu_14_0_power_state {
+ SMU_14_0_POWER_STATE__D0 = 0,
+ SMU_14_0_POWER_STATE__D1,
+ SMU_14_0_POWER_STATE__D3, /* Sleep */
+ SMU_14_0_POWER_STATE__D4, /* Hibernate */
+ SMU_14_0_POWER_STATE__D5, /* Power off */
+};
+
+struct smu_14_0_power_context {
+ uint32_t power_source;
+ uint8_t in_power_limit_boost_mode;
+ enum smu_14_0_power_state power_state;
+};
+
+#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3)
+
+int smu_v14_0_init_microcode(struct smu_context *smu);
+
+void smu_v14_0_fini_microcode(struct smu_context *smu);
+
+int smu_v14_0_load_microcode(struct smu_context *smu);
+
+int smu_v14_0_init_smc_tables(struct smu_context *smu);
+
+int smu_v14_0_fini_smc_tables(struct smu_context *smu);
+
+int smu_v14_0_init_power(struct smu_context *smu);
+
+int smu_v14_0_fini_power(struct smu_context *smu);
+
+int smu_v14_0_check_fw_status(struct smu_context *smu);
+
+int smu_v14_0_setup_pptable(struct smu_context *smu);
+
+int smu_v14_0_get_vbios_bootup_values(struct smu_context *smu);
+
+int smu_v14_0_check_fw_version(struct smu_context *smu);
+
+int smu_v14_0_set_driver_table_location(struct smu_context *smu);
+
+int smu_v14_0_set_tool_table_location(struct smu_context *smu);
+
+int smu_v14_0_notify_memory_pool_location(struct smu_context *smu);
+
+int smu_v14_0_system_features_control(struct smu_context *smu,
+ bool en);
+
+int smu_v14_0_set_allowed_mask(struct smu_context *smu);
+
+int smu_v14_0_notify_display_change(struct smu_context *smu);
+
+int smu_v14_0_get_current_power_limit(struct smu_context *smu,
+ uint32_t *power_limit);
+
+int smu_v14_0_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit);
+
+int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable);
+
+int smu_v14_0_register_irq_handler(struct smu_context *smu);
+
+int smu_v14_0_baco_set_armd3_sequence(struct smu_context *smu,
+ enum smu_baco_seq baco_seq);
+
+bool smu_v14_0_baco_is_support(struct smu_context *smu);
+
+enum smu_baco_state smu_v14_0_baco_get_state(struct smu_context *smu);
+
+int smu_v14_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state);
+
+int smu_v14_0_baco_enter(struct smu_context *smu);
+int smu_v14_0_baco_exit(struct smu_context *smu);
+
+int smu_v14_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max);
+
+int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max);
+
+int smu_v14_0_set_hard_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max);
+
+int smu_v14_0_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level);
+
+int smu_v14_0_set_power_source(struct smu_context *smu,
+ enum smu_power_src_type power_src);
+
+int smu_v14_0_set_single_dpm_table(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ struct smu_14_0_dpm_table *single_dpm_table);
+
+int smu_v14_0_gfx_ulv_control(struct smu_context *smu,
+ bool enablement);
+
+int smu_v14_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
+ uint64_t event_arg);
+
+int smu_v14_0_set_vcn_enable(struct smu_context *smu,
+ bool enable);
+
+int smu_v14_0_set_jpeg_enable(struct smu_context *smu,
+ bool enable);
+
+int smu_v14_0_init_pptable_microcode(struct smu_context *smu);
+
+int smu_v14_0_run_btc(struct smu_context *smu);
+
+int smu_v14_0_gpo_control(struct smu_context *smu,
+ bool enablement);
+
+int smu_v14_0_deep_sleep_control(struct smu_context *smu,
+ bool enablement);
+
+int smu_v14_0_set_gfx_power_up_by_imu(struct smu_context *smu);
+
+int smu_v14_0_set_default_dpm_tables(struct smu_context *smu);
+
+int smu_v14_0_get_pptable_from_firmware(struct smu_context *smu,
+ void **table,
+ uint32_t *size,
+ uint32_t pptable_id);
+
+int smu_v14_0_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[], uint32_t size);
+
+void smu_v14_0_set_smu_mailbox_registers(struct smu_context *smu);
+
+#endif
+#endif
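Editor's note: a hedged sketch of consuming the smu_14_0_dpm_table layout declared above, e.g. to list the enabled levels of one clock domain; example_dump_dpm_table() is an illustrative name, not part of this header:

#include <linux/printk.h>

static void example_dump_dpm_table(const struct smu_14_0_dpm_table *table)
{
	uint32_t i;

	/* count is bounded by MAX_DPM_LEVELS, but clamp defensively. */
	for (i = 0; i < table->count && i < MAX_DPM_LEVELS; i++) {
		if (!table->dpm_levels[i].enabled)
			continue;
		pr_info("level %u: %u MHz\n", i, table->dpm_levels[i].value);
	}
}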
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 704a2b577..2cb6b6822 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -569,9 +569,9 @@ static int arcturus_populate_umd_state_clk(struct smu_context *smu)
return 0;
}
-static int arcturus_get_clk_table(struct smu_context *smu,
- struct pp_clock_levels_with_latency *clocks,
- struct smu_11_0_dpm_table *dpm_table)
+static void arcturus_get_clk_table(struct smu_context *smu,
+ struct pp_clock_levels_with_latency *clocks,
+ struct smu_11_0_dpm_table *dpm_table)
{
uint32_t i;
@@ -584,8 +584,6 @@ static int arcturus_get_clk_table(struct smu_context *smu,
dpm_table->dpm_levels[i].value * 1000;
clocks->data[i].latency_in_us = 0;
}
-
- return 0;
}
static int arcturus_freqs_in_same_level(int32_t frequency1,
@@ -757,173 +755,133 @@ static int arcturus_get_current_clk_freq_by_table(struct smu_context *smu,
value);
}
-static int arcturus_print_clk_levels(struct smu_context *smu,
- enum smu_clk_type type, char *buf)
+static int arcturus_emit_clk_levels(struct smu_context *smu,
+ enum smu_clk_type type, char *buf, int *offset)
{
- int i, now, size = 0;
int ret = 0;
struct pp_clock_levels_with_latency clocks;
struct smu_11_0_dpm_table *single_dpm_table;
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_11_0_dpm_context *dpm_context = NULL;
uint32_t gen_speed, lane_width;
-
- smu_cmn_get_sysfs_buf(&buf, &size);
+ uint32_t i, cur_value = 0;
+ bool freq_match;
+ unsigned int clock_mhz;
+ static const char attempt_string[] = "Attempt to get current";
if (amdgpu_ras_intr_triggered()) {
- size += sysfs_emit_at(buf, size, "unavailable\n");
- return size;
+ *offset += sysfs_emit_at(buf, *offset, "unavailable\n");
+ return -EBUSY;
}
dpm_context = smu_dpm->dpm_context;
switch (type) {
case SMU_SCLK:
- ret = arcturus_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now);
+ ret = arcturus_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &cur_value);
if (ret) {
- dev_err(smu->adev->dev, "Attempt to get current gfx clk Failed!");
+ dev_err(smu->adev->dev, "%s gfx clk Failed!", attempt_string);
return ret;
}
single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
- ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev, "Attempt to get gfx clk levels Failed!");
- return ret;
- }
+ arcturus_get_clk_table(smu, &clocks, single_dpm_table);
- /*
- * For DPM disabled case, there will be only one clock level.
- * And it's safe to assume that is always the current clock.
- */
- for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i,
- clocks.data[i].clocks_in_khz / 1000,
- (clocks.num_levels == 1) ? "*" :
- (arcturus_freqs_in_same_level(
- clocks.data[i].clocks_in_khz / 1000,
- now) ? "*" : ""));
break;
case SMU_MCLK:
- ret = arcturus_get_current_clk_freq_by_table(smu, SMU_UCLK, &now);
+ ret = arcturus_get_current_clk_freq_by_table(smu, SMU_UCLK, &cur_value);
if (ret) {
- dev_err(smu->adev->dev, "Attempt to get current mclk Failed!");
+ dev_err(smu->adev->dev, "%s mclk Failed!", attempt_string);
return ret;
}
single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
- ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev, "Attempt to get memory clk levels Failed!");
- return ret;
- }
+ arcturus_get_clk_table(smu, &clocks, single_dpm_table);
- for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
- i, clocks.data[i].clocks_in_khz / 1000,
- (clocks.num_levels == 1) ? "*" :
- (arcturus_freqs_in_same_level(
- clocks.data[i].clocks_in_khz / 1000,
- now) ? "*" : ""));
break;
case SMU_SOCCLK:
- ret = arcturus_get_current_clk_freq_by_table(smu, SMU_SOCCLK, &now);
+ ret = arcturus_get_current_clk_freq_by_table(smu, SMU_SOCCLK, &cur_value);
if (ret) {
- dev_err(smu->adev->dev, "Attempt to get current socclk Failed!");
+ dev_err(smu->adev->dev, "%s socclk Failed!", attempt_string);
return ret;
}
single_dpm_table = &(dpm_context->dpm_tables.soc_table);
- ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev, "Attempt to get socclk levels Failed!");
- return ret;
- }
+ arcturus_get_clk_table(smu, &clocks, single_dpm_table);
- for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
- i, clocks.data[i].clocks_in_khz / 1000,
- (clocks.num_levels == 1) ? "*" :
- (arcturus_freqs_in_same_level(
- clocks.data[i].clocks_in_khz / 1000,
- now) ? "*" : ""));
break;
case SMU_FCLK:
- ret = arcturus_get_current_clk_freq_by_table(smu, SMU_FCLK, &now);
+ ret = arcturus_get_current_clk_freq_by_table(smu, SMU_FCLK, &cur_value);
if (ret) {
- dev_err(smu->adev->dev, "Attempt to get current fclk Failed!");
+ dev_err(smu->adev->dev, "%s fclk Failed!", attempt_string);
return ret;
}
single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
- ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev, "Attempt to get fclk levels Failed!");
- return ret;
- }
+ arcturus_get_clk_table(smu, &clocks, single_dpm_table);
- for (i = 0; i < single_dpm_table->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
- i, single_dpm_table->dpm_levels[i].value,
- (clocks.num_levels == 1) ? "*" :
- (arcturus_freqs_in_same_level(
- clocks.data[i].clocks_in_khz / 1000,
- now) ? "*" : ""));
break;
case SMU_VCLK:
- ret = arcturus_get_current_clk_freq_by_table(smu, SMU_VCLK, &now);
+ ret = arcturus_get_current_clk_freq_by_table(smu, SMU_VCLK, &cur_value);
if (ret) {
- dev_err(smu->adev->dev, "Attempt to get current vclk Failed!");
+ dev_err(smu->adev->dev, "%s vclk Failed!", attempt_string);
return ret;
}
single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
- ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev, "Attempt to get vclk levels Failed!");
- return ret;
- }
+ arcturus_get_clk_table(smu, &clocks, single_dpm_table);
- for (i = 0; i < single_dpm_table->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
- i, single_dpm_table->dpm_levels[i].value,
- (clocks.num_levels == 1) ? "*" :
- (arcturus_freqs_in_same_level(
- clocks.data[i].clocks_in_khz / 1000,
- now) ? "*" : ""));
break;
case SMU_DCLK:
- ret = arcturus_get_current_clk_freq_by_table(smu, SMU_DCLK, &now);
+ ret = arcturus_get_current_clk_freq_by_table(smu, SMU_DCLK, &cur_value);
if (ret) {
- dev_err(smu->adev->dev, "Attempt to get current dclk Failed!");
+ dev_err(smu->adev->dev, "%s dclk Failed!", attempt_string);
return ret;
}
single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
- ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev, "Attempt to get dclk levels Failed!");
- return ret;
- }
+ arcturus_get_clk_table(smu, &clocks, single_dpm_table);
- for (i = 0; i < single_dpm_table->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
- i, single_dpm_table->dpm_levels[i].value,
- (clocks.num_levels == 1) ? "*" :
- (arcturus_freqs_in_same_level(
- clocks.data[i].clocks_in_khz / 1000,
- now) ? "*" : ""));
break;
case SMU_PCIE:
gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
- size += sysfs_emit_at(buf, size, "0: %s %s %dMhz *\n",
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case SMU_SCLK:
+ case SMU_MCLK:
+ case SMU_SOCCLK:
+ case SMU_FCLK:
+ case SMU_VCLK:
+ case SMU_DCLK:
+ /*
+ * For the DPM disabled case, there will be only one clock level,
+ * and it's safe to assume that it is always the current clock.
+ */
+ for (i = 0; i < clocks.num_levels; i++) {
+ clock_mhz = clocks.data[i].clocks_in_khz / 1000;
+ freq_match = arcturus_freqs_in_same_level(clock_mhz, cur_value);
+ freq_match |= (clocks.num_levels == 1);
+
+ *offset += sysfs_emit_at(buf, *offset, "%d: %uMhz %s\n",
+ i, clock_mhz,
+ freq_match ? "*" : "");
+ }
+ break;
+
+ case SMU_PCIE:
+ *offset += sysfs_emit_at(buf, *offset, "0: %s %s %dMhz *\n",
(gen_speed == 0) ? "2.5GT/s," :
(gen_speed == 1) ? "5.0GT/s," :
(gen_speed == 2) ? "8.0GT/s," :
@@ -938,10 +896,10 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
break;
default:
- break;
+ return -EINVAL;
}
- return size;
+ return 0;
}
static int arcturus_upload_dpm_level(struct smu_context *smu,
@@ -1005,17 +963,10 @@ static int arcturus_force_clk_levels(struct smu_context *smu,
struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_11_0_dpm_table *single_dpm_table = NULL;
uint32_t soft_min_level, soft_max_level;
- uint32_t smu_version;
int ret = 0;
- ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if (ret) {
- dev_err(smu->adev->dev, "Failed to get smu version!\n");
- return ret;
- }
-
- if ((smu_version >= 0x361200) &&
- (smu_version <= 0x361a00)) {
+ if ((smu->smc_fw_version >= 0x361200) &&
+ (smu->smc_fw_version <= 0x361a00)) {
dev_err(smu->adev->dev, "Forcing clock level is not supported with "
"54.18 - 54.26(included) SMU firmwares\n");
return -EOPNOTSUPP;
@@ -1245,7 +1196,7 @@ static int arcturus_set_fan_speed_pwm(struct smu_context *smu,
uint32_t duty100, duty;
uint64_t tmp64;
- speed = MIN(speed, 255);
+ speed = min_t(uint32_t, speed, 255);
duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1_ARCT),
CG_FDO_CTRL1, FMAX_DUTY100);
@@ -1309,7 +1260,7 @@ static int arcturus_get_fan_speed_pwm(struct smu_context *smu,
if (duty100) {
tmp64 = (uint64_t)duty * 255;
do_div(tmp64, duty100);
- *speed = MIN((uint32_t)tmp64, 255);
+ *speed = min_t(uint32_t, tmp64, 255);
} else {
*speed = 0;
}
@@ -1327,14 +1278,15 @@ static int arcturus_get_fan_parameters(struct smu_context *smu)
}
static int arcturus_get_power_limit(struct smu_context *smu,
- uint32_t *current_power_limit,
- uint32_t *default_power_limit,
- uint32_t *max_power_limit)
+ uint32_t *current_power_limit,
+ uint32_t *default_power_limit,
+ uint32_t *max_power_limit,
+ uint32_t *min_power_limit)
{
struct smu_11_0_powerplay_table *powerplay_table =
(struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
PPTable_t *pptable = smu->smu_table.driver_pptable;
- uint32_t power_limit, od_percent;
+ uint32_t power_limit, od_percent_upper, od_percent_lower;
if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
/* the last hope to figure out the ppt limit */
@@ -1351,17 +1303,25 @@ static int arcturus_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (max_power_limit) {
- if (smu->od_enabled) {
- od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ if (smu->od_enabled) {
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ } else {
+ od_percent_upper = 0;
+ od_percent_lower = 100;
+ }
- dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);
+ dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
+ od_percent_upper, od_percent_lower, power_limit);
- power_limit *= (100 + od_percent);
- power_limit /= 100;
- }
+ if (max_power_limit) {
+ *max_power_limit = power_limit * (100 + od_percent_upper);
+ *max_power_limit /= 100;
+ }
- *max_power_limit = power_limit;
+ if (min_power_limit) {
+ *min_power_limit = power_limit * (100 - od_percent_lower);
+ *min_power_limit /= 100;
}
return 0;
@@ -1386,16 +1346,11 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
uint32_t i, size = 0;
int16_t workload_type = 0;
int result = 0;
- uint32_t smu_version;
if (!buf)
return -EINVAL;
- result = smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if (result)
- return result;
-
- if (smu_version >= 0x360d00)
+ if (smu->smc_fw_version >= 0x360d00)
size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
title[0], title[1], title[2], title[3], title[4], title[5],
title[6], title[7], title[8], title[9], title[10]);
@@ -1414,7 +1369,7 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
if (workload_type < 0)
continue;
- if (smu_version >= 0x360d00) {
+ if (smu->smc_fw_version >= 0x360d00) {
result = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
workload_type,
@@ -1429,7 +1384,7 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
- if (smu_version >= 0x360d00) {
+ if (smu->smc_fw_version >= 0x360d00) {
size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
0,
@@ -1471,19 +1426,15 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
int workload_type = 0;
uint32_t profile_mode = input[size];
int ret = 0;
- uint32_t smu_version;
if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
return -EINVAL;
}
- ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if (ret)
- return ret;
if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) &&
- (smu_version >= 0x360d00)) {
+ (smu->smc_fw_version >= 0x360d00)) {
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
WORKLOAD_PPLIB_CUSTOM_BIT,
@@ -1559,15 +1510,6 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
static int arcturus_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level)
{
- uint32_t smu_version;
- int ret;
-
- ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if (ret) {
- dev_err(smu->adev->dev, "Failed to get smu version!\n");
- return ret;
- }
-
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
case AMD_DPM_FORCED_LEVEL_LOW:
@@ -1575,8 +1517,8 @@ static int arcturus_set_performance_level(struct smu_context *smu,
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
- if ((smu_version >= 0x361200) &&
- (smu_version <= 0x361a00)) {
+ if ((smu->smc_fw_version >= 0x361200) &&
+ (smu->smc_fw_version <= 0x361a00)) {
dev_err(smu->adev->dev, "Forcing clock level is not supported with "
"54.18 - 54.26(included) SMU firmwares\n");
return -EOPNOTSUPP;
@@ -2214,16 +2156,11 @@ static void arcturus_i2c_control_fini(struct smu_context *smu)
static void arcturus_get_unique_id(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- uint32_t top32 = 0, bottom32 = 0, smu_version;
+ uint32_t top32 = 0, bottom32 = 0;
uint64_t id;
- if (smu_cmn_get_smc_version(smu, NULL, &smu_version)) {
- dev_warn(adev->dev, "Failed to get smu version, cannot get unique_id or serial_number\n");
- return;
- }
-
/* PPSMC_MSG_ReadSerial* is supported by 54.23.0 and onwards */
- if (smu_version < 0x361700) {
+ if (smu->smc_fw_version < 0x361700) {
dev_warn(adev->dev, "ReadSerial is only supported by PMFW 54.23.0 and onwards\n");
return;
}
@@ -2234,18 +2171,12 @@ static void arcturus_get_unique_id(struct smu_context *smu)
id = ((uint64_t)bottom32 << 32) | top32;
adev->unique_id = id;
- /* For Arcturus-and-later, unique_id == serial_number, so convert it to a
- * 16-digit HEX string for convenience and backwards-compatibility
- */
- sprintf(adev->serial, "%llx", id);
}
static int arcturus_set_df_cstate(struct smu_context *smu,
enum pp_df_cstate state)
{
struct amdgpu_device *adev = smu->adev;
- uint32_t smu_version;
- int ret;
/*
* Arcturus does not need the cstate disablement
@@ -2254,14 +2185,8 @@ static int arcturus_set_df_cstate(struct smu_context *smu,
if (amdgpu_in_reset(adev) || adev->in_suspend)
return 0;
- ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if (ret) {
- dev_err(smu->adev->dev, "Failed to get smu version!\n");
- return ret;
- }
-
/* PPSMC_MSG_DFCstateControl is supported by 54.15.0 and onwards */
- if (smu_version < 0x360F00) {
+ if (smu->smc_fw_version < 0x360F00) {
dev_err(smu->adev->dev, "DFCstateControl is only supported by PMFW 54.15.0 and onwards\n");
return -EINVAL;
}
@@ -2269,33 +2194,25 @@ static int arcturus_set_df_cstate(struct smu_context *smu,
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
}
-static int arcturus_allow_xgmi_power_down(struct smu_context *smu, bool en)
+static int arcturus_select_xgmi_plpd_policy(struct smu_context *smu,
+ enum pp_xgmi_plpd_mode mode)
{
- uint32_t smu_version;
- int ret;
-
- ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if (ret) {
- dev_err(smu->adev->dev, "Failed to get smu version!\n");
- return ret;
- }
-
/* PPSMC_MSG_GmiPwrDnControl is supported by 54.23.0 and onwards */
- if (smu_version < 0x00361700) {
+ if (smu->smc_fw_version < 0x00361700) {
dev_err(smu->adev->dev, "XGMI power down control is only supported by PMFW 54.23.0 and onwards\n");
return -EINVAL;
}
- if (en)
+ if (mode == XGMI_PLPD_DEFAULT)
return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GmiPwrDnControl,
- 1,
- NULL);
-
- return smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_GmiPwrDnControl,
- 0,
- NULL);
+ 1, NULL);
+ else if (mode == XGMI_PLPD_DISALLOW)
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GmiPwrDnControl,
+ 0, NULL);
+ else
+ return -EINVAL;
}
static const struct throttling_logging_label {
@@ -2313,7 +2230,7 @@ static const struct throttling_logging_label {
static void arcturus_log_thermal_throttling_event(struct smu_context *smu)
{
int ret;
- int throttler_idx, throtting_events = 0, buf_idx = 0;
+ int throttler_idx, throttling_events = 0, buf_idx = 0;
struct amdgpu_device *adev = smu->adev;
uint32_t throttler_status;
char log_buf[256];
@@ -2328,11 +2245,11 @@ static void arcturus_log_thermal_throttling_event(struct smu_context *smu)
for (throttler_idx = 0; throttler_idx < ARRAY_SIZE(logging_label);
throttler_idx++) {
if (throttler_status & logging_label[throttler_idx].feature_mask) {
- throtting_events++;
+ throttling_events++;
buf_idx += snprintf(log_buf + buf_idx,
sizeof(log_buf) - buf_idx,
"%s%s",
- throtting_events > 1 ? " and " : "",
+ throttling_events > 1 ? " and " : "",
logging_label[throttler_idx].label);
if (buf_idx >= sizeof(log_buf)) {
dev_err(adev->dev, "buffer overflow!\n");
@@ -2433,7 +2350,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.set_default_dpm_table = arcturus_set_default_dpm_table,
.populate_umd_state_clk = arcturus_populate_umd_state_clk,
.get_thermal_temperature_range = arcturus_get_thermal_temperature_range,
- .print_clk_levels = arcturus_print_clk_levels,
+ .emit_clk_levels = arcturus_emit_clk_levels,
.force_clk_levels = arcturus_force_clk_levels,
.read_sensor = arcturus_read_sensor,
.get_fan_speed_pwm = arcturus_get_fan_speed_pwm,
@@ -2497,7 +2414,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
.set_df_cstate = arcturus_set_df_cstate,
- .allow_xgmi_power_down = arcturus_allow_xgmi_power_down,
+ .select_xgmi_plpd_policy = arcturus_select_xgmi_plpd_policy,
.log_thermal_throttling_event = arcturus_log_thermal_throttling_event,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
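Both arcturus changes above follow the pattern repeated for navi10 and sienna_cichlid below: get_power_limit() now derives an upper and a lower bound from the overdrive percentages instead of scaling a single maximum. A standalone sketch of that arithmetic (the sample wattages are made up):

#include <stdint.h>
#include <stdio.h>

/*
 * Mirrors the bound computation in arcturus_get_power_limit():
 * with overdrive enabled the limits stretch by the pptable
 * percentages; otherwise they collapse to [0, default].
 */
static void od_power_bounds(uint32_t power_limit, int od_enabled,
                            uint32_t od_percent_upper, uint32_t od_percent_lower,
                            uint32_t *max_limit, uint32_t *min_limit)
{
        if (!od_enabled) {
                od_percent_upper = 0;
                od_percent_lower = 100;
        }
        *max_limit = power_limit * (100 + od_percent_upper) / 100;
        *min_limit = power_limit * (100 - od_percent_lower) / 100;
}

int main(void)
{
        uint32_t max, min;

        od_power_bounds(300, 1, 10, 20, &max, &min); /* OD on: 330W / 240W */
        printf("od on:  max=%uW min=%uW\n", max, min);

        od_power_bounds(300, 0, 10, 20, &max, &min); /* OD off: 300W / 0W */
        printf("od off: max=%uW min=%uW\n", max, min);
        return 0;
}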
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index c564f6e19..a38233cc5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -345,8 +345,8 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
/* DPM UCLK enablement should be skipped for navi10 A0 secure board */
if (!(is_asic_secure(smu) &&
- (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) &&
- (adev->rev_id == 0)) &&
+ (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 0)) &&
+ (adev->rev_id == 0)) &&
(adev->pm.pp_feature & PP_MCLK_DPM_MASK))
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
| FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
@@ -354,7 +354,7 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
/* DS SOCCLK enablement should be skipped for navi10 A0 secure board */
if (is_asic_secure(smu) &&
- (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) &&
+ (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 0)) &&
(adev->rev_id == 0))
*(uint64_t *)feature_mask &=
~FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);
@@ -907,18 +907,11 @@ static int navi1x_get_smu_metrics_data(struct smu_context *smu,
uint32_t *value)
{
struct amdgpu_device *adev = smu->adev;
- uint32_t smu_version;
int ret = 0;
- ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if (ret) {
- dev_err(adev->dev, "Failed to get smu version!\n");
- return ret;
- }
-
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(11, 0, 9):
- if (smu_version > 0x00341C00)
+ if (smu->smc_fw_version > 0x00341C00)
ret = navi12_get_smu_metrics_data(smu, member, value);
else
ret = navi12_get_legacy_smu_metrics_data(smu, member, value);
@@ -926,8 +919,12 @@ static int navi1x_get_smu_metrics_data(struct smu_context *smu,
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 5):
default:
- if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && smu_version > 0x00351F00) ||
- ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && smu_version > 0x002A3B00))
+ if (((amdgpu_ip_version(adev, MP1_HWIP, 0) ==
+ IP_VERSION(11, 0, 5)) &&
+ smu->smc_fw_version > 0x00351F00) ||
+ ((amdgpu_ip_version(adev, MP1_HWIP, 0) ==
+ IP_VERSION(11, 0, 0)) &&
+ smu->smc_fw_version > 0x002A3B00))
ret = navi10_get_smu_metrics_data(smu, member, value);
else
ret = navi10_get_legacy_smu_metrics_data(smu, member, value);
@@ -1712,7 +1709,7 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu)
uint32_t sclk_freq;
pstate_table->gfxclk_pstate.min = gfx_table->min;
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
switch (adev->pdev->revision) {
case 0xf0: /* XTX */
@@ -2333,15 +2330,16 @@ static int navi10_display_disable_memory_clock_switch(struct smu_context *smu,
}
static int navi10_get_power_limit(struct smu_context *smu,
- uint32_t *current_power_limit,
- uint32_t *default_power_limit,
- uint32_t *max_power_limit)
+ uint32_t *current_power_limit,
+ uint32_t *default_power_limit,
+ uint32_t *max_power_limit,
+ uint32_t *min_power_limit)
{
struct smu_11_0_powerplay_table *powerplay_table =
(struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
PPTable_t *pptable = smu->smu_table.driver_pptable;
- uint32_t power_limit, od_percent;
+ uint32_t power_limit, od_percent_upper, od_percent_lower;
if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
/* the last hope to figure out the ppt limit */
@@ -2358,18 +2356,26 @@ static int navi10_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (max_power_limit) {
- if (smu->od_enabled &&
+ if (smu->od_enabled &&
navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
- od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ } else {
+ od_percent_upper = 0;
+ od_percent_lower = 100;
+ }
- dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);
+ dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
+ od_percent_upper, od_percent_lower, power_limit);
- power_limit *= (100 + od_percent);
- power_limit /= 100;
- }
+ if (max_power_limit) {
+ *max_power_limit = power_limit * (100 + od_percent_upper);
+ *max_power_limit /= 100;
+ }
- *max_power_limit = power_limit;
+ if (min_power_limit) {
+ *min_power_limit = power_limit * (100 - od_percent_lower);
+ *min_power_limit /= 100;
}
return 0;
@@ -2754,8 +2760,8 @@ static bool navi10_need_umc_cdr_workaround(struct smu_context *smu)
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
return false;
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0) ||
- adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5))
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 5))
return true;
return false;
@@ -2843,19 +2849,12 @@ static int navi10_run_umc_cdr_workaround(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
uint8_t umc_fw_greater_than_v136 = false;
uint8_t umc_fw_disable_cdr = false;
- uint32_t pmfw_version;
uint32_t param;
int ret = 0;
if (!navi10_need_umc_cdr_workaround(smu))
return 0;
- ret = smu_cmn_get_smc_version(smu, NULL, &pmfw_version);
- if (ret) {
- dev_err(adev->dev, "Failed to get smu version!\n");
- return ret;
- }
-
/*
* The messages below are only supported by Navi10 42.53.0 and later
* PMFWs and Navi14 53.29.0 and later PMFWs.
@@ -2863,8 +2862,10 @@ static int navi10_run_umc_cdr_workaround(struct smu_context *smu)
* - PPSMC_MSG_SetDriverDummyTableDramAddrLow
* - PPSMC_MSG_GetUMCFWWA
*/
- if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && (pmfw_version >= 0x2a3500)) ||
- ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && (pmfw_version >= 0x351D00))) {
+ if (((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 0)) &&
+ (smu->smc_fw_version >= 0x2a3500)) ||
+ ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 5)) &&
+ (smu->smc_fw_version >= 0x351D00))) {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GET_UMC_FW_WA,
0,
@@ -2883,13 +2884,15 @@ static int navi10_run_umc_cdr_workaround(struct smu_context *smu)
return 0;
if (umc_fw_disable_cdr) {
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0))
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
+ IP_VERSION(11, 0, 0))
return navi10_umc_hybrid_cdr_workaround(smu);
} else {
return navi10_set_dummy_pstates_table_location(smu);
}
} else {
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0))
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
+ IP_VERSION(11, 0, 0))
return navi10_umc_hybrid_cdr_workaround(smu);
}
@@ -3347,18 +3350,11 @@ static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,
void **table)
{
struct amdgpu_device *adev = smu->adev;
- uint32_t smu_version;
int ret = 0;
- ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if (ret) {
- dev_err(adev->dev, "Failed to get smu version!\n");
- return ret;
- }
-
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(11, 0, 9):
- if (smu_version > 0x00341C00)
+ if (smu->smc_fw_version > 0x00341C00)
ret = navi12_get_gpu_metrics(smu, table);
else
ret = navi12_get_legacy_gpu_metrics(smu, table);
@@ -3366,8 +3362,12 @@ static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 5):
default:
- if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && smu_version > 0x00351F00) ||
- ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && smu_version > 0x002A3B00))
+ if (((amdgpu_ip_version(adev, MP1_HWIP, 0) ==
+ IP_VERSION(11, 0, 5)) &&
+ smu->smc_fw_version > 0x00351F00) ||
+ ((amdgpu_ip_version(adev, MP1_HWIP, 0) ==
+ IP_VERSION(11, 0, 0)) &&
+ smu->smc_fw_version > 0x002A3B00))
ret = navi10_get_gpu_metrics(smu, table);
else
ret = navi10_get_legacy_gpu_metrics(smu, table);
@@ -3385,7 +3385,7 @@ static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
uint32_t param = 0;
/* Navi12 does not support this */
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 9))
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 9))
return 0;
/*
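The recurring mechanical change in this navi10 diff (and in the sienna_cichlid and smu_v11_0 diffs that follow) is replacing direct adev->ip_versions[...] indexing with the amdgpu_ip_version() accessor. Packing the version into one integer is what keeps the switch/compare style cheap; the demo macro below uses an illustrative bit layout, not upstream's exact definition, which has changed across releases:

#include <stdint.h>
#include <stdio.h>

/* Illustrative packing only; the point is that version checks
 * become plain integer compares usable as switch labels. */
#define DEMO_IP_VERSION(ver, maj, rev) (((ver) << 16) | ((maj) << 8) | (rev))

int main(void)
{
        uint32_t mp1 = DEMO_IP_VERSION(11, 0, 5); /* e.g. a Navi14 MP1 block */

        switch (mp1) {
        case DEMO_IP_VERSION(11, 0, 0):
                printf("navi10 path\n");
                break;
        case DEMO_IP_VERSION(11, 0, 5):
                printf("navi14 path\n");
                break;
        default:
                printf("generic path\n");
                break;
        }
        return 0;
}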
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index a7f4f82d2..1de9f8b5c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -73,12 +73,16 @@
#define SMU_11_0_7_GFX_BUSY_THRESHOLD 15
-#define GET_PPTABLE_MEMBER(field, member) do {\
- if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))\
- (*member) = (smu->smu_table.driver_pptable + offsetof(PPTable_beige_goby_t, field));\
- else\
- (*member) = (smu->smu_table.driver_pptable + offsetof(PPTable_t, field));\
-} while(0)
+#define GET_PPTABLE_MEMBER(field, member) \
+ do { \
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == \
+ IP_VERSION(11, 0, 13)) \
+ (*member) = (smu->smu_table.driver_pptable + \
+ offsetof(PPTable_beige_goby_t, field)); \
+ else \
+ (*member) = (smu->smu_table.driver_pptable + \
+ offsetof(PPTable_t, field)); \
+ } while (0)
/* STB FIFO depth is in 64bit units */
#define SIENNA_CICHLID_STB_DEPTH_UNIT_BYTES 8
@@ -91,7 +95,7 @@
static int get_table_size(struct smu_context *smu)
{
- if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 13))
return sizeof(PPTable_beige_goby_t);
else
return sizeof(PPTable_t);
@@ -309,7 +313,7 @@ sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu,
}
if ((adev->pm.pp_feature & PP_GFX_DCS_MASK) &&
- (adev->ip_versions[MP1_HWIP][0] > IP_VERSION(11, 0, 7)) &&
+ (amdgpu_ip_version(adev, MP1_HWIP, 0) > IP_VERSION(11, 0, 7)) &&
!(adev->flags & AMD_IS_APU))
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_DCS_BIT);
@@ -434,7 +438,7 @@ static int sienna_cichlid_append_powerplay_table(struct smu_context *smu)
PPTable_beige_goby_t *ppt_beige_goby;
PPTable_t *ppt;
- if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 13))
ppt_beige_goby = smu->smu_table.driver_pptable;
else
ppt = smu->smu_table.driver_pptable;
@@ -447,7 +451,7 @@ static int sienna_cichlid_append_powerplay_table(struct smu_context *smu)
if (ret)
return ret;
- if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 13))
smu_memcpy_trailing(ppt_beige_goby, I2cControllers, BoardReserved,
smc_dpm_table, I2cControllers);
else
@@ -616,11 +620,12 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *s
static int sienna_cichlid_get_power_limit(struct smu_context *smu,
uint32_t *current_power_limit,
uint32_t *default_power_limit,
- uint32_t *max_power_limit)
+ uint32_t *max_power_limit,
+ uint32_t *min_power_limit)
{
struct smu_11_0_7_powerplay_table *powerplay_table =
(struct smu_11_0_7_powerplay_table *)smu->smu_table.power_play_table;
- uint32_t power_limit, od_percent;
+ uint32_t power_limit, od_percent_upper, od_percent_lower;
uint16_t *table_member;
GET_PPTABLE_MEMBER(SocketPowerLimitAc, &table_member);
@@ -635,21 +640,26 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (max_power_limit) {
- if (smu->od_enabled) {
- od_percent =
- le32_to_cpu(powerplay_table->overdrive_table.max[
- SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+ if (smu->od_enabled) {
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+ } else {
+ od_percent_upper = 0;
+ od_percent_lower = 100;
+ }
- dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n",
- od_percent, power_limit);
+ dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
+ od_percent_upper, od_percent_lower, power_limit);
- power_limit *= (100 + od_percent);
- power_limit /= 100;
- }
- *max_power_limit = power_limit;
+ if (max_power_limit) {
+ *max_power_limit = power_limit * (100 + od_percent_upper);
+ *max_power_limit /= 100;
}
+ if (min_power_limit) {
+ *min_power_limit = power_limit * (100 - od_percent_lower);
+ *min_power_limit /= 100;
+ }
return 0;
}
@@ -668,7 +678,7 @@ static void sienna_cichlid_get_smartshift_power_percentage(struct smu_context *s
uint32_t cur_power_limit;
if (metrics_v4->ApuSTAPMSmartShiftLimit != 0) {
- sienna_cichlid_get_power_limit(smu, &cur_power_limit, NULL, NULL);
+ sienna_cichlid_get_power_limit(smu, &cur_power_limit, NULL, NULL, NULL);
apu_power_limit = metrics_v4->ApuSTAPMLimit;
dgpu_power_limit = cur_power_limit;
powerRatio = (((apu_power_limit +
@@ -725,7 +735,7 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
uint32_t apu_percent = 0;
uint32_t dgpu_percent = 0;
- switch (smu->adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
case IP_VERSION(11, 0, 7):
if (smu->smc_fw_version >= 0x3A4900)
use_metrics_v3 = true;
@@ -1275,7 +1285,6 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
uint32_t mark_index = 0;
uint32_t gen_speed, lane_width;
uint32_t min_value, max_value;
- uint32_t smu_version;
smu_cmn_get_sysfs_buf(&buf, &size);
@@ -1384,9 +1393,9 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
* OD GFX Voltage Offset functionality is supported only by 58.41.0
* and onwards SMU firmwares.
*/
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
- (smu_version < 0x003a2900))
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0) ==
+ IP_VERSION(11, 0, 7)) &&
+ (smu->smc_fw_version < 0x003a2900))
break;
size += sysfs_emit_at(buf, size, "OD_VDDGFX_OFFSET:\n");
@@ -1494,7 +1503,7 @@ static int sienna_cichlid_populate_umd_state_clk(struct smu_context *smu)
pstate_table->socclk_pstate.min = soc_table->min;
pstate_table->socclk_pstate.peak = soc_table->max;
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK;
@@ -1945,7 +1954,8 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu,
*size = 4;
break;
case AMDGPU_PP_SENSOR_SS_APU_SHARE:
- if (adev->ip_versions[MP1_HWIP][0] != IP_VERSION(11, 0, 7)) {
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) !=
+ IP_VERSION(11, 0, 7)) {
ret = sienna_cichlid_get_smu_metrics_data(smu,
METRICS_SS_APU_SHARE, (uint32_t *)data);
*size = 4;
@@ -1954,7 +1964,8 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu,
}
break;
case AMDGPU_PP_SENSOR_SS_DGPU_SHARE:
- if (adev->ip_versions[MP1_HWIP][0] != IP_VERSION(11, 0, 7)) {
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) !=
+ IP_VERSION(11, 0, 7)) {
ret = sienna_cichlid_get_smu_metrics_data(smu,
METRICS_SS_DGPU_SHARE, (uint32_t *)data);
*size = 4;
@@ -1978,7 +1989,7 @@ static void sienna_cichlid_get_unique_id(struct smu_context *smu)
/* Only supported as of version 0.58.83.0 and only on Sienna Cichlid */
if (smu->smc_fw_version < 0x3A5300 ||
- smu->adev->ip_versions[MP1_HWIP][0] != IP_VERSION(11, 0, 7))
+ amdgpu_ip_version(smu->adev, MP1_HWIP, 0) != IP_VERSION(11, 0, 7))
return;
if (sienna_cichlid_get_smu_metrics_data(smu, METRICS_UNIQUE_ID_UPPER32, &upper32))
@@ -1989,8 +2000,6 @@ static void sienna_cichlid_get_unique_id(struct smu_context *smu)
out:
adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
- if (adev->serial[0] == '\0')
- sprintf(adev->serial, "%016llx", adev->unique_id);
}
static int sienna_cichlid_get_uclk_dpm_states(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states)
@@ -2082,8 +2091,6 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
return ret;
}
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
-
static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
uint8_t pcie_gen_cap,
uint8_t pcie_width_cap)
@@ -2099,12 +2106,12 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
- min_gen_speed = MAX(0, table_member1[0]);
- max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
+ min_gen_speed = max_t(uint8_t, 0, table_member1[0]);
+ max_gen_speed = min(pcie_gen_cap, table_member1[1]);
min_gen_speed = min_gen_speed > max_gen_speed ?
max_gen_speed : min_gen_speed;
- min_lane_width = MAX(1, table_member2[0]);
- max_lane_width = MIN(pcie_width_cap, table_member2[1]);
+ min_lane_width = max_t(uint8_t, 1, table_member2[0]);
+ max_lane_width = min(pcie_width_cap, table_member2[1]);
min_lane_width = min_lane_width > max_lane_width ?
max_lane_width : min_lane_width;
@@ -2145,16 +2152,14 @@ static void sienna_cichlid_dump_od_table(struct smu_context *smu,
OverDriveTable_t *od_table)
{
struct amdgpu_device *adev = smu->adev;
- uint32_t smu_version;
dev_dbg(smu->adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->GfxclkFmin,
od_table->GfxclkFmax);
dev_dbg(smu->adev->dev, "OD: Uclk: (%d, %d)\n", od_table->UclkFmin,
od_table->UclkFmax);
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if (!((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
- (smu_version < 0x003a2900)))
+ if (!((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 7)) &&
+ (smu->smc_fw_version < 0x003a2900)))
dev_dbg(smu->adev->dev, "OD: VddGfxOffset: %d\n", od_table->VddGfxOffset);
}
@@ -2232,7 +2237,6 @@ static int sienna_cichlid_od_edit_dpm_table(struct smu_context *smu,
enum SMU_11_0_7_ODSETTING_ID freq_setting;
uint16_t *freq_ptr;
int i, ret = 0;
- uint32_t smu_version;
if (!smu->od_enabled) {
dev_warn(smu->adev->dev, "OverDrive is not enabled!\n");
@@ -2385,9 +2389,9 @@ static int sienna_cichlid_od_edit_dpm_table(struct smu_context *smu,
* OD GFX Voltage Offset functionality is supported only by 58.41.0
* and onwards SMU firmwares.
*/
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
- (smu_version < 0x003a2900)) {
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0) ==
+ IP_VERSION(11, 0, 7)) &&
+ (smu->smc_fw_version < 0x003a2900)) {
dev_err(smu->adev->dev, "OD GFX Voltage offset functionality is supported "
"only by 58.41.0 and onwards SMU firmwares!\n");
return -EOPNOTSUPP;
@@ -2457,13 +2461,17 @@ static bool sienna_cichlid_is_mode1_reset_supported(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t val;
- u32 smu_version;
+ uint32_t smu_version;
+ int ret;
/*
* A SRIOV environment does not support SMU mode1 reset.
* PM FW supports mode1 reset from 58.26 onwards.
*/
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if (ret)
+ return false;
+
if (amdgpu_sriov_vf(adev) || (smu_version < 0x003a1a00))
return false;
@@ -3110,7 +3118,8 @@ static void sienna_cichlid_dump_pptable(struct smu_context *smu)
PPTable_t *pptable = table_context->driver_pptable;
int i;
- if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13)) {
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
+ IP_VERSION(11, 0, 13)) {
beige_goby_dump_pptable(smu);
return;
}
@@ -3915,7 +3924,7 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
uint16_t average_gfx_activity;
int ret = 0;
- switch (smu->adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
case IP_VERSION(11, 0, 7):
if (smu->smc_fw_version >= 0x3A4900)
use_metrics_v3 = true;
@@ -4031,8 +4040,10 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->current_fan_speed = use_metrics_v3 ? metrics_v3->CurrFanSpeed :
use_metrics_v2 ? metrics_v2->CurrFanSpeed : metrics->CurrFanSpeed;
- if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) && smu->smc_fw_version > 0x003A1E00) ||
- ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 11)) && smu->smc_fw_version > 0x00410400)) {
+ if (((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 7)) &&
+ smu->smc_fw_version > 0x003A1E00) ||
+ ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 11)) &&
+ smu->smc_fw_version > 0x00410400)) {
gpu_metrics->pcie_link_width = use_metrics_v3 ? metrics_v3->PcieWidth :
use_metrics_v2 ? metrics_v2->PcieWidth : metrics->PcieWidth;
gpu_metrics->pcie_link_speed = link_speed[use_metrics_v3 ? metrics_v3->PcieRate :
@@ -4053,14 +4064,9 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
static int sienna_cichlid_check_ecc_table_support(struct smu_context *smu)
{
- uint32_t if_version = 0xff, smu_version = 0xff;
int ret = 0;
- ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
- if (ret)
- return -EOPNOTSUPP;
-
- if (smu_version < SUPPORT_ECCTABLE_SMU_VERSION)
+ if (smu->smc_fw_version < SUPPORT_ECCTABLE_SMU_VERSION)
ret = -EOPNOTSUPP;
return ret;
@@ -4126,17 +4132,13 @@ static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu)
static int sienna_cichlid_gpo_control(struct smu_context *smu,
bool enablement)
{
- uint32_t smu_version;
int ret = 0;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFX_GPO_BIT)) {
- ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if (ret)
- return ret;
if (enablement) {
- if (smu_version < 0x003a2500) {
+ if (smu->smc_fw_version < 0x003a2500) {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetGpoFeaturePMask,
GFX_GPO_PACE_MASK | GFX_GPO_DEM_MASK,
@@ -4148,7 +4150,7 @@ static int sienna_cichlid_gpo_control(struct smu_context *smu,
NULL);
}
} else {
- if (smu_version < 0x003a2500) {
+ if (smu->smc_fw_version < 0x003a2500) {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetGpoFeaturePMask,
0,
@@ -4167,18 +4169,11 @@ static int sienna_cichlid_gpo_control(struct smu_context *smu,
static int sienna_cichlid_notify_2nd_usb20_port(struct smu_context *smu)
{
- uint32_t smu_version;
- int ret = 0;
-
- ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if (ret)
- return ret;
-
/*
* Message SMU_MSG_Enable2ndUSB20Port is supported by 58.45
* onwards PMFWs.
*/
- if (smu_version < 0x003A2D00)
+ if (smu->smc_fw_version < 0x003A2D00)
return 0;
return smu_cmn_send_smc_msg_with_param(smu,
@@ -4258,7 +4253,7 @@ static int sienna_cichlid_get_default_config_table_settings(struct smu_context *
table->gfx_activity_average_tau = 10;
table->mem_activity_average_tau = 10;
table->socket_power_average_tau = 100;
- if (adev->ip_versions[MP1_HWIP][0] != IP_VERSION(11, 0, 7))
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) != IP_VERSION(11, 0, 7))
table->apu_socket_power_average_tau = 100;
return 0;
@@ -4325,13 +4320,10 @@ static bool sienna_cichlid_is_mode2_reset_supported(struct smu_context *smu)
static int sienna_cichlid_mode2_reset(struct smu_context *smu)
{
- u32 smu_version;
int ret = 0, index;
struct amdgpu_device *adev = smu->adev;
int timeout = 100;
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
-
index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
SMU_MSG_DriverMode2Reset);
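The sienna_cichlid PCIe hunk above swaps the file-local MIN/MAX macros for the kernel's min()/max_t() while clamping the pptable's gen/lane range against the platform caps. A standalone version of that clamp, with made-up table values:

#include <stdint.h>
#include <stdio.h>

static uint8_t clamp_range(uint8_t tbl_min, uint8_t tbl_max,
                           uint8_t floor, uint8_t cap, uint8_t *out_max)
{
        uint8_t lo = tbl_min > floor ? tbl_min : floor;
        uint8_t hi = tbl_max < cap ? tbl_max : cap;

        /* never let the minimum exceed the (possibly capped) maximum */
        if (lo > hi)
                lo = hi;
        *out_max = hi;
        return lo;
}

int main(void)
{
        uint8_t max_gen, min_gen;

        /* pptable allows gen 1..4, platform caps at gen 3 */
        min_gen = clamp_range(1, 4, 0, 3, &max_gen);
        printf("pcie gen: min=%u max=%u\n", min_gen, max_gen);
        return 0;
}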
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 123c19bb6..c7bfa68bf 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -101,8 +101,8 @@ int smu_v11_0_init_microcode(struct smu_context *smu)
struct amdgpu_firmware_info *ucode = NULL;
if (amdgpu_sriov_vf(adev) &&
- ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 9)) ||
- (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7))))
+ ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 9)) ||
+ (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 7))))
return 0;
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
@@ -213,7 +213,7 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
if (smu->is_apu)
adev->pm.fw_version = smu_version;
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10;
break;
@@ -246,7 +246,7 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
break;
default:
dev_err(smu->adev->dev, "smu unsupported IP version: 0x%x.\n",
- adev->ip_versions[MP1_HWIP][0]);
+ amdgpu_ip_version(adev, MP1_HWIP, 0));
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV;
break;
}
@@ -474,9 +474,10 @@ int smu_v11_0_init_power(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
struct smu_power_context *smu_power = &smu->smu_power;
- size_t size = adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 5, 0) ?
- sizeof(struct smu_11_5_power_context) :
- sizeof(struct smu_11_0_power_context);
+ size_t size = amdgpu_ip_version(adev, MP1_HWIP, 0) ==
+ IP_VERSION(11, 5, 0) ?
+ sizeof(struct smu_11_5_power_context) :
+ sizeof(struct smu_11_0_power_context);
smu_power->power_context = kzalloc(size, GFP_KERNEL);
if (!smu_power->power_context)
@@ -731,10 +732,10 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
/* Navy_Flounder/Dimgrey_Cavefish do not support to change
* display num currently
*/
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 11) ||
- adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 5, 0) ||
- adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 12) ||
- adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 11) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 12) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 13))
return 0;
return smu_cmn_send_smc_msg_with_param(smu,
@@ -1103,7 +1104,7 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
int ret = 0;
struct amdgpu_device *adev = smu->adev;
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 9):
@@ -1173,7 +1174,7 @@ smu_v11_0_set_fan_speed_pwm(struct smu_context *smu, uint32_t speed)
uint32_t duty100, duty;
uint64_t tmp64;
- speed = MIN(speed, 255);
+ speed = min_t(uint32_t, speed, 255);
duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
CG_FDO_CTRL1, FMAX_DUTY100);
@@ -1248,7 +1249,7 @@ int smu_v11_0_get_fan_speed_pwm(struct smu_context *smu,
tmp64 = (uint64_t)duty * 255;
do_div(tmp64, duty100);
- *speed = MIN((uint32_t)tmp64, 255);
+ *speed = min_t(uint32_t, tmp64, 255);
return 0;
}
@@ -1593,7 +1594,7 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
return 0;
if (state == SMU_BACO_STATE_ENTER) {
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 0, 12):
@@ -1612,7 +1613,8 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
default:
if (!ras || !adev->ras_enabled ||
adev->gmc.xgmi.pending_reset) {
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 2)) {
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
+ IP_VERSION(11, 0, 2)) {
data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT);
data |= 0x80000000;
WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT, data);
@@ -1896,7 +1898,7 @@ int smu_v11_0_set_performance_level(struct smu_context *smu,
* Separate MCLK and SOCCLK soft min/max settings are not allowed
* on Arcturus.
*/
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 2)) {
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {
mclk_min = mclk_max = 0;
socclk_min = socclk_max = 0;
}
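The fan hunks in smu_v11_0.c scale between a 0-255 PWM value and the hardware duty range read from FMAX_DUTY100, with MIN() replaced by min_t() to keep the types explicit. A sketch of both directions, using a plain 64-bit divide where the kernel uses do_div():

#include <stdint.h>
#include <stdio.h>

/* PWM (0..255) -> hardware duty, given the FMAX_DUTY100 field */
static uint32_t pwm_to_duty(uint32_t speed, uint32_t duty100)
{
        uint64_t tmp64;

        speed = speed > 255 ? 255 : speed; /* min_t(uint32_t, speed, 255) */
        tmp64 = (uint64_t)speed * duty100;
        return (uint32_t)(tmp64 / 255);
}

/* hardware duty -> PWM (0..255) */
static uint32_t duty_to_pwm(uint32_t duty, uint32_t duty100)
{
        uint64_t tmp64;

        if (!duty100)
                return 0;
        tmp64 = (uint64_t)duty * 255;
        tmp64 /= duty100;
        return tmp64 > 255 ? 255 : (uint32_t)tmp64;
}

int main(void)
{
        uint32_t duty = pwm_to_duty(128, 100); /* ~50% duty */

        printf("duty=%u pwm=%u\n", duty, duty_to_pwm(duty, 100));
        return 0;
}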
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 201cec599..2ff6deede 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -225,14 +225,6 @@ static int vangogh_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
- uint32_t if_version;
- uint32_t smu_version;
- uint32_t ret = 0;
-
- ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
- if (ret) {
- return ret;
- }
SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
@@ -242,24 +234,15 @@ static int vangogh_tables_init(struct smu_context *smu)
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffExt_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, max(sizeof(SmuMetrics_t), sizeof(SmuMetrics_legacy_t)),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- if (if_version < 0x3) {
- SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_legacy_t),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_legacy_t), GFP_KERNEL);
- } else {
- SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
- }
+ smu_table->metrics_table = kzalloc(max(sizeof(SmuMetrics_t), sizeof(SmuMetrics_legacy_t)), GFP_KERNEL);
if (!smu_table->metrics_table)
goto err0_out;
smu_table->metrics_time = 0;
- if (smu_version >= 0x043F3E00)
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_3);
- else
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
+ smu_table->gpu_metrics_table_size = max(sizeof(struct gpu_metrics_v2_3), sizeof(struct gpu_metrics_v2_2));
smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table)
goto err1_out;
@@ -430,17 +413,9 @@ static int vangogh_common_get_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member,
uint32_t *value)
{
- struct amdgpu_device *adev = smu->adev;
- uint32_t if_version;
int ret = 0;
- ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
- if (ret) {
- dev_err(adev->dev, "Failed to get smu if version!\n");
- return ret;
- }
-
- if (if_version < 0x3)
+ if (smu->smc_fw_if_version < 0x3)
ret = vangogh_get_legacy_smu_metrics_data(smu, member, value);
else
ret = vangogh_get_smu_metrics_data(smu, member, value);
@@ -813,17 +788,9 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
static int vangogh_common_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- struct amdgpu_device *adev = smu->adev;
- uint32_t if_version;
int ret = 0;
- ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
- if (ret) {
- dev_err(adev->dev, "Failed to get smu if version!\n");
- return ret;
- }
-
- if (if_version < 0x3)
+ if (smu->smc_fw_if_version < 0x3)
ret = vangogh_print_legacy_clk_levels(smu, clk_type, buf);
else
ret = vangogh_print_clk_levels(smu, clk_type, buf);
@@ -1893,21 +1860,21 @@ static ssize_t vangogh_get_gpu_metrics_v2_4(struct smu_context *smu,
sizeof(uint16_t) * 4);
gpu_metrics->average_temperature_l3[0] = metrics.Average.L3Temperature[0];
- gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity;
- gpu_metrics->average_mm_activity = metrics.Current.UvdActivity;
+ gpu_metrics->average_gfx_activity = metrics.Average.GfxActivity;
+ gpu_metrics->average_mm_activity = metrics.Average.UvdActivity;
- gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
- gpu_metrics->average_cpu_power = metrics.Current.Power[0];
- gpu_metrics->average_soc_power = metrics.Current.Power[1];
- gpu_metrics->average_gfx_power = metrics.Current.Power[2];
+ gpu_metrics->average_socket_power = metrics.Average.CurrentSocketPower;
+ gpu_metrics->average_cpu_power = metrics.Average.Power[0];
+ gpu_metrics->average_soc_power = metrics.Average.Power[1];
+ gpu_metrics->average_gfx_power = metrics.Average.Power[2];
- gpu_metrics->average_cpu_voltage = metrics.Current.Voltage[0];
- gpu_metrics->average_soc_voltage = metrics.Current.Voltage[1];
- gpu_metrics->average_gfx_voltage = metrics.Current.Voltage[2];
+ gpu_metrics->average_cpu_voltage = metrics.Average.Voltage[0];
+ gpu_metrics->average_soc_voltage = metrics.Average.Voltage[1];
+ gpu_metrics->average_gfx_voltage = metrics.Average.Voltage[2];
- gpu_metrics->average_cpu_current = metrics.Current.Current[0];
- gpu_metrics->average_soc_current = metrics.Current.Current[1];
- gpu_metrics->average_gfx_current = metrics.Current.Current[2];
+ gpu_metrics->average_cpu_current = metrics.Average.Current[0];
+ gpu_metrics->average_soc_current = metrics.Average.Current[1];
+ gpu_metrics->average_gfx_current = metrics.Average.Current[2];
memcpy(&gpu_metrics->average_core_power[0],
&metrics.Average.CorePower[0],
@@ -2011,18 +1978,12 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu,
void **table)
{
- uint32_t if_version;
- uint32_t smu_version;
uint32_t smu_program;
uint32_t fw_version;
int ret = 0;
- ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
- if (ret)
- return ret;
-
- smu_program = (smu_version >> 24) & 0xff;
- fw_version = smu_version & 0xffffff;
+ smu_program = (smu->smc_fw_version >> 24) & 0xff;
+ fw_version = smu->smc_fw_version & 0xffffff;
if (smu_program == 6) {
if (fw_version >= 0x3F0800)
ret = vangogh_get_gpu_metrics_v2_4(smu, table);
@@ -2030,13 +1991,13 @@ static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu,
ret = vangogh_get_gpu_metrics_v2_3(smu, table);
} else {
- if (smu_version >= 0x043F3E00) {
- if (if_version < 0x3)
+ if (smu->smc_fw_version >= 0x043F3E00) {
+ if (smu->smc_fw_if_version < 0x3)
ret = vangogh_get_legacy_gpu_metrics_v2_3(smu, table);
else
ret = vangogh_get_gpu_metrics_v2_3(smu, table);
} else {
- if (if_version < 0x3)
+ if (smu->smc_fw_if_version < 0x3)
ret = vangogh_get_legacy_gpu_metrics(smu, table);
else
ret = vangogh_get_gpu_metrics(smu, table);
@@ -2232,8 +2193,7 @@ static int vangogh_get_dpm_clock_table(struct smu_context *smu, struct dpm_clock
return 0;
}
-
-static int vangogh_system_features_control(struct smu_context *smu, bool en)
+static int vangogh_notify_rlc_state(struct smu_context *smu, bool en)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0;
@@ -2344,7 +2304,8 @@ static u32 vangogh_get_gfxoff_status(struct smu_context *smu)
static int vangogh_get_power_limit(struct smu_context *smu,
uint32_t *current_power_limit,
uint32_t *default_power_limit,
- uint32_t *max_power_limit)
+ uint32_t *max_power_limit,
+ uint32_t *min_power_limit)
{
struct smu_11_5_power_context *power_context =
smu->smu_power.power_context;
@@ -2366,6 +2327,8 @@ static int vangogh_get_power_limit(struct smu_context *smu,
*default_power_limit = ppt_limit / 1000;
if (max_power_limit)
*max_power_limit = 29;
+ if (min_power_limit)
+ *min_power_limit = 0;
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPPTLimit, &ppt_limit);
if (ret) {
@@ -2559,7 +2522,7 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
.print_clk_levels = vangogh_common_print_clk_levels,
.set_default_dpm_table = vangogh_set_default_dpm_tables,
.set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters,
- .system_features_control = vangogh_system_features_control,
+ .notify_rlc_state = vangogh_notify_rlc_state,
.feature_is_enabled = smu_cmn_feature_is_enabled,
.set_power_profile_mode = vangogh_set_power_profile_mode,
.get_power_profile_mode = vangogh_get_power_profile_mode,
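The vangogh tables_init hunk stops sizing the metrics buffer by firmware version at init time: it allocates for the larger of the two layouts and picks the parser later via smc_fw_if_version. A standalone sketch of that pattern with dummy stand-in structs:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* stand-ins for SmuMetrics_legacy_t / SmuMetrics_t */
struct metrics_legacy { uint16_t gfx_activity; };
struct metrics_v3     { uint16_t gfx_activity; uint16_t extra[8]; };

#define MAX_SZ(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
        /* one buffer big enough for either layout */
        size_t sz = MAX_SZ(sizeof(struct metrics_legacy),
                           sizeof(struct metrics_v3));
        void *buf = calloc(1, sz);
        uint32_t if_version = 0x2; /* pretend firmware interface version */
        uint16_t activity;

        if (!buf)
                return 1;

        /* the parser is chosen at read time, not allocation time */
        if (if_version < 0x3)
                activity = ((struct metrics_legacy *)buf)->gfx_activity;
        else
                activity = ((struct metrics_v3 *)buf)->gfx_activity;

        printf("buffer=%zu bytes, activity=%u\n", sz, activity);
        free(buf);
        return 0;
}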
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index c8119491c..8908bbb3f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -1198,8 +1198,12 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->AverageUvdActivity / 100;
break;
case METRICS_CURR_SOCKETPOWER:
- if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1)) && (adev->pm.fw_version >= 0x40000f)) ||
- ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0)) && (adev->pm.fw_version >= 0x373200)))
+ if (((amdgpu_ip_version(adev, MP1_HWIP, 0) ==
+ IP_VERSION(12, 0, 1)) &&
+ (adev->pm.fw_version >= 0x40000f)) ||
+ ((amdgpu_ip_version(adev, MP1_HWIP, 0) ==
+ IP_VERSION(12, 0, 0)) &&
+ (adev->pm.fw_version >= 0x373200)))
*value = metrics->CurrentSocketPower << 8;
else
*value = (metrics->CurrentSocketPower << 8) / 1000;
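The renoir hunk keys the unit of CurrentSocketPower on firmware version: newer PMFW reports watts, older reports milliwatts, and both are normalized to the same scaled encoding. The sketch below assumes the consumer expects 8.8 fixed-point watts (1/256 W units), which is an inference from the shifts rather than something stated in the patch:

#include <stdint.h>
#include <stdio.h>

static uint32_t socket_power_fixed(uint32_t raw, int fw_reports_watts)
{
        if (fw_reports_watts)
                return raw << 8;          /* W -> 8.8 fixed point */
        return (raw << 8) / 1000;         /* mW -> same encoding */
}

int main(void)
{
        printf("new fw, 15 W    -> %u\n", socket_power_fixed(15, 1));
        printf("old fw, 15000mW -> %u\n", socket_power_fixed(15000, 0));
        return 0;
}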
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 08fff9600..f1440869d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -470,18 +470,12 @@ static bool aldebaran_is_primary(struct smu_context *smu)
static int aldebaran_run_board_btc(struct smu_context *smu)
{
- u32 smu_version;
int ret;
if (!aldebaran_is_primary(smu))
return 0;
- ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if (ret) {
- dev_err(smu->adev->dev, "Failed to get smu version!\n");
- return ret;
- }
- if (smu_version <= 0x00441d00)
+ if (smu->smc_fw_version <= 0x00441d00)
return 0;
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BoardPowerCalibration, NULL);
@@ -553,9 +547,9 @@ static int aldebaran_populate_umd_state_clk(struct smu_context *smu)
return 0;
}
-static int aldebaran_get_clk_table(struct smu_context *smu,
- struct pp_clock_levels_with_latency *clocks,
- struct smu_13_0_dpm_table *dpm_table)
+static void aldebaran_get_clk_table(struct smu_context *smu,
+ struct pp_clock_levels_with_latency *clocks,
+ struct smu_13_0_dpm_table *dpm_table)
{
uint32_t i;
@@ -569,7 +563,6 @@ static int aldebaran_get_clk_table(struct smu_context *smu,
clocks->data[i].latency_in_us = 0;
}
- return 0;
}
static int aldebaran_freqs_in_same_level(int32_t frequency1,
@@ -739,25 +732,26 @@ static int aldebaran_get_current_clk_freq_by_table(struct smu_context *smu,
value);
}
-static int aldebaran_print_clk_levels(struct smu_context *smu,
- enum smu_clk_type type, char *buf)
+static int aldebaran_emit_clk_levels(struct smu_context *smu,
+ enum smu_clk_type type, char *buf, int *offset)
{
- int i, now, size = 0;
int ret = 0;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
struct pp_clock_levels_with_latency clocks;
struct smu_13_0_dpm_table *single_dpm_table;
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_13_0_dpm_context *dpm_context = NULL;
+ uint32_t i;
int display_levels;
uint32_t freq_values[3] = {0};
- uint32_t min_clk, max_clk;
-
- smu_cmn_get_sysfs_buf(&buf, &size);
+ uint32_t min_clk, max_clk, cur_value = 0;
+ bool freq_match;
+ unsigned int clock_mhz;
+ static const char attempt_string[] = "Attempt to get current";
if (amdgpu_ras_intr_triggered()) {
- size += sysfs_emit_at(buf, size, "unavailable\n");
- return size;
+ *offset += sysfs_emit_at(buf, *offset, "unavailable\n");
+ return -EBUSY;
}
dpm_context = smu_dpm->dpm_context;
@@ -765,21 +759,17 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
switch (type) {
case SMU_OD_SCLK:
- size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK");
+ *offset += sysfs_emit_at(buf, *offset, "%s:\n", "GFXCLK");
fallthrough;
case SMU_SCLK:
- ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now);
+ ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &cur_value);
if (ret) {
- dev_err(smu->adev->dev, "Attempt to get current gfx clk Failed!");
+ dev_err(smu->adev->dev, "%s gfx clk Failed!", attempt_string);
return ret;
}
single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
- ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev, "Attempt to get gfx clk levels Failed!");
- return ret;
- }
+ aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
display_levels = (clocks.num_levels == 1) ? 1 : 2;
@@ -790,147 +780,110 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
freq_values[1] = max_clk;
/* fine-grained dpm has only 2 levels */
- if (now > min_clk && now < max_clk) {
+ if (cur_value > min_clk && cur_value < max_clk) {
display_levels++;
freq_values[2] = max_clk;
- freq_values[1] = now;
+ freq_values[1] = cur_value;
}
-
- for (i = 0; i < display_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i,
- freq_values[i],
- (display_levels == 1) ?
- "*" :
- (aldebaran_freqs_in_same_level(
- freq_values[i], now) ?
- "*" :
- ""));
-
break;
case SMU_OD_MCLK:
- size += sysfs_emit_at(buf, size, "%s:\n", "MCLK");
+ *offset += sysfs_emit_at(buf, *offset, "%s:\n", "MCLK");
fallthrough;
case SMU_MCLK:
- ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &now);
+ ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &cur_value);
if (ret) {
- dev_err(smu->adev->dev, "Attempt to get current mclk Failed!");
+ dev_err(smu->adev->dev, "%s mclk Failed!", attempt_string);
return ret;
}
single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
- ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev, "Attempt to get memory clk levels Failed!");
- return ret;
- }
-
- for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
- i, clocks.data[i].clocks_in_khz / 1000,
- (clocks.num_levels == 1) ? "*" :
- (aldebaran_freqs_in_same_level(
- clocks.data[i].clocks_in_khz / 1000,
- now) ? "*" : ""));
+ aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
break;
case SMU_SOCCLK:
- ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_SOCCLK, &now);
+ ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_SOCCLK, &cur_value);
if (ret) {
- dev_err(smu->adev->dev, "Attempt to get current socclk Failed!");
+ dev_err(smu->adev->dev, "%s socclk Failed!", attempt_string);
return ret;
}
single_dpm_table = &(dpm_context->dpm_tables.soc_table);
- ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev, "Attempt to get socclk levels Failed!");
- return ret;
- }
-
- for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
- i, clocks.data[i].clocks_in_khz / 1000,
- (clocks.num_levels == 1) ? "*" :
- (aldebaran_freqs_in_same_level(
- clocks.data[i].clocks_in_khz / 1000,
- now) ? "*" : ""));
+ aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
break;
case SMU_FCLK:
- ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_FCLK, &now);
+ ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_FCLK, &cur_value);
if (ret) {
- dev_err(smu->adev->dev, "Attempt to get current fclk Failed!");
+ dev_err(smu->adev->dev, "%s fclk Failed!", attempt_string);
return ret;
}
single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
- ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev, "Attempt to get fclk levels Failed!");
- return ret;
- }
-
- for (i = 0; i < single_dpm_table->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
- i, single_dpm_table->dpm_levels[i].value,
- (clocks.num_levels == 1) ? "*" :
- (aldebaran_freqs_in_same_level(
- clocks.data[i].clocks_in_khz / 1000,
- now) ? "*" : ""));
+ aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
break;
case SMU_VCLK:
- ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_VCLK, &now);
+ ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_VCLK, &cur_value);
if (ret) {
- dev_err(smu->adev->dev, "Attempt to get current vclk Failed!");
+ dev_err(smu->adev->dev, "%s vclk Failed!", attempt_string);
return ret;
}
single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
- ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev, "Attempt to get vclk levels Failed!");
- return ret;
- }
-
- for (i = 0; i < single_dpm_table->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
- i, single_dpm_table->dpm_levels[i].value,
- (clocks.num_levels == 1) ? "*" :
- (aldebaran_freqs_in_same_level(
- clocks.data[i].clocks_in_khz / 1000,
- now) ? "*" : ""));
+ aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
break;
case SMU_DCLK:
- ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_DCLK, &now);
+ ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_DCLK, &cur_value);
if (ret) {
- dev_err(smu->adev->dev, "Attempt to get current dclk Failed!");
+ dev_err(smu->adev->dev, "%s dclk Failed!", attempt_string);
return ret;
}
single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
- ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev, "Attempt to get dclk levels Failed!");
- return ret;
- }
-
- for (i = 0; i < single_dpm_table->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
- i, single_dpm_table->dpm_levels[i].value,
- (clocks.num_levels == 1) ? "*" :
- (aldebaran_freqs_in_same_level(
- clocks.data[i].clocks_in_khz / 1000,
- now) ? "*" : ""));
+ aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
break;
default:
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case SMU_OD_SCLK:
+ case SMU_SCLK:
+ for (i = 0; i < display_levels; i++) {
+ clock_mhz = freq_values[i];
+ freq_match = aldebaran_freqs_in_same_level(clock_mhz, cur_value);
+ freq_match |= (display_levels == 1);
+
+ *offset += sysfs_emit_at(buf, *offset, "%d: %uMhz %s\n", i,
+ clock_mhz,
+ (freq_match) ? "*" : "");
+ }
break;
+
+ case SMU_OD_MCLK:
+ case SMU_MCLK:
+ case SMU_SOCCLK:
+ case SMU_FCLK:
+ case SMU_VCLK:
+ case SMU_DCLK:
+ for (i = 0; i < clocks.num_levels; i++) {
+ clock_mhz = clocks.data[i].clocks_in_khz / 1000;
+ freq_match = aldebaran_freqs_in_same_level(clock_mhz, cur_value);
+ freq_match |= (clocks.num_levels == 1);
+
+ *offset += sysfs_emit_at(buf, *offset, "%d: %uMhz %s\n",
+ i, clock_mhz,
+ (freq_match) ? "*" : "");
+ }
+ break;
+ default:
+ return -EINVAL;
}
- return size;
+ return 0;
}
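The rename from print_clk_levels to emit_clk_levels changes the contract: the callback now advances a caller-owned offset and reports errors through its return value instead of returning a byte count. A sketch of a caller under that assumed convention (not the actual amdgpu_pm code):

#include <errno.h>
#include <stdio.h>

/* ras_blocked models the amdgpu_ras_intr_triggered() early-out. */
static int demo_emit(int ras_blocked, char *buf, int *offset)
{
	if (ras_blocked) {
		*offset += sprintf(buf + *offset, "unavailable\n");
		return -EBUSY;
	}
	*offset += sprintf(buf + *offset, "0: 500Mhz *\n");
	*offset += sprintf(buf + *offset, "1: 1000Mhz\n");
	return 0;
}

int main(void)
{
	char buf[256];
	int offset = 0;

	if (demo_emit(0, buf, &offset) == 0)
		fwrite(buf, 1, (size_t)offset, stdout);
	return 0;
}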
static int aldebaran_upload_dpm_level(struct smu_context *smu,
@@ -1189,9 +1142,10 @@ static int aldebaran_read_sensor(struct smu_context *smu,
}
static int aldebaran_get_power_limit(struct smu_context *smu,
- uint32_t *current_power_limit,
- uint32_t *default_power_limit,
- uint32_t *max_power_limit)
+ uint32_t *current_power_limit,
+ uint32_t *default_power_limit,
+ uint32_t *max_power_limit,
+ uint32_t *min_power_limit)
{
PPTable_t *pptable = smu->smu_table.driver_pptable;
uint32_t power_limit = 0;
@@ -1204,7 +1158,8 @@ static int aldebaran_get_power_limit(struct smu_context *smu,
*default_power_limit = 0;
if (max_power_limit)
*max_power_limit = 0;
-
+ if (min_power_limit)
+ *min_power_limit = 0;
dev_warn(smu->adev->dev,
"PPT feature is not enabled, power values can't be fetched.");
@@ -1239,6 +1194,9 @@ static int aldebaran_get_power_limit(struct smu_context *smu,
*max_power_limit = pptable->PptLimit;
}
+ if (min_power_limit)
+ *min_power_limit = 0;
+
return 0;
}
@@ -1622,8 +1580,6 @@ static void aldebaran_get_unique_id(struct smu_context *smu)
out:
adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
- if (adev->serial[0] == '\0')
- sprintf(adev->serial, "%016llx", adev->unique_id);
}
static bool aldebaran_is_baco_supported(struct smu_context *smu)
@@ -1648,20 +1604,27 @@ static int aldebaran_set_df_cstate(struct smu_context *smu,
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
}
-static int aldebaran_allow_xgmi_power_down(struct smu_context *smu, bool en)
+static int aldebaran_select_xgmi_plpd_policy(struct smu_context *smu,
+ enum pp_xgmi_plpd_mode mode)
{
struct amdgpu_device *adev = smu->adev;
/* The message only works on master die and NACK will be sent
back for other dies, only send it on master die */
- if (!adev->smuio.funcs->get_socket_id(adev) &&
- !adev->smuio.funcs->get_die_id(adev))
+ if (adev->smuio.funcs->get_socket_id(adev) ||
+ adev->smuio.funcs->get_die_id(adev))
+ return 0;
+
+ if (mode == XGMI_PLPD_DEFAULT)
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GmiPwrDnControl,
+ 0, NULL);
+ else if (mode == XGMI_PLPD_DISALLOW)
return smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_GmiPwrDnControl,
- en ? 0 : 1,
- NULL);
+ SMU_MSG_GmiPwrDnControl,
+ 1, NULL);
else
- return 0;
+ return -EINVAL;
}
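The boolean allow/disallow flag becomes a per-link power-down policy enum; only the DEFAULT and DISALLOW modes map onto the 0/1 argument of GmiPwrDnControl, and anything else is rejected. A compact restatement of that mapping:

#include <errno.h>
#include <stdio.h>

enum demo_plpd_mode { PLPD_DEFAULT, PLPD_DISALLOW, PLPD_OPTIMIZED };

/* Maps the policy to the GmiPwrDnControl argument, as in the hunk. */
static int plpd_mode_to_arg(enum demo_plpd_mode mode)
{
	switch (mode) {
	case PLPD_DEFAULT:
		return 0;
	case PLPD_DISALLOW:
		return 1;
	default:
		return -EINVAL;	/* e.g. OPTIMIZED is not handled here */
	}
}

int main(void)
{
	printf("default:   %d\n", plpd_mode_to_arg(PLPD_DEFAULT));
	printf("disallow:  %d\n", plpd_mode_to_arg(PLPD_DISALLOW));
	printf("optimized: %d\n", plpd_mode_to_arg(PLPD_OPTIMIZED));
	return 0;
}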
static const struct throttling_logging_label {
@@ -1677,7 +1640,7 @@ static const struct throttling_logging_label {
static void aldebaran_log_thermal_throttling_event(struct smu_context *smu)
{
int ret;
- int throttler_idx, throtting_events = 0, buf_idx = 0;
+ int throttler_idx, throttling_events = 0, buf_idx = 0;
struct amdgpu_device *adev = smu->adev;
uint32_t throttler_status;
char log_buf[256];
@@ -1692,11 +1655,11 @@ static void aldebaran_log_thermal_throttling_event(struct smu_context *smu)
for (throttler_idx = 0; throttler_idx < ARRAY_SIZE(logging_label);
throttler_idx++) {
if (throttler_status & logging_label[throttler_idx].feature_mask) {
- throtting_events++;
+ throttling_events++;
buf_idx += snprintf(log_buf + buf_idx,
sizeof(log_buf) - buf_idx,
"%s%s",
- throtting_events > 1 ? " and " : "",
+ throttling_events > 1 ? " and " : "",
logging_label[throttler_idx].label);
if (buf_idx >= sizeof(log_buf)) {
dev_err(adev->dev, "buffer overflow!\n");
@@ -1808,24 +1771,15 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
static int aldebaran_check_ecc_table_support(struct smu_context *smu,
int *ecctable_version)
{
- uint32_t if_version = 0xff, smu_version = 0xff;
- int ret = 0;
-
- ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
- if (ret) {
- /* return not support if failed get smu_version */
- ret = -EOPNOTSUPP;
- }
-
- if (smu_version < SUPPORT_ECCTABLE_SMU_VERSION)
- ret = -EOPNOTSUPP;
- else if (smu_version >= SUPPORT_ECCTABLE_SMU_VERSION &&
- smu_version < SUPPORT_ECCTABLE_V2_SMU_VERSION)
+ if (smu->smc_fw_version < SUPPORT_ECCTABLE_SMU_VERSION)
+ return -EOPNOTSUPP;
+ else if (smu->smc_fw_version >= SUPPORT_ECCTABLE_SMU_VERSION &&
+ smu->smc_fw_version < SUPPORT_ECCTABLE_V2_SMU_VERSION)
*ecctable_version = 1;
else
*ecctable_version = 2;
- return ret;
+ return 0;
}
static ssize_t aldebaran_get_ecc_info(struct smu_context *smu,
@@ -1888,7 +1842,7 @@ static ssize_t aldebaran_get_ecc_info(struct smu_context *smu,
static int aldebaran_mode1_reset(struct smu_context *smu)
{
- u32 smu_version, fatal_err, param;
+ u32 fatal_err, param;
int ret = 0;
struct amdgpu_device *adev = smu->adev;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
@@ -1899,13 +1853,12 @@ static int aldebaran_mode1_reset(struct smu_context *smu)
/*
* PM FW support SMU_MSG_GfxDeviceDriverReset from 68.07
*/
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if (smu_version < 0x00440700) {
+ if (smu->smc_fw_version < 0x00440700) {
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
} else {
/* fatal error triggered by ras, PMFW supports the flag
from 68.44.0 */
- if ((smu_version >= 0x00442c00) && ras &&
+ if ((smu->smc_fw_version >= 0x00442c00) && ras &&
atomic_read(&ras->in_recovery))
fatal_err = 1;
@@ -1922,18 +1875,15 @@ static int aldebaran_mode1_reset(struct smu_context *smu)
static int aldebaran_mode2_reset(struct smu_context *smu)
{
- u32 smu_version;
int ret = 0, index;
struct amdgpu_device *adev = smu->adev;
int timeout = 10;
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
-
index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
SMU_MSG_GfxDeviceDriverReset);
mutex_lock(&smu->message_lock);
- if (smu_version >= 0x00441400) {
+ if (smu->smc_fw_version >= 0x00441400) {
ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, SMU_RESET_MODE_2);
/* This is similar to FLR, wait till max FLR timeout */
msleep(100);
@@ -1960,7 +1910,7 @@ static int aldebaran_mode2_reset(struct smu_context *smu)
} else {
dev_err(adev->dev, "smu fw 0x%x does not support MSG_GfxDeviceDriverReset MSG\n",
- smu_version);
+ smu->smc_fw_version);
}
if (ret == 1)
@@ -1983,14 +1933,20 @@ static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu)
{
#if 0
struct amdgpu_device *adev = smu->adev;
- u32 smu_version;
uint32_t val;
+ uint32_t smu_version;
+ int ret;
+
/**
* PM FW version support mode1 reset from 68.07
*/
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if (ret)
+ return false;
+
if ((smu_version < 0x00440700))
return false;
+
/**
* mode1 reset relies on PSP, so we should check if
* PSP is alive.
@@ -2034,19 +1990,10 @@ static int aldebaran_smu_send_hbm_bad_page_num(struct smu_context *smu,
static int aldebaran_check_bad_channel_info_support(struct smu_context *smu)
{
- uint32_t if_version = 0xff, smu_version = 0xff;
- int ret = 0;
-
- ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
- if (ret) {
- /* return not support if failed get smu_version */
- ret = -EOPNOTSUPP;
- }
-
- if (smu_version < SUPPORT_BAD_CHANNEL_INFO_MSG_VERSION)
- ret = -EOPNOTSUPP;
+ if (smu->smc_fw_version < SUPPORT_BAD_CHANNEL_INFO_MSG_VERSION)
+ return -EOPNOTSUPP;
- return ret;
+ return 0;
}
static int aldebaran_send_hbm_bad_channel_flag(struct smu_context *smu,
@@ -2074,7 +2021,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
.set_default_dpm_table = aldebaran_set_default_dpm_table,
.populate_umd_state_clk = aldebaran_populate_umd_state_clk,
.get_thermal_temperature_range = aldebaran_get_thermal_temperature_range,
- .print_clk_levels = aldebaran_print_clk_levels,
+ .emit_clk_levels = aldebaran_emit_clk_levels,
.force_clk_levels = aldebaran_force_clk_levels,
.read_sensor = aldebaran_read_sensor,
.set_performance_level = aldebaran_set_performance_level,
@@ -2116,7 +2063,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
.set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range,
.od_edit_dpm_table = aldebaran_usr_edit_dpm_table,
.set_df_cstate = aldebaran_set_df_cstate,
- .allow_xgmi_power_down = aldebaran_allow_xgmi_power_down,
+ .select_xgmi_plpd_policy = aldebaran_select_xgmi_plpd_policy,
.log_thermal_throttling_event = aldebaran_log_thermal_throttling_event,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index c097aed47..68981086b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -82,6 +82,8 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin");
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE
+#define ENABLE_IMU_ARG_GFXOFF_ENABLE 1
+
static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
const int pmfw_decoded_link_speed[5] = {1, 2, 3, 4, 5};
@@ -196,9 +198,9 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
if (!adev->scpm_enabled)
return 0;
- if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7)) ||
- (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) ||
- (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)))
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 7)) ||
+ (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0)) ||
+ (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10)))
return 0;
/* override pptable_id from driver parameter */
@@ -234,7 +236,7 @@ int smu_v13_0_check_fw_status(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
uint32_t mp1_fw_flags;
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 4):
case IP_VERSION(13, 0, 11):
mp1_fw_flags = RREG32_PCIE(MP1_Public |
@@ -269,7 +271,7 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
smu_minor = (smu_version >> 8) & 0xff;
smu_debug = (smu_version >> 0) & 0xff;
if (smu->is_apu ||
- adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 6))
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6))
adev->pm.fw_version = smu_version;
/* only for dGPU w/ SMU13*/
@@ -802,7 +804,7 @@ int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
int ret = 0;
struct amdgpu_device *adev = smu->adev;
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 3):
@@ -1170,7 +1172,7 @@ int smu_v13_0_set_fan_speed_pwm(struct smu_context *smu,
uint32_t duty100, duty;
uint64_t tmp64;
- speed = MIN(speed, 255);
+ speed = min_t(uint32_t, speed, 255);
if (smu_v13_0_auto_fan_control(smu, 0))
return -EINVAL;
@@ -1782,7 +1784,7 @@ int smu_v13_0_set_performance_level(struct smu_context *smu,
* Unset those settings for SMU 13.0.2. As soft limits settings
* for those clock domains are not supported.
*/
- if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)) {
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2)) {
mclk_min = mclk_max = 0;
socclk_min = socclk_max = 0;
vclk_min = vclk_max = 0;
@@ -1929,7 +1931,7 @@ static int smu_v13_0_get_dpm_level_count(struct smu_context *smu,
ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
/* SMU v13.0.2 FW returns 0 based max level, increment by one for it */
- if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)) && (!ret && value))
+ if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2)) && (!ret && value))
++(*value);
return ret;
@@ -1989,7 +1991,7 @@ int smu_v13_0_set_single_dpm_table(struct smu_context *smu,
return ret;
}
- if (smu->adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 2)) {
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 2)) {
ret = smu_v13_0_get_fine_grained_status(smu,
clk_type,
&single_dpm_table->is_fine_grained);
@@ -2304,11 +2306,17 @@ int smu_v13_0_baco_exit(struct smu_context *smu)
int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu)
{
uint16_t index;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableGfxImu,
+ ENABLE_IMU_ARG_GFXOFF_ENABLE, NULL);
+ }
index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
SMU_MSG_EnableGfxImu);
- /* Param 1 to tell PMFW to enable GFXOFF feature */
- return smu_cmn_send_msg_without_waiting(smu, index, 1);
+ return smu_cmn_send_msg_without_waiting(smu, index,
+ ENABLE_IMU_ARG_GFXOFF_ENABLE);
}
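The new branch distinguishes firmware load paths: with PSP-backed loading the EnableGfxImu message is sent synchronously, otherwise it is fired without waiting, in both cases carrying the now-named ENABLE_IMU_ARG_GFXOFF_ENABLE argument. A toy model of that split (the helpers are hypothetical stand-ins):

#include <stdio.h>

enum fw_load { LOAD_DIRECT, LOAD_PSP };

static int send_waiting(int arg)
{
	printf("EnableGfxImu(%d), waited for response\n", arg);
	return 0;
}

static int send_no_wait(int arg)
{
	printf("EnableGfxImu(%d), no wait\n", arg);
	return 0;
}

static int power_up_by_imu(enum fw_load type)
{
	const int gfxoff_enable = 1;	/* ENABLE_IMU_ARG_GFXOFF_ENABLE */

	return type == LOAD_PSP ? send_waiting(gfxoff_enable)
				: send_no_wait(gfxoff_enable);
}

int main(void)
{
	return power_up_by_imu(LOAD_PSP);
}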
int smu_v13_0_od_edit_dpm_table(struct smu_context *smu,
@@ -2471,3 +2479,16 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
return 0;
}
+
+int smu_v13_0_disable_pmfw_state(struct smu_context *smu)
+{
+ int ret;
+ struct amdgpu_device *adev = smu->adev;
+
+ WREG32_PCIE(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff), 0);
+
+ ret = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+
+ return ret == 0 ? 0 : -EINVAL;
+}
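smu_v13_0_disable_pmfw_state() clears the MP1 firmware-flags register and then reads it back, treating any nonzero readback as failure. The same write-then-verify shape, with a plain variable standing in for the PCIE register:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg = 0xdeadbeef;	/* stands in for the MP1 flags */

static int disable_pmfw_state(void)
{
	fake_reg = 0;				/* the WREG32_PCIE(..., 0) */
	return fake_reg == 0 ? 0 : -EINVAL;	/* the readback check */
}

int main(void)
{
	printf("disable_pmfw_state -> %d\n", disable_pmfw_state());
	return 0;
}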
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 4022dd44e..5625a6e57 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -101,6 +101,12 @@
#define PP_OD_FEATURE_UCLK_FMIN 2
#define PP_OD_FEATURE_UCLK_FMAX 3
#define PP_OD_FEATURE_GFX_VF_CURVE 4
+#define PP_OD_FEATURE_FAN_CURVE_TEMP 5
+#define PP_OD_FEATURE_FAN_CURVE_PWM 6
+#define PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT 7
+#define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8
+#define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9
+#define PP_OD_FEATURE_FAN_MINIMUM_PWM 10
#define LINK_SPEED_MAX 3
@@ -176,6 +182,7 @@ static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(VCLK1, PPCLK_VCLK_1),
CLK_MAP(DCLK, PPCLK_DCLK_0),
CLK_MAP(DCLK1, PPCLK_DCLK_1),
+ CLK_MAP(DCEFCLK, PPCLK_DCFCLK),
};
static struct cmn2asic_mapping smu_v13_0_0_feature_mask_map[SMU_FEATURE_COUNT] = {
@@ -289,7 +296,6 @@ smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu,
uint32_t *feature_mask, uint32_t num)
{
struct amdgpu_device *adev = smu->adev;
- u32 smu_version;
if (num > 2)
return -EINVAL;
@@ -309,8 +315,7 @@ smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu,
*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
/* PMFW 78.58 contains a critical fix for gfxoff feature */
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if ((smu_version < 0x004e3a00) ||
+ if ((smu->smc_fw_version < 0x004e3a00) ||
!(adev->pm.pp_feature & PP_GFXOFF_MASK))
*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT);
@@ -341,13 +346,10 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
table_context->power_play_table;
struct smu_baco_context *smu_baco = &smu->smu_baco;
PPTable_t *pptable = smu->smu_table.driver_pptable;
-#if 0
- PPTable_t *pptable = smu->smu_table.driver_pptable;
const OverDriveLimits_t * const overdrive_upperlimits =
&pptable->SkuTable.OverDriveLimitsBasicMax;
const OverDriveLimits_t * const overdrive_lowerlimits =
&pptable->SkuTable.OverDriveLimitsMin;
-#endif
if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC)
smu->dc_controlled_by_gpio = true;
@@ -359,27 +361,18 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
smu_baco->maco_support = true;
}
- /*
- * We are in the transition to a new OD mechanism.
- * Disable the OD feature support for SMU13 temporarily.
- * TODO: get this reverted when new OD mechanism online
- */
-#if 0
if (!overdrive_lowerlimits->FeatureCtrlMask ||
!overdrive_upperlimits->FeatureCtrlMask)
smu->od_enabled = false;
+ table_context->thermal_controller_type =
+ powerplay_table->thermal_controller_type;
+
/*
* Instead of having its own buffer space and get overdrive_table copied,
* smu->od_settings just points to the actual overdrive_table
*/
smu->od_settings = &powerplay_table->overdrive_table;
-#else
- smu->od_enabled = false;
-#endif
-
- table_context->thermal_controller_type =
- powerplay_table->thermal_controller_type;
smu->adev->pm.no_fan =
!(pptable->SkuTable.FeaturesToRun[0] & (1 << FEATURE_FAN_CONTROL_BIT));
@@ -707,6 +700,22 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu)
pcie_table->num_of_link_levels++;
}
+ /* dcefclk dpm table setup */
+ dpm_table = &dpm_context->dpm_tables.dcef_table;
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
+ ret = smu_v13_0_set_single_dpm_table(smu,
+ SMU_DCEFCLK,
+ dpm_table);
+ if (ret)
+ return ret;
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
+ dpm_table->dpm_levels[0].enabled = true;
+ dpm_table->min = dpm_table->dpm_levels[0].value;
+ dpm_table->max = dpm_table->dpm_levels[0].value;
+ }
+
return 0;
}
@@ -794,6 +803,9 @@ static int smu_v13_0_0_get_smu_metrics_data(struct smu_context *smu,
case METRICS_CURR_FCLK:
*value = metrics->CurrClock[PPCLK_FCLK];
break;
+ case METRICS_CURR_DCEFCLK:
+ *value = metrics->CurrClock[PPCLK_DCFCLK];
+ break;
case METRICS_AVERAGE_GFXCLK:
if (metrics->AverageGfxActivity <= SMU_13_0_0_BUSY_THRESHOLD)
*value = metrics->AverageGfxclkFrequencyPostDs;
@@ -1047,6 +1059,9 @@ static int smu_v13_0_0_get_current_clk_freq_by_table(struct smu_context *smu,
case PPCLK_DCLK_1:
member_type = METRICS_AVERAGE_DCLK1;
break;
+ case PPCLK_DCFCLK:
+ member_type = METRICS_CURR_DCEFCLK;
+ break;
default:
return -EINVAL;
}
@@ -1099,6 +1114,30 @@ static void smu_v13_0_0_get_od_setting_limits(struct smu_context *smu,
od_min_setting = overdrive_lowerlimits->VoltageOffsetPerZoneBoundary;
od_max_setting = overdrive_upperlimits->VoltageOffsetPerZoneBoundary;
break;
+ case PP_OD_FEATURE_FAN_CURVE_TEMP:
+ od_min_setting = overdrive_lowerlimits->FanLinearTempPoints;
+ od_max_setting = overdrive_upperlimits->FanLinearTempPoints;
+ break;
+ case PP_OD_FEATURE_FAN_CURVE_PWM:
+ od_min_setting = overdrive_lowerlimits->FanLinearPwmPoints;
+ od_max_setting = overdrive_upperlimits->FanLinearPwmPoints;
+ break;
+ case PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT:
+ od_min_setting = overdrive_lowerlimits->AcousticLimitRpmThreshold;
+ od_max_setting = overdrive_upperlimits->AcousticLimitRpmThreshold;
+ break;
+ case PP_OD_FEATURE_FAN_ACOUSTIC_TARGET:
+ od_min_setting = overdrive_lowerlimits->AcousticTargetRpmThreshold;
+ od_max_setting = overdrive_upperlimits->AcousticTargetRpmThreshold;
+ break;
+ case PP_OD_FEATURE_FAN_TARGET_TEMPERATURE:
+ od_min_setting = overdrive_lowerlimits->FanTargetTemperature;
+ od_max_setting = overdrive_upperlimits->FanTargetTemperature;
+ break;
+ case PP_OD_FEATURE_FAN_MINIMUM_PWM:
+ od_min_setting = overdrive_lowerlimits->FanMinimumPwm;
+ od_max_setting = overdrive_upperlimits->FanMinimumPwm;
+ break;
default:
od_min_setting = od_max_setting = INT_MAX;
break;
@@ -1196,6 +1235,9 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
case SMU_DCLK1:
single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
break;
+ case SMU_DCEFCLK:
+ single_dpm_table = &(dpm_context->dpm_tables.dcef_table);
+ break;
default:
break;
}
@@ -1209,6 +1251,7 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
case SMU_VCLK1:
case SMU_DCLK:
case SMU_DCLK1:
+ case SMU_DCEFCLK:
ret = smu_v13_0_0_get_current_clk_freq_by_table(smu, clk_type, &curr_freq);
if (ret) {
dev_err(smu->adev->dev, "Failed to get current clock freq!");
@@ -1304,16 +1347,115 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
od_table->OverDriveTable.UclkFmax);
break;
- case SMU_OD_VDDC_CURVE:
+ case SMU_OD_VDDGFX_OFFSET:
if (!smu_v13_0_0_is_od_feature_supported(smu,
PP_OD_FEATURE_GFX_VF_CURVE_BIT))
break;
- size += sysfs_emit_at(buf, size, "OD_VDDC_CURVE:\n");
- for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
- size += sysfs_emit_at(buf, size, "%d: %dmv\n",
+ size += sysfs_emit_at(buf, size, "OD_VDDGFX_OFFSET:\n");
+ size += sysfs_emit_at(buf, size, "%dmV\n",
+ od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[0]);
+ break;
+
+ case SMU_OD_FAN_CURVE:
+ if (!smu_v13_0_0_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_FAN_CURVE:\n");
+ for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++)
+ size += sysfs_emit_at(buf, size, "%d: %dC %d%%\n",
i,
- od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i]);
+ (int)od_table->OverDriveTable.FanLinearTempPoints[i],
+ (int)od_table->OverDriveTable.FanLinearPwmPoints[i]);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_CURVE_TEMP,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "FAN_CURVE(hotspot temp): %uC %uC\n",
+ min_value, max_value);
+
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_CURVE_PWM,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "FAN_CURVE(fan speed): %u%% %u%%\n",
+ min_value, max_value);
+
+ break;
+
+ case SMU_OD_ACOUSTIC_LIMIT:
+ if (!smu_v13_0_0_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_LIMIT:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.AcousticLimitRpmThreshold);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "ACOUSTIC_LIMIT: %u %u\n",
+ min_value, max_value);
+ break;
+
+ case SMU_OD_ACOUSTIC_TARGET:
+ if (!smu_v13_0_0_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_TARGET:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.AcousticTargetRpmThreshold);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ACOUSTIC_TARGET,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "ACOUSTIC_TARGET: %u %u\n",
+ min_value, max_value);
+ break;
+
+ case SMU_OD_FAN_TARGET_TEMPERATURE:
+ if (!smu_v13_0_0_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "FAN_TARGET_TEMPERATURE:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.FanTargetTemperature);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_TARGET_TEMPERATURE,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "TARGET_TEMPERATURE: %u %u\n",
+ min_value, max_value);
+ break;
+
+ case SMU_OD_FAN_MINIMUM_PWM:
+ if (!smu_v13_0_0_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "FAN_MINIMUM_PWM:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.FanMinimumPwm);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_MINIMUM_PWM,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "MINIMUM_PWM: %u %u\n",
+ min_value, max_value);
break;
case SMU_OD_RANGE:
@@ -1355,7 +1497,7 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
PP_OD_FEATURE_GFX_VF_CURVE,
&min_value,
&max_value);
- size += sysfs_emit_at(buf, size, "VDDC_CURVE: %7dmv %10dmv\n",
+ size += sysfs_emit_at(buf, size, "VDDGFX_OFFSET: %7dmv %10dmv\n",
min_value, max_value);
}
break;
@@ -1367,6 +1509,60 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
return size;
}
+
+static int smu_v13_0_0_od_restore_table_single(struct smu_context *smu, long input)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTableExternal_t *boot_overdrive_table =
+ (OverDriveTableExternal_t *)table_context->boot_overdrive_table;
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)table_context->overdrive_table;
+ struct amdgpu_device *adev = smu->adev;
+ int i;
+
+ switch (input) {
+ case PP_OD_EDIT_FAN_CURVE:
+ for (i = 0; i < NUM_OD_FAN_MAX_POINTS; i++) {
+ od_table->OverDriveTable.FanLinearTempPoints[i] =
+ boot_overdrive_table->OverDriveTable.FanLinearTempPoints[i];
+ od_table->OverDriveTable.FanLinearPwmPoints[i] =
+ boot_overdrive_table->OverDriveTable.FanLinearPwmPoints[i];
+ }
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ case PP_OD_EDIT_ACOUSTIC_LIMIT:
+ od_table->OverDriveTable.AcousticLimitRpmThreshold =
+ boot_overdrive_table->OverDriveTable.AcousticLimitRpmThreshold;
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ case PP_OD_EDIT_ACOUSTIC_TARGET:
+ od_table->OverDriveTable.AcousticTargetRpmThreshold =
+ boot_overdrive_table->OverDriveTable.AcousticTargetRpmThreshold;
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ case PP_OD_EDIT_FAN_TARGET_TEMPERATURE:
+ od_table->OverDriveTable.FanTargetTemperature =
+ boot_overdrive_table->OverDriveTable.FanTargetTemperature;
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ case PP_OD_EDIT_FAN_MINIMUM_PWM:
+ od_table->OverDriveTable.FanMinimumPwm =
+ boot_overdrive_table->OverDriveTable.FanMinimumPwm;
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ default:
+ dev_info(adev->dev, "Invalid table index: %ld\n", input);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int smu_v13_0_0_od_edit_dpm_table(struct smu_context *smu,
enum PP_OD_DPM_TABLE_COMMAND type,
long input[],
@@ -1504,39 +1700,167 @@ static int smu_v13_0_0_od_edit_dpm_table(struct smu_context *smu,
}
break;
- case PP_OD_EDIT_VDDC_CURVE:
+ case PP_OD_EDIT_VDDGFX_OFFSET:
if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
- dev_warn(adev->dev, "VF curve setting not supported!\n");
+ dev_warn(adev->dev, "Gfx offset setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFX_VF_CURVE,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
+ od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] = input[0];
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_FAN_CURVE:
+ if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
return -ENOTSUPP;
}
- if (input[0] >= PP_NUM_OD_VF_CURVE_POINTS ||
+ if (input[0] >= NUM_OD_FAN_MAX_POINTS - 1 ||
input[0] < 0)
return -EINVAL;
smu_v13_0_0_get_od_setting_limits(smu,
- PP_OD_FEATURE_GFX_VF_CURVE,
+ PP_OD_FEATURE_FAN_CURVE_TEMP,
&minimum,
&maximum);
if (input[1] < minimum ||
input[1] > maximum) {
- dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n",
+ dev_info(adev->dev, "Fan curve temp setting(%ld) must be within [%d, %d]!\n",
input[1], minimum, maximum);
return -EINVAL;
}
- od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[input[0]] = input[1];
- od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFX_VF_CURVE_BIT;
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_CURVE_PWM,
+ &minimum,
+ &maximum);
+ if (input[2] < minimum ||
+ input[2] > maximum) {
+ dev_info(adev->dev, "Fan curve pwm setting(%ld) must be within [%d, %d]!\n",
+ input[2], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.FanLinearTempPoints[input[0]] = input[1];
+ od_table->OverDriveTable.FanLinearPwmPoints[input[0]] = input[2];
+ od_table->OverDriveTable.FanMode = FAN_MODE_MANUAL_LINEAR;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_ACOUSTIC_LIMIT:
+ if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "acoustic limit threshold setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.AcousticLimitRpmThreshold = input[0];
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_ACOUSTIC_TARGET:
+ if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ACOUSTIC_TARGET,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "acoustic target threshold setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.AcousticTargetRpmThreshold = input[0];
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_FAN_TARGET_TEMPERATURE:
+ if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_TARGET_TEMPERATURE,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "fan target temperature setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.FanTargetTemperature = input[0];
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_FAN_MINIMUM_PWM:
+ if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_MINIMUM_PWM,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "fan minimum pwm setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.FanMinimumPwm = input[0];
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
break;
case PP_OD_RESTORE_DEFAULT_TABLE:
- feature_ctrlmask = od_table->OverDriveTable.FeatureCtrlMask;
- memcpy(od_table,
+ if (size == 1) {
+ ret = smu_v13_0_0_od_restore_table_single(smu, input[0]);
+ if (ret)
+ return ret;
+ } else {
+ feature_ctrlmask = od_table->OverDriveTable.FeatureCtrlMask;
+ memcpy(od_table,
table_context->boot_overdrive_table,
sizeof(OverDriveTableExternal_t));
- od_table->OverDriveTable.FeatureCtrlMask = feature_ctrlmask;
+ od_table->OverDriveTable.FeatureCtrlMask = feature_ctrlmask;
+ }
fallthrough;
-
case PP_OD_COMMIT_DPM_TABLE:
/*
* The member below instructs PMFW the settings focused in
@@ -1696,7 +2020,6 @@ static int smu_v13_0_0_get_thermal_temperature_range(struct smu_context *smu,
return 0;
}
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu,
void **table)
{
@@ -1720,12 +2043,12 @@ static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM];
gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX];
gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC];
- gpu_metrics->temperature_vrmem = MAX(metrics->AvgTemperature[TEMP_VR_MEM0],
+ gpu_metrics->temperature_vrmem = max(metrics->AvgTemperature[TEMP_VR_MEM0],
metrics->AvgTemperature[TEMP_VR_MEM1]);
gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
- gpu_metrics->average_mm_activity = MAX(metrics->Vcn0ActivityPercentage,
+ gpu_metrics->average_mm_activity = max(metrics->Vcn0ActivityPercentage,
metrics->Vcn1ActivityPercentage);
gpu_metrics->average_socket_power = metrics->AverageSocketPower;
@@ -1779,6 +2102,24 @@ static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v1_3);
}
+static void smu_v13_0_0_set_supported_od_feature_mask(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (smu_v13_0_0_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ adev->pm.od_feature_mask |= OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE |
+ OD_OPS_SUPPORT_FAN_CURVE_SET |
+ OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE |
+ OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET |
+ OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE |
+ OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET |
+ OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE |
+ OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET |
+ OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE |
+ OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET;
+}
+
static int smu_v13_0_0_set_default_od_settings(struct smu_context *smu)
{
OverDriveTableExternal_t *od_table =
@@ -1828,8 +2169,24 @@ static int smu_v13_0_0_set_default_od_settings(struct smu_context *smu)
for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
user_od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] =
user_od_table_bak.OverDriveTable.VoltageOffsetPerZoneBoundary[i];
+ for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++) {
+ user_od_table->OverDriveTable.FanLinearTempPoints[i] =
+ user_od_table_bak.OverDriveTable.FanLinearTempPoints[i];
+ user_od_table->OverDriveTable.FanLinearPwmPoints[i] =
+ user_od_table_bak.OverDriveTable.FanLinearPwmPoints[i];
+ }
+ user_od_table->OverDriveTable.AcousticLimitRpmThreshold =
+ user_od_table_bak.OverDriveTable.AcousticLimitRpmThreshold;
+ user_od_table->OverDriveTable.AcousticTargetRpmThreshold =
+ user_od_table_bak.OverDriveTable.AcousticTargetRpmThreshold;
+ user_od_table->OverDriveTable.FanTargetTemperature =
+ user_od_table_bak.OverDriveTable.FanTargetTemperature;
+ user_od_table->OverDriveTable.FanMinimumPwm =
+ user_od_table_bak.OverDriveTable.FanMinimumPwm;
}
+ smu_v13_0_0_set_supported_od_feature_mask(smu);
+
return 0;
}
@@ -1840,9 +2197,10 @@ static int smu_v13_0_0_restore_user_od_settings(struct smu_context *smu)
OverDriveTableExternal_t *user_od_table = table_context->user_overdrive_table;
int res;
- user_od_table->OverDriveTable.FeatureCtrlMask = 1U << PP_OD_FEATURE_GFXCLK_BIT |
- 1U << PP_OD_FEATURE_UCLK_BIT |
- 1U << PP_OD_FEATURE_GFX_VF_CURVE_BIT;
+ user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) |
+ BIT(PP_OD_FEATURE_UCLK_BIT) |
+ BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) |
+ BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
res = smu_v13_0_0_upload_overdrive_table(smu, user_od_table);
user_od_table->OverDriveTable.FeatureCtrlMask = 0;
if (res == 0)
@@ -1928,8 +2286,6 @@ static void smu_v13_0_0_get_unique_id(struct smu_context *smu)
out:
adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
- if (adev->serial[0] == '\0')
- sprintf(adev->serial, "%016llx", adev->unique_id);
}
static int smu_v13_0_0_get_fan_speed_pwm(struct smu_context *smu,
@@ -1949,7 +2305,7 @@ static int smu_v13_0_0_get_fan_speed_pwm(struct smu_context *smu,
}
/* Convert the PMFW output which is in percent to pwm(255) based */
- *speed = MIN(*speed * 255 / 100, 255);
+ *speed = min(*speed * 255 / 100, (uint32_t)255);
return 0;
}
@@ -1985,16 +2341,18 @@ static int smu_v13_0_0_enable_mgpu_fan_boost(struct smu_context *smu)
}
static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
- uint32_t *current_power_limit,
- uint32_t *default_power_limit,
- uint32_t *max_power_limit)
+ uint32_t *current_power_limit,
+ uint32_t *default_power_limit,
+ uint32_t *max_power_limit,
+ uint32_t *min_power_limit)
{
struct smu_table_context *table_context = &smu->smu_table;
struct smu_13_0_0_powerplay_table *powerplay_table =
(struct smu_13_0_0_powerplay_table *)table_context->power_play_table;
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
- uint32_t power_limit, od_percent;
+ uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
if (smu_v13_0_get_current_power_limit(smu, &power_limit))
power_limit = smu->adev->pm.ac_power ?
@@ -2006,16 +2364,25 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (max_power_limit) {
- if (smu->od_enabled) {
- od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+ if (smu->od_enabled) {
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+ } else {
+ od_percent_upper = 0;
+ od_percent_lower = 100;
+ }
- dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);
+ dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
+ od_percent_upper, od_percent_lower, power_limit);
- power_limit *= (100 + od_percent);
- power_limit /= 100;
- }
- *max_power_limit = power_limit;
+ if (max_power_limit) {
+ *max_power_limit = msg_limit * (100 + od_percent_upper);
+ *max_power_limit /= 100;
+ }
+
+ if (min_power_limit) {
+ *min_power_limit = power_limit * (100 - od_percent_lower);
+ *min_power_limit /= 100;
}
return 0;
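Worked numbers for the new limit math above: the maximum scales the message-path cap (msg_limit) up by the OD upper percentage, while the minimum scales the default limit down by the OD lower percentage. With illustrative inputs:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t msg_limit = 300, power_limit = 280;	/* watts, made up */
	uint32_t od_percent_upper = 15, od_percent_lower = 90;

	uint32_t max = msg_limit * (100 + od_percent_upper) / 100;
	uint32_t min = power_limit * (100 - od_percent_lower) / 100;

	printf("max_power_limit = %u W\n", max);	/* 345 */
	printf("min_power_limit = %u W\n", min);	/* 28 */
	return 0;
}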
@@ -2110,6 +2477,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
DpmActivityMonitorCoeffInt_t *activity_monitor =
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
int workload_type, ret = 0;
+ u32 workload_mask;
smu->power_profile_mode = input[size];
@@ -2171,9 +2539,23 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
if (workload_type < 0)
return -EINVAL;
+ workload_mask = 1 << workload_type;
+
+ /* Add optimizations for SMU13.0.0. Reuse the power saving profile */
+ if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE &&
+ (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0)) &&
+ ((smu->adev->pm.fw_version == 0x004e6601) ||
+ (smu->adev->pm.fw_version >= 0x004e7400))) {
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ PP_SMC_POWER_PROFILE_POWERSAVING);
+ if (workload_type >= 0)
+ workload_mask |= 1 << workload_type;
+ }
+
return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetWorkloadMask,
- 1 << workload_type,
+ workload_mask,
NULL);
}
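The COMPUTE profile now ORs in the POWERSAVING workload bit on qualifying 13.0.0 firmware, so SetWorkloadMask carries two hints at once. The bit composition, with hypothetical workload indices:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int compute_idx = 5, powersaving_idx = 1;	/* hypothetical */
	uint32_t mask = UINT32_C(1) << compute_idx;

	mask |= UINT32_C(1) << powersaving_idx;
	printf("SetWorkloadMask arg = 0x%02x\n", (unsigned)mask); /* 0x22 */
	return 0;
}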
@@ -2193,27 +2575,37 @@ static int smu_v13_0_0_baco_enter(struct smu_context *smu)
static int smu_v13_0_0_baco_exit(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
+ int ret;
if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
/* Wait for PMFW handling for the Dstate change */
usleep_range(10000, 11000);
- return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+ ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
} else {
- return smu_v13_0_baco_exit(smu);
+ ret = smu_v13_0_baco_exit(smu);
}
+
+ if (!ret)
+ adev->gfx.is_poweron = false;
+
+ return ret;
}
static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
u32 smu_version;
+ int ret;
/* SRIOV does not support SMU mode1 reset */
if (amdgpu_sriov_vf(adev))
return false;
/* PMFW support is available since 78.41 */
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if (ret)
+ return false;
+
if (smu_version < 0x004e2900)
return false;
@@ -2404,13 +2796,10 @@ static void smu_v13_0_0_set_mode1_reset_param(struct smu_context *smu,
uint32_t supported_version,
uint32_t *param)
{
- uint32_t smu_version;
struct amdgpu_device *adev = smu->adev;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
-
- if ((smu_version >= supported_version) &&
+ if ((smu->smc_fw_version >= supported_version) &&
ras && atomic_read(&ras->in_recovery))
/* Set RAS fatal error reset flag */
*param = 1 << 16;
@@ -2424,7 +2813,7 @@ static int smu_v13_0_0_mode1_reset(struct smu_context *smu)
uint32_t param;
struct amdgpu_device *adev = smu->adev;
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 0):
/* SMU 13_0_0 PMFW supports RAS fatal error reset from 78.77 */
smu_v13_0_0_set_mode1_reset_param(smu, 0x004e4d00, &param);
@@ -2457,7 +2846,7 @@ static int smu_v13_0_0_mode2_reset(struct smu_context *smu)
int ret;
struct amdgpu_device *adev = smu->adev;
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10))
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode2Reset, NULL);
else
return -EOPNOTSUPP;
@@ -2469,7 +2858,7 @@ static int smu_v13_0_0_enable_gfx_features(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10))
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures,
FEATURE_PWR_GFX, NULL);
else
@@ -2526,15 +2915,10 @@ static int smu_v13_0_0_send_bad_mem_channel_flag(struct smu_context *smu,
static int smu_v13_0_0_check_ecc_table_support(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- uint32_t if_version = 0xff, smu_version = 0xff;
int ret = 0;
- ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
- if (ret)
- return -EOPNOTSUPP;
-
- if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)) &&
- (smu_version >= SUPPORT_ECCTABLE_SMU_13_0_10_VERSION))
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10)) &&
+ (smu->smc_fw_version >= SUPPORT_ECCTABLE_SMU_13_0_10_VERSION))
return ret;
else
return -EOPNOTSUPP;
@@ -2581,6 +2965,55 @@ static ssize_t smu_v13_0_0_get_ecc_info(struct smu_context *smu,
return ret;
}
+static int smu_v13_0_0_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ SkuTable_t *skutable = &pptable->SkuTable;
+ uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)table_context->overdrive_table;
+ int ret = 0;
+
+ if (limit_type != SMU_DEFAULT_PPT_LIMIT)
+ return -EINVAL;
+
+ if (limit <= msg_limit) {
+ if (smu->current_power_limit > msg_limit) {
+ od_table->OverDriveTable.Ppt = 0;
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
+
+ ret = smu_v13_0_0_upload_overdrive_table(smu, od_table);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+ return ret;
+ }
+ }
+ return smu_v13_0_set_power_limit(smu, limit_type, limit);
+ } else if (smu->od_enabled) {
+ ret = smu_v13_0_set_power_limit(smu, limit_type, msg_limit);
+ if (ret)
+ return ret;
+
+ od_table->OverDriveTable.Ppt = (limit * 100) / msg_limit - 100;
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
+
+ ret = smu_v13_0_0_upload_overdrive_table(smu, od_table);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+ return ret;
+ }
+
+ smu->current_power_limit = limit;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
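For requests above the message-path cap, the new set_power_limit programs the overdrive Ppt field as a percentage overshoot: limit * 100 / msg_limit - 100. A worked example with made-up wattages:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t msg_limit = 300, limit = 330;	/* watts, made up */
	int32_t ppt = (int32_t)(limit * 100 / msg_limit) - 100;

	printf("OverDriveTable.Ppt = %+d%%\n", (int)ppt);	/* +10 */
	return 0;
}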
static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
@@ -2635,7 +3068,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.set_fan_control_mode = smu_v13_0_set_fan_control_mode,
.enable_mgpu_fan_boost = smu_v13_0_0_enable_mgpu_fan_boost,
.get_power_limit = smu_v13_0_0_get_power_limit,
- .set_power_limit = smu_v13_0_set_power_limit,
+ .set_power_limit = smu_v13_0_0_set_power_limit,
.set_power_source = smu_v13_0_set_power_source,
.get_power_profile_mode = smu_v13_0_0_get_power_profile_mode,
.set_power_profile_mode = smu_v13_0_0_set_power_profile_mode,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index 626591f54..bb98156b2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -1144,7 +1144,7 @@ void smu_v13_0_4_set_ppt_funcs(struct smu_context *smu)
smu->smc_driver_if_version = SMU13_0_4_DRIVER_IF_VERSION;
smu->is_apu = true;
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 4))
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 4))
smu_v13_0_4_set_smu_mailbox_registers(smu);
else
smu_v13_0_set_smu_mailbox_registers(smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
index c6e7c2115..0dce672ac 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
@@ -1060,7 +1060,7 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu,
return -EINVAL;
}
- if (sclk_min && sclk_max && smu_v13_0_5_clk_dpm_is_enabled(smu, SMU_SCLK)) {
+ if (sclk_min && sclk_max) {
ret = smu_v13_0_5_set_soft_freq_limited_range(smu,
SMU_SCLK,
sclk_min,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 24d681143..8cd2b8cc3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -44,9 +44,11 @@
#include "amdgpu_xgmi.h"
#include <linux/pci.h>
#include "amdgpu_ras.h"
+#include "amdgpu_mca.h"
#include "smu_cmn.h"
#include "mp/mp_13_0_6_offset.h"
#include "mp/mp_13_0_6_sh_mask.h"
+#include "umc_v12_0.h"
#undef MP1_Public
#undef smnMP1_FIRMWARE_FLAGS
@@ -64,6 +66,8 @@
#undef pr_info
#undef pr_debug
+MODULE_FIRMWARE("amdgpu/smu_13_0_6.bin");
+
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
#define SMU_13_0_6_FEA_MAP(smu_feature, smu_13_0_6_feature) \
@@ -91,6 +95,31 @@
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0x5
#define LINK_SPEED_MAX 4
+#define SMU_13_0_6_DSCLK_THRESHOLD 140
+
+#define MCA_BANK_IPID(_ip, _hwid, _type) \
+ [AMDGPU_MCA_IP_##_ip] = { .hwid = _hwid, .mcatype = _type, }
+
+struct mca_bank_ipid {
+ enum amdgpu_mca_ip ip;
+ uint16_t hwid;
+ uint16_t mcatype;
+};
+
+struct mca_ras_info {
+ enum amdgpu_ras_block blkid;
+ enum amdgpu_mca_ip ip;
+ int *err_code_array;
+ int err_code_count;
+ int (*get_err_count)(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
+ enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count);
+ bool (*bank_is_valid)(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
+ enum amdgpu_mca_error_type type, struct mca_bank_entry *entry);
+};
+
+#define P2S_TABLE_ID_A 0x50325341
+#define P2S_TABLE_ID_X 0x50325358
+
static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
@@ -133,6 +162,13 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareForDriverUnload, 0),
MSG_MAP(GetCTFLimit, PPSMC_MSG_GetCTFLimit, 0),
+ MSG_MAP(GetThermalLimit, PPSMC_MSG_ReadThrottlerLimit, 0),
+ MSG_MAP(ClearMcaOnRead, PPSMC_MSG_ClearMcaOnRead, 0),
+ MSG_MAP(QueryValidMcaCount, PPSMC_MSG_QueryValidMcaCount, 0),
+ MSG_MAP(QueryValidMcaCeCount, PPSMC_MSG_QueryValidMcaCeCount, 0),
+ MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, 0),
+ MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, 0),
+ MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0),
};
static const struct cmn2asic_mapping smu_v13_0_6_clk_map[SMU_CLK_COUNT] = {
@@ -207,6 +243,10 @@ struct PPTable_t {
};
#define SMUQ10_TO_UINT(x) ((x) >> 10)
+#define SMUQ10_FRAC(x) ((x) & 0x3ff)
+#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))
+#define GET_METRIC_FIELD(field) ((adev->flags & AMD_IS_APU) ?\
+ (metrics_a->field) : (metrics_x->field))
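The Q10 helpers above interpret metrics as 10-bit fixed point, and SMUQ10_ROUND rounds to nearest by adding one when the fractional part is at least 0.5 (0x200). A quick standalone check using the same macros:

#include <stdint.h>
#include <stdio.h>

#define SMUQ10_TO_UINT(x) ((x) >> 10)
#define SMUQ10_FRAC(x)    ((x) & 0x3ff)
#define SMUQ10_ROUND(x)   ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))

int main(void)
{
	uint32_t a = (2u << 10) | 0x100;	/* 2.25 in Q10 */
	uint32_t b = (2u << 10) | 0x300;	/* 2.75 in Q10 */

	printf("2.25 -> %u, 2.75 -> %u\n",
	       (unsigned)SMUQ10_ROUND(a), (unsigned)SMUQ10_ROUND(b));
	return 0;	/* prints 2 and 3 */
}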
struct smu_v13_0_6_dpm_map {
enum smu_clk_type clk_type;
@@ -215,6 +255,70 @@ struct smu_v13_0_6_dpm_map {
uint32_t *freq_table;
};
+static int smu_v13_0_6_init_microcode(struct smu_context *smu)
+{
+ const struct smc_firmware_header_v2_1 *v2_1;
+ const struct common_firmware_header *hdr;
+ struct amdgpu_firmware_info *ucode = NULL;
+ struct smc_soft_pptable_entry *entries;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t p2s_table_id = P2S_TABLE_ID_A;
+ int ret = 0, i, p2stable_count;
+ char ucode_prefix[15];
+ char fw_name[30];
+
+ /* No need to load P2S tables in IOV mode */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ if (!(adev->flags & AMD_IS_APU))
+ p2s_table_id = P2S_TABLE_ID_X;
+
+ amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix,
+ sizeof(ucode_prefix));
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
+
+ ret = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
+ if (ret)
+ goto out;
+
+ hdr = (const struct common_firmware_header *)adev->pm.fw->data;
+ amdgpu_ucode_print_smc_hdr(hdr);
+
+ /* The SMU v13.0.6 binary file doesn't carry pptables; instead, the
+ * entries are used to carry p2s tables.
+ */
+ v2_1 = (const struct smc_firmware_header_v2_1 *)adev->pm.fw->data;
+ entries = (struct smc_soft_pptable_entry
+ *)((uint8_t *)v2_1 +
+ le32_to_cpu(v2_1->pptable_entry_offset));
+ p2stable_count = le32_to_cpu(v2_1->pptable_count);
+ for (i = 0; i < p2stable_count; i++) {
+ if (le32_to_cpu(entries[i].id) == p2s_table_id) {
+ smu->pptable_firmware.data =
+ ((uint8_t *)v2_1 +
+ le32_to_cpu(entries[i].ppt_offset_bytes));
+ smu->pptable_firmware.size =
+ le32_to_cpu(entries[i].ppt_size_bytes);
+ break;
+ }
+ }
+
+ if (smu->pptable_firmware.data && smu->pptable_firmware.size) {
+ ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
+ ucode->ucode_id = AMDGPU_UCODE_ID_P2S_TABLE;
+ ucode->fw = &smu->pptable_firmware;
+ adev->firmware.fw_size += ALIGN(ucode->fw->size, PAGE_SIZE);
+ }
+
+ return 0;
+out:
+ amdgpu_ucode_release(&adev->pm.fw);
+
+ return ret;
+}
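
The scan above follows the usual pattern for extracting one sub-table from a v2_1 firmware image: walk the little-endian entry array and keep a pointer/size view into the already-loaded blob rather than copying it. Reduced to its essentials, with hypothetical types standing in for the real headers:

    #include <stdint.h>
    #include <stddef.h>

    struct entry { uint32_t id, size, offset; };    /* hypothetical layout */

    struct blob { const uint8_t *data; uint32_t size; };

    /* Return a view into `img` for the entry tagged `id`, or an empty view. */
    static struct blob find_table(const uint8_t *img, const struct entry *e,
                                  uint32_t count, uint32_t id)
    {
            struct blob b = { NULL, 0 };
            uint32_t i;

            for (i = 0; i < count; i++) {
                    if (e[i].id == id) {    /* real code uses le32_to_cpu() */
                            b.data = img + e[i].offset;
                            b.size = e[i].size;
                            break;
                    }
            }
            return b;
    }

    int main(void)
    {
            static const uint8_t img[32];
            static const struct entry e[] = { { 0x50325358, 8, 16 } };
            struct blob b = find_table(img, e, 1, 0x50325358);

            return b.size == 8 ? 0 : 1;
    }
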
+
static int smu_v13_0_6_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
@@ -225,7 +329,8 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(MetricsTable_t),
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS,
+ max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)),
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
@@ -233,12 +338,13 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
- smu_table->metrics_table = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
+ smu_table->metrics_table = kzalloc(max(sizeof(MetricsTableX_t),
+ sizeof(MetricsTableA_t)), GFP_KERNEL);
if (!smu_table->metrics_table)
return -ENOMEM;
smu_table->metrics_time = 0;
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_5);
smu_table->gpu_metrics_table =
kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table) {
@@ -329,9 +435,11 @@ static int smu_v13_0_6_get_metrics_table(struct smu_context *smu,
static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+ MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
+ MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
+ struct amdgpu_device *adev = smu->adev;
int ret, i, retry = 100;
/* Store one-time values in driver PPTable */
@@ -342,7 +450,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
return ret;
/* Ensure that metrics have been updated */
- if (metrics->AccumulationCounter)
+ if (GET_METRIC_FIELD(AccumulationCounter))
break;
usleep_range(1000, 1100);
@@ -352,29 +460,29 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
return -ETIME;
pptable->MaxSocketPowerLimit =
- SMUQ10_TO_UINT(metrics->MaxSocketPowerLimit);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit));
pptable->MaxGfxclkFrequency =
- SMUQ10_TO_UINT(metrics->MaxGfxclkFrequency);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency));
pptable->MinGfxclkFrequency =
- SMUQ10_TO_UINT(metrics->MinGfxclkFrequency);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency));
for (i = 0; i < 4; ++i) {
pptable->FclkFrequencyTable[i] =
- SMUQ10_TO_UINT(metrics->FclkFrequencyTable[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable)[i]);
pptable->UclkFrequencyTable[i] =
- SMUQ10_TO_UINT(metrics->UclkFrequencyTable[i]);
- pptable->SocclkFrequencyTable[i] = SMUQ10_TO_UINT(
- metrics->SocclkFrequencyTable[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable)[i]);
+ pptable->SocclkFrequencyTable[i] = SMUQ10_ROUND(
+ GET_METRIC_FIELD(SocclkFrequencyTable)[i]);
pptable->VclkFrequencyTable[i] =
- SMUQ10_TO_UINT(metrics->VclkFrequencyTable[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable)[i]);
pptable->DclkFrequencyTable[i] =
- SMUQ10_TO_UINT(metrics->DclkFrequencyTable[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable)[i]);
pptable->LclkFrequencyTable[i] =
- SMUQ10_TO_UINT(metrics->LclkFrequencyTable[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable)[i]);
}
/* use AID0 serial number by default */
- pptable->PublicSerialNumber_AID = metrics->PublicSerialNumber_AID[0];
+ pptable->PublicSerialNumber_AID = GET_METRIC_FIELD(PublicSerialNumber_AID)[0];
pptable->Init = true;
}
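
From here on the APU and dGPU metrics formats share one buffer: it is sized with max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)) and aliased through two typed pointers, with GET_METRIC_FIELD selecting the right view per access. The same trick in miniature, with made-up layouts:

    #include <stdio.h>
    #include <stdlib.h>

    /* Two firmware-defined views of one report buffer (made-up fields). */
    struct view_x { unsigned counter; unsigned pcie_bw; };
    struct view_a { unsigned counter; };

    #define GET_FIELD(is_apu, f) ((is_apu) ? (a->f) : (x->f))

    int main(void)
    {
            size_t sz = sizeof(struct view_x) > sizeof(struct view_a) ?
                        sizeof(struct view_x) : sizeof(struct view_a);
            void *buf = calloc(1, sz);  /* one allocation fits either view */
            struct view_x *x = buf;
            struct view_a *a = buf;
            int is_apu = 0;

            x->counter = 42;
            printf("%u\n", GET_FIELD(is_apu, counter));
            free(buf);
            return 0;
    }
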
@@ -676,9 +784,9 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
uint32_t *value)
{
struct smu_table_context *smu_table = &smu->smu_table;
- MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+ MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
+ MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
struct amdgpu_device *adev = smu->adev;
- uint32_t smu_version;
int ret = 0;
int xcc_id;
@@ -690,53 +798,52 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
switch (member) {
case METRICS_CURR_GFXCLK:
case METRICS_AVERAGE_GFXCLK:
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if (smu_version >= 0x552F00) {
+ if (smu->smc_fw_version >= 0x552F00) {
xcc_id = GET_INST(GC, 0);
- *value = SMUQ10_TO_UINT(metrics->GfxclkFrequency[xcc_id]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]);
} else {
*value = 0;
}
break;
case METRICS_CURR_SOCCLK:
case METRICS_AVERAGE_SOCCLK:
- *value = SMUQ10_TO_UINT(metrics->SocclkFrequency[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[0]);
break;
case METRICS_CURR_UCLK:
case METRICS_AVERAGE_UCLK:
- *value = SMUQ10_TO_UINT(metrics->UclkFrequency);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency));
break;
case METRICS_CURR_VCLK:
- *value = SMUQ10_TO_UINT(metrics->VclkFrequency[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[0]);
break;
case METRICS_CURR_DCLK:
- *value = SMUQ10_TO_UINT(metrics->DclkFrequency[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[0]);
break;
case METRICS_CURR_FCLK:
- *value = SMUQ10_TO_UINT(metrics->FclkFrequency);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency));
break;
case METRICS_AVERAGE_GFXACTIVITY:
- *value = SMUQ10_TO_UINT(metrics->SocketGfxBusy);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy));
break;
case METRICS_AVERAGE_MEMACTIVITY:
- *value = SMUQ10_TO_UINT(metrics->DramBandwidthUtilization);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization));
break;
case METRICS_CURR_SOCKETPOWER:
- *value = SMUQ10_TO_UINT(metrics->SocketPower) << 8;
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower)) << 8;
break;
case METRICS_TEMPERATURE_HOTSPOT:
- *value = SMUQ10_TO_UINT(metrics->MaxSocketTemperature) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_TEMPERATURE_MEM:
- *value = SMUQ10_TO_UINT(metrics->MaxHbmTemperature) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
/* This is the max across all VRs, not just the SOC VR,
* so there is no need to define another data type for it.
*/
case METRICS_TEMPERATURE_VRSOC:
- *value = SMUQ10_TO_UINT(metrics->MaxVrTemperature) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
default:
@@ -782,13 +889,63 @@ static int smu_v13_0_6_get_current_clk_freq_by_table(struct smu_context *smu,
return smu_v13_0_6_get_smu_metrics_data(smu, member_type, value);
}
+static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, int size,
+ struct smu_13_0_dpm_table *single_dpm_table,
+ uint32_t curr_clk, const char *clk_name)
+{
+ struct pp_clock_levels_with_latency clocks;
+ int i, ret, level = -1;
+ uint32_t clk1, clk2;
+
+ ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ dev_err(smu->adev->dev, "Attempt to get %s clk levels failed!",
+ clk_name);
+ return ret;
+ }
+
+ if (!clocks.num_levels)
+ return -EINVAL;
+
+ if (curr_clk < SMU_13_0_6_DSCLK_THRESHOLD) {
+ size = sysfs_emit_at(buf, size, "S: %uMhz *\n", curr_clk);
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sysfs_emit_at(buf, size, "%d: %uMhz\n", i,
+ clocks.data[i].clocks_in_khz /
+ 1000);
+
+ } else {
+ if ((clocks.num_levels == 1) ||
+ (curr_clk < (clocks.data[0].clocks_in_khz / 1000)))
+ level = 0;
+ for (i = 0; i < clocks.num_levels; i++) {
+ clk1 = clocks.data[i].clocks_in_khz / 1000;
+
+ if (i < (clocks.num_levels - 1))
+ clk2 = clocks.data[i + 1].clocks_in_khz / 1000;
+
+ if (curr_clk == clk1) {
+ level = i;
+ } else if (curr_clk >= clk1 && curr_clk < clk2) {
+ level = (curr_clk - clk1) <= (clk2 - curr_clk) ?
+ i :
+ i + 1;
+ }
+
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i,
+ clk1, (level == i) ? "*" : "");
+ }
+ }
+
+ return size;
+}
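
The helper prints a deep-sleep row ("S:") whenever the current clock sits below the 140 MHz SMU_13_0_6_DSCLK_THRESHOLD; otherwise it stars the discrete level nearest the current frequency using a midpoint comparison between adjacent levels. That selection, extracted into a standalone sketch (assuming an ascending level table):

    #include <stdio.h>

    /* Pick the DPM level whose frequency is closest to `cur`. */
    static int nearest_level(const unsigned *lvl, int n, unsigned cur)
    {
            int i;

            if (n == 1 || cur < lvl[0])
                    return 0;
            for (i = 0; i < n - 1; i++)
                    if (cur >= lvl[i] && cur < lvl[i + 1])
                            return (cur - lvl[i]) <= (lvl[i + 1] - cur) ?
                                   i : i + 1;
            return n - 1;   /* at or above the top level */
    }

    int main(void)
    {
            unsigned lvl[] = { 900, 1200 };

            printf("%d\n", nearest_level(lvl, 2, 1100));    /* 1 */
            return 0;
    }
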
+
static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
enum smu_clk_type type, char *buf)
{
- int i, now, size = 0;
+ int now, size = 0;
int ret = 0;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
- struct pp_clock_levels_with_latency clocks;
struct smu_13_0_dpm_table *single_dpm_table;
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_13_0_dpm_context *dpm_context = NULL;
@@ -819,7 +976,15 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
min_clk = pstate_table->gfxclk_pstate.curr.min;
max_clk = pstate_table->gfxclk_pstate.curr.max;
- if (!smu_v13_0_6_freqs_in_same_level(now, min_clk) &&
+ if (now < SMU_13_0_6_DSCLK_THRESHOLD) {
+ size += sysfs_emit_at(buf, size, "S: %uMhz *\n",
+ now);
+ size += sysfs_emit_at(buf, size, "0: %uMhz\n",
+ min_clk);
+ size += sysfs_emit_at(buf, size, "1: %uMhz\n",
+ max_clk);
+
+ } else if (!smu_v13_0_6_freqs_in_same_level(now, min_clk) &&
!smu_v13_0_6_freqs_in_same_level(now, max_clk)) {
size += sysfs_emit_at(buf, size, "0: %uMhz\n",
min_clk);
@@ -851,26 +1016,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
}
single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
- ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev,
- "Attempt to get memory clk levels Failed!");
- return ret;
- }
- for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(
- buf, size, "%d: %uMhz %s\n", i,
- clocks.data[i].clocks_in_khz / 1000,
- (clocks.num_levels == 1) ?
- "*" :
- (smu_v13_0_6_freqs_in_same_level(
- clocks.data[i].clocks_in_khz /
- 1000,
- now) ?
- "*" :
- ""));
- break;
+ return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
+ now, "mclk");
case SMU_SOCCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_SOCCLK,
@@ -882,26 +1030,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
}
single_dpm_table = &(dpm_context->dpm_tables.soc_table);
- ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev,
- "Attempt to get socclk levels Failed!");
- return ret;
- }
- for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(
- buf, size, "%d: %uMhz %s\n", i,
- clocks.data[i].clocks_in_khz / 1000,
- (clocks.num_levels == 1) ?
- "*" :
- (smu_v13_0_6_freqs_in_same_level(
- clocks.data[i].clocks_in_khz /
- 1000,
- now) ?
- "*" :
- ""));
- break;
+ return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
+ now, "socclk");
case SMU_FCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_FCLK,
@@ -913,26 +1044,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
}
single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
- ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev,
- "Attempt to get fclk levels Failed!");
- return ret;
- }
- for (i = 0; i < single_dpm_table->count; i++)
- size += sysfs_emit_at(
- buf, size, "%d: %uMhz %s\n", i,
- single_dpm_table->dpm_levels[i].value,
- (clocks.num_levels == 1) ?
- "*" :
- (smu_v13_0_6_freqs_in_same_level(
- clocks.data[i].clocks_in_khz /
- 1000,
- now) ?
- "*" :
- ""));
- break;
+ return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
+ now, "fclk");
case SMU_VCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_VCLK,
@@ -944,26 +1058,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
}
single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
- ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev,
- "Attempt to get vclk levels Failed!");
- return ret;
- }
- for (i = 0; i < single_dpm_table->count; i++)
- size += sysfs_emit_at(
- buf, size, "%d: %uMhz %s\n", i,
- single_dpm_table->dpm_levels[i].value,
- (clocks.num_levels == 1) ?
- "*" :
- (smu_v13_0_6_freqs_in_same_level(
- clocks.data[i].clocks_in_khz /
- 1000,
- now) ?
- "*" :
- ""));
- break;
+ return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
+ now, "vclk");
case SMU_DCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_DCLK,
@@ -975,26 +1072,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
}
single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
- ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev,
- "Attempt to get dclk levels Failed!");
- return ret;
- }
- for (i = 0; i < single_dpm_table->count; i++)
- size += sysfs_emit_at(
- buf, size, "%d: %uMhz %s\n", i,
- single_dpm_table->dpm_levels[i].value,
- (clocks.num_levels == 1) ?
- "*" :
- (smu_v13_0_6_freqs_in_same_level(
- clocks.data[i].clocks_in_khz /
- 1000,
- now) ?
- "*" :
- ""));
- break;
+ return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
+ now, "dclk");
default:
break;
@@ -1230,9 +1310,10 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu,
}
static int smu_v13_0_6_get_power_limit(struct smu_context *smu,
- uint32_t *current_power_limit,
- uint32_t *default_power_limit,
- uint32_t *max_power_limit)
+ uint32_t *current_power_limit,
+ uint32_t *default_power_limit,
+ uint32_t *max_power_limit,
+ uint32_t *min_power_limit)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct PPTable_t *pptable =
@@ -1256,6 +1337,8 @@ static int smu_v13_0_6_get_power_limit(struct smu_context *smu,
*max_power_limit = pptable->MaxSocketPowerLimit;
}
+ if (min_power_limit)
+ *min_power_limit = 0;
return 0;
}
@@ -1380,10 +1463,7 @@ static int smu_v13_0_6_register_irq_handler(struct smu_context *smu)
static int smu_v13_0_6_notify_unload(struct smu_context *smu)
{
- uint32_t smu_version;
-
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if (smu_version <= 0x553500)
+ if (amdgpu_in_reset(smu->adev))
return 0;
dev_dbg(smu->adev->dev, "Notify PMFW about driver unload");
@@ -1393,6 +1473,18 @@ static int smu_v13_0_6_notify_unload(struct smu_context *smu)
return 0;
}
+static int smu_v13_0_6_mca_set_debug_mode(struct smu_context *smu, bool enable)
+{
+ /* NOTE: the ClearMcaOnRead message is only supported for SMU firmware version 85.72.0 or higher */
+ if (smu->smc_fw_version < 0x554800)
+ return 0;
+
+ amdgpu_ras_set_mca_debug_mode(smu->adev, enable);
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ClearMcaOnRead,
+ enable ? 0 : ClearMcaOnRead_UE_FLAG_MASK | ClearMcaOnRead_CE_POLL_MASK,
+ NULL);
+}
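
The firmware version words in this file decode as one byte per component, so the 0x554800 gate above is 85.72.0, matching the comment (and 0x554500 further down is 85.69.0). With debug mode enabled, parameter 0 asks the PMFW to stop clearing any MCA banks on read so they remain inspectable; with it disabled, the UE and CE clear-on-read flags are restored. A decode helper, as a sketch:

    #include <stdio.h>
    #include <stdint.h>

    static void decode_pmfw_version(uint32_t v)
    {
            printf("%u.%u.%u\n", (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff);
    }

    int main(void)
    {
            decode_pmfw_version(0x554800);  /* 85.72.0 */
            decode_pmfw_version(0x554500);  /* 85.69.0 */
            return 0;
    }
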
+
static int smu_v13_0_6_system_features_control(struct smu_context *smu,
bool enable)
{
@@ -1644,13 +1736,11 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
static int smu_v13_0_6_get_enabled_mask(struct smu_context *smu,
uint64_t *feature_mask)
{
- uint32_t smu_version;
int ret;
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
ret = smu_cmn_get_enabled_mask(smu, feature_mask);
- if (ret == -EIO && smu_version < 0x552F00) {
+ if (ret == -EIO && smu->smc_fw_version < 0x552F00) {
*feature_mask = 0;
ret = 0;
}
@@ -1854,8 +1944,6 @@ static void smu_v13_0_6_get_unique_id(struct smu_context *smu)
(struct PPTable_t *)smu_table->driver_pptable;
adev->unique_id = pptable->PublicSerialNumber_AID;
- if (adev->serial[0] == '\0')
- sprintf(adev->serial, "%016llx", adev->unique_id);
}
static bool smu_v13_0_6_is_baco_supported(struct smu_context *smu)
@@ -1865,19 +1953,6 @@ static bool smu_v13_0_6_is_baco_supported(struct smu_context *smu)
return false;
}
-static int smu_v13_0_6_set_df_cstate(struct smu_context *smu,
- enum pp_df_cstate state)
-{
- return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl,
- state, NULL);
-}
-
-static int smu_v13_0_6_allow_xgmi_power_down(struct smu_context *smu, bool en)
-{
- return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GmiPwrDnControl,
- en ? 0 : 1, NULL);
-}
-
static const char *const throttling_logging_label[] = {
[THROTTLER_PROCHOT_BIT] = "Prochot",
[THROTTLER_PPT_BIT] = "PPT",
@@ -1888,7 +1963,7 @@ static const char *const throttling_logging_label[] = {
static void smu_v13_0_6_log_thermal_throttling_event(struct smu_context *smu)
{
- int throttler_idx, throtting_events = 0, buf_idx = 0;
+ int throttler_idx, throttling_events = 0, buf_idx = 0;
struct amdgpu_device *adev = smu->adev;
uint32_t throttler_status;
char log_buf[256];
@@ -1902,10 +1977,10 @@ static void smu_v13_0_6_log_thermal_throttling_event(struct smu_context *smu)
throttler_idx < ARRAY_SIZE(throttling_logging_label);
throttler_idx++) {
if (throttler_status & (1U << throttler_idx)) {
- throtting_events++;
+ throttling_events++;
buf_idx += snprintf(
log_buf + buf_idx, sizeof(log_buf) - buf_idx,
- "%s%s", throtting_events > 1 ? " and " : "",
+ "%s%s", throttling_events > 1 ? " and " : "",
throttling_logging_label[throttler_idx]);
if (buf_idx >= sizeof(log_buf)) {
dev_err(adev->dev, "buffer overflow!\n");
@@ -1956,63 +2031,71 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_3 *gpu_metrics =
- (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
+ struct gpu_metrics_v1_5 *gpu_metrics =
+ (struct gpu_metrics_v1_5 *)smu_table->gpu_metrics_table;
struct amdgpu_device *adev = smu->adev;
- int ret = 0, inst0, xcc0;
- MetricsTable_t *metrics;
+ int ret = 0, xcc_id, inst, i, j;
+ MetricsTableX_t *metrics_x;
+ MetricsTableA_t *metrics_a;
u16 link_width_level;
- inst0 = adev->sdma.instance[0].aid_id;
- xcc0 = GET_INST(GC, 0);
-
- metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
- ret = smu_v13_0_6_get_metrics_table(smu, metrics, true);
+ metrics_x = kzalloc(max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), GFP_KERNEL);
+ ret = smu_v13_0_6_get_metrics_table(smu, metrics_x, true);
if (ret) {
- kfree(metrics);
+ kfree(metrics_x);
return ret;
}
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
+ metrics_a = (MetricsTableA_t *)metrics_x;
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 5);
gpu_metrics->temperature_hotspot =
- SMUQ10_TO_UINT(metrics->MaxSocketTemperature);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature));
/* Individual HBM stack temperature is not reported */
gpu_metrics->temperature_mem =
- SMUQ10_TO_UINT(metrics->MaxHbmTemperature);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature));
/* Reports max temperature of all voltage rails */
gpu_metrics->temperature_vrsoc =
- SMUQ10_TO_UINT(metrics->MaxVrTemperature);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature));
gpu_metrics->average_gfx_activity =
- SMUQ10_TO_UINT(metrics->SocketGfxBusy);
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy));
gpu_metrics->average_umc_activity =
- SMUQ10_TO_UINT(metrics->DramBandwidthUtilization);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization));
- gpu_metrics->average_socket_power =
- SMUQ10_TO_UINT(metrics->SocketPower);
+ gpu_metrics->curr_socket_power =
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower));
/* Energy counter reported in 15.259uJ (2^-16) units */
- gpu_metrics->energy_accumulator = metrics->SocketEnergyAcc;
-
- gpu_metrics->current_gfxclk =
- SMUQ10_TO_UINT(metrics->GfxclkFrequency[xcc0]);
- gpu_metrics->current_socclk =
- SMUQ10_TO_UINT(metrics->SocclkFrequency[inst0]);
- gpu_metrics->current_uclk = SMUQ10_TO_UINT(metrics->UclkFrequency);
- gpu_metrics->current_vclk0 =
- SMUQ10_TO_UINT(metrics->VclkFrequency[inst0]);
- gpu_metrics->current_dclk0 =
- SMUQ10_TO_UINT(metrics->DclkFrequency[inst0]);
-
- gpu_metrics->average_gfxclk_frequency = gpu_metrics->current_gfxclk;
- gpu_metrics->average_socclk_frequency = gpu_metrics->current_socclk;
- gpu_metrics->average_uclk_frequency = gpu_metrics->current_uclk;
- gpu_metrics->average_vclk0_frequency = gpu_metrics->current_vclk0;
- gpu_metrics->average_dclk0_frequency = gpu_metrics->current_dclk0;
+ gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc);
+
+ for (i = 0; i < MAX_GFX_CLKS; i++) {
+ xcc_id = GET_INST(GC, i);
+ if (xcc_id >= 0)
+ gpu_metrics->current_gfxclk[i] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]);
+
+ if (i < MAX_CLKS) {
+ gpu_metrics->current_socclk[i] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[i]);
+ inst = GET_INST(VCN, i);
+ if (inst >= 0) {
+ gpu_metrics->current_vclk0[i] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[inst]);
+ gpu_metrics->current_dclk0[i] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[inst]);
+ }
+ }
+ }
+
+ gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency));
/* Throttle status is not reported through metrics now */
gpu_metrics->throttle_status = 0;
+ /* Clock Lock Status. Each bit corresponds to each GFXCLK instance */
+ gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak) >> GET_INST(GC, 0);
+
if (!(adev->flags & AMD_IS_APU)) {
link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu);
if (link_width_level > MAX_LINK_WIDTH)
@@ -2022,21 +2105,60 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
DECODE_LANE_WIDTH(link_width_level);
gpu_metrics->pcie_link_speed =
smu_v13_0_6_get_current_pcie_link_speed(smu);
+ gpu_metrics->pcie_bandwidth_acc =
+ SMUQ10_ROUND(metrics_x->PcieBandwidthAcc[0]);
+ gpu_metrics->pcie_bandwidth_inst =
+ SMUQ10_ROUND(metrics_x->PcieBandwidth[0]);
+ gpu_metrics->pcie_l0_to_recov_count_acc =
+ metrics_x->PCIeL0ToRecoveryCountAcc;
+ gpu_metrics->pcie_replay_count_acc =
+ metrics_x->PCIenReplayAAcc;
+ gpu_metrics->pcie_replay_rover_count_acc =
+ metrics_x->PCIenReplayARolloverCountAcc;
+ gpu_metrics->pcie_nak_sent_count_acc =
+ metrics_x->PCIeNAKSentCountAcc;
+ gpu_metrics->pcie_nak_rcvd_count_acc =
+ metrics_x->PCIeNAKReceivedCountAcc;
}
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
gpu_metrics->gfx_activity_acc =
- SMUQ10_TO_UINT(metrics->SocketGfxBusyAcc);
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc));
gpu_metrics->mem_activity_acc =
- SMUQ10_TO_UINT(metrics->DramBandwidthUtilizationAcc);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc));
+
+ for (i = 0; i < NUM_XGMI_LINKS; i++) {
+ gpu_metrics->xgmi_read_data_acc[i] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc)[i]);
+ gpu_metrics->xgmi_write_data_acc[i] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc)[i]);
+ }
- gpu_metrics->firmware_timestamp = metrics->Timestamp;
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ inst = GET_INST(JPEG, i);
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ gpu_metrics->jpeg_activity[(i * adev->jpeg.num_jpeg_rings) + j] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy)
+ [(inst * adev->jpeg.num_jpeg_rings) + j]);
+ }
+ }
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ inst = GET_INST(VCN, i);
+ gpu_metrics->vcn_activity[i] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy)[inst]);
+ }
+
+ gpu_metrics->xgmi_link_width = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWidth));
+ gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiBitrate));
+
+ gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp);
*table = (void *)gpu_metrics;
- kfree(metrics);
+ kfree(metrics_x);
- return sizeof(struct gpu_metrics_v1_3);
+ return sizeof(*gpu_metrics);
}
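
The v1.5 metrics fill iterates over logical GC/VCN/JPEG instances and converts each to a physical instance with GET_INST before indexing the firmware arrays, since on partitioned parts the logical numbering need not match the physical layout the PMFW reports against. The shape of that indirection, with an invented mapping table:

    #include <stdio.h>

    /* Hypothetical logical->physical VCN map for a partitioned part. */
    static const int vcn_phys[] = { 0, 2, 1, 3 };
    static const unsigned vcn_busy_phys[] = { 10, 20, 30, 40 };  /* PMFW order */

    int main(void)
    {
            int i;

            for (i = 0; i < 4; i++) {
                    int inst = vcn_phys[i]; /* what GET_INST(VCN, i) provides */
                    printf("logical %d -> busy %u%%\n", i, vcn_busy_phys[inst]);
            }
            return 0;
    }
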
static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
@@ -2070,17 +2192,18 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
continue;
}
- if (ret) {
- dev_err(adev->dev,
- "failed to send mode2 message \tparam: 0x%08x error code %d\n",
- SMU_RESET_MODE_2, ret);
+ if (ret)
goto out;
- }
+
} while (ret == -ETIME && timeout);
out:
mutex_unlock(&smu->message_lock);
+ if (ret)
+ dev_err(adev->dev, "failed to send mode2 reset, error code %d",
+ ret);
+
return ret;
}
@@ -2088,8 +2211,7 @@ static int smu_v13_0_6_get_thermal_temperature_range(struct smu_context *smu,
struct smu_temperature_range *range)
{
struct amdgpu_device *adev = smu->adev;
- u32 aid_temp, xcd_temp, mem_temp;
- uint32_t smu_version;
+ u32 aid_temp, xcd_temp, max_temp;
u32 ccd_temp = 0;
int ret;
@@ -2100,35 +2222,53 @@ static int smu_v13_0_6_get_thermal_temperature_range(struct smu_context *smu,
return -EINVAL;
/* Check SMU version; the GetCTFLimit message is only supported for SMU firmware version 85.69 or higher */
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if (smu_version < 0x554500)
+ if (smu->smc_fw_version < 0x554500)
return 0;
+ /* Get SOC Max operating temperature */
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
PPSMC_AID_THM_TYPE, &aid_temp);
if (ret)
goto failed;
-
if (adev->flags & AMD_IS_APU) {
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
PPSMC_CCD_THM_TYPE, &ccd_temp);
if (ret)
goto failed;
}
-
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
PPSMC_XCD_THM_TYPE, &xcd_temp);
if (ret)
goto failed;
-
- range->hotspot_crit_max = max3(aid_temp, xcd_temp, ccd_temp) *
+ range->hotspot_emergency_max = max3(aid_temp, xcd_temp, ccd_temp) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ /* Get HBM Max operating temperature */
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
- PPSMC_HBM_THM_TYPE, &mem_temp);
+ PPSMC_HBM_THM_TYPE, &max_temp);
+ if (ret)
+ goto failed;
+ range->mem_emergency_max =
+ max_temp * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ /* Get SOC thermal throttle limit */
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetThermalLimit,
+ PPSMC_THROTTLING_LIMIT_TYPE_SOCKET,
+ &max_temp);
+ if (ret)
+ goto failed;
+ range->hotspot_crit_max =
+ max_temp * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ /* Get HBM thermal throttle limit */
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetThermalLimit,
+ PPSMC_THROTTLING_LIMIT_TYPE_HBM,
+ &max_temp);
if (ret)
goto failed;
- range->mem_crit_max = mem_temp * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ range->mem_crit_max = max_temp * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
failed:
return ret;
}
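
The resulting split: the GetCTFLimit values now populate the *_emergency_max fields (hard shutdown thresholds) while the new GetThermalLimit values populate *_crit_max (throttle onset), each scaled by SMU_TEMPERATURE_UNITS_PER_CENTIGRADES — presumably hwmon-style millidegrees, i.e. a factor of 1000. For example:

    #include <stdio.h>

    #define UNITS_PER_C 1000        /* assumed: hwmon-style millidegrees */

    int main(void)
    {
            unsigned aid = 105, xcd = 110, ccd = 0; /* example CTF limits, C */
            unsigned max3 = aid > xcd ? aid : xcd;

            if (ccd > max3)
                    max3 = ccd;
            printf("hotspot_emergency_max = %u\n", max3 * UNITS_PER_C);
            return 0;
    }
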
@@ -2136,16 +2276,24 @@ failed:
static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
+ struct amdgpu_hive_info *hive = NULL;
+ u32 hive_ras_recovery = 0;
struct amdgpu_ras *ras;
u32 fatal_err, param;
int ret = 0;
+ hive = amdgpu_get_xgmi_hive(adev);
ras = amdgpu_ras_get_context(adev);
fatal_err = 0;
param = SMU_RESET_MODE_1;
+ if (hive) {
+ hive_ras_recovery = atomic_read(&hive->ras_recovery);
+ amdgpu_put_xgmi_hive(hive);
+ }
+
/* fatal error triggered by ras, PMFW supports the flag */
- if (ras && atomic_read(&ras->in_recovery))
+ if (ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
fatal_err = 1;
param |= (fatal_err << 16);
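
The reset request packs the mode selector into the low half of the message argument and the RAS fatal-error flag at bit 16, letting the PMFW distinguish a fatal-error-driven mode-1 reset from an ordinary one:

    #include <stdio.h>
    #include <stdint.h>

    #define RESET_MODE_1 1u

    int main(void)
    {
            int fatal_err = 1;
            /* mode selector in bits [15:0], RAS fatal-error flag at bit 16 */
            uint32_t param = RESET_MODE_1 | ((uint32_t)fatal_err << 16);

            printf("param = %#x\n", param);     /* 0x10001 */
            return 0;
    }
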
@@ -2184,6 +2332,525 @@ static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
return ret;
}
+static int smu_v13_0_6_post_init(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (!amdgpu_sriov_vf(adev) && adev->ras_enabled)
+ return smu_v13_0_6_mca_set_debug_mode(smu, false);
+
+ return 0;
+}
+
+static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ return smu_v13_0_6_mca_set_debug_mode(smu, enable);
+}
+
+static int smu_v13_0_6_get_valid_mca_count(struct smu_context *smu, enum amdgpu_mca_error_type type, uint32_t *count)
+{
+ uint32_t msg;
+ int ret;
+
+ if (!count)
+ return -EINVAL;
+
+ switch (type) {
+ case AMDGPU_MCA_ERROR_TYPE_UE:
+ msg = SMU_MSG_QueryValidMcaCount;
+ break;
+ case AMDGPU_MCA_ERROR_TYPE_CE:
+ msg = SMU_MSG_QueryValidMcaCeCount;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = smu_cmn_send_smc_msg(smu, msg, count);
+ if (ret) {
+ *count = 0;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __smu_v13_0_6_mca_dump_bank(struct smu_context *smu, enum amdgpu_mca_error_type type,
+ int idx, int offset, uint32_t *val)
+{
+ uint32_t msg, param;
+
+ switch (type) {
+ case AMDGPU_MCA_ERROR_TYPE_UE:
+ msg = SMU_MSG_McaBankDumpDW;
+ break;
+ case AMDGPU_MCA_ERROR_TYPE_CE:
+ msg = SMU_MSG_McaBankCeDumpDW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ param = ((idx & 0xffff) << 16) | (offset & 0xfffc);
+
+ return smu_cmn_send_smc_msg_with_param(smu, msg, param, val);
+}
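
The dump parameter carries the bank index in bits [31:16] and a byte offset into that bank's register file in bits [15:0], masked with 0xfffc so reads stay DWORD aligned:

    #include <stdio.h>
    #include <stdint.h>

    /* Bank index high, dword-aligned byte offset low. */
    static uint32_t mca_dump_param(int idx, int offset)
    {
            return ((uint32_t)(idx & 0xffff) << 16) | (offset & 0xfffc);
    }

    int main(void)
    {
            /* 0x30008: the unaligned offset 10 rounds down to 8 */
            printf("%#x\n", mca_dump_param(3, 10));
            return 0;
    }
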
+
+static int smu_v13_0_6_mca_dump_bank(struct smu_context *smu, enum amdgpu_mca_error_type type,
+ int idx, int offset, uint32_t *val, int count)
+{
+ int ret, i;
+
+ if (!val)
+ return -EINVAL;
+
+ for (i = 0; i < count; i++) {
+ ret = __smu_v13_0_6_mca_dump_bank(smu, type, idx, offset + (i << 2), &val[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct mca_bank_ipid smu_v13_0_6_mca_ipid_table[AMDGPU_MCA_IP_COUNT] = {
+ MCA_BANK_IPID(UMC, 0x96, 0x0),
+ MCA_BANK_IPID(SMU, 0x01, 0x1),
+ MCA_BANK_IPID(MP5, 0x01, 0x2),
+ MCA_BANK_IPID(PCS_XGMI, 0x50, 0x0),
+};
+
+static void mca_bank_entry_info_decode(struct mca_bank_entry *entry, struct mca_bank_info *info)
+{
+ uint64_t ipid = entry->regs[MCA_REG_IDX_IPID];
+ uint32_t insthi;
+
+ /* NOTE: All MCA IPID registers share the same format,
+ * so the driver can share the MCMP1 register header file.
+ */
+
+ info->hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
+ info->mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType);
+
+ insthi = REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi);
+ info->aid = ((insthi >> 2) & 0x03);
+ info->socket_id = insthi & 0x03;
+}
+
+static int mca_bank_read_reg(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
+ int idx, int reg_idx, uint64_t *val)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ uint32_t data[2] = {0, 0};
+ int ret;
+
+ if (!val || reg_idx >= MCA_REG_IDX_COUNT)
+ return -EINVAL;
+
+ ret = smu_v13_0_6_mca_dump_bank(smu, type, idx, reg_idx * 8, data, ARRAY_SIZE(data));
+ if (ret)
+ return ret;
+
+ *val = (uint64_t)data[1] << 32 | data[0];
+
+ dev_dbg(adev->dev, "mca read bank reg: type:%s, index: %d, reg_idx: %d, val: 0x%016llx\n",
+ type == AMDGPU_MCA_ERROR_TYPE_UE ? "UE" : "CE", idx, reg_idx, *val);
+
+ return 0;
+}
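
Each 64-bit MCA register is fetched as two DWORD dumps starting at byte offset reg_idx * 8 and reassembled low word first:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t data[2] = { 0xdeadbeef, 0x000000ab };  /* low dword first */
            uint64_t reg = (uint64_t)data[1] << 32 | data[0];

            printf("%#llx\n", (unsigned long long)reg);     /* 0xabdeadbeef */
            return 0;
    }
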
+
+static int mca_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
+ int idx, struct mca_bank_entry *entry)
+{
+ int i, ret;
+
+ /* NOTE: populate all MCA registers by default */
+ for (i = 0; i < ARRAY_SIZE(entry->regs); i++) {
+ ret = mca_bank_read_reg(adev, type, idx, i, &entry->regs[i]);
+ if (ret)
+ return ret;
+ }
+
+ entry->idx = idx;
+ entry->type = type;
+
+ mca_bank_entry_info_decode(entry, &entry->info);
+
+ return 0;
+}
+
+static int mca_decode_ipid_to_hwip(uint64_t val)
+{
+ const struct mca_bank_ipid *ipid;
+ uint16_t hwid, mcatype;
+ int i;
+
+ hwid = REG_GET_FIELD(val, MCMP1_IPIDT0, HardwareID);
+ mcatype = REG_GET_FIELD(val, MCMP1_IPIDT0, McaType);
+
+ for (i = 0; i < ARRAY_SIZE(smu_v13_0_6_mca_ipid_table); i++) {
+ ipid = &smu_v13_0_6_mca_ipid_table[i];
+
+ if (!ipid->hwid)
+ continue;
+
+ if (ipid->hwid == hwid && ipid->mcatype == mcatype)
+ return i;
+ }
+
+ return AMDGPU_MCA_IP_UNKNOW;
+}
+
+static int mca_umc_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
+ enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
+{
+ uint64_t status0;
+
+ status0 = entry->regs[MCA_REG_IDX_STATUS];
+
+ if (!REG_GET_FIELD(status0, MCMP1_STATUST0, Val)) {
+ *count = 0;
+ return 0;
+ }
+
+ if (type == AMDGPU_MCA_ERROR_TYPE_UE && umc_v12_0_is_uncorrectable_error(status0))
+ *count = 1;
+ else if (type == AMDGPU_MCA_ERROR_TYPE_CE && umc_v12_0_is_correctable_error(status0))
+ *count = 1;
+
+ return 0;
+}
+
+static int mca_pcs_xgmi_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
+ enum amdgpu_mca_error_type type, struct mca_bank_entry *entry,
+ uint32_t *count)
+{
+ u32 ext_error_code;
+
+ ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(entry->regs[MCA_REG_IDX_STATUS]);
+
+ if (type == AMDGPU_MCA_ERROR_TYPE_UE && ext_error_code == 0)
+ *count = 1;
+ else if (type == AMDGPU_MCA_ERROR_TYPE_CE && ext_error_code == 6)
+ *count = 1;
+
+ return 0;
+}
+
+static bool mca_smu_check_error_code(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras,
+ uint32_t errcode)
+{
+ int i;
+
+ if (!mca_ras->err_code_count || !mca_ras->err_code_array)
+ return true;
+
+ for (i = 0; i < mca_ras->err_code_count; i++) {
+ if (errcode == mca_ras->err_code_array[i])
+ return true;
+ }
+
+ return false;
+}
+
+static int mca_gfx_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
+ enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
+{
+ uint64_t status0, misc0;
+
+ status0 = entry->regs[MCA_REG_IDX_STATUS];
+ if (!REG_GET_FIELD(status0, MCMP1_STATUST0, Val)) {
+ *count = 0;
+ return 0;
+ }
+
+ if (type == AMDGPU_MCA_ERROR_TYPE_UE &&
+ REG_GET_FIELD(status0, MCMP1_STATUST0, UC) == 1 &&
+ REG_GET_FIELD(status0, MCMP1_STATUST0, PCC) == 1) {
+ *count = 1;
+ return 0;
+ } else {
+ misc0 = entry->regs[MCA_REG_IDX_MISC0];
+ *count = REG_GET_FIELD(misc0, MCMP1_MISC0T0, ErrCnt);
+ }
+
+ return 0;
+}
+
+static int mca_smu_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
+ enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
+{
+ uint64_t status0, misc0;
+
+ status0 = entry->regs[MCA_REG_IDX_STATUS];
+ if (!REG_GET_FIELD(status0, MCMP1_STATUST0, Val)) {
+ *count = 0;
+ return 0;
+ }
+
+ if (type == AMDGPU_MCA_ERROR_TYPE_UE &&
+ REG_GET_FIELD(status0, MCMP1_STATUST0, UC) == 1 &&
+ REG_GET_FIELD(status0, MCMP1_STATUST0, PCC) == 1) {
+ if (count)
+ *count = 1;
+ return 0;
+ }
+
+ misc0 = entry->regs[MCA_REG_IDX_MISC0];
+ *count = REG_GET_FIELD(misc0, MCMP1_MISC0T0, ErrCnt);
+
+ return 0;
+}
+
+static bool mca_gfx_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
+ enum amdgpu_mca_error_type type, struct mca_bank_entry *entry)
+{
+ uint32_t instlo;
+
+ instlo = REG_GET_FIELD(entry->regs[MCA_REG_IDX_IPID], MCMP1_IPIDT0, InstanceIdLo);
+ switch (instlo) {
+ case 0x36430400: /* SMNAID XCD 0 */
+ case 0x38430400: /* SMNAID XCD 1 */
+ case 0x40430400: /* SMNXCD XCD 0, NOTE: FIXME: fix this error later */
+ return true;
+ default:
+ return false;
+ }
+
+ return false;
+};
+
+static bool mca_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
+ enum amdgpu_mca_error_type type, struct mca_bank_entry *entry)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ uint32_t errcode, instlo;
+
+ instlo = REG_GET_FIELD(entry->regs[MCA_REG_IDX_IPID], MCMP1_IPIDT0, InstanceIdLo);
+ if (instlo != 0x03b30400)
+ return false;
+
+ if (!(adev->flags & AMD_IS_APU) && smu->smc_fw_version >= 0x00555600) {
+ errcode = MCA_REG__SYND__ERRORINFORMATION(entry->regs[MCA_REG_IDX_SYND]);
+ errcode &= 0xff;
+ } else {
+ errcode = REG_GET_FIELD(entry->regs[MCA_REG_IDX_STATUS], MCMP1_STATUST0, ErrorCode);
+ }
+
+ return mca_smu_check_error_code(adev, mca_ras, errcode);
+}
+
+static int sdma_err_codes[] = { CODE_SDMA0, CODE_SDMA1, CODE_SDMA2, CODE_SDMA3 };
+static int mmhub_err_codes[] = {
+ CODE_DAGB0, CODE_DAGB0 + 1, CODE_DAGB0 + 2, CODE_DAGB0 + 3, CODE_DAGB0 + 4, /* DAGB0-4 */
+ CODE_EA0, CODE_EA0 + 1, CODE_EA0 + 2, CODE_EA0 + 3, CODE_EA0 + 4, /* MMEA0-4*/
+ CODE_VML2, CODE_VML2_WALKER, CODE_MMCANE,
+};
+
+static const struct mca_ras_info mca_ras_table[] = {
+ {
+ .blkid = AMDGPU_RAS_BLOCK__UMC,
+ .ip = AMDGPU_MCA_IP_UMC,
+ .get_err_count = mca_umc_mca_get_err_count,
+ }, {
+ .blkid = AMDGPU_RAS_BLOCK__GFX,
+ .ip = AMDGPU_MCA_IP_SMU,
+ .get_err_count = mca_gfx_mca_get_err_count,
+ .bank_is_valid = mca_gfx_smu_bank_is_valid,
+ }, {
+ .blkid = AMDGPU_RAS_BLOCK__SDMA,
+ .ip = AMDGPU_MCA_IP_SMU,
+ .err_code_array = sdma_err_codes,
+ .err_code_count = ARRAY_SIZE(sdma_err_codes),
+ .get_err_count = mca_smu_mca_get_err_count,
+ .bank_is_valid = mca_smu_bank_is_valid,
+ }, {
+ .blkid = AMDGPU_RAS_BLOCK__MMHUB,
+ .ip = AMDGPU_MCA_IP_SMU,
+ .err_code_array = mmhub_err_codes,
+ .err_code_count = ARRAY_SIZE(mmhub_err_codes),
+ .get_err_count = mca_smu_mca_get_err_count,
+ .bank_is_valid = mca_smu_bank_is_valid,
+ }, {
+ .blkid = AMDGPU_RAS_BLOCK__XGMI_WAFL,
+ .ip = AMDGPU_MCA_IP_PCS_XGMI,
+ .get_err_count = mca_pcs_xgmi_mca_get_err_count,
+ },
+};
+
+static const struct mca_ras_info *mca_get_mca_ras_info(struct amdgpu_device *adev, enum amdgpu_ras_block blkid)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mca_ras_table); i++) {
+ if (mca_ras_table[i].blkid == blkid)
+ return &mca_ras_table[i];
+ }
+
+ return NULL;
+}
+
+static int mca_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret;
+
+ switch (type) {
+ case AMDGPU_MCA_ERROR_TYPE_UE:
+ case AMDGPU_MCA_ERROR_TYPE_CE:
+ ret = smu_v13_0_6_get_valid_mca_count(smu, type, count);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static bool mca_bank_is_valid(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras,
+ enum amdgpu_mca_error_type type, struct mca_bank_entry *entry)
+{
+ if (mca_decode_ipid_to_hwip(entry->regs[MCA_REG_IDX_IPID]) != mca_ras->ip)
+ return false;
+
+ if (mca_ras->bank_is_valid)
+ return mca_ras->bank_is_valid(mca_ras, adev, type, entry);
+
+ return true;
+}
+
+static int __mca_smu_get_ras_mca_set(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras,
+ enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set)
+{
+ struct mca_bank_entry entry;
+ uint32_t mca_cnt;
+ int i, ret;
+
+ ret = mca_get_valid_mca_count(adev, type, &mca_cnt);
+ if (ret)
+ return ret;
+
+ /* if valid mca bank count is 0, the driver can return 0 directly */
+ if (!mca_cnt)
+ return 0;
+
+ for (i = 0; i < mca_cnt; i++) {
+ memset(&entry, 0, sizeof(entry));
+ ret = mca_get_mca_entry(adev, type, i, &entry);
+ if (ret)
+ return ret;
+
+ if (mca_ras && !mca_bank_is_valid(adev, mca_ras, type, &entry))
+ continue;
+
+ ret = amdgpu_mca_bank_set_add_entry(mca_set, &entry);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mca_smu_get_ras_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
+ enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set)
+{
+ const struct mca_ras_info *mca_ras = NULL;
+
+ if (!mca_set)
+ return -EINVAL;
+
+ if (blk != AMDGPU_RAS_BLOCK_COUNT) {
+ mca_ras = mca_get_mca_ras_info(adev, blk);
+ if (!mca_ras)
+ return -EOPNOTSUPP;
+ }
+
+ return __mca_smu_get_ras_mca_set(adev, mca_ras, type, mca_set);
+}
+
+static int mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
+ struct mca_bank_entry *entry, uint32_t *count)
+{
+ const struct mca_ras_info *mca_ras;
+
+ if (!entry || !count)
+ return -EINVAL;
+
+ mca_ras = mca_get_mca_ras_info(adev, blk);
+ if (!mca_ras)
+ return -EOPNOTSUPP;
+
+ if (!mca_bank_is_valid(adev, mca_ras, type, entry)) {
+ *count = 0;
+ return 0;
+ }
+
+ return mca_ras->get_err_count(mca_ras, adev, type, entry, count);
+}
+
+static int mca_smu_get_mca_entry(struct amdgpu_device *adev,
+ enum amdgpu_mca_error_type type, int idx, struct mca_bank_entry *entry)
+{
+ return mca_get_mca_entry(adev, type, idx, entry);
+}
+
+static int mca_smu_get_valid_mca_count(struct amdgpu_device *adev,
+ enum amdgpu_mca_error_type type, uint32_t *count)
+{
+ return mca_get_valid_mca_count(adev, type, count);
+}
+
+static const struct amdgpu_mca_smu_funcs smu_v13_0_6_mca_smu_funcs = {
+ .max_ue_count = 12,
+ .max_ce_count = 12,
+ .mca_set_debug_mode = mca_smu_set_debug_mode,
+ .mca_get_ras_mca_set = mca_smu_get_ras_mca_set,
+ .mca_parse_mca_error_count = mca_smu_parse_mca_error_count,
+ .mca_get_mca_entry = mca_smu_get_mca_entry,
+ .mca_get_valid_mca_count = mca_smu_get_valid_mca_count,
+};
+
+static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu,
+ enum pp_xgmi_plpd_mode mode)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret, param;
+
+ switch (mode) {
+ case XGMI_PLPD_DEFAULT:
+ param = PPSMC_PLPD_MODE_DEFAULT;
+ break;
+ case XGMI_PLPD_OPTIMIZED:
+ param = PPSMC_PLPD_MODE_OPTIMIZED;
+ break;
+ case XGMI_PLPD_DISALLOW:
+ param = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (mode == XGMI_PLPD_DISALLOW)
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GmiPwrDnControl,
+ param, NULL);
+ else
+ /* change xgmi per-link power down policy */
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SelectPLPDMode,
+ param, NULL);
+
+ if (ret)
+ dev_err(adev->dev,
+ "select xgmi per-link power down policy %d failed\n",
+ mode);
+
+ return ret;
+}
+
static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
/* init dpm */
.get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask,
@@ -2197,6 +2864,8 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.get_power_limit = smu_v13_0_6_get_power_limit,
.is_dpm_running = smu_v13_0_6_is_dpm_running,
.get_unique_id = smu_v13_0_6_get_unique_id,
+ .init_microcode = smu_v13_0_6_init_microcode,
+ .fini_microcode = smu_v13_0_fini_microcode,
.init_smc_tables = smu_v13_0_6_init_smc_tables,
.fini_smc_tables = smu_v13_0_fini_smc_tables,
.init_power = smu_v13_0_init_power,
@@ -2222,11 +2891,9 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.get_dpm_ultimate_freq = smu_v13_0_6_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v13_0_6_set_soft_freq_limited_range,
.od_edit_dpm_table = smu_v13_0_6_usr_edit_dpm_table,
- .set_df_cstate = smu_v13_0_6_set_df_cstate,
- .allow_xgmi_power_down = smu_v13_0_6_allow_xgmi_power_down,
+ .select_xgmi_plpd_policy = smu_v13_0_6_select_xgmi_plpd_policy,
.log_thermal_throttling_event = smu_v13_0_6_log_thermal_throttling_event,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
- .set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.get_gpu_metrics = smu_v13_0_6_get_gpu_metrics,
.get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range,
.mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported,
@@ -2237,6 +2904,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.i2c_init = smu_v13_0_6_i2c_control_init,
.i2c_fini = smu_v13_0_6_i2c_control_fini,
.send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
+ .post_init = smu_v13_0_6_post_init,
};
void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
@@ -2248,4 +2916,5 @@ void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
smu->table_map = smu_v13_0_6_table_map;
smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION;
smu_v13_0_set_smu_mailbox_registers(smu);
+ amdgpu_mca_smu_init_funcs(smu->adev, &smu_v13_0_6_mca_smu_funcs);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 51ae41cb4..bc5891c3f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -77,6 +77,12 @@
#define PP_OD_FEATURE_UCLK_FMIN 2
#define PP_OD_FEATURE_UCLK_FMAX 3
#define PP_OD_FEATURE_GFX_VF_CURVE 4
+#define PP_OD_FEATURE_FAN_CURVE_TEMP 5
+#define PP_OD_FEATURE_FAN_CURVE_PWM 6
+#define PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT 7
+#define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8
+#define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9
+#define PP_OD_FEATURE_FAN_MINIMUM_PWM 10
#define LINK_SPEED_MAX 3
@@ -147,6 +153,7 @@ static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(VCLK1, PPCLK_VCLK_1),
CLK_MAP(DCLK, PPCLK_DCLK_0),
CLK_MAP(DCLK1, PPCLK_DCLK_1),
+ CLK_MAP(DCEFCLK, PPCLK_DCFCLK),
};
static struct cmn2asic_mapping smu_v13_0_7_feature_mask_map[SMU_FEATURE_COUNT] = {
@@ -331,12 +338,10 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
struct smu_baco_context *smu_baco = &smu->smu_baco;
PPTable_t *smc_pptable = table_context->driver_pptable;
BoardTable_t *BoardTable = &smc_pptable->BoardTable;
-#if 0
const OverDriveLimits_t * const overdrive_upperlimits =
&smc_pptable->SkuTable.OverDriveLimitsBasicMax;
const OverDriveLimits_t * const overdrive_lowerlimits =
&smc_pptable->SkuTable.OverDriveLimitsMin;
-#endif
if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC)
smu->dc_controlled_by_gpio = true;
@@ -349,22 +354,18 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
smu_baco->maco_support = true;
}
-#if 0
if (!overdrive_lowerlimits->FeatureCtrlMask ||
!overdrive_upperlimits->FeatureCtrlMask)
smu->od_enabled = false;
+ table_context->thermal_controller_type =
+ powerplay_table->thermal_controller_type;
+
/*
* Instead of having its own buffer space and get overdrive_table copied,
* smu->od_settings just points to the actual overdrive_table
*/
smu->od_settings = &powerplay_table->overdrive_table;
-#else
- smu->od_enabled = false;
-#endif
-
- table_context->thermal_controller_type =
- powerplay_table->thermal_controller_type;
return 0;
}
@@ -697,6 +698,22 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
pcie_table->num_of_link_levels++;
}
+ /* dcefclk dpm table setup */
+ dpm_table = &dpm_context->dpm_tables.dcef_table;
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
+ ret = smu_v13_0_set_single_dpm_table(smu,
+ SMU_DCEFCLK,
+ dpm_table);
+ if (ret)
+ return ret;
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
+ dpm_table->dpm_levels[0].enabled = true;
+ dpm_table->min = dpm_table->dpm_levels[0].value;
+ dpm_table->max = dpm_table->dpm_levels[0].value;
+ }
+
return 0;
}
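
When the DCN DPM feature is disabled, the driver still needs a well-formed dcefclk table, so it synthesizes a single always-enabled level from the VBIOS boot value; the divide by 100 suggests boot_values.dcefclk is stored in 10 kHz units (an assumption here). The fallback pattern, generically:

    #include <stdio.h>

    struct one_level_table {
            unsigned count, min, max;
            struct { unsigned value; int enabled; } levels[1];
    };

    /* Collapse to one always-enabled level when DPM for a clock is disabled. */
    static void single_level_fallback(struct one_level_table *t, unsigned boot)
    {
            t->count = 1;
            t->levels[0].value = boot / 100;    /* 10 kHz -> MHz (assumed) */
            t->levels[0].enabled = 1;
            t->min = t->max = t->levels[0].value;
    }

    int main(void)
    {
            struct one_level_table t;

            single_level_fallback(&t, 60000);   /* 600 MHz boot dcefclk */
            printf("%u MHz\n", t.levels[0].value);
            return 0;
    }
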
@@ -778,6 +795,9 @@ static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu,
case METRICS_CURR_FCLK:
*value = metrics->CurrClock[PPCLK_FCLK];
break;
+ case METRICS_CURR_DCEFCLK:
+ *value = metrics->CurrClock[PPCLK_DCFCLK];
+ break;
case METRICS_AVERAGE_GFXCLK:
*value = metrics->AverageGfxclkFrequencyPreDs;
break;
@@ -1028,6 +1048,9 @@ static int smu_v13_0_7_get_current_clk_freq_by_table(struct smu_context *smu,
case PPCLK_DCLK_1:
member_type = METRICS_CURR_DCLK1;
break;
+ case PPCLK_DCFCLK:
+ member_type = METRICS_CURR_DCEFCLK;
+ break;
default:
return -EINVAL;
}
@@ -1080,6 +1103,30 @@ static void smu_v13_0_7_get_od_setting_limits(struct smu_context *smu,
od_min_setting = overdrive_lowerlimits->VoltageOffsetPerZoneBoundary;
od_max_setting = overdrive_upperlimits->VoltageOffsetPerZoneBoundary;
break;
+ case PP_OD_FEATURE_FAN_CURVE_TEMP:
+ od_min_setting = overdrive_lowerlimits->FanLinearTempPoints;
+ od_max_setting = overdrive_upperlimits->FanLinearTempPoints;
+ break;
+ case PP_OD_FEATURE_FAN_CURVE_PWM:
+ od_min_setting = overdrive_lowerlimits->FanLinearPwmPoints;
+ od_max_setting = overdrive_upperlimits->FanLinearPwmPoints;
+ break;
+ case PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT:
+ od_min_setting = overdrive_lowerlimits->AcousticLimitRpmThreshold;
+ od_max_setting = overdrive_upperlimits->AcousticLimitRpmThreshold;
+ break;
+ case PP_OD_FEATURE_FAN_ACOUSTIC_TARGET:
+ od_min_setting = overdrive_lowerlimits->AcousticTargetRpmThreshold;
+ od_max_setting = overdrive_upperlimits->AcousticTargetRpmThreshold;
+ break;
+ case PP_OD_FEATURE_FAN_TARGET_TEMPERATURE:
+ od_min_setting = overdrive_lowerlimits->FanTargetTemperature;
+ od_max_setting = overdrive_upperlimits->FanTargetTemperature;
+ break;
+ case PP_OD_FEATURE_FAN_MINIMUM_PWM:
+ od_min_setting = overdrive_lowerlimits->FanMinimumPwm;
+ od_max_setting = overdrive_upperlimits->FanMinimumPwm;
+ break;
default:
od_min_setting = od_max_setting = INT_MAX;
break;
@@ -1177,6 +1224,9 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
case SMU_DCLK1:
single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
break;
+ case SMU_DCEFCLK:
+ single_dpm_table = &(dpm_context->dpm_tables.dcef_table);
+ break;
default:
break;
}
@@ -1190,6 +1240,7 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
case SMU_VCLK1:
case SMU_DCLK:
case SMU_DCLK1:
+ case SMU_DCEFCLK:
ret = smu_v13_0_7_get_current_clk_freq_by_table(smu, clk_type, &curr_freq);
if (ret) {
dev_err(smu->adev->dev, "Failed to get current clock freq!");
@@ -1285,16 +1336,115 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
od_table->OverDriveTable.UclkFmax);
break;
- case SMU_OD_VDDC_CURVE:
+ case SMU_OD_VDDGFX_OFFSET:
if (!smu_v13_0_7_is_od_feature_supported(smu,
PP_OD_FEATURE_GFX_VF_CURVE_BIT))
break;
- size += sysfs_emit_at(buf, size, "OD_VDDC_CURVE:\n");
- for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
- size += sysfs_emit_at(buf, size, "%d: %dmv\n",
+ size += sysfs_emit_at(buf, size, "OD_VDDGFX_OFFSET:\n");
+ size += sysfs_emit_at(buf, size, "%dmV\n",
+ od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[0]);
+ break;
+
+ case SMU_OD_FAN_CURVE:
+ if (!smu_v13_0_7_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_FAN_CURVE:\n");
+ for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++)
+ size += sysfs_emit_at(buf, size, "%d: %dC %d%%\n",
i,
- od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i]);
+ (int)od_table->OverDriveTable.FanLinearTempPoints[i],
+ (int)od_table->OverDriveTable.FanLinearPwmPoints[i]);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_CURVE_TEMP,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "FAN_CURVE(hotspot temp): %uC %uC\n",
+ min_value, max_value);
+
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_CURVE_PWM,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "FAN_CURVE(fan speed): %u%% %u%%\n",
+ min_value, max_value);
+
+ break;
+
+ case SMU_OD_ACOUSTIC_LIMIT:
+ if (!smu_v13_0_7_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_LIMIT:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.AcousticLimitRpmThreshold);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "ACOUSTIC_LIMIT: %u %u\n",
+ min_value, max_value);
+ break;
+
+ case SMU_OD_ACOUSTIC_TARGET:
+ if (!smu_v13_0_7_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_TARGET:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.AcousticTargetRpmThreshold);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ACOUSTIC_TARGET,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "ACOUSTIC_TARGET: %u %u\n",
+ min_value, max_value);
+ break;
+
+ case SMU_OD_FAN_TARGET_TEMPERATURE:
+ if (!smu_v13_0_7_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "FAN_TARGET_TEMPERATURE:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.FanTargetTemperature);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_TARGET_TEMPERATURE,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "TARGET_TEMPERATURE: %u %u\n",
+ min_value, max_value);
+ break;
+
+ case SMU_OD_FAN_MINIMUM_PWM:
+ if (!smu_v13_0_7_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "FAN_MINIMUM_PWM:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.FanMinimumPwm);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_MINIMUM_PWM,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "MINIMUM_PWM: %u %u\n",
+ min_value, max_value);
break;
case SMU_OD_RANGE:
@@ -1336,7 +1486,7 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
PP_OD_FEATURE_GFX_VF_CURVE,
&min_value,
&max_value);
- size += sysfs_emit_at(buf, size, "VDDC_CURVE: %7dmv %10dmv\n",
+ size += sysfs_emit_at(buf, size, "VDDGFX_OFFSET: %7dmv %10dmv\n",
min_value, max_value);
}
break;
@@ -1348,6 +1498,59 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
return size;
}
+static int smu_v13_0_7_od_restore_table_single(struct smu_context *smu, long input)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTableExternal_t *boot_overdrive_table =
+ (OverDriveTableExternal_t *)table_context->boot_overdrive_table;
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)table_context->overdrive_table;
+ struct amdgpu_device *adev = smu->adev;
+ int i;
+
+ switch (input) {
+ case PP_OD_EDIT_FAN_CURVE:
+ for (i = 0; i < NUM_OD_FAN_MAX_POINTS; i++) {
+ od_table->OverDriveTable.FanLinearTempPoints[i] =
+ boot_overdrive_table->OverDriveTable.FanLinearTempPoints[i];
+ od_table->OverDriveTable.FanLinearPwmPoints[i] =
+ boot_overdrive_table->OverDriveTable.FanLinearPwmPoints[i];
+ }
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ case PP_OD_EDIT_ACOUSTIC_LIMIT:
+ od_table->OverDriveTable.AcousticLimitRpmThreshold =
+ boot_overdrive_table->OverDriveTable.AcousticLimitRpmThreshold;
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ case PP_OD_EDIT_ACOUSTIC_TARGET:
+ od_table->OverDriveTable.AcousticTargetRpmThreshold =
+ boot_overdrive_table->OverDriveTable.AcousticTargetRpmThreshold;
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ case PP_OD_EDIT_FAN_TARGET_TEMPERATURE:
+ od_table->OverDriveTable.FanTargetTemperature =
+ boot_overdrive_table->OverDriveTable.FanTargetTemperature;
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ case PP_OD_EDIT_FAN_MINIMUM_PWM:
+ od_table->OverDriveTable.FanMinimumPwm =
+ boot_overdrive_table->OverDriveTable.FanMinimumPwm;
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ default:
+ dev_info(adev->dev, "Invalid table index: %ld\n", input);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int smu_v13_0_7_od_edit_dpm_table(struct smu_context *smu,
enum PP_OD_DPM_TABLE_COMMAND type,
long input[],
@@ -1485,37 +1688,166 @@ static int smu_v13_0_7_od_edit_dpm_table(struct smu_context *smu,
}
break;
- case PP_OD_EDIT_VDDC_CURVE:
+ case PP_OD_EDIT_VDDGFX_OFFSET:
if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
- dev_warn(adev->dev, "VF curve setting not supported!\n");
+ dev_warn(adev->dev, "Gfx offset setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFX_VF_CURVE,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
+ od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] = input[0];
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_FAN_CURVE:
+ if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
return -ENOTSUPP;
}
- if (input[0] >= PP_NUM_OD_VF_CURVE_POINTS ||
+ if (input[0] >= NUM_OD_FAN_MAX_POINTS - 1 ||
input[0] < 0)
return -EINVAL;
smu_v13_0_7_get_od_setting_limits(smu,
- PP_OD_FEATURE_GFX_VF_CURVE,
+ PP_OD_FEATURE_FAN_CURVE_TEMP,
&minimum,
&maximum);
if (input[1] < minimum ||
input[1] > maximum) {
- dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n",
+ dev_info(adev->dev, "Fan curve temp setting(%ld) must be within [%d, %d]!\n",
input[1], minimum, maximum);
return -EINVAL;
}
- od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[input[0]] = input[1];
- od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFX_VF_CURVE_BIT;
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_CURVE_PWM,
+ &minimum,
+ &maximum);
+ if (input[2] < minimum ||
+ input[2] > maximum) {
+ dev_info(adev->dev, "Fan curve pwm setting(%ld) must be within [%d, %d]!\n",
+ input[2], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.FanLinearTempPoints[input[0]] = input[1];
+ od_table->OverDriveTable.FanLinearPwmPoints[input[0]] = input[2];
+ od_table->OverDriveTable.FanMode = FAN_MODE_MANUAL_LINEAR;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_ACOUSTIC_LIMIT:
+ if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "acoustic limit threshold setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.AcousticLimitRpmThreshold = input[0];
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_ACOUSTIC_TARGET:
+ if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ACOUSTIC_TARGET,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "acoustic target threshold setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.AcousticTargetRpmThreshold = input[0];
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_FAN_TARGET_TEMPERATURE:
+ if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_TARGET_TEMPERATURE,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "fan target temperature setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.FanTargetTemperature = input[0];
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_FAN_MINIMUM_PWM:
+ if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_MINIMUM_PWM,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "fan minimum pwm setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.FanMinimumPwm = input[0];
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
break;
case PP_OD_RESTORE_DEFAULT_TABLE:
- feature_ctrlmask = od_table->OverDriveTable.FeatureCtrlMask;
- memcpy(od_table,
- table_context->boot_overdrive_table,
- sizeof(OverDriveTableExternal_t));
- od_table->OverDriveTable.FeatureCtrlMask = feature_ctrlmask;
+ if (size == 1) {
+ ret = smu_v13_0_7_od_restore_table_single(smu, input[0]);
+ if (ret)
+ return ret;
+ } else {
+ feature_ctrlmask = od_table->OverDriveTable.FeatureCtrlMask;
+ memcpy(od_table,
+ table_context->boot_overdrive_table,
+ sizeof(OverDriveTableExternal_t));
+ od_table->OverDriveTable.FeatureCtrlMask = feature_ctrlmask;
+ }
fallthrough;
case PP_OD_COMMIT_DPM_TABLE:
@@ -1674,7 +2006,6 @@ static int smu_v13_0_7_get_thermal_temperature_range(struct smu_context *smu,
return 0;
}
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
static ssize_t smu_v13_0_7_get_gpu_metrics(struct smu_context *smu,
void **table)
{
@@ -1698,12 +2029,12 @@ static ssize_t smu_v13_0_7_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM];
gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX];
gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC];
- gpu_metrics->temperature_vrmem = MAX(metrics->AvgTemperature[TEMP_VR_MEM0],
+ gpu_metrics->temperature_vrmem = max(metrics->AvgTemperature[TEMP_VR_MEM0],
metrics->AvgTemperature[TEMP_VR_MEM1]);
gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
- gpu_metrics->average_mm_activity = MAX(metrics->Vcn0ActivityPercentage,
+ gpu_metrics->average_mm_activity = max(metrics->Vcn0ActivityPercentage,
metrics->Vcn1ActivityPercentage);
gpu_metrics->average_socket_power = metrics->AverageSocketPower;
@@ -1755,6 +2086,24 @@ static ssize_t smu_v13_0_7_get_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v1_3);
}
+static void smu_v13_0_7_set_supported_od_feature_mask(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (smu_v13_0_7_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ adev->pm.od_feature_mask |= OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE |
+ OD_OPS_SUPPORT_FAN_CURVE_SET |
+ OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE |
+ OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET |
+ OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE |
+ OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET |
+ OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE |
+ OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET |
+ OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE |
+ OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET;
+}
+
static int smu_v13_0_7_set_default_od_settings(struct smu_context *smu)
{
OverDriveTableExternal_t *od_table =
@@ -1804,8 +2153,24 @@ static int smu_v13_0_7_set_default_od_settings(struct smu_context *smu)
for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
user_od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] =
user_od_table_bak.OverDriveTable.VoltageOffsetPerZoneBoundary[i];
+ for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++) {
+ user_od_table->OverDriveTable.FanLinearTempPoints[i] =
+ user_od_table_bak.OverDriveTable.FanLinearTempPoints[i];
+ user_od_table->OverDriveTable.FanLinearPwmPoints[i] =
+ user_od_table_bak.OverDriveTable.FanLinearPwmPoints[i];
+ }
+ user_od_table->OverDriveTable.AcousticLimitRpmThreshold =
+ user_od_table_bak.OverDriveTable.AcousticLimitRpmThreshold;
+ user_od_table->OverDriveTable.AcousticTargetRpmThreshold =
+ user_od_table_bak.OverDriveTable.AcousticTargetRpmThreshold;
+ user_od_table->OverDriveTable.FanTargetTemperature =
+ user_od_table_bak.OverDriveTable.FanTargetTemperature;
+ user_od_table->OverDriveTable.FanMinimumPwm =
+ user_od_table_bak.OverDriveTable.FanMinimumPwm;
}
+ smu_v13_0_7_set_supported_od_feature_mask(smu);
+
return 0;
}
@@ -1816,9 +2181,10 @@ static int smu_v13_0_7_restore_user_od_settings(struct smu_context *smu)
OverDriveTableExternal_t *user_od_table = table_context->user_overdrive_table;
int res;
- user_od_table->OverDriveTable.FeatureCtrlMask = 1U << PP_OD_FEATURE_GFXCLK_BIT |
- 1U << PP_OD_FEATURE_UCLK_BIT |
- 1U << PP_OD_FEATURE_GFX_VF_CURVE_BIT;
+ user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) |
+ BIT(PP_OD_FEATURE_UCLK_BIT) |
+ BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) |
+ BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
res = smu_v13_0_7_upload_overdrive_table(smu, user_od_table);
user_od_table->OverDriveTable.FeatureCtrlMask = 0;
if (res == 0)
@@ -1903,7 +2269,7 @@ static int smu_v13_0_7_get_fan_speed_pwm(struct smu_context *smu,
}
/* Convert the PMFW output, which is in percent, to a pwm value scaled to 255 */
- *speed = MIN(*speed * 255 / 100, 255);
+ *speed = min(*speed * 255 / 100, (uint32_t)255);
return 0;
}
@@ -1939,16 +2305,18 @@ static int smu_v13_0_7_enable_mgpu_fan_boost(struct smu_context *smu)
}
static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
- uint32_t *current_power_limit,
- uint32_t *default_power_limit,
- uint32_t *max_power_limit)
+ uint32_t *current_power_limit,
+ uint32_t *default_power_limit,
+ uint32_t *max_power_limit,
+ uint32_t *min_power_limit)
{
struct smu_table_context *table_context = &smu->smu_table;
struct smu_13_0_7_powerplay_table *powerplay_table =
(struct smu_13_0_7_powerplay_table *)table_context->power_play_table;
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
- uint32_t power_limit, od_percent;
+ uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
if (smu_v13_0_get_current_power_limit(smu, &power_limit))
power_limit = smu->adev->pm.ac_power ?
@@ -1960,16 +2328,25 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (max_power_limit) {
- if (smu->od_enabled) {
- od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+ if (smu->od_enabled) {
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+ } else {
+ od_percent_upper = 0;
+ od_percent_lower = 100;
+ }
- dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);
+ dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
+ od_percent_upper, od_percent_lower, power_limit);
- power_limit *= (100 + od_percent);
- power_limit /= 100;
- }
- *max_power_limit = power_limit;
+ if (max_power_limit) {
+ *max_power_limit = msg_limit * (100 + od_percent_upper);
+ *max_power_limit /= 100;
+ }
+
+ if (min_power_limit) {
+ *min_power_limit = power_limit * (100 - od_percent_lower);
+ *min_power_limit /= 100;
}
return 0;
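
The min/max computation above is plain percentage scaling: the upper bound scales the firmware message limit (msg_limit) by the OD upper percentage, the lower bound scales the default limit by the OD lower percentage, and with OD disabled the (0, 100) pair collapses the range to [0, msg_limit]. A minimal standalone sketch of that arithmetic; all wattages and percentages below are made up for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t msg_limit = 300;        /* hypothetical PPT0 AC limit, in W */
	uint32_t power_limit = 260;      /* hypothetical default limit, in W */
	uint32_t od_percent_upper = 15;  /* overdrive_table.max[POWERPERCENTAGE] */
	uint32_t od_percent_lower = 30;  /* overdrive_table.min[POWERPERCENTAGE] */

	/* Same integer percent scaling as the patch */
	uint32_t max_power_limit = msg_limit * (100 + od_percent_upper) / 100;
	uint32_t min_power_limit = power_limit * (100 - od_percent_lower) / 100;

	printf("limit range: [%u, %u] W\n", min_power_limit, max_power_limit); /* [182, 345] */
	return 0;
}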
@@ -2149,14 +2526,20 @@ static int smu_v13_0_7_baco_enter(struct smu_context *smu)
static int smu_v13_0_7_baco_exit(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
+ int ret;
if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
/* Wait for PMFW handling for the Dstate change */
usleep_range(10000, 11000);
- return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+ ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
} else {
- return smu_v13_0_baco_exit(smu);
+ ret = smu_v13_0_baco_exit(smu);
}
+
+ if (!ret)
+ adev->gfx.is_poweron = false;
+
+ return ret;
}
static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu)
@@ -2179,6 +2562,55 @@ static int smu_v13_0_7_set_df_cstate(struct smu_context *smu,
NULL);
}
+static int smu_v13_0_7_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ SkuTable_t *skutable = &pptable->SkuTable;
+ uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)table_context->overdrive_table;
+ int ret = 0;
+
+ if (limit_type != SMU_DEFAULT_PPT_LIMIT)
+ return -EINVAL;
+
+ if (limit <= msg_limit) {
+ if (smu->current_power_limit > msg_limit) {
+ od_table->OverDriveTable.Ppt = 0;
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
+
+ ret = smu_v13_0_7_upload_overdrive_table(smu, od_table);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+ return ret;
+ }
+ }
+ return smu_v13_0_set_power_limit(smu, limit_type, limit);
+ } else if (smu->od_enabled) {
+ ret = smu_v13_0_set_power_limit(smu, limit_type, msg_limit);
+ if (ret)
+ return ret;
+
+ od_table->OverDriveTable.Ppt = (limit * 100) / msg_limit - 100;
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
+
+ ret = smu_v13_0_7_upload_overdrive_table(smu, od_table);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+ return ret;
+ }
+
+ smu->current_power_limit = limit;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
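
When the requested limit exceeds what the SetPptLimit message interface accepts, the overflow is expressed as a percentage offset in OverDriveTable.Ppt. A standalone sketch of that calculation; the wattages are made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t msg_limit = 300;  /* hypothetical cap of the message interface, in W */
	uint32_t limit = 330;      /* hypothetical user-requested limit, in W */

	if (limit > msg_limit) {
		/* Excess over the cap, as an integer percentage (10 here) */
		int32_t ppt = (int32_t)((limit * 100) / msg_limit) - 100;
		printf("OverDriveTable.Ppt = %d\n", ppt);
	}
	return 0;
}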
static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
@@ -2230,7 +2662,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.set_fan_control_mode = smu_v13_0_set_fan_control_mode,
.enable_mgpu_fan_boost = smu_v13_0_7_enable_mgpu_fan_boost,
.get_power_limit = smu_v13_0_7_get_power_limit,
- .set_power_limit = smu_v13_0_set_power_limit,
+ .set_power_limit = smu_v13_0_7_set_power_limit,
.set_power_source = smu_v13_0_set_power_source,
.get_power_profile_mode = smu_v13_0_7_get_power_profile_mode,
.set_power_profile_mode = smu_v13_0_7_set_power_profile_mode,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 2e74d749e..2d1736234 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -1024,24 +1024,24 @@ static uint32_t yellow_carp_get_umd_pstate_clk_default(struct smu_context *smu,
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
- if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 8))
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 8))
clk_limit = SMU_13_0_8_UMD_PSTATE_GFXCLK;
- if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 1) ||
- (adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 3))
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 1) ||
+ (amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 3))
clk_limit = SMU_13_0_1_UMD_PSTATE_GFXCLK;
break;
case SMU_SOCCLK:
- if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 8))
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 8))
clk_limit = SMU_13_0_8_UMD_PSTATE_SOCCLK;
- if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 1) ||
- (adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 3))
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 1) ||
+ (amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 3))
clk_limit = SMU_13_0_1_UMD_PSTATE_SOCCLK;
break;
case SMU_FCLK:
- if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 8))
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 8))
clk_limit = SMU_13_0_8_UMD_PSTATE_FCLK;
- if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 1) ||
- (adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 3))
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 1) ||
+ (amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 3))
clk_limit = SMU_13_0_1_UMD_PSTATE_FCLK;
break;
default:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile
new file mode 100644
index 000000000..ddbac5c65
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile
@@ -0,0 +1,30 @@
+#
+# Copyright 2023 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'smu manager' sub-component of powerplay.
+# It provides the smu management services for the driver.
+
+SMU14_MGR = smu_v14_0.o smu_v14_0_0_ppt.o
+
+AMD_SWSMU_SMU14MGR = $(addprefix $(AMD_SWSMU_PATH)/smu14/,$(SMU14_MGR))
+
+AMD_POWERPLAY_FILES += $(AMD_SWSMU_SMU14MGR)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
new file mode 100644
index 000000000..d8f8ad0e7
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -0,0 +1,1729 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/reboot.h>
+
+#define SWSMU_CODE_LAYER_L3
+
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "atomfirmware.h"
+#include "amdgpu_atomfirmware.h"
+#include "amdgpu_atombios.h"
+#include "smu_v14_0.h"
+#include "soc15_common.h"
+#include "atom.h"
+#include "amdgpu_ras.h"
+#include "smu_cmn.h"
+
+#include "asic_reg/mp/mp_14_0_0_offset.h"
+#include "asic_reg/mp/mp_14_0_0_sh_mask.h"
+
+/*
+ * DO NOT use these for err/warn/info/debug messages.
+ * Use dev_err, dev_warn, dev_info and dev_dbg instead.
+ * They are more MGPU friendly.
+ */
+#undef pr_err
+#undef pr_warn
+#undef pr_info
+#undef pr_debug
+
+MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin");
+
+int smu_v14_0_init_microcode(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ char fw_name[30];
+ char ucode_prefix[15];
+ int err = 0;
+ const struct smc_firmware_header_v1_0 *hdr;
+ const struct common_firmware_header *header;
+ struct amdgpu_firmware_info *ucode = NULL;
+
+ /* no need to load smu firmware in SRIOV mode */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
+
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
+ if (err)
+ goto out;
+
+ hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+ amdgpu_ucode_print_smc_hdr(&hdr->header);
+ adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
+ ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
+ ucode->fw = adev->pm.fw;
+ header = (const struct common_firmware_header *)ucode->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ }
+
+out:
+ if (err)
+ amdgpu_ucode_release(&adev->pm.fw);
+ return err;
+}
+
+void smu_v14_0_fini_microcode(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ amdgpu_ucode_release(&adev->pm.fw);
+ adev->pm.fw_version = 0;
+}
+
+int smu_v14_0_load_microcode(struct smu_context *smu)
+{
+#if 0
+ struct amdgpu_device *adev = smu->adev;
+ const uint32_t *src;
+ const struct smc_firmware_header_v1_0 *hdr;
+ uint32_t addr_start = MP1_SRAM;
+ uint32_t i;
+ uint32_t smc_fw_size;
+ uint32_t mp1_fw_flags;
+
+ hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+ src = (const uint32_t *)(adev->pm.fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ smc_fw_size = hdr->header.ucode_size_bytes;
+
+ for (i = 1; i < smc_fw_size/4 - 1; i++) {
+ WREG32_PCIE(addr_start, src[i]);
+ addr_start += 4;
+ }
+
+ WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
+ 1 & MP1_SMN_PUB_CTRL__LX3_RESET_MASK);
+ WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
+ 1 & ~MP1_SMN_PUB_CTRL__LX3_RESET_MASK);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+ if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
+ MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
+ break;
+ udelay(1);
+ }
+
+ if (i == adev->usec_timeout)
+ return -ETIME;
+
+#endif
+ return 0;
+
+}
+
+int smu_v14_0_init_pptable_microcode(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ struct amdgpu_firmware_info *ucode = NULL;
+ uint32_t size = 0, pptable_id = 0;
+ int ret = 0;
+ void *table;
+
+ /* no need to load smu firmware in SRIOV mode */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ return 0;
+
+ if (!adev->scpm_enabled)
+ return 0;
+
+ /* override pptable_id from driver parameter */
+ if (amdgpu_smu_pptable_id >= 0) {
+ pptable_id = amdgpu_smu_pptable_id;
+ dev_info(adev->dev, "override pptable id %d\n", pptable_id);
+ } else {
+ pptable_id = smu->smu_table.boot_values.pp_table_id;
+ }
+
+ /* "pptable_id == 0" means vbios carries the pptable. */
+ if (!pptable_id)
+ return 0;
+
+ ret = smu_v14_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
+ if (ret)
+ return ret;
+
+ smu->pptable_firmware.data = table;
+ smu->pptable_firmware.size = size;
+
+ ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_PPTABLE];
+ ucode->ucode_id = AMDGPU_UCODE_ID_PPTABLE;
+ ucode->fw = &smu->pptable_firmware;
+ adev->firmware.fw_size +=
+ ALIGN(smu->pptable_firmware.size, PAGE_SIZE);
+
+ return 0;
+}
+
+int smu_v14_0_check_fw_status(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t mp1_fw_flags;
+
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+
+ if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
+ MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
+ return 0;
+
+ return -EIO;
+}
+
+int smu_v14_0_check_fw_version(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t if_version = 0xff, smu_version = 0xff;
+ uint8_t smu_program, smu_major, smu_minor, smu_debug;
+ int ret = 0;
+
+ ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
+ if (ret)
+ return ret;
+
+ smu_program = (smu_version >> 24) & 0xff;
+ smu_major = (smu_version >> 16) & 0xff;
+ smu_minor = (smu_version >> 8) & 0xff;
+ smu_debug = (smu_version >> 0) & 0xff;
+ if (smu->is_apu)
+ adev->pm.fw_version = smu_version;
+
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(14, 0, 2):
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
+ break;
+ case IP_VERSION(14, 0, 0):
+ if ((smu->smc_fw_version < 0x5d3a00))
+ dev_warn(smu->adev->dev, "The PMFW version (%x) carried by this BIOS is outdated!\n", smu->smc_fw_version);
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
+ break;
+ default:
+ dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
+ adev->ip_versions[MP1_HWIP][0]);
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_INV;
+ break;
+ }
+
+ if (adev->pm.fw)
+ dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n",
+ smu_program, smu_version, smu_major, smu_minor, smu_debug);
+
+ /*
+ * 1. An if_version mismatch is not critical as our fw is designed
+ * to be backward compatible.
+ * 2. New fw usually brings some optimizations. But those are visible
+ * only with the paired driver.
+ * Considering the above, we just leave the user a warning message
+ * instead of halting driver loading.
+ */
+ if (if_version != smu->smc_driver_if_version) {
+ dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
+ "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
+ smu->smc_driver_if_version, if_version,
+ smu_program, smu_version, smu_major, smu_minor, smu_debug);
+ dev_info(adev->dev, "SMU driver if version not matched\n");
+ }
+
+ return ret;
+}
+
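
The PMFW version word decoded above packs four byte-wide fields (program, major, minor, debug) from high byte to low. A standalone sketch with a made-up version value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t smu_version = 0x004a3b01;  /* made-up PMFW version word */

	uint8_t smu_program = (smu_version >> 24) & 0xff;  /* 0 */
	uint8_t smu_major   = (smu_version >> 16) & 0xff;  /* 74 */
	uint8_t smu_minor   = (smu_version >>  8) & 0xff;  /* 59 */
	uint8_t smu_debug   = (smu_version >>  0) & 0xff;  /* 1 */

	printf("program %u, version %u.%u.%u\n",
	       smu_program, smu_major, smu_minor, smu_debug);
	return 0;
}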
+static int smu_v14_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t ppt_offset_bytes;
+ const struct smc_firmware_header_v2_0 *v2;
+
+ v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;
+
+ ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
+ *size = le32_to_cpu(v2->ppt_size_bytes);
+ *table = (uint8_t *)v2 + ppt_offset_bytes;
+
+ return 0;
+}
+
+static int smu_v14_0_set_pptable_v2_1(struct smu_context *smu, void **table,
+ uint32_t *size, uint32_t pptable_id)
+{
+ struct amdgpu_device *adev = smu->adev;
+ const struct smc_firmware_header_v2_1 *v2_1;
+ struct smc_soft_pptable_entry *entries;
+ uint32_t pptable_count = 0;
+ int i = 0;
+
+ v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
+ entries = (struct smc_soft_pptable_entry *)
+ ((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
+ pptable_count = le32_to_cpu(v2_1->pptable_count);
+ for (i = 0; i < pptable_count; i++) {
+ if (le32_to_cpu(entries[i].id) == pptable_id) {
+ *table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
+ *size = le32_to_cpu(entries[i].ppt_size_bytes);
+ break;
+ }
+ }
+
+ if (i == pptable_count)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int smu_v14_0_get_pptable_from_vbios(struct smu_context *smu, void **table, uint32_t *size)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint16_t atom_table_size;
+ uint8_t frev, crev;
+ int ret, index;
+
+ dev_info(adev->dev, "use vbios provided pptable\n");
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ powerplayinfo);
+
+ ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
+ (uint8_t **)table);
+ if (ret)
+ return ret;
+
+ if (size)
+ *size = atom_table_size;
+
+ return 0;
+}
+
+int smu_v14_0_get_pptable_from_firmware(struct smu_context *smu,
+ void **table,
+ uint32_t *size,
+ uint32_t pptable_id)
+{
+ const struct smc_firmware_header_v1_0 *hdr;
+ struct amdgpu_device *adev = smu->adev;
+ uint16_t version_major, version_minor;
+ int ret;
+
+ hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+ if (!hdr)
+ return -EINVAL;
+
+ dev_info(adev->dev, "use driver provided pptable %d\n", pptable_id);
+
+ version_major = le16_to_cpu(hdr->header.header_version_major);
+ version_minor = le16_to_cpu(hdr->header.header_version_minor);
+ if (version_major != 2) {
+ dev_err(adev->dev, "Unsupported smu firmware version %d.%d\n",
+ version_major, version_minor);
+ return -EINVAL;
+ }
+
+ switch (version_minor) {
+ case 0:
+ ret = smu_v14_0_set_pptable_v2_0(smu, table, size);
+ break;
+ case 1:
+ ret = smu_v14_0_set_pptable_v2_1(smu, table, size, pptable_id);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+int smu_v14_0_setup_pptable(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t size = 0, pptable_id = 0;
+ void *table;
+ int ret = 0;
+
+ /* override pptable_id from driver parameter */
+ if (amdgpu_smu_pptable_id >= 0) {
+ pptable_id = amdgpu_smu_pptable_id;
+ dev_info(adev->dev, "override pptable id %d\n", pptable_id);
+ } else {
+ pptable_id = smu->smu_table.boot_values.pp_table_id;
+ }
+
+ /* force using vbios pptable in sriov mode */
+ if ((amdgpu_sriov_vf(adev) || !pptable_id) && (amdgpu_emu_mode != 1))
+ ret = smu_v14_0_get_pptable_from_vbios(smu, &table, &size);
+ else
+ ret = smu_v14_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
+
+ if (ret)
+ return ret;
+
+ if (!smu->smu_table.power_play_table)
+ smu->smu_table.power_play_table = table;
+ if (!smu->smu_table.power_play_table_size)
+ smu->smu_table.power_play_table_size = size;
+
+ return 0;
+}
+
+int smu_v14_0_init_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ int ret = 0;
+
+ smu_table->driver_pptable =
+ kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL);
+ if (!smu_table->driver_pptable) {
+ ret = -ENOMEM;
+ goto err0_out;
+ }
+
+ smu_table->max_sustainable_clocks =
+ kzalloc(sizeof(struct smu_14_0_max_sustainable_clocks), GFP_KERNEL);
+ if (!smu_table->max_sustainable_clocks) {
+ ret = -ENOMEM;
+ goto err1_out;
+ }
+
+ if (tables[SMU_TABLE_OVERDRIVE].size) {
+ smu_table->overdrive_table =
+ kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
+ if (!smu_table->overdrive_table) {
+ ret = -ENOMEM;
+ goto err2_out;
+ }
+
+ smu_table->boot_overdrive_table =
+ kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
+ if (!smu_table->boot_overdrive_table) {
+ ret = -ENOMEM;
+ goto err3_out;
+ }
+ }
+
+ smu_table->combo_pptable =
+ kzalloc(tables[SMU_TABLE_COMBO_PPTABLE].size, GFP_KERNEL);
+ if (!smu_table->combo_pptable) {
+ ret = -ENOMEM;
+ goto err4_out;
+ }
+
+ return 0;
+
+err4_out:
+ kfree(smu_table->boot_overdrive_table);
+err3_out:
+ kfree(smu_table->overdrive_table);
+err2_out:
+ kfree(smu_table->max_sustainable_clocks);
+err1_out:
+ kfree(smu_table->driver_pptable);
+err0_out:
+ return ret;
+}
+
+int smu_v14_0_fini_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+
+ kfree(smu_table->gpu_metrics_table);
+ kfree(smu_table->combo_pptable);
+ kfree(smu_table->boot_overdrive_table);
+ kfree(smu_table->overdrive_table);
+ kfree(smu_table->max_sustainable_clocks);
+ kfree(smu_table->driver_pptable);
+ smu_table->gpu_metrics_table = NULL;
+ smu_table->combo_pptable = NULL;
+ smu_table->boot_overdrive_table = NULL;
+ smu_table->overdrive_table = NULL;
+ smu_table->max_sustainable_clocks = NULL;
+ smu_table->driver_pptable = NULL;
+ kfree(smu_table->hardcode_pptable);
+ smu_table->hardcode_pptable = NULL;
+
+ kfree(smu_table->ecc_table);
+ kfree(smu_table->metrics_table);
+ kfree(smu_table->watermarks_table);
+ smu_table->ecc_table = NULL;
+ smu_table->metrics_table = NULL;
+ smu_table->watermarks_table = NULL;
+ smu_table->metrics_time = 0;
+
+ kfree(smu_dpm->dpm_context);
+ kfree(smu_dpm->golden_dpm_context);
+ kfree(smu_dpm->dpm_current_power_state);
+ kfree(smu_dpm->dpm_request_power_state);
+ smu_dpm->dpm_context = NULL;
+ smu_dpm->golden_dpm_context = NULL;
+ smu_dpm->dpm_context_size = 0;
+ smu_dpm->dpm_current_power_state = NULL;
+ smu_dpm->dpm_request_power_state = NULL;
+
+ return 0;
+}
+
+int smu_v14_0_init_power(struct smu_context *smu)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+
+ if (smu_power->power_context || smu_power->power_context_size != 0)
+ return -EINVAL;
+
+ smu_power->power_context = kzalloc(sizeof(struct smu_14_0_dpm_context),
+ GFP_KERNEL);
+ if (!smu_power->power_context)
+ return -ENOMEM;
+ smu_power->power_context_size = sizeof(struct smu_14_0_dpm_context);
+
+ return 0;
+}
+
+int smu_v14_0_fini_power(struct smu_context *smu)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+
+ if (!smu_power->power_context || smu_power->power_context_size == 0)
+ return -EINVAL;
+
+ kfree(smu_power->power_context);
+ smu_power->power_context = NULL;
+ smu_power->power_context_size = 0;
+
+ return 0;
+}
+
+int smu_v14_0_get_vbios_bootup_values(struct smu_context *smu)
+{
+ int ret, index;
+ uint16_t size;
+ uint8_t frev, crev;
+ struct atom_common_table_header *header;
+ struct atom_firmware_info_v3_4 *v_3_4;
+ struct atom_firmware_info_v3_3 *v_3_3;
+ struct atom_firmware_info_v3_1 *v_3_1;
+ struct atom_smu_info_v3_6 *smu_info_v3_6;
+ struct atom_smu_info_v4_0 *smu_info_v4_0;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ firmwareinfo);
+
+ ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
+ (uint8_t **)&header);
+ if (ret)
+ return ret;
+
+ if (header->format_revision != 3) {
+ dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu14\n");
+ return -EINVAL;
+ }
+
+ switch (header->content_revision) {
+ case 0:
+ case 1:
+ case 2:
+ v_3_1 = (struct atom_firmware_info_v3_1 *)header;
+ smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
+ smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
+ smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
+ smu->smu_table.boot_values.socclk = 0;
+ smu->smu_table.boot_values.dcefclk = 0;
+ smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
+ smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
+ smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
+ smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
+ smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
+ smu->smu_table.boot_values.pp_table_id = 0;
+ break;
+ case 3:
+ v_3_3 = (struct atom_firmware_info_v3_3 *)header;
+ smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
+ smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
+ smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
+ smu->smu_table.boot_values.socclk = 0;
+ smu->smu_table.boot_values.dcefclk = 0;
+ smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
+ smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
+ smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
+ smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
+ smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
+ smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
+ break;
+ case 4:
+ default:
+ v_3_4 = (struct atom_firmware_info_v3_4 *)header;
+ smu->smu_table.boot_values.revision = v_3_4->firmware_revision;
+ smu->smu_table.boot_values.gfxclk = v_3_4->bootup_sclk_in10khz;
+ smu->smu_table.boot_values.uclk = v_3_4->bootup_mclk_in10khz;
+ smu->smu_table.boot_values.socclk = 0;
+ smu->smu_table.boot_values.dcefclk = 0;
+ smu->smu_table.boot_values.vddc = v_3_4->bootup_vddc_mv;
+ smu->smu_table.boot_values.vddci = v_3_4->bootup_vddci_mv;
+ smu->smu_table.boot_values.mvddc = v_3_4->bootup_mvddc_mv;
+ smu->smu_table.boot_values.vdd_gfx = v_3_4->bootup_vddgfx_mv;
+ smu->smu_table.boot_values.cooling_id = v_3_4->coolingsolution_id;
+ smu->smu_table.boot_values.pp_table_id = v_3_4->pplib_pptable_id;
+ break;
+ }
+
+ smu->smu_table.boot_values.format_revision = header->format_revision;
+ smu->smu_table.boot_values.content_revision = header->content_revision;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ smu_info);
+ if (!amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
+ (uint8_t **)&header)) {
+
+ if ((frev == 3) && (crev == 6)) {
+ smu_info_v3_6 = (struct atom_smu_info_v3_6 *)header;
+
+ smu->smu_table.boot_values.socclk = smu_info_v3_6->bootup_socclk_10khz;
+ smu->smu_table.boot_values.vclk = smu_info_v3_6->bootup_vclk_10khz;
+ smu->smu_table.boot_values.dclk = smu_info_v3_6->bootup_dclk_10khz;
+ smu->smu_table.boot_values.fclk = smu_info_v3_6->bootup_fclk_10khz;
+ } else if ((frev == 3) && (crev == 1)) {
+ return 0;
+ } else if ((frev == 4) && (crev == 0)) {
+ smu_info_v4_0 = (struct atom_smu_info_v4_0 *)header;
+
+ smu->smu_table.boot_values.socclk = smu_info_v4_0->bootup_socclk_10khz;
+ smu->smu_table.boot_values.dcefclk = smu_info_v4_0->bootup_dcefclk_10khz;
+ smu->smu_table.boot_values.vclk = smu_info_v4_0->bootup_vclk0_10khz;
+ smu->smu_table.boot_values.dclk = smu_info_v4_0->bootup_dclk0_10khz;
+ smu->smu_table.boot_values.fclk = smu_info_v4_0->bootup_fclk_10khz;
+ } else {
+ dev_warn(smu->adev->dev, "Unexpected and unhandled version: %d.%d\n",
+ (uint32_t)frev, (uint32_t)crev);
+ }
+ }
+
+ return 0;
+}
+
+
+int smu_v14_0_notify_memory_pool_location(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *memory_pool = &smu_table->memory_pool;
+ int ret = 0;
+ uint64_t address;
+ uint32_t address_low, address_high;
+
+ if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
+ return ret;
+
+ address = memory_pool->mc_address;
+ address_high = (uint32_t)upper_32_bits(address);
+ address_low = (uint32_t)lower_32_bits(address);
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
+ address_high, NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
+ address_low, NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
+ (uint32_t)memory_pool->size, NULL);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
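
The memory-pool registration above hands the firmware a 64-bit MC address as two 32-bit message arguments. A standalone sketch of the split; the address is made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t address = 0x0000008012345000ULL;  /* made-up MC address */

	uint32_t address_high = (uint32_t)(address >> 32);        /* upper_32_bits() */
	uint32_t address_low  = (uint32_t)(address & 0xffffffff); /* lower_32_bits() */

	printf("DramLogSetDramAddrHigh: 0x%08x\n", address_high);
	printf("DramLogSetDramAddrLow:  0x%08x\n", address_low);
	return 0;
}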
+int smu_v14_0_set_driver_table_location(struct smu_context *smu)
+{
+ struct smu_table *driver_table = &smu->smu_table.driver_table;
+ int ret = 0;
+
+ if (driver_table->mc_address) {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(driver_table->mc_address),
+ NULL);
+ if (!ret)
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetDriverDramAddrLow,
+ lower_32_bits(driver_table->mc_address),
+ NULL);
+ }
+
+ return ret;
+}
+
+int smu_v14_0_set_tool_table_location(struct smu_context *smu)
+{
+ int ret = 0;
+ struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
+
+ if (tool_table->mc_address) {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetToolsDramAddrHigh,
+ upper_32_bits(tool_table->mc_address),
+ NULL);
+ if (!ret)
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetToolsDramAddrLow,
+ lower_32_bits(tool_table->mc_address),
+ NULL);
+ }
+
+ return ret;
+}
+
+int smu_v14_0_set_allowed_mask(struct smu_context *smu)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ int ret = 0;
+ uint32_t feature_mask[2];
+
+ if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) ||
+ feature->feature_num < 64)
+ return -EINVAL;
+
+ bitmap_to_arr32(feature_mask, feature->allowed, 64);
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
+ feature_mask[1], NULL);
+ if (ret)
+ return ret;
+
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetAllowedFeaturesMaskLow,
+ feature_mask[0],
+ NULL);
+}
+
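
bitmap_to_arr32() above lays the 64-bit allowed-feature bitmap out as two 32-bit words, low word first; the high word is then sent via SetAllowedFeaturesMaskHigh and the low word via SetAllowedFeaturesMaskLow. A standalone sketch with a made-up mask:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t allowed = 0x0000001f00000007ULL;  /* made-up feature bitmap */

	/* Same word order bitmap_to_arr32() produces for a 64-bit bitmap */
	uint32_t feature_mask[2] = {
		(uint32_t)(allowed & 0xffffffff),  /* [0]: SetAllowedFeaturesMaskLow  */
		(uint32_t)(allowed >> 32),         /* [1]: SetAllowedFeaturesMaskHigh */
	};

	printf("high 0x%08x, low 0x%08x\n", feature_mask[1], feature_mask[0]);
	return 0;
}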
+int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable)
+{
+ int ret = 0;
+ struct amdgpu_device *adev = smu->adev;
+
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(14, 0, 2):
+ case IP_VERSION(14, 0, 0):
+ if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
+ return 0;
+ if (enable)
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
+ else
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+int smu_v14_0_system_features_control(struct smu_context *smu,
+ bool en)
+{
+ return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
+ SMU_MSG_DisableAllSmuFeatures), NULL);
+}
+
+int smu_v14_0_notify_display_change(struct smu_context *smu)
+{
+ int ret = 0;
+
+ if (!smu->pm_enabled)
+ return ret;
+
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
+ smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
+
+ return ret;
+}
+
+int smu_v14_0_get_current_power_limit(struct smu_context *smu,
+ uint32_t *power_limit)
+{
+ int power_src;
+ int ret = 0;
+
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
+ return -EINVAL;
+
+ power_src = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_PWR,
+ smu->adev->pm.ac_power ?
+ SMU_POWER_SOURCE_AC :
+ SMU_POWER_SOURCE_DC);
+ if (power_src < 0)
+ return -EINVAL;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetPptLimit,
+ power_src << 16,
+ power_limit);
+ if (ret)
+ dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__);
+
+ return ret;
+}
+
+int smu_v14_0_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit)
+{
+ int ret = 0;
+
+ if (limit_type != SMU_DEFAULT_PPT_LIMIT)
+ return -EINVAL;
+
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
+ dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit, NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
+ return ret;
+ }
+
+ smu->current_power_limit = limit;
+
+ return 0;
+}
+
+static int smu_v14_0_set_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ uint32_t val = 0;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ /* For THM irqs */
+ // TODO
+
+ /* For MP1 SW irqs */
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
+
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ /* For THM irqs */
+ // TODO
+
+ /* For MP1 SW irqs */
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val);
+
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
+
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int smu_v14_0_irq_process(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ // TODO
+
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs smu_v14_0_irq_funcs = {
+ .set = smu_v14_0_set_irq_state,
+ .process = smu_v14_0_irq_process,
+};
+
+int smu_v14_0_register_irq_handler(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ struct amdgpu_irq_src *irq_src = &smu->irq_source;
+ int ret = 0;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ irq_src->num_types = 1;
+ irq_src->funcs = &smu_v14_0_irq_funcs;
+
+ // TODO: THM related
+
+ ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
+ 0xfe,
+ irq_src);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int smu_v14_0_wait_for_reset_complete(struct smu_context *smu,
+ uint64_t event_arg)
+{
+ int ret = 0;
+
+ dev_dbg(smu->adev->dev, "waiting for smu reset complete\n");
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GfxDriverResetRecovery, NULL);
+
+ return ret;
+}
+
+int smu_v14_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
+ uint64_t event_arg)
+{
+ int ret = -EINVAL;
+
+ switch (event) {
+ case SMU_EVENT_RESET_COMPLETE:
+ ret = smu_v14_0_wait_for_reset_complete(smu, event_arg);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+int smu_v14_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param = 0;
+ uint32_t clock_limit;
+
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ clock_limit = smu->smu_table.boot_values.uclk;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ clock_limit = smu->smu_table.boot_values.gfxclk;
+ break;
+ case SMU_SOCCLK:
+ clock_limit = smu->smu_table.boot_values.socclk;
+ break;
+ default:
+ clock_limit = 0;
+ break;
+ }
+
+ /* clock in MHz unit */
+ if (min)
+ *min = clock_limit / 100;
+ if (max)
+ *max = clock_limit / 100;
+
+ return 0;
+ }
+
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
+ if (clk_id < 0) {
+ ret = -EINVAL;
+ goto failed;
+ }
+ param = (clk_id & 0xffff) << 16;
+
+ if (max) {
+ if (smu->adev->pm.ac_power)
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetMaxDpmFreq,
+ param,
+ max);
+ else
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetDcModeMaxDpmFreq,
+ param,
+ max);
+ if (ret)
+ goto failed;
+ }
+
+ if (min) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
+ if (ret)
+ goto failed;
+ }
+
+failed:
+ return ret;
+}
+
+int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
+
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
+ if (max > 0) {
+ param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
+ param, NULL);
+ if (ret)
+ goto out;
+ }
+
+ if (min > 0) {
+ param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
+ param, NULL);
+ if (ret)
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
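
The soft min/max messages above encode the clock ID and the frequency in a single 32-bit argument. A standalone sketch of the packing; the clock index and frequency are made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t clk_id = 2;      /* made-up ASIC-specific clock index */
	uint32_t freq_mhz = 2500; /* made-up soft-max frequency, in MHz */

	/* Clock ID in the upper 16 bits, frequency in the lower 16 */
	uint32_t param = (clk_id << 16) | (freq_mhz & 0xffff);

	printf("SetSoftMaxByFreq param = 0x%08x\n", param);  /* 0x000209c4 */
	return 0;
}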
+int smu_v14_0_set_hard_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
+
+ if (min <= 0 && max <= 0)
+ return -EINVAL;
+
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
+ if (max > 0) {
+ param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
+ param, NULL);
+ if (ret)
+ return ret;
+ }
+
+ if (min > 0) {
+ param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
+ param, NULL);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+int smu_v14_0_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
+{
+ struct smu_14_0_dpm_context *dpm_context =
+ smu->smu_dpm.dpm_context;
+ struct smu_14_0_dpm_table *gfx_table =
+ &dpm_context->dpm_tables.gfx_table;
+ struct smu_14_0_dpm_table *mem_table =
+ &dpm_context->dpm_tables.uclk_table;
+ struct smu_14_0_dpm_table *soc_table =
+ &dpm_context->dpm_tables.soc_table;
+ struct smu_14_0_dpm_table *vclk_table =
+ &dpm_context->dpm_tables.vclk_table;
+ struct smu_14_0_dpm_table *dclk_table =
+ &dpm_context->dpm_tables.dclk_table;
+ struct smu_14_0_dpm_table *fclk_table =
+ &dpm_context->dpm_tables.fclk_table;
+ struct smu_umd_pstate_table *pstate_table =
+ &smu->pstate_table;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t sclk_min = 0, sclk_max = 0;
+ uint32_t mclk_min = 0, mclk_max = 0;
+ uint32_t socclk_min = 0, socclk_max = 0;
+ uint32_t vclk_min = 0, vclk_max = 0;
+ uint32_t dclk_min = 0, dclk_max = 0;
+ uint32_t fclk_min = 0, fclk_max = 0;
+ int ret = 0, i;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ sclk_min = sclk_max = gfx_table->max;
+ mclk_min = mclk_max = mem_table->max;
+ socclk_min = socclk_max = soc_table->max;
+ vclk_min = vclk_max = vclk_table->max;
+ dclk_min = dclk_max = dclk_table->max;
+ fclk_min = fclk_max = fclk_table->max;
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ sclk_min = sclk_max = gfx_table->min;
+ mclk_min = mclk_max = mem_table->min;
+ socclk_min = socclk_max = soc_table->min;
+ vclk_min = vclk_max = vclk_table->min;
+ dclk_min = dclk_max = dclk_table->min;
+ fclk_min = fclk_max = fclk_table->min;
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ sclk_min = gfx_table->min;
+ sclk_max = gfx_table->max;
+ mclk_min = mem_table->min;
+ mclk_max = mem_table->max;
+ socclk_min = soc_table->min;
+ socclk_max = soc_table->max;
+ vclk_min = vclk_table->min;
+ vclk_max = vclk_table->max;
+ dclk_min = dclk_table->min;
+ dclk_max = dclk_table->max;
+ fclk_min = fclk_table->min;
+ fclk_max = fclk_table->max;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
+ mclk_min = mclk_max = pstate_table->uclk_pstate.standard;
+ socclk_min = socclk_max = pstate_table->socclk_pstate.standard;
+ vclk_min = vclk_max = pstate_table->vclk_pstate.standard;
+ dclk_min = dclk_max = pstate_table->dclk_pstate.standard;
+ fclk_min = fclk_max = pstate_table->fclk_pstate.standard;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ sclk_min = sclk_max = pstate_table->gfxclk_pstate.min;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ mclk_min = mclk_max = pstate_table->uclk_pstate.min;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak;
+ mclk_min = mclk_max = pstate_table->uclk_pstate.peak;
+ socclk_min = socclk_max = pstate_table->socclk_pstate.peak;
+ vclk_min = vclk_max = pstate_table->vclk_pstate.peak;
+ dclk_min = dclk_max = pstate_table->dclk_pstate.peak;
+ fclk_min = fclk_max = pstate_table->fclk_pstate.peak;
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ return 0;
+ default:
+ dev_err(adev->dev, "Invalid performance level %d\n", level);
+ return -EINVAL;
+ }
+
+ if (sclk_min && sclk_max) {
+ ret = smu_v14_0_set_soft_freq_limited_range(smu,
+ SMU_GFXCLK,
+ sclk_min,
+ sclk_max);
+ if (ret)
+ return ret;
+
+ pstate_table->gfxclk_pstate.curr.min = sclk_min;
+ pstate_table->gfxclk_pstate.curr.max = sclk_max;
+ }
+
+ if (mclk_min && mclk_max) {
+ ret = smu_v14_0_set_soft_freq_limited_range(smu,
+ SMU_MCLK,
+ mclk_min,
+ mclk_max);
+ if (ret)
+ return ret;
+
+ pstate_table->uclk_pstate.curr.min = mclk_min;
+ pstate_table->uclk_pstate.curr.max = mclk_max;
+ }
+
+ if (socclk_min && socclk_max) {
+ ret = smu_v14_0_set_soft_freq_limited_range(smu,
+ SMU_SOCCLK,
+ socclk_min,
+ socclk_max);
+ if (ret)
+ return ret;
+
+ pstate_table->socclk_pstate.curr.min = socclk_min;
+ pstate_table->socclk_pstate.curr.max = socclk_max;
+ }
+
+ if (vclk_min && vclk_max) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ ret = smu_v14_0_set_soft_freq_limited_range(smu,
+ i ? SMU_VCLK1 : SMU_VCLK,
+ vclk_min,
+ vclk_max);
+ if (ret)
+ return ret;
+ }
+ pstate_table->vclk_pstate.curr.min = vclk_min;
+ pstate_table->vclk_pstate.curr.max = vclk_max;
+ }
+
+ if (dclk_min && dclk_max) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ ret = smu_v14_0_set_soft_freq_limited_range(smu,
+ i ? SMU_DCLK1 : SMU_DCLK,
+ dclk_min,
+ dclk_max);
+ if (ret)
+ return ret;
+ }
+ pstate_table->dclk_pstate.curr.min = dclk_min;
+ pstate_table->dclk_pstate.curr.max = dclk_max;
+ }
+
+ if (fclk_min && fclk_max) {
+ ret = smu_v14_0_set_soft_freq_limited_range(smu,
+ SMU_FCLK,
+ fclk_min,
+ fclk_max);
+ if (ret)
+ return ret;
+
+ pstate_table->fclk_pstate.curr.min = fclk_min;
+ pstate_table->fclk_pstate.curr.max = fclk_max;
+ }
+
+ return ret;
+}
+
+int smu_v14_0_set_power_source(struct smu_context *smu,
+ enum smu_power_src_type power_src)
+{
+ int pwr_source;
+
+ pwr_source = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_PWR,
+ (uint32_t)power_src);
+ if (pwr_source < 0)
+ return -EINVAL;
+
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_NotifyPowerSource,
+ pwr_source,
+ NULL);
+}
+
+static int smu_v14_0_get_dpm_freq_by_index(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint16_t level,
+ uint32_t *value)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
+
+ if (!value)
+ return -EINVAL;
+
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
+ param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetDpmFreqByIndex,
+ param,
+ value);
+ if (ret)
+ return ret;
+
+ *value = *value & 0x7fffffff;
+
+ return ret;
+}
+
+static int smu_v14_0_get_dpm_level_count(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *value)
+{
+ int ret;
+
+ ret = smu_v14_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
+
+ return ret;
+}
+
+static int smu_v14_0_get_fine_grained_status(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ bool *is_fine_grained_dpm)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
+ uint32_t value;
+
+ if (!is_fine_grained_dpm)
+ return -EINVAL;
+
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
+ param = (uint32_t)(((clk_id & 0xffff) << 16) | 0xff);
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetDpmFreqByIndex,
+ param,
+ &value);
+ if (ret)
+ return ret;
+
+ /*
+ * BIT31: 1 - Fine grained DPM, 0 - Discrete DPM
+ * not supported for now
+ */
+ *is_fine_grained_dpm = value & 0x80000000;
+
+ return 0;
+}
+
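
The GetDpmFreqByIndex reply decoded above multiplexes a flag and a payload in one word: bit 31 marks fine-grained DPM and the low 31 bits carry the level count or frequency. A standalone sketch with a made-up reply:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t value = 0x80000002;  /* made-up firmware reply */

	bool is_fine_grained = value & 0x80000000;  /* BIT31 set: fine grained */
	uint32_t payload = value & 0x7fffffff;      /* level count or frequency */

	printf("fine grained: %d, payload: %u\n", is_fine_grained, payload);
	return 0;
}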
+int smu_v14_0_set_single_dpm_table(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ struct smu_14_0_dpm_table *single_dpm_table)
+{
+ int ret = 0;
+ uint32_t clk;
+ int i;
+
+ ret = smu_v14_0_get_dpm_level_count(smu,
+ clk_type,
+ &single_dpm_table->count);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__);
+ return ret;
+ }
+
+ ret = smu_v14_0_get_fine_grained_status(smu,
+ clk_type,
+ &single_dpm_table->is_fine_grained);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] failed to get fine grained status!\n", __func__);
+ return ret;
+ }
+
+ for (i = 0; i < single_dpm_table->count; i++) {
+ ret = smu_v14_0_get_dpm_freq_by_index(smu,
+ clk_type,
+ i,
+ &clk);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__);
+ return ret;
+ }
+
+ single_dpm_table->dpm_levels[i].value = clk;
+ single_dpm_table->dpm_levels[i].enabled = true;
+
+ if (i == 0)
+ single_dpm_table->min = clk;
+ else if (i == single_dpm_table->count - 1)
+ single_dpm_table->max = clk;
+ }
+
+ return 0;
+}
+
+int smu_v14_0_set_vcn_enable(struct smu_context *smu,
+ bool enable)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int i, ret = 0;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
+ i << 16U, NULL);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+int smu_v14_0_set_jpeg_enable(struct smu_context *smu,
+ bool enable)
+{
+ return smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpJpeg : SMU_MSG_PowerDownJpeg,
+ 0, NULL);
+}
+
+int smu_v14_0_run_btc(struct smu_context *smu)
+{
+ int res;
+
+ res = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
+ if (res)
+ dev_err(smu->adev->dev, "RunDcBtc failed!\n");
+
+ return res;
+}
+
+int smu_v14_0_gpo_control(struct smu_context *smu,
+ bool enablement)
+{
+ int res;
+
+ res = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_AllowGpo,
+ enablement ? 1 : 0,
+ NULL);
+ if (res)
+ dev_err(smu->adev->dev, "SetGpoAllow %d failed!\n", enablement);
+
+ return res;
+}
+
+int smu_v14_0_deep_sleep_control(struct smu_context *smu,
+ bool enablement)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s UCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s FCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s LCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_VCN_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_VCN_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s VCN DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP0CLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP0CLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s MP0/MPIOCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP1CLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP1CLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s MP1CLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+int smu_v14_0_gfx_ulv_control(struct smu_context *smu,
+ bool enablement)
+{
+ int ret = 0;
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement);
+
+ return ret;
+}
+
+int smu_v14_0_baco_set_armd3_sequence(struct smu_context *smu,
+ enum smu_baco_seq baco_seq)
+{
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+ int ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_ArmD3,
+ baco_seq,
+ NULL);
+ if (ret)
+ return ret;
+
+ if (baco_seq == BACO_SEQ_BAMACO ||
+ baco_seq == BACO_SEQ_BACO)
+ smu_baco->state = SMU_BACO_STATE_ENTER;
+ else
+ smu_baco->state = SMU_BACO_STATE_EXIT;
+
+ return 0;
+}
+
+bool smu_v14_0_baco_is_support(struct smu_context *smu)
+{
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+
+ if (amdgpu_sriov_vf(smu->adev) ||
+ !smu_baco->platform_support)
+ return false;
+
+ /* return true if ASIC is in BACO state already */
+ if (smu_v14_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
+ return true;
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
+ !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
+ return false;
+
+ return true;
+}
+
+enum smu_baco_state smu_v14_0_baco_get_state(struct smu_context *smu)
+{
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+
+ return smu_baco->state;
+}
+
+int smu_v14_0_baco_set_state(struct smu_context *smu,
+ enum smu_baco_state state)
+{
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ if (smu_v14_0_baco_get_state(smu) == state)
+ return 0;
+
+ if (state == SMU_BACO_STATE_ENTER) {
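+		/* prefer BAMACO (BACO plus MACO) when the platform supports it */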
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_EnterBaco,
+ smu_baco->maco_support ?
+ BACO_SEQ_BAMACO : BACO_SEQ_BACO,
+ NULL);
+ } else {
+ ret = smu_cmn_send_smc_msg(smu,
+ SMU_MSG_ExitBaco,
+ NULL);
+ if (ret)
+ return ret;
+
+		/* clear vbios scratch registers 6 and 7 for the coming ASIC re-init */
+ WREG32(adev->bios_scratch_reg_offset + 6, 0);
+ WREG32(adev->bios_scratch_reg_offset + 7, 0);
+ }
+
+ if (!ret)
+ smu_baco->state = state;
+
+ return ret;
+}
+
+int smu_v14_0_baco_enter(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = smu_v14_0_baco_set_state(smu,
+ SMU_BACO_STATE_ENTER);
+ if (ret)
+ return ret;
+
+ msleep(10);
+
+ return ret;
+}
+
+int smu_v14_0_baco_exit(struct smu_context *smu)
+{
+ return smu_v14_0_baco_set_state(smu,
+ SMU_BACO_STATE_EXIT);
+}
+
+int smu_v14_0_set_gfx_power_up_by_imu(struct smu_context *smu)
+{
+ uint16_t index;
+
+ index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
+ SMU_MSG_EnableGfxImu);
+ /* Param 1 to tell PMFW to enable GFXOFF feature */
+ return smu_cmn_send_msg_without_waiting(smu, index, 1);
+}
+
+int smu_v14_0_set_default_dpm_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0,
+ smu_table->clocks_table, false);
+}
+
+int smu_v14_0_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[], uint32_t size)
+{
+ struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
+ int ret = 0;
+
+ /* Only allowed in manual mode */
+ if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ return -EINVAL;
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ if (size != 2) {
+ dev_err(smu->adev->dev, "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+
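+		/* input[0]: 0 = hard min sclk, 1 = soft max sclk; input[1]: frequency in MHz */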
+ if (input[0] == 0) {
+ if (input[1] < smu->gfx_default_hard_min_freq) {
+ dev_warn(smu->adev->dev,
+ "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
+ input[1], smu->gfx_default_hard_min_freq);
+ return -EINVAL;
+ }
+ smu->gfx_actual_hard_min_freq = input[1];
+ } else if (input[0] == 1) {
+ if (input[1] > smu->gfx_default_soft_max_freq) {
+ dev_warn(smu->adev->dev,
+ "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
+ input[1], smu->gfx_default_soft_max_freq);
+ return -EINVAL;
+ }
+ smu->gfx_actual_soft_max_freq = input[1];
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev, "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+ break;
+ case PP_OD_COMMIT_DPM_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev, "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+ if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
+ dev_err(smu->adev->dev,
+ "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ smu->gfx_actual_hard_min_freq,
+ smu->gfx_actual_soft_max_freq);
+ return -EINVAL;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
+ smu->gfx_actual_hard_min_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set hard min sclk failed!");
+ return ret;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+ smu->gfx_actual_soft_max_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set soft max sclk failed!");
+ return ret;
+ }
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return ret;
+}
+
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
new file mode 100644
index 000000000..94ccdbfd7
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -0,0 +1,1139 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "smu_types.h"
+#define SWSMU_CODE_LAYER_L2
+
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "smu_v14_0.h"
+#include "smu14_driver_if_v14_0_0.h"
+#include "smu_v14_0_0_ppt.h"
+#include "smu_v14_0_0_ppsmc.h"
+#include "smu_v14_0_0_pmfw.h"
+#include "smu_cmn.h"
+
+/*
+ * DO NOT use these for err/warn/info/debug messages.
+ * Use dev_err, dev_warn, dev_info and dev_dbg instead.
+ * They are more MGPU friendly.
+ */
+#undef pr_err
+#undef pr_warn
+#undef pr_info
+#undef pr_debug
+
+#define mmMP1_SMN_C2PMSG_66 0x0282
+#define mmMP1_SMN_C2PMSG_66_BASE_IDX 0
+
+#define mmMP1_SMN_C2PMSG_82 0x0292
+#define mmMP1_SMN_C2PMSG_82_BASE_IDX 0
+
+#define mmMP1_SMN_C2PMSG_90 0x029a
+#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0
+
+#define FEATURE_MASK(feature) (1ULL << feature)
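+/* mask of all clock-domain DPM features, used to decide whether DPM is running */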
+#define SMC_DPM_FEATURE ( \
+ FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
+ FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \
+ FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \
+ FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \
+ FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \
+ FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \
+	FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT) | \
+	FEATURE_MASK(FEATURE_ISP_DPM_BIT) | \
+ FEATURE_MASK(FEATURE_IPU_DPM_BIT) | \
+ FEATURE_MASK(FEATURE_GFX_DPM_BIT) | \
+ FEATURE_MASK(FEATURE_VPE_DPM_BIT))
+
+static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] = {
+ MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
+ MSG_MAP(GetSmuVersion, PPSMC_MSG_GetPmfwVersion, 1),
+ MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
+ MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1),
+ MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 1),
+ MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 1),
+ MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxclk, 1),
+ MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1),
+ MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
+ MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
+ MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1),
+ MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 1),
+ MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDeviceDriverReset, 1),
+ MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 1),
+ MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq, 1),
+ MSG_MAP(SetSoftMinFclk, PPSMC_MSG_SetSoftMinFclk, 1),
+ MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn, 1),
+ MSG_MAP(EnableGfxImu, PPSMC_MSG_EnableGfxImu, 1),
+ MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 1),
+ MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 1),
+ MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1),
+ MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk, 1),
+ MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq, 1),
+ MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq, 1),
+ MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 1),
+ MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 1),
+ MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 1),
+ MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq, 1),
+ MSG_MAP(SetSoftMinSocclkByFreq, PPSMC_MSG_SetSoftMinSocclkByFreq, 1),
+ MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile, 1),
+ MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile, 1),
+ MSG_MAP(SetHardMinIspiclkByFreq, PPSMC_MSG_SetHardMinIspiclkByFreq, 1),
+ MSG_MAP(SetHardMinIspxclkByFreq, PPSMC_MSG_SetHardMinIspxclkByFreq, 1),
+ MSG_MAP(PowerUpVpe, PPSMC_MSG_PowerUpVpe, 1),
+ MSG_MAP(PowerDownVpe, PPSMC_MSG_PowerDownVpe, 1),
+ MSG_MAP(PowerUpUmsch, PPSMC_MSG_PowerUpUmsch, 1),
+ MSG_MAP(PowerDownUmsch, PPSMC_MSG_PowerDownUmsch, 1),
+ MSG_MAP(SetSoftMaxVpe, PPSMC_MSG_SetSoftMaxVpe, 1),
+ MSG_MAP(SetSoftMinVpe, PPSMC_MSG_SetSoftMinVpe, 1),
+};
+
+static struct cmn2asic_mapping smu_v14_0_0_feature_mask_map[SMU_FEATURE_COUNT] = {
+ FEA_MAP(CCLK_DPM),
+ FEA_MAP(FAN_CONTROLLER),
+ FEA_MAP(PPT),
+ FEA_MAP(TDC),
+ FEA_MAP(THERMAL),
+ FEA_MAP(VCN_DPM),
+ FEA_MAP_REVERSE(FCLK),
+ FEA_MAP_REVERSE(SOCCLK),
+ FEA_MAP(LCLK_DPM),
+ FEA_MAP(SHUBCLK_DPM),
+ FEA_MAP(DCFCLK_DPM),
+ FEA_MAP_HALF_REVERSE(GFX),
+ FEA_MAP(DS_GFXCLK),
+ FEA_MAP(DS_SOCCLK),
+ FEA_MAP(DS_LCLK),
+ FEA_MAP(LOW_POWER_DCNCLKS),
+ FEA_MAP(DS_FCLK),
+ FEA_MAP(DS_MP1CLK),
+ FEA_MAP(PSI),
+ FEA_MAP(PROCHOT),
+ FEA_MAP(CPUOFF),
+ FEA_MAP(STAPM),
+ FEA_MAP(S0I3),
+ FEA_MAP(PERF_LIMIT),
+ FEA_MAP(CORE_DLDO),
+ FEA_MAP(DS_VCN),
+ FEA_MAP(CPPC),
+ FEA_MAP(DF_CSTATES),
+ FEA_MAP(ATHUB_PG),
+};
+
+static struct cmn2asic_mapping smu_v14_0_0_table_map[SMU_TABLE_COUNT] = {
+ TAB_MAP_VALID(WATERMARKS),
+ TAB_MAP_VALID(SMU_METRICS),
+ TAB_MAP_VALID(CUSTOM_DPM),
+ TAB_MAP_VALID(DPMCLOCKS),
+};
+
+static int smu_v14_0_0_init_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+
+ SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+ smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+ if (!smu_table->metrics_table)
+ goto err0_out;
+ smu_table->metrics_time = 0;
+
+ smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
+ if (!smu_table->clocks_table)
+ goto err1_out;
+
+ smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
+ if (!smu_table->watermarks_table)
+ goto err2_out;
+
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v3_0);
+ smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
+ if (!smu_table->gpu_metrics_table)
+ goto err3_out;
+
+ return 0;
+
+err3_out:
+ kfree(smu_table->watermarks_table);
+err2_out:
+ kfree(smu_table->clocks_table);
+err1_out:
+ kfree(smu_table->metrics_table);
+err0_out:
+ return -ENOMEM;
+}
+
+static int smu_v14_0_0_fini_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ kfree(smu_table->clocks_table);
+ smu_table->clocks_table = NULL;
+
+ kfree(smu_table->metrics_table);
+ smu_table->metrics_table = NULL;
+
+ kfree(smu_table->watermarks_table);
+ smu_table->watermarks_table = NULL;
+
+ kfree(smu_table->gpu_metrics_table);
+ smu_table->gpu_metrics_table = NULL;
+
+ return 0;
+}
+
+static int smu_v14_0_0_system_features_control(struct smu_context *smu, bool en)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
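+	/* on feature disable, tell MP1 to prepare for driver unload; skipped in s0ix */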
+ if (!en && !adev->in_s0ix)
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
+
+ return ret;
+}
+
+static int smu_v14_0_0_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member,
+ uint32_t *value)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
+ int ret = 0;
+
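+	/* refresh the cached metrics table before reading individual members */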
+ ret = smu_cmn_get_metrics_table(smu, NULL, false);
+ if (ret)
+ return ret;
+
+ switch (member) {
+ case METRICS_AVERAGE_GFXCLK:
+ *value = metrics->GfxclkFrequency;
+ break;
+ case METRICS_AVERAGE_SOCCLK:
+ *value = metrics->SocclkFrequency;
+ break;
+ case METRICS_AVERAGE_VCLK:
+ *value = metrics->VclkFrequency;
+ break;
+ case METRICS_AVERAGE_DCLK:
+ *value = 0;
+ break;
+ case METRICS_AVERAGE_UCLK:
+ *value = metrics->MemclkFrequency;
+ break;
+ case METRICS_AVERAGE_FCLK:
+ *value = metrics->FclkFrequency;
+ break;
+ case METRICS_AVERAGE_VPECLK:
+ *value = metrics->VpeclkFrequency;
+ break;
+ case METRICS_AVERAGE_IPUCLK:
+ *value = metrics->IpuclkFrequency;
+ break;
+ case METRICS_AVERAGE_MPIPUCLK:
+ *value = metrics->MpipuclkFrequency;
+ break;
+ case METRICS_AVERAGE_GFXACTIVITY:
+ *value = metrics->GfxActivity / 100;
+ break;
+ case METRICS_AVERAGE_VCNACTIVITY:
+ *value = metrics->VcnActivity / 100;
+ break;
+ case METRICS_AVERAGE_SOCKETPOWER:
+ case METRICS_CURR_SOCKETPOWER:
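+		/* fixed-point encoding: integer watts shifted left by 8, centiwatts in the low 8 bits */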
+ *value = (metrics->SocketPower / 1000 << 8) +
+ (metrics->SocketPower % 1000 / 10);
+ break;
+ case METRICS_TEMPERATURE_EDGE:
+ *value = metrics->GfxTemperature / 100 *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_TEMPERATURE_HOTSPOT:
+ *value = metrics->SocTemperature / 100 *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_THROTTLER_RESIDENCY_PROCHOT:
+ *value = metrics->ThrottleResidency_PROCHOT;
+ break;
+ case METRICS_THROTTLER_RESIDENCY_SPL:
+ *value = metrics->ThrottleResidency_SPL;
+ break;
+ case METRICS_THROTTLER_RESIDENCY_FPPT:
+ *value = metrics->ThrottleResidency_FPPT;
+ break;
+ case METRICS_THROTTLER_RESIDENCY_SPPT:
+ *value = metrics->ThrottleResidency_SPPT;
+ break;
+ case METRICS_THROTTLER_RESIDENCY_THM_CORE:
+ *value = metrics->ThrottleResidency_THM_CORE;
+ break;
+ case METRICS_THROTTLER_RESIDENCY_THM_GFX:
+ *value = metrics->ThrottleResidency_THM_GFX;
+ break;
+ case METRICS_THROTTLER_RESIDENCY_THM_SOC:
+ *value = metrics->ThrottleResidency_THM_SOC;
+ break;
+ case METRICS_VOLTAGE_VDDGFX:
+ *value = 0;
+ break;
+ case METRICS_VOLTAGE_VDDSOC:
+ *value = 0;
+ break;
+ case METRICS_SS_APU_SHARE:
+		/* Return the percentage of APU power with respect to the APU's power limit.
+		 * Only the percentage is reported; it is not a boost value. Smartshift power
+		 * boost/shift happens only when the percentage exceeds 100.
+		 */
+ if (metrics->StapmOpnLimit > 0)
+ *value = (metrics->ApuPower * 100) / metrics->StapmOpnLimit;
+ else
+ *value = 0;
+ break;
+ case METRICS_SS_DGPU_SHARE:
+		/* Return the percentage of dGPU power with respect to the dGPU's power limit.
+		 * Only the percentage is reported; it is not a boost value. Smartshift power
+		 * boost/shift happens only when the percentage exceeds 100.
+		 */
+ if ((metrics->dGpuPower > 0) &&
+ (metrics->StapmCurrentLimit > metrics->StapmOpnLimit))
+ *value = (metrics->dGpuPower * 100) /
+ (metrics->StapmCurrentLimit - metrics->StapmOpnLimit);
+ else
+ *value = 0;
+ break;
+ default:
+ *value = UINT_MAX;
+ break;
+ }
+
+ return ret;
+}
+
+static int smu_v14_0_0_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size)
+{
+ int ret = 0;
+
+ if (!data || !size)
+ return -EINVAL;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_GFXACTIVITY,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_SOCKETPOWER,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
+ METRICS_CURR_SOCKETPOWER,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_EDGE_TEMP:
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
+ METRICS_TEMPERATURE_EDGE,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
+ METRICS_TEMPERATURE_HOTSPOT,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_MCLK:
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_UCLK,
+ (uint32_t *)data);
+ *(uint32_t *)data *= 100;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_SCLK:
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_GFXCLK,
+ (uint32_t *)data);
+ *(uint32_t *)data *= 100;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_VDDGFX:
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
+ METRICS_VOLTAGE_VDDGFX,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_VDDNB:
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
+ METRICS_VOLTAGE_VDDSOC,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_SS_APU_SHARE:
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
+ METRICS_SS_APU_SHARE,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_SS_DGPU_SHARE:
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
+ METRICS_SS_DGPU_SHARE,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static bool smu_v14_0_0_is_dpm_running(struct smu_context *smu)
+{
+ int ret = 0;
+ uint64_t feature_enabled;
+
+ ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
+
+ if (ret)
+ return false;
+
+ return !!(feature_enabled & SMC_DPM_FEATURE);
+}
+
+static int smu_v14_0_0_set_watermarks_table(struct smu_context *smu,
+ struct pp_smu_wm_range_sets *clock_ranges)
+{
+ int i;
+ int ret = 0;
+ Watermarks_t *table = smu->smu_table.watermarks_table;
+
+ if (!table || !clock_ranges)
+ return -EINVAL;
+
+ if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
+ clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
+ return -EINVAL;
+
+ for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
+ table->WatermarkRow[WM_DCFCLK][i].MinClock =
+ clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
+ table->WatermarkRow[WM_DCFCLK][i].MaxClock =
+ clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
+ table->WatermarkRow[WM_DCFCLK][i].MinMclk =
+ clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
+ table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
+ clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;
+
+ table->WatermarkRow[WM_DCFCLK][i].WmSetting =
+ clock_ranges->reader_wm_sets[i].wm_inst;
+ }
+
+ for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
+ table->WatermarkRow[WM_SOCCLK][i].MinClock =
+ clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
+ table->WatermarkRow[WM_SOCCLK][i].MaxClock =
+ clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
+ table->WatermarkRow[WM_SOCCLK][i].MinMclk =
+ clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
+ table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
+ clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;
+
+ table->WatermarkRow[WM_SOCCLK][i].WmSetting =
+ clock_ranges->writer_wm_sets[i].wm_inst;
+ }
+
+ smu->watermarks_bitmap |= WATERMARKS_EXIST;
+
+ /* pass data to smu controller */
+ if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
+ !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+ ret = smu_cmn_write_watermarks_table(smu);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to update WMTABLE!");
+ return ret;
+ }
+ smu->watermarks_bitmap |= WATERMARKS_LOADED;
+ }
+
+ return 0;
+}
+
+static ssize_t smu_v14_0_0_get_gpu_metrics(struct smu_context *smu,
+ void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct gpu_metrics_v3_0 *gpu_metrics =
+ (struct gpu_metrics_v3_0 *)smu_table->gpu_metrics_table;
+ SmuMetrics_t metrics;
+ int ret = 0;
+
+ ret = smu_cmn_get_metrics_table(smu, &metrics, true);
+ if (ret)
+ return ret;
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 3, 0);
+
+ gpu_metrics->temperature_gfx = metrics.GfxTemperature;
+ gpu_metrics->temperature_soc = metrics.SocTemperature;
+ memcpy(&gpu_metrics->temperature_core[0],
+ &metrics.CoreTemperature[0],
+ sizeof(uint16_t) * 16);
+ gpu_metrics->temperature_skin = metrics.SkinTemp;
+
+ gpu_metrics->average_gfx_activity = metrics.GfxActivity;
+ gpu_metrics->average_vcn_activity = metrics.VcnActivity;
+ memcpy(&gpu_metrics->average_ipu_activity[0],
+ &metrics.IpuBusy[0],
+ sizeof(uint16_t) * 8);
+ memcpy(&gpu_metrics->average_core_c0_activity[0],
+ &metrics.CoreC0Residency[0],
+ sizeof(uint16_t) * 16);
+ gpu_metrics->average_dram_reads = metrics.DRAMReads;
+ gpu_metrics->average_dram_writes = metrics.DRAMWrites;
+ gpu_metrics->average_ipu_reads = metrics.IpuReads;
+ gpu_metrics->average_ipu_writes = metrics.IpuWrites;
+
+ gpu_metrics->average_socket_power = metrics.SocketPower;
+ gpu_metrics->average_ipu_power = metrics.IpuPower;
+ gpu_metrics->average_apu_power = metrics.ApuPower;
+ gpu_metrics->average_gfx_power = metrics.GfxPower;
+ gpu_metrics->average_dgpu_power = metrics.dGpuPower;
+ gpu_metrics->average_all_core_power = metrics.AllCorePower;
+ gpu_metrics->average_sys_power = metrics.Psys;
+ memcpy(&gpu_metrics->average_core_power[0],
+ &metrics.CorePower[0],
+ sizeof(uint16_t) * 16);
+
+ gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
+ gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
+ gpu_metrics->average_vpeclk_frequency = metrics.VpeclkFrequency;
+ gpu_metrics->average_fclk_frequency = metrics.FclkFrequency;
+ gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
+ gpu_metrics->average_ipuclk_frequency = metrics.IpuclkFrequency;
+ gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
+ gpu_metrics->average_mpipu_frequency = metrics.MpipuclkFrequency;
+
+ memcpy(&gpu_metrics->current_coreclk[0],
+ &metrics.CoreFrequency[0],
+ sizeof(uint16_t) * 16);
+ gpu_metrics->current_core_maxfreq = metrics.InfrastructureCpuMaxFreq;
+ gpu_metrics->current_gfx_maxfreq = metrics.InfrastructureGfxMaxFreq;
+
+ gpu_metrics->throttle_residency_prochot = metrics.ThrottleResidency_PROCHOT;
+ gpu_metrics->throttle_residency_spl = metrics.ThrottleResidency_SPL;
+ gpu_metrics->throttle_residency_fppt = metrics.ThrottleResidency_FPPT;
+ gpu_metrics->throttle_residency_sppt = metrics.ThrottleResidency_SPPT;
+ gpu_metrics->throttle_residency_thm_core = metrics.ThrottleResidency_THM_CORE;
+ gpu_metrics->throttle_residency_thm_gfx = metrics.ThrottleResidency_THM_GFX;
+ gpu_metrics->throttle_residency_thm_soc = metrics.ThrottleResidency_THM_SOC;
+
+ gpu_metrics->time_filter_alphavalue = metrics.FilterAlphaValue;
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+ *table = (void *)gpu_metrics;
+
+ return sizeof(struct gpu_metrics_v3_0);
+}
+
+static int smu_v14_0_0_mode2_reset(struct smu_context *smu)
+{
+ int ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+ SMU_RESET_MODE_2, NULL);
+
+ if (ret)
+ dev_err(smu->adev->dev, "Failed to mode2 reset!\n");
+
+ return ret;
+}
+
+static int smu_v14_0_0_get_dpm_freq_by_index(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t dpm_level,
+ uint32_t *freq)
+{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+
+ if (!clk_table || clk_type >= SMU_CLK_COUNT)
+ return -EINVAL;
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->SocClocks[dpm_level];
+ break;
+ case SMU_VCLK:
+ if (dpm_level >= clk_table->VcnClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->VClocks[dpm_level];
+ break;
+ case SMU_DCLK:
+ if (dpm_level >= clk_table->VcnClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->DClocks[dpm_level];
+ break;
+ case SMU_UCLK:
+ case SMU_MCLK:
+ if (dpm_level >= clk_table->NumMemPstatesEnabled)
+ return -EINVAL;
+ *freq = clk_table->MemPstateTable[dpm_level].MemClk;
+ break;
+ case SMU_FCLK:
+ if (dpm_level >= clk_table->NumFclkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->FclkClocks_Freq[dpm_level];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu,
+ enum smu_clk_type clk_type)
+{
+ enum smu_feature_mask feature_id = 0;
+
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ case SMU_FCLK:
+ feature_id = SMU_FEATURE_DPM_FCLK_BIT;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
+ break;
+ case SMU_SOCCLK:
+ feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
+ break;
+ case SMU_VCLK:
+ case SMU_DCLK:
+ feature_id = SMU_FEATURE_VCN_DPM_BIT;
+ break;
+ default:
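+		/* clock types without an associated DPM feature are treated as always enabled */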
+ return true;
+ }
+
+ return smu_cmn_feature_is_enabled(smu, feature_id);
+}
+
+static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min,
+ uint32_t *max)
+{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+ uint32_t clock_limit;
+ uint32_t max_dpm_level, min_dpm_level;
+ int ret = 0;
+
+ if (!smu_v14_0_0_clk_dpm_is_enabled(smu, clk_type)) {
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ clock_limit = smu->smu_table.boot_values.uclk;
+ break;
+ case SMU_FCLK:
+ clock_limit = smu->smu_table.boot_values.fclk;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ clock_limit = smu->smu_table.boot_values.gfxclk;
+ break;
+ case SMU_SOCCLK:
+ clock_limit = smu->smu_table.boot_values.socclk;
+ break;
+ case SMU_VCLK:
+ clock_limit = smu->smu_table.boot_values.vclk;
+ break;
+ case SMU_DCLK:
+ clock_limit = smu->smu_table.boot_values.dclk;
+ break;
+ default:
+ clock_limit = 0;
+ break;
+ }
+
+		/* boot-up clock values are in 10 KHz units; convert to MHz */
+ if (min)
+ *min = clock_limit / 100;
+ if (max)
+ *max = clock_limit / 100;
+
+ return 0;
+ }
+
+ if (max) {
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ *max = clk_table->MaxGfxClk;
+ break;
+ case SMU_MCLK:
+ case SMU_UCLK:
+ case SMU_FCLK:
+ max_dpm_level = 0;
+ break;
+ case SMU_SOCCLK:
+ max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1;
+ break;
+ case SMU_VCLK:
+ case SMU_DCLK:
+ max_dpm_level = clk_table->VcnClkLevelsEnabled - 1;
+ break;
+ default:
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
+ ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
+ if (ret)
+ goto failed;
+ }
+ }
+
+ if (min) {
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ *min = clk_table->MinGfxClk;
+ break;
+ case SMU_MCLK:
+ case SMU_UCLK:
+ min_dpm_level = clk_table->NumMemPstatesEnabled - 1;
+ break;
+ case SMU_FCLK:
+ min_dpm_level = clk_table->NumFclkLevelsEnabled - 1;
+ break;
+ case SMU_SOCCLK:
+ min_dpm_level = 0;
+ break;
+ case SMU_VCLK:
+ case SMU_DCLK:
+ min_dpm_level = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
+ ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
+ if (ret)
+ goto failed;
+ }
+ }
+
+failed:
+ return ret;
+}
+
+static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *value)
+{
+ MetricsMember_t member_type;
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ member_type = METRICS_AVERAGE_SOCCLK;
+ break;
+ case SMU_VCLK:
+ member_type = METRICS_AVERAGE_VCLK;
+ break;
+ case SMU_DCLK:
+ member_type = METRICS_AVERAGE_DCLK;
+ break;
+ case SMU_MCLK:
+ member_type = METRICS_AVERAGE_UCLK;
+ break;
+ case SMU_FCLK:
+ member_type = METRICS_AVERAGE_FCLK;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ member_type = METRICS_AVERAGE_GFXCLK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return smu_v14_0_0_get_smu_metrics_data(smu, member_type, value);
+}
+
+static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *count)
+{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ *count = clk_table->NumSocClkLevelsEnabled;
+ break;
+ case SMU_VCLK:
+ *count = clk_table->VcnClkLevelsEnabled;
+ break;
+ case SMU_DCLK:
+ *count = clk_table->VcnClkLevelsEnabled;
+ break;
+ case SMU_MCLK:
+ *count = clk_table->NumMemPstatesEnabled;
+ break;
+ case SMU_FCLK:
+ *count = clk_table->NumFclkLevelsEnabled;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type, char *buf)
+{
+ int i, size = 0, ret = 0;
+ uint32_t cur_value = 0, value = 0, count = 0;
+ uint32_t min, max;
+
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
+ switch (clk_type) {
+ case SMU_OD_SCLK:
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
+ (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
+ size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+ (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
+ break;
+ case SMU_OD_RANGE:
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+ smu->gfx_default_hard_min_freq,
+ smu->gfx_default_soft_max_freq);
+ break;
+ case SMU_SOCCLK:
+ case SMU_VCLK:
+ case SMU_DCLK:
+ case SMU_MCLK:
+ case SMU_FCLK:
+ ret = smu_v14_0_0_get_current_clk_freq(smu, clk_type, &cur_value);
+ if (ret)
+ break;
+
+ ret = smu_v14_0_0_get_dpm_level_count(smu, clk_type, &count);
+ if (ret)
+ break;
+
+ for (i = 0; i < count; i++) {
+ ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
+ if (ret)
+ break;
+
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
+ cur_value == value ? "*" : "");
+ }
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ ret = smu_v14_0_0_get_current_clk_freq(smu, clk_type, &cur_value);
+ if (ret)
+ break;
+ min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
+ max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
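+		/* map the current clock onto the three displayed levels: 0 = min, 1 = mid, 2 = max */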
+ if (cur_value == max)
+ i = 2;
+ else if (cur_value == min)
+ i = 0;
+ else
+ i = 1;
+ size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
+ i == 0 ? "*" : "");
+ size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
+ i == 1 ? cur_value : 1100, /* UMD PSTATE GFXCLK 1100 */
+ i == 1 ? "*" : "");
+ size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
+ i == 2 ? "*" : "");
+ break;
+ default:
+ break;
+ }
+
+ return size;
+}
+
+static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max)
+{
+ enum smu_message_type msg_set_min, msg_set_max;
+ int ret = 0;
+
+ if (!smu_v14_0_0_clk_dpm_is_enabled(smu, clk_type))
+ return -EINVAL;
+
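+	/* map the clock domain to its hard-min/soft-max message pair */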
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ msg_set_min = SMU_MSG_SetHardMinGfxClk;
+ msg_set_max = SMU_MSG_SetSoftMaxGfxClk;
+ break;
+ case SMU_FCLK:
+ msg_set_min = SMU_MSG_SetHardMinFclkByFreq;
+ msg_set_max = SMU_MSG_SetSoftMaxFclkByFreq;
+ break;
+ case SMU_SOCCLK:
+ msg_set_min = SMU_MSG_SetHardMinSocclkByFreq;
+ msg_set_max = SMU_MSG_SetSoftMaxSocclkByFreq;
+ break;
+ case SMU_VCLK:
+ case SMU_DCLK:
+ msg_set_min = SMU_MSG_SetHardMinVcn;
+ msg_set_max = SMU_MSG_SetSoftMaxVcn;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL);
+ if (ret)
+ return ret;
+
+ return smu_cmn_send_smc_msg_with_param(smu, msg_set_max,
+ max, NULL);
+}
+
+static int smu_v14_0_0_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t mask)
+{
+ uint32_t soft_min_level = 0, soft_max_level = 0;
+ uint32_t min_freq = 0, max_freq = 0;
+ int ret = 0;
+
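+	/* the lowest and highest set bits of the mask bound the forced level range */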
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ case SMU_FCLK:
+ case SMU_VCLK:
+ case SMU_DCLK:
+ ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
+ if (ret)
+ break;
+
+ ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
+ if (ret)
+ break;
+
+ ret = smu_v14_0_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int smu_v14_0_0_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t sclk_min = 0, sclk_max = 0;
+ uint32_t fclk_min = 0, fclk_max = 0;
+ uint32_t socclk_min = 0, socclk_max = 0;
+ int ret = 0;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max);
+ smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max);
+ smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max);
+ sclk_min = sclk_max;
+ fclk_min = fclk_max;
+ socclk_min = socclk_max;
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL);
+ smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL);
+ smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL);
+ sclk_max = sclk_min;
+ fclk_max = fclk_min;
+ socclk_max = socclk_min;
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max);
+ smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max);
+ smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ /* Temporarily do nothing since the optimal clocks haven't been provided yet */
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ return 0;
+ default:
+ dev_err(adev->dev, "Invalid performance level %d\n", level);
+ return -EINVAL;
+ }
+
+ if (sclk_min && sclk_max) {
+ ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
+ SMU_SCLK,
+ sclk_min,
+ sclk_max);
+ if (ret)
+ return ret;
+
+ smu->gfx_actual_hard_min_freq = sclk_min;
+ smu->gfx_actual_soft_max_freq = sclk_max;
+ }
+
+ if (fclk_min && fclk_max) {
+ ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
+ SMU_FCLK,
+ fclk_min,
+ fclk_max);
+ if (ret)
+ return ret;
+ }
+
+ if (socclk_min && socclk_max) {
+ ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
+ SMU_SOCCLK,
+ socclk_min,
+ socclk_max);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int smu_v14_0_0_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
+{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+
+ smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
+ smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
+ smu->gfx_actual_hard_min_freq = 0;
+ smu->gfx_actual_soft_max_freq = 0;
+
+ return 0;
+}
+
+static int smu_v14_0_0_set_vpe_enable(struct smu_context *smu,
+ bool enable)
+{
+ return smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVpe : SMU_MSG_PowerDownVpe,
+ 0, NULL);
+}
+
+static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu,
+ bool enable)
+{
+ return smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpUmsch : SMU_MSG_PowerDownUmsch,
+ 0, NULL);
+}
+
+static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
+ .check_fw_status = smu_v14_0_check_fw_status,
+ .check_fw_version = smu_v14_0_check_fw_version,
+ .init_smc_tables = smu_v14_0_0_init_smc_tables,
+ .fini_smc_tables = smu_v14_0_0_fini_smc_tables,
+ .get_vbios_bootup_values = smu_v14_0_get_vbios_bootup_values,
+ .system_features_control = smu_v14_0_0_system_features_control,
+ .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
+ .send_smc_msg = smu_cmn_send_smc_msg,
+ .dpm_set_vcn_enable = smu_v14_0_set_vcn_enable,
+ .dpm_set_jpeg_enable = smu_v14_0_set_jpeg_enable,
+ .set_default_dpm_table = smu_v14_0_set_default_dpm_tables,
+ .read_sensor = smu_v14_0_0_read_sensor,
+ .is_dpm_running = smu_v14_0_0_is_dpm_running,
+ .set_watermarks_table = smu_v14_0_0_set_watermarks_table,
+ .get_gpu_metrics = smu_v14_0_0_get_gpu_metrics,
+ .get_enabled_mask = smu_cmn_get_enabled_mask,
+ .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
+ .set_driver_table_location = smu_v14_0_set_driver_table_location,
+ .gfx_off_control = smu_v14_0_gfx_off_control,
+ .mode2_reset = smu_v14_0_0_mode2_reset,
+ .get_dpm_ultimate_freq = smu_v14_0_0_get_dpm_ultimate_freq,
+ .od_edit_dpm_table = smu_v14_0_od_edit_dpm_table,
+ .print_clk_levels = smu_v14_0_0_print_clk_levels,
+ .force_clk_levels = smu_v14_0_0_force_clk_levels,
+ .set_performance_level = smu_v14_0_0_set_performance_level,
+ .set_fine_grain_gfx_freq_parameters = smu_v14_0_0_set_fine_grain_gfx_freq_parameters,
+ .set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu,
+ .dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable,
+ .dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable,
+};
+
+static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
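+	/* MP1 C2PMSG mailboxes: register 82 carries the argument, 66 the message, 90 the response */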
+ smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
+ smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
+}
+
+void smu_v14_0_0_set_ppt_funcs(struct smu_context *smu)
+{
+ smu->ppt_funcs = &smu_v14_0_0_ppt_funcs;
+ smu->message_map = smu_v14_0_0_message_map;
+ smu->feature_map = smu_v14_0_0_feature_mask_map;
+ smu->table_map = smu_v14_0_0_table_map;
+ smu->is_apu = true;
+
+ smu_v14_0_0_set_smu_mailbox_registers(smu);
+}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.h
new file mode 100644
index 000000000..93e645ee3
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU_V14_0_0_PPT_H__
+#define __SMU_V14_0_0_PPT_H__
+
+extern void smu_v14_0_0_set_ppt_funcs(struct smu_context *smu);
+
+#endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 12618a583..00cd615bb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -986,6 +986,12 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
case METRICS_VERSION(1, 3):
structure_size = sizeof(struct gpu_metrics_v1_3);
break;
+ case METRICS_VERSION(1, 4):
+ structure_size = sizeof(struct gpu_metrics_v1_4);
+ break;
+ case METRICS_VERSION(1, 5):
+ structure_size = sizeof(struct gpu_metrics_v1_5);
+ break;
case METRICS_VERSION(2, 0):
structure_size = sizeof(struct gpu_metrics_v2_0);
break;
@@ -1001,6 +1007,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
case METRICS_VERSION(2, 4):
structure_size = sizeof(struct gpu_metrics_v2_4);
break;
+ case METRICS_VERSION(3, 0):
+ structure_size = sizeof(struct gpu_metrics_v3_0);
+ break;
default:
return;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
index bcc42abfc..64766ac69 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
@@ -85,7 +85,7 @@
#define smu_i2c_fini(smu) smu_ppt_funcs(i2c_fini, 0, smu)
#define smu_get_unique_id(smu) smu_ppt_funcs(get_unique_id, 0, smu)
#define smu_log_thermal_throttling(smu) smu_ppt_funcs(log_thermal_throttling_event, 0, smu)
-#define smu_get_asic_power_limits(smu, current, default, max) smu_ppt_funcs(get_power_limit, 0, smu, current, default, max)
+#define smu_get_asic_power_limits(smu, current, default, max, min) smu_ppt_funcs(get_power_limit, 0, smu, current, default, max, min)
#define smu_get_pp_feature_mask(smu, buf) smu_ppt_funcs(get_pp_feature_mask, 0, smu, buf)
#define smu_set_pp_feature_mask(smu, new_mask) smu_ppt_funcs(set_pp_feature_mask, 0, smu, new_mask)
#define smu_gfx_ulv_control(smu, enablement) smu_ppt_funcs(gfx_ulv_control, 0, smu, enablement)
@@ -97,6 +97,7 @@
#define smu_get_default_config_table_settings(smu, config_table) smu_ppt_funcs(get_default_config_table_settings, -EOPNOTSUPP, smu, config_table)
#define smu_set_config_table(smu, config_table) smu_ppt_funcs(set_config_table, -EOPNOTSUPP, smu, config_table)
#define smu_init_pptable_microcode(smu) smu_ppt_funcs(init_pptable_microcode, 0, smu)
+#define smu_notify_rlc_state(smu, en) smu_ppt_funcs(notify_rlc_state, 0, smu, en)
#endif
#endif