author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-05-18 17:35:05 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-05-18 17:39:31 +0000
commit    85c675d0d09a45a135bddd15d7b385f8758c32fb (patch)
tree      76267dbc9b9a130337be3640948fe397b04ac629 /drivers/gpu/drm/amd/display/dc/clk_mgr
parent    Adding upstream version 6.6.15. (diff)
Adding upstream version 6.7.7. (tag: upstream/6.7.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/amd/display/dc/clk_mgr')
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile                        |    8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c                       |   18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c         |    2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c         |    2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c           |    2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c         |   73
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h         |   11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c         |    2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c         |    2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dalsmc.h                  |   11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c           |   64
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c   |  154
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c           | 1145
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h           |   63
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c               |  471
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h               |  203
16 files changed, 2220 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index ad390e4cd..1c443e549 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -172,4 +172,12 @@ AMD_DAL_CLK_MGR_DCN32 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn32/,$(CLK_MGR_DC
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN32)
+###############################################################################
+# DCN35
+###############################################################################
+CLK_MGR_DCN35 = dcn35_smu.o dcn35_clk_mgr.o
+
+AMD_DAL_CLK_MGR_DCN35 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn35/,$(CLK_MGR_DCN35))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN35)
endif
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index dcedf9645..3e73c4e59 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -48,6 +48,7 @@
#include "dcn315/dcn315_clk_mgr.h"
#include "dcn316/dcn316_clk_mgr.h"
#include "dcn32/dcn32_clk_mgr.h"
+#include "dcn35/dcn35_clk_mgr.h"
int clk_mgr_helper_get_active_display_cnt(
struct dc *dc,
@@ -354,6 +355,19 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
}
break;
+ case AMDGPU_FAMILY_GC_11_5_0: {
+ struct clk_mgr_dcn35 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
+
+ if (clk_mgr == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dcn35_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+ return &clk_mgr->base.base;
+ }
+ break;
+
#endif /* CONFIG_DRM_AMD_DC_FP - Family RV */
default:
ASSERT(0); /* Unknown Asic */
@@ -405,6 +419,10 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
dcn314_clk_mgr_destroy(clk_mgr);
break;
+ case AMDGPU_FAMILY_GC_11_5_0:
+ dcn35_clk_mgr_destroy(clk_mgr);
+ break;
+
default:
break;
}
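
[annotation] The new AMDGPU_FAMILY_GC_11_5_0 case above pairs with the destroy case below it: every family that allocates its own clk_mgr wrapper in dc_clk_mgr_create() needs a matching branch in dc_destroy_clk_mgr(), or the wrapper leaks. A minimal standalone sketch of that factory pattern, using simplified placeholder types rather than the real DC structures:

#include <stdlib.h>

struct clk_mgr { int family; };

/* create: allocate the family-specific wrapper and hand back the base */
static struct clk_mgr *clk_mgr_create(int family)
{
	struct clk_mgr *mgr = calloc(1, sizeof(*mgr));

	if (!mgr)
		return NULL;	/* mirrors the BREAK_TO_DEBUGGER() failure path */
	mgr->family = family;
	return mgr;
}

/* destroy: dispatch on the same family id so the matching teardown runs */
static void clk_mgr_destroy(struct clk_mgr *mgr)
{
	switch (mgr->family) {
	default:
		free(mgr);
		break;
	}
}

int main(void)
{
	struct clk_mgr *mgr = clk_mgr_create(0);

	if (mgr)
		clk_mgr_destroy(mgr);
	return 0;
}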
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c
index 5399b8cf6..c9ba7b3fd 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c
@@ -30,7 +30,7 @@
#include "dce110/dce110_clk_mgr.h"
#include "dce120_clk_mgr.h"
#include "dce100/dce_clk_mgr.h"
-#include "dce120/dce120_hw_sequencer.h"
+#include "dce120/dce120_hwseq.h"
static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
/*ClocksStateInvalid - should not be used*/
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
index 694fe4271..9c90090e7 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
@@ -59,8 +59,6 @@
#define CTX \
clk_mgr->base.ctx
-#define DC_LOGGER \
- clk_mgr->base.ctx->logger
static const struct clk_mgr_registers clk_mgr_regs = {
CLK_COMMON_REG_LIST_DCN_201()
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index 3db4ef564..ce1386e22 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -253,7 +253,7 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index 2618504e2..a84f1e376 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -87,6 +87,20 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0,
#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
+#define regCLK1_CLK2_BYPASS_CNTL 0x029c
+#define regCLK1_CLK2_BYPASS_CNTL_BASE_IDX 0
+
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT 0x0
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV__SHIFT 0x10
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
+
+#define regCLK6_0_CLK6_spll_field_8 0x464b
+#define regCLK6_0_CLK6_spll_field_8_BASE_IDX 0
+
+#define CLK6_0_CLK6_spll_field_8__spll_ssc_en__SHIFT 0xd
+#define CLK6_0_CLK6_spll_field_8__spll_ssc_en_MASK 0x00002000L
+
#define REG(reg_name) \
(CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
@@ -157,6 +171,37 @@ static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state
}
}
+bool dcn314_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ uint32_t ssc_enable;
+
+ REG_GET(CLK6_0_CLK6_spll_field_8, spll_ssc_en, &ssc_enable);
+
+ return ssc_enable == 1;
+}
+
+void dcn314_init_clocks(struct clk_mgr *clk_mgr)
+{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+ uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+
+ memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+ // Assumption is that boot state always supports pstate
+ clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
+ clk_mgr->clks.p_state_change_support = true;
+ clk_mgr->clks.prev_p_state_change_support = true;
+ clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
+ clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
+
+	// adjust the dp_dto reference clock if SSC is enabled, otherwise use dprefclk
+ if (dcn314_is_spll_ssc_enabled(clk_mgr))
+ clk_mgr->dp_dto_source_clock_in_khz =
+ dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz);
+ else
+ clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;
+}
+
void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
@@ -281,7 +326,7 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
@@ -433,6 +478,11 @@ static DpmClocks314_t dummy_clocks;
static struct dcn314_watermarks dummy_wms = { 0 };
+static struct dcn314_ss_info_table ss_info_table = {
+ .ss_divider = 1000,
+ .ss_percentage = {0, 0, 375, 375, 375}
+};
+
static void dcn314_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn314_watermarks *table)
{
int i, num_valid_sets;
@@ -705,13 +755,31 @@ static struct clk_mgr_funcs dcn314_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
.update_clocks = dcn314_update_clocks,
- .init_clocks = dcn31_init_clocks,
+ .init_clocks = dcn314_init_clocks,
.enable_pme_wa = dcn314_enable_pme_wa,
.are_clock_states_equal = dcn314_are_clock_states_equal,
.notify_wm_ranges = dcn314_notify_wm_ranges
};
extern struct clk_mgr_funcs dcn3_fpga_funcs;
+static void dcn314_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
+{
+ uint32_t clock_source;
+ //uint32_t ssc_enable;
+
+ REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
+ //REG_GET(CLK6_0_CLK6_spll_field_8, spll_ssc_en, &ssc_enable);
+
+ if (dcn314_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) {
+ clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];
+
+ if (clk_mgr->dprefclk_ss_percentage != 0) {
+ clk_mgr->ss_on_dprefclk = true;
+ clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider;
+ }
+ }
+}
+
void dcn314_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_dcn314 *clk_mgr,
@@ -779,6 +847,7 @@ void dcn314_clk_mgr_construct(
clk_mgr->base.base.dprefclk_khz = 600000;
clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
dce_clock_read_ss_info(&clk_mgr->base);
+ dcn314_read_ss_info_from_lut(&clk_mgr->base);
/* if BIOS enabled SS, the driver needs to adjust the dtb clock; only enable with a correct BIOS */
clk_mgr->base.base.bw_params = &dcn314_bw_params;
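
[annotation] The LUT read above selects a spread-spectrum percentage by the CLK2_BYPASS_SEL clock source, and dcn314_init_clocks() then derives the DP DTO source clock from it. A standalone sketch of that adjustment, assuming the center-spread halving that dce_adjust_dp_ref_freq_for_ss() applies; treat the exact formula here as an illustration, not the driver's fixed-point math:

#include <stdint.h>
#include <stdio.h>

#define NUM_CLOCK_SOURCES 5

static const uint32_t ss_divider = 1000;
static const uint32_t ss_percentage[NUM_CLOCK_SOURCES] = { 0, 0, 375, 375, 375 };

static uint32_t adjust_dp_ref_khz(uint32_t dprefclk_khz, uint32_t clock_source)
{
	uint32_t pct;	/* spread in units of 1/ss_divider percent */

	if (clock_source >= NUM_CLOCK_SOURCES)
		return dprefclk_khz;
	pct = ss_percentage[clock_source];
	if (!pct)
		return dprefclk_khz;	/* SSC not used on this source */
	/* 375/1000 = 0.375%; center-spread halves the downward deviation */
	return dprefclk_khz - (uint32_t)((uint64_t)dprefclk_khz * pct / (ss_divider * 100 * 2));
}

int main(void)
{
	/* 600000 kHz dprefclk on clock source 2 -> 598875 kHz */
	printf("%u\n", adjust_dp_ref_khz(600000, 2));
	return 0;
}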
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
index 171f84340..002c28e80 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
@@ -28,6 +28,8 @@
#define __DCN314_CLK_MGR_H__
#include "clk_mgr_internal.h"
+#define DCN314_NUM_CLOCK_SOURCES 5
+
struct dcn314_watermarks;
struct dcn314_smu_watermark_set {
@@ -40,9 +42,18 @@ struct clk_mgr_dcn314 {
struct dcn314_smu_watermark_set smu_wm_set;
};
+struct dcn314_ss_info_table {
+ uint32_t ss_divider;
+ uint32_t ss_percentage[DCN314_NUM_CLOCK_SOURCES];
+};
+
bool dcn314_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b);
+bool dcn314_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base);
+
+void dcn314_init_clocks(struct clk_mgr *clk_mgr);
+
void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index 8776055bb..644da4637 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -232,7 +232,7 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static void dcn315_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 09151cc56..12f3e8aa4 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -239,7 +239,7 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static void dcn316_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dalsmc.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dalsmc.h
index c427be6ad..724a508b0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dalsmc.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dalsmc.h
@@ -55,7 +55,16 @@
#define DALSMC_MSG_SetFclkSwitchAllow 0x11
#define DALSMC_MSG_SetCabForUclkPstate 0x12
#define DALSMC_MSG_SetWorstCaseUclkLatency 0x13
-#define DALSMC_Message_Count 0x14
+#define DALSMC_MSG_SetAlwaysWaitDmcubResp 0x14
+#define DALSMC_MSG_ReturnHardMinStatus 0x15
+#define DALSMC_Message_Count 0x16
+
+#define CHECK_HARD_MIN_CLK_DISPCLK 0x1
+#define CHECK_HARD_MIN_CLK_DPPCLK 0x2
+#define CHECK_HARD_MIN_CLK_DPREFCLK 0x4
+#define CHECK_HARD_MIN_CLK_DCFCLK 0x8
+#define CHECK_HARD_MIN_CLK_DTBCLK 0x10
+#define CHECK_HARD_MIN_CLK_UCLK 0x20
typedef enum {
FCLK_SWITCH_DISALLOW,
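
[annotation] The new CHECK_HARD_MIN_CLK_* values are one-hot flags that dcn32_clk_mgr_smu_msg.c (further down in this diff) ORs together before comparing against the PMFW response. A minimal sketch of that composition, with the macro values duplicated so it compiles standalone:

#include <stdio.h>

#define CHECK_HARD_MIN_CLK_DISPCLK  0x1
#define CHECK_HARD_MIN_CLK_DPREFCLK 0x4
#define CHECK_HARD_MIN_CLK_UCLK     0x20

/* DPREFCLK is always expected; OR in the one clock being requested */
static unsigned int build_check_mask(int requesting_dispclk, int requesting_uclk)
{
	unsigned int mask = CHECK_HARD_MIN_CLK_DPREFCLK;

	if (requesting_dispclk)
		mask |= CHECK_HARD_MIN_CLK_DISPCLK;
	if (requesting_uclk)
		mask |= CHECK_HARD_MIN_CLK_UCLK;
	return mask;	/* fulfilled when (response & mask) == mask */
}

int main(void)
{
	printf("dispclk request mask: 0x%x\n", build_check_mask(1, 0));	/* 0x5 */
	return 0;
}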
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index e9345f655..a496930b1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -53,6 +53,14 @@
#define mmCLK1_CLK3_DFS_CNTL 0x16E72
#define mmCLK1_CLK4_DFS_CNTL 0x16E75
+#define mmCLK1_CLK0_CURRENT_CNT 0x16EE7
+#define mmCLK1_CLK1_CURRENT_CNT 0x16EE8
+#define mmCLK1_CLK2_CURRENT_CNT 0x16EE9
+#define mmCLK1_CLK3_CURRENT_CNT 0x16EEA
+#define mmCLK1_CLK4_CURRENT_CNT 0x16EEB
+
+#define mmCLK4_CLK0_CURRENT_CNT 0x1B0C9
+
#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001ffUL
#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000f000UL
#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xffff0000UL
@@ -450,6 +458,58 @@ static int dcn32_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base)
return 0;
}
+static void dcn32_auto_dpm_test_log(struct dc_clocks *new_clocks, struct clk_mgr_internal *clk_mgr)
+{
+ unsigned int dispclk_khz_reg = REG_READ(CLK1_CLK0_CURRENT_CNT); // DISPCLK
+ unsigned int dppclk_khz_reg = REG_READ(CLK1_CLK1_CURRENT_CNT); // DPPCLK
+ unsigned int dprefclk_khz_reg = REG_READ(CLK1_CLK2_CURRENT_CNT); // DPREFCLK
+ unsigned int dcfclk_khz_reg = REG_READ(CLK1_CLK3_CURRENT_CNT); // DCFCLK
+ unsigned int dtbclk_khz_reg = REG_READ(CLK1_CLK4_CURRENT_CNT); // DTBCLK
+ unsigned int fclk_khz_reg = REG_READ(CLK4_CLK0_CURRENT_CNT); // FCLK
+
+ // Overrides for these clocks in case there is no p_state change support
+ int dramclk_khz_override = new_clocks->dramclk_khz;
+ int fclk_khz_override = new_clocks->fclk_khz;
+
+ int num_fclk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_fclk_levels - 1;
+
+ if (!new_clocks->p_state_change_support) {
+ dramclk_khz_override = clk_mgr->base.bw_params->max_memclk_mhz * 1000;
+ }
+ if (!new_clocks->fclk_p_state_change_support) {
+ fclk_khz_override = clk_mgr->base.bw_params->clk_table.entries[num_fclk_levels].fclk_mhz * 1000;
+ }
+
+ ////////////////////////////////////////////////////////////////////////////
+ // IMPORTANT: When adding more clocks to these logs, do NOT put a newline
+ // anywhere other than at the very end of the string.
+ //
+ // Formatting example (make sure to have " - " between each entry):
+ //
+ // AutoDPMTest: clk1:%d - clk2:%d - clk3:%d - clk4:%d\n"
+ ////////////////////////////////////////////////////////////////////////////
+ if (new_clocks &&
+ new_clocks->dramclk_khz > 0 &&
+ new_clocks->fclk_khz > 0 &&
+ new_clocks->dcfclk_khz > 0 &&
+ new_clocks->dppclk_khz > 0) {
+
+ DC_LOG_AUTO_DPM_TEST("AutoDPMTest: dramclk:%d - fclk:%d - "
+ "dcfclk:%d - dppclk:%d - dispclk_hw:%d - "
+ "dppclk_hw:%d - dprefclk_hw:%d - dcfclk_hw:%d - "
+ "dtbclk_hw:%d - fclk_hw:%d\n",
+ dramclk_khz_override,
+ fclk_khz_override,
+ new_clocks->dcfclk_khz,
+ new_clocks->dppclk_khz,
+ dispclk_khz_reg,
+ dppclk_khz_reg,
+ dprefclk_khz_reg,
+ dcfclk_khz_reg,
+ dtbclk_khz_reg,
+ fclk_khz_reg);
+ }
+}
static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
@@ -646,6 +706,10 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
/*update dmcu for wait_loop count*/
dmcu->funcs->set_psr_wait_loop(dmcu,
clk_mgr_base->clks.dispclk_khz / 1000 / 7);
+
+ if (dc->config.enable_auto_dpm_test_logs) {
+ dcn32_auto_dpm_test_log(new_clocks, clk_mgr);
+ }
}
static uint32_t dcn32_get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
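
[annotation] dcn32_auto_dpm_test_log() above clamps the logged DRAM/FCLK values to the table maximums when p-state switching is unsupported, since the hardware will not actually drop below them in that case. A tiny sketch of that override rule, with made-up values:

#include <stdio.h>

struct clocks {
	int dramclk_khz;
	int fclk_khz;
	int p_state_change_support;
	int fclk_p_state_change_support;
};

int main(void)
{
	struct clocks new_clocks = { 400000, 600000, 0, 1 };
	int max_memclk_khz = 800000;	/* bw_params->max_memclk_mhz * 1000 */
	int max_fclk_khz = 1200000;	/* top fclk table level * 1000 */
	int dram = new_clocks.p_state_change_support ?
			new_clocks.dramclk_khz : max_memclk_khz;
	int fclk = new_clocks.fclk_p_state_change_support ?
			new_clocks.fclk_khz : max_fclk_khz;

	printf("AutoDPMTest: dramclk:%d - fclk:%d\n", dram, fclk);
	return 0;
}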
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
index 700ce4203..df244b175 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
@@ -90,6 +90,64 @@ static bool dcn32_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uint
return false;
}
+/*
+ * These helpers report delay information back to the caller so we can
+ * aggregate the total delay when requesting a hard-min clock:
+ *
+ * dcn32_smu_wait_for_response_delay
+ * dcn32_smu_send_msg_with_param_delay
+ */
+static uint32_t dcn32_smu_wait_for_response_delay(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries, unsigned int *total_delay_us)
+{
+ uint32_t reg = 0;
+ *total_delay_us = 0;
+
+ do {
+ reg = REG_READ(DAL_RESP_REG);
+ if (reg)
+ break;
+
+ if (delay_us >= 1000)
+ msleep(delay_us/1000);
+ else if (delay_us > 0)
+ udelay(delay_us);
+ *total_delay_us += delay_us;
+ } while (max_retries--);
+
+ return reg;
+}
+
+static bool dcn32_smu_send_msg_with_param_delay(struct clk_mgr_internal *clk_mgr, uint32_t msg_id, uint32_t param_in, uint32_t *param_out, unsigned int *total_delay_us)
+{
+ unsigned int delay1_us, delay2_us;
+ *total_delay_us = 0;
+
+ /* Wait for response register to be ready */
+ dcn32_smu_wait_for_response_delay(clk_mgr, 10, 200000, &delay1_us);
+
+ /* Clear response register */
+ REG_WRITE(DAL_RESP_REG, 0);
+
+ /* Set the parameter register for the SMU message */
+ REG_WRITE(DAL_ARG_REG, param_in);
+
+ /* Trigger the message transaction by writing the message ID */
+ REG_WRITE(DAL_MSG_REG, msg_id);
+
+ /* Wait for response */
+ if (dcn32_smu_wait_for_response_delay(clk_mgr, 10, 200000, &delay2_us) == DALSMC_Result_OK) {
+ if (param_out)
+ *param_out = REG_READ(DAL_ARG_REG);
+
+ *total_delay_us = delay1_us + delay2_us;
+ return true;
+ }
+
+ *total_delay_us = delay1_us + 2000000;
+ return false;
+}
+
void dcn32_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool enable)
{
smu_print("FCLK P-state support value is : %d\n", enable);
@@ -122,10 +180,98 @@ void dcn32_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr)
DALSMC_MSG_BacoAudioD3PME, 0, NULL);
}
+/* Check whether the PMFW version supports the ReturnHardMinStatus message */
+static bool dcn32_get_hard_min_status_supported(struct clk_mgr_internal *clk_mgr)
+{
+ if (ASICREV_IS_GC_11_0_0(clk_mgr->base.ctx->asic_id.hw_internal_rev)) {
+ if (clk_mgr->smu_ver >= 0x4e6a00)
+ return true;
+ } else if (ASICREV_IS_GC_11_0_2(clk_mgr->base.ctx->asic_id.hw_internal_rev)) {
+ if (clk_mgr->smu_ver >= 0x524e00)
+ return true;
+ } else { /* ASICREV_IS_GC_11_0_3 */
+ if (clk_mgr->smu_ver >= 0x503900)
+ return true;
+ }
+ return false;
+}
+
+/* Returns the clocks which were fulfilled by the DAL hard min arbiter in PMFW */
+static unsigned int dcn32_smu_get_hard_min_status(struct clk_mgr_internal *clk_mgr, bool *no_timeout, unsigned int *total_delay_us)
+{
+ uint32_t response = 0;
+
+ /* bits 23:16 for clock type, lower 16 bits for frequency in MHz */
+ uint32_t param = 0;
+
+ *no_timeout = dcn32_smu_send_msg_with_param_delay(clk_mgr,
+ DALSMC_MSG_ReturnHardMinStatus, param, &response, total_delay_us);
+
+ smu_print("SMU Get hard min status: no_timeout %d delay %d us clk bits %x\n",
+ *no_timeout, *total_delay_us, response);
+
+ return response;
+}
+
+static bool dcn32_smu_wait_get_hard_min_status(struct clk_mgr_internal *clk_mgr,
+ uint32_t clk)
+{
+ int readDalHardMinClkBits, checkDalHardMinClkBits;
+ unsigned int total_delay_us, read_total_delay_us;
+ bool no_timeout, hard_min_done;
+
+ static unsigned int cur_wait_get_hard_min_max_us;
+ static unsigned int cur_wait_get_hard_min_max_timeouts;
+
+ checkDalHardMinClkBits = CHECK_HARD_MIN_CLK_DPREFCLK;
+ if (clk == PPCLK_DISPCLK)
+ checkDalHardMinClkBits |= CHECK_HARD_MIN_CLK_DISPCLK;
+ if (clk == PPCLK_DPPCLK)
+ checkDalHardMinClkBits |= CHECK_HARD_MIN_CLK_DPPCLK;
+ if (clk == PPCLK_DCFCLK)
+ checkDalHardMinClkBits |= CHECK_HARD_MIN_CLK_DCFCLK;
+ if (clk == PPCLK_DTBCLK)
+ checkDalHardMinClkBits |= CHECK_HARD_MIN_CLK_DTBCLK;
+ if (clk == PPCLK_UCLK)
+ checkDalHardMinClkBits |= CHECK_HARD_MIN_CLK_UCLK;
+
+ if (checkDalHardMinClkBits == CHECK_HARD_MIN_CLK_DPREFCLK)
+ return false;
+
+ total_delay_us = 0;
+ hard_min_done = false;
+ while (1) {
+ readDalHardMinClkBits = dcn32_smu_get_hard_min_status(clk_mgr, &no_timeout, &read_total_delay_us);
+ total_delay_us += read_total_delay_us;
+ if (checkDalHardMinClkBits == (readDalHardMinClkBits & checkDalHardMinClkBits)) {
+ hard_min_done = true;
+ break;
+ }
+
+ if (total_delay_us >= 2000000) {
+ cur_wait_get_hard_min_max_timeouts++;
+ smu_print("SMU Wait get hard min status: %d timeouts\n", cur_wait_get_hard_min_max_timeouts);
+ break;
+ }
+ msleep(1);
+ total_delay_us += 1000;
+ }
+
+ if (total_delay_us > cur_wait_get_hard_min_max_us)
+ cur_wait_get_hard_min_max_us = total_delay_us;
+
+ smu_print("SMU Wait get hard min status: no_timeout %d, delay %d us, max %d us, read %x, check %x\n",
+ no_timeout, total_delay_us, cur_wait_get_hard_min_max_us, readDalHardMinClkBits, checkDalHardMinClkBits);
+
+ return hard_min_done;
+}
+
/* Returns the actual frequency that was set in MHz, 0 on failure */
unsigned int dcn32_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz)
{
uint32_t response = 0;
+ bool hard_min_done = false;
/* bits 23:16 for clock type, lower 16 bits for frequency in MHz */
uint32_t param = (clk << 16) | freq_mhz;
@@ -133,9 +279,13 @@ unsigned int dcn32_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, ui
smu_print("SMU Set hard min by freq: clk = %d, freq_mhz = %d MHz\n", clk, freq_mhz);
dcn32_smu_send_msg_with_param(clk_mgr,
- DALSMC_MSG_SetHardMinByFreq, param, &response);
+ DALSMC_MSG_SetHardMinByFreq, param, &response);
- smu_print("SMU Frequency set = %d KHz\n", response);
+ if (dcn32_get_hard_min_status_supported(clk_mgr)) {
+ hard_min_done = dcn32_smu_wait_get_hard_min_status(clk_mgr, clk);
+ smu_print("SMU Frequency set = %d KHz hard_min_done %d\n", response, hard_min_done);
+ } else
+ smu_print("SMU Frequency set = %d KHz\n", response);
return response;
}
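
[annotation] The *_delay helpers added above follow a simple pattern: poll a response register in fixed sleep steps and accumulate the time waited, so callers such as dcn32_smu_wait_get_hard_min_status() can sum delays across several SMU transactions. A generic sketch, with read_reg()/sleep_us() as hypothetical stand-ins for REG_READ()/udelay():

unsigned int poll_with_delay(unsigned int (*read_reg)(void),
			     void (*sleep_us)(unsigned int),
			     unsigned int step_us,
			     unsigned int max_retries,
			     unsigned int *total_us)
{
	unsigned int reg = 0;

	*total_us = 0;
	do {
		reg = read_reg();
		if (reg)
			break;		/* non-zero response: done */
		sleep_us(step_us);
		*total_us += step_us;	/* aggregate the wait */
	} while (max_retries--);

	return reg;
}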
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
new file mode 100644
index 000000000..54df6cac1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -0,0 +1,1145 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "dcn35_clk_mgr.h"
+
+#include "dccg.h"
+#include "clk_mgr_internal.h"
+
+// For dce12_get_dp_ref_freq_khz
+#include "dce100/dce_clk_mgr.h"
+
+// For dcn20_update_clocks_update_dpp_dto
+#include "dcn20/dcn20_clk_mgr.h"
+
+#include "reg_helper.h"
+#include "core_types.h"
+#include "dcn35_smu.h"
+#include "dm_helpers.h"
+
+/* TODO: remove this include once we have ported over the remaining clk mgr functions */
+#include "dcn30/dcn30_clk_mgr.h"
+#include "dcn31/dcn31_clk_mgr.h"
+
+#include "dc_dmub_srv.h"
+#include "link.h"
+#include "logger_types.h"
+#undef DC_LOGGER
+#define DC_LOGGER \
+ clk_mgr->base.base.ctx->logger
+
+#define regCLK1_CLK_PLL_REQ 0x0237
+#define regCLK1_CLK_PLL_REQ_BASE_IDX 0
+
+#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
+#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
+#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
+#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
+#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
+#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
+
+#define regCLK1_CLK2_BYPASS_CNTL 0x029c
+#define regCLK1_CLK2_BYPASS_CNTL_BASE_IDX 0
+
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT 0x0
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV__SHIFT 0x10
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
+
+#define REG(reg_name) \
+ (ctx->clk_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
+
+#define TO_CLK_MGR_DCN35(clk_mgr)\
+ container_of(clk_mgr, struct clk_mgr_dcn35, base)
+
+static int dcn35_get_active_display_cnt_wa(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i, display_count;
+ bool tmds_present = false;
+
+ display_count = 0;
+ for (i = 0; i < context->stream_count; i++) {
+ const struct dc_stream_state *stream = context->streams[i];
+
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
+ stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
+ stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
+ tmds_present = true;
+ }
+
+ for (i = 0; i < dc->link_count; i++) {
+ const struct dc_link *link = dc->links[i];
+
+ /* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
+ if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
+ link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ display_count++;
+ }
+
+	/* WA for hang on HDMI after display is turned off, then back on */
+ if (display_count == 0 && tmds_present)
+ display_count = 1;
+
+ return display_count;
+}
+
+static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
+ bool safe_to_lower, bool disable)
+{
+ struct dc *dc = clk_mgr_base->ctx->dc;
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; ++i) {
+ struct pipe_ctx *pipe = safe_to_lower
+ ? &context->res_ctx.pipe_ctx[i]
+ : &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->top_pipe || pipe->prev_odm_pipe)
+ continue;
+ if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
+ !pipe->stream->link_enc)) {
+ if (disable) {
+ if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
+ pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+
+ reset_sync_context_for_pipe(dc, context, i);
+ } else {
+ pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
+ }
+ }
+ }
+}
+
+static void dcn35_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr,
+ struct dc_state *context,
+ int ref_dtbclk_khz)
+{
+ struct dccg *dccg = clk_mgr->dccg;
+ uint32_t tg_mask = 0;
+ int i;
+
+ for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct dtbclk_dto_params dto_params = {0};
+
+ /* use mask to program DTO once per tg */
+ if (pipe_ctx->stream_res.tg &&
+ !(tg_mask & (1 << pipe_ctx->stream_res.tg->inst))) {
+ tg_mask |= (1 << pipe_ctx->stream_res.tg->inst);
+
+ dto_params.otg_inst = pipe_ctx->stream_res.tg->inst;
+ dto_params.ref_dtbclk_khz = ref_dtbclk_khz;
+
+ dccg->funcs->set_dtbclk_dto(clk_mgr->dccg, &dto_params);
+ //dccg->funcs->set_audio_dtbclk_dto(clk_mgr->dccg, &dto_params);
+ }
+ }
+}
+
+static void dcn35_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
+ struct dc_state *context, bool safe_to_lower)
+{
+ int i;
+ bool dppclk_active[MAX_PIPES] = {0};
+
+ clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz;
+ for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
+ int dpp_inst = 0, dppclk_khz, prev_dppclk_khz;
+
+ dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+
+ if (context->res_ctx.pipe_ctx[i].plane_res.dpp)
+ dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
+ else if (!context->res_ctx.pipe_ctx[i].plane_res.dpp && dppclk_khz == 0) {
+ /* dpp == NULL && dppclk_khz == 0 is valid because of pipe harvesting.
+ * In this case just continue in loop
+ */
+ continue;
+ } else if (!context->res_ctx.pipe_ctx[i].plane_res.dpp && dppclk_khz > 0) {
+ /* The software state is not valid if dpp resource is NULL and
+ * dppclk_khz > 0.
+ */
+ ASSERT(false);
+ continue;
+ }
+
+ prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[i];
+
+ if (safe_to_lower || prev_dppclk_khz < dppclk_khz)
+ clk_mgr->dccg->funcs->update_dpp_dto(
+ clk_mgr->dccg, dpp_inst, dppclk_khz);
+ dppclk_active[dpp_inst] = true;
+ }
+ if (safe_to_lower)
+ for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
+ struct dpp *old_dpp = clk_mgr->base.ctx->dc->current_state->res_ctx.pipe_ctx[i].plane_res.dpp;
+
+ if (old_dpp && !dppclk_active[old_dpp->inst])
+ clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, old_dpp->inst, 0);
+ }
+}
+
+void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ union dmub_rb_cmd cmd;
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
+ struct dc *dc = clk_mgr_base->ctx->dc;
+ int display_count;
+ bool update_dppclk = false;
+ bool update_dispclk = false;
+ bool dpp_clock_lowered = false;
+
+ if (dc->work_arounds.skip_clock_update)
+ return;
+
+ /* DTBCLK is fixed, so set a default if unspecified. */
+ if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz)
+ new_clocks->ref_dtbclk_khz = 600000;
+
+ /*
+	 * if it is safe to lower, but we are already in the lower state, we don't have to do anything;
+	 * also, if safe_to_lower is false, we just go to the higher state
+ */
+ if (safe_to_lower) {
+ if (new_clocks->zstate_support != DCN_ZSTATE_SUPPORT_DISALLOW &&
+ new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
+ dcn35_smu_set_zstate_support(clk_mgr, new_clocks->zstate_support);
+ dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, true);
+ clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
+ }
+
+ if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
+ dcn35_smu_set_dtbclk(clk_mgr, false);
+ clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
+ }
+ /* check that we're not already in lower */
+ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
+ display_count = dcn35_get_active_display_cnt_wa(dc, context);
+ /* if we can go lower, go lower */
+ if (display_count == 0)
+ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
+ }
+ } else {
+ if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_DISALLOW &&
+ new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
+ dcn35_smu_set_zstate_support(clk_mgr, DCN_ZSTATE_SUPPORT_DISALLOW);
+ dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, false);
+ clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
+ }
+
+ if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
+ dcn35_smu_set_dtbclk(clk_mgr, true);
+ clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
+
+ dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz);
+ clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
+ }
+
+ /* check that we're not already in D0 */
+ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
+ union display_idle_optimization_u idle_info = { 0 };
+
+ dcn35_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
+ /* update power state */
+ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE;
+ }
+ }
+ if (dc->debug.force_min_dcfclk_mhz > 0)
+ new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
+ new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
+
+ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
+ clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+ dcn35_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
+ }
+
+ if (should_set_clock(safe_to_lower,
+ new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
+ clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+ dcn35_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
+ }
+
+	// workaround: keep dppclk at no less than 100 MHz to avoid underflow when switching from a lower-clock eDP panel to eDP plus a 4K monitor
+ if (new_clocks->dppclk_khz < 100000)
+ new_clocks->dppclk_khz = 100000;
+
+ if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
+ if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
+ dpp_clock_lowered = true;
+ clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
+ update_dppclk = true;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+ dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
+
+ clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+ dcn35_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
+ dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
+
+ update_dispclk = true;
+ }
+
+ /* clock limits are received with MHz precision, divide by 1000 to prevent setting clocks at every call */
+ if (!dc->debug.disable_dtb_ref_clk_switch &&
+ should_set_clock(safe_to_lower, new_clocks->ref_dtbclk_khz / 1000,
+ clk_mgr_base->clks.ref_dtbclk_khz / 1000)) {
+ dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz);
+ clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
+ }
+
+ if (dpp_clock_lowered) {
+ // increase per DPP DTO before lowering global dppclk
+ dcn35_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ dcn35_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
+ } else {
+ // increase global DPPCLK before lowering per DPP DTO
+ if (update_dppclk || update_dispclk)
+ dcn35_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
+ dcn35_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ }
+
+ // notify DMCUB of latest clocks
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.notify_clocks.header.type = DMUB_CMD__CLK_MGR;
+ cmd.notify_clocks.header.sub_type = DMUB_CMD__CLK_MGR_NOTIFY_CLOCKS;
+ cmd.notify_clocks.clocks.dcfclk_khz = clk_mgr_base->clks.dcfclk_khz;
+ cmd.notify_clocks.clocks.dcfclk_deep_sleep_khz =
+ clk_mgr_base->clks.dcfclk_deep_sleep_khz;
+ cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
+ cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
+
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
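
[annotation] The DPP clock path in dcn35_update_clocks() above orders the per-pipe DTO and global DPPCLK updates so a pipe's effective clock never overshoots its target. A compact sketch of that rule, with hypothetical helpers standing in for the DTO and SMU programming:

void apply_dppclk(int new_khz, int cur_khz,
		  void (*update_dtos)(int khz),
		  void (*set_global)(int khz))
{
	if (new_khz < cur_khz) {
		/* lowering: retune per-pipe DTOs first, then drop the source clock */
		update_dtos(new_khz);
		set_global(new_khz);
	} else {
		/* raising: bump the source clock first, then retune the DTOs */
		set_global(new_khz);
		update_dtos(new_khz);
	}
}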
+
+static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
+{
+ /* get FbMult value */
+ struct fixed31_32 pll_req;
+ unsigned int fbmult_frac_val = 0;
+ unsigned int fbmult_int_val = 0;
+ struct dc_context *ctx = clk_mgr->base.ctx;
+
+	/*
+	 * The register value of fbmult is in 8.16 format; we convert it to 31.32
+	 * to leverage the fixed-point operations available in the driver
+	 */
+
+ REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part*/
+ REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */
+
+ pll_req = dc_fixpt_from_int(fbmult_int_val);
+
+	/*
+	 * since the fractional part is only 16 bits in the register definition but is
+	 * 32 bits in our fixed-point definition, shift left by 16 to obtain the correct value
+	 */
+ pll_req.value |= fbmult_frac_val << 16;
+
+ /* multiply by REFCLK period */
+ pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);
+
+ /* integer part is now VCO frequency in kHz */
+ return dc_fixpt_floor(pll_req);
+}
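
[annotation] A plain-integer sketch of the VCO computation in get_vco_frequency_from_reg() above: FbMult is an 8.16 fixed-point multiplier applied to the DFS reference, so VCO_kHz = (int + frac / 2^16) * ref_kHz. The 48000 kHz reference matches the dfs_ref_freq_khz default set later in this file; the sample FbMult value is made up:

#include <stdint.h>
#include <stdio.h>

static uint32_t vco_khz(uint32_t fbmult_int, uint32_t fbmult_frac,
			uint32_t ref_khz)
{
	/* combine into 8.16 fixed point, widen before multiplying */
	uint64_t fbmult_q16 = ((uint64_t)fbmult_int << 16) | fbmult_frac;

	return (uint32_t)((fbmult_q16 * ref_khz) >> 16);
}

int main(void)
{
	/* FbMult = 75.5 with a 48000 kHz reference -> 3624000 kHz */
	printf("%u\n", vco_khz(75, 0x8000, 48000));
	return 0;
}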
+
+static void dcn35_enable_pme_wa(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ dcn35_smu_enable_pme_wa(clk_mgr);
+}
+
+void dcn35_init_clocks(struct clk_mgr *clk_mgr)
+{
+ uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+
+ memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+
+ // Assumption is that boot state always supports pstate
+ clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
+ clk_mgr->clks.p_state_change_support = true;
+ clk_mgr->clks.prev_p_state_change_support = true;
+ clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
+ clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
+}
+
+bool dcn35_are_clock_states_equal(struct dc_clocks *a,
+ struct dc_clocks *b)
+{
+ if (a->dispclk_khz != b->dispclk_khz)
+ return false;
+ else if (a->dppclk_khz != b->dppclk_khz)
+ return false;
+ else if (a->dcfclk_khz != b->dcfclk_khz)
+ return false;
+ else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
+ return false;
+ else if (a->zstate_support != b->zstate_support)
+ return false;
+ else if (a->dtbclk_en != b->dtbclk_en)
+ return false;
+
+ return true;
+}
+
+static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
+ struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
+{
+
+}
+
+static struct clk_bw_params dcn35_bw_params = {
+ .vram_type = Ddr4MemType,
+ .num_channels = 1,
+ .clk_table = {
+ .num_entries = 4,
+ },
+
+};
+
+static struct wm_table ddr5_wm_table = {
+ .entries = {
+ {
+ .wm_inst = WM_A,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 14.0,
+ .sr_enter_plus_exit_time_us = 16.0,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_B,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 14.0,
+ .sr_enter_plus_exit_time_us = 16.0,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_C,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 14.0,
+ .sr_enter_plus_exit_time_us = 16.0,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_D,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 14.0,
+ .sr_enter_plus_exit_time_us = 16.0,
+ .valid = true,
+ },
+ }
+};
+
+static struct wm_table lpddr5_wm_table = {
+ .entries = {
+ {
+ .wm_inst = WM_A,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 14.0,
+ .sr_enter_plus_exit_time_us = 16.0,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_B,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 14.0,
+ .sr_enter_plus_exit_time_us = 16.0,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_C,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 14.0,
+ .sr_enter_plus_exit_time_us = 16.0,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_D,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 14.0,
+ .sr_enter_plus_exit_time_us = 16.0,
+ .valid = true,
+ },
+ }
+};
+
+static DpmClocks_t_dcn35 dummy_clocks;
+
+static struct dcn35_watermarks dummy_wms = { 0 };
+
+static void dcn35_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn35_watermarks *table)
+{
+ int i, num_valid_sets;
+
+ num_valid_sets = 0;
+
+ for (i = 0; i < WM_SET_COUNT; i++) {
+		/* skip empty entries, the smu array has no holes */
+ if (!bw_params->wm_table.entries[i].valid)
+ continue;
+
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst;
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type;
+ /* We will not select WM based on fclk, so leave it as unconstrained */
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;
+
+ if (table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType == WM_TYPE_PSTATE_CHG) {
+ if (i == 0)
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = 0;
+ else {
+ /* add 1 to make it non-overlapping with next lvl */
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk =
+ bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1;
+ }
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxMclk =
+ bw_params->clk_table.entries[i].dcfclk_mhz;
+
+ } else {
+ /* unconstrained for memory retraining */
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;
+
+ /* Modify previous watermark range to cover up to max */
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
+ }
+ num_valid_sets++;
+ }
+
+ ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */
+
+ /* modify the min and max to make sure we cover the whole range*/
+ table->WatermarkRow[WM_DCFCLK][0].MinMclk = 0;
+ table->WatermarkRow[WM_DCFCLK][0].MinClock = 0;
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxMclk = 0xFFFF;
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
+
+	/* This is for writeback only; it does not matter currently since there is no writeback support */
+ table->WatermarkRow[WM_SOCCLK][0].WmSetting = WM_A;
+ table->WatermarkRow[WM_SOCCLK][0].MinClock = 0;
+ table->WatermarkRow[WM_SOCCLK][0].MaxClock = 0xFFFF;
+ table->WatermarkRow[WM_SOCCLK][0].MinMclk = 0;
+ table->WatermarkRow[WM_SOCCLK][0].MaxMclk = 0xFFFF;
+}
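
[annotation] The watermark rows built above are made non-overlapping by starting each level at the previous level's dcfclk plus one, then widening the first and last rows to cover 0 and 0xFFFF. A small worked example with made-up dcfclk levels:

#include <stdio.h>

int main(void)
{
	unsigned int dcfclk_mhz[] = { 400, 600, 800 };
	unsigned int min = 0, i;

	for (i = 0; i < 3; i++) {
		/* last row is widened to cover everything above it */
		unsigned int max = (i == 2) ? 0xFFFF : dcfclk_mhz[i];

		printf("WM_%c: MinMclk=%u MaxMclk=%u\n", 'A' + i, min, max);
		min = dcfclk_mhz[i] + 1;	/* +1 avoids overlap with the next level */
	}
	return 0;
}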
+
+static void dcn35_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct clk_mgr_dcn35 *clk_mgr_dcn35 = TO_CLK_MGR_DCN35(clk_mgr);
+ struct dcn35_watermarks *table = clk_mgr_dcn35->smu_wm_set.wm_set;
+
+ if (!clk_mgr->smu_ver)
+ return;
+
+ if (!table || clk_mgr_dcn35->smu_wm_set.mc_address.quad_part == 0)
+ return;
+
+ memset(table, 0, sizeof(*table));
+
+ dcn35_build_watermark_ranges(clk_mgr_base->bw_params, table);
+
+ dcn35_smu_set_dram_addr_high(clk_mgr,
+ clk_mgr_dcn35->smu_wm_set.mc_address.high_part);
+ dcn35_smu_set_dram_addr_low(clk_mgr,
+ clk_mgr_dcn35->smu_wm_set.mc_address.low_part);
+ dcn35_smu_transfer_wm_table_dram_2_smu(clk_mgr);
+}
+
+static void dcn35_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
+ struct dcn35_smu_dpm_clks *smu_dpm_clks)
+{
+ DpmClocks_t_dcn35 *table = smu_dpm_clks->dpm_clks;
+
+ if (!clk_mgr->smu_ver)
+ return;
+
+ if (!table || smu_dpm_clks->mc_address.quad_part == 0)
+ return;
+
+ memset(table, 0, sizeof(*table));
+
+ dcn35_smu_set_dram_addr_high(clk_mgr,
+ smu_dpm_clks->mc_address.high_part);
+ dcn35_smu_set_dram_addr_low(clk_mgr,
+ smu_dpm_clks->mc_address.low_part);
+ dcn35_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
+}
+
+static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
+{
+ uint32_t max = 0;
+ int i;
+
+ for (i = 0; i < num_clocks; ++i) {
+ if (clocks[i] > max)
+ max = clocks[i];
+ }
+
+ return max;
+}
+
+static inline bool is_valid_clock_value(uint32_t clock_value)
+{
+ return clock_value > 1 && clock_value < 100000;
+}
+
+static unsigned int convert_wck_ratio(uint8_t wck_ratio)
+{
+ switch (wck_ratio) {
+ case WCK_RATIO_1_2:
+ return 2;
+
+ case WCK_RATIO_1_4:
+ return 4;
+ /* Find lowest DPM, FCLK is filled in reverse order*/
+
+ default:
+ break;
+ }
+
+ return 1;
+}
+
+static inline uint32_t calc_dram_speed_mts(const MemPstateTable_t *entry)
+{
+ return entry->UClk * convert_wck_ratio(entry->WckRatio) * 2;
+}
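
[annotation] calc_dram_speed_mts() above reduces to simple arithmetic: effective DRAM speed in MT/s is UCLK in MHz scaled by the WCK ratio (factor 4 for WCK_RATIO_1_4 per convert_wck_ratio()) and the double data rate. A worked example with an illustrative, not real, DPM entry:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t uclk_mhz = 750;	/* hypothetical LPDDR5 UCLK */
	uint32_t wck_ratio = 4;		/* WCK_RATIO_1_4 -> factor 4 */

	/* 750 * 4 * 2 = 6000 MT/s */
	printf("%u MT/s\n", uclk_mhz * wck_ratio * 2);
	return 0;
}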
+
+static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr,
+ struct integrated_info *bios_info,
+ DpmClocks_t_dcn35 *clock_table)
+{
+ struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
+ struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
+ uint32_t max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
+ uint32_t max_pstate = 0, max_dram_speed_mts = 0, min_dram_speed_mts = 0;
+ uint32_t num_memps, num_fclk, num_dcfclk;
+ int i;
+
+ /* Determine min/max p-state values. */
+ num_memps = (clock_table->NumMemPstatesEnabled > NUM_MEM_PSTATE_LEVELS) ? NUM_MEM_PSTATE_LEVELS :
+ clock_table->NumMemPstatesEnabled;
+ for (i = 0; i < num_memps; i++) {
+ uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);
+
+ if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts > max_dram_speed_mts) {
+ max_dram_speed_mts = dram_speed_mts;
+ max_pstate = i;
+ }
+ }
+
+ min_dram_speed_mts = max_dram_speed_mts;
+ min_pstate = max_pstate;
+
+ for (i = 0; i < num_memps; i++) {
+ uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);
+
+ if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts < min_dram_speed_mts) {
+ min_dram_speed_mts = dram_speed_mts;
+ min_pstate = i;
+ }
+ }
+
+ /* We expect the table to contain at least one valid P-state entry. */
+ ASSERT(clock_table->NumMemPstatesEnabled &&
+ is_valid_clock_value(max_dram_speed_mts) &&
+ is_valid_clock_value(min_dram_speed_mts));
+
+ /* dispclk and dppclk can be max at any voltage, same number of levels for both */
+ if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
+ clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) {
+ max_dispclk = find_max_clk_value(clock_table->DispClocks,
+ clock_table->NumDispClkLevelsEnabled);
+ max_dppclk = find_max_clk_value(clock_table->DppClocks,
+ clock_table->NumDispClkLevelsEnabled);
+ } else {
+ /* Invalid number of entries in the table from PMFW. */
+ ASSERT(0);
+ }
+
+ /* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */
+ ASSERT(clock_table->NumDcfClkLevelsEnabled > 0);
+
+ num_fclk = (clock_table->NumFclkLevelsEnabled > NUM_FCLK_DPM_LEVELS) ? NUM_FCLK_DPM_LEVELS :
+ clock_table->NumFclkLevelsEnabled;
+ max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, num_fclk);
+
+	num_dcfclk = (clock_table->NumDcfClkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS :
+			clock_table->NumDcfClkLevelsEnabled;
+ for (i = 0; i < num_dcfclk; i++) {
+ int j;
+
+ /* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */
+ for (j = bw_params->clk_table.num_entries - 1; j > 0; j--)
+ if (bw_params->clk_table.entries[j].dcfclk_mhz <= clock_table->DcfClocks[i])
+ break;
+
+ bw_params->clk_table.entries[i].phyclk_mhz = bw_params->clk_table.entries[j].phyclk_mhz;
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = bw_params->clk_table.entries[j].phyclk_d18_mhz;
+ bw_params->clk_table.entries[i].dtbclk_mhz = bw_params->clk_table.entries[j].dtbclk_mhz;
+
+ /* Now update clocks we do read */
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemPstateTable[min_pstate].MemClk;
+ bw_params->clk_table.entries[i].voltage = clock_table->MemPstateTable[min_pstate].Voltage;
+ bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i];
+ bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i];
+ bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
+ bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
+ bw_params->clk_table.entries[i].wck_ratio =
+ convert_wck_ratio(clock_table->MemPstateTable[min_pstate].WckRatio);
+
+ /* Dcfclk and Fclk are tied, but at a different ratio */
+ bw_params->clk_table.entries[i].fclk_mhz = min(max_fclk, 2 * clock_table->DcfClocks[i]);
+ }
+
+ /* Make sure to include at least one entry at highest pstate */
+ if (max_pstate != min_pstate || i == 0) {
+ if (i > MAX_NUM_DPM_LVL - 1)
+ i = MAX_NUM_DPM_LVL - 1;
+
+ bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemPstateTable[max_pstate].MemClk;
+ bw_params->clk_table.entries[i].voltage = clock_table->MemPstateTable[max_pstate].Voltage;
+ bw_params->clk_table.entries[i].dcfclk_mhz =
+ find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].socclk_mhz =
+ find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
+ bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
+ bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
+ clock_table->MemPstateTable[max_pstate].WckRatio);
+ i++;
+ }
+ bw_params->clk_table.num_entries = i--;
+
+ /* Make sure all highest clocks are included*/
+ bw_params->clk_table.entries[i].socclk_mhz =
+ find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dispclk_mhz =
+ find_max_clk_value(clock_table->DispClocks, NUM_DISPCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dppclk_mhz =
+ find_max_clk_value(clock_table->DppClocks, NUM_DPPCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].fclk_mhz =
+ find_max_clk_value(clock_table->FclkClocks_Freq, NUM_FCLK_DPM_LEVELS);
+ ASSERT(clock_table->DcfClocks[i] == find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS));
+ bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
+ bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
+ bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels = clock_table->NumDcfClkLevelsEnabled;
+ bw_params->clk_table.num_entries_per_clk.num_dispclk_levels = clock_table->NumDispClkLevelsEnabled;
+ bw_params->clk_table.num_entries_per_clk.num_dppclk_levels = clock_table->NumDispClkLevelsEnabled;
+ bw_params->clk_table.num_entries_per_clk.num_fclk_levels = clock_table->NumFclkLevelsEnabled;
+ bw_params->clk_table.num_entries_per_clk.num_memclk_levels = clock_table->NumMemPstatesEnabled;
+ bw_params->clk_table.num_entries_per_clk.num_socclk_levels = clock_table->NumSocClkLevelsEnabled;
+
+ /*
+ * Set any 0 clocks to max default setting. Not an issue for
+ * power since we aren't doing switching in such case anyway
+ */
+ for (i = 0; i < bw_params->clk_table.num_entries; i++) {
+ if (!bw_params->clk_table.entries[i].fclk_mhz) {
+ bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;
+ bw_params->clk_table.entries[i].memclk_mhz = def_max.memclk_mhz;
+ bw_params->clk_table.entries[i].voltage = def_max.voltage;
+ }
+ if (!bw_params->clk_table.entries[i].dcfclk_mhz)
+ bw_params->clk_table.entries[i].dcfclk_mhz = def_max.dcfclk_mhz;
+ if (!bw_params->clk_table.entries[i].socclk_mhz)
+ bw_params->clk_table.entries[i].socclk_mhz = def_max.socclk_mhz;
+ if (!bw_params->clk_table.entries[i].dispclk_mhz)
+ bw_params->clk_table.entries[i].dispclk_mhz = def_max.dispclk_mhz;
+ if (!bw_params->clk_table.entries[i].dppclk_mhz)
+ bw_params->clk_table.entries[i].dppclk_mhz = def_max.dppclk_mhz;
+ if (!bw_params->clk_table.entries[i].fclk_mhz)
+ bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;
+ if (!bw_params->clk_table.entries[i].phyclk_mhz)
+ bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
+ if (!bw_params->clk_table.entries[i].phyclk_d18_mhz)
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
+ if (!bw_params->clk_table.entries[i].dtbclk_mhz)
+ bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
+ }
+ ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
+ bw_params->vram_type = bios_info->memory_type;
+ bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;
+ bw_params->num_channels = bios_info->ma_channel_number ? bios_info->ma_channel_number : 4;
+
+ for (i = 0; i < WM_SET_COUNT; i++) {
+ bw_params->wm_table.entries[i].wm_inst = i;
+
+ if (i >= bw_params->clk_table.num_entries) {
+ bw_params->wm_table.entries[i].valid = false;
+ continue;
+ }
+
+ bw_params->wm_table.entries[i].wm_type = WM_TYPE_PSTATE_CHG;
+ bw_params->wm_table.entries[i].valid = true;
+ }
+}
+
+static void dcn35_set_low_power_state(struct clk_mgr *clk_mgr_base)
+{
+ int display_count;
+ struct dc *dc = clk_mgr_base->ctx->dc;
+ struct dc_state *context = dc->current_state;
+
+ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
+ display_count = dcn35_get_active_display_cnt_wa(dc, context);
+ /* if we can go lower, go lower */
+ if (display_count == 0)
+ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
+ }
+}
+
+static void dcn35_set_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dc *dc = clk_mgr_base->ctx->dc;
+ uint32_t val = dcn35_smu_read_ips_scratch(clk_mgr);
+
+ if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
+ dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
+ val = val & ~DMUB_IPS1_ALLOW_MASK;
+ val = val & ~DMUB_IPS2_ALLOW_MASK;
+ } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
+ val |= DMUB_IPS1_ALLOW_MASK;
+ val |= DMUB_IPS2_ALLOW_MASK;
+ } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
+ val = val & ~DMUB_IPS1_ALLOW_MASK;
+ val |= DMUB_IPS2_ALLOW_MASK;
+ } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
+ val = val & ~DMUB_IPS1_ALLOW_MASK;
+ val = val & ~DMUB_IPS2_ALLOW_MASK;
+ }
+
+ if (!allow_idle) {
+ val |= DMUB_IPS1_ALLOW_MASK;
+ val |= DMUB_IPS2_ALLOW_MASK;
+ }
+
+ dcn35_smu_write_ips_scratch(clk_mgr, val);
+}
+
+static void dcn35_exit_low_power_state(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+	// SMU optimization is performed as part of low power state exit.
+	dcn35_smu_exit_low_power_state(clk_mgr);
+}
+
+static bool dcn35_is_ips_supported(struct clk_mgr *clk_mgr_base)
+{
+	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+	return dcn35_smu_get_ips_supported(clk_mgr);
+}
+
+static uint32_t dcn35_get_idle_state(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ return dcn35_smu_read_ips_scratch(clk_mgr);
+}
+
+static void dcn35_init_clocks_fpga(struct clk_mgr *clk_mgr)
+{
+ dcn35_init_clocks(clk_mgr);
+
+/* TODO: Implement the functions and remove the ifndef guard */
+}
+
+static void dcn35_update_clocks_fpga(struct clk_mgr *clk_mgr,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+ struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
+ int fclk_adj = new_clocks->fclk_khz;
+
+ /* TODO: remove this after correctly set by DML */
+ new_clocks->dcfclk_khz = 400000;
+ new_clocks->socclk_khz = 400000;
+
+ /* Min fclk = 1.2GHz since all the extra scemi logic seems to run off of it */
+ //int fclk_adj = new_clocks->fclk_khz > 1200000 ? new_clocks->fclk_khz : 1200000;
+ new_clocks->fclk_khz = 4320000;
+
+ if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr->clks.phyclk_khz)) {
+ clk_mgr->clks.phyclk_khz = new_clocks->phyclk_khz;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr->clks.dcfclk_khz)) {
+ clk_mgr->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+ }
+
+ if (should_set_clock(safe_to_lower,
+ new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) {
+ clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr->clks.socclk_khz)) {
+ clk_mgr->clks.socclk_khz = new_clocks->socclk_khz;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr->clks.dramclk_khz)) {
+ clk_mgr->clks.dramclk_khz = new_clocks->dramclk_khz;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->clks.dppclk_khz)) {
+ clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz;
+ }
+
+ if (should_set_clock(safe_to_lower, fclk_adj, clk_mgr->clks.fclk_khz)) {
+ clk_mgr->clks.fclk_khz = fclk_adj;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz)) {
+ clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
+ }
+
+ /* Both fclk and ref_dppclk run on the same scemi clock.
+ * So take the higher value since the DPP DTO is typically programmed
+ * such that max dppclk is 1:1 with ref_dppclk.
+ */
+ if (clk_mgr->clks.fclk_khz > clk_mgr->clks.dppclk_khz)
+ clk_mgr->clks.dppclk_khz = clk_mgr->clks.fclk_khz;
+ if (clk_mgr->clks.dppclk_khz > clk_mgr->clks.fclk_khz)
+ clk_mgr->clks.fclk_khz = clk_mgr->clks.dppclk_khz;
+
+	clk_mgr_int->dccg->ref_dppclk = clk_mgr->clks.fclk_khz;
+
+ /* TODO: set dtbclk in correct place */
+ clk_mgr->clks.dtbclk_en = true;
+ dm_set_dcn_clocks(clk_mgr->ctx, &clk_mgr->clks);
+ dcn35_update_clocks_update_dpp_dto(clk_mgr_int, context, safe_to_lower);
+
+ dcn35_update_clocks_update_dtb_dto(clk_mgr_int, context, clk_mgr->clks.ref_dtbclk_khz);
+}
+
+static struct clk_mgr_funcs dcn35_funcs = {
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
+ .update_clocks = dcn35_update_clocks,
+ .init_clocks = dcn35_init_clocks,
+ .enable_pme_wa = dcn35_enable_pme_wa,
+ .are_clock_states_equal = dcn35_are_clock_states_equal,
+ .notify_wm_ranges = dcn35_notify_wm_ranges,
+ .set_low_power_state = dcn35_set_low_power_state,
+ .exit_low_power_state = dcn35_exit_low_power_state,
+ .is_ips_supported = dcn35_is_ips_supported,
+ .set_idle_state = dcn35_set_idle_state,
+	.get_idle_state = dcn35_get_idle_state,
+};
+
+struct clk_mgr_funcs dcn35_fpga_funcs = {
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .update_clocks = dcn35_update_clocks_fpga,
+ .init_clocks = dcn35_init_clocks_fpga,
+ .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
+};
+
+void dcn35_clk_mgr_construct(
+ struct dc_context *ctx,
+ struct clk_mgr_dcn35 *clk_mgr,
+ struct pp_smu_funcs *pp_smu,
+ struct dccg *dccg)
+{
+ struct dcn35_smu_dpm_clks smu_dpm_clks = { 0 };
+	struct clk_log_info log_info = {0};
+
+ clk_mgr->base.base.ctx = ctx;
+ clk_mgr->base.base.funcs = &dcn35_funcs;
+
+ clk_mgr->base.pp_smu = pp_smu;
+
+ clk_mgr->base.dccg = dccg;
+ clk_mgr->base.dfs_bypass_disp_clk = 0;
+
+ clk_mgr->base.dprefclk_ss_percentage = 0;
+ clk_mgr->base.dprefclk_ss_divider = 1000;
+ clk_mgr->base.ss_on_dprefclk = false;
+ clk_mgr->base.dfs_ref_freq_khz = 48000;
+
+ clk_mgr->smu_wm_set.wm_set = (struct dcn35_watermarks *)dm_helpers_allocate_gpu_mem(
+ clk_mgr->base.base.ctx,
+ DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+ sizeof(struct dcn35_watermarks),
+ &clk_mgr->smu_wm_set.mc_address.quad_part);
+
+ if (!clk_mgr->smu_wm_set.wm_set) {
+ clk_mgr->smu_wm_set.wm_set = &dummy_wms;
+ clk_mgr->smu_wm_set.mc_address.quad_part = 0;
+ }
+ ASSERT(clk_mgr->smu_wm_set.wm_set);
+
+ smu_dpm_clks.dpm_clks = (DpmClocks_t_dcn35 *)dm_helpers_allocate_gpu_mem(
+ clk_mgr->base.base.ctx,
+ DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+ sizeof(DpmClocks_t_dcn35),
+ &smu_dpm_clks.mc_address.quad_part);
+
+ if (smu_dpm_clks.dpm_clks == NULL) {
+ smu_dpm_clks.dpm_clks = &dummy_clocks;
+ smu_dpm_clks.mc_address.quad_part = 0;
+ }
+
+ ASSERT(smu_dpm_clks.dpm_clks);
+
+ clk_mgr->base.smu_ver = dcn35_smu_get_smu_version(&clk_mgr->base);
+
+ if (clk_mgr->base.smu_ver)
+ clk_mgr->base.smu_present = true;
+
+ /* TODO: Check we get what we expect during bringup */
+ clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);
+
+ if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
+ dcn35_bw_params.wm_table = lpddr5_wm_table;
+ } else {
+ dcn35_bw_params.wm_table = ddr5_wm_table;
+ }
+ /* Saved clocks configured at boot for debug purposes */
+ dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
+
+ clk_mgr->base.base.dprefclk_khz = dcn35_smu_get_dprefclk(&clk_mgr->base);
+ clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
+
+ dce_clock_read_ss_info(&clk_mgr->base);
+	/* When the clock source is FCH, it could have spread spectrum (same clock source as the DPREF clock). */
+
+ clk_mgr->base.base.bw_params = &dcn35_bw_params;
+
+	if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
+		int i;
+
+		dcn35_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
+ DC_LOG_SMU("NumDcfClkLevelsEnabled: %d\n"
+ "NumDispClkLevelsEnabled: %d\n"
+ "NumSocClkLevelsEnabled: %d\n"
+ "VcnClkLevelsEnabled: %d\n"
+ "FClkLevelsEnabled: %d\n"
+ "NumMemPstatesEnabled: %d\n"
+ "MinGfxClk: %d\n"
+ "MaxGfxClk: %d\n",
+ smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->VcnClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumFclkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumMemPstatesEnabled,
+ smu_dpm_clks.dpm_clks->MinGfxClk,
+ smu_dpm_clks.dpm_clks->MaxGfxClk);
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->DcfClocks[%d] = %d\n",
+ i,
+ smu_dpm_clks.dpm_clks->DcfClocks[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->DispClocks[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->DispClocks[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocClocks[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->SocClocks[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumFclkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->FclkClocks_Freq[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->FclkClocks_Freq[i]);
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->FclkClocks_Voltage[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->FclkClocks_Voltage[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled; i++)
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocVoltage[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->SocVoltage[i]);
+
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumMemPstatesEnabled; i++) {
+			DC_LOG_SMU("smu_dpm_clks.dpm_clks->MemPstateTable[%d].UClk = %d\n"
+ "smu_dpm_clks.dpm_clks->MemPstateTable[%d].MemClk= %d\n"
+ "smu_dpm_clks.dpm_clks->MemPstateTable[%d].Voltage = %d\n",
+ i, smu_dpm_clks.dpm_clks->MemPstateTable[i].UClk,
+ i, smu_dpm_clks.dpm_clks->MemPstateTable[i].MemClk,
+ i, smu_dpm_clks.dpm_clks->MemPstateTable[i].Voltage);
+ }
+
+ if (ctx->dc_bios && ctx->dc_bios->integrated_info && ctx->dc->config.use_default_clock_table == false) {
+ dcn35_clk_mgr_helper_populate_bw_params(
+ &clk_mgr->base,
+ ctx->dc_bios->integrated_info,
+ smu_dpm_clks.dpm_clks);
+ }
+ }
+
+ if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0)
+ dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+ smu_dpm_clks.dpm_clks);
+
+ if (ctx->dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) {
+ bool ips_support = false;
+
+		/* avoid calling PMFW at init */
+ ips_support = dcn35_smu_get_ips_supported(&clk_mgr->base);
+ if (ips_support) {
+ ctx->dc->debug.ignore_pg = false;
+ ctx->dc->debug.disable_dpp_power_gate = false;
+ ctx->dc->debug.disable_hubp_power_gate = false;
+ ctx->dc->debug.disable_dsc_power_gate = false;
+ } else {
+			/* PMFW does not support IPS; reset the config control flag and disable it entirely. */
+			ctx->dc->config.disable_ips = DMUB_IPS_DISABLE_ALL;
+ }
+ }
+}
+
+void dcn35_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int)
+{
+ struct clk_mgr_dcn35 *clk_mgr = TO_CLK_MGR_DCN35(clk_mgr_int);
+
+ if (clk_mgr->smu_wm_set.wm_set && clk_mgr->smu_wm_set.mc_address.quad_part != 0)
+ dm_helpers_free_gpu_mem(clk_mgr_int->base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+ clk_mgr->smu_wm_set.wm_set);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h
new file mode 100644
index 000000000..1203dc605
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN35_CLK_MGR_H__
+#define __DCN35_CLK_MGR_H__
+#include "clk_mgr_internal.h"
+
+#define NUM_CLOCK_SOURCES 5
+
+struct dcn35_watermarks;
+
+struct dcn35_smu_watermark_set {
+ struct dcn35_watermarks *wm_set;
+ union large_integer mc_address;
+};
+
+struct dcn35_ss_info_table {
+ uint32_t ss_divider;
+ uint32_t ss_percentage[NUM_CLOCK_SOURCES];
+};
+
+struct clk_mgr_dcn35 {
+ struct clk_mgr_internal base;
+ struct dcn35_smu_watermark_set smu_wm_set;
+};
+
+bool dcn35_are_clock_states_equal(struct dc_clocks *a,
+ struct dc_clocks *b);
+void dcn35_init_clocks(struct clk_mgr *clk_mgr);
+void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
+ struct dc_state *context,
+ bool safe_to_lower);
+
+void dcn35_clk_mgr_construct(struct dc_context *ctx,
+ struct clk_mgr_dcn35 *clk_mgr,
+ struct pp_smu_funcs *pp_smu,
+ struct dccg *dccg);
+
+void dcn35_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int);
+
+#endif //__DCN35_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
new file mode 100644
index 000000000..b6b8c3ca1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
@@ -0,0 +1,471 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+
+#include "core_types.h"
+#include "clk_mgr_internal.h"
+#include "reg_helper.h"
+#include "dm_helpers.h"
+#include "dcn35_smu.h"
+
+#include "mp/mp_14_0_0_offset.h"
+#include "mp/mp_14_0_0_sh_mask.h"
+
+/* TODO: Use the real headers when they're correct */
+#define MP1_BASE__INST0_SEG0 0x00016000
+#define MP1_BASE__INST0_SEG1 0x0243FC00
+#define MP1_BASE__INST0_SEG2 0x00DC0000
+#define MP1_BASE__INST0_SEG3 0x00E00000
+#define MP1_BASE__INST0_SEG4 0x00E40000
+#define MP1_BASE__INST0_SEG5 0
+
+#ifdef BASE_INNER
+#undef BASE_INNER
+#endif
+
+#define BASE_INNER(seg) MP1_BASE__INST0_SEG ## seg
+
+#define BASE(seg) BASE_INNER(seg)
+
+#define REG(reg_name) (BASE(reg##reg_name##_BASE_IDX) + reg##reg_name)
+
+#define FN(reg_name, field) \
+ FD(reg_name##__##field)
+
+#include "logger_types.h"
+#undef DC_LOGGER
+#define DC_LOGGER \
+ CTX->logger
+#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); }
+
+#define VBIOSSMC_MSG_TestMessage 0x1
+#define VBIOSSMC_MSG_GetSmuVersion 0x2
+#define VBIOSSMC_MSG_PowerUpGfx 0x3
+#define VBIOSSMC_MSG_SetDispclkFreq 0x4
+#define VBIOSSMC_MSG_SetDprefclkFreq 0x5 //Not used. DPRef is constant
+#define VBIOSSMC_MSG_SetDppclkFreq 0x6
+#define VBIOSSMC_MSG_SetHardMinDcfclkByFreq 0x7
+#define VBIOSSMC_MSG_SetMinDeepSleepDcfclk 0x8
+#define VBIOSSMC_MSG_SetPhyclkVoltageByFreq   0x9  //Keep it in case VMIN does not support phy clk
+#define VBIOSSMC_MSG_GetFclkFrequency 0xA
+#define VBIOSSMC_MSG_SetDisplayCount 0xB //Not used anymore
+#define VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown 0xC //To ask PMFW to turn off the TMDP 48MHz refclk during display off, to save power
+#define VBIOSSMC_MSG_UpdatePmeRestore 0xD
+#define VBIOSSMC_MSG_SetVbiosDramAddrHigh 0xE //Used for WM table txfr
+#define VBIOSSMC_MSG_SetVbiosDramAddrLow 0xF
+#define VBIOSSMC_MSG_TransferTableSmu2Dram 0x10
+#define VBIOSSMC_MSG_TransferTableDram2Smu 0x11
+#define VBIOSSMC_MSG_SetDisplayIdleOptimizations 0x12
+#define VBIOSSMC_MSG_GetDprefclkFreq 0x13
+#define VBIOSSMC_MSG_GetDtbclkFreq 0x14
+#define VBIOSSMC_MSG_AllowZstatesEntry 0x15
+#define VBIOSSMC_MSG_DisallowZstatesEntry 0x16
+#define VBIOSSMC_MSG_SetDtbClk 0x17
+#define VBIOSSMC_MSG_DispPsrEntry 0x18 ///< Display PSR entry, DMU
+#define VBIOSSMC_MSG_DispPsrExit 0x19 ///< Display PSR exit, DMU
+#define VBIOSSMC_MSG_DisableLSdma 0x1A ///< Disable LSDMA; only sent by VBIOS
+#define VBIOSSMC_MSG_DpControllerPhyStatus     0x1B ///< Inform PMFW of the preconditions for turning SLDO2 on/off. bit[0]==1: precondition is met; bits[1-2]: DPPHY number
+#define VBIOSSMC_MSG_QueryIPS2Support 0x1C ///< Return 1: support; else not supported
+#define VBIOSSMC_Message_Count 0x1D
+
+#define VBIOSSMC_Status_BUSY 0x0
+#define VBIOSSMC_Result_OK 0x1
+#define VBIOSSMC_Result_Failed 0xFF
+#define VBIOSSMC_Result_UnknownCmd 0xFE
+#define VBIOSSMC_Result_CmdRejectedPrereq 0xFD
+#define VBIOSSMC_Result_CmdRejectedBusy 0xFC
+
+/*
+ * Function to be used instead of the REG_WAIT macro, because the wait ends
+ * when the register is NOT EQUAL to zero, and because the translation in
+ * msg_if.h won't work with REG_WAIT.
+ */
+static uint32_t dcn35_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries)
+{
+ uint32_t res_val = VBIOSSMC_Status_BUSY;
+
+ do {
+ res_val = REG_READ(MP1_SMN_C2PMSG_91);
+ if (res_val != VBIOSSMC_Status_BUSY)
+ break;
+
+ if (delay_us >= 1000)
+ msleep(delay_us/1000);
+ else if (delay_us > 0)
+ udelay(delay_us);
+ } while (max_retries--);
+
+ return res_val;
+}
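+
+/*
+ * With the values used by the caller below (delay_us = 10,
+ * max_retries = 2000000), a single wait can poll for roughly
+ * 10 us * 2,000,000 = ~20 seconds before giving up and returning whatever
+ * the response register last held.
+ */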
+
+static int dcn35_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
+ unsigned int msg_id,
+ unsigned int param)
+{
+ uint32_t result;
+
+ result = dcn35_smu_wait_for_response(clk_mgr, 10, 2000000);
+ ASSERT(result == VBIOSSMC_Result_OK);
+
+ if (result != VBIOSSMC_Result_OK) {
+ DC_LOG_WARNING("SMU response after wait: %d, msg id = %d\n", result, msg_id);
+
+ if (result == VBIOSSMC_Status_BUSY)
+ return -1;
+ }
+
+ /* First clear response register */
+ REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Status_BUSY);
+
+	/* Set the parameter register for the SMU message; unit is MHz */
+ REG_WRITE(MP1_SMN_C2PMSG_83, param);
+
+ /* Trigger the message transaction by writing the message ID */
+ REG_WRITE(MP1_SMN_C2PMSG_67, msg_id);
+
+ result = dcn35_smu_wait_for_response(clk_mgr, 10, 2000000);
+
+ if (result == VBIOSSMC_Result_Failed) {
+ if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu &&
+ param == TABLE_WATERMARKS)
+ DC_LOG_WARNING("Watermarks table not configured properly by SMU");
+ else
+ ASSERT(0);
+ REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK);
+ DC_LOG_WARNING("SMU response after wait: %d, msg id = %d\n", result, msg_id);
+ return -1;
+ }
+
+ if (IS_SMU_TIMEOUT(result)) {
+ ASSERT(0);
+ result = dcn35_smu_wait_for_response(clk_mgr, 10, 2000000);
+ //dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000);
+ DC_LOG_WARNING("SMU response after wait: %d, msg id = %d\n", result, msg_id);
+ }
+
+ return REG_READ(MP1_SMN_C2PMSG_83);
+}
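+
+/*
+ * Example message flow (a sketch of what the helper above does for a
+ * hypothetical 600 MHz DISPCLK request):
+ *
+ *   REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Status_BUSY);        // clear response
+ *   REG_WRITE(MP1_SMN_C2PMSG_83, 600);                         // parameter, in MHz
+ *   REG_WRITE(MP1_SMN_C2PMSG_67, VBIOSSMC_MSG_SetDispclkFreq); // trigger message
+ *   // ...poll MP1_SMN_C2PMSG_91 until it leaves VBIOSSMC_Status_BUSY, then
+ *   // read the result (the actual clock, in MHz) back from MP1_SMN_C2PMSG_83.
+ */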
+
+int dcn35_smu_get_smu_version(struct clk_mgr_internal *clk_mgr)
+{
+ return dcn35_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_GetSmuVersion,
+ 0);
+}
+
+
+int dcn35_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz)
+{
+ int actual_dispclk_set_mhz = -1;
+
+ if (!clk_mgr->smu_present)
+ return requested_dispclk_khz;
+
+ /* Unit of SMU msg parameter is Mhz */
+ actual_dispclk_set_mhz = dcn35_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_SetDispclkFreq,
+ khz_to_mhz_ceil(requested_dispclk_khz));
+
+ smu_print("requested_dispclk_khz = %d, actual_dispclk_set_mhz: %d\n", requested_dispclk_khz, actual_dispclk_set_mhz);
+ return actual_dispclk_set_mhz * 1000;
+}
+
+int dcn35_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
+{
+ int actual_dprefclk_set_mhz = -1;
+
+ if (!clk_mgr->smu_present)
+ return clk_mgr->base.dprefclk_khz;
+
+ actual_dprefclk_set_mhz = dcn35_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_SetDprefclkFreq,
+ khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz));
+
+	/* TODO: add code for programming the DP DTO; currently this is done by the command table */
+
+ return actual_dprefclk_set_mhz * 1000;
+}
+
+int dcn35_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz)
+{
+ int actual_dcfclk_set_mhz = -1;
+
+ if (!clk_mgr->smu_present)
+ return requested_dcfclk_khz;
+
+ actual_dcfclk_set_mhz = dcn35_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
+ khz_to_mhz_ceil(requested_dcfclk_khz));
+
+ smu_print("requested_dcfclk_khz = %d, actual_dcfclk_set_mhz: %d\n", requested_dcfclk_khz, actual_dcfclk_set_mhz);
+
+ return actual_dcfclk_set_mhz * 1000;
+}
+
+int dcn35_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz)
+{
+ int actual_min_ds_dcfclk_mhz = -1;
+
+ if (!clk_mgr->smu_present)
+ return requested_min_ds_dcfclk_khz;
+
+ actual_min_ds_dcfclk_mhz = dcn35_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_SetMinDeepSleepDcfclk,
+ khz_to_mhz_ceil(requested_min_ds_dcfclk_khz));
+
+ smu_print("requested_min_ds_dcfclk_khz = %d, actual_min_ds_dcfclk_mhz: %d\n", requested_min_ds_dcfclk_khz, actual_min_ds_dcfclk_mhz);
+
+ return actual_min_ds_dcfclk_mhz * 1000;
+}
+
+int dcn35_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz)
+{
+ int actual_dppclk_set_mhz = -1;
+
+ if (!clk_mgr->smu_present)
+ return requested_dpp_khz;
+
+ actual_dppclk_set_mhz = dcn35_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_SetDppclkFreq,
+ khz_to_mhz_ceil(requested_dpp_khz));
+
+ smu_print("requested_dpp_khz = %d, actual_dppclk_set_mhz: %d\n", requested_dpp_khz, actual_dppclk_set_mhz);
+
+ return actual_dppclk_set_mhz * 1000;
+}
+
+void dcn35_smu_set_display_idle_optimization(struct clk_mgr_internal *clk_mgr, uint32_t idle_info)
+{
+ if (!clk_mgr->base.ctx->dc->debug.pstate_enabled)
+ return;
+
+ if (!clk_mgr->smu_present)
+ return;
+
+	//TODO: Work with the SMU team to define optimization options.
+ dcn35_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_SetDisplayIdleOptimizations,
+ idle_info);
+ smu_print("VBIOSSMC_MSG_SetDisplayIdleOptimizations idle_info = %d\n", idle_info);
+}
+
+void dcn35_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
+{
+ union display_idle_optimization_u idle_info = { 0 };
+
+ if (!clk_mgr->smu_present)
+ return;
+
+ if (enable) {
+ idle_info.idle_info.df_request_disabled = 1;
+ idle_info.idle_info.phy_ref_clk_off = 1;
+ }
+
+ dcn35_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_SetDisplayIdleOptimizations,
+ idle_info.data);
+ smu_print("dcn35_smu_enable_phy_refclk_pwrdwn = %d\n", enable ? 1 : 0);
+}
+
+void dcn35_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)
+{
+ if (!clk_mgr->smu_present)
+ return;
+
+ dcn35_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_UpdatePmeRestore,
+ 0);
+}
+
+void dcn35_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high)
+{
+ if (!clk_mgr->smu_present)
+ return;
+
+ dcn35_smu_send_msg_with_param(clk_mgr,
+ VBIOSSMC_MSG_SetVbiosDramAddrHigh, addr_high);
+}
+
+void dcn35_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low)
+{
+ if (!clk_mgr->smu_present)
+ return;
+
+ dcn35_smu_send_msg_with_param(clk_mgr,
+ VBIOSSMC_MSG_SetVbiosDramAddrLow, addr_low);
+}
+
+void dcn35_smu_transfer_dpm_table_smu_2_dram(struct clk_mgr_internal *clk_mgr)
+{
+ if (!clk_mgr->smu_present)
+ return;
+
+ dcn35_smu_send_msg_with_param(clk_mgr,
+ VBIOSSMC_MSG_TransferTableSmu2Dram, TABLE_DPMCLOCKS);
+}
+
+void dcn35_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)
+{
+ if (!clk_mgr->smu_present)
+ return;
+
+ dcn35_smu_send_msg_with_param(clk_mgr,
+ VBIOSSMC_MSG_TransferTableDram2Smu, TABLE_WATERMARKS);
+}
+
+void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zstate_support_state support)
+{
+ unsigned int msg_id, param;
+
+ if (!clk_mgr->smu_present)
+ return;
+
+	switch (support) {
+ case DCN_ZSTATE_SUPPORT_ALLOW:
+ msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
+ param = (1 << 10) | (1 << 9) | (1 << 8);
+ break;
+
+ case DCN_ZSTATE_SUPPORT_DISALLOW:
+ msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
+ param = 0;
+ break;
+
+ case DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY:
+ msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
+ param = (1 << 10);
+ break;
+
+ case DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY:
+ msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
+ param = (1 << 10) | (1 << 8);
+ break;
+
+ case DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY:
+ msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
+ param = (1 << 8);
+ break;
+
+ default: //DCN_ZSTATE_SUPPORT_UNKNOWN
+ msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
+ param = 0;
+ break;
+ }
+
+ dcn35_smu_send_msg_with_param(
+ clk_mgr,
+ msg_id,
+ param);
+ smu_print("dcn35_smu_set_zstate_support msg_id = %d, param = %d\n", msg_id, param);
+}
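+
+/*
+ * From the cases above, the AllowZstatesEntry parameter appears to be a
+ * bitmask: bit 8 gates Z8 and bit 10 gates Z10, while bit 9 (set only in the
+ * unconditional ALLOW case) presumably gates Z9.
+ */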
+
+int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr)
+{
+ int dprefclk;
+
+ if (!clk_mgr->smu_present)
+ return 0;
+
+ dprefclk = dcn35_smu_send_msg_with_param(clk_mgr,
+ VBIOSSMC_MSG_GetDprefclkFreq,
+ 0);
+
+	smu_print("dcn35_smu_get_dprefclk = %d MHz\n", dprefclk);
+ return dprefclk * 1000;
+}
+
+int dcn35_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr)
+{
+ int dtbclk;
+
+ if (!clk_mgr->smu_present)
+ return 0;
+
+ dtbclk = dcn35_smu_send_msg_with_param(clk_mgr,
+ VBIOSSMC_MSG_GetDtbclkFreq,
+ 0);
+
+	smu_print("dcn35_smu_get_dtbclk = %d MHz\n", dtbclk);
+ return dtbclk * 1000;
+}
+/* Arg = 1: turn DTB clk on; 0: turn DTB clk off. When on, it runs at 600 MHz. */
+void dcn35_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable)
+{
+ if (!clk_mgr->smu_present)
+ return;
+
+ dcn35_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_SetDtbClk,
+ enable);
+	smu_print("dcn35_smu_set_dtbclk = %d\n", enable ? 1 : 0);
+}
+
+void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
+{
+ dcn35_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown,
+ enable);
+}
+
+int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr)
+{
+ return dcn35_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_DispPsrExit,
+ 0);
+}
+
+int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr)
+{
+ return dcn35_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_QueryIPS2Support,
+ 0);
+}
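+
+/*
+ * MP1_SMN_C2PMSG_71 is used purely as a scratch register: the IPS allow mask
+ * maintained by dcn35_set_idle_state() in dcn35_clk_mgr.c is parked in it and
+ * read back, rather than being sent as an SMU message.
+ */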
+
+void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param)
+{
+ REG_WRITE(MP1_SMN_C2PMSG_71, param);
+}
+
+uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr)
+{
+ return REG_READ(MP1_SMN_C2PMSG_71);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h
new file mode 100644
index 000000000..2b8e6959a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DAL_DC_35_SMU_H_
+#define DAL_DC_35_SMU_H_
+
+#include "os_types.h"
+
+#ifndef PMFW_DRIVER_IF_H
+#define PMFW_DRIVER_IF_H
+#define PMFW_DRIVER_IF_VERSION 4
+
+typedef enum {
+ DSPCLK_DCFCLK = 0,
+ DSPCLK_DISPCLK,
+ DSPCLK_PIXCLK,
+ DSPCLK_PHYCLK,
+ DSPCLK_COUNT,
+} DSPCLK_e;
+
+typedef struct {
+ uint16_t Freq; // in MHz
+ uint16_t Vid; // min voltage in SVI3 VID
+} DisplayClockTable_t;
+
+typedef struct {
+ uint16_t MinClock; // This is either DCFCLK or SOCCLK (in MHz)
+ uint16_t MaxClock; // This is either DCFCLK or SOCCLK (in MHz)
+ uint16_t MinMclk;
+ uint16_t MaxMclk;
+
+ uint8_t WmSetting;
+ uint8_t WmType; // Used for normal pstate change or memory retraining
+ uint8_t Padding[2];
+} WatermarkRowGeneric_t;
+
+#define NUM_WM_RANGES 4
+#define WM_PSTATE_CHG 0
+#define WM_RETRAINING 1
+
+typedef enum {
+ WM_SOCCLK = 0,
+ WM_DCFCLK,
+ WM_COUNT,
+} WM_CLOCK_e;
+
+typedef struct {
+ // Watermarks
+ WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES];
+
+ uint32_t MmHubPadding[7]; // SMU internal use
+} Watermarks_t;
+
+#define NUM_DCFCLK_DPM_LEVELS 8
+#define NUM_DISPCLK_DPM_LEVELS 8
+#define NUM_DPPCLK_DPM_LEVELS 8
+#define NUM_SOCCLK_DPM_LEVELS 8
+#define NUM_VCN_DPM_LEVELS 8
+#define NUM_SOC_VOLTAGE_LEVELS 8
+#define NUM_VPE_DPM_LEVELS 8
+#define NUM_FCLK_DPM_LEVELS 8
+#define NUM_MEM_PSTATE_LEVELS 4
+
+typedef enum {
+ WCK_RATIO_1_1 = 0, // DDR5, Wck:ck is always 1:1;
+ WCK_RATIO_1_2,
+ WCK_RATIO_1_4,
+ WCK_RATIO_MAX
+} WCK_RATIO_e;
+
+typedef struct {
+ uint32_t UClk;
+ uint32_t MemClk;
+ uint32_t Voltage;
+ uint8_t WckRatio;
+ uint8_t Spare[3];
+} MemPstateTable_t;
+
+//Freq in MHz
+//Voltage in millivolts with 2 fractional bits
+typedef struct {
+ uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
+ uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
+ uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
+ uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
+ uint32_t VClocks[NUM_VCN_DPM_LEVELS];
+ uint32_t DClocks[NUM_VCN_DPM_LEVELS];
+ uint32_t VPEClocks[NUM_VPE_DPM_LEVELS];
+ uint32_t FclkClocks_Freq[NUM_FCLK_DPM_LEVELS];
+ uint32_t FclkClocks_Voltage[NUM_FCLK_DPM_LEVELS];
+ uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
+ MemPstateTable_t MemPstateTable[NUM_MEM_PSTATE_LEVELS];
+
+ uint8_t NumDcfClkLevelsEnabled;
+ uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
+ uint8_t NumSocClkLevelsEnabled;
+ uint8_t VcnClkLevelsEnabled; //Applies to both Vclk and Dclk
+ uint8_t VpeClkLevelsEnabled;
+ uint8_t NumMemPstatesEnabled;
+ uint8_t NumFclkLevelsEnabled;
+ uint8_t spare[2];
+
+ uint32_t MinGfxClk;
+ uint32_t MaxGfxClk;
+} DpmClocks_t_dcn35;
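+
+/*
+ * Example of the voltage encoding noted above: with two fractional bits, a
+ * raw FclkClocks_Voltage/SocVoltage value of 3012 decodes to
+ * 3012 / 4 = 753 mV.
+ */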
+
+// Throttler Status Bitmask
+
+#define TABLE_BIOS_IF 0 // Called by BIOS
+#define TABLE_WATERMARKS 1 // Called by DAL through VBIOS
+#define TABLE_CUSTOM_DPM 2 // Called by Driver
+#define TABLE_SPARE1 3
+#define TABLE_DPMCLOCKS 4 // Called by Driver
+#define TABLE_MOMENTARY_PM 5 // Called by Tools
+#define TABLE_MODERN_STDBY 6 // Called by Tools for Modern Standby Log
+#define TABLE_SMU_METRICS 7 // Called by Driver
+#define TABLE_COUNT 8
+
+#endif
+
+struct dcn35_watermarks {
+ // Watermarks
+ WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES];
+
+ uint32_t MmHubPadding[7]; // SMU internal use
+};
+
+struct dcn35_smu_dpm_clks {
+ DpmClocks_t_dcn35 *dpm_clks;
+ union large_integer mc_address;
+};
+
+/* TODO: taken from vgh, may not be correct */
+struct display_idle_optimization {
+ unsigned int df_request_disabled : 1;
+ unsigned int phy_ref_clk_off : 1;
+ unsigned int s0i2_rdy : 1;
+ unsigned int reserved : 29;
+};
+
+union display_idle_optimization_u {
+ struct display_idle_optimization idle_info;
+ uint32_t data;
+};
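+
+/*
+ * Illustrative use of the union (this mirrors what
+ * dcn35_smu_enable_phy_refclk_pwrdwn() does in dcn35_smu.c):
+ *
+ *   union display_idle_optimization_u idle_info = { 0 };
+ *
+ *   idle_info.idle_info.df_request_disabled = 1;
+ *   idle_info.idle_info.phy_ref_clk_off = 1;
+ *   dcn35_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
+ */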
+
+int dcn35_smu_get_smu_version(struct clk_mgr_internal *clk_mgr);
+int dcn35_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz);
+int dcn35_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr);
+int dcn35_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz);
+int dcn35_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz);
+int dcn35_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz);
+void dcn35_smu_set_display_idle_optimization(struct clk_mgr_internal *clk_mgr, uint32_t idle_info);
+void dcn35_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable);
+void dcn35_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr);
+void dcn35_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high);
+void dcn35_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low);
+void dcn35_smu_transfer_dpm_table_smu_2_dram(struct clk_mgr_internal *clk_mgr);
+void dcn35_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
+
+void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zstate_support_state support);
+void dcn35_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable);
+void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable);
+
+int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr);
+int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr);
+int dcn35_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr);
+int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr);
+void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param);
+uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr);
+#endif /* DAL_DC_35_SMU_H_ */