author     Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-11 08:27:49 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-11 08:27:49 +0000
commit     ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree       b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/gpu/drm/amd/display/dc/dcn31
parent     Initial commit. (diff)
download   linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz
           linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip
Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/amd/display/dc/dcn31')
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/Makefile | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.c | 92
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.h | 126
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c | 119
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.h | 111
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c | 755
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h | 233
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c | 681
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h | 286
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c | 622
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h | 223
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c | 775
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h | 245
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c | 1067
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h | 147
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c | 113
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.h | 246
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c | 603
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h | 59
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c | 157
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.h | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c | 310
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h | 267
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c | 161
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.h | 40
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c | 2214
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h | 97
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c | 87
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h | 162
29 files changed, 10051 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
new file mode 100644
index 000000000..ec041e3cd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
@@ -0,0 +1,20 @@
+#
+# (c) Copyright 2020 Advanced Micro Devices, Inc. All rights reserved
+#
+# All rights reserved. This notice is intended as a precaution against
+# inadvertent publication and does not imply publication or any waiver
+# of confidentiality. The year included in the foregoing notice is the
+# year of creation of the work.
+#
+# Authors: AMD
+#
+# Makefile for dcn31.
+
+DCN31 = dcn31_resource.o dcn31_hubbub.o dcn31_hwseq.o dcn31_init.o dcn31_hubp.o \
+ dcn31_dccg.o dcn31_optc.o dcn31_dio_link_encoder.o dcn31_panel_cntl.o \
+ dcn31_apg.o dcn31_hpo_dp_stream_encoder.o dcn31_hpo_dp_link_encoder.o \
+ dcn31_afmt.o dcn31_vpg.o
+
+AMD_DAL_DCN31 = $(addprefix $(AMDDALPATH)/dc/dcn31/,$(DCN31))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCN31)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.c
new file mode 100644
index 000000000..d380a8ec2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc_bios_types.h"
+#include "hw_shared.h"
+#include "dcn30/dcn30_afmt.h"
+#include "dcn31_afmt.h"
+#include "reg_helper.h"
+#include "dc/dc.h"
+
+#define DC_LOGGER \
+ afmt31->base.ctx->logger
+
+#define REG(reg)\
+ (afmt31->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ afmt31->afmt_shift->field_name, afmt31->afmt_mask->field_name
+
+
+#define CTX \
+ afmt31->base.ctx
+
+static struct afmt_funcs dcn31_afmt_funcs = {
+ .setup_hdmi_audio = afmt3_setup_hdmi_audio,
+ .se_audio_setup = afmt3_se_audio_setup,
+ .audio_mute_control = afmt3_audio_mute_control,
+ .audio_info_immediate_update = afmt3_audio_info_immediate_update,
+ .setup_dp_audio = afmt3_setup_dp_audio,
+ .afmt_powerdown = afmt31_powerdown,
+ .afmt_poweron = afmt31_poweron
+};
+
+void afmt31_powerdown(struct afmt *afmt)
+{
+ struct dcn31_afmt *afmt31 = DCN31_AFMT_FROM_AFMT(afmt);
+
+ if (afmt->ctx->dc->debug.enable_mem_low_power.bits.afmt == false)
+ return;
+
+ REG_UPDATE_2(AFMT_MEM_PWR, AFMT_MEM_PWR_DIS, 0, AFMT_MEM_PWR_FORCE, 1);
+}
+
+void afmt31_poweron(struct afmt *afmt)
+{
+ struct dcn31_afmt *afmt31 = DCN31_AFMT_FROM_AFMT(afmt);
+
+ if (afmt->ctx->dc->debug.enable_mem_low_power.bits.afmt == false)
+ return;
+
+ REG_UPDATE_2(AFMT_MEM_PWR, AFMT_MEM_PWR_DIS, 1, AFMT_MEM_PWR_FORCE, 0);
+}
+
+void afmt31_construct(struct dcn31_afmt *afmt31,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn31_afmt_registers *afmt_regs,
+ const struct dcn31_afmt_shift *afmt_shift,
+ const struct dcn31_afmt_mask *afmt_mask)
+{
+ afmt31->base.ctx = ctx;
+
+ afmt31->base.inst = inst;
+ afmt31->base.funcs = &dcn31_afmt_funcs;
+
+ afmt31->regs = afmt_regs;
+ afmt31->afmt_shift = afmt_shift;
+ afmt31->afmt_mask = afmt_mask;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.h
new file mode 100644
index 000000000..802cb05b6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCN31_AFMT_H__
+#define __DAL_DCN31_AFMT_H__
+
+
+#define DCN31_AFMT_FROM_AFMT(afmt)\
+ container_of(afmt, struct dcn31_afmt, base)
+
+#define AFMT_DCN31_REG_LIST(id) \
+ SRI(AFMT_INFOFRAME_CONTROL0, AFMT, id), \
+ SRI(AFMT_VBI_PACKET_CONTROL, AFMT, id), \
+ SRI(AFMT_AUDIO_PACKET_CONTROL, AFMT, id), \
+ SRI(AFMT_AUDIO_PACKET_CONTROL2, AFMT, id), \
+ SRI(AFMT_AUDIO_SRC_CONTROL, AFMT, id), \
+ SRI(AFMT_60958_0, AFMT, id), \
+ SRI(AFMT_60958_1, AFMT, id), \
+ SRI(AFMT_60958_2, AFMT, id), \
+ SRI(AFMT_MEM_PWR, AFMT, id)
+
+struct dcn31_afmt_registers {
+ uint32_t AFMT_INFOFRAME_CONTROL0;
+ uint32_t AFMT_VBI_PACKET_CONTROL;
+ uint32_t AFMT_AUDIO_PACKET_CONTROL;
+ uint32_t AFMT_AUDIO_PACKET_CONTROL2;
+ uint32_t AFMT_AUDIO_SRC_CONTROL;
+ uint32_t AFMT_60958_0;
+ uint32_t AFMT_60958_1;
+ uint32_t AFMT_60958_2;
+ uint32_t AFMT_MEM_PWR;
+};
+
+#define DCN31_AFMT_MASK_SH_LIST(mask_sh)\
+ SE_SF(AFMT0_AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_LAYOUT_OVRD, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_60958_OSF_OVRD, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, mask_sh),\
+ SE_SF(AFMT0_AFMT_MEM_PWR, AFMT_MEM_PWR_FORCE, mask_sh),\
+ SE_SF(AFMT0_AFMT_MEM_PWR, AFMT_MEM_PWR_DIS, mask_sh),\
+ SE_SF(AFMT0_AFMT_MEM_PWR, AFMT_MEM_PWR_STATE, mask_sh)
+
+#define AFMT_DCN31_REG_FIELD_LIST(type) \
+ type AFMT_AUDIO_INFO_UPDATE;\
+ type AFMT_AUDIO_SRC_SELECT;\
+ type AFMT_AUDIO_CHANNEL_ENABLE;\
+ type AFMT_60958_CS_UPDATE;\
+ type AFMT_AUDIO_LAYOUT_OVRD;\
+ type AFMT_60958_OSF_OVRD;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_L;\
+ type AFMT_60958_CS_CLOCK_ACCURACY;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_R;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_2;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_3;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_4;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_5;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_6;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_7;\
+ type AFMT_AUDIO_SAMPLE_SEND;\
+ type AFMT_MEM_PWR_FORCE;\
+ type AFMT_MEM_PWR_DIS;\
+ type AFMT_MEM_PWR_STATE
+
+struct dcn31_afmt_shift {
+ AFMT_DCN31_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn31_afmt_mask {
+ AFMT_DCN31_REG_FIELD_LIST(uint32_t);
+};
+
+struct dcn31_afmt {
+ struct afmt base;
+ const struct dcn31_afmt_registers *regs;
+ const struct dcn31_afmt_shift *afmt_shift;
+ const struct dcn31_afmt_mask *afmt_mask;
+};
+
+void afmt31_poweron(
+ struct afmt *afmt);
+
+void afmt31_powerdown(
+ struct afmt *afmt);
+
+void afmt31_construct(struct dcn31_afmt *afmt31,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn31_afmt_registers *afmt_regs,
+ const struct dcn31_afmt_shift *afmt_shift,
+ const struct dcn31_afmt_mask *afmt_mask);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c
new file mode 100644
index 000000000..05aac3e44
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "dc_bios_types.h"
+#include "hw_shared.h"
+#include "dcn31_apg.h"
+#include "reg_helper.h"
+
+#define DC_LOGGER \
+ apg31->base.ctx->logger
+
+#define REG(reg)\
+ (apg31->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ apg31->apg_shift->field_name, apg31->apg_mask->field_name
+
+
+#define CTX \
+ apg31->base.ctx
+
+
+static void apg31_enable(
+ struct apg *apg)
+{
+ struct dcn31_apg *apg31 = DCN31_APG_FROM_APG(apg);
+
+ /* Reset APG */
+ REG_UPDATE(APG_CONTROL, APG_RESET, 1);
+ REG_WAIT(APG_CONTROL,
+ APG_RESET_DONE, 1,
+ 1, 10);
+ REG_UPDATE(APG_CONTROL, APG_RESET, 0);
+ REG_WAIT(APG_CONTROL,
+ APG_RESET_DONE, 0,
+ 1, 10);
+
+ /* Enable APG */
+ REG_UPDATE(APG_CONTROL2, APG_ENABLE, 1);
+}
+
+static void apg31_disable(
+ struct apg *apg)
+{
+ struct dcn31_apg *apg31 = DCN31_APG_FROM_APG(apg);
+
+ /* Disable APG */
+ REG_UPDATE(APG_CONTROL2, APG_ENABLE, 0);
+}
+
+static void apg31_se_audio_setup(
+ struct apg *apg,
+ unsigned int az_inst,
+ struct audio_info *audio_info)
+{
+ struct dcn31_apg *apg31 = DCN31_APG_FROM_APG(apg);
+
+ ASSERT(audio_info);
+ /* This should not happen; but if it does, bail out so we don't get a BSOD */
+ if (audio_info == NULL)
+ return;
+
+ /* DisplayPort only allows for one audio stream with stream ID 0 */
+ REG_UPDATE(APG_CONTROL2, APG_DP_AUDIO_STREAM_ID, 0);
+
+ /* When running in "pair mode", pairs of audio channels have their own enable;
+ * this is for really old audio drivers */
+ REG_UPDATE(APG_DBG_GEN_CONTROL, APG_DBG_AUDIO_CHANNEL_ENABLE, 0xFF);
+
+ /* Disable forced mem power off */
+ REG_UPDATE(APG_MEM_PWR, APG_MEM_PWR_FORCE, 0);
+}
+
+static struct apg_funcs dcn31_apg_funcs = {
+ .se_audio_setup = apg31_se_audio_setup,
+ .enable_apg = apg31_enable,
+ .disable_apg = apg31_disable,
+};
+
+void apg31_construct(struct dcn31_apg *apg31,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn31_apg_registers *apg_regs,
+ const struct dcn31_apg_shift *apg_shift,
+ const struct dcn31_apg_mask *apg_mask)
+{
+ apg31->base.ctx = ctx;
+
+ apg31->base.inst = inst;
+ apg31->base.funcs = &dcn31_apg_funcs;
+
+ apg31->regs = apg_regs;
+ apg31->apg_shift = apg_shift;
+ apg31->apg_mask = apg_mask;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.h
new file mode 100644
index 000000000..1b81f6773
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCN31_APG_H__
+#define __DAL_DCN31_APG_H__
+
+
+#define DCN31_APG_FROM_APG(apg)\
+ container_of(apg, struct dcn31_apg, base)
+
+#define APG_DCN31_REG_LIST(id) \
+ SRI(APG_CONTROL, APG, id), \
+ SRI(APG_CONTROL2, APG, id),\
+ SRI(APG_MEM_PWR, APG, id),\
+ SRI(APG_DBG_GEN_CONTROL, APG, id)
+
+struct dcn31_apg_registers {
+ uint32_t APG_CONTROL;
+ uint32_t APG_CONTROL2;
+ uint32_t APG_MEM_PWR;
+ uint32_t APG_DBG_GEN_CONTROL;
+};
+
+
+#define DCN31_APG_MASK_SH_LIST(mask_sh)\
+ SE_SF(APG0_APG_CONTROL, APG_RESET, mask_sh),\
+ SE_SF(APG0_APG_CONTROL, APG_RESET_DONE, mask_sh),\
+ SE_SF(APG0_APG_CONTROL2, APG_ENABLE, mask_sh),\
+ SE_SF(APG0_APG_CONTROL2, APG_DP_AUDIO_STREAM_ID, mask_sh),\
+ SE_SF(APG0_APG_DBG_GEN_CONTROL, APG_DBG_AUDIO_CHANNEL_ENABLE, mask_sh),\
+ SE_SF(APG0_APG_MEM_PWR, APG_MEM_PWR_FORCE, mask_sh)
+
+#define APG_DCN31_REG_FIELD_LIST(type) \
+ type APG_RESET;\
+ type APG_RESET_DONE;\
+ type APG_ENABLE;\
+ type APG_DP_AUDIO_STREAM_ID;\
+ type APG_DBG_AUDIO_CHANNEL_ENABLE;\
+ type APG_MEM_PWR_FORCE
+
+struct dcn31_apg_shift {
+ APG_DCN31_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn31_apg_mask {
+ APG_DCN31_REG_FIELD_LIST(uint32_t);
+};
+
+struct apg {
+ const struct apg_funcs *funcs;
+ struct dc_context *ctx;
+ int inst;
+};
+
+struct apg_funcs {
+
+ void (*setup_hdmi_audio)(
+ struct apg *apg);
+
+ void (*se_audio_setup)(
+ struct apg *apg,
+ unsigned int az_inst,
+ struct audio_info *audio_info);
+
+ void (*enable_apg)(
+ struct apg *apg);
+
+ void (*disable_apg)(
+ struct apg *apg);
+};
+
+
+
+struct dcn31_apg {
+ struct apg base;
+ const struct dcn31_apg_registers *regs;
+ const struct dcn31_apg_shift *apg_shift;
+ const struct dcn31_apg_mask *apg_mask;
+};
+
+void apg31_construct(struct dcn31_apg *apg31,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn31_apg_registers *apg_regs,
+ const struct dcn31_apg_shift *apg_shift,
+ const struct dcn31_apg_mask *apg_mask);
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
new file mode 100644
index 000000000..8664f0c4c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
@@ -0,0 +1,755 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "core_types.h"
+#include "dcn31_dccg.h"
+#include "dal_asic_id.h"
+
+#define TO_DCN_DCCG(dccg)\
+ container_of(dccg, struct dcn_dccg, base)
+
+#define REG(reg) \
+ (dccg_dcn->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dccg_dcn->dccg_shift->field_name, dccg_dcn->dccg_mask->field_name
+
+#define CTX \
+ dccg_dcn->base.ctx
+#define DC_LOGGER \
+ dccg->ctx->logger
+
+void dccg31_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (dccg->dpp_clock_gated[dpp_inst]) {
+ /*
+ * Do not update the DPPCLK DTO if the clock is stopped.
+ * It is treated the same as if the pipe itself were in PG.
+ */
+ return;
+ }
+
+ if (dccg->ref_dppclk && req_dppclk) {
+ int ref_dppclk = dccg->ref_dppclk;
+ int modulo, phase;
+
+ // phase / modulo = dpp pipe clk / dpp global clk
+ modulo = 0xff; // use FF at the end
+ phase = ((modulo * req_dppclk) + ref_dppclk - 1) / ref_dppclk;
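+ /* e.g. ref_dppclk = 600 MHz, req_dppclk = 300 MHz:
+ * phase = ceil(255 * 300 / 600) = 128, so dpp pipe clk = 600 * 128/255 ~= 301 MHz
+ * (rounded up, so the generated clock never falls below the request) */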
+
+ if (phase > 0xff) {
+ ASSERT(false);
+ phase = 0xff;
+ }
+
+ REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
+ DPPCLK0_DTO_PHASE, phase,
+ DPPCLK0_DTO_MODULO, modulo);
+ REG_UPDATE(DPPCLK_DTO_CTRL,
+ DPPCLK_DTO_ENABLE[dpp_inst], 1);
+ } else {
+ REG_UPDATE(DPPCLK_DTO_CTRL,
+ DPPCLK_DTO_ENABLE[dpp_inst], 0);
+ }
+ dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
+}
+
+static enum phyd32clk_clock_source get_phy_mux_symclk(
+ struct dcn_dccg *dccg_dcn,
+ enum phyd32clk_clock_source src)
+{
+ if (dccg_dcn->base.ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
+ dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+ if (src == PHYD32CLKC)
+ src = PHYD32CLKF;
+ if (src == PHYD32CLKD)
+ src = PHYD32CLKG;
+ }
+ return src;
+}
+
+static void dccg31_enable_dpstreamclk(struct dccg *dccg, int otg_inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ /* enable the DP stream clock for this pipe, selecting one of the DTBCLKs */
+ switch (otg_inst) {
+ case 0:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE0_EN, 1);
+ break;
+ case 1:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE1_EN, 1);
+ break;
+ case 2:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE2_EN, 1);
+ break;
+ case 3:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE3_EN, 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ DPSTREAMCLK_GATE_DISABLE, 1,
+ DPSTREAMCLK_ROOT_GATE_DISABLE, 1);
+}
+
+static void dccg31_disable_dpstreamclk(struct dccg *dccg, int otg_inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ DPSTREAMCLK_ROOT_GATE_DISABLE, 0,
+ DPSTREAMCLK_GATE_DISABLE, 0);
+
+ switch (otg_inst) {
+ case 0:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE0_EN, 0);
+ break;
+ case 1:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE1_EN, 0);
+ break;
+ case 2:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE2_EN, 0);
+ break;
+ case 3:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE3_EN, 0);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+void dccg31_set_dpstreamclk(
+ struct dccg *dccg,
+ enum streamclk_source src,
+ int otg_inst,
+ int dp_hpo_inst)
+{
+ if (src == REFCLK)
+ dccg31_disable_dpstreamclk(dccg, otg_inst);
+ else
+ dccg31_enable_dpstreamclk(dccg, otg_inst);
+}
+
+void dccg31_enable_symclk32_se(
+ struct dccg *dccg,
+ int hpo_se_inst,
+ enum phyd32clk_clock_source phyd32clk)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ phyd32clk = get_phy_mux_symclk(dccg_dcn, phyd32clk);
+
+ /* select one of the PHYD32CLKs as the source for symclk32_se */
+ switch (hpo_se_inst) {
+ case 0:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE0_GATE_DISABLE, 1,
+ SYMCLK32_ROOT_SE0_GATE_DISABLE, 1);
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE0_SRC_SEL, phyd32clk,
+ SYMCLK32_SE0_EN, 1);
+ break;
+ case 1:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE1_GATE_DISABLE, 1,
+ SYMCLK32_ROOT_SE1_GATE_DISABLE, 1);
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE1_SRC_SEL, phyd32clk,
+ SYMCLK32_SE1_EN, 1);
+ break;
+ case 2:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE2_GATE_DISABLE, 1,
+ SYMCLK32_ROOT_SE2_GATE_DISABLE, 1);
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE2_SRC_SEL, phyd32clk,
+ SYMCLK32_SE2_EN, 1);
+ break;
+ case 3:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE3_GATE_DISABLE, 1,
+ SYMCLK32_ROOT_SE3_GATE_DISABLE, 1);
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE3_SRC_SEL, phyd32clk,
+ SYMCLK32_SE3_EN, 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+void dccg31_disable_symclk32_se(
+ struct dccg *dccg,
+ int hpo_se_inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ /* set refclk as the source for symclk32_se */
+ switch (hpo_se_inst) {
+ case 0:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE0_SRC_SEL, 0,
+ SYMCLK32_SE0_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE0_GATE_DISABLE, 0,
+ SYMCLK32_ROOT_SE0_GATE_DISABLE, 0);
+ break;
+ case 1:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE1_SRC_SEL, 0,
+ SYMCLK32_SE1_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE1_GATE_DISABLE, 0,
+ SYMCLK32_ROOT_SE1_GATE_DISABLE, 0);
+ break;
+ case 2:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE2_SRC_SEL, 0,
+ SYMCLK32_SE2_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE2_GATE_DISABLE, 0,
+ SYMCLK32_ROOT_SE2_GATE_DISABLE, 0);
+ break;
+ case 3:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE3_SRC_SEL, 0,
+ SYMCLK32_SE3_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE3_GATE_DISABLE, 0,
+ SYMCLK32_ROOT_SE3_GATE_DISABLE, 0);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+void dccg31_enable_symclk32_le(
+ struct dccg *dccg,
+ int hpo_le_inst,
+ enum phyd32clk_clock_source phyd32clk)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ phyd32clk = get_phy_mux_symclk(dccg_dcn, phyd32clk);
+
+ /* select one of the PHYD32CLKs as the source for symclk32_le */
+ switch (hpo_le_inst) {
+ case 0:
+ REG_UPDATE_2(SYMCLK32_LE_CNTL,
+ SYMCLK32_LE0_SRC_SEL, phyd32clk,
+ SYMCLK32_LE0_EN, 1);
+ break;
+ case 1:
+ REG_UPDATE_2(SYMCLK32_LE_CNTL,
+ SYMCLK32_LE1_SRC_SEL, phyd32clk,
+ SYMCLK32_LE1_EN, 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+void dccg31_disable_symclk32_le(
+ struct dccg *dccg,
+ int hpo_le_inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ /* set refclk as the source for symclk32_le */
+ switch (hpo_le_inst) {
+ case 0:
+ REG_UPDATE_2(SYMCLK32_LE_CNTL,
+ SYMCLK32_LE0_SRC_SEL, 0,
+ SYMCLK32_LE0_EN, 0);
+ break;
+ case 1:
+ REG_UPDATE_2(SYMCLK32_LE_CNTL,
+ SYMCLK32_LE1_SRC_SEL, 0,
+ SYMCLK32_LE1_EN, 0);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+void dccg31_set_symclk32_le_root_clock_gating(
+ struct dccg *dccg,
+ int hpo_le_inst,
+ bool enable)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+ return;
+
+ switch (hpo_le_inst) {
+ case 0:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_LE0_GATE_DISABLE, enable ? 1 : 0,
+ SYMCLK32_ROOT_LE0_GATE_DISABLE, enable ? 1 : 0);
+ break;
+ case 1:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_LE1_GATE_DISABLE, enable ? 1 : 0,
+ SYMCLK32_ROOT_LE1_GATE_DISABLE, enable ? 1 : 0);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+void dccg31_disable_dscclk(struct dccg *dccg, int inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ return;
+ //DTO must be enabled to generate a 0 Hz clock output
+ switch (inst) {
+ case 0:
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK0_DTO_ENABLE, 1);
+ REG_UPDATE_2(DSCCLK0_DTO_PARAM,
+ DSCCLK0_DTO_PHASE, 0,
+ DSCCLK0_DTO_MODULO, 1);
+ break;
+ case 1:
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK1_DTO_ENABLE, 1);
+ REG_UPDATE_2(DSCCLK1_DTO_PARAM,
+ DSCCLK1_DTO_PHASE, 0,
+ DSCCLK1_DTO_MODULO, 1);
+ break;
+ case 2:
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK2_DTO_ENABLE, 1);
+ REG_UPDATE_2(DSCCLK2_DTO_PARAM,
+ DSCCLK2_DTO_PHASE, 0,
+ DSCCLK2_DTO_MODULO, 1);
+ break;
+ case 3:
+ if (REG(DSCCLK3_DTO_PARAM)) {
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK3_DTO_ENABLE, 1);
+ REG_UPDATE_2(DSCCLK3_DTO_PARAM,
+ DSCCLK3_DTO_PHASE, 0,
+ DSCCLK3_DTO_MODULO, 1);
+ }
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+void dccg31_enable_dscclk(struct dccg *dccg, int inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ return;
+ //Disable DTO
+ switch (inst) {
+ case 0:
+ REG_UPDATE_2(DSCCLK0_DTO_PARAM,
+ DSCCLK0_DTO_PHASE, 0,
+ DSCCLK0_DTO_MODULO, 0);
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK0_DTO_ENABLE, 0);
+ break;
+ case 1:
+ REG_UPDATE_2(DSCCLK1_DTO_PARAM,
+ DSCCLK1_DTO_PHASE, 0,
+ DSCCLK1_DTO_MODULO, 0);
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK1_DTO_ENABLE, 0);
+ break;
+ case 2:
+ REG_UPDATE_2(DSCCLK2_DTO_PARAM,
+ DSCCLK2_DTO_PHASE, 0,
+ DSCCLK2_DTO_MODULO, 0);
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK2_DTO_ENABLE, 0);
+ break;
+ case 3:
+ if (REG(DSCCLK3_DTO_PARAM)) {
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK3_DTO_ENABLE, 0);
+ REG_UPDATE_2(DSCCLK3_DTO_PARAM,
+ DSCCLK3_DTO_PHASE, 0,
+ DSCCLK3_DTO_MODULO, 0);
+ }
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+void dccg31_set_physymclk(
+ struct dccg *dccg,
+ int phy_inst,
+ enum physymclk_clock_source clk_src,
+ bool force_enable)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ /* Force PHYSYMCLK on and select phyd32clk as the source of the clock that is output to the PHY through DCIO */
+ switch (phy_inst) {
+ case 0:
+ if (force_enable) {
+ REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL,
+ PHYASYMCLK_FORCE_EN, 1,
+ PHYASYMCLK_FORCE_SRC_SEL, clk_src);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ PHYASYMCLK_GATE_DISABLE, 1);
+ } else {
+ REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL,
+ PHYASYMCLK_FORCE_EN, 0,
+ PHYASYMCLK_FORCE_SRC_SEL, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ PHYASYMCLK_GATE_DISABLE, 0);
+ }
+ break;
+ case 1:
+ if (force_enable) {
+ REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL,
+ PHYBSYMCLK_FORCE_EN, 1,
+ PHYBSYMCLK_FORCE_SRC_SEL, clk_src);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ PHYBSYMCLK_GATE_DISABLE, 1);
+ } else {
+ REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL,
+ PHYBSYMCLK_FORCE_EN, 0,
+ PHYBSYMCLK_FORCE_SRC_SEL, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ PHYBSYMCLK_GATE_DISABLE, 0);
+ }
+ break;
+ case 2:
+ if (force_enable) {
+ REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL,
+ PHYCSYMCLK_FORCE_EN, 1,
+ PHYCSYMCLK_FORCE_SRC_SEL, clk_src);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ PHYCSYMCLK_GATE_DISABLE, 1);
+ } else {
+ REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL,
+ PHYCSYMCLK_FORCE_EN, 0,
+ PHYCSYMCLK_FORCE_SRC_SEL, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ PHYCSYMCLK_GATE_DISABLE, 0);
+ }
+ break;
+ case 3:
+ if (force_enable) {
+ REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL,
+ PHYDSYMCLK_FORCE_EN, 1,
+ PHYDSYMCLK_FORCE_SRC_SEL, clk_src);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ PHYDSYMCLK_GATE_DISABLE, 1);
+ } else {
+ REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL,
+ PHYDSYMCLK_FORCE_EN, 0,
+ PHYDSYMCLK_FORCE_SRC_SEL, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ PHYDSYMCLK_GATE_DISABLE, 0);
+ }
+ break;
+ case 4:
+ if (force_enable) {
+ REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL,
+ PHYESYMCLK_FORCE_EN, 1,
+ PHYESYMCLK_FORCE_SRC_SEL, clk_src);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ PHYESYMCLK_GATE_DISABLE, 1);
+ } else {
+ REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL,
+ PHYESYMCLK_FORCE_EN, 0,
+ PHYESYMCLK_FORCE_SRC_SEL, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ PHYESYMCLK_GATE_DISABLE, 0);
+ }
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+/* Controls the generation of pixel valid for OTG in (OTG -> HPO case) */
+void dccg31_set_dtbclk_dto(
+ struct dccg *dccg,
+ const struct dtbclk_dto_params *params)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ int req_dtbclk_khz = params->pixclk_khz;
+ uint32_t dtbdto_div;
+
+ /* Mode DTBDTO Rate DTBCLK_DTO<x>_DIV Register
+ * ODM 4:1 combine pixel rate/4 2
+ * ODM 2:1 combine pixel rate/2 4
+ * non-DSC 4:2:0 mode pixel rate/2 4
+ * DSC native 4:2:0 pixel rate/2 4
+ * DSC native 4:2:2 pixel rate/2 4
+ * Other modes pixel rate 8
+ */
+ if (params->num_odm_segments == 4) {
+ dtbdto_div = 2;
+ req_dtbclk_khz = params->pixclk_khz / 4;
+ } else if ((params->num_odm_segments == 2) ||
+ (params->timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) ||
+ (params->timing->flags.DSC && params->timing->pixel_encoding == PIXEL_ENCODING_YCBCR422
+ && !params->timing->dsc_cfg.ycbcr422_simple)) {
+ dtbdto_div = 4;
+ req_dtbclk_khz = params->pixclk_khz / 2;
+ } else
+ dtbdto_div = 8;
+
+ if (params->ref_dtbclk_khz && req_dtbclk_khz) {
+ uint32_t modulo, phase;
+
+ // phase / modulo = dtbclk / dtbclk ref
+ modulo = params->ref_dtbclk_khz * 1000;
+ phase = div_u64((((unsigned long long)modulo * req_dtbclk_khz) + params->ref_dtbclk_khz - 1),
+ params->ref_dtbclk_khz);
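+ /* e.g. ref_dtbclk = 600000 kHz, req_dtbclk = 297000 kHz:
+ * modulo = 600000000, phase = 297000000, giving a DTO output of
+ * ref * phase / modulo = 297000 kHz */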
+
+ REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+ DTBCLK_DTO_DIV[params->otg_inst], dtbdto_div);
+
+ REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], modulo);
+ REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], phase);
+
+ REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+ DTBCLK_DTO_ENABLE[params->otg_inst], 1);
+
+ REG_WAIT(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+ DTBCLKDTO_ENABLE_STATUS[params->otg_inst], 1,
+ 1, 100);
+
+ /* The recommended programming sequence for enabling the DTBCLK DTO to
+ * generate a valid pixel clock for the HPO DP stream encoder specifies
+ * that the DTO source select should be set only after the DTO is enabled
+ */
+ REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+ PIPE_DTO_SRC_SEL[params->otg_inst], 1);
+ } else {
+ REG_UPDATE_3(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+ DTBCLK_DTO_ENABLE[params->otg_inst], 0,
+ PIPE_DTO_SRC_SEL[params->otg_inst], 0,
+ DTBCLK_DTO_DIV[params->otg_inst], dtbdto_div);
+
+ REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0);
+ REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0);
+ }
+}
+
+void dccg31_set_audio_dtbclk_dto(
+ struct dccg *dccg,
+ const struct dtbclk_dto_params *params)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (params->ref_dtbclk_khz && params->req_audio_dtbclk_khz) {
+ uint32_t modulo, phase;
+
+ // phase / modulo = dtbclk / dtbclk ref
+ modulo = params->ref_dtbclk_khz * 1000;
+ phase = div_u64((((unsigned long long)modulo * params->req_audio_dtbclk_khz) + params->ref_dtbclk_khz - 1),
+ params->ref_dtbclk_khz);
+
+
+ REG_WRITE(DCCG_AUDIO_DTBCLK_DTO_MODULO, modulo);
+ REG_WRITE(DCCG_AUDIO_DTBCLK_DTO_PHASE, phase);
+
+ //REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
+ // DCCG_AUDIO_DTBCLK_DTO_USE_512FBR_DTO, 1);
+
+ REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
+ DCCG_AUDIO_DTO_SEL, 4); // 04 - DCCG_AUDIO_DTO_SEL_AUDIO_DTO_DTBCLK
+ } else {
+ REG_WRITE(DCCG_AUDIO_DTBCLK_DTO_PHASE, 0);
+ REG_WRITE(DCCG_AUDIO_DTBCLK_DTO_MODULO, 0);
+
+ REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
+ DCCG_AUDIO_DTO_SEL, 3); // 03 - DCCG_AUDIO_DTO_SEL_NO_AUDIO_DTO
+ }
+}
+
+void dccg31_get_dccg_ref_freq(struct dccg *dccg,
+ unsigned int xtalin_freq_inKhz,
+ unsigned int *dccg_ref_freq_inKhz)
+{
+ /*
+ * Assume refclk is sourced from xtalin,
+ * expected to be 24 MHz
+ */
+ *dccg_ref_freq_inKhz = xtalin_freq_inKhz;
+ return;
+}
+
+void dccg31_set_dispclk_change_mode(
+ struct dccg *dccg,
+ enum dentist_dispclk_change_mode change_mode)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_MODE,
+ change_mode == DISPCLK_CHANGE_MODE_RAMPING ? 2 : 0);
+}
+
+void dccg31_init(struct dccg *dccg)
+{
+ /* Set HPO stream encoder to use refclk to avoid case where PHY is
+ * disabled and SYMCLK32 for HPO SE is sourced from PHYD32CLK which
+ * will cause DCN to hang.
+ */
+ dccg31_disable_symclk32_se(dccg, 0);
+ dccg31_disable_symclk32_se(dccg, 1);
+ dccg31_disable_symclk32_se(dccg, 2);
+ dccg31_disable_symclk32_se(dccg, 3);
+
+ dccg31_set_symclk32_le_root_clock_gating(dccg, 0, false);
+ dccg31_set_symclk32_le_root_clock_gating(dccg, 1, false);
+
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) {
+ dccg31_disable_dpstreamclk(dccg, 0);
+ dccg31_disable_dpstreamclk(dccg, 1);
+ dccg31_disable_dpstreamclk(dccg, 2);
+ dccg31_disable_dpstreamclk(dccg, 3);
+ }
+
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) {
+ dccg31_set_physymclk(dccg, 0, PHYSYMCLK_FORCE_SRC_SYMCLK, false);
+ dccg31_set_physymclk(dccg, 1, PHYSYMCLK_FORCE_SRC_SYMCLK, false);
+ dccg31_set_physymclk(dccg, 2, PHYSYMCLK_FORCE_SRC_SYMCLK, false);
+ dccg31_set_physymclk(dccg, 3, PHYSYMCLK_FORCE_SRC_SYMCLK, false);
+ dccg31_set_physymclk(dccg, 4, PHYSYMCLK_FORCE_SRC_SYMCLK, false);
+ }
+}
+
+void dccg31_otg_add_pixel(struct dccg *dccg,
+ uint32_t otg_inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ REG_UPDATE(OTG_PIXEL_RATE_CNTL[otg_inst],
+ OTG_ADD_PIXEL[otg_inst], 1);
+}
+
+void dccg31_otg_drop_pixel(struct dccg *dccg,
+ uint32_t otg_inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ REG_UPDATE(OTG_PIXEL_RATE_CNTL[otg_inst],
+ OTG_DROP_PIXEL[otg_inst], 1);
+}
+
+static const struct dccg_funcs dccg31_funcs = {
+ .update_dpp_dto = dccg31_update_dpp_dto,
+ .get_dccg_ref_freq = dccg31_get_dccg_ref_freq,
+ .dccg_init = dccg31_init,
+ .set_dpstreamclk = dccg31_set_dpstreamclk,
+ .enable_symclk32_se = dccg31_enable_symclk32_se,
+ .disable_symclk32_se = dccg31_disable_symclk32_se,
+ .enable_symclk32_le = dccg31_enable_symclk32_le,
+ .disable_symclk32_le = dccg31_disable_symclk32_le,
+ .set_physymclk = dccg31_set_physymclk,
+ .set_dtbclk_dto = dccg31_set_dtbclk_dto,
+ .set_audio_dtbclk_dto = dccg31_set_audio_dtbclk_dto,
+ .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en,
+ .otg_add_pixel = dccg31_otg_add_pixel,
+ .otg_drop_pixel = dccg31_otg_drop_pixel,
+ .set_dispclk_change_mode = dccg31_set_dispclk_change_mode,
+ .disable_dsc = dccg31_disable_dscclk,
+ .enable_dsc = dccg31_enable_dscclk,
+};
+
+struct dccg *dccg31_create(
+ struct dc_context *ctx,
+ const struct dccg_registers *regs,
+ const struct dccg_shift *dccg_shift,
+ const struct dccg_mask *dccg_mask)
+{
+ struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
+ struct dccg *base;
+
+ if (dccg_dcn == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ base = &dccg_dcn->base;
+ base->ctx = ctx;
+ base->funcs = &dccg31_funcs;
+
+ dccg_dcn->regs = regs;
+ dccg_dcn->dccg_shift = dccg_shift;
+ dccg_dcn->dccg_mask = dccg_mask;
+
+ return &dccg_dcn->base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
new file mode 100644
index 000000000..e3caaacf7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
@@ -0,0 +1,233 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN31_DCCG_H__
+#define __DCN31_DCCG_H__
+
+#include "dcn30/dcn30_dccg.h"
+
+#define DCCG_REG_LIST_DCN31() \
+ SR(DPPCLK_DTO_CTRL),\
+ DCCG_SRII(DTO_PARAM, DPPCLK, 0),\
+ DCCG_SRII(DTO_PARAM, DPPCLK, 1),\
+ DCCG_SRII(DTO_PARAM, DPPCLK, 2),\
+ DCCG_SRII(DTO_PARAM, DPPCLK, 3),\
+ SR(PHYASYMCLK_CLOCK_CNTL),\
+ SR(PHYBSYMCLK_CLOCK_CNTL),\
+ SR(PHYCSYMCLK_CLOCK_CNTL),\
+ SR(PHYDSYMCLK_CLOCK_CNTL),\
+ SR(PHYESYMCLK_CLOCK_CNTL),\
+ SR(DPSTREAMCLK_CNTL),\
+ SR(SYMCLK32_SE_CNTL),\
+ SR(SYMCLK32_LE_CNTL),\
+ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0),\
+ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 1),\
+ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 2),\
+ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 3),\
+ DCCG_SRII(MODULO, DTBCLK_DTO, 0),\
+ DCCG_SRII(MODULO, DTBCLK_DTO, 1),\
+ DCCG_SRII(MODULO, DTBCLK_DTO, 2),\
+ DCCG_SRII(MODULO, DTBCLK_DTO, 3),\
+ DCCG_SRII(PHASE, DTBCLK_DTO, 0),\
+ DCCG_SRII(PHASE, DTBCLK_DTO, 1),\
+ DCCG_SRII(PHASE, DTBCLK_DTO, 2),\
+ DCCG_SRII(PHASE, DTBCLK_DTO, 3),\
+ SR(DCCG_AUDIO_DTBCLK_DTO_MODULO),\
+ SR(DCCG_AUDIO_DTBCLK_DTO_PHASE),\
+ SR(DCCG_AUDIO_DTO_SOURCE),\
+ SR(DENTIST_DISPCLK_CNTL),\
+ SR(DSCCLK0_DTO_PARAM),\
+ SR(DSCCLK1_DTO_PARAM),\
+ SR(DSCCLK2_DTO_PARAM),\
+ SR(DSCCLK_DTO_CTRL),\
+ SR(DCCG_GATE_DISABLE_CNTL2),\
+ SR(DCCG_GATE_DISABLE_CNTL3),\
+ SR(HDMISTREAMCLK0_DTO_PARAM)
+
+
+#define DCCG_MASK_SH_LIST_DCN31(mask_sh) \
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 0, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 1, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 1, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 2, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 2, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 3, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 3, mask_sh),\
+ DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_PHASE, mask_sh),\
+ DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_MODULO, mask_sh),\
+ DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\
+ DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\
+ DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\
+ DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_SRC_SEL, mask_sh),\
+ DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_EN, mask_sh),\
+ DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_SRC_SEL, mask_sh),\
+ DCCG_SF(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_FORCE_EN, mask_sh),\
+ DCCG_SF(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_FORCE_SRC_SEL, mask_sh),\
+ DCCG_SF(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_FORCE_EN, mask_sh),\
+ DCCG_SF(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_FORCE_SRC_SEL, mask_sh),\
+ DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK_PIPE0_EN, mask_sh),\
+ DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK_PIPE1_EN, mask_sh),\
+ DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK_PIPE2_EN, mask_sh),\
+ DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK_PIPE3_EN, mask_sh),\
+ DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE0_SRC_SEL, mask_sh),\
+ DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE1_SRC_SEL, mask_sh),\
+ DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE2_SRC_SEL, mask_sh),\
+ DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE3_SRC_SEL, mask_sh),\
+ DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE0_EN, mask_sh),\
+ DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE1_EN, mask_sh),\
+ DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE2_EN, mask_sh),\
+ DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE3_EN, mask_sh),\
+ DCCG_SF(SYMCLK32_LE_CNTL, SYMCLK32_LE0_SRC_SEL, mask_sh),\
+ DCCG_SF(SYMCLK32_LE_CNTL, SYMCLK32_LE1_SRC_SEL, mask_sh),\
+ DCCG_SF(SYMCLK32_LE_CNTL, SYMCLK32_LE0_EN, mask_sh),\
+ DCCG_SF(SYMCLK32_LE_CNTL, SYMCLK32_LE1_EN, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, DTBCLK_DTO, ENABLE, 0, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, DTBCLK_DTO, ENABLE, 1, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, DTBCLK_DTO, ENABLE, 2, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, DTBCLK_DTO, ENABLE, 3, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, DTBCLKDTO, ENABLE_STATUS, 0, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, DTBCLKDTO, ENABLE_STATUS, 1, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, DTBCLKDTO, ENABLE_STATUS, 2, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, DTBCLKDTO, ENABLE_STATUS, 3, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, PIPE, DTO_SRC_SEL, 0, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, PIPE, DTO_SRC_SEL, 1, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, PIPE, DTO_SRC_SEL, 2, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, PIPE, DTO_SRC_SEL, 3, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, DTBCLK_DTO, DIV, 0, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, DTBCLK_DTO, DIV, 1, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, DTBCLK_DTO, DIV, 2, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, DTBCLK_DTO, DIV, 3, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 0, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 1, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 2, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 3, mask_sh),\
+ DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh),\
+ DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh),\
+ DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_MODE, mask_sh), \
+ DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_PHASE, mask_sh),\
+ DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_MODULO, mask_sh),\
+ DCCG_SF(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_PHASE, mask_sh),\
+ DCCG_SF(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_MODULO, mask_sh),\
+ DCCG_SF(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_PHASE, mask_sh),\
+ DCCG_SF(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_MODULO, mask_sh),\
+ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK0_DTO_ENABLE, mask_sh),\
+ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK1_DTO_ENABLE, mask_sh),\
+ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK2_DTO_ENABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, DPSTREAMCLK_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, DPSTREAMCLK_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE2_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE3_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_PHASE, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_MODULO, mask_sh)
+
+
+struct dccg *dccg31_create(
+ struct dc_context *ctx,
+ const struct dccg_registers *regs,
+ const struct dccg_shift *dccg_shift,
+ const struct dccg_mask *dccg_mask);
+
+void dccg31_init(struct dccg *dccg);
+
+void dccg31_enable_symclk32_se(
+ struct dccg *dccg,
+ int hpo_se_inst,
+ enum phyd32clk_clock_source phyd32clk);
+
+void dccg31_disable_symclk32_se(
+ struct dccg *dccg,
+ int hpo_se_inst);
+
+void dccg31_enable_symclk32_le(
+ struct dccg *dccg,
+ int hpo_le_inst,
+ enum phyd32clk_clock_source phyd32clk);
+
+void dccg31_disable_symclk32_le(
+ struct dccg *dccg,
+ int hpo_le_inst);
+
+void dccg31_set_symclk32_le_root_clock_gating(
+ struct dccg *dccg,
+ int hpo_le_inst,
+ bool enable);
+
+void dccg31_set_physymclk(
+ struct dccg *dccg,
+ int phy_inst,
+ enum physymclk_clock_source clk_src,
+ bool force_enable);
+
+void dccg31_set_audio_dtbclk_dto(
+ struct dccg *dccg,
+ const struct dtbclk_dto_params *params);
+
+void dccg31_update_dpp_dto(
+ struct dccg *dccg,
+ int dpp_inst,
+ int req_dppclk);
+
+void dccg31_get_dccg_ref_freq(
+ struct dccg *dccg,
+ unsigned int xtalin_freq_inKhz,
+ unsigned int *dccg_ref_freq_inKhz);
+
+void dccg31_set_dpstreamclk(
+ struct dccg *dccg,
+ enum streamclk_source src,
+ int otg_inst,
+ int dp_hpo_inst);
+
+void dccg31_set_dtbclk_dto(
+ struct dccg *dccg,
+ const struct dtbclk_dto_params *params);
+
+void dccg31_otg_add_pixel(
+ struct dccg *dccg,
+ uint32_t otg_inst);
+
+void dccg31_otg_drop_pixel(
+ struct dccg *dccg,
+ uint32_t otg_inst);
+
+void dccg31_set_dispclk_change_mode(
+ struct dccg *dccg,
+ enum dentist_dispclk_change_mode change_mode);
+
+void dccg31_disable_dscclk(struct dccg *dccg, int inst);
+
+void dccg31_enable_dscclk(struct dccg *dccg, int inst);
+
+#endif //__DCN31_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
new file mode 100644
index 000000000..4596f3bac
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -0,0 +1,681 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "reg_helper.h"
+
+#include "core_types.h"
+#include "link_encoder.h"
+#include "dcn31_dio_link_encoder.h"
+#include "stream_encoder.h"
+#include "dc_bios_types.h"
+
+#include "gpio_service_interface.h"
+
+#include "link_enc_cfg.h"
+#include "dc_dmub_srv.h"
+#include "dal_asic_id.h"
+#include "link.h"
+
+#define CTX \
+ enc10->base.ctx
+#define DC_LOGGER \
+ enc10->base.ctx->logger
+
+#define REG(reg)\
+ (enc10->link_regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ enc10->link_shift->field_name, enc10->link_mask->field_name
+
+#define IND_REG(index) \
+ (enc10->link_regs->index)
+
+#define AUX_REG(reg)\
+ (enc10->aux_regs->reg)
+
+#define AUX_REG_READ(reg_name) \
+ dm_read_reg(CTX, AUX_REG(reg_name))
+
+#define AUX_REG_WRITE(reg_name, val) \
+ dm_write_reg(CTX, AUX_REG(reg_name), val)
+
+#ifndef MIN
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
+
+static uint8_t phy_id_from_transmitter(enum transmitter t)
+{
+ uint8_t phy_id;
+
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ phy_id = 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ phy_id = 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ phy_id = 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ phy_id = 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ phy_id = 4;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ phy_id = 5;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ phy_id = 6;
+ break;
+ default:
+ phy_id = 0;
+ break;
+ }
+ return phy_id;
+}
+
+static bool has_query_dp_alt(struct link_encoder *enc)
+{
+ struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv;
+
+ if (enc->ctx->dce_version >= DCN_VERSION_3_15)
+ return true;
+
+ /* Supports development firmware and firmware >= 4.0.11 */
+ return dc_dmub_srv &&
+ !(dc_dmub_srv->dmub->fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
+ dc_dmub_srv->dmub->fw_version <= DMUB_FW_VERSION(4, 0, 10));
+}
+
+static bool query_dp_alt_from_dmub(struct link_encoder *enc,
+ union dmub_rb_cmd *cmd)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->query_dp_alt.header.type = DMUB_CMD__VBIOS;
+ cmd->query_dp_alt.header.sub_type =
+ DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT;
+ cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data);
+ cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter);
+
+ if (!dm_execute_dmub_cmd(enc->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ return false;
+
+ return true;
+}
+
+void dcn31_link_encoder_set_dio_phy_mux(
+ struct link_encoder *enc,
+ enum encoder_type_select sel,
+ uint32_t hpo_inst)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ switch (enc->transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ if (sel == ENCODER_TYPE_HDMI_FRL)
+ REG_UPDATE(DIO_LINKA_CNTL,
+ HPO_HDMI_ENC_SEL, hpo_inst);
+ else if (sel == ENCODER_TYPE_DP_128B132B)
+ REG_UPDATE(DIO_LINKA_CNTL,
+ HPO_DP_ENC_SEL, hpo_inst);
+ REG_UPDATE(DIO_LINKA_CNTL,
+ ENC_TYPE_SEL, sel);
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ if (sel == ENCODER_TYPE_HDMI_FRL)
+ REG_UPDATE(DIO_LINKB_CNTL,
+ HPO_HDMI_ENC_SEL, hpo_inst);
+ else if (sel == ENCODER_TYPE_DP_128B132B)
+ REG_UPDATE(DIO_LINKB_CNTL,
+ HPO_DP_ENC_SEL, hpo_inst);
+ REG_UPDATE(DIO_LINKB_CNTL,
+ ENC_TYPE_SEL, sel);
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ if (sel == ENCODER_TYPE_HDMI_FRL)
+ REG_UPDATE(DIO_LINKC_CNTL,
+ HPO_HDMI_ENC_SEL, hpo_inst);
+ else if (sel == ENCODER_TYPE_DP_128B132B)
+ REG_UPDATE(DIO_LINKC_CNTL,
+ HPO_DP_ENC_SEL, hpo_inst);
+ REG_UPDATE(DIO_LINKC_CNTL,
+ ENC_TYPE_SEL, sel);
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ if (sel == ENCODER_TYPE_HDMI_FRL)
+ REG_UPDATE(DIO_LINKD_CNTL,
+ HPO_HDMI_ENC_SEL, hpo_inst);
+ else if (sel == ENCODER_TYPE_DP_128B132B)
+ REG_UPDATE(DIO_LINKD_CNTL,
+ HPO_DP_ENC_SEL, hpo_inst);
+ REG_UPDATE(DIO_LINKD_CNTL,
+ ENC_TYPE_SEL, sel);
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ if (sel == ENCODER_TYPE_HDMI_FRL)
+ REG_UPDATE(DIO_LINKE_CNTL,
+ HPO_HDMI_ENC_SEL, hpo_inst);
+ else if (sel == ENCODER_TYPE_DP_128B132B)
+ REG_UPDATE(DIO_LINKE_CNTL,
+ HPO_DP_ENC_SEL, hpo_inst);
+ REG_UPDATE(DIO_LINKE_CNTL,
+ ENC_TYPE_SEL, sel);
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ if (sel == ENCODER_TYPE_HDMI_FRL)
+ REG_UPDATE(DIO_LINKF_CNTL,
+ HPO_HDMI_ENC_SEL, hpo_inst);
+ else if (sel == ENCODER_TYPE_DP_128B132B)
+ REG_UPDATE(DIO_LINKF_CNTL,
+ HPO_DP_ENC_SEL, hpo_inst);
+ REG_UPDATE(DIO_LINKF_CNTL,
+ ENC_TYPE_SEL, sel);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+}
+
+static void enc31_hw_init(struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+/*
+ 00 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__1to2 : 1/2
+ 01 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__3to4 : 3/4
+ 02 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__7to8 : 7/8
+ 03 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__15to16 : 15/16
+ 04 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__31to32 : 31/32
+ 05 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__63to64 : 63/64
+ 06 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__127to128 : 127/128
+ 07 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__255to256 : 255/256
+*/
+
+/*
+ AUX_REG_UPDATE_5(AUX_DPHY_RX_CONTROL0,
+ AUX_RX_START_WINDOW = 1 [6:4]
+ AUX_RX_RECEIVE_WINDOW = 1 default is 2 [10:8]
+ AUX_RX_HALF_SYM_DETECT_LEN = 1 [13:12] default is 1
+ AUX_RX_TRANSITION_FILTER_EN = 1 [16] default is 1
+ AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT [17] is 0 default is 0
+ AUX_RX_ALLOW_BELOW_THRESHOLD_START [18] is 1 default is 1
+ AUX_RX_ALLOW_BELOW_THRESHOLD_STOP [19] is 1 default is 1
+ AUX_RX_PHASE_DETECT_LEN, [21,20] = 0x3 default is 3
+ AUX_RX_DETECTION_THRESHOLD [30:28] = 1
+*/
+ // dmub will read AUX_DPHY_RX_CONTROL0/AUX_DPHY_TX_CONTROL from vbios table in dp_aux_init
+
+	// AUX_DPHY_TX_REF_CONTROL's AUX_TX_REF_DIV HW default is 0x32;
+ // Set AUX_TX_REF_DIV Divider to generate 2 MHz reference from refclk
+ // 27MHz -> 0xd
+ // 100MHz -> 0x32
+ // 48MHz -> 0x18
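+	// (roughly refclk / 2 MHz: 27 / 2 ~= 13 = 0xd, 100 / 2 = 50 = 0x32,
+	//  48 / 2 = 24 = 0x18)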
+
+ // Set TMDS_CTL0 to 1. This is a legacy setting.
+ REG_UPDATE(TMDS_CTL_BITS, TMDS_CTL0, 1);
+
+ dcn10_aux_initialize(enc10);
+}
+
+static const struct link_encoder_funcs dcn31_link_enc_funcs = {
+ .read_state = link_enc2_read_state,
+ .validate_output_with_stream =
+ dcn30_link_encoder_validate_output_with_stream,
+ .hw_init = enc31_hw_init,
+ .setup = dcn10_link_encoder_setup,
+ .enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
+ .enable_dp_output = dcn31_link_encoder_enable_dp_output,
+ .enable_dp_mst_output = dcn31_link_encoder_enable_dp_mst_output,
+ .disable_output = dcn31_link_encoder_disable_output,
+ .dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings,
+ .dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern,
+ .update_mst_stream_allocation_table =
+ dcn10_link_encoder_update_mst_stream_allocation_table,
+ .psr_program_dp_dphy_fast_training =
+ dcn10_psr_program_dp_dphy_fast_training,
+ .psr_program_secondary_packet = dcn10_psr_program_secondary_packet,
+ .connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe,
+ .enable_hpd = dcn10_link_encoder_enable_hpd,
+ .disable_hpd = dcn10_link_encoder_disable_hpd,
+ .is_dig_enabled = dcn10_is_dig_enabled,
+ .destroy = dcn10_link_encoder_destroy,
+ .fec_set_enable = enc2_fec_set_enable,
+ .fec_set_ready = enc2_fec_set_ready,
+ .fec_is_active = enc2_fec_is_active,
+ .get_dig_frontend = dcn10_get_dig_frontend,
+ .get_dig_mode = dcn10_get_dig_mode,
+ .is_in_alt_mode = dcn31_link_encoder_is_in_alt_mode,
+ .get_max_link_cap = dcn31_link_encoder_get_max_link_cap,
+ .set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux,
+};
+
+void dcn31_link_encoder_construct(
+ struct dcn20_link_encoder *enc20,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dcn10_link_enc_registers *link_regs,
+ const struct dcn10_link_enc_aux_registers *aux_regs,
+ const struct dcn10_link_enc_hpd_registers *hpd_regs,
+ const struct dcn10_link_enc_shift *link_shift,
+ const struct dcn10_link_enc_mask *link_mask)
+{
+ struct bp_encoder_cap_info bp_cap_info = {0};
+ const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
+ enum bp_result result = BP_RESULT_OK;
+ struct dcn10_link_encoder *enc10 = &enc20->enc10;
+
+ enc10->base.funcs = &dcn31_link_enc_funcs;
+ enc10->base.ctx = init_data->ctx;
+ enc10->base.id = init_data->encoder;
+
+ enc10->base.hpd_source = init_data->hpd_source;
+ enc10->base.connector = init_data->connector;
+
+ enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+
+ enc10->base.features = *enc_features;
+
+ enc10->base.transmitter = init_data->transmitter;
+
+	/* Set the flag to indicate whether the driver polls the I2C data pin
+	 * while doing the DP sink detect.
+	 */
+
+/* if (dal_adapter_service_is_feature_supported(as,
+ FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
+ enc10->base.features.flags.bits.
+ DP_SINK_DETECT_POLL_DATA_PIN = true;*/
+
+ enc10->base.output_signals =
+ SIGNAL_TYPE_DVI_SINGLE_LINK |
+ SIGNAL_TYPE_DVI_DUAL_LINK |
+ SIGNAL_TYPE_LVDS |
+ SIGNAL_TYPE_DISPLAY_PORT |
+ SIGNAL_TYPE_DISPLAY_PORT_MST |
+ SIGNAL_TYPE_EDP |
+ SIGNAL_TYPE_HDMI_TYPE_A;
+
+	/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
+	 * SW always assigns DIG_FE 1:1 mapped to DIG_BE for non-MST UNIPHY.
+	 * SW assigns DIG_FE to non-MST UNIPHY first and MST last, so the
+	 * preferred DIG is per UNIPHY and is used by SST DP, eDP, HDMI, DVI and LVDS.
+	 * The preferred DIG assignment is decided by board design.
+	 * For DCE 8.0 there are at most 6 UNIPHYs; we assume board design
+	 * and VBIOS will filter out the 7th UNIPHY for DCE 8.0.
+	 * By this, adding DIGG should not hurt DCE 8.0.
+	 * This lets DCE 8.1 share DCE 8.0 code as much as possible.
+	 */
+
+ enc10->link_regs = link_regs;
+ enc10->aux_regs = aux_regs;
+ enc10->hpd_regs = hpd_regs;
+ enc10->link_shift = link_shift;
+ enc10->link_mask = link_mask;
+
+ switch (enc10->base.transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ enc10->base.preferred_engine = ENGINE_ID_DIGA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ enc10->base.preferred_engine = ENGINE_ID_DIGB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ enc10->base.preferred_engine = ENGINE_ID_DIGC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ enc10->base.preferred_engine = ENGINE_ID_DIGD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ enc10->base.preferred_engine = ENGINE_ID_DIGE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ enc10->base.preferred_engine = ENGINE_ID_DIGF;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+ }
+
+ /* default to one to mirror Windows behavior */
+ enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
+
+ result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios,
+ enc10->base.id, &bp_cap_info);
+
+	/* Override features with the capabilities reported by VBIOS */
+ if (result == BP_RESULT_OK) {
+ enc10->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+ enc10->base.features.flags.bits.IS_DP2_CAPABLE = bp_cap_info.IS_DP2_CAPABLE;
+ enc10->base.features.flags.bits.IS_UHBR10_CAPABLE = bp_cap_info.DP_UHBR10_EN;
+ enc10->base.features.flags.bits.IS_UHBR13_5_CAPABLE = bp_cap_info.DP_UHBR13_5_EN;
+ enc10->base.features.flags.bits.IS_UHBR20_CAPABLE = bp_cap_info.DP_UHBR20_EN;
+ enc10->base.features.flags.bits.DP_IS_USB_C =
+ bp_cap_info.DP_IS_USB_C;
+ } else {
+ DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ __func__,
+ result);
+ }
+ if (enc10->base.ctx->dc->debug.hdmi20_disable) {
+ enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
+ }
+}
+
+void dcn31_link_encoder_construct_minimal(
+ struct dcn20_link_encoder *enc20,
+ struct dc_context *ctx,
+ const struct encoder_feature_support *enc_features,
+ const struct dcn10_link_enc_registers *link_regs,
+ enum engine_id eng_id)
+{
+ struct dcn10_link_encoder *enc10 = &enc20->enc10;
+
+ enc10->base.funcs = &dcn31_link_enc_funcs;
+ enc10->base.ctx = ctx;
+ enc10->base.id.type = OBJECT_TYPE_ENCODER;
+ enc10->base.hpd_source = HPD_SOURCEID_UNKNOWN;
+ enc10->base.connector.type = OBJECT_TYPE_CONNECTOR;
+ enc10->base.preferred_engine = eng_id;
+ enc10->base.features = *enc_features;
+ enc10->base.transmitter = TRANSMITTER_UNKNOWN;
+ enc10->link_regs = link_regs;
+
+ enc10->base.output_signals =
+ SIGNAL_TYPE_DISPLAY_PORT |
+ SIGNAL_TYPE_DISPLAY_PORT_MST |
+ SIGNAL_TYPE_EDP;
+}
+
+/* DPIA equivalent of link_transmitter_control. */
+static bool link_dpia_control(struct dc_context *dc_ctx,
+ struct dmub_cmd_dig_dpia_control_data *dpia_control)
+{
+ union dmub_rb_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.dig1_dpia_control.header.type = DMUB_CMD__DPIA;
+ cmd.dig1_dpia_control.header.sub_type =
+ DMUB_CMD__DPIA_DIG1_DPIA_CONTROL;
+ cmd.dig1_dpia_control.header.payload_bytes =
+ sizeof(cmd.dig1_dpia_control) -
+ sizeof(cmd.dig1_dpia_control.header);
+
+ cmd.dig1_dpia_control.dpia_control = *dpia_control;
+
+ dm_execute_dmub_cmd(dc_ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+}
+
+static void link_encoder_disable(struct dcn10_link_encoder *enc10)
+{
+ /* reset training complete */
+ REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, 0);
+}
+
+void dcn31_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ /* Enable transmitter and encoder. */
+ if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc, enc)) {
+
+ DC_LOG_DEBUG("%s: enc_id(%d)\n", __func__, enc->preferred_engine);
+ dcn20_link_encoder_enable_dp_output(enc, link_settings, clock_source);
+
+ } else {
+
+ struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 };
+ struct dc_link *link;
+
+ link = link_enc_cfg_get_link_using_link_enc(enc->ctx->dc, enc->preferred_engine);
+
+ enc1_configure_encoder(enc10, link_settings);
+
+ dpia_control.action = (uint8_t)TRANSMITTER_CONTROL_ENABLE;
+ dpia_control.enc_id = enc->preferred_engine;
+ dpia_control.mode_laneset.digmode = 0; /* 0 for SST; 5 for MST */
+ dpia_control.lanenum = (uint8_t)link_settings->lane_count;
+ dpia_control.symclk_10khz = link_settings->link_rate *
+ LINK_RATE_REF_FREQ_IN_KHZ / 10;
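+		/* link_rate is in units of 0.27 Gbps and LINK_RATE_REF_FREQ_IN_KHZ
+		 * is 270000 (27 MHz), so e.g. HBR2 (link_rate = 0x14 = 20) yields
+		 * 20 * 270000 / 10 = 540000, the 5.4 Gbps link rate in 10 kHz units.
+		 */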
+ /* DIG_BE_CNTL.DIG_HPD_SELECT set to 5 (hpdsel - 1) to indicate HPD pin
+ * unused by DPIA.
+ */
+ dpia_control.hpdsel = 6;
+
+ if (link) {
+ dpia_control.dpia_id = link->ddc_hw_inst;
+ dpia_control.fec_rdy = link->dc->link_srv->dp_should_enable_fec(link);
+ } else {
+ DC_LOG_ERROR("%s: Failed to execute DPIA enable DMUB command.\n", __func__);
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ DC_LOG_DEBUG("%s: DPIA(%d) - enc_id(%d)\n", __func__, dpia_control.dpia_id, dpia_control.enc_id);
+ link_dpia_control(enc->ctx, &dpia_control);
+ }
+}
+
+void dcn31_link_encoder_enable_dp_mst_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ /* Enable transmitter and encoder. */
+ if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc, enc)) {
+
+ DC_LOG_DEBUG("%s: enc_id(%d)\n", __func__, enc->preferred_engine);
+ dcn10_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source);
+
+ } else {
+
+ struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 };
+ struct dc_link *link;
+
+ link = link_enc_cfg_get_link_using_link_enc(enc->ctx->dc, enc->preferred_engine);
+
+ enc1_configure_encoder(enc10, link_settings);
+
+ dpia_control.action = (uint8_t)TRANSMITTER_CONTROL_ENABLE;
+ dpia_control.enc_id = enc->preferred_engine;
+ dpia_control.mode_laneset.digmode = 5; /* 0 for SST; 5 for MST */
+ dpia_control.lanenum = (uint8_t)link_settings->lane_count;
+ dpia_control.symclk_10khz = link_settings->link_rate *
+ LINK_RATE_REF_FREQ_IN_KHZ / 10;
+ /* DIG_BE_CNTL.DIG_HPD_SELECT set to 5 (hpdsel - 1) to indicate HPD pin
+ * unused by DPIA.
+ */
+ dpia_control.hpdsel = 6;
+
+ if (link) {
+ dpia_control.dpia_id = link->ddc_hw_inst;
+ dpia_control.fec_rdy = link->dc->link_srv->dp_should_enable_fec(link);
+ } else {
+ DC_LOG_ERROR("%s: Failed to execute DPIA enable DMUB command.\n", __func__);
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ DC_LOG_DEBUG("%s: DPIA(%d) - enc_id(%d)\n", __func__, dpia_control.dpia_id, dpia_control.enc_id);
+ link_dpia_control(enc->ctx, &dpia_control);
+ }
+}
+
+void dcn31_link_encoder_disable_output(
+ struct link_encoder *enc,
+ enum signal_type signal)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ /* Disable transmitter and encoder. */
+ if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc, enc)) {
+
+ DC_LOG_DEBUG("%s: enc_id(%d)\n", __func__, enc->preferred_engine);
+ dcn10_link_encoder_disable_output(enc, signal);
+
+ } else {
+
+ struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 };
+ struct dc_link *link;
+
+ if (enc->funcs->is_dig_enabled && !enc->funcs->is_dig_enabled(enc))
+ return;
+
+ link = link_enc_cfg_get_link_using_link_enc(enc->ctx->dc, enc->preferred_engine);
+
+ dpia_control.action = (uint8_t)TRANSMITTER_CONTROL_DISABLE;
+ dpia_control.enc_id = enc->preferred_engine;
+ if (signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ dpia_control.mode_laneset.digmode = 0; /* 0 for SST; 5 for MST */
+ } else if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ dpia_control.mode_laneset.digmode = 5; /* 0 for SST; 5 for MST */
+ } else {
+ DC_LOG_ERROR("%s: USB4 DPIA only supports DisplayPort.\n", __func__);
+ BREAK_TO_DEBUGGER();
+ }
+
+ if (link) {
+ dpia_control.dpia_id = link->ddc_hw_inst;
+ } else {
+			DC_LOG_ERROR("%s: Failed to execute DPIA disable DMUB command.\n", __func__);
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ DC_LOG_DEBUG("%s: DPIA(%d) - enc_id(%d)\n", __func__, dpia_control.dpia_id, dpia_control.enc_id);
+ link_dpia_control(enc->ctx, &dpia_control);
+
+ link_encoder_disable(enc10);
+ }
+}
+
+bool dcn31_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ union dmub_rb_cmd cmd;
+ uint32_t dp_alt_mode_disable;
+
+ /* Only applicable to USB-C PHY. */
+ if (!enc->features.flags.bits.DP_IS_USB_C)
+ return false;
+
+ /*
+ * Use the new interface from DMCUB if available.
+	 * Avoids hanging the RDPCSPIPE if DMCUB wasn't already running.
+ */
+ if (has_query_dp_alt(enc)) {
+ if (!query_dp_alt_from_dmub(enc, &cmd))
+ return false;
+
+ return (cmd.query_dp_alt.data.is_dp_alt_disable == 0);
+ }
+
+ /* Legacy path, avoid if possible. */
+ if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) {
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE,
+ &dp_alt_mode_disable);
+ } else {
+ /*
+ * B0 phys use a new set of registers to check whether alt mode is disabled.
+ * if value == 1 alt mode is disabled, otherwise it is enabled.
+ */
+ if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A) ||
+ (enc10->base.transmitter == TRANSMITTER_UNIPHY_B) ||
+ (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) {
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE,
+ &dp_alt_mode_disable);
+ } else {
+ REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE,
+ &dp_alt_mode_disable);
+ }
+ }
+
+ return (dp_alt_mode_disable == 0);
+}
+
+void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc, struct dc_link_settings *link_settings)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ union dmub_rb_cmd cmd;
+ uint32_t is_in_usb_c_dp4_mode = 0;
+
+ dcn10_link_encoder_get_max_link_cap(enc, link_settings);
+
+ /* Take the link cap directly if not USB */
+ if (!enc->features.flags.bits.DP_IS_USB_C)
+ return;
+
+ /*
+ * Use the new interface from DMCUB if available.
+	 * Avoids hanging the RDPCSPIPE if DMCUB wasn't already running.
+ */
+ if (has_query_dp_alt(enc)) {
+ if (!query_dp_alt_from_dmub(enc, &cmd))
+ return;
+
+ if (cmd.query_dp_alt.data.is_usb &&
+ cmd.query_dp_alt.data.is_dp4 == 0)
+ link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
+
+ return;
+ }
+
+ /* Legacy path, avoid if possible. */
+ if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) {
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4,
+ &is_in_usb_c_dp4_mode);
+ } else {
+ if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A) ||
+ (enc10->base.transmitter == TRANSMITTER_UNIPHY_B) ||
+ (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) {
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4,
+ &is_in_usb_c_dp4_mode);
+ } else {
+ REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4,
+ &is_in_usb_c_dp4_mode);
+ }
+ }
+
+ if (!is_in_usb_c_dp4_mode)
+ link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h
new file mode 100644
index 000000000..221671563
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h
@@ -0,0 +1,286 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_ENCODER__DCN31_H__
+#define __DC_LINK_ENCODER__DCN31_H__
+
+#include "dcn30/dcn30_dio_link_encoder.h"
+
+
+#define LE_DCN31_REG_LIST(id)\
+ LE_DCN3_REG_LIST(id),\
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
+ SR(DIO_LINKA_CNTL), \
+ SR(DIO_LINKB_CNTL), \
+ SR(DIO_LINKC_CNTL), \
+ SR(DIO_LINKD_CNTL), \
+ SR(DIO_LINKE_CNTL), \
+ SR(DIO_LINKF_CNTL)
+
+#define LINK_ENCODER_MASK_SH_LIST_DCN31(mask_sh) \
+ LINK_ENCODER_MASK_SH_LIST_DCN10(mask_sh),\
+ LE_SF(DP0_DP_DPHY_CNTL, DPHY_FEC_EN, mask_sh),\
+ LE_SF(DP0_DP_DPHY_CNTL, DPHY_FEC_READY_SHADOW, mask_sh),\
+ LE_SF(DP0_DP_DPHY_CNTL, DPHY_FEC_ACTIVE_STATUS, mask_sh),\
+ LE_SF(DIG0_TMDS_CTL_BITS, TMDS_CTL0, mask_sh), \
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_START_WINDOW, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_HALF_SYM_DETECT_LEN, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_TRANSITION_FILTER_EN, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_ALLOW_BELOW_THRESHOLD_START, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_ALLOW_BELOW_THRESHOLD_STOP, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_PHASE_DETECT_LEN, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_DETECTION_THRESHOLD, mask_sh), \
+ LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_TX_PRECHARGE_LEN, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_TX_PRECHARGE_SYMBOLS, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_MODE_DET_CHECK_DELAY, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_PRECHARGE_SKIP, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN_MUL, mask_sh),\
+ LE_SF(DIO_LINKA_CNTL, ENC_TYPE_SEL, mask_sh),\
+ LE_SF(DIO_LINKA_CNTL, HPO_DP_ENC_SEL, mask_sh),\
+ LE_SF(DIO_LINKA_CNTL, HPO_HDMI_ENC_SEL, mask_sh)
+
+#define DPCS_DCN31_REG_LIST(id) \
+ SRI(TMDS_CTL_BITS, DIG, id), \
+ SRI(RDPCSTX_PHY_CNTL3, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL4, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL5, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL6, RDPCSTX, id), \
+ SRI(RDPCSPIPE_PHY_CNTL6, RDPCSPIPE, id), \
+ SRI(RDPCSTX_PHY_CNTL7, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL8, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL9, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL10, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL11, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL12, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL13, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL14, RDPCSTX, id), \
+ SRI(RDPCSTX_CNTL, RDPCSTX, id), \
+ SRI(RDPCSTX_CLOCK_CNTL, RDPCSTX, id), \
+ SRI(RDPCSTX_INTERRUPT_CONTROL, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL0, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL2, RDPCSTX, id), \
+ SRI(RDPCS_TX_CR_ADDR, RDPCSTX, id), \
+ SRI(RDPCS_TX_CR_DATA, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE0, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE1, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \
+ SR(RDPCSTX0_RDPCSTX_SCRATCH), \
+ SRI(RDPCSTX_PHY_RX_LD_VAL, RDPCSTX, id),\
+ SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id)
+
+#define DPCS_DCN31_MASK_SH_LIST(mask_sh)\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX0_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX1_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX2_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX3_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_MPLLB_MULTIPLIER, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_RATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_RATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\
+ LE_SF(RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\
+ LE_SF(RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh),\
+ LE_SF(RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_QUOT, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_DEN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL8, RDPCS_PHY_DP_MPLLB_SSC_PEAK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL9, RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL9, RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL10, RDPCS_PHY_DP_MPLLB_FRACN_REM, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_REF_CLK_MPLLB_DIV, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_HDMI_MPLLB_HDMI_DIV, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_SSC_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_TX_CLK_DIV, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_STATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL13, RDPCS_PHY_DP_MPLLB_DIV_CLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL13, RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL14, RDPCS_PHY_DP_MPLLB_FRACN_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL14, RDPCS_PHY_DP_MPLLB_PMIX_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE0_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE1_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE2_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE3_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_RD_START_DELAY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_EXT_REFCLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_BYPASS, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_CLOCK_ON, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_CLOCK_ON, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_GATE_DIS, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_CR_MUX_SEL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_REF_RANGE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_BYPASS, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_EXT_LD_DONE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_HDMIMODE_ENABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_INIT_DONE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DP4_POR, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_REG_FIFO_ERROR_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_TX_FIFO_ERROR_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_DPALT_DISABLE_TOGGLE_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_DPALT_4LANE_TOGGLE_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCS_TX_CR_ADDR, RDPCS_TX_CR_ADDR, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCS_TX_CR_DATA, RDPCS_TX_CR_DATA, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_V2I, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_FREQ_VCO, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_MPLLB_CP_INT, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_MPLLB_CP_PROP, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_FINETUNE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_RANGE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_POST, mask_sh)
+
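+/* Same as DPCS_DCN31_REG_LIST() above, except that the RDPCSTX_PHY_CNTL6 and
+ * RDPCSPIPE_PHY_CNTL6 entries are omitted.
+ */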
+#define DPCS_DCN314_REG_LIST(id) \
+ SRI(TMDS_CTL_BITS, DIG, id), \
+ SRI(RDPCSTX_PHY_CNTL3, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL4, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL5, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL7, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL8, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL9, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL10, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL11, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL12, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL13, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL14, RDPCSTX, id), \
+ SRI(RDPCSTX_CNTL, RDPCSTX, id), \
+ SRI(RDPCSTX_CLOCK_CNTL, RDPCSTX, id), \
+ SRI(RDPCSTX_INTERRUPT_CONTROL, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL0, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL2, RDPCSTX, id), \
+ SRI(RDPCS_TX_CR_ADDR, RDPCSTX, id), \
+ SRI(RDPCS_TX_CR_DATA, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE0, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE1, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \
+ SR(RDPCSTX0_RDPCSTX_SCRATCH), \
+ SRI(RDPCSTX_PHY_RX_LD_VAL, RDPCSTX, id),\
+ SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id)
+
+void dcn31_link_encoder_construct(
+ struct dcn20_link_encoder *enc20,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dcn10_link_enc_registers *link_regs,
+ const struct dcn10_link_enc_aux_registers *aux_regs,
+ const struct dcn10_link_enc_hpd_registers *hpd_regs,
+ const struct dcn10_link_enc_shift *link_shift,
+ const struct dcn10_link_enc_mask *link_mask);
+
+/*
+ * Create a minimal link encoder object with no dc_link object associated with it.
+ */
+void dcn31_link_encoder_construct_minimal(
+ struct dcn20_link_encoder *enc20,
+ struct dc_context *ctx,
+ const struct encoder_feature_support *enc_features,
+ const struct dcn10_link_enc_registers *link_regs,
+ enum engine_id eng_id);
+
+void dcn31_link_encoder_set_dio_phy_mux(
+ struct link_encoder *enc,
+ enum encoder_type_select sel,
+ uint32_t hpo_inst);
+
+/*
+ * Enable DP transmitter and its encoder.
+ */
+void dcn31_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
+
+/*
+ * Enable DP transmitter and its encoder in MST mode.
+ */
+void dcn31_link_encoder_enable_dp_mst_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
+
+/*
+ * Disable transmitter and its encoder.
+ */
+void dcn31_link_encoder_disable_output(
+ struct link_encoder *enc,
+ enum signal_type signal);
+
+/*
+ * Check whether USB-C DP Alt mode is disabled
+ */
+bool dcn31_link_encoder_is_in_alt_mode(
+ struct link_encoder *enc);
+
+void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings);
+
+#endif /* __DC_LINK_ENCODER__DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
new file mode 100644
index 000000000..5b7ad38f8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
@@ -0,0 +1,622 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc_bios_types.h"
+#include "dcn31_hpo_dp_link_encoder.h"
+#include "reg_helper.h"
+#include "stream_encoder.h"
+
+#define DC_LOGGER \
+ enc3->base.ctx->logger
+
+#define REG(reg)\
+ (enc3->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ enc3->hpo_le_shift->field_name, enc3->hpo_le_mask->field_name
+
+
+#define CTX \
+ enc3->base.ctx
+
+enum {
+ DP_SAT_UPDATE_MAX_RETRY = 200
+};
+
+void dcn31_hpo_dp_link_enc_enable(
+ struct hpo_dp_link_encoder *enc,
+ enum dc_lane_count num_lanes)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+ uint32_t dp_link_enabled;
+
+ /* get current status of link enabled */
+ REG_GET(DP_DPHY_SYM32_STATUS,
+ STATUS, &dp_link_enabled);
+
+ /* Enable clocks first */
+ REG_UPDATE(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC_CLOCK_EN, 1);
+
+ /* Reset DPHY. Only reset if going from disable to enable */
+ if (!dp_link_enabled) {
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL, DPHY_RESET, 1);
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL, DPHY_RESET, 0);
+ }
+
+ /* Configure DPHY settings */
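+	/* NUM_LANES is encoded as 0 = 1 lane, 1 = 2 lanes, 3 = 4 lanes,
+	 * hence the ternary below.
+	 */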
+ REG_UPDATE_3(DP_DPHY_SYM32_CONTROL,
+ DPHY_ENABLE, 1,
+ PRECODER_ENABLE, 1,
+ NUM_LANES, num_lanes == LANE_COUNT_ONE ? 0 : num_lanes == LANE_COUNT_TWO ? 1 : 3);
+}
+
+void dcn31_hpo_dp_link_enc_disable(
+ struct hpo_dp_link_encoder *enc)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+
+ /* Configure DPHY settings */
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ DPHY_ENABLE, 0);
+
+ /* Shut down clock last */
+ REG_UPDATE(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC_CLOCK_EN, 0);
+}
+
+void dcn31_hpo_dp_link_enc_set_link_test_pattern(
+ struct hpo_dp_link_encoder *enc,
+ struct encoder_set_dp_phy_pattern_param *tp_params)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+ uint32_t tp_custom;
+
+ switch (tp_params->dp_phy_pattern) {
+ case DP_TEST_PATTERN_VIDEO_MODE:
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_LINK_ACTIVE);
+ break;
+ case DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE:
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_LINK_TRAINING_TPS1);
+ break;
+ case DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE:
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_LINK_TRAINING_TPS2);
+ break;
+ case DP_TEST_PATTERN_128b_132b_TPS1:
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_TPS1,
+ TP_SELECT1, DP_DPHY_TP_SELECT_TPS1,
+ TP_SELECT2, DP_DPHY_TP_SELECT_TPS1,
+ TP_SELECT3, DP_DPHY_TP_SELECT_TPS1);
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ case DP_TEST_PATTERN_128b_132b_TPS2:
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_TPS2,
+ TP_SELECT1, DP_DPHY_TP_SELECT_TPS2,
+ TP_SELECT2, DP_DPHY_TP_SELECT_TPS2,
+ TP_SELECT3, DP_DPHY_TP_SELECT_TPS2);
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ case DP_TEST_PATTERN_PRBS7:
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_PRBS_SEL0, DP_DPHY_TP_PRBS7,
+ TP_PRBS_SEL1, DP_DPHY_TP_PRBS7,
+ TP_PRBS_SEL2, DP_DPHY_TP_PRBS7,
+ TP_PRBS_SEL3, DP_DPHY_TP_PRBS7);
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT1, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT2, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT3, DP_DPHY_TP_SELECT_PRBS);
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ case DP_TEST_PATTERN_PRBS9:
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_PRBS_SEL0, DP_DPHY_TP_PRBS9,
+ TP_PRBS_SEL1, DP_DPHY_TP_PRBS9,
+ TP_PRBS_SEL2, DP_DPHY_TP_PRBS9,
+ TP_PRBS_SEL3, DP_DPHY_TP_PRBS9);
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT1, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT2, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT3, DP_DPHY_TP_SELECT_PRBS);
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ case DP_TEST_PATTERN_PRBS11:
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_PRBS_SEL0, DP_DPHY_TP_PRBS11,
+ TP_PRBS_SEL1, DP_DPHY_TP_PRBS11,
+ TP_PRBS_SEL2, DP_DPHY_TP_PRBS11,
+ TP_PRBS_SEL3, DP_DPHY_TP_PRBS11);
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT1, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT2, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT3, DP_DPHY_TP_SELECT_PRBS);
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ case DP_TEST_PATTERN_PRBS15:
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_PRBS_SEL0, DP_DPHY_TP_PRBS15,
+ TP_PRBS_SEL1, DP_DPHY_TP_PRBS15,
+ TP_PRBS_SEL2, DP_DPHY_TP_PRBS15,
+ TP_PRBS_SEL3, DP_DPHY_TP_PRBS15);
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT1, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT2, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT3, DP_DPHY_TP_SELECT_PRBS);
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ case DP_TEST_PATTERN_PRBS23:
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_PRBS_SEL0, DP_DPHY_TP_PRBS23,
+ TP_PRBS_SEL1, DP_DPHY_TP_PRBS23,
+ TP_PRBS_SEL2, DP_DPHY_TP_PRBS23,
+ TP_PRBS_SEL3, DP_DPHY_TP_PRBS23);
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT1, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT2, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT3, DP_DPHY_TP_SELECT_PRBS);
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ case DP_TEST_PATTERN_PRBS31:
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_PRBS_SEL0, DP_DPHY_TP_PRBS31,
+ TP_PRBS_SEL1, DP_DPHY_TP_PRBS31,
+ TP_PRBS_SEL2, DP_DPHY_TP_PRBS31,
+ TP_PRBS_SEL3, DP_DPHY_TP_PRBS31);
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT1, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT2, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT3, DP_DPHY_TP_SELECT_PRBS);
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ case DP_TEST_PATTERN_264BIT_CUSTOM:
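+		/* The 264-bit custom pattern arrives as 33 bytes; pack it 3 bytes
+		 * (24 bits) per register across the 11 TP_CUSTOM0..10 registers
+		 * (11 * 24 = 264 bits).
+		 */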
+ tp_custom = (tp_params->custom_pattern[2] << 16) | (tp_params->custom_pattern[1] << 8) | tp_params->custom_pattern[0];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM0, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[5] << 16) | (tp_params->custom_pattern[4] << 8) | tp_params->custom_pattern[3];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM1, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[8] << 16) | (tp_params->custom_pattern[7] << 8) | tp_params->custom_pattern[6];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM2, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[11] << 16) | (tp_params->custom_pattern[10] << 8) | tp_params->custom_pattern[9];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM3, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[14] << 16) | (tp_params->custom_pattern[13] << 8) | tp_params->custom_pattern[12];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM4, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[17] << 16) | (tp_params->custom_pattern[16] << 8) | tp_params->custom_pattern[15];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM5, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[20] << 16) | (tp_params->custom_pattern[19] << 8) | tp_params->custom_pattern[18];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM6, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[23] << 16) | (tp_params->custom_pattern[22] << 8) | tp_params->custom_pattern[21];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM7, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[26] << 16) | (tp_params->custom_pattern[25] << 8) | tp_params->custom_pattern[24];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM8, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[29] << 16) | (tp_params->custom_pattern[28] << 8) | tp_params->custom_pattern[27];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM9, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[32] << 16) | (tp_params->custom_pattern[31] << 8) | tp_params->custom_pattern[30];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM10, 0, TP_CUSTOM, tp_custom);
+
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_CUSTOM,
+ TP_SELECT1, DP_DPHY_TP_SELECT_CUSTOM,
+ TP_SELECT2, DP_DPHY_TP_SELECT_CUSTOM,
+ TP_SELECT3, DP_DPHY_TP_SELECT_CUSTOM);
+
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ case DP_TEST_PATTERN_SQUARE:
+ case DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED:
+ case DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED:
+ case DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED:
+ REG_SET(DP_DPHY_SYM32_TP_SQ_PULSE, 0,
+ TP_SQ_PULSE_WIDTH, tp_params->custom_pattern[0]);
+
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_SQUARE,
+ TP_SELECT1, DP_DPHY_TP_SELECT_SQUARE,
+ TP_SELECT2, DP_DPHY_TP_SELECT_SQUARE,
+ TP_SELECT3, DP_DPHY_TP_SELECT_SQUARE);
+
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ default:
+ break;
+ }
+}
+
+static void fill_stream_allocation_row_info(
+ const struct link_mst_stream_allocation *stream_allocation,
+ uint32_t *src,
+ uint32_t *slots)
+{
+ const struct hpo_dp_stream_encoder *stream_enc = stream_allocation->hpo_dp_stream_enc;
+
+ if (stream_enc && (stream_enc->id >= ENGINE_ID_HPO_DP_0)) {
+ *src = stream_enc->id - ENGINE_ID_HPO_DP_0;
+ *slots = stream_allocation->slot_count;
+ } else {
+ *src = 0;
+ *slots = 0;
+ }
+}
+
+/* programs DP VC payload allocation */
+void dcn31_hpo_dp_link_enc_update_stream_allocation_table(
+ struct hpo_dp_link_encoder *enc,
+ const struct link_mst_stream_allocation_table *table)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+ uint32_t slots = 0;
+ uint32_t src = 0;
+
+ /* --- Set MSE Stream Attribute -
+ * Setup VC Payload Table on Tx Side,
+ * Issue allocation change trigger
+ * to commit payload on both tx and rx side
+ */
+
+ /* we should clean-up table each time */
+
+ if (table->stream_count >= 1) {
+ fill_stream_allocation_row_info(
+ &table->stream_allocations[0],
+ &src,
+ &slots);
+ } else {
+ src = 0;
+ slots = 0;
+ }
+
+ REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC0,
+ SAT_STREAM_SOURCE, src,
+ SAT_SLOT_COUNT, slots);
+
+ if (table->stream_count >= 2) {
+ fill_stream_allocation_row_info(
+ &table->stream_allocations[1],
+ &src,
+ &slots);
+ } else {
+ src = 0;
+ slots = 0;
+ }
+
+ REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC1,
+ SAT_STREAM_SOURCE, src,
+ SAT_SLOT_COUNT, slots);
+
+ if (table->stream_count >= 3) {
+ fill_stream_allocation_row_info(
+ &table->stream_allocations[2],
+ &src,
+ &slots);
+ } else {
+ src = 0;
+ slots = 0;
+ }
+
+ REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC2,
+ SAT_STREAM_SOURCE, src,
+ SAT_SLOT_COUNT, slots);
+
+ if (table->stream_count >= 4) {
+ fill_stream_allocation_row_info(
+ &table->stream_allocations[3],
+ &src,
+ &slots);
+ } else {
+ src = 0;
+ slots = 0;
+ }
+
+ REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC3,
+ SAT_STREAM_SOURCE, src,
+ SAT_SLOT_COUNT, slots);
+
+ /* --- wait for transaction finish */
+
+ /* send allocation change trigger (ACT)
+ * this step first sends the ACT,
+ * then double buffers the SAT into the hardware
+ * making the new allocation active on the DP MST mode link
+ */
+
+ /* SAT_UPDATE:
+ * 0 - No Action
+ * 1 - Update SAT with trigger
+ * 2 - Update SAT without trigger
+ */
+ REG_UPDATE(DP_DPHY_SYM32_SAT_UPDATE,
+ SAT_UPDATE, 1);
+
+ /* wait for update to complete
+ * (i.e. SAT_UPDATE_PENDING field is set to 0)
+ * No need for HW to enforce keepout.
+ */
+ /* Best case and worst case wait time for SAT_UPDATE_PENDING
+ * best: 109 us
+ * worst: 868 us
+ */
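+	/* 10 us poll interval * DP_SAT_UPDATE_MAX_RETRY (200) retries bounds
+	 * this wait at roughly 2 ms, comfortably above the worst case.
+	 */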
+ REG_WAIT(DP_DPHY_SYM32_STATUS,
+ SAT_UPDATE_PENDING, 0,
+ 10, DP_SAT_UPDATE_MAX_RETRY);
+}
+
+void dcn31_hpo_dp_link_enc_set_throttled_vcp_size(
+ struct hpo_dp_link_encoder *enc,
+ uint32_t stream_encoder_inst,
+ struct fixed31_32 avg_time_slots_per_mtp)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+ uint32_t x = dc_fixpt_floor(
+ avg_time_slots_per_mtp);
+ uint32_t y = dc_fixpt_ceil(
+ dc_fixpt_shl(
+ dc_fixpt_sub_int(
+ avg_time_slots_per_mtp,
+ x),
+ 25));
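+	/* X is the integer part of avg_time_slots_per_mtp and Y is the
+	 * fractional part expressed in units of 1/2^25, e.g. 3.5 slots per MTP
+	 * gives X = 3 and Y = 0.5 * 2^25 = 16777216.
+	 */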
+
+ switch (stream_encoder_inst) {
+ case 0:
+ REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL0, 0,
+ STREAM_VC_RATE_X, x,
+ STREAM_VC_RATE_Y, y);
+ break;
+ case 1:
+ REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL1, 0,
+ STREAM_VC_RATE_X, x,
+ STREAM_VC_RATE_Y, y);
+ break;
+ case 2:
+ REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL2, 0,
+ STREAM_VC_RATE_X, x,
+ STREAM_VC_RATE_Y, y);
+ break;
+ case 3:
+ REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL3, 0,
+ STREAM_VC_RATE_X, x,
+ STREAM_VC_RATE_Y, y);
+ break;
+ default:
+ ASSERT(0);
+ }
+
+ /* Best case and worst case wait time for RATE_UPDATE_PENDING
+ * best: 116 ns
+ * worst: 903 ns
+ */
+ /* wait for update to be completed on the link */
+ REG_WAIT(DP_DPHY_SYM32_STATUS,
+ RATE_UPDATE_PENDING, 0,
+ 1, 10);
+}
+
+static bool dcn31_hpo_dp_link_enc_is_in_alt_mode(
+ struct hpo_dp_link_encoder *enc)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+ uint32_t dp_alt_mode_disable = 0;
+
+ ASSERT((enc->transmitter >= TRANSMITTER_UNIPHY_A) && (enc->transmitter <= TRANSMITTER_UNIPHY_E));
+
+ /* if value == 1 alt mode is disabled, otherwise it is enabled */
+ REG_GET(RDPCSTX_PHY_CNTL6[enc->transmitter], RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
+ return (dp_alt_mode_disable == 0);
+}
+
+void dcn31_hpo_dp_link_enc_read_state(
+ struct hpo_dp_link_encoder *enc,
+ struct hpo_dp_link_enc_state *state)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+
+ ASSERT(state);
+
+ REG_GET(DP_DPHY_SYM32_STATUS,
+ STATUS, &state->link_enc_enabled);
+ REG_GET(DP_DPHY_SYM32_CONTROL,
+ NUM_LANES, &state->lane_count);
+ REG_GET(DP_DPHY_SYM32_CONTROL,
+ MODE, (uint32_t *)&state->link_mode);
+
+ REG_GET_2(DP_DPHY_SYM32_SAT_VC0,
+ SAT_STREAM_SOURCE, &state->stream_src[0],
+ SAT_SLOT_COUNT, &state->slot_count[0]);
+ REG_GET_2(DP_DPHY_SYM32_SAT_VC1,
+ SAT_STREAM_SOURCE, &state->stream_src[1],
+ SAT_SLOT_COUNT, &state->slot_count[1]);
+ REG_GET_2(DP_DPHY_SYM32_SAT_VC2,
+ SAT_STREAM_SOURCE, &state->stream_src[2],
+ SAT_SLOT_COUNT, &state->slot_count[2]);
+ REG_GET_2(DP_DPHY_SYM32_SAT_VC3,
+ SAT_STREAM_SOURCE, &state->stream_src[3],
+ SAT_SLOT_COUNT, &state->slot_count[3]);
+
+ REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL0,
+ STREAM_VC_RATE_X, &state->vc_rate_x[0],
+ STREAM_VC_RATE_Y, &state->vc_rate_y[0]);
+ REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL1,
+ STREAM_VC_RATE_X, &state->vc_rate_x[1],
+ STREAM_VC_RATE_Y, &state->vc_rate_y[1]);
+ REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL2,
+ STREAM_VC_RATE_X, &state->vc_rate_x[2],
+ STREAM_VC_RATE_Y, &state->vc_rate_y[2]);
+ REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL3,
+ STREAM_VC_RATE_X, &state->vc_rate_x[3],
+ STREAM_VC_RATE_Y, &state->vc_rate_y[3]);
+}
+
+static enum bp_result link_transmitter_control(
+ struct dcn31_hpo_dp_link_encoder *enc3,
+ struct bp_transmitter_control *cntl)
+{
+ enum bp_result result;
+ struct dc_bios *bp = enc3->base.ctx->dc_bios;
+
+ result = bp->funcs->transmitter_control(bp, cntl);
+
+ return result;
+}
+
+/* enables DP PHY output for 128b132b encoding */
+void dcn31_hpo_dp_link_enc_enable_dp_output(
+ struct hpo_dp_link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum transmitter transmitter,
+ enum hpd_source_id hpd_source)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
+
+ /* Set the transmitter */
+ enc3->base.transmitter = transmitter;
+
+ /* Set the hpd source */
+ enc3->base.hpd_source = hpd_source;
+
+ /* Enable the PHY */
+ cntl.action = TRANSMITTER_CONTROL_ENABLE;
+ cntl.engine_id = ENGINE_ID_UNKNOWN;
+ cntl.transmitter = enc3->base.transmitter;
+ //cntl.pll_id = clock_source;
+ cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+ cntl.lanes_number = link_settings->lane_count;
+ cntl.hpd_sel = enc3->base.hpd_source;
+ cntl.pixel_clock = link_settings->link_rate * 1000;
+ cntl.color_depth = COLOR_DEPTH_UNDEFINED;
+ cntl.hpo_engine_id = enc->inst + ENGINE_ID_HPO_DP_0;
+
+ result = link_transmitter_control(enc3, &cntl);
+
+ if (result != BP_RESULT_OK) {
+ DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ }
+}
+
+void dcn31_hpo_dp_link_enc_disable_output(
+ struct hpo_dp_link_encoder *enc,
+ enum signal_type signal)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
+
+ /* disable transmitter */
+ cntl.action = TRANSMITTER_CONTROL_DISABLE;
+ cntl.transmitter = enc3->base.transmitter;
+ cntl.hpd_sel = enc3->base.hpd_source;
+ cntl.signal = signal;
+
+ result = link_transmitter_control(enc3, &cntl);
+
+ if (result != BP_RESULT_OK) {
+ DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ /* disable encoder */
+ dcn31_hpo_dp_link_enc_disable(enc);
+}
+
+void dcn31_hpo_dp_link_enc_set_ffe(
+ struct hpo_dp_link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ uint8_t ffe_preset)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
+
+	/* set transmitter FFE (voltage swing / pre-emphasis) preset */
+ cntl.transmitter = enc3->base.transmitter;
+ cntl.action = TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS;
+ cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+ cntl.lanes_number = link_settings->lane_count;
+ cntl.pixel_clock = link_settings->link_rate * 1000;
+ cntl.lane_settings = ffe_preset;
+
+ result = link_transmitter_control(enc3, &cntl);
+
+ if (result != BP_RESULT_OK) {
+ DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static struct hpo_dp_link_encoder_funcs dcn31_hpo_dp_link_encoder_funcs = {
+ .enable_link_phy = dcn31_hpo_dp_link_enc_enable_dp_output,
+ .disable_link_phy = dcn31_hpo_dp_link_enc_disable_output,
+ .link_enable = dcn31_hpo_dp_link_enc_enable,
+ .link_disable = dcn31_hpo_dp_link_enc_disable,
+ .set_link_test_pattern = dcn31_hpo_dp_link_enc_set_link_test_pattern,
+ .update_stream_allocation_table = dcn31_hpo_dp_link_enc_update_stream_allocation_table,
+ .set_throttled_vcp_size = dcn31_hpo_dp_link_enc_set_throttled_vcp_size,
+ .is_in_alt_mode = dcn31_hpo_dp_link_enc_is_in_alt_mode,
+ .read_state = dcn31_hpo_dp_link_enc_read_state,
+ .set_ffe = dcn31_hpo_dp_link_enc_set_ffe,
+};
+
+void hpo_dp_link_encoder31_construct(struct dcn31_hpo_dp_link_encoder *enc31,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn31_hpo_dp_link_encoder_registers *hpo_le_regs,
+ const struct dcn31_hpo_dp_link_encoder_shift *hpo_le_shift,
+ const struct dcn31_hpo_dp_link_encoder_mask *hpo_le_mask)
+{
+ enc31->base.ctx = ctx;
+
+ enc31->base.inst = inst;
+ enc31->base.funcs = &dcn31_hpo_dp_link_encoder_funcs;
+ enc31->base.hpd_source = HPD_SOURCEID_UNKNOWN;
+ enc31->base.transmitter = TRANSMITTER_UNKNOWN;
+
+ enc31->regs = hpo_le_regs;
+ enc31->hpo_le_shift = hpo_le_shift;
+ enc31->hpo_le_mask = hpo_le_mask;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h
new file mode 100644
index 000000000..e324e9b83
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h
@@ -0,0 +1,223 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCN31_HPO_DP_LINK_ENCODER_H__
+#define __DAL_DCN31_HPO_DP_LINK_ENCODER_H__
+
+#include "link_encoder.h"
+
+
+#define DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(hpo_dp_link_encoder)\
+ container_of(hpo_dp_link_encoder, struct dcn31_hpo_dp_link_encoder, base)
+
+
+#define DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id) \
+ SRI(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC, id), \
+ SRI(DP_DPHY_SYM32_CONTROL, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_STATUS, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CONFIG, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_PRBS_SEED0, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_PRBS_SEED1, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_PRBS_SEED2, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_PRBS_SEED3, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_SQ_PULSE, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM0, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM1, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM2, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM3, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM4, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM5, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM6, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM7, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM8, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM9, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM10, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_SAT_VC0, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_SAT_VC1, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_SAT_VC2, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_SAT_VC3, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_VC_RATE_CNTL0, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_VC_RATE_CNTL1, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_VC_RATE_CNTL2, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_VC_RATE_CNTL3, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_SAT_UPDATE, DP_DPHY_SYM32, id)
+
+#define DCN3_1_RDPCSTX_REG_LIST(id) \
+ SRII(RDPCSTX_PHY_CNTL6, RDPCSTX, id)
+
+
+#define DCN3_1_HPO_DP_LINK_ENC_REGS \
+ uint32_t DP_LINK_ENC_CLOCK_CONTROL;\
+ uint32_t DP_DPHY_SYM32_CONTROL;\
+ uint32_t DP_DPHY_SYM32_STATUS;\
+ uint32_t DP_DPHY_SYM32_TP_CONFIG;\
+ uint32_t DP_DPHY_SYM32_TP_PRBS_SEED0;\
+ uint32_t DP_DPHY_SYM32_TP_PRBS_SEED1;\
+ uint32_t DP_DPHY_SYM32_TP_PRBS_SEED2;\
+ uint32_t DP_DPHY_SYM32_TP_PRBS_SEED3;\
+ uint32_t DP_DPHY_SYM32_TP_SQ_PULSE;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM0;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM1;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM2;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM3;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM4;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM5;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM6;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM7;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM8;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM9;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM10;\
+ uint32_t DP_DPHY_SYM32_SAT_VC0;\
+ uint32_t DP_DPHY_SYM32_SAT_VC1;\
+ uint32_t DP_DPHY_SYM32_SAT_VC2;\
+ uint32_t DP_DPHY_SYM32_SAT_VC3;\
+ uint32_t DP_DPHY_SYM32_VC_RATE_CNTL0;\
+ uint32_t DP_DPHY_SYM32_VC_RATE_CNTL1;\
+ uint32_t DP_DPHY_SYM32_VC_RATE_CNTL2;\
+ uint32_t DP_DPHY_SYM32_VC_RATE_CNTL3;\
+ uint32_t DP_DPHY_SYM32_SAT_UPDATE
+
+struct dcn31_hpo_dp_link_encoder_registers {
+ DCN3_1_HPO_DP_LINK_ENC_REGS;
+ uint32_t RDPCSTX_PHY_CNTL6[5];
+};
+
+#define DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(mask_sh)\
+ SE_SF(DP_LINK_ENC0_DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC_CLOCK_EN, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, DPHY_RESET, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, DPHY_ENABLE, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, PRECODER_ENABLE, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, MODE, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, NUM_LANES, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS, STATUS, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS, SAT_UPDATE_PENDING, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS, RATE_UPDATE_PENDING, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM0, TP_CUSTOM, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT0, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT1, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT2, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT3, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL0, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL1, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL2, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL3, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_SQ_PULSE, TP_SQ_PULSE_WIDTH, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0, SAT_STREAM_SOURCE, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0, SAT_SLOT_COUNT, mask_sh),\
+ SE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0, STREAM_VC_RATE_X, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0, STREAM_VC_RATE_Y, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_UPDATE, SAT_UPDATE, mask_sh)
+
+#define DCN3_1_HPO_DP_LINK_ENC_REG_FIELD_LIST(type) \
+ type DP_LINK_ENC_CLOCK_EN;\
+ type DPHY_RESET;\
+ type DPHY_ENABLE;\
+ type PRECODER_ENABLE;\
+ type NUM_LANES;\
+ type MODE;\
+ type STATUS;\
+ type SAT_UPDATE_PENDING;\
+ type RATE_UPDATE_PENDING;\
+ type TP_CUSTOM;\
+ type TP_SELECT0;\
+ type TP_SELECT1;\
+ type TP_SELECT2;\
+ type TP_SELECT3;\
+ type TP_PRBS_SEL0;\
+ type TP_PRBS_SEL1;\
+ type TP_PRBS_SEL2;\
+ type TP_PRBS_SEL3;\
+ type TP_SQ_PULSE_WIDTH;\
+ type SAT_STREAM_SOURCE;\
+ type SAT_SLOT_COUNT;\
+ type STREAM_VC_RATE_X;\
+ type STREAM_VC_RATE_Y;\
+ type SAT_UPDATE;\
+ type RDPCS_PHY_DPALT_DISABLE
+
+
+struct dcn31_hpo_dp_link_encoder_shift {
+ DCN3_1_HPO_DP_LINK_ENC_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn31_hpo_dp_link_encoder_mask {
+ DCN3_1_HPO_DP_LINK_ENC_REG_FIELD_LIST(uint32_t);
+};
+
+struct dcn31_hpo_dp_link_encoder {
+ struct hpo_dp_link_encoder base;
+ const struct dcn31_hpo_dp_link_encoder_registers *regs;
+ const struct dcn31_hpo_dp_link_encoder_shift *hpo_le_shift;
+ const struct dcn31_hpo_dp_link_encoder_mask *hpo_le_mask;
+};
+
+void hpo_dp_link_encoder31_construct(struct dcn31_hpo_dp_link_encoder *enc31,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn31_hpo_dp_link_encoder_registers *hpo_le_regs,
+ const struct dcn31_hpo_dp_link_encoder_shift *hpo_le_shift,
+ const struct dcn31_hpo_dp_link_encoder_mask *hpo_le_mask);
+
+void dcn31_hpo_dp_link_enc_enable_dp_output(
+ struct hpo_dp_link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum transmitter transmitter,
+ enum hpd_source_id hpd_source);
+
+void dcn31_hpo_dp_link_enc_disable_output(
+ struct hpo_dp_link_encoder *enc,
+ enum signal_type signal);
+
+void dcn31_hpo_dp_link_enc_enable(
+ struct hpo_dp_link_encoder *enc,
+ enum dc_lane_count num_lanes);
+
+void dcn31_hpo_dp_link_enc_disable(
+ struct hpo_dp_link_encoder *enc);
+
+void dcn31_hpo_dp_link_enc_set_link_test_pattern(
+ struct hpo_dp_link_encoder *enc,
+ struct encoder_set_dp_phy_pattern_param *tp_params);
+
+void dcn31_hpo_dp_link_enc_update_stream_allocation_table(
+ struct hpo_dp_link_encoder *enc,
+ const struct link_mst_stream_allocation_table *table);
+
+void dcn31_hpo_dp_link_enc_set_throttled_vcp_size(
+ struct hpo_dp_link_encoder *enc,
+ uint32_t stream_encoder_inst,
+ struct fixed31_32 avg_time_slots_per_mtp);
+
+void dcn31_hpo_dp_link_enc_read_state(
+ struct hpo_dp_link_encoder *enc,
+ struct hpo_dp_link_enc_state *state);
+
+void dcn31_hpo_dp_link_enc_set_ffe(
+ struct hpo_dp_link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ uint8_t ffe_preset);
+
+#endif // __DAL_DCN31_HPO_LINK_ENCODER_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
new file mode 100644
index 000000000..45143459e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -0,0 +1,775 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc_bios_types.h"
+#include "dcn31_hpo_dp_stream_encoder.h"
+#include "reg_helper.h"
+#include "dc.h"
+
+#define DC_LOGGER \
+ enc3->base.ctx->logger
+
+#define REG(reg)\
+ (enc3->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ enc3->hpo_se_shift->field_name, enc3->hpo_se_mask->field_name
+
+#define CTX \
+ enc3->base.ctx
+
+
+enum dp2_pixel_encoding {
+ DP_SYM32_ENC_PIXEL_ENCODING_RGB_YCBCR444,
+ DP_SYM32_ENC_PIXEL_ENCODING_YCBCR422,
+ DP_SYM32_ENC_PIXEL_ENCODING_YCBCR420,
+ DP_SYM32_ENC_PIXEL_ENCODING_Y_ONLY
+};
+
+enum dp2_uncompressed_component_depth {
+ DP_SYM32_ENC_COMPONENT_DEPTH_6BPC,
+ DP_SYM32_ENC_COMPONENT_DEPTH_8BPC,
+ DP_SYM32_ENC_COMPONENT_DEPTH_10BPC,
+ DP_SYM32_ENC_COMPONENT_DEPTH_12BPC
+};
+
+
+static void dcn31_hpo_dp_stream_enc_enable_stream(
+ struct hpo_dp_stream_encoder *enc)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ /* Enable all clocks in the DP_STREAM_ENC */
+ REG_UPDATE(DP_STREAM_ENC_CLOCK_CONTROL,
+ DP_STREAM_ENC_CLOCK_EN, 1);
+
+ /* Assert reset to the DP_SYM32_ENC logic */
+ REG_UPDATE(DP_SYM32_ENC_CONTROL,
+ DP_SYM32_ENC_RESET, 1);
+ /* Wait for reset to complete (to assert) */
+ REG_WAIT(DP_SYM32_ENC_CONTROL,
+ DP_SYM32_ENC_RESET_DONE, 1,
+ 1, 10);
+
+ /* De-assert reset to the DP_SYM32_ENC logic */
+ REG_UPDATE(DP_SYM32_ENC_CONTROL,
+ DP_SYM32_ENC_RESET, 0);
+ /* Wait for reset to de-assert */
+ REG_WAIT(DP_SYM32_ENC_CONTROL,
+ DP_SYM32_ENC_RESET_DONE, 0,
+ 1, 10);
+
+ /* Enable idle pattern generation */
+ REG_UPDATE(DP_SYM32_ENC_CONTROL,
+ DP_SYM32_ENC_ENABLE, 1);
+}
+
+static void dcn31_hpo_dp_stream_enc_dp_unblank(
+ struct hpo_dp_stream_encoder *enc,
+ uint32_t stream_source)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ /* Set the input mux for video stream source */
+ REG_UPDATE(DP_STREAM_ENC_INPUT_MUX_CONTROL,
+ DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL, stream_source);
+
+ /* Enable video transmission in main framer */
+ REG_UPDATE(DP_SYM32_ENC_VID_STREAM_CONTROL,
+ VID_STREAM_ENABLE, 1);
+
+ /* Reset and Enable Pixel to Symbol FIFO */
+ REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL,
+ PIXEL_TO_SYMBOL_FIFO_RESET, 1);
+ REG_WAIT(DP_SYM32_ENC_VID_FIFO_CONTROL,
+ PIXEL_TO_SYMBOL_FIFO_RESET_DONE, 1,
+ 1, 10);
+ REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL,
+ PIXEL_TO_SYMBOL_FIFO_RESET, 0);
+	REG_WAIT(DP_SYM32_ENC_VID_FIFO_CONTROL,	/* Wait for the FIFO reset to de-assert */
+ PIXEL_TO_SYMBOL_FIFO_RESET_DONE, 0,
+ 1, 10);
+ REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL,
+ PIXEL_TO_SYMBOL_FIFO_ENABLE, 1);
+
+ /* Reset and Enable Clock Ramp Adjuster FIFO */
+ REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0,
+ FIFO_RESET, 1);
+ REG_WAIT(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0,
+ FIFO_RESET_DONE, 1,
+ 1, 10);
+ REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0,
+ FIFO_RESET, 0);
+ REG_WAIT(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0,
+ FIFO_RESET_DONE, 0,
+ 1, 10);
+
+ /* For Debug -- Enable CRC */
+ REG_UPDATE_2(DP_SYM32_ENC_VID_CRC_CONTROL,
+ CRC_ENABLE, 1,
+ CRC_CONT_MODE_ENABLE, 1);
+
+ REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0,
+ FIFO_ENABLE, 1);
+}
+
+static void dcn31_hpo_dp_stream_enc_dp_blank(
+ struct hpo_dp_stream_encoder *enc)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ /* Disable video transmission */
+ REG_UPDATE(DP_SYM32_ENC_VID_STREAM_CONTROL,
+ VID_STREAM_ENABLE, 0);
+
+	/* Wait for video stream transmission to be disabled.
+	 * Use a larger delay to wait until VBLANK - max retry of
+	 * 10us * 5000 = 50ms. This covers the 41.7ms frame time of a
+	 * minimum 24 Hz mode plus a little extra, since the delay
+	 * accuracy may not be trusted.
+	 */
+ REG_WAIT(DP_SYM32_ENC_VID_STREAM_CONTROL,
+ VID_STREAM_STATUS, 0,
+ 10, 5000);
+
+ /* Disable SDP transmission */
+ REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL,
+ SDP_STREAM_ENABLE, 0);
+
+ /* Disable Pixel to Symbol FIFO */
+ REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL,
+ PIXEL_TO_SYMBOL_FIFO_ENABLE, 0);
+
+ /* Disable Clock Ramp Adjuster FIFO */
+ REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0,
+ FIFO_ENABLE, 0);
+}
+
+static void dcn31_hpo_dp_stream_enc_disable(
+ struct hpo_dp_stream_encoder *enc)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ /* Disable DP_SYM32_ENC */
+ REG_UPDATE(DP_SYM32_ENC_CONTROL,
+ DP_SYM32_ENC_ENABLE, 0);
+
+ /* Disable clocks in the DP_STREAM_ENC */
+ REG_UPDATE(DP_STREAM_ENC_CLOCK_CONTROL,
+ DP_STREAM_ENC_CLOCK_EN, 0);
+}
+
+static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
+ struct hpo_dp_stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ enum dc_color_space output_color_space,
+ bool use_vsc_sdp_for_colorimetry,
+ bool compressed_format,
+ bool double_buffer_en)
+{
+ enum dp2_pixel_encoding pixel_encoding;
+ enum dp2_uncompressed_component_depth component_depth;
+ uint32_t h_active_start;
+ uint32_t v_active_start;
+ uint32_t h_blank;
+ uint32_t h_back_porch;
+ uint32_t h_width;
+ uint32_t v_height;
+ uint64_t v_freq;
+ uint8_t misc0 = 0;
+ uint8_t misc1 = 0;
+ uint8_t hsp;
+ uint8_t vsp;
+
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+ struct dc_crtc_timing hw_crtc_timing = *crtc_timing;
+
+ /* MISC0[0] = 0 video and link clocks are asynchronous
+ * MISC1[0] = 0 interlace not supported
+ * MISC1[2:1] = 0 stereo field is handled by hardware
+ * MISC1[5:3] = 0 Reserved
+ */
+
+ /* Interlaced not supported */
+ if (hw_crtc_timing.flags.INTERLACE) {
+ BREAK_TO_DEBUGGER();
+ }
+
+ /* Double buffer enable for MSA and pixel format registers
+ * Only double buffer for changing stream attributes for active streams
+ * Do not double buffer when initially enabling a stream
+ */
+ REG_UPDATE(DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL,
+ MSA_DOUBLE_BUFFER_ENABLE, double_buffer_en);
+ REG_UPDATE(DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL,
+ PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE, double_buffer_en);
+
+ /* Pixel Encoding */
+ switch (hw_crtc_timing.pixel_encoding) {
+ case PIXEL_ENCODING_YCBCR422:
+ pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_YCBCR422;
+ misc0 = misc0 | 0x2; // MISC0[2:1] = 01
+ break;
+ case PIXEL_ENCODING_YCBCR444:
+ pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_RGB_YCBCR444;
+ misc0 = misc0 | 0x4; // MISC0[2:1] = 10
+
+ if (hw_crtc_timing.flags.Y_ONLY) {
+ pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_Y_ONLY;
+ if (hw_crtc_timing.display_color_depth != COLOR_DEPTH_666) {
+ /* HW testing only, no use case yet.
+ * Color depth of Y-only could be
+ * 8, 10, 12, 16 bits
+ */
+ misc1 = misc1 | 0x80; // MISC1[7] = 1
+ }
+ }
+ break;
+ case PIXEL_ENCODING_YCBCR420:
+ pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_YCBCR420;
+ misc1 = misc1 | 0x40; // MISC1[6] = 1
+ break;
+ case PIXEL_ENCODING_RGB:
+ default:
+ pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_RGB_YCBCR444;
+ break;
+ }
+
+ /* For YCbCr420 and BT2020 Colorimetry Formats, VSC SDP shall be used.
+ * When MISC1, bit 6, is Set to 1, a Source device uses a VSC SDP to indicate the
+ * Pixel Encoding/Colorimetry Format and that a Sink device shall ignore MISC1, bit 7,
+ * and MISC0, bits 7:1 (MISC1, bit 7, and MISC0, bits 7:1, become "don't care").
+ */
+ if (use_vsc_sdp_for_colorimetry)
+ misc1 = misc1 | 0x40;
+ else
+ misc1 = misc1 & ~0x40;
+
+ /* Color depth */
+ switch (hw_crtc_timing.display_color_depth) {
+ case COLOR_DEPTH_666:
+ component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_6BPC;
+ // MISC0[7:5] = 000
+ break;
+ case COLOR_DEPTH_888:
+ component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_8BPC;
+ misc0 = misc0 | 0x20; // MISC0[7:5] = 001
+ break;
+ case COLOR_DEPTH_101010:
+ component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_10BPC;
+ misc0 = misc0 | 0x40; // MISC0[7:5] = 010
+ break;
+ case COLOR_DEPTH_121212:
+ component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_12BPC;
+ misc0 = misc0 | 0x60; // MISC0[7:5] = 011
+ break;
+ default:
+ component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_6BPC;
+ break;
+ }
+
+ REG_UPDATE_3(DP_SYM32_ENC_VID_PIXEL_FORMAT,
+ PIXEL_ENCODING_TYPE, compressed_format,
+ UNCOMPRESSED_PIXEL_ENCODING, pixel_encoding,
+ UNCOMPRESSED_COMPONENT_DEPTH, component_depth);
+
+ switch (output_color_space) {
+ case COLOR_SPACE_SRGB:
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ break;
+ case COLOR_SPACE_SRGB_LIMITED:
+ misc0 = misc0 | 0x8; /* bit3=1 */
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ break;
+ case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ misc0 = misc0 | 0x8; /* bit3=1, bit4=0 */
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
+ else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444)
+ misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
+ break;
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
+ else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444)
+ misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
+ break;
+ case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+ case COLOR_SPACE_2020_RGB_FULLRANGE:
+ case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_XR_RGB:
+ case COLOR_SPACE_MSREF_SCRGB:
+ case COLOR_SPACE_ADOBERGB:
+ case COLOR_SPACE_DCIP3:
+ case COLOR_SPACE_XV_YCC_709:
+ case COLOR_SPACE_XV_YCC_601:
+ case COLOR_SPACE_DISPLAYNATIVE:
+ case COLOR_SPACE_DOLBYVISION:
+ case COLOR_SPACE_APPCTRL:
+ case COLOR_SPACE_CUSTOMPOINTS:
+ case COLOR_SPACE_UNKNOWN:
+ case COLOR_SPACE_YCBCR709_BLACK:
+ /* do nothing */
+ break;
+ }
+
+	/* Calculate from VESA timing parameters;
+	 * h_active_start is relative to the leading edge of sync.
+	 */
+ h_blank = hw_crtc_timing.h_total - hw_crtc_timing.h_border_left -
+ hw_crtc_timing.h_addressable - hw_crtc_timing.h_border_right;
+
+ h_back_porch = h_blank - hw_crtc_timing.h_front_porch -
+ hw_crtc_timing.h_sync_width;
+
+ /* start at beginning of left border */
+ h_active_start = hw_crtc_timing.h_sync_width + h_back_porch;
+
+ v_active_start = hw_crtc_timing.v_total - hw_crtc_timing.v_border_top -
+ hw_crtc_timing.v_addressable - hw_crtc_timing.v_border_bottom -
+ hw_crtc_timing.v_front_porch;
+
+ h_width = hw_crtc_timing.h_border_left + hw_crtc_timing.h_addressable + hw_crtc_timing.h_border_right;
+ v_height = hw_crtc_timing.v_border_top + hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom;
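+	/* MSA HSP/VSP byte: bit 7 set (0x80) indicates negative sync polarity */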
+ hsp = hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? 0 : 0x80;
+ vsp = hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 0 : 0x80;
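+	/* Pixel clock in Hz (pix_clk_100hz is in units of 100 Hz) */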
+ v_freq = (uint64_t)hw_crtc_timing.pix_clk_100hz * 100;
+
+ /* MSA Packet Mapping to 32-bit Link Symbols - DP2 spec, section 2.7.4.1
+ *
+ * Lane 0 Lane 1 Lane 2 Lane 3
+ * MSA[0] = { 0, 0, 0, VFREQ[47:40]}
+ * MSA[1] = { 0, 0, 0, VFREQ[39:32]}
+ * MSA[2] = { 0, 0, 0, VFREQ[31:24]}
+ * MSA[3] = { HTotal[15:8], HStart[15:8], HWidth[15:8], VFREQ[23:16]}
+ * MSA[4] = { HTotal[ 7:0], HStart[ 7:0], HWidth[ 7:0], VFREQ[15: 8]}
+ * MSA[5] = { VTotal[15:8], VStart[15:8], VHeight[15:8], VFREQ[ 7: 0]}
+ * MSA[6] = { VTotal[ 7:0], VStart[ 7:0], VHeight[ 7:0], MISC0[ 7: 0]}
+ * MSA[7] = { HSP|HSW[14:8], VSP|VSW[14:8], 0, MISC1[ 7: 0]}
+ * MSA[8] = { HSW[ 7:0], VSW[ 7:0], 0, 0}
+ */
+ REG_SET_4(DP_SYM32_ENC_VID_MSA0, 0,
+ MSA_DATA_LANE_0, 0,
+ MSA_DATA_LANE_1, 0,
+ MSA_DATA_LANE_2, 0,
+ MSA_DATA_LANE_3, v_freq >> 40);
+
+ REG_SET_4(DP_SYM32_ENC_VID_MSA1, 0,
+ MSA_DATA_LANE_0, 0,
+ MSA_DATA_LANE_1, 0,
+ MSA_DATA_LANE_2, 0,
+ MSA_DATA_LANE_3, (v_freq >> 32) & 0xff);
+
+ REG_SET_4(DP_SYM32_ENC_VID_MSA2, 0,
+ MSA_DATA_LANE_0, 0,
+ MSA_DATA_LANE_1, 0,
+ MSA_DATA_LANE_2, 0,
+ MSA_DATA_LANE_3, (v_freq >> 24) & 0xff);
+
+ REG_SET_4(DP_SYM32_ENC_VID_MSA3, 0,
+ MSA_DATA_LANE_0, hw_crtc_timing.h_total >> 8,
+ MSA_DATA_LANE_1, h_active_start >> 8,
+ MSA_DATA_LANE_2, h_width >> 8,
+ MSA_DATA_LANE_3, (v_freq >> 16) & 0xff);
+
+ REG_SET_4(DP_SYM32_ENC_VID_MSA4, 0,
+ MSA_DATA_LANE_0, hw_crtc_timing.h_total & 0xff,
+ MSA_DATA_LANE_1, h_active_start & 0xff,
+ MSA_DATA_LANE_2, h_width & 0xff,
+ MSA_DATA_LANE_3, (v_freq >> 8) & 0xff);
+
+ REG_SET_4(DP_SYM32_ENC_VID_MSA5, 0,
+ MSA_DATA_LANE_0, hw_crtc_timing.v_total >> 8,
+ MSA_DATA_LANE_1, v_active_start >> 8,
+ MSA_DATA_LANE_2, v_height >> 8,
+ MSA_DATA_LANE_3, v_freq & 0xff);
+
+ REG_SET_4(DP_SYM32_ENC_VID_MSA6, 0,
+ MSA_DATA_LANE_0, hw_crtc_timing.v_total & 0xff,
+ MSA_DATA_LANE_1, v_active_start & 0xff,
+ MSA_DATA_LANE_2, v_height & 0xff,
+ MSA_DATA_LANE_3, misc0);
+
+ REG_SET_4(DP_SYM32_ENC_VID_MSA7, 0,
+ MSA_DATA_LANE_0, hsp | (hw_crtc_timing.h_sync_width >> 8),
+ MSA_DATA_LANE_1, vsp | (hw_crtc_timing.v_sync_width >> 8),
+ MSA_DATA_LANE_2, 0,
+ MSA_DATA_LANE_3, misc1);
+
+ REG_SET_4(DP_SYM32_ENC_VID_MSA8, 0,
+ MSA_DATA_LANE_0, hw_crtc_timing.h_sync_width & 0xff,
+ MSA_DATA_LANE_1, hw_crtc_timing.v_sync_width & 0xff,
+ MSA_DATA_LANE_2, 0,
+ MSA_DATA_LANE_3, 0);
+}
+
+static void dcn31_hpo_dp_stream_enc_update_dp_info_packets_sdp_line_num(
+ struct hpo_dp_stream_encoder *enc,
+ struct encoder_info_frame *info_frame)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ if (info_frame->adaptive_sync.valid == true &&
+ info_frame->sdp_line_num.adaptive_sync_line_num_valid == true) {
+ //00: REFER_TO_DP_SOF, 01: REFER_TO_OTG_SOF
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_SOF_REFERENCE, 1);
+
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_TRANSMISSION_LINE_NUMBER,
+ info_frame->sdp_line_num.adaptive_sync_line_num);
+ }
+}
+
+static void dcn31_hpo_dp_stream_enc_update_dp_info_packets(
+ struct hpo_dp_stream_encoder *enc,
+ const struct encoder_info_frame *info_frame)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+ uint32_t dmdata_packet_enabled = 0;
+
+ if (info_frame->vsc.valid)
+ enc->vpg->funcs->update_generic_info_packet(
+ enc->vpg,
+ 0, /* packetIndex */
+ &info_frame->vsc,
+ true);
+
+ if (info_frame->spd.valid)
+ enc->vpg->funcs->update_generic_info_packet(
+ enc->vpg,
+ 2, /* packetIndex */
+ &info_frame->spd,
+ true);
+
+ if (info_frame->hdrsmd.valid)
+ enc->vpg->funcs->update_generic_info_packet(
+ enc->vpg,
+ 3, /* packetIndex */
+ &info_frame->hdrsmd,
+ true);
+
+ if (info_frame->adaptive_sync.valid)
+ enc->vpg->funcs->update_generic_info_packet(
+ enc->vpg,
+ 5, /* packetIndex */
+ &info_frame->adaptive_sync,
+ true);
+
+ /* enable/disable transmission of packet(s).
+ * If enabled, packet transmission begins on the next frame
+ */
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->vsc.valid);
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->spd.valid);
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->hdrsmd.valid);
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->adaptive_sync.valid);
+
+ /* check if dynamic metadata packet transmission is enabled */
+ REG_GET(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL,
+ METADATA_PACKET_ENABLE, &dmdata_packet_enabled);
+
+ /* Enable secondary data path */
+ REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL,
+ SDP_STREAM_ENABLE, 1);
+}
+
+static void dcn31_hpo_dp_stream_enc_stop_dp_info_packets(
+ struct hpo_dp_stream_encoder *enc)
+{
+ /* stop generic packets on DP */
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+ uint32_t asp_enable = 0;
+ uint32_t atp_enable = 0;
+ uint32_t aip_enable = 0;
+ uint32_t acm_enable = 0;
+
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0);
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0);
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0);
+
+ /* Disable secondary data path if audio is also disabled */
+ REG_GET_4(DP_SYM32_ENC_SDP_AUDIO_CONTROL0,
+ ASP_ENABLE, &asp_enable,
+ ATP_ENABLE, &atp_enable,
+ AIP_ENABLE, &aip_enable,
+ ACM_ENABLE, &acm_enable);
+ if (!(asp_enable || atp_enable || aip_enable || acm_enable))
+ REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL,
+ SDP_STREAM_ENABLE, 0);
+}
+
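+/* Report whether any of the GSP 0/2/3/11 generic packet streams is still enabled */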
+static uint32_t hpo_dp_is_gsp_enabled(
+ struct hpo_dp_stream_encoder *enc)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+ uint32_t gsp0_enabled = 0;
+ uint32_t gsp2_enabled = 0;
+ uint32_t gsp3_enabled = 0;
+ uint32_t gsp11_enabled = 0;
+
+ REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp0_enabled);
+ REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp2_enabled);
+ REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp3_enabled);
+ REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL11, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp11_enabled);
+
+ return (gsp0_enabled || gsp2_enabled || gsp3_enabled || gsp11_enabled);
+}
+
+static void dcn31_hpo_dp_stream_enc_set_dsc_pps_info_packet(
+ struct hpo_dp_stream_encoder *enc,
+ bool enable,
+ uint8_t *dsc_packed_pps,
+ bool immediate_update)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ if (enable) {
+ struct dc_info_packet pps_sdp;
+ int i;
+
+ /* Configure for PPS packet size (128 bytes) */
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL11,
+ GSP_PAYLOAD_SIZE, 3);
+
+ /* Load PPS into infoframe (SDP) registers */
+ pps_sdp.valid = true;
+ pps_sdp.hb0 = 0;
+ pps_sdp.hb1 = DC_DP_INFOFRAME_TYPE_PPS;
+ pps_sdp.hb2 = 127;
+ pps_sdp.hb3 = 0;
+
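+		/* Split the 128-byte packed PPS across four 32-byte generic info packets (slots 11-14) */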
+ for (i = 0; i < 4; i++) {
+ memcpy(pps_sdp.sb, &dsc_packed_pps[i * 32], 32);
+ enc3->base.vpg->funcs->update_generic_info_packet(
+ enc3->base.vpg,
+ 11 + i,
+ &pps_sdp,
+ immediate_update);
+ }
+
+		/* SW should make sure the VBID[6] update line number is larger
+		 * than the PPS transmission line number.
+		 */
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL11,
+ GSP_TRANSMISSION_LINE_NUMBER, 2);
+
+ REG_UPDATE_2(DP_SYM32_ENC_VID_VBID_CONTROL,
+ VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE, 0,
+ VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER, 3);
+
+ /* Send PPS data at the line number specified above. */
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL11,
+ GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 1);
+ REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL,
+ SDP_STREAM_ENABLE, 1);
+ } else {
+ /* Disable Generic Stream Packet 11 (GSP) transmission */
+ REG_UPDATE_2(DP_SYM32_ENC_SDP_GSP_CONTROL11,
+ GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0,
+ GSP_PAYLOAD_SIZE, 0);
+ }
+}
+
+static void dcn31_hpo_dp_stream_enc_map_stream_to_link(
+ struct hpo_dp_stream_encoder *enc,
+ uint32_t stream_enc_inst,
+ uint32_t link_enc_inst)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ ASSERT(stream_enc_inst < 4 && link_enc_inst < 2);
+
+ switch (stream_enc_inst) {
+ case 0:
+ REG_UPDATE(DP_STREAM_MAPPER_CONTROL0,
+ DP_STREAM_LINK_TARGET, link_enc_inst);
+ break;
+ case 1:
+ REG_UPDATE(DP_STREAM_MAPPER_CONTROL1,
+ DP_STREAM_LINK_TARGET, link_enc_inst);
+ break;
+ case 2:
+ REG_UPDATE(DP_STREAM_MAPPER_CONTROL2,
+ DP_STREAM_LINK_TARGET, link_enc_inst);
+ break;
+ case 3:
+ REG_UPDATE(DP_STREAM_MAPPER_CONTROL3,
+ DP_STREAM_LINK_TARGET, link_enc_inst);
+ break;
+ }
+}
+
+static void dcn31_hpo_dp_stream_enc_audio_setup(
+ struct hpo_dp_stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+	/* Set the input mux for the audio stream source */
+ REG_UPDATE(DP_STREAM_ENC_AUDIO_CONTROL,
+ DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL, az_inst);
+
+ ASSERT(enc->apg);
+ enc->apg->funcs->se_audio_setup(enc->apg, az_inst, info);
+}
+
+static void dcn31_hpo_dp_stream_enc_audio_enable(
+ struct hpo_dp_stream_encoder *enc)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ /* Enable Audio packets */
+ REG_UPDATE(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ASP_ENABLE, 1);
+
+ /* Program the ATP and AIP next */
+ REG_UPDATE_2(DP_SYM32_ENC_SDP_AUDIO_CONTROL0,
+ ATP_ENABLE, 1,
+ AIP_ENABLE, 1);
+
+ /* Enable secondary data path */
+ REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL,
+ SDP_STREAM_ENABLE, 1);
+
+ /* Enable APG block */
+ enc->apg->funcs->enable_apg(enc->apg);
+}
+
+static void dcn31_hpo_dp_stream_enc_audio_disable(
+ struct hpo_dp_stream_encoder *enc)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ /* Disable Audio packets */
+ REG_UPDATE_4(DP_SYM32_ENC_SDP_AUDIO_CONTROL0,
+ ASP_ENABLE, 0,
+ ATP_ENABLE, 0,
+ AIP_ENABLE, 0,
+ ACM_ENABLE, 0);
+
+	/* Disable SDP stream enable if the other SDP GSPs are also disabled */
+ if (!(hpo_dp_is_gsp_enabled(enc)))
+ REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL,
+ SDP_STREAM_ENABLE, 0);
+
+ /* Disable APG block */
+ enc->apg->funcs->disable_apg(enc->apg);
+}
+
+static void dcn31_hpo_dp_stream_enc_read_state(
+ struct hpo_dp_stream_encoder *enc,
+ struct hpo_dp_stream_encoder_state *s)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ REG_GET(DP_SYM32_ENC_CONTROL,
+ DP_SYM32_ENC_ENABLE, &s->stream_enc_enabled);
+ REG_GET(DP_SYM32_ENC_VID_STREAM_CONTROL,
+ VID_STREAM_ENABLE, &s->vid_stream_enabled);
+ REG_GET(DP_STREAM_ENC_INPUT_MUX_CONTROL,
+ DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL, &s->otg_inst);
+
+ REG_GET_3(DP_SYM32_ENC_VID_PIXEL_FORMAT,
+ PIXEL_ENCODING_TYPE, &s->compressed_format,
+ UNCOMPRESSED_PIXEL_ENCODING, &s->pixel_encoding,
+ UNCOMPRESSED_COMPONENT_DEPTH, &s->component_depth);
+
+ REG_GET(DP_SYM32_ENC_SDP_CONTROL,
+ SDP_STREAM_ENABLE, &s->sdp_enabled);
+
+ switch (enc->inst) {
+ case 0:
+ REG_GET(DP_STREAM_MAPPER_CONTROL0,
+ DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc);
+ break;
+ case 1:
+ REG_GET(DP_STREAM_MAPPER_CONTROL1,
+ DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc);
+ break;
+ case 2:
+ REG_GET(DP_STREAM_MAPPER_CONTROL2,
+ DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc);
+ break;
+ case 3:
+ REG_GET(DP_STREAM_MAPPER_CONTROL3,
+ DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc);
+ break;
+ }
+}
+
+static void dcn31_set_hblank_min_symbol_width(
+ struct hpo_dp_stream_encoder *enc,
+ uint16_t width)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ REG_SET(DP_SYM32_ENC_HBLANK_CONTROL, 0,
+ HBLANK_MINIMUM_SYMBOL_WIDTH, width);
+}
+
+static const struct hpo_dp_stream_encoder_funcs dcn30_str_enc_funcs = {
+ .enable_stream = dcn31_hpo_dp_stream_enc_enable_stream,
+ .dp_unblank = dcn31_hpo_dp_stream_enc_dp_unblank,
+ .dp_blank = dcn31_hpo_dp_stream_enc_dp_blank,
+ .disable = dcn31_hpo_dp_stream_enc_disable,
+ .set_stream_attribute = dcn31_hpo_dp_stream_enc_set_stream_attribute,
+ .update_dp_info_packets_sdp_line_num = dcn31_hpo_dp_stream_enc_update_dp_info_packets_sdp_line_num,
+ .update_dp_info_packets = dcn31_hpo_dp_stream_enc_update_dp_info_packets,
+ .stop_dp_info_packets = dcn31_hpo_dp_stream_enc_stop_dp_info_packets,
+ .dp_set_dsc_pps_info_packet = dcn31_hpo_dp_stream_enc_set_dsc_pps_info_packet,
+ .map_stream_to_link = dcn31_hpo_dp_stream_enc_map_stream_to_link,
+ .dp_audio_setup = dcn31_hpo_dp_stream_enc_audio_setup,
+ .dp_audio_enable = dcn31_hpo_dp_stream_enc_audio_enable,
+ .dp_audio_disable = dcn31_hpo_dp_stream_enc_audio_disable,
+ .read_state = dcn31_hpo_dp_stream_enc_read_state,
+ .set_hblank_min_symbol_width = dcn31_set_hblank_min_symbol_width,
+};
+
+void dcn31_hpo_dp_stream_encoder_construct(
+ struct dcn31_hpo_dp_stream_encoder *enc3,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ uint32_t inst,
+ enum engine_id eng_id,
+ struct vpg *vpg,
+ struct apg *apg,
+ const struct dcn31_hpo_dp_stream_encoder_registers *regs,
+ const struct dcn31_hpo_dp_stream_encoder_shift *hpo_se_shift,
+ const struct dcn31_hpo_dp_stream_encoder_mask *hpo_se_mask)
+{
+ enc3->base.funcs = &dcn30_str_enc_funcs;
+ enc3->base.ctx = ctx;
+ enc3->base.inst = inst;
+ enc3->base.id = eng_id;
+ enc3->base.bp = bp;
+ enc3->base.vpg = vpg;
+ enc3->base.apg = apg;
+ enc3->regs = regs;
+ enc3->hpo_se_shift = hpo_se_shift;
+ enc3->hpo_se_mask = hpo_se_mask;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
new file mode 100644
index 000000000..82c3b3ac1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
@@ -0,0 +1,245 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCN31_HPO_DP_STREAM_ENCODER_H__
+#define __DAL_DCN31_HPO_DP_STREAM_ENCODER_H__
+
+#include "dcn30/dcn30_vpg.h"
+#include "dcn31/dcn31_apg.h"
+#include "stream_encoder.h"
+
+
+#define DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(hpo_dp_stream_encoder)\
+ container_of(hpo_dp_stream_encoder, struct dcn31_hpo_dp_stream_encoder, base)
+
+
+/* Define MSA_DATA_LANE_[0-3] fields to make programming easier */
+#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_0__SHIFT 0x0
+#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_1__SHIFT 0x8
+#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_2__SHIFT 0x10
+#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_3__SHIFT 0x18
+#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_0_MASK 0x000000FFL
+#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_1_MASK 0x0000FF00L
+#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_2_MASK 0x00FF0000L
+#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_3_MASK 0xFF000000L
+
+
+#define DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id) \
+ SR(DP_STREAM_MAPPER_CONTROL0),\
+ SR(DP_STREAM_MAPPER_CONTROL1),\
+ SR(DP_STREAM_MAPPER_CONTROL2),\
+ SR(DP_STREAM_MAPPER_CONTROL3),\
+ SRI(DP_STREAM_ENC_CLOCK_CONTROL, DP_STREAM_ENC, id),\
+ SRI(DP_STREAM_ENC_INPUT_MUX_CONTROL, DP_STREAM_ENC, id),\
+ SRI(DP_STREAM_ENC_AUDIO_CONTROL, DP_STREAM_ENC, id),\
+ SRI(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, DP_STREAM_ENC, id),\
+ SRI(DP_SYM32_ENC_CONTROL, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_PIXEL_FORMAT, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA0, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA1, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA2, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA3, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA4, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA5, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA6, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA7, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA8, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA_CONTROL, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_FIFO_CONTROL, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_STREAM_CONTROL, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_VBID_CONTROL, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_SDP_CONTROL, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_SDP_GSP_CONTROL0, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_SDP_GSP_CONTROL2, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_SDP_GSP_CONTROL3, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_SDP_GSP_CONTROL5, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_SDP_GSP_CONTROL11, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_CRC_CONTROL, DP_SYM32_ENC, id), \
+ SRI(DP_SYM32_ENC_HBLANK_CONTROL, DP_SYM32_ENC, id)
+
+#define DCN3_1_HPO_DP_STREAM_ENC_REGS \
+ uint32_t DP_STREAM_MAPPER_CONTROL0;\
+ uint32_t DP_STREAM_MAPPER_CONTROL1;\
+ uint32_t DP_STREAM_MAPPER_CONTROL2;\
+ uint32_t DP_STREAM_MAPPER_CONTROL3;\
+ uint32_t DP_STREAM_ENC_CLOCK_CONTROL;\
+ uint32_t DP_STREAM_ENC_INPUT_MUX_CONTROL;\
+ uint32_t DP_STREAM_ENC_AUDIO_CONTROL;\
+ uint32_t DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0;\
+ uint32_t DP_SYM32_ENC_CONTROL;\
+ uint32_t DP_SYM32_ENC_VID_PIXEL_FORMAT;\
+ uint32_t DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL;\
+ uint32_t DP_SYM32_ENC_VID_MSA0;\
+ uint32_t DP_SYM32_ENC_VID_MSA1;\
+ uint32_t DP_SYM32_ENC_VID_MSA2;\
+ uint32_t DP_SYM32_ENC_VID_MSA3;\
+ uint32_t DP_SYM32_ENC_VID_MSA4;\
+ uint32_t DP_SYM32_ENC_VID_MSA5;\
+ uint32_t DP_SYM32_ENC_VID_MSA6;\
+ uint32_t DP_SYM32_ENC_VID_MSA7;\
+ uint32_t DP_SYM32_ENC_VID_MSA8;\
+ uint32_t DP_SYM32_ENC_VID_MSA_CONTROL;\
+ uint32_t DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL;\
+ uint32_t DP_SYM32_ENC_VID_FIFO_CONTROL;\
+ uint32_t DP_SYM32_ENC_VID_STREAM_CONTROL;\
+ uint32_t DP_SYM32_ENC_VID_VBID_CONTROL;\
+ uint32_t DP_SYM32_ENC_SDP_CONTROL;\
+ uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL0;\
+ uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL2;\
+ uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL3;\
+ uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL5;\
+ uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL11;\
+ uint32_t DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL;\
+ uint32_t DP_SYM32_ENC_SDP_AUDIO_CONTROL0;\
+ uint32_t DP_SYM32_ENC_VID_CRC_CONTROL;\
+ uint32_t DP_SYM32_ENC_HBLANK_CONTROL
+
+
+#define DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(mask_sh)\
+ SE_SF(DP_STREAM_MAPPER_CONTROL0, DP_STREAM_LINK_TARGET, mask_sh),\
+ SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL, DP_STREAM_ENC_CLOCK_EN, mask_sh),\
+ SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_INPUT_MUX_CONTROL, DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL, mask_sh),\
+ SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_AUDIO_CONTROL, DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL, mask_sh),\
+ SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, FIFO_RESET, mask_sh),\
+ SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, FIFO_RESET_DONE, mask_sh),\
+ SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, FIFO_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_RESET, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_RESET_DONE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT, PIXEL_ENCODING_TYPE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT, UNCOMPRESSED_PIXEL_ENCODING, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT, UNCOMPRESSED_COMPONENT_DEPTH, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL, PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL, MSA_DOUBLE_BUFFER_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_0, mask_sh),\
+ SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_1, mask_sh),\
+ SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_2, mask_sh),\
+ SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_3, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL, PIXEL_TO_SYMBOL_FIFO_RESET, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL, PIXEL_TO_SYMBOL_FIFO_RESET_DONE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL, PIXEL_TO_SYMBOL_FIFO_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL, VID_STREAM_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL, VID_STREAM_STATUS, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL, VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL, VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL, SDP_STREAM_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_PAYLOAD_SIZE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_TRANSMISSION_LINE_NUMBER, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_TRANSMISSION_LINE_NUMBER, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_SOF_REFERENCE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, METADATA_PACKET_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, AUDIO_MUTE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ASP_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ATP_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, AIP_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ACM_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_CONT_MODE_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_HBLANK_CONTROL, HBLANK_MINIMUM_SYMBOL_WIDTH, mask_sh)
+
+
+#define DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(type) \
+ type DP_STREAM_LINK_TARGET;\
+ type DP_STREAM_ENC_CLOCK_EN;\
+ type DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL;\
+ type DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL;\
+ type FIFO_RESET;\
+ type FIFO_RESET_DONE;\
+ type FIFO_ENABLE;\
+ type DP_SYM32_ENC_RESET;\
+ type DP_SYM32_ENC_RESET_DONE;\
+ type DP_SYM32_ENC_ENABLE;\
+ type PIXEL_ENCODING_TYPE;\
+ type UNCOMPRESSED_PIXEL_ENCODING;\
+ type UNCOMPRESSED_COMPONENT_DEPTH;\
+ type PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE;\
+ type MSA_DOUBLE_BUFFER_ENABLE;\
+ type MSA_DATA_LANE_0;\
+ type MSA_DATA_LANE_1;\
+ type MSA_DATA_LANE_2;\
+ type MSA_DATA_LANE_3;\
+ type PIXEL_TO_SYMBOL_FIFO_RESET;\
+ type PIXEL_TO_SYMBOL_FIFO_RESET_DONE;\
+ type PIXEL_TO_SYMBOL_FIFO_ENABLE;\
+ type VID_STREAM_ENABLE;\
+ type VID_STREAM_STATUS;\
+ type VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE;\
+ type VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER;\
+ type SDP_STREAM_ENABLE;\
+ type AUDIO_MUTE;\
+ type ASP_ENABLE;\
+ type ATP_ENABLE;\
+ type AIP_ENABLE;\
+ type ACM_ENABLE;\
+ type GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE;\
+ type GSP_PAYLOAD_SIZE;\
+ type GSP_TRANSMISSION_LINE_NUMBER;\
+ type GSP_SOF_REFERENCE;\
+ type METADATA_PACKET_ENABLE;\
+ type CRC_ENABLE;\
+ type CRC_CONT_MODE_ENABLE;\
+ type HBLANK_MINIMUM_SYMBOL_WIDTH
+
+
+struct dcn31_hpo_dp_stream_encoder_registers {
+ DCN3_1_HPO_DP_STREAM_ENC_REGS;
+};
+
+struct dcn31_hpo_dp_stream_encoder_shift {
+ DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn31_hpo_dp_stream_encoder_mask {
+ DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(uint32_t);
+};
+
+struct dcn31_hpo_dp_stream_encoder {
+ struct hpo_dp_stream_encoder base;
+ const struct dcn31_hpo_dp_stream_encoder_registers *regs;
+ const struct dcn31_hpo_dp_stream_encoder_shift *hpo_se_shift;
+ const struct dcn31_hpo_dp_stream_encoder_mask *hpo_se_mask;
+};
+
+
+void dcn31_hpo_dp_stream_encoder_construct(
+ struct dcn31_hpo_dp_stream_encoder *enc3,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ uint32_t inst,
+ enum engine_id eng_id,
+ struct vpg *vpg,
+ struct apg *apg,
+ const struct dcn31_hpo_dp_stream_encoder_registers *regs,
+ const struct dcn31_hpo_dp_stream_encoder_shift *hpo_se_shift,
+ const struct dcn31_hpo_dp_stream_encoder_mask *hpo_se_mask);
+
+
+#endif // __DAL_DCN31_HPO_STREAM_ENCODER_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
new file mode 100644
index 000000000..1f4e0b626
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
@@ -0,0 +1,1067 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "dcn30/dcn30_hubbub.h"
+#include "dcn31_hubbub.h"
+#include "dm_services.h"
+#include "reg_helper.h"
+
+
+#define CTX \
+ hubbub2->base.ctx
+#define DC_LOGGER \
+ hubbub2->base.ctx->logger
+#define REG(reg)\
+ hubbub2->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hubbub2->shifts->field_name, hubbub2->masks->field_name
+
+#ifdef NUM_VMID
+#undef NUM_VMID
+#endif
+#define NUM_VMID 16
+
+#define DCN31_CRB_SEGMENT_SIZE_KB 64
+
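+/* Cache the current DET and compressed buffer allocations and program the compressed buffer reserved space */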
+static void dcn31_init_crb(struct hubbub *hubbub)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+
+ REG_GET(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT,
+ &hubbub2->det0_size);
+
+ REG_GET(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT,
+ &hubbub2->det1_size);
+
+ REG_GET(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT,
+ &hubbub2->det2_size);
+
+ REG_GET(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT,
+ &hubbub2->det3_size);
+
+ REG_GET(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT,
+ &hubbub2->compbuf_size_segments);
+
+ REG_SET_2(COMPBUF_RESERVED_SPACE, 0,
+ COMPBUF_RESERVED_SPACE_64B, hubbub2->pixel_chunk_size / 32,
+ COMPBUF_RESERVED_SPACE_ZS, hubbub2->pixel_chunk_size / 128);
+ REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x17F);
+}
+
+static void dcn31_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+
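+	/* Round the requested DET size up to a whole number of 64 KB CRB segments */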
+ unsigned int det_size_segments = (det_buffer_size_in_kbyte + DCN31_CRB_SEGMENT_SIZE_KB - 1) / DCN31_CRB_SEGMENT_SIZE_KB;
+
+ switch (hubp_inst) {
+ case 0:
+ REG_UPDATE(DCHUBBUB_DET0_CTRL,
+ DET0_SIZE, det_size_segments);
+ hubbub2->det0_size = det_size_segments;
+ break;
+ case 1:
+ REG_UPDATE(DCHUBBUB_DET1_CTRL,
+ DET1_SIZE, det_size_segments);
+ hubbub2->det1_size = det_size_segments;
+ break;
+ case 2:
+ REG_UPDATE(DCHUBBUB_DET2_CTRL,
+ DET2_SIZE, det_size_segments);
+ hubbub2->det2_size = det_size_segments;
+ break;
+ case 3:
+ REG_UPDATE(DCHUBBUB_DET3_CTRL,
+ DET3_SIZE, det_size_segments);
+ hubbub2->det3_size = det_size_segments;
+ break;
+ default:
+ break;
+ }
+ DC_LOG_DEBUG("Set DET%d to %d segments\n", hubp_inst, det_size_segments);
+	/* Should never be hit; if it is, the HW config is erroneous */
+ ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
+ + hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs);
+}
+
+static void dcn31_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+ unsigned int compbuf_size_segments = (compbuf_size_kb + DCN31_CRB_SEGMENT_SIZE_KB - 1) / DCN31_CRB_SEGMENT_SIZE_KB;
+
+ if (safe_to_increase || compbuf_size_segments <= hubbub2->compbuf_size_segments) {
+ if (compbuf_size_segments > hubbub2->compbuf_size_segments) {
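+			/* Before growing the compressed buffer, wait for the DETs to reach their programmed sizes */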
+ REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1, 100);
+ REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1, 100);
+ REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1, 100);
+ REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1, 100);
+ }
+		/* Should never be hit; if it is, the HW config is erroneous */
+ ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
+ + hubbub2->det3_size + compbuf_size_segments <= hubbub2->crb_size_segs);
+ REG_UPDATE(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, compbuf_size_segments);
+ hubbub2->compbuf_size_segments = compbuf_size_segments;
+ ASSERT(REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, &compbuf_size_segments) && !compbuf_size_segments);
+ }
+}
+
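+/* Convert a watermark from nanoseconds to refclk cycles, clamping to the given maximum */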
+static uint32_t convert_and_clamp(
+ uint32_t wm_ns,
+ uint32_t refclk_mhz,
+ uint32_t clamp_value)
+{
+ uint32_t ret_val = 0;
+ ret_val = wm_ns * refclk_mhz;
+ ret_val /= 1000;
+
+ if (ret_val > clamp_value) {
+		/* Clamping watermarks is abnormal and unexpected; it may lead to underflow */
+ ASSERT(0);
+ ret_val = clamp_value;
+ }
+
+ return ret_val;
+}
+
+static bool hubbub31_program_urgent_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t prog_wm_value;
+ bool wm_pending = false;
+
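+	/* Program each watermark only when raising it, or when lowering is explicitly safe; otherwise report it as pending */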
+	/* Repeat for watermark sets A, B, C and D. */
+ /* clock state A */
+ if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) {
+ hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
+ refclk_mhz, 0x3fff);
+ REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
+
+ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.urgent_ns, prog_wm_value);
+ } else if (watermarks->a.urgent_ns < hubbub2->watermarks.a.urgent_ns)
+ wm_pending = true;
+
+	/* Determine the transfer time for a quantity of data for a particular requestor. */
+ if (safe_to_lower || watermarks->a.frac_urg_bw_flip
+ > hubbub2->watermarks.a.frac_urg_bw_flip) {
+ hubbub2->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;
+
+ REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
+ DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip);
+ } else if (watermarks->a.frac_urg_bw_flip
+ < hubbub2->watermarks.a.frac_urg_bw_flip)
+ wm_pending = true;
+
+ if (safe_to_lower || watermarks->a.frac_urg_bw_nom
+ > hubbub2->watermarks.a.frac_urg_bw_nom) {
+ hubbub2->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;
+
+ REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
+ DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
+ } else if (watermarks->a.frac_urg_bw_nom
+ < hubbub2->watermarks.a.frac_urg_bw_nom)
+ wm_pending = true;
+
+ if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) {
+ hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
+ refclk_mhz, 0x3fff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
+ } else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns)
+ wm_pending = true;
+
+ /* clock state B */
+ if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) {
+ hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
+ refclk_mhz, 0x3fff);
+ REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
+
+ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.urgent_ns, prog_wm_value);
+ } else if (watermarks->b.urgent_ns < hubbub2->watermarks.b.urgent_ns)
+ wm_pending = true;
+
+	/* Determine the transfer time for a quantity of data for a particular requestor. */
+ if (safe_to_lower || watermarks->b.frac_urg_bw_flip
+ > hubbub2->watermarks.b.frac_urg_bw_flip) {
+ hubbub2->watermarks.b.frac_urg_bw_flip = watermarks->b.frac_urg_bw_flip;
+
+ REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
+ DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->b.frac_urg_bw_flip);
+ } else if (watermarks->b.frac_urg_bw_flip
+ < hubbub2->watermarks.b.frac_urg_bw_flip)
+ wm_pending = true;
+
+ if (safe_to_lower || watermarks->b.frac_urg_bw_nom
+ > hubbub2->watermarks.b.frac_urg_bw_nom) {
+ hubbub2->watermarks.b.frac_urg_bw_nom = watermarks->b.frac_urg_bw_nom;
+
+ REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
+ DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->b.frac_urg_bw_nom);
+ } else if (watermarks->b.frac_urg_bw_nom
+ < hubbub2->watermarks.b.frac_urg_bw_nom)
+ wm_pending = true;
+
+ if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) {
+ hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
+ refclk_mhz, 0x3fff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
+ } else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns)
+ wm_pending = true;
+
+ /* clock state C */
+ if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) {
+ hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
+ refclk_mhz, 0x3fff);
+ REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
+
+ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.urgent_ns, prog_wm_value);
+ } else if (watermarks->c.urgent_ns < hubbub2->watermarks.c.urgent_ns)
+ wm_pending = true;
+
+	/* Determine the transfer time for a quantity of data for a particular requestor. */
+ if (safe_to_lower || watermarks->c.frac_urg_bw_flip
+ > hubbub2->watermarks.c.frac_urg_bw_flip) {
+ hubbub2->watermarks.c.frac_urg_bw_flip = watermarks->c.frac_urg_bw_flip;
+
+ REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0,
+ DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->c.frac_urg_bw_flip);
+ } else if (watermarks->c.frac_urg_bw_flip
+ < hubbub2->watermarks.c.frac_urg_bw_flip)
+ wm_pending = true;
+
+ if (safe_to_lower || watermarks->c.frac_urg_bw_nom
+ > hubbub2->watermarks.c.frac_urg_bw_nom) {
+ hubbub2->watermarks.c.frac_urg_bw_nom = watermarks->c.frac_urg_bw_nom;
+
+ REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0,
+ DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->c.frac_urg_bw_nom);
+ } else if (watermarks->c.frac_urg_bw_nom
+ < hubbub2->watermarks.c.frac_urg_bw_nom)
+ wm_pending = true;
+
+ if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) {
+ hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
+ refclk_mhz, 0x3fff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
+ } else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns)
+ wm_pending = true;
+
+ /* clock state D */
+ if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) {
+ hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
+ refclk_mhz, 0x3fff);
+ REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
+
+ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.urgent_ns, prog_wm_value);
+ } else if (watermarks->d.urgent_ns < hubbub2->watermarks.d.urgent_ns)
+ wm_pending = true;
+
+	/* Determine the transfer time for a quantity of data for a particular requestor. */
+ if (safe_to_lower || watermarks->d.frac_urg_bw_flip
+ > hubbub2->watermarks.d.frac_urg_bw_flip) {
+ hubbub2->watermarks.d.frac_urg_bw_flip = watermarks->d.frac_urg_bw_flip;
+
+ REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0,
+ DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->d.frac_urg_bw_flip);
+ } else if (watermarks->d.frac_urg_bw_flip
+ < hubbub2->watermarks.d.frac_urg_bw_flip)
+ wm_pending = true;
+
+ if (safe_to_lower || watermarks->d.frac_urg_bw_nom
+ > hubbub2->watermarks.d.frac_urg_bw_nom) {
+ hubbub2->watermarks.d.frac_urg_bw_nom = watermarks->d.frac_urg_bw_nom;
+
+ REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
+ DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->d.frac_urg_bw_nom);
+ } else if (watermarks->d.frac_urg_bw_nom
+ < hubbub2->watermarks.d.frac_urg_bw_nom)
+ wm_pending = true;
+
+ if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) {
+ hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
+ refclk_mhz, 0x3fff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
+ } else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns)
+ wm_pending = true;
+
+ return wm_pending;
+}
+
+static bool hubbub31_program_stutter_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t prog_wm_value;
+ bool wm_pending = false;
+
+ /* clock state A */
+ if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
+ > hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
+ hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
+
+ if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
+ > hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns) {
+ hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns =
+ watermarks->a.cstate_pstate.cstate_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ } else if (watermarks->a.cstate_pstate.cstate_exit_ns
+ < hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
+
+ if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns
+ > hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns) {
+ hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns =
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns,
+ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value);
+ } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns
+ < hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns)
+ wm_pending = true;
+
+ if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_z8_ns
+ > hubbub2->watermarks.a.cstate_pstate.cstate_exit_z8_ns) {
+ hubbub2->watermarks.a.cstate_pstate.cstate_exit_z8_ns =
+ watermarks->a.cstate_pstate.cstate_exit_z8_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.cstate_exit_z8_ns,
+ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.cstate_pstate.cstate_exit_z8_ns, prog_wm_value);
+ } else if (watermarks->a.cstate_pstate.cstate_exit_z8_ns
+ < hubbub2->watermarks.a.cstate_pstate.cstate_exit_z8_ns)
+ wm_pending = true;
+
+ /* clock state B */
+ if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
+ > hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
+ hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
+
+ if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
+ > hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns) {
+ hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns =
+ watermarks->b.cstate_pstate.cstate_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ } else if (watermarks->b.cstate_pstate.cstate_exit_ns
+ < hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
+
+ if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns
+ > hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns) {
+ hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns =
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns,
+ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value);
+ } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns
+ < hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns)
+ wm_pending = true;
+
+ if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_z8_ns
+ > hubbub2->watermarks.b.cstate_pstate.cstate_exit_z8_ns) {
+ hubbub2->watermarks.b.cstate_pstate.cstate_exit_z8_ns =
+ watermarks->b.cstate_pstate.cstate_exit_z8_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.cstate_exit_z8_ns,
+ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.cstate_pstate.cstate_exit_z8_ns, prog_wm_value);
+ } else if (watermarks->b.cstate_pstate.cstate_exit_z8_ns
+ < hubbub2->watermarks.b.cstate_pstate.cstate_exit_z8_ns)
+ wm_pending = true;
+
+ /* clock state C */
+ if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
+ > hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
+ hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
+
+ if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
+ > hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns) {
+ hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns =
+ watermarks->c.cstate_pstate.cstate_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ } else if (watermarks->c.cstate_pstate.cstate_exit_ns
+ < hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
+
+ if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns
+ > hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns) {
+ hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns =
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns,
+ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value);
+ } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns
+ < hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns)
+ wm_pending = true;
+
+ if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_z8_ns
+ > hubbub2->watermarks.c.cstate_pstate.cstate_exit_z8_ns) {
+ hubbub2->watermarks.c.cstate_pstate.cstate_exit_z8_ns =
+ watermarks->c.cstate_pstate.cstate_exit_z8_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.cstate_exit_z8_ns,
+ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.cstate_pstate.cstate_exit_z8_ns, prog_wm_value);
+ } else if (watermarks->c.cstate_pstate.cstate_exit_z8_ns
+ < hubbub2->watermarks.c.cstate_pstate.cstate_exit_z8_ns)
+ wm_pending = true;
+
+ /* clock state D */
+ if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
+ > hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
+ hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
+
+ if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
+ > hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns) {
+ hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns =
+ watermarks->d.cstate_pstate.cstate_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ } else if (watermarks->d.cstate_pstate.cstate_exit_ns
+ < hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
+
+ if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns
+ > hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns) {
+ hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns =
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns,
+ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value);
+ } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns
+ < hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns)
+ wm_pending = true;
+
+ if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_z8_ns
+ > hubbub2->watermarks.d.cstate_pstate.cstate_exit_z8_ns) {
+ hubbub2->watermarks.d.cstate_pstate.cstate_exit_z8_ns =
+ watermarks->d.cstate_pstate.cstate_exit_z8_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.cstate_exit_z8_ns,
+ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.cstate_pstate.cstate_exit_z8_ns, prog_wm_value);
+ } else if (watermarks->d.cstate_pstate.cstate_exit_z8_ns
+ < hubbub2->watermarks.d.cstate_pstate.cstate_exit_z8_ns)
+ wm_pending = true;
+
+ return wm_pending;
+}
+
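+/* Program the DRAM clock change (p-state) allow watermarks for clock states
+ * A-D, following the same safe_to_lower/pending convention as the urgent and
+ * stutter watermark helpers above.
+ */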
+static bool hubbub31_program_pstate_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t prog_wm_value;
+
+ bool wm_pending = false;
+
+ /* clock state A */
+ if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
+ > hubbub2->watermarks.a.cstate_pstate.pstate_change_ns) {
+ hubbub2->watermarks.a.cstate_pstate.pstate_change_ns =
+ watermarks->a.cstate_pstate.pstate_change_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
+ } else if (watermarks->a.cstate_pstate.pstate_change_ns
+ < hubbub2->watermarks.a.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
+
+ /* clock state B */
+ if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
+ > hubbub2->watermarks.b.cstate_pstate.pstate_change_ns) {
+ hubbub2->watermarks.b.cstate_pstate.pstate_change_ns =
+ watermarks->b.cstate_pstate.pstate_change_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
+ } else if (watermarks->b.cstate_pstate.pstate_change_ns
+ < hubbub2->watermarks.b.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
+
+ /* clock state C */
+ if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
+ > hubbub2->watermarks.c.cstate_pstate.pstate_change_ns) {
+ hubbub2->watermarks.c.cstate_pstate.pstate_change_ns =
+ watermarks->c.cstate_pstate.pstate_change_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
+ } else if (watermarks->c.cstate_pstate.pstate_change_ns
+ < hubbub2->watermarks.c.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
+
+ /* clock state D */
+ if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
+ > hubbub2->watermarks.d.cstate_pstate.pstate_change_ns) {
+ hubbub2->watermarks.d.cstate_pstate.pstate_change_ns =
+ watermarks->d.cstate_pstate.pstate_change_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0xffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
+ } else if (watermarks->d.cstate_pstate.pstate_change_ns
+ < hubbub2->watermarks.d.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
+
+ return wm_pending;
+}
+
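+/* Top-level watermark programming entry point: run the urgent, stutter and
+ * p-state helpers, re-apply the self-refresh control based on the
+ * disable_stutter debug flag, and report whether any watermark is still
+ * pending.
+ */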
+static bool hubbub31_program_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower)
+{
+ bool wm_pending = false;
+
+ if (hubbub31_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ if (hubbub31_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ if (hubbub31_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ /*
+ * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
+ * If the memory controller is fully utilized and the DCHub requestors are
+ * well ahead of their amortized schedule, then it is safe to prevent the next winner
+ * from being committed and sent to the fabric.
+ * The utilization of the memory controller is approximated by ensuring that
+ * the number of outstanding requests is greater than a threshold specified
+ * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule,
+ * the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles.
+ *
+ * TODO: Revisit the request limit once the right number is known. The request
+ * limit for RM isn't decided yet, so set the maximum value (0x1FF) to turn it
+ * off for now.
+ */
+ /*REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
+ DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
+ REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
+ DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/
+
+ hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ return wm_pending;
+}
+
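+/* Return the dimensions of a 256-byte block for the given element size,
+ * e.g. 4 bytes per element (32bpp) maps to an 8x8 block: 8 * 8 * 4 = 256.
+ */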
+static void hubbub3_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
+ unsigned int bytes_per_element)
+{
+ /* copied from DML; might want to refactor to leverage DML directly */
+ /* DML : get_blk256_size */
+ if (bytes_per_element == 1) {
+ *blk256_width = 16;
+ *blk256_height = 16;
+ } else if (bytes_per_element == 2) {
+ *blk256_width = 16;
+ *blk256_height = 8;
+ } else if (bytes_per_element == 4) {
+ *blk256_width = 8;
+ *blk256_height = 8;
+ } else if (bytes_per_element == 8) {
+ *blk256_width = 8;
+ *blk256_height = 4;
+ }
+}
+
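+/* Decide between full 256-byte and half 128-byte requests per scan direction:
+ * if two swaths worth of data do not fit in the detile buffer, fall back to
+ * 128-byte requests for that direction.
+ */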
+static void hubbub31_det_request_size(
+ unsigned int detile_buf_size,
+ unsigned int height,
+ unsigned int width,
+ unsigned int bpe,
+ bool *req128_horz_wc,
+ bool *req128_vert_wc)
+{
+ unsigned int blk256_height = 0;
+ unsigned int blk256_width = 0;
+ unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
+
+ hubbub3_get_blk256_size(&blk256_width, &blk256_height, bpe);
+
+ swath_bytes_horz_wc = width * blk256_height * bpe;
+ swath_bytes_vert_wc = height * blk256_width * bpe;
+
+ *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
+ false : /* full 256B request */
+ true; /* half 128b request */
+
+ *req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
+ false : /* full 256B request */
+ true; /* half 128b request */
+}
+
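+/* Report the DCC compression capability of a surface: derive bytes per
+ * element and segment ordering from the format and swizzle mode, pick the
+ * request/compressed block size combination the detile buffer can sustain,
+ * and translate it into the block sizes exposed to the client.
+ */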
+static bool hubbub31_get_dcc_compression_cap(struct hubbub *hubbub,
+ const struct dc_dcc_surface_param *input,
+ struct dc_surface_dcc_cap *output)
+{
+ struct dc *dc = hubbub->ctx->dc;
+ enum dcc_control dcc_control;
+ unsigned int bpe;
+ enum segment_order segment_order_horz, segment_order_vert;
+ bool req128_horz_wc, req128_vert_wc;
+
+ memset(output, 0, sizeof(*output));
+
+ if (dc->debug.disable_dcc == DCC_DISABLE)
+ return false;
+
+ if (!hubbub->funcs->dcc_support_pixel_format(input->format,
+ &bpe))
+ return false;
+
+ if (!hubbub->funcs->dcc_support_swizzle(input->swizzle_mode, bpe,
+ &segment_order_horz, &segment_order_vert))
+ return false;
+
+ hubbub31_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size,
+ input->surface_size.height, input->surface_size.width,
+ bpe, &req128_horz_wc, &req128_vert_wc);
+
+ if (!req128_horz_wc && !req128_vert_wc) {
+ dcc_control = dcc_control__256_256_xxx;
+ } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
+ if (!req128_horz_wc)
+ dcc_control = dcc_control__256_256_xxx;
+ else if (segment_order_horz == segment_order__contiguous)
+ dcc_control = dcc_control__128_128_xxx;
+ else
+ dcc_control = dcc_control__256_64_64;
+ } else if (input->scan == SCAN_DIRECTION_VERTICAL) {
+ if (!req128_vert_wc)
+ dcc_control = dcc_control__256_256_xxx;
+ else if (segment_order_vert == segment_order__contiguous)
+ dcc_control = dcc_control__128_128_xxx;
+ else
+ dcc_control = dcc_control__256_64_64;
+ } else {
+ if ((req128_horz_wc &&
+ segment_order_horz == segment_order__non_contiguous) ||
+ (req128_vert_wc &&
+ segment_order_vert == segment_order__non_contiguous))
+ /* access_dir not known, must use most constraining */
+ dcc_control = dcc_control__256_64_64;
+ else
+ /* req128 is true for either horz or vert,
+ * but segment_order is contiguous
+ */
+ dcc_control = dcc_control__128_128_xxx;
+ }
+
+ /* Exception for 64KB_R_X */
+ if ((bpe == 2) && (input->swizzle_mode == DC_SW_64KB_R_X))
+ dcc_control = dcc_control__128_128_xxx;
+
+ if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
+ dcc_control != dcc_control__256_256_xxx)
+ return false;
+
+ switch (dcc_control) {
+ case dcc_control__256_256_xxx:
+ output->grph.rgb.max_uncompressed_blk_size = 256;
+ output->grph.rgb.max_compressed_blk_size = 256;
+ output->grph.rgb.independent_64b_blks = false;
+ output->grph.rgb.dcc_controls.dcc_256_256_unconstrained = 1;
+ output->grph.rgb.dcc_controls.dcc_256_128_128 = 1;
+ break;
+ case dcc_control__128_128_xxx:
+ output->grph.rgb.max_uncompressed_blk_size = 128;
+ output->grph.rgb.max_compressed_blk_size = 128;
+ output->grph.rgb.independent_64b_blks = false;
+ output->grph.rgb.dcc_controls.dcc_128_128_uncontrained = 1;
+ output->grph.rgb.dcc_controls.dcc_256_128_128 = 1;
+ break;
+ case dcc_control__256_64_64:
+ output->grph.rgb.max_uncompressed_blk_size = 256;
+ output->grph.rgb.max_compressed_blk_size = 64;
+ output->grph.rgb.independent_64b_blks = true;
+ output->grph.rgb.dcc_controls.dcc_256_64_64 = 1;
+ break;
+ case dcc_control__256_128_128:
+ output->grph.rgb.max_uncompressed_blk_size = 256;
+ output->grph.rgb.max_compressed_blk_size = 128;
+ output->grph.rgb.independent_64b_blks = false;
+ output->grph.rgb.dcc_controls.dcc_256_128_128 = 1;
+ break;
+ }
+ output->capable = true;
+ output->const_color_support = true;
+
+ return true;
+}
+
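+/* Program the system aperture (frame buffer and AGP ranges, in 16 MB units
+ * via the >> 24 shifts) and, when a GART range is provided, set up the VMID 0
+ * (and VMID 15) page tables before kicking off DCHVM init.
+ */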
+int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,
+ struct dcn_hubbub_phys_addr_config *pa_config)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+ struct dcn_vmid_page_table_config phys_config;
+
+ REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
+ FB_BASE, pa_config->system_aperture.fb_base >> 24);
+ REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
+ FB_TOP, pa_config->system_aperture.fb_top >> 24);
+ REG_SET(DCN_VM_FB_OFFSET, 0,
+ FB_OFFSET, pa_config->system_aperture.fb_offset >> 24);
+ REG_SET(DCN_VM_AGP_BOT, 0,
+ AGP_BOT, pa_config->system_aperture.agp_bot >> 24);
+ REG_SET(DCN_VM_AGP_TOP, 0,
+ AGP_TOP, pa_config->system_aperture.agp_top >> 24);
+ REG_SET(DCN_VM_AGP_BASE, 0,
+ AGP_BASE, pa_config->system_aperture.agp_base >> 24);
+
+ if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
+ phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
+ phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
+ phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;
+ phys_config.depth = 0;
+ phys_config.block_size = 0;
+ // Init VMID 0 based on PA config
+ dcn20_vmid_setup(&hubbub2->vmid[0], &phys_config);
+
+ dcn20_vmid_setup(&hubbub2->vmid[15], &phys_config);
+ }
+
+ dcn21_dchvm_init(hubbub);
+
+ return NUM_VMID;
+}
+
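+/* Derive the DCHUB/DLG reference clock from the fixed 24 MHz DC refclk:
+ * honor the optional divide-by-2 from the global timer REFDIV field and
+ * assert if the result falls outside the 20-50 MHz range or the global timer
+ * is disabled.
+ */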
+static void hubbub31_get_dchub_ref_freq(struct hubbub *hubbub,
+ unsigned int dccg_ref_freq_inKhz,
+ unsigned int *dchub_ref_freq_inKhz)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t ref_div = 0;
+ uint32_t ref_en = 0;
+ unsigned int dc_refclk_khz = 24000;
+
+ REG_GET_2(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, &ref_div,
+ DCHUBBUB_GLOBAL_TIMER_ENABLE, &ref_en);
+
+ if (ref_en) {
+ if (ref_div == 2)
+ *dchub_ref_freq_inKhz = dc_refclk_khz / 2;
+ else
+ *dchub_ref_freq_inKhz = dc_refclk_khz;
+
+ /*
+ * The external Reference Clock may change based on the board or
+ * platform requirements and the programmable integer divide must
+ * be programmed to provide a suitable DLG RefClk frequency between
+ * a minimum of 20MHz and maximum of 50MHz
+ */
+ if (*dchub_ref_freq_inKhz < 20000 || *dchub_ref_freq_inKhz > 50000)
+ ASSERT_CRITICAL(false);
+
+ return;
+ } else {
+ *dchub_ref_freq_inKhz = dc_refclk_khz;
+
+ // HUBBUB global timer must be enabled.
+ ASSERT_CRITICAL(false);
+ return;
+ }
+}
+
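+/* Poll the HUBBUB debug bus for the pstate-allow indication. If it does not
+ * assert within the timeout, force pstate allow to avoid a system hang, log
+ * the debug data and return false so the caller can investigate.
+ */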
+static bool hubbub31_verify_allow_pstate_change_high(struct hubbub *hubbub)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+
+ /*
+ * Pstate latency is ~20us so if we wait over 40us and pstate allow
+ * still not asserted, we are probably stuck and going to hang
+ */
+ const unsigned int pstate_wait_timeout_us = 100;
+ const unsigned int pstate_wait_expected_timeout_us = 40;
+
+ static unsigned int max_sampled_pstate_wait_us; /* data collection */
+ static bool forced_pstate_allow; /* help with revert wa */
+
+ unsigned int debug_data = 0;
+ unsigned int i;
+
+ if (forced_pstate_allow) {
+ /* pstate allow was forced on the previous call to
+ * verify_allow_pstate_change_high() to prevent a hang, so disable
+ * the force here so the status can be checked again
+ */
+ REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
+ DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0,
+ DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0);
+ forced_pstate_allow = false;
+ }
+
+ REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub2->debug_test_index_pstate);
+
+ for (i = 0; i < pstate_wait_timeout_us; i++) {
+ debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA);
+
+ /* Debug bit is specific to ASIC. */
+ if (debug_data & (1 << 26)) {
+ if (i > pstate_wait_expected_timeout_us)
+ DC_LOG_WARNING("pstate took longer than expected ~%dus\n", i);
+ return true;
+ }
+ if (max_sampled_pstate_wait_us < i)
+ max_sampled_pstate_wait_us = i;
+
+ udelay(1);
+ }
+
+ /* force pstate allow to prevent system hang
+ * and break to debugger to investigate
+ */
+ REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
+ DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1,
+ DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1);
+ forced_pstate_allow = true;
+
+ DC_LOG_WARNING("pstate TEST_DEBUG_DATA: 0x%X\n",
+ debug_data);
+
+ return false;
+}
+
+void hubbub31_init(struct hubbub *hubbub)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+
+ /* Disable clock gating when the debug option requests it */
+ if (hubbub->ctx->dc->debug.disable_clock_gate) {
+ /*done in hwseq*/
+ /*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/
+ REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL,
+ DISPCLK_R_DCHUBBUB_GATE_DIS, 1,
+ DCFCLK_R_DCHUBBUB_GATE_DIS, 1);
+ }
+
+ /* only the DCN will determine when to connect the SDP port */
+ REG_UPDATE(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, 1);
+}
+
+static const struct hubbub_funcs hubbub31_funcs = {
+ .update_dchub = hubbub2_update_dchub,
+ .init_dchub_sys_ctx = hubbub31_init_dchub_sys_ctx,
+ .init_vm_ctx = hubbub2_init_vm_ctx,
+ .dcc_support_swizzle = hubbub3_dcc_support_swizzle,
+ .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
+ .get_dcc_compression_cap = hubbub31_get_dcc_compression_cap,
+ .wm_read_state = hubbub21_wm_read_state,
+ .get_dchub_ref_freq = hubbub31_get_dchub_ref_freq,
+ .program_watermarks = hubbub31_program_watermarks,
+ .allow_self_refresh_control = hubbub1_allow_self_refresh_control,
+ .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
+ .verify_allow_pstate_change_high = hubbub31_verify_allow_pstate_change_high,
+ .program_det_size = dcn31_program_det_size,
+ .program_compbuf_size = dcn31_program_compbuf_size,
+ .init_crb = dcn31_init_crb,
+ .hubbub_read_state = hubbub2_read_state,
+};
+
+void hubbub31_construct(struct dcn20_hubbub *hubbub31,
+ struct dc_context *ctx,
+ const struct dcn_hubbub_registers *hubbub_regs,
+ const struct dcn_hubbub_shift *hubbub_shift,
+ const struct dcn_hubbub_mask *hubbub_mask,
+ int det_size_kb,
+ int pixel_chunk_size_kb,
+ int config_return_buffer_size_kb)
+{
+
+ hubbub3_construct(hubbub31, ctx, hubbub_regs, hubbub_shift, hubbub_mask);
+ hubbub31->base.funcs = &hubbub31_funcs;
+ hubbub31->detile_buf_size = det_size_kb * 1024;
+ hubbub31->pixel_chunk_size = pixel_chunk_size_kb * 1024;
+ hubbub31->crb_size_segs = config_return_buffer_size_kb / DCN31_CRB_SEGMENT_SIZE_KB;
+
+ hubbub31->debug_test_index_pstate = 0x6;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h
new file mode 100644
index 000000000..89d620828
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HUBBUB_DCN31_H__
+#define __DC_HUBBUB_DCN31_H__
+
+#include "dcn21/dcn21_hubbub.h"
+
+#define HUBBUB_REG_LIST_DCN31(id)\
+ HUBBUB_REG_LIST_DCN30(id),\
+ SR(DCHVM_CTRL0),\
+ SR(DCHVM_MEM_CTRL),\
+ SR(DCHVM_CLK_CTRL),\
+ SR(DCHVM_RIOMMU_CTRL0),\
+ SR(DCHVM_RIOMMU_STAT0),\
+ SR(DCHUBBUB_DET0_CTRL),\
+ SR(DCHUBBUB_DET1_CTRL),\
+ SR(DCHUBBUB_DET2_CTRL),\
+ SR(DCHUBBUB_DET3_CTRL),\
+ SR(DCHUBBUB_COMPBUF_CTRL),\
+ SR(COMPBUF_RESERVED_SPACE),\
+ SR(DCHUBBUB_DEBUG_CTRL_0),\
+ SR(DCHUBBUB_CLOCK_CNTL),\
+ SR(DCHUBBUB_SDPIF_CFG0),\
+ SR(DCHUBBUB_SDPIF_CFG1),\
+ SR(DCHUBBUB_MEM_PWR_MODE_CTRL),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D)
+
+#define HUBBUB_MASK_SH_LIST_DCN31(mask_sh)\
+ HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \
+ HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \
+ HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
+ HUBBUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE, mask_sh), \
+ HUBBUB_SF(DCN_VM_FB_LOCATION_TOP, FB_TOP, mask_sh), \
+ HUBBUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET, mask_sh), \
+ HUBBUB_SF(DCN_VM_AGP_BOT, AGP_BOT, mask_sh), \
+ HUBBUB_SF(DCN_VM_AGP_TOP, AGP_TOP, mask_sh), \
+ HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh), \
+ HUBBUB_SF(DCHVM_CTRL0, HOSTVM_INIT_REQ, mask_sh),\
+ HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_PWR_REQ_DIS, mask_sh),\
+ HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_FORCE_REQ, mask_sh),\
+ HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_POWER_STATUS, mask_sh),\
+ HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DISPCLK_R_GATE_DIS, mask_sh),\
+ HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DISPCLK_G_GATE_DIS, mask_sh),\
+ HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DCFCLK_R_GATE_DIS, mask_sh),\
+ HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DCFCLK_G_GATE_DIS, mask_sh),\
+ HUBBUB_SF(DCHVM_CLK_CTRL, TR_REQ_REQCLKREQ_MODE, mask_sh),\
+ HUBBUB_SF(DCHVM_CLK_CTRL, TW_RSP_COMPCLKREQ_MODE, mask_sh),\
+ HUBBUB_SF(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, mask_sh),\
+ HUBBUB_SF(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, mask_sh),\
+ HUBBUB_SF(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, mask_sh),\
+ HUBBUB_SF(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_DET0_CTRL, DET0_SIZE, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_DET1_CTRL, DET1_SIZE, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_DET2_CTRL, DET2_SIZE, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_DET3_CTRL, DET3_SIZE, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, mask_sh),\
+ HUBBUB_SF(COMPBUF_RESERVED_SPACE, COMPBUF_RESERVED_SPACE_64B, mask_sh),\
+ HUBBUB_SF(COMPBUF_RESERVED_SPACE, COMPBUF_RESERVED_SPACE_ZS, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_ADDR_MSB, DCN_VM_FAULT_ADDR_MSB, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_ADDR_LSB, DCN_VM_FAULT_ADDR_LSB, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_CLEAR, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_MODE, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_INTERRUPT_ENABLE, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_RANGE_FAULT_DISABLE, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_PRQ_FAULT_DISABLE, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_STATUS, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DISPCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DCFCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_MEM_PWR_MODE_CTRL, DET_MEM_PWR_LS_MODE, mask_sh)
+
+int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,
+ struct dcn_hubbub_phys_addr_config *pa_config);
+
+void hubbub31_init(struct hubbub *hubbub);
+
+void hubbub31_construct(struct dcn20_hubbub *hubbub3,
+ struct dc_context *ctx,
+ const struct dcn_hubbub_registers *hubbub_regs,
+ const struct dcn_hubbub_shift *hubbub_shift,
+ const struct dcn_hubbub_mask *hubbub_mask,
+ int det_size_kb,
+ int pixel_chunk_size_kb,
+ int config_return_buffer_size_kb);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c
new file mode 100644
index 000000000..39a57bcd7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2012-20 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dce_calcs.h"
+#include "reg_helper.h"
+#include "basics/conversion.h"
+#include "dcn31_hubp.h"
+
+#define REG(reg)\
+ hubp2->hubp_regs->reg
+
+#define CTX \
+ hubp2->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hubp2->hubp_shift->field_name, hubp2->hubp_mask->field_name
+
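+/* Enable or disable unbounded request mode for this HUBP and switch the
+ * cursor request mode along with it. Unbounded requesting generally allows a
+ * lone active pipe to use the whole return buffer instead of its fixed DET
+ * allocation.
+ */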
+void hubp31_set_unbounded_requesting(struct hubp *hubp, bool enable)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE(DCHUBP_CNTL, HUBP_UNBOUNDED_REQ_MODE, enable);
+ REG_UPDATE(CURSOR_CONTROL, CURSOR_REQ_MODE, enable);
+}
+
+void hubp31_soft_reset(struct hubp *hubp, bool reset)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE(DCHUBP_CNTL, HUBP_SOFT_RESET, reset);
+}
+
+static void hubp31_program_extended_blank(struct hubp *hubp,
+ unsigned int min_dst_y_next_start_optimized)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE(BLANK_OFFSET_1, MIN_DST_Y_NEXT_START, min_dst_y_next_start_optimized);
+}
+
+static struct hubp_funcs dcn31_hubp_funcs = {
+ .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
+ .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
+ .hubp_program_surface_flip_and_addr = hubp3_program_surface_flip_and_addr,
+ .hubp_program_surface_config = hubp3_program_surface_config,
+ .hubp_is_flip_pending = hubp2_is_flip_pending,
+ .hubp_setup = hubp3_setup,
+ .hubp_setup_interdependent = hubp2_setup_interdependent,
+ .hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
+ .set_blank = hubp2_set_blank,
+ .dcc_control = hubp3_dcc_control,
+ .mem_program_viewport = min_set_viewport,
+ .set_cursor_attributes = hubp2_cursor_set_attributes,
+ .set_cursor_position = hubp2_cursor_set_position,
+ .hubp_clk_cntl = hubp2_clk_cntl,
+ .hubp_vtg_sel = hubp2_vtg_sel,
+ .dmdata_set_attributes = hubp3_dmdata_set_attributes,
+ .dmdata_load = hubp2_dmdata_load,
+ .dmdata_status_done = hubp2_dmdata_status_done,
+ .hubp_read_state = hubp3_read_state,
+ .hubp_clear_underflow = hubp2_clear_underflow,
+ .hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
+ .hubp_init = hubp3_init,
+ .set_unbounded_requesting = hubp31_set_unbounded_requesting,
+ .hubp_soft_reset = hubp31_soft_reset,
+ .hubp_set_flip_int = hubp1_set_flip_int,
+ .hubp_in_blank = hubp1_in_blank,
+ .program_extended_blank = hubp31_program_extended_blank,
+};
+
+bool hubp31_construct(
+ struct dcn20_hubp *hubp2,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn_hubp2_registers *hubp_regs,
+ const struct dcn_hubp2_shift *hubp_shift,
+ const struct dcn_hubp2_mask *hubp_mask)
+{
+ hubp2->base.funcs = &dcn31_hubp_funcs;
+ hubp2->base.ctx = ctx;
+ hubp2->hubp_regs = hubp_regs;
+ hubp2->hubp_shift = hubp_shift;
+ hubp2->hubp_mask = hubp_mask;
+ hubp2->base.inst = inst;
+ hubp2->base.opp_id = OPP_ID_INVALID;
+ hubp2->base.mpcc_id = 0xf;
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.h
new file mode 100644
index 000000000..c31a7b8f8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.h
@@ -0,0 +1,246 @@
+/*
+ * Copyright 2012-20 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HUBP_DCN31_H__
+#define __DC_HUBP_DCN31_H__
+
+#include "dcn20/dcn20_hubp.h"
+#include "dcn21/dcn21_hubp.h"
+#include "dcn30/dcn30_hubp.h"
+
+#define HUBP_MASK_SH_LIST_DCN31(mask_sh)\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, REFCYC_PER_VM_DMDATA, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_FAULT_STATUS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_FAULT_STATUS_CLEAR, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_UNDERFLOW_STATUS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_LATE_STATUS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_UNDERFLOW_STATUS_CLEAR, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_DONE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_CLEAR, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNBOUNDED_REQ_MODE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_IN_BLANK, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_SOFT_RESET, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PIPES, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, PIPE_INTERLEAVE, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, MAX_COMPRESSED_FRAGS, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PKRS, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, SW_MODE, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, META_LINEAR, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, PIPE_ALIGNED, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, META_PITCH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, PITCH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, META_PITCH_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_TYPE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_PENDING, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_UPDATE_LOCK, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START, PRI_VIEWPORT_X_START, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START, PRI_VIEWPORT_Y_START, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION, SEC_VIEWPORT_WIDTH, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION, SEC_VIEWPORT_HEIGHT, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START, SEC_VIEWPORT_X_START, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START, SEC_VIEWPORT_Y_START, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C, PRI_VIEWPORT_WIDTH_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C, PRI_VIEWPORT_HEIGHT_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_X_START_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_Y_START_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C, SEC_VIEWPORT_WIDTH_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C, SEC_VIEWPORT_HEIGHT_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START_C, SEC_VIEWPORT_X_START_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START_C, SEC_VIEWPORT_Y_START_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, PRIMARY_SURFACE_ADDRESS_HIGH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS, PRIMARY_SURFACE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, SECONDARY_SURFACE_ADDRESS_HIGH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS, SECONDARY_SURFACE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, PRIMARY_META_SURFACE_ADDRESS_HIGH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS, PRIMARY_META_SURFACE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, SECONDARY_META_SURFACE_ADDRESS_HIGH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS, SECONDARY_META_SURFACE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, PRIMARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C, PRIMARY_SURFACE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, SECONDARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C, SECONDARY_SURFACE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, PRIMARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, PRIMARY_META_SURFACE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, SECONDARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, SECONDARY_META_SURFACE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE, SURFACE_INUSE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH, SURFACE_INUSE_ADDRESS_HIGH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_C, SURFACE_INUSE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C, SURFACE_INUSE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE, SURFACE_EARLIEST_INUSE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH, SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C, SURFACE_EARLIEST_INUSE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C, SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_TMZ, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_TMZ_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_META_SURFACE_TMZ, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_META_SURFACE_TMZ_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_EN, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_IND_BLK, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_IND_BLK_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_TMZ, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_TMZ_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_META_SURFACE_TMZ, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_META_SURFACE_TMZ_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_EN, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_BLK, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_BLK_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_Y_G, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_ALPHA, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, PACK_3TO2_ELEMENT_DISABLE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_EXPANSION_MODE, DRQ_EXPANSION_MODE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_EXPANSION_MODE, PRQ_EXPANSION_MODE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_EXPANSION_MODE, MRQ_EXPANSION_MODE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_EXPANSION_MODE, CRQ_EXPANSION_MODE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, CHUNK_SIZE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MIN_CHUNK_SIZE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, META_CHUNK_SIZE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MIN_META_CHUNK_SIZE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, DPTE_GROUP_SIZE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, PTE_ROW_HEIGHT_LINEAR, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, CHUNK_SIZE_C, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MIN_CHUNK_SIZE_C, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, META_CHUNK_SIZE_C, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MIN_META_CHUNK_SIZE_C, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, DPTE_GROUP_SIZE_C, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, SWATH_HEIGHT_C, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, PTE_ROW_HEIGHT_LINEAR_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_BLANK_OFFSET_0, REFCYC_H_BLANK_END, mask_sh),\
+ HUBP_SF(HUBPREQ0_BLANK_OFFSET_0, DLG_V_BLANK_END, mask_sh),\
+ HUBP_SF(HUBPREQ0_BLANK_OFFSET_1, MIN_DST_Y_NEXT_START, mask_sh),\
+ HUBP_SF(HUBPREQ0_DST_DIMENSIONS, REFCYC_PER_HTOTAL, mask_sh),\
+ HUBP_SF(HUBPREQ0_DST_AFTER_SCALER, REFCYC_X_AFTER_SCALER, mask_sh),\
+ HUBP_SF(HUBPREQ0_DST_AFTER_SCALER, DST_Y_AFTER_SCALER, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_0, DST_Y_PER_VM_VBLANK, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_0, DST_Y_PER_ROW_VBLANK, mask_sh),\
+ HUBP_SF(HUBPREQ0_REF_FREQ_TO_PIX_FREQ, REF_FREQ_TO_PIX_FREQ, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_1, REFCYC_PER_PTE_GROUP_VBLANK_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_3, REFCYC_PER_META_CHUNK_VBLANK_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_4, DST_Y_PER_META_ROW_NOM_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_5, REFCYC_PER_META_CHUNK_NOM_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY_PRE, REFCYC_PER_LINE_DELIVERY_PRE_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY_PRE, REFCYC_PER_LINE_DELIVERY_PRE_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY, REFCYC_PER_LINE_DELIVERY_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY, REFCYC_PER_LINE_DELIVERY_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_2, REFCYC_PER_PTE_GROUP_VBLANK_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_4, REFCYC_PER_META_CHUNK_VBLANK_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_6, DST_Y_PER_META_ROW_NOM_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_7, REFCYC_PER_META_CHUNK_NOM_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_TTU_QOS_WM, QoS_LEVEL_LOW_WM, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_TTU_QOS_WM, QoS_LEVEL_HIGH_WM, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_GLOBAL_TTU_CNTL, MIN_TTU_VBLANK, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_GLOBAL_TTU_CNTL, QoS_LEVEL_FLIP, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_GLOBAL_TTU_CNTL, ROW_TTU_MODE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, REFCYC_PER_REQ_DELIVERY, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_LEVEL_FIXED, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_RAMP_DISABLE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\
+ HUBP_SF(HUBP0_HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh),\
+ HUBP_MASK_SH_LIST_DCN_VM(mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ROTATION_ANGLE, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, H_MIRROR_EN, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ALPHA_PLANE_EN, mask_sh),\
+ HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS, DST_Y_PREFETCH, mask_sh),\
+ HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS, VRATIO_PREFETCH, mask_sh),\
+ HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS_C, VRATIO_PREFETCH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR, MC_VM_SYSTEM_APERTURE_LOW_ADDR, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR, MC_VM_SYSTEM_APERTURE_HIGH_ADDR, mask_sh),\
+ HUBP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_DST_Y_OFFSET, mask_sh), \
+ HUBP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_CHUNK_HDL_ADJUST, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SIZE, CURSOR_WIDTH, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SIZE, CURSOR_HEIGHT, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_REQ_MODE, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_POSITION, CURSOR_X_POSITION, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_POSITION, CURSOR_Y_POSITION, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_ADDRESS_HIGH, DMDATA_ADDRESS_HIGH, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_MODE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_UPDATED, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_REPEAT, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_SIZE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_SW_CNTL, DMDATA_SW_UPDATED, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_SW_CNTL, DMDATA_SW_REPEAT, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_SW_CNTL, DMDATA_SW_SIZE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_QOS_CNTL, DMDATA_QOS_MODE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_QOS_CNTL, DMDATA_QOS_LEVEL, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_QOS_CNTL, DMDATA_DL_DELTA, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_STATUS, DMDATA_DONE, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_0, DST_Y_PER_VM_FLIP, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_0, DST_Y_PER_ROW_FLIP, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_1, REFCYC_PER_PTE_GROUP_FLIP_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_2, REFCYC_PER_META_CHUNK_FLIP_L, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE_STOP_DATA_DURING_VM, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, HUBPREQ_MASTER_UPDATE_LOCK_STATUS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL2, SURFACE_GSL_ENABLE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL2, SURFACE_TRIPLE_BUFFER_ENABLE, mask_sh),\
+ HUBP_SF(HUBPREQ0_VMID_SETTINGS_0, VMID, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_3, REFCYC_PER_VM_GROUP_FLIP, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_4, REFCYC_PER_VM_REQ_FLIP, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_5, REFCYC_PER_PTE_GROUP_FLIP_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_6, REFCYC_PER_META_CHUNK_FLIP_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_6, REFCYC_PER_VM_REQ_VBLANK, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh)
+
+
+bool hubp31_construct(
+ struct dcn20_hubp *hubp2,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn_hubp2_registers *hubp_regs,
+ const struct dcn_hubp2_shift *hubp_shift,
+ const struct dcn_hubp2_mask *hubp_mask);
+
+void hubp31_soft_reset(struct hubp *hubp, bool reset);
+
+void hubp31_set_unbounded_requesting(struct hubp *hubp, bool enable);
+
+#endif /* __DC_HUBP_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
new file mode 100644
index 000000000..2a7f47642
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -0,0 +1,603 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "dm_services.h"
+#include "dm_helpers.h"
+#include "core_types.h"
+#include "resource.h"
+#include "dccg.h"
+#include "dce/dce_hwseq.h"
+#include "clk_mgr.h"
+#include "reg_helper.h"
+#include "abm.h"
+#include "hubp.h"
+#include "dchubbub.h"
+#include "timing_generator.h"
+#include "opp.h"
+#include "ipp.h"
+#include "mpc.h"
+#include "mcif_wb.h"
+#include "dc_dmub_srv.h"
+#include "dcn31_hwseq.h"
+#include "link_hwss.h"
+#include "dpcd_defs.h"
+#include "dce/dmub_outbox.h"
+#include "link.h"
+#include "dcn10/dcn10_hw_sequencer.h"
+#include "inc/link_enc_cfg.h"
+#include "dcn30/dcn30_vpg.h"
+#include "dce/dce_i2c_hw.h"
+
+#define DC_LOGGER_INIT(logger)
+
+#define CTX \
+ hws->ctx
+#define REG(reg)\
+ hws->regs->reg
+#define DC_LOGGER \
+ dc->ctx->logger
+
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hws->shifts->field_name, hws->masks->field_name
+
+static void enable_memory_low_power(struct dc *dc)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ int i;
+
+ if (dc->debug.enable_mem_low_power.bits.dmcu) {
+ // Force ERAM to shutdown if DMCU is not enabled
+ if (dc->debug.disable_dmcu || dc->config.disable_dmcu) {
+ REG_UPDATE(DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, 3);
+ }
+ }
+
+ // Set default OPTC memory power states
+ if (dc->debug.enable_mem_low_power.bits.optc) {
+ // Shutdown when unassigned and light sleep in VBLANK
+ REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
+ }
+
+ if (dc->debug.enable_mem_low_power.bits.vga) {
+ // Power down VGA memory
+ REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
+ }
+
+ if (dc->debug.enable_mem_low_power.bits.mpc &&
+ dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode)
+ dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode(dc->res_pool->mpc);
+
+
+ if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) {
+ // Power down VPGs
+ for (i = 0; i < dc->res_pool->stream_enc_count; i++)
+ dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
+#if defined(CONFIG_DRM_AMD_DC_FP)
+ for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)
+ dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg);
+#endif
+ }
+
+}
+
+void dcn31_init_hw(struct dc *dc)
+{
+ struct abm **abms = dc->res_pool->multiple_abms;
+ struct dce_hwseq *hws = dc->hwseq;
+ struct dc_bios *dcb = dc->ctx->dc_bios;
+ struct resource_pool *res_pool = dc->res_pool;
+ uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+ int i;
+
+ if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
+ dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
+
+ if (!dcb->funcs->is_accelerated_mode(dcb)) {
+ hws->funcs.bios_golden_init(dc);
+ if (hws->funcs.disable_vga)
+ hws->funcs.disable_vga(dc->hwseq);
+ }
+ // Initialize the dccg
+ if (res_pool->dccg->funcs->dccg_init)
+ res_pool->dccg->funcs->dccg_init(res_pool->dccg);
+
+ enable_memory_low_power(dc);
+
+ if (dc->ctx->dc_bios->fw_info_valid) {
+ res_pool->ref_clocks.xtalin_clock_inKhz =
+ dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
+
+ if (res_pool->dccg && res_pool->hubbub) {
+
+ (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
+ dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
+ &res_pool->ref_clocks.dccg_ref_clock_inKhz);
+
+ (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
+ res_pool->ref_clocks.dccg_ref_clock_inKhz,
+ &res_pool->ref_clocks.dchub_ref_clock_inKhz);
+ } else {
+ // Not all ASICs have DCCG sw component
+ res_pool->ref_clocks.dccg_ref_clock_inKhz =
+ res_pool->ref_clocks.xtalin_clock_inKhz;
+ res_pool->ref_clocks.dchub_ref_clock_inKhz =
+ res_pool->ref_clocks.xtalin_clock_inKhz;
+ }
+ } else
+ ASSERT_CRITICAL(false);
+
+ for (i = 0; i < dc->link_count; i++) {
+ /* Power up AND update implementation according to the
+ * required signal (which may be different from the
+ * default signal on connector).
+ */
+ struct dc_link *link = dc->links[i];
+
+ if (link->ep_type != DISPLAY_ENDPOINT_PHY)
+ continue;
+
+ link->link_enc->funcs->hw_init(link->link_enc);
+
+ /* Check for enabled DIG to identify enabled display */
+ if (link->link_enc->funcs->is_dig_enabled &&
+ link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
+ link->link_status.link_active = true;
+ if (link->link_enc->funcs->fec_is_active &&
+ link->link_enc->funcs->fec_is_active(link->link_enc))
+ link->fec_state = dc_link_fec_enabled;
+ }
+ }
+
+ /* we want to turn off all dp displays before doing detection */
+ dc->link_srv->blank_all_dp_displays(dc);
+
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+
+ /* If taking control over from VBIOS, we may want to optimize our first
+ * mode set, so we need to skip powering down pipes until we know which
+ * pipes we want to use.
+ * Otherwise, if taking control is not possible, we need to power
+ * everything down.
+ */
+ if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
+
+ // we want to turn off eDP displays if ODM is enabled and there is no seamless boot
+ if (!dc->caps.seamless_odm) {
+ for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+ uint32_t num_opps, opp_id_src0, opp_id_src1;
+
+ num_opps = 1;
+ if (tg) {
+ if (tg->funcs->is_tg_enabled(tg) && tg->funcs->get_optc_source) {
+ tg->funcs->get_optc_source(tg, &num_opps,
+ &opp_id_src0, &opp_id_src1);
+ }
+ }
+
+ if (num_opps > 1) {
+ dc->link_srv->blank_all_edp_displays(dc);
+ break;
+ }
+ }
+ }
+
+ hws->funcs.init_pipes(dc, dc->current_state);
+ if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
+ dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
+ !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
+ }
+
+ for (i = 0; i < res_pool->audio_count; i++) {
+ struct audio *audio = res_pool->audios[i];
+
+ audio->funcs->hw_init(audio);
+ }
+
+ for (i = 0; i < dc->link_count; i++) {
+ struct dc_link *link = dc->links[i];
+
+ if (link->panel_cntl)
+ backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (abms[i] != NULL)
+ abms[i]->funcs->abm_init(abms[i], backlight);
+ }
+
+ /* Power AFMT HDMI memory. TODO: may move to enable/disable output to save power */
+ REG_WRITE(DIO_MEM_PWR_CTRL, 0);
+
+ // Set i2c to light sleep until the engine is set up
+ if (dc->debug.enable_mem_low_power.bits.i2c)
+ REG_UPDATE(DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, 1);
+
+ if (hws->funcs.setup_hpo_hw_control)
+ hws->funcs.setup_hpo_hw_control(hws, false);
+
+ if (!dc->debug.disable_clock_gate) {
+ /* enable all DCN clock gating */
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
+
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
+
+ REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
+ }
+
+ if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
+ dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
+
+ if (dc->clk_mgr->funcs->notify_wm_ranges)
+ dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
+
+ if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
+ dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
+
+ if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
+ dc->res_pool->hubbub->funcs->force_pstate_change_control(
+ dc->res_pool->hubbub, false, false);
+#if defined(CONFIG_DRM_AMD_DC_FP)
+ if (dc->res_pool->hubbub->funcs->init_crb)
+ dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
+#endif
+
+ // Get DMCUB capabilities
+ dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
+ dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
+ dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
+}
+
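+/*
+ * Gate or un-gate the power domain of a DSC instance (DOMAIN16..18) and wait
+ * for the PGFSM to report the expected status (0 = powered on, 2 = gated).
+ */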
+void dcn31_dsc_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int dsc_inst,
+ bool power_on)
+{
+ uint32_t power_gate = power_on ? 0 : 1;
+ uint32_t pwr_status = power_on ? 0 : 2;
+ uint32_t org_ip_request_cntl = 0;
+
+ if (hws->ctx->dc->debug.disable_dsc_power_gate)
+ return;
+
+ if (hws->ctx->dc->debug.root_clock_optimization.bits.dsc &&
+ hws->ctx->dc->res_pool->dccg->funcs->enable_dsc &&
+ power_on)
+ hws->ctx->dc->res_pool->dccg->funcs->enable_dsc(
+ hws->ctx->dc->res_pool->dccg, dsc_inst);
+
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
+
+ switch (dsc_inst) {
+ case 0: /* DSC0 */
+ REG_UPDATE(DOMAIN16_PG_CONFIG,
+ DOMAIN_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN16_PG_STATUS,
+ DOMAIN_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 1: /* DSC1 */
+ REG_UPDATE(DOMAIN17_PG_CONFIG,
+ DOMAIN_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN17_PG_STATUS,
+ DOMAIN_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 2: /* DSC2 */
+ REG_UPDATE(DOMAIN18_PG_CONFIG,
+ DOMAIN_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN18_PG_STATUS,
+ DOMAIN_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
+
+ if (hws->ctx->dc->debug.root_clock_optimization.bits.dsc) {
+ if (hws->ctx->dc->res_pool->dccg->funcs->disable_dsc && !power_on)
+ hws->ctx->dc->res_pool->dccg->funcs->disable_dsc(
+ hws->ctx->dc->res_pool->dccg, dsc_inst);
+ }
+
+}
+
+
+void dcn31_enable_power_gating_plane(
+ struct dce_hwseq *hws,
+ bool enable)
+{
+ bool force_on = true; /* disable power gating */
+ uint32_t org_ip_request_cntl = 0;
+
+ if (enable && !hws->ctx->dc->debug.disable_hubp_power_gate)
+ force_on = false;
+
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
+ /* DCHUBP0/1/2/3/4/5 */
+ REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
+ /* DPP0/1/2/3/4/5 */
+ REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
+
+ force_on = true; /* disable power gating */
+ if (enable && !hws->ctx->dc->debug.disable_dsc_power_gate)
+ force_on = false;
+
+ /* DSC0/1/2 */
+ REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
+
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
+}
+
+void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
+{
+ bool is_hdmi_tmds;
+ bool is_dp;
+
+ ASSERT(pipe_ctx->stream);
+
+ if (pipe_ctx->stream_res.stream_enc == NULL)
+ return; /* this is not root pipe */
+
+ is_hdmi_tmds = dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal);
+ is_dp = dc_is_dp_signal(pipe_ctx->stream->signal);
+
+ if (!is_hdmi_tmds && !is_dp)
+ return;
+
+ if (is_hdmi_tmds)
+ pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
+ pipe_ctx->stream_res.stream_enc,
+ &pipe_ctx->stream_res.encoder_info_frame);
+ else if (pipe_ctx->stream->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->update_dp_info_packets(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &pipe_ctx->stream_res.encoder_info_frame);
+ return;
+ } else {
+ if (pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num)
+ pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num(
+ pipe_ctx->stream_res.stream_enc,
+ &pipe_ctx->stream_res.encoder_info_frame);
+
+ pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(
+ pipe_ctx->stream_res.stream_enc,
+ &pipe_ctx->stream_res.encoder_info_frame);
+ }
+}
+
+void dcn31_z10_save_init(struct dc *dc)
+{
+ union dmub_rb_cmd cmd;
+
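+ /* Ask DMUB to latch the DCN state that will be restored on Z10 exit */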
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT;
+ cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT;
+
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+void dcn31_z10_restore(const struct dc *dc)
+{
+ union dmub_rb_cmd cmd;
+
+ /*
+ * DMUB notifies whether restore is required.
+ * Optimization to avoid sending commands when not required.
+ */
+ if (!dc_dmub_srv_is_restore_required(dc->ctx->dmub_srv))
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT;
+ cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_RESTORE;
+
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
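+/*
+ * Gate or un-gate the power domain of a HUBP instance (DOMAIN0..3) and wait
+ * for the PGFSM to report the expected status (0 = powered on, 2 = gated).
+ */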
+void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on)
+{
+ uint32_t power_gate = power_on ? 0 : 1;
+ uint32_t pwr_status = power_on ? 0 : 2;
+ uint32_t org_ip_request_cntl;
+ if (hws->ctx->dc->debug.disable_hubp_power_gate)
+ return;
+
+ if (REG(DOMAIN0_PG_CONFIG) == 0)
+ return;
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
+
+ switch (hubp_inst) {
+ case 0:
+ REG_SET(DOMAIN0_PG_CONFIG, 0, DOMAIN_POWER_GATE, power_gate);
+ REG_WAIT(DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
+ break;
+ case 1:
+ REG_SET(DOMAIN1_PG_CONFIG, 0, DOMAIN_POWER_GATE, power_gate);
+ REG_WAIT(DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
+ break;
+ case 2:
+ REG_SET(DOMAIN2_PG_CONFIG, 0, DOMAIN_POWER_GATE, power_gate);
+ REG_WAIT(DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
+ break;
+ case 3:
+ REG_SET(DOMAIN3_PG_CONFIG, 0, DOMAIN_POWER_GATE, power_gate);
+ REG_WAIT(DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
+}
+
+int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
+{
+ struct dcn_hubbub_phys_addr_config config;
+
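+ /* Translate the DC physical address space config into the HUBBUB init structure */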
+ config.system_aperture.fb_top = pa_config->system_aperture.fb_top;
+ config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset;
+ config.system_aperture.fb_base = pa_config->system_aperture.fb_base;
+ config.system_aperture.agp_top = pa_config->system_aperture.agp_top;
+ config.system_aperture.agp_bot = pa_config->system_aperture.agp_bot;
+ config.system_aperture.agp_base = pa_config->system_aperture.agp_base;
+ config.gart_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr;
+ config.gart_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr;
+
+ if (pa_config->gart_config.base_addr_is_mc_addr) {
+ /* Convert from MC address to offset into FB */
+ config.gart_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr -
+ pa_config->system_aperture.fb_base +
+ pa_config->system_aperture.fb_offset;
+ } else
+ config.gart_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;
+
+ return dc->res_pool->hubbub->funcs->init_dchub_sys_ctx(dc->res_pool->hubbub, &config);
+}
+
+static void dcn31_reset_back_end_for_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ struct dc_link *link;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+ if (pipe_ctx->stream_res.stream_enc == NULL) {
+ pipe_ctx->stream = NULL;
+ return;
+ }
+ ASSERT(!pipe_ctx->top_pipe);
+
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
+
+ pipe_ctx->stream_res.tg->funcs->set_dsc_config(
+ pipe_ctx->stream_res.tg,
+ OPTC_DSC_DISABLED, 0, 0);
+ pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
+ pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
+ if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
+ pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+ pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
+
+ if (pipe_ctx->stream_res.tg->funcs->set_drr)
+ pipe_ctx->stream_res.tg->funcs->set_drr(
+ pipe_ctx->stream_res.tg, NULL);
+
+ link = pipe_ctx->stream->link;
+ /* DPMS may already be disabled, or the dpms_off status may be
+ * incorrect due to the fastboot feature. When the system resumes
+ * from S4 with a second screen only, dpms_off would be true but
+ * VBIOS lit up eDP, so check the link status too.
+ */
+ if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
+ dc->link_srv->set_dpms_off(pipe_ctx);
+ else if (pipe_ctx->stream_res.audio)
+ dc->hwss.disable_audio_stream(pipe_ctx);
+
+ /* free acquired resources */
+ if (pipe_ctx->stream_res.audio) {
+ /*disable az_endpoint*/
+ pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
+
+ /*free audio*/
+ if (dc->caps.dynamic_audio == true) {
+ /* we have to dynamically arbitrate the audio endpoints */
+ /* we free the resource, so is_audio_acquired needs to be reset */
+ update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
+ pipe_ctx->stream_res.audio, false);
+ pipe_ctx->stream_res.audio = NULL;
+ }
+ }
+
+ pipe_ctx->stream = NULL;
+ DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
+ pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
+}
+
+void dcn31_reset_hw_ctx_wrap(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ /* Reset Back End*/
+ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx_old->stream)
+ continue;
+
+ if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
+ continue;
+
+ if (!pipe_ctx->stream ||
+ pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
+ struct clock_source *old_clk = pipe_ctx_old->clock_source;
+
+ dcn31_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
+ if (hws->funcs.enable_stream_gating)
+ hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
+ if (old_clk)
+ old_clk->funcs->cs_power_down(old_clk);
+ }
+ }
+
+ /* New dc_state in the process of being applied to hardware. */
+ link_enc_cfg_set_transient_mode(dc, dc->current_state, context);
+}
+
+void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
+{
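+ /* HPO IO power is only toggled when the hpo_optimization debug option is set */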
+ if (hws->ctx->dc->debug.hpo_optimization)
+ REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
new file mode 100644
index 000000000..edfc01d6a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCN31_H__
+#define __DC_HWSS_DCN31_H__
+
+#include "hw_sequencer_private.h"
+
+struct dc;
+
+void dcn31_init_hw(struct dc *dc);
+
+void dcn31_dsc_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int dsc_inst,
+ bool power_on);
+
+void dcn31_enable_power_gating_plane(
+ struct dce_hwseq *hws,
+ bool enable);
+
+void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx);
+
+void dcn31_z10_restore(const struct dc *dc);
+void dcn31_z10_save_init(struct dc *dc);
+
+void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on);
+int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config);
+void dcn31_reset_hw_ctx_wrap(
+ struct dc *dc,
+ struct dc_state *context);
+bool dcn31_is_abm_supported(struct dc *dc,
+ struct dc_state *context, struct dc_stream_state *stream);
+void dcn31_init_pipes(struct dc *dc, struct dc_state *context);
+void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable);
+
+#endif /* __DC_HWSS_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
new file mode 100644
index 000000000..1d7bc1e39
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce110/dce110_hw_sequencer.h"
+#include "dcn10/dcn10_hw_sequencer.h"
+#include "dcn20/dcn20_hwseq.h"
+#include "dcn21/dcn21_hwseq.h"
+#include "dcn30/dcn30_hwseq.h"
+#include "dcn301/dcn301_hwseq.h"
+#include "dcn31/dcn31_hwseq.h"
+
+#include "dcn31_init.h"
+
+static const struct hw_sequencer_funcs dcn31_funcs = {
+ .program_gamut_remap = dcn10_program_gamut_remap,
+ .init_hw = dcn31_init_hw,
+ .power_down_on_boot = dcn10_power_down_on_boot,
+ .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ .apply_ctx_for_surface = NULL,
+ .program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
+ .post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
+ .update_plane_addr = dcn20_update_plane_addr,
+ .update_dchub = dcn10_update_dchub,
+ .update_pending_status = dcn10_update_pending_status,
+ .program_output_csc = dcn20_program_output_csc,
+ .enable_accelerated_mode = dce110_enable_accelerated_mode,
+ .enable_timing_synchronization = dcn10_enable_timing_synchronization,
+ .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
+ .update_info_frame = dcn31_update_info_frame,
+ .send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
+ .enable_stream = dcn20_enable_stream,
+ .disable_stream = dce110_disable_stream,
+ .unblank_stream = dcn20_unblank_stream,
+ .blank_stream = dce110_blank_stream,
+ .enable_audio_stream = dce110_enable_audio_stream,
+ .disable_audio_stream = dce110_disable_audio_stream,
+ .disable_plane = dcn20_disable_plane,
+ .disable_pixel_data = dcn20_disable_pixel_data,
+ .pipe_control_lock = dcn20_pipe_control_lock,
+ .interdependent_update_lock = dcn10_lock_all_pipes,
+ .cursor_lock = dcn10_cursor_lock,
+ .prepare_bandwidth = dcn20_prepare_bandwidth,
+ .optimize_bandwidth = dcn20_optimize_bandwidth,
+ .update_bandwidth = dcn20_update_bandwidth,
+ .set_drr = dcn10_set_drr,
+ .get_position = dcn10_get_position,
+ .set_static_screen_control = dcn30_set_static_screen_control,
+ .setup_stereo = dcn10_setup_stereo,
+ .set_avmute = dcn30_set_avmute,
+ .log_hw_state = dcn10_log_hw_state,
+ .get_hw_state = dcn10_get_hw_state,
+ .clear_status_bits = dcn10_clear_status_bits,
+ .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .edp_backlight_control = dce110_edp_backlight_control,
+ .edp_power_control = dce110_edp_power_control,
+ .edp_wait_for_T12 = dce110_edp_wait_for_T12,
+ .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+ .set_cursor_position = dcn10_set_cursor_position,
+ .set_cursor_attribute = dcn10_set_cursor_attribute,
+ .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
+ .set_clock = dcn10_set_clock,
+ .get_clock = dcn10_get_clock,
+ .program_triplebuffer = dcn20_program_triple_buffer,
+ .enable_writeback = dcn30_enable_writeback,
+ .disable_writeback = dcn30_disable_writeback,
+ .update_writeback = dcn30_update_writeback,
+ .mmhubbub_warmup = dcn30_mmhubbub_warmup,
+ .dmdata_status_done = dcn20_dmdata_status_done,
+ .program_dmdata_engine = dcn30_program_dmdata_engine,
+ .set_dmdata_attributes = dcn20_set_dmdata_attributes,
+ .init_sys_ctx = dcn31_init_sys_ctx,
+ .init_vm_ctx = dcn20_init_vm_ctx,
+ .set_flip_control_gsl = dcn20_set_flip_control_gsl,
+ .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+ .calc_vupdate_position = dcn10_calc_vupdate_position,
+ .power_down = dce110_power_down,
+ .set_backlight_level = dcn21_set_backlight_level,
+ .set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
+ .set_pipe = dcn21_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
+ .z10_restore = dcn31_z10_restore,
+ .z10_save_init = dcn31_z10_save_init,
+ .set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
+ .optimize_pwr_state = dcn21_optimize_pwr_state,
+ .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
+ .update_visual_confirm_color = dcn10_update_visual_confirm_color,
+};
+
+static const struct hwseq_private_funcs dcn31_private_funcs = {
+ .init_pipes = dcn10_init_pipes,
+ .update_plane_addr = dcn20_update_plane_addr,
+ .plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
+ .update_mpcc = dcn20_update_mpcc,
+ .set_input_transfer_func = dcn30_set_input_transfer_func,
+ .set_output_transfer_func = dcn30_set_output_transfer_func,
+ .power_down = dce110_power_down,
+ .enable_display_power_gating = dcn10_dummy_display_power_gating,
+ .blank_pixel_data = dcn20_blank_pixel_data,
+ .reset_hw_ctx_wrap = dcn31_reset_hw_ctx_wrap,
+ .enable_stream_timing = dcn20_enable_stream_timing,
+ .edp_backlight_control = dce110_edp_backlight_control,
+ .disable_stream_gating = dcn20_disable_stream_gating,
+ .enable_stream_gating = dcn20_enable_stream_gating,
+ .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
+ .did_underflow_occur = dcn10_did_underflow_occur,
+ .init_blank = dcn20_init_blank,
+ .disable_vga = dcn20_disable_vga,
+ .bios_golden_init = dcn10_bios_golden_init,
+ .plane_atomic_disable = dcn20_plane_atomic_disable,
+ .plane_atomic_power_down = dcn10_plane_atomic_power_down,
+ .enable_power_gating_plane = dcn31_enable_power_gating_plane,
+ .hubp_pg_control = dcn31_hubp_pg_control,
+ .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
+ .update_odm = dcn20_update_odm,
+ .dsc_pg_control = dcn31_dsc_pg_control,
+ .set_hdr_multiplier = dcn10_set_hdr_multiplier,
+ .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
+ .wait_for_blank_complete = dcn20_wait_for_blank_complete,
+ .dccg_init = dcn20_dccg_init,
+ .set_blend_lut = dcn30_set_blend_lut,
+ .set_shaper_3dlut = dcn20_set_shaper_3dlut,
+ .setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
+};
+
+void dcn31_hw_sequencer_construct(struct dc *dc)
+{
+ dc->hwss = dcn31_funcs;
+ dc->hwseq->funcs = dcn31_private_funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.h
new file mode 100644
index 000000000..a3db08c8b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_DCN31_INIT_H__
+#define __DC_DCN31_INIT_H__
+
+struct dc;
+
+void dcn31_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_DCN31_INIT_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
new file mode 100644
index 000000000..63a677c8e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
@@ -0,0 +1,310 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dcn31_optc.h"
+
+#include "dcn30/dcn30_optc.h"
+#include "reg_helper.h"
+#include "dc.h"
+#include "dcn_calc_math.h"
+
+#define REG(reg)\
+ optc1->tg_regs->reg
+
+#define CTX \
+ optc1->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ optc1->tg_shift->field_name, optc1->tg_mask->field_name
+
+static void optc31_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
+ struct dc_crtc_timing *timing)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right)
+ / opp_cnt;
+ uint32_t memory_mask = 0;
+ int mem_count_per_opp = (mpcc_hactive + 2559) / 2560;
+
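+ /* Each ODM memory segment holds up to 2560 horizontal pixels, so size the mask per OPP */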
+ /* Assume less than 6 pipes */
+ if (opp_cnt == 4) {
+ if (mem_count_per_opp == 1)
+ memory_mask = 0xf;
+ else {
+ ASSERT(mem_count_per_opp == 2);
+ memory_mask = 0xff;
+ }
+ } else if (mem_count_per_opp == 1)
+ memory_mask = 0x1 << (opp_id[0] * 2) | 0x1 << (opp_id[1] * 2);
+ else if (mem_count_per_opp == 2)
+ memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2);
+ else if (mem_count_per_opp == 3)
+ memory_mask = 0x77;
+ else if (mem_count_per_opp == 4)
+ memory_mask = 0xff;
+
+ if (REG(OPTC_MEMORY_CONFIG))
+ REG_SET(OPTC_MEMORY_CONFIG, 0,
+ OPTC_MEM_SEL, memory_mask);
+
+ if (opp_cnt == 2) {
+ REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,
+ OPTC_NUM_OF_INPUT_SEGMENT, 1,
+ OPTC_SEG0_SRC_SEL, opp_id[0],
+ OPTC_SEG1_SRC_SEL, opp_id[1]);
+ } else if (opp_cnt == 4) {
+ REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0,
+ OPTC_NUM_OF_INPUT_SEGMENT, 3,
+ OPTC_SEG0_SRC_SEL, opp_id[0],
+ OPTC_SEG1_SRC_SEL, opp_id[1],
+ OPTC_SEG2_SRC_SEL, opp_id[2],
+ OPTC_SEG3_SRC_SEL, opp_id[3]);
+ }
+
+ REG_UPDATE(OPTC_WIDTH_CONTROL,
+ OPTC_SEGMENT_WIDTH, mpcc_hactive);
+
+ REG_SET(OTG_H_TIMING_CNTL, 0, OTG_H_TIMING_DIV_MODE, opp_cnt - 1);
+ optc1->opp_count = opp_cnt;
+}
+
+/*
+ * Enable CRTC - call ASIC Control Object to enable Timing generator.
+ */
+static bool optc31_enable_crtc(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ /* OPP instance for OTG: 1-to-1 mapping by default; ODM will adjust it */
+ REG_UPDATE(OPTC_DATA_SOURCE_SELECT,
+ OPTC_SEG0_SRC_SEL, optc->inst);
+
+ /* Enable VTG first as a HW workaround */
+ REG_UPDATE(CONTROL,
+ VTG0_ENABLE, 1);
+
+ REG_SEQ_START();
+
+ /* Enable CRTC */
+ REG_UPDATE_2(OTG_CONTROL,
+ OTG_DISABLE_POINT_CNTL, 2,
+ OTG_MASTER_EN, 1);
+
+ REG_SEQ_SUBMIT();
+ REG_SEQ_WAIT_DONE();
+
+ return true;
+}
+
+/* disable_crtc - call ASIC Control Object to disable Timing generator. */
+static bool optc31_disable_crtc(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ /* disable otg request until end of the first line
+ * in the vertical blank region
+ */
+ REG_UPDATE(OTG_CONTROL,
+ OTG_MASTER_EN, 0);
+
+ REG_UPDATE(CONTROL,
+ VTG0_ENABLE, 0);
+
+ /* CRTC disabled, so disable clock. */
+ REG_WAIT(OTG_CLOCK_CONTROL,
+ OTG_BUSY, 0,
+ 1, 100000);
+ optc1_clear_optc_underflow(optc);
+
+ return true;
+}
+
+bool optc31_immediate_disable_crtc(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_UPDATE_2(OTG_CONTROL,
+ OTG_DISABLE_POINT_CNTL, 0,
+ OTG_MASTER_EN, 0);
+
+ REG_UPDATE(CONTROL,
+ VTG0_ENABLE, 0);
+
+ /* CRTC disabled, so disable clock. */
+ REG_WAIT(OTG_CLOCK_CONTROL,
+ OTG_BUSY, 0,
+ 1, 100000);
+
+ /* clear the false state */
+ optc1_clear_optc_underflow(optc);
+
+ return true;
+}
+
+void optc31_set_drr(
+ struct timing_generator *optc,
+ const struct drr_params *params)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ if (params != NULL &&
+ params->vertical_total_max > 0 &&
+ params->vertical_total_min > 0) {
+
+ if (params->vertical_total_mid != 0) {
+
+ REG_SET(OTG_V_TOTAL_MID, 0,
+ OTG_V_TOTAL_MID, params->vertical_total_mid - 1);
+
+ REG_UPDATE_2(OTG_V_TOTAL_CONTROL,
+ OTG_VTOTAL_MID_REPLACING_MAX_EN, 1,
+ OTG_VTOTAL_MID_FRAME_NUM,
+ (uint8_t)params->vertical_total_mid_frame_num);
+
+ }
+
+ optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1);
+
+ /*
+ * MIN_MASK_EN is gone and MASK is now always enabled.
+ *
+ * To get it to work with manual trigger we need to make sure
+ * we program the correct bit.
+ */
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 1,
+ OTG_V_TOTAL_MAX_SEL, 1,
+ OTG_FORCE_LOCK_ON_EVENT, 0,
+ OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
+
+ // Setup manual flow control for EOF via TRIG_A
+ optc->funcs->setup_manual_trigger(optc);
+ } else {
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_SET_V_TOTAL_MIN_MASK, 0,
+ OTG_V_TOTAL_MIN_SEL, 0,
+ OTG_V_TOTAL_MAX_SEL, 0,
+ OTG_FORCE_LOCK_ON_EVENT, 0);
+
+ optc->funcs->set_vtotal_min_max(optc, 0, 0);
+ }
+}
+
+void optc3_init_odm(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
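+ /* Back to a single input segment sourced from this OTG; mark SEG1..3 unused (0xf) */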
+ REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0,
+ OPTC_NUM_OF_INPUT_SEGMENT, 0,
+ OPTC_SEG0_SRC_SEL, optc->inst,
+ OPTC_SEG1_SRC_SEL, 0xf,
+ OPTC_SEG2_SRC_SEL, 0xf,
+ OPTC_SEG3_SRC_SEL, 0xf
+ );
+
+ REG_SET(OTG_H_TIMING_CNTL, 0,
+ OTG_H_TIMING_DIV_MODE, 0);
+
+ REG_SET(OPTC_MEMORY_CONFIG, 0,
+ OPTC_MEM_SEL, 0);
+ optc1->opp_count = 1;
+}
+
+static struct timing_generator_funcs dcn31_tg_funcs = {
+ .validate_timing = optc1_validate_timing,
+ .program_timing = optc1_program_timing,
+ .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
+ .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
+ .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
+ .program_global_sync = optc1_program_global_sync,
+ .enable_crtc = optc31_enable_crtc,
+ .disable_crtc = optc31_disable_crtc,
+ .immediate_disable_crtc = optc31_immediate_disable_crtc,
+ /* used by enable_timing_synchronization. Not needed for FPGA */
+ .is_counter_moving = optc1_is_counter_moving,
+ .get_position = optc1_get_position,
+ .get_frame_count = optc1_get_vblank_counter,
+ .get_scanoutpos = optc1_get_crtc_scanoutpos,
+ .get_otg_active_size = optc1_get_otg_active_size,
+ .set_early_control = optc1_set_early_control,
+ /* used by enable_timing_synchronization. Not needed for FPGA */
+ .wait_for_state = optc1_wait_for_state,
+ .set_blank_color = optc3_program_blank_color,
+ .did_triggered_reset_occur = optc1_did_triggered_reset_occur,
+ .triplebuffer_lock = optc3_triplebuffer_lock,
+ .triplebuffer_unlock = optc2_triplebuffer_unlock,
+ .enable_reset_trigger = optc1_enable_reset_trigger,
+ .enable_crtc_reset = optc1_enable_crtc_reset,
+ .disable_reset_trigger = optc1_disable_reset_trigger,
+ .lock = optc3_lock,
+ .unlock = optc1_unlock,
+ .lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
+ .lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
+ .enable_optc_clock = optc1_enable_optc_clock,
+ .set_drr = optc31_set_drr,
+ .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
+ .set_vtotal_min_max = optc1_set_vtotal_min_max,
+ .set_static_screen_control = optc1_set_static_screen_control,
+ .program_stereo = optc1_program_stereo,
+ .is_stereo_left_eye = optc1_is_stereo_left_eye,
+ .tg_init = optc3_tg_init,
+ .is_tg_enabled = optc1_is_tg_enabled,
+ .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
+ .clear_optc_underflow = optc1_clear_optc_underflow,
+ .setup_global_swap_lock = NULL,
+ .get_crc = optc1_get_crc,
+ .configure_crc = optc2_configure_crc,
+ .set_dsc_config = optc3_set_dsc_config,
+ .get_dsc_status = optc2_get_dsc_status,
+ .set_dwb_source = NULL,
+ .set_odm_bypass = optc3_set_odm_bypass,
+ .set_odm_combine = optc31_set_odm_combine,
+ .get_optc_source = optc2_get_optc_source,
+ .set_out_mux = optc3_set_out_mux,
+ .set_drr_trigger_window = optc3_set_drr_trigger_window,
+ .set_vtotal_change_limit = optc3_set_vtotal_change_limit,
+ .set_gsl = optc2_set_gsl,
+ .set_gsl_source_select = optc2_set_gsl_source_select,
+ .set_vtg_params = optc1_set_vtg_params,
+ .program_manual_trigger = optc2_program_manual_trigger,
+ .setup_manual_trigger = optc2_setup_manual_trigger,
+ .get_hw_timing = optc1_get_hw_timing,
+ .init_odm = optc3_init_odm,
+};
+
+void dcn31_timing_generator_init(struct optc *optc1)
+{
+ optc1->base.funcs = &dcn31_tg_funcs;
+
+ optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
+ optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;
+
+ optc1->min_h_blank = 32;
+ optc1->min_v_blank = 3;
+ optc1->min_v_blank_interlace = 5;
+ optc1->min_h_sync_width = 4;
+ optc1->min_v_sync_width = 1;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h
new file mode 100644
index 000000000..30b81a448
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h
@@ -0,0 +1,267 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_OPTC_DCN31_H__
+#define __DC_OPTC_DCN31_H__
+
+#include "dcn10/dcn10_optc.h"
+
+#define OPTC_COMMON_REG_LIST_DCN3_1(inst) \
+ SRI(OTG_VSTARTUP_PARAM, OTG, inst),\
+ SRI(OTG_VUPDATE_PARAM, OTG, inst),\
+ SRI(OTG_VREADY_PARAM, OTG, inst),\
+ SRI(OTG_MASTER_UPDATE_LOCK, OTG, inst),\
+ SRI(OTG_GLOBAL_CONTROL0, OTG, inst),\
+ SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\
+ SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\
+ SRI(OTG_GLOBAL_CONTROL4, OTG, inst),\
+ SRI(OTG_DOUBLE_BUFFER_CONTROL, OTG, inst),\
+ SRI(OTG_H_TOTAL, OTG, inst),\
+ SRI(OTG_H_BLANK_START_END, OTG, inst),\
+ SRI(OTG_H_SYNC_A, OTG, inst),\
+ SRI(OTG_H_SYNC_A_CNTL, OTG, inst),\
+ SRI(OTG_H_TIMING_CNTL, OTG, inst),\
+ SRI(OTG_V_TOTAL, OTG, inst),\
+ SRI(OTG_V_BLANK_START_END, OTG, inst),\
+ SRI(OTG_V_SYNC_A, OTG, inst),\
+ SRI(OTG_V_SYNC_A_CNTL, OTG, inst),\
+ SRI(OTG_CONTROL, OTG, inst),\
+ SRI(OTG_STEREO_CONTROL, OTG, inst),\
+ SRI(OTG_3D_STRUCTURE_CONTROL, OTG, inst),\
+ SRI(OTG_STEREO_STATUS, OTG, inst),\
+ SRI(OTG_V_TOTAL_MAX, OTG, inst),\
+ SRI(OTG_V_TOTAL_MIN, OTG, inst),\
+ SRI(OTG_V_TOTAL_CONTROL, OTG, inst),\
+ SRI(OTG_TRIGA_CNTL, OTG, inst),\
+ SRI(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst),\
+ SRI(OTG_STATIC_SCREEN_CONTROL, OTG, inst),\
+ SRI(OTG_STATUS_FRAME_COUNT, OTG, inst),\
+ SRI(OTG_STATUS, OTG, inst),\
+ SRI(OTG_STATUS_POSITION, OTG, inst),\
+ SRI(OTG_NOM_VERT_POSITION, OTG, inst),\
+ SRI(OTG_M_CONST_DTO0, OTG, inst),\
+ SRI(OTG_M_CONST_DTO1, OTG, inst),\
+ SRI(OTG_CLOCK_CONTROL, OTG, inst),\
+ SRI(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst),\
+ SRI(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst),\
+ SRI(OTG_VERTICAL_INTERRUPT1_CONTROL, OTG, inst),\
+ SRI(OTG_VERTICAL_INTERRUPT1_POSITION, OTG, inst),\
+ SRI(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst),\
+ SRI(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst),\
+ SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\
+ SRI(OPTC_DATA_SOURCE_SELECT, ODM, inst),\
+ SRI(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst),\
+ SRI(CONTROL, VTG, inst),\
+ SRI(OTG_VERT_SYNC_CONTROL, OTG, inst),\
+ SRI(OTG_GSL_CONTROL, OTG, inst),\
+ SRI(OTG_CRC_CNTL, OTG, inst),\
+ SRI(OTG_CRC0_DATA_RG, OTG, inst),\
+ SRI(OTG_CRC0_DATA_B, OTG, inst),\
+ SRI(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst),\
+ SRI(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\
+ SRI(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\
+ SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst),\
+ SR(GSL_SOURCE_SELECT),\
+ SRI(OTG_TRIGA_MANUAL_TRIG, OTG, inst),\
+ SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\
+ SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\
+ SRI(OTG_GSL_WINDOW_X, OTG, inst),\
+ SRI(OTG_GSL_WINDOW_Y, OTG, inst),\
+ SRI(OTG_VUPDATE_KEEPOUT, OTG, inst),\
+ SRI(OTG_DSC_START_POSITION, OTG, inst),\
+ SRI(OTG_DRR_TRIGGER_WINDOW, OTG, inst),\
+ SRI(OTG_DRR_V_TOTAL_CHANGE, OTG, inst),\
+ SRI(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\
+ SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\
+ SRI(OPTC_WIDTH_CONTROL, ODM, inst),\
+ SRI(OPTC_MEMORY_CONFIG, ODM, inst),\
+ SRI(OTG_CRC_CNTL2, OTG, inst),\
+ SR(DWB_SOURCE_SELECT),\
+ SRI(OTG_DRR_CONTROL, OTG, inst)
+
+#define OPTC_COMMON_MASK_SH_LIST_DCN3_1(mask_sh)\
+ SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\
+ SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_OFFSET, mask_sh),\
+ SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_WIDTH, mask_sh),\
+ SF(OTG0_OTG_VREADY_PARAM, VREADY_OFFSET, mask_sh),\
+ SF(OTG0_OTG_MASTER_UPDATE_LOCK, OTG_MASTER_UPDATE_LOCK, mask_sh),\
+ SF(OTG0_OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_START_X, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_END_X, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_EN, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_START_Y, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_END_Y, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL2, OTG_MASTER_UPDATE_LOCK_SEL, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL4, DIG_UPDATE_POSITION_X, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL4, DIG_UPDATE_POSITION_Y, mask_sh),\
+ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_H_TOTAL, OTG_H_TOTAL, mask_sh),\
+ SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_START, mask_sh),\
+ SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_END, mask_sh),\
+ SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_START, mask_sh),\
+ SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_END, mask_sh),\
+ SF(OTG0_OTG_H_SYNC_A_CNTL, OTG_H_SYNC_A_POL, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL, OTG_V_TOTAL, mask_sh),\
+ SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_START, mask_sh),\
+ SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_END, mask_sh),\
+ SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_START, mask_sh),\
+ SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_END, mask_sh),\
+ SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_A_POL, mask_sh),\
+ SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_MODE, mask_sh),\
+ SF(OTG0_OTG_CONTROL, OTG_MASTER_EN, mask_sh),\
+ SF(OTG0_OTG_CONTROL, OTG_START_POINT_CNTL, mask_sh),\
+ SF(OTG0_OTG_CONTROL, OTG_DISABLE_POINT_CNTL, mask_sh),\
+ SF(OTG0_OTG_CONTROL, OTG_FIELD_NUMBER_CNTL, mask_sh),\
+ SF(OTG0_OTG_CONTROL, OTG_OUT_MUX, mask_sh),\
+ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EN, mask_sh),\
+ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_LINE_NUM, mask_sh),\
+ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_POLARITY, mask_sh),\
+ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EYE_FLAG_POLARITY, mask_sh),\
+ SF(OTG0_OTG_STEREO_CONTROL, OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, mask_sh),\
+ SF(OTG0_OTG_STEREO_STATUS, OTG_STEREO_CURRENT_EYE, mask_sh),\
+ SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_EN, mask_sh),\
+ SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_V_UPDATE_MODE, mask_sh),\
+ SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_STEREO_SEL_OVR, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_MAX, OTG_V_TOTAL_MAX, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_MIN, OTG_V_TOTAL_MIN, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MIN_SEL, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MAX_SEL, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_FORCE_LOCK_ON_EVENT, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_REPLACING_MIN_EN, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_REPLACING_MAX_EN, mask_sh),\
+ SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_CLEAR, mask_sh),\
+ SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_MODE, mask_sh),\
+ SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_OCCURRED, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_SELECT, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_PIPE_SELECT, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_RISING_EDGE_DETECT_CNTL, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_POLARITY_SELECT, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FREQUENCY_SELECT, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_DELAY, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_CLEAR, mask_sh),\
+ SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_EVENT_MASK, mask_sh),\
+ SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_FRAME_COUNT, mask_sh),\
+ SF(OTG0_OTG_STATUS_FRAME_COUNT, OTG_FRAME_COUNT, mask_sh),\
+ SF(OTG0_OTG_STATUS, OTG_V_BLANK, mask_sh),\
+ SF(OTG0_OTG_STATUS, OTG_V_ACTIVE_DISP, mask_sh),\
+ SF(OTG0_OTG_STATUS_POSITION, OTG_HORZ_COUNT, mask_sh),\
+ SF(OTG0_OTG_STATUS_POSITION, OTG_VERT_COUNT, mask_sh),\
+ SF(OTG0_OTG_NOM_VERT_POSITION, OTG_VERT_COUNT_NOM, mask_sh),\
+ SF(OTG0_OTG_M_CONST_DTO0, OTG_M_CONST_DTO_PHASE, mask_sh),\
+ SF(OTG0_OTG_M_CONST_DTO1, OTG_M_CONST_DTO_MODULO, mask_sh),\
+ SF(OTG0_OTG_CLOCK_CONTROL, OTG_BUSY, mask_sh),\
+ SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_EN, mask_sh),\
+ SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_ON, mask_sh),\
+ SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_GATE_DIS, mask_sh),\
+ SF(OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE, mask_sh),\
+ SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_START, mask_sh),\
+ SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_END, mask_sh),\
+ SF(OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE, mask_sh),\
+ SF(OTG0_OTG_VERTICAL_INTERRUPT1_POSITION, OTG_VERTICAL_INTERRUPT1_LINE_START, mask_sh),\
+ SF(OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE, mask_sh),\
+ SF(OTG0_OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, mask_sh),\
+ SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_EN, mask_sh),\
+ SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_ON, mask_sh),\
+ SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_GATE_DIS, mask_sh),\
+ SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\
+ SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, mask_sh),\
+ SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\
+ SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\
+ SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh),\
+ SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED, mask_sh),\
+ SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_CLEAR, mask_sh),\
+ SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_AUTO_FORCE_VSYNC_MODE, mask_sh),\
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL0_EN, mask_sh),\
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL1_EN, mask_sh),\
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL2_EN, mask_sh),\
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_EN, mask_sh),\
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_FORCE_DELAY, mask_sh),\
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL, OTG_CRC_CONT_EN, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL, OTG_CRC0_SELECT, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL, OTG_CRC_EN, mask_sh),\
+ SF(OTG0_OTG_CRC0_DATA_RG, CRC0_R_CR, mask_sh),\
+ SF(OTG0_OTG_CRC0_DATA_RG, CRC0_G_Y, mask_sh),\
+ SF(OTG0_OTG_CRC0_DATA_B, CRC0_B_CB, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_START, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_END, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_START, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_END, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_START, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_END, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_START, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, mask_sh),\
+ SF(OTG0_OTG_TRIGA_MANUAL_TRIG, OTG_TRIGA_MANUAL_TRIG, mask_sh),\
+ SF(GSL_SOURCE_SELECT, GSL0_READY_SOURCE_SEL, mask_sh),\
+ SF(GSL_SOURCE_SELECT, GSL1_READY_SOURCE_SEL, mask_sh),\
+ SF(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL2, MANUAL_FLOW_CONTROL_SEL, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\
+ SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\
+ SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \
+ SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\
+ SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_END_Y, mask_sh),\
+ SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \
+ SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \
+ SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_MODE, mask_sh), \
+ SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \
+ SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \
+ SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\
+ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\
+ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG1_SRC_SEL, mask_sh),\
+ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG2_SRC_SEL, mask_sh),\
+ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG3_SRC_SEL, mask_sh),\
+ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, mask_sh),\
+ SF(ODM0_OPTC_MEMORY_CONFIG, OPTC_MEM_SEL, mask_sh),\
+ SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, mask_sh),\
+ SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DSC_MODE, mask_sh),\
+ SF(ODM0_OPTC_BYTES_PER_PIXEL, OPTC_DSC_BYTES_PER_PIXEL, mask_sh),\
+ SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_DSC_SLICE_WIDTH, mask_sh),\
+ SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_SEGMENT_WIDTH, mask_sh),\
+ SF(DWB_SOURCE_SELECT, OPTC_DWB0_SOURCE_SELECT, mask_sh),\
+ SF(DWB_SOURCE_SELECT, OPTC_DWB1_SOURCE_SELECT, mask_sh),\
+ SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_START_X, mask_sh),\
+ SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_END_X, mask_sh),\
+ SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\
+ SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\
+ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DSC_MODE, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_COMBINE_MODE, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_SPLIT_MODE, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_FORMAT, mask_sh),\
+ SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh)
+
+void dcn31_timing_generator_init(struct optc *optc1);
+
+bool optc31_immediate_disable_crtc(struct timing_generator *optc);
+
+void optc31_set_drr(struct timing_generator *optc, const struct drr_params *params);
+
+void optc3_init_odm(struct timing_generator *optc);
+
+#endif /* __DC_OPTC_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
new file mode 100644
index 000000000..d849b1eaa
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "core_types.h"
+#include "dc_dmub_srv.h"
+#include "dcn31_panel_cntl.h"
+#include "atom.h"
+
+#define TO_DCN31_PANEL_CNTL(panel_cntl)\
+ container_of(panel_cntl, struct dcn31_panel_cntl, base)
+
+#define CTX \
+ dcn31_panel_cntl->base.ctx
+
+#define DC_LOGGER \
+ dcn31_panel_cntl->base.ctx->logger
+
+static bool dcn31_query_backlight_info(struct panel_cntl *panel_cntl, union dmub_rb_cmd *cmd)
+{
+ struct dcn31_panel_cntl *dcn31_panel_cntl = TO_DCN31_PANEL_CNTL(panel_cntl);
+ struct dc_dmub_srv *dc_dmub_srv = panel_cntl->ctx->dmub_srv;
+
+ if (!dc_dmub_srv)
+ return false;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->panel_cntl.header.type = DMUB_CMD__PANEL_CNTL;
+ cmd->panel_cntl.header.sub_type = DMUB_CMD__PANEL_CNTL_QUERY_BACKLIGHT_INFO;
+ cmd->panel_cntl.header.payload_bytes = sizeof(cmd->panel_cntl.data);
+ cmd->panel_cntl.data.pwrseq_inst = dcn31_panel_cntl->base.pwrseq_inst;
+
+ return dm_execute_dmub_cmd(dc_dmub_srv->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
+}
+
+static uint32_t dcn31_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl)
+{
+ union dmub_rb_cmd cmd;
+
+ if (!dcn31_query_backlight_info(panel_cntl, &cmd))
+ return 0;
+
+ return cmd.panel_cntl.data.current_backlight;
+}
+
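+/*
+ * Backlight PWM hardware init is delegated to DMUB firmware: send down the
+ * stored PWM register values and cache whatever DMUB reports back.
+ */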
+static uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
+{
+ struct dcn31_panel_cntl *dcn31_panel_cntl = TO_DCN31_PANEL_CNTL(panel_cntl);
+ struct dc_dmub_srv *dc_dmub_srv = panel_cntl->ctx->dmub_srv;
+ union dmub_rb_cmd cmd;
+
+ if (!dc_dmub_srv)
+ return 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.panel_cntl.header.type = DMUB_CMD__PANEL_CNTL;
+ cmd.panel_cntl.header.sub_type = DMUB_CMD__PANEL_CNTL_HW_INIT;
+ cmd.panel_cntl.header.payload_bytes = sizeof(cmd.panel_cntl.data);
+ cmd.panel_cntl.data.pwrseq_inst = dcn31_panel_cntl->base.pwrseq_inst;
+ cmd.panel_cntl.data.bl_pwm_cntl = panel_cntl->stored_backlight_registers.BL_PWM_CNTL;
+ cmd.panel_cntl.data.bl_pwm_period_cntl = panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL;
+ cmd.panel_cntl.data.bl_pwm_ref_div1 =
+ panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
+ cmd.panel_cntl.data.bl_pwm_ref_div2 =
+ panel_cntl->stored_backlight_registers.PANEL_PWRSEQ_REF_DIV2;
+ if (!dm_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ return 0;
+
+ panel_cntl->stored_backlight_registers.BL_PWM_CNTL = cmd.panel_cntl.data.bl_pwm_cntl;
+ panel_cntl->stored_backlight_registers.BL_PWM_CNTL2 = 0; /* unused */
+ panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL = cmd.panel_cntl.data.bl_pwm_period_cntl;
+ panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV =
+ cmd.panel_cntl.data.bl_pwm_ref_div1;
+ panel_cntl->stored_backlight_registers.PANEL_PWRSEQ_REF_DIV2 =
+ cmd.panel_cntl.data.bl_pwm_ref_div2;
+
+ return cmd.panel_cntl.data.current_backlight;
+}
+
+static void dcn31_panel_cntl_destroy(struct panel_cntl **panel_cntl)
+{
+ struct dcn31_panel_cntl *dcn31_panel_cntl = TO_DCN31_PANEL_CNTL(*panel_cntl);
+
+ kfree(dcn31_panel_cntl);
+ *panel_cntl = NULL;
+}
+
+static bool dcn31_is_panel_backlight_on(struct panel_cntl *panel_cntl)
+{
+ union dmub_rb_cmd cmd;
+
+ if (!dcn31_query_backlight_info(panel_cntl, &cmd))
+ return false;
+
+ return cmd.panel_cntl.data.is_backlight_on;
+}
+
+static bool dcn31_is_panel_powered_on(struct panel_cntl *panel_cntl)
+{
+ union dmub_rb_cmd cmd;
+
+ if (!dcn31_query_backlight_info(panel_cntl, &cmd))
+ return false;
+
+ return cmd.panel_cntl.data.is_powered_on;
+}
+
+static void dcn31_store_backlight_level(struct panel_cntl *panel_cntl)
+{
+ union dmub_rb_cmd cmd;
+
+ if (!dcn31_query_backlight_info(panel_cntl, &cmd))
+ return;
+
+ panel_cntl->stored_backlight_registers.BL_PWM_CNTL = cmd.panel_cntl.data.bl_pwm_cntl;
+ panel_cntl->stored_backlight_registers.BL_PWM_CNTL2 = 0; /* unused */
+ panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL = cmd.panel_cntl.data.bl_pwm_period_cntl;
+ panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV =
+ cmd.panel_cntl.data.bl_pwm_ref_div1;
+}
+
+static const struct panel_cntl_funcs dcn31_link_panel_cntl_funcs = {
+ .destroy = dcn31_panel_cntl_destroy,
+ .hw_init = dcn31_panel_cntl_hw_init,
+ .is_panel_backlight_on = dcn31_is_panel_backlight_on,
+ .is_panel_powered_on = dcn31_is_panel_powered_on,
+ .store_backlight_level = dcn31_store_backlight_level,
+ .get_current_backlight = dcn31_get_16_bit_backlight_from_pwm,
+};
+
+void dcn31_panel_cntl_construct(
+ struct dcn31_panel_cntl *dcn31_panel_cntl,
+ const struct panel_cntl_init_data *init_data)
+{
+ dcn31_panel_cntl->base.funcs = &dcn31_link_panel_cntl_funcs;
+ dcn31_panel_cntl->base.ctx = init_data->ctx;
+ dcn31_panel_cntl->base.inst = init_data->inst;
+ dcn31_panel_cntl->base.pwrseq_inst = init_data->pwrseq_inst;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.h
new file mode 100644
index 000000000..d33ccd6ef
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_PANEL_CNTL__DCN31_H__
+#define __DC_PANEL_CNTL__DCN31_H__
+
+#include "panel_cntl.h"
+#include "dce/dce_panel_cntl.h"
+
+struct dcn31_panel_cntl {
+ struct panel_cntl base;
+};
+
+void dcn31_panel_cntl_construct(
+ struct dcn31_panel_cntl *dcn31_panel_cntl,
+ const struct panel_cntl_init_data *init_data);
+
+#endif /* __DC_PANEL_CNTL__DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
new file mode 100644
index 000000000..82de4fe26
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -0,0 +1,2214 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "dm_services.h"
+#include "dc.h"
+
+#include "dcn31/dcn31_init.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "dcn31_resource.h"
+
+#include "dcn20/dcn20_resource.h"
+#include "dcn30/dcn30_resource.h"
+
+#include "dml/dcn30/dcn30_fpu.h"
+
+#include "dcn10/dcn10_ipp.h"
+#include "dcn30/dcn30_hubbub.h"
+#include "dcn31/dcn31_hubbub.h"
+#include "dcn30/dcn30_mpc.h"
+#include "dcn31/dcn31_hubp.h"
+#include "irq/dcn31/irq_service_dcn31.h"
+#include "dcn30/dcn30_dpp.h"
+#include "dcn31/dcn31_optc.h"
+#include "dcn20/dcn20_hwseq.h"
+#include "dcn30/dcn30_hwseq.h"
+#include "dce110/dce110_hw_sequencer.h"
+#include "dcn30/dcn30_opp.h"
+#include "dcn20/dcn20_dsc.h"
+#include "dcn30/dcn30_vpg.h"
+#include "dcn30/dcn30_afmt.h"
+#include "dcn30/dcn30_dio_stream_encoder.h"
+#include "dcn31/dcn31_hpo_dp_stream_encoder.h"
+#include "dcn31/dcn31_hpo_dp_link_encoder.h"
+#include "dcn31/dcn31_apg.h"
+#include "dcn31/dcn31_dio_link_encoder.h"
+#include "dcn31/dcn31_vpg.h"
+#include "dcn31/dcn31_afmt.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_audio.h"
+#include "dce/dce_hwseq.h"
+#include "clk_mgr.h"
+#include "virtual/virtual_stream_encoder.h"
+#include "dce110/dce110_resource.h"
+#include "dml/display_mode_vba.h"
+#include "dml/dcn31/dcn31_fpu.h"
+#include "dcn31/dcn31_dccg.h"
+#include "dcn10/dcn10_resource.h"
+#include "dcn31_panel_cntl.h"
+
+#include "dcn30/dcn30_dwb.h"
+#include "dcn30/dcn30_mmhubbub.h"
+
+// TODO: change include headers /amd/include/asic_reg after upstream
+#include "yellow_carp_offset.h"
+#include "dcn/dcn_3_1_2_offset.h"
+#include "dcn/dcn_3_1_2_sh_mask.h"
+#include "nbio/nbio_7_2_0_offset.h"
+#include "dpcs/dpcs_4_2_0_offset.h"
+#include "dpcs/dpcs_4_2_0_sh_mask.h"
+#include "mmhub/mmhub_2_3_0_offset.h"
+#include "mmhub/mmhub_2_3_0_sh_mask.h"
+
+
+#define regDCHUBBUB_DEBUG_CTRL_0 0x04d6
+#define regDCHUBBUB_DEBUG_CTRL_0_BASE_IDX 2
+#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH__SHIFT 0x10
+#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH_MASK 0x01FF0000L
+
+#include "reg_helper.h"
+#include "dce/dmub_abm.h"
+#include "dce/dmub_psr.h"
+#include "dce/dce_aux.h"
+#include "dce/dce_i2c.h"
+#include "dce/dmub_replay.h"
+
+#include "dml/dcn30/display_mode_vba_30.h"
+#include "vm_helper.h"
+#include "dcn20/dcn20_vmid.h"
+
+#include "link_enc_cfg.h"
+
+#define DC_LOGGER_INIT(logger)
+
+enum dcn31_clk_src_array_id {
+ DCN31_CLK_SRC_PLL0,
+ DCN31_CLK_SRC_PLL1,
+ DCN31_CLK_SRC_PLL2,
+ DCN31_CLK_SRC_PLL3,
+ DCN31_CLK_SRC_PLL4,
+ DCN30_CLK_SRC_TOTAL
+};
+
+/* begin *********************
+ * macros to expand register list macros defined in HW object header files
+ */
+
+/* DCN */
+#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
+
+#define BASE(seg) BASE_INNER(seg)
+
+#define SR(reg_name)\
+ .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+
+#define SRI(reg_name, block, id)\
+ .reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI2(reg_name, block, id)\
+ .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+
+#define SRIR(var_name, reg_name, block, id)\
+ .var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRII(reg_name, block, id)\
+ .reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRII_MPC_RMU(reg_name, block, id)\
+ .RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRII_DWB(reg_name, temp_name, block, id)\
+ .reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## temp_name
+
+#define SF_DWB2(reg_name, block, id, field_name, post_fix) \
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define DCCG_SRII(reg_name, block, id)\
+ .block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define VUPDATE_SRII(reg_name, block, id)\
+ .reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+ reg ## reg_name ## _ ## block ## id
+
+/* NBIO */
+#define NBIO_BASE_INNER(seg) \
+ NBIO_BASE__INST0_SEG ## seg
+
+#define NBIO_BASE(seg) \
+ NBIO_BASE_INNER(seg)
+
+#define NBIO_SR(reg_name)\
+ .reg_name = NBIO_BASE(regBIF_BX1_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX1_ ## reg_name
+
+/* MMHUB */
+#define MMHUB_BASE_INNER(seg) \
+ MMHUB_BASE__INST0_SEG ## seg
+
+#define MMHUB_BASE(seg) \
+ MMHUB_BASE_INNER(seg)
+
+#define MMHUB_SR(reg_name)\
+ .reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+/* CLOCK */
+#define CLK_BASE_INNER(seg) \
+ CLK_BASE__INST0_SEG ## seg
+
+#define CLK_BASE(seg) \
+ CLK_BASE_INNER(seg)
+
+#define CLK_SRI(reg_name, block, inst)\
+ .reg_name = CLK_BASE(reg ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## _ ## inst ## _ ## reg_name
+
+
+static const struct bios_registers bios_regs = {
+ NBIO_SR(BIOS_SCRATCH_3),
+ NBIO_SR(BIOS_SCRATCH_6)
+};
+
+#define clk_src_regs(index, pllid)\
+[index] = {\
+ CS_COMMON_REG_LIST_DCN3_0(index, pllid),\
+}
+
+static const struct dce110_clk_src_regs clk_src_regs[] = {
+ clk_src_regs(0, A),
+ clk_src_regs(1, B),
+ clk_src_regs(2, C),
+ clk_src_regs(3, D),
+ clk_src_regs(4, E)
+};
+/* pll_id is remapped in DMUB; in the driver it is a logical instance */
+static const struct dce110_clk_src_regs clk_src_regs_b0[] = {
+ clk_src_regs(0, A),
+ clk_src_regs(1, B),
+ clk_src_regs(2, F),
+ clk_src_regs(3, G),
+ clk_src_regs(4, E)
+};
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
+};
+
+#define abm_regs(id)\
+[id] = {\
+ ABM_DCN302_REG_LIST(id)\
+}
+
+static const struct dce_abm_registers abm_regs[] = {
+ abm_regs(0),
+ abm_regs(1),
+ abm_regs(2),
+ abm_regs(3),
+};
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCN30(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCN30(_MASK)
+};
+
+#define audio_regs(id)\
+[id] = {\
+ AUD_COMMON_REG_LIST(id)\
+}
+
+static const struct dce_audio_registers audio_regs[] = {
+ audio_regs(0),
+ audio_regs(1),
+ audio_regs(2),
+ audio_regs(3),
+ audio_regs(4),
+ audio_regs(5),
+ audio_regs(6)
+};
+
+#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
+ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
+
+static const struct dce_audio_shift audio_shift = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_audio_mask audio_mask = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define vpg_regs(id)\
+[id] = {\
+ VPG_DCN31_REG_LIST(id)\
+}
+
+static const struct dcn31_vpg_registers vpg_regs[] = {
+ vpg_regs(0),
+ vpg_regs(1),
+ vpg_regs(2),
+ vpg_regs(3),
+ vpg_regs(4),
+ vpg_regs(5),
+ vpg_regs(6),
+ vpg_regs(7),
+ vpg_regs(8),
+ vpg_regs(9),
+};
+
+static const struct dcn31_vpg_shift vpg_shift = {
+ DCN31_VPG_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_vpg_mask vpg_mask = {
+ DCN31_VPG_MASK_SH_LIST(_MASK)
+};
+
+#define afmt_regs(id)\
+[id] = {\
+ AFMT_DCN31_REG_LIST(id)\
+}
+
+static const struct dcn31_afmt_registers afmt_regs[] = {
+ afmt_regs(0),
+ afmt_regs(1),
+ afmt_regs(2),
+ afmt_regs(3),
+ afmt_regs(4),
+ afmt_regs(5)
+};
+
+static const struct dcn31_afmt_shift afmt_shift = {
+ DCN31_AFMT_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_afmt_mask afmt_mask = {
+ DCN31_AFMT_MASK_SH_LIST(_MASK)
+};
+
+#define apg_regs(id)\
+[id] = {\
+ APG_DCN31_REG_LIST(id)\
+}
+
+static const struct dcn31_apg_registers apg_regs[] = {
+ apg_regs(0),
+ apg_regs(1),
+ apg_regs(2),
+ apg_regs(3)
+};
+
+static const struct dcn31_apg_shift apg_shift = {
+ DCN31_APG_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_apg_mask apg_mask = {
+ DCN31_APG_MASK_SH_LIST(_MASK)
+};
+
+#define stream_enc_regs(id)\
+[id] = {\
+ SE_DCN3_REG_LIST(id)\
+}
+
+/* Some encoders won't be initialized here - but they're logical, not physical. */
+static const struct dcn10_stream_enc_registers stream_enc_regs[ENGINE_ID_COUNT] = {
+ stream_enc_regs(0),
+ stream_enc_regs(1),
+ stream_enc_regs(2),
+ stream_enc_regs(3),
+ stream_enc_regs(4)
+};
+
+static const struct dcn10_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
+};
+
+static const struct dcn10_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCN30(_MASK)
+};
+
+
+#define aux_regs(id)\
+[id] = {\
+ DCN2_AUX_REG_LIST(id)\
+}
+
+static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4)
+};
+
+#define link_regs(id, phyid)\
+[id] = {\
+ LE_DCN31_REG_LIST(id), \
+ UNIPHY_DCN2_REG_LIST(phyid), \
+ DPCS_DCN31_REG_LIST(id), \
+}
+
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCN_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCN_AUX_MASK_SH_LIST(_MASK)
+};
+
+static const struct dcn10_link_enc_registers link_enc_regs[] = {
+ link_regs(0, A),
+ link_regs(1, B),
+ link_regs(2, C),
+ link_regs(3, D),
+ link_regs(4, E)
+};
+
+static const struct dcn10_link_enc_shift le_shift = {
+ LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT), \
+ DPCS_DCN31_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn10_link_enc_mask le_mask = {
+ LINK_ENCODER_MASK_SH_LIST_DCN31(_MASK), \
+ DPCS_DCN31_MASK_SH_LIST(_MASK)
+};
+
+#define hpo_dp_stream_encoder_reg_list(id)\
+[id] = {\
+ DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id)\
+}
+
+static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[] = {
+ hpo_dp_stream_encoder_reg_list(0),
+ hpo_dp_stream_encoder_reg_list(1),
+ hpo_dp_stream_encoder_reg_list(2),
+ hpo_dp_stream_encoder_reg_list(3),
+};
+
+static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = {
+ DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = {
+ DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK)
+};
+
+#define hpo_dp_link_encoder_reg_list(id)\
+[id] = {\
+ DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id),\
+ DCN3_1_RDPCSTX_REG_LIST(0),\
+ DCN3_1_RDPCSTX_REG_LIST(1),\
+ DCN3_1_RDPCSTX_REG_LIST(2),\
+ DCN3_1_RDPCSTX_REG_LIST(3),\
+ DCN3_1_RDPCSTX_REG_LIST(4)\
+}
+
+static const struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[] = {
+ hpo_dp_link_encoder_reg_list(0),
+ hpo_dp_link_encoder_reg_list(1),
+};
+
+static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = {
+ DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = {
+ DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK)
+};
+
+#define dpp_regs(id)\
+[id] = {\
+ DPP_REG_LIST_DCN30(id),\
+}
+
+static const struct dcn3_dpp_registers dpp_regs[] = {
+ dpp_regs(0),
+ dpp_regs(1),
+ dpp_regs(2),
+ dpp_regs(3)
+};
+
+static const struct dcn3_dpp_shift tf_shift = {
+ DPP_REG_LIST_SH_MASK_DCN30(__SHIFT)
+};
+
+static const struct dcn3_dpp_mask tf_mask = {
+ DPP_REG_LIST_SH_MASK_DCN30(_MASK)
+};
+
+#define opp_regs(id)\
+[id] = {\
+ OPP_REG_LIST_DCN30(id),\
+}
+
+static const struct dcn20_opp_registers opp_regs[] = {
+ opp_regs(0),
+ opp_regs(1),
+ opp_regs(2),
+ opp_regs(3)
+};
+
+static const struct dcn20_opp_shift opp_shift = {
+ OPP_MASK_SH_LIST_DCN20(__SHIFT)
+};
+
+static const struct dcn20_opp_mask opp_mask = {
+ OPP_MASK_SH_LIST_DCN20(_MASK)
+};
+
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST0(id), \
+ .AUXN_IMPCAL = 0, \
+ .AUXP_IMPCAL = 0, \
+ .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4)
+};
+
+#define dwbc_regs_dcn3(id)\
+[id] = {\
+ DWBC_COMMON_REG_LIST_DCN30(id),\
+}
+
+static const struct dcn30_dwbc_registers dwbc30_regs[] = {
+ dwbc_regs_dcn3(0),
+};
+
+static const struct dcn30_dwbc_shift dwbc30_shift = {
+ DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
+};
+
+static const struct dcn30_dwbc_mask dwbc30_mask = {
+ DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK)
+};
+
+#define mcif_wb_regs_dcn3(id)\
+[id] = {\
+ MCIF_WB_COMMON_REG_LIST_DCN30(id),\
+}
+
+static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = {
+ mcif_wb_regs_dcn3(0)
+};
+
+static const struct dcn30_mmhubbub_shift mcif_wb30_shift = {
+ MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
+};
+
+static const struct dcn30_mmhubbub_mask mcif_wb30_mask = {
+ MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK)
+};
+
+#define dsc_regsDCN20(id)\
+[id] = {\
+ DSC_REG_LIST_DCN20(id)\
+}
+
+static const struct dcn20_dsc_registers dsc_regs[] = {
+ dsc_regsDCN20(0),
+ dsc_regsDCN20(1),
+ dsc_regsDCN20(2)
+};
+
+static const struct dcn20_dsc_shift dsc_shift = {
+ DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
+};
+
+static const struct dcn20_dsc_mask dsc_mask = {
+ DSC_REG_LIST_SH_MASK_DCN20(_MASK)
+};
+
+static const struct dcn30_mpc_registers mpc_regs = {
+ MPC_REG_LIST_DCN3_0(0),
+ MPC_REG_LIST_DCN3_0(1),
+ MPC_REG_LIST_DCN3_0(2),
+ MPC_REG_LIST_DCN3_0(3),
+ MPC_OUT_MUX_REG_LIST_DCN3_0(0),
+ MPC_OUT_MUX_REG_LIST_DCN3_0(1),
+ MPC_OUT_MUX_REG_LIST_DCN3_0(2),
+ MPC_OUT_MUX_REG_LIST_DCN3_0(3),
+ MPC_RMU_GLOBAL_REG_LIST_DCN3AG,
+ MPC_RMU_REG_LIST_DCN3AG(0),
+ MPC_RMU_REG_LIST_DCN3AG(1),
+ //MPC_RMU_REG_LIST_DCN3AG(2),
+ MPC_DWB_MUX_REG_LIST_DCN3_0(0),
+};
+
+static const struct dcn30_mpc_shift mpc_shift = {
+ MPC_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
+};
+
+static const struct dcn30_mpc_mask mpc_mask = {
+ MPC_COMMON_MASK_SH_LIST_DCN30(_MASK)
+};
+
+#define optc_regs(id)\
+[id] = {OPTC_COMMON_REG_LIST_DCN3_1(id)}
+
+static const struct dcn_optc_registers optc_regs[] = {
+ optc_regs(0),
+ optc_regs(1),
+ optc_regs(2),
+ optc_regs(3)
+};
+
+static const struct dcn_optc_shift optc_shift = {
+ OPTC_COMMON_MASK_SH_LIST_DCN3_1(__SHIFT)
+};
+
+static const struct dcn_optc_mask optc_mask = {
+ OPTC_COMMON_MASK_SH_LIST_DCN3_1(_MASK)
+};
+
+#define hubp_regs(id)\
+[id] = {\
+ HUBP_REG_LIST_DCN30(id)\
+}
+
+static const struct dcn_hubp2_registers hubp_regs[] = {
+ hubp_regs(0),
+ hubp_regs(1),
+ hubp_regs(2),
+ hubp_regs(3)
+};
+
+
+static const struct dcn_hubp2_shift hubp_shift = {
+ HUBP_MASK_SH_LIST_DCN31(__SHIFT)
+};
+
+static const struct dcn_hubp2_mask hubp_mask = {
+ HUBP_MASK_SH_LIST_DCN31(_MASK)
+};
+static const struct dcn_hubbub_registers hubbub_reg = {
+ HUBBUB_REG_LIST_DCN31(0)
+};
+
+static const struct dcn_hubbub_shift hubbub_shift = {
+ HUBBUB_MASK_SH_LIST_DCN31(__SHIFT)
+};
+
+static const struct dcn_hubbub_mask hubbub_mask = {
+ HUBBUB_MASK_SH_LIST_DCN31(_MASK)
+};
+
+static const struct dccg_registers dccg_regs = {
+ DCCG_REG_LIST_DCN31()
+};
+
+static const struct dccg_shift dccg_shift = {
+ DCCG_MASK_SH_LIST_DCN31(__SHIFT)
+};
+
+static const struct dccg_mask dccg_mask = {
+ DCCG_MASK_SH_LIST_DCN31(_MASK)
+};
+
+
+#define SRII2(reg_name_pre, reg_name_post, id)\
+ .reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \
+ ## id ## _ ## reg_name_post ## _BASE_IDX) + \
+ reg ## reg_name_pre ## id ## _ ## reg_name_post
+
+
+#define HWSEQ_DCN31_REG_LIST()\
+ SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
+ SR(DCHUBBUB_ARB_HOSTVM_CNTL), \
+ SR(DIO_MEM_PWR_CTRL), \
+ SR(ODM_MEM_PWR_CTRL3), \
+ SR(DMU_MEM_PWR_CNTL), \
+ SR(MMHUBBUB_MEM_PWR_CNTL), \
+ SR(DCCG_GATE_DISABLE_CNTL), \
+ SR(DCCG_GATE_DISABLE_CNTL2), \
+ SR(DCFCLK_CNTL),\
+ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \
+ SRII(PIXEL_RATE_CNTL, OTG, 0), \
+ SRII(PIXEL_RATE_CNTL, OTG, 1),\
+ SRII(PIXEL_RATE_CNTL, OTG, 2),\
+ SRII(PIXEL_RATE_CNTL, OTG, 3),\
+ SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 0),\
+ SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 1),\
+ SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 2),\
+ SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 3),\
+ SR(MICROSECOND_TIME_BASE_DIV), \
+ SR(MILLISECOND_TIME_BASE_DIV), \
+ SR(DISPCLK_FREQ_CHANGE_CNTL), \
+ SR(RBBMIF_TIMEOUT_DIS), \
+ SR(RBBMIF_TIMEOUT_DIS_2), \
+ SR(DCHUBBUB_CRC_CTRL), \
+ SR(DPP_TOP0_DPP_CRC_CTRL), \
+ SR(DPP_TOP0_DPP_CRC_VAL_B_A), \
+ SR(DPP_TOP0_DPP_CRC_VAL_R_G), \
+ SR(MPC_CRC_CTRL), \
+ SR(MPC_CRC_RESULT_GB), \
+ SR(MPC_CRC_RESULT_C), \
+ SR(MPC_CRC_RESULT_AR), \
+ SR(DOMAIN0_PG_CONFIG), \
+ SR(DOMAIN1_PG_CONFIG), \
+ SR(DOMAIN2_PG_CONFIG), \
+ SR(DOMAIN3_PG_CONFIG), \
+ SR(DOMAIN16_PG_CONFIG), \
+ SR(DOMAIN17_PG_CONFIG), \
+ SR(DOMAIN18_PG_CONFIG), \
+ SR(DOMAIN0_PG_STATUS), \
+ SR(DOMAIN1_PG_STATUS), \
+ SR(DOMAIN2_PG_STATUS), \
+ SR(DOMAIN3_PG_STATUS), \
+ SR(DOMAIN16_PG_STATUS), \
+ SR(DOMAIN17_PG_STATUS), \
+ SR(DOMAIN18_PG_STATUS), \
+ SR(D1VGA_CONTROL), \
+ SR(D2VGA_CONTROL), \
+ SR(D3VGA_CONTROL), \
+ SR(D4VGA_CONTROL), \
+ SR(D5VGA_CONTROL), \
+ SR(D6VGA_CONTROL), \
+ SR(DC_IP_REQUEST_CNTL), \
+ SR(AZALIA_AUDIO_DTO), \
+ SR(AZALIA_CONTROLLER_CLOCK_GATING), \
+ SR(HPO_TOP_HW_CONTROL)
+
+static const struct dce_hwseq_registers hwseq_reg = {
+ HWSEQ_DCN31_REG_LIST()
+};
+
+#define HWSEQ_DCN31_MASK_SH_LIST(mask_sh)\
+ HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
+ HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \
+ HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
+ HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \
+ HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \
+ HWS_SF(, DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, mask_sh), \
+ HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \
+ HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \
+ HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh), \
+ HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \
+ HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh)
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCN31_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCN31_MASK_SH_LIST(_MASK)
+};
+#define vmid_regs(id)\
+[id] = {\
+ DCN20_VMID_REG_LIST(id)\
+}
+
+static const struct dcn_vmid_registers vmid_regs[] = {
+ vmid_regs(0),
+ vmid_regs(1),
+ vmid_regs(2),
+ vmid_regs(3),
+ vmid_regs(4),
+ vmid_regs(5),
+ vmid_regs(6),
+ vmid_regs(7),
+ vmid_regs(8),
+ vmid_regs(9),
+ vmid_regs(10),
+ vmid_regs(11),
+ vmid_regs(12),
+ vmid_regs(13),
+ vmid_regs(14),
+ vmid_regs(15)
+};
+
+static const struct dcn20_vmid_shift vmid_shifts = {
+ DCN20_VMID_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn20_vmid_mask vmid_masks = {
+ DCN20_VMID_MASK_SH_LIST(_MASK)
+};
+
+static const struct resource_caps res_cap_dcn31 = {
+ .num_timing_generator = 4,
+ .num_opp = 4,
+ .num_video_plane = 4,
+ .num_audio = 5,
+ .num_stream_encoder = 5,
+ .num_dig_link_enc = 5,
+ .num_hpo_dp_stream_encoder = 4,
+ .num_hpo_dp_link_encoder = 2,
+ .num_pll = 5,
+ .num_dwb = 1,
+ .num_ddc = 5,
+ .num_vmid = 16,
+ .num_mpc_3dlut = 2,
+ .num_dsc = 3,
+};
+
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCN_UNIVERSAL,
+ .per_pixel_alpha = true,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = true,
+ .fp16 = true,
+ .p010 = true,
+ .ayuv = false,
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 16000,
+ .fp16 = 16000
+ },
+
+ // 6:1 downscaling ratio: 1000/6 = 166.666
+ .max_downscale_factor = {
+ .argb8888 = 167,
+ .nv12 = 167,
+ .fp16 = 167
+ },
+ 64,
+ 64
+};
+
+static const struct dc_debug_options debug_defaults_drv = {
+ .disable_dmcu = true,
+ .force_abm_enable = false,
+ .timing_trace = false,
+ .clock_trace = true,
+ .disable_pplib_clock_request = false,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ .force_single_disp_pipe_split = false,
+ .disable_dcc = DCC_ENABLE,
+ .vsr_support = true,
+ .performance_trace = false,
+ .max_downscale_src_width = 4096, /* up to true 4K */
+ .disable_pplib_wm_range = false,
+ .scl_reset_length10 = true,
+ .sanity_checks = true,
+ .underflow_assert_delay_us = 0xFFFFFFFF,
+ .dwb_fi_phase = -1, // -1 = disable,
+ .dmub_command_table = true,
+ .pstate_enabled = true,
+ .use_max_lb = true,
+ .enable_mem_low_power = {
+ .bits = {
+ .vga = true,
+ .i2c = true,
+ .dmcu = false, // Previously known to cause a hang on S3 cycles if enabled
+ .dscl = true,
+ .cm = true,
+ .mpc = true,
+ .optc = true,
+ .vpg = true,
+ .afmt = true,
+ }
+ },
+ .disable_z10 = true,
+ .enable_legacy_fast_update = true,
+ .enable_z9_disable_interface = true, /* Allow support for the PMFW interface to disable Z9 */
+ .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
+};
+
+static const struct dc_panel_config panel_config_defaults = {
+ .psr = {
+ .disable_psr = false,
+ .disallow_psrsu = false,
+ .disallow_replay = false,
+ },
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
+static void dcn31_dpp_destroy(struct dpp **dpp)
+{
+ kfree(TO_DCN20_DPP(*dpp));
+ *dpp = NULL;
+}
+
+static struct dpp *dcn31_dpp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn3_dpp *dpp =
+ kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL);
+
+ if (!dpp)
+ return NULL;
+
+ if (dpp3_construct(dpp, ctx, inst,
+ &dpp_regs[inst], &tf_shift, &tf_mask))
+ return &dpp->base;
+
+ BREAK_TO_DEBUGGER();
+ kfree(dpp);
+ return NULL;
+}
+
+static struct output_pixel_processor *dcn31_opp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn20_opp *opp =
+ kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
+
+ if (!opp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dcn20_opp_construct(opp, ctx, inst,
+ &opp_regs[inst], &opp_shift, &opp_mask);
+ return &opp->base;
+}
+
+static struct dce_aux *dcn31_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
+
+ return &aux_engine->base;
+}
+#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST_DCN30(id) }
+
+static const struct dce_i2c_registers i2c_hw_regs[] = {
+ i2c_inst_regs(1),
+ i2c_inst_regs(2),
+ i2c_inst_regs(3),
+ i2c_inst_regs(4),
+ i2c_inst_regs(5),
+};
+
+static const struct dce_i2c_shift i2c_shifts = {
+ I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
+};
+
+static const struct dce_i2c_mask i2c_masks = {
+ I2C_COMMON_MASK_SH_LIST_DCN30(_MASK)
+};
+
+static struct dce_i2c_hw *dcn31_i2c_hw_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_i2c_hw *dce_i2c_hw =
+ kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
+
+ if (!dce_i2c_hw)
+ return NULL;
+
+ dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
+ &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
+
+ return dce_i2c_hw;
+}
+static struct mpc *dcn31_mpc_create(
+ struct dc_context *ctx,
+ int num_mpcc,
+ int num_rmu)
+{
+ struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc),
+ GFP_KERNEL);
+
+ if (!mpc30)
+ return NULL;
+
+ dcn30_mpc_construct(mpc30, ctx,
+ &mpc_regs,
+ &mpc_shift,
+ &mpc_mask,
+ num_mpcc,
+ num_rmu);
+
+ return &mpc30->base;
+}
+
+static struct hubbub *dcn31_hubbub_create(struct dc_context *ctx)
+{
+ int i;
+
+ struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub),
+ GFP_KERNEL);
+
+ if (!hubbub3)
+ return NULL;
+
+ hubbub31_construct(hubbub3, ctx,
+ &hubbub_reg,
+ &hubbub_shift,
+ &hubbub_mask,
+ dcn3_1_ip.det_buffer_size_kbytes,
+ dcn3_1_ip.pixel_chunk_size_kbytes,
+ dcn3_1_ip.config_return_buffer_size_in_kbytes);
+
+
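+ /* Hook up the per-VMID register sets used to program GPU virtual memory contexts. */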
+ for (i = 0; i < res_cap_dcn31.num_vmid; i++) {
+ struct dcn20_vmid *vmid = &hubbub3->vmid[i];
+
+ vmid->ctx = ctx;
+
+ vmid->regs = &vmid_regs[i];
+ vmid->shifts = &vmid_shifts;
+ vmid->masks = &vmid_masks;
+ }
+
+ return &hubbub3->base;
+}
+
+static struct timing_generator *dcn31_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance)
+{
+ struct optc *tgn10 =
+ kzalloc(sizeof(struct optc), GFP_KERNEL);
+
+ if (!tgn10)
+ return NULL;
+
+ tgn10->base.inst = instance;
+ tgn10->base.ctx = ctx;
+
+ tgn10->tg_regs = &optc_regs[instance];
+ tgn10->tg_shift = &optc_shift;
+ tgn10->tg_mask = &optc_mask;
+
+ dcn31_timing_generator_init(tgn10);
+
+ return &tgn10->base;
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 600000,
+ .hdmi_ycbcr420_supported = true,
+ .dp_ycbcr420_supported = true,
+ .fec_supported = true,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_TPS4_CAPABLE = true
+};
+
+static struct link_encoder *dcn31_link_encoder_create(
+ struct dc_context *ctx,
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dcn20_link_encoder *enc20 =
+ kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
+
+ if (!enc20)
+ return NULL;
+
+ dcn31_link_encoder_construct(enc20,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source],
+ &le_shift,
+ &le_mask);
+
+ return &enc20->enc10.base;
+}
+
+/* Create a minimal link encoder object not associated with a particular
+ * physical connector.
+ * resource_funcs.link_enc_create_minimal
+ */
+static struct link_encoder *dcn31_link_enc_create_minimal(
+ struct dc_context *ctx, enum engine_id eng_id)
+{
+ struct dcn20_link_encoder *enc20;
+
+ if ((eng_id - ENGINE_ID_DIGA) > ctx->dc->res_pool->res_cap->num_dig_link_enc)
+ return NULL;
+
+ enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
+ if (!enc20)
+ return NULL;
+
+ dcn31_link_encoder_construct_minimal(
+ enc20,
+ ctx,
+ &link_enc_feature,
+ &link_enc_regs[eng_id - ENGINE_ID_DIGA],
+ eng_id);
+
+ return &enc20->enc10.base;
+}
+
+static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dcn31_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dcn31_panel_cntl_construct(panel_cntl, init_data);
+
+ return &panel_cntl->base;
+}
+
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+ generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX),
+ FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
+
+}
+
+static struct audio *dcn31_create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+static struct vpg *dcn31_vpg_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn31_vpg *vpg31 = kzalloc(sizeof(struct dcn31_vpg), GFP_KERNEL);
+
+ if (!vpg31)
+ return NULL;
+
+ vpg31_construct(vpg31, ctx, inst,
+ &vpg_regs[inst],
+ &vpg_shift,
+ &vpg_mask);
+
+ return &vpg31->base;
+}
+
+static struct afmt *dcn31_afmt_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn31_afmt *afmt31 = kzalloc(sizeof(struct dcn31_afmt), GFP_KERNEL);
+
+ if (!afmt31)
+ return NULL;
+
+ afmt31_construct(afmt31, ctx, inst,
+ &afmt_regs[inst],
+ &afmt_shift,
+ &afmt_mask);
+
+ // Light sleep by default, no need to power down here
+
+ return &afmt31->base;
+}
+
+static struct apg *dcn31_apg_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL);
+
+ if (!apg31)
+ return NULL;
+
+ apg31_construct(apg31, ctx, inst,
+ &apg_regs[inst],
+ &apg_shift,
+ &apg_mask);
+
+ return &apg31->base;
+}
+
+static struct stream_encoder *dcn31_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dcn10_stream_encoder *enc1;
+ struct vpg *vpg;
+ struct afmt *afmt;
+ int vpg_inst;
+ int afmt_inst;
+
+ /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
+ if (eng_id <= ENGINE_ID_DIGF) {
+ vpg_inst = eng_id;
+ afmt_inst = eng_id;
+ } else
+ return NULL;
+
+ enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
+ vpg = dcn31_vpg_create(ctx, vpg_inst);
+ afmt = dcn31_afmt_create(ctx, afmt_inst);
+
+ if (!enc1 || !vpg || !afmt) {
+ kfree(enc1);
+ kfree(vpg);
+ kfree(afmt);
+ return NULL;
+ }
+
+ dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
+ eng_id, vpg, afmt,
+ &stream_enc_regs[eng_id],
+ &se_shift, &se_mask);
+
+ return &enc1->base;
+}
+
+static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31;
+ struct vpg *vpg;
+ struct apg *apg;
+ uint32_t hpo_dp_inst;
+ uint32_t vpg_inst;
+ uint32_t apg_inst;
+
+ ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3));
+ hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0;
+
+ /* Mapping of VPG register blocks to HPO DP block instance:
+ * VPG[6] -> HPO_DP[0]
+ * VPG[7] -> HPO_DP[1]
+ * VPG[8] -> HPO_DP[2]
+ * VPG[9] -> HPO_DP[3]
+ */
+ vpg_inst = hpo_dp_inst + 6;
+
+ /* Mapping of APG register blocks to HPO DP block instance:
+ * APG[0] -> HPO_DP[0]
+ * APG[1] -> HPO_DP[1]
+ * APG[2] -> HPO_DP[2]
+ * APG[3] -> HPO_DP[3]
+ */
+ apg_inst = hpo_dp_inst;
+
+ /* allocate HPO stream encoder and create VPG sub-block */
+ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL);
+ vpg = dcn31_vpg_create(ctx, vpg_inst);
+ apg = dcn31_apg_create(ctx, apg_inst);
+
+ if (!hpo_dp_enc31 || !vpg || !apg) {
+ kfree(hpo_dp_enc31);
+ kfree(vpg);
+ kfree(apg);
+ return NULL;
+ }
+
+ dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios,
+ hpo_dp_inst, eng_id, vpg, apg,
+ &hpo_dp_stream_enc_regs[hpo_dp_inst],
+ &hpo_dp_se_shift, &hpo_dp_se_mask);
+
+ return &hpo_dp_enc31->base;
+}
+
+static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
+ uint8_t inst,
+ struct dc_context *ctx)
+{
+ struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31;
+
+ /* allocate HPO link encoder */
+ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+
+ hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
+ &hpo_dp_link_enc_regs[inst],
+ &hpo_dp_le_shift, &hpo_dp_le_mask);
+
+ return &hpo_dp_enc31->base;
+}
+
+static struct dce_hwseq *dcn31_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = &hwseq_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ }
+ return hws;
+}
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = dcn31_create_audio,
+ .create_stream_encoder = dcn31_stream_encoder_create,
+ .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create,
+ .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create,
+ .create_hwseq = dcn31_hwseq_create,
+};
+
+static void dcn31_resource_destruct(struct dcn31_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL) {
+ if (pool->base.stream_enc[i]->vpg != NULL) {
+ kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg));
+ pool->base.stream_enc[i]->vpg = NULL;
+ }
+ if (pool->base.stream_enc[i]->afmt != NULL) {
+ kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt));
+ pool->base.stream_enc[i]->afmt = NULL;
+ }
+ kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+ pool->base.stream_enc[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) {
+ if (pool->base.hpo_dp_stream_enc[i] != NULL) {
+ if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) {
+ kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg));
+ pool->base.hpo_dp_stream_enc[i]->vpg = NULL;
+ }
+ if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) {
+ kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg));
+ pool->base.hpo_dp_stream_enc[i]->apg = NULL;
+ }
+ kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i]));
+ pool->base.hpo_dp_stream_enc[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) {
+ if (pool->base.hpo_dp_link_enc[i] != NULL) {
+ kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i]));
+ pool->base.hpo_dp_link_enc[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
+ if (pool->base.dscs[i] != NULL)
+ dcn20_dsc_destroy(&pool->base.dscs[i]);
+ }
+
+ if (pool->base.mpc != NULL) {
+ kfree(TO_DCN20_MPC(pool->base.mpc));
+ pool->base.mpc = NULL;
+ }
+ if (pool->base.hubbub != NULL) {
+ kfree(pool->base.hubbub);
+ pool->base.hubbub = NULL;
+ }
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.dpps[i] != NULL)
+ dcn31_dpp_destroy(&pool->base.dpps[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.hubps[i] != NULL) {
+ kfree(TO_DCN20_HUBP(pool->base.hubps[i]));
+ pool->base.hubps[i] = NULL;
+ }
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+ if (pool->base.hw_i2cs[i] != NULL) {
+ kfree(pool->base.hw_i2cs[i]);
+ pool->base.hw_i2cs[i] = NULL;
+ }
+ if (pool->base.sw_i2cs[i] != NULL) {
+ kfree(pool->base.sw_i2cs[i]);
+ pool->base.sw_i2cs[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_opp; i++) {
+ if (pool->base.opps[i] != NULL)
+ pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_dwb; i++) {
+ if (pool->base.dwbc[i] != NULL) {
+ kfree(TO_DCN30_DWBC(pool->base.dwbc[i]));
+ pool->base.dwbc[i] = NULL;
+ }
+ if (pool->base.mcif_wb[i] != NULL) {
+ kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i]));
+ pool->base.mcif_wb[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i])
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL) {
+ dcn20_clock_source_destroy(&pool->base.clock_sources[i]);
+ pool->base.clock_sources[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) {
+ if (pool->base.mpc_lut[i] != NULL) {
+ dc_3dlut_func_release(pool->base.mpc_lut[i]);
+ pool->base.mpc_lut[i] = NULL;
+ }
+ if (pool->base.mpc_shaper[i] != NULL) {
+ dc_transfer_func_release(pool->base.mpc_shaper[i]);
+ pool->base.mpc_shaper[i] = NULL;
+ }
+ }
+
+ if (pool->base.dp_clock_source != NULL) {
+ dcn20_clock_source_destroy(&pool->base.dp_clock_source);
+ pool->base.dp_clock_source = NULL;
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ if (pool->base.multiple_abms[i] != NULL)
+ dce_abm_destroy(&pool->base.multiple_abms[i]);
+ }
+
+ if (pool->base.psr != NULL)
+ dmub_psr_destroy(&pool->base.psr);
+
+ if (pool->base.replay != NULL)
+ dmub_replay_destroy(&pool->base.replay);
+
+ if (pool->base.dccg != NULL)
+ dcn_dccg_destroy(&pool->base.dccg);
+}
+
+static struct hubp *dcn31_hubp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn20_hubp *hubp2 =
+ kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
+
+ if (!hubp2)
+ return NULL;
+
+ if (hubp31_construct(hubp2, ctx, inst,
+ &hubp_regs[inst], &hubp_shift, &hubp_mask))
+ return &hubp2->base;
+
+ BREAK_TO_DEBUGGER();
+ kfree(hubp2);
+ return NULL;
+}
+
+static bool dcn31_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
+{
+ int i;
+ uint32_t pipe_count = pool->res_cap->num_dwb;
+
+ for (i = 0; i < pipe_count; i++) {
+ struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc),
+ GFP_KERNEL);
+
+ if (!dwbc30) {
+ dm_error("DC: failed to create dwbc30!\n");
+ return false;
+ }
+
+ dcn30_dwbc_construct(dwbc30, ctx,
+ &dwbc30_regs[i],
+ &dwbc30_shift,
+ &dwbc30_mask,
+ i);
+
+ pool->dwbc[i] = &dwbc30->base;
+ }
+ return true;
+}
+
+static bool dcn31_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
+{
+ int i;
+ uint32_t pipe_count = pool->res_cap->num_dwb;
+
+ for (i = 0; i < pipe_count; i++) {
+ struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub),
+ GFP_KERNEL);
+
+ if (!mcif_wb30) {
+ dm_error("DC: failed to create mcif_wb30!\n");
+ return false;
+ }
+
+ dcn30_mmhubbub_construct(mcif_wb30, ctx,
+ &mcif_wb30_regs[i],
+ &mcif_wb30_shift,
+ &mcif_wb30_mask,
+ i);
+
+ pool->mcif_wb[i] = &mcif_wb30->base;
+ }
+ return true;
+}
+
+static struct display_stream_compressor *dcn31_dsc_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn20_dsc *dsc =
+ kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
+
+ if (!dsc) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
+ return &dsc->base;
+}
+
+static void dcn31_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dcn31_resource_pool *dcn31_pool = TO_DCN31_RES_POOL(*pool);
+
+ dcn31_resource_destruct(dcn31_pool);
+ kfree(dcn31_pool);
+ *pool = NULL;
+}
+
+static struct clock_source *dcn31_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dcn3_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ kfree(clk_src);
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
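+/* Formats that require a second surface plane: video (YUV) formats and RGBE with alpha. */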
+static bool is_dual_plane(enum surface_pixel_format format)
+{
+ return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
+}
+
+int dcn31x_populate_dml_pipes_from_context(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate)
+{
+ uint32_t pipe_cnt;
+ int i;
+
+ dc_assert_fp_enabled();
+
+ pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+
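+ /* Start from the common DCN20 DML population, then apply the DCN3.1 GPUVM/HOSTVM overrides per pipe. */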
+ for (i = 0; i < pipe_cnt; i++) {
+ pipes[i].pipe.src.gpuvm = 1;
+ if (dc->debug.dml_hostvm_override == DML_HOSTVM_NO_OVERRIDE) {
+ //pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active;
+ pipes[i].pipe.src.hostvm = dc->vm_pa_config.is_hvm_enabled;
+ } else if (dc->debug.dml_hostvm_override == DML_HOSTVM_OVERRIDE_FALSE)
+ pipes[i].pipe.src.hostvm = false;
+ else if (dc->debug.dml_hostvm_override == DML_HOSTVM_OVERRIDE_TRUE)
+ pipes[i].pipe.src.hostvm = true;
+ }
+ return pipe_cnt;
+}
+
+int dcn31_populate_dml_pipes_from_context(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate)
+{
+ int i, pipe_cnt;
+ struct resource_context *res_ctx = &context->res_ctx;
+ struct pipe_ctx *pipe;
+ bool upscaled = false;
+
+ DC_FP_START();
+ dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ DC_FP_END();
+
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_crtc_timing *timing;
+
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+ pipe = &res_ctx->pipe_ctx[i];
+ timing = &pipe->stream->timing;
+ if (pipe->plane_state &&
+ (pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height ||
+ pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width))
+ upscaled = true;
+
+ /*
+ * Immediate flip can be set dynamically after enabling the plane.
+ * We need to require support for immediate flip, or underflow may be
+ * experienced intermittently depending on peak bandwidth requirements.
+ */
+ pipes[pipe_cnt].pipe.src.immediate_flip = true;
+ pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
+ pipes[pipe_cnt].pipe.src.gpuvm = true;
+ pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
+ pipes[pipe_cnt].pipe.src.dcc_rate = 3;
+ pipes[pipe_cnt].dout.dsc_input_bpc = 0;
+ DC_FP_START();
+ dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
+ DC_FP_END();
+
+
+ if (pipes[pipe_cnt].dout.dsc_enable) {
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_888:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 12;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+
+ pipe_cnt++;
+ }
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_1_DEFAULT_DET_SIZE;
+ dc->config.enable_4to1MPC = false;
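+ /*
+ * Single-pipe policy: allow 4:1 MPC combine for small (<=1080p) video
+ * planes, or grow the DET allocation and use unbounded requesting for a
+ * single large graphics plane.
+ */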
+ if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
+ if (is_dual_plane(pipe->plane_state->format)
+ && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
+ dc->config.enable_4to1MPC = true;
+ } else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) {
+ /* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+ pipes[0].pipe.src.unbounded_req_mode = true;
+ }
+ } else if (context->stream_count >= dc->debug.crb_alloc_policy_min_disp_count
+ && dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = dc->debug.crb_alloc_policy * 64;
+ } else if (context->stream_count >= 3 && upscaled) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+ }
+
+ return pipe_cnt;
+}
+
+void dcn31_calculate_wm_and_dlg(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel)
+{
+ DC_FP_START();
+ dcn31_calculate_wm_and_dlg_fp(dc, context, pipes, pipe_cnt, vlevel);
+ DC_FP_END();
+}
+
+void
+dcn31_populate_dml_writeback_from_context(struct dc *dc,
+ struct resource_context *res_ctx,
+ display_e2e_pipe_params_st *pipes)
+{
+ DC_FP_START();
+ dcn30_populate_dml_writeback_from_context(dc, res_ctx, pipes);
+ DC_FP_END();
+}
+
+void
+dcn31_set_mcif_arb_params(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt)
+{
+ DC_FP_START();
+ dcn30_set_mcif_arb_params(dc, context, pipes, pipe_cnt);
+ DC_FP_END();
+}
+
+bool dcn31_validate_bandwidth(struct dc *dc,
+ struct dc_state *context,
+ bool fast_validate)
+{
+ bool out = false;
+
+ BW_VAL_TRACE_SETUP();
+
+ int vlevel = 0;
+ int pipe_cnt = 0;
+ display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ BW_VAL_TRACE_COUNT();
+
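+ /* Run DML bandwidth validation (FPU code) to determine pipe count and voltage level. */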
+ DC_FP_START();
+ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
+ DC_FP_END();
+
+ // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
+ if (pipe_cnt == 0)
+ fast_validate = false;
+
+ if (!out)
+ goto validate_fail;
+
+ BW_VAL_TRACE_END_VOLTAGE_LEVEL();
+
+ if (fast_validate) {
+ BW_VAL_TRACE_SKIP(fast);
+ goto validate_out;
+ }
+ if (dc->res_pool->funcs->calculate_wm_and_dlg)
+ dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
+
+ BW_VAL_TRACE_END_WATERMARKS();
+
+ goto validate_out;
+
+validate_fail:
+ DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
+ dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
+
+ BW_VAL_TRACE_SKIP(fail);
+ out = false;
+
+validate_out:
+ kfree(pipes);
+
+ BW_VAL_TRACE_FINISH();
+
+ return out;
+}
+
+static void dcn31_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
+static struct dc_cap_funcs cap_funcs = {
+ .get_dcc_compression_cap = dcn20_get_dcc_compression_cap
+};
+
+static struct resource_funcs dcn31_res_pool_funcs = {
+ .destroy = dcn31_destroy_resource_pool,
+ .link_enc_create = dcn31_link_encoder_create,
+ .link_enc_create_minimal = dcn31_link_enc_create_minimal,
+ .link_encs_assign = link_enc_cfg_link_encs_assign,
+ .link_enc_unassign = link_enc_cfg_link_enc_unassign,
+ .panel_cntl_create = dcn31_panel_cntl_create,
+ .validate_bandwidth = dcn31_validate_bandwidth,
+ .calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg,
+ .update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
+ .populate_dml_pipes = dcn31_populate_dml_pipes_from_context,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
+ .add_stream_to_ctx = dcn30_add_stream_to_ctx,
+ .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
+ .remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
+ .populate_dml_writeback_from_context = dcn31_populate_dml_writeback_from_context,
+ .set_mcif_arb_params = dcn31_set_mcif_arb_params,
+ .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
+ .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
+ .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
+ .update_bw_bounding_box = dcn31_update_bw_bounding_box,
+ .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_panel_config_defaults = dcn31_get_panel_config_defaults,
+};
+
+static struct clock_source *dcn30_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dcn31_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+static bool dcn31_resource_construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dcn31_resource_pool *pool)
+{
+ int i;
+ struct dc_context *ctx = dc->ctx;
+ struct irq_service_init_data init_data;
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap_dcn31;
+
+ pool->base.funcs = &dcn31_res_pool_funcs;
+
+ /*************************************************
+ * Resource + asic cap hardcoding *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+ pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
+ dc->caps.max_downscale_ratio = 600;
+ dc->caps.i2c_speed_in_khz = 100;
+ dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.4 w/a applied by default*/
+ dc->caps.max_cursor_size = 256;
+ dc->caps.min_horizontal_blanking_period = 80;
+ dc->caps.dmdata_alloc_size = 2048;
+
+ dc->caps.max_slave_planes = 2;
+ dc->caps.max_slave_yuv_planes = 2;
+ dc->caps.max_slave_rgb_planes = 2;
+ dc->caps.post_blend_color_processing = true;
+ dc->caps.force_dp_tps4_for_cp2520 = true;
+ if (dc->config.forceHBR2CP2520)
+ dc->caps.force_dp_tps4_for_cp2520 = false;
+ dc->caps.dp_hpo = true;
+ dc->caps.dp_hdmi21_pcon_support = true;
+ dc->caps.edp_dsc_support = true;
+ dc->caps.extended_aux_timeout_support = true;
+ dc->caps.dmcub_support = true;
+ dc->caps.is_apu = true;
+ dc->caps.zstate_support = true;
+
+ /* Color pipeline capabilities */
+ dc->caps.color.dpp.dcn_arch = 1;
+ dc->caps.color.dpp.input_lut_shared = 0;
+ dc->caps.color.dpp.icsc = 1;
+ dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr
+ dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+ dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1;
+ dc->caps.color.dpp.dgam_rom_caps.pq = 1;
+ dc->caps.color.dpp.dgam_rom_caps.hlg = 1;
+ dc->caps.color.dpp.post_csc = 1;
+ dc->caps.color.dpp.gamma_corr = 1;
+ dc->caps.color.dpp.dgam_rom_for_yuv = 0;
+
+ dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.ogam_ram = 1;
+ // no OGAM ROM on DCN31
+ dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
+ dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+ dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.ocsc = 0;
+
+ dc->caps.color.mpc.gamut_remap = 1;
+ dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //2
+ dc->caps.color.mpc.ogam_ram = 1;
+ dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+ dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+ dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+ dc->caps.color.mpc.ocsc = 1;
+
+ dc->config.use_old_fixed_vs_sequence = true;
+
+ /* Use pipe context based otg sync logic */
+ dc->config.use_pipe_ctx_sync_logic = true;
+
+ /* read VBIOS LTTPR caps */
+ {
+ if (ctx->dc_bios->funcs->get_lttpr_caps) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_lttpr_enable = 0;
+
+ bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
+ dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+ }
+
+ /* interop bit is implicit */
+ {
+ dc->caps.vbios_lttpr_aware = true;
+ }
+ }
+
+ if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
+ dc->debug = debug_defaults_drv;
+
+ // Init the vm_helper
+ if (dc->vm_helper)
+ vm_helper_init(dc->vm_helper, 16);
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ /* Clock Sources for Pixel Clock*/
+ pool->base.clock_sources[DCN31_CLK_SRC_PLL0] =
+ dcn30_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL0,
+ &clk_src_regs[0], false);
+ pool->base.clock_sources[DCN31_CLK_SRC_PLL1] =
+ dcn30_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL1,
+ &clk_src_regs[1], false);
+ /*move phypllx_pixclk_resync to dmub next*/
+ if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+ pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
+ dcn30_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ &clk_src_regs_b0[2], false);
+ pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
+ dcn30_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ &clk_src_regs_b0[3], false);
+ } else {
+ pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
+ dcn30_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ &clk_src_regs[2], false);
+ pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
+ dcn30_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ &clk_src_regs[3], false);
+ }
+
+ pool->base.clock_sources[DCN31_CLK_SRC_PLL4] =
+ dcn30_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL4,
+ &clk_src_regs[4], false);
+
+ pool->base.clk_src_count = DCN30_CLK_SRC_TOTAL;
+
+	/* TODO: do not reuse phy_pll registers */
+ pool->base.dp_clock_source =
+ dcn31_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_ID_DP_DTO,
+ &clk_src_regs[0], true);
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+ }
+
+ /* TODO: DCCG */
+ pool->base.dccg = dccg31_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
+ if (pool->base.dccg == NULL) {
+ dm_error("DC: failed to create dccg!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+ /* TODO: IRQ */
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dcn31_create(&init_data);
+ if (!pool->base.irqs)
+ goto create_fail;
+
+ /* HUBBUB */
+ pool->base.hubbub = dcn31_hubbub_create(ctx);
+ if (pool->base.hubbub == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create hubbub!\n");
+ goto create_fail;
+ }
+
+ /* HUBPs, DPPs, OPPs and TGs */
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.hubps[i] = dcn31_hubp_create(ctx, i);
+ if (pool->base.hubps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create hubps!\n");
+ goto create_fail;
+ }
+
+ pool->base.dpps[i] = dcn31_dpp_create(ctx, i);
+ if (pool->base.dpps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create dpps!\n");
+ goto create_fail;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_opp; i++) {
+ pool->base.opps[i] = dcn31_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ goto create_fail;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ pool->base.timing_generators[i] = dcn31_timing_generator_create(
+ ctx, i);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto create_fail;
+ }
+ }
+ pool->base.timing_generator_count = i;
+
+ /* PSR */
+ pool->base.psr = dmub_psr_create(ctx);
+ if (pool->base.psr == NULL) {
+ dm_error("DC: failed to create psr obj!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+ /* Replay */
+ pool->base.replay = dmub_replay_create(ctx);
+ if (pool->base.replay == NULL) {
+ dm_error("DC: failed to create replay obj!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+ /* ABM */
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ pool->base.multiple_abms[i] = dmub_abm_create(ctx,
+ &abm_regs[i],
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.multiple_abms[i] == NULL) {
+ dm_error("DC: failed to create abm for pipe %d!\n", i);
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+ }
+
+ /* MPC and DSC */
+ pool->base.mpc = dcn31_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut);
+ if (pool->base.mpc == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create mpc!\n");
+ goto create_fail;
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
+ pool->base.dscs[i] = dcn31_dsc_create(ctx, i);
+ if (pool->base.dscs[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create display stream compressor %d!\n", i);
+ goto create_fail;
+ }
+ }
+
+ /* DWB and MMHUBBUB */
+ if (!dcn31_dwbc_create(ctx, &pool->base)) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create dwbc!\n");
+ goto create_fail;
+ }
+
+ if (!dcn31_mmhubbub_create(ctx, &pool->base)) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create mcif_wb!\n");
+ goto create_fail;
+ }
+
+ /* AUX and I2C */
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ pool->base.engines[i] = dcn31_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create aux engine!!\n");
+ goto create_fail;
+ }
+ pool->base.hw_i2cs[i] = dcn31_i2c_hw_create(ctx, i);
+ if (pool->base.hw_i2cs[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create hw i2c!!\n");
+ goto create_fail;
+ }
+ pool->base.sw_i2cs[i] = NULL;
+ }
+
+ if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
+ dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
+ !dc->debug.dpia_debug.bits.disable_dpia) {
+		/* YELLOW CARP B0 has 4 DPIAs */
+ pool->base.usb4_dpia_count = 4;
+ }
+
+ if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1)
+ pool->base.usb4_dpia_count = 4;
+
+ /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto create_fail;
+
+ /* HW Sequencer and Plane caps */
+ dcn31_hw_sequencer_construct(dc);
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
+ dc->cap_funcs = cap_funcs;
+
+ dc->dcn_ip->max_num_dpp = dcn3_1_ip.max_num_dpp;
+
+ return true;
+
+create_fail:
+ dcn31_resource_destruct(pool);
+
+ return false;
+}
+
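+/*
+ * Public constructor for the DCN3.1 resource pool: allocates the pool
+ * wrapper, runs the construct sequence above, and frees the allocation
+ * if construction fails.
+ */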
+struct resource_pool *dcn31_create_resource_pool(
+ const struct dc_init_data *init_data,
+ struct dc *dc)
+{
+ struct dcn31_resource_pool *pool =
+ kzalloc(sizeof(struct dcn31_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (dcn31_resource_construct(init_data->num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ kfree(pool);
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
new file mode 100644
index 000000000..901436591
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DCN31_RESOURCE_H_
+#define _DCN31_RESOURCE_H_
+
+#include "core_types.h"
+
+#define TO_DCN31_RES_POOL(pool)\
+ container_of(pool, struct dcn31_resource_pool, base)
+
+extern struct _vcs_dpi_ip_params_st dcn3_1_ip;
+
+struct dcn31_resource_pool {
+ struct resource_pool base;
+};
+
+bool dcn31_validate_bandwidth(struct dc *dc,
+ struct dc_state *context,
+ bool fast_validate);
+void dcn31_calculate_wm_and_dlg(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel);
+int dcn31_populate_dml_pipes_from_context(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate);
+void
+dcn31_populate_dml_writeback_from_context(struct dc *dc,
+ struct resource_context *res_ctx,
+ display_e2e_pipe_params_st *pipes);
+void
+dcn31_set_mcif_arb_params(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt);
+
+struct resource_pool *dcn31_create_resource_pool(
+ const struct dc_init_data *init_data,
+ struct dc *dc);
+
+/*temp: B0 specific before switch to dcn313 headers*/
+#ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL
+#define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e
+#define regPHYPLLF_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define regPHYPLLG_PIXCLK_RESYNC_CNTL 0x005f
+#define regPHYPLLG_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+
+//PHYPLLF_PIXCLK_RESYNC_CNTL
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE_MASK 0x00000100L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L
+
+//PHYPLLG_PIXCLK_RESYNC_CNTL
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE_MASK 0x00000100L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L
+#endif
+#endif /* _DCN31_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c
new file mode 100644
index 000000000..f1deb1c3c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc_bios_types.h"
+#include "dcn30/dcn30_vpg.h"
+#include "dcn31_vpg.h"
+#include "reg_helper.h"
+#include "dc/dc.h"
+
+#define DC_LOGGER \
+ vpg31->base.ctx->logger
+
+#define REG(reg)\
+ (vpg31->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ vpg31->vpg_shift->field_name, vpg31->vpg_mask->field_name
+
+
+#define CTX \
+ vpg31->base.ctx
+
+static struct vpg_funcs dcn31_vpg_funcs = {
+ .update_generic_info_packet = vpg3_update_generic_info_packet,
+ .vpg_poweron = vpg31_poweron,
+ .vpg_powerdown = vpg31_powerdown,
+};
+
+void vpg31_powerdown(struct vpg *vpg)
+{
+ struct dcn31_vpg *vpg31 = DCN31_VPG_FROM_VPG(vpg);
+
+	if (!vpg->ctx->dc->debug.enable_mem_low_power.bits.vpg)
+ return;
+
+ REG_UPDATE_2(VPG_MEM_PWR, VPG_GSP_MEM_LIGHT_SLEEP_DIS, 0, VPG_GSP_LIGHT_SLEEP_FORCE, 1);
+}
+
+void vpg31_poweron(struct vpg *vpg)
+{
+ struct dcn31_vpg *vpg31 = DCN31_VPG_FROM_VPG(vpg);
+
+	if (!vpg->ctx->dc->debug.enable_mem_low_power.bits.vpg)
+ return;
+
+ REG_UPDATE_2(VPG_MEM_PWR, VPG_GSP_MEM_LIGHT_SLEEP_DIS, 1, VPG_GSP_LIGHT_SLEEP_FORCE, 0);
+}
+
+void vpg31_construct(struct dcn31_vpg *vpg31,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn31_vpg_registers *vpg_regs,
+ const struct dcn31_vpg_shift *vpg_shift,
+ const struct dcn31_vpg_mask *vpg_mask)
+{
+ vpg31->base.ctx = ctx;
+
+ vpg31->base.inst = inst;
+ vpg31->base.funcs = &dcn31_vpg_funcs;
+
+ vpg31->regs = vpg_regs;
+ vpg31->vpg_shift = vpg_shift;
+ vpg31->vpg_mask = vpg_mask;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h
new file mode 100644
index 000000000..0e76eabce
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCN31_VPG_H__
+#define __DAL_DCN31_VPG_H__
+
+
+#define DCN31_VPG_FROM_VPG(vpg)\
+ container_of(vpg, struct dcn31_vpg, base)
+
+#define VPG_DCN31_REG_LIST(id) \
+ SRI(VPG_GENERIC_STATUS, VPG, id), \
+ SRI(VPG_GENERIC_PACKET_ACCESS_CTRL, VPG, id), \
+ SRI(VPG_GENERIC_PACKET_DATA, VPG, id), \
+ SRI(VPG_GSP_FRAME_UPDATE_CTRL, VPG, id), \
+ SRI(VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG, id), \
+ SRI(VPG_MEM_PWR, VPG, id)
+
+struct dcn31_vpg_registers {
+ uint32_t VPG_GENERIC_STATUS;
+ uint32_t VPG_GENERIC_PACKET_ACCESS_CTRL;
+ uint32_t VPG_GENERIC_PACKET_DATA;
+ uint32_t VPG_GSP_FRAME_UPDATE_CTRL;
+ uint32_t VPG_GSP_IMMEDIATE_UPDATE_CTRL;
+ uint32_t VPG_MEM_PWR;
+};
+
+#define DCN31_VPG_MASK_SH_LIST(mask_sh)\
+ SE_SF(VPG0_VPG_GENERIC_STATUS, VPG_GENERIC_CONFLICT_OCCURED, mask_sh),\
+ SE_SF(VPG0_VPG_GENERIC_STATUS, VPG_GENERIC_CONFLICT_CLR, mask_sh),\
+ SE_SF(VPG0_VPG_GENERIC_PACKET_ACCESS_CTRL, VPG_GENERIC_DATA_INDEX, mask_sh),\
+ SE_SF(VPG0_VPG_GENERIC_PACKET_DATA, VPG_GENERIC_DATA_BYTE0, mask_sh),\
+ SE_SF(VPG0_VPG_GENERIC_PACKET_DATA, VPG_GENERIC_DATA_BYTE1, mask_sh),\
+ SE_SF(VPG0_VPG_GENERIC_PACKET_DATA, VPG_GENERIC_DATA_BYTE2, mask_sh),\
+ SE_SF(VPG0_VPG_GENERIC_PACKET_DATA, VPG_GENERIC_DATA_BYTE3, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC0_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC1_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC2_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC3_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC4_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC5_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC6_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC7_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC8_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC9_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC10_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC11_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC12_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC13_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC14_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC0_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC1_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC2_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC3_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC4_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC5_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC6_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC7_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC8_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC9_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC10_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC11_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC12_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC13_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC14_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_MEM_PWR, VPG_GSP_MEM_LIGHT_SLEEP_DIS, mask_sh),\
+ SE_SF(VPG0_VPG_MEM_PWR, VPG_GSP_LIGHT_SLEEP_FORCE, mask_sh),\
+ SE_SF(VPG0_VPG_MEM_PWR, VPG_GSP_MEM_PWR_STATE, mask_sh)
+
+#define VPG_DCN31_REG_FIELD_LIST(type) \
+ type VPG_GENERIC_CONFLICT_OCCURED;\
+ type VPG_GENERIC_CONFLICT_CLR;\
+ type VPG_GENERIC_DATA_INDEX;\
+ type VPG_GENERIC_DATA_BYTE0;\
+ type VPG_GENERIC_DATA_BYTE1;\
+ type VPG_GENERIC_DATA_BYTE2;\
+ type VPG_GENERIC_DATA_BYTE3;\
+ type VPG_GENERIC0_FRAME_UPDATE;\
+ type VPG_GENERIC1_FRAME_UPDATE;\
+ type VPG_GENERIC2_FRAME_UPDATE;\
+ type VPG_GENERIC3_FRAME_UPDATE;\
+ type VPG_GENERIC4_FRAME_UPDATE;\
+ type VPG_GENERIC5_FRAME_UPDATE;\
+ type VPG_GENERIC6_FRAME_UPDATE;\
+ type VPG_GENERIC7_FRAME_UPDATE;\
+ type VPG_GENERIC8_FRAME_UPDATE;\
+ type VPG_GENERIC9_FRAME_UPDATE;\
+ type VPG_GENERIC10_FRAME_UPDATE;\
+ type VPG_GENERIC11_FRAME_UPDATE;\
+ type VPG_GENERIC12_FRAME_UPDATE;\
+ type VPG_GENERIC13_FRAME_UPDATE;\
+ type VPG_GENERIC14_FRAME_UPDATE;\
+ type VPG_GENERIC0_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC1_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC2_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC3_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC4_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC5_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC6_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC7_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC8_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC9_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC10_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC11_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC12_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC13_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC14_IMMEDIATE_UPDATE;\
+ type VPG_GSP_MEM_LIGHT_SLEEP_DIS;\
+ type VPG_GSP_LIGHT_SLEEP_FORCE;\
+ type VPG_GSP_MEM_PWR_STATE
+
+struct dcn31_vpg_shift {
+ VPG_DCN31_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn31_vpg_mask {
+ VPG_DCN31_REG_FIELD_LIST(uint32_t);
+};
+
+struct dcn31_vpg {
+ struct vpg base;
+ const struct dcn31_vpg_registers *regs;
+ const struct dcn31_vpg_shift *vpg_shift;
+ const struct dcn31_vpg_mask *vpg_mask;
+};
+
+void vpg31_poweron(
+ struct vpg *vpg);
+
+void vpg31_powerdown(
+ struct vpg *vpg);
+
+void vpg31_construct(struct dcn31_vpg *vpg31,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn31_vpg_registers *vpg_regs,
+ const struct dcn31_vpg_shift *vpg_shift,
+ const struct dcn31_vpg_mask *vpg_mask);
+
+#endif