author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/gpu/drm/amd/display/dc/dcn20
parent    Initial commit. (diff)
Adding upstream version 6.6.15. (tag: upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/amd/display/dc/dcn20')
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/Makefile                |   14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c            |  176
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h            |  328
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c             |  432
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h             |  778
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c          | 1147
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c             |  772
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h             |  589
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c             |  332
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h             |  431
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c         |  873
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c          |  670
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h          |  143
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c            | 1691
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h            |  371
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c           | 2945
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h           |  154
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c            |  148
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h            |   33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c    |  502
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h    |  364
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c        |  324
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h        |  517
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c             |  590
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h             |  312
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c             |  386
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h             |  170
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c            |  587
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h            |  124
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c        | 2770
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h        |  168
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c  |  661
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h  |  119
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c            |   96
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h            |   77
35 files changed, 19794 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
new file mode 100644
index 0000000000..abaed2121f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: MIT
+#
+# Makefile for DCN.
+
+DCN20 = dcn20_resource.o dcn20_init.o dcn20_hwseq.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \
+ dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_optc.o dcn20_mmhubbub.o \
+ dcn20_stream_encoder.o dcn20_link_encoder.o dcn20_dccg.o \
+ dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o
+
+DCN20 += dcn20_dsc.o
+
+AMD_DAL_DCN20 = $(addprefix $(AMDDALPATH)/dc/dcn20/,$(DCN20))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCN20)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
new file mode 100644
index 0000000000..5999b2da3a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/slab.h>
+
+#include "reg_helper.h"
+#include "core_types.h"
+#include "dcn20_dccg.h"
+
+#define TO_DCN_DCCG(dccg)\
+ container_of(dccg, struct dcn_dccg, base)
+
+#define REG(reg) \
+ (dccg_dcn->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dccg_dcn->dccg_shift->field_name, dccg_dcn->dccg_mask->field_name
+
+#define CTX \
+ dccg_dcn->base.ctx
+#define DC_LOGGER \
+ dccg->ctx->logger
+
+void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (dccg->ref_dppclk && req_dppclk) {
+ int ref_dppclk = dccg->ref_dppclk;
+ int modulo, phase;
+
+ // phase / modulo = dpp pipe clk / dpp global clk
+ modulo = 0xff; // use FF at the end
+ phase = ((modulo * req_dppclk) + ref_dppclk - 1) / ref_dppclk;
+
+ if (phase > 0xff) {
+ ASSERT(false);
+ phase = 0xff;
+ }
+
+ REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
+ DPPCLK0_DTO_PHASE, phase,
+ DPPCLK0_DTO_MODULO, modulo);
+ REG_UPDATE(DPPCLK_DTO_CTRL,
+ DPPCLK_DTO_ENABLE[dpp_inst], 1);
+ } else {
+ REG_UPDATE(DPPCLK_DTO_CTRL,
+ DPPCLK_DTO_ENABLE[dpp_inst], 0);
+ }
+
+ dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
+}
+
+void dccg2_get_dccg_ref_freq(struct dccg *dccg,
+ unsigned int xtalin_freq_inKhz,
+ unsigned int *dccg_ref_freq_inKhz)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ uint32_t clk_en = 0;
+ uint32_t clk_sel = 0;
+
+ REG_GET_2(REFCLK_CNTL, REFCLK_CLOCK_EN, &clk_en, REFCLK_SRC_SEL, &clk_sel);
+
+ if (clk_en != 0) {
+ // DCN20 has never been validated for non-xtalin as reference
+ // frequency. There's actually no way for DC to determine what
+ // frequency a non-xtalin source is.
+ ASSERT_CRITICAL(false);
+ }
+
+ *dccg_ref_freq_inKhz = xtalin_freq_inKhz;
+
+ return;
+}
+
+void dccg2_set_fifo_errdet_ovr_en(struct dccg *dccg,
+ bool en)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ REG_UPDATE(DISPCLK_FREQ_CHANGE_CNTL,
+ DCCG_FIFO_ERRDET_OVR_EN, en ? 1 : 0);
+}
+
+void dccg2_otg_add_pixel(struct dccg *dccg,
+ uint32_t otg_inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ REG_UPDATE_2(OTG_PIXEL_RATE_CNTL[otg_inst],
+ OTG_ADD_PIXEL[otg_inst], 0,
+ OTG_DROP_PIXEL[otg_inst], 0);
+ REG_UPDATE(OTG_PIXEL_RATE_CNTL[otg_inst],
+ OTG_ADD_PIXEL[otg_inst], 1);
+}
+
+void dccg2_otg_drop_pixel(struct dccg *dccg,
+ uint32_t otg_inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ REG_UPDATE_2(OTG_PIXEL_RATE_CNTL[otg_inst],
+ OTG_ADD_PIXEL[otg_inst], 0,
+ OTG_DROP_PIXEL[otg_inst], 0);
+ REG_UPDATE(OTG_PIXEL_RATE_CNTL[otg_inst],
+ OTG_DROP_PIXEL[otg_inst], 1);
+}
+
+void dccg2_init(struct dccg *dccg)
+{
+}
+
+static const struct dccg_funcs dccg2_funcs = {
+ .update_dpp_dto = dccg2_update_dpp_dto,
+ .get_dccg_ref_freq = dccg2_get_dccg_ref_freq,
+ .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en,
+ .otg_add_pixel = dccg2_otg_add_pixel,
+ .otg_drop_pixel = dccg2_otg_drop_pixel,
+ .dccg_init = dccg2_init
+};
+
+struct dccg *dccg2_create(
+ struct dc_context *ctx,
+ const struct dccg_registers *regs,
+ const struct dccg_shift *dccg_shift,
+ const struct dccg_mask *dccg_mask)
+{
+ struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_ATOMIC);
+ struct dccg *base;
+
+ if (dccg_dcn == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ base = &dccg_dcn->base;
+ base->ctx = ctx;
+ base->funcs = &dccg2_funcs;
+
+ dccg_dcn->regs = regs;
+ dccg_dcn->dccg_shift = dccg_shift;
+ dccg_dcn->dccg_mask = dccg_mask;
+
+ return &dccg_dcn->base;
+}
+
+void dcn_dccg_destroy(struct dccg **dccg)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(*dccg);
+
+ kfree(dccg_dcn);
+ *dccg = NULL;
+}
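
Note (illustration, not part of the patch): dccg2_update_dpp_dto() above programs the per-pipe DPP DTO so that phase / modulo = req_dppclk / ref_dppclk, with modulo fixed at 0xFF and phase rounded up, then clamped. A minimal standalone C sketch of that arithmetic, assuming the same rounding and clamp as the code above:

    /* Hypothetical helper, for illustration only; not part of this patch. */
    static int dpp_dto_phase(int ref_dppclk_khz, int req_dppclk_khz)
    {
            const int modulo = 0xff;
            int phase;

            /* ceil(modulo * req / ref) so the DTO never runs below the request */
            phase = (modulo * req_dppclk_khz + ref_dppclk_khz - 1) / ref_dppclk_khz;
            if (phase > 0xff)
                    phase = 0xff;   /* request above the reference clock: clamp */

            return phase;
    }

    /* Example: ref 600000 kHz, req 300000 kHz -> phase 128, i.e. ~50% of ref. */
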
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
new file mode 100644
index 0000000000..c8602bcfa3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
@@ -0,0 +1,328 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN20_DCCG_H__
+#define __DCN20_DCCG_H__
+
+#include "dccg.h"
+
+#define DCCG_COMMON_REG_LIST_DCN_BASE() \
+ SR(DPPCLK_DTO_CTRL),\
+ DCCG_SRII(DTO_PARAM, DPPCLK, 0),\
+ DCCG_SRII(DTO_PARAM, DPPCLK, 1),\
+ DCCG_SRII(DTO_PARAM, DPPCLK, 2),\
+ DCCG_SRII(DTO_PARAM, DPPCLK, 3),\
+ SR(REFCLK_CNTL),\
+ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0),\
+ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 1),\
+ SR(DISPCLK_FREQ_CHANGE_CNTL)
+
+#define DCCG_REG_LIST_DCN2() \
+ DCCG_COMMON_REG_LIST_DCN_BASE(),\
+ DCCG_SRII(DTO_PARAM, DPPCLK, 4),\
+ DCCG_SRII(DTO_PARAM, DPPCLK, 5),\
+ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 2),\
+ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 3),\
+ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 4),\
+ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 5)
+
+#define DCCG_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define DCCG_SFI(reg_name, field_name, field_prefix, inst, post_fix)\
+ .field_prefix ## _ ## field_name[inst] = reg_name ## __ ## field_prefix ## inst ## _ ## field_name ## post_fix
+
+#define DCCG_SFII(block, reg_name, field_prefix, field_name, inst, post_fix)\
+ .field_prefix ## _ ## field_name[inst] = block ## inst ## _ ## reg_name ## __ ## field_prefix ## inst ## _ ## field_name ## post_fix
+
+#define DCCG_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh) \
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 0, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 1, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 1, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 2, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 2, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 3, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 3, mask_sh),\
+ DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_PHASE, mask_sh),\
+ DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_MODULO, mask_sh),\
+ DCCG_SF(REFCLK_CNTL, REFCLK_CLOCK_EN, mask_sh),\
+ DCCG_SF(REFCLK_CNTL, REFCLK_SRC_SEL, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_DELAY, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_SIZE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_FREQ_RAMP_DONE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_MAX_ERRDET_CYCLES, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_RESET, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_STATE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_OVR_EN, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_CHG_FWD_CORR_DISABLE, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 0, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 1, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 0, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 1, mask_sh)
+
+
+
+
+#define DCCG_MASK_SH_LIST_DCN2(mask_sh) \
+ DCCG_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 4, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 4, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 5, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 5, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 2, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 3, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 4, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 5, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 2, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 3, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 4, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 5, mask_sh)
+
+#define DCCG_MASK_SH_LIST_DCN2_1(mask_sh) \
+ DCCG_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 4, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 4, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 5, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 5, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 2, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 3, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 2, mask_sh),\
+ DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 3, mask_sh)
+
+
+#define DCCG_REG_FIELD_LIST(type) \
+ type DPPCLK0_DTO_PHASE;\
+ type DPPCLK0_DTO_MODULO;\
+ type DPPCLK_DTO_ENABLE[6];\
+ type DPPCLK_DTO_DB_EN[6];\
+ type REFCLK_CLOCK_EN;\
+ type REFCLK_SRC_SEL;\
+ type DISPCLK_STEP_DELAY;\
+ type DISPCLK_STEP_SIZE;\
+ type DISPCLK_FREQ_RAMP_DONE;\
+ type DISPCLK_MAX_ERRDET_CYCLES;\
+ type DCCG_FIFO_ERRDET_RESET;\
+ type DCCG_FIFO_ERRDET_STATE;\
+ type DCCG_FIFO_ERRDET_OVR_EN;\
+ type DISPCLK_CHG_FWD_CORR_DISABLE;\
+ type DISPCLK_FREQ_CHANGE_CNTL;\
+ type OTG_ADD_PIXEL[MAX_PIPES];\
+ type OTG_DROP_PIXEL[MAX_PIPES];
+
+#define DCCG3_REG_FIELD_LIST(type) \
+ type HDMICHARCLK0_EN;\
+ type HDMICHARCLK0_SRC_SEL;\
+ type PHYASYMCLK_FORCE_EN;\
+ type PHYASYMCLK_FORCE_SRC_SEL;\
+ type PHYBSYMCLK_FORCE_EN;\
+ type PHYBSYMCLK_FORCE_SRC_SEL;\
+ type PHYCSYMCLK_FORCE_EN;\
+ type PHYCSYMCLK_FORCE_SRC_SEL;
+
+#define DCCG31_REG_FIELD_LIST(type) \
+ type PHYDSYMCLK_FORCE_EN;\
+ type PHYDSYMCLK_FORCE_SRC_SEL;\
+ type PHYESYMCLK_FORCE_EN;\
+ type PHYESYMCLK_FORCE_SRC_SEL;\
+ type DPSTREAMCLK_PIPE0_EN;\
+ type DPSTREAMCLK_PIPE1_EN;\
+ type DPSTREAMCLK_PIPE2_EN;\
+ type DPSTREAMCLK_PIPE3_EN;\
+ type HDMISTREAMCLK0_SRC_SEL;\
+ type HDMISTREAMCLK0_DTO_FORCE_DIS;\
+ type SYMCLK32_SE0_SRC_SEL;\
+ type SYMCLK32_SE1_SRC_SEL;\
+ type SYMCLK32_SE2_SRC_SEL;\
+ type SYMCLK32_SE3_SRC_SEL;\
+ type SYMCLK32_SE0_EN;\
+ type SYMCLK32_SE1_EN;\
+ type SYMCLK32_SE2_EN;\
+ type SYMCLK32_SE3_EN;\
+ type SYMCLK32_LE0_SRC_SEL;\
+ type SYMCLK32_LE1_SRC_SEL;\
+ type SYMCLK32_LE0_EN;\
+ type SYMCLK32_LE1_EN;\
+ type DTBCLK_DTO_ENABLE[MAX_PIPES];\
+ type DTBCLKDTO_ENABLE_STATUS[MAX_PIPES];\
+ type PIPE_DTO_SRC_SEL[MAX_PIPES];\
+ type DTBCLK_DTO_DIV[MAX_PIPES];\
+ type DCCG_AUDIO_DTO_SEL;\
+ type DCCG_AUDIO_DTO0_SOURCE_SEL;\
+ type DENTIST_DISPCLK_CHG_MODE;\
+ type DSCCLK0_DTO_PHASE;\
+ type DSCCLK0_DTO_MODULO;\
+ type DSCCLK1_DTO_PHASE;\
+ type DSCCLK1_DTO_MODULO;\
+ type DSCCLK2_DTO_PHASE;\
+ type DSCCLK2_DTO_MODULO;\
+ type DSCCLK0_DTO_ENABLE;\
+ type DSCCLK1_DTO_ENABLE;\
+ type DSCCLK2_DTO_ENABLE;\
+ type SYMCLK32_ROOT_SE0_GATE_DISABLE;\
+ type SYMCLK32_ROOT_SE1_GATE_DISABLE;\
+ type SYMCLK32_ROOT_SE2_GATE_DISABLE;\
+ type SYMCLK32_ROOT_SE3_GATE_DISABLE;\
+ type SYMCLK32_SE0_GATE_DISABLE;\
+ type SYMCLK32_SE1_GATE_DISABLE;\
+ type SYMCLK32_SE2_GATE_DISABLE;\
+ type SYMCLK32_SE3_GATE_DISABLE;\
+ type SYMCLK32_ROOT_LE0_GATE_DISABLE;\
+ type SYMCLK32_ROOT_LE1_GATE_DISABLE;\
+ type SYMCLK32_LE0_GATE_DISABLE;\
+ type SYMCLK32_LE1_GATE_DISABLE;\
+ type DPSTREAMCLK_ROOT_GATE_DISABLE;\
+ type DPSTREAMCLK_GATE_DISABLE;\
+ type HDMISTREAMCLK0_DTO_PHASE;\
+ type HDMISTREAMCLK0_DTO_MODULO;\
+ type HDMICHARCLK0_GATE_DISABLE;\
+ type HDMICHARCLK0_ROOT_GATE_DISABLE; \
+ type PHYASYMCLK_GATE_DISABLE; \
+ type PHYBSYMCLK_GATE_DISABLE; \
+ type PHYCSYMCLK_GATE_DISABLE; \
+ type PHYDSYMCLK_GATE_DISABLE; \
+ type PHYESYMCLK_GATE_DISABLE;
+
+#define DCCG314_REG_FIELD_LIST(type) \
+ type DSCCLK3_DTO_PHASE;\
+ type DSCCLK3_DTO_MODULO;\
+ type DSCCLK3_DTO_ENABLE;\
+ type DENTIST_DISPCLK_RDIVIDER;\
+ type DENTIST_DISPCLK_WDIVIDER;
+
+#define DCCG32_REG_FIELD_LIST(type) \
+ type DPSTREAMCLK0_EN;\
+ type DPSTREAMCLK1_EN;\
+ type DPSTREAMCLK2_EN;\
+ type DPSTREAMCLK3_EN;\
+ type DPSTREAMCLK0_SRC_SEL;\
+ type DPSTREAMCLK1_SRC_SEL;\
+ type DPSTREAMCLK2_SRC_SEL;\
+ type DPSTREAMCLK3_SRC_SEL;\
+ type HDMISTREAMCLK0_EN;\
+ type OTG0_PIXEL_RATE_DIVK1;\
+ type OTG0_PIXEL_RATE_DIVK2;\
+ type OTG1_PIXEL_RATE_DIVK1;\
+ type OTG1_PIXEL_RATE_DIVK2;\
+ type OTG2_PIXEL_RATE_DIVK1;\
+ type OTG2_PIXEL_RATE_DIVK2;\
+ type OTG3_PIXEL_RATE_DIVK1;\
+ type OTG3_PIXEL_RATE_DIVK2;\
+ type DTBCLK_P0_SRC_SEL;\
+ type DTBCLK_P0_EN;\
+ type DTBCLK_P1_SRC_SEL;\
+ type DTBCLK_P1_EN;\
+ type DTBCLK_P2_SRC_SEL;\
+ type DTBCLK_P2_EN;\
+ type DTBCLK_P3_SRC_SEL;\
+ type DTBCLK_P3_EN;\
+ type DENTIST_DISPCLK_CHG_DONE;
+
+struct dccg_shift {
+ DCCG_REG_FIELD_LIST(uint8_t)
+ DCCG3_REG_FIELD_LIST(uint8_t)
+ DCCG31_REG_FIELD_LIST(uint8_t)
+ DCCG314_REG_FIELD_LIST(uint8_t)
+ DCCG32_REG_FIELD_LIST(uint8_t)
+};
+
+struct dccg_mask {
+ DCCG_REG_FIELD_LIST(uint32_t)
+ DCCG3_REG_FIELD_LIST(uint32_t)
+ DCCG31_REG_FIELD_LIST(uint32_t)
+ DCCG314_REG_FIELD_LIST(uint32_t)
+ DCCG32_REG_FIELD_LIST(uint32_t)
+};
+
+struct dccg_registers {
+ uint32_t DPPCLK_DTO_CTRL;
+ uint32_t DPPCLK_DTO_PARAM[6];
+ uint32_t REFCLK_CNTL;
+ uint32_t DISPCLK_FREQ_CHANGE_CNTL;
+ uint32_t OTG_PIXEL_RATE_CNTL[MAX_PIPES];
+ uint32_t HDMICHARCLK_CLOCK_CNTL[6];
+ uint32_t PHYASYMCLK_CLOCK_CNTL;
+ uint32_t PHYBSYMCLK_CLOCK_CNTL;
+ uint32_t PHYCSYMCLK_CLOCK_CNTL;
+ uint32_t PHYDSYMCLK_CLOCK_CNTL;
+ uint32_t PHYESYMCLK_CLOCK_CNTL;
+ uint32_t DTBCLK_DTO_MODULO[MAX_PIPES];
+ uint32_t DTBCLK_DTO_PHASE[MAX_PIPES];
+ uint32_t DCCG_AUDIO_DTBCLK_DTO_MODULO;
+ uint32_t DCCG_AUDIO_DTBCLK_DTO_PHASE;
+ uint32_t DCCG_AUDIO_DTO_SOURCE;
+ uint32_t DPSTREAMCLK_CNTL;
+ uint32_t HDMISTREAMCLK_CNTL;
+ uint32_t SYMCLK32_SE_CNTL;
+ uint32_t SYMCLK32_LE_CNTL;
+ uint32_t DENTIST_DISPCLK_CNTL;
+ uint32_t DSCCLK_DTO_CTRL;
+ uint32_t DSCCLK0_DTO_PARAM;
+ uint32_t DSCCLK1_DTO_PARAM;
+ uint32_t DSCCLK2_DTO_PARAM;
+ uint32_t DSCCLK3_DTO_PARAM;
+ uint32_t DPSTREAMCLK_ROOT_GATE_DISABLE;
+ uint32_t DPSTREAMCLK_GATE_DISABLE;
+ uint32_t DCCG_GATE_DISABLE_CNTL;
+ uint32_t DCCG_GATE_DISABLE_CNTL2;
+ uint32_t DCCG_GATE_DISABLE_CNTL3;
+ uint32_t HDMISTREAMCLK0_DTO_PARAM;
+ uint32_t DCCG_GATE_DISABLE_CNTL4;
+ uint32_t OTG_PIXEL_RATE_DIV;
+ uint32_t DTBCLK_P_CNTL;
+};
+
+struct dcn_dccg {
+ struct dccg base;
+ const struct dccg_registers *regs;
+ const struct dccg_shift *dccg_shift;
+ const struct dccg_mask *dccg_mask;
+};
+
+void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk);
+
+void dccg2_get_dccg_ref_freq(struct dccg *dccg,
+ unsigned int xtalin_freq_inKhz,
+ unsigned int *dccg_ref_freq_inKhz);
+
+void dccg2_set_fifo_errdet_ovr_en(struct dccg *dccg,
+ bool en);
+void dccg2_otg_add_pixel(struct dccg *dccg,
+ uint32_t otg_inst);
+void dccg2_otg_drop_pixel(struct dccg *dccg,
+ uint32_t otg_inst);
+
+
+void dccg2_init(struct dccg *dccg);
+
+struct dccg *dccg2_create(
+ struct dc_context *ctx,
+ const struct dccg_registers *regs,
+ const struct dccg_shift *dccg_shift,
+ const struct dccg_mask *dccg_mask);
+
+void dcn_dccg_destroy(struct dccg **dccg);
+
+#endif //__DCN20_DCCG_H__
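
Note (illustration, not part of the patch): struct dccg_shift and struct dccg_mask above describe every register field as a bit shift plus a bit mask, which the REG_* helpers combine into read-modify-write accesses. A standalone sketch of that shift/mask idea, under the assumption that it mirrors the helper semantics; this is not the DC reg_helper implementation, and the bit position in the example is hypothetical:

    #include <stdint.h>

    static uint32_t field_get(uint32_t reg_val, uint8_t shift, uint32_t mask)
    {
            return (reg_val & mask) >> shift;
    }

    static uint32_t field_set(uint32_t reg_val, uint8_t shift, uint32_t mask,
                              uint32_t field_val)
    {
            return (reg_val & ~mask) | ((field_val << shift) & mask);
    }

    /*
     * Example: if a one-bit field such as REFCLK_SRC_SEL sat at bit 8
     * (mask 0x100), field_get(0x00000100, 8, 0x100) would return 1.
     */
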
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
new file mode 100644
index 0000000000..eaa7032f0f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -0,0 +1,432 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "core_types.h"
+
+#include "reg_helper.h"
+#include "dcn20_dpp.h"
+#include "basics/conversion.h"
+
+#define NUM_PHASES 64
+#define HORZ_MAX_TAPS 8
+#define VERT_MAX_TAPS 8
+
+#define BLACK_OFFSET_RGB_Y 0x0
+#define BLACK_OFFSET_CBCR 0x8000
+
+#define REG(reg)\
+ dpp->tf_regs->reg
+
+#define CTX \
+ dpp->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dpp->tf_shift->field_name, dpp->tf_mask->field_name
+
+void dpp20_read_state(struct dpp *dpp_base,
+ struct dcn_dpp_state *s)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ REG_GET(DPP_CONTROL,
+ DPP_CLOCK_ENABLE, &s->is_enabled);
+ REG_GET(CM_DGAM_CONTROL,
+ CM_DGAM_LUT_MODE, &s->dgam_lut_mode);
+ // BGAM has no ROM, and definition is different, can't reuse same dump
+ //REG_GET(CM_BLNDGAM_CONTROL,
+ // CM_BLNDGAM_LUT_MODE, &s->rgam_lut_mode);
+ REG_GET(CM_GAMUT_REMAP_CONTROL,
+ CM_GAMUT_REMAP_MODE, &s->gamut_remap_mode);
+ if (s->gamut_remap_mode) {
+ s->gamut_remap_c11_c12 = REG_READ(CM_GAMUT_REMAP_C11_C12);
+ s->gamut_remap_c13_c14 = REG_READ(CM_GAMUT_REMAP_C13_C14);
+ s->gamut_remap_c21_c22 = REG_READ(CM_GAMUT_REMAP_C21_C22);
+ s->gamut_remap_c23_c24 = REG_READ(CM_GAMUT_REMAP_C23_C24);
+ s->gamut_remap_c31_c32 = REG_READ(CM_GAMUT_REMAP_C31_C32);
+ s->gamut_remap_c33_c34 = REG_READ(CM_GAMUT_REMAP_C33_C34);
+ }
+}
+
+void dpp2_power_on_obuf(
+ struct dpp *dpp_base,
+ bool power_on)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ REG_UPDATE(CM_MEM_PWR_CTRL, SHARED_MEM_PWR_DIS, power_on == true ? 1:0);
+
+ REG_UPDATE(OBUF_MEM_PWR_CTRL,
+ OBUF_MEM_PWR_FORCE, power_on == true ? 0:1);
+
+ REG_UPDATE(DSCL_MEM_PWR_CTRL,
+ LUT_MEM_PWR_FORCE, power_on == true ? 0:1);
+}
+
+void dpp2_dummy_program_input_lut(
+ struct dpp *dpp_base,
+ const struct dc_gamma *gamma)
+{}
+
+static void dpp2_cnv_setup (
+ struct dpp *dpp_base,
+ enum surface_pixel_format format,
+ enum expansion_mode mode,
+ struct dc_csc_transform input_csc_color_matrix,
+ enum dc_color_space input_color_space,
+ struct cnv_alpha_2bit_lut *alpha_2bit_lut)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+ uint32_t pixel_format = 0;
+ uint32_t alpha_en = 1;
+ enum dc_color_space color_space = COLOR_SPACE_SRGB;
+ enum dcn20_input_csc_select select = DCN2_ICSC_SELECT_BYPASS;
+ bool force_disable_cursor = false;
+ struct out_csc_color_matrix tbl_entry;
+ uint32_t is_2bit = 0;
+ int i = 0;
+
+ REG_SET_2(FORMAT_CONTROL, 0,
+ CNVC_BYPASS, 0,
+ FORMAT_EXPANSION_MODE, mode);
+
+ //hardcode default
+ //FORMAT_CONTROL. FORMAT_CNV16 default 0: U0.16/S.1.15; 1: U1.15/ S.1.14
+ //FORMAT_CONTROL. CNVC_BYPASS_MSB_ALIGN default 0: disabled 1: enabled
+ //FORMAT_CONTROL. CLAMP_POSITIVE default 0: disabled 1: enabled
+ //FORMAT_CONTROL. CLAMP_POSITIVE_C default 0: disabled 1: enabled
+ REG_UPDATE(FORMAT_CONTROL, FORMAT_CNV16, 0);
+ REG_UPDATE(FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, 0);
+ REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE, 0);
+ REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE_C, 0);
+
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ pixel_format = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ pixel_format = 3;
+ alpha_en = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ pixel_format = 8;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ pixel_format = 10;
+ is_2bit = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ force_disable_cursor = false;
+ pixel_format = 65;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = DCN2_ICSC_SELECT_ICSC_A;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ force_disable_cursor = true;
+ pixel_format = 64;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = DCN2_ICSC_SELECT_ICSC_A;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ force_disable_cursor = true;
+ pixel_format = 67;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = DCN2_ICSC_SELECT_ICSC_A;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ force_disable_cursor = true;
+ pixel_format = 66;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = DCN2_ICSC_SELECT_ICSC_A;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
+ pixel_format = 26; /* ARGB16161616_UNORM */
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ pixel_format = 24;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ pixel_format = 25;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
+ pixel_format = 12;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = DCN2_ICSC_SELECT_ICSC_A;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
+ pixel_format = 112;
+ alpha_en = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
+ pixel_format = 113;
+ alpha_en = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
+ pixel_format = 114;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = DCN2_ICSC_SELECT_ICSC_A;
+ is_2bit = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102:
+ pixel_format = 115;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = DCN2_ICSC_SELECT_ICSC_A;
+ is_2bit = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
+ pixel_format = 118;
+ alpha_en = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
+ pixel_format = 119;
+ alpha_en = 0;
+ break;
+ default:
+ break;
+ }
+
+ /* Set default color space based on format if none is given. */
+ color_space = input_color_space ? input_color_space : color_space;
+
+ if (is_2bit == 1 && alpha_2bit_lut != NULL) {
+ REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0);
+ REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1);
+ REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, alpha_2bit_lut->lut2);
+ REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, alpha_2bit_lut->lut3);
+ }
+
+ REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
+ CNVC_SURFACE_PIXEL_FORMAT, pixel_format);
+ REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en);
+
+ // if input adjustments exist, program icsc with those values
+ if (input_csc_color_matrix.enable_adjustment
+ == true) {
+ for (i = 0; i < 12; i++)
+ tbl_entry.regval[i] = input_csc_color_matrix.matrix[i];
+
+ tbl_entry.color_space = input_color_space;
+
+ if (color_space >= COLOR_SPACE_YCBCR601)
+ select = DCN2_ICSC_SELECT_ICSC_A;
+ else
+ select = DCN2_ICSC_SELECT_BYPASS;
+
+ dpp2_program_input_csc(dpp_base, color_space, select, &tbl_entry);
+ } else
+ dpp2_program_input_csc(dpp_base, color_space, select, NULL);
+
+ if (force_disable_cursor) {
+ REG_UPDATE(CURSOR_CONTROL,
+ CURSOR_ENABLE, 0);
+ REG_UPDATE(CURSOR0_CONTROL,
+ CUR0_ENABLE, 0);
+
+ }
+ dpp2_power_on_obuf(dpp_base, true);
+
+}
+
+/*compute the maximum number of lines that we can fit in the line buffer*/
+void dscl2_calc_lb_num_partitions(
+ const struct scaler_data *scl_data,
+ enum lb_memory_config lb_config,
+ int *num_part_y,
+ int *num_part_c)
+{
+ int memory_line_size_y, memory_line_size_c, memory_line_size_a,
+ lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a;
+
+ int line_size = scl_data->viewport.width < scl_data->recout.width ?
+ scl_data->viewport.width : scl_data->recout.width;
+ int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ?
+ scl_data->viewport_c.width : scl_data->recout.width;
+
+ if (line_size == 0)
+ line_size = 1;
+
+ if (line_size_c == 0)
+ line_size_c = 1;
+
+ memory_line_size_y = (line_size + 5) / 6; /* +5 to ceil */
+ memory_line_size_c = (line_size_c + 5) / 6; /* +5 to ceil */
+ memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
+
+ if (lb_config == LB_MEMORY_CONFIG_1) {
+ lb_memory_size = 970;
+ lb_memory_size_c = 970;
+ lb_memory_size_a = 970;
+ } else if (lb_config == LB_MEMORY_CONFIG_2) {
+ lb_memory_size = 1290;
+ lb_memory_size_c = 1290;
+ lb_memory_size_a = 1290;
+ } else if (lb_config == LB_MEMORY_CONFIG_3) {
+ /* 420 mode: using 3rd mem from Y, Cr and Cb */
+ lb_memory_size = 970 + 1290 + 484 + 484 + 484;
+ lb_memory_size_c = 970 + 1290;
+ lb_memory_size_a = 970 + 1290 + 484;
+ } else {
+ lb_memory_size = 970 + 1290 + 484;
+ lb_memory_size_c = 970 + 1290 + 484;
+ lb_memory_size_a = 970 + 1290 + 484;
+ }
+ *num_part_y = lb_memory_size / memory_line_size_y;
+ *num_part_c = lb_memory_size_c / memory_line_size_c;
+ num_partitions_a = lb_memory_size_a / memory_line_size_a;
+
+ if (scl_data->lb_params.alpha_en
+ && (num_partitions_a < *num_part_y))
+ *num_part_y = num_partitions_a;
+
+ if (*num_part_y > 64)
+ *num_part_y = 64;
+ if (*num_part_c > 64)
+ *num_part_c = 64;
+}
+
+void dpp2_cnv_set_alpha_keyer(
+ struct dpp *dpp_base,
+ struct cnv_color_keyer_params *color_keyer)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ REG_UPDATE(COLOR_KEYER_CONTROL, COLOR_KEYER_EN, color_keyer->color_keyer_en);
+
+ REG_UPDATE(COLOR_KEYER_CONTROL, COLOR_KEYER_MODE, color_keyer->color_keyer_mode);
+
+ REG_UPDATE(COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_LOW, color_keyer->color_keyer_alpha_low);
+ REG_UPDATE(COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_HIGH, color_keyer->color_keyer_alpha_high);
+
+ REG_UPDATE(COLOR_KEYER_RED, COLOR_KEYER_RED_LOW, color_keyer->color_keyer_red_low);
+ REG_UPDATE(COLOR_KEYER_RED, COLOR_KEYER_RED_HIGH, color_keyer->color_keyer_red_high);
+
+ REG_UPDATE(COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_LOW, color_keyer->color_keyer_green_low);
+ REG_UPDATE(COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_HIGH, color_keyer->color_keyer_green_high);
+
+ REG_UPDATE(COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_LOW, color_keyer->color_keyer_blue_low);
+ REG_UPDATE(COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_HIGH, color_keyer->color_keyer_blue_high);
+}
+
+void dpp2_set_cursor_attributes(
+ struct dpp *dpp_base,
+ struct dc_cursor_attributes *cursor_attributes)
+{
+ enum dc_cursor_color_format color_format = cursor_attributes->color_format;
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+ int cur_rom_en = 0;
+
+ if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
+ color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
+ if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
+ cur_rom_en = 1;
+ }
+ }
+
+ REG_UPDATE_3(CURSOR0_CONTROL,
+ CUR0_MODE, color_format,
+ CUR0_EXPANSION_MODE, 0,
+ CUR0_ROM_EN, cur_rom_en);
+
+ if (color_format == CURSOR_MODE_MONO) {
+ /* todo: clarify what to program these to */
+ REG_UPDATE(CURSOR0_COLOR0,
+ CUR0_COLOR0, 0x00000000);
+ REG_UPDATE(CURSOR0_COLOR1,
+ CUR0_COLOR1, 0xFFFFFFFF);
+ }
+}
+
+void oppn20_dummy_program_regamma_pwl(
+ struct dpp *dpp,
+ const struct pwl_params *params,
+ enum opp_regamma mode)
+{}
+
+static struct dpp_funcs dcn20_dpp_funcs = {
+ .dpp_read_state = dpp20_read_state,
+ .dpp_reset = dpp_reset,
+ .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
+ .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps,
+ .dpp_set_gamut_remap = dpp2_cm_set_gamut_remap,
+ .dpp_set_csc_adjustment = NULL,
+ .dpp_set_csc_default = NULL,
+ .dpp_program_regamma_pwl = oppn20_dummy_program_regamma_pwl,
+ .dpp_set_degamma = dpp2_set_degamma,
+ .dpp_program_input_lut = dpp2_dummy_program_input_lut,
+ .dpp_full_bypass = dpp1_full_bypass,
+ .dpp_setup = dpp2_cnv_setup,
+ .dpp_program_degamma_pwl = dpp2_set_degamma_pwl,
+ .dpp_program_blnd_lut = dpp20_program_blnd_lut,
+ .dpp_program_shaper_lut = dpp20_program_shaper,
+ .dpp_program_3dlut = dpp20_program_3dlut,
+ .dpp_program_bias_and_scale = NULL,
+ .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer,
+ .set_cursor_attributes = dpp2_set_cursor_attributes,
+ .set_cursor_position = dpp1_set_cursor_position,
+ .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
+ .dpp_dppclk_control = dpp1_dppclk_control,
+ .dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier,
+};
+
+static struct dpp_caps dcn20_dpp_cap = {
+ .dscl_data_proc_format = DSCL_DATA_PRCESSING_FLOAT_FORMAT,
+ .dscl_calc_lb_num_partitions = dscl2_calc_lb_num_partitions,
+};
+
+bool dpp2_construct(
+ struct dcn20_dpp *dpp,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn2_dpp_registers *tf_regs,
+ const struct dcn2_dpp_shift *tf_shift,
+ const struct dcn2_dpp_mask *tf_mask)
+{
+ dpp->base.ctx = ctx;
+
+ dpp->base.inst = inst;
+ dpp->base.funcs = &dcn20_dpp_funcs;
+ dpp->base.caps = &dcn20_dpp_cap;
+
+ dpp->tf_regs = tf_regs;
+ dpp->tf_shift = tf_shift;
+ dpp->tf_mask = tf_mask;
+
+ dpp->lb_pixel_depth_supported =
+ LB_PIXEL_DEPTH_18BPP |
+ LB_PIXEL_DEPTH_24BPP |
+ LB_PIXEL_DEPTH_30BPP |
+ LB_PIXEL_DEPTH_36BPP;
+
+ dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY;
+ dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/
+
+ return true;
+}
+
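Note (illustration, not part of the patch): dscl2_calc_lb_num_partitions() above sizes the line buffer by taking the smaller of the viewport and recout widths, packing six pixels per memory entry (rounded up), and dividing the per-config memory size by that line size, capped at 64 partitions. A minimal standalone C sketch of that arithmetic for the LB_MEMORY_CONFIG_1 size (970 entries) used in the code above:

    /* Hypothetical helper, for illustration only; not part of this patch. */
    static int lb_num_partitions_cfg1(int viewport_width, int recout_width)
    {
            int line_size = viewport_width < recout_width ? viewport_width
                                                          : recout_width;
            int memory_line_size;
            int num_part;

            if (line_size == 0)
                    line_size = 1;

            memory_line_size = (line_size + 5) / 6;  /* ceil(line_size / 6) */
            num_part = 970 / memory_line_size;

            return num_part > 64 ? 64 : num_part;
    }

    /* Example: a 1920-wide line needs 320 entries, giving 3 partitions. */
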
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
new file mode 100644
index 0000000000..e735363d00
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
@@ -0,0 +1,778 @@
+/* Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN20_DPP_H__
+#define __DCN20_DPP_H__
+
+#include "dcn10/dcn10_dpp.h"
+
+#define TO_DCN20_DPP(dpp)\
+ container_of(dpp, struct dcn20_dpp, base)
+
+#define TF_REG_LIST_DCN20_COMMON_UPDATED(id) \
+ SRI(CM_BLNDGAM_LUT_WRITE_EN_MASK, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM, id)
+
+#define TF_REG_LIST_DCN20_COMMON(id) \
+ SRI(CM_BLNDGAM_CONTROL, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_START_CNTL_B, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_START_CNTL_G, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_START_CNTL_R, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_END_CNTL1_B, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_END_CNTL2_B, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_END_CNTL1_G, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_END_CNTL2_G, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_END_CNTL1_R, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_END_CNTL2_R, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_REGION_0_1, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_REGION_2_3, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_REGION_4_5, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_REGION_6_7, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_REGION_8_9, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_REGION_10_11, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_REGION_12_13, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_REGION_14_15, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_REGION_16_17, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_REGION_18_19, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_REGION_20_21, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_REGION_22_23, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_REGION_24_25, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_REGION_26_27, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_REGION_28_29, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_REGION_30_31, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_REGION_32_33, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_START_CNTL_B, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_START_CNTL_G, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_START_CNTL_R, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_END_CNTL1_B, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_END_CNTL2_B, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_END_CNTL1_G, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_END_CNTL2_G, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_END_CNTL1_R, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_END_CNTL2_R, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_REGION_0_1, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_REGION_2_3, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_REGION_4_5, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_REGION_6_7, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_REGION_8_9, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_REGION_10_11, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_REGION_12_13, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_REGION_14_15, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_REGION_16_17, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_REGION_18_19, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_REGION_20_21, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_REGION_22_23, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_REGION_24_25, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_REGION_26_27, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_REGION_28_29, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_REGION_30_31, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_REGION_32_33, CM, id), \
+ SRI(CM_BLNDGAM_LUT_INDEX, CM, id), \
+ SRI(CM_BLNDGAM_LUT_DATA, CM, id), \
+ SRI(CM_3DLUT_MODE, CM, id), \
+ SRI(CM_3DLUT_INDEX, CM, id), \
+ SRI(CM_3DLUT_DATA, CM, id), \
+ SRI(CM_3DLUT_DATA_30BIT, CM, id), \
+ SRI(CM_3DLUT_READ_WRITE_CONTROL, CM, id), \
+ SRI(CM_SHAPER_LUT_WRITE_EN_MASK, CM, id), \
+ SRI(CM_SHAPER_CONTROL, CM, id), \
+ SRI(CM_SHAPER_RAMB_START_CNTL_B, CM, id), \
+ SRI(CM_SHAPER_RAMB_START_CNTL_G, CM, id), \
+ SRI(CM_SHAPER_RAMB_START_CNTL_R, CM, id), \
+ SRI(CM_SHAPER_RAMB_END_CNTL_B, CM, id), \
+ SRI(CM_SHAPER_RAMB_END_CNTL_G, CM, id), \
+ SRI(CM_SHAPER_RAMB_END_CNTL_R, CM, id), \
+ SRI(CM_SHAPER_RAMB_REGION_0_1, CM, id), \
+ SRI(CM_SHAPER_RAMB_REGION_2_3, CM, id), \
+ SRI(CM_SHAPER_RAMB_REGION_4_5, CM, id), \
+ SRI(CM_SHAPER_RAMB_REGION_6_7, CM, id), \
+ SRI(CM_SHAPER_RAMB_REGION_8_9, CM, id), \
+ SRI(CM_SHAPER_RAMB_REGION_10_11, CM, id), \
+ SRI(CM_SHAPER_RAMB_REGION_12_13, CM, id), \
+ SRI(CM_SHAPER_RAMB_REGION_14_15, CM, id), \
+ SRI(CM_SHAPER_RAMB_REGION_16_17, CM, id), \
+ SRI(CM_SHAPER_RAMB_REGION_18_19, CM, id), \
+ SRI(CM_SHAPER_RAMB_REGION_20_21, CM, id), \
+ SRI(CM_SHAPER_RAMB_REGION_22_23, CM, id), \
+ SRI(CM_SHAPER_RAMB_REGION_24_25, CM, id), \
+ SRI(CM_SHAPER_RAMB_REGION_26_27, CM, id), \
+ SRI(CM_SHAPER_RAMB_REGION_28_29, CM, id), \
+ SRI(CM_SHAPER_RAMB_REGION_30_31, CM, id), \
+ SRI(CM_SHAPER_RAMB_REGION_32_33, CM, id), \
+ SRI(CM_SHAPER_RAMA_START_CNTL_B, CM, id), \
+ SRI(CM_SHAPER_RAMA_START_CNTL_G, CM, id), \
+ SRI(CM_SHAPER_RAMA_START_CNTL_R, CM, id), \
+ SRI(CM_SHAPER_RAMA_END_CNTL_B, CM, id), \
+ SRI(CM_SHAPER_RAMA_END_CNTL_G, CM, id), \
+ SRI(CM_SHAPER_RAMA_END_CNTL_R, CM, id), \
+ SRI(CM_SHAPER_RAMA_REGION_0_1, CM, id), \
+ SRI(CM_SHAPER_RAMA_REGION_2_3, CM, id), \
+ SRI(CM_SHAPER_RAMA_REGION_4_5, CM, id), \
+ SRI(CM_SHAPER_RAMA_REGION_6_7, CM, id), \
+ SRI(CM_SHAPER_RAMA_REGION_8_9, CM, id), \
+ SRI(CM_SHAPER_RAMA_REGION_10_11, CM, id), \
+ SRI(CM_SHAPER_RAMA_REGION_12_13, CM, id), \
+ SRI(CM_SHAPER_RAMA_REGION_14_15, CM, id), \
+ SRI(CM_SHAPER_RAMA_REGION_16_17, CM, id), \
+ SRI(CM_SHAPER_RAMA_REGION_18_19, CM, id), \
+ SRI(CM_SHAPER_RAMA_REGION_20_21, CM, id), \
+ SRI(CM_SHAPER_RAMA_REGION_22_23, CM, id), \
+ SRI(CM_SHAPER_RAMA_REGION_24_25, CM, id), \
+ SRI(CM_SHAPER_RAMA_REGION_26_27, CM, id), \
+ SRI(CM_SHAPER_RAMA_REGION_28_29, CM, id), \
+ SRI(CM_SHAPER_RAMA_REGION_30_31, CM, id), \
+ SRI(CM_SHAPER_RAMA_REGION_32_33, CM, id), \
+ SRI(CM_SHAPER_LUT_INDEX, CM, id)
+
+#define TF_REG_LIST_DCN20_COMMON_APPEND(id) \
+ SRI(CM_GAMUT_REMAP_B_C11_C12, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C13_C14, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C21_C22, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C23_C24, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C31_C32, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C33_C34, CM, id),\
+ SRI(CM_ICSC_B_C11_C12, CM, id), \
+ SRI(CM_ICSC_B_C33_C34, CM, id)
+
+#define TF_REG_LIST_DCN20(id) \
+ TF_REG_LIST_DCN(id), \
+ TF_REG_LIST_DCN20_COMMON(id), \
+ TF_REG_LIST_DCN20_COMMON_UPDATED(id), \
+ SRI(CURSOR_CONTROL, CURSOR0_, id), \
+ SRI(ALPHA_2BIT_LUT, CNVC_CFG, id), \
+ SRI(FCNV_FP_BIAS_R, CNVC_CFG, id), \
+ SRI(FCNV_FP_BIAS_G, CNVC_CFG, id), \
+ SRI(FCNV_FP_BIAS_B, CNVC_CFG, id), \
+ SRI(FCNV_FP_SCALE_R, CNVC_CFG, id), \
+ SRI(FCNV_FP_SCALE_G, CNVC_CFG, id), \
+ SRI(FCNV_FP_SCALE_B, CNVC_CFG, id), \
+ SRI(COLOR_KEYER_CONTROL, CNVC_CFG, id), \
+ SRI(COLOR_KEYER_ALPHA, CNVC_CFG, id), \
+ SRI(COLOR_KEYER_RED, CNVC_CFG, id), \
+ SRI(COLOR_KEYER_GREEN, CNVC_CFG, id), \
+ SRI(COLOR_KEYER_BLUE, CNVC_CFG, id), \
+ SRI(CM_SHAPER_LUT_DATA, CM, id), \
+ SRI(CURSOR_CONTROL, CURSOR0_, id),\
+ SRI(OBUF_MEM_PWR_CTRL, DSCL, id),\
+ SRI(DSCL_MEM_PWR_CTRL, DSCL, id)
+
+
+#define TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh)\
+ TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, CM_BLNDGAM_RAMB_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_LUT_MODE, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_EN_MASK, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_SEL, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh)
+
+
+#define TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh)\
+ TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_MODE, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_B, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_G, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_R, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_2_3, CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_2_3, CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_2_3, CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_2_3, CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_4_5, CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_4_5, CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_4_5, CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_4_5, CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_6_7, CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_6_7, CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_6_7, CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_6_7, CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_8_9, CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_8_9, CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_8_9, CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_8_9, CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_10_11, CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_10_11, CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_10_11, CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_10_11, CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_12_13, CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_12_13, CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_12_13, CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_12_13, CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_14_15, CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_14_15, CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_14_15, CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_14_15, CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_16_17, CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_16_17, CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_16_17, CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_16_17, CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_18_19, CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_18_19, CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_18_19, CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_18_19, CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_20_21, CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_20_21, CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_20_21, CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_20_21, CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_22_23, CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_22_23, CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_22_23, CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_22_23, CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_24_25, CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_24_25, CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_24_25, CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_24_25, CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_26_27, CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_26_27, CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_26_27, CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_26_27, CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_28_29, CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_28_29, CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_28_29, CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_28_29, CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_30_31, CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_30_31, CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_30_31, CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_30_31, CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_32_33, CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_32_33, CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_32_33, CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_32_33, CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_START_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_START_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_2_3, CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_2_3, CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_2_3, CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_2_3, CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_4_5, CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_4_5, CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_4_5, CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_4_5, CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_6_7, CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_6_7, CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_6_7, CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_6_7, CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_8_9, CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_8_9, CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_8_9, CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_8_9, CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_10_11, CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_10_11, CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_10_11, CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_10_11, CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_12_13, CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_12_13, CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_12_13, CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_12_13, CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_14_15, CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_14_15, CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_14_15, CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_14_15, CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_16_17, CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_16_17, CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_16_17, CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_16_17, CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_18_19, CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_18_19, CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_18_19, CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_18_19, CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_20_21, CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_20_21, CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_20_21, CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_20_21, CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_22_23, CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_22_23, CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_22_23, CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_22_23, CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_24_25, CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_24_25, CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_24_25, CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_24_25, CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_26_27, CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_26_27, CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_26_27, CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_26_27, CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_28_29, CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_28_29, CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_28_29, CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_28_29, CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_30_31, CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_30_31, CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_30_31, CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_30_31, CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_INDEX, CM_BLNDGAM_LUT_INDEX, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_DATA, CM_BLNDGAM_LUT_DATA, mask_sh), \
+ TF_SF(CM0_CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, mask_sh), \
+ TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_MODE, mask_sh), \
+ TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_SIZE, mask_sh), \
+ TF_SF(CM0_CM_3DLUT_INDEX, CM_3DLUT_INDEX, mask_sh), \
+ TF_SF(CM0_CM_3DLUT_DATA, CM_3DLUT_DATA0, mask_sh), \
+ TF_SF(CM0_CM_3DLUT_DATA, CM_3DLUT_DATA1, mask_sh), \
+ TF_SF(CM0_CM_3DLUT_DATA_30BIT, CM_3DLUT_DATA_30BIT, mask_sh), \
+ TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK, mask_sh), \
+ TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_RAM_SEL, mask_sh), \
+ TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_30BIT_EN, mask_sh), \
+ TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_READ_SEL, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_START_B, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_G, CM_SHAPER_RAMB_EXP_REGION_START_G, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_G, CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_R, CM_SHAPER_RAMB_EXP_REGION_START_R, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_R, CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_END_BASE_B, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_G, CM_SHAPER_RAMB_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_G, CM_SHAPER_RAMB_EXP_REGION_END_BASE_G, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_R, CM_SHAPER_RAMB_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_R, CM_SHAPER_RAMB_EXP_REGION_END_BASE_R, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_0_1, CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_0_1, CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_0_1, CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_0_1, CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_2_3, CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_2_3, CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_2_3, CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_2_3, CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_4_5, CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_4_5, CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_4_5, CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_4_5, CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_6_7, CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_6_7, CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_6_7, CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_6_7, CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_8_9, CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_8_9, CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_8_9, CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_8_9, CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_10_11, CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_10_11, CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_10_11, CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_10_11, CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_12_13, CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_12_13, CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_12_13, CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_12_13, CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_14_15, CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_14_15, CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_14_15, CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_14_15, CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_16_17, CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_16_17, CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_16_17, CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_16_17, CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_18_19, CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_18_19, CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_18_19, CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_18_19, CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_20_21, CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_20_21, CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_20_21, CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_20_21, CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_22_23, CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_22_23, CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_22_23, CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_22_23, CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_24_25, CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_24_25, CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_24_25, CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_24_25, CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_26_27, CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_26_27, CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_26_27, CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_26_27, CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_28_29, CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_28_29, CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_28_29, CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_28_29, CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_30_31, CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_30_31, CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_30_31, CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_30_31, CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_32_33, CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_32_33, CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_32_33, CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMB_REGION_32_33, CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_B, CM_SHAPER_RAMA_EXP_REGION_START_B, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_B, CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_G, CM_SHAPER_RAMA_EXP_REGION_START_G, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_G, CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_R, CM_SHAPER_RAMA_EXP_REGION_START_R, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_R, CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_B, CM_SHAPER_RAMA_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_B, CM_SHAPER_RAMA_EXP_REGION_END_BASE_B, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_G, CM_SHAPER_RAMA_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_G, CM_SHAPER_RAMA_EXP_REGION_END_BASE_G, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_R, CM_SHAPER_RAMA_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_R, CM_SHAPER_RAMA_EXP_REGION_END_BASE_R, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_0_1, CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_0_1, CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_0_1, CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_0_1, CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_2_3, CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_2_3, CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_2_3, CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_2_3, CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_4_5, CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_4_5, CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_4_5, CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_4_5, CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_6_7, CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_6_7, CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_6_7, CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_6_7, CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_8_9, CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_8_9, CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_8_9, CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_8_9, CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_10_11, CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_10_11, CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_10_11, CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_10_11, CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_12_13, CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_12_13, CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_12_13, CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_12_13, CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_14_15, CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_14_15, CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_14_15, CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_14_15, CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_16_17, CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_16_17, CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_16_17, CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_16_17, CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_18_19, CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_18_19, CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_18_19, CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_18_19, CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_20_21, CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_20_21, CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_20_21, CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_20_21, CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_22_23, CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_22_23, CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_22_23, CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_22_23, CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_24_25, CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_24_25, CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_24_25, CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_24_25, CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_26_27, CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_26_27, CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_26_27, CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_26_27, CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_28_29, CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_28_29, CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_28_29, CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_28_29, CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_30_31, CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_30_31, CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_30_31, CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_30_31, CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_EN_MASK, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_SEL, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_LUT_INDEX, CM_SHAPER_LUT_INDEX, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_LUT_DATA, CM_SHAPER_LUT_DATA, mask_sh)
+
+
+#define TF_REG_LIST_SH_MASK_DCN20(mask_sh)\
+ TF_REG_LIST_SH_MASK_DCN(mask_sh), \
+ TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh), \
+ TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh), \
+ TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_CONFIG_STATUS, mask_sh), \
+ TF_SF(CM0_CM_CONTROL, CM_BYPASS, mask_sh), \
+ TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
+ TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \
+ TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \
+ TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CNV16, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, CLAMP_POSITIVE, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, CLAMP_POSITIVE_C, mask_sh), \
+ TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, mask_sh), \
+ TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, mask_sh), \
+ TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, mask_sh), \
+ TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, mask_sh), \
+ TF_SF(CNVC_CFG0_FCNV_FP_BIAS_R, FCNV_FP_BIAS_R, mask_sh), \
+ TF_SF(CNVC_CFG0_FCNV_FP_BIAS_G, FCNV_FP_BIAS_G, mask_sh), \
+ TF_SF(CNVC_CFG0_FCNV_FP_BIAS_B, FCNV_FP_BIAS_B, mask_sh), \
+ TF_SF(CNVC_CFG0_FCNV_FP_SCALE_R, FCNV_FP_SCALE_R, mask_sh), \
+ TF_SF(CNVC_CFG0_FCNV_FP_SCALE_G, FCNV_FP_SCALE_G, mask_sh), \
+ TF_SF(CNVC_CFG0_FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_CONTROL, COLOR_KEYER_EN, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_CONTROL, COLOR_KEYER_MODE, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_LOW, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_HIGH, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_RED, COLOR_KEYER_RED_LOW, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_RED, COLOR_KEYER_RED_HIGH, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_LOW, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_HIGH, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_LOW, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_HIGH, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_PIX_INV_MODE, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_PIXEL_ALPHA_MOD_EN, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ROM_EN, mask_sh),\
+ TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\
+ TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh)
+
+/* DPP CM debug status register:
+ *
+ * The status index that reports the current ICSC and Gamut Remap mode is 9.
+ * ICSC Mode:        bits [4..3]
+ * Gamut Remap Mode: bits [10..9]
+ */
+#define CM_TEST_DEBUG_DATA_STATUS_IDX 9
+
+#define TF_DEBUG_REG_LIST_SH_DCN20 \
+ TF_DEBUG_REG_LIST_SH_DCN10, \
+ .CM_TEST_DEBUG_DATA_ICSC_MODE = 3, \
+ .CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 9
+
+#define TF_DEBUG_REG_LIST_MASK_DCN20 \
+ TF_DEBUG_REG_LIST_MASK_DCN10, \
+ .CM_TEST_DEBUG_DATA_ICSC_MODE = 0x18, \
+ .CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 0x600
+
+#define TF_REG_FIELD_LIST_DCN2_0(type) \
+ TF_REG_FIELD_LIST(type) \
+ type CM_BLNDGAM_LUT_DATA; \
+ type CM_TEST_DEBUG_DATA_ICSC_MODE; \
+ type CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE; \
+ type FORMAT_CNV16; \
+ type CNVC_BYPASS_MSB_ALIGN; \
+ type CLAMP_POSITIVE; \
+ type CLAMP_POSITIVE_C; \
+ type ALPHA_2BIT_LUT0; \
+ type ALPHA_2BIT_LUT1; \
+ type ALPHA_2BIT_LUT2; \
+ type ALPHA_2BIT_LUT3; \
+ type FCNV_FP_BIAS_R; \
+ type FCNV_FP_BIAS_G; \
+ type FCNV_FP_BIAS_B; \
+ type FCNV_FP_SCALE_R; \
+ type FCNV_FP_SCALE_G; \
+ type FCNV_FP_SCALE_B; \
+ type COLOR_KEYER_EN; \
+ type COLOR_KEYER_MODE; \
+ type COLOR_KEYER_ALPHA_LOW; \
+ type COLOR_KEYER_ALPHA_HIGH; \
+ type COLOR_KEYER_RED_LOW; \
+ type COLOR_KEYER_RED_HIGH; \
+ type COLOR_KEYER_GREEN_LOW; \
+ type COLOR_KEYER_GREEN_HIGH; \
+ type COLOR_KEYER_BLUE_LOW; \
+ type COLOR_KEYER_BLUE_HIGH; \
+ type CUR0_PIX_INV_MODE; \
+ type CUR0_PIXEL_ALPHA_MOD_EN; \
+ type CUR0_ROM_EN;\
+ type OBUF_MEM_PWR_FORCE
+
+
+struct dcn2_dpp_shift {
+ TF_REG_FIELD_LIST_DCN2_0(uint8_t);
+};
+
+struct dcn2_dpp_mask {
+ TF_REG_FIELD_LIST_DCN2_0(uint32_t);
+};
+
+#define DPP_DCN2_REG_VARIABLE_LIST \
+ DPP_COMMON_REG_VARIABLE_LIST \
+ uint32_t CM_BLNDGAM_LUT_DATA; \
+ uint32_t ALPHA_2BIT_LUT; \
+ uint32_t FCNV_FP_BIAS_R; \
+ uint32_t FCNV_FP_BIAS_G; \
+ uint32_t FCNV_FP_BIAS_B; \
+ uint32_t FCNV_FP_SCALE_R; \
+ uint32_t FCNV_FP_SCALE_G; \
+ uint32_t FCNV_FP_SCALE_B; \
+ uint32_t COLOR_KEYER_CONTROL; \
+ uint32_t COLOR_KEYER_ALPHA; \
+ uint32_t COLOR_KEYER_RED; \
+ uint32_t COLOR_KEYER_GREEN; \
+ uint32_t COLOR_KEYER_BLUE; \
+ uint32_t OBUF_MEM_PWR_CTRL
+
+#define DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND \
+ uint32_t CM_GAMUT_REMAP_B_C11_C12; \
+ uint32_t CM_GAMUT_REMAP_B_C13_C14; \
+ uint32_t CM_GAMUT_REMAP_B_C21_C22; \
+ uint32_t CM_GAMUT_REMAP_B_C23_C24; \
+ uint32_t CM_GAMUT_REMAP_B_C31_C32; \
+ uint32_t CM_GAMUT_REMAP_B_C33_C34; \
+ uint32_t CM_ICSC_B_C11_C12; \
+ uint32_t CM_ICSC_B_C33_C34
+
+struct dcn2_dpp_registers {
+ DPP_DCN2_REG_VARIABLE_LIST;
+ DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND;
+};
+
+struct dcn20_dpp {
+ struct dpp base;
+
+ const struct dcn2_dpp_registers *tf_regs;
+ const struct dcn2_dpp_shift *tf_shift;
+ const struct dcn2_dpp_mask *tf_mask;
+
+ const uint16_t *filter_v;
+ const uint16_t *filter_h;
+ const uint16_t *filter_v_c;
+ const uint16_t *filter_h_c;
+ int lb_pixel_depth_supported;
+ int lb_memory_size;
+ int lb_bits_per_entry;
+ bool is_write_to_ram_a_safe;
+ struct scaler_data scl_data;
+ struct pwl_params pwl_data;
+};
+
+enum dcn20_input_csc_select {
+ DCN2_ICSC_SELECT_BYPASS = 0,
+ DCN2_ICSC_SELECT_ICSC_A = 1,
+ DCN2_ICSC_SELECT_ICSC_B = 2
+};
+
+enum dcn20_gamut_remap_select {
+ DCN2_GAMUT_REMAP_BYPASS = 0,
+ DCN2_GAMUT_REMAP_COEF_A = 1,
+ DCN2_GAMUT_REMAP_COEF_B = 2
+};
+
+void dpp20_read_state(struct dpp *dpp_base,
+ struct dcn_dpp_state *s);
+
+void dpp2_set_degamma_pwl(
+ struct dpp *dpp_base,
+ const struct pwl_params *params);
+
+void dpp2_set_degamma(
+ struct dpp *dpp_base,
+ enum ipp_degamma_mode mode);
+
+void dpp2_cm_set_gamut_remap(
+ struct dpp *dpp_base,
+ const struct dpp_grph_csc_adjustment *adjust);
+
+void dpp2_program_input_csc(
+ struct dpp *dpp_base,
+ enum dc_color_space color_space,
+ enum dcn20_input_csc_select input_select,
+ const struct out_csc_color_matrix *tbl_entry);
+
+bool dpp20_program_blnd_lut(
+ struct dpp *dpp_base, const struct pwl_params *params);
+
+bool dpp20_program_shaper(
+ struct dpp *dpp_base,
+ const struct pwl_params *params);
+
+bool dpp20_program_3dlut(
+ struct dpp *dpp_base,
+ struct tetrahedral_params *params);
+
+void dpp2_cnv_set_alpha_keyer(
+ struct dpp *dpp_base,
+ struct cnv_color_keyer_params *color_keyer);
+
+void dscl2_calc_lb_num_partitions(
+ const struct scaler_data *scl_data,
+ enum lb_memory_config lb_config,
+ int *num_part_y,
+ int *num_part_c);
+
+void dpp2_set_cursor_attributes(
+ struct dpp *dpp_base,
+ struct dc_cursor_attributes *cursor_attributes);
+
+void dpp2_dummy_program_input_lut(
+ struct dpp *dpp_base,
+ const struct dc_gamma *gamma);
+
+void oppn20_dummy_program_regamma_pwl(
+ struct dpp *dpp,
+ const struct pwl_params *params,
+ enum opp_regamma mode);
+
+void dpp2_set_hdr_multiplier(
+ struct dpp *dpp_base,
+ uint32_t multiplier);
+
+bool dpp2_construct(struct dcn20_dpp *dpp2,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn2_dpp_registers *tf_regs,
+ const struct dcn2_dpp_shift *tf_shift,
+ const struct dcn2_dpp_mask *tf_mask);
+
+void dpp2_power_on_obuf(
+ struct dpp *dpp_base,
+ bool power_on);
+#endif /* __DC_HWSS_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
new file mode 100644
index 0000000000..598caa508d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
@@ -0,0 +1,1147 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "core_types.h"
+
+#include "reg_helper.h"
+#include "dcn20_dpp.h"
+#include "basics/conversion.h"
+
+#include "dcn10/dcn10_cm_common.h"
+
+#define REG(reg)\
+ dpp->tf_regs->reg
+
+#define IND_REG(index) \
+ (index)
+
+#define CTX \
+ dpp->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dpp->tf_shift->field_name, dpp->tf_mask->field_name
+
+
+static void dpp2_enable_cm_block(
+ struct dpp *dpp_base)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ unsigned int cm_bypass_mode = 0;
+	/* Temporary: put CM in bypass mode when the debug option is set */
+ if (dpp_base->ctx->dc->debug.cm_in_bypass)
+ cm_bypass_mode = 1;
+
+ REG_UPDATE(CM_CONTROL, CM_BYPASS, cm_bypass_mode);
+}
+
+
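+/*
+ * Report which degamma PWL RAM is currently active: CM_DGAM_CONFIG_STATUS
+ * value 3 means RAM A is in use, 4 means RAM B. For any other status value
+ * *ram_a_inuse is left untouched and false is returned.
+ */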
+static bool dpp2_degamma_ram_inuse(
+ struct dpp *dpp_base,
+ bool *ram_a_inuse)
+{
+ bool ret = false;
+ uint32_t status_reg = 0;
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ REG_GET(CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_CONFIG_STATUS,
+ &status_reg);
+
+ if (status_reg == 3) {
+ *ram_a_inuse = true;
+ ret = true;
+ } else if (status_reg == 4) {
+ *ram_a_inuse = false;
+ ret = true;
+ }
+ return ret;
+}
+
+static void dpp2_program_degamma_lut(
+ struct dpp *dpp_base,
+ const struct pwl_result_data *rgb,
+ uint32_t num,
+ bool is_ram_a)
+{
+ uint32_t i;
+
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+ REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK,
+ CM_DGAM_LUT_WRITE_EN_MASK, 7);
+ REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_SEL,
+ is_ram_a == true ? 0:1);
+
+ REG_SET(CM_DGAM_LUT_INDEX, 0, CM_DGAM_LUT_INDEX, 0);
+ for (i = 0 ; i < num; i++) {
+ REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].red_reg);
+ REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].green_reg);
+ REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].blue_reg);
+
+ REG_SET(CM_DGAM_LUT_DATA, 0,
+ CM_DGAM_LUT_DATA, rgb[i].delta_red_reg);
+ REG_SET(CM_DGAM_LUT_DATA, 0,
+ CM_DGAM_LUT_DATA, rgb[i].delta_green_reg);
+ REG_SET(CM_DGAM_LUT_DATA, 0,
+ CM_DGAM_LUT_DATA, rgb[i].delta_blue_reg);
+
+ }
+
+}
+
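+/*
+ * Program the user degamma PWL into whichever LUT RAM (A or B) is not
+ * currently in use, then switch to it, so the update is double buffered.
+ */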
+void dpp2_set_degamma_pwl(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ bool is_ram_a = true;
+
+ dpp1_power_on_degamma_lut(dpp_base, true);
+ dpp2_enable_cm_block(dpp_base);
+ dpp2_degamma_ram_inuse(dpp_base, &is_ram_a);
+ if (is_ram_a == true)
+ dpp1_program_degamma_lutb_settings(dpp_base, params);
+ else
+ dpp1_program_degamma_luta_settings(dpp_base, params);
+
+ dpp2_program_degamma_lut(dpp_base, params->rgb_resulted, params->hw_points_num, !is_ram_a);
+ dpp1_degamma_ram_select(dpp_base, !is_ram_a);
+}
+
+void dpp2_set_degamma(
+ struct dpp *dpp_base,
+ enum ipp_degamma_mode mode)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+ dpp2_enable_cm_block(dpp_base);
+
+ switch (mode) {
+ case IPP_DEGAMMA_MODE_BYPASS:
+		/* Set degamma to bypass for now */
+ REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 0);
+ break;
+ case IPP_DEGAMMA_MODE_HW_sRGB:
+ REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 1);
+ break;
+ case IPP_DEGAMMA_MODE_HW_xvYCC:
+ REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2);
+ break;
+ case IPP_DEGAMMA_MODE_USER_PWL:
+ REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
+
+static void program_gamut_remap(
+ struct dcn20_dpp *dpp,
+ const uint16_t *regval,
+ enum dcn20_gamut_remap_select select)
+{
+ uint32_t cur_select = 0;
+ struct color_matrices_reg gam_regs;
+
+ if (regval == NULL || select == DCN2_GAMUT_REMAP_BYPASS) {
+ REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
+ CM_GAMUT_REMAP_MODE, 0);
+ return;
+ }
+
+	/* Determine which gamut_remap coefficient set (A or B) is currently in
+	 * use, then select the alternate set so the double-buffered update
+	 * takes effect on a frame boundary.
+	 */
+ IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA,
+ CM_TEST_DEBUG_DATA_STATUS_IDX,
+ CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE, &cur_select);
+
+ /* value stored in dbg reg will be 1 greater than mode we want */
+ if (cur_select != DCN2_GAMUT_REMAP_COEF_A)
+ select = DCN2_GAMUT_REMAP_COEF_A;
+ else
+ select = DCN2_GAMUT_REMAP_COEF_B;
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
+
+ if (select == DCN2_GAMUT_REMAP_COEF_A) {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
+ } else {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34);
+ }
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ REG_SET(
+ CM_GAMUT_REMAP_CONTROL, 0,
+ CM_GAMUT_REMAP_MODE, select);
+
+}
+
+void dpp2_cm_set_gamut_remap(
+ struct dpp *dpp_base,
+ const struct dpp_grph_csc_adjustment *adjust)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+ int i = 0;
+
+ if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
+ /* Bypass if type is bypass or hw */
+ program_gamut_remap(dpp, NULL, DCN2_GAMUT_REMAP_BYPASS);
+ else {
+ struct fixed31_32 arr_matrix[12];
+ uint16_t arr_reg_val[12];
+
+ for (i = 0; i < 12; i++)
+ arr_matrix[i] = adjust->temperature_matrix[i];
+
+ convert_float_matrix(
+ arr_reg_val, arr_matrix, 12);
+
+ program_gamut_remap(dpp, arr_reg_val, DCN2_GAMUT_REMAP_COEF_A);
+ }
+}
+
+void dpp2_program_input_csc(
+ struct dpp *dpp_base,
+ enum dc_color_space color_space,
+ enum dcn20_input_csc_select input_select,
+ const struct out_csc_color_matrix *tbl_entry)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+ int i;
+ int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix);
+ const uint16_t *regval = NULL;
+ uint32_t cur_select = 0;
+ enum dcn20_input_csc_select select;
+ struct color_matrices_reg icsc_regs;
+
+ if (input_select == DCN2_ICSC_SELECT_BYPASS) {
+ REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0);
+ return;
+ }
+
+ if (tbl_entry == NULL) {
+ for (i = 0; i < arr_size; i++)
+ if (dpp_input_csc_matrix[i].color_space == color_space) {
+ regval = dpp_input_csc_matrix[i].regval;
+ break;
+ }
+
+ if (regval == NULL) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ } else {
+ regval = tbl_entry->regval;
+ }
+
+	/* Determine which CSC coefficient set (A or B) is currently in use,
+	 * then select the alternate set so the double-buffered CSC update
+	 * takes effect on a frame boundary.
+	 */
+ IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA,
+ CM_TEST_DEBUG_DATA_STATUS_IDX,
+ CM_TEST_DEBUG_DATA_ICSC_MODE, &cur_select);
+
+ if (cur_select != DCN2_ICSC_SELECT_ICSC_A)
+ select = DCN2_ICSC_SELECT_ICSC_A;
+ else
+ select = DCN2_ICSC_SELECT_ICSC_B;
+
+ icsc_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11;
+ icsc_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11;
+ icsc_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12;
+ icsc_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12;
+
+ if (select == DCN2_ICSC_SELECT_ICSC_A) {
+
+ icsc_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12);
+ icsc_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34);
+
+ } else {
+
+ icsc_regs.csc_c11_c12 = REG(CM_ICSC_B_C11_C12);
+ icsc_regs.csc_c33_c34 = REG(CM_ICSC_B_C33_C34);
+
+ }
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &icsc_regs);
+
+ REG_SET(CM_ICSC_CONTROL, 0,
+ CM_ICSC_MODE, select);
+}
+
+static void dpp20_power_on_blnd_lut(
+ struct dpp *dpp_base,
+ bool power_on)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ REG_SET(CM_MEM_PWR_CTRL, 0,
+ BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0:1);
+
+}
+
+static void dpp20_configure_blnd_lut(
+ struct dpp *dpp_base,
+ bool is_ram_a)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ REG_UPDATE(CM_BLNDGAM_LUT_WRITE_EN_MASK,
+ CM_BLNDGAM_LUT_WRITE_EN_MASK, 7);
+ REG_UPDATE(CM_BLNDGAM_LUT_WRITE_EN_MASK,
+ CM_BLNDGAM_LUT_WRITE_SEL, is_ram_a == true ? 0:1);
+ REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0);
+}
+
+static void dpp20_program_blnd_pwl(
+ struct dpp *dpp_base,
+ const struct pwl_result_data *rgb,
+ uint32_t num)
+{
+ uint32_t i;
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ for (i = 0 ; i < num; i++) {
+ REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg);
+ REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].green_reg);
+ REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].blue_reg);
+
+ REG_SET(CM_BLNDGAM_LUT_DATA, 0,
+ CM_BLNDGAM_LUT_DATA, rgb[i].delta_red_reg);
+ REG_SET(CM_BLNDGAM_LUT_DATA, 0,
+ CM_BLNDGAM_LUT_DATA, rgb[i].delta_green_reg);
+ REG_SET(CM_BLNDGAM_LUT_DATA, 0,
+ CM_BLNDGAM_LUT_DATA, rgb[i].delta_blue_reg);
+
+ }
+
+}
+
+static void dcn20_dpp_cm_get_reg_field(
+ struct dcn20_dpp *dpp,
+ struct xfer_func_reg *reg)
+{
+ reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET;
+ reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET;
+ reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
+ reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
+ reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET;
+ reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET;
+ reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
+ reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
+
+ reg->shifts.field_region_end = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_B;
+ reg->masks.field_region_end = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_B;
+ reg->shifts.field_region_end_slope = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B;
+ reg->masks.field_region_end_slope = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B;
+ reg->shifts.field_region_end_base = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B;
+ reg->masks.field_region_end_base = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B;
+ reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B;
+ reg->masks.field_region_linear_slope = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B;
+ reg->shifts.exp_region_start = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_B;
+ reg->masks.exp_region_start = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_B;
+ reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B;
+ reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B;
+}
+
+/*program blnd lut RAM A*/
+static void dpp20_program_blnd_luta_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+ struct xfer_func_reg gam_regs;
+
+ dcn20_dpp_cm_get_reg_field(dpp, &gam_regs);
+
+ gam_regs.start_cntl_b = REG(CM_BLNDGAM_RAMA_START_CNTL_B);
+ gam_regs.start_cntl_g = REG(CM_BLNDGAM_RAMA_START_CNTL_G);
+ gam_regs.start_cntl_r = REG(CM_BLNDGAM_RAMA_START_CNTL_R);
+ gam_regs.start_slope_cntl_b = REG(CM_BLNDGAM_RAMA_SLOPE_CNTL_B);
+ gam_regs.start_slope_cntl_g = REG(CM_BLNDGAM_RAMA_SLOPE_CNTL_G);
+ gam_regs.start_slope_cntl_r = REG(CM_BLNDGAM_RAMA_SLOPE_CNTL_R);
+ gam_regs.start_end_cntl1_b = REG(CM_BLNDGAM_RAMA_END_CNTL1_B);
+ gam_regs.start_end_cntl2_b = REG(CM_BLNDGAM_RAMA_END_CNTL2_B);
+ gam_regs.start_end_cntl1_g = REG(CM_BLNDGAM_RAMA_END_CNTL1_G);
+ gam_regs.start_end_cntl2_g = REG(CM_BLNDGAM_RAMA_END_CNTL2_G);
+ gam_regs.start_end_cntl1_r = REG(CM_BLNDGAM_RAMA_END_CNTL1_R);
+ gam_regs.start_end_cntl2_r = REG(CM_BLNDGAM_RAMA_END_CNTL2_R);
+ gam_regs.region_start = REG(CM_BLNDGAM_RAMA_REGION_0_1);
+ gam_regs.region_end = REG(CM_BLNDGAM_RAMA_REGION_32_33);
+
+ cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
+}
+
+/*program blnd lut RAM B*/
+static void dpp20_program_blnd_lutb_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+ struct xfer_func_reg gam_regs;
+
+ dcn20_dpp_cm_get_reg_field(dpp, &gam_regs);
+
+ gam_regs.start_cntl_b = REG(CM_BLNDGAM_RAMB_START_CNTL_B);
+ gam_regs.start_cntl_g = REG(CM_BLNDGAM_RAMB_START_CNTL_G);
+ gam_regs.start_cntl_r = REG(CM_BLNDGAM_RAMB_START_CNTL_R);
+ gam_regs.start_slope_cntl_b = REG(CM_BLNDGAM_RAMB_SLOPE_CNTL_B);
+ gam_regs.start_slope_cntl_g = REG(CM_BLNDGAM_RAMB_SLOPE_CNTL_G);
+ gam_regs.start_slope_cntl_r = REG(CM_BLNDGAM_RAMB_SLOPE_CNTL_R);
+ gam_regs.start_end_cntl1_b = REG(CM_BLNDGAM_RAMB_END_CNTL1_B);
+ gam_regs.start_end_cntl2_b = REG(CM_BLNDGAM_RAMB_END_CNTL2_B);
+ gam_regs.start_end_cntl1_g = REG(CM_BLNDGAM_RAMB_END_CNTL1_G);
+ gam_regs.start_end_cntl2_g = REG(CM_BLNDGAM_RAMB_END_CNTL2_G);
+ gam_regs.start_end_cntl1_r = REG(CM_BLNDGAM_RAMB_END_CNTL1_R);
+ gam_regs.start_end_cntl2_r = REG(CM_BLNDGAM_RAMB_END_CNTL2_R);
+ gam_regs.region_start = REG(CM_BLNDGAM_RAMB_REGION_0_1);
+ gam_regs.region_end = REG(CM_BLNDGAM_RAMB_REGION_32_33);
+
+ cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
+}
+
+static enum dc_lut_mode dpp20_get_blndgam_current(struct dpp *dpp_base)
+{
+ enum dc_lut_mode mode;
+ uint32_t state_mode;
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ REG_GET(CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, &state_mode);
+
+ switch (state_mode) {
+ case 0:
+ mode = LUT_BYPASS;
+ break;
+ case 1:
+ mode = LUT_RAM_A;
+ break;
+ case 2:
+ mode = LUT_RAM_B;
+ break;
+ default:
+ mode = LUT_BYPASS;
+ break;
+ }
+
+ return mode;
+}
+
+bool dpp20_program_blnd_lut(
+ struct dpp *dpp_base, const struct pwl_params *params)
+{
+ enum dc_lut_mode current_mode;
+ enum dc_lut_mode next_mode;
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ if (params == NULL) {
+ REG_SET(CM_BLNDGAM_CONTROL, 0, CM_BLNDGAM_LUT_MODE, 0);
+ return false;
+ }
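+	/*
+	 * Double buffer the update: program the blend gamma RAM that is not
+	 * currently active, then switch CM_BLNDGAM_LUT_MODE to it.
+	 */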
+ current_mode = dpp20_get_blndgam_current(dpp_base);
+ if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
+ next_mode = LUT_RAM_B;
+ else
+ next_mode = LUT_RAM_A;
+
+ dpp20_power_on_blnd_lut(dpp_base, true);
+ dpp20_configure_blnd_lut(dpp_base, next_mode == LUT_RAM_A);
+
+ if (next_mode == LUT_RAM_A)
+ dpp20_program_blnd_luta_settings(dpp_base, params);
+ else
+ dpp20_program_blnd_lutb_settings(dpp_base, params);
+
+ dpp20_program_blnd_pwl(
+ dpp_base, params->rgb_resulted, params->hw_points_num);
+
+ REG_SET(CM_BLNDGAM_CONTROL, 0, CM_BLNDGAM_LUT_MODE,
+ next_mode == LUT_RAM_A ? 1:2);
+
+ return true;
+}
+
+
+static void dpp20_program_shaper_lut(
+ struct dpp *dpp_base,
+ const struct pwl_result_data *rgb,
+ uint32_t num)
+{
+ uint32_t i, red, green, blue;
+ uint32_t red_delta, green_delta, blue_delta;
+ uint32_t red_value, green_value, blue_value;
+
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ for (i = 0 ; i < num; i++) {
+
+ red = rgb[i].red_reg;
+ green = rgb[i].green_reg;
+ blue = rgb[i].blue_reg;
+
+ red_delta = rgb[i].delta_red_reg;
+ green_delta = rgb[i].delta_green_reg;
+ blue_delta = rgb[i].delta_blue_reg;
+
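+		/* Pack each channel as a 14-bit base value plus a 10-bit delta per LUT word. */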
+ red_value = ((red_delta & 0x3ff) << 14) | (red & 0x3fff);
+ green_value = ((green_delta & 0x3ff) << 14) | (green & 0x3fff);
+ blue_value = ((blue_delta & 0x3ff) << 14) | (blue & 0x3fff);
+
+ REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, red_value);
+ REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, green_value);
+ REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, blue_value);
+ }
+
+}
+
+static enum dc_lut_mode dpp20_get_shaper_current(struct dpp *dpp_base)
+{
+ enum dc_lut_mode mode;
+ uint32_t state_mode;
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ REG_GET(CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_CONFIG_STATUS, &state_mode);
+
+ switch (state_mode) {
+ case 0:
+ mode = LUT_BYPASS;
+ break;
+ case 1:
+ mode = LUT_RAM_A;
+ break;
+ case 2:
+ mode = LUT_RAM_B;
+ break;
+ default:
+ mode = LUT_BYPASS;
+ break;
+ }
+
+ return mode;
+}
+
+static void dpp20_configure_shaper_lut(
+ struct dpp *dpp_base,
+ bool is_ram_a)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ REG_UPDATE(CM_SHAPER_LUT_WRITE_EN_MASK,
+ CM_SHAPER_LUT_WRITE_EN_MASK, 7);
+ REG_UPDATE(CM_SHAPER_LUT_WRITE_EN_MASK,
+ CM_SHAPER_LUT_WRITE_SEL, is_ram_a == true ? 0:1);
+ REG_SET(CM_SHAPER_LUT_INDEX, 0, CM_SHAPER_LUT_INDEX, 0);
+}
+
+/*program shaper RAM A*/
+
+static void dpp20_program_shaper_luta_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ const struct gamma_curve *curve;
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ REG_SET_2(CM_SHAPER_RAMA_START_CNTL_B, 0,
+ CM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x,
+ CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
+ REG_SET_2(CM_SHAPER_RAMA_START_CNTL_G, 0,
+ CM_SHAPER_RAMA_EXP_REGION_START_G, params->corner_points[0].green.custom_float_x,
+ CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G, 0);
+ REG_SET_2(CM_SHAPER_RAMA_START_CNTL_R, 0,
+ CM_SHAPER_RAMA_EXP_REGION_START_R, params->corner_points[0].red.custom_float_x,
+ CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R, 0);
+
+ REG_SET_2(CM_SHAPER_RAMA_END_CNTL_B, 0,
+ CM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x,
+ CM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y);
+
+ REG_SET_2(CM_SHAPER_RAMA_END_CNTL_G, 0,
+ CM_SHAPER_RAMA_EXP_REGION_END_G, params->corner_points[1].green.custom_float_x,
+ CM_SHAPER_RAMA_EXP_REGION_END_BASE_G, params->corner_points[1].green.custom_float_y);
+
+ REG_SET_2(CM_SHAPER_RAMA_END_CNTL_R, 0,
+ CM_SHAPER_RAMA_EXP_REGION_END_R, params->corner_points[1].red.custom_float_x,
+ CM_SHAPER_RAMA_EXP_REGION_END_BASE_R, params->corner_points[1].red.custom_float_y);
+
+ curve = params->arr_curve_points;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_0_1, 0,
+ CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_2_3, 0,
+ CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_4_5, 0,
+ CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_6_7, 0,
+ CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_8_9, 0,
+ CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_10_11, 0,
+ CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_12_13, 0,
+ CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_14_15, 0,
+ CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_16_17, 0,
+ CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_18_19, 0,
+ CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_20_21, 0,
+ CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_22_23, 0,
+ CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_24_25, 0,
+ CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_26_27, 0,
+ CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_28_29, 0,
+ CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_30_31, 0,
+ CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_32_33, 0,
+ CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, curve[1].segments_num);
+}
+
+/*program shaper RAM B*/
+static void dpp20_program_shaper_lutb_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ const struct gamma_curve *curve;
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ REG_SET_2(CM_SHAPER_RAMB_START_CNTL_B, 0,
+ CM_SHAPER_RAMB_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x,
+ CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B, 0);
+ REG_SET_2(CM_SHAPER_RAMB_START_CNTL_G, 0,
+ CM_SHAPER_RAMB_EXP_REGION_START_G, params->corner_points[0].green.custom_float_x,
+ CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G, 0);
+ REG_SET_2(CM_SHAPER_RAMB_START_CNTL_R, 0,
+ CM_SHAPER_RAMB_EXP_REGION_START_R, params->corner_points[0].red.custom_float_x,
+ CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R, 0);
+
+ REG_SET_2(CM_SHAPER_RAMB_END_CNTL_B, 0,
+ CM_SHAPER_RAMB_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x,
+ CM_SHAPER_RAMB_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y);
+
+ REG_SET_2(CM_SHAPER_RAMB_END_CNTL_G, 0,
+ CM_SHAPER_RAMB_EXP_REGION_END_G, params->corner_points[1].green.custom_float_x,
+ CM_SHAPER_RAMB_EXP_REGION_END_BASE_G, params->corner_points[1].green.custom_float_y);
+
+ REG_SET_2(CM_SHAPER_RAMB_END_CNTL_R, 0,
+ CM_SHAPER_RAMB_EXP_REGION_END_R, params->corner_points[1].red.custom_float_x,
+ CM_SHAPER_RAMB_EXP_REGION_END_BASE_R, params->corner_points[1].red.custom_float_y);
+
+ curve = params->arr_curve_points;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_0_1, 0,
+ CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_2_3, 0,
+ CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_4_5, 0,
+ CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_6_7, 0,
+ CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_8_9, 0,
+ CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_10_11, 0,
+ CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_12_13, 0,
+ CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_14_15, 0,
+ CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_16_17, 0,
+ CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_18_19, 0,
+ CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_20_21, 0,
+ CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_22_23, 0,
+ CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_24_25, 0,
+ CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_26_27, 0,
+ CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_28_29, 0,
+ CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_30_31, 0,
+ CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_32_33, 0,
+ CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS, curve[1].segments_num);
+
+}
+
+
+bool dpp20_program_shaper(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ enum dc_lut_mode current_mode;
+ enum dc_lut_mode next_mode;
+
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ if (params == NULL) {
+ REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, 0);
+ return false;
+ }
+ current_mode = dpp20_get_shaper_current(dpp_base);
+
+ if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
+ next_mode = LUT_RAM_B;
+ else
+ next_mode = LUT_RAM_A;
+
+ dpp20_configure_shaper_lut(dpp_base, next_mode == LUT_RAM_A);
+
+ if (next_mode == LUT_RAM_A)
+ dpp20_program_shaper_luta_settings(dpp_base, params);
+ else
+ dpp20_program_shaper_lutb_settings(dpp_base, params);
+
+ dpp20_program_shaper_lut(
+ dpp_base, params->rgb_resulted, params->hw_points_num);
+
+ REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, next_mode == LUT_RAM_A ? 1:2);
+
+ return true;
+
+}
+
+static enum dc_lut_mode get3dlut_config(
+ struct dpp *dpp_base,
+ bool *is_17x17x17,
+ bool *is_12bits_color_channel)
+{
+ uint32_t i_mode, i_enable_10bits, lut_size;
+ enum dc_lut_mode mode;
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ REG_GET_2(CM_3DLUT_READ_WRITE_CONTROL,
+ CM_3DLUT_CONFIG_STATUS, &i_mode,
+ CM_3DLUT_30BIT_EN, &i_enable_10bits);
+
+ switch (i_mode) {
+ case 0:
+ mode = LUT_BYPASS;
+ break;
+ case 1:
+ mode = LUT_RAM_A;
+ break;
+ case 2:
+ mode = LUT_RAM_B;
+ break;
+ default:
+ mode = LUT_BYPASS;
+ break;
+ }
+ if (i_enable_10bits > 0)
+ *is_12bits_color_channel = false;
+ else
+ *is_12bits_color_channel = true;
+
+ REG_GET(CM_3DLUT_MODE, CM_3DLUT_SIZE, &lut_size);
+
+ if (lut_size == 0)
+ *is_17x17x17 = true;
+ else
+ *is_17x17x17 = false;
+
+ return mode;
+}
+/*
+ * select ramA or ramB, or bypass
+ * select color channel size 10 or 12 bits
+ * select 3dlut size 17x17x17 or 9x9x9
+ */
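+/*
+ * Register encoding used below: CM_3DLUT_MODE 0 = bypass, 1 = RAM A, 2 = RAM B;
+ * CM_3DLUT_SIZE 0 = 17x17x17, 1 = 9x9x9.
+ */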
+static void dpp20_set_3dlut_mode(
+ struct dpp *dpp_base,
+ enum dc_lut_mode mode,
+ bool is_color_channel_12bits,
+ bool is_lut_size17x17x17)
+{
+ uint32_t lut_mode;
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ if (mode == LUT_BYPASS)
+ lut_mode = 0;
+ else if (mode == LUT_RAM_A)
+ lut_mode = 1;
+ else
+ lut_mode = 2;
+
+ REG_UPDATE_2(CM_3DLUT_MODE,
+ CM_3DLUT_MODE, lut_mode,
+ CM_3DLUT_SIZE, is_lut_size17x17x17 == true ? 0 : 1);
+}
+
+static void dpp20_select_3dlut_ram(
+ struct dpp *dpp_base,
+ enum dc_lut_mode mode,
+ bool is_color_channel_12bits)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
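+	/* CM_3DLUT_RAM_SEL: 0 = RAM A, 1 = RAM B. CM_3DLUT_30BIT_EN: 1 selects 10-bit
+	 * (30-bit packed) writes, 0 selects 12-bit writes (see get3dlut_config above).
+	 */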
+ REG_UPDATE_2(CM_3DLUT_READ_WRITE_CONTROL,
+ CM_3DLUT_RAM_SEL, mode == LUT_RAM_A ? 0 : 1,
+ CM_3DLUT_30BIT_EN,
+ is_color_channel_12bits == true ? 0:1);
+}
+
+
+
+static void dpp20_set3dlut_ram12(
+ struct dpp *dpp_base,
+ const struct dc_rgb *lut,
+ uint32_t entries)
+{
+ uint32_t i, red, green, blue, red1, green1, blue1;
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
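+	/* Each iteration writes one pair of entries: the two red values, then the
+	 * two greens, then the two blues, with every value shifted left by 4
+	 * (e.g. lut[i].red = 0x100 is written as 0x1000).
+	 */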
+ for (i = 0 ; i < entries; i += 2) {
+ red = lut[i].red<<4;
+ green = lut[i].green<<4;
+ blue = lut[i].blue<<4;
+ red1 = lut[i+1].red<<4;
+ green1 = lut[i+1].green<<4;
+ blue1 = lut[i+1].blue<<4;
+
+ REG_SET_2(CM_3DLUT_DATA, 0,
+ CM_3DLUT_DATA0, red,
+ CM_3DLUT_DATA1, red1);
+
+ REG_SET_2(CM_3DLUT_DATA, 0,
+ CM_3DLUT_DATA0, green,
+ CM_3DLUT_DATA1, green1);
+
+ REG_SET_2(CM_3DLUT_DATA, 0,
+ CM_3DLUT_DATA0, blue,
+ CM_3DLUT_DATA1, blue1);
+
+ }
+}
+
+/*
+ * load the selected LUT with 10-bit color channels
+ */
+static void dpp20_set3dlut_ram10(
+ struct dpp *dpp_base,
+ const struct dc_rgb *lut,
+ uint32_t entries)
+{
+ uint32_t i, red, green, blue, value;
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
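+	/* Pack the three 10-bit channels of an entry into one 30-bit word,
+	 * e.g. red = 0x3ff, green = 0, blue = 0x001 packs to 0x3ff00001.
+	 */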
+ for (i = 0; i < entries; i++) {
+ red = lut[i].red;
+ green = lut[i].green;
+ blue = lut[i].blue;
+
+ value = (red<<20) | (green<<10) | blue;
+
+ REG_SET(CM_3DLUT_DATA_30BIT, 0, CM_3DLUT_DATA_30BIT, value);
+ }
+
+}
+
+
+static void dpp20_select_3dlut_ram_mask(
+ struct dpp *dpp_base,
+ uint32_t ram_selection_mask)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ REG_UPDATE(CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK,
+ ram_selection_mask);
+ REG_SET(CM_3DLUT_INDEX, 0, CM_3DLUT_INDEX, 0);
+}
+
+bool dpp20_program_3dlut(
+ struct dpp *dpp_base,
+ struct tetrahedral_params *params)
+{
+ enum dc_lut_mode mode;
+ bool is_17x17x17;
+ bool is_12bits_color_channel;
+ struct dc_rgb *lut0;
+ struct dc_rgb *lut1;
+ struct dc_rgb *lut2;
+ struct dc_rgb *lut3;
+ int lut_size0;
+ int lut_size;
+
+ if (params == NULL) {
+ dpp20_set_3dlut_mode(dpp_base, LUT_BYPASS, false, false);
+ return false;
+ }
+ mode = get3dlut_config(dpp_base, &is_17x17x17, &is_12bits_color_channel);
+
+ if (mode == LUT_BYPASS || mode == LUT_RAM_B)
+ mode = LUT_RAM_A;
+ else
+ mode = LUT_RAM_B;
+
+ is_17x17x17 = !params->use_tetrahedral_9;
+ is_12bits_color_channel = params->use_12bits;
+ if (is_17x17x17) {
+ lut0 = params->tetrahedral_17.lut0;
+ lut1 = params->tetrahedral_17.lut1;
+ lut2 = params->tetrahedral_17.lut2;
+ lut3 = params->tetrahedral_17.lut3;
+ lut_size0 = sizeof(params->tetrahedral_17.lut0)/
+ sizeof(params->tetrahedral_17.lut0[0]);
+ lut_size = sizeof(params->tetrahedral_17.lut1)/
+ sizeof(params->tetrahedral_17.lut1[0]);
+ } else {
+ lut0 = params->tetrahedral_9.lut0;
+ lut1 = params->tetrahedral_9.lut1;
+ lut2 = params->tetrahedral_9.lut2;
+ lut3 = params->tetrahedral_9.lut3;
+ lut_size0 = sizeof(params->tetrahedral_9.lut0)/
+ sizeof(params->tetrahedral_9.lut0[0]);
+ lut_size = sizeof(params->tetrahedral_9.lut1)/
+ sizeof(params->tetrahedral_9.lut1[0]);
+ }
+
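+	/* Load the four LUT banks one at a time: the write-enable mask (0x1, 0x2,
+	 * 0x4, 0x8) picks the bank that receives data, and the write index is
+	 * reset to 0 before each load.
+	 */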
+ dpp20_select_3dlut_ram(dpp_base, mode,
+ is_12bits_color_channel);
+ dpp20_select_3dlut_ram_mask(dpp_base, 0x1);
+ if (is_12bits_color_channel)
+ dpp20_set3dlut_ram12(dpp_base, lut0, lut_size0);
+ else
+ dpp20_set3dlut_ram10(dpp_base, lut0, lut_size0);
+
+ dpp20_select_3dlut_ram_mask(dpp_base, 0x2);
+ if (is_12bits_color_channel)
+ dpp20_set3dlut_ram12(dpp_base, lut1, lut_size);
+ else
+ dpp20_set3dlut_ram10(dpp_base, lut1, lut_size);
+
+ dpp20_select_3dlut_ram_mask(dpp_base, 0x4);
+ if (is_12bits_color_channel)
+ dpp20_set3dlut_ram12(dpp_base, lut2, lut_size);
+ else
+ dpp20_set3dlut_ram10(dpp_base, lut2, lut_size);
+
+ dpp20_select_3dlut_ram_mask(dpp_base, 0x8);
+ if (is_12bits_color_channel)
+ dpp20_set3dlut_ram12(dpp_base, lut3, lut_size);
+ else
+ dpp20_set3dlut_ram10(dpp_base, lut3, lut_size);
+
+
+ dpp20_set_3dlut_mode(dpp_base, mode, is_12bits_color_channel,
+ is_17x17x17);
+
+ return true;
+}
+
+void dpp2_set_hdr_multiplier(
+ struct dpp *dpp_base,
+ uint32_t multiplier)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
new file mode 100644
index 0000000000..5eebe7f03d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -0,0 +1,772 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <drm/display/drm_dsc_helper.h>
+
+#include "reg_helper.h"
+#include "dcn20_dsc.h"
+#include "dsc/dscc_types.h"
+#include "dsc/rc_calc.h"
+
+static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals);
+
+/* Object I/F functions */
+static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
+static bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg);
+static void dsc2_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
+ struct dsc_optc_config *dsc_optc_cfg);
+static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe);
+static void dsc2_disable(struct display_stream_compressor *dsc);
+static void dsc2_disconnect(struct display_stream_compressor *dsc);
+
+static const struct dsc_funcs dcn20_dsc_funcs = {
+ .dsc_get_enc_caps = dsc2_get_enc_caps,
+ .dsc_read_state = dsc2_read_state,
+ .dsc_validate_stream = dsc2_validate_stream,
+ .dsc_set_config = dsc2_set_config,
+ .dsc_get_packed_pps = dsc2_get_packed_pps,
+ .dsc_enable = dsc2_enable,
+ .dsc_disable = dsc2_disable,
+ .dsc_disconnect = dsc2_disconnect,
+};
+
+/* Macro definitions for REG_SET macros */
+#define CTX \
+ dsc20->base.ctx
+
+#define REG(reg)\
+ dsc20->dsc_regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dsc20->dsc_shift->field_name, dsc20->dsc_mask->field_name
+#define DC_LOGGER \
+ dsc->ctx->logger
+
+enum dsc_bits_per_comp {
+ DSC_BPC_8 = 8,
+ DSC_BPC_10 = 10,
+ DSC_BPC_12 = 12,
+ DSC_BPC_UNKNOWN
+};
+
+/* API functions (external or via structure->function_pointer) */
+
+void dsc2_construct(struct dcn20_dsc *dsc,
+ struct dc_context *ctx,
+ int inst,
+ const struct dcn20_dsc_registers *dsc_regs,
+ const struct dcn20_dsc_shift *dsc_shift,
+ const struct dcn20_dsc_mask *dsc_mask)
+{
+ dsc->base.ctx = ctx;
+ dsc->base.inst = inst;
+ dsc->base.funcs = &dcn20_dsc_funcs;
+
+ dsc->dsc_regs = dsc_regs;
+ dsc->dsc_shift = dsc_shift;
+ dsc->dsc_mask = dsc_mask;
+
+ dsc->max_image_width = 5184;
+}
+
+
+#define DCN20_MAX_PIXEL_CLOCK_Mhz 1188
+#define DCN20_MAX_DISPLAY_CLOCK_Mhz 1200
+
+/* This returns the capabilities for a single DSC encoder engine. Number of slices and total throughput
+ * can be doubled, tripled etc. by using additional DSC engines.
+ */
+void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz)
+{
+	dsc_enc_caps->dsc_version = 0x21; /* v1.2 - the DP spec puts the minor version in the high nibble and the major in the low, and we keep that encoding */
+
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = 1;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = 1;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = 1;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 = 1;
+
+ dsc_enc_caps->lb_bit_depth = 13;
+ dsc_enc_caps->is_block_pred_supported = true;
+
+ dsc_enc_caps->color_formats.bits.RGB = 1;
+ dsc_enc_caps->color_formats.bits.YCBCR_444 = 1;
+ dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1;
+ dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0;
+ dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1;
+
+ dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;
+ dsc_enc_caps->color_depth.bits.COLOR_DEPTH_10_BPC = 1;
+ dsc_enc_caps->color_depth.bits.COLOR_DEPTH_12_BPC = 1;
+
+	/* Maximum total throughput with all the slices combined. This is different from how the DP spec specifies it.
+ * Our decoder's total throughput in Pix/s is equal to DISPCLK. This is then shared between slices.
+ * The value below is the absolute maximum value. The actual throughput may be lower, but it'll always
+ * be sufficient to process the input pixel rate fed into a single DSC engine.
+ */
+ dsc_enc_caps->max_total_throughput_mps = DCN20_MAX_DISPLAY_CLOCK_Mhz;
+
+	/* For pixel clocks higher than the single-pipe limit we'll need two engines, which then doubles
+	 * our throughput and number of slices, but also introduces a lower limit of 2 slices
+ */
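+	/* pixel_clock_100Hz is in units of 100 Hz, so the comparison below is against
+	 * 1188 MHz (1188 * 10000 * 100 Hz).
+	 */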
+ if (pixel_clock_100Hz >= DCN20_MAX_PIXEL_CLOCK_Mhz*10000) {
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = 0;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 = 1;
+ dsc_enc_caps->max_total_throughput_mps = DCN20_MAX_DISPLAY_CLOCK_Mhz * 2;
+ }
+
+	// TODO DSC: This is actually an image width limitation, not a slice width limit. It should be added to the criteria for using ODM.
+ dsc_enc_caps->max_slice_width = 5184; /* (including 64 overlap pixels for eDP MSO mode) */
+ dsc_enc_caps->bpp_increment_div = 16; /* 1/16th of a bit */
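+	/* e.g. a 12.0 bpp target is carried as 12 * 16 = 192 in these 1/16-bpp units */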
+}
+
+
+/* This function reads DSC-related register fields into a dcn_dsc_state struct,
+ * to be logged later by dcn10_log_hw_state.
+ */
+static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s)
+{
+ struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
+
+ REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &s->dsc_clock_en);
+ REG_GET(DSCC_PPS_CONFIG3, SLICE_WIDTH, &s->dsc_slice_width);
+ REG_GET(DSCC_PPS_CONFIG1, BITS_PER_PIXEL, &s->dsc_bits_per_pixel);
+ REG_GET(DSCC_PPS_CONFIG3, SLICE_HEIGHT, &s->dsc_slice_height);
+ REG_GET(DSCC_PPS_CONFIG1, CHUNK_SIZE, &s->dsc_chunk_size);
+ REG_GET(DSCC_PPS_CONFIG2, PIC_WIDTH, &s->dsc_pic_width);
+ REG_GET(DSCC_PPS_CONFIG2, PIC_HEIGHT, &s->dsc_pic_height);
+ REG_GET(DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, &s->dsc_slice_bpg_offset);
+ REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &s->dsc_fw_en,
+ DSCRM_DSC_OPP_PIPE_SOURCE, &s->dsc_opp_source);
+}
+
+
+static bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg)
+{
+ struct dsc_optc_config dsc_optc_cfg;
+ struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
+
+ if (dsc_cfg->pic_width > dsc20->max_image_width)
+ return false;
+
+ return dsc_prepare_config(dsc_cfg, &dsc20->reg_vals, &dsc_optc_cfg);
+}
+
+
+void dsc_config_log(struct display_stream_compressor *dsc, const struct dsc_config *config)
+{
+ DC_LOG_DSC("\tnum_slices_h %d", config->dc_dsc_cfg.num_slices_h);
+ DC_LOG_DSC("\tnum_slices_v %d", config->dc_dsc_cfg.num_slices_v);
+ DC_LOG_DSC("\tbits_per_pixel %d (%d.%04d)",
+ config->dc_dsc_cfg.bits_per_pixel,
+ config->dc_dsc_cfg.bits_per_pixel / 16,
+ ((config->dc_dsc_cfg.bits_per_pixel % 16) * 10000) / 16);
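+	/* e.g. bits_per_pixel = 136 logs as "136 (8.5000)": 136 / 16 = 8 and
+	 * (136 % 16) * 10000 / 16 = 5000.
+	 */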
+ DC_LOG_DSC("\tcolor_depth %d", config->color_depth);
+}
+
+static void dsc2_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
+ struct dsc_optc_config *dsc_optc_cfg)
+{
+ bool is_config_ok;
+ struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
+
+ DC_LOG_DSC("Setting DSC Config at DSC inst %d", dsc->inst);
+ dsc_config_log(dsc, dsc_cfg);
+ is_config_ok = dsc_prepare_config(dsc_cfg, &dsc20->reg_vals, dsc_optc_cfg);
+ ASSERT(is_config_ok);
+ DC_LOG_DSC("programming DSC Picture Parameter Set (PPS):");
+ dsc_log_pps(dsc, &dsc20->reg_vals.pps);
+ dsc_write_to_registers(dsc, &dsc20->reg_vals);
+}
+
+
+bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps)
+{
+ bool is_config_ok;
+ struct dsc_reg_values dsc_reg_vals;
+ struct dsc_optc_config dsc_optc_cfg;
+
+ memset(&dsc_reg_vals, 0, sizeof(dsc_reg_vals));
+ memset(&dsc_optc_cfg, 0, sizeof(dsc_optc_cfg));
+
+ DC_LOG_DSC("Getting packed DSC PPS for DSC Config:");
+ dsc_config_log(dsc, dsc_cfg);
+ DC_LOG_DSC("DSC Picture Parameter Set (PPS):");
+ is_config_ok = dsc_prepare_config(dsc_cfg, &dsc_reg_vals, &dsc_optc_cfg);
+ ASSERT(is_config_ok);
+ drm_dsc_pps_payload_pack((struct drm_dsc_picture_parameter_set *)dsc_packed_pps, &dsc_reg_vals.pps);
+ dsc_log_pps(dsc, &dsc_reg_vals.pps);
+
+ return is_config_ok;
+}
+
+
+static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe)
+{
+ struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
+ int dsc_clock_en;
+ int dsc_fw_config;
+ int enabled_opp_pipe;
+
+ DC_LOG_DSC("enable DSC %d at opp pipe %d", dsc->inst, opp_pipe);
+
+ REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en);
+ REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe);
+ if ((dsc_clock_en || dsc_fw_config) && enabled_opp_pipe != opp_pipe) {
+ DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already enabled!", dsc->inst, enabled_opp_pipe);
+ ASSERT(0);
+ }
+
+ REG_UPDATE(DSC_TOP_CONTROL,
+ DSC_CLOCK_EN, 1);
+
+ REG_UPDATE_2(DSCRM_DSC_FORWARD_CONFIG,
+ DSCRM_DSC_FORWARD_EN, 1,
+ DSCRM_DSC_OPP_PIPE_SOURCE, opp_pipe);
+}
+
+
+static void dsc2_disable(struct display_stream_compressor *dsc)
+{
+ struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
+ int dsc_clock_en;
+ int dsc_fw_config;
+ int enabled_opp_pipe;
+
+ DC_LOG_DSC("disable DSC %d", dsc->inst);
+
+ REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en);
+ REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe);
+ if (!dsc_clock_en || !dsc_fw_config) {
+ DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already disabled!", dsc->inst, enabled_opp_pipe);
+ ASSERT(0);
+ }
+
+ REG_UPDATE(DSCRM_DSC_FORWARD_CONFIG,
+ DSCRM_DSC_FORWARD_EN, 0);
+
+ REG_UPDATE(DSC_TOP_CONTROL,
+ DSC_CLOCK_EN, 0);
+}
+
+static void dsc2_disconnect(struct display_stream_compressor *dsc)
+{
+ struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
+
+ DC_LOG_DSC("disconnect DSC %d", dsc->inst);
+
+ REG_UPDATE(DSCRM_DSC_FORWARD_CONFIG,
+ DSCRM_DSC_FORWARD_EN, 0);
+}
+
+/* This module's internal functions */
+void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_config *pps)
+{
+ int i;
+ int bits_per_pixel = pps->bits_per_pixel;
+
+ DC_LOG_DSC("\tdsc_version_major %d", pps->dsc_version_major);
+ DC_LOG_DSC("\tdsc_version_minor %d", pps->dsc_version_minor);
+ DC_LOG_DSC("\tbits_per_component %d", pps->bits_per_component);
+ DC_LOG_DSC("\tline_buf_depth %d", pps->line_buf_depth);
+ DC_LOG_DSC("\tblock_pred_enable %d", pps->block_pred_enable);
+ DC_LOG_DSC("\tconvert_rgb %d", pps->convert_rgb);
+ DC_LOG_DSC("\tsimple_422 %d", pps->simple_422);
+ DC_LOG_DSC("\tvbr_enable %d", pps->vbr_enable);
+ DC_LOG_DSC("\tbits_per_pixel %d (%d.%04d)", bits_per_pixel, bits_per_pixel / 16, ((bits_per_pixel % 16) * 10000) / 16);
+ DC_LOG_DSC("\tpic_height %d", pps->pic_height);
+ DC_LOG_DSC("\tpic_width %d", pps->pic_width);
+ DC_LOG_DSC("\tslice_height %d", pps->slice_height);
+ DC_LOG_DSC("\tslice_width %d", pps->slice_width);
+ DC_LOG_DSC("\tslice_chunk_size %d", pps->slice_chunk_size);
+ DC_LOG_DSC("\tinitial_xmit_delay %d", pps->initial_xmit_delay);
+ DC_LOG_DSC("\tinitial_dec_delay %d", pps->initial_dec_delay);
+ DC_LOG_DSC("\tinitial_scale_value %d", pps->initial_scale_value);
+ DC_LOG_DSC("\tscale_increment_interval %d", pps->scale_increment_interval);
+ DC_LOG_DSC("\tscale_decrement_interval %d", pps->scale_decrement_interval);
+ DC_LOG_DSC("\tfirst_line_bpg_offset %d", pps->first_line_bpg_offset);
+ DC_LOG_DSC("\tnfl_bpg_offset %d", pps->nfl_bpg_offset);
+ DC_LOG_DSC("\tslice_bpg_offset %d", pps->slice_bpg_offset);
+ DC_LOG_DSC("\tinitial_offset %d", pps->initial_offset);
+ DC_LOG_DSC("\tfinal_offset %d", pps->final_offset);
+ DC_LOG_DSC("\tflatness_min_qp %d", pps->flatness_min_qp);
+ DC_LOG_DSC("\tflatness_max_qp %d", pps->flatness_max_qp);
+ /* DC_LOG_DSC("\trc_parameter_set %d", pps->rc_parameter_set); */
+ DC_LOG_DSC("\tnative_420 %d", pps->native_420);
+ DC_LOG_DSC("\tnative_422 %d", pps->native_422);
+ DC_LOG_DSC("\tsecond_line_bpg_offset %d", pps->second_line_bpg_offset);
+ DC_LOG_DSC("\tnsl_bpg_offset %d", pps->nsl_bpg_offset);
+ DC_LOG_DSC("\tsecond_line_offset_adj %d", pps->second_line_offset_adj);
+ DC_LOG_DSC("\trc_model_size %d", pps->rc_model_size);
+ DC_LOG_DSC("\trc_edge_factor %d", pps->rc_edge_factor);
+ DC_LOG_DSC("\trc_quant_incr_limit0 %d", pps->rc_quant_incr_limit0);
+ DC_LOG_DSC("\trc_quant_incr_limit1 %d", pps->rc_quant_incr_limit1);
+ DC_LOG_DSC("\trc_tgt_offset_high %d", pps->rc_tgt_offset_high);
+ DC_LOG_DSC("\trc_tgt_offset_low %d", pps->rc_tgt_offset_low);
+
+ for (i = 0; i < NUM_BUF_RANGES - 1; i++)
+ DC_LOG_DSC("\trc_buf_thresh[%d] %d", i, pps->rc_buf_thresh[i]);
+
+ for (i = 0; i < NUM_BUF_RANGES; i++) {
+ DC_LOG_DSC("\trc_range_parameters[%d].range_min_qp %d", i, pps->rc_range_params[i].range_min_qp);
+ DC_LOG_DSC("\trc_range_parameters[%d].range_max_qp %d", i, pps->rc_range_params[i].range_max_qp);
+ DC_LOG_DSC("\trc_range_parameters[%d].range_bpg_offset %d", i, pps->rc_range_params[i].range_bpg_offset);
+ }
+}
+
+void dsc_override_rc_params(struct rc_params *rc, const struct dc_dsc_rc_params_override *override)
+{
+ uint8_t i;
+
+ rc->rc_model_size = override->rc_model_size;
+ for (i = 0; i < DC_DSC_RC_BUF_THRESH_SIZE; i++)
+ rc->rc_buf_thresh[i] = override->rc_buf_thresh[i];
+ for (i = 0; i < DC_DSC_QP_SET_SIZE; i++) {
+ rc->qp_min[i] = override->rc_minqp[i];
+ rc->qp_max[i] = override->rc_maxqp[i];
+ rc->ofs[i] = override->rc_offset[i];
+ }
+
+ rc->rc_tgt_offset_hi = override->rc_tgt_offset_hi;
+ rc->rc_tgt_offset_lo = override->rc_tgt_offset_lo;
+ rc->rc_edge_factor = override->rc_edge_factor;
+ rc->rc_quant_incr_limit0 = override->rc_quant_incr_limit0;
+ rc->rc_quant_incr_limit1 = override->rc_quant_incr_limit1;
+
+ rc->initial_fullness_offset = override->initial_fullness_offset;
+ rc->initial_xmit_delay = override->initial_delay;
+
+ rc->flatness_min_qp = override->flatness_min_qp;
+ rc->flatness_max_qp = override->flatness_max_qp;
+ rc->flatness_det_thresh = override->flatness_det_thresh;
+}
+
+bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals,
+ struct dsc_optc_config *dsc_optc_cfg)
+{
+ struct dsc_parameters dsc_params;
+ struct rc_params rc;
+
+ /* Validate input parameters */
+ ASSERT(dsc_cfg->dc_dsc_cfg.num_slices_h);
+ ASSERT(dsc_cfg->dc_dsc_cfg.num_slices_v);
+ ASSERT(dsc_cfg->dc_dsc_cfg.version_minor == 1 || dsc_cfg->dc_dsc_cfg.version_minor == 2);
+ ASSERT(dsc_cfg->pic_width);
+ ASSERT(dsc_cfg->pic_height);
+ ASSERT((dsc_cfg->dc_dsc_cfg.version_minor == 1 &&
+ (8 <= dsc_cfg->dc_dsc_cfg.linebuf_depth && dsc_cfg->dc_dsc_cfg.linebuf_depth <= 13)) ||
+ (dsc_cfg->dc_dsc_cfg.version_minor == 2 &&
+ ((8 <= dsc_cfg->dc_dsc_cfg.linebuf_depth && dsc_cfg->dc_dsc_cfg.linebuf_depth <= 15) ||
+ dsc_cfg->dc_dsc_cfg.linebuf_depth == 0)));
+ ASSERT(96 <= dsc_cfg->dc_dsc_cfg.bits_per_pixel && dsc_cfg->dc_dsc_cfg.bits_per_pixel <= 0x3ff); // 6.0 <= bits_per_pixel <= 63.9375
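+	/* bits_per_pixel is in 1/16-bpp units: 96 / 16 = 6.0 bpp and 0x3ff = 1023 / 16 = 63.9375 bpp */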
+
+ if (!dsc_cfg->dc_dsc_cfg.num_slices_v || !dsc_cfg->dc_dsc_cfg.num_slices_h ||
+ !(dsc_cfg->dc_dsc_cfg.version_minor == 1 || dsc_cfg->dc_dsc_cfg.version_minor == 2) ||
+ !dsc_cfg->pic_width || !dsc_cfg->pic_height ||
+ !((dsc_cfg->dc_dsc_cfg.version_minor == 1 && // v1.1 line buffer depth range:
+ 8 <= dsc_cfg->dc_dsc_cfg.linebuf_depth && dsc_cfg->dc_dsc_cfg.linebuf_depth <= 13) ||
+ (dsc_cfg->dc_dsc_cfg.version_minor == 2 && // v1.2 line buffer depth range:
+ ((8 <= dsc_cfg->dc_dsc_cfg.linebuf_depth && dsc_cfg->dc_dsc_cfg.linebuf_depth <= 15) ||
+ dsc_cfg->dc_dsc_cfg.linebuf_depth == 0))) ||
+ !(96 <= dsc_cfg->dc_dsc_cfg.bits_per_pixel && dsc_cfg->dc_dsc_cfg.bits_per_pixel <= 0x3ff)) {
+ dm_output_to_console("%s: Invalid parameters\n", __func__);
+ return false;
+ }
+
+ dsc_init_reg_values(dsc_reg_vals);
+
+ /* Copy input config */
+ dsc_reg_vals->pixel_format = dsc_dc_pixel_encoding_to_dsc_pixel_format(dsc_cfg->pixel_encoding, dsc_cfg->dc_dsc_cfg.ycbcr422_simple);
+ dsc_reg_vals->num_slices_h = dsc_cfg->dc_dsc_cfg.num_slices_h;
+ dsc_reg_vals->num_slices_v = dsc_cfg->dc_dsc_cfg.num_slices_v;
+ dsc_reg_vals->pps.dsc_version_minor = dsc_cfg->dc_dsc_cfg.version_minor;
+ dsc_reg_vals->pps.pic_width = dsc_cfg->pic_width;
+ dsc_reg_vals->pps.pic_height = dsc_cfg->pic_height;
+ dsc_reg_vals->pps.bits_per_component = dsc_dc_color_depth_to_dsc_bits_per_comp(dsc_cfg->color_depth);
+ dsc_reg_vals->pps.block_pred_enable = dsc_cfg->dc_dsc_cfg.block_pred_enable;
+ dsc_reg_vals->pps.line_buf_depth = dsc_cfg->dc_dsc_cfg.linebuf_depth;
+ dsc_reg_vals->alternate_ich_encoding_en = dsc_reg_vals->pps.dsc_version_minor == 1 ? 0 : 1;
+ dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0;
+
+ // TODO: in addition to validating slice height (pic height must be divisible by slice height),
+ // see what happens when the same condition doesn't apply for slice_width/pic_width.
+ dsc_reg_vals->pps.slice_width = dsc_cfg->pic_width / dsc_cfg->dc_dsc_cfg.num_slices_h;
+ dsc_reg_vals->pps.slice_height = dsc_cfg->pic_height / dsc_cfg->dc_dsc_cfg.num_slices_v;
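+	/* e.g. a 3840x2160 picture with 4 horizontal and 2 vertical slices yields 960x1080 slices */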
+
+ ASSERT(dsc_reg_vals->pps.slice_height * dsc_cfg->dc_dsc_cfg.num_slices_v == dsc_cfg->pic_height);
+ if (!(dsc_reg_vals->pps.slice_height * dsc_cfg->dc_dsc_cfg.num_slices_v == dsc_cfg->pic_height)) {
+ dm_output_to_console("%s: pix height %d not divisible by num_slices_v %d\n\n", __func__, dsc_cfg->pic_height, dsc_cfg->dc_dsc_cfg.num_slices_v);
+ return false;
+ }
+
+ dsc_reg_vals->bpp_x32 = dsc_cfg->dc_dsc_cfg.bits_per_pixel << 1;
+ if (dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420 || dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR422)
+ dsc_reg_vals->pps.bits_per_pixel = dsc_reg_vals->bpp_x32;
+ else
+ dsc_reg_vals->pps.bits_per_pixel = dsc_reg_vals->bpp_x32 >> 1;
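+	/* e.g. a 12.0 bpp target (192 in 1/16-bpp units) is written to the PPS as 384
+	 * for native 4:2:0/4:2:2 and as 192 otherwise.
+	 */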
+
+ dsc_reg_vals->pps.convert_rgb = dsc_reg_vals->pixel_format == DSC_PIXFMT_RGB ? 1 : 0;
+ dsc_reg_vals->pps.native_422 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR422);
+ dsc_reg_vals->pps.native_420 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420);
+ dsc_reg_vals->pps.simple_422 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422);
+
+ calc_rc_params(&rc, &dsc_reg_vals->pps);
+
+ if (dsc_cfg->dc_dsc_cfg.rc_params_ovrd)
+ dsc_override_rc_params(&rc, dsc_cfg->dc_dsc_cfg.rc_params_ovrd);
+
+ if (dscc_compute_dsc_parameters(&dsc_reg_vals->pps, &rc, &dsc_params)) {
+ dm_output_to_console("%s: DSC config failed\n", __func__);
+ return false;
+ }
+
+ dsc_update_from_dsc_parameters(dsc_reg_vals, &dsc_params);
+
+ dsc_optc_cfg->bytes_per_pixel = dsc_params.bytes_per_pixel;
+ dsc_optc_cfg->slice_width = dsc_reg_vals->pps.slice_width;
+ dsc_optc_cfg->is_pixel_format_444 = dsc_reg_vals->pixel_format == DSC_PIXFMT_RGB ||
+ dsc_reg_vals->pixel_format == DSC_PIXFMT_YCBCR444 ||
+ dsc_reg_vals->pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422;
+
+ return true;
+}
+
+
+enum dsc_pixel_format dsc_dc_pixel_encoding_to_dsc_pixel_format(enum dc_pixel_encoding dc_pix_enc, bool is_ycbcr422_simple)
+{
+ enum dsc_pixel_format dsc_pix_fmt = DSC_PIXFMT_UNKNOWN;
+
+ /* NOTE: We don't support DSC_PIXFMT_SIMPLE_YCBCR422 */
+
+ switch (dc_pix_enc) {
+ case PIXEL_ENCODING_RGB:
+ dsc_pix_fmt = DSC_PIXFMT_RGB;
+ break;
+ case PIXEL_ENCODING_YCBCR422:
+ if (is_ycbcr422_simple)
+ dsc_pix_fmt = DSC_PIXFMT_SIMPLE_YCBCR422;
+ else
+ dsc_pix_fmt = DSC_PIXFMT_NATIVE_YCBCR422;
+ break;
+ case PIXEL_ENCODING_YCBCR444:
+ dsc_pix_fmt = DSC_PIXFMT_YCBCR444;
+ break;
+ case PIXEL_ENCODING_YCBCR420:
+ dsc_pix_fmt = DSC_PIXFMT_NATIVE_YCBCR420;
+ break;
+ default:
+ dsc_pix_fmt = DSC_PIXFMT_UNKNOWN;
+ break;
+ }
+
+ ASSERT(dsc_pix_fmt != DSC_PIXFMT_UNKNOWN);
+ return dsc_pix_fmt;
+}
+
+
+enum dsc_bits_per_comp dsc_dc_color_depth_to_dsc_bits_per_comp(enum dc_color_depth dc_color_depth)
+{
+ enum dsc_bits_per_comp bpc = DSC_BPC_UNKNOWN;
+
+ switch (dc_color_depth) {
+ case COLOR_DEPTH_888:
+ bpc = DSC_BPC_8;
+ break;
+ case COLOR_DEPTH_101010:
+ bpc = DSC_BPC_10;
+ break;
+ case COLOR_DEPTH_121212:
+ bpc = DSC_BPC_12;
+ break;
+ default:
+ bpc = DSC_BPC_UNKNOWN;
+ break;
+ }
+
+ return bpc;
+}
+
+
+void dsc_init_reg_values(struct dsc_reg_values *reg_vals)
+{
+ int i;
+
+ memset(reg_vals, 0, sizeof(struct dsc_reg_values));
+
+ /* Non-PPS values */
+ reg_vals->dsc_clock_enable = 1;
+ reg_vals->dsc_clock_gating_disable = 0;
+ reg_vals->underflow_recovery_en = 0;
+ reg_vals->underflow_occurred_int_en = 0;
+ reg_vals->underflow_occurred_status = 0;
+ reg_vals->ich_reset_at_eol = 0;
+ reg_vals->alternate_ich_encoding_en = 0;
+ reg_vals->rc_buffer_model_size = 0;
+ /*reg_vals->disable_ich = 0;*/
+ reg_vals->dsc_dbg_en = 0;
+
+ for (i = 0; i < 4; i++)
+ reg_vals->rc_buffer_model_overflow_int_en[i] = 0;
+
+ /* PPS values */
+ reg_vals->pps.dsc_version_minor = 2;
+ reg_vals->pps.dsc_version_major = 1;
+ reg_vals->pps.line_buf_depth = 9;
+ reg_vals->pps.bits_per_component = 8;
+ reg_vals->pps.block_pred_enable = 1;
+ reg_vals->pps.slice_chunk_size = 0;
+ reg_vals->pps.pic_width = 0;
+ reg_vals->pps.pic_height = 0;
+ reg_vals->pps.slice_width = 0;
+ reg_vals->pps.slice_height = 0;
+ reg_vals->pps.initial_xmit_delay = 170;
+ reg_vals->pps.initial_dec_delay = 0;
+ reg_vals->pps.initial_scale_value = 0;
+ reg_vals->pps.scale_increment_interval = 0;
+ reg_vals->pps.scale_decrement_interval = 0;
+ reg_vals->pps.nfl_bpg_offset = 0;
+ reg_vals->pps.slice_bpg_offset = 0;
+ reg_vals->pps.nsl_bpg_offset = 0;
+ reg_vals->pps.initial_offset = 6144;
+ reg_vals->pps.final_offset = 0;
+ reg_vals->pps.flatness_min_qp = 3;
+ reg_vals->pps.flatness_max_qp = 12;
+ reg_vals->pps.rc_model_size = 8192;
+ reg_vals->pps.rc_edge_factor = 6;
+ reg_vals->pps.rc_quant_incr_limit0 = 11;
+ reg_vals->pps.rc_quant_incr_limit1 = 11;
+ reg_vals->pps.rc_tgt_offset_low = 3;
+ reg_vals->pps.rc_tgt_offset_high = 3;
+}
+
+/* Updates the dsc_reg_values fields based on the values from the computed params.
+ * This is required because dscc_compute_dsc_parameters returns a modified PPS, which in turn
+ * affects non-PPS register values.
+ */
+void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, const struct dsc_parameters *dsc_params)
+{
+ int i;
+
+ reg_vals->pps = dsc_params->pps;
+
+	// pps_computed will have the "expanded" values; they need to be shifted to fit the register fields.
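+	// e.g. an expanded rc_buf_thresh of 896 is stored as 896 >> 6 = 14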
+ for (i = 0; i < NUM_BUF_RANGES - 1; i++)
+ reg_vals->pps.rc_buf_thresh[i] = reg_vals->pps.rc_buf_thresh[i] >> 6;
+
+ reg_vals->rc_buffer_model_size = dsc_params->rc_buffer_model_size;
+}
+
+static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals)
+{
+ uint32_t temp_int;
+ struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
+
+ REG_SET(DSC_DEBUG_CONTROL, 0,
+ DSC_DBG_EN, reg_vals->dsc_dbg_en);
+
+ // dsccif registers
+ REG_SET_5(DSCCIF_CONFIG0, 0,
+ INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN, reg_vals->underflow_recovery_en,
+ INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN, reg_vals->underflow_occurred_int_en,
+ INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS, reg_vals->underflow_occurred_status,
+ INPUT_PIXEL_FORMAT, reg_vals->pixel_format,
+ DSCCIF_CONFIG0__BITS_PER_COMPONENT, reg_vals->pps.bits_per_component);
+
+ REG_SET_2(DSCCIF_CONFIG1, 0,
+ PIC_WIDTH, reg_vals->pps.pic_width,
+ PIC_HEIGHT, reg_vals->pps.pic_height);
+
+ // dscc registers
+ if (dsc20->dsc_mask->ICH_RESET_AT_END_OF_LINE == 0) {
+ REG_SET_3(DSCC_CONFIG0, 0,
+ NUMBER_OF_SLICES_PER_LINE, reg_vals->num_slices_h - 1,
+ ALTERNATE_ICH_ENCODING_EN, reg_vals->alternate_ich_encoding_en,
+ NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION, reg_vals->num_slices_v - 1);
+ } else {
+ REG_SET_4(DSCC_CONFIG0, 0, ICH_RESET_AT_END_OF_LINE,
+ reg_vals->ich_reset_at_eol, NUMBER_OF_SLICES_PER_LINE,
+ reg_vals->num_slices_h - 1, ALTERNATE_ICH_ENCODING_EN,
+ reg_vals->alternate_ich_encoding_en, NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION,
+ reg_vals->num_slices_v - 1);
+ }
+
+ REG_SET(DSCC_CONFIG1, 0,
+ DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, reg_vals->rc_buffer_model_size);
+ /*REG_SET_2(DSCC_CONFIG1, 0,
+ DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, reg_vals->rc_buffer_model_size,
+ DSCC_DISABLE_ICH, reg_vals->disable_ich);*/
+
+ REG_SET_4(DSCC_INTERRUPT_CONTROL_STATUS, 0,
+ DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN, reg_vals->rc_buffer_model_overflow_int_en[0],
+ DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN, reg_vals->rc_buffer_model_overflow_int_en[1],
+ DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN, reg_vals->rc_buffer_model_overflow_int_en[2],
+ DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN, reg_vals->rc_buffer_model_overflow_int_en[3]);
+
+ REG_SET_3(DSCC_PPS_CONFIG0, 0,
+ DSC_VERSION_MINOR, reg_vals->pps.dsc_version_minor,
+ LINEBUF_DEPTH, reg_vals->pps.line_buf_depth,
+ DSCC_PPS_CONFIG0__BITS_PER_COMPONENT, reg_vals->pps.bits_per_component);
+
+ if (reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420 || reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR422)
+ temp_int = reg_vals->bpp_x32;
+ else
+ temp_int = reg_vals->bpp_x32 >> 1;
+
+ REG_SET_7(DSCC_PPS_CONFIG1, 0,
+ BITS_PER_PIXEL, temp_int,
+ SIMPLE_422, reg_vals->pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422,
+ CONVERT_RGB, reg_vals->pixel_format == DSC_PIXFMT_RGB,
+ BLOCK_PRED_ENABLE, reg_vals->pps.block_pred_enable,
+ NATIVE_422, reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR422,
+ NATIVE_420, reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420,
+ CHUNK_SIZE, reg_vals->pps.slice_chunk_size);
+
+ REG_SET_2(DSCC_PPS_CONFIG2, 0,
+ PIC_WIDTH, reg_vals->pps.pic_width,
+ PIC_HEIGHT, reg_vals->pps.pic_height);
+
+ REG_SET_2(DSCC_PPS_CONFIG3, 0,
+ SLICE_WIDTH, reg_vals->pps.slice_width,
+ SLICE_HEIGHT, reg_vals->pps.slice_height);
+
+ REG_SET(DSCC_PPS_CONFIG4, 0,
+ INITIAL_XMIT_DELAY, reg_vals->pps.initial_xmit_delay);
+
+ REG_SET_2(DSCC_PPS_CONFIG5, 0,
+ INITIAL_SCALE_VALUE, reg_vals->pps.initial_scale_value,
+ SCALE_INCREMENT_INTERVAL, reg_vals->pps.scale_increment_interval);
+
+ REG_SET_3(DSCC_PPS_CONFIG6, 0,
+ SCALE_DECREMENT_INTERVAL, reg_vals->pps.scale_decrement_interval,
+ FIRST_LINE_BPG_OFFSET, reg_vals->pps.first_line_bpg_offset,
+ SECOND_LINE_BPG_OFFSET, reg_vals->pps.second_line_bpg_offset);
+
+ REG_SET_2(DSCC_PPS_CONFIG7, 0,
+ NFL_BPG_OFFSET, reg_vals->pps.nfl_bpg_offset,
+ SLICE_BPG_OFFSET, reg_vals->pps.slice_bpg_offset);
+
+ REG_SET_2(DSCC_PPS_CONFIG8, 0,
+ NSL_BPG_OFFSET, reg_vals->pps.nsl_bpg_offset,
+ SECOND_LINE_OFFSET_ADJ, reg_vals->pps.second_line_offset_adj);
+
+ REG_SET_2(DSCC_PPS_CONFIG9, 0,
+ INITIAL_OFFSET, reg_vals->pps.initial_offset,
+ FINAL_OFFSET, reg_vals->pps.final_offset);
+
+ REG_SET_3(DSCC_PPS_CONFIG10, 0,
+ FLATNESS_MIN_QP, reg_vals->pps.flatness_min_qp,
+ FLATNESS_MAX_QP, reg_vals->pps.flatness_max_qp,
+ RC_MODEL_SIZE, reg_vals->pps.rc_model_size);
+
+ REG_SET_5(DSCC_PPS_CONFIG11, 0,
+ RC_EDGE_FACTOR, reg_vals->pps.rc_edge_factor,
+ RC_QUANT_INCR_LIMIT0, reg_vals->pps.rc_quant_incr_limit0,
+ RC_QUANT_INCR_LIMIT1, reg_vals->pps.rc_quant_incr_limit1,
+ RC_TGT_OFFSET_LO, reg_vals->pps.rc_tgt_offset_low,
+ RC_TGT_OFFSET_HI, reg_vals->pps.rc_tgt_offset_high);
+
+ REG_SET_4(DSCC_PPS_CONFIG12, 0,
+ RC_BUF_THRESH0, reg_vals->pps.rc_buf_thresh[0],
+ RC_BUF_THRESH1, reg_vals->pps.rc_buf_thresh[1],
+ RC_BUF_THRESH2, reg_vals->pps.rc_buf_thresh[2],
+ RC_BUF_THRESH3, reg_vals->pps.rc_buf_thresh[3]);
+
+ REG_SET_4(DSCC_PPS_CONFIG13, 0,
+ RC_BUF_THRESH4, reg_vals->pps.rc_buf_thresh[4],
+ RC_BUF_THRESH5, reg_vals->pps.rc_buf_thresh[5],
+ RC_BUF_THRESH6, reg_vals->pps.rc_buf_thresh[6],
+ RC_BUF_THRESH7, reg_vals->pps.rc_buf_thresh[7]);
+
+ REG_SET_4(DSCC_PPS_CONFIG14, 0,
+ RC_BUF_THRESH8, reg_vals->pps.rc_buf_thresh[8],
+ RC_BUF_THRESH9, reg_vals->pps.rc_buf_thresh[9],
+ RC_BUF_THRESH10, reg_vals->pps.rc_buf_thresh[10],
+ RC_BUF_THRESH11, reg_vals->pps.rc_buf_thresh[11]);
+
+ REG_SET_5(DSCC_PPS_CONFIG15, 0,
+ RC_BUF_THRESH12, reg_vals->pps.rc_buf_thresh[12],
+ RC_BUF_THRESH13, reg_vals->pps.rc_buf_thresh[13],
+ RANGE_MIN_QP0, reg_vals->pps.rc_range_params[0].range_min_qp,
+ RANGE_MAX_QP0, reg_vals->pps.rc_range_params[0].range_max_qp,
+ RANGE_BPG_OFFSET0, reg_vals->pps.rc_range_params[0].range_bpg_offset);
+
+ REG_SET_6(DSCC_PPS_CONFIG16, 0,
+ RANGE_MIN_QP1, reg_vals->pps.rc_range_params[1].range_min_qp,
+ RANGE_MAX_QP1, reg_vals->pps.rc_range_params[1].range_max_qp,
+ RANGE_BPG_OFFSET1, reg_vals->pps.rc_range_params[1].range_bpg_offset,
+ RANGE_MIN_QP2, reg_vals->pps.rc_range_params[2].range_min_qp,
+ RANGE_MAX_QP2, reg_vals->pps.rc_range_params[2].range_max_qp,
+ RANGE_BPG_OFFSET2, reg_vals->pps.rc_range_params[2].range_bpg_offset);
+
+ REG_SET_6(DSCC_PPS_CONFIG17, 0,
+ RANGE_MIN_QP3, reg_vals->pps.rc_range_params[3].range_min_qp,
+ RANGE_MAX_QP3, reg_vals->pps.rc_range_params[3].range_max_qp,
+ RANGE_BPG_OFFSET3, reg_vals->pps.rc_range_params[3].range_bpg_offset,
+ RANGE_MIN_QP4, reg_vals->pps.rc_range_params[4].range_min_qp,
+ RANGE_MAX_QP4, reg_vals->pps.rc_range_params[4].range_max_qp,
+ RANGE_BPG_OFFSET4, reg_vals->pps.rc_range_params[4].range_bpg_offset);
+
+ REG_SET_6(DSCC_PPS_CONFIG18, 0,
+ RANGE_MIN_QP5, reg_vals->pps.rc_range_params[5].range_min_qp,
+ RANGE_MAX_QP5, reg_vals->pps.rc_range_params[5].range_max_qp,
+ RANGE_BPG_OFFSET5, reg_vals->pps.rc_range_params[5].range_bpg_offset,
+ RANGE_MIN_QP6, reg_vals->pps.rc_range_params[6].range_min_qp,
+ RANGE_MAX_QP6, reg_vals->pps.rc_range_params[6].range_max_qp,
+ RANGE_BPG_OFFSET6, reg_vals->pps.rc_range_params[6].range_bpg_offset);
+
+ REG_SET_6(DSCC_PPS_CONFIG19, 0,
+ RANGE_MIN_QP7, reg_vals->pps.rc_range_params[7].range_min_qp,
+ RANGE_MAX_QP7, reg_vals->pps.rc_range_params[7].range_max_qp,
+ RANGE_BPG_OFFSET7, reg_vals->pps.rc_range_params[7].range_bpg_offset,
+ RANGE_MIN_QP8, reg_vals->pps.rc_range_params[8].range_min_qp,
+ RANGE_MAX_QP8, reg_vals->pps.rc_range_params[8].range_max_qp,
+ RANGE_BPG_OFFSET8, reg_vals->pps.rc_range_params[8].range_bpg_offset);
+
+ REG_SET_6(DSCC_PPS_CONFIG20, 0,
+ RANGE_MIN_QP9, reg_vals->pps.rc_range_params[9].range_min_qp,
+ RANGE_MAX_QP9, reg_vals->pps.rc_range_params[9].range_max_qp,
+ RANGE_BPG_OFFSET9, reg_vals->pps.rc_range_params[9].range_bpg_offset,
+ RANGE_MIN_QP10, reg_vals->pps.rc_range_params[10].range_min_qp,
+ RANGE_MAX_QP10, reg_vals->pps.rc_range_params[10].range_max_qp,
+ RANGE_BPG_OFFSET10, reg_vals->pps.rc_range_params[10].range_bpg_offset);
+
+ REG_SET_6(DSCC_PPS_CONFIG21, 0,
+ RANGE_MIN_QP11, reg_vals->pps.rc_range_params[11].range_min_qp,
+ RANGE_MAX_QP11, reg_vals->pps.rc_range_params[11].range_max_qp,
+ RANGE_BPG_OFFSET11, reg_vals->pps.rc_range_params[11].range_bpg_offset,
+ RANGE_MIN_QP12, reg_vals->pps.rc_range_params[12].range_min_qp,
+ RANGE_MAX_QP12, reg_vals->pps.rc_range_params[12].range_max_qp,
+ RANGE_BPG_OFFSET12, reg_vals->pps.rc_range_params[12].range_bpg_offset);
+
+ REG_SET_6(DSCC_PPS_CONFIG22, 0,
+ RANGE_MIN_QP13, reg_vals->pps.rc_range_params[13].range_min_qp,
+ RANGE_MAX_QP13, reg_vals->pps.rc_range_params[13].range_max_qp,
+ RANGE_BPG_OFFSET13, reg_vals->pps.rc_range_params[13].range_bpg_offset,
+ RANGE_MIN_QP14, reg_vals->pps.rc_range_params[14].range_min_qp,
+ RANGE_MAX_QP14, reg_vals->pps.rc_range_params[14].range_max_qp,
+ RANGE_BPG_OFFSET14, reg_vals->pps.rc_range_params[14].range_bpg_offset);
+
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
new file mode 100644
index 0000000000..ba869387c3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
@@ -0,0 +1,589 @@
+/* Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DCN20_DSC_H__
+#define __DCN20_DSC_H__
+
+#include "dsc.h"
+#include "dsc/dscc_types.h"
+#include <drm/display/drm_dsc.h>
+
+#define TO_DCN20_DSC(dsc)\
+ container_of(dsc, struct dcn20_dsc, base)
+
+#define DSC_REG_LIST_DCN20(id) \
+ SRI(DSC_TOP_CONTROL, DSC_TOP, id),\
+ SRI(DSC_DEBUG_CONTROL, DSC_TOP, id),\
+ SRI(DSCC_CONFIG0, DSCC, id),\
+ SRI(DSCC_CONFIG1, DSCC, id),\
+ SRI(DSCC_STATUS, DSCC, id),\
+ SRI(DSCC_INTERRUPT_CONTROL_STATUS, DSCC, id),\
+ SRI(DSCC_PPS_CONFIG0, DSCC, id),\
+ SRI(DSCC_PPS_CONFIG1, DSCC, id),\
+ SRI(DSCC_PPS_CONFIG2, DSCC, id),\
+ SRI(DSCC_PPS_CONFIG3, DSCC, id),\
+ SRI(DSCC_PPS_CONFIG4, DSCC, id),\
+ SRI(DSCC_PPS_CONFIG5, DSCC, id),\
+ SRI(DSCC_PPS_CONFIG6, DSCC, id),\
+ SRI(DSCC_PPS_CONFIG7, DSCC, id),\
+ SRI(DSCC_PPS_CONFIG8, DSCC, id),\
+ SRI(DSCC_PPS_CONFIG9, DSCC, id),\
+ SRI(DSCC_PPS_CONFIG10, DSCC, id),\
+ SRI(DSCC_PPS_CONFIG11, DSCC, id),\
+ SRI(DSCC_PPS_CONFIG12, DSCC, id),\
+ SRI(DSCC_PPS_CONFIG13, DSCC, id),\
+ SRI(DSCC_PPS_CONFIG14, DSCC, id),\
+ SRI(DSCC_PPS_CONFIG15, DSCC, id),\
+ SRI(DSCC_PPS_CONFIG16, DSCC, id),\
+ SRI(DSCC_PPS_CONFIG17, DSCC, id),\
+ SRI(DSCC_PPS_CONFIG18, DSCC, id),\
+ SRI(DSCC_PPS_CONFIG19, DSCC, id),\
+ SRI(DSCC_PPS_CONFIG20, DSCC, id),\
+ SRI(DSCC_PPS_CONFIG21, DSCC, id),\
+ SRI(DSCC_PPS_CONFIG22, DSCC, id),\
+ SRI(DSCC_MEM_POWER_CONTROL, DSCC, id),\
+ SRI(DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC, id),\
+ SRI(DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC, id),\
+ SRI(DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC, id),\
+ SRI(DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC, id),\
+ SRI(DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC, id),\
+ SRI(DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC, id),\
+ SRI(DSCC_MAX_ABS_ERROR0, DSCC, id),\
+ SRI(DSCC_MAX_ABS_ERROR1, DSCC, id),\
+ SRI(DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id),\
+ SRI(DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id),\
+ SRI(DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id),\
+ SRI(DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id),\
+ SRI(DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id),\
+ SRI(DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id),\
+ SRI(DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id),\
+ SRI(DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id),\
+ SRI(DSCCIF_CONFIG0, DSCCIF, id),\
+ SRI(DSCCIF_CONFIG1, DSCCIF, id),\
+ SRI(DSCRM_DSC_FORWARD_CONFIG, DSCRM, id)
+
+
+#define DSC_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+// Used to resolve the corner case of duplicate field names
+#define DSC2_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## _ ## field_name ## post_fix
+
+#define DSC_REG_LIST_SH_MASK_DCN20(mask_sh)\
+ DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_CLOCK_EN, mask_sh), \
+ DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DISPCLK_R_GATE_DIS, mask_sh), \
+ DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DSCCLK_R_GATE_DIS, mask_sh), \
+ DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_DBG_EN, mask_sh), \
+ DSC_SF(DSCC0_DSCC_CONFIG0, ICH_RESET_AT_END_OF_LINE, mask_sh), \
+ DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_PER_LINE, mask_sh), \
+ DSC_SF(DSCC0_DSCC_CONFIG0, ALTERNATE_ICH_ENCODING_EN, mask_sh), \
+ DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION, mask_sh), \
+ DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, mask_sh), \
+ /*DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_DISABLE_ICH, mask_sh),*/ \
+ DSC_SF(DSCC0_DSCC_STATUS, DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \
+ DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED, mask_sh), \
+ DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED, mask_sh), \
+ DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED, mask_sh), \
+ DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED, mask_sh), \
+ DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED, mask_sh), \
+ DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED, mask_sh), \
+ DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED, mask_sh), \
+ DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED, mask_sh), \
+ DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED, mask_sh), \
+ DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED, mask_sh), \
+ DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED, mask_sh), \
+ DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED, mask_sh), \
+ DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
+ DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
+ DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
+ DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
+ DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
+ DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
+ DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
+ DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
+ DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
+ DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
+ DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
+ DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG0, DSC_VERSION_MINOR, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG0, DSC_VERSION_MAJOR, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG0, PPS_IDENTIFIER, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG0, LINEBUF_DEPTH, mask_sh), \
+ DSC2_SF(DSCC0, DSCC_PPS_CONFIG0__BITS_PER_COMPONENT, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG1, BITS_PER_PIXEL, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG1, VBR_ENABLE, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG1, SIMPLE_422, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG1, CONVERT_RGB, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG1, BLOCK_PRED_ENABLE, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG1, NATIVE_422, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG1, NATIVE_420, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG1, CHUNK_SIZE, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG2, PIC_WIDTH, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG2, PIC_HEIGHT, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG3, SLICE_WIDTH, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG3, SLICE_HEIGHT, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG4, INITIAL_XMIT_DELAY, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG4, INITIAL_DEC_DELAY, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG5, INITIAL_SCALE_VALUE, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG5, SCALE_INCREMENT_INTERVAL, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG6, SCALE_DECREMENT_INTERVAL, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG6, FIRST_LINE_BPG_OFFSET, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG6, SECOND_LINE_BPG_OFFSET, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG7, NFL_BPG_OFFSET, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG8, NSL_BPG_OFFSET, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG8, SECOND_LINE_OFFSET_ADJ, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG9, INITIAL_OFFSET, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG9, FINAL_OFFSET, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG10, FLATNESS_MIN_QP, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG10, FLATNESS_MAX_QP, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG10, RC_MODEL_SIZE, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_EDGE_FACTOR, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_QUANT_INCR_LIMIT0, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_QUANT_INCR_LIMIT1, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_TGT_OFFSET_LO, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_TGT_OFFSET_HI, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH0, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH1, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH2, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH3, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH4, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH5, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH6, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH7, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH8, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH9, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH10, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH11, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RC_BUF_THRESH12, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RC_BUF_THRESH13, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_MIN_QP0, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_MAX_QP0, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_BPG_OFFSET0, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MIN_QP1, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MAX_QP1, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_BPG_OFFSET1, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MIN_QP2, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MAX_QP2, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_BPG_OFFSET2, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MIN_QP3, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MAX_QP3, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_BPG_OFFSET3, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MIN_QP4, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MAX_QP4, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_BPG_OFFSET4, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MIN_QP5, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MAX_QP5, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_BPG_OFFSET5, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MIN_QP6, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MAX_QP6, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_BPG_OFFSET6, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MIN_QP7, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MAX_QP7, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_BPG_OFFSET7, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MIN_QP8, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MAX_QP8, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_BPG_OFFSET8, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MIN_QP9, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MAX_QP9, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_BPG_OFFSET9, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MIN_QP10, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MAX_QP10, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_BPG_OFFSET10, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MIN_QP11, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MAX_QP11, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_BPG_OFFSET11, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MIN_QP12, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MAX_QP12, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_BPG_OFFSET12, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MIN_QP13, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MAX_QP13, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_BPG_OFFSET13, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MIN_QP14, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MAX_QP14, mask_sh), \
+ DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_BPG_OFFSET14, mask_sh), \
+ DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_DEFAULT_MEM_LOW_POWER_STATE, mask_sh), \
+ DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_FORCE, mask_sh), \
+ DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_DIS, mask_sh), \
+ DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_STATE, mask_sh), \
+ DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_FORCE, mask_sh), \
+ DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_DIS, mask_sh), \
+ DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_STATE, mask_sh), \
+ DSC_SF(DSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC_R_Y_SQUARED_ERROR_LOWER, mask_sh), \
+ DSC_SF(DSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC_R_Y_SQUARED_ERROR_UPPER, mask_sh), \
+ DSC_SF(DSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC_G_CB_SQUARED_ERROR_LOWER, mask_sh), \
+ DSC_SF(DSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC_G_CB_SQUARED_ERROR_UPPER, mask_sh), \
+ DSC_SF(DSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC_B_CR_SQUARED_ERROR_LOWER, mask_sh), \
+ DSC_SF(DSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC_B_CR_SQUARED_ERROR_UPPER, mask_sh), \
+ DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR0, DSCC_R_Y_MAX_ABS_ERROR, mask_sh), \
+ DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR0, DSCC_G_CB_MAX_ABS_ERROR, mask_sh), \
+ DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR1, DSCC_B_CR_MAX_ABS_ERROR, mask_sh), \
+ DSC_SF(DSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, mask_sh), \
+ DSC_SF(DSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, mask_sh), \
+ DSC_SF(DSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, mask_sh), \
+ DSC_SF(DSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, mask_sh), \
+ DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, mask_sh), \
+ DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, mask_sh), \
+ DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, mask_sh), \
+ DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, mask_sh), \
+ DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN, mask_sh), \
+ DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
+ DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS, mask_sh), \
+ DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_PIXEL_FORMAT, mask_sh), \
+ DSC2_SF(DSCCIF0, DSCCIF_CONFIG0__BITS_PER_COMPONENT, mask_sh), \
+ DSC_SF(DSCCIF0_DSCCIF_CONFIG0, DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \
+ DSC_SF(DSCCIF0_DSCCIF_CONFIG1, PIC_WIDTH, mask_sh), \
+ DSC_SF(DSCCIF0_DSCCIF_CONFIG1, PIC_HEIGHT, mask_sh), \
+ DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, mask_sh), \
+ DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_OPP_PIPE_SOURCE, mask_sh)
+
+
+
+#define DSC_FIELD_LIST_DCN20(type)\
+ type DSC_CLOCK_EN; \
+ type DSC_DISPCLK_R_GATE_DIS; \
+ type DSC_DSCCLK_R_GATE_DIS; \
+ type DSC_DBG_EN; \
+ type DSC_TEST_CLOCK_MUX_SEL; \
+ type ICH_RESET_AT_END_OF_LINE; \
+ type NUMBER_OF_SLICES_PER_LINE; \
+ type ALTERNATE_ICH_ENCODING_EN; \
+ type NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION; \
+ type DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE; \
+ /*type DSCC_DISABLE_ICH;*/ \
+ type DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING; \
+ type DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED; \
+ type DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED; \
+ type DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED; \
+ type DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED; \
+ type DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED; \
+ type DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED; \
+ type DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED; \
+ type DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED; \
+ type DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED; \
+ type DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED; \
+ type DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED; \
+ type DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED; \
+ type DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN; \
+ type DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN; \
+ type DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN; \
+ type DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN; \
+ type DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN; \
+ type DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN; \
+ type DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN; \
+ type DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN; \
+ type DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN; \
+ type DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN; \
+ type DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN; \
+ type DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN; \
+ type DSC_VERSION_MINOR; \
+ type DSC_VERSION_MAJOR; \
+ type PPS_IDENTIFIER; \
+ type LINEBUF_DEPTH; \
+ type DSCC_PPS_CONFIG0__BITS_PER_COMPONENT; \
+ type BITS_PER_PIXEL; \
+ type VBR_ENABLE; \
+ type SIMPLE_422; \
+ type CONVERT_RGB; \
+ type BLOCK_PRED_ENABLE; \
+ type NATIVE_422; \
+ type NATIVE_420; \
+ type CHUNK_SIZE; \
+ type PIC_WIDTH; \
+ type PIC_HEIGHT; \
+ type SLICE_WIDTH; \
+ type SLICE_HEIGHT; \
+ type INITIAL_XMIT_DELAY; \
+ type INITIAL_DEC_DELAY; \
+ type INITIAL_SCALE_VALUE; \
+ type SCALE_INCREMENT_INTERVAL; \
+ type SCALE_DECREMENT_INTERVAL; \
+ type FIRST_LINE_BPG_OFFSET; \
+ type SECOND_LINE_BPG_OFFSET; \
+ type NFL_BPG_OFFSET; \
+ type SLICE_BPG_OFFSET; \
+ type NSL_BPG_OFFSET; \
+ type SECOND_LINE_OFFSET_ADJ; \
+ type INITIAL_OFFSET; \
+ type FINAL_OFFSET; \
+ type FLATNESS_MIN_QP; \
+ type FLATNESS_MAX_QP; \
+ type RC_MODEL_SIZE; \
+ type RC_EDGE_FACTOR; \
+ type RC_QUANT_INCR_LIMIT0; \
+ type RC_QUANT_INCR_LIMIT1; \
+ type RC_TGT_OFFSET_LO; \
+ type RC_TGT_OFFSET_HI; \
+ type RC_BUF_THRESH0; \
+ type RC_BUF_THRESH1; \
+ type RC_BUF_THRESH2; \
+ type RC_BUF_THRESH3; \
+ type RC_BUF_THRESH4; \
+ type RC_BUF_THRESH5; \
+ type RC_BUF_THRESH6; \
+ type RC_BUF_THRESH7; \
+ type RC_BUF_THRESH8; \
+ type RC_BUF_THRESH9; \
+ type RC_BUF_THRESH10; \
+ type RC_BUF_THRESH11; \
+ type RC_BUF_THRESH12; \
+ type RC_BUF_THRESH13; \
+ type RANGE_MIN_QP0; \
+ type RANGE_MAX_QP0; \
+ type RANGE_BPG_OFFSET0; \
+ type RANGE_MIN_QP1; \
+ type RANGE_MAX_QP1; \
+ type RANGE_BPG_OFFSET1; \
+ type RANGE_MIN_QP2; \
+ type RANGE_MAX_QP2; \
+ type RANGE_BPG_OFFSET2; \
+ type RANGE_MIN_QP3; \
+ type RANGE_MAX_QP3; \
+ type RANGE_BPG_OFFSET3; \
+ type RANGE_MIN_QP4; \
+ type RANGE_MAX_QP4; \
+ type RANGE_BPG_OFFSET4; \
+ type RANGE_MIN_QP5; \
+ type RANGE_MAX_QP5; \
+ type RANGE_BPG_OFFSET5; \
+ type RANGE_MIN_QP6; \
+ type RANGE_MAX_QP6; \
+ type RANGE_BPG_OFFSET6; \
+ type RANGE_MIN_QP7; \
+ type RANGE_MAX_QP7; \
+ type RANGE_BPG_OFFSET7; \
+ type RANGE_MIN_QP8; \
+ type RANGE_MAX_QP8; \
+ type RANGE_BPG_OFFSET8; \
+ type RANGE_MIN_QP9; \
+ type RANGE_MAX_QP9; \
+ type RANGE_BPG_OFFSET9; \
+ type RANGE_MIN_QP10; \
+ type RANGE_MAX_QP10; \
+ type RANGE_BPG_OFFSET10; \
+ type RANGE_MIN_QP11; \
+ type RANGE_MAX_QP11; \
+ type RANGE_BPG_OFFSET11; \
+ type RANGE_MIN_QP12; \
+ type RANGE_MAX_QP12; \
+ type RANGE_BPG_OFFSET12; \
+ type RANGE_MIN_QP13; \
+ type RANGE_MAX_QP13; \
+ type RANGE_BPG_OFFSET13; \
+ type RANGE_MIN_QP14; \
+ type RANGE_MAX_QP14; \
+ type RANGE_BPG_OFFSET14; \
+ type DSCC_DEFAULT_MEM_LOW_POWER_STATE; \
+ type DSCC_MEM_PWR_FORCE; \
+ type DSCC_MEM_PWR_DIS; \
+ type DSCC_MEM_PWR_STATE; \
+ type DSCC_NATIVE_422_MEM_PWR_FORCE; \
+ type DSCC_NATIVE_422_MEM_PWR_DIS; \
+ type DSCC_NATIVE_422_MEM_PWR_STATE; \
+ type DSCC_R_Y_SQUARED_ERROR_LOWER; \
+ type DSCC_R_Y_SQUARED_ERROR_UPPER; \
+ type DSCC_G_CB_SQUARED_ERROR_LOWER; \
+ type DSCC_G_CB_SQUARED_ERROR_UPPER; \
+ type DSCC_B_CR_SQUARED_ERROR_LOWER; \
+ type DSCC_B_CR_SQUARED_ERROR_UPPER; \
+ type DSCC_R_Y_MAX_ABS_ERROR; \
+ type DSCC_G_CB_MAX_ABS_ERROR; \
+ type DSCC_B_CR_MAX_ABS_ERROR; \
+ type DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL; \
+ type DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL; \
+ type DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL; \
+ type DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL; \
+ type DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL; \
+ type DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL; \
+ type DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL; \
+ type DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL; \
+ type DSCC_UPDATE_PENDING_STATUS; \
+ type DSCC_UPDATE_TAKEN_STATUS; \
+ type DSCC_UPDATE_TAKEN_ACK; \
+ type DSCC_RATE_BUFFER0_FULLNESS_LEVEL; \
+ type DSCC_RATE_BUFFER1_FULLNESS_LEVEL; \
+ type DSCC_RATE_BUFFER2_FULLNESS_LEVEL; \
+ type DSCC_RATE_BUFFER3_FULLNESS_LEVEL; \
+ type DSCC_RATE_CONTROL_BUFFER0_FULLNESS_LEVEL; \
+ type DSCC_RATE_CONTROL_BUFFER1_FULLNESS_LEVEL; \
+ type DSCC_RATE_CONTROL_BUFFER2_FULLNESS_LEVEL; \
+ type DSCC_RATE_CONTROL_BUFFER3_FULLNESS_LEVEL; \
+ type DSCC_RATE_BUFFER0_INITIAL_XMIT_DELAY_REACHED; \
+ type DSCC_RATE_BUFFER1_INITIAL_XMIT_DELAY_REACHED; \
+ type DSCC_RATE_BUFFER2_INITIAL_XMIT_DELAY_REACHED; \
+ type DSCC_RATE_BUFFER3_INITIAL_XMIT_DELAY_REACHED; \
+ type INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN; \
+ type INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN; \
+ type INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS; \
+ type INPUT_PIXEL_FORMAT; \
+ type DSCCIF_CONFIG0__BITS_PER_COMPONENT; \
+ type DOUBLE_BUFFER_REG_UPDATE_PENDING; \
+ type DSCCIF_UPDATE_PENDING_STATUS; \
+ type DSCCIF_UPDATE_TAKEN_STATUS; \
+ type DSCCIF_UPDATE_TAKEN_ACK; \
+ type DSCRM_DSC_FORWARD_EN; \
+ type DSCRM_DSC_OPP_PIPE_SOURCE
+
+struct dcn20_dsc_registers {
+ uint32_t DSC_TOP_CONTROL;
+ uint32_t DSC_DEBUG_CONTROL;
+ uint32_t DSCC_CONFIG0;
+ uint32_t DSCC_CONFIG1;
+ uint32_t DSCC_STATUS;
+ uint32_t DSCC_INTERRUPT_CONTROL_STATUS;
+ uint32_t DSCC_PPS_CONFIG0;
+ uint32_t DSCC_PPS_CONFIG1;
+ uint32_t DSCC_PPS_CONFIG2;
+ uint32_t DSCC_PPS_CONFIG3;
+ uint32_t DSCC_PPS_CONFIG4;
+ uint32_t DSCC_PPS_CONFIG5;
+ uint32_t DSCC_PPS_CONFIG6;
+ uint32_t DSCC_PPS_CONFIG7;
+ uint32_t DSCC_PPS_CONFIG8;
+ uint32_t DSCC_PPS_CONFIG9;
+ uint32_t DSCC_PPS_CONFIG10;
+ uint32_t DSCC_PPS_CONFIG11;
+ uint32_t DSCC_PPS_CONFIG12;
+ uint32_t DSCC_PPS_CONFIG13;
+ uint32_t DSCC_PPS_CONFIG14;
+ uint32_t DSCC_PPS_CONFIG15;
+ uint32_t DSCC_PPS_CONFIG16;
+ uint32_t DSCC_PPS_CONFIG17;
+ uint32_t DSCC_PPS_CONFIG18;
+ uint32_t DSCC_PPS_CONFIG19;
+ uint32_t DSCC_PPS_CONFIG20;
+ uint32_t DSCC_PPS_CONFIG21;
+ uint32_t DSCC_PPS_CONFIG22;
+ uint32_t DSCC_MEM_POWER_CONTROL;
+ uint32_t DSCC_R_Y_SQUARED_ERROR_LOWER;
+ uint32_t DSCC_R_Y_SQUARED_ERROR_UPPER;
+ uint32_t DSCC_G_CB_SQUARED_ERROR_LOWER;
+ uint32_t DSCC_G_CB_SQUARED_ERROR_UPPER;
+ uint32_t DSCC_B_CR_SQUARED_ERROR_LOWER;
+ uint32_t DSCC_B_CR_SQUARED_ERROR_UPPER;
+ uint32_t DSCC_MAX_ABS_ERROR0;
+ uint32_t DSCC_MAX_ABS_ERROR1;
+ uint32_t DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL;
+ uint32_t DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL;
+ uint32_t DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL;
+ uint32_t DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL;
+ uint32_t DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL;
+ uint32_t DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL;
+ uint32_t DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL;
+ uint32_t DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL;
+ uint32_t DSCCIF_CONFIG0;
+ uint32_t DSCCIF_CONFIG1;
+ uint32_t DSCRM_DSC_FORWARD_CONFIG;
+};
+
+
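+/*
+ * DSC_FIELD_LIST_DCN20() is expanded twice below: once with uint8_t to hold
+ * the per-field bit shifts and once with uint32_t to hold the matching bit
+ * masks.  The DSC_SF()-style initializers above and the REG_*() helpers from
+ * reg_helper.h consume these two parallel structures when reading and
+ * writing individual register fields.
+ */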
+struct dcn20_dsc_shift {
+ DSC_FIELD_LIST_DCN20(uint8_t);
+};
+
+struct dcn20_dsc_mask {
+ DSC_FIELD_LIST_DCN20(uint32_t);
+};
+
+/* DSCCIF_CONFIG.INPUT_PIXEL_FORMAT values */
+enum dsc_pixel_format {
+ DSC_PIXFMT_RGB,
+ DSC_PIXFMT_YCBCR444,
+ DSC_PIXFMT_SIMPLE_YCBCR422,
+ DSC_PIXFMT_NATIVE_YCBCR422,
+ DSC_PIXFMT_NATIVE_YCBCR420,
+ DSC_PIXFMT_UNKNOWN
+};
+
+struct dsc_reg_values {
+ /* PPS registers */
+ struct drm_dsc_config pps;
+
+ /* Additional registers */
+ uint32_t dsc_clock_enable;
+ uint32_t dsc_clock_gating_disable;
+ uint32_t underflow_recovery_en;
+ uint32_t underflow_occurred_int_en;
+ uint32_t underflow_occurred_status;
+ enum dsc_pixel_format pixel_format;
+ uint32_t ich_reset_at_eol;
+ uint32_t alternate_ich_encoding_en;
+ uint32_t num_slices_h;
+ uint32_t num_slices_v;
+ uint32_t rc_buffer_model_size;
+ uint32_t disable_ich;
+ uint32_t bpp_x32;
+ uint32_t dsc_dbg_en;
+ uint32_t rc_buffer_model_overflow_int_en[4];
+};
+
+struct dcn20_dsc {
+ struct display_stream_compressor base;
+ const struct dcn20_dsc_registers *dsc_regs;
+ const struct dcn20_dsc_shift *dsc_shift;
+ const struct dcn20_dsc_mask *dsc_mask;
+
+ struct dsc_reg_values reg_vals;
+
+ int max_image_width;
+};
+
+void dsc_config_log(struct display_stream_compressor *dsc,
+ const struct dsc_config *config);
+
+void dsc_log_pps(struct display_stream_compressor *dsc,
+ struct drm_dsc_config *pps);
+
+void dsc_override_rc_params(struct rc_params *rc,
+ const struct dc_dsc_rc_params_override *override);
+
+bool dsc_prepare_config(const struct dsc_config *dsc_cfg,
+ struct dsc_reg_values *dsc_reg_vals,
+ struct dsc_optc_config *dsc_optc_cfg);
+
+enum dsc_pixel_format dsc_dc_pixel_encoding_to_dsc_pixel_format(enum dc_pixel_encoding dc_pix_enc,
+ bool is_ycbcr422_simple);
+
+enum dsc_bits_per_comp dsc_dc_color_depth_to_dsc_bits_per_comp(enum dc_color_depth dc_color_depth);
+
+void dsc_init_reg_values(struct dsc_reg_values *reg_vals);
+
+void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, const struct dsc_parameters *dsc_params);
+
+void dsc2_construct(struct dcn20_dsc *dsc,
+ struct dc_context *ctx,
+ int inst,
+ const struct dcn20_dsc_registers *dsc_regs,
+ const struct dcn20_dsc_shift *dsc_shift,
+ const struct dcn20_dsc_mask *dsc_mask);
+
+void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps,
+ int pixel_clock_100Hz);
+
+bool dsc2_get_packed_pps(struct display_stream_compressor *dsc,
+ const struct dsc_config *dsc_cfg,
+ uint8_t *dsc_packed_pps);
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c
new file mode 100644
index 0000000000..f8667be570
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c
@@ -0,0 +1,332 @@
+/*
+ * Copyright 2012-17 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "reg_helper.h"
+#include "resource.h"
+#include "dwb.h"
+#include "dcn20_dwb.h"
+
+
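+/*
+ * DWB (display writeback) captures the composited pipe output back into
+ * memory.  This file programs the CNV sub-block for capture windowing,
+ * cropping and output format, and WBSCL (the writeback scaler) for the
+ * chroma sub-sampling case; see dwb2_enable()/dwb2_update() below.
+ */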
+#define REG(reg)\
+ dwbc20->dwbc_regs->reg
+
+#define CTX \
+ dwbc20->base.ctx
+
+#define DC_LOGGER \
+ dwbc20->base.ctx->logger
+#undef FN
+#define FN(reg_name, field_name) \
+ dwbc20->dwbc_shift->field_name, dwbc20->dwbc_mask->field_name
+
+enum dwb_outside_pix_strategy {
+ DWB_OUTSIDE_PIX_STRATEGY_BLACK = 0,
+ DWB_OUTSIDE_PIX_STRATEGY_EDGE = 1
+};
+
+static bool dwb2_get_caps(struct dwbc *dwbc, struct dwb_caps *caps)
+{
+ struct dcn20_dwbc *dwbc20 = TO_DCN20_DWBC(dwbc);
+ if (caps) {
+ caps->adapter_id = 0; /* we only support 1 adapter currently */
+ caps->hw_version = DCN_VERSION_2_0;
+ caps->num_pipes = 1;
+ memset(&caps->reserved, 0, sizeof(caps->reserved));
+ memset(&caps->reserved2, 0, sizeof(caps->reserved2));
+ caps->sw_version = dwb_ver_1_0;
+ caps->caps.support_dwb = true;
+ caps->caps.support_ogam = false;
+ caps->caps.support_wbscl = false;
+ caps->caps.support_ocsc = false;
+ DC_LOG_DWB("%s SUPPORTED! inst = %d", __func__, dwbc20->base.inst);
+ return true;
+ } else {
+ DC_LOG_DWB("%s NOT SUPPORTED! inst = %d", __func__, dwbc20->base.inst);
+ return false;
+ }
+}
+
+void dwb2_config_dwb_cnv(struct dwbc *dwbc, struct dc_dwb_params *params)
+{
+ struct dcn20_dwbc *dwbc20 = TO_DCN20_DWBC(dwbc);
+ DC_LOG_DWB("%s inst = %d", __func__, dwbc20->base.inst);
+
+ /* Set DWB source size */
+ REG_UPDATE_2(CNV_SOURCE_SIZE, CNV_SOURCE_WIDTH, params->cnv_params.src_width,
+ CNV_SOURCE_HEIGHT, params->cnv_params.src_height);
+
+	/* If cropping is requested, program the crop window; otherwise disable it. */
+ if (params->cnv_params.crop_en) {
+ REG_UPDATE(CNV_MODE, CNV_WINDOW_CROP_EN, 1);
+ REG_UPDATE(CNV_WINDOW_START, CNV_WINDOW_START_X, params->cnv_params.crop_x);
+ REG_UPDATE(CNV_WINDOW_START, CNV_WINDOW_START_Y, params->cnv_params.crop_y);
+ REG_UPDATE(CNV_WINDOW_SIZE, CNV_WINDOW_WIDTH, params->cnv_params.crop_width);
+ REG_UPDATE(CNV_WINDOW_SIZE, CNV_WINDOW_HEIGHT, params->cnv_params.crop_height);
+ } else {
+ REG_UPDATE(CNV_MODE, CNV_WINDOW_CROP_EN, 0);
+ }
+
+ /* Set CAPTURE_RATE */
+ REG_UPDATE(CNV_MODE, CNV_FRAME_CAPTURE_RATE, params->capture_rate);
+
+ /* Set CNV output pixel depth */
+ REG_UPDATE(CNV_MODE, CNV_OUT_BPC, params->cnv_params.cnv_out_bpc);
+}
+
+static bool dwb2_enable(struct dwbc *dwbc, struct dc_dwb_params *params)
+{
+ struct dcn20_dwbc *dwbc20 = TO_DCN20_DWBC(dwbc);
+
+ /* Only chroma scaling (sub-sampling) is supported in DCN2 */
+ if ((params->cnv_params.src_width != params->dest_width) ||
+ (params->cnv_params.src_height != params->dest_height)) {
+
+		DC_LOG_DWB("%s inst = %d, FAILED! LUMA SCALING NOT SUPPORTED", __func__, dwbc20->base.inst);
+ return false;
+ }
+ DC_LOG_DWB("%s inst = %d, ENABLED", __func__, dwbc20->base.inst);
+
+ /* disable power gating */
+ //REG_UPDATE_5(WB_EC_CONFIG, DISPCLK_R_WB_GATE_DIS, 1,
+ // DISPCLK_G_WB_GATE_DIS, 1, DISPCLK_G_WBSCL_GATE_DIS, 1,
+ // WB_LB_LS_DIS, 1, WB_LUT_LS_DIS, 1);
+
+ /* Set WB_ENABLE (not double buffered; capture not enabled) */
+ REG_UPDATE(WB_ENABLE, WB_ENABLE, 1);
+
+ /* Set CNV parameters */
+ dwb2_config_dwb_cnv(dwbc, params);
+
+ /* Set scaling parameters */
+ dwb2_set_scaler(dwbc, params);
+
+	/* Enable DWB frame capture (double buffered) */
+ REG_UPDATE(CNV_MODE, CNV_FRAME_CAPTURE_EN, DWB_FRAME_CAPTURE_ENABLE);
+
+	/* Disable warm-up mode */
+ REG_UPDATE(WB_WARM_UP_MODE_CTL1, GMC_WARM_UP_ENABLE, 0);
+
+ return true;
+}
+
+bool dwb2_disable(struct dwbc *dwbc)
+{
+ struct dcn20_dwbc *dwbc20 = TO_DCN20_DWBC(dwbc);
+ DC_LOG_DWB("%s inst = %d, Disabled", __func__, dwbc20->base.inst);
+
+ /* disable CNV */
+ REG_UPDATE(CNV_MODE, CNV_FRAME_CAPTURE_EN, DWB_FRAME_CAPTURE_DISABLE);
+
+ /* disable WB */
+ REG_UPDATE(WB_ENABLE, WB_ENABLE, 0);
+
+ /* soft reset */
+ REG_UPDATE(WB_SOFT_RESET, WB_SOFT_RESET, 1);
+ REG_UPDATE(WB_SOFT_RESET, WB_SOFT_RESET, 0);
+
+ /* enable power gating */
+ //REG_UPDATE_5(WB_EC_CONFIG, DISPCLK_R_WB_GATE_DIS, 0,
+ // DISPCLK_G_WB_GATE_DIS, 0, DISPCLK_G_WBSCL_GATE_DIS, 0,
+ // WB_LB_LS_DIS, 0, WB_LUT_LS_DIS, 0);
+
+ return true;
+}
+
+static bool dwb2_update(struct dwbc *dwbc, struct dc_dwb_params *params)
+{
+ struct dcn20_dwbc *dwbc20 = TO_DCN20_DWBC(dwbc);
+ unsigned int pre_locked;
+
+ /* Only chroma scaling (sub-sampling) is supported in DCN2 */
+ if ((params->cnv_params.src_width != params->dest_width) ||
+ (params->cnv_params.src_height != params->dest_height)) {
+		DC_LOG_DWB("%s inst = %d, FAILED! LUMA SCALING NOT SUPPORTED", __func__, dwbc20->base.inst);
+ return false;
+ }
+ DC_LOG_DWB("%s inst = %d, scaling", __func__, dwbc20->base.inst);
+
+ /*
+ * Check if the caller has already locked CNV registers.
+ * If so: assume the caller will unlock, so don't touch the lock.
+ * If not: lock them for this update, then unlock after the
+ * update is complete.
+ */
+ REG_GET(CNV_UPDATE, CNV_UPDATE_LOCK, &pre_locked);
+
+ if (pre_locked == 0) {
+ /* Lock DWB registers */
+ REG_UPDATE(CNV_UPDATE, CNV_UPDATE_LOCK, 1);
+ }
+
+ /* Set CNV parameters */
+ dwb2_config_dwb_cnv(dwbc, params);
+
+ /* Set scaling parameters */
+ dwb2_set_scaler(dwbc, params);
+
+ if (pre_locked == 0) {
+ /* Unlock DWB registers */
+ REG_UPDATE(CNV_UPDATE, CNV_UPDATE_LOCK, 0);
+ }
+
+ return true;
+}
+
+bool dwb2_is_enabled(struct dwbc *dwbc)
+{
+ struct dcn20_dwbc *dwbc20 = TO_DCN20_DWBC(dwbc);
+ unsigned int wb_enabled = 0;
+ unsigned int cnv_frame_capture_en = 0;
+
+ REG_GET(WB_ENABLE, WB_ENABLE, &wb_enabled);
+ REG_GET(CNV_MODE, CNV_FRAME_CAPTURE_EN, &cnv_frame_capture_en);
+
+ return ((wb_enabled != 0) && (cnv_frame_capture_en != 0));
+}
+
+void dwb2_set_stereo(struct dwbc *dwbc,
+ struct dwb_stereo_params *stereo_params)
+{
+ struct dcn20_dwbc *dwbc20 = TO_DCN20_DWBC(dwbc);
+	DC_LOG_DWB("%s inst = %d, enabled = %d", __func__,
+		dwbc20->base.inst, stereo_params->stereo_enabled);
+
+ if (stereo_params->stereo_enabled) {
+ REG_UPDATE(CNV_MODE, CNV_STEREO_TYPE, stereo_params->stereo_type);
+ REG_UPDATE(CNV_MODE, CNV_EYE_SELECTION, stereo_params->stereo_eye_select);
+ REG_UPDATE(CNV_MODE, CNV_STEREO_POLARITY, stereo_params->stereo_polarity);
+ } else {
+ REG_UPDATE(CNV_MODE, CNV_EYE_SELECTION, 0);
+ }
+}
+
+void dwb2_set_new_content(struct dwbc *dwbc,
+ bool is_new_content)
+{
+ struct dcn20_dwbc *dwbc20 = TO_DCN20_DWBC(dwbc);
+ DC_LOG_DWB("%s inst = %d", __func__, dwbc20->base.inst);
+
+ REG_UPDATE(CNV_MODE, CNV_NEW_CONTENT, is_new_content);
+}
+
+static void dwb2_set_warmup(struct dwbc *dwbc,
+ struct dwb_warmup_params *warmup_params)
+{
+ struct dcn20_dwbc *dwbc20 = TO_DCN20_DWBC(dwbc);
+ DC_LOG_DWB("%s inst = %d", __func__, dwbc20->base.inst);
+
+ REG_UPDATE(WB_WARM_UP_MODE_CTL1, GMC_WARM_UP_ENABLE, warmup_params->warmup_en);
+ REG_UPDATE(WB_WARM_UP_MODE_CTL1, WIDTH_WARMUP, warmup_params->warmup_width);
+ REG_UPDATE(WB_WARM_UP_MODE_CTL1, HEIGHT_WARMUP, warmup_params->warmup_height);
+
+ REG_UPDATE(WB_WARM_UP_MODE_CTL2, DATA_VALUE_WARMUP, warmup_params->warmup_data);
+ REG_UPDATE(WB_WARM_UP_MODE_CTL2, MODE_WARMUP, warmup_params->warmup_mode);
+ REG_UPDATE(WB_WARM_UP_MODE_CTL2, DATA_DEPTH_WARMUP, warmup_params->warmup_depth);
+}
+
+void dwb2_set_scaler(struct dwbc *dwbc, struct dc_dwb_params *params)
+{
+ struct dcn20_dwbc *dwbc20 = TO_DCN20_DWBC(dwbc);
+ DC_LOG_DWB("%s inst = %d", __func__, dwbc20->base.inst);
+
+ /* Program scaling mode */
+ REG_UPDATE_2(WBSCL_MODE, WBSCL_MODE, params->out_format,
+ WBSCL_OUT_BIT_DEPTH, params->output_depth);
+
+ if (params->out_format != dwb_scaler_mode_bypass444) {
+ /* Program output size */
+ REG_UPDATE(WBSCL_DEST_SIZE, WBSCL_DEST_WIDTH, params->dest_width);
+ REG_UPDATE(WBSCL_DEST_SIZE, WBSCL_DEST_HEIGHT, params->dest_height);
+
+ /* Program round offsets */
+ REG_UPDATE(WBSCL_ROUND_OFFSET, WBSCL_ROUND_OFFSET_Y_RGB, 0x40);
+ REG_UPDATE(WBSCL_ROUND_OFFSET, WBSCL_ROUND_OFFSET_CBCR, 0x200);
+
+ /* Program clamp values */
+ REG_UPDATE(WBSCL_CLAMP_Y_RGB, WBSCL_CLAMP_UPPER_Y_RGB, 0x3fe);
+ REG_UPDATE(WBSCL_CLAMP_Y_RGB, WBSCL_CLAMP_LOWER_Y_RGB, 0x1);
+ REG_UPDATE(WBSCL_CLAMP_CBCR, WBSCL_CLAMP_UPPER_CBCR, 0x3fe);
+ REG_UPDATE(WBSCL_CLAMP_CBCR, WBSCL_CLAMP_LOWER_CBCR, 0x1);
+
+ /* Program outside pixel strategy to use edge pixels */
+ REG_UPDATE(WBSCL_OUTSIDE_PIX_STRATEGY, WBSCL_OUTSIDE_PIX_STRATEGY, DWB_OUTSIDE_PIX_STRATEGY_EDGE);
+
+ if (params->cnv_params.crop_en) {
+ /* horizontal scale */
+ dwb_program_horz_scalar(dwbc20, params->cnv_params.crop_width,
+ params->dest_width,
+ params->scaler_taps);
+
+ /* vertical scale */
+ dwb_program_vert_scalar(dwbc20, params->cnv_params.crop_height,
+ params->dest_height,
+ params->scaler_taps,
+ params->subsample_position);
+ } else {
+ /* horizontal scale */
+ dwb_program_horz_scalar(dwbc20, params->cnv_params.src_width,
+ params->dest_width,
+ params->scaler_taps);
+
+ /* vertical scale */
+ dwb_program_vert_scalar(dwbc20, params->cnv_params.src_height,
+ params->dest_height,
+ params->scaler_taps,
+ params->subsample_position);
+ }
+ }
+
+}
+
+static const struct dwbc_funcs dcn20_dwbc_funcs = {
+ .get_caps = dwb2_get_caps,
+ .enable = dwb2_enable,
+ .disable = dwb2_disable,
+ .update = dwb2_update,
+ .is_enabled = dwb2_is_enabled,
+ .set_stereo = dwb2_set_stereo,
+ .set_new_content = dwb2_set_new_content,
+ .set_warmup = dwb2_set_warmup,
+ .dwb_set_scaler = dwb2_set_scaler,
+};
+
+void dcn20_dwbc_construct(struct dcn20_dwbc *dwbc20,
+ struct dc_context *ctx,
+ const struct dcn20_dwbc_registers *dwbc_regs,
+ const struct dcn20_dwbc_shift *dwbc_shift,
+ const struct dcn20_dwbc_mask *dwbc_mask,
+ int inst)
+{
+ dwbc20->base.ctx = ctx;
+
+ dwbc20->base.inst = inst;
+ dwbc20->base.funcs = &dcn20_dwbc_funcs;
+
+ dwbc20->dwbc_regs = dwbc_regs;
+ dwbc20->dwbc_shift = dwbc_shift;
+ dwbc20->dwbc_mask = dwbc_mask;
+}
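+
+/*
+ * Typical instantiation, sketched here for reference (the table names are
+ * illustrative; the authoritative definitions live in the DCN2.0 resource
+ * code):
+ *
+ *	static const struct dcn20_dwbc_registers dwbc20_regs[] = {
+ *		{ DWBC_COMMON_REG_LIST_DCN2_0(0) },
+ *	};
+ *	static const struct dcn20_dwbc_shift dwbc20_shift = {
+ *		DWBC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
+ *	};
+ *	static const struct dcn20_dwbc_mask dwbc20_mask = {
+ *		DWBC_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
+ *	};
+ *
+ *	dcn20_dwbc_construct(dwbc20, ctx, &dwbc20_regs[inst],
+ *			     &dwbc20_shift, &dwbc20_mask, inst);
+ */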
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h
new file mode 100644
index 0000000000..a9dd9ae23e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h
@@ -0,0 +1,431 @@
+/* Copyright 2012-17 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DC_DWBC_DCN20_H__
+#define __DC_DWBC_DCN20_H__
+
+#define TO_DCN20_DWBC(dwbc_base) \
+ container_of(dwbc_base, struct dcn20_dwbc, base)
+
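+/*
+ * DWBC_COMMON_REG_LIST_DCN2_0() expands (through the SRI2_DWB() helpers) to
+ * the per-instance register offsets, and DWBC_COMMON_MASK_SH_LIST_DCN2_0()
+ * expands (through SF_DWB()) to the shift/mask initializers for the field
+ * list further below.  A resource file instantiates all three and hands the
+ * resulting tables to dcn20_dwbc_construct().
+ */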
+#define DWBC_COMMON_REG_LIST_DCN2_0(inst) \
+ SRI2_DWB(WB_ENABLE, CNV, inst),\
+ SRI2_DWB(WB_EC_CONFIG, CNV, inst),\
+ SRI2_DWB(CNV_MODE, CNV, inst),\
+ SRI2_DWB(CNV_WINDOW_START, CNV, inst),\
+ SRI2_DWB(CNV_WINDOW_SIZE, CNV, inst),\
+ SRI2_DWB(CNV_UPDATE, CNV, inst),\
+ SRI2_DWB(CNV_SOURCE_SIZE, CNV, inst),\
+ SRI2_DWB(CNV_TEST_CNTL, CNV, inst),\
+ SRI2_DWB(CNV_TEST_CRC_RED, CNV, inst),\
+ SRI2_DWB(CNV_TEST_CRC_GREEN, CNV, inst),\
+ SRI2_DWB(CNV_TEST_CRC_BLUE, CNV, inst),\
+ SRI2_DWB(WBSCL_COEF_RAM_SELECT, WBSCL, inst),\
+ SRI2_DWB(WBSCL_COEF_RAM_TAP_DATA, WBSCL, inst),\
+ SRI2_DWB(WBSCL_MODE, WBSCL, inst),\
+ SRI2_DWB(WBSCL_TAP_CONTROL, WBSCL, inst),\
+ SRI2_DWB(WBSCL_DEST_SIZE, WBSCL, inst),\
+ SRI2_DWB(WBSCL_HORZ_FILTER_SCALE_RATIO, WBSCL, inst),\
+ SRI2_DWB(WBSCL_HORZ_FILTER_INIT_Y_RGB, WBSCL, inst),\
+ SRI2_DWB(WBSCL_HORZ_FILTER_INIT_CBCR, WBSCL, inst),\
+ SRI2_DWB(WBSCL_VERT_FILTER_SCALE_RATIO, WBSCL, inst),\
+ SRI2_DWB(WBSCL_VERT_FILTER_INIT_Y_RGB, WBSCL, inst),\
+ SRI2_DWB(WBSCL_VERT_FILTER_INIT_CBCR, WBSCL, inst),\
+ SRI2_DWB(WBSCL_ROUND_OFFSET, WBSCL, inst),\
+ SRI2_DWB(WBSCL_OVERFLOW_STATUS, WBSCL, inst),\
+ SRI2_DWB(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL, inst),\
+ SRI2_DWB(WBSCL_TEST_CNTL, WBSCL, inst),\
+ SRI2_DWB(WBSCL_TEST_CRC_RED, WBSCL, inst),\
+ SRI2_DWB(WBSCL_TEST_CRC_GREEN, WBSCL, inst),\
+ SRI2_DWB(WBSCL_TEST_CRC_BLUE, WBSCL, inst),\
+ SRI2_DWB(WBSCL_BACKPRESSURE_CNT_EN, WBSCL, inst),\
+ SRI2_DWB(WB_MCIF_BACKPRESSURE_CNT, WBSCL, inst),\
+ SRI2_DWB(WBSCL_CLAMP_Y_RGB, WBSCL, inst),\
+ SRI2_DWB(WBSCL_CLAMP_CBCR, WBSCL, inst),\
+ SRI2_DWB(WBSCL_OUTSIDE_PIX_STRATEGY, WBSCL, inst),\
+ SRI2_DWB(WBSCL_OUTSIDE_PIX_STRATEGY_CBCR, WBSCL, inst),\
+ SRI2_DWB(WBSCL_DEBUG, WBSCL, inst),\
+ SRI2_DWB(WBSCL_TEST_DEBUG_INDEX, WBSCL, inst),\
+ SRI2_DWB(WBSCL_TEST_DEBUG_DATA, WBSCL, inst),\
+ SRI2_DWB(WB_DEBUG_CTRL, CNV, inst),\
+ SRI2_DWB(WB_DBG_MODE, CNV, inst),\
+ SRI2_DWB(WB_HW_DEBUG, CNV, inst),\
+ SRI2_DWB(CNV_TEST_DEBUG_INDEX, CNV, inst),\
+ SRI2_DWB(CNV_TEST_DEBUG_DATA, CNV, inst),\
+ SRI2_DWB(WB_SOFT_RESET, CNV, inst),\
+ SRI2_DWB(WB_WARM_UP_MODE_CTL1, CNV, inst),\
+ SRI2_DWB(WB_WARM_UP_MODE_CTL2, CNV, inst)
+
+#define DWBC_COMMON_MASK_SH_LIST_DCN2_0(mask_sh) \
+ SF_DWB(WB_ENABLE, WB_ENABLE, mask_sh),\
+ SF_DWB(WB_EC_CONFIG, DISPCLK_R_WB_GATE_DIS, mask_sh),\
+ SF_DWB(WB_EC_CONFIG, DISPCLK_G_WB_GATE_DIS, mask_sh),\
+ SF_DWB(WB_EC_CONFIG, DISPCLK_G_WBSCL_GATE_DIS, mask_sh),\
+ SF_DWB(WB_EC_CONFIG, WB_TEST_CLK_SEL, mask_sh),\
+ SF_DWB(WB_EC_CONFIG, WB_LB_LS_DIS, mask_sh),\
+ SF_DWB(WB_EC_CONFIG, WB_LB_SD_DIS, mask_sh),\
+ SF_DWB(WB_EC_CONFIG, WB_LUT_LS_DIS, mask_sh),\
+ SF_DWB(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_MODE_SEL, mask_sh),\
+ SF_DWB(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_DIS, mask_sh),\
+ SF_DWB(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_FORCE, mask_sh),\
+ SF_DWB(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_STATE, mask_sh),\
+ SF_DWB(WB_EC_CONFIG, WB_RAM_PW_SAVE_MODE, mask_sh),\
+ SF_DWB(WB_EC_CONFIG, WBSCL_LUT_MEM_PWR_STATE, mask_sh),\
+ SF_DWB(CNV_MODE, CNV_OUT_BPC, mask_sh),\
+ SF_DWB(CNV_MODE, CNV_FRAME_CAPTURE_RATE, mask_sh),\
+ SF_DWB(CNV_MODE, CNV_WINDOW_CROP_EN, mask_sh),\
+ SF_DWB(CNV_MODE, CNV_STEREO_TYPE, mask_sh),\
+ SF_DWB(CNV_MODE, CNV_INTERLACED_MODE, mask_sh),\
+ SF_DWB(CNV_MODE, CNV_EYE_SELECTION, mask_sh),\
+ SF_DWB(CNV_MODE, CNV_STEREO_POLARITY, mask_sh),\
+ SF_DWB(CNV_MODE, CNV_INTERLACED_FIELD_ORDER, mask_sh),\
+ SF_DWB(CNV_MODE, CNV_STEREO_SPLIT, mask_sh),\
+ SF_DWB(CNV_MODE, CNV_NEW_CONTENT, mask_sh),\
+ SF_DWB(CNV_MODE, CNV_FRAME_CAPTURE_EN_CURRENT, mask_sh),\
+ SF_DWB(CNV_MODE, CNV_FRAME_CAPTURE_EN, mask_sh),\
+ SF_DWB(CNV_WINDOW_START, CNV_WINDOW_START_X, mask_sh),\
+ SF_DWB(CNV_WINDOW_START, CNV_WINDOW_START_Y, mask_sh),\
+ SF_DWB(CNV_WINDOW_SIZE, CNV_WINDOW_WIDTH, mask_sh),\
+ SF_DWB(CNV_WINDOW_SIZE, CNV_WINDOW_HEIGHT, mask_sh),\
+ SF_DWB(CNV_UPDATE, CNV_UPDATE_PENDING, mask_sh),\
+ SF_DWB(CNV_UPDATE, CNV_UPDATE_TAKEN, mask_sh),\
+ SF_DWB(CNV_UPDATE, CNV_UPDATE_LOCK, mask_sh),\
+ SF_DWB(CNV_SOURCE_SIZE, CNV_SOURCE_WIDTH, mask_sh),\
+ SF_DWB(CNV_SOURCE_SIZE, CNV_SOURCE_HEIGHT, mask_sh),\
+ SF_DWB(CNV_TEST_CNTL, CNV_TEST_CRC_EN, mask_sh),\
+ SF_DWB(CNV_TEST_CNTL, CNV_TEST_CRC_CONT_EN, mask_sh),\
+ SF_DWB(CNV_TEST_CRC_RED, CNV_TEST_CRC_RED_MASK, mask_sh),\
+ SF_DWB(CNV_TEST_CRC_RED, CNV_TEST_CRC_SIG_RED, mask_sh),\
+ SF_DWB(CNV_TEST_CRC_GREEN, CNV_TEST_CRC_GREEN_MASK, mask_sh),\
+ SF_DWB(CNV_TEST_CRC_GREEN, CNV_TEST_CRC_SIG_GREEN, mask_sh),\
+ SF_DWB(CNV_TEST_CRC_BLUE, CNV_TEST_CRC_BLUE_MASK, mask_sh),\
+ SF_DWB(CNV_TEST_CRC_BLUE, CNV_TEST_CRC_SIG_BLUE, mask_sh),\
+ SF_DWB(WB_DEBUG_CTRL, WB_DEBUG_EN, mask_sh),\
+ SF_DWB(WB_DEBUG_CTRL, WB_DEBUG_SEL, mask_sh),\
+ SF_DWB(WB_DBG_MODE, WB_DBG_MODE_EN, mask_sh),\
+ SF_DWB(WB_DBG_MODE, WB_DBG_DIN_FMT, mask_sh),\
+ SF_DWB(WB_DBG_MODE, WB_DBG_36MODE, mask_sh),\
+ SF_DWB(WB_DBG_MODE, WB_DBG_CMAP, mask_sh),\
+ SF_DWB(WB_DBG_MODE, WB_DBG_PXLRATE_ERROR, mask_sh),\
+ SF_DWB(WB_DBG_MODE, WB_DBG_SOURCE_WIDTH, mask_sh),\
+ SF_DWB(WB_HW_DEBUG, WB_HW_DEBUG, mask_sh),\
+ SF_DWB(WB_SOFT_RESET, WB_SOFT_RESET, mask_sh),\
+ SF_DWB(CNV_TEST_DEBUG_INDEX, CNV_TEST_DEBUG_INDEX, mask_sh),\
+ SF_DWB(CNV_TEST_DEBUG_INDEX, CNV_TEST_DEBUG_WRITE_EN, mask_sh),\
+ SF_DWB(CNV_TEST_DEBUG_DATA, CNV_TEST_DEBUG_DATA, mask_sh),\
+ SF_DWB(WBSCL_COEF_RAM_SELECT, WBSCL_COEF_RAM_TAP_PAIR_IDX, mask_sh),\
+ SF_DWB(WBSCL_COEF_RAM_SELECT, WBSCL_COEF_RAM_PHASE, mask_sh),\
+ SF_DWB(WBSCL_COEF_RAM_SELECT, WBSCL_COEF_RAM_FILTER_TYPE, mask_sh),\
+ SF_DWB(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_EVEN_TAP_COEF, mask_sh),\
+ SF_DWB(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_EVEN_TAP_COEF_EN, mask_sh),\
+ SF_DWB(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_ODD_TAP_COEF, mask_sh),\
+ SF_DWB(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_ODD_TAP_COEF_EN, mask_sh),\
+ SF_DWB(WBSCL_MODE, WBSCL_MODE, mask_sh),\
+ SF_DWB(WBSCL_MODE, WBSCL_OUT_BIT_DEPTH, mask_sh),\
+ SF_DWB(WBSCL_TAP_CONTROL, WBSCL_V_NUM_OF_TAPS_Y_RGB, mask_sh),\
+ SF_DWB(WBSCL_TAP_CONTROL, WBSCL_V_NUM_OF_TAPS_CBCR, mask_sh),\
+ SF_DWB(WBSCL_TAP_CONTROL, WBSCL_H_NUM_OF_TAPS_Y_RGB, mask_sh),\
+ SF_DWB(WBSCL_TAP_CONTROL, WBSCL_H_NUM_OF_TAPS_CBCR, mask_sh),\
+ SF_DWB(WBSCL_DEST_SIZE, WBSCL_DEST_HEIGHT, mask_sh),\
+ SF_DWB(WBSCL_DEST_SIZE, WBSCL_DEST_WIDTH, mask_sh),\
+ SF_DWB(WBSCL_HORZ_FILTER_SCALE_RATIO, WBSCL_H_SCALE_RATIO, mask_sh),\
+ SF_DWB(WBSCL_HORZ_FILTER_INIT_Y_RGB, WBSCL_H_INIT_FRAC_Y_RGB, mask_sh),\
+ SF_DWB(WBSCL_HORZ_FILTER_INIT_Y_RGB, WBSCL_H_INIT_INT_Y_RGB, mask_sh),\
+ SF_DWB(WBSCL_HORZ_FILTER_INIT_CBCR, WBSCL_H_INIT_FRAC_CBCR, mask_sh),\
+ SF_DWB(WBSCL_HORZ_FILTER_INIT_CBCR, WBSCL_H_INIT_INT_CBCR, mask_sh),\
+ SF_DWB(WBSCL_VERT_FILTER_SCALE_RATIO, WBSCL_V_SCALE_RATIO, mask_sh),\
+ SF_DWB(WBSCL_VERT_FILTER_INIT_Y_RGB, WBSCL_V_INIT_FRAC_Y_RGB, mask_sh),\
+ SF_DWB(WBSCL_VERT_FILTER_INIT_Y_RGB, WBSCL_V_INIT_INT_Y_RGB, mask_sh),\
+ SF_DWB(WBSCL_VERT_FILTER_INIT_CBCR, WBSCL_V_INIT_FRAC_CBCR, mask_sh),\
+ SF_DWB(WBSCL_VERT_FILTER_INIT_CBCR, WBSCL_V_INIT_INT_CBCR, mask_sh),\
+ SF_DWB(WBSCL_ROUND_OFFSET, WBSCL_ROUND_OFFSET_Y_RGB, mask_sh),\
+ SF_DWB(WBSCL_ROUND_OFFSET, WBSCL_ROUND_OFFSET_CBCR, mask_sh),\
+ SF_DWB(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_FLAG, mask_sh),\
+ SF_DWB(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_ACK, mask_sh),\
+ SF_DWB(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_MASK, mask_sh),\
+ SF_DWB(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_INT_STATUS, mask_sh),\
+ SF_DWB(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_INT_TYPE, mask_sh),\
+ SF_DWB(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_FLAG, mask_sh),\
+ SF_DWB(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_ACK, mask_sh),\
+ SF_DWB(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_MASK, mask_sh),\
+ SF_DWB(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_INT_STATUS, mask_sh),\
+ SF_DWB(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_INT_TYPE, mask_sh),\
+ SF_DWB(WBSCL_TEST_CNTL, WBSCL_TEST_CRC_EN, mask_sh),\
+ SF_DWB(WBSCL_TEST_CNTL, WBSCL_TEST_CRC_CONT_EN, mask_sh),\
+ SF_DWB(WBSCL_TEST_CRC_RED, WBSCL_TEST_CRC_RED_MASK, mask_sh),\
+ SF_DWB(WBSCL_TEST_CRC_RED, WBSCL_TEST_CRC_SIG_RED, mask_sh),\
+ SF_DWB(WBSCL_TEST_CRC_GREEN, WBSCL_TEST_CRC_GREEN_MASK, mask_sh),\
+ SF_DWB(WBSCL_TEST_CRC_GREEN, WBSCL_TEST_CRC_SIG_GREEN, mask_sh),\
+ SF_DWB(WBSCL_TEST_CRC_BLUE, WBSCL_TEST_CRC_BLUE_MASK, mask_sh),\
+ SF_DWB(WBSCL_TEST_CRC_BLUE, WBSCL_TEST_CRC_SIG_BLUE, mask_sh),\
+ SF_DWB(WBSCL_BACKPRESSURE_CNT_EN, WBSCL_BACKPRESSURE_CNT_EN, mask_sh),\
+ SF_DWB(WB_MCIF_BACKPRESSURE_CNT, WB_MCIF_Y_MAX_BACKPRESSURE, mask_sh),\
+ SF_DWB(WB_MCIF_BACKPRESSURE_CNT, WB_MCIF_C_MAX_BACKPRESSURE, mask_sh),\
+ SF_DWB(WBSCL_CLAMP_Y_RGB, WBSCL_CLAMP_UPPER_Y_RGB, mask_sh),\
+ SF_DWB(WBSCL_CLAMP_Y_RGB, WBSCL_CLAMP_LOWER_Y_RGB, mask_sh),\
+ SF_DWB(WBSCL_CLAMP_CBCR, WBSCL_CLAMP_UPPER_CBCR, mask_sh),\
+ SF_DWB(WBSCL_CLAMP_CBCR, WBSCL_CLAMP_LOWER_CBCR, mask_sh),\
+ SF_DWB(WBSCL_OUTSIDE_PIX_STRATEGY, WBSCL_OUTSIDE_PIX_STRATEGY, mask_sh),\
+ SF_DWB(WBSCL_OUTSIDE_PIX_STRATEGY, WBSCL_BLACK_COLOR_G_Y, mask_sh),\
+ SF_DWB(WBSCL_OUTSIDE_PIX_STRATEGY_CBCR, WBSCL_BLACK_COLOR_B_CB, mask_sh),\
+ SF_DWB(WBSCL_OUTSIDE_PIX_STRATEGY_CBCR, WBSCL_BLACK_COLOR_R_CR, mask_sh),\
+ SF_DWB(WBSCL_DEBUG, WBSCL_DEBUG, mask_sh),\
+ SF_DWB(WBSCL_TEST_DEBUG_INDEX, WBSCL_TEST_DEBUG_INDEX, mask_sh),\
+ SF_DWB(WBSCL_TEST_DEBUG_INDEX, WBSCL_TEST_DEBUG_WRITE_EN, mask_sh),\
+ SF_DWB(WBSCL_TEST_DEBUG_DATA, WBSCL_TEST_DEBUG_DATA, mask_sh),\
+ SF_DWB(WB_WARM_UP_MODE_CTL1, WIDTH_WARMUP, mask_sh),\
+ SF_DWB(WB_WARM_UP_MODE_CTL1, HEIGHT_WARMUP, mask_sh),\
+ SF_DWB(WB_WARM_UP_MODE_CTL1, GMC_WARM_UP_ENABLE, mask_sh),\
+ SF_DWB(WB_WARM_UP_MODE_CTL2, DATA_VALUE_WARMUP, mask_sh),\
+ SF_DWB(WB_WARM_UP_MODE_CTL2, MODE_WARMUP, mask_sh),\
+ SF_DWB(WB_WARM_UP_MODE_CTL2, DATA_DEPTH_WARMUP, mask_sh)
+
+#define DWBC_REG_FIELD_LIST_DCN2_0(type) \
+ type WB_ENABLE;\
+ type DISPCLK_R_WB_GATE_DIS;\
+ type DISPCLK_G_WB_GATE_DIS;\
+ type DISPCLK_G_WBSCL_GATE_DIS;\
+ type WB_TEST_CLK_SEL;\
+ type WB_LB_LS_DIS;\
+ type WB_LB_SD_DIS;\
+ type WB_LUT_LS_DIS;\
+ type WBSCL_LB_MEM_PWR_MODE_SEL;\
+ type WBSCL_LB_MEM_PWR_DIS;\
+ type WBSCL_LB_MEM_PWR_FORCE;\
+ type WBSCL_LB_MEM_PWR_STATE;\
+ type WB_RAM_PW_SAVE_MODE;\
+ type WBSCL_LUT_MEM_PWR_STATE;\
+ type CNV_OUT_BPC;\
+ type CNV_FRAME_CAPTURE_RATE;\
+ type CNV_WINDOW_CROP_EN;\
+ type CNV_STEREO_TYPE;\
+ type CNV_INTERLACED_MODE;\
+ type CNV_EYE_SELECTION;\
+ type CNV_STEREO_POLARITY;\
+ type CNV_INTERLACED_FIELD_ORDER;\
+ type CNV_STEREO_SPLIT;\
+ type CNV_NEW_CONTENT;\
+ type CNV_FRAME_CAPTURE_EN_CURRENT;\
+ type CNV_FRAME_CAPTURE_EN;\
+ type CNV_WINDOW_START_X;\
+ type CNV_WINDOW_START_Y;\
+ type CNV_WINDOW_WIDTH;\
+ type CNV_WINDOW_HEIGHT;\
+ type CNV_UPDATE_PENDING;\
+ type CNV_UPDATE_TAKEN;\
+ type CNV_UPDATE_LOCK;\
+ type CNV_SOURCE_WIDTH;\
+ type CNV_SOURCE_HEIGHT;\
+ type CNV_TEST_CRC_EN;\
+ type CNV_TEST_CRC_CONT_EN;\
+ type CNV_TEST_CRC_RED_MASK;\
+ type CNV_TEST_CRC_SIG_RED;\
+ type CNV_TEST_CRC_GREEN_MASK;\
+ type CNV_TEST_CRC_SIG_GREEN;\
+ type CNV_TEST_CRC_BLUE_MASK;\
+ type CNV_TEST_CRC_SIG_BLUE;\
+ type WB_DEBUG_EN;\
+ type WB_DEBUG_SEL;\
+ type WB_DBG_MODE_EN;\
+ type WB_DBG_DIN_FMT;\
+ type WB_DBG_36MODE;\
+ type WB_DBG_CMAP;\
+ type WB_DBG_PXLRATE_ERROR;\
+ type WB_DBG_SOURCE_WIDTH;\
+ type WB_HW_DEBUG;\
+ type CNV_TEST_DEBUG_INDEX;\
+ type CNV_TEST_DEBUG_WRITE_EN;\
+ type CNV_TEST_DEBUG_DATA;\
+ type WB_SOFT_RESET;\
+ type WBSCL_COEF_RAM_TAP_PAIR_IDX;\
+ type WBSCL_COEF_RAM_PHASE;\
+ type WBSCL_COEF_RAM_FILTER_TYPE;\
+ type WBSCL_COEF_RAM_SEL;\
+ type WBSCL_COEF_RAM_SEL_CURRENT;\
+ type WBSCL_COEF_RAM_RD_SEL;\
+ type WBSCL_COEF_RAM_EVEN_TAP_COEF;\
+ type WBSCL_COEF_RAM_EVEN_TAP_COEF_EN;\
+ type WBSCL_COEF_RAM_ODD_TAP_COEF;\
+ type WBSCL_COEF_RAM_ODD_TAP_COEF_EN;\
+ type WBSCL_MODE;\
+ type WBSCL_OUT_BIT_DEPTH;\
+ type WBSCL_V_NUM_OF_TAPS_Y_RGB;\
+ type WBSCL_V_NUM_OF_TAPS_CBCR;\
+ type WBSCL_H_NUM_OF_TAPS_Y_RGB;\
+ type WBSCL_H_NUM_OF_TAPS_CBCR;\
+ type WBSCL_DEST_HEIGHT;\
+ type WBSCL_DEST_WIDTH;\
+ type WBSCL_H_SCALE_RATIO;\
+ type WBSCL_H_INIT_FRAC_Y_RGB;\
+ type WBSCL_H_INIT_INT_Y_RGB;\
+ type WBSCL_H_INIT_FRAC_CBCR;\
+ type WBSCL_H_INIT_INT_CBCR;\
+ type WBSCL_V_SCALE_RATIO;\
+ type WBSCL_V_INIT_FRAC_Y_RGB;\
+ type WBSCL_V_INIT_INT_Y_RGB;\
+ type WBSCL_V_INIT_FRAC_CBCR;\
+ type WBSCL_V_INIT_INT_CBCR;\
+ type WBSCL_ROUND_OFFSET_Y_RGB;\
+ type WBSCL_ROUND_OFFSET_CBCR;\
+ type WBSCL_DATA_OVERFLOW_FLAG;\
+ type WBSCL_DATA_OVERFLOW_ACK;\
+ type WBSCL_DATA_OVERFLOW_MASK;\
+ type WBSCL_DATA_OVERFLOW_INT_STATUS;\
+ type WBSCL_DATA_OVERFLOW_INT_TYPE;\
+ type WBSCL_HOST_CONFLICT_FLAG;\
+ type WBSCL_HOST_CONFLICT_ACK;\
+ type WBSCL_HOST_CONFLICT_MASK;\
+ type WBSCL_HOST_CONFLICT_INT_STATUS;\
+ type WBSCL_HOST_CONFLICT_INT_TYPE;\
+ type WBSCL_TEST_CRC_EN;\
+ type WBSCL_TEST_CRC_CONT_EN;\
+ type WBSCL_TEST_CRC_RED_MASK;\
+ type WBSCL_TEST_CRC_SIG_RED;\
+ type WBSCL_TEST_CRC_GREEN_MASK;\
+ type WBSCL_TEST_CRC_SIG_GREEN;\
+ type WBSCL_TEST_CRC_BLUE_MASK;\
+ type WBSCL_TEST_CRC_SIG_BLUE;\
+ type WBSCL_BACKPRESSURE_CNT_EN;\
+ type WB_MCIF_Y_MAX_BACKPRESSURE;\
+ type WB_MCIF_C_MAX_BACKPRESSURE;\
+ type WBSCL_CLAMP_UPPER_Y_RGB;\
+ type WBSCL_CLAMP_LOWER_Y_RGB;\
+ type WBSCL_CLAMP_UPPER_CBCR;\
+ type WBSCL_CLAMP_LOWER_CBCR;\
+ type WBSCL_OUTSIDE_PIX_STRATEGY;\
+ type WBSCL_BLACK_COLOR_G_Y;\
+ type WBSCL_BLACK_COLOR_B_CB;\
+ type WBSCL_BLACK_COLOR_R_CR;\
+ type WBSCL_DEBUG;\
+ type WBSCL_TEST_DEBUG_INDEX;\
+ type WBSCL_TEST_DEBUG_WRITE_EN;\
+ type WBSCL_TEST_DEBUG_DATA;\
+ type WIDTH_WARMUP;\
+ type HEIGHT_WARMUP;\
+ type GMC_WARM_UP_ENABLE;\
+ type DATA_VALUE_WARMUP;\
+ type MODE_WARMUP;\
+	type DATA_DEPTH_WARMUP;
+
+struct dcn20_dwbc_registers {
+ /* DCN2.0 */
+ uint32_t WB_ENABLE;
+ uint32_t WB_EC_CONFIG;
+ uint32_t CNV_MODE;
+ uint32_t CNV_WINDOW_START;
+ uint32_t CNV_WINDOW_SIZE;
+ uint32_t CNV_UPDATE;
+ uint32_t CNV_SOURCE_SIZE;
+ uint32_t CNV_TEST_CNTL;
+ uint32_t CNV_TEST_CRC_RED;
+ uint32_t CNV_TEST_CRC_GREEN;
+ uint32_t CNV_TEST_CRC_BLUE;
+ uint32_t WB_DEBUG_CTRL;
+ uint32_t WB_DBG_MODE;
+ uint32_t WB_HW_DEBUG;
+ uint32_t CNV_TEST_DEBUG_INDEX;
+ uint32_t CNV_TEST_DEBUG_DATA;
+ uint32_t WB_SOFT_RESET;
+ uint32_t WBSCL_COEF_RAM_SELECT;
+ uint32_t WBSCL_COEF_RAM_TAP_DATA;
+ uint32_t WBSCL_MODE;
+ uint32_t WBSCL_TAP_CONTROL;
+ uint32_t WBSCL_DEST_SIZE;
+ uint32_t WBSCL_HORZ_FILTER_SCALE_RATIO;
+ uint32_t WBSCL_HORZ_FILTER_INIT_Y_RGB;
+ uint32_t WBSCL_HORZ_FILTER_INIT_CBCR;
+ uint32_t WBSCL_VERT_FILTER_SCALE_RATIO;
+ uint32_t WBSCL_VERT_FILTER_INIT_Y_RGB;
+ uint32_t WBSCL_VERT_FILTER_INIT_CBCR;
+ uint32_t WBSCL_ROUND_OFFSET;
+ uint32_t WBSCL_OVERFLOW_STATUS;
+ uint32_t WBSCL_COEF_RAM_CONFLICT_STATUS;
+ uint32_t WBSCL_TEST_CNTL;
+ uint32_t WBSCL_TEST_CRC_RED;
+ uint32_t WBSCL_TEST_CRC_GREEN;
+ uint32_t WBSCL_TEST_CRC_BLUE;
+ uint32_t WBSCL_BACKPRESSURE_CNT_EN;
+ uint32_t WB_MCIF_BACKPRESSURE_CNT;
+ uint32_t WBSCL_CLAMP_Y_RGB;
+ uint32_t WBSCL_CLAMP_CBCR;
+ uint32_t WBSCL_OUTSIDE_PIX_STRATEGY;
+ uint32_t WBSCL_OUTSIDE_PIX_STRATEGY_CBCR;
+ uint32_t WBSCL_DEBUG;
+ uint32_t WBSCL_TEST_DEBUG_INDEX;
+ uint32_t WBSCL_TEST_DEBUG_DATA;
+ uint32_t WB_WARM_UP_MODE_CTL1;
+ uint32_t WB_WARM_UP_MODE_CTL2;
+};
+
+
+struct dcn20_dwbc_mask {
+ DWBC_REG_FIELD_LIST_DCN2_0(uint32_t)
+};
+
+struct dcn20_dwbc_shift {
+ DWBC_REG_FIELD_LIST_DCN2_0(uint8_t)
+};
+
+struct dcn20_dwbc {
+ struct dwbc base;
+ const struct dcn20_dwbc_registers *dwbc_regs;
+ const struct dcn20_dwbc_shift *dwbc_shift;
+ const struct dcn20_dwbc_mask *dwbc_mask;
+};
+
+void dcn20_dwbc_construct(struct dcn20_dwbc *dwbc20,
+ struct dc_context *ctx,
+ const struct dcn20_dwbc_registers *dwbc_regs,
+ const struct dcn20_dwbc_shift *dwbc_shift,
+ const struct dcn20_dwbc_mask *dwbc_mask,
+ int inst);
+
+bool dwb2_disable(struct dwbc *dwbc);
+
+bool dwb2_is_enabled(struct dwbc *dwbc);
+
+void dwb2_set_stereo(struct dwbc *dwbc,
+ struct dwb_stereo_params *stereo_params);
+
+void dwb2_set_new_content(struct dwbc *dwbc,
+ bool is_new_content);
+
+void dwb2_config_dwb_cnv(struct dwbc *dwbc,
+ struct dc_dwb_params *params);
+
+void dwb2_set_scaler(struct dwbc *dwbc, struct dc_dwb_params *params);
+
+bool dwb_program_vert_scalar(struct dcn20_dwbc *dwbc20,
+ uint32_t src_height,
+ uint32_t dest_height,
+ struct scaling_taps num_taps,
+ enum dwb_subsample_position subsample_position);
+
+bool dwb_program_horz_scalar(struct dcn20_dwbc *dwbc20,
+ uint32_t src_width,
+ uint32_t dest_width,
+ struct scaling_taps num_taps);
+
+
+#endif
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
new file mode 100644
index 0000000000..994fb732a7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
@@ -0,0 +1,873 @@
+/*
+ * Copyright 2012-17 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "fixed31_32.h"
+#include "resource.h"
+#include "dwb.h"
+#include "dcn20_dwb.h"
+
+#define NUM_PHASES 16
+#define HORZ_MAX_TAPS 12
+#define VERT_MAX_TAPS 12
+
+#define REG(reg)\
+ dwbc20->dwbc_regs->reg
+
+#define CTX \
+ dwbc20->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dwbc20->dwbc_shift->field_name, dwbc20->dwbc_mask->field_name
+
+#define TO_DCN20_DWBC(dwbc_base) \
+ container_of(dwbc_base, struct dcn20_dwbc, base)
+
+
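+/*
+ * WBSCL filter coefficient tables.  Each table holds phases 0..8 of a
+ * 16-phase filter (the remaining phases mirror these, the filters being
+ * symmetric), one row per phase and one entry per tap.  The values appear
+ * to be 14-bit two's-complement fixed point with 12 fractional bits, so
+ * 4096 is 1.0 and e.g. 16348 is -36/4096; each row sums to 4096 for unity
+ * gain.  The _117/_150/_183 suffixes name the approximate downscale ratios
+ * (1.17, 1.50, 1.83) the sets are tuned for.
+ */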
+static const uint16_t filter_3tap_16p_upscale[27] = {
+ 2048, 2048, 0,
+ 1708, 2424, 16348,
+ 1372, 2796, 16308,
+ 1056, 3148, 16272,
+ 768, 3464, 16244,
+ 512, 3728, 16236,
+ 296, 3928, 16252,
+ 124, 4052, 16296,
+ 0, 4096, 0
+};
+
+static const uint16_t filter_3tap_16p_117[27] = {
+ 2048, 2048, 0,
+ 1824, 2276, 16376,
+ 1600, 2496, 16380,
+ 1376, 2700, 16,
+ 1156, 2880, 52,
+ 948, 3032, 108,
+ 756, 3144, 192,
+ 580, 3212, 296,
+ 428, 3236, 428
+};
+
+static const uint16_t filter_3tap_16p_150[27] = {
+ 2048, 2048, 0,
+ 1872, 2184, 36,
+ 1692, 2308, 88,
+ 1516, 2420, 156,
+ 1340, 2516, 236,
+ 1168, 2592, 328,
+ 1004, 2648, 440,
+ 844, 2684, 560,
+ 696, 2696, 696
+};
+
+static const uint16_t filter_3tap_16p_183[27] = {
+ 2048, 2048, 0,
+ 1892, 2104, 92,
+ 1744, 2152, 196,
+ 1592, 2196, 300,
+ 1448, 2232, 412,
+ 1304, 2256, 528,
+ 1168, 2276, 648,
+ 1032, 2288, 772,
+ 900, 2292, 900
+};
+
+static const uint16_t filter_4tap_16p_upscale[36] = {
+ 0, 4096, 0, 0,
+ 16240, 4056, 180, 16380,
+ 16136, 3952, 404, 16364,
+ 16072, 3780, 664, 16344,
+ 16040, 3556, 952, 16312,
+ 16036, 3284, 1268, 16272,
+ 16052, 2980, 1604, 16224,
+ 16084, 2648, 1952, 16176,
+ 16128, 2304, 2304, 16128
+};
+
+static const uint16_t filter_4tap_16p_117[36] = {
+ 428, 3236, 428, 0,
+ 276, 3232, 604, 16364,
+ 148, 3184, 800, 16340,
+ 44, 3104, 1016, 16312,
+ 16344, 2984, 1244, 16284,
+ 16284, 2832, 1488, 16256,
+ 16244, 2648, 1732, 16236,
+ 16220, 2440, 1976, 16220,
+ 16212, 2216, 2216, 16212
+};
+
+static const uint16_t filter_4tap_16p_150[36] = {
+ 696, 2700, 696, 0,
+ 560, 2700, 848, 16364,
+ 436, 2676, 1008, 16348,
+ 328, 2628, 1180, 16336,
+ 232, 2556, 1356, 16328,
+ 152, 2460, 1536, 16328,
+ 84, 2344, 1716, 16332,
+ 28, 2208, 1888, 16348,
+ 16376, 2052, 2052, 16376
+};
+
+static const uint16_t filter_4tap_16p_183[36] = {
+ 940, 2208, 940, 0,
+ 832, 2200, 1052, 4,
+ 728, 2180, 1164, 16,
+ 628, 2148, 1280, 36,
+ 536, 2100, 1392, 60,
+ 448, 2044, 1504, 92,
+ 368, 1976, 1612, 132,
+ 296, 1900, 1716, 176,
+ 232, 1812, 1812, 232
+};
+
+static const uint16_t filter_5tap_16p_upscale[45] = {
+ 15936, 2496, 2496, 15936, 0,
+ 15992, 2128, 2832, 15896, 12,
+ 16056, 1760, 3140, 15876, 24,
+ 16120, 1404, 3420, 15876, 36,
+ 16188, 1060, 3652, 15908, 44,
+ 16248, 744, 3844, 15972, 44,
+ 16304, 460, 3980, 16072, 40,
+ 16348, 212, 4064, 16208, 24,
+ 0, 0, 4096, 0, 0,
+};
+
+static const uint16_t filter_5tap_16p_117[45] = {
+ 16056, 2372, 2372, 16056, 0,
+ 16052, 2124, 2600, 16076, 0,
+ 16060, 1868, 2808, 16120, 0,
+ 16080, 1612, 2992, 16180, 16376,
+ 16112, 1356, 3144, 16268, 16364,
+ 16144, 1108, 3268, 16376, 16344,
+ 16184, 872, 3356, 124, 16320,
+ 16220, 656, 3412, 276, 16292,
+ 16256, 456, 3428, 456, 16256,
+};
+
+static const uint16_t filter_5tap_16p_150[45] = {
+ 16368, 2064, 2064, 16368, 0,
+ 16316, 1924, 2204, 44, 16372,
+ 16280, 1772, 2328, 116, 16356,
+ 16256, 1616, 2440, 204, 16340,
+ 16240, 1456, 2536, 304, 16320,
+ 16232, 1296, 2612, 416, 16300,
+ 16232, 1132, 2664, 544, 16284,
+ 16240, 976, 2700, 680, 16264,
+ 16248, 824, 2708, 824, 16248,
+};
+
+static const uint16_t filter_5tap_16p_183[45] = {
+ 228, 1816, 1816, 228, 0,
+ 168, 1728, 1904, 300, 16372,
+ 116, 1632, 1988, 376, 16360,
+ 72, 1528, 2060, 460, 16348,
+ 36, 1424, 2120, 552, 16340,
+ 4, 1312, 2168, 652, 16336,
+ 16368, 1200, 2204, 752, 16332,
+ 16352, 1084, 2224, 860, 16332,
+ 16340, 972, 2232, 972, 16340,
+};
+
+static const uint16_t filter_6tap_16p_upscale[54] = {
+ 0, 0, 4092, 0, 0, 0,
+ 44, 16188, 4064, 228, 16324, 0,
+ 80, 16036, 3980, 492, 16256, 4,
+ 108, 15916, 3844, 788, 16184, 16,
+ 120, 15836, 3656, 1108, 16104, 28,
+ 128, 15792, 3420, 1448, 16024, 44,
+ 124, 15776, 3144, 1800, 15948, 64,
+ 112, 15792, 2836, 2152, 15880, 80,
+ 100, 15828, 2504, 2504, 15828, 100,
+};
+
+static const uint16_t filter_6tap_16p_117[54] = {
+ 16168, 476, 3568, 476, 16168, 0,
+ 16216, 280, 3540, 692, 16116, 8,
+ 16264, 104, 3472, 924, 16068, 16,
+ 16304, 16340, 3372, 1168, 16024, 28,
+ 16344, 16212, 3236, 1424, 15988, 36,
+ 16372, 16112, 3072, 1680, 15956, 44,
+ 12, 16036, 2880, 1936, 15940, 48,
+ 28, 15984, 2668, 2192, 15936, 48,
+ 40, 15952, 2436, 2436, 15952, 40,
+};
+
+static const uint16_t filter_6tap_16p_150[54] = {
+ 16148, 920, 2724, 920, 16148, 0,
+ 16156, 768, 2712, 1072, 16144, 0,
+ 16172, 628, 2684, 1232, 16148, 16380,
+ 16192, 492, 2632, 1388, 16160, 16372,
+ 16212, 368, 2564, 1548, 16180, 16364,
+ 16232, 256, 2480, 1704, 16212, 16352,
+ 16256, 156, 2380, 1856, 16256, 16336,
+ 16276, 64, 2268, 2004, 16308, 16320,
+ 16300, 16372, 2140, 2140, 16372, 16300,
+};
+
+static const uint16_t filter_6tap_16p_183[54] = {
+ 16296, 1032, 2196, 1032, 16296, 0,
+ 16284, 924, 2196, 1144, 16320, 16376,
+ 16272, 820, 2180, 1256, 16348, 16364,
+ 16268, 716, 2156, 1364, 16380, 16352,
+ 16264, 620, 2116, 1472, 36, 16340,
+ 16268, 524, 2068, 1576, 88, 16328,
+ 16272, 436, 2008, 1680, 144, 16316,
+ 16280, 352, 1940, 1772, 204, 16304,
+ 16292, 276, 1860, 1860, 276, 16292,
+};
+
+static const uint16_t filter_7tap_16p_upscale[63] = {
+ 176, 15760, 2488, 2488, 15760, 176, 0,
+ 160, 15812, 2152, 2816, 15728, 192, 16376,
+ 136, 15884, 1812, 3124, 15720, 196, 16368,
+ 108, 15964, 1468, 3400, 15740, 196, 16364,
+ 84, 16048, 1132, 3640, 15792, 180, 16360,
+ 56, 16140, 812, 3832, 15884, 152, 16360,
+ 32, 16228, 512, 3976, 16012, 116, 16364,
+ 12, 16308, 240, 4064, 16180, 60, 16372,
+ 0, 0, 0, 4096, 0, 0, 0,
+};
+
+static const uint16_t filter_7tap_16p_117[63] = {
+ 92, 15868, 2464, 2464, 15868, 92, 0,
+ 108, 15852, 2216, 2700, 15904, 72, 0,
+ 112, 15856, 1960, 2916, 15964, 44, 0,
+ 116, 15876, 1696, 3108, 16048, 8, 8,
+ 112, 15908, 1428, 3268, 16156, 16348, 12,
+ 104, 15952, 1168, 3400, 16288, 16300, 24,
+ 92, 16004, 916, 3496, 64, 16244, 36,
+ 80, 16064, 676, 3556, 248, 16184, 48,
+ 64, 16124, 452, 3576, 452, 16124, 64,
+};
+
+static const uint16_t filter_7tap_16p_150[63] = {
+ 16224, 16380, 2208, 2208, 16380, 16224, 0,
+ 16252, 16304, 2072, 2324, 84, 16196, 4,
+ 16276, 16240, 1924, 2432, 184, 16172, 8,
+ 16300, 16184, 1772, 2524, 296, 16144, 12,
+ 16324, 16144, 1616, 2600, 416, 16124, 12,
+ 16344, 16112, 1456, 2660, 548, 16104, 12,
+ 16360, 16092, 1296, 2704, 688, 16088, 12,
+ 16372, 16080, 1140, 2732, 832, 16080, 8,
+ 0, 16076, 984, 2740, 984, 16076, 0,
+};
+
+static const uint16_t filter_7tap_16p_183[63] = {
+ 16216, 324, 1884, 1884, 324, 16216, 0,
+ 16228, 248, 1804, 1960, 408, 16212, 16380,
+ 16240, 176, 1716, 2028, 496, 16208, 16376,
+ 16252, 112, 1624, 2084, 588, 16208, 16372,
+ 16264, 56, 1524, 2132, 684, 16212, 16364,
+ 16280, 4, 1424, 2168, 788, 16220, 16356,
+ 16292, 16344, 1320, 2196, 892, 16232, 16344,
+ 16308, 16308, 1212, 2212, 996, 16252, 16332,
+ 16320, 16276, 1104, 2216, 1104, 16276, 16320,
+};
+
+static const uint16_t filter_8tap_16p_upscale[72] = {
+ 0, 0, 0, 4096, 0, 0, 0, 0,
+ 16360, 76, 16172, 4064, 244, 16296, 24, 16380,
+ 16340, 136, 15996, 3980, 524, 16204, 56, 16380,
+ 16328, 188, 15860, 3844, 828, 16104, 92, 16372,
+ 16320, 224, 15760, 3656, 1156, 16008, 128, 16368,
+ 16320, 248, 15696, 3428, 1496, 15912, 160, 16360,
+ 16320, 256, 15668, 3156, 1844, 15828, 192, 16348,
+ 16324, 256, 15672, 2856, 2192, 15756, 220, 16340,
+ 16332, 244, 15704, 2532, 2532, 15704, 244, 16332,
+};
+
+static const uint16_t filter_8tap_16p_117[72] = {
+ 116, 16100, 428, 3564, 428, 16100, 116, 0,
+ 96, 16168, 220, 3548, 656, 16032, 136, 16376,
+ 76, 16236, 32, 3496, 904, 15968, 152, 16372,
+ 56, 16300, 16252, 3408, 1164, 15908, 164, 16368,
+ 36, 16360, 16116, 3284, 1428, 15856, 172, 16364,
+ 20, 28, 16000, 3124, 1700, 15820, 176, 16364,
+ 4, 76, 15912, 2940, 1972, 15800, 172, 16364,
+ 16380, 112, 15848, 2724, 2236, 15792, 160, 16364,
+ 16372, 140, 15812, 2488, 2488, 15812, 140, 16372,
+};
+
+static const uint16_t filter_8tap_16p_150[72] = {
+ 16380, 16020, 1032, 2756, 1032, 16020, 16380, 0,
+ 12, 16020, 876, 2744, 1184, 16032, 16364, 4,
+ 24, 16028, 728, 2716, 1344, 16052, 16340, 8,
+ 36, 16040, 584, 2668, 1500, 16080, 16316, 16,
+ 40, 16060, 448, 2608, 1652, 16120, 16288, 20,
+ 44, 16080, 320, 2528, 1804, 16168, 16260, 28,
+ 48, 16108, 204, 2436, 1948, 16232, 16228, 32,
+ 44, 16136, 100, 2328, 2084, 16304, 16200, 40,
+ 44, 16168, 4, 2212, 2212, 4, 16168, 44,
+};
+
+static const uint16_t filter_8tap_16p_183[72] = {
+ 16264, 16264, 1164, 2244, 1164, 16264, 16264, 0,
+ 16280, 16232, 1056, 2236, 1268, 16300, 16248, 0,
+ 16296, 16204, 948, 2220, 1372, 16348, 16232, 0,
+ 16312, 16184, 844, 2192, 1472, 12, 16216, 4,
+ 16328, 16172, 740, 2156, 1572, 72, 16200, 0,
+ 16340, 16160, 640, 2108, 1668, 136, 16188, 0,
+ 16352, 16156, 544, 2052, 1756, 204, 16176, 16380,
+ 16360, 16156, 452, 1988, 1840, 280, 16164, 16376,
+ 16368, 16160, 364, 1920, 1920, 364, 16160, 16368,
+};
+
+static const uint16_t filter_9tap_16p_upscale[81] = {
+ 16284, 296, 15660, 2572, 2572, 15660, 296, 16284, 0,
+ 16296, 272, 15712, 2228, 2896, 15632, 304, 16276, 4,
+ 16308, 240, 15788, 1876, 3192, 15632, 304, 16276, 4,
+ 16320, 204, 15876, 1520, 3452, 15664, 288, 16280, 8,
+ 16336, 164, 15976, 1176, 3676, 15732, 260, 16288, 12,
+ 16348, 120, 16080, 844, 3856, 15840, 216, 16300, 12,
+ 16364, 76, 16188, 532, 3988, 15984, 156, 16324, 8,
+ 16376, 36, 16288, 252, 4068, 16164, 84, 16352, 4,
+ 0, 0, 0, 0, 4096, 0, 0, 0, 0,
+};
+
+static const uint16_t filter_9tap_16p_117[81] = {
+ 16356, 172, 15776, 2504, 2504, 15776, 172, 16356, 0,
+ 16344, 200, 15756, 2252, 2740, 15816, 136, 16372, 16380,
+ 16336, 216, 15756, 1988, 2956, 15884, 92, 8, 16380,
+ 16332, 224, 15780, 1720, 3144, 15976, 40, 28, 16376,
+ 16328, 224, 15816, 1448, 3304, 16096, 16364, 52, 16372,
+ 16328, 216, 15868, 1180, 3432, 16240, 16296, 80, 16364,
+ 16332, 200, 15928, 916, 3524, 24, 16224, 108, 16356,
+ 16336, 184, 15996, 668, 3580, 220, 16148, 132, 16352,
+ 16344, 160, 16072, 436, 3600, 436, 16072, 160, 16344,
+};
+
+static const uint16_t filter_9tap_16p_150[81] = {
+ 84, 16128, 0, 2216, 2216, 0, 16128, 84, 0,
+ 80, 16160, 16296, 2088, 2332, 100, 16092, 84, 0,
+ 76, 16196, 16220, 1956, 2432, 208, 16064, 80, 0,
+ 72, 16232, 16152, 1812, 2524, 328, 16036, 76, 4,
+ 64, 16264, 16096, 1664, 2600, 460, 16012, 64, 8,
+ 56, 16300, 16052, 1508, 2656, 596, 15996, 52, 12,
+ 48, 16328, 16020, 1356, 2700, 740, 15984, 36, 20,
+ 40, 16356, 15996, 1196, 2728, 888, 15980, 20, 24,
+ 32, 0, 15984, 1044, 2736, 1044, 15984, 0, 32,
+};
+
+static const uint16_t filter_9tap_16p_183[81] = {
+ 16356, 16112, 388, 1952, 1952, 388, 16112, 16356, 0,
+ 16368, 16116, 304, 1876, 2020, 480, 16112, 16344, 4,
+ 16376, 16124, 224, 1792, 2080, 576, 16116, 16328, 8,
+ 0, 16136, 148, 1700, 2132, 672, 16124, 16312, 8,
+ 8, 16148, 80, 1604, 2176, 772, 16140, 16296, 12,
+ 12, 16164, 16, 1504, 2208, 876, 16156, 16276, 16,
+ 16, 16180, 16344, 1404, 2232, 980, 16184, 16256, 20,
+ 20, 16200, 16296, 1300, 2244, 1088, 16212, 16240, 20,
+ 20, 16220, 16252, 1196, 2252, 1196, 16252, 16220, 20,
+};
+
+static const uint16_t filter_10tap_16p_upscale[90] = {
+ 0, 0, 0, 0, 4096, 0, 0, 0, 0, 0,
+ 12, 16344, 88, 16160, 4068, 252, 16280, 44, 16368, 0,
+ 24, 16308, 168, 15976, 3988, 540, 16176, 92, 16348, 0,
+ 32, 16280, 236, 15828, 3852, 852, 16064, 140, 16328, 4,
+ 36, 16260, 284, 15720, 3672, 1184, 15956, 188, 16308, 8,
+ 36, 16244, 320, 15648, 3448, 1528, 15852, 236, 16288, 12,
+ 36, 16240, 336, 15612, 3184, 1880, 15764, 276, 16272, 20,
+ 32, 16240, 340, 15608, 2888, 2228, 15688, 308, 16256, 24,
+ 28, 16244, 332, 15636, 2568, 2568, 15636, 332, 16244, 28,
+};
+
+static const uint16_t filter_10tap_16p_117[90] = {
+ 16308, 196, 16048, 440, 3636, 440, 16048, 196, 16308, 0,
+ 16316, 164, 16132, 220, 3612, 676, 15972, 220, 16300, 0,
+ 16324, 132, 16212, 20, 3552, 932, 15900, 240, 16296, 4,
+ 16336, 100, 16292, 16232, 3456, 1192, 15836, 256, 16296, 4,
+ 16348, 68, 16364, 16084, 3324, 1464, 15784, 264, 16296, 8,
+ 16356, 36, 48, 15960, 3164, 1736, 15748, 260, 16304, 4,
+ 16364, 8, 108, 15864, 2972, 2008, 15728, 252, 16312, 4,
+ 16372, 16368, 160, 15792, 2756, 2268, 15724, 228, 16328, 0,
+ 16380, 16344, 200, 15748, 2520, 2520, 15748, 200, 16344, 16380,
+};
+
+static const uint16_t filter_10tap_16p_150[90] = {
+ 64, 0, 15956, 1048, 2716, 1048, 15956, 0, 64, 0,
+ 52, 24, 15952, 896, 2708, 1204, 15972, 16356, 72, 16380,
+ 44, 48, 15952, 748, 2684, 1360, 16000, 16320, 84, 16380,
+ 32, 68, 15964, 604, 2644, 1516, 16032, 16288, 92, 16376,
+ 24, 88, 15980, 464, 2588, 1668, 16080, 16248, 100, 16376,
+ 16, 100, 16004, 332, 2516, 1816, 16140, 16212, 108, 16376,
+ 8, 108, 16032, 212, 2428, 1956, 16208, 16172, 112, 16376,
+ 4, 116, 16060, 100, 2328, 2092, 16288, 16132, 116, 16380,
+ 0, 116, 16096, 16380, 2216, 2216, 16380, 16096, 116, 0,
+};
+
+static const uint16_t filter_10tap_16p_183[90] = {
+ 40, 16180, 16240, 1216, 2256, 1216, 16240, 16180, 40, 0,
+ 44, 16204, 16200, 1112, 2252, 1320, 16288, 16160, 36, 0,
+ 44, 16224, 16168, 1004, 2236, 1424, 16344, 16144, 28, 4,
+ 44, 16248, 16136, 900, 2208, 1524, 16, 16124, 24, 8,
+ 44, 16268, 16116, 796, 2176, 1620, 84, 16108, 12, 12,
+ 40, 16288, 16100, 692, 2132, 1712, 156, 16096, 4, 16,
+ 36, 16308, 16088, 592, 2080, 1796, 232, 16088, 16376, 20,
+ 32, 16328, 16080, 496, 2020, 1876, 316, 16080, 16360, 24,
+ 28, 16344, 16080, 404, 1952, 1952, 404, 16080, 16344, 28,
+};
+
+static const uint16_t filter_11tap_16p_upscale[99] = {
+ 60, 16216, 356, 15620, 2556, 2556, 15620, 356, 16216, 60, 0,
+ 52, 16224, 336, 15672, 2224, 2876, 15592, 368, 16208, 64, 16380,
+ 44, 16244, 304, 15744, 1876, 3176, 15596, 364, 16212, 64, 16376,
+ 36, 16264, 260, 15836, 1532, 3440, 15636, 340, 16220, 60, 16376,
+ 28, 16288, 212, 15940, 1188, 3668, 15708, 304, 16236, 56, 16376,
+ 20, 16312, 160, 16052, 856, 3848, 15820, 248, 16264, 48, 16376,
+ 12, 16336, 104, 16164, 544, 3984, 15968, 180, 16296, 36, 16376,
+ 4, 16360, 48, 16276, 256, 4068, 16160, 96, 16336, 16, 16380,
+ 0, 0, 0, 0, 0, 4096, 0, 0, 0, 0, 0,
+};
+
+static const uint16_t filter_11tap_16p_117[99] = {
+ 16380, 16332, 220, 15728, 2536, 2536, 15728, 220, 16332, 16380, 0,
+ 4, 16308, 256, 15704, 2280, 2768, 15772, 176, 16360, 16368, 0,
+ 12, 16292, 280, 15704, 2016, 2984, 15848, 120, 8, 16356, 0,
+ 20, 16276, 292, 15724, 1744, 3172, 15948, 56, 40, 16340, 4,
+ 24, 16268, 292, 15760, 1468, 3328, 16072, 16368, 80, 16324, 8,
+ 24, 16264, 288, 15816, 1196, 3456, 16224, 16288, 116, 16312, 12,
+ 24, 16264, 272, 15880, 932, 3548, 16, 16208, 152, 16296, 16,
+ 24, 16268, 248, 15956, 676, 3604, 216, 16120, 188, 16284, 20,
+ 24, 16276, 220, 16036, 436, 3624, 436, 16036, 220, 16276, 24,
+};
+
+static const uint16_t filter_11tap_16p_150[99] = {
+ 0, 144, 16072, 0, 2212, 2212, 0, 16072, 144, 0, 0,
+ 16376, 144, 16112, 16288, 2092, 2324, 104, 16036, 140, 8, 16380,
+ 16368, 144, 16152, 16204, 1960, 2424, 216, 16004, 132, 16, 16376,
+ 16364, 140, 16192, 16132, 1820, 2512, 340, 15976, 116, 28, 16376,
+ 16364, 132, 16232, 16072, 1676, 2584, 476, 15952, 100, 40, 16372,
+ 16360, 124, 16272, 16020, 1528, 2644, 612, 15936, 80, 52, 16368,
+ 16360, 116, 16312, 15980, 1372, 2684, 760, 15928, 56, 64, 16364,
+ 16360, 104, 16348, 15952, 1216, 2712, 908, 15928, 28, 76, 16364,
+ 16360, 92, 0, 15936, 1064, 2720, 1064, 15936, 0, 92, 16360,
+};
+
+static const uint16_t filter_11tap_16p_183[99] = {
+ 60, 16336, 16052, 412, 1948, 1948, 412, 16052, 16336, 60, 0,
+ 56, 16356, 16052, 324, 1876, 2016, 504, 16056, 16316, 64, 0,
+ 48, 16372, 16060, 240, 1796, 2072, 604, 16064, 16292, 64, 0,
+ 44, 4, 16068, 160, 1712, 2124, 700, 16080, 16272, 68, 0,
+ 40, 20, 16080, 84, 1620, 2164, 804, 16096, 16248, 68, 4,
+ 32, 32, 16096, 16, 1524, 2200, 908, 16124, 16224, 68, 4,
+ 28, 40, 16112, 16340, 1428, 2220, 1012, 16152, 16200, 64, 8,
+ 24, 52, 16132, 16284, 1328, 2236, 1120, 16192, 16176, 64, 12,
+ 16, 56, 16156, 16236, 1224, 2240, 1224, 16236, 16156, 56, 16,
+};
+
+static const uint16_t filter_12tap_16p_upscale[108] = {
+ 0, 0, 0, 0, 0, 4096, 0, 0, 0, 0, 0, 0,
+ 16376, 24, 16332, 100, 16156, 4068, 260, 16272, 56, 16356, 8, 0,
+ 16368, 44, 16284, 188, 15964, 3988, 548, 16156, 112, 16328, 20, 16380,
+ 16360, 64, 16248, 260, 15812, 3856, 864, 16040, 172, 16296, 32, 16380,
+ 16360, 76, 16216, 320, 15696, 3672, 1196, 15928, 228, 16268, 44, 16376,
+ 16356, 84, 16196, 360, 15620, 3448, 1540, 15820, 280, 16240, 56, 16372,
+ 16356, 88, 16184, 384, 15580, 3188, 1888, 15728, 324, 16216, 68, 16368,
+ 16360, 88, 16180, 392, 15576, 2892, 2236, 15652, 360, 16200, 80, 16364,
+ 16360, 84, 16188, 384, 15600, 2576, 2576, 15600, 384, 16188, 84, 16360,
+};
+
+static const uint16_t filter_12tap_16p_117[108] = {
+ 48, 16248, 240, 16028, 436, 3612, 436, 16028, 240, 16248, 48, 0,
+ 44, 16260, 208, 16116, 212, 3596, 676, 15944, 272, 16240, 48, 16380,
+ 40, 16276, 168, 16204, 12, 3540, 932, 15868, 296, 16240, 48, 16380,
+ 36, 16292, 128, 16288, 16220, 3452, 1196, 15800, 312, 16240, 44, 16380,
+ 28, 16308, 84, 16372, 16064, 3324, 1472, 15748, 316, 16244, 40, 16380,
+ 24, 16328, 44, 64, 15936, 3168, 1744, 15708, 312, 16256, 32, 16380,
+ 16, 16344, 8, 132, 15836, 2980, 2016, 15688, 300, 16272, 20, 0,
+ 12, 16364, 16356, 188, 15760, 2768, 2280, 15688, 272, 16296, 8, 4,
+ 8, 16380, 16324, 236, 15712, 2532, 2532, 15712, 236, 16324, 16380, 8,
+};
+
+static const uint16_t filter_12tap_16p_150[108] = {
+ 16340, 116, 0, 15916, 1076, 2724, 1076, 15916, 0, 116, 16340, 0,
+ 16340, 100, 32, 15908, 920, 2716, 1232, 15936, 16344, 128, 16340, 0,
+ 16344, 84, 64, 15908, 772, 2692, 1388, 15968, 16304, 140, 16344, 16380,
+ 16344, 68, 92, 15912, 624, 2652, 1540, 16008, 16264, 152, 16344, 16380,
+ 16348, 52, 112, 15928, 484, 2592, 1688, 16060, 16220, 160, 16348, 16380,
+ 16352, 40, 132, 15952, 348, 2520, 1836, 16124, 16176, 168, 16356, 16376,
+ 16356, 24, 148, 15980, 224, 2436, 1976, 16200, 16132, 172, 16364, 16372,
+ 16360, 12, 160, 16012, 108, 2336, 2104, 16288, 16088, 172, 16372, 16368,
+ 16364, 0, 168, 16048, 0, 2228, 2228, 0, 16048, 168, 0, 16364,
+};
+
+static const uint16_t filter_12tap_16p_183[108] = {
+ 36, 72, 16132, 16228, 1224, 2224, 1224, 16228, 16132, 72, 36, 0,
+ 28, 80, 16156, 16184, 1120, 2224, 1328, 16280, 16112, 64, 40, 16380,
+ 24, 84, 16180, 16144, 1016, 2208, 1428, 16340, 16092, 52, 48, 16380,
+ 16, 88, 16208, 16112, 912, 2188, 1524, 16, 16072, 36, 56, 16380,
+ 12, 92, 16232, 16084, 812, 2156, 1620, 88, 16056, 24, 64, 16380,
+ 8, 92, 16256, 16064, 708, 2116, 1708, 164, 16044, 4, 68, 16380,
+ 4, 88, 16280, 16048, 608, 2068, 1792, 244, 16036, 16372, 76, 16380,
+ 0, 88, 16308, 16036, 512, 2008, 1872, 328, 16032, 16352, 80, 16380,
+ 0, 84, 16328, 16032, 416, 1944, 1944, 416, 16032, 16328, 84, 0,
+};
+
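+/* Each 16-phase coefficient table above stores only NUM_PHASES / 2 + 1 = 9
+ * phase rows; the remaining phases are presumably reconstructed from the
+ * symmetry of the filter. The _117/_150/_183 suffixes appear to name the
+ * downscale ratio (x100) each set was tuned for, which matches the 1, 4/3
+ * and 5/3 selection thresholds used by the helpers below.
+ */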
+static const uint16_t *wbscl_get_filter_3tap_16p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dc_fixpt_one.value)
+ return filter_3tap_16p_upscale;
+ else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ return filter_3tap_16p_117;
+ else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ return filter_3tap_16p_150;
+ else
+ return filter_3tap_16p_183;
+}
+
+static const uint16_t *wbscl_get_filter_4tap_16p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dc_fixpt_one.value)
+ return filter_4tap_16p_upscale;
+ else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ return filter_4tap_16p_117;
+ else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ return filter_4tap_16p_150;
+ else
+ return filter_4tap_16p_183;
+}
+
+static const uint16_t *wbscl_get_filter_5tap_16p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dc_fixpt_one.value)
+ return filter_5tap_16p_upscale;
+ else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ return filter_5tap_16p_117;
+ else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ return filter_5tap_16p_150;
+ else
+ return filter_5tap_16p_183;
+}
+
+static const uint16_t *wbscl_get_filter_6tap_16p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dc_fixpt_one.value)
+ return filter_6tap_16p_upscale;
+ else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ return filter_6tap_16p_117;
+ else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ return filter_6tap_16p_150;
+ else
+ return filter_6tap_16p_183;
+}
+
+static const uint16_t *wbscl_get_filter_7tap_16p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dc_fixpt_one.value)
+ return filter_7tap_16p_upscale;
+ else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ return filter_7tap_16p_117;
+ else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ return filter_7tap_16p_150;
+ else
+ return filter_7tap_16p_183;
+}
+
+static const uint16_t *wbscl_get_filter_8tap_16p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dc_fixpt_one.value)
+ return filter_8tap_16p_upscale;
+ else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ return filter_8tap_16p_117;
+ else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ return filter_8tap_16p_150;
+ else
+ return filter_8tap_16p_183;
+}
+
+static const uint16_t *wbscl_get_filter_9tap_16p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dc_fixpt_one.value)
+ return filter_9tap_16p_upscale;
+ else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ return filter_9tap_16p_117;
+ else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ return filter_9tap_16p_150;
+ else
+ return filter_9tap_16p_183;
+}
+
+static const uint16_t *wbscl_get_filter_10tap_16p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dc_fixpt_one.value)
+ return filter_10tap_16p_upscale;
+ else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ return filter_10tap_16p_117;
+ else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ return filter_10tap_16p_150;
+ else
+ return filter_10tap_16p_183;
+}
+
+static const uint16_t *wbscl_get_filter_11tap_16p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dc_fixpt_one.value)
+ return filter_11tap_16p_upscale;
+ else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ return filter_11tap_16p_117;
+ else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ return filter_11tap_16p_150;
+ else
+ return filter_11tap_16p_183;
+}
+
+static const uint16_t *wbscl_get_filter_12tap_16p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dc_fixpt_one.value)
+ return filter_12tap_16p_upscale;
+ else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ return filter_12tap_16p_117;
+ else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ return filter_12tap_16p_150;
+ else
+ return filter_12tap_16p_183;
+}
+
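+/* Dispatch on the tap count, then on the scale ratio. For example, a 4-tap
+ * request with a 1.25:1 downscale ratio (>= 1 and < 4/3) resolves to
+ * filter_4tap_16p_117; taps == 1 means no filtering, so NULL is returned.
+ */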
+static const uint16_t *wbscl_get_filter_coeffs_16p(int taps, struct fixed31_32 ratio)
+{
+ if (taps == 12)
+ return wbscl_get_filter_12tap_16p(ratio);
+ else if (taps == 11)
+ return wbscl_get_filter_11tap_16p(ratio);
+ else if (taps == 10)
+ return wbscl_get_filter_10tap_16p(ratio);
+ else if (taps == 9)
+ return wbscl_get_filter_9tap_16p(ratio);
+ else if (taps == 8)
+ return wbscl_get_filter_8tap_16p(ratio);
+ else if (taps == 7)
+ return wbscl_get_filter_7tap_16p(ratio);
+ else if (taps == 6)
+ return wbscl_get_filter_6tap_16p(ratio);
+ else if (taps == 5)
+ return wbscl_get_filter_5tap_16p(ratio);
+ else if (taps == 4)
+ return wbscl_get_filter_4tap_16p(ratio);
+ else if (taps == 3)
+ return wbscl_get_filter_3tap_16p(ratio);
+ else if (taps == 2)
+ return get_filter_2tap_16p();
+ else if (taps == 1)
+ return NULL;
+ else {
+ /* should never happen, bug */
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+}
+
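+/* Walk NUM_PHASES / 2 + 1 phase rows of the selected table and write the taps
+ * into the coefficient RAM as even/odd pairs, padding the odd slot with 0 when
+ * the tap count is odd.
+ */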
+static void wbscl_set_scaler_filter(
+ struct dcn20_dwbc *dwbc20,
+ uint32_t taps,
+ enum wbscl_coef_filter_type_sel filter_type,
+ const uint16_t *filter)
+{
+ const int tap_pairs = (taps + 1) / 2;
+ int phase;
+ int pair;
+ uint16_t odd_coef, even_coef;
+
+ for (phase = 0; phase < (NUM_PHASES / 2 + 1); phase++) {
+ for (pair = 0; pair < tap_pairs; pair++) {
+ even_coef = filter[phase * taps + 2 * pair];
+ if ((pair * 2 + 1) < taps)
+ odd_coef = filter[phase * taps + 2 * pair + 1];
+ else
+ odd_coef = 0;
+
+ REG_SET_3(WBSCL_COEF_RAM_SELECT, 0,
+ WBSCL_COEF_RAM_TAP_PAIR_IDX, pair,
+ WBSCL_COEF_RAM_PHASE, phase,
+ WBSCL_COEF_RAM_FILTER_TYPE, filter_type);
+
+ REG_SET_4(WBSCL_COEF_RAM_TAP_DATA, 0,
+ /* Even tap coefficient (bits 1:0 fixed to 0) */
+ WBSCL_COEF_RAM_EVEN_TAP_COEF, even_coef,
+ /* Write/read control for even coefficient */
+ WBSCL_COEF_RAM_EVEN_TAP_COEF_EN, 1,
+ /* Odd tap coefficient (bits 1:0 fixed to 0) */
+ WBSCL_COEF_RAM_ODD_TAP_COEF, odd_coef,
+ /* Write/read control for odd coefficient */
+ WBSCL_COEF_RAM_ODD_TAP_COEF_EN, 1);
+ }
+ }
+}
+
+bool dwb_program_horz_scalar(struct dcn20_dwbc *dwbc20,
+ uint32_t src_width,
+ uint32_t dest_width,
+ struct scaling_taps num_taps)
+{
+ uint32_t h_ratio_luma = 1;
+ uint32_t h_taps_luma = num_taps.h_taps;
+ uint32_t h_taps_chroma = num_taps.h_taps_c;
+ int32_t h_init_phase_luma = 0;
+ int32_t h_init_phase_chroma = 0;
+ uint32_t h_init_phase_luma_int = 0;
+ uint32_t h_init_phase_luma_frac = 0;
+ uint32_t h_init_phase_chroma_int = 0;
+ uint32_t h_init_phase_chroma_frac = 0;
+ const uint16_t *filter_h = NULL;
+ const uint16_t *filter_h_c = NULL;
+
+ struct fixed31_32 tmp_h_init_phase_luma = dc_fixpt_from_int(0);
+ struct fixed31_32 tmp_h_init_phase_chroma = dc_fixpt_from_int(0);
+
+ /*Calculate ratio*/
+ struct fixed31_32 tmp_h_ratio_luma = dc_fixpt_from_fraction(
+ src_width, dest_width);
+
+ if (dc_fixpt_floor(tmp_h_ratio_luma) == 8)
+ h_ratio_luma = -1;
+ else
+ h_ratio_luma = dc_fixpt_u3d19(tmp_h_ratio_luma) << 5;
+
+ /*Program ratio*/
+ REG_UPDATE(WBSCL_HORZ_FILTER_SCALE_RATIO, WBSCL_H_SCALE_RATIO, h_ratio_luma);
+
+ /* Program taps*/
+ REG_UPDATE(WBSCL_TAP_CONTROL, WBSCL_H_NUM_OF_TAPS_Y_RGB, h_taps_luma - 1);
+ REG_UPDATE(WBSCL_TAP_CONTROL, WBSCL_H_NUM_OF_TAPS_CBCR, h_taps_chroma - 1);
+
+ /* Calculate phase*/
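+ /* In fixed point this computes:
+ * h_init_phase_luma = (ratio + h_taps_luma + 1) / 2 - h_taps_luma
+ * h_init_phase_chroma = (2 * ratio + h_taps_chroma + 1) / 2 - h_taps_chroma + 1/4
+ * where the extra 1/4 presumably accounts for the co-sited chroma sample position.
+ */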
+ tmp_h_init_phase_luma = dc_fixpt_add_int(tmp_h_ratio_luma, h_taps_luma + 1);
+ tmp_h_init_phase_luma = dc_fixpt_div_int(tmp_h_init_phase_luma, 2);
+ tmp_h_init_phase_luma = dc_fixpt_sub_int(tmp_h_init_phase_luma, h_taps_luma);
+
+ h_init_phase_luma = dc_fixpt_s4d19(tmp_h_init_phase_luma);
+ h_init_phase_luma_int = (h_init_phase_luma >> 19) & 0x1f;
+ h_init_phase_luma_frac = (h_init_phase_luma & 0x7ffff) << 5;
+
+ tmp_h_init_phase_chroma = dc_fixpt_mul_int(tmp_h_ratio_luma, 2);
+ tmp_h_init_phase_chroma = dc_fixpt_add_int(tmp_h_init_phase_chroma, h_taps_chroma + 1);
+ tmp_h_init_phase_chroma = dc_fixpt_div_int(tmp_h_init_phase_chroma, 2);
+ tmp_h_init_phase_chroma = dc_fixpt_sub_int(tmp_h_init_phase_chroma, h_taps_chroma);
+ tmp_h_init_phase_chroma = dc_fixpt_add(tmp_h_init_phase_chroma, dc_fixpt_from_fraction(1, 4));
+
+ h_init_phase_chroma = dc_fixpt_s4d19(tmp_h_init_phase_chroma);
+ h_init_phase_chroma_int = (h_init_phase_chroma >> 19) & 0x1f;
+ h_init_phase_chroma_frac = (h_init_phase_chroma & 0x7ffff) << 5;
+
+ /* Program phase*/
+ REG_UPDATE(WBSCL_HORZ_FILTER_INIT_Y_RGB, WBSCL_H_INIT_INT_Y_RGB, h_init_phase_luma_int);
+ REG_UPDATE(WBSCL_HORZ_FILTER_INIT_Y_RGB, WBSCL_H_INIT_FRAC_Y_RGB, h_init_phase_luma_frac);
+ REG_UPDATE(WBSCL_HORZ_FILTER_INIT_CBCR, WBSCL_H_INIT_INT_CBCR, h_init_phase_chroma_int);
+ REG_UPDATE(WBSCL_HORZ_FILTER_INIT_CBCR, WBSCL_H_INIT_FRAC_CBCR, h_init_phase_chroma_frac);
+
+ /* Program LUT coefficients*/
+ filter_h = wbscl_get_filter_coeffs_16p(
+ h_taps_luma, tmp_h_ratio_luma);
+ filter_h_c = wbscl_get_filter_coeffs_16p(
+ h_taps_chroma, dc_fixpt_from_int(h_ratio_luma * 2));
+
+ wbscl_set_scaler_filter(dwbc20, h_taps_luma,
+ WBSCL_COEF_LUMA_HORZ_FILTER, filter_h);
+
+ wbscl_set_scaler_filter(dwbc20, h_taps_chroma,
+ WBSCL_COEF_CHROMA_HORZ_FILTER, filter_h_c);
+
+ return true;
+}
+
+bool dwb_program_vert_scalar(struct dcn20_dwbc *dwbc20,
+ uint32_t src_height,
+ uint32_t dest_height,
+ struct scaling_taps num_taps,
+ enum dwb_subsample_position subsample_position)
+{
+ uint32_t v_ratio_luma = 1;
+ uint32_t v_taps_luma = num_taps.v_taps;
+ uint32_t v_taps_chroma = num_taps.v_taps_c;
+ int32_t v_init_phase_luma = 0;
+ int32_t v_init_phase_chroma = 0;
+ uint32_t v_init_phase_luma_int = 0;
+ uint32_t v_init_phase_luma_frac = 0;
+ uint32_t v_init_phase_chroma_int = 0;
+ uint32_t v_init_phase_chroma_frac = 0;
+
+ const uint16_t *filter_v = NULL;
+ const uint16_t *filter_v_c = NULL;
+
+ struct fixed31_32 tmp_v_init_phase_luma = dc_fixpt_from_int(0);
+ struct fixed31_32 tmp_v_init_phase_chroma = dc_fixpt_from_int(0);
+
+ /*Calculate ratio*/
+ struct fixed31_32 tmp_v_ratio_luma = dc_fixpt_from_fraction(
+ src_height, dest_height);
+
+ if (dc_fixpt_floor(tmp_v_ratio_luma) == 8)
+ v_ratio_luma = -1;
+ else
+ v_ratio_luma = dc_fixpt_u3d19(tmp_v_ratio_luma) << 5;
+
+ /*Program ratio*/
+ REG_UPDATE(WBSCL_VERT_FILTER_SCALE_RATIO, WBSCL_V_SCALE_RATIO, v_ratio_luma);
+
+ /* Program taps*/
+ REG_UPDATE(WBSCL_TAP_CONTROL, WBSCL_V_NUM_OF_TAPS_Y_RGB, v_taps_luma - 1);
+ REG_UPDATE(WBSCL_TAP_CONTROL, WBSCL_V_NUM_OF_TAPS_CBCR, v_taps_chroma - 1);
+
+ /* Calculate phase*/
+ tmp_v_init_phase_luma = dc_fixpt_add_int(tmp_v_ratio_luma, v_taps_luma + 1);
+ tmp_v_init_phase_luma = dc_fixpt_div_int(tmp_v_init_phase_luma, 2);
+ tmp_v_init_phase_luma = dc_fixpt_sub_int(tmp_v_init_phase_luma, v_taps_luma);
+
+ v_init_phase_luma = dc_fixpt_s4d19(tmp_v_init_phase_luma);
+ v_init_phase_luma_int = (v_init_phase_luma >> 19) & 0x1f;
+ v_init_phase_luma_frac = (v_init_phase_luma & 0x7ffff) << 5;
+
+ tmp_v_init_phase_chroma = dc_fixpt_mul_int(tmp_v_ratio_luma, 2);
+ tmp_v_init_phase_chroma = dc_fixpt_add_int(tmp_v_init_phase_chroma, v_taps_chroma + 1);
+ tmp_v_init_phase_chroma = dc_fixpt_div_int(tmp_v_init_phase_chroma, 2);
+ tmp_v_init_phase_chroma = dc_fixpt_sub_int(tmp_v_init_phase_chroma, v_taps_chroma);
+ if (subsample_position == DWB_COSITED_SUBSAMPLING)
+ tmp_v_init_phase_chroma = dc_fixpt_add(tmp_v_init_phase_chroma, dc_fixpt_from_fraction(1, 4));
+
+ v_init_phase_chroma = dc_fixpt_s4d19(tmp_v_init_phase_chroma);
+ v_init_phase_chroma_int = (v_init_phase_chroma >> 19) & 0x1f;
+ v_init_phase_chroma_frac = (v_init_phase_chroma & 0x7ffff) << 5;
+
+ /* Program phase*/
+ REG_UPDATE(WBSCL_VERT_FILTER_INIT_Y_RGB, WBSCL_V_INIT_INT_Y_RGB, v_init_phase_luma_int);
+ REG_UPDATE(WBSCL_VERT_FILTER_INIT_Y_RGB, WBSCL_V_INIT_FRAC_Y_RGB, v_init_phase_luma_frac);
+ REG_UPDATE(WBSCL_VERT_FILTER_INIT_CBCR, WBSCL_V_INIT_INT_CBCR, v_init_phase_chroma_int);
+ REG_UPDATE(WBSCL_VERT_FILTER_INIT_CBCR, WBSCL_V_INIT_FRAC_CBCR, v_init_phase_chroma_frac);
+
+ /* Program LUT coefficients*/
+ filter_v = wbscl_get_filter_coeffs_16p(
+ v_taps_luma, tmp_v_ratio_luma);
+ filter_v_c = wbscl_get_filter_coeffs_16p(
+ v_taps_chroma, dc_fixpt_from_int(v_ratio_luma * 2));
+ wbscl_set_scaler_filter(dwbc20, v_taps_luma,
+ WBSCL_COEF_LUMA_VERT_FILTER, filter_v);
+
+ wbscl_set_scaler_filter(dwbc20, v_taps_chroma,
+ WBSCL_COEF_CHROMA_VERT_FILTER, filter_v_c);
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
new file mode 100644
index 0000000000..6eebcb22e3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -0,0 +1,670 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "dcn20_hubbub.h"
+#include "reg_helper.h"
+#include "clk_mgr.h"
+
+#define REG(reg)\
+ hubbub1->regs->reg
+
+#define CTX \
+ hubbub1->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hubbub1->shifts->field_name, hubbub1->masks->field_name
+
+#ifdef NUM_VMID
+#undef NUM_VMID
+#endif
+#define NUM_VMID 16
+
+bool hubbub2_dcc_support_swizzle(
+ enum swizzle_mode_values swizzle,
+ unsigned int bytes_per_element,
+ enum segment_order *segment_order_horz,
+ enum segment_order *segment_order_vert)
+{
+ bool standard_swizzle = false;
+ bool display_swizzle = false;
+ bool render_swizzle = false;
+
+ switch (swizzle) {
+ case DC_SW_4KB_S:
+ case DC_SW_64KB_S:
+ case DC_SW_VAR_S:
+ case DC_SW_4KB_S_X:
+ case DC_SW_64KB_S_X:
+ case DC_SW_VAR_S_X:
+ standard_swizzle = true;
+ break;
+ case DC_SW_64KB_R_X:
+ render_swizzle = true;
+ break;
+ case DC_SW_4KB_D:
+ case DC_SW_64KB_D:
+ case DC_SW_VAR_D:
+ case DC_SW_4KB_D_X:
+ case DC_SW_64KB_D_X:
+ case DC_SW_VAR_D_X:
+ display_swizzle = true;
+ break;
+ default:
+ break;
+ }
+
+ if (standard_swizzle) {
+ if (bytes_per_element == 1) {
+ *segment_order_horz = segment_order__contiguous;
+ *segment_order_vert = segment_order__na;
+ return true;
+ }
+ if (bytes_per_element == 2) {
+ *segment_order_horz = segment_order__non_contiguous;
+ *segment_order_vert = segment_order__contiguous;
+ return true;
+ }
+ if (bytes_per_element == 4) {
+ *segment_order_horz = segment_order__non_contiguous;
+ *segment_order_vert = segment_order__contiguous;
+ return true;
+ }
+ if (bytes_per_element == 8) {
+ *segment_order_horz = segment_order__na;
+ *segment_order_vert = segment_order__contiguous;
+ return true;
+ }
+ }
+ if (render_swizzle) {
+ if (bytes_per_element == 2) {
+ *segment_order_horz = segment_order__contiguous;
+ *segment_order_vert = segment_order__contiguous;
+ return true;
+ }
+ if (bytes_per_element == 4) {
+ *segment_order_horz = segment_order__non_contiguous;
+ *segment_order_vert = segment_order__contiguous;
+ return true;
+ }
+ if (bytes_per_element == 8) {
+ *segment_order_horz = segment_order__contiguous;
+ *segment_order_vert = segment_order__non_contiguous;
+ return true;
+ }
+ }
+ if (display_swizzle && bytes_per_element == 8) {
+ *segment_order_horz = segment_order__contiguous;
+ *segment_order_vert = segment_order__non_contiguous;
+ return true;
+ }
+
+ return false;
+}
+
+bool hubbub2_dcc_support_pixel_format(
+ enum surface_pixel_format format,
+ unsigned int *bytes_per_element)
+{
+ /* DML: get_bytes_per_element */
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ *bytes_per_element = 2;
+ return true;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
+ case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
+ case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
+ *bytes_per_element = 4;
+ return true;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ *bytes_per_element = 8;
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void hubbub2_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
+ unsigned int bytes_per_element)
+{
+ /* copied from DML; might want to refactor to leverage DML directly */
+ /* DML : get_blk256_size */
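+ /* in every case blk256_width * blk256_height * bytes_per_element == 256,
+ * i.e. the dimensions of one 256-byte block
+ */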
+ if (bytes_per_element == 1) {
+ *blk256_width = 16;
+ *blk256_height = 16;
+ } else if (bytes_per_element == 2) {
+ *blk256_width = 16;
+ *blk256_height = 8;
+ } else if (bytes_per_element == 4) {
+ *blk256_width = 8;
+ *blk256_height = 8;
+ } else if (bytes_per_element == 8) {
+ *blk256_width = 8;
+ *blk256_height = 4;
+ }
+}
+
+static void hubbub2_det_request_size(
+ unsigned int detile_buf_size,
+ unsigned int height,
+ unsigned int width,
+ unsigned int bpe,
+ bool *req128_horz_wc,
+ bool *req128_vert_wc)
+{
+ unsigned int blk256_height = 0;
+ unsigned int blk256_width = 0;
+ unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
+
+ hubbub2_get_blk256_size(&blk256_width, &blk256_height, bpe);
+
+ swath_bytes_horz_wc = width * blk256_height * bpe;
+ swath_bytes_vert_wc = height * blk256_width * bpe;
+
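+ /* A full 256B request is used only if two swaths fit in the detile buffer.
+ * For example, with bpe = 4 (8x8 blk256) and a 1920 pixel wide surface,
+ * swath_bytes_horz_wc = 1920 * 8 * 4 = 61440; doubled that is 122880, which
+ * fits in the 164KB DCN2.0 detile buffer, so req128_horz_wc stays false.
+ */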
+ *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
+ false : /* full 256B request */
+ true; /* half 128b request */
+
+ *req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
+ false : /* full 256B request */
+ true; /* half 128b request */
+}
+
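+/* For example, assuming DCC is not disabled via debug options and the swizzle
+ * mode is supported, an ARGB8888 surface small enough that both doubled swaths
+ * fit in the detile buffer reports a 256/256 byte block cap with
+ * independent_64b_blks = false.
+ */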
+bool hubbub2_get_dcc_compression_cap(struct hubbub *hubbub,
+ const struct dc_dcc_surface_param *input,
+ struct dc_surface_dcc_cap *output)
+{
+ struct dc *dc = hubbub->ctx->dc;
+ /* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
+ enum dcc_control dcc_control;
+ unsigned int bpe;
+ enum segment_order segment_order_horz, segment_order_vert;
+ bool req128_horz_wc, req128_vert_wc;
+
+ memset(output, 0, sizeof(*output));
+
+ if (dc->debug.disable_dcc == DCC_DISABLE)
+ return false;
+
+ if (!hubbub->funcs->dcc_support_pixel_format(input->format,
+ &bpe))
+ return false;
+
+ if (!hubbub->funcs->dcc_support_swizzle(input->swizzle_mode, bpe,
+ &segment_order_horz, &segment_order_vert))
+ return false;
+
+ hubbub2_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size,
+ input->surface_size.height, input->surface_size.width,
+ bpe, &req128_horz_wc, &req128_vert_wc);
+
+ if (!req128_horz_wc && !req128_vert_wc) {
+ dcc_control = dcc_control__256_256_xxx;
+ } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
+ if (!req128_horz_wc)
+ dcc_control = dcc_control__256_256_xxx;
+ else if (segment_order_horz == segment_order__contiguous)
+ dcc_control = dcc_control__128_128_xxx;
+ else
+ dcc_control = dcc_control__256_64_64;
+ } else if (input->scan == SCAN_DIRECTION_VERTICAL) {
+ if (!req128_vert_wc)
+ dcc_control = dcc_control__256_256_xxx;
+ else if (segment_order_vert == segment_order__contiguous)
+ dcc_control = dcc_control__128_128_xxx;
+ else
+ dcc_control = dcc_control__256_64_64;
+ } else {
+ if ((req128_horz_wc &&
+ segment_order_horz == segment_order__non_contiguous) ||
+ (req128_vert_wc &&
+ segment_order_vert == segment_order__non_contiguous))
+ /* access_dir not known, must use most constraining */
+ dcc_control = dcc_control__256_64_64;
+ else
+ /* req128 is true for either horz or vert,
+ * but segment_order is contiguous
+ */
+ dcc_control = dcc_control__128_128_xxx;
+ }
+
+ /* Exception for 64KB_R_X */
+ if ((bpe == 2) && (input->swizzle_mode == DC_SW_64KB_R_X))
+ dcc_control = dcc_control__128_128_xxx;
+
+ if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
+ dcc_control != dcc_control__256_256_xxx)
+ return false;
+
+ switch (dcc_control) {
+ case dcc_control__256_256_xxx:
+ output->grph.rgb.max_uncompressed_blk_size = 256;
+ output->grph.rgb.max_compressed_blk_size = 256;
+ output->grph.rgb.independent_64b_blks = false;
+ break;
+ case dcc_control__128_128_xxx:
+ output->grph.rgb.max_uncompressed_blk_size = 128;
+ output->grph.rgb.max_compressed_blk_size = 128;
+ output->grph.rgb.independent_64b_blks = false;
+ break;
+ case dcc_control__256_64_64:
+ output->grph.rgb.max_uncompressed_blk_size = 256;
+ output->grph.rgb.max_compressed_blk_size = 64;
+ output->grph.rgb.independent_64b_blks = true;
+ break;
+ default:
+ ASSERT(false);
+ break;
+ }
+ output->capable = true;
+ output->const_color_support = true;
+
+ return true;
+}
+
+static enum dcn_hubbub_page_table_depth page_table_depth_to_hw(unsigned int page_table_depth)
+{
+ enum dcn_hubbub_page_table_depth depth = 0;
+
+ switch (page_table_depth) {
+ case 1:
+ depth = DCN_PAGE_TABLE_DEPTH_1_LEVEL;
+ break;
+ case 2:
+ depth = DCN_PAGE_TABLE_DEPTH_2_LEVEL;
+ break;
+ case 3:
+ depth = DCN_PAGE_TABLE_DEPTH_3_LEVEL;
+ break;
+ case 4:
+ depth = DCN_PAGE_TABLE_DEPTH_4_LEVEL;
+ break;
+ default:
+ ASSERT(false);
+ break;
+ }
+
+ return depth;
+}
+
+static enum dcn_hubbub_page_table_block_size page_table_block_size_to_hw(unsigned int page_table_block_size)
+{
+ enum dcn_hubbub_page_table_block_size block_size = 0;
+
+ switch (page_table_block_size) {
+ case 4096:
+ block_size = DCN_PAGE_TABLE_BLOCK_SIZE_4KB;
+ break;
+ case 65536:
+ block_size = DCN_PAGE_TABLE_BLOCK_SIZE_64KB;
+ break;
+ case 32768:
+ block_size = DCN_PAGE_TABLE_BLOCK_SIZE_32KB;
+ break;
+ default:
+ ASSERT(false);
+ block_size = page_table_block_size;
+ break;
+ }
+
+ return block_size;
+}
+
+void hubbub2_init_vm_ctx(struct hubbub *hubbub,
+ struct dcn_hubbub_virt_addr_config *va_config,
+ int vmid)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ struct dcn_vmid_page_table_config virt_config;
+
+ virt_config.page_table_start_addr = va_config->page_table_start_addr >> 12;
+ virt_config.page_table_end_addr = va_config->page_table_end_addr >> 12;
+ virt_config.depth = page_table_depth_to_hw(va_config->page_table_depth);
+ virt_config.block_size = page_table_block_size_to_hw(va_config->page_table_block_size);
+ virt_config.page_table_base_addr = va_config->page_table_base_addr;
+
+ dcn20_vmid_setup(&hubbub1->vmid[vmid], &virt_config);
+}
+
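+/* Program the FB/AGP apertures in 16MB units (bits [47:24] of the 48-bit
+ * address, hence the >> 24 shifts), split the 4KB-aligned fault default page
+ * across the MSB/LSB registers, set up VMID 0 from the GART range when one is
+ * provided, and report the number of VMIDs available to the caller.
+ */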
+int hubbub2_init_dchub_sys_ctx(struct hubbub *hubbub,
+ struct dcn_hubbub_phys_addr_config *pa_config)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ struct dcn_vmid_page_table_config phys_config;
+
+ REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
+ FB_BASE, pa_config->system_aperture.fb_base >> 24);
+ REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
+ FB_TOP, pa_config->system_aperture.fb_top >> 24);
+ REG_SET(DCN_VM_FB_OFFSET, 0,
+ FB_OFFSET, pa_config->system_aperture.fb_offset >> 24);
+ REG_SET(DCN_VM_AGP_BOT, 0,
+ AGP_BOT, pa_config->system_aperture.agp_bot >> 24);
+ REG_SET(DCN_VM_AGP_TOP, 0,
+ AGP_TOP, pa_config->system_aperture.agp_top >> 24);
+ REG_SET(DCN_VM_AGP_BASE, 0,
+ AGP_BASE, pa_config->system_aperture.agp_base >> 24);
+
+ REG_SET(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB, 0,
+ DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB, (pa_config->page_table_default_page_addr >> 44) & 0xF);
+ REG_SET(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB, 0,
+ DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB, (pa_config->page_table_default_page_addr >> 12) & 0xFFFFFFFF);
+
+ if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
+ phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
+ phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
+ phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;
+ phys_config.depth = 0;
+ phys_config.block_size = 0;
+ // Init VMID 0 based on PA config
+ dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
+ }
+
+ return NUM_VMID;
+}
+
+void hubbub2_update_dchub(struct hubbub *hubbub,
+ struct dchub_init_data *dh_data)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+
+ if (REG(DCN_VM_FB_LOCATION_TOP) == 0)
+ return;
+
+ switch (dh_data->fb_mode) {
+ case FRAME_BUFFER_MODE_ZFB_ONLY:
+ /* For the ZFB case, DCHUB FB BASE and TOP need to be put upside down to indicate ZFB mode */
+ REG_UPDATE(DCN_VM_FB_LOCATION_TOP,
+ FB_TOP, 0);
+
+ REG_UPDATE(DCN_VM_FB_LOCATION_BASE,
+ FB_BASE, 0xFFFFFF);
+
+ /*This field defines the 24 MSBs, bits [47:24] of the 48 bit AGP Base*/
+ REG_UPDATE(DCN_VM_AGP_BASE,
+ AGP_BASE, dh_data->zfb_phys_addr_base >> 24);
+
+ /*This field defines the bottom range of the AGP aperture and represents the 24*/
+ /*MSBs, bits [47:24] of the 48 address bits*/
+ REG_UPDATE(DCN_VM_AGP_BOT,
+ AGP_BOT, dh_data->zfb_mc_base_addr >> 24);
+
+ /*This field defines the top range of the AGP aperture and represents the 24*/
+ /*MSBs, bits [47:24] of the 48 address bits*/
+ REG_UPDATE(DCN_VM_AGP_TOP,
+ AGP_TOP, (dh_data->zfb_mc_base_addr +
+ dh_data->zfb_size_in_byte - 1) >> 24);
+ break;
+ case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL:
+ /*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/
+
+ /*This field defines the 24 MSBs, bits [47:24] of the 48 bit AGP Base*/
+ REG_UPDATE(DCN_VM_AGP_BASE,
+ AGP_BASE, dh_data->zfb_phys_addr_base >> 24);
+
+ /*This field defines the bottom range of the AGP aperture and represents the 24*/
+ /*MSBs, bits [47:24] of the 48 address bits*/
+ REG_UPDATE(DCN_VM_AGP_BOT,
+ AGP_BOT, dh_data->zfb_mc_base_addr >> 24);
+
+ /*This field defines the top range of the AGP aperture and represents the 24*/
+ /*MSBs, bits [47:24] of the 48 address bits*/
+ REG_UPDATE(DCN_VM_AGP_TOP,
+ AGP_TOP, (dh_data->zfb_mc_base_addr +
+ dh_data->zfb_size_in_byte - 1) >> 24);
+ break;
+ case FRAME_BUFFER_MODE_LOCAL_ONLY:
+ /*Should not touch FB LOCATION (should be done by VBIOS)*/
+
+ /*This field defines the 24 MSBs, bits [47:24] of the 48 bit AGP Base*/
+ REG_UPDATE(DCN_VM_AGP_BASE,
+ AGP_BASE, 0);
+
+ /*This field defines the bottom range of the AGP aperture and represents the 24*/
+ /*MSBs, bits [47:24] of the 48 address bits*/
+ REG_UPDATE(DCN_VM_AGP_BOT,
+ AGP_BOT, 0xFFFFFF);
+
+ /*This field defines the top range of the AGP aperture and represents the 24*/
+ /*MSBs, bits [47:24] of the 48 address bits*/
+ REG_UPDATE(DCN_VM_AGP_TOP,
+ AGP_TOP, 0);
+ break;
+ default:
+ break;
+ }
+
+ dh_data->dchub_initialzied = true;
+ dh_data->dchub_info_valid = false;
+}
+
+void hubbub2_wm_read_state(struct hubbub *hubbub,
+ struct dcn_hubbub_wm *wm)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+
+ struct dcn_hubbub_wm_set *s;
+
+ memset(wm, 0, sizeof(struct dcn_hubbub_wm));
+
+ s = &wm->sets[0];
+ s->wm_set = 0;
+ s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
+ if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A))
+ s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A);
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
+ s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
+ s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
+ }
+ s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);
+
+ s = &wm->sets[1];
+ s->wm_set = 1;
+ s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B);
+ if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B))
+ s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B);
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
+ s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B);
+ s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B);
+ }
+ s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);
+
+ s = &wm->sets[2];
+ s->wm_set = 2;
+ s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C);
+ if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C))
+ s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C);
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
+ s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C);
+ s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C);
+ }
+ s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);
+
+ s = &wm->sets[3];
+ s->wm_set = 3;
+ s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D);
+ if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D))
+ s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D);
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
+ s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D);
+ s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D);
+ }
+ s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
+}
+
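+/* For example, a 100000 kHz DCCG reference with DCHUBBUB_GLOBAL_TIMER_REFDIV == 2
+ * yields a 50000 kHz DCHUB reference, inside the expected 40000-60000 kHz window.
+ */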
+void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub,
+ unsigned int dccg_ref_freq_inKhz,
+ unsigned int *dchub_ref_freq_inKhz)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t ref_div = 0;
+ uint32_t ref_en = 0;
+
+ REG_GET_2(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, &ref_div,
+ DCHUBBUB_GLOBAL_TIMER_ENABLE, &ref_en);
+
+ if (ref_en) {
+ if (ref_div == 2)
+ *dchub_ref_freq_inKhz = dccg_ref_freq_inKhz / 2;
+ else
+ *dchub_ref_freq_inKhz = dccg_ref_freq_inKhz;
+
+ // DC hub reference frequency must be around 50 MHz, otherwise there may be
+ // overflow/underflow issues when doing HUBBUB programming
+ if (*dchub_ref_freq_inKhz < 40000 || *dchub_ref_freq_inKhz > 60000)
+ ASSERT_CRITICAL(false);
+
+ return;
+ } else {
+ *dchub_ref_freq_inKhz = dccg_ref_freq_inKhz;
+
+ // HUBBUB global timer must be enabled.
+ ASSERT_CRITICAL(false);
+ return;
+ }
+}
+
+static bool hubbub2_program_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ bool wm_pending = false;
+ /*
+ * Need to clamp to max of the register values (i.e. no wrap)
+ * for dcn1, all wm registers are 21-bit wide
+ */
+ if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ if (hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ /*
+ * There's a special case when going from p-state support to p-state unsupported:
+ * here we are going to LOWER watermarks to go to dummy p-state only, but this has
+ * to be done in prepare_bandwidth, not in optimize_bandwidth
+ */
+ if (hubbub1->base.ctx->dc->clk_mgr->clks.prev_p_state_change_support == true &&
+ hubbub1->base.ctx->dc->clk_mgr->clks.p_state_change_support == false)
+ safe_to_lower = true;
+
+ hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
+
+ REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
+ DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
+ REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 180);
+
+ hubbub->funcs->allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ return wm_pending;
+}
+
+void hubbub2_read_state(struct hubbub *hubbub, struct dcn_hubbub_state *hubbub_state)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+
+ if (REG(DCN_VM_FAULT_ADDR_MSB))
+ hubbub_state->vm_fault_addr_msb = REG_READ(DCN_VM_FAULT_ADDR_MSB);
+
+ if (REG(DCN_VM_FAULT_ADDR_LSB))
+ hubbub_state->vm_fault_addr_lsb = REG_READ(DCN_VM_FAULT_ADDR_LSB);
+
+ if (REG(DCN_VM_FAULT_CNTL))
+ REG_GET(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_MODE, &hubbub_state->vm_error_mode);
+
+ if (REG(DCN_VM_FAULT_STATUS)) {
+ REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_STATUS, &hubbub_state->vm_error_status);
+ REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, &hubbub_state->vm_error_vmid);
+ REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, &hubbub_state->vm_error_pipe);
+ }
+
+ if (REG(DCHUBBUB_TEST_DEBUG_INDEX) && REG(DCHUBBUB_TEST_DEBUG_DATA)) {
+ REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, 0x6);
+ hubbub_state->test_debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA);
+ }
+
+ if (REG(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL))
+ hubbub_state->watermark_change_cntl = REG_READ(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL);
+
+ if (REG(DCHUBBUB_ARB_DRAM_STATE_CNTL))
+ hubbub_state->dram_state_cntl = REG_READ(DCHUBBUB_ARB_DRAM_STATE_CNTL);
+}
+
+static const struct hubbub_funcs hubbub2_funcs = {
+ .update_dchub = hubbub2_update_dchub,
+ .init_dchub_sys_ctx = hubbub2_init_dchub_sys_ctx,
+ .init_vm_ctx = hubbub2_init_vm_ctx,
+ .dcc_support_swizzle = hubbub2_dcc_support_swizzle,
+ .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
+ .get_dcc_compression_cap = hubbub2_get_dcc_compression_cap,
+ .wm_read_state = hubbub2_wm_read_state,
+ .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
+ .program_watermarks = hubbub2_program_watermarks,
+ .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
+ .allow_self_refresh_control = hubbub1_allow_self_refresh_control,
+ .hubbub_read_state = hubbub2_read_state,
+};
+
+void hubbub2_construct(struct dcn20_hubbub *hubbub,
+ struct dc_context *ctx,
+ const struct dcn_hubbub_registers *hubbub_regs,
+ const struct dcn_hubbub_shift *hubbub_shift,
+ const struct dcn_hubbub_mask *hubbub_mask)
+{
+ hubbub->base.ctx = ctx;
+
+ hubbub->base.funcs = &hubbub2_funcs;
+
+ hubbub->regs = hubbub_regs;
+ hubbub->shifts = hubbub_shift;
+ hubbub->masks = hubbub_mask;
+
+ hubbub->debug_test_index_pstate = 0xB;
+ hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
new file mode 100644
index 0000000000..2f6146bf1d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HUBBUB_DCN20_H__
+#define __DC_HUBBUB_DCN20_H__
+
+#include "dcn10/dcn10_hubbub.h"
+#include "dcn20_vmid.h"
+
+#define TO_DCN20_HUBBUB(hubbub)\
+ container_of(hubbub, struct dcn20_hubbub, base)
+
+#define HUBBUB_REG_LIST_DCN20_COMMON()\
+ HUBBUB_REG_LIST_DCN_COMMON(), \
+ SR(DCHUBBUB_CRC_CTRL), \
+ SR(DCN_VM_FB_LOCATION_BASE),\
+ SR(DCN_VM_FB_LOCATION_TOP),\
+ SR(DCN_VM_FB_OFFSET),\
+ SR(DCN_VM_AGP_BOT),\
+ SR(DCN_VM_AGP_TOP),\
+ SR(DCN_VM_AGP_BASE),\
+ SR(DCN_VM_FAULT_ADDR_MSB), \
+ SR(DCN_VM_FAULT_ADDR_LSB), \
+ SR(DCN_VM_FAULT_CNTL), \
+ SR(DCN_VM_FAULT_STATUS)
+
+#define HUBBUB_REG_LIST_DCN20(id)\
+ HUBBUB_REG_LIST_DCN20_COMMON(), \
+ HUBBUB_SR_WATERMARK_REG_LIST(), \
+ HUBBUB_VM_REG_LIST(),\
+ SR(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB),\
+ SR(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB)
+
+
+#define HUBBUB_MASK_SH_LIST_DCN20(mask_sh)\
+ HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \
+ HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \
+ HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
+ HUBBUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE, mask_sh), \
+ HUBBUB_SF(DCN_VM_FB_LOCATION_TOP, FB_TOP, mask_sh), \
+ HUBBUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET, mask_sh), \
+ HUBBUB_SF(DCN_VM_AGP_BOT, AGP_BOT, mask_sh), \
+ HUBBUB_SF(DCN_VM_AGP_TOP, AGP_TOP, mask_sh), \
+ HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh), \
+ HUBBUB_SF(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB, DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB, mask_sh), \
+ HUBBUB_SF(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB, DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_ADDR_MSB, DCN_VM_FAULT_ADDR_MSB, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_ADDR_LSB, DCN_VM_FAULT_ADDR_LSB, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_CLEAR, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_MODE, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_INTERRUPT_ENABLE, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_RANGE_FAULT_DISABLE, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_PRQ_FAULT_DISABLE, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_STATUS, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \
+ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh)
+
+struct dcn20_hubbub {
+ struct hubbub base;
+ const struct dcn_hubbub_registers *regs;
+ const struct dcn_hubbub_shift *shifts;
+ const struct dcn_hubbub_mask *masks;
+ unsigned int debug_test_index_pstate;
+ struct dcn_watermark_set watermarks;
+ int num_vmid;
+ struct dcn20_vmid vmid[16];
+ unsigned int detile_buf_size;
+ unsigned int crb_size_segs;
+ unsigned int compbuf_size_segments;
+ unsigned int pixel_chunk_size;
+ unsigned int det0_size;
+ unsigned int det1_size;
+ unsigned int det2_size;
+ unsigned int det3_size;
+};
+
+void hubbub2_construct(struct dcn20_hubbub *hubbub,
+ struct dc_context *ctx,
+ const struct dcn_hubbub_registers *hubbub_regs,
+ const struct dcn_hubbub_shift *hubbub_shift,
+ const struct dcn_hubbub_mask *hubbub_mask);
+
+bool hubbub2_dcc_support_swizzle(
+ enum swizzle_mode_values swizzle,
+ unsigned int bytes_per_element,
+ enum segment_order *segment_order_horz,
+ enum segment_order *segment_order_vert);
+
+bool hubbub2_dcc_support_pixel_format(
+ enum surface_pixel_format format,
+ unsigned int *bytes_per_element);
+
+bool hubbub2_get_dcc_compression_cap(struct hubbub *hubbub,
+ const struct dc_dcc_surface_param *input,
+ struct dc_surface_dcc_cap *output);
+
+bool hubbub2_initialize_vmids(struct hubbub *hubbub,
+ const struct dc_dcc_surface_param *input,
+ struct dc_surface_dcc_cap *output);
+
+int hubbub2_init_dchub_sys_ctx(struct hubbub *hubbub,
+ struct dcn_hubbub_phys_addr_config *pa_config);
+void hubbub2_init_vm_ctx(struct hubbub *hubbub,
+ struct dcn_hubbub_virt_addr_config *va_config,
+ int vmid);
+void hubbub2_update_dchub(struct hubbub *hubbub,
+ struct dchub_init_data *dh_data);
+
+void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub,
+ unsigned int dccg_ref_freq_inKhz,
+ unsigned int *dchub_ref_freq_inKhz);
+
+void hubbub2_wm_read_state(struct hubbub *hubbub,
+ struct dcn_hubbub_wm *wm);
+
+void hubbub2_read_state(struct hubbub *hubbub,
+ struct dcn_hubbub_state *hubbub_state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
new file mode 100644
index 0000000000..aa252dc263
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -0,0 +1,1691 @@
+/*
+ * Copyright 2012-2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dcn20_hubp.h"
+
+#include "dm_services.h"
+#include "dce_calcs.h"
+#include "reg_helper.h"
+#include "basics/conversion.h"
+
+#define DC_LOGGER_INIT(logger)
+
+#define REG(reg)\
+ hubp2->hubp_regs->reg
+
+#define CTX \
+ hubp2->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hubp2->hubp_shift->field_name, hubp2->hubp_mask->field_name
+
+void hubp2_set_vm_system_aperture_settings(struct hubp *hubp,
+ struct vm_system_aperture_param *apt)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ PHYSICAL_ADDRESS_LOC mc_vm_apt_default;
+ PHYSICAL_ADDRESS_LOC mc_vm_apt_low;
+ PHYSICAL_ADDRESS_LOC mc_vm_apt_high;
+
+ // The format of default addr is 48:12 of the 48 bit addr
+ mc_vm_apt_default.quad_part = apt->sys_default.quad_part >> 12;
+
+ // The format of high/low is 48:18 of the 48 bit addr
+ mc_vm_apt_low.quad_part = apt->sys_low.quad_part >> 18;
+ mc_vm_apt_high.quad_part = apt->sys_high.quad_part >> 18;
+
+ REG_UPDATE_2(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+ DCN_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM, 1, /* 1 = system physical memory */
+ DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, mc_vm_apt_default.high_part);
+
+ REG_SET(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 0,
+ DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, mc_vm_apt_default.low_part);
+
+ REG_SET(DCN_VM_SYSTEM_APERTURE_LOW_ADDR, 0,
+ MC_VM_SYSTEM_APERTURE_LOW_ADDR, mc_vm_apt_low.quad_part);
+
+ REG_SET(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR, 0,
+ MC_VM_SYSTEM_APERTURE_HIGH_ADDR, mc_vm_apt_high.quad_part);
+
+ REG_SET_2(DCN_VM_MX_L1_TLB_CNTL, 0,
+ ENABLE_L1_TLB, 1,
+ SYSTEM_ACCESS_MODE, 0x3);
+}
+
+void hubp2_program_deadline(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ /* DLG - Per hubp */
+ REG_SET_2(BLANK_OFFSET_0, 0,
+ REFCYC_H_BLANK_END, dlg_attr->refcyc_h_blank_end,
+ DLG_V_BLANK_END, dlg_attr->dlg_vblank_end);
+
+ REG_SET(BLANK_OFFSET_1, 0,
+ MIN_DST_Y_NEXT_START, dlg_attr->min_dst_y_next_start);
+
+ REG_SET(DST_DIMENSIONS, 0,
+ REFCYC_PER_HTOTAL, dlg_attr->refcyc_per_htotal);
+
+ REG_SET_2(DST_AFTER_SCALER, 0,
+ REFCYC_X_AFTER_SCALER, dlg_attr->refcyc_x_after_scaler,
+ DST_Y_AFTER_SCALER, dlg_attr->dst_y_after_scaler);
+
+ REG_SET(REF_FREQ_TO_PIX_FREQ, 0,
+ REF_FREQ_TO_PIX_FREQ, dlg_attr->ref_freq_to_pix_freq);
+
+ /* DLG - Per luma/chroma */
+ REG_SET(VBLANK_PARAMETERS_1, 0,
+ REFCYC_PER_PTE_GROUP_VBLANK_L, dlg_attr->refcyc_per_pte_group_vblank_l);
+
+ if (REG(NOM_PARAMETERS_0))
+ REG_SET(NOM_PARAMETERS_0, 0,
+ DST_Y_PER_PTE_ROW_NOM_L, dlg_attr->dst_y_per_pte_row_nom_l);
+
+ if (REG(NOM_PARAMETERS_1))
+ REG_SET(NOM_PARAMETERS_1, 0,
+ REFCYC_PER_PTE_GROUP_NOM_L, dlg_attr->refcyc_per_pte_group_nom_l);
+
+ REG_SET(NOM_PARAMETERS_4, 0,
+ DST_Y_PER_META_ROW_NOM_L, dlg_attr->dst_y_per_meta_row_nom_l);
+
+ REG_SET(NOM_PARAMETERS_5, 0,
+ REFCYC_PER_META_CHUNK_NOM_L, dlg_attr->refcyc_per_meta_chunk_nom_l);
+
+ REG_SET_2(PER_LINE_DELIVERY, 0,
+ REFCYC_PER_LINE_DELIVERY_L, dlg_attr->refcyc_per_line_delivery_l,
+ REFCYC_PER_LINE_DELIVERY_C, dlg_attr->refcyc_per_line_delivery_c);
+
+ REG_SET(VBLANK_PARAMETERS_2, 0,
+ REFCYC_PER_PTE_GROUP_VBLANK_C, dlg_attr->refcyc_per_pte_group_vblank_c);
+
+ if (REG(NOM_PARAMETERS_2))
+ REG_SET(NOM_PARAMETERS_2, 0,
+ DST_Y_PER_PTE_ROW_NOM_C, dlg_attr->dst_y_per_pte_row_nom_c);
+
+ if (REG(NOM_PARAMETERS_3))
+ REG_SET(NOM_PARAMETERS_3, 0,
+ REFCYC_PER_PTE_GROUP_NOM_C, dlg_attr->refcyc_per_pte_group_nom_c);
+
+ REG_SET(NOM_PARAMETERS_6, 0,
+ DST_Y_PER_META_ROW_NOM_C, dlg_attr->dst_y_per_meta_row_nom_c);
+
+ REG_SET(NOM_PARAMETERS_7, 0,
+ REFCYC_PER_META_CHUNK_NOM_C, dlg_attr->refcyc_per_meta_chunk_nom_c);
+
+ /* TTU - per hubp */
+ REG_SET_2(DCN_TTU_QOS_WM, 0,
+ QoS_LEVEL_LOW_WM, ttu_attr->qos_level_low_wm,
+ QoS_LEVEL_HIGH_WM, ttu_attr->qos_level_high_wm);
+
+ /* TTU - per luma/chroma */
+ /* Assumed surf0 is luma and 1 is chroma */
+
+ REG_SET_3(DCN_SURF0_TTU_CNTL0, 0,
+ REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_l,
+ QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_l,
+ QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_l);
+
+ REG_SET_3(DCN_SURF1_TTU_CNTL0, 0,
+ REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_c,
+ QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_c,
+ QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_c);
+
+ REG_SET_3(DCN_CUR0_TTU_CNTL0, 0,
+ REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_cur0,
+ QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_cur0,
+ QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_cur0);
+
+ REG_SET(FLIP_PARAMETERS_1, 0,
+ REFCYC_PER_PTE_GROUP_FLIP_L, dlg_attr->refcyc_per_pte_group_flip_l);
+}
+
+void hubp2_vready_at_or_After_vsync(struct hubp *hubp,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
+{
+ uint32_t value = 0;
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ /* disable_dlg_test_mode Set 9th bit to 1 to disable "dv" mode */
+ REG_WRITE(HUBPREQ_DEBUG_DB, 1 << 8);
+ /*
+ * if (VSTARTUP_START - (VREADY_OFFSET + VUPDATE_WIDTH + VUPDATE_OFFSET) / htotal
+ *     <= OTG_V_BLANK_END)
+ *     Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 1
+ * else
+ *     Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 0
+ */
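+ /* Note: the division above is integer division; with typical offsets of a few
+ * lines and an htotal in the thousands the subtracted term truncates to 0, so
+ * the check effectively reduces to vstartup_start <= vblank_end.
+ */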
+ if (pipe_dest->htotal != 0) {
+ if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
+ + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
+ value = 1;
+ } else
+ value = 0;
+ }
+
+ REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value);
+}
+
+static void hubp2_program_requestor(struct hubp *hubp,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE(HUBPRET_CONTROL,
+ DET_BUF_PLANE1_BASE_ADDRESS, rq_regs->plane1_base_address);
+ REG_SET_4(DCN_EXPANSION_MODE, 0,
+ DRQ_EXPANSION_MODE, rq_regs->drq_expansion_mode,
+ PRQ_EXPANSION_MODE, rq_regs->prq_expansion_mode,
+ MRQ_EXPANSION_MODE, rq_regs->mrq_expansion_mode,
+ CRQ_EXPANSION_MODE, rq_regs->crq_expansion_mode);
+ REG_SET_8(DCHUBP_REQ_SIZE_CONFIG, 0,
+ CHUNK_SIZE, rq_regs->rq_regs_l.chunk_size,
+ MIN_CHUNK_SIZE, rq_regs->rq_regs_l.min_chunk_size,
+ META_CHUNK_SIZE, rq_regs->rq_regs_l.meta_chunk_size,
+ MIN_META_CHUNK_SIZE, rq_regs->rq_regs_l.min_meta_chunk_size,
+ DPTE_GROUP_SIZE, rq_regs->rq_regs_l.dpte_group_size,
+ MPTE_GROUP_SIZE, rq_regs->rq_regs_l.mpte_group_size,
+ SWATH_HEIGHT, rq_regs->rq_regs_l.swath_height,
+ PTE_ROW_HEIGHT_LINEAR, rq_regs->rq_regs_l.pte_row_height_linear);
+ REG_SET_8(DCHUBP_REQ_SIZE_CONFIG_C, 0,
+ CHUNK_SIZE_C, rq_regs->rq_regs_c.chunk_size,
+ MIN_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_chunk_size,
+ META_CHUNK_SIZE_C, rq_regs->rq_regs_c.meta_chunk_size,
+ MIN_META_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_meta_chunk_size,
+ DPTE_GROUP_SIZE_C, rq_regs->rq_regs_c.dpte_group_size,
+ MPTE_GROUP_SIZE_C, rq_regs->rq_regs_c.mpte_group_size,
+ SWATH_HEIGHT_C, rq_regs->rq_regs_c.swath_height,
+ PTE_ROW_HEIGHT_LINEAR_C, rq_regs->rq_regs_c.pte_row_height_linear);
+}
+
+static void hubp2_setup(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
+{
+ /* otg is locked when this func is called. Registers are double buffered,
+ * so disabling the requestors is not needed
+ */
+
+ hubp2_vready_at_or_After_vsync(hubp, pipe_dest);
+ hubp2_program_requestor(hubp, rq_regs);
+ hubp2_program_deadline(hubp, dlg_attr, ttu_attr);
+}
+
+void hubp2_setup_interdependent(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_SET_2(PREFETCH_SETTINGS, 0,
+ DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
+ VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
+
+ REG_SET(PREFETCH_SETTINGS_C, 0,
+ VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
+
+ REG_SET_2(VBLANK_PARAMETERS_0, 0,
+ DST_Y_PER_VM_VBLANK, dlg_attr->dst_y_per_vm_vblank,
+ DST_Y_PER_ROW_VBLANK, dlg_attr->dst_y_per_row_vblank);
+
+ REG_SET_2(FLIP_PARAMETERS_0, 0,
+ DST_Y_PER_VM_FLIP, dlg_attr->dst_y_per_vm_flip,
+ DST_Y_PER_ROW_FLIP, dlg_attr->dst_y_per_row_flip);
+
+ REG_SET(VBLANK_PARAMETERS_3, 0,
+ REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l);
+
+ REG_SET(VBLANK_PARAMETERS_4, 0,
+ REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c);
+
+ REG_SET(FLIP_PARAMETERS_2, 0,
+ REFCYC_PER_META_CHUNK_FLIP_L, dlg_attr->refcyc_per_meta_chunk_flip_l);
+
+ REG_SET_2(PER_LINE_DELIVERY_PRE, 0,
+ REFCYC_PER_LINE_DELIVERY_PRE_L, dlg_attr->refcyc_per_line_delivery_pre_l,
+ REFCYC_PER_LINE_DELIVERY_PRE_C, dlg_attr->refcyc_per_line_delivery_pre_c);
+
+ REG_SET(DCN_SURF0_TTU_CNTL1, 0,
+ REFCYC_PER_REQ_DELIVERY_PRE,
+ ttu_attr->refcyc_per_req_delivery_pre_l);
+ REG_SET(DCN_SURF1_TTU_CNTL1, 0,
+ REFCYC_PER_REQ_DELIVERY_PRE,
+ ttu_attr->refcyc_per_req_delivery_pre_c);
+ REG_SET(DCN_CUR0_TTU_CNTL1, 0,
+ REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur0);
+ REG_SET(DCN_CUR1_TTU_CNTL1, 0,
+ REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur1);
+
+ REG_SET_2(DCN_GLOBAL_TTU_CNTL, 0,
+ MIN_TTU_VBLANK, ttu_attr->min_ttu_vblank,
+ QoS_LEVEL_FLIP, ttu_attr->qos_level_flip);
+}
+
+/* In DCN2 (GFX10), the following GFX fields are deprecated. They can be set but they will not be used:
+ * NUM_BANKS
+ * NUM_SE
+ * NUM_RB_PER_SE
+ * RB_ALIGNED
+ * Other things can be defaulted, since they never change:
+ * PIPE_ALIGNED = 0
+ * META_LINEAR = 0
+ * In GFX10, only these apply:
+ * PIPE_INTERLEAVE
+ * NUM_PIPES
+ * MAX_COMPRESSED_FRAGS
+ * SW_MODE
+ */
+static void hubp2_program_tiling(
+ struct dcn20_hubp *hubp2,
+ const union dc_tiling_info *info,
+ const enum surface_pixel_format pixel_format)
+{
+ REG_UPDATE_3(DCSURF_ADDR_CONFIG,
+ NUM_PIPES, log_2(info->gfx9.num_pipes),
+ PIPE_INTERLEAVE, info->gfx9.pipe_interleave,
+ MAX_COMPRESSED_FRAGS, log_2(info->gfx9.max_compressed_frags));
+
+ REG_UPDATE_4(DCSURF_TILING_CONFIG,
+ SW_MODE, info->gfx9.swizzle,
+ META_LINEAR, 0,
+ RB_ALIGNED, 0,
+ PIPE_ALIGNED, 0);
+}
+
+void hubp2_program_size(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ const struct plane_size *plane_size,
+ struct dc_plane_dcc_param *dcc)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ uint32_t pitch, meta_pitch, pitch_c, meta_pitch_c;
+ bool use_pitch_c = false;
+
+ /* Program data and meta surface pitch (calculation from addrlib)
+ * 444 or 420 luma
+ */
+ use_pitch_c = format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
+ && format < SURFACE_PIXEL_FORMAT_SUBSAMPLE_END;
+ use_pitch_c = use_pitch_c
+ || (format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA);
+ if (use_pitch_c) {
+ ASSERT(plane_size->chroma_pitch != 0);
+ /* Chroma pitch zero can cause system hang! */
+
+ pitch = plane_size->surface_pitch - 1;
+ meta_pitch = dcc->meta_pitch - 1;
+ pitch_c = plane_size->chroma_pitch - 1;
+ meta_pitch_c = dcc->meta_pitch_c - 1;
+ } else {
+ pitch = plane_size->surface_pitch - 1;
+ meta_pitch = dcc->meta_pitch - 1;
+ pitch_c = 0;
+ meta_pitch_c = 0;
+ }
+
+ if (!dcc->enable) {
+ meta_pitch = 0;
+ meta_pitch_c = 0;
+ }
+
+ REG_UPDATE_2(DCSURF_SURFACE_PITCH,
+ PITCH, pitch, META_PITCH, meta_pitch);
+
+ use_pitch_c = format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN;
+ use_pitch_c = use_pitch_c
+ || (format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA);
+ if (use_pitch_c)
+ REG_UPDATE_2(DCSURF_SURFACE_PITCH_C,
+ PITCH_C, pitch_c, META_PITCH_C, meta_pitch_c);
+}
+
+void hubp2_program_rotation(
+ struct hubp *hubp,
+ enum dc_rotation_angle rotation,
+ bool horizontal_mirror)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ uint32_t mirror;
+
+ if (horizontal_mirror)
+ mirror = 1;
+ else
+ mirror = 0;
+
+ /* Program rotation angle and horz mirror - no mirror */
+ if (rotation == ROTATION_ANGLE_0)
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, 0,
+ H_MIRROR_EN, mirror);
+ else if (rotation == ROTATION_ANGLE_90)
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, 1,
+ H_MIRROR_EN, mirror);
+ else if (rotation == ROTATION_ANGLE_180)
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, 2,
+ H_MIRROR_EN, mirror);
+ else if (rotation == ROTATION_ANGLE_270)
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, 3,
+ H_MIRROR_EN, mirror);
+}
+
+void hubp2_dcc_control(struct hubp *hubp, bool enable,
+ enum hubp_ind_block_size independent_64b_blks)
+{
+ uint32_t dcc_en = enable ? 1 : 0;
+ uint32_t dcc_ind_64b_blk = independent_64b_blks ? 1 : 0;
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, dcc_en,
+ PRIMARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk,
+ SECONDARY_SURFACE_DCC_EN, dcc_en,
+ SECONDARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk);
+}
+
+void hubp2_program_pixel_format(
+ struct hubp *hubp,
+ enum surface_pixel_format format)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ uint32_t red_bar = 3;
+ uint32_t blue_bar = 2;
+
+ /* swap for ABGR format */
+ if (format == SURFACE_PIXEL_FORMAT_GRPH_ABGR8888
+ || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
+ || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS
+ || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616
+ || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) {
+ red_bar = 2;
+ blue_bar = 3;
+ }
+
+ REG_UPDATE_2(HUBPRET_CONTROL,
+ CROSSBAR_SRC_CB_B, blue_bar,
+ CROSSBAR_SRC_CR_R, red_bar);
+
+ /* Mapping is same as ipp programming (cnvc) */
+
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 1);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 3);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 8);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 10);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /*we use crossbar already*/
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:/*we use crossbar already*/
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 24);
+ break;
+
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 65);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 64);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 67);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 66);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 12);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 112);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 113);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 114);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 118);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 119);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE:
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 116,
+ ALPHA_PLANE_EN, 0);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 116,
+ ALPHA_PLANE_EN, 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ /* no apparent need to program the xbar in DCN 1.0 */
+}
+
+void hubp2_program_surface_config(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ struct plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror,
+ unsigned int compat_level)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ hubp2_dcc_control(hubp, dcc->enable, dcc->independent_64b_blks);
+ hubp2_program_tiling(hubp2, tiling_info, format);
+ hubp2_program_size(hubp, format, plane_size, dcc);
+ hubp2_program_rotation(hubp, rotation, horizontal_mirror);
+ hubp2_program_pixel_format(hubp, format);
+}
+
+enum cursor_lines_per_chunk hubp2_get_lines_per_chunk(
+ unsigned int cursor_width,
+ enum dc_cursor_color_format cursor_mode)
+{
+ enum cursor_lines_per_chunk line_per_chunk = CURSOR_LINE_PER_CHUNK_16;
+
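+	/* Wider cursors get fewer lines per chunk (presumably to keep each chunk
+	 * request a similar size); the 64-bit FP color modes halve the width
+	 * thresholds since each pixel is twice the size of the 32-bit modes.
+	 */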
+ if (cursor_mode == CURSOR_MODE_MONO)
+ line_per_chunk = CURSOR_LINE_PER_CHUNK_16;
+ else if (cursor_mode == CURSOR_MODE_COLOR_1BIT_AND ||
+ cursor_mode == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
+ cursor_mode == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
+ if (cursor_width >= 1 && cursor_width <= 32)
+ line_per_chunk = CURSOR_LINE_PER_CHUNK_16;
+ else if (cursor_width >= 33 && cursor_width <= 64)
+ line_per_chunk = CURSOR_LINE_PER_CHUNK_8;
+ else if (cursor_width >= 65 && cursor_width <= 128)
+ line_per_chunk = CURSOR_LINE_PER_CHUNK_4;
+ else if (cursor_width >= 129 && cursor_width <= 256)
+ line_per_chunk = CURSOR_LINE_PER_CHUNK_2;
+ } else if (cursor_mode == CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED ||
+ cursor_mode == CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED) {
+ if (cursor_width >= 1 && cursor_width <= 16)
+ line_per_chunk = CURSOR_LINE_PER_CHUNK_16;
+ else if (cursor_width >= 17 && cursor_width <= 32)
+ line_per_chunk = CURSOR_LINE_PER_CHUNK_8;
+ else if (cursor_width >= 33 && cursor_width <= 64)
+ line_per_chunk = CURSOR_LINE_PER_CHUNK_4;
+ else if (cursor_width >= 65 && cursor_width <= 128)
+ line_per_chunk = CURSOR_LINE_PER_CHUNK_2;
+ else if (cursor_width >= 129 && cursor_width <= 256)
+ line_per_chunk = CURSOR_LINE_PER_CHUNK_1;
+ }
+
+ return line_per_chunk;
+}
+
+void hubp2_cursor_set_attributes(
+ struct hubp *hubp,
+ const struct dc_cursor_attributes *attr)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ enum cursor_pitch hw_pitch = hubp1_get_cursor_pitch(attr->pitch);
+ enum cursor_lines_per_chunk lpc = hubp2_get_lines_per_chunk(
+ attr->width, attr->color_format);
+
+ hubp->curs_attr = *attr;
+
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
+ CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part);
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS,
+ CURSOR_SURFACE_ADDRESS, attr->address.low_part);
+
+ REG_UPDATE_2(CURSOR_SIZE,
+ CURSOR_WIDTH, attr->width,
+ CURSOR_HEIGHT, attr->height);
+
+ REG_UPDATE_4(CURSOR_CONTROL,
+ CURSOR_MODE, attr->color_format,
+ CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION,
+ CURSOR_PITCH, hw_pitch,
+ CURSOR_LINES_PER_CHUNK, lpc);
+
+ REG_SET_2(CURSOR_SETTINGS, 0,
+ /* no shift of the cursor HDL schedule */
+ CURSOR0_DST_Y_OFFSET, 0,
+ /* used to shift the cursor chunk request deadline */
+ CURSOR0_CHUNK_HDL_ADJUST, 3);
+
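+	/* Mirror the programmed cursor attributes in the hubp state for later
+	 * reference without a register readback.
+	 */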
+ hubp->att.SURFACE_ADDR_HIGH = attr->address.high_part;
+ hubp->att.SURFACE_ADDR = attr->address.low_part;
+ hubp->att.size.bits.width = attr->width;
+ hubp->att.size.bits.height = attr->height;
+ hubp->att.cur_ctl.bits.mode = attr->color_format;
+
+ hubp->cur_rect.w = attr->width;
+ hubp->cur_rect.h = attr->height;
+
+ hubp->att.cur_ctl.bits.pitch = hw_pitch;
+ hubp->att.cur_ctl.bits.line_per_chunk = lpc;
+ hubp->att.cur_ctl.bits.cur_2x_magnify = attr->attribute_flags.bits.ENABLE_MAGNIFICATION;
+ hubp->att.settings.bits.dst_y_offset = 0;
+ hubp->att.settings.bits.chunk_hdl_adjust = 3;
+}
+
+void hubp2_dmdata_set_attributes(
+ struct hubp *hubp,
+ const struct dc_dmdata_attributes *attr)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
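+	/* In HW mode the dmdata is fetched by HW from the address programmed
+	 * below; in SW mode it is loaded by SW into the hubp dmdata buffer.
+	 */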
+ if (attr->dmdata_mode == DMDATA_HW_MODE) {
+ /* set to HW mode */
+ REG_UPDATE(DMDATA_CNTL,
+ DMDATA_MODE, 1);
+
+ /* for DMDATA flip, need to use SURFACE_UPDATE_LOCK */
+ REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_UPDATE_LOCK, 1);
+
+ /* toggle DMDATA_UPDATED and set repeat and size */
+ REG_UPDATE(DMDATA_CNTL,
+ DMDATA_UPDATED, 0);
+ REG_UPDATE_3(DMDATA_CNTL,
+ DMDATA_UPDATED, 1,
+ DMDATA_REPEAT, attr->dmdata_repeat,
+ DMDATA_SIZE, attr->dmdata_size);
+
+ /* set DMDATA address */
+ REG_WRITE(DMDATA_ADDRESS_LOW, attr->address.low_part);
+ REG_UPDATE(DMDATA_ADDRESS_HIGH,
+ DMDATA_ADDRESS_HIGH, attr->address.high_part);
+
+ REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_UPDATE_LOCK, 0);
+
+ } else {
+ /* set to SW mode before loading data */
+ REG_SET(DMDATA_CNTL, 0,
+ DMDATA_MODE, 0);
+ /* toggle DMDATA_SW_UPDATED to start loading sequence */
+ REG_UPDATE(DMDATA_SW_CNTL,
+ DMDATA_SW_UPDATED, 0);
+ REG_UPDATE_3(DMDATA_SW_CNTL,
+ DMDATA_SW_UPDATED, 1,
+ DMDATA_SW_REPEAT, attr->dmdata_repeat,
+ DMDATA_SW_SIZE, attr->dmdata_size);
+ /* load data into hubp dmdata buffer */
+ hubp2_dmdata_load(hubp, attr->dmdata_size, attr->dmdata_sw_data);
+ }
+
+ /* Note that DL_DELTA must be programmed if we want to use TTU mode */
+ REG_SET_3(DMDATA_QOS_CNTL, 0,
+ DMDATA_QOS_MODE, attr->dmdata_qos_mode,
+ DMDATA_QOS_LEVEL, attr->dmdata_qos_level,
+ DMDATA_DL_DELTA, attr->dmdata_dl_delta);
+}
+
+void hubp2_dmdata_load(
+ struct hubp *hubp,
+ uint32_t dmdata_sw_size,
+ const uint32_t *dmdata_sw_data)
+{
+ int i;
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ /* load dmdata into HUBP buffer in SW mode */
+ for (i = 0; i < dmdata_sw_size / 4; i++)
+ REG_WRITE(DMDATA_SW_DATA, dmdata_sw_data[i]);
+}
+
+bool hubp2_dmdata_status_done(struct hubp *hubp)
+{
+ uint32_t status;
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_GET(DMDATA_STATUS, DMDATA_DONE, &status);
+ return (status == 1);
+}
+
+bool hubp2_program_surface_flip_and_addr(
+ struct hubp *hubp,
+ const struct dc_plane_address *address,
+ bool flip_immediate)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ //program flip type
+ REG_UPDATE(DCSURF_FLIP_CONTROL,
+ SURFACE_FLIP_TYPE, flip_immediate);
+
+ // Program VMID reg
+ REG_UPDATE(VMID_SETTINGS_0,
+ VMID, address->vmid);
+
+ /* HW automatically latches the rest of the address registers on the
+ * write to DCSURF_PRIMARY_SURFACE_ADDRESS if SURFACE_UPDATE_LOCK is
+ * not used.
+ *
+ * Program the high address first and then the low address; order matters!
+ */
+ switch (address->type) {
+ case PLN_ADDR_TYPE_GRAPHICS:
+ /* DCN1.0 does not support const color
+ * TODO: program DCHUBBUB_RET_PATH_DCC_CFGx_0/1
+ * based on address->grph.dcc_const_color
+ * x = 0, 2, 4, 6 for pipe 0, 1, 2, 3 for rgb and luma
+ * x = 1, 3, 5, 7 for pipe 0, 1, 2, 3 for chroma
+ */
+
+ if (address->grph.addr.quad_part == 0)
+ break;
+
+ REG_UPDATE_2(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_TMZ, address->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ, address->tmz_surface);
+
+ if (address->grph.meta_addr.quad_part != 0) {
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH,
+ address->grph.meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
+ PRIMARY_META_SURFACE_ADDRESS,
+ address->grph.meta_addr.low_part);
+ }
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH,
+ address->grph.addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
+ PRIMARY_SURFACE_ADDRESS,
+ address->grph.addr.low_part);
+ break;
+ case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
+ if (address->video_progressive.luma_addr.quad_part == 0
+ || address->video_progressive.chroma_addr.quad_part == 0)
+ break;
+
+ REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_TMZ, address->tmz_surface,
+ PRIMARY_SURFACE_TMZ_C, address->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ, address->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface);
+
+ if (address->video_progressive.luma_meta_addr.quad_part != 0) {
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH_C,
+ address->video_progressive.chroma_meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, 0,
+ PRIMARY_META_SURFACE_ADDRESS_C,
+ address->video_progressive.chroma_meta_addr.low_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH,
+ address->video_progressive.luma_meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
+ PRIMARY_META_SURFACE_ADDRESS,
+ address->video_progressive.luma_meta_addr.low_part);
+ }
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH_C,
+ address->video_progressive.chroma_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_C, 0,
+ PRIMARY_SURFACE_ADDRESS_C,
+ address->video_progressive.chroma_addr.low_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH,
+ address->video_progressive.luma_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
+ PRIMARY_SURFACE_ADDRESS,
+ address->video_progressive.luma_addr.low_part);
+ break;
+ case PLN_ADDR_TYPE_GRPH_STEREO:
+ if (address->grph_stereo.left_addr.quad_part == 0)
+ break;
+ if (address->grph_stereo.right_addr.quad_part == 0)
+ break;
+
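+ /* Stereo: the left eye maps to the primary surface registers and
+ * the right eye to the secondary surface registers.
+ */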
+ REG_UPDATE_8(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_TMZ, address->tmz_surface,
+ PRIMARY_SURFACE_TMZ_C, address->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ, address->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface,
+ SECONDARY_SURFACE_TMZ, address->tmz_surface,
+ SECONDARY_SURFACE_TMZ_C, address->tmz_surface,
+ SECONDARY_META_SURFACE_TMZ, address->tmz_surface,
+ SECONDARY_META_SURFACE_TMZ_C, address->tmz_surface);
+
+ if (address->grph_stereo.right_meta_addr.quad_part != 0) {
+
+ REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, 0,
+ SECONDARY_META_SURFACE_ADDRESS_HIGH,
+ address->grph_stereo.right_meta_addr.high_part);
+
+ REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS, 0,
+ SECONDARY_META_SURFACE_ADDRESS,
+ address->grph_stereo.right_meta_addr.low_part);
+ }
+ if (address->grph_stereo.left_meta_addr.quad_part != 0) {
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH,
+ address->grph_stereo.left_meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
+ PRIMARY_META_SURFACE_ADDRESS,
+ address->grph_stereo.left_meta_addr.low_part);
+ }
+
+ REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, 0,
+ SECONDARY_SURFACE_ADDRESS_HIGH,
+ address->grph_stereo.right_addr.high_part);
+
+ REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS, 0,
+ SECONDARY_SURFACE_ADDRESS,
+ address->grph_stereo.right_addr.low_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH,
+ address->grph_stereo.left_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
+ PRIMARY_SURFACE_ADDRESS,
+ address->grph_stereo.left_addr.low_part);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ hubp->request_address = *address;
+
+ return true;
+}
+
+void hubp2_enable_triplebuffer(
+ struct hubp *hubp,
+ bool enable)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ uint32_t triple_buffer_en = 0;
+ bool tri_buffer_en;
+
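+	/* Only write the register when the requested state differs from what
+	 * is currently programmed.
+	 */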
+ REG_GET(DCSURF_FLIP_CONTROL2, SURFACE_TRIPLE_BUFFER_ENABLE, &triple_buffer_en);
+ tri_buffer_en = (triple_buffer_en == 1);
+ if (tri_buffer_en != enable) {
+ REG_UPDATE(DCSURF_FLIP_CONTROL2,
+ SURFACE_TRIPLE_BUFFER_ENABLE, enable ? DC_TRIPLEBUFFER_ENABLE : DC_TRIPLEBUFFER_DISABLE);
+ }
+}
+
+bool hubp2_is_triplebuffer_enabled(
+ struct hubp *hubp)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ uint32_t triple_buffer_en = 0;
+
+ REG_GET(DCSURF_FLIP_CONTROL2, SURFACE_TRIPLE_BUFFER_ENABLE, &triple_buffer_en);
+
+ return (bool)triple_buffer_en;
+}
+
+void hubp2_set_flip_control_surface_gsl(struct hubp *hubp, bool enable)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE(DCSURF_FLIP_CONTROL2, SURFACE_GSL_ENABLE, enable ? 1 : 0);
+}
+
+bool hubp2_is_flip_pending(struct hubp *hubp)
+{
+ uint32_t flip_pending = 0;
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ struct dc_plane_address earliest_inuse_address;
+
+ if (hubp && hubp->power_gated)
+ return false;
+
+ REG_GET(DCSURF_FLIP_CONTROL,
+ SURFACE_FLIP_PENDING, &flip_pending);
+
+ REG_GET(DCSURF_SURFACE_EARLIEST_INUSE,
+ SURFACE_EARLIEST_INUSE_ADDRESS, &earliest_inuse_address.grph.addr.low_part);
+
+ REG_GET(DCSURF_SURFACE_EARLIEST_INUSE_HIGH,
+ SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, &earliest_inuse_address.grph.addr.high_part);
+
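+	/* A flip is still outstanding if HW reports it pending, or if HW has not
+	 * yet switched to the most recently requested surface address.
+	 */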
+ if (flip_pending)
+ return true;
+
+ if (earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part)
+ return true;
+
+ return false;
+}
+
+void hubp2_set_blank(struct hubp *hubp, bool blank)
+{
+ hubp2_set_blank_regs(hubp, blank);
+
+ if (blank) {
+ hubp->mpcc_id = 0xf;
+ hubp->opp_id = OPP_ID_INVALID;
+ }
+}
+
+void hubp2_set_blank_regs(struct hubp *hubp, bool blank)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ uint32_t blank_en = blank ? 1 : 0;
+
+ if (blank) {
+ uint32_t reg_val = REG_READ(DCHUBP_CNTL);
+
+ if (reg_val) {
+ /* init sequence workaround: in case HUBP is
+ * power gated, this wait would time out.
+ *
+ * we just wrote reg_val to non-0; if it stays 0,
+ * it means HUBP is gated
+ */
+ REG_WAIT(DCHUBP_CNTL,
+ HUBP_NO_OUTSTANDING_REQ, 1,
+ 1, 100000);
+ }
+ }
+
+ REG_UPDATE_2(DCHUBP_CNTL,
+ HUBP_BLANK_EN, blank_en,
+ HUBP_TTU_DISABLE, 0);
+}
+
+void hubp2_cursor_set_position(
+ struct hubp *hubp,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ int x_pos = pos->x - param->viewport.x;
+ int y_pos = pos->y - param->viewport.y;
+ int x_hotspot = pos->x_hotspot;
+ int y_hotspot = pos->y_hotspot;
+ int src_x_offset = x_pos - pos->x_hotspot;
+ int src_y_offset = y_pos - pos->y_hotspot;
+ int cursor_height = (int)hubp->curs_attr.height;
+ int cursor_width = (int)hubp->curs_attr.width;
+ uint32_t dst_x_offset;
+ uint32_t cur_en = pos->enable ? 1 : 0;
+
+ hubp->curs_pos = *pos;
+
+ /*
+ * Guard against cursor_set_position() being called with invalid
+ * attributes
+ *
+ * TODO: Look at combining cursor_set_position() and
+ * cursor_set_attributes() into cursor_update()
+ */
+ if (hubp->curs_attr.address.quad_part == 0)
+ return;
+
+ // Transform cursor width / height and hotspots for offset calculations
+ if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
+ swap(cursor_height, cursor_width);
+ swap(x_hotspot, y_hotspot);
+
+ if (param->rotation == ROTATION_ANGLE_90) {
+ // hotspot = (-y, x)
+ src_x_offset = x_pos - (cursor_width - x_hotspot);
+ src_y_offset = y_pos - y_hotspot;
+ } else if (param->rotation == ROTATION_ANGLE_270) {
+ // hotspot = (y, -x)
+ src_x_offset = x_pos - x_hotspot;
+ src_y_offset = y_pos - (cursor_height - y_hotspot);
+ }
+ } else if (param->rotation == ROTATION_ANGLE_180) {
+ // hotspot = (-x, -y)
+ if (!param->mirror)
+ src_x_offset = x_pos - (cursor_width - x_hotspot);
+
+ src_y_offset = y_pos - (cursor_height - y_hotspot);
+ }
+
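+	/* Convert the cursor X offset from pixel clock to refclk cycles and then
+	 * compensate for horizontal scaling; for example (illustrative numbers
+	 * only), a 300 pixel offset with a 24 MHz refclk and a 148.5 MHz pixel
+	 * clock gives 300 * 24000 / 148500 = 48 before the scale-ratio division.
+	 */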
+ dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
+ dst_x_offset *= param->ref_clk_khz;
+ dst_x_offset /= param->pixel_clk_khz;
+
+ ASSERT(param->h_scale_ratio.value);
+
+ if (param->h_scale_ratio.value)
+ dst_x_offset = dc_fixpt_floor(dc_fixpt_div(
+ dc_fixpt_from_int(dst_x_offset),
+ param->h_scale_ratio));
+
+ if (src_x_offset >= (int)param->viewport.width)
+ cur_en = 0; /* not visible beyond right edge*/
+
+ if (src_x_offset + cursor_width <= 0)
+ cur_en = 0; /* not visible beyond left edge*/
+
+ if (src_y_offset >= (int)param->viewport.height)
+ cur_en = 0; /* not visible beyond bottom edge*/
+
+ if (src_y_offset + cursor_height <= 0)
+ cur_en = 0; /* not visible beyond top edge*/
+
+ if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
+ hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
+
+ REG_UPDATE(CURSOR_CONTROL,
+ CURSOR_ENABLE, cur_en);
+
+ REG_SET_2(CURSOR_POSITION, 0,
+ CURSOR_X_POSITION, pos->x,
+ CURSOR_Y_POSITION, pos->y);
+
+ REG_SET_2(CURSOR_HOT_SPOT, 0,
+ CURSOR_HOT_SPOT_X, pos->x_hotspot,
+ CURSOR_HOT_SPOT_Y, pos->y_hotspot);
+
+ REG_SET(CURSOR_DST_OFFSET, 0,
+ CURSOR_DST_X_OFFSET, dst_x_offset);
+ /* TODO Handle surface pixel formats other than 4:4:4 */
+ /* Cursor Position Register Config */
+ hubp->pos.cur_ctl.bits.cur_enable = cur_en;
+ hubp->pos.position.bits.x_pos = pos->x;
+ hubp->pos.position.bits.y_pos = pos->y;
+ hubp->pos.hot_spot.bits.x_hot = pos->x_hotspot;
+ hubp->pos.hot_spot.bits.y_hot = pos->y_hotspot;
+ hubp->pos.dst_offset.bits.dst_x_offset = dst_x_offset;
+ /* Cursor Rectangle Cache
+ * Cursor bitmaps have different hotspot values
+ * There's a possibility that the above logic returns negative offsets,
+ * so we clamp them to 0
+ */
+ if (src_x_offset < 0)
+ src_x_offset = 0;
+ if (src_y_offset < 0)
+ src_y_offset = 0;
+ /* Save the necessary cursor info: x, y position here; w and h are saved in the attribute func. */
+ if (param->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
+ param->rotation != ROTATION_ANGLE_0) {
+ hubp->cur_rect.x = 0;
+ hubp->cur_rect.y = 0;
+ hubp->cur_rect.w = param->stream->timing.h_addressable;
+ hubp->cur_rect.h = param->stream->timing.v_addressable;
+ } else {
+ hubp->cur_rect.x = src_x_offset + param->viewport.x;
+ hubp->cur_rect.y = src_y_offset + param->viewport.y;
+ }
+}
+
+void hubp2_clk_cntl(struct hubp *hubp, bool enable)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ uint32_t clk_enable = enable ? 1 : 0;
+
+ REG_UPDATE(HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, clk_enable);
+}
+
+void hubp2_vtg_sel(struct hubp *hubp, uint32_t otg_inst)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE(DCHUBP_CNTL, HUBP_VTG_SEL, otg_inst);
+}
+
+void hubp2_clear_underflow(struct hubp *hubp)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE(DCHUBP_CNTL, HUBP_UNDERFLOW_CLEAR, 1);
+}
+
+void hubp2_read_state_common(struct hubp *hubp)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ struct dcn_hubp_state *s = &hubp2->state;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr = &s->dlg_attr;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr = &s->ttu_attr;
+ struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
+
+ /* Requester */
+ REG_GET(HUBPRET_CONTROL,
+ DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs->plane1_base_address);
+ REG_GET_4(DCN_EXPANSION_MODE,
+ DRQ_EXPANSION_MODE, &rq_regs->drq_expansion_mode,
+ PRQ_EXPANSION_MODE, &rq_regs->prq_expansion_mode,
+ MRQ_EXPANSION_MODE, &rq_regs->mrq_expansion_mode,
+ CRQ_EXPANSION_MODE, &rq_regs->crq_expansion_mode);
+
+ REG_GET(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ MC_VM_SYSTEM_APERTURE_HIGH_ADDR, &rq_regs->aperture_high_addr);
+
+ REG_GET(DCN_VM_SYSTEM_APERTURE_LOW_ADDR,
+ MC_VM_SYSTEM_APERTURE_LOW_ADDR, &rq_regs->aperture_low_addr);
+
+ /* DLG - Per hubp */
+ REG_GET_2(BLANK_OFFSET_0,
+ REFCYC_H_BLANK_END, &dlg_attr->refcyc_h_blank_end,
+ DLG_V_BLANK_END, &dlg_attr->dlg_vblank_end);
+
+ REG_GET(BLANK_OFFSET_1,
+ MIN_DST_Y_NEXT_START, &dlg_attr->min_dst_y_next_start);
+
+ REG_GET(DST_DIMENSIONS,
+ REFCYC_PER_HTOTAL, &dlg_attr->refcyc_per_htotal);
+
+ REG_GET_2(DST_AFTER_SCALER,
+ REFCYC_X_AFTER_SCALER, &dlg_attr->refcyc_x_after_scaler,
+ DST_Y_AFTER_SCALER, &dlg_attr->dst_y_after_scaler);
+
+ if (REG(PREFETCH_SETTINS))
+ REG_GET_2(PREFETCH_SETTINS,
+ DST_Y_PREFETCH, &dlg_attr->dst_y_prefetch,
+ VRATIO_PREFETCH, &dlg_attr->vratio_prefetch);
+ else
+ REG_GET_2(PREFETCH_SETTINGS,
+ DST_Y_PREFETCH, &dlg_attr->dst_y_prefetch,
+ VRATIO_PREFETCH, &dlg_attr->vratio_prefetch);
+
+ REG_GET_2(VBLANK_PARAMETERS_0,
+ DST_Y_PER_VM_VBLANK, &dlg_attr->dst_y_per_vm_vblank,
+ DST_Y_PER_ROW_VBLANK, &dlg_attr->dst_y_per_row_vblank);
+
+ REG_GET(REF_FREQ_TO_PIX_FREQ,
+ REF_FREQ_TO_PIX_FREQ, &dlg_attr->ref_freq_to_pix_freq);
+
+ /* DLG - Per luma/chroma */
+ REG_GET(VBLANK_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr->refcyc_per_pte_group_vblank_l);
+
+ REG_GET(VBLANK_PARAMETERS_3,
+ REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr->refcyc_per_meta_chunk_vblank_l);
+
+ if (REG(NOM_PARAMETERS_0))
+ REG_GET(NOM_PARAMETERS_0,
+ DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr->dst_y_per_pte_row_nom_l);
+
+ if (REG(NOM_PARAMETERS_1))
+ REG_GET(NOM_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr->refcyc_per_pte_group_nom_l);
+
+ REG_GET(NOM_PARAMETERS_4,
+ DST_Y_PER_META_ROW_NOM_L, &dlg_attr->dst_y_per_meta_row_nom_l);
+
+ REG_GET(NOM_PARAMETERS_5,
+ REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr->refcyc_per_meta_chunk_nom_l);
+
+ REG_GET_2(PER_LINE_DELIVERY_PRE,
+ REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr->refcyc_per_line_delivery_pre_l,
+ REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr->refcyc_per_line_delivery_pre_c);
+
+ REG_GET_2(PER_LINE_DELIVERY,
+ REFCYC_PER_LINE_DELIVERY_L, &dlg_attr->refcyc_per_line_delivery_l,
+ REFCYC_PER_LINE_DELIVERY_C, &dlg_attr->refcyc_per_line_delivery_c);
+
+ if (REG(PREFETCH_SETTINS_C))
+ REG_GET(PREFETCH_SETTINS_C,
+ VRATIO_PREFETCH_C, &dlg_attr->vratio_prefetch_c);
+ else
+ REG_GET(PREFETCH_SETTINGS_C,
+ VRATIO_PREFETCH_C, &dlg_attr->vratio_prefetch_c);
+
+ REG_GET(VBLANK_PARAMETERS_2,
+ REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr->refcyc_per_pte_group_vblank_c);
+
+ REG_GET(VBLANK_PARAMETERS_4,
+ REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr->refcyc_per_meta_chunk_vblank_c);
+
+ if (REG(NOM_PARAMETERS_2))
+ REG_GET(NOM_PARAMETERS_2,
+ DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr->dst_y_per_pte_row_nom_c);
+
+ if (REG(NOM_PARAMETERS_3))
+ REG_GET(NOM_PARAMETERS_3,
+ REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr->refcyc_per_pte_group_nom_c);
+
+ REG_GET(NOM_PARAMETERS_6,
+ DST_Y_PER_META_ROW_NOM_C, &dlg_attr->dst_y_per_meta_row_nom_c);
+
+ REG_GET(NOM_PARAMETERS_7,
+ REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr->refcyc_per_meta_chunk_nom_c);
+
+ /* TTU - per hubp */
+ REG_GET_2(DCN_TTU_QOS_WM,
+ QoS_LEVEL_LOW_WM, &ttu_attr->qos_level_low_wm,
+ QoS_LEVEL_HIGH_WM, &ttu_attr->qos_level_high_wm);
+
+ REG_GET_2(DCN_GLOBAL_TTU_CNTL,
+ MIN_TTU_VBLANK, &ttu_attr->min_ttu_vblank,
+ QoS_LEVEL_FLIP, &ttu_attr->qos_level_flip);
+
+ /* TTU - per luma/chroma */
+ /* Assumed surf0 is luma and 1 is chroma */
+
+ REG_GET_3(DCN_SURF0_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr->refcyc_per_req_delivery_l,
+ QoS_LEVEL_FIXED, &ttu_attr->qos_level_fixed_l,
+ QoS_RAMP_DISABLE, &ttu_attr->qos_ramp_disable_l);
+
+ REG_GET(DCN_SURF0_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE,
+ &ttu_attr->refcyc_per_req_delivery_pre_l);
+
+ REG_GET_3(DCN_SURF1_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr->refcyc_per_req_delivery_c,
+ QoS_LEVEL_FIXED, &ttu_attr->qos_level_fixed_c,
+ QoS_RAMP_DISABLE, &ttu_attr->qos_ramp_disable_c);
+
+ REG_GET(DCN_SURF1_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE,
+ &ttu_attr->refcyc_per_req_delivery_pre_c);
+
+ /* Rest of hubp */
+ REG_GET(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, &s->pixel_format);
+
+ REG_GET(DCSURF_SURFACE_EARLIEST_INUSE_HIGH,
+ SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, &s->inuse_addr_hi);
+
+ REG_GET(DCSURF_SURFACE_EARLIEST_INUSE,
+ SURFACE_EARLIEST_INUSE_ADDRESS, &s->inuse_addr_lo);
+
+ REG_GET_2(DCSURF_PRI_VIEWPORT_DIMENSION,
+ PRI_VIEWPORT_WIDTH, &s->viewport_width,
+ PRI_VIEWPORT_HEIGHT, &s->viewport_height);
+
+ REG_GET_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, &s->rotation_angle,
+ H_MIRROR_EN, &s->h_mirror_en);
+
+ REG_GET(DCSURF_TILING_CONFIG,
+ SW_MODE, &s->sw_mode);
+
+ REG_GET(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, &s->dcc_en);
+
+ REG_GET_3(DCHUBP_CNTL,
+ HUBP_BLANK_EN, &s->blank_en,
+ HUBP_TTU_DISABLE, &s->ttu_disable,
+ HUBP_UNDERFLOW_STATUS, &s->underflow_status);
+
+ REG_GET(HUBP_CLK_CNTL,
+ HUBP_CLOCK_ENABLE, &s->clock_en);
+
+ REG_GET(DCN_GLOBAL_TTU_CNTL,
+ MIN_TTU_VBLANK, &s->min_ttu_vblank);
+
+ REG_GET_2(DCN_TTU_QOS_WM,
+ QoS_LEVEL_LOW_WM, &s->qos_level_low_wm,
+ QoS_LEVEL_HIGH_WM, &s->qos_level_high_wm);
+
+ REG_GET(DCSURF_PRIMARY_SURFACE_ADDRESS,
+ PRIMARY_SURFACE_ADDRESS, &s->primary_surface_addr_lo);
+
+ REG_GET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH,
+ PRIMARY_SURFACE_ADDRESS, &s->primary_surface_addr_hi);
+
+ REG_GET(DCSURF_PRIMARY_META_SURFACE_ADDRESS,
+ PRIMARY_META_SURFACE_ADDRESS, &s->primary_meta_addr_lo);
+
+ REG_GET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH,
+ PRIMARY_META_SURFACE_ADDRESS, &s->primary_meta_addr_hi);
+}
+
+void hubp2_read_state(struct hubp *hubp)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ struct dcn_hubp_state *s = &hubp2->state;
+ struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
+
+ hubp2_read_state_common(hubp);
+
+ REG_GET_8(DCHUBP_REQ_SIZE_CONFIG,
+ CHUNK_SIZE, &rq_regs->rq_regs_l.chunk_size,
+ MIN_CHUNK_SIZE, &rq_regs->rq_regs_l.min_chunk_size,
+ META_CHUNK_SIZE, &rq_regs->rq_regs_l.meta_chunk_size,
+ MIN_META_CHUNK_SIZE, &rq_regs->rq_regs_l.min_meta_chunk_size,
+ DPTE_GROUP_SIZE, &rq_regs->rq_regs_l.dpte_group_size,
+ MPTE_GROUP_SIZE, &rq_regs->rq_regs_l.mpte_group_size,
+ SWATH_HEIGHT, &rq_regs->rq_regs_l.swath_height,
+ PTE_ROW_HEIGHT_LINEAR, &rq_regs->rq_regs_l.pte_row_height_linear);
+
+ REG_GET_8(DCHUBP_REQ_SIZE_CONFIG_C,
+ CHUNK_SIZE_C, &rq_regs->rq_regs_c.chunk_size,
+ MIN_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_chunk_size,
+ META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.meta_chunk_size,
+ MIN_META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_meta_chunk_size,
+ DPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.dpte_group_size,
+ MPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.mpte_group_size,
+ SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height,
+ PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear);
+}
+
+static void hubp2_validate_dml_output(struct hubp *hubp,
+ struct dc_context *ctx,
+ struct _vcs_dpi_display_rq_regs_st *dml_rq_regs,
+ struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *dml_ttu_attr)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ struct _vcs_dpi_display_rq_regs_st rq_regs = {0};
+ struct _vcs_dpi_display_dlg_regs_st dlg_attr = {0};
+ struct _vcs_dpi_display_ttu_regs_st ttu_attr = {0};
+ DC_LOGGER_INIT(ctx->logger);
+ DC_LOG_DEBUG("DML Validation | Running Validation");
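+	/* Read back the RQ, DLG and TTU registers programmed on this hubp and
+	 * compare them against the DML-computed values, logging each mismatch.
+	 */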
+
+ /* Requestor Regs */
+ REG_GET(HUBPRET_CONTROL,
+ DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs.plane1_base_address);
+ REG_GET_4(DCN_EXPANSION_MODE,
+ DRQ_EXPANSION_MODE, &rq_regs.drq_expansion_mode,
+ PRQ_EXPANSION_MODE, &rq_regs.prq_expansion_mode,
+ MRQ_EXPANSION_MODE, &rq_regs.mrq_expansion_mode,
+ CRQ_EXPANSION_MODE, &rq_regs.crq_expansion_mode);
+ REG_GET_8(DCHUBP_REQ_SIZE_CONFIG,
+ CHUNK_SIZE, &rq_regs.rq_regs_l.chunk_size,
+ MIN_CHUNK_SIZE, &rq_regs.rq_regs_l.min_chunk_size,
+ META_CHUNK_SIZE, &rq_regs.rq_regs_l.meta_chunk_size,
+ MIN_META_CHUNK_SIZE, &rq_regs.rq_regs_l.min_meta_chunk_size,
+ DPTE_GROUP_SIZE, &rq_regs.rq_regs_l.dpte_group_size,
+ MPTE_GROUP_SIZE, &rq_regs.rq_regs_l.mpte_group_size,
+ SWATH_HEIGHT, &rq_regs.rq_regs_l.swath_height,
+ PTE_ROW_HEIGHT_LINEAR, &rq_regs.rq_regs_l.pte_row_height_linear);
+ REG_GET_8(DCHUBP_REQ_SIZE_CONFIG_C,
+ CHUNK_SIZE_C, &rq_regs.rq_regs_c.chunk_size,
+ MIN_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_chunk_size,
+ META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.meta_chunk_size,
+ MIN_META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_meta_chunk_size,
+ DPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.dpte_group_size,
+ MPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.mpte_group_size,
+ SWATH_HEIGHT_C, &rq_regs.rq_regs_c.swath_height,
+ PTE_ROW_HEIGHT_LINEAR_C, &rq_regs.rq_regs_c.pte_row_height_linear);
+
+ if (rq_regs.plane1_base_address != dml_rq_regs->plane1_base_address)
+ DC_LOG_DEBUG("DML Validation | HUBPRET_CONTROL:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u Actual: %u\n",
+ dml_rq_regs->plane1_base_address, rq_regs.plane1_base_address);
+ if (rq_regs.drq_expansion_mode != dml_rq_regs->drq_expansion_mode)
+ DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
+ dml_rq_regs->drq_expansion_mode, rq_regs.drq_expansion_mode);
+ if (rq_regs.prq_expansion_mode != dml_rq_regs->prq_expansion_mode)
+ DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:PRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
+ dml_rq_regs->prq_expansion_mode, rq_regs.prq_expansion_mode);
+ if (rq_regs.mrq_expansion_mode != dml_rq_regs->mrq_expansion_mode)
+ DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:MRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
+ dml_rq_regs->mrq_expansion_mode, rq_regs.mrq_expansion_mode);
+ if (rq_regs.crq_expansion_mode != dml_rq_regs->crq_expansion_mode)
+ DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:CRQ_EXPANSION_MODE - Expected: %u Actual: %u\n",
+ dml_rq_regs->crq_expansion_mode, rq_regs.crq_expansion_mode);
+
+ if (rq_regs.rq_regs_l.chunk_size != dml_rq_regs->rq_regs_l.chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:CHUNK_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.chunk_size, rq_regs.rq_regs_l.chunk_size);
+ if (rq_regs.rq_regs_l.min_chunk_size != dml_rq_regs->rq_regs_l.min_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_CHUNK_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.min_chunk_size, rq_regs.rq_regs_l.min_chunk_size);
+ if (rq_regs.rq_regs_l.meta_chunk_size != dml_rq_regs->rq_regs_l.meta_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:META_CHUNK_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.meta_chunk_size, rq_regs.rq_regs_l.meta_chunk_size);
+ if (rq_regs.rq_regs_l.min_meta_chunk_size != dml_rq_regs->rq_regs_l.min_meta_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_META_CHUNK_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs.rq_regs_l.min_meta_chunk_size);
+ if (rq_regs.rq_regs_l.dpte_group_size != dml_rq_regs->rq_regs_l.dpte_group_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:DPTE_GROUP_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.dpte_group_size, rq_regs.rq_regs_l.dpte_group_size);
+ if (rq_regs.rq_regs_l.mpte_group_size != dml_rq_regs->rq_regs_l.mpte_group_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MPTE_GROUP_SIZE - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.mpte_group_size, rq_regs.rq_regs_l.mpte_group_size);
+ if (rq_regs.rq_regs_l.swath_height != dml_rq_regs->rq_regs_l.swath_height)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:SWATH_HEIGHT - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.swath_height, rq_regs.rq_regs_l.swath_height);
+ if (rq_regs.rq_regs_l.pte_row_height_linear != dml_rq_regs->rq_regs_l.pte_row_height_linear)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:PTE_ROW_HEIGHT_LINEAR - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_l.pte_row_height_linear, rq_regs.rq_regs_l.pte_row_height_linear);
+
+ if (rq_regs.rq_regs_c.chunk_size != dml_rq_regs->rq_regs_c.chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.chunk_size, rq_regs.rq_regs_c.chunk_size);
+ if (rq_regs.rq_regs_c.min_chunk_size != dml_rq_regs->rq_regs_c.min_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.min_chunk_size, rq_regs.rq_regs_c.min_chunk_size);
+ if (rq_regs.rq_regs_c.meta_chunk_size != dml_rq_regs->rq_regs_c.meta_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:META_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.meta_chunk_size, rq_regs.rq_regs_c.meta_chunk_size);
+ if (rq_regs.rq_regs_c.min_meta_chunk_size != dml_rq_regs->rq_regs_c.min_meta_chunk_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_META_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.min_meta_chunk_size, rq_regs.rq_regs_c.min_meta_chunk_size);
+ if (rq_regs.rq_regs_c.dpte_group_size != dml_rq_regs->rq_regs_c.dpte_group_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:DPTE_GROUP_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.dpte_group_size, rq_regs.rq_regs_c.dpte_group_size);
+ if (rq_regs.rq_regs_c.mpte_group_size != dml_rq_regs->rq_regs_c.mpte_group_size)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MPTE_GROUP_SIZE_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.mpte_group_size, rq_regs.rq_regs_c.mpte_group_size);
+ if (rq_regs.rq_regs_c.swath_height != dml_rq_regs->rq_regs_c.swath_height)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:SWATH_HEIGHT_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.swath_height, rq_regs.rq_regs_c.swath_height);
+ if (rq_regs.rq_regs_c.pte_row_height_linear != dml_rq_regs->rq_regs_c.pte_row_height_linear)
+ DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:PTE_ROW_HEIGHT_LINEAR_C - Expected: %u Actual: %u\n",
+ dml_rq_regs->rq_regs_c.pte_row_height_linear, rq_regs.rq_regs_c.pte_row_height_linear);
+
+ /* DLG - Per hubp */
+ REG_GET_2(BLANK_OFFSET_0,
+ REFCYC_H_BLANK_END, &dlg_attr.refcyc_h_blank_end,
+ DLG_V_BLANK_END, &dlg_attr.dlg_vblank_end);
+ REG_GET(BLANK_OFFSET_1,
+ MIN_DST_Y_NEXT_START, &dlg_attr.min_dst_y_next_start);
+ REG_GET(DST_DIMENSIONS,
+ REFCYC_PER_HTOTAL, &dlg_attr.refcyc_per_htotal);
+ REG_GET_2(DST_AFTER_SCALER,
+ REFCYC_X_AFTER_SCALER, &dlg_attr.refcyc_x_after_scaler,
+ DST_Y_AFTER_SCALER, &dlg_attr.dst_y_after_scaler);
+ REG_GET(REF_FREQ_TO_PIX_FREQ,
+ REF_FREQ_TO_PIX_FREQ, &dlg_attr.ref_freq_to_pix_freq);
+
+ if (dlg_attr.refcyc_h_blank_end != dml_dlg_attr->refcyc_h_blank_end)
+ DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:REFCYC_H_BLANK_END - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_h_blank_end, dlg_attr.refcyc_h_blank_end);
+ if (dlg_attr.dlg_vblank_end != dml_dlg_attr->dlg_vblank_end)
+ DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:DLG_V_BLANK_END - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dlg_vblank_end, dlg_attr.dlg_vblank_end);
+ if (dlg_attr.min_dst_y_next_start != dml_dlg_attr->min_dst_y_next_start)
+ DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_1:MIN_DST_Y_NEXT_START - Expected: %u Actual: %u\n",
+ dml_dlg_attr->min_dst_y_next_start, dlg_attr.min_dst_y_next_start);
+ if (dlg_attr.refcyc_per_htotal != dml_dlg_attr->refcyc_per_htotal)
+ DC_LOG_DEBUG("DML Validation | DST_DIMENSIONS:REFCYC_PER_HTOTAL - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_htotal, dlg_attr.refcyc_per_htotal);
+ if (dlg_attr.refcyc_x_after_scaler != dml_dlg_attr->refcyc_x_after_scaler)
+ DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:REFCYC_X_AFTER_SCALER - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_x_after_scaler, dlg_attr.refcyc_x_after_scaler);
+ if (dlg_attr.dst_y_after_scaler != dml_dlg_attr->dst_y_after_scaler)
+ DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:DST_Y_AFTER_SCALER - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_after_scaler, dlg_attr.dst_y_after_scaler);
+ if (dlg_attr.ref_freq_to_pix_freq != dml_dlg_attr->ref_freq_to_pix_freq)
+ DC_LOG_DEBUG("DML Validation | REF_FREQ_TO_PIX_FREQ:REF_FREQ_TO_PIX_FREQ - Expected: %u Actual: %u\n",
+ dml_dlg_attr->ref_freq_to_pix_freq, dlg_attr.ref_freq_to_pix_freq);
+
+ /* DLG - Per luma/chroma */
+ REG_GET(VBLANK_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr.refcyc_per_pte_group_vblank_l);
+ if (REG(NOM_PARAMETERS_0))
+ REG_GET(NOM_PARAMETERS_0,
+ DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr.dst_y_per_pte_row_nom_l);
+ if (REG(NOM_PARAMETERS_1))
+ REG_GET(NOM_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr.refcyc_per_pte_group_nom_l);
+ REG_GET(NOM_PARAMETERS_4,
+ DST_Y_PER_META_ROW_NOM_L, &dlg_attr.dst_y_per_meta_row_nom_l);
+ REG_GET(NOM_PARAMETERS_5,
+ REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr.refcyc_per_meta_chunk_nom_l);
+ REG_GET_2(PER_LINE_DELIVERY,
+ REFCYC_PER_LINE_DELIVERY_L, &dlg_attr.refcyc_per_line_delivery_l,
+ REFCYC_PER_LINE_DELIVERY_C, &dlg_attr.refcyc_per_line_delivery_c);
+ REG_GET_2(PER_LINE_DELIVERY_PRE,
+ REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr.refcyc_per_line_delivery_pre_l,
+ REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr.refcyc_per_line_delivery_pre_c);
+ REG_GET(VBLANK_PARAMETERS_2,
+ REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr.refcyc_per_pte_group_vblank_c);
+ if (REG(NOM_PARAMETERS_2))
+ REG_GET(NOM_PARAMETERS_2,
+ DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr.dst_y_per_pte_row_nom_c);
+ if (REG(NOM_PARAMETERS_3))
+ REG_GET(NOM_PARAMETERS_3,
+ REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr.refcyc_per_pte_group_nom_c);
+ REG_GET(NOM_PARAMETERS_6,
+ DST_Y_PER_META_ROW_NOM_C, &dlg_attr.dst_y_per_meta_row_nom_c);
+ REG_GET(NOM_PARAMETERS_7,
+ REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr.refcyc_per_meta_chunk_nom_c);
+ REG_GET(VBLANK_PARAMETERS_3,
+ REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr.refcyc_per_meta_chunk_vblank_l);
+ REG_GET(VBLANK_PARAMETERS_4,
+ REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr.refcyc_per_meta_chunk_vblank_c);
+
+ if (dlg_attr.refcyc_per_pte_group_vblank_l != dml_dlg_attr->refcyc_per_pte_group_vblank_l)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_1:REFCYC_PER_PTE_GROUP_VBLANK_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_vblank_l, dlg_attr.refcyc_per_pte_group_vblank_l);
+ if (dlg_attr.dst_y_per_pte_row_nom_l != dml_dlg_attr->dst_y_per_pte_row_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_0:DST_Y_PER_PTE_ROW_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_pte_row_nom_l, dlg_attr.dst_y_per_pte_row_nom_l);
+ if (dlg_attr.refcyc_per_pte_group_nom_l != dml_dlg_attr->refcyc_per_pte_group_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_1:REFCYC_PER_PTE_GROUP_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_nom_l, dlg_attr.refcyc_per_pte_group_nom_l);
+ if (dlg_attr.dst_y_per_meta_row_nom_l != dml_dlg_attr->dst_y_per_meta_row_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_4:DST_Y_PER_META_ROW_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_meta_row_nom_l, dlg_attr.dst_y_per_meta_row_nom_l);
+ if (dlg_attr.refcyc_per_meta_chunk_nom_l != dml_dlg_attr->refcyc_per_meta_chunk_nom_l)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_5:REFCYC_PER_META_CHUNK_NOM_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_nom_l, dlg_attr.refcyc_per_meta_chunk_nom_l);
+ if (dlg_attr.refcyc_per_line_delivery_l != dml_dlg_attr->refcyc_per_line_delivery_l)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_l, dlg_attr.refcyc_per_line_delivery_l);
+ if (dlg_attr.refcyc_per_line_delivery_c != dml_dlg_attr->refcyc_per_line_delivery_c)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_c, dlg_attr.refcyc_per_line_delivery_c);
+ if (dlg_attr.refcyc_per_pte_group_vblank_c != dml_dlg_attr->refcyc_per_pte_group_vblank_c)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_2:REFCYC_PER_PTE_GROUP_VBLANK_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_vblank_c, dlg_attr.refcyc_per_pte_group_vblank_c);
+ if (dlg_attr.dst_y_per_pte_row_nom_c != dml_dlg_attr->dst_y_per_pte_row_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_2:DST_Y_PER_PTE_ROW_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_pte_row_nom_c, dlg_attr.dst_y_per_pte_row_nom_c);
+ if (dlg_attr.refcyc_per_pte_group_nom_c != dml_dlg_attr->refcyc_per_pte_group_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_3:REFCYC_PER_PTE_GROUP_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_nom_c, dlg_attr.refcyc_per_pte_group_nom_c);
+ if (dlg_attr.dst_y_per_meta_row_nom_c != dml_dlg_attr->dst_y_per_meta_row_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_6:DST_Y_PER_META_ROW_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->dst_y_per_meta_row_nom_c, dlg_attr.dst_y_per_meta_row_nom_c);
+ if (dlg_attr.refcyc_per_meta_chunk_nom_c != dml_dlg_attr->refcyc_per_meta_chunk_nom_c)
+ DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_7:REFCYC_PER_META_CHUNK_NOM_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_nom_c, dlg_attr.refcyc_per_meta_chunk_nom_c);
+ if (dlg_attr.refcyc_per_line_delivery_pre_l != dml_dlg_attr->refcyc_per_line_delivery_pre_l)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_pre_l, dlg_attr.refcyc_per_line_delivery_pre_l);
+ if (dlg_attr.refcyc_per_line_delivery_pre_c != dml_dlg_attr->refcyc_per_line_delivery_pre_c)
+ DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_line_delivery_pre_c, dlg_attr.refcyc_per_line_delivery_pre_c);
+ if (dlg_attr.refcyc_per_meta_chunk_vblank_l != dml_dlg_attr->refcyc_per_meta_chunk_vblank_l)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_3:REFCYC_PER_META_CHUNK_VBLANK_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_vblank_l, dlg_attr.refcyc_per_meta_chunk_vblank_l);
+ if (dlg_attr.refcyc_per_meta_chunk_vblank_c != dml_dlg_attr->refcyc_per_meta_chunk_vblank_c)
+ DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_4:REFCYC_PER_META_CHUNK_VBLANK_C - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_meta_chunk_vblank_c, dlg_attr.refcyc_per_meta_chunk_vblank_c);
+
+ /* TTU - per hubp */
+ REG_GET_2(DCN_TTU_QOS_WM,
+ QoS_LEVEL_LOW_WM, &ttu_attr.qos_level_low_wm,
+ QoS_LEVEL_HIGH_WM, &ttu_attr.qos_level_high_wm);
+
+ if (ttu_attr.qos_level_low_wm != dml_ttu_attr->qos_level_low_wm)
+ DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_LOW_WM - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_low_wm, ttu_attr.qos_level_low_wm);
+ if (ttu_attr.qos_level_high_wm != dml_ttu_attr->qos_level_high_wm)
+ DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_HIGH_WM - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_high_wm, ttu_attr.qos_level_high_wm);
+
+ /* TTU - per luma/chroma */
+ /* Assumed surf0 is luma and 1 is chroma */
+ REG_GET_3(DCN_SURF0_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_l,
+ QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_l,
+ QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_l);
+ REG_GET_3(DCN_SURF1_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_c,
+ QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_c,
+ QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_c);
+ REG_GET_3(DCN_CUR0_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_cur0,
+ QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_cur0,
+ QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_cur0);
+ REG_GET(FLIP_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_FLIP_L, &dlg_attr.refcyc_per_pte_group_flip_l);
+ REG_GET(DCN_CUR0_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur0);
+ REG_GET(DCN_CUR1_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur1);
+ REG_GET(DCN_SURF0_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_l);
+ REG_GET(DCN_SURF1_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_c);
+
+ if (ttu_attr.refcyc_per_req_delivery_l != dml_ttu_attr->refcyc_per_req_delivery_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_l, ttu_attr.refcyc_per_req_delivery_l);
+ if (ttu_attr.qos_level_fixed_l != dml_ttu_attr->qos_level_fixed_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_fixed_l, ttu_attr.qos_level_fixed_l);
+ if (ttu_attr.qos_ramp_disable_l != dml_ttu_attr->qos_ramp_disable_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_ramp_disable_l, ttu_attr.qos_ramp_disable_l);
+ if (ttu_attr.refcyc_per_req_delivery_c != dml_ttu_attr->refcyc_per_req_delivery_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_c, ttu_attr.refcyc_per_req_delivery_c);
+ if (ttu_attr.qos_level_fixed_c != dml_ttu_attr->qos_level_fixed_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_fixed_c, ttu_attr.qos_level_fixed_c);
+ if (ttu_attr.qos_ramp_disable_c != dml_ttu_attr->qos_ramp_disable_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_ramp_disable_c, ttu_attr.qos_ramp_disable_c);
+ if (ttu_attr.refcyc_per_req_delivery_cur0 != dml_ttu_attr->refcyc_per_req_delivery_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_cur0, ttu_attr.refcyc_per_req_delivery_cur0);
+ if (ttu_attr.qos_level_fixed_cur0 != dml_ttu_attr->qos_level_fixed_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_level_fixed_cur0, ttu_attr.qos_level_fixed_cur0);
+ if (ttu_attr.qos_ramp_disable_cur0 != dml_ttu_attr->qos_ramp_disable_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->qos_ramp_disable_cur0, ttu_attr.qos_ramp_disable_cur0);
+ if (dlg_attr.refcyc_per_pte_group_flip_l != dml_dlg_attr->refcyc_per_pte_group_flip_l)
+ DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_1:REFCYC_PER_PTE_GROUP_FLIP_L - Expected: %u Actual: %u\n",
+ dml_dlg_attr->refcyc_per_pte_group_flip_l, dlg_attr.refcyc_per_pte_group_flip_l);
+ if (ttu_attr.refcyc_per_req_delivery_pre_cur0 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur0)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_cur0, ttu_attr.refcyc_per_req_delivery_pre_cur0);
+ if (ttu_attr.refcyc_per_req_delivery_pre_cur1 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur1)
+ DC_LOG_DEBUG("DML Validation | DCN_CUR1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_cur1, ttu_attr.refcyc_per_req_delivery_pre_cur1);
+ if (ttu_attr.refcyc_per_req_delivery_pre_l != dml_ttu_attr->refcyc_per_req_delivery_pre_l)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_l, ttu_attr.refcyc_per_req_delivery_pre_l);
+ if (ttu_attr.refcyc_per_req_delivery_pre_c != dml_ttu_attr->refcyc_per_req_delivery_pre_c)
+ DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n",
+ dml_ttu_attr->refcyc_per_req_delivery_pre_c, ttu_attr.refcyc_per_req_delivery_pre_c);
+}
+
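+/* DCN2.0 hubp function table; hubp1_-prefixed helpers are reused from DCN1.0. */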
+static struct hubp_funcs dcn20_hubp_funcs = {
+ .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
+ .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
+ .hubp_program_surface_flip_and_addr = hubp2_program_surface_flip_and_addr,
+ .hubp_program_surface_config = hubp2_program_surface_config,
+ .hubp_is_flip_pending = hubp2_is_flip_pending,
+ .hubp_setup = hubp2_setup,
+ .hubp_setup_interdependent = hubp2_setup_interdependent,
+ .hubp_set_vm_system_aperture_settings = hubp2_set_vm_system_aperture_settings,
+ .set_blank = hubp2_set_blank,
+ .set_blank_regs = hubp2_set_blank_regs,
+ .dcc_control = hubp2_dcc_control,
+ .mem_program_viewport = min_set_viewport,
+ .set_cursor_attributes = hubp2_cursor_set_attributes,
+ .set_cursor_position = hubp2_cursor_set_position,
+ .hubp_clk_cntl = hubp2_clk_cntl,
+ .hubp_vtg_sel = hubp2_vtg_sel,
+ .dmdata_set_attributes = hubp2_dmdata_set_attributes,
+ .dmdata_load = hubp2_dmdata_load,
+ .dmdata_status_done = hubp2_dmdata_status_done,
+ .hubp_read_state = hubp2_read_state,
+ .hubp_clear_underflow = hubp2_clear_underflow,
+ .hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
+ .hubp_init = hubp1_init,
+ .validate_dml_output = hubp2_validate_dml_output,
+ .hubp_in_blank = hubp1_in_blank,
+ .hubp_soft_reset = hubp1_soft_reset,
+ .hubp_set_flip_int = hubp1_set_flip_int,
+};
+
+bool hubp2_construct(
+ struct dcn20_hubp *hubp2,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn_hubp2_registers *hubp_regs,
+ const struct dcn_hubp2_shift *hubp_shift,
+ const struct dcn_hubp2_mask *hubp_mask)
+{
+ hubp2->base.funcs = &dcn20_hubp_funcs;
+ hubp2->base.ctx = ctx;
+ hubp2->hubp_regs = hubp_regs;
+ hubp2->hubp_shift = hubp_shift;
+ hubp2->hubp_mask = hubp_mask;
+ hubp2->base.inst = inst;
+ hubp2->base.opp_id = OPP_ID_INVALID;
+ hubp2->base.mpcc_id = 0xf;
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
new file mode 100644
index 0000000000..efa2adf4f8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
@@ -0,0 +1,371 @@
+/*
+ * Copyright 2012-17 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_MEM_INPUT_DCN20_H__
+#define __DC_MEM_INPUT_DCN20_H__
+
+#include "../dcn10/dcn10_hubp.h"
+
+#define TO_DCN20_HUBP(hubp)\
+ container_of(hubp, struct dcn20_hubp, base)
+
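+/* Register list common to DCN2.x hubps, built on top of the base DCN and
+ * DCN_VM register lists.
+ */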
+#define HUBP_REG_LIST_DCN2_COMMON(id)\
+ HUBP_REG_LIST_DCN(id),\
+ HUBP_REG_LIST_DCN_VM(id),\
+ SRI(PREFETCH_SETTINGS, HUBPREQ, id),\
+ SRI(PREFETCH_SETTINGS_C, HUBPREQ, id),\
+ SRI(DCN_VM_SYSTEM_APERTURE_LOW_ADDR, HUBPREQ, id),\
+ SRI(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR, HUBPREQ, id),\
+ SRI(CURSOR_SETTINGS, HUBPREQ, id), \
+ SRI(CURSOR_SURFACE_ADDRESS_HIGH, CURSOR0_, id), \
+ SRI(CURSOR_SURFACE_ADDRESS, CURSOR0_, id), \
+ SRI(CURSOR_SIZE, CURSOR0_, id), \
+ SRI(CURSOR_CONTROL, CURSOR0_, id), \
+ SRI(CURSOR_POSITION, CURSOR0_, id), \
+ SRI(CURSOR_HOT_SPOT, CURSOR0_, id), \
+ SRI(CURSOR_DST_OFFSET, CURSOR0_, id), \
+ SRI(DMDATA_ADDRESS_HIGH, CURSOR0_, id), \
+ SRI(DMDATA_ADDRESS_LOW, CURSOR0_, id), \
+ SRI(DMDATA_CNTL, CURSOR0_, id), \
+ SRI(DMDATA_SW_CNTL, CURSOR0_, id), \
+ SRI(DMDATA_QOS_CNTL, CURSOR0_, id), \
+ SRI(DMDATA_SW_DATA, CURSOR0_, id), \
+ SRI(DMDATA_STATUS, CURSOR0_, id),\
+ SRI(FLIP_PARAMETERS_0, HUBPREQ, id),\
+ SRI(FLIP_PARAMETERS_1, HUBPREQ, id),\
+ SRI(FLIP_PARAMETERS_2, HUBPREQ, id),\
+ SRI(DCN_CUR1_TTU_CNTL0, HUBPREQ, id),\
+ SRI(DCN_CUR1_TTU_CNTL1, HUBPREQ, id),\
+ SRI(DCSURF_FLIP_CONTROL2, HUBPREQ, id), \
+ SRI(VMID_SETTINGS_0, HUBPREQ, id)
+
+#define HUBP_REG_LIST_DCN20(id)\
+ HUBP_REG_LIST_DCN2_COMMON(id),\
+ SR(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB),\
+ SR(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB)
+
+#define HUBP_MASK_SH_LIST_DCN2_SHARE_COMMON(mask_sh)\
+ HUBP_MASK_SH_LIST_DCN_SHARE_COMMON(mask_sh),\
+ HUBP_MASK_SH_LIST_DCN_VM(mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ROTATION_ANGLE, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, H_MIRROR_EN, mask_sh),\
+ HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS, DST_Y_PREFETCH, mask_sh),\
+ HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS, VRATIO_PREFETCH, mask_sh),\
+ HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS_C, VRATIO_PREFETCH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR, MC_VM_SYSTEM_APERTURE_LOW_ADDR, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR, MC_VM_SYSTEM_APERTURE_HIGH_ADDR, mask_sh),\
+ HUBP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_DST_Y_OFFSET, mask_sh), \
+ HUBP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_CHUNK_HDL_ADJUST, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SIZE, CURSOR_WIDTH, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SIZE, CURSOR_HEIGHT, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_POSITION, CURSOR_X_POSITION, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_POSITION, CURSOR_Y_POSITION, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_ADDRESS_HIGH, DMDATA_ADDRESS_HIGH, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_MODE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_UPDATED, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_REPEAT, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_SIZE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_SW_CNTL, DMDATA_SW_UPDATED, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_SW_CNTL, DMDATA_SW_REPEAT, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_SW_CNTL, DMDATA_SW_SIZE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_QOS_CNTL, DMDATA_QOS_MODE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_QOS_CNTL, DMDATA_QOS_LEVEL, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_QOS_CNTL, DMDATA_DL_DELTA, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_STATUS, DMDATA_DONE, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_0, DST_Y_PER_VM_FLIP, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_0, DST_Y_PER_ROW_FLIP, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_1, REFCYC_PER_PTE_GROUP_FLIP_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_2, REFCYC_PER_META_CHUNK_FLIP_L, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE_STOP_DATA_DURING_VM, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, HUBPREQ_MASTER_UPDATE_LOCK_STATUS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL2, SURFACE_GSL_ENABLE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL2, SURFACE_TRIPLE_BUFFER_ENABLE, mask_sh),\
+ HUBP_SF(HUBPREQ0_VMID_SETTINGS_0, VMID, mask_sh)
+
+/*DCN2.x and DCN1.x*/
+#define HUBP_MASK_SH_LIST_DCN2_COMMON(mask_sh)\
+ HUBP_MASK_SH_LIST_DCN2_SHARE_COMMON(mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, RB_ALIGNED, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MPTE_GROUP_SIZE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MPTE_GROUP_SIZE_C, mask_sh)
+
+/*DCN2.0 specific*/
+#define HUBP_MASK_SH_LIST_DCN20(mask_sh)\
+ HUBP_MASK_SH_LIST_DCN2_COMMON(mask_sh),\
+ HUBP_SF(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, DCN_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM, mask_sh),\
+ HUBP_SF(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, mask_sh),\
+ HUBP_SF(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, mask_sh)
+
+/*DCN2.x */
+#define DCN2_HUBP_REG_COMMON_VARIABLE_LIST \
+ HUBP_COMMON_REG_VARIABLE_LIST; \
+ uint32_t DMDATA_ADDRESS_HIGH; \
+ uint32_t DMDATA_ADDRESS_LOW; \
+ uint32_t DMDATA_CNTL; \
+ uint32_t DMDATA_SW_CNTL; \
+ uint32_t DMDATA_QOS_CNTL; \
+ uint32_t DMDATA_SW_DATA; \
+ uint32_t DMDATA_STATUS;\
+ uint32_t DCSURF_FLIP_CONTROL2;\
+ uint32_t FLIP_PARAMETERS_0;\
+ uint32_t FLIP_PARAMETERS_1;\
+ uint32_t FLIP_PARAMETERS_2;\
+ uint32_t DCN_CUR1_TTU_CNTL0;\
+ uint32_t DCN_CUR1_TTU_CNTL1;\
+ uint32_t VMID_SETTINGS_0
+
+
+#define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \
+ DCN2_HUBP_REG_COMMON_VARIABLE_LIST; \
+ uint32_t FLIP_PARAMETERS_3;\
+ uint32_t FLIP_PARAMETERS_4;\
+ uint32_t FLIP_PARAMETERS_5;\
+ uint32_t FLIP_PARAMETERS_6;\
+ uint32_t VBLANK_PARAMETERS_5;\
+ uint32_t VBLANK_PARAMETERS_6
+
+#define DCN30_HUBP_REG_COMMON_VARIABLE_LIST \
+ DCN21_HUBP_REG_COMMON_VARIABLE_LIST;\
+ uint32_t DCN_DMDATA_VM_CNTL
+
+#define DCN32_HUBP_REG_COMMON_VARIABLE_LIST \
+ DCN30_HUBP_REG_COMMON_VARIABLE_LIST;\
+ uint32_t DCHUBP_MALL_CONFIG;\
+ uint32_t DCHUBP_VMPG_CONFIG;\
+ uint32_t UCLK_PSTATE_FORCE
+
+#define DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type) \
+ DCN_HUBP_REG_FIELD_BASE_LIST(type); \
+ type DMDATA_ADDRESS_HIGH;\
+ type DMDATA_MODE;\
+ type DMDATA_UPDATED;\
+ type DMDATA_REPEAT;\
+ type DMDATA_SIZE;\
+ type DMDATA_SW_UPDATED;\
+ type DMDATA_SW_REPEAT;\
+ type DMDATA_SW_SIZE;\
+ type DMDATA_QOS_MODE;\
+ type DMDATA_QOS_LEVEL;\
+ type DMDATA_DL_DELTA;\
+ type DMDATA_DONE;\
+ type DST_Y_PER_VM_FLIP;\
+ type DST_Y_PER_ROW_FLIP;\
+ type REFCYC_PER_PTE_GROUP_FLIP_L;\
+ type REFCYC_PER_META_CHUNK_FLIP_L;\
+ type HUBP_VREADY_AT_OR_AFTER_VSYNC;\
+ type HUBP_DISABLE_STOP_DATA_DURING_VM;\
+ type HUBPREQ_MASTER_UPDATE_LOCK_STATUS;\
+ type SURFACE_GSL_ENABLE;\
+ type SURFACE_TRIPLE_BUFFER_ENABLE;\
+ type VMID
+
+#define DCN21_HUBP_REG_FIELD_VARIABLE_LIST(type) \
+ DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type);\
+ type REFCYC_PER_VM_GROUP_FLIP;\
+ type REFCYC_PER_VM_REQ_FLIP;\
+ type REFCYC_PER_VM_GROUP_VBLANK;\
+ type REFCYC_PER_VM_REQ_VBLANK;\
+ type REFCYC_PER_PTE_GROUP_FLIP_C; \
+ type REFCYC_PER_META_CHUNK_FLIP_C; \
+ type VM_GROUP_SIZE
+
+#define DCN30_HUBP_REG_FIELD_VARIABLE_LIST(type) \
+ DCN21_HUBP_REG_FIELD_VARIABLE_LIST(type);\
+ type PRIMARY_SURFACE_DCC_IND_BLK;\
+ type SECONDARY_SURFACE_DCC_IND_BLK;\
+ type PRIMARY_SURFACE_DCC_IND_BLK_C;\
+ type SECONDARY_SURFACE_DCC_IND_BLK_C;\
+ type ALPHA_PLANE_EN;\
+ type REFCYC_PER_VM_DMDATA;\
+ type DMDATA_VM_FAULT_STATUS;\
+ type DMDATA_VM_FAULT_STATUS_CLEAR; \
+ type DMDATA_VM_UNDERFLOW_STATUS;\
+ type DMDATA_VM_LATE_STATUS;\
+ type DMDATA_VM_UNDERFLOW_STATUS_CLEAR; \
+ type DMDATA_VM_DONE; \
+ type CROSSBAR_SRC_Y_G; \
+ type CROSSBAR_SRC_ALPHA; \
+ type PACK_3TO2_ELEMENT_DISABLE; \
+ type ROW_TTU_MODE; \
+ type NUM_PKRS
+
+#define DCN31_HUBP_REG_FIELD_VARIABLE_LIST(type) \
+ DCN30_HUBP_REG_FIELD_VARIABLE_LIST(type);\
+ type HUBP_UNBOUNDED_REQ_MODE;\
+ type CURSOR_REQ_MODE;\
+ type HUBP_SOFT_RESET
+
+#define DCN32_HUBP_REG_FIELD_VARIABLE_LIST(type) \
+ DCN31_HUBP_REG_FIELD_VARIABLE_LIST(type);\
+ type USE_MALL_SEL; \
+ type USE_MALL_FOR_CURSOR;\
+ type VMPG_SIZE; \
+ type PTE_BUFFER_MODE; \
+ type BIGK_FRAGMENT_SIZE; \
+ type FORCE_ONE_ROW_FOR_FRAME; \
+ type DATA_UCLK_PSTATE_FORCE_EN; \
+ type DATA_UCLK_PSTATE_FORCE_VALUE; \
+ type CURSOR_UCLK_PSTATE_FORCE_EN; \
+ type CURSOR_UCLK_PSTATE_FORCE_VALUE
+
+struct dcn_hubp2_registers {
+ DCN32_HUBP_REG_COMMON_VARIABLE_LIST;
+};
+
+struct dcn_hubp2_shift {
+ DCN32_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t);
+};
+
+struct dcn_hubp2_mask {
+ DCN32_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t);
+};
+
+struct dcn20_hubp {
+ struct hubp base;
+ struct dcn_hubp_state state;
+ const struct dcn_hubp2_registers *hubp_regs;
+ const struct dcn_hubp2_shift *hubp_shift;
+ const struct dcn_hubp2_mask *hubp_mask;
+};
+
+bool hubp2_construct(
+ struct dcn20_hubp *hubp2,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn_hubp2_registers *hubp_regs,
+ const struct dcn_hubp2_shift *hubp_shift,
+ const struct dcn_hubp2_mask *hubp_mask);
+
+void hubp2_setup_interdependent(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr);
+
+void hubp2_vready_at_or_After_vsync(struct hubp *hubp,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+
+void hubp2_cursor_set_attributes(
+ struct hubp *hubp,
+ const struct dc_cursor_attributes *attr);
+
+void hubp2_set_vm_system_aperture_settings(struct hubp *hubp,
+ struct vm_system_aperture_param *apt);
+
+enum cursor_lines_per_chunk hubp2_get_lines_per_chunk(
+ unsigned int cursor_width,
+ enum dc_cursor_color_format cursor_mode);
+
+void hubp2_dmdata_set_attributes(
+ struct hubp *hubp,
+ const struct dc_dmdata_attributes *attr);
+
+void hubp2_dmdata_load(
+ struct hubp *hubp,
+ uint32_t dmdata_sw_size,
+ const uint32_t *dmdata_sw_data);
+
+bool hubp2_dmdata_status_done(struct hubp *hubp);
+
+void hubp2_enable_triplebuffer(
+ struct hubp *hubp,
+ bool enable);
+
+bool hubp2_is_triplebuffer_enabled(
+ struct hubp *hubp);
+
+void hubp2_set_flip_control_surface_gsl(struct hubp *hubp, bool enable);
+
+void hubp2_program_deadline(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr);
+
+bool hubp2_program_surface_flip_and_addr(
+ struct hubp *hubp,
+ const struct dc_plane_address *address,
+ bool flip_immediate);
+
+void hubp2_dcc_control(struct hubp *hubp, bool enable,
+ enum hubp_ind_block_size independent_64b_blks);
+
+void hubp2_program_size(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ const struct plane_size *plane_size,
+ struct dc_plane_dcc_param *dcc);
+
+void hubp2_program_rotation(
+ struct hubp *hubp,
+ enum dc_rotation_angle rotation,
+ bool horizontal_mirror);
+
+void hubp2_program_pixel_format(
+ struct hubp *hubp,
+ enum surface_pixel_format format);
+
+void hubp2_program_surface_config(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ struct plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror,
+ unsigned int compat_level);
+
+bool hubp2_is_flip_pending(struct hubp *hubp);
+
+void hubp2_set_blank(struct hubp *hubp, bool blank);
+void hubp2_set_blank_regs(struct hubp *hubp, bool blank);
+
+void hubp2_cursor_set_position(
+ struct hubp *hubp,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param);
+
+void hubp2_clk_cntl(struct hubp *hubp, bool enable);
+
+void hubp2_vtg_sel(struct hubp *hubp, uint32_t otg_inst);
+
+void hubp2_clear_underflow(struct hubp *hubp);
+
+void hubp2_read_state_common(struct hubp *hubp);
+
+void hubp2_read_state(struct hubp *hubp);
+
+#endif /* __DC_MEM_INPUT_DCN20_H__ */
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
new file mode 100644
index 0000000000..a2e1ca3b93
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -0,0 +1,2945 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include <linux/delay.h>
+
+#include "dm_services.h"
+#include "basics/dc_common.h"
+#include "dm_helpers.h"
+#include "core_types.h"
+#include "resource.h"
+#include "dcn20_resource.h"
+#include "dcn20_hwseq.h"
+#include "dce/dce_hwseq.h"
+#include "dcn20_dsc.h"
+#include "dcn20_optc.h"
+#include "abm.h"
+#include "clk_mgr.h"
+#include "dmcu.h"
+#include "hubp.h"
+#include "timing_generator.h"
+#include "opp.h"
+#include "ipp.h"
+#include "mpc.h"
+#include "mcif_wb.h"
+#include "dchubbub.h"
+#include "reg_helper.h"
+#include "dcn10/dcn10_cm_common.h"
+#include "vm_helper.h"
+#include "dccg.h"
+#include "dc_dmub_srv.h"
+#include "dce/dmub_hw_lock_mgr.h"
+#include "hw_sequencer.h"
+#include "dpcd_defs.h"
+#include "inc/link_enc_cfg.h"
+#include "link_hwss.h"
+#include "link.h"
+
+#define DC_LOGGER_INIT(logger)
+
+#define CTX \
+ hws->ctx
+#define REG(reg)\
+ hws->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hws->shifts->field_name, hws->masks->field_name
+
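+/* Return the first unused GSL group (1, 2 or 3), or 0 if all three are in use. */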
+static int find_free_gsl_group(const struct dc *dc)
+{
+ if (dc->res_pool->gsl_groups.gsl_0 == 0)
+ return 1;
+ if (dc->res_pool->gsl_groups.gsl_1 == 0)
+ return 2;
+ if (dc->res_pool->gsl_groups.gsl_2 == 0)
+ return 3;
+
+ return 0;
+}
+
+/* NOTE: This is not a generic setup_gsl function (hence the suffix as_lock)
+ * This is only used to lock pipes in pipe splitting case with immediate flip
+ * Ordinary MPC/OTG locks suppress VUPDATE which doesn't help with immediate,
+ * so we get tearing with freesync since we cannot flip multiple pipes
+ * atomically.
+ * We use GSL for this:
+ * - immediate flip: find first available GSL group if not already assigned
+ * program gsl with that group, set current OTG as master
+ * and always use 0x4 = AND of flip_ready from all pipes
+ * - vsync flip: disable GSL if used
+ *
+ * Groups in stream_res are stored as +1 from HW registers, i.e.
+ * gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1
+ * Using a magic value like -1 would require tracking all inits/resets
+ */
+static void dcn20_setup_gsl_group_as_lock(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool enable)
+{
+ struct gsl_params gsl;
+ int group_idx;
+
+ memset(&gsl, 0, sizeof(struct gsl_params));
+
+ if (enable) {
+ /* return if a group is already assigned; had GSL been set up
+ * for a vsync flip we would have unassigned it, so it can't be "left over"
+ */
+ if (pipe_ctx->stream_res.gsl_group > 0)
+ return;
+
+ group_idx = find_free_gsl_group(dc);
+ ASSERT(group_idx != 0);
+ pipe_ctx->stream_res.gsl_group = group_idx;
+
+ /* set gsl group reg field and mark resource used */
+ switch (group_idx) {
+ case 1:
+ gsl.gsl0_en = 1;
+ dc->res_pool->gsl_groups.gsl_0 = 1;
+ break;
+ case 2:
+ gsl.gsl1_en = 1;
+ dc->res_pool->gsl_groups.gsl_1 = 1;
+ break;
+ case 3:
+ gsl.gsl2_en = 1;
+ dc->res_pool->gsl_groups.gsl_2 = 1;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return; // invalid case
+ }
+ gsl.gsl_master_en = 1;
+ } else {
+ group_idx = pipe_ctx->stream_res.gsl_group;
+ if (group_idx == 0)
+ return; // if not in use, just return
+
+ pipe_ctx->stream_res.gsl_group = 0;
+
+ /* unset gsl group reg field and mark resource free */
+ switch (group_idx) {
+ case 1:
+ gsl.gsl0_en = 0;
+ dc->res_pool->gsl_groups.gsl_0 = 0;
+ break;
+ case 2:
+ gsl.gsl1_en = 0;
+ dc->res_pool->gsl_groups.gsl_1 = 0;
+ break;
+ case 3:
+ gsl.gsl2_en = 0;
+ dc->res_pool->gsl_groups.gsl_2 = 0;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ gsl.gsl_master_en = 0;
+ }
+
+ /* at this point we want to program whether it's to enable or disable */
+ if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL &&
+ pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) {
+ pipe_ctx->stream_res.tg->funcs->set_gsl(
+ pipe_ctx->stream_res.tg,
+ &gsl);
+
+ pipe_ctx->stream_res.tg->funcs->set_gsl_source_select(
+ pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
+ } else
+ BREAK_TO_DEBUGGER();
+}
+
+void dcn20_set_flip_control_gsl(
+ struct pipe_ctx *pipe_ctx,
+ bool flip_immediate)
+{
+ if (pipe_ctx && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl)
+ pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl(
+ pipe_ctx->plane_res.hubp, flip_immediate);
+
+}
+
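+/* When 'enable' is false, force the HUBP/DPP/DSC power domains on (i.e. disable
+ * power gating); when true, allow the PG FSM to gate them.
+ */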
+void dcn20_enable_power_gating_plane(
+ struct dce_hwseq *hws,
+ bool enable)
+{
+ bool force_on = true; /* disable power gating */
+ uint32_t org_ip_request_cntl = 0;
+
+ if (enable)
+ force_on = false;
+
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
+
+ /* DCHUBP0/1/2/3/4/5 */
+ REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
+ if (REG(DOMAIN8_PG_CONFIG))
+ REG_UPDATE(DOMAIN8_PG_CONFIG, DOMAIN8_POWER_FORCEON, force_on);
+ if (REG(DOMAIN10_PG_CONFIG))
+ REG_UPDATE(DOMAIN10_PG_CONFIG, DOMAIN8_POWER_FORCEON, force_on);
+
+ /* DPP0/1/2/3/4/5 */
+ REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
+ if (REG(DOMAIN9_PG_CONFIG))
+ REG_UPDATE(DOMAIN9_PG_CONFIG, DOMAIN9_POWER_FORCEON, force_on);
+ if (REG(DOMAIN11_PG_CONFIG))
+ REG_UPDATE(DOMAIN11_PG_CONFIG, DOMAIN9_POWER_FORCEON, force_on);
+
+ /* DSC0/1/2/3/4/5 */
+ REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN16_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN17_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN18_POWER_FORCEON, force_on);
+ if (REG(DOMAIN19_PG_CONFIG))
+ REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN19_POWER_FORCEON, force_on);
+ if (REG(DOMAIN20_PG_CONFIG))
+ REG_UPDATE(DOMAIN20_PG_CONFIG, DOMAIN20_POWER_FORCEON, force_on);
+ if (REG(DOMAIN21_PG_CONFIG))
+ REG_UPDATE(DOMAIN21_PG_CONFIG, DOMAIN21_POWER_FORCEON, force_on);
+
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
+
+}
+
+void dcn20_dccg_init(struct dce_hwseq *hws)
+{
+ /*
+ * set MICROSECOND_TIME_BASE_DIV
+ * 100Mhz refclk -> 0x120264
+ * 27Mhz refclk -> 0x12021b
+ * 48Mhz refclk -> 0x120230
+ *
+ */
+ REG_WRITE(MICROSECOND_TIME_BASE_DIV, 0x120264);
+
+ /*
+ * set MILLISECOND_TIME_BASE_DIV
+ * 100Mhz refclk -> 0x1186a0
+ * 27Mhz refclk -> 0x106978
+ * 48Mhz refclk -> 0x10bb80
+ *
+ */
+ REG_WRITE(MILLISECOND_TIME_BASE_DIV, 0x1186a0);
+
+ /* This value is dependent on the hardware pipeline delay so set once per SOC */
+ REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0xe01003c);
+}
+
+void dcn20_disable_vga(
+ struct dce_hwseq *hws)
+{
+ REG_WRITE(D1VGA_CONTROL, 0);
+ REG_WRITE(D2VGA_CONTROL, 0);
+ REG_WRITE(D3VGA_CONTROL, 0);
+ REG_WRITE(D4VGA_CONTROL, 0);
+ REG_WRITE(D5VGA_CONTROL, 0);
+ REG_WRITE(D6VGA_CONTROL, 0);
+}
+
+void dcn20_program_triple_buffer(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool enable_triple_buffer)
+{
+ if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs) {
+ pipe_ctx->plane_res.hubp->funcs->hubp_enable_tripleBuffer(
+ pipe_ctx->plane_res.hubp,
+ enable_triple_buffer);
+ }
+}
+
+/* Blank pixel data during initialization */
+void dcn20_init_blank(
+ struct dc *dc,
+ struct timing_generator *tg)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ enum dc_color_space color_space;
+ struct tg_color black_color = {0};
+ struct output_pixel_processor *opp = NULL;
+ struct output_pixel_processor *bottom_opp = NULL;
+ uint32_t num_opps, opp_id_src0, opp_id_src1;
+ uint32_t otg_active_width, otg_active_height;
+
+ /* program opp dpg blank color */
+ color_space = COLOR_SPACE_SRGB;
+ color_space_to_black_color(dc, color_space, &black_color);
+
+ /* get the OTG active size */
+ tg->funcs->get_otg_active_size(tg,
+ &otg_active_width,
+ &otg_active_height);
+
+ /* get the OPTC source */
+ tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
+
+ if (opp_id_src0 >= dc->res_pool->res_cap->num_opp) {
+ ASSERT(false);
+ return;
+ }
+ opp = dc->res_pool->opps[opp_id_src0];
+
+ /* don't override the blank pattern if already enabled with the correct one. */
+ if (opp->funcs->dpg_is_blanked && opp->funcs->dpg_is_blanked(opp))
+ return;
+
+ if (num_opps == 2) {
+ otg_active_width = otg_active_width / 2;
+
+ if (opp_id_src1 >= dc->res_pool->res_cap->num_opp) {
+ ASSERT(false);
+ return;
+ }
+ bottom_opp = dc->res_pool->opps[opp_id_src1];
+ }
+
+ opp->funcs->opp_set_disp_pattern_generator(
+ opp,
+ CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
+ COLOR_DEPTH_UNDEFINED,
+ &black_color,
+ otg_active_width,
+ otg_active_height,
+ 0);
+
+ if (num_opps == 2) {
+ bottom_opp->funcs->opp_set_disp_pattern_generator(
+ bottom_opp,
+ CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
+ COLOR_DEPTH_UNDEFINED,
+ &black_color,
+ otg_active_width,
+ otg_active_height,
+ 0);
+ }
+
+ hws->funcs.wait_for_blank_complete(opp);
+}
+
+void dcn20_dsc_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int dsc_inst,
+ bool power_on)
+{
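+ /* PGFSM encoding, as implied by the REG_WAIT targets below:
+ * power status 0 = powered on, 2 = power gated.
+ */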
+ uint32_t power_gate = power_on ? 0 : 1;
+ uint32_t pwr_status = power_on ? 0 : 2;
+ uint32_t org_ip_request_cntl = 0;
+
+ if (hws->ctx->dc->debug.disable_dsc_power_gate)
+ return;
+
+ if (REG(DOMAIN16_PG_CONFIG) == 0)
+ return;
+
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
+
+ switch (dsc_inst) {
+ case 0: /* DSC0 */
+ REG_UPDATE(DOMAIN16_PG_CONFIG,
+ DOMAIN16_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN16_PG_STATUS,
+ DOMAIN16_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 1: /* DSC1 */
+ REG_UPDATE(DOMAIN17_PG_CONFIG,
+ DOMAIN17_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN17_PG_STATUS,
+ DOMAIN17_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 2: /* DSC2 */
+ REG_UPDATE(DOMAIN18_PG_CONFIG,
+ DOMAIN18_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN18_PG_STATUS,
+ DOMAIN18_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 3: /* DSC3 */
+ REG_UPDATE(DOMAIN19_PG_CONFIG,
+ DOMAIN19_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN19_PG_STATUS,
+ DOMAIN19_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 4: /* DSC4 */
+ REG_UPDATE(DOMAIN20_PG_CONFIG,
+ DOMAIN20_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN20_PG_STATUS,
+ DOMAIN20_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 5: /* DSC5 */
+ REG_UPDATE(DOMAIN21_PG_CONFIG,
+ DOMAIN21_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN21_PG_STATUS,
+ DOMAIN21_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
+}
+
+void dcn20_dpp_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on)
+{
+ uint32_t power_gate = power_on ? 0 : 1;
+ uint32_t pwr_status = power_on ? 0 : 2;
+
+ if (hws->ctx->dc->debug.disable_dpp_power_gate)
+ return;
+ if (REG(DOMAIN1_PG_CONFIG) == 0)
+ return;
+
+ switch (dpp_inst) {
+ case 0: /* DPP0 */
+ REG_UPDATE(DOMAIN1_PG_CONFIG,
+ DOMAIN1_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN1_PG_STATUS,
+ DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 1: /* DPP1 */
+ REG_UPDATE(DOMAIN3_PG_CONFIG,
+ DOMAIN3_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN3_PG_STATUS,
+ DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 2: /* DPP2 */
+ REG_UPDATE(DOMAIN5_PG_CONFIG,
+ DOMAIN5_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN5_PG_STATUS,
+ DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 3: /* DPP3 */
+ REG_UPDATE(DOMAIN7_PG_CONFIG,
+ DOMAIN7_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN7_PG_STATUS,
+ DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 4: /* DPP4 */
+ REG_UPDATE(DOMAIN9_PG_CONFIG,
+ DOMAIN9_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN9_PG_STATUS,
+ DOMAIN9_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 5: /* DPP5 */
+ /*
+ * Do not power gate DPP5, should be left at HW default, power on permanently.
+ * PG on Pipe5 is De-featured, attempting to put it to PG state may result in hard
+ * reset.
+ * REG_UPDATE(DOMAIN11_PG_CONFIG,
+ * DOMAIN11_POWER_GATE, power_gate);
+ *
+ * REG_WAIT(DOMAIN11_PG_STATUS,
+ * DOMAIN11_PGFSM_PWR_STATUS, pwr_status,
+ * 1, 1000);
+ */
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
+
+
+void dcn20_hubp_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int hubp_inst,
+ bool power_on)
+{
+ uint32_t power_gate = power_on ? 0 : 1;
+ uint32_t pwr_status = power_on ? 0 : 2;
+
+ if (hws->ctx->dc->debug.disable_hubp_power_gate)
+ return;
+ if (REG(DOMAIN0_PG_CONFIG) == 0)
+ return;
+
+ switch (hubp_inst) {
+ case 0: /* DCHUBP0 */
+ REG_UPDATE(DOMAIN0_PG_CONFIG,
+ DOMAIN0_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN0_PG_STATUS,
+ DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 1: /* DCHUBP1 */
+ REG_UPDATE(DOMAIN2_PG_CONFIG,
+ DOMAIN2_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN2_PG_STATUS,
+ DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 2: /* DCHUBP2 */
+ REG_UPDATE(DOMAIN4_PG_CONFIG,
+ DOMAIN4_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN4_PG_STATUS,
+ DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 3: /* DCHUBP3 */
+ REG_UPDATE(DOMAIN6_PG_CONFIG,
+ DOMAIN6_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN6_PG_STATUS,
+ DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 4: /* DCHUBP4 */
+ REG_UPDATE(DOMAIN8_PG_CONFIG,
+ DOMAIN8_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN8_PG_STATUS,
+ DOMAIN8_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 5: /* DCHUBP5 */
+ /*
+ * Do not power gate DCHUB5, should be left at HW default, power on permanently.
+ * PG on Pipe5 is De-featured, attempting to put it to PG state may result in hard
+ * reset.
+ * REG_UPDATE(DOMAIN10_PG_CONFIG,
+ * DOMAIN10_POWER_GATE, power_gate);
+ *
+ * REG_WAIT(DOMAIN10_PG_STATUS,
+ * DOMAIN10_PGFSM_PWR_STATUS, pwr_status,
+ * 1, 1000);
+ */
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
+
+
+/* disable HW used by plane.
+ * note: cannot disable until disconnect is complete
+ */
+void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+
+ dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
+
+ /* In the immediate-flip with pipe-splitting case, GSL is used for
+ * synchronization, so we must disable it when the plane is disabled.
+ */
+ if (pipe_ctx->stream_res.gsl_group != 0)
+ dcn20_setup_gsl_group_as_lock(dc, pipe_ctx, false);
+
+ if (hubp->funcs->hubp_update_mall_sel)
+ hubp->funcs->hubp_update_mall_sel(hubp, 0, false);
+
+ dc->hwss.set_flip_control_gsl(pipe_ctx, false);
+
+ hubp->funcs->hubp_clk_cntl(hubp, false);
+
+ dpp->funcs->dpp_dppclk_control(dpp, false, false);
+
+ hubp->power_gated = true;
+
+ hws->funcs.plane_atomic_power_down(dc,
+ pipe_ctx->plane_res.dpp,
+ pipe_ctx->plane_res.hubp);
+
+ pipe_ctx->stream = NULL;
+ memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
+ memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
+ pipe_ctx->top_pipe = NULL;
+ pipe_ctx->bottom_pipe = NULL;
+ pipe_ctx->plane_state = NULL;
+}
+
+
+void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ bool is_phantom = pipe_ctx->plane_state && pipe_ctx->plane_state->is_phantom;
+ struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
+ return;
+
+ dcn20_plane_atomic_disable(dc, pipe_ctx);
+
+ /* Turn back off the phantom OTG after the phantom plane is fully disabled
+ */
+ if (is_phantom)
+ if (tg && tg->funcs->disable_phantom_crtc)
+ tg->funcs->disable_phantom_crtc(tg);
+
+ DC_LOG_DC("Power down front end %d\n",
+ pipe_ctx->pipe_idx);
+}
+
+void dcn20_disable_pixel_data(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank)
+{
+ dcn20_blank_pixel_data(dc, pipe_ctx, blank);
+}
+
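+/* Derive the MPC out-rate flow control count from the horizontal blanking width;
+ * it is halved when two pixels share one container or ODM combine is used,
+ * and halved again for 4:1 ODM combine.
+ */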
+static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
+ int opp_cnt)
+{
+ bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
+ int flow_ctrl_cnt;
+
+ if (opp_cnt >= 2)
+ hblank_halved = true;
+
+ flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
+ stream->timing.h_border_left -
+ stream->timing.h_border_right;
+
+ if (hblank_halved)
+ flow_ctrl_cnt /= 2;
+
+ /* ODM combine 4:1 case */
+ if (opp_cnt == 4)
+ flow_ctrl_cnt /= 2;
+
+ return flow_ctrl_cnt;
+}
+
+enum dc_status dcn20_enable_stream_timing(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct drr_params params = {0};
+ unsigned int event_triggers = 0;
+ struct pipe_ctx *odm_pipe;
+ int opp_cnt = 1;
+ int opp_inst[MAX_PIPES] = { pipe_ctx->stream_res.opp->inst };
+ bool interlace = stream->timing.flags.INTERLACE;
+ int i;
+ struct mpc_dwb_flow_control flow_control;
+ struct mpc *mpc = dc->res_pool->mpc;
+ bool rate_control_2x_pclk = (interlace || optc2_is_two_pixels_per_containter(&stream->timing));
+ unsigned int k1_div = PIXEL_RATE_DIV_NA;
+ unsigned int k2_div = PIXEL_RATE_DIV_NA;
+
+ if (hws->funcs.calculate_dccg_k1_k2_values && dc->res_pool->dccg->funcs->set_pixel_rate_div) {
+ hws->funcs.calculate_dccg_k1_k2_values(pipe_ctx, &k1_div, &k2_div);
+
+ dc->res_pool->dccg->funcs->set_pixel_rate_div(
+ dc->res_pool->dccg,
+ pipe_ctx->stream_res.tg->inst,
+ k1_div, k2_div);
+ }
+ /* The caller's loop processes pipe0, the parent pipe, first.
+ * The back end is set up for pipe0; the other child pipes share
+ * pipe0's back end, so no further programming is needed for them.
+ */
+ if (pipe_ctx->top_pipe != NULL)
+ return DC_OK;
+
+ /* TODO check if timing_changed, disable stream if timing changed */
+
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst;
+ opp_cnt++;
+ }
+
+ if (opp_cnt > 1)
+ pipe_ctx->stream_res.tg->funcs->set_odm_combine(
+ pipe_ctx->stream_res.tg,
+ opp_inst, opp_cnt,
+ &pipe_ctx->stream->timing);
+
+ /* The HW programming guide assumes the display was already disabled
+ * by the unplug sequence and that the OTG is stopped.
+ */
+ pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
+
+ if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
+ pipe_ctx->clock_source,
+ &pipe_ctx->stream_res.pix_clk_params,
+ dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
+ &pipe_ctx->pll_settings)) {
+ BREAK_TO_DEBUGGER();
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ if (dc_is_hdmi_tmds_signal(stream->signal)) {
+ stream->link->phy_state.symclk_ref_cnts.otg = 1;
+ if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
+ stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
+ else
+ stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
+ }
+
+ if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
+ dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);
+
+ pipe_ctx->stream_res.tg->funcs->program_timing(
+ pipe_ctx->stream_res.tg,
+ &stream->timing,
+ pipe_ctx->pipe_dlg_param.vready_offset,
+ pipe_ctx->pipe_dlg_param.vstartup_start,
+ pipe_ctx->pipe_dlg_param.vupdate_offset,
+ pipe_ctx->pipe_dlg_param.vupdate_width,
+ pipe_ctx->stream->signal,
+ true);
+
+ rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
+ flow_control.flow_ctrl_mode = 0;
+ flow_control.flow_ctrl_cnt0 = 0x80;
+ flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(stream, opp_cnt);
+ if (mpc->funcs->set_out_rate_control) {
+ for (i = 0; i < opp_cnt; ++i) {
+ mpc->funcs->set_out_rate_control(
+ mpc, opp_inst[i],
+ true,
+ rate_control_2x_pclk,
+ &flow_control);
+ }
+ }
+
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
+ odm_pipe->stream_res.opp,
+ true);
+
+ pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
+ pipe_ctx->stream_res.opp,
+ true);
+
+ hws->funcs.blank_pixel_data(dc, pipe_ctx, true);
+
+ /* VTG is within DCHUB command block. DCFCLK is always on */
+ if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
+ BREAK_TO_DEBUGGER();
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);
+
+ params.vertical_total_min = stream->adjust.v_total_min;
+ params.vertical_total_max = stream->adjust.v_total_max;
+ params.vertical_total_mid = stream->adjust.v_total_mid;
+ params.vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num;
+ if (pipe_ctx->stream_res.tg->funcs->set_drr)
+ pipe_ctx->stream_res.tg->funcs->set_drr(
+ pipe_ctx->stream_res.tg, &params);
+
+ // DRR should set trigger event to monitor surface update event
+ if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
+ event_triggers = 0x80;
+ /* Event triggers and num frames initialized for DRR, but can be
+ * later updated for PSR use. Note DRR trigger events are generated
+ * regardless of whether num frames met.
+ */
+ if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
+ pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
+ pipe_ctx->stream_res.tg, event_triggers, 2);
+
+ /* TODO program crtc source select for non-virtual signal*/
+ /* TODO program FMT */
+ /* TODO setup link_enc */
+ /* TODO set stream attributes */
+ /* TODO program audio */
+ /* TODO enable stream if timing changed */
+ /* TODO unblank stream if DP */
+
+ if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (pipe_ctx->stream_res.tg && pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable)
+ pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg);
+ }
+ return DC_OK;
+}
+
+void dcn20_program_output_csc(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix,
+ int opp_id)
+{
+ struct mpc *mpc = dc->res_pool->mpc;
+ enum mpc_output_csc_mode ocsc_mode = MPC_OUTPUT_CSC_COEF_A;
+ int mpcc_id = pipe_ctx->plane_res.hubp->inst;
+
+ if (mpc->funcs->power_on_mpc_mem_pwr)
+ mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, true);
+
+ if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
+ if (mpc->funcs->set_output_csc != NULL)
+ mpc->funcs->set_output_csc(mpc,
+ opp_id,
+ matrix,
+ ocsc_mode);
+ } else {
+ if (mpc->funcs->set_ocsc_default != NULL)
+ mpc->funcs->set_ocsc_default(mpc,
+ opp_id,
+ colorspace,
+ ocsc_mode);
+ }
+}
+
+bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream)
+{
+ int mpcc_id = pipe_ctx->plane_res.hubp->inst;
+ struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
+ struct pwl_params *params = NULL;
+ /*
+ * Program OGAM only for the top pipe.
+ * If there is a pipe split, the diagnostics need a fix to decide how to
+ * pass the OGAM parameters per stream. If programming all pipes becomes
+ * necessary, remove the pipe_ctx->top_pipe == NULL condition, but then
+ * fix the diagnostics accordingly.
+ */
+ if (mpc->funcs->power_on_mpc_mem_pwr)
+ mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, true);
+ if (pipe_ctx->top_pipe == NULL
+ && mpc->funcs->set_output_gamma && stream->out_transfer_func) {
+ if (stream->out_transfer_func->type == TF_TYPE_HWPWL)
+ params = &stream->out_transfer_func->pwl;
+ else if (pipe_ctx->stream->out_transfer_func->type ==
+ TF_TYPE_DISTRIBUTED_POINTS &&
+ cm_helper_translate_curve_to_hw_format(dc->ctx,
+ stream->out_transfer_func,
+ &mpc->blender_params, false))
+ params = &mpc->blender_params;
+ /*
+ * there is no ROM for predefined output transfer functions
+ */
+ if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED)
+ BREAK_TO_DEBUGGER();
+ }
+ /*
+ * if the branch above was not taken, 'params' is still NULL and the
+ * output gamma is set to bypass
+ */
+ mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
+
+ return true;
+}
+
+bool dcn20_set_blend_lut(
+ struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
+{
+ struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
+ bool result = true;
+ struct pwl_params *blend_lut = NULL;
+
+ if (plane_state->blend_tf) {
+ if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
+ blend_lut = &plane_state->blend_tf->pwl;
+ else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
+ cm_helper_translate_curve_to_hw_format(plane_state->ctx,
+ plane_state->blend_tf,
+ &dpp_base->regamma_params, false);
+ blend_lut = &dpp_base->regamma_params;
+ }
+ }
+ result = dpp_base->funcs->dpp_program_blnd_lut(dpp_base, blend_lut);
+
+ return result;
+}
+
+bool dcn20_set_shaper_3dlut(
+ struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
+{
+ struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
+ bool result = true;
+ struct pwl_params *shaper_lut = NULL;
+
+ if (plane_state->in_shaper_func) {
+ if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL)
+ shaper_lut = &plane_state->in_shaper_func->pwl;
+ else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
+ cm_helper_translate_curve_to_hw_format(plane_state->ctx,
+ plane_state->in_shaper_func,
+ &dpp_base->shaper_params, true);
+ shaper_lut = &dpp_base->shaper_params;
+ }
+ }
+
+ result = dpp_base->funcs->dpp_program_shaper_lut(dpp_base, shaper_lut);
+ if (plane_state->lut3d_func &&
+ plane_state->lut3d_func->state.bits.initialized == 1)
+ result = dpp_base->funcs->dpp_program_3dlut(dpp_base,
+ &plane_state->lut3d_func->lut_3d);
+ else
+ result = dpp_base->funcs->dpp_program_3dlut(dpp_base, NULL);
+
+ return result;
+}
+
+bool dcn20_set_input_transfer_func(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
+ const struct dc_transfer_func *tf = NULL;
+ bool result = true;
+ bool use_degamma_ram = false;
+
+ if (dpp_base == NULL || plane_state == NULL)
+ return false;
+
+ hws->funcs.set_shaper_3dlut(pipe_ctx, plane_state);
+ hws->funcs.set_blend_lut(pipe_ctx, plane_state);
+
+ if (plane_state->in_transfer_func)
+ tf = plane_state->in_transfer_func;
+
+
+ if (tf == NULL) {
+ dpp_base->funcs->dpp_set_degamma(dpp_base,
+ IPP_DEGAMMA_MODE_BYPASS);
+ return true;
+ }
+
+ if (tf->type == TF_TYPE_HWPWL || tf->type == TF_TYPE_DISTRIBUTED_POINTS)
+ use_degamma_ram = true;
+
+ if (use_degamma_ram == true) {
+ if (tf->type == TF_TYPE_HWPWL)
+ dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
+ &tf->pwl);
+ else if (tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
+ cm_helper_translate_curve_to_degamma_hw_format(tf,
+ &dpp_base->degamma_params);
+ dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
+ &dpp_base->degamma_params);
+ }
+ return true;
+ }
+ /* handle here the optimized cases where the de-gamma ROM can be used */
+ if (tf->type == TF_TYPE_PREDEFINED) {
+ switch (tf->tf) {
+ case TRANSFER_FUNCTION_SRGB:
+ dpp_base->funcs->dpp_set_degamma(dpp_base,
+ IPP_DEGAMMA_MODE_HW_sRGB);
+ break;
+ case TRANSFER_FUNCTION_BT709:
+ dpp_base->funcs->dpp_set_degamma(dpp_base,
+ IPP_DEGAMMA_MODE_HW_xvYCC);
+ break;
+ case TRANSFER_FUNCTION_LINEAR:
+ dpp_base->funcs->dpp_set_degamma(dpp_base,
+ IPP_DEGAMMA_MODE_BYPASS);
+ break;
+ case TRANSFER_FUNCTION_PQ:
+ dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
+ cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
+ dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
+ result = true;
+ break;
+ default:
+ result = false;
+ break;
+ }
+ } else if (tf->type == TF_TYPE_BYPASS)
+ dpp_base->funcs->dpp_set_degamma(dpp_base,
+ IPP_DEGAMMA_MODE_BYPASS);
+ else {
+ /*
+ * if we get here, the case was not handled correctly;
+ * a fix is required for this use case
+ */
+ BREAK_TO_DEBUGGER();
+ dpp_base->funcs->dpp_set_degamma(dpp_base,
+ IPP_DEGAMMA_MODE_BYPASS);
+ }
+
+ return result;
+}
+
+void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
+{
+ struct pipe_ctx *odm_pipe;
+ int opp_cnt = 1;
+ int opp_inst[MAX_PIPES] = { pipe_ctx->stream_res.opp->inst };
+
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst;
+ opp_cnt++;
+ }
+
+ if (opp_cnt > 1)
+ pipe_ctx->stream_res.tg->funcs->set_odm_combine(
+ pipe_ctx->stream_res.tg,
+ opp_inst, opp_cnt,
+ &pipe_ctx->stream->timing);
+ else
+ pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+}
+
+void dcn20_blank_pixel_data(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank)
+{
+ struct tg_color black_color = {0};
+ struct stream_resource *stream_res = &pipe_ctx->stream_res;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ enum dc_color_space color_space = stream->output_color_space;
+ enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR;
+ enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
+ struct pipe_ctx *odm_pipe;
+ int odm_cnt = 1;
+ int h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
+ int v_active = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
+ int odm_slice_width, last_odm_slice_width, offset = 0;
+
+ if (stream->link->test_pattern_enabled)
+ return;
+
+ /* get opp dpg blank color */
+ color_space_to_black_color(dc, color_space, &black_color);
+
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ odm_cnt++;
+ odm_slice_width = h_active / odm_cnt;
+ last_odm_slice_width = h_active - odm_slice_width * (odm_cnt - 1);
+
+ if (blank) {
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
+
+ if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
+ test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
+ test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
+ }
+ } else {
+ test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
+ }
+
+ odm_pipe = pipe_ctx;
+
+ while (odm_pipe->next_odm_pipe) {
+ dc->hwss.set_disp_pattern_generator(dc,
+ odm_pipe,
+ test_pattern,
+ test_pattern_color_space,
+ stream->timing.display_color_depth,
+ &black_color,
+ odm_slice_width,
+ v_active,
+ offset);
+ offset += odm_slice_width;
+ odm_pipe = odm_pipe->next_odm_pipe;
+ }
+
+ dc->hwss.set_disp_pattern_generator(dc,
+ odm_pipe,
+ test_pattern,
+ test_pattern_color_space,
+ stream->timing.display_color_depth,
+ &black_color,
+ last_odm_slice_width,
+ v_active,
+ offset);
+
+ if (!blank)
+ if (stream_res->abm) {
+ dc->hwss.set_pipe(pipe_ctx);
+ stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
+ }
+}
+
+
+static void dcn20_power_on_plane_resources(
+ struct dce_hwseq *hws,
+ struct pipe_ctx *pipe_ctx)
+{
+ DC_LOGGER_INIT(hws->ctx->logger);
+
+ if (hws->funcs.dpp_root_clock_control)
+ hws->funcs.dpp_root_clock_control(hws, pipe_ctx->plane_res.dpp->inst, true);
+
+ if (REG(DC_IP_REQUEST_CNTL)) {
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 1);
+
+ if (hws->funcs.dpp_pg_control)
+ hws->funcs.dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
+
+ if (hws->funcs.hubp_pg_control)
+ hws->funcs.hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
+
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 0);
+ DC_LOG_DEBUG(
+ "Un-gated front end for pipe %d\n", pipe_ctx->plane_res.hubp->inst);
+ }
+}
+
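+/* Ungate and power up the plane's HW resources and enable its clocks before the
+ * plane is programmed.
+ */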
+static void dcn20_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ //if (dc->debug.sanity_checks) {
+ // dcn10_verify_allow_pstate_change_high(dc);
+ //}
+ dcn20_power_on_plane_resources(dc->hwseq, pipe_ctx);
+
+ /* enable DCFCLK current DCHUB */
+ pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
+
+ /* initialize HUBP on power up */
+ pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp);
+
+ /* make sure OPP_PIPE_CLOCK_EN = 1 */
+ pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
+ pipe_ctx->stream_res.opp,
+ true);
+
+/* TODO: enable/disable in dm as per update type.
+ if (plane_state) {
+ DC_LOG_DC(dc->ctx->logger,
+ "Pipe:%d 0x%x: addr hi:0x%x, "
+ "addr low:0x%x, "
+ "src: %d, %d, %d,"
+ " %d; dst: %d, %d, %d, %d;\n",
+ pipe_ctx->pipe_idx,
+ plane_state,
+ plane_state->address.grph.addr.high_part,
+ plane_state->address.grph.addr.low_part,
+ plane_state->src_rect.x,
+ plane_state->src_rect.y,
+ plane_state->src_rect.width,
+ plane_state->src_rect.height,
+ plane_state->dst_rect.x,
+ plane_state->dst_rect.y,
+ plane_state->dst_rect.width,
+ plane_state->dst_rect.height);
+
+ DC_LOG_DC(dc->ctx->logger,
+ "Pipe %d: width, height, x, y format:%d\n"
+ "viewport:%d, %d, %d, %d\n"
+ "recout: %d, %d, %d, %d\n",
+ pipe_ctx->pipe_idx,
+ plane_state->format,
+ pipe_ctx->plane_res.scl_data.viewport.width,
+ pipe_ctx->plane_res.scl_data.viewport.height,
+ pipe_ctx->plane_res.scl_data.viewport.x,
+ pipe_ctx->plane_res.scl_data.viewport.y,
+ pipe_ctx->plane_res.scl_data.recout.width,
+ pipe_ctx->plane_res.scl_data.recout.height,
+ pipe_ctx->plane_res.scl_data.recout.x,
+ pipe_ctx->plane_res.scl_data.recout.y);
+ print_rq_dlg_ttu(dc, pipe_ctx);
+ }
+*/
+ if (dc->vm_pa_config.valid) {
+ struct vm_system_aperture_param apt;
+
+ apt.sys_default.quad_part = 0;
+
+ apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.start_addr;
+ apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.end_addr;
+
+ // Program system aperture settings
+ pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt);
+ }
+
+ if (!pipe_ctx->top_pipe
+ && pipe_ctx->plane_state
+ && pipe_ctx->plane_state->flip_int_enabled
+ && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
+ pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
+
+// if (dc->debug.sanity_checks) {
+// dcn10_verify_allow_pstate_change_high(dc);
+// }
+}
+
+void dcn20_pipe_control_lock(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock)
+{
+ struct pipe_ctx *temp_pipe;
+ bool flip_immediate = false;
+
+ /* the TG master update lock locks everything on the TG,
+ * therefore only the top pipe needs to take the lock
+ */
+ if (!pipe || pipe->top_pipe)
+ return;
+
+ if (pipe->plane_state != NULL)
+ flip_immediate = pipe->plane_state->flip_immediate;
+
+ if (pipe->stream_res.gsl_group > 0) {
+ temp_pipe = pipe->bottom_pipe;
+ while (!flip_immediate && temp_pipe) {
+ if (temp_pipe->plane_state != NULL)
+ flip_immediate = temp_pipe->plane_state->flip_immediate;
+ temp_pipe = temp_pipe->bottom_pipe;
+ }
+ }
+
+ if (flip_immediate && lock) {
+ const int TIMEOUT_FOR_FLIP_PENDING_US = 100000;
+ unsigned int polling_interval_us = 1;
+ int i;
+
+ temp_pipe = pipe;
+ while (temp_pipe) {
+ if (temp_pipe->plane_state && temp_pipe->plane_state->flip_immediate) {
+ for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING_US / polling_interval_us; ++i) {
+ if (!temp_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(temp_pipe->plane_res.hubp))
+ break;
+ udelay(polling_interval_us);
+ }
+
+ /* no reason it should take this long for immediate flips */
+ ASSERT(i != TIMEOUT_FOR_FLIP_PENDING_US);
+ }
+ temp_pipe = temp_pipe->bottom_pipe;
+ }
+ }
+
+ /* In flip immediate and pipe splitting case, we need to use GSL
+ * for synchronization. Only do setup on locking and on flip type change.
+ */
+ if (lock && (pipe->bottom_pipe != NULL || !flip_immediate))
+ if ((flip_immediate && pipe->stream_res.gsl_group == 0) ||
+ (!flip_immediate && pipe->stream_res.gsl_group > 0))
+ dcn20_setup_gsl_group_as_lock(dc, pipe, flip_immediate);
+
+ if (pipe->plane_state != NULL)
+ flip_immediate = pipe->plane_state->flip_immediate;
+
+ temp_pipe = pipe->bottom_pipe;
+ while (flip_immediate && temp_pipe) {
+ if (temp_pipe->plane_state != NULL)
+ flip_immediate = temp_pipe->plane_state->flip_immediate;
+ temp_pipe = temp_pipe->bottom_pipe;
+ }
+
+ if (!lock && pipe->stream_res.gsl_group > 0 && pipe->plane_state &&
+ !flip_immediate)
+ dcn20_setup_gsl_group_as_lock(dc, pipe, false);
+
+ if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
+ union dmub_hw_lock_flags hw_locks = { 0 };
+ struct dmub_hw_lock_inst_flags inst_flags = { 0 };
+
+ hw_locks.bits.lock_pipe = 1;
+ inst_flags.otg_inst = pipe->stream_res.tg->inst;
+
+ if (pipe->plane_state != NULL)
+ hw_locks.bits.triple_buffer_lock = pipe->plane_state->triplebuffer_flips;
+
+ dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
+ lock,
+ &hw_locks,
+ &inst_flags);
+ } else if (pipe->plane_state != NULL && pipe->plane_state->triplebuffer_flips) {
+ if (lock)
+ pipe->stream_res.tg->funcs->triplebuffer_lock(pipe->stream_res.tg);
+ else
+ pipe->stream_res.tg->funcs->triplebuffer_unlock(pipe->stream_res.tg);
+ } else {
+ if (lock)
+ pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
+ else
+ pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
+ }
+}
+
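+/* Compare the old and new pipe contexts and set new_pipe->update_flags to
+ * describe what has to be (re)programmed for this pipe.
+ */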
+static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx *new_pipe)
+{
+ new_pipe->update_flags.raw = 0;
+
+ /* If non-phantom pipe is being transitioned to a phantom pipe,
+ * set disable and return immediately. This is because the pipe
+ * that was previously in use must be fully disabled before we
+ * can "enable" it as a phantom pipe (since the OTG will certainly
+ * be different). The post_unlock sequence will set the correct
+ * update flags to enable the phantom pipe.
+ */
+ if (old_pipe->plane_state && !old_pipe->plane_state->is_phantom &&
+ new_pipe->plane_state && new_pipe->plane_state->is_phantom) {
+ new_pipe->update_flags.bits.disable = 1;
+ return;
+ }
+
+ /* Exit on unchanged, unused pipe */
+ if (!old_pipe->plane_state && !new_pipe->plane_state)
+ return;
+ /* Detect pipe enable/disable */
+ if (!old_pipe->plane_state && new_pipe->plane_state) {
+ new_pipe->update_flags.bits.enable = 1;
+ new_pipe->update_flags.bits.mpcc = 1;
+ new_pipe->update_flags.bits.dppclk = 1;
+ new_pipe->update_flags.bits.hubp_interdependent = 1;
+ new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+ new_pipe->update_flags.bits.unbounded_req = 1;
+ new_pipe->update_flags.bits.gamut_remap = 1;
+ new_pipe->update_flags.bits.scaler = 1;
+ new_pipe->update_flags.bits.viewport = 1;
+ new_pipe->update_flags.bits.det_size = 1;
+ if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
+ new_pipe->update_flags.bits.odm = 1;
+ new_pipe->update_flags.bits.global_sync = 1;
+ }
+ return;
+ }
+
+ /* For SubVP we need to unconditionally enable because any phantom pipes are
+ * always removed and then newly added for every full update whenever SubVP is in use.
+ * The remove-add sequence of the phantom pipe always results in the pipe
+ * being blanked in enable_stream_timing (DPG).
+ */
+ if (new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ new_pipe->update_flags.bits.enable = 1;
+
+ /* Phantom pipes are effectively disabled; if the pipe was previously phantom,
+ * we have to enable it
+ */
+ if (old_pipe->plane_state && old_pipe->plane_state->is_phantom &&
+ new_pipe->plane_state && !new_pipe->plane_state->is_phantom)
+ new_pipe->update_flags.bits.enable = 1;
+
+ if (old_pipe->plane_state && !new_pipe->plane_state) {
+ new_pipe->update_flags.bits.disable = 1;
+ return;
+ }
+
+ /* Detect plane change */
+ if (old_pipe->plane_state != new_pipe->plane_state) {
+ new_pipe->update_flags.bits.plane_changed = true;
+ }
+
+ /* Detect top pipe only changes */
+ if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
+ /* Detect odm changes */
+ if ((old_pipe->next_odm_pipe && new_pipe->next_odm_pipe
+ && old_pipe->next_odm_pipe->pipe_idx != new_pipe->next_odm_pipe->pipe_idx)
+ || (!old_pipe->next_odm_pipe && new_pipe->next_odm_pipe)
+ || (old_pipe->next_odm_pipe && !new_pipe->next_odm_pipe)
+ || old_pipe->stream_res.opp != new_pipe->stream_res.opp)
+ new_pipe->update_flags.bits.odm = 1;
+
+ /* Detect global sync changes */
+ if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset
+ || old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start
+ || old_pipe->pipe_dlg_param.vupdate_offset != new_pipe->pipe_dlg_param.vupdate_offset
+ || old_pipe->pipe_dlg_param.vupdate_width != new_pipe->pipe_dlg_param.vupdate_width)
+ new_pipe->update_flags.bits.global_sync = 1;
+ }
+
+ if (old_pipe->det_buffer_size_kb != new_pipe->det_buffer_size_kb)
+ new_pipe->update_flags.bits.det_size = 1;
+
+ /*
+ * Detect OPP / TG change; only set on change, not on enable.
+ * Assume mpcc inst == pipe index; if not, this code needs to be updated,
+ * since the MPCC is what is affected by these. In fact our whole sequence
+ * makes this assumption at the moment, with how the HUBP reset is matched
+ * to the same-index MPCC reset.
+ */
+ if (old_pipe->stream_res.opp != new_pipe->stream_res.opp)
+ new_pipe->update_flags.bits.opp_changed = 1;
+ if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
+ new_pipe->update_flags.bits.tg_changed = 1;
+
+ /*
+ * Detect mpcc blending changes, only dpp inst and opp matter here,
+ * mpccs getting removed/inserted update connected ones during their own
+ * programming
+ */
+ if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp
+ || old_pipe->stream_res.opp != new_pipe->stream_res.opp)
+ new_pipe->update_flags.bits.mpcc = 1;
+
+ /* Detect dppclk change */
+ if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz)
+ new_pipe->update_flags.bits.dppclk = 1;
+
+ /* Check for scl update */
+ if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
+ new_pipe->update_flags.bits.scaler = 1;
+ /* Check for vp update */
+ if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
+ || memcmp(&old_pipe->plane_res.scl_data.viewport_c,
+ &new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
+ new_pipe->update_flags.bits.viewport = 1;
+
+ /* Detect dlg/ttu/rq updates */
+ {
+ struct _vcs_dpi_display_dlg_regs_st old_dlg_attr = old_pipe->dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st old_ttu_attr = old_pipe->ttu_regs;
+ struct _vcs_dpi_display_dlg_regs_st *new_dlg_attr = &new_pipe->dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *new_ttu_attr = &new_pipe->ttu_regs;
+
+ /* Detect pipe interdependent updates */
+ if (old_dlg_attr.dst_y_prefetch != new_dlg_attr->dst_y_prefetch ||
+ old_dlg_attr.vratio_prefetch != new_dlg_attr->vratio_prefetch ||
+ old_dlg_attr.vratio_prefetch_c != new_dlg_attr->vratio_prefetch_c ||
+ old_dlg_attr.dst_y_per_vm_vblank != new_dlg_attr->dst_y_per_vm_vblank ||
+ old_dlg_attr.dst_y_per_row_vblank != new_dlg_attr->dst_y_per_row_vblank ||
+ old_dlg_attr.dst_y_per_vm_flip != new_dlg_attr->dst_y_per_vm_flip ||
+ old_dlg_attr.dst_y_per_row_flip != new_dlg_attr->dst_y_per_row_flip ||
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_l != new_dlg_attr->refcyc_per_meta_chunk_vblank_l ||
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_c != new_dlg_attr->refcyc_per_meta_chunk_vblank_c ||
+ old_dlg_attr.refcyc_per_meta_chunk_flip_l != new_dlg_attr->refcyc_per_meta_chunk_flip_l ||
+ old_dlg_attr.refcyc_per_line_delivery_pre_l != new_dlg_attr->refcyc_per_line_delivery_pre_l ||
+ old_dlg_attr.refcyc_per_line_delivery_pre_c != new_dlg_attr->refcyc_per_line_delivery_pre_c ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_l != new_ttu_attr->refcyc_per_req_delivery_pre_l ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_c != new_ttu_attr->refcyc_per_req_delivery_pre_c ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur0 != new_ttu_attr->refcyc_per_req_delivery_pre_cur0 ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur1 != new_ttu_attr->refcyc_per_req_delivery_pre_cur1 ||
+ old_ttu_attr.min_ttu_vblank != new_ttu_attr->min_ttu_vblank ||
+ old_ttu_attr.qos_level_flip != new_ttu_attr->qos_level_flip) {
+ old_dlg_attr.dst_y_prefetch = new_dlg_attr->dst_y_prefetch;
+ old_dlg_attr.vratio_prefetch = new_dlg_attr->vratio_prefetch;
+ old_dlg_attr.vratio_prefetch_c = new_dlg_attr->vratio_prefetch_c;
+ old_dlg_attr.dst_y_per_vm_vblank = new_dlg_attr->dst_y_per_vm_vblank;
+ old_dlg_attr.dst_y_per_row_vblank = new_dlg_attr->dst_y_per_row_vblank;
+ old_dlg_attr.dst_y_per_vm_flip = new_dlg_attr->dst_y_per_vm_flip;
+ old_dlg_attr.dst_y_per_row_flip = new_dlg_attr->dst_y_per_row_flip;
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_l = new_dlg_attr->refcyc_per_meta_chunk_vblank_l;
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_c = new_dlg_attr->refcyc_per_meta_chunk_vblank_c;
+ old_dlg_attr.refcyc_per_meta_chunk_flip_l = new_dlg_attr->refcyc_per_meta_chunk_flip_l;
+ old_dlg_attr.refcyc_per_line_delivery_pre_l = new_dlg_attr->refcyc_per_line_delivery_pre_l;
+ old_dlg_attr.refcyc_per_line_delivery_pre_c = new_dlg_attr->refcyc_per_line_delivery_pre_c;
+ old_ttu_attr.refcyc_per_req_delivery_pre_l = new_ttu_attr->refcyc_per_req_delivery_pre_l;
+ old_ttu_attr.refcyc_per_req_delivery_pre_c = new_ttu_attr->refcyc_per_req_delivery_pre_c;
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur0 = new_ttu_attr->refcyc_per_req_delivery_pre_cur0;
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur1 = new_ttu_attr->refcyc_per_req_delivery_pre_cur1;
+ old_ttu_attr.min_ttu_vblank = new_ttu_attr->min_ttu_vblank;
+ old_ttu_attr.qos_level_flip = new_ttu_attr->qos_level_flip;
+ new_pipe->update_flags.bits.hubp_interdependent = 1;
+ }
+ /* Detect any other updates to ttu/rq/dlg */
+ if (memcmp(&old_dlg_attr, &new_pipe->dlg_regs, sizeof(old_dlg_attr)) ||
+ memcmp(&old_ttu_attr, &new_pipe->ttu_regs, sizeof(old_ttu_attr)) ||
+ memcmp(&old_pipe->rq_regs, &new_pipe->rq_regs, sizeof(old_pipe->rq_regs)))
+ new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+ }
+
+ if (old_pipe->unbounded_req != new_pipe->unbounded_req)
+ new_pipe->update_flags.bits.unbounded_req = 1;
+}
+
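+/* Program HUBP and DPP state for one pipe according to the update_flags computed
+ * by dcn20_detect_pipe_changes() and the plane/stream update flags.
+ */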
+static void dcn20_update_dchubp_dpp(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct dccg *dccg = dc->res_pool->dccg;
+ bool viewport_changed = false;
+
+ if (pipe_ctx->update_flags.bits.dppclk)
+ dpp->funcs->dpp_dppclk_control(dpp, false, true);
+
+ if (pipe_ctx->update_flags.bits.enable)
+ dccg->funcs->update_dpp_dto(dccg, dpp->inst, pipe_ctx->plane_res.bw.dppclk_khz);
+
+ /* TODO: Need an input parameter to tell which OTG the current DCHUB pipe is tied to.
+ * VTG is within DCHUBBUB, which is a common block shared by each pipe's HUBP.
+ * VTG has a 1:1 mapping with OTG; each pipe's HUBP selects which VTG to use.
+ */
+ if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) {
+ hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
+
+ hubp->funcs->hubp_setup(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs,
+ &pipe_ctx->rq_regs,
+ &pipe_ctx->pipe_dlg_param);
+ }
+
+ if (pipe_ctx->update_flags.bits.unbounded_req && hubp->funcs->set_unbounded_requesting)
+ hubp->funcs->set_unbounded_requesting(hubp, pipe_ctx->unbounded_req);
+
+ if (pipe_ctx->update_flags.bits.hubp_interdependent)
+ hubp->funcs->hubp_setup_interdependent(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs);
+
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ plane_state->update_flags.bits.bpp_change ||
+ plane_state->update_flags.bits.input_csc_change ||
+ plane_state->update_flags.bits.color_space_change ||
+ plane_state->update_flags.bits.coeff_reduction_change) {
+ struct dc_bias_and_scale bns_params = {0};
+
+ // program the input csc
+ dpp->funcs->dpp_setup(dpp,
+ plane_state->format,
+ EXPANSION_MODE_ZERO,
+ plane_state->input_csc_color_matrix,
+ plane_state->color_space,
+ NULL);
+
+ if (dpp->funcs->dpp_program_bias_and_scale) {
+ // TODO: for CNVC, set scale and bias registers if necessary
+ build_prescale_params(&bns_params, plane_state);
+ dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
+ }
+ }
+
+ if (pipe_ctx->update_flags.bits.mpcc
+ || pipe_ctx->update_flags.bits.plane_changed
+ || plane_state->update_flags.bits.global_alpha_change
+ || plane_state->update_flags.bits.per_pixel_alpha_change) {
+ // MPCC inst is equal to pipe index in practice
+ hws->funcs.update_mpcc(dc, pipe_ctx);
+ }
+
+ if (pipe_ctx->update_flags.bits.scaler ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.position_change ||
+ plane_state->update_flags.bits.per_pixel_alpha_change ||
+ pipe_ctx->stream->update_flags.bits.scaling) {
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
+ ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_36BPP);
+ /* scaler configuration */
+ pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
+ pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
+ }
+
+ if (pipe_ctx->update_flags.bits.viewport ||
+ (context == dc->current_state && plane_state->update_flags.bits.position_change) ||
+ (context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
+ (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
+
+ hubp->funcs->mem_program_viewport(
+ hubp,
+ &pipe_ctx->plane_res.scl_data.viewport,
+ &pipe_ctx->plane_res.scl_data.viewport_c);
+ viewport_changed = true;
+ }
+
+	/* Any updates are handled in the dc interface; just apply the existing state for plane enable */
+ if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
+ pipe_ctx->update_flags.bits.scaler || viewport_changed == true) &&
+ pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+ dc->hwss.set_cursor_position(pipe_ctx);
+ dc->hwss.set_cursor_attribute(pipe_ctx);
+
+ if (dc->hwss.set_cursor_sdr_white_level)
+ dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
+ }
+
+	/* Any updates are handled in the dc interface; just apply the existing
+	 * state for plane enable / OPP change */
+ if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
+ || pipe_ctx->update_flags.bits.plane_changed
+ || pipe_ctx->stream->update_flags.bits.gamut_remap
+ || plane_state->update_flags.bits.gamut_remap_change
+ || pipe_ctx->stream->update_flags.bits.out_csc) {
+ /* dpp/cm gamut remap*/
+ dc->hwss.program_gamut_remap(pipe_ctx);
+
+ /*call the dcn2 method which uses mpc csc*/
+ dc->hwss.program_output_csc(dc,
+ pipe_ctx,
+ pipe_ctx->stream->output_color_space,
+ pipe_ctx->stream->csc_color_matrix.matrix,
+ hubp->opp_id);
+ }
+
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->update_flags.bits.opp_changed ||
+ plane_state->update_flags.bits.pixel_format_change ||
+ plane_state->update_flags.bits.horizontal_mirror_change ||
+ plane_state->update_flags.bits.rotation_change ||
+ plane_state->update_flags.bits.swizzle_change ||
+ plane_state->update_flags.bits.dcc_change ||
+ plane_state->update_flags.bits.bpp_change ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.plane_size_change) {
+ struct plane_size size = plane_state->plane_size;
+
+ size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
+ hubp->funcs->hubp_program_surface_config(
+ hubp,
+ plane_state->format,
+ &plane_state->tiling_info,
+ &size,
+ plane_state->rotation,
+ &plane_state->dcc,
+ plane_state->horizontal_mirror,
+ 0);
+ hubp->power_gated = false;
+ }
+
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ plane_state->update_flags.bits.addr_update)
+ hws->funcs.update_plane_addr(dc, pipe_ctx);
+
+ if (pipe_ctx->update_flags.bits.enable)
+ hubp->funcs->set_blank(hubp, false);
+ /* If the stream paired with this plane is phantom, the plane is also phantom */
+ if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM
+ && hubp->funcs->phantom_hubp_post_enable)
+ hubp->funcs->phantom_hubp_post_enable(hubp);
+}
+
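+/*
+ * Pipes blended together or ODM-combined with this one share global sync
+ * programming, so return the largest vready_offset found among all of the
+ * connected pipes.
+ */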
+static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
+{
+ struct pipe_ctx *other_pipe;
+ int vready_offset = pipe->pipe_dlg_param.vready_offset;
+
+ /* Always use the largest vready_offset of all connected pipes */
+ for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
+ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
+ vready_offset = other_pipe->pipe_dlg_param.vready_offset;
+ }
+ for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
+ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
+ vready_offset = other_pipe->pipe_dlg_param.vready_offset;
+ }
+ for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
+ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
+ vready_offset = other_pipe->pipe_dlg_param.vready_offset;
+ }
+ for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
+ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
+ vready_offset = other_pipe->pipe_dlg_param.vready_offset;
+ }
+
+ return vready_offset;
+}
+
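+/*
+ * Program a single pipe for the new context: blanking and global sync on the
+ * OTG master, ODM configuration, plane enable, DET size, HUBP/DPP state,
+ * transfer functions, OPP dynamic expansion / FMT and finally the ABM level.
+ */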
+static void dcn20_program_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+
+ /* Only need to unblank on top pipe */
+ if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.odm ||
+ pipe_ctx->stream->update_flags.bits.abm_level)
+ hws->funcs.blank_pixel_data(dc, pipe_ctx,
+ !pipe_ctx->plane_state ||
+ !pipe_ctx->plane_state->visible);
+ }
+
+ /* Only update TG on top pipe */
+ if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
+ && !pipe_ctx->prev_odm_pipe) {
+ pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ pipe_ctx->stream_res.tg,
+ calculate_vready_offset_for_group(pipe_ctx),
+ pipe_ctx->pipe_dlg_param.vstartup_start,
+ pipe_ctx->pipe_dlg_param.vupdate_offset,
+ pipe_ctx->pipe_dlg_param.vupdate_width);
+
+ if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM)
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+
+ pipe_ctx->stream_res.tg->funcs->set_vtg_params(
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
+
+ if (hws->funcs.setup_vupdate_interrupt)
+ hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
+ }
+
+ if (pipe_ctx->update_flags.bits.odm)
+ hws->funcs.update_odm(dc, context, pipe_ctx);
+
+ if (pipe_ctx->update_flags.bits.enable) {
+ dcn20_enable_plane(dc, pipe_ctx, context);
+ if (dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes)
+ dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub);
+ }
+
+ if (dc->res_pool->hubbub->funcs->program_det_size && pipe_ctx->update_flags.bits.det_size)
+ dc->res_pool->hubbub->funcs->program_det_size(
+ dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
+
+ if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw)
+ dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
+
+ if (pipe_ctx->update_flags.bits.enable
+ || pipe_ctx->plane_state->update_flags.bits.hdr_mult)
+ hws->funcs.set_hdr_multiplier(pipe_ctx);
+
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change ||
+ pipe_ctx->plane_state->update_flags.bits.lut_3d)
+ hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
+
+	/* dcn10_translate_regamma_to_hw_format takes 750us to finish, so only do
+	 * gamma programming when powering on; an internal memcmp avoids updating
+	 * on slave planes
+	 */
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.out_tf ||
+ pipe_ctx->plane_state->update_flags.bits.output_tf_change)
+ hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
+
+	/* If the pipe has been enabled or has a different opp, we
+	 * should reprogram the fmt. This deals with cases where
+	 * interaction between mpc and odm combine on different streams
+	 * causes a different pipe to be chosen to odm combine with.
+	 */
+ if (pipe_ctx->update_flags.bits.enable
+ || pipe_ctx->update_flags.bits.opp_changed) {
+
+ pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+ pipe_ctx->stream_res.opp,
+ COLOR_SPACE_YCBCR601,
+ pipe_ctx->stream->timing.display_color_depth,
+ pipe_ctx->stream->signal);
+
+ pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
+ pipe_ctx->stream_res.opp,
+ &pipe_ctx->stream->bit_depth_params,
+ &pipe_ctx->stream->clamping);
+ }
+
+	/* Set ABM pipe after the other pipe configurations are done */
+ if (pipe_ctx->plane_state->visible) {
+ if (pipe_ctx->stream_res.abm) {
+ dc->hwss.set_pipe(pipe_ctx);
+ pipe_ctx->stream_res.abm->funcs->set_abm_level(pipe_ctx->stream_res.abm,
+ pipe_ctx->stream->abm_level);
+ }
+ }
+}
+
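+/*
+ * Apply the front-end (plane/pipe) portion of a new dc_state: carry over GSL
+ * groups, detect per-pipe changes, blank and disconnect pipes being disabled,
+ * then program every updated pipe top-down so the MPC tree is built in order.
+ */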
+void dcn20_program_front_end_for_ctx(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+ struct dce_hwseq *hws = dc->hwseq;
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ /* Carry over GSL groups in case the context is changing. */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == old_pipe_ctx->stream)
+ pipe_ctx->stream_res.gsl_group = old_pipe_ctx->stream_res.gsl_group;
+ }
+
+ if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) {
+ ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
+ /*turn off triple buffer for full update*/
+ dc->hwss.program_triplebuffer(
+ dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+ }
+ }
+ }
+
+ /* Set pipe update flags and lock pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
+ &context->res_ctx.pipe_ctx[i]);
+
+ /* When disabling phantom pipes, turn on phantom OTG first (so we can get double
+ * buffer updates properly)
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream;
+
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream &&
+ dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
+
+ if (tg->funcs->enable_crtc) {
+ if (dc->hwss.blank_phantom) {
+ int main_pipe_width, main_pipe_height;
+
+ main_pipe_width = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.width;
+ main_pipe_height = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.height;
+ dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
+ }
+ tg->funcs->enable_crtc(tg);
+ }
+ }
+ }
+ /* OTG blank before disabling all front ends */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
+ && !context->res_ctx.pipe_ctx[i].top_pipe
+ && !context->res_ctx.pipe_ctx[i].prev_odm_pipe
+ && context->res_ctx.pipe_ctx[i].stream)
+ hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
+
+
+ /* Disconnect mpcc */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
+ || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
+ /* Phantom pipe DET should be 0, but if a pipe in use is being transitioned to phantom
+ * then we want to do the programming here (effectively it's being disabled). If we do
+ * the programming later the DET won't be updated until the OTG for the phantom pipe is
+ * turned on (i.e. in an MCLK switch) which can come in too late and cause issues with
+ * DET allocation.
+ */
+ if (hubbub->funcs->program_det_size && (context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
+ (context->res_ctx.pipe_ctx[i].plane_state && context->res_ctx.pipe_ctx[i].plane_state->is_phantom)))
+ hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
+ hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+ DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
+ }
+
+ /*
+ * Program all updated pipes, order matters for mpcc setup. Start with
+ * top pipe and program all pipes that follow in order
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state && !pipe->top_pipe) {
+ while (pipe) {
+ if (hws->funcs.program_pipe)
+ hws->funcs.program_pipe(dc, pipe, context);
+ else {
+ /* Don't program phantom pipes in the regular front end programming sequence.
+ * There is an MPO transition case where a pipe being used by a video plane is
+ * transitioned directly to be a phantom pipe when closing the MPO video. However
+ * the phantom pipe will program a new HUBP_VTG_SEL (update takes place right away),
+ * but the MPO still exists until the double buffered update of the main pipe so we
+ * will get a frame of underflow if the phantom pipe is programmed here.
+ */
+ if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_PHANTOM)
+ dcn20_program_pipe(dc, pipe, context);
+ }
+
+ pipe = pipe->bottom_pipe;
+ }
+ }
+ /* Program secondary blending tree and writeback pipes */
+ pipe = &context->res_ctx.pipe_ctx[i];
+ if (!pipe->top_pipe && !pipe->prev_odm_pipe
+ && pipe->stream && pipe->stream->num_wb_info > 0
+ && (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
+ || pipe->stream->update_flags.raw)
+ && hws->funcs.program_all_writeback_pipes_in_tree)
+ hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
+
+		/* Avoid underflow by checking the pipe line read when adding a 2nd plane. */
+ if (hws->wa.wait_hubpret_read_start_during_mpo_transition &&
+ !pipe->top_pipe &&
+ pipe->stream &&
+ pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start &&
+ dc->current_state->stream_status[0].plane_count == 1 &&
+ context->stream_status[0].plane_count > 1) {
+ pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
+ }
+
+ /* when dynamic ODM is active, pipes must be reconfigured when all planes are
+ * disabled, as some transitions will leave software and hardware state
+ * mismatched.
+ */
+ if (dc->debug.enable_single_display_2to1_odm_policy &&
+ pipe->stream &&
+ pipe->update_flags.bits.disable &&
+ !pipe->prev_odm_pipe &&
+ hws->funcs.update_odm)
+ hws->funcs.update_odm(dc, context, pipe);
+ }
+}
+
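+/*
+ * Runs after the pipe locks are released: disable planes that were removed,
+ * wait for pending flips on newly enabled pipes, program phantom (SubVP)
+ * pipes, then apply p-state force, MALL configuration and the self-refresh /
+ * watermark workarounds.
+ */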
+void dcn20_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+ const unsigned int TIMEOUT_FOR_PIPE_ENABLE_US = 100000;
+ unsigned int polling_interval_us = 1;
+ struct dce_hwseq *hwseq = dc->hwseq;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
+ dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+
+	/*
+	 * If we are enabling a pipe, we need to wait for the pending clear as this is a
+	 * critical part of the enable operation; otherwise, DM may request an immediate
+	 * flip, which will cause HW to perform an "immediate enable" (as opposed to
+	 * "vsync enable") which is unsupported on DCN.
+	 */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ // Don't check flip pending on phantom pipes
+ if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable &&
+ pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ struct hubp *hubp = pipe->plane_res.hubp;
+ int j = 0;
+ for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us
+ && hubp->funcs->hubp_is_flip_pending(hubp); j++)
+ udelay(polling_interval_us);
+ }
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state && !pipe->top_pipe) {
+ /* Program phantom pipe here to prevent a frame of underflow in the MPO transition
+ * case (if a pipe being used for a video plane transitions to a phantom pipe, it
+ * can underflow due to HUBP_VTG_SEL programming if done in the regular front end
+ * programming sequence).
+ */
+ while (pipe) {
+ if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ /* When turning on the phantom pipe we want to run through the
+ * entire enable sequence, so apply all the "enable" flags.
+ */
+ if (dc->hwss.apply_update_flags_for_phantom)
+ dc->hwss.apply_update_flags_for_phantom(pipe);
+ if (dc->hwss.update_phantom_vp_position)
+ dc->hwss.update_phantom_vp_position(dc, context, pipe);
+ dcn20_program_pipe(dc, pipe, context);
+ }
+ pipe = pipe->bottom_pipe;
+ }
+ }
+ }
+
+ /* P-State support transitions:
+ * Natural -> FPO: P-State disabled in prepare, force disallow anytime is safe
+ * FPO -> Natural: Unforce anytime after FW disable is safe (P-State will assert naturally)
+ * Unsupported -> FPO: P-State enabled in optimize, force disallow anytime is safe
+ * FPO -> Unsupported: P-State disabled in prepare, unforce disallow anytime is safe
+ * FPO <-> SubVP: Force disallow is maintained on the FPO / SubVP pipes
+ */
+ if (hwseq && hwseq->funcs.update_force_pstate)
+ dc->hwseq->funcs.update_force_pstate(dc, context);
+
+ /* Only program the MALL registers after all the main and phantom pipes
+ * are done programming.
+ */
+ if (hwseq->funcs.program_mall_pipe_config)
+ hwseq->funcs.program_mall_pipe_config(dc, context);
+
+ /* WA to apply WM setting*/
+ if (hwseq->wa.DEGVIDCN21)
+ dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
+
+
+ /* WA for stutter underflow during MPO transitions when adding 2nd plane */
+ if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) {
+
+ if (dc->current_state->stream_status[0].plane_count == 1 &&
+ context->stream_status[0].plane_count > 1) {
+
+ struct timing_generator *tg = dc->res_pool->timing_generators[0];
+
+ dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, false);
+
+ hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = true;
+ hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame = tg->funcs->get_frame_count(tg);
+ }
+ }
+}
+
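+/*
+ * Called before a new context is applied: raise clocks (not yet safe to
+ * lower), program dchubbub watermarks and decrease the compressed buffer size
+ * for the transition; dcn20_optimize_bandwidth() relaxes these settings once
+ * the new state has taken effect.
+ */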
+void dcn20_prepare_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+ unsigned int compbuf_size_kb = 0;
+ unsigned int cache_wm_a = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns;
+ unsigned int i;
+
+ dc->clk_mgr->funcs->update_clocks(
+ dc->clk_mgr,
+ context,
+ false);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+		// For SubVP, use a very large p-state watermark here; the original
+		// value is restored below before it is committed to DMCUB
+ if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) {
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U;
+ break;
+ }
+ }
+
+ /* program dchubbub watermarks:
+ * For assigning wm_optimized_required, use |= operator since we don't want
+ * to clear the value if the optimize has not happened yet
+ */
+ dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub,
+ &context->bw_ctx.bw.dcn.watermarks,
+ dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
+ false);
+
+ // Restore the real watermark so we can commit the value to DMCUB
+ // DMCUB uses the "original" watermark value in SubVP MCLK switch
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = cache_wm_a;
+
+ /* decrease compbuf size */
+ if (hubbub->funcs->program_compbuf_size) {
+ if (context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes) {
+ compbuf_size_kb = context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes;
+ dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes);
+ } else {
+ compbuf_size_kb = context->bw_ctx.bw.dcn.compbuf_size_kb;
+ dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb);
+ }
+
+ hubbub->funcs->program_compbuf_size(hubbub, compbuf_size_kb, false);
+ }
+}
+
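+/*
+ * Counterpart of dcn20_prepare_bandwidth(): once the new state is active,
+ * program the final watermarks, restore the compressed buffer size, delegate
+ * p-state switching to DMCUB when FW-based MCLK switching is used, and lower
+ * the clocks.
+ */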
+void dcn20_optimize_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ // At optimize don't need to restore the original watermark value
+ if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) {
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U;
+ break;
+ }
+ }
+
+ /* program dchubbub watermarks */
+ hubbub->funcs->program_watermarks(hubbub,
+ &context->bw_ctx.bw.dcn.watermarks,
+ dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
+ true);
+
+ if (dc->clk_mgr->dc_mode_softmax_enabled)
+ if (dc->clk_mgr->clks.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
+ context->bw_ctx.bw.dcn.clk.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
+ dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
+
+ /* increase compbuf size */
+ if (hubbub->funcs->program_compbuf_size)
+ hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
+
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
+ dc_dmub_srv_p_state_delegate(dc,
+ true, context);
+ context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
+ dc->clk_mgr->clks.fw_based_mclk_switching = true;
+ } else {
+ dc->clk_mgr->clks.fw_based_mclk_switching = false;
+ }
+
+ dc->clk_mgr->funcs->update_clocks(
+ dc->clk_mgr,
+ context,
+ true);
+ if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
+ for (i = 0; i < dc->res_pool->pipe_count; ++i) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream && pipe_ctx->plane_res.hubp->funcs->program_extended_blank
+ && pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max
+ && pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total)
+ pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp,
+ pipe_ctx->dlg_regs.min_dst_y_next_start);
+ }
+ }
+}
+
+bool dcn20_update_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ /* recalculate DML parameters */
+ if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false))
+ return false;
+
+ /* apply updated bandwidth parameters */
+ dc->hwss.prepare_bandwidth(dc, context);
+
+ /* update hubp configs for all pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->plane_state == NULL)
+ continue;
+
+ if (pipe_ctx->top_pipe == NULL) {
+ bool blank = !is_pipe_tree_visible(pipe_ctx);
+
+ pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ pipe_ctx->stream_res.tg,
+ calculate_vready_offset_for_group(pipe_ctx),
+ pipe_ctx->pipe_dlg_param.vstartup_start,
+ pipe_ctx->pipe_dlg_param.vupdate_offset,
+ pipe_ctx->pipe_dlg_param.vupdate_width);
+
+ pipe_ctx->stream_res.tg->funcs->set_vtg_params(
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false);
+
+ if (pipe_ctx->prev_odm_pipe == NULL)
+ hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
+
+ if (hws->funcs.setup_vupdate_interrupt)
+ hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
+ }
+
+ pipe_ctx->plane_res.hubp->funcs->hubp_setup(
+ pipe_ctx->plane_res.hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs,
+ &pipe_ctx->rq_regs,
+ &pipe_ctx->pipe_dlg_param);
+ }
+
+ return true;
+}
+
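+/*
+ * Route the selected OPTC output into the DWB, configure the MCIF_WB buffer
+ * and arbitration settings, then enable both MCIF_WB and the DWB itself.
+ */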
+void dcn20_enable_writeback(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context)
+{
+ struct dwbc *dwb;
+ struct mcif_wb *mcif_wb;
+ struct timing_generator *optc;
+
+ ASSERT(wb_info->dwb_pipe_inst < MAX_DWB_PIPES);
+ ASSERT(wb_info->wb_enabled);
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+ mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
+
+ /* set the OPTC source mux */
+ optc = dc->res_pool->timing_generators[dwb->otg_inst];
+ optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst);
+ /* set MCIF_WB buffer and arbitration configuration */
+ mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
+ mcif_wb->funcs->config_mcif_arb(mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
+ /* Enable MCIF_WB */
+ mcif_wb->funcs->enable_mcif(mcif_wb);
+ /* Enable DWB */
+ dwb->funcs->enable(dwb, &wb_info->dwb_params);
+ /* TODO: add sequence to enable/disable warmup */
+}
+
+void dcn20_disable_writeback(
+ struct dc *dc,
+ unsigned int dwb_pipe_inst)
+{
+ struct dwbc *dwb;
+ struct mcif_wb *mcif_wb;
+
+ ASSERT(dwb_pipe_inst < MAX_DWB_PIPES);
+ dwb = dc->res_pool->dwbc[dwb_pipe_inst];
+ mcif_wb = dc->res_pool->mcif_wb[dwb_pipe_inst];
+
+ dwb->funcs->disable(dwb);
+ mcif_wb->funcs->disable_mcif(mcif_wb);
+}
+
+bool dcn20_wait_for_blank_complete(
+ struct output_pixel_processor *opp)
+{
+ int counter;
+
+ for (counter = 0; counter < 1000; counter++) {
+ if (opp->funcs->dpg_is_blanked(opp))
+ break;
+
+ udelay(100);
+ }
+
+ if (counter == 1000) {
+ dm_error("DC: failed to blank crtc!\n");
+ return false;
+ }
+
+ return true;
+}
+
+bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+
+ if (!hubp)
+ return false;
+ return hubp->funcs->dmdata_status_done(hubp);
+}
+
+void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (pipe_ctx->stream_res.dsc) {
+ struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
+
+ hws->funcs.dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, true);
+ while (odm_pipe) {
+ hws->funcs.dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, true);
+ odm_pipe = odm_pipe->next_odm_pipe;
+ }
+ }
+}
+
+void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (pipe_ctx->stream_res.dsc) {
+ struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
+
+ hws->funcs.dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, false);
+ while (odm_pipe) {
+ hws->funcs.dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, false);
+ odm_pipe = odm_pipe->next_odm_pipe;
+ }
+ }
+}
+
+void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_dmdata_attributes attr = { 0 };
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+
+ attr.dmdata_mode = DMDATA_HW_MODE;
+ attr.dmdata_size =
+ dc_is_hdmi_signal(pipe_ctx->stream->signal) ? 32 : 36;
+ attr.address.quad_part =
+ pipe_ctx->stream->dmdata_address.quad_part;
+ attr.dmdata_dl_delta = 0;
+ attr.dmdata_qos_mode = 0;
+ attr.dmdata_qos_level = 0;
+ attr.dmdata_repeat = 1; /* always repeat */
+ attr.dmdata_updated = 1;
+ attr.dmdata_sw_data = NULL;
+
+ hubp->funcs->dmdata_set_attributes(hubp, &attr);
+}
+
+void dcn20_init_vm_ctx(
+ struct dce_hwseq *hws,
+ struct dc *dc,
+ struct dc_virtual_addr_space_config *va_config,
+ int vmid)
+{
+ struct dcn_hubbub_virt_addr_config config;
+
+ if (vmid == 0) {
+ ASSERT(0); /* VMID cannot be 0 for vm context */
+ return;
+ }
+
+ config.page_table_start_addr = va_config->page_table_start_addr;
+ config.page_table_end_addr = va_config->page_table_end_addr;
+ config.page_table_block_size = va_config->page_table_block_size_in_bytes;
+ config.page_table_depth = va_config->page_table_depth;
+ config.page_table_base_addr = va_config->page_table_base_addr;
+
+ dc->res_pool->hubbub->funcs->init_vm_ctx(dc->res_pool->hubbub, &config, vmid);
+}
+
+int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
+{
+ struct dcn_hubbub_phys_addr_config config;
+
+ config.system_aperture.fb_top = pa_config->system_aperture.fb_top;
+ config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset;
+ config.system_aperture.fb_base = pa_config->system_aperture.fb_base;
+ config.system_aperture.agp_top = pa_config->system_aperture.agp_top;
+ config.system_aperture.agp_bot = pa_config->system_aperture.agp_bot;
+ config.system_aperture.agp_base = pa_config->system_aperture.agp_base;
+ config.gart_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr;
+ config.gart_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr;
+ config.gart_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;
+ config.page_table_default_page_addr = pa_config->page_table_default_page_addr;
+
+ return dc->res_pool->hubbub->funcs->init_dchub_sys_ctx(dc->res_pool->hubbub, &config);
+}
+
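+/*
+ * For side-by-side / top-and-bottom stereo the secondary split pipe scans out
+ * the right eye: temporarily swap the right address into the plane state and
+ * return true so the caller can restore the original left address afterwards.
+ */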
+static bool patch_address_for_sbs_tb_stereo(
+ struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
+{
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ bool sec_split = pipe_ctx->top_pipe &&
+ pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
+ if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
+ (pipe_ctx->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_SIDE_BY_SIDE ||
+ pipe_ctx->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
+ *addr = plane_state->address.grph_stereo.left_addr;
+ plane_state->address.grph_stereo.left_addr =
+ plane_state->address.grph_stereo.right_addr;
+ return true;
+ }
+
+ if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
+ plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
+ plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
+ plane_state->address.grph_stereo.right_addr =
+ plane_state->address.grph_stereo.left_addr;
+ plane_state->address.grph_stereo.right_meta_addr =
+ plane_state->address.grph_stereo.left_meta_addr;
+ }
+ return false;
+}
+
+void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ bool addr_patched = false;
+ PHYSICAL_ADDRESS_LOC addr;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+
+ if (plane_state == NULL)
+ return;
+
+ addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
+
+ // Call Helper to track VMID use
+ vm_helper_mark_vmid_used(dc->vm_helper, plane_state->address.vmid, pipe_ctx->plane_res.hubp->inst);
+
+ pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
+ pipe_ctx->plane_res.hubp,
+ &plane_state->address,
+ plane_state->flip_immediate);
+
+ plane_state->status.requested_address = plane_state->address;
+
+ if (plane_state->flip_immediate)
+ plane_state->status.current_address = plane_state->address;
+
+ if (addr_patched)
+ pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
+}
+
+void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
+ struct dc_link_settings *link_settings)
+{
+ struct encoder_unblank_param params = {0};
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ struct dce_hwseq *hws = link->dc->hwseq;
+ struct pipe_ctx *odm_pipe;
+
+ params.opp_cnt = 1;
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ params.opp_cnt++;
+ }
+ /* only 3 items below are used by unblank */
+ params.timing = pipe_ctx->stream->timing;
+
+ params.link_settings.link_rate = link_settings->link_rate;
+
+ if (link->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
+ /* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ pipe_ctx->stream_res.tg->inst);
+ } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
+ if (optc2_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1)
+ params.timing.pix_clk_100hz /= 2;
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine(
+ pipe_ctx->stream_res.stream_enc, params.opp_cnt > 1);
+ pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
+ }
+
+ if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
+ hws->funcs.edp_backlight_control(link, true);
+ }
+}
+
+void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
+
+ if (start_line < 0)
+ start_line = 0;
+
+ if (tg->funcs->setup_vertical_interrupt2)
+ tg->funcs->setup_vertical_interrupt2(tg, start_line);
+}
+
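+/*
+ * Tear down the back end for a pipe that is being removed or reprogrammed:
+ * take the stream out of DPMS (or stop audio), release the audio endpoint,
+ * and for the parent pipe disable ABM, the CRTC/OTG clock, DRR and, when the
+ * symclk is no longer needed, the PHY output.
+ */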
+static void dcn20_reset_back_end_for_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ int i;
+ struct dc_link *link = pipe_ctx->stream->link;
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+ if (pipe_ctx->stream_res.stream_enc == NULL) {
+ pipe_ctx->stream = NULL;
+ return;
+ }
+
+	/* DPMS may already be disabled, or the dpms_off status may be incorrect
+	 * due to the fastboot feature. When the system resumes from S4 with a
+	 * second screen only, dpms_off would be true but VBIOS lit up eDP, so
+	 * check the link status too.
+	 */
+ if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
+ dc->link_srv->set_dpms_off(pipe_ctx);
+ else if (pipe_ctx->stream_res.audio)
+ dc->hwss.disable_audio_stream(pipe_ctx);
+
+ /* free acquired resources */
+ if (pipe_ctx->stream_res.audio) {
+ /*disable az_endpoint*/
+ pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
+
+		/* free audio */
+		if (dc->caps.dynamic_audio == true) {
+			/* we have to dynamically arbitrate the audio endpoints */
+			/* we free the resource, so is_audio_acquired needs to be reset */
+ update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
+ pipe_ctx->stream_res.audio, false);
+ pipe_ctx->stream_res.audio = NULL;
+ }
+ }
+
+	/* By the upper caller loop, the parent pipe (pipe0) will be reset last.
+	 * The back end is shared by all pipes and is disabled only when the
+	 * parent pipe is disabled.
+	 */
+ if (pipe_ctx->top_pipe == NULL) {
+
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
+
+ pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
+
+ pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
+ if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
+ pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+
+ if (pipe_ctx->stream_res.tg->funcs->set_drr)
+ pipe_ctx->stream_res.tg->funcs->set_drr(
+ pipe_ctx->stream_res.tg, NULL);
+ /* TODO - convert symclk_ref_cnts for otg to a bit map to solve
+ * the case where the same symclk is shared across multiple otg
+ * instances
+ */
+ link->phy_state.symclk_ref_cnts.otg = 0;
+ if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) {
+ link_hwss->disable_link_output(link,
+ &pipe_ctx->link_res, pipe_ctx->stream->signal);
+ link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
+ }
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
+ break;
+
+ if (i == dc->res_pool->pipe_count)
+ return;
+
+ pipe_ctx->stream = NULL;
+ DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
+ pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
+}
+
+void dcn20_reset_hw_ctx_wrap(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ /* Reset Back End*/
+ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx_old->stream)
+ continue;
+
+ if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
+ continue;
+
+ if (!pipe_ctx->stream ||
+ pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
+ struct clock_source *old_clk = pipe_ctx_old->clock_source;
+
+ dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
+ if (hws->funcs.enable_stream_gating)
+ hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
+ if (old_clk)
+ old_clk->funcs->cs_power_down(old_clk);
+ }
+ }
+}
+
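+/*
+ * Build the MPCC blending configuration for this plane (per-pixel vs. global
+ * alpha, gain values) and, on a full update, remove and re-insert the plane
+ * in the OPP's MPC tree; otherwise only the blending settings are updated.
+ */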
+void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct mpcc_blnd_cfg blnd_cfg = {0};
+ bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha;
+ int mpcc_id;
+ struct mpcc *new_mpcc;
+ struct mpc *mpc = dc->res_pool->mpc;
+ struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
+
+ blnd_cfg.overlap_only = false;
+ blnd_cfg.global_gain = 0xff;
+
+ if (per_pixel_alpha) {
+ blnd_cfg.pre_multiplied_alpha = pipe_ctx->plane_state->pre_multiplied_alpha;
+ if (pipe_ctx->plane_state->global_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+ } else {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ }
+ } else {
+ blnd_cfg.pre_multiplied_alpha = false;
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+ }
+
+ if (pipe_ctx->plane_state->global_alpha)
+ blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
+ else
+ blnd_cfg.global_alpha = 0xff;
+
+ blnd_cfg.background_color_bpc = 4;
+ blnd_cfg.bottom_gain_mode = 0;
+ blnd_cfg.top_gain = 0x1f000;
+ blnd_cfg.bottom_inside_gain = 0x1f000;
+ blnd_cfg.bottom_outside_gain = 0x1f000;
+
+ if (pipe_ctx->plane_state->format
+ == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA)
+ blnd_cfg.pre_multiplied_alpha = false;
+
+	/*
+	 * TODO: remove hack
+	 * Note: currently there is a bug in init_hw such that
+	 * on resume from hibernate, BIOS sets up MPCC0, and
+	 * we do mpcc_remove but the mpcc cannot go to idle
+	 * after remove. This causes us to pick mpcc1 here,
+	 * which causes a pstate hang for a yet unknown reason.
+	 */
+ mpcc_id = hubp->inst;
+
+	/* If there is no full update, we don't need to touch the MPC tree */
+ if (!pipe_ctx->plane_state->update_flags.bits.full_update &&
+ !pipe_ctx->update_flags.bits.mpcc) {
+ mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
+ dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
+ return;
+ }
+
+ /* check if this MPCC is already being used */
+ new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
+ /* remove MPCC if being used */
+ if (new_mpcc != NULL)
+ mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
+ else
+ if (dc->debug.sanity_checks)
+ mpc->funcs->assert_mpcc_idle_before_connect(
+ dc->res_pool->mpc, mpcc_id);
+
+ /* Call MPC to insert new plane */
+ new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
+ mpc_tree_params,
+ &blnd_cfg,
+ NULL,
+ NULL,
+ hubp->inst,
+ mpcc_id);
+ dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
+
+ ASSERT(new_mpcc != NULL);
+ hubp->opp_id = pipe_ctx->stream_res.opp->inst;
+ hubp->mpcc_id = mpcc_id;
+}
+
+static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
+{
+ switch (link->link_enc->transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return PHYD32CLKA;
+ case TRANSMITTER_UNIPHY_B:
+ return PHYD32CLKB;
+ case TRANSMITTER_UNIPHY_C:
+ return PHYD32CLKC;
+ case TRANSMITTER_UNIPHY_D:
+ return PHYD32CLKD;
+ case TRANSMITTER_UNIPHY_E:
+ return PHYD32CLKE;
+ default:
+ return PHYD32CLKA;
+ }
+}
+
+static int get_odm_segment_count(struct pipe_ctx *pipe_ctx)
+{
+ struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
+ int count = 1;
+
+ while (odm_pipe != NULL) {
+ count++;
+ odm_pipe = odm_pipe->next_odm_pipe;
+ }
+
+ return count;
+}
+
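+/*
+ * Stream-side enable sequence: set up HPO control and DTBCLK/symclk32 DTOs
+ * for 128b/132b DP, program the pixel rate dividers, configure the stream
+ * encoder, dmdata engine and info frames, then set early control on the OTG.
+ */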
+void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
+{
+ enum dc_lane_count lane_count =
+ pipe_ctx->stream->link->cur_link_settings.lane_count;
+
+ struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ struct dc_link *link = pipe_ctx->stream->link;
+
+ uint32_t active_total_with_borders;
+ uint32_t early_control = 0;
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct dtbclk_dto_params dto_params = {0};
+ struct dccg *dccg = dc->res_pool->dccg;
+ enum phyd32clk_clock_source phyd32clk;
+ int dp_hpo_inst;
+ struct dce_hwseq *hws = dc->hwseq;
+ unsigned int k1_div = PIXEL_RATE_DIV_NA;
+ unsigned int k2_div = PIXEL_RATE_DIV_NA;
+
+ if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
+ if (dc->hwseq->funcs.setup_hpo_hw_control)
+ dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, true);
+ }
+
+ if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
+ dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
+ dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst);
+
+ phyd32clk = get_phyd32clk_src(link);
+ dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
+
+ dto_params.otg_inst = tg->inst;
+ dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
+ dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
+ dto_params.timing = &pipe_ctx->stream->timing;
+ dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ } else {
+ }
+ if (hws->funcs.calculate_dccg_k1_k2_values && dc->res_pool->dccg->funcs->set_pixel_rate_div) {
+ hws->funcs.calculate_dccg_k1_k2_values(pipe_ctx, &k1_div, &k2_div);
+
+ dc->res_pool->dccg->funcs->set_pixel_rate_div(
+ dc->res_pool->dccg,
+ pipe_ctx->stream_res.tg->inst,
+ k1_div, k2_div);
+ }
+
+ link_hwss->setup_stream_encoder(pipe_ctx);
+
+ if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
+ if (dc->hwss.program_dmdata_engine)
+ dc->hwss.program_dmdata_engine(pipe_ctx);
+ }
+
+ dc->hwss.update_info_frame(pipe_ctx);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
+
+	/* enable early control to avoid corruption on DP monitor */
+ active_total_with_borders =
+ timing->h_addressable
+ + timing->h_border_left
+ + timing->h_border_right;
+
+ if (lane_count != 0)
+ early_control = active_total_with_borders % lane_count;
+
+ if (early_control == 0)
+ early_control = lane_count;
+
+ tg->funcs->set_early_control(tg, early_control);
+
+ if (dc->hwseq->funcs.set_pixels_per_cycle)
+ dc->hwseq->funcs.set_pixels_per_cycle(pipe_ctx);
+}
+
+void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ bool enable = false;
+ struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
+ enum dynamic_metadata_mode mode = dc_is_dp_signal(stream->signal)
+ ? dmdata_dp
+ : dmdata_hdmi;
+
+ /* if using dynamic meta, don't set up generic infopackets */
+ if (pipe_ctx->stream->dmdata_address.quad_part != 0) {
+ pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false;
+ enable = true;
+ }
+
+ if (!hubp)
+ return;
+
+ if (!stream_enc || !stream_enc->funcs->set_dynamic_metadata)
+ return;
+
+ stream_enc->funcs->set_dynamic_metadata(stream_enc, enable,
+ hubp->inst, mode);
+}
+
+void dcn20_fpga_init_hw(struct dc *dc)
+{
+ int i, j;
+ struct dce_hwseq *hws = dc->hwseq;
+ struct resource_pool *res_pool = dc->res_pool;
+ struct dc_state *context = dc->current_state;
+
+ if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
+ dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
+
+ // Initialize the dccg
+ if (res_pool->dccg->funcs->dccg_init)
+ res_pool->dccg->funcs->dccg_init(res_pool->dccg);
+
+ //Enable ability to power gate / don't force power on permanently
+ hws->funcs.enable_power_gating_plane(hws, true);
+
+ // Specific to FPGA dccg and registers
+ REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF);
+ REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF);
+
+ hws->funcs.dccg_init(hws);
+
+ REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
+ REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
+ if (REG(REFCLK_CNTL))
+ REG_WRITE(REFCLK_CNTL, 0);
+ //
+
+
+ /* Blank pixel data with OPP DPG */
+ for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+
+ if (tg->funcs->is_tg_enabled(tg))
+ dcn20_init_blank(dc, tg);
+ }
+
+ for (i = 0; i < res_pool->timing_generator_count; i++) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+
+ if (tg->funcs->is_tg_enabled(tg))
+ tg->funcs->lock(tg);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dpp *dpp = res_pool->dpps[i];
+
+ dpp->funcs->dpp_reset(dpp);
+ }
+
+ /* Reset all MPCC muxes */
+ res_pool->mpc->funcs->mpc_init(res_pool->mpc);
+
+ /* initialize OPP mpc_tree parameter */
+ for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
+ res_pool->opps[i]->mpc_tree_params.opp_id = res_pool->opps[i]->inst;
+ res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
+ for (j = 0; j < MAX_PIPES; j++)
+ res_pool->opps[i]->mpcc_disconnect_pending[j] = false;
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct hubp *hubp = dc->res_pool->hubps[i];
+ struct dpp *dpp = dc->res_pool->dpps[i];
+
+ pipe_ctx->stream_res.tg = tg;
+ pipe_ctx->pipe_idx = i;
+
+ pipe_ctx->plane_res.hubp = hubp;
+ pipe_ctx->plane_res.dpp = dpp;
+ pipe_ctx->plane_res.mpcc_inst = dpp->inst;
+ hubp->mpcc_id = dpp->inst;
+ hubp->opp_id = OPP_ID_INVALID;
+ hubp->power_gated = false;
+ pipe_ctx->stream_res.opp = NULL;
+
+ hubp->funcs->hubp_init(hubp);
+
+ //dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
+ //dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
+ dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
+ pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
+ /*to do*/
+ hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
+ }
+
+ /* initialize DWB pointer to MCIF_WB */
+ for (i = 0; i < res_pool->res_cap->num_dwb; i++)
+ res_pool->dwbc[i]->mcif = res_pool->mcif_wb[i];
+
+ for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+
+ if (tg->funcs->is_tg_enabled(tg))
+ tg->funcs->unlock(tg);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ dc->hwss.disable_plane(dc, pipe_ctx);
+
+ pipe_ctx->stream_res.tg = NULL;
+ pipe_ctx->plane_res.hubp = NULL;
+ }
+
+ for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+
+ tg->funcs->tg_init(tg);
+ }
+
+ if (dc->res_pool->hubbub->funcs->init_crb)
+ dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
+}
+#ifndef TRIM_FSFT
+bool dcn20_optimize_timing_for_fsft(struct dc *dc,
+ struct dc_crtc_timing *timing,
+ unsigned int max_input_rate_in_khz)
+{
+ unsigned int old_v_front_porch;
+ unsigned int old_v_total;
+ unsigned int max_input_rate_in_100hz;
+ unsigned long long new_v_total;
+
+ max_input_rate_in_100hz = max_input_rate_in_khz * 10;
+ if (max_input_rate_in_100hz < timing->pix_clk_100hz)
+ return false;
+
+ old_v_total = timing->v_total;
+ old_v_front_porch = timing->v_front_porch;
+
+ timing->fast_transport_output_rate_100hz = timing->pix_clk_100hz;
+ timing->pix_clk_100hz = max_input_rate_in_100hz;
+
+ new_v_total = div_u64((unsigned long long)old_v_total * max_input_rate_in_100hz, timing->pix_clk_100hz);
+
+ timing->v_total = new_v_total;
+ timing->v_front_porch = old_v_front_porch + (timing->v_total - old_v_total);
+ return true;
+}
+#endif
+
+void dcn20_set_disp_pattern_generator(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ enum controller_dp_test_pattern test_pattern,
+ enum controller_dp_color_space color_space,
+ enum dc_color_depth color_depth,
+ const struct tg_color *solid_color,
+ int width, int height, int offset)
+{
+ pipe_ctx->stream_res.opp->funcs->opp_set_disp_pattern_generator(pipe_ctx->stream_res.opp, test_pattern,
+ color_space, color_depth, solid_color, width, height, offset);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
new file mode 100644
index 0000000000..01901b0864
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCN20_H__
+#define __DC_HWSS_DCN20_H__
+
+#include "hw_sequencer_private.h"
+
+bool dcn20_set_blend_lut(
+ struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
+bool dcn20_set_shaper_3dlut(
+ struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
+void dcn20_program_front_end_for_ctx(
+ struct dc *dc,
+ struct dc_state *context);
+void dcn20_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context);
+void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx);
+bool dcn20_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state);
+bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream);
+void dcn20_program_output_csc(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix,
+ int opp_id);
+void dcn20_enable_stream(struct pipe_ctx *pipe_ctx);
+void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
+ struct dc_link_settings *link_settings);
+void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_disable_pixel_data(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank);
+void dcn20_blank_pixel_data(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank);
+void dcn20_pipe_control_lock(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
+void dcn20_prepare_bandwidth(
+ struct dc *dc,
+ struct dc_state *context);
+void dcn20_optimize_bandwidth(
+ struct dc *dc,
+ struct dc_state *context);
+bool dcn20_update_bandwidth(
+ struct dc *dc,
+ struct dc_state *context);
+void dcn20_reset_hw_ctx_wrap(
+ struct dc *dc,
+ struct dc_state *context);
+enum dc_status dcn20_enable_stream_timing(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc);
+void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_init_blank(
+ struct dc *dc,
+ struct timing_generator *tg);
+void dcn20_disable_vga(
+ struct dce_hwseq *hws);
+void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_enable_power_gating_plane(
+ struct dce_hwseq *hws,
+ bool enable);
+void dcn20_dpp_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on);
+void dcn20_hubp_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int hubp_inst,
+ bool power_on);
+void dcn20_program_triple_buffer(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool enable_triple_buffer);
+void dcn20_enable_writeback(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context);
+void dcn20_disable_writeback(
+ struct dc *dc,
+ unsigned int dwb_pipe_inst);
+void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx);
+bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx);
+void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx);
+void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx);
+void dcn20_init_vm_ctx(
+ struct dce_hwseq *hws,
+ struct dc *dc,
+ struct dc_virtual_addr_space_config *va_config,
+ int vmid);
+void dcn20_set_flip_control_gsl(
+ struct pipe_ctx *pipe_ctx,
+ bool flip_immediate);
+void dcn20_dsc_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int dsc_inst,
+ bool power_on);
+void dcn20_fpga_init_hw(struct dc *dc);
+bool dcn20_wait_for_blank_complete(
+ struct output_pixel_processor *opp);
+void dcn20_dccg_init(struct dce_hwseq *hws);
+int dcn20_init_sys_ctx(struct dce_hwseq *hws,
+ struct dc *dc,
+ struct dc_phy_addr_space_config *pa_config);
+
+#ifndef TRIM_FSFT
+bool dcn20_optimize_timing_for_fsft(struct dc *dc,
+ struct dc_crtc_timing *timing,
+ unsigned int max_input_rate_in_khz);
+#endif
+
+void dcn20_set_disp_pattern_generator(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ enum controller_dp_test_pattern test_pattern,
+ enum controller_dp_color_space color_space,
+ enum dc_color_depth color_depth,
+ const struct tg_color *solid_color,
+ int width, int height, int offset);
+
+#endif /* __DC_HWSS_DCN20_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
new file mode 100644
index 0000000000..e4b44e691c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce110/dce110_hw_sequencer.h"
+#include "dcn10/dcn10_hw_sequencer.h"
+#include "dcn20_hwseq.h"
+
+#include "dcn20_init.h"
+
+static const struct hw_sequencer_funcs dcn20_funcs = {
+ .program_gamut_remap = dcn10_program_gamut_remap,
+ .init_hw = dcn10_init_hw,
+ .power_down_on_boot = dcn10_power_down_on_boot,
+ .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ .apply_ctx_for_surface = NULL,
+ .program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
+ .post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
+ .update_plane_addr = dcn20_update_plane_addr,
+ .update_dchub = dcn10_update_dchub,
+ .update_pending_status = dcn10_update_pending_status,
+ .program_output_csc = dcn20_program_output_csc,
+ .enable_accelerated_mode = dce110_enable_accelerated_mode,
+ .enable_timing_synchronization = dcn10_enable_timing_synchronization,
+ .enable_vblanks_synchronization = dcn10_enable_vblanks_synchronization,
+ .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
+ .update_info_frame = dce110_update_info_frame,
+ .send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
+ .enable_stream = dcn20_enable_stream,
+ .disable_stream = dce110_disable_stream,
+ .unblank_stream = dcn20_unblank_stream,
+ .blank_stream = dce110_blank_stream,
+ .enable_audio_stream = dce110_enable_audio_stream,
+ .disable_audio_stream = dce110_disable_audio_stream,
+ .disable_plane = dcn20_disable_plane,
+ .pipe_control_lock = dcn20_pipe_control_lock,
+ .interdependent_update_lock = dcn10_lock_all_pipes,
+ .cursor_lock = dcn10_cursor_lock,
+ .prepare_bandwidth = dcn20_prepare_bandwidth,
+ .optimize_bandwidth = dcn20_optimize_bandwidth,
+ .update_bandwidth = dcn20_update_bandwidth,
+ .set_drr = dcn10_set_drr,
+ .get_position = dcn10_get_position,
+ .set_static_screen_control = dcn10_set_static_screen_control,
+ .setup_stereo = dcn10_setup_stereo,
+ .set_avmute = dce110_set_avmute,
+ .log_hw_state = dcn10_log_hw_state,
+ .get_hw_state = dcn10_get_hw_state,
+ .clear_status_bits = dcn10_clear_status_bits,
+ .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .edp_backlight_control = dce110_edp_backlight_control,
+ .edp_power_control = dce110_edp_power_control,
+ .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+ .set_cursor_position = dcn10_set_cursor_position,
+ .set_cursor_attribute = dcn10_set_cursor_attribute,
+ .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
+ .set_clock = dcn10_set_clock,
+ .get_clock = dcn10_get_clock,
+ .program_triplebuffer = dcn20_program_triple_buffer,
+ .enable_writeback = dcn20_enable_writeback,
+ .disable_writeback = dcn20_disable_writeback,
+ .dmdata_status_done = dcn20_dmdata_status_done,
+ .program_dmdata_engine = dcn20_program_dmdata_engine,
+ .set_dmdata_attributes = dcn20_set_dmdata_attributes,
+ .init_sys_ctx = dcn20_init_sys_ctx,
+ .init_vm_ctx = dcn20_init_vm_ctx,
+ .set_flip_control_gsl = dcn20_set_flip_control_gsl,
+ .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+ .calc_vupdate_position = dcn10_calc_vupdate_position,
+ .set_backlight_level = dce110_set_backlight_level,
+ .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
+ .set_pipe = dce110_set_pipe,
+#ifndef TRIM_FSFT
+ .optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft,
+#endif
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
+ .set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
+ .get_dcc_en_bits = dcn10_get_dcc_en_bits,
+ .update_visual_confirm_color = dcn10_update_visual_confirm_color,
+};
+
+static const struct hwseq_private_funcs dcn20_private_funcs = {
+ .init_pipes = dcn10_init_pipes,
+ .update_plane_addr = dcn20_update_plane_addr,
+ .plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
+ .update_mpcc = dcn20_update_mpcc,
+ .set_input_transfer_func = dcn20_set_input_transfer_func,
+ .set_output_transfer_func = dcn20_set_output_transfer_func,
+ .power_down = dce110_power_down,
+ .enable_display_power_gating = dcn10_dummy_display_power_gating,
+ .blank_pixel_data = dcn20_blank_pixel_data,
+ .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
+ .enable_stream_timing = dcn20_enable_stream_timing,
+ .edp_backlight_control = dce110_edp_backlight_control,
+ .disable_stream_gating = dcn20_disable_stream_gating,
+ .enable_stream_gating = dcn20_enable_stream_gating,
+ .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
+ .did_underflow_occur = dcn10_did_underflow_occur,
+ .init_blank = dcn20_init_blank,
+ .disable_vga = dcn20_disable_vga,
+ .bios_golden_init = dcn10_bios_golden_init,
+ .plane_atomic_disable = dcn20_plane_atomic_disable,
+ .plane_atomic_power_down = dcn10_plane_atomic_power_down,
+ .enable_power_gating_plane = dcn20_enable_power_gating_plane,
+ .dpp_pg_control = dcn20_dpp_pg_control,
+ .hubp_pg_control = dcn20_hubp_pg_control,
+ .update_odm = dcn20_update_odm,
+ .dsc_pg_control = dcn20_dsc_pg_control,
+ .set_hdr_multiplier = dcn10_set_hdr_multiplier,
+ .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
+ .wait_for_blank_complete = dcn20_wait_for_blank_complete,
+ .dccg_init = dcn20_dccg_init,
+ .set_blend_lut = dcn20_set_blend_lut,
+ .set_shaper_3dlut = dcn20_set_shaper_3dlut,
+};
+
+void dcn20_hw_sequencer_construct(struct dc *dc)
+{
+ dc->hwss = dcn20_funcs;
+ dc->hwseq->funcs = dcn20_private_funcs;
+}
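
As context for the constructor above: a minimal, hedged sketch of how it is typically consumed, assuming the usual DC call flow (the wrapper name below is hypothetical). Core code calls the constructor once per dc instance and from then on dispatches only through the installed function-pointer tables.

/* hypothetical caller; illustrates dispatch through the installed tables */
static void bring_up_dcn20_hwseq(struct dc *dc)
{
	dcn20_hw_sequencer_construct(dc);  /* installs dcn20_funcs and dcn20_private_funcs */

	/* subsequent hardware programming goes through the vtable, e.g.: */
	dc->hwss.init_hw(dc);              /* resolves to dcn10_init_hw in this table */
}
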
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h
new file mode 100644
index 0000000000..12277797cd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_DCN20_INIT_H__
+#define __DC_DCN20_INIT_H__
+
+struct dc;
+
+void dcn20_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_DCN20_INIT_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
new file mode 100644
index 0000000000..51a57dae18
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
@@ -0,0 +1,502 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+
+#include "core_types.h"
+#include "link_encoder.h"
+#include "dcn20_link_encoder.h"
+#include "stream_encoder.h"
+#include "dc_bios_types.h"
+
+#include "gpio_service_interface.h"
+
+#define CTX \
+ enc10->base.ctx
+#define DC_LOGGER \
+ enc10->base.ctx->logger
+
+#define REG(reg)\
+ (enc10->link_regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ enc10->link_shift->field_name, enc10->link_mask->field_name
+
+#define IND_REG(index) \
+ (enc10->link_regs->index)
+
+#ifndef MAX
+#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
+#endif
+#ifndef MIN
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
+
+static struct mpll_cfg dcn2_mpll_cfg[] = {
+ // RBR
+ {
+ .hdmimode_enable = 1,
+ .ref_range = 3,
+ .ref_clk_mpllb_div = 2,
+ .mpllb_ssc_en = 1,
+ .mpllb_div5_clk_en = 1,
+ .mpllb_multiplier = 226,
+ .mpllb_fracn_en = 1,
+ .mpllb_fracn_quot = 39321,
+ .mpllb_fracn_rem = 3,
+ .mpllb_fracn_den = 5,
+ .mpllb_ssc_up_spread = 0,
+ .mpllb_ssc_peak = 38221,
+ .mpllb_ssc_stepsize = 49314,
+ .mpllb_div_clk_en = 0,
+ .mpllb_div_multiplier = 0,
+ .mpllb_hdmi_div = 0,
+ .mpllb_tx_clk_div = 2,
+ .tx_vboost_lvl = 4,
+ .mpllb_pmix_en = 1,
+ .mpllb_word_div2_en = 0,
+ .mpllb_ana_v2i = 2,
+ .mpllb_ana_freq_vco = 2,
+ .mpllb_ana_cp_int = 7,
+ .mpllb_ana_cp_prop = 18,
+ .hdmi_pixel_clk_div = 0,
+ },
+ // HBR
+ {
+ .hdmimode_enable = 1,
+ .ref_range = 3,
+ .ref_clk_mpllb_div = 2,
+ .mpllb_ssc_en = 1,
+ .mpllb_div5_clk_en = 1,
+ .mpllb_multiplier = 184,
+ .mpllb_fracn_en = 0,
+ .mpllb_fracn_quot = 0,
+ .mpllb_fracn_rem = 0,
+ .mpllb_fracn_den = 1,
+ .mpllb_ssc_up_spread = 0,
+ .mpllb_ssc_peak = 31850,
+ .mpllb_ssc_stepsize = 41095,
+ .mpllb_div_clk_en = 0,
+ .mpllb_div_multiplier = 0,
+ .mpllb_hdmi_div = 0,
+ .mpllb_tx_clk_div = 1,
+ .tx_vboost_lvl = 4,
+ .mpllb_pmix_en = 1,
+ .mpllb_word_div2_en = 0,
+ .mpllb_ana_v2i = 2,
+ .mpllb_ana_freq_vco = 3,
+ .mpllb_ana_cp_int = 7,
+ .mpllb_ana_cp_prop = 18,
+ .hdmi_pixel_clk_div = 0,
+ },
+ //HBR2
+ {
+ .hdmimode_enable = 1,
+ .ref_range = 3,
+ .ref_clk_mpllb_div = 2,
+ .mpllb_ssc_en = 1,
+ .mpllb_div5_clk_en = 1,
+ .mpllb_multiplier = 184,
+ .mpllb_fracn_en = 0,
+ .mpllb_fracn_quot = 0,
+ .mpllb_fracn_rem = 0,
+ .mpllb_fracn_den = 1,
+ .mpllb_ssc_up_spread = 0,
+ .mpllb_ssc_peak = 31850,
+ .mpllb_ssc_stepsize = 41095,
+ .mpllb_div_clk_en = 0,
+ .mpllb_div_multiplier = 0,
+ .mpllb_hdmi_div = 0,
+ .mpllb_tx_clk_div = 0,
+ .tx_vboost_lvl = 4,
+ .mpllb_pmix_en = 1,
+ .mpllb_word_div2_en = 0,
+ .mpllb_ana_v2i = 2,
+ .mpllb_ana_freq_vco = 3,
+ .mpllb_ana_cp_int = 7,
+ .mpllb_ana_cp_prop = 18,
+ .hdmi_pixel_clk_div = 0,
+ },
+ //HBR3
+ {
+ .hdmimode_enable = 1,
+ .ref_range = 3,
+ .ref_clk_mpllb_div = 2,
+ .mpllb_ssc_en = 1,
+ .mpllb_div5_clk_en = 1,
+ .mpllb_multiplier = 292,
+ .mpllb_fracn_en = 0,
+ .mpllb_fracn_quot = 0,
+ .mpllb_fracn_rem = 0,
+ .mpllb_fracn_den = 1,
+ .mpllb_ssc_up_spread = 0,
+ .mpllb_ssc_peak = 47776,
+ .mpllb_ssc_stepsize = 61642,
+ .mpllb_div_clk_en = 0,
+ .mpllb_div_multiplier = 0,
+ .mpllb_hdmi_div = 0,
+ .mpllb_tx_clk_div = 0,
+ .tx_vboost_lvl = 4,
+ .mpllb_pmix_en = 1,
+ .mpllb_word_div2_en = 0,
+ .mpllb_ana_v2i = 2,
+ .mpllb_ana_freq_vco = 0,
+ .mpllb_ana_cp_int = 7,
+ .mpllb_ana_cp_prop = 18,
+ .hdmi_pixel_clk_div = 0,
+ },
+};
+
+void enc2_fec_set_enable(struct link_encoder *enc, bool enable)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ DC_LOG_DSC("%s FEC at link encoder inst %d",
+ enable ? "Enabling" : "Disabling", enc->id.enum_id);
+ REG_UPDATE(DP_DPHY_CNTL, DPHY_FEC_EN, enable);
+}
+
+void enc2_fec_set_ready(struct link_encoder *enc, bool ready)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ REG_UPDATE(DP_DPHY_CNTL, DPHY_FEC_READY_SHADOW, ready);
+}
+
+bool enc2_fec_is_active(struct link_encoder *enc)
+{
+ uint32_t active = 0;
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ REG_GET(DP_DPHY_CNTL, DPHY_FEC_ACTIVE_STATUS, &active);
+
+ return (active != 0);
+}
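
A hedged sketch of how the three FEC hooks above are usually ordered, following the DP 1.4 FEC sequence; sink-side DPCD writes, capability checks and error handling are omitted, and the wrapper function itself is hypothetical.

/* hypothetical wrapper: fec_set_ready goes before link training,
 * fec_set_enable only after training has succeeded
 */
static void example_fec_sequence(struct link_encoder *enc, bool training_ok)
{
	enc->funcs->fec_set_ready(enc, true);

	/* ... link training runs here ... */

	if (training_ok)
		enc->funcs->fec_set_enable(enc, true);
	else
		enc->funcs->fec_set_ready(enc, false);
}
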
+
+/* This function reads link encoder register fields (FEC and DP link training
+ * status) into a link_enc_state struct, to be logged later in dcn10_log_hw_state.
+ */
+void link_enc2_read_state(struct link_encoder *enc, struct link_enc_state *s)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ REG_GET(DP_DPHY_CNTL, DPHY_FEC_EN, &s->dphy_fec_en);
+ REG_GET(DP_DPHY_CNTL, DPHY_FEC_READY_SHADOW, &s->dphy_fec_ready_shadow);
+ REG_GET(DP_DPHY_CNTL, DPHY_FEC_ACTIVE_STATUS, &s->dphy_fec_active_status);
+ REG_GET(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, &s->dp_link_training_complete);
+}
+
+static bool update_cfg_data(
+ struct dcn10_link_encoder *enc10,
+ const struct dc_link_settings *link_settings,
+ struct dpcssys_phy_seq_cfg *cfg)
+{
+ int i;
+
+ cfg->load_sram_fw = false;
+
+ for (i = 0; i < link_settings->lane_count; i++)
+ cfg->lane_en[i] = true;
+
+ switch (link_settings->link_rate) {
+ case LINK_RATE_LOW:
+ cfg->mpll_cfg = dcn2_mpll_cfg[0];
+ break;
+ case LINK_RATE_HIGH:
+ cfg->mpll_cfg = dcn2_mpll_cfg[1];
+ break;
+ case LINK_RATE_HIGH2:
+ cfg->mpll_cfg = dcn2_mpll_cfg[2];
+ break;
+ case LINK_RATE_HIGH3:
+ cfg->mpll_cfg = dcn2_mpll_cfg[3];
+ break;
+ default:
+ DC_LOG_ERROR("%s: No supported link rate found %X!\n",
+ __func__, link_settings->link_rate);
+ return false;
+ }
+
+ return true;
+}
+
+void dcn20_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ struct dcn20_link_encoder *enc20 = (struct dcn20_link_encoder *) enc10;
+ struct dpcssys_phy_seq_cfg *cfg = &enc20->phy_seq_cfg;
+
+ if (!enc->ctx->dc->debug.avoid_vbios_exec_table) {
+ dcn10_link_encoder_enable_dp_output(enc, link_settings, clock_source);
+ return;
+ }
+
+ if (!update_cfg_data(enc10, link_settings, cfg))
+ return;
+
+ enc1_configure_encoder(enc10, link_settings);
+
+ dcn10_link_encoder_setup(enc, SIGNAL_TYPE_DISPLAY_PORT);
+}
+
+void dcn20_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ uint32_t is_in_usb_c_dp4_mode = 0;
+
+ dcn10_link_encoder_get_max_link_cap(enc, link_settings);
+
+	/* in USB-C 2-lane DP alt mode (no DP4 pin assignment), max lane count is 2 */
+ if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) {
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
+ if (!is_in_usb_c_dp4_mode)
+ link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
+ }
+}
+
+bool dcn20_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ uint32_t dp_alt_mode_disable = 0;
+ bool is_usb_c_alt_mode = false;
+
+ if (enc->features.flags.bits.DP_IS_USB_C) {
+ /* if value == 1 alt mode is disabled, otherwise it is enabled */
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
+ is_usb_c_alt_mode = (dp_alt_mode_disable == 0);
+ }
+
+ return is_usb_c_alt_mode;
+}
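
Taken together, the two helpers above mean a caller never has to special-case USB-C. A hedged sketch (the helper below is hypothetical) of how the already-clamped capability would be read back:

/* hypothetical helper; get_max_link_cap already clamps lane_count to 2
 * when the PHY reports USB-C DP alt mode without the DP4 pin assignment
 */
static unsigned int query_usable_lanes(struct link_encoder *enc)
{
	struct dc_link_settings caps = {0};

	enc->funcs->get_max_link_cap(enc, &caps);
	return caps.lane_count;
}
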
+
+#define AUX_REG(reg)\
+ (enc10->aux_regs->reg)
+
+#define AUX_REG_READ(reg_name) \
+ dm_read_reg(CTX, AUX_REG(reg_name))
+
+#define AUX_REG_WRITE(reg_name, val) \
+ dm_write_reg(CTX, AUX_REG(reg_name), val)
+void enc2_hw_init(struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+/*
+ 00 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__1to2 : 1/2
+ 01 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__3to4 : 3/4
+ 02 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__7to8 : 7/8
+ 03 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__15to16 : 15/16
+ 04 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__31to32 : 31/32
+ 05 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__63to64 : 63/64
+ 06 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__127to128 : 127/128
+ 07 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__255to256 : 255/256
+*/
+
+/*
+ AUX_REG_UPDATE_5(AUX_DPHY_RX_CONTROL0,
+ AUX_RX_START_WINDOW = 1 [6:4]
+ AUX_RX_RECEIVE_WINDOW = 1 default is 2 [10:8]
+ AUX_RX_HALF_SYM_DETECT_LEN = 1 [13:12] default is 1
+ AUX_RX_TRANSITION_FILTER_EN = 1 [16] default is 1
+ AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT [17] is 0 default is 0
+ AUX_RX_ALLOW_BELOW_THRESHOLD_START [18] is 1 default is 1
+ AUX_RX_ALLOW_BELOW_THRESHOLD_STOP [19] is 1 default is 1
+ AUX_RX_PHASE_DETECT_LEN, [21,20] = 0x3 default is 3
+ AUX_RX_DETECTION_THRESHOLD [30:28] = 1
+*/
+ if (enc->ctx->dc_bios->golden_table.dc_golden_table_ver > 0) {
+ AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, enc->ctx->dc_bios->golden_table.aux_dphy_rx_control0_val);
+
+ AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, enc->ctx->dc_bios->golden_table.aux_dphy_tx_control_val);
+
+ AUX_REG_WRITE(AUX_DPHY_RX_CONTROL1, enc->ctx->dc_bios->golden_table.aux_dphy_rx_control1_val);
+ } else {
+ AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
+
+ AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);
+ }
+
+	// AUX_DPHY_TX_REF_CONTROL's AUX_TX_REF_DIV HW default is 0x32;
+	// Set the AUX_TX_REF_DIV divider to generate a 2 MHz reference from refclk
+	// (i.e. divider is roughly refclk / 2 MHz):
+	// 27MHz -> 0xd
+	// 100MHz -> 0x32
+	// 48MHz -> 0x18
+
+ // Set TMDS_CTL0 to 1. This is a legacy setting.
+ REG_UPDATE(TMDS_CTL_BITS, TMDS_CTL0, 1);
+
+ dcn10_aux_initialize(enc10);
+}
+
+static const struct link_encoder_funcs dcn20_link_enc_funcs = {
+ .read_state = link_enc2_read_state,
+ .validate_output_with_stream =
+ dcn10_link_encoder_validate_output_with_stream,
+ .hw_init = enc2_hw_init,
+ .setup = dcn10_link_encoder_setup,
+ .enable_tmds_output = dcn10_link_encoder_enable_tmds_output_with_clk_pattern_wa,
+ .enable_dp_output = dcn20_link_encoder_enable_dp_output,
+ .enable_dp_mst_output = dcn10_link_encoder_enable_dp_mst_output,
+ .disable_output = dcn10_link_encoder_disable_output,
+ .dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings,
+ .dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern,
+ .update_mst_stream_allocation_table =
+ dcn10_link_encoder_update_mst_stream_allocation_table,
+ .psr_program_dp_dphy_fast_training =
+ dcn10_psr_program_dp_dphy_fast_training,
+ .psr_program_secondary_packet = dcn10_psr_program_secondary_packet,
+ .connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe,
+ .enable_hpd = dcn10_link_encoder_enable_hpd,
+ .disable_hpd = dcn10_link_encoder_disable_hpd,
+ .is_dig_enabled = dcn10_is_dig_enabled,
+ .destroy = dcn10_link_encoder_destroy,
+ .fec_set_enable = enc2_fec_set_enable,
+ .fec_set_ready = enc2_fec_set_ready,
+ .fec_is_active = enc2_fec_is_active,
+ .get_dig_mode = dcn10_get_dig_mode,
+ .get_dig_frontend = dcn10_get_dig_frontend,
+ .is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
+ .get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
+};
+
+void dcn20_link_encoder_construct(
+ struct dcn20_link_encoder *enc20,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dcn10_link_enc_registers *link_regs,
+ const struct dcn10_link_enc_aux_registers *aux_regs,
+ const struct dcn10_link_enc_hpd_registers *hpd_regs,
+ const struct dcn10_link_enc_shift *link_shift,
+ const struct dcn10_link_enc_mask *link_mask)
+{
+ struct bp_encoder_cap_info bp_cap_info = {0};
+ const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
+ enum bp_result result = BP_RESULT_OK;
+ struct dcn10_link_encoder *enc10 = &enc20->enc10;
+
+ enc10->base.funcs = &dcn20_link_enc_funcs;
+ enc10->base.ctx = init_data->ctx;
+ enc10->base.id = init_data->encoder;
+
+ enc10->base.hpd_source = init_data->hpd_source;
+ enc10->base.connector = init_data->connector;
+
+ enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+
+ enc10->base.features = *enc_features;
+
+ enc10->base.transmitter = init_data->transmitter;
+
+	/* set the flag to indicate whether the driver polls the I2C data pin
+	 * while doing the DP sink detect
+	 */
+
+/* if (dal_adapter_service_is_feature_supported(as,
+ FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
+ enc10->base.features.flags.bits.
+ DP_SINK_DETECT_POLL_DATA_PIN = true;*/
+
+ enc10->base.output_signals =
+ SIGNAL_TYPE_DVI_SINGLE_LINK |
+ SIGNAL_TYPE_DVI_DUAL_LINK |
+ SIGNAL_TYPE_LVDS |
+ SIGNAL_TYPE_DISPLAY_PORT |
+ SIGNAL_TYPE_DISPLAY_PORT_MST |
+ SIGNAL_TYPE_EDP |
+ SIGNAL_TYPE_HDMI_TYPE_A;
+
+	/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
+	 * SW always assigns DIG_FE 1:1 mapped to DIG_BE for non-MST UNIPHY.
+	 * SW assigns DIG_FE to non-MST UNIPHY first and MST last, so the
+	 * preferred DIG is per UNIPHY and is used by SST DP, eDP, HDMI, DVI
+	 * and LVDS. The preferred DIG assignment is decided by board design.
+	 * For DCE 8.0 there are at most 6 UNIPHYs; we assume board design
+	 * and VBIOS will filter out the 7th UNIPHY for DCE 8.0.
+	 * By this, adding DIGG should not hurt DCE 8.0.
+	 * This will let DCE 8.1 share DCE 8.0 code as much as possible.
+	 */
+
+ enc10->link_regs = link_regs;
+ enc10->aux_regs = aux_regs;
+ enc10->hpd_regs = hpd_regs;
+ enc10->link_shift = link_shift;
+ enc10->link_mask = link_mask;
+
+ switch (enc10->base.transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ enc10->base.preferred_engine = ENGINE_ID_DIGA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ enc10->base.preferred_engine = ENGINE_ID_DIGB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ enc10->base.preferred_engine = ENGINE_ID_DIGC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ enc10->base.preferred_engine = ENGINE_ID_DIGD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ enc10->base.preferred_engine = ENGINE_ID_DIGE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ enc10->base.preferred_engine = ENGINE_ID_DIGF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ enc10->base.preferred_engine = ENGINE_ID_DIGG;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+ }
+
+ /* default to one to mirror Windows behavior */
+ enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
+
+ result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios,
+ enc10->base.id, &bp_cap_info);
+
+ /* Override features with DCE-specific values */
+ if (result == BP_RESULT_OK) {
+ enc10->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+ enc10->base.features.flags.bits.DP_IS_USB_C =
+ bp_cap_info.DP_IS_USB_C;
+ } else {
+ DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ __func__,
+ result);
+ }
+ if (enc10->base.ctx->dc->debug.hdmi20_disable) {
+ enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
new file mode 100644
index 0000000000..b2b266953d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
@@ -0,0 +1,364 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_ENCODER__DCN20_H__
+#define __DC_LINK_ENCODER__DCN20_H__
+
+#include "dcn10/dcn10_link_encoder.h"
+
+#define DCN2_AUX_REG_LIST(id)\
+ AUX_REG_LIST(id), \
+ SRI(AUX_DPHY_TX_CONTROL, DP_AUX, id)
+
+#define UNIPHY_MASK_SH_LIST(mask_sh)\
+ LE_SF(SYMCLKA_CLOCK_ENABLE, SYMCLKA_CLOCK_ENABLE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_LINK_ENABLE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL0_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL1_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL2_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL3_XBAR_SOURCE, mask_sh)
+
+#define DPCS_MASK_SH_LIST(mask_sh)\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_CLK_RDY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_DATA_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX0_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX1_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX2_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX3_TERM_CTRL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_MPLLB_MULTIPLIER, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_RATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_RATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_QUOT, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_DEN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL8, RDPCS_PHY_DP_MPLLB_SSC_PEAK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL9, RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL9, RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL10, RDPCS_PHY_DP_MPLLB_FRACN_REM, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_REF_CLK_MPLLB_DIV, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_HDMI_MPLLB_HDMI_DIV, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_SSC_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_TX_CLK_DIV, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_STATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL13, RDPCS_PHY_DP_MPLLB_DIV_CLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL13, RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL14, RDPCS_PHY_DP_MPLLB_FRACN_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL14, RDPCS_PHY_DP_MPLLB_PMIX_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE0_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE1_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE2_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE3_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_RD_START_DELAY, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_EXT_REFCLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_BYPASS, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_CLOCK_ON, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_CLOCK_ON, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_GATE_DIS, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_REQ, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_CR_MUX_SEL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_REF_RANGE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_BYPASS, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_EXT_LD_DONE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_HDMIMODE_ENABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_INIT_DONE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DP4_POR, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA, RDPCS_PLL_UPDATE_DATA, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_REG_FIFO_ERROR_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_TX_FIFO_ERROR_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_DPALT_DISABLE_TOGGLE_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_DPALT_4LANE_TOGGLE_MASK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCS_TX_CR_ADDR, RDPCS_TX_CR_ADDR, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCS_TX_CR_DATA, RDPCS_TX_CR_DATA, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_V2I, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_FREQ_VCO, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_MPLLB_CP_INT, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_MPLLB_CP_PROP, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_POST, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_MAIN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_FINETUNE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_RANGE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_PRE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_POST, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_CLOCK_ON, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_GATE_DIS, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_EN, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_SWAP, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_ORDER_INVERT, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_EN, mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_RD_START_DELAY, mask_sh)
+
+#define DPCS_DCN2_MASK_SH_LIST(mask_sh)\
+ DPCS_MASK_SH_LIST(mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL, RDPCS_PHY_RX_REF_LD_VAL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL, RDPCS_PHY_RX_VCO_LD_VAL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX0_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX1_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX0_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX1_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX2_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX2_RATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX3_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX3_RATE, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYA_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYB_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYC_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYD_SOFT_RESET, mask_sh),\
+ LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh)
+
+#define LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh)\
+ LINK_ENCODER_MASK_SH_LIST_DCN10(mask_sh),\
+ LE_SF(DP0_DP_DPHY_CNTL, DPHY_FEC_EN, mask_sh),\
+ LE_SF(DP0_DP_DPHY_CNTL, DPHY_FEC_READY_SHADOW, mask_sh),\
+ LE_SF(DP0_DP_DPHY_CNTL, DPHY_FEC_ACTIVE_STATUS, mask_sh),\
+ LE_SF(DIG0_DIG_LANE_ENABLE, DIG_LANE0EN, mask_sh),\
+ LE_SF(DIG0_DIG_LANE_ENABLE, DIG_LANE1EN, mask_sh),\
+ LE_SF(DIG0_DIG_LANE_ENABLE, DIG_LANE2EN, mask_sh),\
+ LE_SF(DIG0_DIG_LANE_ENABLE, DIG_LANE3EN, mask_sh),\
+ LE_SF(DIG0_DIG_LANE_ENABLE, DIG_CLK_EN, mask_sh),\
+ LE_SF(DIG0_TMDS_CTL_BITS, TMDS_CTL0, mask_sh), \
+ UNIPHY_MASK_SH_LIST(mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_START_WINDOW, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_HALF_SYM_DETECT_LEN, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_TRANSITION_FILTER_EN, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_ALLOW_BELOW_THRESHOLD_START, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_ALLOW_BELOW_THRESHOLD_STOP, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_PHASE_DETECT_LEN, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_DETECTION_THRESHOLD, mask_sh), \
+ LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_TX_PRECHARGE_LEN, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_TX_PRECHARGE_SYMBOLS, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_MODE_DET_CHECK_DELAY, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_PRECHARGE_SKIP, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, mask_sh),\
+ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN_MUL, mask_sh)
+
+#define UNIPHY_DCN2_REG_LIST(id) \
+ SRI(CLOCK_ENABLE, SYMCLK, id), \
+ SRI(CHANNEL_XBAR_CNTL, UNIPHY, id)
+
+#define DPCS_DCN2_CMN_REG_LIST(id) \
+ SRI(DIG_LANE_ENABLE, DIG, id), \
+ SRI(TMDS_CTL_BITS, DIG, id), \
+ SRI(RDPCSTX_PHY_CNTL3, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL4, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL5, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL6, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL7, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL8, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL9, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL10, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL11, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL12, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL13, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL14, RDPCSTX, id), \
+ SRI(RDPCSTX_CNTL, RDPCSTX, id), \
+ SRI(RDPCSTX_CLOCK_CNTL, RDPCSTX, id), \
+ SRI(RDPCSTX_INTERRUPT_CONTROL, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL0, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_CNTL2, RDPCSTX, id), \
+ SRI(RDPCSTX_PLL_UPDATE_DATA, RDPCSTX, id), \
+ SRI(RDPCS_TX_CR_ADDR, RDPCSTX, id), \
+ SRI(RDPCS_TX_CR_DATA, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE0, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE1, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \
+ SRI(DPCSTX_TX_CLOCK_CNTL, DPCSTX, id), \
+ SRI(DPCSTX_TX_CNTL, DPCSTX, id), \
+ SR(RDPCSTX0_RDPCSTX_SCRATCH)
+
+
+#define DPCS_DCN2_REG_LIST(id) \
+ DPCS_DCN2_CMN_REG_LIST(id), \
+ SRI(RDPCSTX_PHY_RX_LD_VAL, RDPCSTX, id),\
+ SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id)
+
+#define LE_DCN2_REG_LIST(id) \
+ LE_DCN10_REG_LIST(id), \
+ SR(DCIO_SOFT_RESET)
+
+struct mpll_cfg {
+ uint32_t mpllb_ana_v2i;
+ uint32_t mpllb_ana_freq_vco;
+ uint32_t mpllb_ana_cp_int;
+ uint32_t mpllb_ana_cp_prop;
+ uint32_t mpllb_multiplier;
+ uint32_t ref_clk_mpllb_div;
+ bool mpllb_word_div2_en;
+ bool mpllb_ssc_en;
+ bool mpllb_div5_clk_en;
+ bool mpllb_div_clk_en;
+ bool mpllb_fracn_en;
+ bool mpllb_pmix_en;
+ uint32_t mpllb_div_multiplier;
+ uint32_t mpllb_tx_clk_div;
+ uint32_t mpllb_fracn_quot;
+ uint32_t mpllb_fracn_den;
+ uint32_t mpllb_ssc_peak;
+ uint32_t mpllb_ssc_stepsize;
+ uint32_t mpllb_ssc_up_spread;
+ uint32_t mpllb_fracn_rem;
+ uint32_t mpllb_hdmi_div;
+	// TODO: These may not all be mpll params, need to figure out.
+ uint32_t tx_vboost_lvl;
+ uint32_t hdmi_pixel_clk_div;
+ uint32_t ref_range;
+ uint32_t ref_clk;
+ bool hdmimode_enable;
+ bool sup_pre_hp;
+ bool dp_tx0_vergdrv_byp;
+ bool dp_tx1_vergdrv_byp;
+ bool dp_tx2_vergdrv_byp;
+ bool dp_tx3_vergdrv_byp;
+ uint32_t tx_peaking_lvl;
+ uint32_t ctr_reqs_pll;
+};
+
+struct dpcssys_phy_seq_cfg {
+ bool program_fuse;
+ bool bypass_sram;
+ bool lane_en[4];
+ bool use_calibration_setting;
+ struct mpll_cfg mpll_cfg;
+ bool load_sram_fw;
+#if 0
+
+ bool hdmimode_enable;
+ bool silver2;
+ bool ext_refclk_en;
+ uint32_t dp_tx0_term_ctrl;
+ uint32_t dp_tx1_term_ctrl;
+ uint32_t dp_tx2_term_ctrl;
+ uint32_t dp_tx3_term_ctrl;
+ uint32_t fw_data[0x1000];
+ uint32_t dp_tx0_width;
+ uint32_t dp_tx1_width;
+ uint32_t dp_tx2_width;
+ uint32_t dp_tx3_width;
+ uint32_t dp_tx0_rate;
+ uint32_t dp_tx1_rate;
+ uint32_t dp_tx2_rate;
+ uint32_t dp_tx3_rate;
+ uint32_t dp_tx0_eq_main;
+ uint32_t dp_tx0_eq_pre;
+ uint32_t dp_tx0_eq_post;
+ uint32_t dp_tx1_eq_main;
+ uint32_t dp_tx1_eq_pre;
+ uint32_t dp_tx1_eq_post;
+ uint32_t dp_tx2_eq_main;
+ uint32_t dp_tx2_eq_pre;
+ uint32_t dp_tx2_eq_post;
+ uint32_t dp_tx3_eq_main;
+ uint32_t dp_tx3_eq_pre;
+ uint32_t dp_tx3_eq_post;
+ bool data_swap_en;
+ bool data_order_invert_en;
+ uint32_t ldpcs_fifo_start_delay;
+ uint32_t rdpcs_fifo_start_delay;
+ bool rdpcs_reg_fifo_error_mask;
+ bool rdpcs_tx_fifo_error_mask;
+ bool rdpcs_dpalt_disable_mask;
+ bool rdpcs_dpalt_4lane_mask;
+#endif
+};
+
+struct dcn20_link_encoder {
+ struct dcn10_link_encoder enc10;
+ struct dpcssys_phy_seq_cfg phy_seq_cfg;
+};
+
+void enc2_fec_set_enable(struct link_encoder *enc, bool enable);
+void enc2_fec_set_ready(struct link_encoder *enc, bool ready);
+bool enc2_fec_is_active(struct link_encoder *enc);
+void enc2_hw_init(struct link_encoder *enc);
+
+void link_enc2_read_state(struct link_encoder *enc, struct link_enc_state *s);
+
+void dcn20_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
+
+bool dcn20_link_encoder_is_in_alt_mode(struct link_encoder *enc);
+void dcn20_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings);
+
+void dcn20_link_encoder_construct(
+ struct dcn20_link_encoder *enc20,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dcn10_link_enc_registers *link_regs,
+ const struct dcn10_link_enc_aux_registers *aux_regs,
+ const struct dcn10_link_enc_hpd_registers *hpd_regs,
+ const struct dcn10_link_enc_shift *link_shift,
+ const struct dcn10_link_enc_mask *link_mask);
+
+#endif /* __DC_LINK_ENCODER__DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c
new file mode 100644
index 0000000000..259a98e4ee
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c
@@ -0,0 +1,324 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "reg_helper.h"
+#include "resource.h"
+#include "mcif_wb.h"
+#include "dcn20_mmhubbub.h"
+
+
+#define REG(reg)\
+ mcif_wb20->mcif_wb_regs->reg
+
+#define CTX \
+ mcif_wb20->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ mcif_wb20->mcif_wb_shift->field_name, mcif_wb20->mcif_wb_mask->field_name
+
+#define MCIF_ADDR(addr) ((((unsigned long long)(addr) & 0xffffffffff) + 0xFE) >> 8)
+#define MCIF_ADDR_HIGH(addr) ((unsigned long long)(addr) >> 40)
+
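
A worked example of the two address macros above may help (the address value is arbitrary and assumed 256-byte aligned): MCIF_ADDR() keeps the low 40 bits of the buffer address and yields them in 256-byte units, while MCIF_ADDR_HIGH() yields whatever sits above bit 39.

/* illustrative values only */
unsigned long long addr = 0x123456789A00ULL;   /* 256-byte aligned */

unsigned int lo = MCIF_ADDR(addr);        /* (0x3456789A00 + 0xFE) >> 8 = 0x3456789A */
unsigned int hi = MCIF_ADDR_HIGH(addr);   /* 0x123456789A00 >> 40       = 0x12       */
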
+/* wbif programming guide:
+ * 1. set up wbif parameter:
+ * unsigned long long luma_address[4]; //4 frame buffers
+ * unsigned long long chroma_address[4];
+ * unsigned int luma_pitch;
+ * unsigned int chroma_pitch;
+ * unsigned int warmup_pitch=0x10; //256B align, the page size is 4KB when it is 0x10
+ * unsigned int slice_lines; //slice size
+ * unsigned int time_per_pixel; // time per pixel, in ns
+ * unsigned int arbitration_slice; // 0: 512 bytes 1: 1024 bytes 2: 2048 Bytes
+ * unsigned int max_scaled_time; // used for QOS generation
+ * unsigned int swlock=0x0;
+ * unsigned int cli_watermark[4]; //4 group urgent watermark
+ * unsigned int pstate_watermark[4]; //4 group pstate watermark
+ * unsigned int sw_int_en; // Software interrupt enable, frame end and overflow
+ * unsigned int sw_slice_int_en; // slice end interrupt enable
+ * unsigned int sw_overrun_int_en; // overrun error interrupt enable
+ * unsigned int vce_int_en; // VCE interrupt enable, frame end and overflow
+ * unsigned int vce_slice_int_en; // VCE slice end interrupt enable, frame end and overflow
+ *
+ * 2. configure wbif register
+ * a. call mmhubbub_config_wbif()
+ *
+ * 3. Enable wbif
+ * call set_wbif_bufmgr_enable();
+ *
+ * 4. wbif_dump_status(), optional, for debug purposes:
+ *    the bufmgr status shows the progress of the write back and can be used for debugging
+ */
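
The programming guide above can be condensed into a short, hedged sketch using the hooks this file installs in dcn20_mmhubbub_funcs further down; the wrapper itself is hypothetical, and all parameter structures are assumed to have been filled in by the caller.

/* hypothetical wrapper over the mcif_wb_funcs hooks defined in this file */
static void example_wbif_bringup(struct mcif_wb *mcif_wb,
		struct mcif_buf_params *buf_params,
		struct mcif_arb_params *arb_params,
		struct mcif_irq_params *irq_params,
		unsigned int dest_height)
{
	/* 1-2. program buffer addresses/pitches, arbitration and watermarks */
	mcif_wb->funcs->config_mcif_buf(mcif_wb, buf_params, dest_height);
	mcif_wb->funcs->config_mcif_arb(mcif_wb, arb_params);

	/* interrupt masks for the SW and VCE clients */
	mcif_wb->funcs->config_mcif_irq(mcif_wb, irq_params);

	/* 3. finally enable the buffer manager */
	mcif_wb->funcs->enable_mcif(mcif_wb);
}
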
+
+static void mmhubbub2_config_mcif_buf(struct mcif_wb *mcif_wb,
+ struct mcif_buf_params *params,
+ unsigned int dest_height)
+{
+ struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb);
+
+ /* sw lock buffer0~buffer3, default is 0 */
+ REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, params->swlock);
+
+ /* buffer address for packing mode or Luma in planar mode */
+ REG_UPDATE(MCIF_WB_BUF_1_ADDR_Y, MCIF_WB_BUF_1_ADDR_Y, MCIF_ADDR(params->luma_address[0]));
+ REG_UPDATE(MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[0]));
+ /* right eye sub-buffer address offset for packing mode or Luma in planar mode */
+ REG_UPDATE(MCIF_WB_BUF_1_ADDR_Y_OFFSET, MCIF_WB_BUF_1_ADDR_Y_OFFSET, 0);
+
+ /* buffer address for Chroma in planar mode (unused in packing mode) */
+ REG_UPDATE(MCIF_WB_BUF_1_ADDR_C, MCIF_WB_BUF_1_ADDR_C, MCIF_ADDR(params->chroma_address[0]));
+ REG_UPDATE(MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[0]));
+	/* right eye offset for packing mode or Chroma in planar mode */
+ REG_UPDATE(MCIF_WB_BUF_1_ADDR_C_OFFSET, MCIF_WB_BUF_1_ADDR_C_OFFSET, 0);
+
+ /* buffer address for packing mode or Luma in planar mode */
+ REG_UPDATE(MCIF_WB_BUF_2_ADDR_Y, MCIF_WB_BUF_2_ADDR_Y, MCIF_ADDR(params->luma_address[1]));
+ REG_UPDATE(MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[1]));
+ /* right eye sub-buffer address offset for packing mode or Luma in planar mode */
+ REG_UPDATE(MCIF_WB_BUF_2_ADDR_Y_OFFSET, MCIF_WB_BUF_2_ADDR_Y_OFFSET, 0);
+
+ /* buffer address for Chroma in planar mode (unused in packing mode) */
+ REG_UPDATE(MCIF_WB_BUF_2_ADDR_C, MCIF_WB_BUF_2_ADDR_C, MCIF_ADDR(params->chroma_address[1]));
+ REG_UPDATE(MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[1]));
+	/* right eye offset for packing mode or Chroma in planar mode */
+ REG_UPDATE(MCIF_WB_BUF_2_ADDR_C_OFFSET, MCIF_WB_BUF_2_ADDR_C_OFFSET, 0);
+
+ /* buffer address for packing mode or Luma in planar mode */
+ REG_UPDATE(MCIF_WB_BUF_3_ADDR_Y, MCIF_WB_BUF_3_ADDR_Y, MCIF_ADDR(params->luma_address[2]));
+ REG_UPDATE(MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[2]));
+ /* right eye sub-buffer address offset for packing mode or Luma in planar mode */
+ REG_UPDATE(MCIF_WB_BUF_3_ADDR_Y_OFFSET, MCIF_WB_BUF_3_ADDR_Y_OFFSET, 0);
+
+ /* buffer address for Chroma in planar mode (unused in packing mode) */
+ REG_UPDATE(MCIF_WB_BUF_3_ADDR_C, MCIF_WB_BUF_3_ADDR_C, MCIF_ADDR(params->chroma_address[2]));
+ REG_UPDATE(MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[2]));
+	/* right eye offset for packing mode or Chroma in planar mode */
+ REG_UPDATE(MCIF_WB_BUF_3_ADDR_C_OFFSET, MCIF_WB_BUF_3_ADDR_C_OFFSET, 0);
+
+ /* buffer address for packing mode or Luma in planar mode */
+ REG_UPDATE(MCIF_WB_BUF_4_ADDR_Y, MCIF_WB_BUF_4_ADDR_Y, MCIF_ADDR(params->luma_address[3]));
+ REG_UPDATE(MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[3]));
+ /* right eye sub-buffer address offset for packing mode or Luma in planar mode */
+ REG_UPDATE(MCIF_WB_BUF_4_ADDR_Y_OFFSET, MCIF_WB_BUF_4_ADDR_Y_OFFSET, 0);
+
+ /* buffer address for Chroma in planar mode (unused in packing mode) */
+ REG_UPDATE(MCIF_WB_BUF_4_ADDR_C, MCIF_WB_BUF_4_ADDR_C, MCIF_ADDR(params->chroma_address[3]));
+ REG_UPDATE(MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[3]));
+	/* right eye offset for packing mode or Chroma in planar mode */
+ REG_UPDATE(MCIF_WB_BUF_4_ADDR_C_OFFSET, MCIF_WB_BUF_4_ADDR_C_OFFSET, 0);
+
+	/* setup luma & chroma size;
+	 * should be large enough to contain a whole frame of Luma data,
+	 * the programmed value is frame buffer size bits [27:8], i.e. the size
+	 * in 256-byte units
+	 */
+ REG_UPDATE(MCIF_WB_BUF_LUMA_SIZE, MCIF_WB_BUF_LUMA_SIZE, (params->luma_pitch>>8) * dest_height);
+ REG_UPDATE(MCIF_WB_BUF_CHROMA_SIZE, MCIF_WB_BUF_CHROMA_SIZE, (params->chroma_pitch>>8) * dest_height);
+
+ /* enable address fence */
+ REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUF_ADDR_FENCE_EN, 1);
+
+ /* setup pitch, the programmed value is [15:8], 256B align */
+ REG_UPDATE_2(MCIF_WB_BUF_PITCH, MCIF_WB_BUF_LUMA_PITCH, params->luma_pitch >> 8,
+ MCIF_WB_BUF_CHROMA_PITCH, params->chroma_pitch >> 8);
+
+ /* Set pitch for MC cache warm up mode */
+ /* Pitch is 256 bytes aligned. The default pitch is 4K */
+ /* default is 0x10 */
+ REG_UPDATE(MCIF_WB_WARM_UP_CNTL, MCIF_WB_PITCH_SIZE_WARMUP, params->warmup_pitch);
+}
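
To make the LUMA_SIZE/CHROMA_SIZE programming above concrete, a hedged example (the numbers are only illustrative, assuming a packed 32bpp 1920x1080 destination): luma_pitch = 1920 * 4 = 7680 bytes, so luma_pitch >> 8 = 30, and MCIF_WB_BUF_LUMA_SIZE is programmed with 30 * 1080 = 32400 units of 256 bytes, i.e. 8,294,400 bytes for the whole frame.
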
+
+static void mmhubbub2_config_mcif_arb(struct mcif_wb *mcif_wb,
+ struct mcif_arb_params *params)
+{
+ struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb);
+
+ /* Programmed by the video driver based on the CRTC timing (for DWB) */
+ REG_UPDATE(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_TIME_PER_PIXEL, params->time_per_pixel);
+
+ /* Programming dwb watermark */
+ /* Watermark to generate urgent in MCIF_WB_CLI, value is determined by MCIF_WB_CLI_WATERMARK_MASK. */
+ /* Program in ns. A formula will be provided in the pseudo code to calculate the value. */
+ REG_UPDATE(MCIF_WB_SCLK_CHANGE, MCIF_WB_CLI_WATERMARK_MASK, 0x0);
+ /* urgent_watermarkA */
+ REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[0]);
+ REG_UPDATE(MCIF_WB_SCLK_CHANGE, MCIF_WB_CLI_WATERMARK_MASK, 0x1);
+ /* urgent_watermarkB */
+ REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[1]);
+ REG_UPDATE(MCIF_WB_SCLK_CHANGE, MCIF_WB_CLI_WATERMARK_MASK, 0x2);
+ /* urgent_watermarkC */
+ REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[2]);
+ REG_UPDATE(MCIF_WB_SCLK_CHANGE, MCIF_WB_CLI_WATERMARK_MASK, 0x3);
+ /* urgent_watermarkD */
+ REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[3]);
+
+ /* Programming nb pstate watermark */
+ /* nbp_state_change_watermarkA */
+ REG_UPDATE(MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x0);
+ REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK,
+ NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[0]);
+ /* nbp_state_change_watermarkB */
+ REG_UPDATE(MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x1);
+ REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK,
+ NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[1]);
+ /* nbp_state_change_watermarkC */
+ REG_UPDATE(MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x2);
+ REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK,
+ NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[2]);
+ /* nbp_state_change_watermarkD */
+ REG_UPDATE(MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x3);
+ REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK,
+ NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[3]);
+
+ /* max_scaled_time */
+ REG_UPDATE(MULTI_LEVEL_QOS_CTRL, MAX_SCALED_TIME_TO_URGENT, params->max_scaled_time);
+
+ /* slice_lines */
+ REG_UPDATE(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_SLICE_SIZE, params->slice_lines-1);
+
+ /* Set arbitration unit for Luma/Chroma */
+ /* arb_unit=2 should be chosen for more efficiency */
+ /* Arbitration size, 0: 512 bytes 1: 1024 bytes 2: 2048 Bytes */
+ REG_UPDATE(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_CLIENT_ARBITRATION_SLICE, params->arbitration_slice);
+}
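
The four watermark blocks above follow an index/data pattern (select the set with the *_WATERMARK_MASK field, then write the value). An equivalent loop form, shown only as a sketch relying on the same REG_UPDATE macros and params layout, would be:

/* sketch only; behaviourally equivalent to the unrolled writes above */
int i;

for (i = 0; i < 4; i++) {
	REG_UPDATE(MCIF_WB_SCLK_CHANGE, MCIF_WB_CLI_WATERMARK_MASK, i);
	REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[i]);

	REG_UPDATE(MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, i);
	REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK,
			NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[i]);
}
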
+
+void mmhubbub2_config_mcif_irq(struct mcif_wb *mcif_wb,
+ struct mcif_irq_params *params)
+{
+ struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb);
+
+ /* Set interrupt mask */
+ REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_INT_EN, params->sw_int_en);
+ REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_SLICE_INT_EN, params->sw_slice_int_en);
+ REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN, params->sw_overrun_int_en);
+
+ REG_UPDATE(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_EN, params->vce_int_en);
+ if (mcif_wb20->mcif_wb_mask->MCIF_WB_BUFMGR_VCE_SLICE_INT_EN)
+ REG_UPDATE(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_SLICE_INT_EN, params->vce_slice_int_en);
+}
+
+void mmhubbub2_enable_mcif(struct mcif_wb *mcif_wb)
+{
+ struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb);
+
+ /* Enable Mcifwb */
+ REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_ENABLE, 1);
+}
+
+void mmhubbub2_disable_mcif(struct mcif_wb *mcif_wb)
+{
+ struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb);
+
+ /* disable buffer manager */
+ REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_ENABLE, 0);
+}
+
+/* set which group of pstate watermark to use and set wbif watermark change request */
+/*
+static void mmhubbub2_wbif_watermark_change_req(struct mcif_wb *mcif_wb, unsigned int wm_set)
+{
+ struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb);
+ uint32_t change_req;
+
+ REG_GET(SMU_WM_CONTROL, MCIF_WB0_WM_CHG_REQ, &change_req);
+ change_req = (change_req == 0) ? 1 : 0;
+ REG_UPDATE(SMU_WM_CONTROL, MCIF_WB0_WM_CHG_SEL, wm_set);
+ REG_UPDATE(SMU_WM_CONTROL, MCIF_WB0_WM_CHG_REQ, change_req);
+}
+*/
+/* Set watermark change interrupt disable bit */
+/*
+static void mmhubbub2_set_wbif_watermark_change_int_disable(struct mcif_wb *mcif_wb, unsigned int ack_int_dis)
+{
+ struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb);
+
+ REG_UPDATE(SMU_WM_CONTROL, MCIF_WB0_WM_CHG_ACK_INT_DIS, ack_int_dis);
+}
+*/
+/* Read watermark change interrupt status */
+/*
+unsigned int mmhubbub2_get_wbif_watermark_change_int_status(struct mcif_wb *mcif_wb)
+{
+ struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb);
+ uint32_t irq_status;
+
+ REG_GET(SMU_WM_CONTROL, MCIF_WB0_WM_CHG_ACK_INT_STATUS, &irq_status);
+ return irq_status;
+}
+*/
+
+void mcifwb2_dump_frame(struct mcif_wb *mcif_wb,
+ struct mcif_buf_params *mcif_params,
+ enum dwb_scaler_mode out_format,
+ unsigned int dest_width,
+ unsigned int dest_height,
+ struct mcif_wb_frame_dump_info *dump_info,
+ unsigned char *luma_buffer,
+ unsigned char *chroma_buffer,
+ unsigned char *dest_luma_buffer,
+ unsigned char *dest_chroma_buffer)
+{
+ struct dcn20_mmhubbub *mcif_wb20 = TO_DCN20_MMHUBBUB(mcif_wb);
+
+ REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, 0xf);
+
+ memcpy(dest_luma_buffer, luma_buffer, mcif_params->luma_pitch * dest_height);
+ memcpy(dest_chroma_buffer, chroma_buffer, mcif_params->chroma_pitch * dest_height / 2);
+
+ REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, 0x0);
+
+ dump_info->format = out_format;
+ dump_info->width = dest_width;
+ dump_info->height = dest_height;
+ dump_info->luma_pitch = mcif_params->luma_pitch;
+ dump_info->chroma_pitch = mcif_params->chroma_pitch;
+ dump_info->size = dest_height * (mcif_params->luma_pitch + mcif_params->chroma_pitch);
+}
+
+static const struct mcif_wb_funcs dcn20_mmhubbub_funcs = {
+ .enable_mcif = mmhubbub2_enable_mcif,
+ .disable_mcif = mmhubbub2_disable_mcif,
+ .config_mcif_buf = mmhubbub2_config_mcif_buf,
+ .config_mcif_arb = mmhubbub2_config_mcif_arb,
+ .config_mcif_irq = mmhubbub2_config_mcif_irq,
+ .dump_frame = mcifwb2_dump_frame,
+};
+
+void dcn20_mmhubbub_construct(struct dcn20_mmhubbub *mcif_wb20,
+ struct dc_context *ctx,
+ const struct dcn20_mmhubbub_registers *mcif_wb_regs,
+ const struct dcn20_mmhubbub_shift *mcif_wb_shift,
+ const struct dcn20_mmhubbub_mask *mcif_wb_mask,
+ int inst)
+{
+ mcif_wb20->base.ctx = ctx;
+
+ mcif_wb20->base.inst = inst;
+ mcif_wb20->base.funcs = &dcn20_mmhubbub_funcs;
+
+ mcif_wb20->mcif_wb_regs = mcif_wb_regs;
+ mcif_wb20->mcif_wb_shift = mcif_wb_shift;
+ mcif_wb20->mcif_wb_mask = mcif_wb_mask;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h
new file mode 100644
index 0000000000..5ab32aa51e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h
@@ -0,0 +1,517 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_MCIF_WB_DCN20_H__
+#define __DC_MCIF_WB_DCN20_H__
+
+#define TO_DCN20_MMHUBBUB(mcif_wb_base) \
+ container_of(mcif_wb_base, struct dcn20_mmhubbub, base)
+
+#define MCIF_WB_COMMON_REG_LIST_DCN2_0(inst) \
+ SRI(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUFMGR_CUR_LINE_R, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUFMGR_STATUS, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_PITCH, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_1_STATUS, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_1_STATUS2, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_2_STATUS, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_2_STATUS2, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_3_STATUS, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_3_STATUS2, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_4_STATUS, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_4_STATUS2, MCIF_WB, inst),\
+ SRI(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB, inst),\
+ SRI(MCIF_WB_SCLK_CHANGE, MCIF_WB, inst),\
+ SRI(MCIF_WB_TEST_DEBUG_INDEX, MCIF_WB, inst),\
+ SRI(MCIF_WB_TEST_DEBUG_DATA, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_1_ADDR_Y, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_1_ADDR_Y_OFFSET, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_1_ADDR_C, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_1_ADDR_C_OFFSET, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_2_ADDR_Y, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_2_ADDR_Y_OFFSET, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_2_ADDR_C, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_2_ADDR_C_OFFSET, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_3_ADDR_Y, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_3_ADDR_Y_OFFSET, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_3_ADDR_C, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_3_ADDR_C_OFFSET, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_4_ADDR_Y, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_4_ADDR_Y_OFFSET, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_4_ADDR_C, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_4_ADDR_C_OFFSET, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB, inst),\
+ SRI(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, MCIF_WB, inst),\
+ SRI(MCIF_WB_NB_PSTATE_CONTROL, MCIF_WB, inst),\
+ SRI(MCIF_WB_WATERMARK, MCIF_WB, inst),\
+ SRI(MCIF_WB_CLOCK_GATER_CONTROL, MCIF_WB, inst),\
+ SRI(MCIF_WB_WARM_UP_CNTL, MCIF_WB, inst),\
+ SRI(MCIF_WB_SELF_REFRESH_CONTROL, MCIF_WB, inst),\
+ SRI(MULTI_LEVEL_QOS_CTRL, MCIF_WB, inst),\
+ SRI(MCIF_WB_SECURITY_LEVEL, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_LUMA_SIZE, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_CHROMA_SIZE, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_1_RESOLUTION, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_2_RESOLUTION, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_3_RESOLUTION, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_4_RESOLUTION, MCIF_WB, inst),\
+ SRI(SMU_WM_CONTROL, WBIF, inst)
+
+#define MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(mask_sh) \
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_ENABLE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_INT_EN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_INT_ACK, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_SLICE_INT_EN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_P_VMID, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUF_ADDR_FENCE_EN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_CUR_LINE_R, MCIF_WB_BUFMGR_CUR_LINE_R, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_VCE_INT_STATUS, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_SW_INT_STATUS, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_SW_OVERRUN_INT_STATUS, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_CUR_BUF, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_BUFTAG, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_CUR_LINE_L, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_NEXT_BUF, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_PITCH, MCIF_WB_BUF_LUMA_PITCH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_PITCH, MCIF_WB_BUF_CHROMA_PITCH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_ACTIVE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_SW_LOCKED, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_VCE_LOCKED, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_OVERFLOW, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_DISABLE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_MODE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_BUFTAG, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_NXT_BUF, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_FIELD, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_CUR_LINE_L, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_LONG_LINE_ERROR, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_SHORT_LINE_ERROR, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_FRAME_LENGTH_ERROR, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_CUR_LINE_R, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_NEW_CONTENT, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_COLOR_DEPTH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_TMZ_BLACK_PIXEL, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_TMZ, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_Y_OVERRUN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_C_OVERRUN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_ACTIVE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_SW_LOCKED, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_VCE_LOCKED, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_OVERFLOW, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_DISABLE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_MODE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_BUFTAG, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_NXT_BUF, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_FIELD, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_CUR_LINE_L, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_LONG_LINE_ERROR, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_SHORT_LINE_ERROR, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_FRAME_LENGTH_ERROR, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_CUR_LINE_R, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_NEW_CONTENT, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_COLOR_DEPTH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_TMZ_BLACK_PIXEL, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_TMZ, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_Y_OVERRUN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_C_OVERRUN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_ACTIVE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_SW_LOCKED, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_VCE_LOCKED, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_OVERFLOW, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_DISABLE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_MODE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_BUFTAG, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_NXT_BUF, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_FIELD, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_CUR_LINE_L, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_LONG_LINE_ERROR, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_SHORT_LINE_ERROR, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_FRAME_LENGTH_ERROR, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_CUR_LINE_R, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_NEW_CONTENT, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_COLOR_DEPTH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_TMZ_BLACK_PIXEL, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_TMZ, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_Y_OVERRUN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_C_OVERRUN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_ACTIVE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_SW_LOCKED, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_VCE_LOCKED, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_OVERFLOW, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_DISABLE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_MODE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_BUFTAG, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_NXT_BUF, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_FIELD, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_CUR_LINE_L, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_LONG_LINE_ERROR, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_SHORT_LINE_ERROR, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_FRAME_LENGTH_ERROR, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_CUR_LINE_R, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_NEW_CONTENT, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_COLOR_DEPTH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_TMZ_BLACK_PIXEL, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_TMZ, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_Y_OVERRUN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_C_OVERRUN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_CLIENT_ARBITRATION_SLICE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_TIME_PER_PIXEL, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_SCLK_CHANGE, WM_CHANGE_ACK_FORCE_ON, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_SCLK_CHANGE, MCIF_WB_CLI_WATERMARK_MASK, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_TEST_DEBUG_INDEX, MCIF_WB_TEST_DEBUG_INDEX, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_TEST_DEBUG_DATA, MCIF_WB_TEST_DEBUG_DATA, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_Y, MCIF_WB_BUF_1_ADDR_Y, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_Y_OFFSET, MCIF_WB_BUF_1_ADDR_Y_OFFSET, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_C, MCIF_WB_BUF_1_ADDR_C, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_C_OFFSET, MCIF_WB_BUF_1_ADDR_C_OFFSET, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_Y, MCIF_WB_BUF_2_ADDR_Y, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_Y_OFFSET, MCIF_WB_BUF_2_ADDR_Y_OFFSET, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_C, MCIF_WB_BUF_2_ADDR_C, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_C_OFFSET, MCIF_WB_BUF_2_ADDR_C_OFFSET, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_Y, MCIF_WB_BUF_3_ADDR_Y, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_Y_OFFSET, MCIF_WB_BUF_3_ADDR_Y_OFFSET, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_C, MCIF_WB_BUF_3_ADDR_C, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_C_OFFSET, MCIF_WB_BUF_3_ADDR_C_OFFSET, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_Y, MCIF_WB_BUF_4_ADDR_Y, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_Y_OFFSET, MCIF_WB_BUF_4_ADDR_Y_OFFSET, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_C, MCIF_WB_BUF_4_ADDR_C, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_C_OFFSET, MCIF_WB_BUF_4_ADDR_C_OFFSET, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK_IGNORE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_EN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_ACK, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_SLICE_INT_EN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_SLICE_SIZE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_REFRESH_WATERMARK, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_URGENT_DURING_REQUEST, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_FORCE_ON, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_ALLOW_FOR_URGENT, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_CLOCK_GATER_CONTROL, MCIF_WB_CLI_CLOCK_GATER_OVERRIDE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_WARM_UP_CNTL, MCIF_WB_PITCH_SIZE_WARMUP, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_SELF_REFRESH_CONTROL, DIS_REFRESH_UNDER_NBPREQ, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_SELF_REFRESH_CONTROL, PERFRAME_SELF_REFRESH, mask_sh),\
+ SF(MCIF_WB0_MULTI_LEVEL_QOS_CTRL, MAX_SCALED_TIME_TO_URGENT, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_SECURITY_LEVEL, MCIF_WB_SECURITY_LEVEL, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_LUMA_SIZE, MCIF_WB_BUF_LUMA_SIZE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_CHROMA_SIZE, MCIF_WB_BUF_CHROMA_SIZE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_WB_BUF_1_ADDR_Y_HIGH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_WB_BUF_1_ADDR_C_HIGH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_WB_BUF_2_ADDR_Y_HIGH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_WB_BUF_2_ADDR_C_HIGH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_WB_BUF_3_ADDR_Y_HIGH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_WB_BUF_3_ADDR_C_HIGH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_WB_BUF_4_ADDR_Y_HIGH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_WB_BUF_4_ADDR_C_HIGH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_RESOLUTION, MCIF_WB_BUF_1_RESOLUTION_WIDTH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_RESOLUTION, MCIF_WB_BUF_1_RESOLUTION_HEIGHT, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_RESOLUTION, MCIF_WB_BUF_2_RESOLUTION_WIDTH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_RESOLUTION, MCIF_WB_BUF_2_RESOLUTION_HEIGHT, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_RESOLUTION, MCIF_WB_BUF_3_RESOLUTION_WIDTH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_RESOLUTION, MCIF_WB_BUF_3_RESOLUTION_HEIGHT, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_RESOLUTION, MCIF_WB_BUF_4_RESOLUTION_WIDTH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_RESOLUTION, MCIF_WB_BUF_4_RESOLUTION_HEIGHT, mask_sh),\
+ SF(WBIF0_SMU_WM_CONTROL, MCIF_WB0_WM_CHG_SEL, mask_sh),\
+ SF(WBIF0_SMU_WM_CONTROL, MCIF_WB0_WM_CHG_REQ, mask_sh),\
+ SF(WBIF0_SMU_WM_CONTROL, MCIF_WB0_WM_CHG_ACK_INT_DIS, mask_sh),\
+ SF(WBIF0_SMU_WM_CONTROL, MCIF_WB0_WM_CHG_ACK_INT_STATUS, mask_sh)
+
+#define MCIF_WB_REG_FIELD_LIST_DCN2_0(type) \
+ type MCIF_WB_BUFMGR_ENABLE;\
+ type MCIF_WB_BUFMGR_SW_INT_EN;\
+ type MCIF_WB_BUFMGR_SW_INT_ACK;\
+ type MCIF_WB_BUFMGR_SW_SLICE_INT_EN;\
+ type MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN;\
+ type MCIF_WB_BUFMGR_SW_LOCK;\
+ type MCIF_WB_P_VMID;\
+ type MCIF_WB_BUF_ADDR_FENCE_EN;\
+ type MCIF_WB_BUFMGR_CUR_LINE_R;\
+ type MCIF_WB_BUFMGR_VCE_INT_STATUS;\
+ type MCIF_WB_BUFMGR_SW_INT_STATUS;\
+ type MCIF_WB_BUFMGR_SW_OVERRUN_INT_STATUS;\
+ type MCIF_WB_BUFMGR_CUR_BUF;\
+ type MCIF_WB_BUFMGR_BUFTAG;\
+ type MCIF_WB_BUFMGR_CUR_LINE_L;\
+ type MCIF_WB_BUFMGR_NEXT_BUF;\
+ type MCIF_WB_BUF_LUMA_PITCH;\
+ type MCIF_WB_BUF_CHROMA_PITCH;\
+ type MCIF_WB_BUF_1_ACTIVE;\
+ type MCIF_WB_BUF_1_SW_LOCKED;\
+ type MCIF_WB_BUF_1_VCE_LOCKED;\
+ type MCIF_WB_BUF_1_OVERFLOW;\
+ type MCIF_WB_BUF_1_DISABLE;\
+ type MCIF_WB_BUF_1_MODE;\
+ type MCIF_WB_BUF_1_BUFTAG;\
+ type MCIF_WB_BUF_1_NXT_BUF;\
+ type MCIF_WB_BUF_1_FIELD;\
+ type MCIF_WB_BUF_1_CUR_LINE_L;\
+ type MCIF_WB_BUF_1_LONG_LINE_ERROR;\
+ type MCIF_WB_BUF_1_SHORT_LINE_ERROR;\
+ type MCIF_WB_BUF_1_FRAME_LENGTH_ERROR;\
+ type MCIF_WB_BUF_1_CUR_LINE_R;\
+ type MCIF_WB_BUF_1_NEW_CONTENT;\
+ type MCIF_WB_BUF_1_COLOR_DEPTH;\
+ type MCIF_WB_BUF_1_TMZ_BLACK_PIXEL;\
+ type MCIF_WB_BUF_1_TMZ;\
+ type MCIF_WB_BUF_1_Y_OVERRUN;\
+ type MCIF_WB_BUF_1_C_OVERRUN;\
+ type MCIF_WB_BUF_2_ACTIVE;\
+ type MCIF_WB_BUF_2_SW_LOCKED;\
+ type MCIF_WB_BUF_2_VCE_LOCKED;\
+ type MCIF_WB_BUF_2_OVERFLOW;\
+ type MCIF_WB_BUF_2_DISABLE;\
+ type MCIF_WB_BUF_2_MODE;\
+ type MCIF_WB_BUF_2_BUFTAG;\
+ type MCIF_WB_BUF_2_NXT_BUF;\
+ type MCIF_WB_BUF_2_FIELD;\
+ type MCIF_WB_BUF_2_CUR_LINE_L;\
+ type MCIF_WB_BUF_2_LONG_LINE_ERROR;\
+ type MCIF_WB_BUF_2_SHORT_LINE_ERROR;\
+ type MCIF_WB_BUF_2_FRAME_LENGTH_ERROR;\
+ type MCIF_WB_BUF_2_CUR_LINE_R;\
+ type MCIF_WB_BUF_2_NEW_CONTENT;\
+ type MCIF_WB_BUF_2_COLOR_DEPTH;\
+ type MCIF_WB_BUF_2_TMZ_BLACK_PIXEL;\
+ type MCIF_WB_BUF_2_TMZ;\
+ type MCIF_WB_BUF_2_Y_OVERRUN;\
+ type MCIF_WB_BUF_2_C_OVERRUN;\
+ type MCIF_WB_BUF_3_ACTIVE;\
+ type MCIF_WB_BUF_3_SW_LOCKED;\
+ type MCIF_WB_BUF_3_VCE_LOCKED;\
+ type MCIF_WB_BUF_3_OVERFLOW;\
+ type MCIF_WB_BUF_3_DISABLE;\
+ type MCIF_WB_BUF_3_MODE;\
+ type MCIF_WB_BUF_3_BUFTAG;\
+ type MCIF_WB_BUF_3_NXT_BUF;\
+ type MCIF_WB_BUF_3_FIELD;\
+ type MCIF_WB_BUF_3_CUR_LINE_L;\
+ type MCIF_WB_BUF_3_LONG_LINE_ERROR;\
+ type MCIF_WB_BUF_3_SHORT_LINE_ERROR;\
+ type MCIF_WB_BUF_3_FRAME_LENGTH_ERROR;\
+ type MCIF_WB_BUF_3_CUR_LINE_R;\
+ type MCIF_WB_BUF_3_NEW_CONTENT;\
+ type MCIF_WB_BUF_3_COLOR_DEPTH;\
+ type MCIF_WB_BUF_3_TMZ_BLACK_PIXEL;\
+ type MCIF_WB_BUF_3_TMZ;\
+ type MCIF_WB_BUF_3_Y_OVERRUN;\
+ type MCIF_WB_BUF_3_C_OVERRUN;\
+ type MCIF_WB_BUF_4_ACTIVE;\
+ type MCIF_WB_BUF_4_SW_LOCKED;\
+ type MCIF_WB_BUF_4_VCE_LOCKED;\
+ type MCIF_WB_BUF_4_OVERFLOW;\
+ type MCIF_WB_BUF_4_DISABLE;\
+ type MCIF_WB_BUF_4_MODE;\
+ type MCIF_WB_BUF_4_BUFTAG;\
+ type MCIF_WB_BUF_4_NXT_BUF;\
+ type MCIF_WB_BUF_4_FIELD;\
+ type MCIF_WB_BUF_4_CUR_LINE_L;\
+ type MCIF_WB_BUF_4_LONG_LINE_ERROR;\
+ type MCIF_WB_BUF_4_SHORT_LINE_ERROR;\
+ type MCIF_WB_BUF_4_FRAME_LENGTH_ERROR;\
+ type MCIF_WB_BUF_4_CUR_LINE_R;\
+ type MCIF_WB_BUF_4_NEW_CONTENT;\
+ type MCIF_WB_BUF_4_COLOR_DEPTH;\
+ type MCIF_WB_BUF_4_TMZ_BLACK_PIXEL;\
+ type MCIF_WB_BUF_4_TMZ;\
+ type MCIF_WB_BUF_4_Y_OVERRUN;\
+ type MCIF_WB_BUF_4_C_OVERRUN;\
+ type MCIF_WB_CLIENT_ARBITRATION_SLICE;\
+ type MCIF_WB_TIME_PER_PIXEL;\
+ type WM_CHANGE_ACK_FORCE_ON;\
+ type MCIF_WB_CLI_WATERMARK_MASK;\
+ type MCIF_WB_TEST_DEBUG_INDEX;\
+ type MCIF_WB_TEST_DEBUG_DATA;\
+ type MCIF_WB_BUF_1_ADDR_Y;\
+ type MCIF_WB_BUF_1_ADDR_Y_OFFSET;\
+ type MCIF_WB_BUF_1_ADDR_C;\
+ type MCIF_WB_BUF_1_ADDR_C_OFFSET;\
+ type MCIF_WB_BUF_2_ADDR_Y;\
+ type MCIF_WB_BUF_2_ADDR_Y_OFFSET;\
+ type MCIF_WB_BUF_2_ADDR_C;\
+ type MCIF_WB_BUF_2_ADDR_C_OFFSET;\
+ type MCIF_WB_BUF_3_ADDR_Y;\
+ type MCIF_WB_BUF_3_ADDR_Y_OFFSET;\
+ type MCIF_WB_BUF_3_ADDR_C;\
+ type MCIF_WB_BUF_3_ADDR_C_OFFSET;\
+ type MCIF_WB_BUF_4_ADDR_Y;\
+ type MCIF_WB_BUF_4_ADDR_Y_OFFSET;\
+ type MCIF_WB_BUF_4_ADDR_C;\
+ type MCIF_WB_BUF_4_ADDR_C_OFFSET;\
+ type MCIF_WB_BUFMGR_VCE_LOCK_IGNORE;\
+ type MCIF_WB_BUFMGR_VCE_INT_EN;\
+ type MCIF_WB_BUFMGR_VCE_INT_ACK;\
+ type MCIF_WB_BUFMGR_VCE_SLICE_INT_EN;\
+ type MCIF_WB_BUFMGR_VCE_LOCK;\
+ type MCIF_WB_BUFMGR_SLICE_SIZE;\
+ type NB_PSTATE_CHANGE_REFRESH_WATERMARK;\
+ type NB_PSTATE_CHANGE_URGENT_DURING_REQUEST;\
+ type NB_PSTATE_CHANGE_FORCE_ON;\
+ type NB_PSTATE_ALLOW_FOR_URGENT;\
+ type NB_PSTATE_CHANGE_WATERMARK_MASK;\
+ type MCIF_WB_CLI_WATERMARK;\
+ type MCIF_WB_CLI_CLOCK_GATER_OVERRIDE;\
+ type MCIF_WB_PITCH_SIZE_WARMUP;\
+ type DIS_REFRESH_UNDER_NBPREQ;\
+ type PERFRAME_SELF_REFRESH;\
+ type MAX_SCALED_TIME_TO_URGENT;\
+ type MCIF_WB_SECURITY_LEVEL;\
+ type MCIF_WB_BUF_LUMA_SIZE;\
+ type MCIF_WB_BUF_CHROMA_SIZE;\
+ type MCIF_WB_BUF_1_ADDR_Y_HIGH;\
+ type MCIF_WB_BUF_1_ADDR_C_HIGH;\
+ type MCIF_WB_BUF_2_ADDR_Y_HIGH;\
+ type MCIF_WB_BUF_2_ADDR_C_HIGH;\
+ type MCIF_WB_BUF_3_ADDR_Y_HIGH;\
+ type MCIF_WB_BUF_3_ADDR_C_HIGH;\
+ type MCIF_WB_BUF_4_ADDR_Y_HIGH;\
+ type MCIF_WB_BUF_4_ADDR_C_HIGH;\
+ type MCIF_WB_BUF_1_RESOLUTION_WIDTH;\
+ type MCIF_WB_BUF_1_RESOLUTION_HEIGHT;\
+ type MCIF_WB_BUF_2_RESOLUTION_WIDTH;\
+ type MCIF_WB_BUF_2_RESOLUTION_HEIGHT;\
+ type MCIF_WB_BUF_3_RESOLUTION_WIDTH;\
+ type MCIF_WB_BUF_3_RESOLUTION_HEIGHT;\
+ type MCIF_WB_BUF_4_RESOLUTION_WIDTH;\
+ type MCIF_WB_BUF_4_RESOLUTION_HEIGHT;\
+ type MCIF_WB0_WM_CHG_SEL;\
+ type MCIF_WB0_WM_CHG_REQ;\
+ type MCIF_WB0_WM_CHG_ACK_INT_DIS;\
+ type MCIF_WB0_WM_CHG_ACK_INT_STATUS
+
+#define MCIF_WB_REG_VARIABLE_LIST_DCN2_0 \
+ uint32_t MCIF_WB_BUFMGR_SW_CONTROL;\
+ uint32_t MCIF_WB_BUFMGR_CUR_LINE_R;\
+ uint32_t MCIF_WB_BUFMGR_STATUS;\
+ uint32_t MCIF_WB_BUF_PITCH;\
+ uint32_t MCIF_WB_BUF_1_STATUS;\
+ uint32_t MCIF_WB_BUF_1_STATUS2;\
+ uint32_t MCIF_WB_BUF_2_STATUS;\
+ uint32_t MCIF_WB_BUF_2_STATUS2;\
+ uint32_t MCIF_WB_BUF_3_STATUS;\
+ uint32_t MCIF_WB_BUF_3_STATUS2;\
+ uint32_t MCIF_WB_BUF_4_STATUS;\
+ uint32_t MCIF_WB_BUF_4_STATUS2;\
+ uint32_t MCIF_WB_ARBITRATION_CONTROL;\
+ uint32_t MCIF_WB_SCLK_CHANGE;\
+ uint32_t MCIF_WB_TEST_DEBUG_INDEX;\
+ uint32_t MCIF_WB_TEST_DEBUG_DATA;\
+ uint32_t MCIF_WB_BUF_1_ADDR_Y;\
+ uint32_t MCIF_WB_BUF_1_ADDR_Y_OFFSET;\
+ uint32_t MCIF_WB_BUF_1_ADDR_C;\
+ uint32_t MCIF_WB_BUF_1_ADDR_C_OFFSET;\
+ uint32_t MCIF_WB_BUF_2_ADDR_Y;\
+ uint32_t MCIF_WB_BUF_2_ADDR_Y_OFFSET;\
+ uint32_t MCIF_WB_BUF_2_ADDR_C;\
+ uint32_t MCIF_WB_BUF_2_ADDR_C_OFFSET;\
+ uint32_t MCIF_WB_BUF_3_ADDR_Y;\
+ uint32_t MCIF_WB_BUF_3_ADDR_Y_OFFSET;\
+ uint32_t MCIF_WB_BUF_3_ADDR_C;\
+ uint32_t MCIF_WB_BUF_3_ADDR_C_OFFSET;\
+ uint32_t MCIF_WB_BUF_4_ADDR_Y;\
+ uint32_t MCIF_WB_BUF_4_ADDR_Y_OFFSET;\
+ uint32_t MCIF_WB_BUF_4_ADDR_C;\
+ uint32_t MCIF_WB_BUF_4_ADDR_C_OFFSET;\
+ uint32_t MCIF_WB_BUFMGR_VCE_CONTROL;\
+ uint32_t MCIF_WB_NB_PSTATE_LATENCY_WATERMARK;\
+ uint32_t MCIF_WB_NB_PSTATE_CONTROL;\
+ uint32_t MCIF_WB_WATERMARK;\
+ uint32_t MCIF_WB_CLOCK_GATER_CONTROL;\
+ uint32_t MCIF_WB_WARM_UP_CNTL;\
+ uint32_t MCIF_WB_SELF_REFRESH_CONTROL;\
+ uint32_t MULTI_LEVEL_QOS_CTRL;\
+ uint32_t MCIF_WB_SECURITY_LEVEL;\
+ uint32_t MCIF_WB_BUF_LUMA_SIZE;\
+ uint32_t MCIF_WB_BUF_CHROMA_SIZE;\
+ uint32_t MCIF_WB_BUF_1_ADDR_Y_HIGH;\
+ uint32_t MCIF_WB_BUF_1_ADDR_C_HIGH;\
+ uint32_t MCIF_WB_BUF_2_ADDR_Y_HIGH;\
+ uint32_t MCIF_WB_BUF_2_ADDR_C_HIGH;\
+ uint32_t MCIF_WB_BUF_3_ADDR_Y_HIGH;\
+ uint32_t MCIF_WB_BUF_3_ADDR_C_HIGH;\
+ uint32_t MCIF_WB_BUF_4_ADDR_Y_HIGH;\
+ uint32_t MCIF_WB_BUF_4_ADDR_C_HIGH;\
+ uint32_t MCIF_WB_BUF_1_RESOLUTION;\
+ uint32_t MCIF_WB_BUF_2_RESOLUTION;\
+ uint32_t MCIF_WB_BUF_3_RESOLUTION;\
+ uint32_t MCIF_WB_BUF_4_RESOLUTION;\
+ uint32_t SMU_WM_CONTROL
+
+struct dcn20_mmhubbub_registers {
+ MCIF_WB_REG_VARIABLE_LIST_DCN2_0;
+};
+
+
+struct dcn20_mmhubbub_mask {
+ MCIF_WB_REG_FIELD_LIST_DCN2_0(uint32_t);
+};
+
+struct dcn20_mmhubbub_shift {
+ MCIF_WB_REG_FIELD_LIST_DCN2_0(uint8_t);
+};
+
+struct dcn20_mmhubbub {
+ struct mcif_wb base;
+ const struct dcn20_mmhubbub_registers *mcif_wb_regs;
+ const struct dcn20_mmhubbub_shift *mcif_wb_shift;
+ const struct dcn20_mmhubbub_mask *mcif_wb_mask;
+};
+
+void mmhubbub2_config_mcif_irq(struct mcif_wb *mcif_wb,
+ struct mcif_irq_params *params);
+
+void mmhubbub2_enable_mcif(struct mcif_wb *mcif_wb);
+
+void mmhubbub2_disable_mcif(struct mcif_wb *mcif_wb);
+
+void mcifwb2_dump_frame(struct mcif_wb *mcif_wb,
+ struct mcif_buf_params *mcif_params,
+ enum dwb_scaler_mode out_format,
+ unsigned int dest_width,
+ unsigned int dest_height,
+ struct mcif_wb_frame_dump_info *dump_info,
+ unsigned char *luma_buffer,
+ unsigned char *chroma_buffer,
+ unsigned char *dest_luma_buffer,
+ unsigned char *dest_chroma_buffer);
+
+void dcn20_mmhubbub_construct(struct dcn20_mmhubbub *mcif_wb20,
+ struct dc_context *ctx,
+ const struct dcn20_mmhubbub_registers *mcif_wb_regs,
+ const struct dcn20_mmhubbub_shift *mcif_wb_shift,
+ const struct dcn20_mmhubbub_mask *mcif_wb_mask,
+ int inst);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
new file mode 100644
index 0000000000..5da6e44f28
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -0,0 +1,590 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dcn20_mpc.h"
+
+#include "reg_helper.h"
+#include "dc.h"
+#include "mem_input.h"
+#include "dcn10/dcn10_cm_common.h"
+
+#define REG(reg)\
+ mpc20->mpc_regs->reg
+
+#define IND_REG(index) \
+ (index)
+
+#define CTX \
+ mpc20->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ mpc20->mpc_shift->field_name, mpc20->mpc_mask->field_name
+
+#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+
+void mpc2_update_blending(
+ struct mpc *mpc,
+ struct mpcc_blnd_cfg *blnd_cfg,
+ int mpcc_id)
+{
+ struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+
+ struct mpcc *mpcc = mpc1_get_mpcc(mpc, mpcc_id);
+
+ REG_UPDATE_7(MPCC_CONTROL[mpcc_id],
+ MPCC_ALPHA_BLND_MODE, blnd_cfg->alpha_mode,
+ MPCC_ALPHA_MULTIPLIED_MODE, blnd_cfg->pre_multiplied_alpha,
+ MPCC_BLND_ACTIVE_OVERLAP_ONLY, blnd_cfg->overlap_only,
+ MPCC_GLOBAL_ALPHA, blnd_cfg->global_alpha,
+ MPCC_GLOBAL_GAIN, blnd_cfg->global_gain,
+ MPCC_BG_BPC, blnd_cfg->background_color_bpc,
+ MPCC_BOT_GAIN_MODE, blnd_cfg->bottom_gain_mode);
+
+ REG_SET(MPCC_TOP_GAIN[mpcc_id], 0, MPCC_TOP_GAIN, blnd_cfg->top_gain);
+ REG_SET(MPCC_BOT_GAIN_INSIDE[mpcc_id], 0, MPCC_BOT_GAIN_INSIDE, blnd_cfg->bottom_inside_gain);
+ REG_SET(MPCC_BOT_GAIN_OUTSIDE[mpcc_id], 0, MPCC_BOT_GAIN_OUTSIDE, blnd_cfg->bottom_outside_gain);
+
+ mpcc->blnd_cfg = *blnd_cfg;
+}
+
+void mpc2_set_denorm(
+ struct mpc *mpc,
+ int opp_id,
+ enum dc_color_depth output_depth)
+{
+ struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+ int denorm_mode = 0;
+
+ switch (output_depth) {
+ case COLOR_DEPTH_666:
+ denorm_mode = 1;
+ break;
+ case COLOR_DEPTH_888:
+ denorm_mode = 2;
+ break;
+ case COLOR_DEPTH_999:
+ denorm_mode = 3;
+ break;
+ case COLOR_DEPTH_101010:
+ denorm_mode = 4;
+ break;
+ case COLOR_DEPTH_111111:
+ denorm_mode = 5;
+ break;
+ case COLOR_DEPTH_121212:
+ denorm_mode = 6;
+ break;
+ case COLOR_DEPTH_141414:
+ case COLOR_DEPTH_161616:
+ default:
+ /* not a valid use case */
+ break;
+ }
+
+ REG_UPDATE(DENORM_CONTROL[opp_id],
+ MPC_OUT_DENORM_MODE, denorm_mode);
+}
+
+void mpc2_set_denorm_clamp(
+ struct mpc *mpc,
+ int opp_id,
+ struct mpc_denorm_clamp denorm_clamp)
+{
+ struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+
+ REG_UPDATE_2(DENORM_CONTROL[opp_id],
+ MPC_OUT_DENORM_CLAMP_MAX_R_CR, denorm_clamp.clamp_max_r_cr,
+ MPC_OUT_DENORM_CLAMP_MIN_R_CR, denorm_clamp.clamp_min_r_cr);
+ REG_UPDATE_2(DENORM_CLAMP_G_Y[opp_id],
+ MPC_OUT_DENORM_CLAMP_MAX_G_Y, denorm_clamp.clamp_max_g_y,
+ MPC_OUT_DENORM_CLAMP_MIN_G_Y, denorm_clamp.clamp_min_g_y);
+ REG_UPDATE_2(DENORM_CLAMP_B_CB[opp_id],
+ MPC_OUT_DENORM_CLAMP_MAX_B_CB, denorm_clamp.clamp_max_b_cb,
+ MPC_OUT_DENORM_CLAMP_MIN_B_CB, denorm_clamp.clamp_min_b_cb);
+}
+
+
+
+void mpc2_set_output_csc(
+ struct mpc *mpc,
+ int opp_id,
+ const uint16_t *regval,
+ enum mpc_output_csc_mode ocsc_mode)
+{
+ uint32_t cur_mode;
+ struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+ struct color_matrices_reg ocsc_regs;
+
+ if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) {
+ REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
+ return;
+ }
+
+ if (regval == NULL) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ /* determine which CSC coefficients (A or B) we are using
+ * currently. select the alternate set to double buffer
+ * the CSC update so CSC is updated on frame boundary
+ */
+ IX_REG_GET(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA,
+ MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX,
+ MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode);
+
+ if (cur_mode != MPC_OUTPUT_CSC_COEF_A)
+ ocsc_mode = MPC_OUTPUT_CSC_COEF_A;
+ else
+ ocsc_mode = MPC_OUTPUT_CSC_COEF_B;
+
+ ocsc_regs.shifts.csc_c11 = mpc20->mpc_shift->MPC_OCSC_C11_A;
+ ocsc_regs.masks.csc_c11 = mpc20->mpc_mask->MPC_OCSC_C11_A;
+ ocsc_regs.shifts.csc_c12 = mpc20->mpc_shift->MPC_OCSC_C12_A;
+ ocsc_regs.masks.csc_c12 = mpc20->mpc_mask->MPC_OCSC_C12_A;
+
+ if (ocsc_mode == MPC_OUTPUT_CSC_COEF_A) {
+ ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_A[opp_id]);
+ ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_A[opp_id]);
+ } else {
+ ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_B[opp_id]);
+ ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_B[opp_id]);
+ }
+
+ cm_helper_program_color_matrices(
+ mpc20->base.ctx,
+ regval,
+ &ocsc_regs);
+
+ REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
+}
+
+void mpc2_set_ocsc_default(
+ struct mpc *mpc,
+ int opp_id,
+ enum dc_color_space color_space,
+ enum mpc_output_csc_mode ocsc_mode)
+{
+ uint32_t cur_mode;
+ struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+ uint32_t arr_size;
+ struct color_matrices_reg ocsc_regs;
+ const uint16_t *regval = NULL;
+
+ if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) {
+ REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
+ return;
+ }
+
+ regval = find_color_matrix(color_space, &arr_size);
+
+ if (regval == NULL) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ /* determine which CSC coefficients (A or B) we are using
+ * currently. select the alternate set to double buffer
+ * the CSC update so CSC is updated on frame boundary
+ */
+ IX_REG_GET(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA,
+ MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX,
+ MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode);
+
+ if (cur_mode != MPC_OUTPUT_CSC_COEF_A)
+ ocsc_mode = MPC_OUTPUT_CSC_COEF_A;
+ else
+ ocsc_mode = MPC_OUTPUT_CSC_COEF_B;
+
+ ocsc_regs.shifts.csc_c11 = mpc20->mpc_shift->MPC_OCSC_C11_A;
+ ocsc_regs.masks.csc_c11 = mpc20->mpc_mask->MPC_OCSC_C11_A;
+ ocsc_regs.shifts.csc_c12 = mpc20->mpc_shift->MPC_OCSC_C12_A;
+ ocsc_regs.masks.csc_c12 = mpc20->mpc_mask->MPC_OCSC_C12_A;
+
+
+ if (ocsc_mode == MPC_OUTPUT_CSC_COEF_A) {
+ ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_A[opp_id]);
+ ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_A[opp_id]);
+ } else {
+ ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_B[opp_id]);
+ ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_B[opp_id]);
+ }
+
+ cm_helper_program_color_matrices(
+ mpc20->base.ctx,
+ regval,
+ &ocsc_regs);
+
+ REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
+}
+
+static void mpc2_ogam_get_reg_field(
+ struct mpc *mpc,
+ struct xfer_func_reg *reg)
+{
+ struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+
+ reg->shifts.exp_region0_lut_offset = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;
+ reg->masks.exp_region0_lut_offset = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;
+ reg->shifts.exp_region0_num_segments = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
+ reg->masks.exp_region0_num_segments = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
+ reg->shifts.exp_region1_lut_offset = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET;
+ reg->masks.exp_region1_lut_offset = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET;
+ reg->shifts.exp_region1_num_segments = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
+ reg->masks.exp_region1_num_segments = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
+ reg->shifts.field_region_end = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_END_B;
+ reg->masks.field_region_end = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_END_B;
+ reg->shifts.field_region_end_slope = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B;
+ reg->masks.field_region_end_slope = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B;
+ reg->shifts.field_region_end_base = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B;
+ reg->masks.field_region_end_base = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B;
+ reg->shifts.field_region_linear_slope = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B;
+ reg->masks.field_region_linear_slope = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B;
+ reg->shifts.exp_region_start = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_START_B;
+ reg->masks.exp_region_start = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_START_B;
+ reg->shifts.exp_resion_start_segment = mpc20->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;
+ reg->masks.exp_resion_start_segment = mpc20->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;
+}
+
+void mpc20_power_on_ogam_lut(
+ struct mpc *mpc, int mpcc_id,
+ bool power_on)
+{
+ struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+
+ REG_SET(MPCC_MEM_PWR_CTRL[mpcc_id], 0,
+ MPCC_OGAM_MEM_PWR_DIS, power_on == true ? 1:0);
+
+}
+
+static void mpc20_configure_ogam_lut(
+ struct mpc *mpc, int mpcc_id,
+ bool is_ram_a)
+{
+ struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+
+ REG_UPDATE_2(MPCC_OGAM_LUT_RAM_CONTROL[mpcc_id],
+ MPCC_OGAM_LUT_WRITE_EN_MASK, 7,
+ MPCC_OGAM_LUT_RAM_SEL, is_ram_a == true ? 0:1);
+
+ REG_SET(MPCC_OGAM_LUT_INDEX[mpcc_id], 0, MPCC_OGAM_LUT_INDEX, 0);
+}
+
+static enum dc_lut_mode mpc20_get_ogam_current(struct mpc *mpc, int mpcc_id)
+{
+ enum dc_lut_mode mode;
+ uint32_t state_mode;
+ struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+
+ REG_GET(MPCC_OGAM_LUT_RAM_CONTROL[mpcc_id], MPCC_OGAM_CONFIG_STATUS, &state_mode);
+
+ switch (state_mode) {
+ case 0:
+ mode = LUT_BYPASS;
+ break;
+ case 1:
+ mode = LUT_RAM_A;
+ break;
+ case 2:
+ mode = LUT_RAM_B;
+ break;
+ default:
+ mode = LUT_BYPASS;
+ break;
+ }
+
+ return mode;
+}
+
+static void mpc2_program_lutb(struct mpc *mpc, int mpcc_id,
+ const struct pwl_params *params)
+{
+ struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+ struct xfer_func_reg gam_regs;
+
+ mpc2_ogam_get_reg_field(mpc, &gam_regs);
+
+ gam_regs.start_cntl_b = REG(MPCC_OGAM_RAMB_START_CNTL_B[mpcc_id]);
+ gam_regs.start_cntl_g = REG(MPCC_OGAM_RAMB_START_CNTL_G[mpcc_id]);
+ gam_regs.start_cntl_r = REG(MPCC_OGAM_RAMB_START_CNTL_R[mpcc_id]);
+ gam_regs.start_slope_cntl_b = REG(MPCC_OGAM_RAMB_SLOPE_CNTL_B[mpcc_id]);
+ gam_regs.start_slope_cntl_g = REG(MPCC_OGAM_RAMB_SLOPE_CNTL_G[mpcc_id]);
+ gam_regs.start_slope_cntl_r = REG(MPCC_OGAM_RAMB_SLOPE_CNTL_R[mpcc_id]);
+ gam_regs.start_end_cntl1_b = REG(MPCC_OGAM_RAMB_END_CNTL1_B[mpcc_id]);
+ gam_regs.start_end_cntl2_b = REG(MPCC_OGAM_RAMB_END_CNTL2_B[mpcc_id]);
+ gam_regs.start_end_cntl1_g = REG(MPCC_OGAM_RAMB_END_CNTL1_G[mpcc_id]);
+ gam_regs.start_end_cntl2_g = REG(MPCC_OGAM_RAMB_END_CNTL2_G[mpcc_id]);
+ gam_regs.start_end_cntl1_r = REG(MPCC_OGAM_RAMB_END_CNTL1_R[mpcc_id]);
+ gam_regs.start_end_cntl2_r = REG(MPCC_OGAM_RAMB_END_CNTL2_R[mpcc_id]);
+ gam_regs.region_start = REG(MPCC_OGAM_RAMB_REGION_0_1[mpcc_id]);
+ gam_regs.region_end = REG(MPCC_OGAM_RAMB_REGION_32_33[mpcc_id]);
+
+ cm_helper_program_xfer_func(mpc20->base.ctx, params, &gam_regs);
+
+}
+
+static void mpc2_program_luta(struct mpc *mpc, int mpcc_id,
+ const struct pwl_params *params)
+{
+ struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+ struct xfer_func_reg gam_regs;
+
+ mpc2_ogam_get_reg_field(mpc, &gam_regs);
+
+ gam_regs.start_cntl_b = REG(MPCC_OGAM_RAMA_START_CNTL_B[mpcc_id]);
+ gam_regs.start_cntl_g = REG(MPCC_OGAM_RAMA_START_CNTL_G[mpcc_id]);
+ gam_regs.start_cntl_r = REG(MPCC_OGAM_RAMA_START_CNTL_R[mpcc_id]);
+ gam_regs.start_slope_cntl_b = REG(MPCC_OGAM_RAMA_SLOPE_CNTL_B[mpcc_id]);
+ gam_regs.start_slope_cntl_g = REG(MPCC_OGAM_RAMA_SLOPE_CNTL_G[mpcc_id]);
+ gam_regs.start_slope_cntl_r = REG(MPCC_OGAM_RAMA_SLOPE_CNTL_R[mpcc_id]);
+ gam_regs.start_end_cntl1_b = REG(MPCC_OGAM_RAMA_END_CNTL1_B[mpcc_id]);
+ gam_regs.start_end_cntl2_b = REG(MPCC_OGAM_RAMA_END_CNTL2_B[mpcc_id]);
+ gam_regs.start_end_cntl1_g = REG(MPCC_OGAM_RAMA_END_CNTL1_G[mpcc_id]);
+ gam_regs.start_end_cntl2_g = REG(MPCC_OGAM_RAMA_END_CNTL2_G[mpcc_id]);
+ gam_regs.start_end_cntl1_r = REG(MPCC_OGAM_RAMA_END_CNTL1_R[mpcc_id]);
+ gam_regs.start_end_cntl2_r = REG(MPCC_OGAM_RAMA_END_CNTL2_R[mpcc_id]);
+ gam_regs.region_start = REG(MPCC_OGAM_RAMA_REGION_0_1[mpcc_id]);
+ gam_regs.region_end = REG(MPCC_OGAM_RAMA_REGION_32_33[mpcc_id]);
+
+ cm_helper_program_xfer_func(mpc20->base.ctx, params, &gam_regs);
+
+}
+
+static void mpc20_program_ogam_pwl(
+ struct mpc *mpc, int mpcc_id,
+ const struct pwl_result_data *rgb,
+ uint32_t num)
+{
+ uint32_t i;
+ struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+
+ PERF_TRACE();
+ REG_SEQ_START();
+
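+ /* Note: the LUT data port presumably auto-increments its index after each
+ * write, starting from the index programmed in mpc20_configure_ogam_lut(),
+ * so each PWL point is pushed as base R/G/B followed by delta R/G/B.
+ */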
+ for (i = 0 ; i < num; i++) {
+ REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].red_reg);
+ REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].green_reg);
+ REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].blue_reg);
+
+ REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0,
+ MPCC_OGAM_LUT_DATA, rgb[i].delta_red_reg);
+ REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0,
+ MPCC_OGAM_LUT_DATA, rgb[i].delta_green_reg);
+ REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0,
+ MPCC_OGAM_LUT_DATA, rgb[i].delta_blue_reg);
+
+ }
+
+}
+
+static void apply_DEDCN20_305_wa(struct mpc *mpc, int mpcc_id,
+ enum dc_lut_mode current_mode,
+ enum dc_lut_mode next_mode)
+{
+ struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+
+ if (mpc->ctx->dc->debug.cm_in_bypass) {
+ REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, 0);
+ return;
+ }
+
+ if (mpc->ctx->dc->work_arounds.dedcn20_305_wa == false) {
+ /* hw fixed in newer revision */
+ return;
+ }
+ if (current_mode == LUT_BYPASS)
+ /* This will only work if the OTG is locked.
+ * If we were to support the OTG unlock case,
+ * the workaround would be more complex.
+ */
+ REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE,
+ next_mode == LUT_RAM_A ? 1:2);
+}
+
+void mpc2_set_output_gamma(
+ struct mpc *mpc,
+ int mpcc_id,
+ const struct pwl_params *params)
+{
+ enum dc_lut_mode current_mode;
+ enum dc_lut_mode next_mode;
+ struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+
+ if (mpc->ctx->dc->debug.cm_in_bypass) {
+ REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, 0);
+ return;
+ }
+
+ if (params == NULL) {
+ REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, 0);
+ return;
+ }
+
+ current_mode = mpc20_get_ogam_current(mpc, mpcc_id);
+ if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
+ next_mode = LUT_RAM_B;
+ else
+ next_mode = LUT_RAM_A;
+
+ mpc20_power_on_ogam_lut(mpc, mpcc_id, true);
+ mpc20_configure_ogam_lut(mpc, mpcc_id, next_mode == LUT_RAM_A);
+
+ if (next_mode == LUT_RAM_A)
+ mpc2_program_luta(mpc, mpcc_id, params);
+ else
+ mpc2_program_lutb(mpc, mpcc_id, params);
+
+ apply_DEDCN20_305_wa(mpc, mpcc_id, current_mode, next_mode);
+
+ mpc20_program_ogam_pwl(
+ mpc, mpcc_id, params->rgb_resulted, params->hw_points_num);
+
+ REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE,
+ next_mode == LUT_RAM_A ? 1:2);
+}
+void mpc2_assert_idle_mpcc(struct mpc *mpc, int id)
+{
+ struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+ unsigned int mpc_disabled;
+
+ ASSERT(!(mpc20->mpcc_in_use_mask & 1 << id));
+ REG_GET(MPCC_STATUS[id], MPCC_DISABLED, &mpc_disabled);
+ if (mpc_disabled)
+ return;
+
+ REG_WAIT(MPCC_STATUS[id],
+ MPCC_IDLE, 1,
+ 1, 100000);
+}
+
+void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id)
+{
+ struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+ unsigned int top_sel, mpc_busy, mpc_idle, mpc_disabled;
+
+ REG_GET(MPCC_TOP_SEL[mpcc_id],
+ MPCC_TOP_SEL, &top_sel);
+
+ REG_GET_3(MPCC_STATUS[mpcc_id],
+ MPCC_BUSY, &mpc_busy,
+ MPCC_IDLE, &mpc_idle,
+ MPCC_DISABLED, &mpc_disabled);
+
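+ /* An MPCC_TOP_SEL value of 0xf presumably indicates that no plane is
+ * connected to this MPCC, in which case it is expected to be idle and
+ * disabled; otherwise it is expected to be neither disabled nor idle.
+ */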
+ if (top_sel == 0xf) {
+ ASSERT(!mpc_busy);
+ ASSERT(mpc_idle);
+ ASSERT(mpc_disabled);
+ } else {
+ ASSERT(!mpc_disabled);
+ ASSERT(!mpc_idle);
+ }
+
+ REG_SEQ_SUBMIT();
+ PERF_TRACE();
+ REG_SEQ_WAIT_DONE();
+ PERF_TRACE();
+}
+
+static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
+{
+ mpcc->mpcc_id = mpcc_inst;
+ mpcc->dpp_id = 0xf;
+ mpcc->mpcc_bot = NULL;
+ mpcc->blnd_cfg.overlap_only = false;
+ mpcc->blnd_cfg.global_alpha = 0xff;
+ mpcc->blnd_cfg.global_gain = 0xff;
+ mpcc->blnd_cfg.background_color_bpc = 4;
+ mpcc->blnd_cfg.bottom_gain_mode = 0;
+ mpcc->blnd_cfg.top_gain = 0x1f000;
+ mpcc->blnd_cfg.bottom_inside_gain = 0x1f000;
+ mpcc->blnd_cfg.bottom_outside_gain = 0x1f000;
+ mpcc->sm_cfg.enable = false;
+}
+
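+/* dpp_id 0xf is used as the "no DPP attached" sentinel (see mpc2_init_mpcc()
+ * above), so this walks the OPP list and returns the first MPCC whose dpp_id
+ * matches either the requested DPP or the sentinel value.
+ */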
+static struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
+{
+ struct mpcc *tmp_mpcc = tree->opp_list;
+
+ while (tmp_mpcc != NULL) {
+ if (tmp_mpcc->dpp_id == 0xf || tmp_mpcc->dpp_id == dpp_id)
+ return tmp_mpcc;
+
+ /* avoid circular linked list */
+ ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
+ if (tmp_mpcc == tmp_mpcc->mpcc_bot)
+ break;
+
+ tmp_mpcc = tmp_mpcc->mpcc_bot;
+ }
+ return NULL;
+}
+
+static const struct mpc_funcs dcn20_mpc_funcs = {
+ .read_mpcc_state = mpc1_read_mpcc_state,
+ .insert_plane = mpc1_insert_plane,
+ .remove_mpcc = mpc1_remove_mpcc,
+ .mpc_init = mpc1_mpc_init,
+ .mpc_init_single_inst = mpc1_mpc_init_single_inst,
+ .update_blending = mpc2_update_blending,
+ .cursor_lock = mpc1_cursor_lock,
+ .get_mpcc_for_dpp = mpc2_get_mpcc_for_dpp,
+ .wait_for_idle = mpc2_assert_idle_mpcc,
+ .assert_mpcc_idle_before_connect = mpc2_assert_mpcc_idle_before_connect,
+ .init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw,
+ .set_denorm = mpc2_set_denorm,
+ .set_denorm_clamp = mpc2_set_denorm_clamp,
+ .set_output_csc = mpc2_set_output_csc,
+ .set_ocsc_default = mpc2_set_ocsc_default,
+ .set_output_gamma = mpc2_set_output_gamma,
+ .power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut,
+ .get_mpc_out_mux = mpc1_get_mpc_out_mux,
+ .set_bg_color = mpc1_set_bg_color,
+};
+
+void dcn20_mpc_construct(struct dcn20_mpc *mpc20,
+ struct dc_context *ctx,
+ const struct dcn20_mpc_registers *mpc_regs,
+ const struct dcn20_mpc_shift *mpc_shift,
+ const struct dcn20_mpc_mask *mpc_mask,
+ int num_mpcc)
+{
+ int i;
+
+ mpc20->base.ctx = ctx;
+
+ mpc20->base.funcs = &dcn20_mpc_funcs;
+
+ mpc20->mpc_regs = mpc_regs;
+ mpc20->mpc_shift = mpc_shift;
+ mpc20->mpc_mask = mpc_mask;
+
+ mpc20->mpcc_in_use_mask = 0;
+ mpc20->num_mpcc = num_mpcc;
+
+ for (i = 0; i < MAX_MPCC; i++)
+ mpc2_init_mpcc(&mpc20->base.mpcc_array[i], i);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
new file mode 100644
index 0000000000..496658f420
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
@@ -0,0 +1,312 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_MPCC_DCN20_H__
+#define __DC_MPCC_DCN20_H__
+
+#include "dcn10/dcn10_mpc.h"
+
+#define TO_DCN20_MPC(mpc_base) \
+ container_of(mpc_base, struct dcn20_mpc, base)
+
+#define MPC_REG_LIST_DCN2_0(inst)\
+ MPC_COMMON_REG_LIST_DCN1_0(inst),\
+ SRII(MPCC_TOP_GAIN, MPCC, inst),\
+ SRII(MPCC_BOT_GAIN_INSIDE, MPCC, inst),\
+ SRII(MPCC_BOT_GAIN_OUTSIDE, MPCC, inst),\
+ SRII(MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_START_CNTL_G, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_START_CNTL_R, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_SLOPE_CNTL_B, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_SLOPE_CNTL_G, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_SLOPE_CNTL_R, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_END_CNTL1_B, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_END_CNTL1_G, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_END_CNTL2_G, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_END_CNTL1_R, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_END_CNTL2_R, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_REGION_32_33, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_START_CNTL_B, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_START_CNTL_G, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_START_CNTL_R, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_SLOPE_CNTL_B, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_SLOPE_CNTL_G, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_SLOPE_CNTL_R, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_END_CNTL1_B, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_END_CNTL2_B, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_END_CNTL1_G, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_END_CNTL2_G, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_END_CNTL1_R, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_END_CNTL2_R, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_REGION_0_1, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_REGION_32_33, MPCC_OGAM, inst),\
+ SRII(MPCC_MEM_PWR_CTRL, MPCC, inst),\
+ SRII(MPCC_OGAM_LUT_INDEX, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_LUT_RAM_CONTROL, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_LUT_DATA, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_MODE, MPCC_OGAM, inst)
+
+#define MPC_OUT_MUX_REG_LIST_DCN2_0(inst) \
+ MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst),\
+ SRII(CSC_MODE, MPC_OUT, inst),\
+ SRII(CSC_C11_C12_A, MPC_OUT, inst),\
+ SRII(CSC_C33_C34_A, MPC_OUT, inst),\
+ SRII(CSC_C11_C12_B, MPC_OUT, inst),\
+ SRII(CSC_C33_C34_B, MPC_OUT, inst),\
+ SRII(DENORM_CONTROL, MPC_OUT, inst),\
+ SRII(DENORM_CLAMP_G_Y, MPC_OUT, inst),\
+ SRII(DENORM_CLAMP_B_CB, MPC_OUT, inst)
+
+#define MPC_DBG_REG_LIST_DCN2_0() \
+ SR(MPC_OCSC_TEST_DEBUG_DATA),\
+ SR(MPC_OCSC_TEST_DEBUG_INDEX)
+
+#define MPC_REG_VARIABLE_LIST_DCN2_0 \
+ MPC_COMMON_REG_VARIABLE_LIST \
+ uint32_t MPCC_TOP_GAIN[MAX_MPCC]; \
+ uint32_t MPCC_BOT_GAIN_INSIDE[MAX_MPCC]; \
+ uint32_t MPCC_BOT_GAIN_OUTSIDE[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMA_START_CNTL_B[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMA_START_CNTL_G[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMA_START_CNTL_R[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMA_SLOPE_CNTL_B[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMA_SLOPE_CNTL_G[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMA_SLOPE_CNTL_R[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMA_END_CNTL1_B[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMA_END_CNTL2_B[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMA_END_CNTL1_G[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMA_END_CNTL2_G[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMA_END_CNTL1_R[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMA_END_CNTL2_R[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMA_REGION_0_1[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMA_REGION_32_33[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMB_START_CNTL_B[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMB_START_CNTL_G[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMB_START_CNTL_R[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMB_SLOPE_CNTL_B[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMB_SLOPE_CNTL_G[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMB_SLOPE_CNTL_R[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMB_END_CNTL1_B[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMB_END_CNTL2_B[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMB_END_CNTL1_G[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMB_END_CNTL2_G[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMB_END_CNTL1_R[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMB_END_CNTL2_R[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMB_REGION_0_1[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMB_REGION_32_33[MAX_MPCC];\
+ uint32_t MPCC_MEM_PWR_CTRL[MAX_MPCC];\
+ uint32_t MPCC_OGAM_LUT_INDEX[MAX_MPCC];\
+ uint32_t MPCC_OGAM_LUT_RAM_CONTROL[MAX_MPCC];\
+ uint32_t MPCC_OGAM_LUT_DATA[MAX_MPCC];\
+ uint32_t MPCC_OGAM_MODE[MAX_MPCC];\
+ uint32_t MPC_OCSC_TEST_DEBUG_DATA;\
+ uint32_t MPC_OCSC_TEST_DEBUG_INDEX;\
+ uint32_t CSC_MODE[MAX_OPP]; \
+ uint32_t CSC_C11_C12_A[MAX_OPP]; \
+ uint32_t CSC_C33_C34_A[MAX_OPP]; \
+ uint32_t CSC_C11_C12_B[MAX_OPP]; \
+ uint32_t CSC_C33_C34_B[MAX_OPP]; \
+ uint32_t DENORM_CONTROL[MAX_OPP]; \
+ uint32_t DENORM_CLAMP_G_Y[MAX_OPP]; \
+ uint32_t DENORM_CLAMP_B_CB[MAX_OPP];
+
+#define MPC_COMMON_MASK_SH_LIST_DCN2_0(mask_sh) \
+ MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh),\
+ SF(MPCC0_MPCC_CONTROL, MPCC_BG_BPC, mask_sh),\
+ SF(MPCC0_MPCC_CONTROL, MPCC_BOT_GAIN_MODE, mask_sh),\
+ SF(MPCC0_MPCC_TOP_GAIN, MPCC_TOP_GAIN, mask_sh),\
+ SF(MPCC0_MPCC_BOT_GAIN_INSIDE, MPCC_BOT_GAIN_INSIDE, mask_sh),\
+ SF(MPCC0_MPCC_BOT_GAIN_OUTSIDE, MPCC_BOT_GAIN_OUTSIDE, mask_sh),\
+ SF(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_INDEX, mask_sh),\
+ SF(MPC_OUT0_CSC_MODE, MPC_OCSC_MODE, mask_sh),\
+ SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C11_A, mask_sh),\
+ SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C12_A, mask_sh),\
+ SF(MPCC0_MPCC_STATUS, MPCC_DISABLED, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B, MPCC_OGAM_RAMA_EXP_REGION_END_B, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_B, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1, MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1, MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1, MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1, MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B, MPCC_OGAM_RAMB_EXP_REGION_END_B, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B, MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B, MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_B, MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B, MPCC_OGAM_RAMB_EXP_REGION_START_B, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B, MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh),\
+ SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_FORCE, mask_sh),\
+ SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_DIS, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_LUT_INDEX, MPCC_OGAM_LUT_INDEX, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL, MPCC_OGAM_LUT_WRITE_EN_MASK, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL, MPCC_OGAM_LUT_RAM_SEL, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL, MPCC_OGAM_CONFIG_STATUS, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_LUT_DATA, MPCC_OGAM_LUT_DATA, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_MODE, MPCC_OGAM_MODE, mask_sh),\
+ SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_MODE, mask_sh),\
+ SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_CLAMP_MAX_R_CR, mask_sh),\
+ SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_CLAMP_MIN_R_CR, mask_sh),\
+ SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MAX_G_Y, mask_sh),\
+ SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MIN_G_Y, mask_sh),\
+ SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\
+ SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh),\
+ SF(CUR_VUPDATE_LOCK_SET0, CUR_VUPDATE_LOCK_SET, mask_sh)
+
+/*
+ * DCN2 MPC_OCSC debug status register:
+ *
+ * The status index that reports the current OCSC mode is 1.
+ * The OCSC mode occupies bits [1:0] of the debug data.
+ */
+#define MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX 1
+
+#define MPC_DEBUG_REG_LIST_SH_DCN20 \
+ .MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE = 0
+
+#define MPC_DEBUG_REG_LIST_MASK_DCN20 \
+ .MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE = 0x3
+
+#define MPC_REG_FIELD_LIST_DCN2_0(type) \
+ MPC_REG_FIELD_LIST(type)\
+ type MPCC_BG_BPC;\
+ type MPCC_BOT_GAIN_MODE;\
+ type MPCC_TOP_GAIN;\
+ type MPCC_BOT_GAIN_INSIDE;\
+ type MPCC_BOT_GAIN_OUTSIDE;\
+ type MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE;\
+ type MPC_OCSC_TEST_DEBUG_INDEX;\
+ type MPC_OCSC_MODE;\
+ type MPC_OCSC_C11_A;\
+ type MPC_OCSC_C12_A;\
+ type MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;\
+ type MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;\
+ type MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET;\
+ type MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;\
+ type MPCC_OGAM_RAMA_EXP_REGION_END_B;\
+ type MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B;\
+ type MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B;\
+ type MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B;\
+ type MPCC_OGAM_RAMA_EXP_REGION_START_B;\
+ type MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;\
+ type MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET;\
+ type MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS;\
+ type MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET;\
+ type MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS;\
+ type MPCC_OGAM_RAMB_EXP_REGION_END_B;\
+ type MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B;\
+ type MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B;\
+ type MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;\
+ type MPCC_OGAM_RAMB_EXP_REGION_START_B;\
+ type MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B;\
+ type MPCC_OGAM_MEM_PWR_FORCE;\
+ type MPCC_OGAM_LUT_INDEX;\
+ type MPCC_OGAM_LUT_WRITE_EN_MASK;\
+ type MPCC_OGAM_LUT_RAM_SEL;\
+ type MPCC_OGAM_CONFIG_STATUS;\
+ type MPCC_OGAM_LUT_DATA;\
+ type MPCC_OGAM_MODE;\
+ type MPC_OUT_DENORM_MODE;\
+ type MPC_OUT_DENORM_CLAMP_MAX_R_CR;\
+ type MPC_OUT_DENORM_CLAMP_MIN_R_CR;\
+ type MPC_OUT_DENORM_CLAMP_MAX_G_Y;\
+ type MPC_OUT_DENORM_CLAMP_MIN_G_Y;\
+ type MPC_OUT_DENORM_CLAMP_MAX_B_CB;\
+ type MPC_OUT_DENORM_CLAMP_MIN_B_CB;\
+ type MPCC_DISABLED;\
+ type MPCC_OGAM_MEM_PWR_DIS;
+
+struct dcn20_mpc_registers {
+ MPC_REG_VARIABLE_LIST_DCN2_0
+};
+
+struct dcn20_mpc_shift {
+ MPC_REG_FIELD_LIST_DCN2_0(uint8_t)
+};
+
+struct dcn20_mpc_mask {
+ MPC_REG_FIELD_LIST_DCN2_0(uint32_t)
+};
+
+struct dcn20_mpc {
+ struct mpc base;
+
+ int mpcc_in_use_mask;
+ int num_mpcc;
+ const struct dcn20_mpc_registers *mpc_regs;
+ const struct dcn20_mpc_shift *mpc_shift;
+ const struct dcn20_mpc_mask *mpc_mask;
+};
+
+void dcn20_mpc_construct(struct dcn20_mpc *mpcc20,
+ struct dc_context *ctx,
+ const struct dcn20_mpc_registers *mpc_regs,
+ const struct dcn20_mpc_shift *mpc_shift,
+ const struct dcn20_mpc_mask *mpc_mask,
+ int num_mpcc);
+
+void mpc2_update_blending(
+ struct mpc *mpc,
+ struct mpcc_blnd_cfg *blnd_cfg,
+ int mpcc_id);
+
+void mpc2_set_denorm(
+ struct mpc *mpc,
+ int opp_id,
+ enum dc_color_depth output_depth);
+
+void mpc2_set_denorm_clamp(
+ struct mpc *mpc,
+ int opp_id,
+ struct mpc_denorm_clamp denorm_clamp);
+
+void mpc2_set_output_csc(
+ struct mpc *mpc,
+ int opp_id,
+ const uint16_t *regval,
+ enum mpc_output_csc_mode ocsc_mode);
+
+void mpc2_set_ocsc_default(
+ struct mpc *mpc,
+ int opp_id,
+ enum dc_color_space color_space,
+ enum mpc_output_csc_mode ocsc_mode);
+
+void mpc2_set_output_gamma(
+ struct mpc *mpc,
+ int mpcc_id,
+ const struct pwl_params *params);
+
+void mpc2_assert_idle_mpcc(struct mpc *mpc, int id);
+void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id);
+void mpc20_power_on_ogam_lut(struct mpc *mpc, int mpcc_id, bool power_on);
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
new file mode 100644
index 0000000000..0784d01986
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
@@ -0,0 +1,386 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dcn20_opp.h"
+#include "reg_helper.h"
+
+#define REG(reg) \
+ (oppn20->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ oppn20->opp_shift->field_name, oppn20->opp_mask->field_name
+
+#define CTX \
+ oppn20->base.ctx
+
+
+void opp2_set_disp_pattern_generator(
+ struct output_pixel_processor *opp,
+ enum controller_dp_test_pattern test_pattern,
+ enum controller_dp_color_space color_space,
+ enum dc_color_depth color_depth,
+ const struct tg_color *solid_color,
+ int width,
+ int height,
+ int offset)
+{
+ struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
+ enum test_pattern_color_format bit_depth;
+ enum test_pattern_dyn_range dyn_range;
+ enum test_pattern_mode mode;
+
+ /* color ramp generator mixes 16-bit color */
+ uint32_t src_bpc = 16;
+ /* requested bpc */
+ uint32_t dst_bpc;
+ uint32_t index;
+ /* RGB values of the color bars.
+ * Produce two RGB colors: RGB0 - white (all Fs)
+ * and RGB1 - black (all 0s)
+ * (three RGB components for two colors)
+ */
+ uint16_t src_color[6] = {0xFFFF, 0xFFFF, 0xFFFF, 0x0000,
+ 0x0000, 0x0000};
+ /* dest color (converted to the specified color format) */
+ uint16_t dst_color[6];
+ uint32_t inc_base;
+
+ /* translate to bit depth */
+ switch (color_depth) {
+ case COLOR_DEPTH_666:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_6;
+ break;
+ case COLOR_DEPTH_888:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
+ break;
+ case COLOR_DEPTH_101010:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_10;
+ break;
+ case COLOR_DEPTH_121212:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_12;
+ break;
+ default:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
+ break;
+ }
+
+ /* set DPG dimensions */
+ REG_SET_2(DPG_DIMENSIONS, 0,
+ DPG_ACTIVE_WIDTH, width,
+ DPG_ACTIVE_HEIGHT, height);
+
+ /* set DPG offset */
+ REG_SET_2(DPG_OFFSET_SEGMENT, 0,
+ DPG_X_OFFSET, offset,
+ DPG_SEGMENT_WIDTH, 0);
+
+ switch (test_pattern) {
+ case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES:
+ case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA:
+ {
+ dyn_range = (test_pattern ==
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA ?
+ TEST_PATTERN_DYN_RANGE_CEA :
+ TEST_PATTERN_DYN_RANGE_VESA);
+
+ switch (color_space) {
+ case CONTROLLER_DP_COLOR_SPACE_YCBCR601:
+ mode = TEST_PATTERN_MODE_COLORSQUARES_YCBCR601;
+ break;
+ case CONTROLLER_DP_COLOR_SPACE_YCBCR709:
+ mode = TEST_PATTERN_MODE_COLORSQUARES_YCBCR709;
+ break;
+ case CONTROLLER_DP_COLOR_SPACE_RGB:
+ default:
+ mode = TEST_PATTERN_MODE_COLORSQUARES_RGB;
+ break;
+ }
+
+ REG_UPDATE_6(DPG_CONTROL,
+ DPG_EN, 1,
+ DPG_MODE, mode,
+ DPG_DYNAMIC_RANGE, dyn_range,
+ DPG_BIT_DEPTH, bit_depth,
+ DPG_VRES, 6,
+ DPG_HRES, 6);
+ }
+ break;
+
+ case CONTROLLER_DP_TEST_PATTERN_VERTICALBARS:
+ case CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS:
+ {
+ mode = (test_pattern ==
+ CONTROLLER_DP_TEST_PATTERN_VERTICALBARS ?
+ TEST_PATTERN_MODE_VERTICALBARS :
+ TEST_PATTERN_MODE_HORIZONTALBARS);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ dst_bpc = 6;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ dst_bpc = 8;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ dst_bpc = 10;
+ break;
+ default:
+ dst_bpc = 8;
+ break;
+ }
+
+ /* adjust color to the required colorFormat */
+ for (index = 0; index < 6; index++) {
+ /* dst = 2^dstBpc * src / 2^srcBpc = src >>
+ * (srcBpc - dstBpc);
+ */
+ dst_color[index] =
+ src_color[index] >> (src_bpc - dst_bpc);
+ /* DPG_COLOUR registers are 16-bit MSB aligned value with bits 3:0 hardwired to ZERO.
+ * XXXXXXXXXX000000 for 10 bit,
+ * XXXXXXXX00000000 for 8 bit,
+ * XXXXXX0000000000 for 6 bits
+ */
+ dst_color[index] <<= (16 - dst_bpc);
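+ /* e.g. 8 bpc: 0xFFFF >> 8 = 0x00FF, then 0x00FF << 8 = 0xFF00 */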
+ }
+
+ REG_SET_2(DPG_COLOUR_R_CR, 0,
+ DPG_COLOUR1_R_CR, dst_color[0],
+ DPG_COLOUR0_R_CR, dst_color[3]);
+ REG_SET_2(DPG_COLOUR_G_Y, 0,
+ DPG_COLOUR1_G_Y, dst_color[1],
+ DPG_COLOUR0_G_Y, dst_color[4]);
+ REG_SET_2(DPG_COLOUR_B_CB, 0,
+ DPG_COLOUR1_B_CB, dst_color[2],
+ DPG_COLOUR0_B_CB, dst_color[5]);
+
+ /* enable test pattern */
+ REG_UPDATE_6(DPG_CONTROL,
+ DPG_EN, 1,
+ DPG_MODE, mode,
+ DPG_DYNAMIC_RANGE, 0,
+ DPG_BIT_DEPTH, bit_depth,
+ DPG_VRES, 0,
+ DPG_HRES, 0);
+ }
+ break;
+
+ case CONTROLLER_DP_TEST_PATTERN_COLORRAMP:
+ {
+ mode = (bit_depth ==
+ TEST_PATTERN_COLOR_FORMAT_BPC_10 ?
+ TEST_PATTERN_MODE_DUALRAMP_RGB :
+ TEST_PATTERN_MODE_SINGLERAMP_RGB);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ dst_bpc = 6;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ dst_bpc = 8;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ dst_bpc = 10;
+ break;
+ default:
+ dst_bpc = 8;
+ break;
+ }
+
+ /* increment for the first ramp, per color gradation:
+ * one gradation of 6-bit color corresponds to 2^10
+ * gradations in 16-bit color
+ */
+ inc_base = (src_bpc - dst_bpc);
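+ /* e.g. dst_bpc = 6 gives inc_base = 10, i.e. one 6-bit gradation spans 2^10 16-bit codes */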
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ {
+ REG_SET_3(DPG_RAMP_CONTROL, 0,
+ DPG_RAMP0_OFFSET, 0,
+ DPG_INC0, inc_base,
+ DPG_INC1, 0);
+ REG_UPDATE_2(DPG_CONTROL,
+ DPG_VRES, 6,
+ DPG_HRES, 6);
+ }
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ {
+ REG_SET_3(DPG_RAMP_CONTROL, 0,
+ DPG_RAMP0_OFFSET, 0,
+ DPG_INC0, inc_base,
+ DPG_INC1, 0);
+ REG_UPDATE_2(DPG_CONTROL,
+ DPG_VRES, 6,
+ DPG_HRES, 8);
+ }
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ {
+ REG_SET_3(DPG_RAMP_CONTROL, 0,
+ DPG_RAMP0_OFFSET, 384 << 6,
+ DPG_INC0, inc_base,
+ DPG_INC1, inc_base + 2);
+ REG_UPDATE_2(DPG_CONTROL,
+ DPG_VRES, 5,
+ DPG_HRES, 8);
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* enable test pattern */
+ REG_UPDATE_4(DPG_CONTROL,
+ DPG_EN, 1,
+ DPG_MODE, mode,
+ DPG_DYNAMIC_RANGE, 0,
+ DPG_BIT_DEPTH, bit_depth);
+ }
+ break;
+ case CONTROLLER_DP_TEST_PATTERN_VIDEOMODE:
+ {
+ REG_WRITE(DPG_CONTROL, 0);
+ REG_WRITE(DPG_COLOUR_R_CR, 0);
+ REG_WRITE(DPG_COLOUR_G_Y, 0);
+ REG_WRITE(DPG_COLOUR_B_CB, 0);
+ REG_WRITE(DPG_RAMP_CONTROL, 0);
+ }
+ break;
+ case CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR:
+ {
+ opp2_dpg_set_blank_color(opp, solid_color);
+ REG_UPDATE_2(DPG_CONTROL,
+ DPG_EN, 1,
+ DPG_MODE, TEST_PATTERN_MODE_HORIZONTALBARS);
+
+ REG_SET_2(DPG_DIMENSIONS, 0,
+ DPG_ACTIVE_WIDTH, width,
+ DPG_ACTIVE_HEIGHT, height);
+ }
+ break;
+ default:
+ break;
+
+ }
+}
+
+void opp2_program_dpg_dimensions(
+ struct output_pixel_processor *opp,
+ int width, int height)
+{
+ struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
+
+ REG_SET_2(DPG_DIMENSIONS, 0,
+ DPG_ACTIVE_WIDTH, width,
+ DPG_ACTIVE_HEIGHT, height);
+}
+
+void opp2_dpg_set_blank_color(
+ struct output_pixel_processor *opp,
+ const struct tg_color *color)
+{
+ struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
+
+ /* 16-bit MSB aligned value. Bits 3:0 of this field are hardwired to ZERO */
+ ASSERT(color);
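+ /* assuming 10-bit color components, shifting left by 6 MSB-aligns them in the 16-bit field */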
+ REG_SET_2(DPG_COLOUR_B_CB, 0,
+ DPG_COLOUR1_B_CB, color->color_b_cb << 6,
+ DPG_COLOUR0_B_CB, color->color_b_cb << 6);
+ REG_SET_2(DPG_COLOUR_G_Y, 0,
+ DPG_COLOUR1_G_Y, color->color_g_y << 6,
+ DPG_COLOUR0_G_Y, color->color_g_y << 6);
+ REG_SET_2(DPG_COLOUR_R_CR, 0,
+ DPG_COLOUR1_R_CR, color->color_r_cr << 6,
+ DPG_COLOUR0_R_CR, color->color_r_cr << 6);
+}
+
+bool opp2_dpg_is_blanked(struct output_pixel_processor *opp)
+{
+ struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
+ uint32_t dpg_en, dpg_mode;
+ uint32_t double_buffer_pending;
+
+ REG_GET_2(DPG_CONTROL,
+ DPG_EN, &dpg_en,
+ DPG_MODE, &dpg_mode);
+
+ REG_GET(DPG_STATUS,
+ DPG_DOUBLE_BUFFER_PENDING, &double_buffer_pending);
+
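+ /* blanked means the DPG is enabled and no double-buffered update is still pending */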
+ return (dpg_en == 1) &&
+ (double_buffer_pending == 0);
+}
+
+void opp2_program_left_edge_extra_pixel (
+ struct output_pixel_processor *opp,
+ bool count)
+{
+ struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
+
+ /* Specifies the number of extra left edge pixels that are supplied to
+ * the 422 horizontal chroma sub-sample filter.
+ * Note that when the left edge pixel count is not 0, FMT pixel encoding
+ * can be in either 420 or 422 mode.
+ */
+ REG_UPDATE(FMT_422_CONTROL, FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT, count);
+}
+
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+static struct opp_funcs dcn20_opp_funcs = {
+ .opp_set_dyn_expansion = opp1_set_dyn_expansion,
+ .opp_program_fmt = opp1_program_fmt,
+ .opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction,
+ .opp_program_stereo = opp1_program_stereo,
+ .opp_pipe_clock_control = opp1_pipe_clock_control,
+ .opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
+ .opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
+ .dpg_is_blanked = opp2_dpg_is_blanked,
+ .opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
+ .opp_destroy = opp1_destroy,
+ .opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
+};
+
+void dcn20_opp_construct(struct dcn20_opp *oppn20,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn20_opp_registers *regs,
+ const struct dcn20_opp_shift *opp_shift,
+ const struct dcn20_opp_mask *opp_mask)
+{
+ oppn20->base.ctx = ctx;
+ oppn20->base.inst = inst;
+ oppn20->base.funcs = &dcn20_opp_funcs;
+
+ oppn20->regs = regs;
+ oppn20->opp_shift = opp_shift;
+ oppn20->opp_mask = opp_mask;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
new file mode 100644
index 0000000000..3ab221bdd2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
@@ -0,0 +1,170 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_OPP_DCN20_H__
+#define __DC_OPP_DCN20_H__
+
+#include "dcn10/dcn10_opp.h"
+
+#define TO_DCN20_OPP(opp)\
+ container_of(opp, struct dcn20_opp, base)
+
+#define OPP_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
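+/* e.g. OPP_SF(DPG0_DPG_CONTROL, DPG_EN, __SHIFT) expands to .DPG_EN = DPG0_DPG_CONTROL__DPG_EN__SHIFT */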
+
+#define OPP_DPG_REG_LIST(id) \
+ SRI(DPG_CONTROL, DPG, id), \
+ SRI(DPG_DIMENSIONS, DPG, id), \
+ SRI(DPG_OFFSET_SEGMENT, DPG, id), \
+ SRI(DPG_COLOUR_B_CB, DPG, id), \
+ SRI(DPG_COLOUR_G_Y, DPG, id), \
+ SRI(DPG_COLOUR_R_CR, DPG, id), \
+ SRI(DPG_RAMP_CONTROL, DPG, id), \
+ SRI(DPG_STATUS, DPG, id)
+
+#define OPP_REG_LIST_DCN20(id) \
+ OPP_REG_LIST_DCN10(id), \
+ OPP_DPG_REG_LIST(id), \
+ SRI(FMT_422_CONTROL, FMT, id), \
+ SRI(OPPBUF_CONTROL1, OPPBUF, id)
+
+#define OPP_REG_VARIABLE_LIST_DCN2_0 \
+ OPP_COMMON_REG_VARIABLE_LIST; \
+ uint32_t FMT_422_CONTROL; \
+ uint32_t DPG_CONTROL; \
+ uint32_t DPG_DIMENSIONS; \
+ uint32_t DPG_OFFSET_SEGMENT; \
+ uint32_t DPG_COLOUR_B_CB; \
+ uint32_t DPG_COLOUR_G_Y; \
+ uint32_t DPG_COLOUR_R_CR; \
+ uint32_t DPG_RAMP_CONTROL; \
+ uint32_t DPG_STATUS
+
+#define OPP_DPG_MASK_SH_LIST(mask_sh) \
+ OPP_SF(DPG0_DPG_CONTROL, DPG_EN, mask_sh), \
+ OPP_SF(DPG0_DPG_CONTROL, DPG_MODE, mask_sh), \
+ OPP_SF(DPG0_DPG_CONTROL, DPG_DYNAMIC_RANGE, mask_sh), \
+ OPP_SF(DPG0_DPG_CONTROL, DPG_BIT_DEPTH, mask_sh), \
+ OPP_SF(DPG0_DPG_CONTROL, DPG_VRES, mask_sh), \
+ OPP_SF(DPG0_DPG_CONTROL, DPG_HRES, mask_sh), \
+ OPP_SF(DPG0_DPG_DIMENSIONS, DPG_ACTIVE_WIDTH, mask_sh), \
+ OPP_SF(DPG0_DPG_DIMENSIONS, DPG_ACTIVE_HEIGHT, mask_sh), \
+ OPP_SF(DPG0_DPG_OFFSET_SEGMENT, DPG_X_OFFSET, mask_sh), \
+ OPP_SF(DPG0_DPG_OFFSET_SEGMENT, DPG_SEGMENT_WIDTH, mask_sh), \
+ OPP_SF(DPG0_DPG_COLOUR_R_CR, DPG_COLOUR0_R_CR, mask_sh), \
+ OPP_SF(DPG0_DPG_COLOUR_R_CR, DPG_COLOUR1_R_CR, mask_sh), \
+ OPP_SF(DPG0_DPG_COLOUR_B_CB, DPG_COLOUR0_B_CB, mask_sh), \
+ OPP_SF(DPG0_DPG_COLOUR_B_CB, DPG_COLOUR1_B_CB, mask_sh), \
+ OPP_SF(DPG0_DPG_COLOUR_G_Y, DPG_COLOUR0_G_Y, mask_sh), \
+ OPP_SF(DPG0_DPG_COLOUR_G_Y, DPG_COLOUR1_G_Y, mask_sh), \
+ OPP_SF(DPG0_DPG_RAMP_CONTROL, DPG_RAMP0_OFFSET, mask_sh), \
+ OPP_SF(DPG0_DPG_RAMP_CONTROL, DPG_INC0, mask_sh), \
+ OPP_SF(DPG0_DPG_RAMP_CONTROL, DPG_INC1, mask_sh), \
+ OPP_SF(DPG0_DPG_STATUS, DPG_DOUBLE_BUFFER_PENDING, mask_sh)
+
+#define OPP_MASK_SH_LIST_DCN20(mask_sh) \
+ OPP_MASK_SH_LIST_DCN(mask_sh), \
+ OPP_DPG_MASK_SH_LIST(mask_sh), \
+ OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_DISPLAY_SEGMENTATION, mask_sh),\
+ OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_OVERLAP_PIXEL_NUM, mask_sh), \
+ OPP_SF(FMT0_FMT_422_CONTROL, FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT, mask_sh)
+
+#define OPP_DCN20_REG_FIELD_LIST(type) \
+ OPP_DCN10_REG_FIELD_LIST(type); \
+ type FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT; \
+ type DPG_EN; \
+ type DPG_MODE; \
+ type DPG_DYNAMIC_RANGE; \
+ type DPG_BIT_DEPTH; \
+ type DPG_VRES; \
+ type DPG_HRES; \
+ type DPG_ACTIVE_WIDTH; \
+ type DPG_ACTIVE_HEIGHT; \
+ type DPG_X_OFFSET; \
+ type DPG_SEGMENT_WIDTH; \
+ type DPG_COLOUR0_R_CR; \
+ type DPG_COLOUR1_R_CR; \
+ type DPG_COLOUR0_B_CB; \
+ type DPG_COLOUR1_B_CB; \
+ type DPG_COLOUR0_G_Y; \
+ type DPG_COLOUR1_G_Y; \
+ type DPG_RAMP0_OFFSET; \
+ type DPG_INC0; \
+ type DPG_INC1; \
+ type DPG_DOUBLE_BUFFER_PENDING
+
+struct dcn20_opp_registers {
+ OPP_REG_VARIABLE_LIST_DCN2_0;
+};
+
+struct dcn20_opp_shift {
+ OPP_DCN20_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn20_opp_mask {
+ OPP_DCN20_REG_FIELD_LIST(uint32_t);
+};
+
+struct dcn20_opp {
+ struct output_pixel_processor base;
+
+ const struct dcn20_opp_registers *regs;
+ const struct dcn20_opp_shift *opp_shift;
+ const struct dcn20_opp_mask *opp_mask;
+
+ bool is_write_to_ram_a_safe;
+};
+
+void dcn20_opp_construct(struct dcn20_opp *oppn20,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn20_opp_registers *regs,
+ const struct dcn20_opp_shift *opp_shift,
+ const struct dcn20_opp_mask *opp_mask);
+
+void opp2_set_disp_pattern_generator(
+ struct output_pixel_processor *opp,
+ enum controller_dp_test_pattern test_pattern,
+ enum controller_dp_color_space color_space,
+ enum dc_color_depth color_depth,
+ const struct tg_color *solid_color,
+ int width,
+ int height,
+ int offset);
+
+void opp2_program_dpg_dimensions(
+ struct output_pixel_processor *opp,
+ int width, int height);
+
+bool opp2_dpg_is_blanked(struct output_pixel_processor *opp);
+
+void opp2_dpg_set_blank_color(
+ struct output_pixel_processor *opp,
+ const struct tg_color *color);
+
+void opp2_program_left_edge_extra_pixel (
+ struct output_pixel_processor *opp,
+ bool count);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
new file mode 100644
index 0000000000..58bdbd859b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -0,0 +1,587 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "dcn20_optc.h"
+#include "dc.h"
+
+#define REG(reg)\
+ optc1->tg_regs->reg
+
+#define CTX \
+ optc1->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ optc1->tg_shift->field_name, optc1->tg_mask->field_name
+
+/**
+ * optc2_enable_crtc() - Enable the CRTC by calling the ASIC Control Object to enable the timing generator.
+ *
+ * @optc: timing_generator instance.
+ *
+ * Return: true once the CRTC has been enabled.
+ *
+ */
+bool optc2_enable_crtc(struct timing_generator *optc)
+{
+ /* TODO FPGA wait for answer
+ * OTG_MASTER_UPDATE_MODE != CRTC_MASTER_UPDATE_MODE
+ * OTG_MASTER_UPDATE_LOCK != CRTC_MASTER_UPDATE_LOCK
+ */
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ /* opp instance for OTG. For DCN1.0, ODM is removed;
+ * OPP and OPTC should have a 1:1 mapping.
+ */
+ REG_UPDATE(OPTC_DATA_SOURCE_SELECT,
+ OPTC_SEG0_SRC_SEL, optc->inst);
+
+ /* VTG enable first is for HW workaround */
+ REG_UPDATE(CONTROL,
+ VTG0_ENABLE, 1);
+
+ REG_SEQ_START();
+
+ /* Enable CRTC */
+ REG_UPDATE_2(OTG_CONTROL,
+ OTG_DISABLE_POINT_CNTL, 3,
+ OTG_MASTER_EN, 1);
+
+ REG_SEQ_SUBMIT();
+ REG_SEQ_WAIT_DONE();
+
+ return true;
+}
+
+/**
+ * optc2_set_gsl() - Assign the OTG to a GSL group and
+ * set one of the OTGs to be the master while the rest are slaves.
+ *
+ * @optc: timing_generator instance.
+ * @params: pointer to gsl_params
+ */
+void optc2_set_gsl(struct timing_generator *optc,
+ const struct gsl_params *params)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+/*
+ * There are (MAX_OPTC+1)/2 gsl groups available for use.
+ * Assign an OTG to a group by setting OTG_GSLX_EN = 1; within each group,
+ * set one of the OTGs to be the master (OTG_GSL_MASTER_EN = 1) and the rest to be slaves.
+ */
+ REG_UPDATE_5(OTG_GSL_CONTROL,
+ OTG_GSL0_EN, params->gsl0_en,
+ OTG_GSL1_EN, params->gsl1_en,
+ OTG_GSL2_EN, params->gsl2_en,
+ OTG_GSL_MASTER_EN, params->gsl_master_en,
+ OTG_GSL_MASTER_MODE, params->gsl_master_mode);
+}
+
+
+void optc2_set_gsl_source_select(
+ struct timing_generator *optc,
+ int group_idx,
+ uint32_t gsl_ready_signal)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ switch (group_idx) {
+ case 1:
+ REG_UPDATE(GSL_SOURCE_SELECT, GSL0_READY_SOURCE_SEL, gsl_ready_signal);
+ break;
+ case 2:
+ REG_UPDATE(GSL_SOURCE_SELECT, GSL1_READY_SOURCE_SEL, gsl_ready_signal);
+ break;
+ case 3:
+ REG_UPDATE(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, gsl_ready_signal);
+ break;
+ default:
+ break;
+ }
+}
+
+/* Set DSC-related configuration.
+ * dsc_mode: 0 disables DSC, other values enable DSC in specified format
+ * dsc_bytes_per_pixel: Bytes per pixel in u3.28 format
+ * dsc_slice_width: Slice width in pixels
+ */
+void optc2_set_dsc_config(struct timing_generator *optc,
+ enum optc_dsc_mode dsc_mode,
+ uint32_t dsc_bytes_per_pixel,
+ uint32_t dsc_slice_width)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_UPDATE(OPTC_DATA_FORMAT_CONTROL,
+ OPTC_DSC_MODE, dsc_mode);
+
+ REG_SET(OPTC_BYTES_PER_PIXEL, 0,
+ OPTC_DSC_BYTES_PER_PIXEL, dsc_bytes_per_pixel);
+
+ REG_UPDATE(OPTC_WIDTH_CONTROL,
+ OPTC_DSC_SLICE_WIDTH, dsc_slice_width);
+}
+
+/* Get DSC-related configuration.
+ * dsc_mode: 0 disables DSC, other values enable DSC in specified format
+ */
+void optc2_get_dsc_status(struct timing_generator *optc,
+ uint32_t *dsc_mode)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_GET(OPTC_DATA_FORMAT_CONTROL,
+ OPTC_DSC_MODE, dsc_mode);
+}
+
+
+/* TEMP: Need to figure out inheritance model here. */
+bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
+{
+ return optc1_is_two_pixels_per_containter(timing);
+}
+
+void optc2_set_odm_bypass(struct timing_generator *optc,
+ const struct dc_crtc_timing *dc_crtc_timing)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t h_div_2 = 0;
+
+ REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,
+ OPTC_NUM_OF_INPUT_SEGMENT, 0,
+ OPTC_SEG0_SRC_SEL, optc->inst,
+ OPTC_SEG1_SRC_SEL, 0xf);
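+ /* SEG1 source 0xf marks the second segment as unused when ODM is bypassed */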
+ REG_WRITE(OTG_H_TIMING_CNTL, 0);
+
+ h_div_2 = optc2_is_two_pixels_per_containter(dc_crtc_timing);
+ REG_UPDATE(OTG_H_TIMING_CNTL,
+ OTG_H_TIMING_DIV_BY2, h_div_2);
+ REG_SET(OPTC_MEMORY_CONFIG, 0,
+ OPTC_MEM_SEL, 0);
+ optc1->opp_count = 1;
+}
+
+void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
+ struct dc_crtc_timing *timing)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right)
+ / opp_cnt;
+ uint32_t memory_mask;
+
+ ASSERT(opp_cnt == 2);
+
+ /* TODO: In pseudocode but does not affect Maximus; delete this comment if it is not needed on ASIC.
+ * REG_SET(OTG_GLOBAL_CONTROL2, 0, GLOBAL_UPDATE_LOCK_EN, 1);
+ * Program OTG register MASTER_UPDATE_LOCK_DB_X/Y to the position before DP frame start
+ * REG_SET_2(OTG_GLOBAL_CONTROL1, 0,
+ * MASTER_UPDATE_LOCK_DB_X, 160,
+ * MASTER_UPDATE_LOCK_DB_Y, 240);
+ */
+
+ /* 2 pieces of memory are required for displays up to 5120 pixels wide, 4 for up
+ * to 8192; however, for ODM combine we can simplify by always using 4.
+ * To make sure there's no overlap, each instance "reserves" 2 memories and
+ * they are uniquely combined here.
+ */
+ memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2);
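+ /* e.g. opp_id[] = {0, 1} gives memory_mask = 0x3 | 0xc = 0xf, i.e. OPTC memories 0-3 */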
+
+ if (REG(OPTC_MEMORY_CONFIG))
+ REG_SET(OPTC_MEMORY_CONFIG, 0,
+ OPTC_MEM_SEL, memory_mask);
+
+ REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,
+ OPTC_NUM_OF_INPUT_SEGMENT, 1,
+ OPTC_SEG0_SRC_SEL, opp_id[0],
+ OPTC_SEG1_SRC_SEL, opp_id[1]);
+
+ REG_UPDATE(OPTC_WIDTH_CONTROL,
+ OPTC_SEGMENT_WIDTH, mpcc_hactive);
+
+ REG_SET(OTG_H_TIMING_CNTL, 0, OTG_H_TIMING_DIV_BY2, 1);
+ optc1->opp_count = opp_cnt;
+}
+
+void optc2_get_optc_source(struct timing_generator *optc,
+ uint32_t *num_of_src_opp,
+ uint32_t *src_opp_id_0,
+ uint32_t *src_opp_id_1)
+{
+ uint32_t num_of_input_segments;
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_GET_3(OPTC_DATA_SOURCE_SELECT,
+ OPTC_NUM_OF_INPUT_SEGMENT, &num_of_input_segments,
+ OPTC_SEG0_SRC_SEL, src_opp_id_0,
+ OPTC_SEG1_SRC_SEL, src_opp_id_1);
+
+ if (num_of_input_segments == 1)
+ *num_of_src_opp = 2;
+ else
+ *num_of_src_opp = 1;
+
+ /* Work around VBIOS not updating OPTC_NUM_OF_INPUT_SEGMENT */
+ if (*src_opp_id_1 == 0xf)
+ *num_of_src_opp = 1;
+}
+
+static void optc2_set_dwb_source(struct timing_generator *optc,
+ uint32_t dwb_pipe_inst)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ if (dwb_pipe_inst == 0)
+ REG_UPDATE(DWB_SOURCE_SELECT,
+ OPTC_DWB0_SOURCE_SELECT, optc->inst);
+ else if (dwb_pipe_inst == 1)
+ REG_UPDATE(DWB_SOURCE_SELECT,
+ OPTC_DWB1_SOURCE_SELECT, optc->inst);
+}
+
+static void optc2_align_vblanks(
+ struct timing_generator *optc_master,
+ struct timing_generator *optc_slave,
+ uint32_t master_pixel_clock_100Hz,
+ uint32_t slave_pixel_clock_100Hz,
+ uint8_t master_clock_divider,
+ uint8_t slave_clock_divider)
+{
+ /* accessing slave OTG registers */
+ struct optc *optc1 = DCN10TG_FROM_TG(optc_slave);
+
+ uint32_t master_v_active = 0;
+ uint32_t master_h_total = 0;
+ uint32_t slave_h_total = 0;
+ uint64_t L, XY;
+ uint32_t X, Y, p = 10000;
+ uint32_t master_update_lock;
+
+ /* disable slave OTG */
+ REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0);
+ /* wait until disabled */
+ REG_WAIT(OTG_CONTROL,
+ OTG_CURRENT_MASTER_EN_STATE,
+ 0, 10, 5000);
+
+ REG_GET(OTG_H_TOTAL, OTG_H_TOTAL, &slave_h_total);
+
+ /* assign slave OTG to be controlled by master update lock */
+ REG_SET(OTG_GLOBAL_CONTROL0, 0,
+ OTG_MASTER_UPDATE_LOCK_SEL, optc_master->inst);
+
+ /* accessing master OTG registers */
+ optc1 = DCN10TG_FROM_TG(optc_master);
+
+ /* saving update lock state, not sure if it's needed */
+ REG_GET(OTG_MASTER_UPDATE_LOCK,
+ OTG_MASTER_UPDATE_LOCK, &master_update_lock);
+ /* unlocking master OTG */
+ REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
+ OTG_MASTER_UPDATE_LOCK, 0);
+
+ REG_GET(OTG_V_BLANK_START_END,
+ OTG_V_BLANK_START, &master_v_active);
+ REG_GET(OTG_H_TOTAL, OTG_H_TOTAL, &master_h_total);
+
+ /* calculate when to enable slave OTG */
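+ /* L/p is the slave line period expressed in master lines (p = 10000
+ * keeps fractional precision); DB_X/DB_Y place the unlock point roughly
+ * one slave line before the end of the master's active region.
+ */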
+ L = (uint64_t)p * slave_h_total * master_pixel_clock_100Hz;
+ L = div_u64(L, master_h_total);
+ L = div_u64(L, slave_pixel_clock_100Hz);
+ XY = div_u64(L, p);
+ Y = master_v_active - XY - 1;
+ X = div_u64(((XY + 1) * p - L) * master_h_total, p * master_clock_divider);
+
+ /*
+ * set master OTG to unlock when V/H
+ * counters reach calculated values
+ */
+ REG_UPDATE(OTG_GLOBAL_CONTROL1,
+ MASTER_UPDATE_LOCK_DB_EN, 1);
+ REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
+ MASTER_UPDATE_LOCK_DB_X,
+ X,
+ MASTER_UPDATE_LOCK_DB_Y,
+ Y);
+
+ /* lock master OTG */
+ REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
+ OTG_MASTER_UPDATE_LOCK, 1);
+ REG_WAIT(OTG_MASTER_UPDATE_LOCK,
+ UPDATE_LOCK_STATUS, 1, 1, 10);
+
+ /* accessing slave OTG registers */
+ optc1 = DCN10TG_FROM_TG(optc_slave);
+
+ /*
+ * enable slave OTG, the OTG is locked with
+ * master's update lock, so it will not run
+ */
+ REG_UPDATE(OTG_CONTROL,
+ OTG_MASTER_EN, 1);
+
+ /* accessing master OTG registers */
+ optc1 = DCN10TG_FROM_TG(optc_master);
+
+ /*
+ * unlock master OTG. When master H/V counters reach
+ * DB_XY point, slave OTG will start
+ */
+ REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
+ OTG_MASTER_UPDATE_LOCK, 0);
+
+ /* accessing slave OTG registers */
+ optc1 = DCN10TG_FROM_TG(optc_slave);
+
+ /* wait for slave OTG to start running*/
+ REG_WAIT(OTG_CONTROL,
+ OTG_CURRENT_MASTER_EN_STATE,
+ 1, 10, 5000);
+
+ /* accessing master OTG registers */
+ optc1 = DCN10TG_FROM_TG(optc_master);
+
+ /* disable the XY point*/
+ REG_UPDATE(OTG_GLOBAL_CONTROL1,
+ MASTER_UPDATE_LOCK_DB_EN, 0);
+ REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
+ MASTER_UPDATE_LOCK_DB_X,
+ 0,
+ MASTER_UPDATE_LOCK_DB_Y,
+ 0);
+
+ /* restore master update lock */
+ REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
+ OTG_MASTER_UPDATE_LOCK, master_update_lock);
+
+ /* accessing slave OTG registers */
+ optc1 = DCN10TG_FROM_TG(optc_slave);
+ /* restore the slave to be controlled by its own update lock */
+ REG_SET(OTG_GLOBAL_CONTROL0, 0,
+ OTG_MASTER_UPDATE_LOCK_SEL, optc_slave->inst);
+
+}
+
+void optc2_triplebuffer_lock(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_SET(OTG_GLOBAL_CONTROL0, 0,
+ OTG_MASTER_UPDATE_LOCK_SEL, optc->inst);
+
+ REG_SET(OTG_VUPDATE_KEEPOUT, 0,
+ OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1);
+
+ REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
+ OTG_MASTER_UPDATE_LOCK, 1);
+
+ REG_WAIT(OTG_MASTER_UPDATE_LOCK,
+ UPDATE_LOCK_STATUS, 1,
+ 1, 10);
+}
+
+void optc2_triplebuffer_unlock(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
+ OTG_MASTER_UPDATE_LOCK, 0);
+
+ REG_SET(OTG_VUPDATE_KEEPOUT, 0,
+ OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 0);
+
+}
+
+void optc2_lock_doublebuffer_enable(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t v_blank_start = 0;
+ uint32_t h_blank_start = 0;
+
+ REG_UPDATE(OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_EN, 1);
+
+ REG_UPDATE_2(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 1,
+ DIG_UPDATE_LOCATION, 20);
+
+ REG_GET(OTG_V_BLANK_START_END, OTG_V_BLANK_START, &v_blank_start);
+
+ REG_GET(OTG_H_BLANK_START_END, OTG_H_BLANK_START, &h_blank_start);
+
+ REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
+ MASTER_UPDATE_LOCK_DB_X,
+ (h_blank_start - 200 - 1) / optc1->opp_count,
+ MASTER_UPDATE_LOCK_DB_Y,
+ v_blank_start - 1);
+
+ REG_SET_3(OTG_VUPDATE_KEEPOUT, 0,
+ MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, 0,
+ MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, 100,
+ OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1);
+}
+
+void optc2_lock_doublebuffer_disable(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
+ MASTER_UPDATE_LOCK_DB_X,
+ 0,
+ MASTER_UPDATE_LOCK_DB_Y,
+ 0);
+
+ REG_UPDATE_2(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 0,
+ DIG_UPDATE_LOCATION, 0);
+
+ REG_UPDATE(OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_EN, 0);
+}
+
+void optc2_setup_manual_trigger(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ /* Set the min/max selectors unconditionally so that
+ * DMCUB fw may change OTG timings when necessary
+ * TODO: Remove the w/a after fixing the issue in DMCUB firmware
+ */
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 1,
+ OTG_V_TOTAL_MAX_SEL, 1,
+ OTG_FORCE_LOCK_ON_EVENT, 0,
+ OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
+
+ REG_SET_8(OTG_TRIGA_CNTL, 0,
+ OTG_TRIGA_SOURCE_SELECT, 21,
+ OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
+ OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1,
+ OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0,
+ OTG_TRIGA_POLARITY_SELECT, 0,
+ OTG_TRIGA_FREQUENCY_SELECT, 0,
+ OTG_TRIGA_DELAY, 0,
+ OTG_TRIGA_CLEAR, 1);
+}
+
+void optc2_program_manual_trigger(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_SET(OTG_TRIGA_MANUAL_TRIG, 0,
+ OTG_TRIGA_MANUAL_TRIG, 1);
+}
+
+bool optc2_configure_crc(struct timing_generator *optc,
+ const struct crc_params *params)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_SET_2(OTG_CRC_CNTL2, 0,
+ OTG_CRC_DSC_MODE, params->dsc_mode,
+ OTG_CRC_DATA_STREAM_COMBINE_MODE, params->odm_mode);
+
+ return optc1_configure_crc(optc, params);
+}
+
+
+void optc2_get_last_used_drr_vtotal(struct timing_generator *optc, uint32_t *refresh_rate)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_GET(OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, refresh_rate);
+}
+
+static struct timing_generator_funcs dcn20_tg_funcs = {
+ .validate_timing = optc1_validate_timing,
+ .program_timing = optc1_program_timing,
+ .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
+ .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
+ .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
+ .program_global_sync = optc1_program_global_sync,
+ .enable_crtc = optc2_enable_crtc,
+ .disable_crtc = optc1_disable_crtc,
+ /* used by enable_timing_synchronization. Not needed for FPGA */
+ .is_counter_moving = optc1_is_counter_moving,
+ .get_position = optc1_get_position,
+ .get_frame_count = optc1_get_vblank_counter,
+ .get_scanoutpos = optc1_get_crtc_scanoutpos,
+ .get_otg_active_size = optc1_get_otg_active_size,
+ .set_early_control = optc1_set_early_control,
+ /* used by enable_timing_synchronization. Not needed for FPGA */
+ .wait_for_state = optc1_wait_for_state,
+ .set_blank = optc1_set_blank,
+ .is_blanked = optc1_is_blanked,
+ .set_blank_color = optc1_program_blank_color,
+ .enable_reset_trigger = optc1_enable_reset_trigger,
+ .enable_crtc_reset = optc1_enable_crtc_reset,
+ .did_triggered_reset_occur = optc1_did_triggered_reset_occur,
+ .triplebuffer_lock = optc2_triplebuffer_lock,
+ .triplebuffer_unlock = optc2_triplebuffer_unlock,
+ .disable_reset_trigger = optc1_disable_reset_trigger,
+ .lock = optc1_lock,
+ .unlock = optc1_unlock,
+ .lock_doublebuffer_enable = optc2_lock_doublebuffer_enable,
+ .lock_doublebuffer_disable = optc2_lock_doublebuffer_disable,
+ .enable_optc_clock = optc1_enable_optc_clock,
+ .set_drr = optc1_set_drr,
+ .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
+ .set_vtotal_min_max = optc1_set_vtotal_min_max,
+ .set_static_screen_control = optc1_set_static_screen_control,
+ .program_stereo = optc1_program_stereo,
+ .is_stereo_left_eye = optc1_is_stereo_left_eye,
+ .set_blank_data_double_buffer = optc1_set_blank_data_double_buffer,
+ .tg_init = optc1_tg_init,
+ .is_tg_enabled = optc1_is_tg_enabled,
+ .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
+ .clear_optc_underflow = optc1_clear_optc_underflow,
+ .setup_global_swap_lock = NULL,
+ .get_crc = optc1_get_crc,
+ .configure_crc = optc2_configure_crc,
+ .set_dsc_config = optc2_set_dsc_config,
+ .get_dsc_status = optc2_get_dsc_status,
+ .set_dwb_source = optc2_set_dwb_source,
+ .set_odm_bypass = optc2_set_odm_bypass,
+ .set_odm_combine = optc2_set_odm_combine,
+ .get_optc_source = optc2_get_optc_source,
+ .set_gsl = optc2_set_gsl,
+ .set_gsl_source_select = optc2_set_gsl_source_select,
+ .set_vtg_params = optc1_set_vtg_params,
+ .program_manual_trigger = optc2_program_manual_trigger,
+ .setup_manual_trigger = optc2_setup_manual_trigger,
+ .get_hw_timing = optc1_get_hw_timing,
+ .align_vblanks = optc2_align_vblanks,
+};
+
+void dcn20_timing_generator_init(struct optc *optc1)
+{
+ optc1->base.funcs = &dcn20_tg_funcs;
+
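+ /* derive the maximum programmable H/V totals from the register field masks */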
+ optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
+ optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;
+
+ optc1->min_h_blank = 32;
+ optc1->min_v_blank = 3;
+ optc1->min_v_blank_interlace = 5;
+ /* Minimum HSYNC was originally 8 pixels, as asked by HW for no actual
+ * reason; the Oculus Rift S (hsync width of 6) will not light up with 8,
+ * so it was changed to 4 to fix that issue.
+ */
+ optc1->min_h_sync_width = 4;
+ optc1->min_v_sync_width = 1;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
new file mode 100644
index 0000000000..f7968b9ca1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_OPTC_DCN20_H__
+#define __DC_OPTC_DCN20_H__
+
+#include "../dcn10/dcn10_optc.h"
+
+#define TG_COMMON_REG_LIST_DCN2_0(inst) \
+ TG_COMMON_REG_LIST_DCN(inst),\
+ SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\
+ SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\
+ SRI(OTG_GSL_WINDOW_X, OTG, inst),\
+ SRI(OTG_GSL_WINDOW_Y, OTG, inst),\
+ SRI(OTG_VUPDATE_KEEPOUT, OTG, inst),\
+ SRI(OTG_DSC_START_POSITION, OTG, inst),\
+ SRI(OTG_CRC_CNTL2, OTG, inst),\
+ SRI(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\
+ SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\
+ SRI(OPTC_WIDTH_CONTROL, ODM, inst),\
+ SRI(OPTC_MEMORY_CONFIG, ODM, inst),\
+ SR(DWB_SOURCE_SELECT),\
+ SRI(OTG_MANUAL_FLOW_CONTROL, OTG, inst), \
+ SRI(OTG_DRR_CONTROL, OTG, inst)
+
+#define TG_COMMON_MASK_SH_LIST_DCN2_0(mask_sh)\
+ TG_COMMON_MASK_SH_LIST_DCN(mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_X, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_Y, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_EN, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL2, DIG_UPDATE_LOCATION, mask_sh),\
+ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mask_sh),\
+ SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\
+ SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \
+ SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\
+ SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_END_Y, mask_sh),\
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_MODE, mask_sh), \
+ SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \
+ SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \
+ SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DSC_MODE, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_COMBINE_MODE, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_SPLIT_MODE, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_FORMAT, mask_sh),\
+ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\
+ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG1_SRC_SEL, mask_sh),\
+ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, mask_sh),\
+ SF(ODM0_OPTC_MEMORY_CONFIG, OPTC_MEM_SEL, mask_sh),\
+ SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, mask_sh),\
+ SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DSC_MODE, mask_sh),\
+ SF(ODM0_OPTC_BYTES_PER_PIXEL, OPTC_DSC_BYTES_PER_PIXEL, mask_sh),\
+ SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_DSC_SLICE_WIDTH, mask_sh),\
+ SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_SEGMENT_WIDTH, mask_sh),\
+ SF(DWB_SOURCE_SELECT, OPTC_DWB0_SOURCE_SELECT, mask_sh),\
+ SF(DWB_SOURCE_SELECT, OPTC_DWB1_SOURCE_SELECT, mask_sh),\
+ SF(OTG0_OTG_MANUAL_FLOW_CONTROL, MANUAL_FLOW_CONTROL, mask_sh), \
+ SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh)
+
+void dcn20_timing_generator_init(struct optc *optc);
+
+void optc2_get_last_used_drr_vtotal(struct timing_generator *optc,
+ uint32_t *refresh_rate);
+
+bool optc2_enable_crtc(struct timing_generator *optc);
+
+void optc2_set_gsl(struct timing_generator *optc,
+ const struct gsl_params *params);
+
+void optc2_set_gsl_source_select(struct timing_generator *optc,
+ int group_idx,
+ uint32_t gsl_ready_signal);
+
+void optc2_set_dsc_config(struct timing_generator *optc,
+ enum optc_dsc_mode dsc_mode,
+ uint32_t dsc_bytes_per_pixel,
+ uint32_t dsc_slice_width);
+
+void optc2_get_dsc_status(struct timing_generator *optc,
+ uint32_t *dsc_mode);
+
+void optc2_set_odm_bypass(struct timing_generator *optc,
+ const struct dc_crtc_timing *dc_crtc_timing);
+
+void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
+ struct dc_crtc_timing *timing);
+
+void optc2_get_optc_source(struct timing_generator *optc,
+ uint32_t *num_of_src_opp,
+ uint32_t *src_opp_id_0,
+ uint32_t *src_opp_id_1);
+
+void optc2_triplebuffer_lock(struct timing_generator *optc);
+void optc2_triplebuffer_unlock(struct timing_generator *optc);
+void optc2_lock_doublebuffer_disable(struct timing_generator *optc);
+void optc2_lock_doublebuffer_enable(struct timing_generator *optc);
+void optc2_setup_manual_trigger(struct timing_generator *optc);
+void optc2_program_manual_trigger(struct timing_generator *optc);
+bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing);
+bool optc2_configure_crc(struct timing_generator *optc,
+ const struct crc_params *params);
+#endif /* __DC_OPTC_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
new file mode 100644
index 0000000000..d587f807df
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -0,0 +1,2770 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ * Copyright 2019 Raptor Engineering, LLC
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/slab.h>
+
+#include "dm_services.h"
+#include "dc.h"
+
+#include "dcn20_init.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "dcn20/dcn20_resource.h"
+
+#include "dml/dcn20/dcn20_fpu.h"
+
+#include "dcn10/dcn10_hubp.h"
+#include "dcn10/dcn10_ipp.h"
+#include "dcn20_hubbub.h"
+#include "dcn20_mpc.h"
+#include "dcn20_hubp.h"
+#include "irq/dcn20/irq_service_dcn20.h"
+#include "dcn20_dpp.h"
+#include "dcn20_optc.h"
+#include "dcn20_hwseq.h"
+#include "dce110/dce110_hw_sequencer.h"
+#include "dcn10/dcn10_resource.h"
+#include "dcn20_opp.h"
+
+#include "dcn20_dsc.h"
+
+#include "dcn20_link_encoder.h"
+#include "dcn20_stream_encoder.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_audio.h"
+#include "dce/dce_hwseq.h"
+#include "virtual/virtual_stream_encoder.h"
+#include "dce110/dce110_resource.h"
+#include "dml/display_mode_vba.h"
+#include "dcn20_dccg.h"
+#include "dcn20_vmid.h"
+#include "dce/dce_panel_cntl.h"
+
+#include "navi10_ip_offset.h"
+
+#include "dcn/dcn_2_0_0_offset.h"
+#include "dcn/dcn_2_0_0_sh_mask.h"
+#include "dpcs/dpcs_2_0_0_offset.h"
+#include "dpcs/dpcs_2_0_0_sh_mask.h"
+
+#include "nbio/nbio_2_3_offset.h"
+
+#include "dcn20/dcn20_dwb.h"
+#include "dcn20/dcn20_mmhubbub.h"
+
+#include "mmhub/mmhub_2_0_0_offset.h"
+#include "mmhub/mmhub_2_0_0_sh_mask.h"
+
+#include "reg_helper.h"
+#include "dce/dce_abm.h"
+#include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
+#include "dce/dce_i2c.h"
+#include "vm_helper.h"
+#include "link_enc_cfg.h"
+
+#include "amdgpu_socbb.h"
+
+#include "link.h"
+#define DC_LOGGER_INIT(logger)
+
+#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
+ #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
+ #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f
+ #define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f
+ #define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f
+ #define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f
+ #define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x260f
+ #define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x270f
+ #define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+#endif
+
+
+enum dcn20_clk_src_array_id {
+ DCN20_CLK_SRC_PLL0,
+ DCN20_CLK_SRC_PLL1,
+ DCN20_CLK_SRC_PLL2,
+ DCN20_CLK_SRC_PLL3,
+ DCN20_CLK_SRC_PLL4,
+ DCN20_CLK_SRC_PLL5,
+ DCN20_CLK_SRC_TOTAL
+};
+
+/* begin *********************
+ * macros to expand the register list macros defined in HW object header files */
+
+/* DCN */
+#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
+
+#define BASE(seg) BASE_INNER(seg)
+
+#define SR(reg_name)\
+ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+#define SRI(reg_name, block, id)\
+ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
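+/* e.g. SRI(DPG_CONTROL, DPG, 0) resolves to the segment base plus the mmDPG0_DPG_CONTROL offset */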
+
+#define SRI2_DWB(reg_name, block, id)\
+ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+#define SF_DWB(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define SF_DWB2(reg_name, block, id, field_name, post_fix) \
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define SRIR(var_name, reg_name, block, id)\
+ .var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+#define SRII(reg_name, block, id)\
+ .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+#define DCCG_SRII(reg_name, block, id)\
+ .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+#define VUPDATE_SRII(reg_name, block, id)\
+ .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+ mm ## reg_name ## _ ## block ## id
+
+/* NBIO */
+#define NBIO_BASE_INNER(seg) \
+ NBIO_BASE__INST0_SEG ## seg
+
+#define NBIO_BASE(seg) \
+ NBIO_BASE_INNER(seg)
+
+#define NBIO_SR(reg_name)\
+ .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+/* MMHUB */
+#define MMHUB_BASE_INNER(seg) \
+ MMHUB_BASE__INST0_SEG ## seg
+
+#define MMHUB_BASE(seg) \
+ MMHUB_BASE_INNER(seg)
+
+#define MMHUB_SR(reg_name)\
+ .reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \
+ mmMM ## reg_name
+
+static const struct bios_registers bios_regs = {
+ NBIO_SR(BIOS_SCRATCH_3),
+ NBIO_SR(BIOS_SCRATCH_6)
+};
+
+#define clk_src_regs(index, pllid)\
+[index] = {\
+ CS_COMMON_REG_LIST_DCN2_0(index, pllid),\
+}
+
+static const struct dce110_clk_src_regs clk_src_regs[] = {
+ clk_src_regs(0, A),
+ clk_src_regs(1, B),
+ clk_src_regs(2, C),
+ clk_src_regs(3, D),
+ clk_src_regs(4, E),
+ clk_src_regs(5, F)
+};
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
+};
+
+static const struct dce_dmcu_registers dmcu_regs = {
+ DMCU_DCN10_REG_LIST()
+};
+
+static const struct dce_dmcu_shift dmcu_shift = {
+ DMCU_MASK_SH_LIST_DCN10(__SHIFT)
+};
+
+static const struct dce_dmcu_mask dmcu_mask = {
+ DMCU_MASK_SH_LIST_DCN10(_MASK)
+};
+
+static const struct dce_abm_registers abm_regs = {
+ ABM_DCN20_REG_LIST()
+};
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCN20(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCN20(_MASK)
+};
+
+#define audio_regs(id)\
+[id] = {\
+ AUD_COMMON_REG_LIST(id)\
+}
+
+static const struct dce_audio_registers audio_regs[] = {
+ audio_regs(0),
+ audio_regs(1),
+ audio_regs(2),
+ audio_regs(3),
+ audio_regs(4),
+ audio_regs(5),
+ audio_regs(6),
+};
+
+#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
+ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
+
+static const struct dce_audio_shift audio_shift = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_audio_mask audio_mask = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define stream_enc_regs(id)\
+[id] = {\
+ SE_DCN2_REG_LIST(id)\
+}
+
+static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
+ stream_enc_regs(0),
+ stream_enc_regs(1),
+ stream_enc_regs(2),
+ stream_enc_regs(3),
+ stream_enc_regs(4),
+ stream_enc_regs(5),
+};
+
+static const struct dcn10_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT)
+};
+
+static const struct dcn10_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCN20(_MASK)
+};
+
+
+#define aux_regs(id)\
+[id] = {\
+ DCN2_AUX_REG_LIST(id)\
+}
+
+static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+#define link_regs(id, phyid)\
+[id] = {\
+ LE_DCN10_REG_LIST(id), \
+ UNIPHY_DCN2_REG_LIST(phyid), \
+ DPCS_DCN2_REG_LIST(id), \
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
+}
+
+static const struct dcn10_link_enc_registers link_enc_regs[] = {
+ link_regs(0, A),
+ link_regs(1, B),
+ link_regs(2, C),
+ link_regs(3, D),
+ link_regs(4, E),
+ link_regs(5, F)
+};
+
+static const struct dcn10_link_enc_shift le_shift = {
+ LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT),\
+ DPCS_DCN2_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn10_link_enc_mask le_mask = {
+ LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK),\
+ DPCS_DCN2_MASK_SH_LIST(_MASK)
+};
+
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCN_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
+#define ipp_regs(id)\
+[id] = {\
+ IPP_REG_LIST_DCN20(id),\
+}
+
+static const struct dcn10_ipp_registers ipp_regs[] = {
+ ipp_regs(0),
+ ipp_regs(1),
+ ipp_regs(2),
+ ipp_regs(3),
+ ipp_regs(4),
+ ipp_regs(5),
+};
+
+static const struct dcn10_ipp_shift ipp_shift = {
+ IPP_MASK_SH_LIST_DCN20(__SHIFT)
+};
+
+static const struct dcn10_ipp_mask ipp_mask = {
+ IPP_MASK_SH_LIST_DCN20(_MASK),
+};
+
+#define opp_regs(id)\
+[id] = {\
+ OPP_REG_LIST_DCN20(id),\
+}
+
+static const struct dcn20_opp_registers opp_regs[] = {
+ opp_regs(0),
+ opp_regs(1),
+ opp_regs(2),
+ opp_regs(3),
+ opp_regs(4),
+ opp_regs(5),
+};
+
+static const struct dcn20_opp_shift opp_shift = {
+ OPP_MASK_SH_LIST_DCN20(__SHIFT)
+};
+
+static const struct dcn20_opp_mask opp_mask = {
+ OPP_MASK_SH_LIST_DCN20(_MASK)
+};
+
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST0(id), \
+ .AUXN_IMPCAL = 0, \
+ .AUXP_IMPCAL = 0, \
+ .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+ aux_engine_regs(5)
+};
+
+#define tf_regs(id)\
+[id] = {\
+ TF_REG_LIST_DCN20(id),\
+ TF_REG_LIST_DCN20_COMMON_APPEND(id),\
+}
+
+static const struct dcn2_dpp_registers tf_regs[] = {
+ tf_regs(0),
+ tf_regs(1),
+ tf_regs(2),
+ tf_regs(3),
+ tf_regs(4),
+ tf_regs(5),
+};
+
+static const struct dcn2_dpp_shift tf_shift = {
+ TF_REG_LIST_SH_MASK_DCN20(__SHIFT),
+ TF_DEBUG_REG_LIST_SH_DCN20
+};
+
+static const struct dcn2_dpp_mask tf_mask = {
+ TF_REG_LIST_SH_MASK_DCN20(_MASK),
+ TF_DEBUG_REG_LIST_MASK_DCN20
+};
+
+#define dwbc_regs_dcn2(id)\
+[id] = {\
+ DWBC_COMMON_REG_LIST_DCN2_0(id),\
+ }
+
+static const struct dcn20_dwbc_registers dwbc20_regs[] = {
+ dwbc_regs_dcn2(0),
+};
+
+static const struct dcn20_dwbc_shift dwbc20_shift = {
+ DWBC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
+};
+
+static const struct dcn20_dwbc_mask dwbc20_mask = {
+ DWBC_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
+};
+
+#define mcif_wb_regs_dcn2(id)\
+[id] = {\
+ MCIF_WB_COMMON_REG_LIST_DCN2_0(id),\
+ }
+
+static const struct dcn20_mmhubbub_registers mcif_wb20_regs[] = {
+ mcif_wb_regs_dcn2(0),
+};
+
+static const struct dcn20_mmhubbub_shift mcif_wb20_shift = {
+ MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
+};
+
+static const struct dcn20_mmhubbub_mask mcif_wb20_mask = {
+ MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
+};
+
+static const struct dcn20_mpc_registers mpc_regs = {
+ MPC_REG_LIST_DCN2_0(0),
+ MPC_REG_LIST_DCN2_0(1),
+ MPC_REG_LIST_DCN2_0(2),
+ MPC_REG_LIST_DCN2_0(3),
+ MPC_REG_LIST_DCN2_0(4),
+ MPC_REG_LIST_DCN2_0(5),
+ MPC_OUT_MUX_REG_LIST_DCN2_0(0),
+ MPC_OUT_MUX_REG_LIST_DCN2_0(1),
+ MPC_OUT_MUX_REG_LIST_DCN2_0(2),
+ MPC_OUT_MUX_REG_LIST_DCN2_0(3),
+ MPC_OUT_MUX_REG_LIST_DCN2_0(4),
+ MPC_OUT_MUX_REG_LIST_DCN2_0(5),
+ MPC_DBG_REG_LIST_DCN2_0()
+};
+
+static const struct dcn20_mpc_shift mpc_shift = {
+ MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT),
+ MPC_DEBUG_REG_LIST_SH_DCN20
+};
+
+static const struct dcn20_mpc_mask mpc_mask = {
+ MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK),
+ MPC_DEBUG_REG_LIST_MASK_DCN20
+};
+
+#define tg_regs(id)\
+[id] = {TG_COMMON_REG_LIST_DCN2_0(id)}
+
+
+static const struct dcn_optc_registers tg_regs[] = {
+ tg_regs(0),
+ tg_regs(1),
+ tg_regs(2),
+ tg_regs(3),
+ tg_regs(4),
+ tg_regs(5)
+};
+
+static const struct dcn_optc_shift tg_shift = {
+ TG_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
+};
+
+static const struct dcn_optc_mask tg_mask = {
+ TG_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
+};
+
+#define hubp_regs(id)\
+[id] = {\
+ HUBP_REG_LIST_DCN20(id)\
+}
+
+static const struct dcn_hubp2_registers hubp_regs[] = {
+ hubp_regs(0),
+ hubp_regs(1),
+ hubp_regs(2),
+ hubp_regs(3),
+ hubp_regs(4),
+ hubp_regs(5)
+};
+
+static const struct dcn_hubp2_shift hubp_shift = {
+ HUBP_MASK_SH_LIST_DCN20(__SHIFT)
+};
+
+static const struct dcn_hubp2_mask hubp_mask = {
+ HUBP_MASK_SH_LIST_DCN20(_MASK)
+};
+
+static const struct dcn_hubbub_registers hubbub_reg = {
+ HUBBUB_REG_LIST_DCN20(0)
+};
+
+static const struct dcn_hubbub_shift hubbub_shift = {
+ HUBBUB_MASK_SH_LIST_DCN20(__SHIFT)
+};
+
+static const struct dcn_hubbub_mask hubbub_mask = {
+ HUBBUB_MASK_SH_LIST_DCN20(_MASK)
+};
+
+#define vmid_regs(id)\
+[id] = {\
+ DCN20_VMID_REG_LIST(id)\
+}
+
+static const struct dcn_vmid_registers vmid_regs[] = {
+ vmid_regs(0),
+ vmid_regs(1),
+ vmid_regs(2),
+ vmid_regs(3),
+ vmid_regs(4),
+ vmid_regs(5),
+ vmid_regs(6),
+ vmid_regs(7),
+ vmid_regs(8),
+ vmid_regs(9),
+ vmid_regs(10),
+ vmid_regs(11),
+ vmid_regs(12),
+ vmid_regs(13),
+ vmid_regs(14),
+ vmid_regs(15)
+};
+
+static const struct dcn20_vmid_shift vmid_shifts = {
+ DCN20_VMID_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn20_vmid_mask vmid_masks = {
+ DCN20_VMID_MASK_SH_LIST(_MASK)
+};
+
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCN_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCN_AUX_MASK_SH_LIST(_MASK)
+};
+
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ case TRANSMITTER_UNIPHY_F:
+ return 5;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
+#define dsc_regsDCN20(id)\
+[id] = {\
+ DSC_REG_LIST_DCN20(id)\
+}
+
+static const struct dcn20_dsc_registers dsc_regs[] = {
+ dsc_regsDCN20(0),
+ dsc_regsDCN20(1),
+ dsc_regsDCN20(2),
+ dsc_regsDCN20(3),
+ dsc_regsDCN20(4),
+ dsc_regsDCN20(5)
+};
+
+static const struct dcn20_dsc_shift dsc_shift = {
+ DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
+};
+
+static const struct dcn20_dsc_mask dsc_mask = {
+ DSC_REG_LIST_SH_MASK_DCN20(_MASK)
+};
+
+static const struct dccg_registers dccg_regs = {
+ DCCG_REG_LIST_DCN2()
+};
+
+static const struct dccg_shift dccg_shift = {
+ DCCG_MASK_SH_LIST_DCN2(__SHIFT)
+};
+
+static const struct dccg_mask dccg_mask = {
+ DCCG_MASK_SH_LIST_DCN2(_MASK)
+};
+
+static const struct resource_caps res_cap_nv10 = {
+ .num_timing_generator = 6,
+ .num_opp = 6,
+ .num_video_plane = 6,
+ .num_audio = 7,
+ .num_stream_encoder = 6,
+ .num_pll = 6,
+ .num_dwb = 1,
+ .num_ddc = 6,
+ .num_vmid = 16,
+ .num_dsc = 6,
+};
+
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCN_UNIVERSAL,
+ .per_pixel_alpha = true,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = true,
+ .fp16 = true,
+ .p010 = true
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 16000,
+ .fp16 = 1
+ },
+
+ .max_downscale_factor = {
+ .argb8888 = 250,
+ .nv12 = 250,
+ .fp16 = 1
+ },
+ 16,
+ 16
+};
+static const struct resource_caps res_cap_nv14 = {
+ .num_timing_generator = 5,
+ .num_opp = 5,
+ .num_video_plane = 5,
+ .num_audio = 6,
+ .num_stream_encoder = 5,
+ .num_pll = 5,
+ .num_dwb = 1,
+ .num_ddc = 5,
+ .num_vmid = 16,
+ .num_dsc = 5,
+};
+
+static const struct dc_debug_options debug_defaults_drv = {
+ .disable_dmcu = false,
+ .force_abm_enable = false,
+ .timing_trace = false,
+ .clock_trace = true,
+ .disable_pplib_clock_request = true,
+ .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .force_single_disp_pipe_split = false,
+ .disable_dcc = DCC_ENABLE,
+ .vsr_support = true,
+ .performance_trace = false,
+ .max_downscale_src_width = 5120, /* up to 5K */
+ .disable_pplib_wm_range = false,
+ .scl_reset_length10 = true,
+ .sanity_checks = false,
+ .underflow_assert_delay_us = 0xFFFFFFFF,
+ .enable_legacy_fast_update = true,
+};
+
+void dcn20_dpp_destroy(struct dpp **dpp)
+{
+ kfree(TO_DCN20_DPP(*dpp));
+ *dpp = NULL;
+}
+
+struct dpp *dcn20_dpp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn20_dpp *dpp =
+ kzalloc(sizeof(struct dcn20_dpp), GFP_ATOMIC);
+
+ if (!dpp)
+ return NULL;
+
+ if (dpp2_construct(dpp, ctx, inst,
+ &tf_regs[inst], &tf_shift, &tf_mask))
+ return &dpp->base;
+
+ BREAK_TO_DEBUGGER();
+ kfree(dpp);
+ return NULL;
+}
+
+struct input_pixel_processor *dcn20_ipp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn10_ipp *ipp =
+ kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC);
+
+ if (!ipp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dcn20_ipp_construct(ipp, ctx, inst,
+ &ipp_regs[inst], &ipp_shift, &ipp_mask);
+ return &ipp->base;
+}
+
+
+struct output_pixel_processor *dcn20_opp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn20_opp *opp =
+ kzalloc(sizeof(struct dcn20_opp), GFP_ATOMIC);
+
+ if (!opp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dcn20_opp_construct(opp, ctx, inst,
+ &opp_regs[inst], &opp_shift, &opp_mask);
+ return &opp->base;
+}
+
+struct dce_aux *dcn20_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
+
+ return &aux_engine->base;
+}
+#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) }
+
+static const struct dce_i2c_registers i2c_hw_regs[] = {
+ i2c_inst_regs(1),
+ i2c_inst_regs(2),
+ i2c_inst_regs(3),
+ i2c_inst_regs(4),
+ i2c_inst_regs(5),
+ i2c_inst_regs(6),
+};
+
+static const struct dce_i2c_shift i2c_shifts = {
+ I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT)
+};
+
+static const struct dce_i2c_mask i2c_masks = {
+ I2C_COMMON_MASK_SH_LIST_DCN2(_MASK)
+};
+
+struct dce_i2c_hw *dcn20_i2c_hw_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_i2c_hw *dce_i2c_hw =
+ kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC);
+
+ if (!dce_i2c_hw)
+ return NULL;
+
+ dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
+ &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
+
+ return dce_i2c_hw;
+}
+struct mpc *dcn20_mpc_create(struct dc_context *ctx)
+{
+ struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),
+ GFP_ATOMIC);
+
+ if (!mpc20)
+ return NULL;
+
+ dcn20_mpc_construct(mpc20, ctx,
+ &mpc_regs,
+ &mpc_shift,
+ &mpc_mask,
+ 6);
+
+ return &mpc20->base;
+}
+
+struct hubbub *dcn20_hubbub_create(struct dc_context *ctx)
+{
+ int i;
+ struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
+ GFP_ATOMIC);
+
+ if (!hubbub)
+ return NULL;
+
+ hubbub2_construct(hubbub, ctx,
+ &hubbub_reg,
+ &hubbub_shift,
+ &hubbub_mask);
+
+ for (i = 0; i < res_cap_nv10.num_vmid; i++) {
+ struct dcn20_vmid *vmid = &hubbub->vmid[i];
+
+ vmid->ctx = ctx;
+
+ vmid->regs = &vmid_regs[i];
+ vmid->shifts = &vmid_shifts;
+ vmid->masks = &vmid_masks;
+ }
+
+ return &hubbub->base;
+}
+
+struct timing_generator *dcn20_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance)
+{
+ struct optc *tgn10 =
+ kzalloc(sizeof(struct optc), GFP_ATOMIC);
+
+ if (!tgn10)
+ return NULL;
+
+ tgn10->base.inst = instance;
+ tgn10->base.ctx = ctx;
+
+ tgn10->tg_regs = &tg_regs[instance];
+ tgn10->tg_shift = &tg_shift;
+ tgn10->tg_mask = &tg_mask;
+
+ dcn20_timing_generator_init(tgn10);
+
+ return &tgn10->base;
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 600000,
+ .hdmi_ycbcr420_supported = true,
+ .dp_ycbcr420_supported = true,
+ .fec_supported = true,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_TPS4_CAPABLE = true
+};
+
+struct link_encoder *dcn20_link_encoder_create(
+ struct dc_context *ctx,
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dcn20_link_encoder *enc20 =
+ kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
+ int link_regs_id;
+
+ if (!enc20)
+ return NULL;
+
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
+ dcn20_link_encoder_construct(enc20,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[link_regs_id],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source],
+ &le_shift,
+ &le_mask);
+
+ return &enc20->enc10.base;
+}
+
+static struct panel_cntl *dcn20_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
+static struct clock_source *dcn20_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dcn20_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ kfree(clk_src);
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+ generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
+ FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
+}
+
+static struct audio *dcn20_create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+struct stream_encoder *dcn20_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dcn10_stream_encoder *enc1 =
+ kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
+
+ if (!enc1)
+ return NULL;
+
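+	/* On Navi14 the DIGD instance is skipped: bump engine ids at or above
+	 * DIGD so they index the next register set instead.
+	 */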
+ if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) {
+ if (eng_id >= ENGINE_ID_DIGD)
+ eng_id++;
+ }
+
+ dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id,
+ &stream_enc_regs[eng_id],
+ &se_shift, &se_mask);
+
+ return &enc1->base;
+}
+
+static const struct dce_hwseq_registers hwseq_reg = {
+ HWSEQ_DCN2_REG_LIST()
+};
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCN2_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCN2_MASK_SH_LIST(_MASK)
+};
+
+struct dce_hwseq *dcn20_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = &hwseq_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ }
+ return hws;
+}
+
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = dcn20_create_audio,
+ .create_stream_encoder = dcn20_stream_encoder_create,
+ .create_hwseq = dcn20_hwseq_create,
+};
+
+static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
+
+void dcn20_clock_source_destroy(struct clock_source **clk_src)
+{
+ kfree(TO_DCE110_CLK_SRC(*clk_src));
+ *clk_src = NULL;
+}
+
+
+struct display_stream_compressor *dcn20_dsc_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn20_dsc *dsc =
+ kzalloc(sizeof(struct dcn20_dsc), GFP_ATOMIC);
+
+ if (!dsc) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
+ return &dsc->base;
+}
+
+void dcn20_dsc_destroy(struct display_stream_compressor **dsc)
+{
+ kfree(container_of(*dsc, struct dcn20_dsc, base));
+ *dsc = NULL;
+}
+
+
+static void dcn20_resource_destruct(struct dcn20_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL) {
+ kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+ pool->base.stream_enc[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
+ if (pool->base.dscs[i] != NULL)
+ dcn20_dsc_destroy(&pool->base.dscs[i]);
+ }
+
+ if (pool->base.mpc != NULL) {
+ kfree(TO_DCN20_MPC(pool->base.mpc));
+ pool->base.mpc = NULL;
+ }
+ if (pool->base.hubbub != NULL) {
+ kfree(pool->base.hubbub);
+ pool->base.hubbub = NULL;
+ }
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.dpps[i] != NULL)
+ dcn20_dpp_destroy(&pool->base.dpps[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.hubps[i] != NULL) {
+ kfree(TO_DCN20_HUBP(pool->base.hubps[i]));
+ pool->base.hubps[i] = NULL;
+ }
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+ if (pool->base.hw_i2cs[i] != NULL) {
+ kfree(pool->base.hw_i2cs[i]);
+ pool->base.hw_i2cs[i] = NULL;
+ }
+ if (pool->base.sw_i2cs[i] != NULL) {
+ kfree(pool->base.sw_i2cs[i]);
+ pool->base.sw_i2cs[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_opp; i++) {
+ if (pool->base.opps[i] != NULL)
+ pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_dwb; i++) {
+ if (pool->base.dwbc[i] != NULL) {
+ kfree(TO_DCN20_DWBC(pool->base.dwbc[i]));
+ pool->base.dwbc[i] = NULL;
+ }
+ if (pool->base.mcif_wb[i] != NULL) {
+ kfree(TO_DCN20_MMHUBBUB(pool->base.mcif_wb[i]));
+ pool->base.mcif_wb[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i])
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL) {
+ dcn20_clock_source_destroy(&pool->base.clock_sources[i]);
+ pool->base.clock_sources[i] = NULL;
+ }
+ }
+
+ if (pool->base.dp_clock_source != NULL) {
+ dcn20_clock_source_destroy(&pool->base.dp_clock_source);
+ pool->base.dp_clock_source = NULL;
+ }
+
+
+ if (pool->base.abm != NULL)
+ dce_abm_destroy(&pool->base.abm);
+
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
+ if (pool->base.dccg != NULL)
+ dcn_dccg_destroy(&pool->base.dccg);
+
+ if (pool->base.pp_smu != NULL)
+ dcn20_pp_smu_destroy(&pool->base.pp_smu);
+
+ if (pool->base.oem_device != NULL) {
+ struct dc *dc = pool->base.oem_device->ctx->dc;
+
+ dc->link_srv->destroy_ddc_service(&pool->base.oem_device);
+ }
+}
+
+struct hubp *dcn20_hubp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn20_hubp *hubp2 =
+ kzalloc(sizeof(struct dcn20_hubp), GFP_ATOMIC);
+
+ if (!hubp2)
+ return NULL;
+
+ if (hubp2_construct(hubp2, ctx, inst,
+ &hubp_regs[inst], &hubp_shift, &hubp_mask))
+ return &hubp2->base;
+
+ BREAK_TO_DEBUGGER();
+ kfree(hubp2);
+ return NULL;
+}
+
+static void get_pixel_clock_parameters(
+ struct pipe_ctx *pipe_ctx,
+ struct pixel_clk_params *pixel_clk_params)
+{
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ struct pipe_ctx *odm_pipe;
+ int opp_cnt = 1;
+ struct dc_link *link = stream->link;
+ struct link_encoder *link_enc = NULL;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ opp_cnt++;
+
+ pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
+
+ link_enc = link_enc_cfg_get_link_enc(link);
+ if (link_enc)
+ pixel_clk_params->encoder_object_id = link_enc->id;
+
+ pixel_clk_params->signal_type = pipe_ctx->stream->signal;
+ pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
+	/* TODO: un-hardcode */
+ /* TODO - DP2.0 HW: calculate requested_sym_clk for UHBR rates */
+ pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
+ LINK_RATE_REF_FREQ_IN_KHZ;
+ pixel_clk_params->flags.ENABLE_SS = 0;
+ pixel_clk_params->color_depth =
+ stream->timing.display_color_depth;
+ pixel_clk_params->flags.DISPLAY_BLANKED = 1;
+ pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;
+
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ pixel_clk_params->color_depth = COLOR_DEPTH_888;
+
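+	/* The requested pixel clock is divided when the stream spans multiple
+	 * OPPs (ODM combine), packs two pixels per container, or the DP DIG
+	 * 2:1 pixel rate divider policy applies.
+	 */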
+ if (opp_cnt == 4)
+ pixel_clk_params->requested_pix_clk_100hz /= 4;
+ else if (optc2_is_two_pixels_per_containter(&stream->timing) || opp_cnt == 2)
+ pixel_clk_params->requested_pix_clk_100hz /= 2;
+ else if (hws->funcs.is_dp_dig_pixel_rate_div_policy) {
+ if (hws->funcs.is_dp_dig_pixel_rate_div_policy(pipe_ctx))
+ pixel_clk_params->requested_pix_clk_100hz /= 2;
+ }
+
+ if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
+ pixel_clk_params->requested_pix_clk_100hz *= 2;
+
+}
+
+static void build_clamping_params(struct dc_stream_state *stream)
+{
+ stream->clamping.clamping_level = CLAMPING_FULL_RANGE;
+ stream->clamping.c_depth = stream->timing.display_color_depth;
+ stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
+}
+
+static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
+{
+
+ get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);
+
+ pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
+ pipe_ctx->clock_source,
+ &pipe_ctx->stream_res.pix_clk_params,
+ &pipe_ctx->pll_settings);
+
+ pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
+
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream,
+ &pipe_ctx->stream->bit_depth_params);
+ build_clamping_params(pipe_ctx->stream);
+
+ return DC_OK;
+}
+
+enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream)
+{
+ enum dc_status status = DC_OK;
+ struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
+
+ if (!pipe_ctx)
+ return DC_ERROR_UNEXPECTED;
+
+
+ status = build_pipe_hw_param(pipe_ctx);
+
+ return status;
+}
+
+
+void dcn20_acquire_dsc(const struct dc *dc,
+ struct resource_context *res_ctx,
+ struct display_stream_compressor **dsc,
+ int pipe_idx)
+{
+ int i;
+ const struct resource_pool *pool = dc->res_pool;
+ struct display_stream_compressor *dsc_old = dc->current_state->res_ctx.pipe_ctx[pipe_idx].stream_res.dsc;
+
+ ASSERT(*dsc == NULL); /* If this ASSERT fails, dsc was not released properly */
+ *dsc = NULL;
+
+ /* Always do 1-to-1 mapping when number of DSCs is same as number of pipes */
+ if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
+ *dsc = pool->dscs[pipe_idx];
+ res_ctx->is_dsc_acquired[pipe_idx] = true;
+ return;
+ }
+
+ /* Return old DSC to avoid the need for re-programming */
+ if (dsc_old && !res_ctx->is_dsc_acquired[dsc_old->inst]) {
+ *dsc = dsc_old;
+ res_ctx->is_dsc_acquired[dsc_old->inst] = true;
+		return;
+ }
+
+ /* Find first free DSC */
+ for (i = 0; i < pool->res_cap->num_dsc; i++)
+ if (!res_ctx->is_dsc_acquired[i]) {
+ *dsc = pool->dscs[i];
+ res_ctx->is_dsc_acquired[i] = true;
+ break;
+ }
+}
+
+void dcn20_release_dsc(struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct display_stream_compressor **dsc)
+{
+ int i;
+
+ for (i = 0; i < pool->res_cap->num_dsc; i++)
+ if (pool->dscs[i] == *dsc) {
+ res_ctx->is_dsc_acquired[i] = false;
+ *dsc = NULL;
+ break;
+ }
+}
+
+
+
+enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc,
+ struct dc_state *dc_ctx,
+ struct dc_stream_state *dc_stream)
+{
+ enum dc_status result = DC_OK;
+ int i;
+
+ /* Get a DSC if required and available */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc_ctx->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->top_pipe)
+ continue;
+
+ if (pipe_ctx->stream != dc_stream)
+ continue;
+
+ if (pipe_ctx->stream_res.dsc)
+ continue;
+
+ dcn20_acquire_dsc(dc, &dc_ctx->res_ctx, &pipe_ctx->stream_res.dsc, i);
+
+ /* The number of DSCs can be less than the number of pipes */
+ if (!pipe_ctx->stream_res.dsc) {
+ result = DC_NO_DSC_RESOURCE;
+ }
+
+ break;
+ }
+
+ return result;
+}
+
+
+static enum dc_status remove_dsc_from_stream_resource(struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *dc_stream)
+{
+ struct pipe_ctx *pipe_ctx = NULL;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (new_ctx->res_ctx.pipe_ctx[i].stream == dc_stream && !new_ctx->res_ctx.pipe_ctx[i].top_pipe) {
+ pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream_res.dsc)
+ dcn20_release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc);
+ }
+ }
+
+ if (!pipe_ctx)
+ return DC_ERROR_UNEXPECTED;
+ else
+ return DC_OK;
+}
+
+
+enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ result = resource_map_pool_resources(dc, new_ctx, dc_stream);
+
+ if (result == DC_OK)
+ result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream);
+
+ /* Get a DSC if required and available */
+ if (result == DC_OK && dc_stream->timing.flags.DSC)
+ result = dcn20_add_dsc_to_stream_resource(dc, new_ctx, dc_stream);
+
+ if (result == DC_OK)
+ result = dcn20_build_mapped_resource(dc, new_ctx, dc_stream);
+
+ return result;
+}
+
+
+enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream)
+{
+ enum dc_status result = DC_OK;
+
+ result = remove_dsc_from_stream_resource(dc, new_ctx, dc_stream);
+
+ return result;
+}
+
+/**
+ * dcn20_split_stream_for_odm - Check if stream can be split for ODM
+ *
+ * @dc: DC object with resource pool info required for pipe split
+ * @res_ctx: Persistent state of resources
+ * @prev_odm_pipe: Reference to the previous ODM pipe
+ * @next_odm_pipe: Reference to the next ODM pipe
+ *
+ * This function takes a logically active pipe and a logically free pipe and
+ * halves all the scaling parameters that need to be halved while populating
+ * the free pipe with the required resources and configuring the next/previous
+ * ODM pipe pointers.
+ *
+ * Return:
+ * Return true if the stream could be split for ODM, otherwise return false.
+ */
+bool dcn20_split_stream_for_odm(
+ const struct dc *dc,
+ struct resource_context *res_ctx,
+ struct pipe_ctx *prev_odm_pipe,
+ struct pipe_ctx *next_odm_pipe)
+{
+ int pipe_idx = next_odm_pipe->pipe_idx;
+ const struct resource_pool *pool = dc->res_pool;
+
+ *next_odm_pipe = *prev_odm_pipe;
+
+ next_odm_pipe->pipe_idx = pipe_idx;
+ next_odm_pipe->plane_res.mi = pool->mis[next_odm_pipe->pipe_idx];
+ next_odm_pipe->plane_res.hubp = pool->hubps[next_odm_pipe->pipe_idx];
+ next_odm_pipe->plane_res.ipp = pool->ipps[next_odm_pipe->pipe_idx];
+ next_odm_pipe->plane_res.xfm = pool->transforms[next_odm_pipe->pipe_idx];
+ next_odm_pipe->plane_res.dpp = pool->dpps[next_odm_pipe->pipe_idx];
+ next_odm_pipe->plane_res.mpcc_inst = pool->dpps[next_odm_pipe->pipe_idx]->inst;
+ next_odm_pipe->stream_res.dsc = NULL;
+ if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != next_odm_pipe) {
+ next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe;
+ next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe;
+ }
+ if (prev_odm_pipe->top_pipe && prev_odm_pipe->top_pipe->next_odm_pipe) {
+ prev_odm_pipe->top_pipe->next_odm_pipe->bottom_pipe = next_odm_pipe;
+ next_odm_pipe->top_pipe = prev_odm_pipe->top_pipe->next_odm_pipe;
+ }
+ if (prev_odm_pipe->bottom_pipe && prev_odm_pipe->bottom_pipe->next_odm_pipe) {
+ prev_odm_pipe->bottom_pipe->next_odm_pipe->top_pipe = next_odm_pipe;
+ next_odm_pipe->bottom_pipe = prev_odm_pipe->bottom_pipe->next_odm_pipe;
+ }
+ prev_odm_pipe->next_odm_pipe = next_odm_pipe;
+ next_odm_pipe->prev_odm_pipe = prev_odm_pipe;
+
+ if (prev_odm_pipe->plane_state) {
+ struct scaler_data *sd = &prev_odm_pipe->plane_res.scl_data;
+ int new_width;
+
+ /* HACTIVE halved for odm combine */
+ sd->h_active /= 2;
+ /* Calculate new vp and recout for left pipe */
+ /* Need at least 16 pixels width per side */
+ if (sd->recout.x + 16 >= sd->h_active)
+ return false;
+ new_width = sd->h_active - sd->recout.x;
+ sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
+ sd->ratios.horz, sd->recout.width - new_width));
+ sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
+ sd->ratios.horz_c, sd->recout.width - new_width));
+ sd->recout.width = new_width;
+
+ /* Calculate new vp and recout for right pipe */
+ sd = &next_odm_pipe->plane_res.scl_data;
+ /* HACTIVE halved for odm combine */
+ sd->h_active /= 2;
+ /* Need at least 16 pixels width per side */
+ if (new_width <= 16)
+ return false;
+ new_width = sd->recout.width + sd->recout.x - sd->h_active;
+ sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
+ sd->ratios.horz, sd->recout.width - new_width));
+ sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
+ sd->ratios.horz_c, sd->recout.width - new_width));
+ sd->recout.width = new_width;
+ sd->viewport.x += dc_fixpt_floor(dc_fixpt_mul_int(
+ sd->ratios.horz, sd->h_active - sd->recout.x));
+ sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int(
+ sd->ratios.horz_c, sd->h_active - sd->recout.x));
+ sd->recout.x = 0;
+ }
+ if (!next_odm_pipe->top_pipe)
+ next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
+ else
+ next_odm_pipe->stream_res.opp = next_odm_pipe->top_pipe->stream_res.opp;
+ if (next_odm_pipe->stream->timing.flags.DSC == 1 && !next_odm_pipe->top_pipe) {
+ dcn20_acquire_dsc(dc, res_ctx, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
+ ASSERT(next_odm_pipe->stream_res.dsc);
+ if (next_odm_pipe->stream_res.dsc == NULL)
+ return false;
+ }
+
+ return true;
+}
+
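+/*
+ * Populate the secondary pipe with the primary pipe's stream/plane state and
+ * its own per-pipe plane resources, then link the two as top/bottom pipes for
+ * an MPC pipe split of the same plane.
+ */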
+void dcn20_split_stream_for_mpc(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct pipe_ctx *primary_pipe,
+ struct pipe_ctx *secondary_pipe)
+{
+ int pipe_idx = secondary_pipe->pipe_idx;
+ struct pipe_ctx *sec_bot_pipe = secondary_pipe->bottom_pipe;
+
+ *secondary_pipe = *primary_pipe;
+ secondary_pipe->bottom_pipe = sec_bot_pipe;
+
+ secondary_pipe->pipe_idx = pipe_idx;
+ secondary_pipe->plane_res.mi = pool->mis[secondary_pipe->pipe_idx];
+ secondary_pipe->plane_res.hubp = pool->hubps[secondary_pipe->pipe_idx];
+ secondary_pipe->plane_res.ipp = pool->ipps[secondary_pipe->pipe_idx];
+ secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx];
+ secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx];
+ secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst;
+ secondary_pipe->stream_res.dsc = NULL;
+ if (primary_pipe->bottom_pipe && primary_pipe->bottom_pipe != secondary_pipe) {
+ ASSERT(!secondary_pipe->bottom_pipe);
+ secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe;
+ secondary_pipe->bottom_pipe->top_pipe = secondary_pipe;
+ }
+ primary_pipe->bottom_pipe = secondary_pipe;
+ secondary_pipe->top_pipe = primary_pipe;
+
+ ASSERT(primary_pipe->plane_state);
+}
+
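+/*
+ * Compute the maximum scaled line time the MCIF_WB buffer can absorb for the
+ * given writeback mode: derive time-per-byte from time-per-pixel, convert the
+ * free buffer entries into a line-hold capability (the value carries a 4-bit
+ * fraction) and subtract the urgent watermark.
+ */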
+unsigned int dcn20_calc_max_scaled_time(
+ unsigned int time_per_pixel,
+ enum mmhubbub_wbif_mode mode,
+ unsigned int urgent_watermark)
+{
+ unsigned int time_per_byte = 0;
+	unsigned int total_y_free_entry = 0x200; /* two memory pieces for luma */
+	unsigned int total_c_free_entry = 0x140; /* two memory pieces for chroma */
+ unsigned int small_free_entry, max_free_entry;
+ unsigned int buf_lh_capability;
+ unsigned int max_scaled_time;
+
+ if (mode == PACKED_444) /* packed mode */
+ time_per_byte = time_per_pixel/4;
+ else if (mode == PLANAR_420_8BPC)
+ time_per_byte = time_per_pixel;
+ else if (mode == PLANAR_420_10BPC) /* p010 */
+ time_per_byte = time_per_pixel * 819/1024;
+
+ if (time_per_byte == 0)
+ time_per_byte = 1;
+
+ small_free_entry = (total_y_free_entry > total_c_free_entry) ? total_c_free_entry : total_y_free_entry;
+ max_free_entry = (mode == PACKED_444) ? total_y_free_entry + total_c_free_entry : small_free_entry;
+ buf_lh_capability = max_free_entry*time_per_byte*32/16; /* there is 4bit fraction */
+ max_scaled_time = buf_lh_capability - urgent_watermark;
+ return max_scaled_time;
+}
+
+void dcn20_set_mcif_arb_params(
+ struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt)
+{
+ enum mmhubbub_wbif_mode wbif_mode;
+ struct mcif_arb_params *wb_arb_params;
+ int i, j, dwb_pipe;
+
+ /* Writeback MCIF_WB arbitration parameters */
+ dwb_pipe = 0;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ for (j = 0; j < MAX_DWB_PIPES; j++) {
+ if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].wb_enabled == false)
+ continue;
+
+ //wb_arb_params = &context->res_ctx.pipe_ctx[i].stream->writeback_info[j].mcif_arb_params;
+ wb_arb_params = &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[dwb_pipe];
+
+ if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].dwb_params.out_format == dwb_scaler_mode_yuv420) {
+ if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC)
+ wbif_mode = PLANAR_420_8BPC;
+ else
+ wbif_mode = PLANAR_420_10BPC;
+ } else
+ wbif_mode = PACKED_444;
+
+ DC_FP_START();
+ dcn20_fpu_set_wb_arb_params(wb_arb_params, context, pipes, pipe_cnt, i);
+ DC_FP_END();
+
+ wb_arb_params->slice_lines = 32;
+ wb_arb_params->arbitration_slice = 2;
+ wb_arb_params->max_scaled_time = dcn20_calc_max_scaled_time(wb_arb_params->time_per_pixel,
+ wbif_mode,
+ wb_arb_params->cli_watermark[0]); /* assume 4 watermark sets have the same value */
+
+ dwb_pipe++;
+
+ if (dwb_pipe >= MAX_DWB_PIPES)
+ return;
+ }
+ if (dwb_pipe >= MAX_DWB_PIPES)
+ return;
+ }
+}
+
+bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
+{
+ int i;
+
+ /* Validate DSC config, dsc count validation is already done */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dsc_config dsc_cfg;
+ struct pipe_ctx *odm_pipe;
+ int opp_cnt = 1;
+
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ opp_cnt++;
+
+ /* Only need to validate top pipe */
+ if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe || !stream || !stream->timing.flags.DSC)
+ continue;
+
+ dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left
+ + stream->timing.h_border_right) / opp_cnt;
+ dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top
+ + stream->timing.v_border_bottom;
+ dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
+ dsc_cfg.color_depth = stream->timing.display_color_depth;
+ dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
+ dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+ dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+
+ if (!pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg))
+ return false;
+ }
+ return true;
+}
+
+struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ const struct pipe_ctx *primary_pipe)
+{
+ struct pipe_ctx *secondary_pipe = NULL;
+
+ if (dc && primary_pipe) {
+ int j;
+ int preferred_pipe_idx = 0;
+
+ /* first check the prev dc state:
+ * if this primary pipe has a bottom pipe in prev. state
+ * and if the bottom pipe is still available (which it should be),
+		 * pick that pipe as secondary.
+		 * The same logic applies to ODM pipes.
+ */
+ if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
+ preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe->pipe_idx;
+ if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
+ secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
+ secondary_pipe->pipe_idx = preferred_pipe_idx;
+ }
+ }
+ if (secondary_pipe == NULL &&
+ dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe) {
+ preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe->pipe_idx;
+ if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
+ secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
+ secondary_pipe->pipe_idx = preferred_pipe_idx;
+ }
+ }
+
+ /*
+		 * if this primary pipe does not have a bottom pipe in the previous state,
+		 * search backward for a pipe that was not a bottom pipe in the previous
+		 * dc state. This way we keep the same assignment as the last state and
+		 * will not have to reprogram every pipe
+ */
+ if (secondary_pipe == NULL) {
+ for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) {
+ if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL
+ && dc->current_state->res_ctx.pipe_ctx[j].prev_odm_pipe == NULL) {
+ preferred_pipe_idx = j;
+
+ if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
+ secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
+ secondary_pipe->pipe_idx = preferred_pipe_idx;
+ break;
+ }
+ }
+ }
+ }
+ /*
+		 * We should never hit this assert unless assignments are shuffled around;
+		 * if this happens we will probably hit a vsync timeout (TDR)
+ */
+ ASSERT(secondary_pipe);
+ /*
+ * search backwards for the second pipe to keep pipe
+ * assignment more consistent
+ */
+ if (secondary_pipe == NULL) {
+ for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) {
+ preferred_pipe_idx = j;
+
+ if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
+ secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
+ secondary_pipe->pipe_idx = preferred_pipe_idx;
+ break;
+ }
+ }
+ }
+ }
+
+ return secondary_pipe;
+}
+
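+/*
+ * Undo any ODM and MPC pipe splits carried over from the current state so
+ * that mode-support validation can decide the split configuration from
+ * scratch.
+ */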
+void dcn20_merge_pipes_for_validate(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+
+ /* merge previously split odm pipes since mode support needs to make the decision */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *odm_pipe = pipe->next_odm_pipe;
+
+ if (pipe->prev_odm_pipe)
+ continue;
+
+ pipe->next_odm_pipe = NULL;
+ while (odm_pipe) {
+ struct pipe_ctx *next_odm_pipe = odm_pipe->next_odm_pipe;
+
+ odm_pipe->plane_state = NULL;
+ odm_pipe->stream = NULL;
+ odm_pipe->top_pipe = NULL;
+ odm_pipe->bottom_pipe = NULL;
+ odm_pipe->prev_odm_pipe = NULL;
+ odm_pipe->next_odm_pipe = NULL;
+ if (odm_pipe->stream_res.dsc)
+ dcn20_release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
+ /* Clear plane_res and stream_res */
+ memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res));
+ memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res));
+ odm_pipe = next_odm_pipe;
+ }
+ if (pipe->plane_state)
+ resource_build_scaling_params(pipe);
+ }
+
+ /* merge previously mpc split pipes since mode support needs to make the decision */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;
+
+ if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state)
+ continue;
+
+ pipe->bottom_pipe = hsplit_pipe->bottom_pipe;
+ if (hsplit_pipe->bottom_pipe)
+ hsplit_pipe->bottom_pipe->top_pipe = pipe;
+ hsplit_pipe->plane_state = NULL;
+ hsplit_pipe->stream = NULL;
+ hsplit_pipe->top_pipe = NULL;
+ hsplit_pipe->bottom_pipe = NULL;
+
+ /* Clear plane_res and stream_res */
+ memset(&hsplit_pipe->plane_res, 0, sizeof(hsplit_pipe->plane_res));
+ memset(&hsplit_pipe->stream_res, 0, sizeof(hsplit_pipe->stream_res));
+ if (pipe->plane_state)
+ resource_build_scaling_params(pipe);
+ }
+}
+
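+/*
+ * Decide, per pipe, whether it should be split (split[i] = 2 or 4 ways) or
+ * whether an existing split should be merged (merge[i] = true), based on the
+ * DML outputs for the given voltage level and the relevant debug/config
+ * flags. Returns the (possibly adjusted) voltage level.
+ */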
+int dcn20_validate_apply_pipe_split_flags(
+ struct dc *dc,
+ struct dc_state *context,
+ int vlevel,
+ int *split,
+ bool *merge)
+{
+ int i, pipe_idx, vlevel_split;
+ int plane_count = 0;
+ bool force_split = false;
+ bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID;
+ struct vba_vars_st *v = &context->bw_ctx.dml.vba;
+ int max_mpc_comb = v->maxMpcComb;
+
+ if (context->stream_count > 1) {
+ if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP)
+ avoid_split = true;
+ } else if (dc->debug.force_single_disp_pipe_split)
+ force_split = true;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ /**
+ * Workaround for avoiding pipe-split in cases where we'd split
+ * planes that are too small, resulting in splits that aren't
+ * valid for the scaler.
+ */
+ if (pipe->plane_state &&
+ (pipe->plane_state->dst_rect.width <= 16 ||
+ pipe->plane_state->dst_rect.height <= 16 ||
+ pipe->plane_state->src_rect.width <= 16 ||
+ pipe->plane_state->src_rect.height <= 16))
+ avoid_split = true;
+
+ /* TODO: fix dc bugs and remove this split threshold thing */
+ if (pipe->stream && !pipe->prev_odm_pipe &&
+ (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
+ ++plane_count;
+ }
+ if (plane_count > dc->res_pool->pipe_count / 2)
+ avoid_split = true;
+
+ /* W/A: Mode timing with borders may not work well with pipe split, avoid for this corner case */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct dc_crtc_timing timing;
+
+		if (!pipe->stream)
+			continue;
+
+		timing = pipe->stream->timing;
+		if (timing.h_border_left + timing.h_border_right
+				+ timing.v_border_top + timing.v_border_bottom > 0) {
+			avoid_split = true;
+			break;
+		}
+ }
+
+	/* The avoid-split loop looks for the lowest voltage level that allows as many unsplit pipes as possible */
+ if (avoid_split) {
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++)
+ if (v->NoOfDPP[vlevel][0][pipe_idx] == 1 &&
+ v->ModeSupport[vlevel][0])
+ break;
+ /* Impossible to not split this pipe */
+ if (vlevel > context->bw_ctx.dml.soc.num_states)
+ vlevel = vlevel_split;
+ else
+ max_mpc_comb = 0;
+ pipe_idx++;
+ }
+ v->maxMpcComb = max_mpc_comb;
+ }
+
+ /* Split loop sets which pipe should be split based on dml outputs and dc flags */
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ int pipe_plane = v->pipe_plane[pipe_idx];
+ bool split4mpc = context->stream_count == 1 && plane_count == 1
+ && dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4;
+
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ if (split4mpc || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 4)
+ split[i] = 4;
+ else if (force_split || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 2)
+ split[i] = 2;
+
+ if ((pipe->stream->view_format ==
+ VIEW_3D_FORMAT_SIDE_BY_SIDE ||
+ pipe->stream->view_format ==
+ VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
+ (pipe->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
+ pipe->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_SIDE_BY_SIDE))
+ split[i] = 2;
+ if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
+ split[i] = 2;
+ v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
+ }
+ if (dc->debug.force_odm_combine_4to1 & (1 << pipe->stream_res.tg->inst)) {
+ split[i] = 4;
+ v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_4to1;
+ }
+		/* 420 format workaround */
+ if (pipe->stream->timing.h_addressable > 7680 &&
+ pipe->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ split[i] = 4;
+ }
+ v->ODMCombineEnabled[pipe_plane] =
+ v->ODMCombineEnablePerState[vlevel][pipe_plane];
+
+ if (v->ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
+ if (resource_get_num_mpc_splits(pipe) == 1) {
+ /*If need split for mpc but 2 way split already*/
+ if (split[i] == 4)
+ split[i] = 2; /* 2 -> 4 MPC */
+ else if (split[i] == 2)
+ split[i] = 0; /* 2 -> 2 MPC */
+ else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
+ merge[i] = true; /* 2 -> 1 MPC */
+ } else if (resource_get_num_mpc_splits(pipe) == 3) {
+ /*If need split for mpc but 4 way split already*/
+ if (split[i] == 2 && ((pipe->top_pipe && !pipe->top_pipe->top_pipe)
+ || !pipe->bottom_pipe)) {
+ merge[i] = true; /* 4 -> 2 MPC */
+ } else if (split[i] == 0 && pipe->top_pipe &&
+ pipe->top_pipe->plane_state == pipe->plane_state)
+ merge[i] = true; /* 4 -> 1 MPC */
+ split[i] = 0;
+ } else if (resource_get_num_odm_splits(pipe)) {
+ /* ODM -> MPC transition */
+ if (pipe->prev_odm_pipe) {
+ split[i] = 0;
+ merge[i] = true;
+ }
+ }
+ } else {
+ if (resource_get_num_odm_splits(pipe) == 1) {
+ /*If need split for odm but 2 way split already*/
+ if (split[i] == 4)
+ split[i] = 2; /* 2 -> 4 ODM */
+ else if (split[i] == 2)
+ split[i] = 0; /* 2 -> 2 ODM */
+ else if (pipe->prev_odm_pipe) {
+ ASSERT(0); /* NOT expected yet */
+ merge[i] = true; /* exit ODM */
+ }
+ } else if (resource_get_num_odm_splits(pipe) == 3) {
+ /*If need split for odm but 4 way split already*/
+ if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe)
+ || !pipe->next_odm_pipe)) {
+ merge[i] = true; /* 4 -> 2 ODM */
+ } else if (split[i] == 0 && pipe->prev_odm_pipe) {
+ ASSERT(0); /* NOT expected yet */
+ merge[i] = true; /* exit ODM */
+ }
+ split[i] = 0;
+ } else if (resource_get_num_mpc_splits(pipe)) {
+ /* MPC -> ODM transition */
+ ASSERT(0); /* NOT expected yet */
+ if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
+ split[i] = 0;
+ merge[i] = true;
+ }
+ }
+ }
+
+ /* Adjust dppclk when split is forced, do not bother with dispclk */
+ if (split[i] != 0 && v->NoOfDPP[vlevel][max_mpc_comb][pipe_idx] == 1) {
+ DC_FP_START();
+ dcn20_fpu_adjust_dppclk(v, vlevel, max_mpc_comb, pipe_idx, false);
+ DC_FP_END();
+ }
+ pipe_idx++;
+ }
+
+ return vlevel;
+}
+
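+/*
+ * Fast bandwidth validation: merge any previous pipe splits, populate the DML
+ * pipe parameters, find a voltage level that supports the mode, then re-apply
+ * ODM/MPC splits and validate the resulting DSC configuration.
+ */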
+bool dcn20_fast_validate_bw(
+ struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int *pipe_cnt_out,
+ int *pipe_split_from,
+ int *vlevel_out,
+ bool fast_validate)
+{
+ bool out = false;
+ int split[MAX_PIPES] = { 0 };
+ int pipe_cnt, i, pipe_idx, vlevel;
+
+ ASSERT(pipes);
+ if (!pipes)
+ return false;
+
+ dcn20_merge_pipes_for_validate(dc, context);
+
+ DC_FP_START();
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ DC_FP_END();
+
+ *pipe_cnt_out = pipe_cnt;
+
+ if (!pipe_cnt) {
+ out = true;
+ goto validate_out;
+ }
+
+ vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
+
+ if (vlevel > context->bw_ctx.dml.soc.num_states)
+ goto validate_fail;
+
+ vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, NULL);
+
+	/* initialize pipe_split_from to invalid idx */
+ for (i = 0; i < MAX_PIPES; i++)
+ pipe_split_from[i] = -1;
+
+ for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;
+
+ if (!pipe->stream || pipe_split_from[i] >= 0)
+ continue;
+
+ pipe_idx++;
+
+ if (!pipe->top_pipe && !pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
+ hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
+ ASSERT(hsplit_pipe);
+ if (!dcn20_split_stream_for_odm(
+ dc, &context->res_ctx,
+ pipe, hsplit_pipe))
+ goto validate_fail;
+ pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
+ dcn20_build_mapped_resource(dc, context, pipe->stream);
+ }
+
+ if (!pipe->plane_state)
+ continue;
+ /* Skip 2nd half of already split pipe */
+ if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)
+ continue;
+
+ /* We do not support mpo + odm at the moment */
+ if (hsplit_pipe && hsplit_pipe->plane_state != pipe->plane_state
+ && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx])
+ goto validate_fail;
+
+ if (split[i] == 2) {
+ if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) {
+ /* pipe not split previously needs split */
+ hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
+ ASSERT(hsplit_pipe);
+ if (!hsplit_pipe) {
+ DC_FP_START();
+ dcn20_fpu_adjust_dppclk(&context->bw_ctx.dml.vba, vlevel, context->bw_ctx.dml.vba.maxMpcComb, pipe_idx, true);
+ DC_FP_END();
+ continue;
+ }
+ if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
+ if (!dcn20_split_stream_for_odm(
+ dc, &context->res_ctx,
+ pipe, hsplit_pipe))
+ goto validate_fail;
+ dcn20_build_mapped_resource(dc, context, pipe->stream);
+ } else {
+ dcn20_split_stream_for_mpc(
+ &context->res_ctx, dc->res_pool,
+ pipe, hsplit_pipe);
+ resource_build_scaling_params(pipe);
+ resource_build_scaling_params(hsplit_pipe);
+ }
+ pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
+ }
+ } else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
+ /* merge should already have been done */
+ ASSERT(0);
+ }
+ }
+ /* Actual dsc count per stream dsc validation*/
+ if (!dcn20_validate_dsc(dc, context)) {
+ context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] =
+ DML_FAIL_DSC_VALIDATION_FAILURE;
+ goto validate_fail;
+ }
+
+ *vlevel_out = vlevel;
+
+ out = true;
+ goto validate_out;
+
+validate_fail:
+ out = false;
+
+validate_out:
+ return out;
+}
+
+bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
+ bool fast_validate)
+{
+ bool voltage_supported;
+ DC_FP_START();
+ voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate);
+ DC_FP_END();
+ return voltage_supported;
+}
+
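+/*
+ * Acquire a free pipe to use as a secondary DPP pipe (an extra MPC layer)
+ * under the OTG master that drives the given OPP head pipe.
+ */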
+struct pipe_ctx *dcn20_acquire_free_pipe_for_layer(
+ const struct dc_state *cur_ctx,
+ struct dc_state *new_ctx,
+ const struct resource_pool *pool,
+ const struct pipe_ctx *opp_head)
+{
+ struct resource_context *res_ctx = &new_ctx->res_ctx;
+ struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(res_ctx, opp_head->stream);
+ struct pipe_ctx *sec_dpp_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, otg_master);
+
+ ASSERT(otg_master);
+
+ if (!sec_dpp_pipe)
+ return NULL;
+
+ sec_dpp_pipe->stream = opp_head->stream;
+ sec_dpp_pipe->stream_res.tg = opp_head->stream_res.tg;
+ sec_dpp_pipe->stream_res.opp = opp_head->stream_res.opp;
+
+ sec_dpp_pipe->plane_res.hubp = pool->hubps[sec_dpp_pipe->pipe_idx];
+ sec_dpp_pipe->plane_res.ipp = pool->ipps[sec_dpp_pipe->pipe_idx];
+ sec_dpp_pipe->plane_res.dpp = pool->dpps[sec_dpp_pipe->pipe_idx];
+ sec_dpp_pipe->plane_res.mpcc_inst = pool->dpps[sec_dpp_pipe->pipe_idx]->inst;
+
+ return sec_dpp_pipe;
+}
+
+bool dcn20_get_dcc_compression_cap(const struct dc *dc,
+ const struct dc_dcc_surface_param *input,
+ struct dc_surface_dcc_cap *output)
+{
+ return dc->res_pool->hubbub->funcs->get_dcc_compression_cap(
+ dc->res_pool->hubbub,
+ input,
+ output);
+}
+
+static void dcn20_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dcn20_resource_pool *dcn20_pool = TO_DCN20_RES_POOL(*pool);
+
+ dcn20_resource_destruct(dcn20_pool);
+ kfree(dcn20_pool);
+ *pool = NULL;
+}
+
+
+static struct dc_cap_funcs cap_funcs = {
+ .get_dcc_compression_cap = dcn20_get_dcc_compression_cap
+};
+
+
+enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state)
+{
+ enum surface_pixel_format surf_pix_format = plane_state->format;
+ unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format);
+
+ plane_state->tiling_info.gfx9.swizzle = DC_SW_64KB_S;
+ if (bpp == 64)
+ plane_state->tiling_info.gfx9.swizzle = DC_SW_64KB_D;
+
+ return DC_OK;
+}
+
+static const struct resource_funcs dcn20_res_pool_funcs = {
+ .destroy = dcn20_destroy_resource_pool,
+ .link_enc_create = dcn20_link_encoder_create,
+ .panel_cntl_create = dcn20_panel_cntl_create,
+ .validate_bandwidth = dcn20_validate_bandwidth,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
+ .add_stream_to_ctx = dcn20_add_stream_to_ctx,
+ .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
+ .remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
+ .populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
+ .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .set_mcif_arb_params = dcn20_set_mcif_arb_params,
+ .populate_dml_pipes = dcn20_populate_dml_pipes_from_context,
+ .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
+};
+
+bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
+{
+ int i;
+ uint32_t pipe_count = pool->res_cap->num_dwb;
+
+ for (i = 0; i < pipe_count; i++) {
+ struct dcn20_dwbc *dwbc20 = kzalloc(sizeof(struct dcn20_dwbc),
+ GFP_KERNEL);
+
+ if (!dwbc20) {
+ dm_error("DC: failed to create dwbc20!\n");
+ return false;
+ }
+ dcn20_dwbc_construct(dwbc20, ctx,
+ &dwbc20_regs[i],
+ &dwbc20_shift,
+ &dwbc20_mask,
+ i);
+ pool->dwbc[i] = &dwbc20->base;
+ }
+ return true;
+}
+
+bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
+{
+ int i;
+ uint32_t pipe_count = pool->res_cap->num_dwb;
+
+ ASSERT(pipe_count > 0);
+
+ for (i = 0; i < pipe_count; i++) {
+ struct dcn20_mmhubbub *mcif_wb20 = kzalloc(sizeof(struct dcn20_mmhubbub),
+ GFP_KERNEL);
+
+ if (!mcif_wb20) {
+ dm_error("DC: failed to create mcif_wb20!\n");
+ return false;
+ }
+
+ dcn20_mmhubbub_construct(mcif_wb20, ctx,
+ &mcif_wb20_regs[i],
+ &mcif_wb20_shift,
+ &mcif_wb20_mask,
+ i);
+
+ pool->mcif_wb[i] = &mcif_wb20->base;
+ }
+ return true;
+}
+
+static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
+{
+ struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_ATOMIC);
+
+ if (!pp_smu)
+ return pp_smu;
+
+ dm_pp_get_funcs(ctx, pp_smu);
+
+ if (pp_smu->ctx.ver != PP_SMU_VER_NV)
+ pp_smu = memset(pp_smu, 0, sizeof(struct pp_smu_funcs));
+
+ return pp_smu;
+}
+
+static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
+{
+ if (pp_smu && *pp_smu) {
+ kfree(*pp_smu);
+ *pp_smu = NULL;
+ }
+}
+
+static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
+ uint32_t hw_internal_rev)
+{
+ if (ASICREV_IS_NAVI14_M(hw_internal_rev))
+ return &dcn2_0_nv14_soc;
+
+ if (ASICREV_IS_NAVI12_P(hw_internal_rev))
+ return &dcn2_0_nv12_soc;
+
+ return &dcn2_0_soc;
+}
+
+static struct _vcs_dpi_ip_params_st *get_asic_rev_ip_params(
+ uint32_t hw_internal_rev)
+{
+ /* NV14 */
+ if (ASICREV_IS_NAVI14_M(hw_internal_rev))
+ return &dcn2_0_nv14_ip;
+
+ /* NV12 and NV10 */
+ return &dcn2_0_ip;
+}
+
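+/* All DCN 2.0 variants (NV10/NV12/NV14) use the same DML project version. */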
+static enum dml_project get_dml_project_version(uint32_t hw_internal_rev)
+{
+ return DML_PROJECT_NAVI10v2;
+}
+
+static bool init_soc_bounding_box(struct dc *dc,
+ struct dcn20_resource_pool *pool)
+{
+ struct _vcs_dpi_soc_bounding_box_st *loaded_bb =
+ get_asic_rev_soc_bb(dc->ctx->asic_id.hw_internal_rev);
+ struct _vcs_dpi_ip_params_st *loaded_ip =
+ get_asic_rev_ip_params(dc->ctx->asic_id.hw_internal_rev);
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ if (pool->base.pp_smu) {
+ struct pp_smu_nv_clock_table max_clocks = {0};
+ unsigned int uclk_states[8] = {0};
+ unsigned int num_states = 0;
+ enum pp_smu_status status;
+ bool clock_limits_available = false;
+ bool uclk_states_available = false;
+
+ if (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states) {
+ status = (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states)
+ (&pool->base.pp_smu->nv_funcs.pp_smu, uclk_states, &num_states);
+
+ uclk_states_available = (status == PP_SMU_RESULT_OK);
+ }
+
+ if (pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks) {
+ status = (*pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks)
+ (&pool->base.pp_smu->nv_funcs.pp_smu, &max_clocks);
+ /* SMU cannot set DCF clock to anything equal to or higher than SOC clock
+ */
+ if (max_clocks.dcfClockInKhz >= max_clocks.socClockInKhz)
+ max_clocks.dcfClockInKhz = max_clocks.socClockInKhz - 1000;
+ clock_limits_available = (status == PP_SMU_RESULT_OK);
+ }
+
+ if (clock_limits_available && uclk_states_available && num_states) {
+ DC_FP_START();
+ dcn20_update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states);
+ DC_FP_END();
+ } else if (clock_limits_available) {
+ DC_FP_START();
+ dcn20_cap_soc_clocks(loaded_bb, max_clocks);
+ DC_FP_END();
+ }
+ }
+
+ loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
+ loaded_ip->max_num_dpp = pool->base.pipe_count;
+ DC_FP_START();
+ dcn20_patch_bounding_box(dc, loaded_bb);
+ DC_FP_END();
+ return true;
+}
+
+static bool dcn20_resource_construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dcn20_resource_pool *pool)
+{
+ int i;
+ struct dc_context *ctx = dc->ctx;
+ struct irq_service_init_data init_data;
+ struct ddc_service_init_data ddc_init_data = {0};
+ struct _vcs_dpi_soc_bounding_box_st *loaded_bb =
+ get_asic_rev_soc_bb(ctx->asic_id.hw_internal_rev);
+ struct _vcs_dpi_ip_params_st *loaded_ip =
+ get_asic_rev_ip_params(ctx->asic_id.hw_internal_rev);
+ enum dml_project dml_project_version =
+ get_dml_project_version(ctx->asic_id.hw_internal_rev);
+
+ ctx->dc_bios->regs = &bios_regs;
+ pool->base.funcs = &dcn20_res_pool_funcs;
+
+ if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) {
+ pool->base.res_cap = &res_cap_nv14;
+ pool->base.pipe_count = 5;
+ pool->base.mpcc_count = 5;
+ } else {
+ pool->base.res_cap = &res_cap_nv10;
+ pool->base.pipe_count = 6;
+ pool->base.mpcc_count = 6;
+ }
+ /*************************************************
+	 *  Resource + asic cap hardcoding               *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 100;
+ dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a not applied by default*/
+ dc->caps.max_cursor_size = 256;
+ dc->caps.min_horizontal_blanking_period = 80;
+ dc->caps.dmdata_alloc_size = 2048;
+
+ dc->caps.max_slave_planes = 1;
+ dc->caps.max_slave_yuv_planes = 1;
+ dc->caps.max_slave_rgb_planes = 1;
+ dc->caps.post_blend_color_processing = true;
+ dc->caps.force_dp_tps4_for_cp2520 = true;
+ dc->caps.extended_aux_timeout_support = true;
+
+ /* Color pipeline capabilities */
+ dc->caps.color.dpp.dcn_arch = 1;
+ dc->caps.color.dpp.input_lut_shared = 0;
+ dc->caps.color.dpp.icsc = 1;
+ dc->caps.color.dpp.dgam_ram = 1;
+ dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+ dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.dgam_rom_caps.pq = 0;
+ dc->caps.color.dpp.dgam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.post_csc = 0;
+ dc->caps.color.dpp.gamma_corr = 0;
+ dc->caps.color.dpp.dgam_rom_for_yuv = 1;
+
+ dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.ogam_ram = 1;
+ // no OGAM ROM on DCN2, only MPC ROM
+ dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
+ dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+ dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.ocsc = 0;
+
+ dc->caps.color.mpc.gamut_remap = 0;
+ dc->caps.color.mpc.num_3dluts = 0;
+ dc->caps.color.mpc.shared_3d_lut = 0;
+ dc->caps.color.mpc.ogam_ram = 1;
+ dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+ dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+ dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+ dc->caps.color.mpc.ocsc = 1;
+
+ dc->caps.dp_hdmi21_pcon_support = true;
+
+ if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
+ dc->debug = debug_defaults_drv;
+
+ //dcn2.0x
+ dc->work_arounds.dedcn20_305_wa = true;
+
+ // Init the vm_helper
+ if (dc->vm_helper)
+ vm_helper_init(dc->vm_helper, 16);
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL0] =
+ dcn20_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL0,
+ &clk_src_regs[0], false);
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL1] =
+ dcn20_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL1,
+ &clk_src_regs[1], false);
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL2] =
+ dcn20_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ &clk_src_regs[2], false);
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL3] =
+ dcn20_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ &clk_src_regs[3], false);
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL4] =
+ dcn20_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL4,
+ &clk_src_regs[4], false);
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL5] =
+ dcn20_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL5,
+ &clk_src_regs[5], false);
+ pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL;
+	/* TODO: do not reuse phy_pll registers */
+ pool->base.dp_clock_source =
+ dcn20_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_ID_DP_DTO,
+ &clk_src_regs[0], true);
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+ }
+
+ pool->base.dccg = dccg2_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
+ if (pool->base.dccg == NULL) {
+ dm_error("DC: failed to create dccg!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+ pool->base.dmcu = dcn20_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+ pool->base.pp_smu = dcn20_pp_smu_create(ctx);
+
+
+ if (!init_soc_bounding_box(dc, pool)) {
+ dm_error("DC: failed to initialize soc bounding box!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+ dml_init_instance(&dc->dml, loaded_bb, loaded_ip, dml_project_version);
+
+ if (!dc->debug.disable_pplib_wm_range) {
+ struct pp_smu_wm_range_sets ranges = {0};
+ int i = 0;
+
+ ranges.num_reader_wm_sets = 0;
+
+ if (loaded_bb->num_states == 1) {
+ ranges.reader_wm_sets[0].wm_inst = i;
+ ranges.reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges.reader_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges.reader_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+
+ ranges.num_reader_wm_sets = 1;
+ } else if (loaded_bb->num_states > 1) {
+ for (i = 0; i < 4 && i < loaded_bb->num_states; i++) {
+ ranges.reader_wm_sets[i].wm_inst = i;
+ ranges.reader_wm_sets[i].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges.reader_wm_sets[i].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ DC_FP_START();
+ dcn20_fpu_set_wm_ranges(i, &ranges, loaded_bb);
+ DC_FP_END();
+
+ ranges.num_reader_wm_sets = i + 1;
+ }
+
+ ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges.reader_wm_sets[ranges.num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ }
+
+ ranges.num_writer_wm_sets = 1;
+
+ ranges.writer_wm_sets[0].wm_inst = 0;
+ ranges.writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges.writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ ranges.writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges.writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+
+ /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
+ if (pool->base.pp_smu->nv_funcs.set_wm_ranges)
+ pool->base.pp_smu->nv_funcs.set_wm_ranges(&pool->base.pp_smu->nv_funcs.pp_smu, &ranges);
+ }
+
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dcn20_create(&init_data);
+ if (!pool->base.irqs)
+ goto create_fail;
+
+ /* mem input -> ipp -> dpp -> opp -> TG */
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.hubps[i] = dcn20_hubp_create(ctx, i);
+ if (pool->base.hubps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create memory input!\n");
+ goto create_fail;
+ }
+
+ pool->base.ipps[i] = dcn20_ipp_create(ctx, i);
+ if (pool->base.ipps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create input pixel processor!\n");
+ goto create_fail;
+ }
+
+ pool->base.dpps[i] = dcn20_dpp_create(ctx, i);
+ if (pool->base.dpps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create dpps!\n");
+ goto create_fail;
+ }
+ }
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ pool->base.engines[i] = dcn20_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create aux engine!!\n");
+ goto create_fail;
+ }
+ pool->base.hw_i2cs[i] = dcn20_i2c_hw_create(ctx, i);
+ if (pool->base.hw_i2cs[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create hw i2c!!\n");
+ goto create_fail;
+ }
+ pool->base.sw_i2cs[i] = NULL;
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_opp; i++) {
+ pool->base.opps[i] = dcn20_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ goto create_fail;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ pool->base.timing_generators[i] = dcn20_timing_generator_create(
+ ctx, i);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto create_fail;
+ }
+ }
+
+ pool->base.timing_generator_count = i;
+
+ pool->base.mpc = dcn20_mpc_create(ctx);
+ if (pool->base.mpc == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create mpc!\n");
+ goto create_fail;
+ }
+
+ pool->base.hubbub = dcn20_hubbub_create(ctx);
+ if (pool->base.hubbub == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create hubbub!\n");
+ goto create_fail;
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
+ pool->base.dscs[i] = dcn20_dsc_create(ctx, i);
+ if (pool->base.dscs[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create display stream compressor %d!\n", i);
+ goto create_fail;
+ }
+ }
+
+ if (!dcn20_dwbc_create(ctx, &pool->base)) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create dwbc!\n");
+ goto create_fail;
+ }
+ if (!dcn20_mmhubbub_create(ctx, &pool->base)) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create mcif_wb!\n");
+ goto create_fail;
+ }
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto create_fail;
+
+ dcn20_hw_sequencer_construct(dc);
+
+ // If NV12, set the PG function pointer to NULL. It's not that
+ // PG isn't supported for NV12; it's that we don't want to
+ // program the registers because that would cause more power
+ // to be consumed. We could have achieved the same effect by
+ // checking the ASIC rev in dcn20_init_hw, but there was a
+ // request at some point not to check ASIC rev in the hw sequencer.
+ if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) {
+ dc->hwseq->funcs.enable_power_gating_plane = NULL;
+ dc->debug.disable_dpp_power_gate = true;
+ dc->debug.disable_hubp_power_gate = true;
+ }
+
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
+ dc->cap_funcs = cap_funcs;
+
+ if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
+ ddc_init_data.ctx = dc->ctx;
+ ddc_init_data.link = NULL;
+ ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
+ ddc_init_data.id.enum_id = 0;
+ ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
+ pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data);
+ } else {
+ pool->base.oem_device = NULL;
+ }
+
+ return true;
+
+create_fail:
+
+ dcn20_resource_destruct(pool);
+
+ return false;
+}
+
+struct resource_pool *dcn20_create_resource_pool(
+ const struct dc_init_data *init_data,
+ struct dc *dc)
+{
+ struct dcn20_resource_pool *pool =
+ kzalloc(sizeof(struct dcn20_resource_pool), GFP_ATOMIC);
+
+ if (!pool)
+ return NULL;
+
+ if (dcn20_resource_construct(init_data->num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ kfree(pool);
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
new file mode 100644
index 0000000000..6d1a8924e5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_RESOURCE_DCN20_H__
+#define __DC_RESOURCE_DCN20_H__
+
+#include "core_types.h"
+#include "dml/dcn20/dcn20_fpu.h"
+
+#define TO_DCN20_RES_POOL(pool)\
+ container_of(pool, struct dcn20_resource_pool, base)
+
+struct dc;
+struct resource_pool;
+struct _vcs_dpi_display_pipe_params_st;
+
+extern struct _vcs_dpi_ip_params_st dcn2_0_ip;
+extern struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc;
+extern struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc;
+extern struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc;
+
+struct dcn20_resource_pool {
+ struct resource_pool base;
+};
+struct resource_pool *dcn20_create_resource_pool(
+ const struct dc_init_data *init_data,
+ struct dc *dc);
+
+struct link_encoder *dcn20_link_encoder_create(
+ struct dc_context *ctx,
+ const struct encoder_init_data *enc_init_data);
+
+unsigned int dcn20_calc_max_scaled_time(
+ unsigned int time_per_pixel,
+ enum mmhubbub_wbif_mode mode,
+ unsigned int urgent_watermark);
+
+struct pipe_ctx *dcn20_acquire_free_pipe_for_layer(
+ const struct dc_state *cur_ctx,
+ struct dc_state *new_ctx,
+ const struct resource_pool *pool,
+ const struct pipe_ctx *opp_head_pipe);
+
+struct stream_encoder *dcn20_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx);
+
+struct dce_hwseq *dcn20_hwseq_create(
+ struct dc_context *ctx);
+
+bool dcn20_get_dcc_compression_cap(const struct dc *dc,
+ const struct dc_dcc_surface_param *input,
+ struct dc_surface_dcc_cap *output);
+
+void dcn20_dpp_destroy(struct dpp **dpp);
+
+struct dpp *dcn20_dpp_create(
+ struct dc_context *ctx,
+ uint32_t inst);
+
+struct input_pixel_processor *dcn20_ipp_create(
+ struct dc_context *ctx, uint32_t inst);
+
+struct output_pixel_processor *dcn20_opp_create(
+ struct dc_context *ctx, uint32_t inst);
+
+struct dce_aux *dcn20_aux_engine_create(
+ struct dc_context *ctx, uint32_t inst);
+
+struct dce_i2c_hw *dcn20_i2c_hw_create(
+ struct dc_context *ctx,
+ uint32_t inst);
+
+void dcn20_clock_source_destroy(struct clock_source **clk_src);
+
+struct display_stream_compressor *dcn20_dsc_create(
+ struct dc_context *ctx, uint32_t inst);
+void dcn20_dsc_destroy(struct display_stream_compressor **dsc);
+
+struct hubp *dcn20_hubp_create(
+ struct dc_context *ctx,
+ uint32_t inst);
+struct timing_generator *dcn20_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance);
+struct mpc *dcn20_mpc_create(struct dc_context *ctx);
+struct hubbub *dcn20_hubbub_create(struct dc_context *ctx);
+
+bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool);
+bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool);
+
+void dcn20_set_mcif_arb_params(
+ struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt);
+bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate);
+void dcn20_merge_pipes_for_validate(
+ struct dc *dc,
+ struct dc_state *context);
+int dcn20_validate_apply_pipe_split_flags(
+ struct dc *dc,
+ struct dc_state *context,
+ int vlevel,
+ int *split,
+ bool *merge);
+void dcn20_release_dsc(struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct display_stream_compressor **dsc);
+bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx);
+void dcn20_split_stream_for_mpc(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct pipe_ctx *primary_pipe,
+ struct pipe_ctx *secondary_pipe);
+bool dcn20_split_stream_for_odm(
+ const struct dc *dc,
+ struct resource_context *res_ctx,
+ struct pipe_ctx *prev_odm_pipe,
+ struct pipe_ctx *next_odm_pipe);
+void dcn20_acquire_dsc(const struct dc *dc,
+ struct resource_context *res_ctx,
+ struct display_stream_compressor **dsc,
+ int pipe_idx);
+struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ const struct pipe_ctx *primary_pipe);
+bool dcn20_fast_validate_bw(
+ struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int *pipe_cnt_out,
+ int *pipe_split_from,
+ int *vlevel_out,
+ bool fast_validate);
+
+enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream);
+enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
+enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream);
+enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
+enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state);
+
+#endif /* __DC_RESOURCE_DCN20_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
new file mode 100644
index 0000000000..0b47aeb60e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -0,0 +1,661 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/delay.h>
+
+#include "dc_bios_types.h"
+#include "dcn20_stream_encoder.h"
+#include "reg_helper.h"
+#include "hw_shared.h"
+#include "link.h"
+#include "dpcd_defs.h"
+
+#define DC_LOGGER \
+ enc1->base.ctx->logger
+
+#define REG(reg)\
+ (enc1->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ enc1->se_shift->field_name, enc1->se_mask->field_name
+
+
+#define CTX \
+ enc1->base.ctx
+
+
+static void enc2_update_hdmi_info_packet(
+ struct dcn10_stream_encoder *enc1,
+ uint32_t packet_index,
+ const struct dc_info_packet *info_packet)
+{
+ uint32_t cont, send, line;
+
+ if (info_packet->valid) {
+ enc1_update_generic_info_packet(
+ enc1,
+ packet_index,
+ info_packet);
+
+ /* enable transmission of packet(s) -
+ * packet transmission begins on the next frame */
+ cont = 1;
+ /* send packet(s) every frame */
+ send = 1;
+ /* select line number to send packets on */
+ line = 2;
+ } else {
+ cont = 0;
+ send = 0;
+ line = 0;
+ }
+
+ /* DP_SEC_GSP[x]_LINE_REFERENCE - keep default value REFER_TO_DP_SOF */
+
+ /* choose which generic packet control to use */
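+ /* Register layout, as implied by the cases below: HDMI_GENERIC_PACKET_CONTROL0
+ * holds the CONT/SEND bits for all eight generic packets, while the LINE
+ * fields sit two per register in CONTROL1 (0,1), CONTROL2 (2,3),
+ * CONTROL3 (4,5) and CONTROL4 (6,7).
+ */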
+ switch (packet_index) {
+ case 0:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC0_CONT, cont,
+ HDMI_GENERIC0_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL1,
+ HDMI_GENERIC0_LINE, line);
+ break;
+ case 1:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC1_CONT, cont,
+ HDMI_GENERIC1_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL1,
+ HDMI_GENERIC1_LINE, line);
+ break;
+ case 2:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC2_CONT, cont,
+ HDMI_GENERIC2_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL2,
+ HDMI_GENERIC2_LINE, line);
+ break;
+ case 3:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC3_CONT, cont,
+ HDMI_GENERIC3_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL2,
+ HDMI_GENERIC3_LINE, line);
+ break;
+ case 4:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC4_CONT, cont,
+ HDMI_GENERIC4_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL3,
+ HDMI_GENERIC4_LINE, line);
+ break;
+ case 5:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC5_CONT, cont,
+ HDMI_GENERIC5_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL3,
+ HDMI_GENERIC5_LINE, line);
+ break;
+ case 6:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC6_CONT, cont,
+ HDMI_GENERIC6_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL4,
+ HDMI_GENERIC6_LINE, line);
+ break;
+ case 7:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC7_CONT, cont,
+ HDMI_GENERIC7_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL4,
+ HDMI_GENERIC7_LINE, line);
+ break;
+ default:
+ /* invalid HW packet index */
+ DC_LOG_WARNING(
+ "Invalid HW packet index: %s()\n",
+ __func__);
+ return;
+ }
+}
+
+static void enc2_stream_encoder_update_hdmi_info_packets(
+ struct stream_encoder *enc,
+ const struct encoder_info_frame *info_frame)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ /* for bring up, disable dp double TODO */
+ REG_UPDATE(HDMI_DB_CONTROL, HDMI_DB_DISABLE, 1);
+
+ /* Always add mandatory packets first, followed by optional ones */
+ enc2_update_hdmi_info_packet(enc1, 0, &info_frame->avi);
+ enc2_update_hdmi_info_packet(enc1, 1, &info_frame->hfvsif);
+ enc2_update_hdmi_info_packet(enc1, 2, &info_frame->gamut);
+ enc2_update_hdmi_info_packet(enc1, 3, &info_frame->vendor);
+ enc2_update_hdmi_info_packet(enc1, 4, &info_frame->spd);
+ enc2_update_hdmi_info_packet(enc1, 5, &info_frame->hdrsmd);
+ enc2_update_hdmi_info_packet(enc1, 6, &info_frame->vtem);
+}
+
+static void enc2_stream_encoder_stop_hdmi_info_packets(
+ struct stream_encoder *enc)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ /* stop generic packets 0,1 on HDMI */
+ REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0,
+ HDMI_GENERIC0_CONT, 0,
+ HDMI_GENERIC0_SEND, 0,
+ HDMI_GENERIC1_CONT, 0,
+ HDMI_GENERIC1_SEND, 0);
+ REG_SET_2(HDMI_GENERIC_PACKET_CONTROL1, 0,
+ HDMI_GENERIC0_LINE, 0,
+ HDMI_GENERIC1_LINE, 0);
+
+ /* stop generic packets 2,3 on HDMI */
+ REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0,
+ HDMI_GENERIC2_CONT, 0,
+ HDMI_GENERIC2_SEND, 0,
+ HDMI_GENERIC3_CONT, 0,
+ HDMI_GENERIC3_SEND, 0);
+ REG_SET_2(HDMI_GENERIC_PACKET_CONTROL2, 0,
+ HDMI_GENERIC2_LINE, 0,
+ HDMI_GENERIC3_LINE, 0);
+
+ /* stop generic packets 4,5 on HDMI */
+ REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0,
+ HDMI_GENERIC4_CONT, 0,
+ HDMI_GENERIC4_SEND, 0,
+ HDMI_GENERIC5_CONT, 0,
+ HDMI_GENERIC5_SEND, 0);
+ REG_SET_2(HDMI_GENERIC_PACKET_CONTROL3, 0,
+ HDMI_GENERIC4_LINE, 0,
+ HDMI_GENERIC5_LINE, 0);
+
+ /* stop generic packets 6,7 on HDMI */
+ REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0,
+ HDMI_GENERIC6_CONT, 0,
+ HDMI_GENERIC6_SEND, 0,
+ HDMI_GENERIC7_CONT, 0,
+ HDMI_GENERIC7_SEND, 0);
+ REG_SET_2(HDMI_GENERIC_PACKET_CONTROL4, 0,
+ HDMI_GENERIC6_LINE, 0,
+ HDMI_GENERIC7_LINE, 0);
+}
+
+
+/* Update GSP7 SDP 128 byte long */
+static void enc2_update_gsp7_128_info_packet(
+ struct dcn10_stream_encoder *enc1,
+ const struct dc_info_packet_128 *info_packet,
+ bool immediate_update)
+{
+ uint32_t i;
+
+ /* TODOFPGA Figure out a proper number for max_retries polling for lock
+ * use 50 for now.
+ */
+ uint32_t max_retries = 50;
+ const uint32_t *content = (const uint32_t *) &info_packet->sb[0];
+
+ ASSERT(info_packet->hb1 == DC_DP_INFOFRAME_TYPE_PPS);
+
+ /* Configure for PPS packet size (128 bytes) */
+ REG_UPDATE(DP_SEC_CNTL2, DP_SEC_GSP7_PPS, 1);
+
+ /* We need to turn on the clock before programming the AFMT block */
+ REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
+
+ /* Poll until dig_update_lock is not locked -> ASIC internal signal;
+ * assumes the OTG master lock will unlock it
+ */
+ /*REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS, 0, 10, max_retries);*/
+
+ /* Wait for HW/SW GSP memory access conflict to go away */
+ REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT,
+ 0, 10, max_retries);
+
+ /* Clear HW/SW memory access conflict flag */
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, 1);
+
+ /* write generic packet header */
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_INDEX, 7);
+ REG_SET_4(AFMT_GENERIC_HDR, 0,
+ AFMT_GENERIC_HB0, info_packet->hb0,
+ AFMT_GENERIC_HB1, info_packet->hb1,
+ AFMT_GENERIC_HB2, info_packet->hb2,
+ AFMT_GENERIC_HB3, info_packet->hb3);
+
+ /* Write generic packet content 128 bytes long. Four sets are used (indexes 7
+ * through 10) to fit 128 bytes.
+ */
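+ /* Byte accounting (derived from the loop below): each iteration writes the
+ * eight 32-bit AFMT_GENERIC_0..7 registers, i.e. 32 bytes, so four
+ * iterations cover the full 128-byte PPS payload.
+ */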
+ for (i = 0; i < 4; i++) {
+ uint32_t packet_index = 7 + i;
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_INDEX, packet_index);
+
+ REG_WRITE(AFMT_GENERIC_0, *content++);
+ REG_WRITE(AFMT_GENERIC_1, *content++);
+ REG_WRITE(AFMT_GENERIC_2, *content++);
+ REG_WRITE(AFMT_GENERIC_3, *content++);
+ REG_WRITE(AFMT_GENERIC_4, *content++);
+ REG_WRITE(AFMT_GENERIC_5, *content++);
+ REG_WRITE(AFMT_GENERIC_6, *content++);
+ REG_WRITE(AFMT_GENERIC_7, *content++);
+ }
+
+ REG_UPDATE_2(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC7_FRAME_UPDATE, !immediate_update,
+ AFMT_GENERIC7_IMMEDIATE_UPDATE, immediate_update);
+}
+
+/* Set DSC-related configuration.
+ * dsc_mode: 0 disables DSC, other values enable DSC in specified format
+ * dsc_bytes_per_pixel: Bytes per pixel in u3.28 format
+ * dsc_slice_width: Slice width in pixels
+ */
+static void enc2_dp_set_dsc_config(struct stream_encoder *enc,
+ enum optc_dsc_mode dsc_mode,
+ uint32_t dsc_bytes_per_pixel,
+ uint32_t dsc_slice_width)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ REG_UPDATE_2(DP_DSC_CNTL,
+ DP_DSC_MODE, dsc_mode,
+ DP_DSC_SLICE_WIDTH, dsc_slice_width);
+
+ REG_SET(DP_DSC_BYTES_PER_PIXEL, 0,
+ DP_DSC_BYTES_PER_PIXEL, dsc_bytes_per_pixel);
+}
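+
+/* Worked example for the u3.28 format noted above (illustrative values, not
+ * tied to any particular mode): 1.0 byte per pixel is represented as
+ * 1 << 28 = 0x10000000, so a DSC target of 12 bits per pixel (1.5 bytes per
+ * pixel) would be programmed as 0x18000000.
+ */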
+
+
+static void enc2_dp_set_dsc_pps_info_packet(struct stream_encoder *enc,
+ bool enable,
+ uint8_t *dsc_packed_pps,
+ bool immediate_update)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ if (enable) {
+ struct dc_info_packet_128 pps_sdp;
+
+ ASSERT(dsc_packed_pps);
+
+ /* Load PPS into infoframe (SDP) registers */
+ pps_sdp.valid = true;
+ pps_sdp.hb0 = 0;
+ pps_sdp.hb1 = DC_DP_INFOFRAME_TYPE_PPS;
+ pps_sdp.hb2 = 127;
+ pps_sdp.hb3 = 0;
+ memcpy(&pps_sdp.sb[0], dsc_packed_pps, sizeof(pps_sdp.sb));
+ enc2_update_gsp7_128_info_packet(enc1, &pps_sdp, immediate_update);
+
+ /* Enable Generic Stream Packet 7 (GSP) transmission */
+ //REG_UPDATE(DP_SEC_CNTL,
+ // DP_SEC_GSP7_ENABLE, 1);
+
+ /* SW should make sure VBID[6] update line number is bigger
+ * than PPS transmit line number
+ */
+ REG_UPDATE(DP_SEC_CNTL6,
+ DP_SEC_GSP7_LINE_NUM, 2);
+ REG_UPDATE_2(DP_MSA_VBID_MISC,
+ DP_VBID6_LINE_REFERENCE, 0,
+ DP_VBID6_LINE_NUM, 3);
+
+ /* Send PPS data at the line number specified above.
+ * The DP spec requires PPS to be sent only when it changes; however,
+ * since the decoder has to be able to handle a change on every frame,
+ * we always send it (i.e. on every frame) to reduce the chance it is
+ * missed by the decoder. If it turns out PPS must be sent only when it
+ * changes, we can use the DP_SEC_GSP7_SEND register.
+ */
+ REG_UPDATE_2(DP_SEC_CNTL,
+ DP_SEC_GSP7_ENABLE, 1,
+ DP_SEC_STREAM_ENABLE, 1);
+ } else {
+ /* Disable Generic Stream Packet 7 (GSP) transmission */
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP7_ENABLE, 0);
+ REG_UPDATE(DP_SEC_CNTL2, DP_SEC_GSP7_PPS, 0);
+ }
+}
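+
+/* Note on the PPS SDP header programmed above (our reading of the DP SDP
+ * conventions, not spelled out in this file): hb1 = DC_DP_INFOFRAME_TYPE_PPS
+ * identifies the packet as a DSC PPS SDP and hb2 = 127 encodes the payload
+ * size minus one, i.e. the 128-byte PPS; hb0 and hb3 remain zero.
+ */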
+
+
+/* This function reads DSC-related register fields into a dcn_dsc_state struct,
+ * to be logged later in dcn10_log_hw_state.
+ */
+static void enc2_read_state(struct stream_encoder *enc, struct enc_state *s)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ //if dsc is enabled, continue to read
+ REG_GET(DP_DSC_CNTL, DP_DSC_MODE, &s->dsc_mode);
+ if (s->dsc_mode) {
+ REG_GET(DP_DSC_CNTL, DP_DSC_SLICE_WIDTH, &s->dsc_slice_width);
+ REG_GET(DP_SEC_CNTL6, DP_SEC_GSP7_LINE_NUM, &s->sec_gsp_pps_line_num);
+
+ REG_GET(DP_MSA_VBID_MISC, DP_VBID6_LINE_REFERENCE, &s->vbid6_line_reference);
+ REG_GET(DP_MSA_VBID_MISC, DP_VBID6_LINE_NUM, &s->vbid6_line_num);
+
+ REG_GET(DP_SEC_CNTL, DP_SEC_GSP7_ENABLE, &s->sec_gsp_pps_enable);
+ REG_GET(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, &s->sec_stream_enable);
+ }
+}
+
+/* Set Dynamic Metadata configuration.
+ * enable_dme: TRUE: enables Dynamic Metadata Engine, FALSE: disables DME
+ * hubp_requestor_id: HUBP physical instance that is the source of dynamic metadata
+ * only needs to be set when enable_dme is TRUE
+ * dmdata_mode: dynamic metadata packet type: DP, HDMI, or Dolby Vision
+ *
+ * Ensure the OTG master update lock is set when changing DME configuration.
+ */
+void enc2_set_dynamic_metadata(struct stream_encoder *enc,
+ bool enable_dme,
+ uint32_t hubp_requestor_id,
+ enum dynamic_metadata_mode dmdata_mode)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ if (enable_dme) {
+ REG_UPDATE_2(DME_CONTROL,
+ METADATA_HUBP_REQUESTOR_ID, hubp_requestor_id,
+ METADATA_STREAM_TYPE, (dmdata_mode == dmdata_dolby_vision) ? 1 : 0);
+
+ /* Use default line reference DP_SOF for bringup.
+ * Should use OTG_SOF for DRR cases
+ */
+ if (dmdata_mode == dmdata_dp)
+ REG_UPDATE_3(DP_SEC_METADATA_TRANSMISSION,
+ DP_SEC_METADATA_PACKET_ENABLE, 1,
+ DP_SEC_METADATA_PACKET_LINE_REFERENCE, 0,
+ DP_SEC_METADATA_PACKET_LINE, 20);
+ else {
+ REG_UPDATE_3(HDMI_METADATA_PACKET_CONTROL,
+ HDMI_METADATA_PACKET_ENABLE, 1,
+ HDMI_METADATA_PACKET_LINE_REFERENCE, 0,
+ HDMI_METADATA_PACKET_LINE, 2);
+
+ if (dmdata_mode == dmdata_dolby_vision)
+ REG_UPDATE(DIG_FE_CNTL,
+ DOLBY_VISION_EN, 1);
+ }
+
+ REG_UPDATE(DME_CONTROL,
+ METADATA_ENGINE_EN, 1);
+ } else {
+ REG_UPDATE(DME_CONTROL,
+ METADATA_ENGINE_EN, 0);
+
+ if (dmdata_mode == dmdata_dp)
+ REG_UPDATE(DP_SEC_METADATA_TRANSMISSION,
+ DP_SEC_METADATA_PACKET_ENABLE, 0);
+ else {
+ REG_UPDATE(HDMI_METADATA_PACKET_CONTROL,
+ HDMI_METADATA_PACKET_ENABLE, 0);
+ REG_UPDATE(DIG_FE_CNTL,
+ DOLBY_VISION_EN, 0);
+ }
+ }
+}
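+
+/* Usage note (inferred from the doc comment above and the code): when
+ * enable_dme is false the hubp_requestor_id argument is ignored, and callers
+ * are expected to hold the OTG master update lock while reprogramming DME.
+ */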
+
+static void enc2_stream_encoder_update_dp_info_packets_sdp_line_num(
+ struct stream_encoder *enc,
+ struct encoder_info_frame *info_frame)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ if (info_frame->adaptive_sync.valid == true &&
+ info_frame->sdp_line_num.adaptive_sync_line_num_valid == true) {
+ //00: REFER_TO_DP_SOF, 01: REFER_TO_OTG_SOF
+ REG_UPDATE(DP_SEC_CNTL1, DP_SEC_GSP5_LINE_REFERENCE, 1);
+
+ REG_UPDATE(DP_SEC_CNTL5, DP_SEC_GSP5_LINE_NUM,
+ info_frame->sdp_line_num.adaptive_sync_line_num);
+ }
+}
+
+static void enc2_stream_encoder_update_dp_info_packets(
+ struct stream_encoder *enc,
+ const struct encoder_info_frame *info_frame)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+ uint32_t dmdata_packet_enabled = 0;
+
+ enc1_stream_encoder_update_dp_info_packets(enc, info_frame);
+
+ /* check if dynamic metadata packet transmission is enabled */
+ REG_GET(DP_SEC_METADATA_TRANSMISSION,
+ DP_SEC_METADATA_PACKET_ENABLE, &dmdata_packet_enabled);
+
+ if (dmdata_packet_enabled)
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+}
+
+static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
+{
+ bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420;
+
+ two_pix = two_pix || (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422
+ && !timing->dsc_cfg.ycbcr422_simple);
+ return two_pix;
+}
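+
+/* Two pixels share one container for YCbCr 4:2:0, and for DSC YCbCr 4:2:2 when
+ * simple 4:2:2 mode is not used; enc2_stream_encoder_dp_unblank below accounts
+ * for this case through the n_multiply / DP_VID_N_MUL handling.
+ */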
+
+void enc2_stream_encoder_dp_unblank(
+ struct dc_link *link,
+ struct stream_encoder *enc,
+ const struct encoder_unblank_param *param)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ if (param->link_settings.link_rate != LINK_RATE_UNKNOWN) {
+ uint32_t n_vid = 0x8000;
+ uint32_t m_vid;
+ uint32_t n_multiply = 0;
+ uint64_t m_vid_l = n_vid;
+
+ /* YCbCr 4:2:0 : Computed VID_M will be 2X the input rate */
+ if (is_two_pixels_per_containter(&param->timing) || param->opp_cnt > 1) {
+ /*this logic should be the same in get_pixel_clock_parameters() */
+ n_multiply = 1;
+ }
+ /* M / N = Fstream / Flink
+ * m_vid / n_vid = pixel rate / link rate
+ */
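+ /* Worked example (illustrative numbers, assuming the usual DC encoding of
+ * link_rate in units of 0.27 Gbps): a 594 MHz pixel clock on an HBR2 link
+ * (link_rate = 0x14, so 0x14 * LINK_RATE_REF_FREQ_IN_KHZ = 540000 kHz)
+ * gives m_vid = 0x8000 * 594000 / 540000 = 36044 after integer division.
+ */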
+
+ m_vid_l *= param->timing.pix_clk_100hz / 10;
+ m_vid_l = div_u64(m_vid_l,
+ param->link_settings.link_rate
+ * LINK_RATE_REF_FREQ_IN_KHZ);
+
+ m_vid = (uint32_t) m_vid_l;
+
+ /* enable auto measurement */
+
+ REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 0);
+
+ /* auto measurement needs 1 full 0x8000 symbol cycle to kick in,
+ * therefore program initial values for Mvid and Nvid
+ */
+
+ REG_UPDATE(DP_VID_N, DP_VID_N, n_vid);
+
+ REG_UPDATE(DP_VID_M, DP_VID_M, m_vid);
+
+ REG_UPDATE_2(DP_VID_TIMING,
+ DP_VID_M_N_GEN_EN, 1,
+ DP_VID_N_MUL, n_multiply);
+ }
+
+ /* make sure stream is disabled before resetting steer fifo */
+ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, false);
+ REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, 0, 10, 5000);
+
+ /* set DIG_START to 0x1 to reset FIFO */
+ REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
+ udelay(1);
+
+ /* write 0 to take the FIFO out of reset */
+
+ REG_UPDATE(DIG_FE_CNTL, DIG_START, 0);
+
+ /* switch DP encoder to CRTC data, but reset the FIFO first. It may
+ * overflow during a mode transition and sometimes doesn't recover.
+ */
+ REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1);
+ udelay(10);
+
+ REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
+
+ /* wait 100us for DIG/DP logic to prime
+ * (i.e. a few video lines)
+ */
+ udelay(100);
+
+ /* the hardware would start sending video at the start of the next DP
+ * frame (i.e. rising edge of the vblank).
+ * NOTE: We used to program DP_VID_STREAM_DIS_DEFER = 2 here, but this
+ * register has no effect on enable transition! HW always guarantees
+ * VID_STREAM enable at start of next frame, and this is not
+ * programmable
+ */
+
+ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
+
+ link->dc->link_srv->dp_trace_source_sequence(link,
+ DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
+}
+
+static void enc2_dp_set_odm_combine(
+ struct stream_encoder *enc,
+ bool odm_combine)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_COMBINE, odm_combine);
+}
+
+void enc2_stream_encoder_dp_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ enum dc_color_space output_color_space,
+ bool use_vsc_sdp_for_colorimetry,
+ uint32_t enable_sdp_splitting)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ enc1_stream_encoder_dp_set_stream_attribute(enc,
+ crtc_timing,
+ output_color_space,
+ use_vsc_sdp_for_colorimetry,
+ enable_sdp_splitting);
+
+ REG_UPDATE(DP_SEC_FRAMING4,
+ DP_SST_SDP_SPLITTING, enable_sdp_splitting);
+}
+
+uint32_t enc2_get_fifo_cal_average_level(
+ struct stream_encoder *enc)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+ uint32_t fifo_level;
+
+ REG_GET(DIG_FIFO_STATUS,
+ DIG_FIFO_CAL_AVERAGE_LEVEL, &fifo_level);
+ return fifo_level;
+}
+
+static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
+ .dp_set_odm_combine =
+ enc2_dp_set_odm_combine,
+ .dp_set_stream_attribute =
+ enc2_stream_encoder_dp_set_stream_attribute,
+ .hdmi_set_stream_attribute =
+ enc1_stream_encoder_hdmi_set_stream_attribute,
+ .dvi_set_stream_attribute =
+ enc1_stream_encoder_dvi_set_stream_attribute,
+ .set_throttled_vcp_size =
+ enc1_stream_encoder_set_throttled_vcp_size,
+ .update_hdmi_info_packets =
+ enc2_stream_encoder_update_hdmi_info_packets,
+ .stop_hdmi_info_packets =
+ enc2_stream_encoder_stop_hdmi_info_packets,
+ .update_dp_info_packets_sdp_line_num =
+ enc2_stream_encoder_update_dp_info_packets_sdp_line_num,
+ .update_dp_info_packets =
+ enc2_stream_encoder_update_dp_info_packets,
+ .send_immediate_sdp_message =
+ enc1_stream_encoder_send_immediate_sdp_message,
+ .stop_dp_info_packets =
+ enc1_stream_encoder_stop_dp_info_packets,
+ .dp_blank =
+ enc1_stream_encoder_dp_blank,
+ .dp_unblank =
+ enc2_stream_encoder_dp_unblank,
+ .audio_mute_control = enc1_se_audio_mute_control,
+
+ .dp_audio_setup = enc1_se_dp_audio_setup,
+ .dp_audio_enable = enc1_se_dp_audio_enable,
+ .dp_audio_disable = enc1_se_dp_audio_disable,
+
+ .hdmi_audio_setup = enc1_se_hdmi_audio_setup,
+ .hdmi_audio_disable = enc1_se_hdmi_audio_disable,
+ .setup_stereo_sync = enc1_setup_stereo_sync,
+ .set_avmute = enc1_stream_encoder_set_avmute,
+ .dig_connect_to_otg = enc1_dig_connect_to_otg,
+ .dig_source_otg = enc1_dig_source_otg,
+
+ .dp_get_pixel_format =
+ enc1_stream_encoder_dp_get_pixel_format,
+
+ .enc_read_state = enc2_read_state,
+ .dp_set_dsc_config = enc2_dp_set_dsc_config,
+ .dp_set_dsc_pps_info_packet = enc2_dp_set_dsc_pps_info_packet,
+ .set_dynamic_metadata = enc2_set_dynamic_metadata,
+ .hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,
+ .get_fifo_cal_average_level = enc2_get_fifo_cal_average_level,
+};
+
+void dcn20_stream_encoder_construct(
+ struct dcn10_stream_encoder *enc1,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ enum engine_id eng_id,
+ const struct dcn10_stream_enc_registers *regs,
+ const struct dcn10_stream_encoder_shift *se_shift,
+ const struct dcn10_stream_encoder_mask *se_mask)
+{
+ enc1->base.funcs = &dcn20_str_enc_funcs;
+ enc1->base.ctx = ctx;
+ enc1->base.id = eng_id;
+ enc1->base.bp = bp;
+ enc1->regs = regs;
+ enc1->se_shift = se_shift;
+ enc1->se_mask = se_mask;
+ enc1->base.stream_enc_inst = eng_id - ENGINE_ID_DIGA;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
new file mode 100644
index 0000000000..baa1e539f3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_STREAM_ENCODER_DCN20_H__
+#define __DC_STREAM_ENCODER_DCN20_H__
+
+#include "stream_encoder.h"
+#include "dcn10/dcn10_stream_encoder.h"
+
+
+#define SE_DCN2_REG_LIST(id)\
+ SE_COMMON_DCN_REG_LIST(id),\
+ SRI(HDMI_GENERIC_PACKET_CONTROL4, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL5, DIG, id), \
+ SRI(DP_DSC_CNTL, DP, id), \
+ SRI(DP_DSC_BYTES_PER_PIXEL, DP, id), \
+ SRI(DME_CONTROL, DIG, id),\
+ SRI(DP_SEC_METADATA_TRANSMISSION, DP, id), \
+ SRI(HDMI_METADATA_PACKET_CONTROL, DIG, id), \
+ SRI(DP_SEC_FRAMING4, DP, id)
+
+#define SE_COMMON_MASK_SH_LIST_DCN20(mask_sh)\
+ SE_COMMON_MASK_SH_LIST_SOC(mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC2_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC2_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC3_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC3_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC4_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC4_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC5_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC5_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC6_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC6_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC7_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC7_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL1, HDMI_GENERIC0_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL1, HDMI_GENERIC1_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL2, HDMI_GENERIC2_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL2, HDMI_GENERIC3_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL3, HDMI_GENERIC4_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL3, HDMI_GENERIC5_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL4, HDMI_GENERIC6_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL4, HDMI_GENERIC7_LINE, mask_sh),\
+ SE_SF(DP0_DP_DSC_CNTL, DP_DSC_MODE, mask_sh),\
+ SE_SF(DP0_DP_DSC_CNTL, DP_DSC_SLICE_WIDTH, mask_sh),\
+ SE_SF(DP0_DP_DSC_BYTES_PER_PIXEL, DP_DSC_BYTES_PER_PIXEL, mask_sh),\
+ SE_SF(DP0_DP_MSA_VBID_MISC, DP_VBID6_LINE_REFERENCE, mask_sh),\
+ SE_SF(DP0_DP_MSA_VBID_MISC, DP_VBID6_LINE_NUM, mask_sh),\
+ SE_SF(DIG0_DME_CONTROL, METADATA_ENGINE_EN, mask_sh),\
+ SE_SF(DIG0_DME_CONTROL, METADATA_HUBP_REQUESTOR_ID, mask_sh),\
+ SE_SF(DIG0_DME_CONTROL, METADATA_STREAM_TYPE, mask_sh),\
+ SE_SF(DP0_DP_SEC_METADATA_TRANSMISSION, DP_SEC_METADATA_PACKET_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_METADATA_TRANSMISSION, DP_SEC_METADATA_PACKET_LINE_REFERENCE, mask_sh),\
+ SE_SF(DP0_DP_SEC_METADATA_TRANSMISSION, DP_SEC_METADATA_PACKET_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_METADATA_PACKET_CONTROL, HDMI_METADATA_PACKET_ENABLE, mask_sh),\
+ SE_SF(DIG0_HDMI_METADATA_PACKET_CONTROL, HDMI_METADATA_PACKET_LINE_REFERENCE, mask_sh),\
+ SE_SF(DIG0_HDMI_METADATA_PACKET_CONTROL, HDMI_METADATA_PACKET_LINE, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, DOLBY_VISION_EN, mask_sh),\
+ SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_COMBINE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL1, DP_SEC_GSP5_LINE_REFERENCE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL5, DP_SEC_GSP5_LINE_NUM, mask_sh),\
+ SE_SF(DP0_DP_SEC_FRAMING4, DP_SST_SDP_SPLITTING, mask_sh)
+
+void dcn20_stream_encoder_construct(
+ struct dcn10_stream_encoder *enc1,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ enum engine_id eng_id,
+ const struct dcn10_stream_enc_registers *regs,
+ const struct dcn10_stream_encoder_shift *se_shift,
+ const struct dcn10_stream_encoder_mask *se_mask);
+
+void enc2_stream_encoder_dp_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ enum dc_color_space output_color_space,
+ bool use_vsc_sdp_for_colorimetry,
+ uint32_t enable_sdp_splitting);
+
+void enc2_stream_encoder_dp_unblank(
+ struct dc_link *link,
+ struct stream_encoder *enc,
+ const struct encoder_unblank_param *param);
+
+void enc2_set_dynamic_metadata(struct stream_encoder *enc,
+ bool enable_dme,
+ uint32_t hubp_requestor_id,
+ enum dynamic_metadata_mode dmdata_mode);
+
+uint32_t enc2_get_fifo_cal_average_level(
+ struct stream_encoder *enc);
+
+#endif /* __DC_STREAM_ENCODER_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c
new file mode 100644
index 0000000000..96c2632233
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/delay.h>
+
+#include "dcn20_vmid.h"
+#include "reg_helper.h"
+
+#define REG(reg)\
+ vmid->regs->reg
+
+#define CTX \
+ vmid->ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ vmid->shifts->field_name, vmid->masks->field_name
+
+static void dcn20_wait_for_vmid_ready(struct dcn20_vmid *vmid)
+{
+ /* According to the hardware spec, we need to poll for the lowest
+ * bit of PAGE_TABLE_BASE_ADDR_LO32 = 1 any time a GPUVM
+ * context is updated. We can't use REG_WAIT here since we
+ * don't have a separate field to wait on.
+ *
+ * TODO: Confirm timeout / poll interval with hardware team
+ */
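+
+ /* With the constants below this polls for at most 10000 * 5 us = 50 ms;
+ * treat that budget as a placeholder tied to the TODO above rather than a
+ * documented requirement.
+ */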
+
+ int max_times = 10000;
+ int delay_us = 5;
+ int i;
+
+ for (i = 0; i < max_times; ++i) {
+ uint32_t entry_lo32;
+
+ REG_GET(PAGE_TABLE_BASE_ADDR_LO32,
+ VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32,
+ &entry_lo32);
+
+ if (entry_lo32 & 0x1)
+ return;
+
+ udelay(delay_us);
+ }
+
+ /* VM setup timed out */
+ DC_LOG_WARNING("Timeout while waiting for GPUVM context update\n");
+ ASSERT(0);
+}
+
+void dcn20_vmid_setup(struct dcn20_vmid *vmid, const struct dcn_vmid_page_table_config *config)
+{
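+ /* Address split, as implied by the shifts and masks below (illustrative):
+ * the start/end values are logical page numbers whose bits [35:32] land in
+ * the *_HI4 fields and bits [31:0] in the *_LO32 fields, while the page
+ * table base address is split into a full HI32/LO32 pair; e.g. a base of
+ * 0x0000001234567000 programs HI32 = 0x00000012 and LO32 = 0x34567000.
+ */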
+ REG_SET(PAGE_TABLE_START_ADDR_HI32, 0,
+ VM_CONTEXT0_START_LOGICAL_PAGE_NUMBER_HI4, (config->page_table_start_addr >> 32) & 0xF);
+ REG_SET(PAGE_TABLE_START_ADDR_LO32, 0,
+ VM_CONTEXT0_START_LOGICAL_PAGE_NUMBER_LO32, config->page_table_start_addr & 0xFFFFFFFF);
+
+ REG_SET(PAGE_TABLE_END_ADDR_HI32, 0,
+ VM_CONTEXT0_END_LOGICAL_PAGE_NUMBER_HI4, (config->page_table_end_addr >> 32) & 0xF);
+ REG_SET(PAGE_TABLE_END_ADDR_LO32, 0,
+ VM_CONTEXT0_END_LOGICAL_PAGE_NUMBER_LO32, config->page_table_end_addr & 0xFFFFFFFF);
+
+ REG_SET_2(CNTL, 0,
+ VM_CONTEXT0_PAGE_TABLE_DEPTH, config->depth,
+ VM_CONTEXT0_PAGE_TABLE_BLOCK_SIZE, config->block_size);
+
+ REG_SET(PAGE_TABLE_BASE_ADDR_HI32, 0,
+ VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_HI32, (config->page_table_base_addr >> 32) & 0xFFFFFFFF);
+ /* Note: per hardware spec PAGE_TABLE_BASE_ADDR_LO32 must be programmed last in sequence */
+ REG_SET(PAGE_TABLE_BASE_ADDR_LO32, 0,
+ VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32, config->page_table_base_addr & 0xFFFFFFFF);
+
+ dcn20_wait_for_vmid_ready(vmid);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h
new file mode 100644
index 0000000000..e7a1b7fa2c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DAL_DC_DCN20_DCN20_VMID_H_
+#define DAL_DC_DCN20_DCN20_VMID_H_
+
+#include "vmid.h"
+
+#define DCN20_VMID_REG_LIST(id)\
+ SRI(CNTL, DCN_VM_CONTEXT, id),\
+ SRI(PAGE_TABLE_BASE_ADDR_HI32, DCN_VM_CONTEXT, id),\
+ SRI(PAGE_TABLE_BASE_ADDR_LO32, DCN_VM_CONTEXT, id),\
+ SRI(PAGE_TABLE_START_ADDR_HI32, DCN_VM_CONTEXT, id),\
+ SRI(PAGE_TABLE_START_ADDR_LO32, DCN_VM_CONTEXT, id),\
+ SRI(PAGE_TABLE_END_ADDR_HI32, DCN_VM_CONTEXT, id),\
+ SRI(PAGE_TABLE_END_ADDR_LO32, DCN_VM_CONTEXT, id)
+
+#define DCN20_VMID_MASK_SH_LIST(mask_sh)\
+ SF(DCN_VM_CONTEXT0_CNTL, VM_CONTEXT0_PAGE_TABLE_DEPTH, mask_sh),\
+ SF(DCN_VM_CONTEXT0_CNTL, VM_CONTEXT0_PAGE_TABLE_BLOCK_SIZE, mask_sh),\
+ SF(DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_HI32, mask_sh),\
+ SF(DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32, mask_sh),\
+ SF(DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, VM_CONTEXT0_START_LOGICAL_PAGE_NUMBER_HI4, mask_sh),\
+ SF(DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, VM_CONTEXT0_START_LOGICAL_PAGE_NUMBER_LO32, mask_sh),\
+ SF(DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, VM_CONTEXT0_END_LOGICAL_PAGE_NUMBER_HI4, mask_sh),\
+ SF(DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, VM_CONTEXT0_END_LOGICAL_PAGE_NUMBER_LO32, mask_sh)
+
+#define DCN20_VMID_REG_FIELD_LIST(type)\
+ type VM_CONTEXT0_PAGE_TABLE_DEPTH;\
+ type VM_CONTEXT0_PAGE_TABLE_BLOCK_SIZE;\
+ type VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_HI32;\
+ type VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32;\
+ type VM_CONTEXT0_START_LOGICAL_PAGE_NUMBER_HI4;\
+ type VM_CONTEXT0_START_LOGICAL_PAGE_NUMBER_LO32;\
+ type VM_CONTEXT0_END_LOGICAL_PAGE_NUMBER_HI4;\
+ type VM_CONTEXT0_END_LOGICAL_PAGE_NUMBER_LO32
+
+struct dcn20_vmid_shift {
+ DCN20_VMID_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn20_vmid_mask {
+ DCN20_VMID_REG_FIELD_LIST(uint32_t);
+};
+
+struct dcn20_vmid {
+ struct dc_context *ctx;
+ const struct dcn_vmid_registers *regs;
+ const struct dcn20_vmid_shift *shifts;
+ const struct dcn20_vmid_mask *masks;
+};
+
+void dcn20_vmid_setup(struct dcn20_vmid *vmid, const struct dcn_vmid_page_table_config *config);
+
+#endif /* DAL_DC_DCN20_DCN20_VMID_H_ */