Diffstat (limited to 'drivers/gpu/drm/msm/disp')
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h | 341
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h | 358
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h | 403
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h | 431
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h | 223
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h | 419
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h | 230
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h | 159
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h | 232
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h | 151
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h | 171
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h | 411
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h | 279
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h | 473
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h | 433
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h | 448
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h | 80
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c | 493
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h | 100
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 1517
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h | 303
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 2505
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h | 210
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h | 406
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c | 788
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c | 723
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c | 716
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c | 1068
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h | 88
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 676
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h | 849
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c | 709
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h | 293
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c | 213
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h | 94
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c | 387
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c | 99
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h | 98
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c | 644
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h | 80
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c | 583
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h | 146
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c | 187
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h | 111
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h | 510
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c | 62
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h | 65
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c | 327
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h | 138
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c | 702
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h | 352
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c | 300
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h | 158
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c | 544
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h | 368
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c | 241
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h | 119
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c | 223
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h | 88
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h | 66
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 1402
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h | 209
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 1468
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h | 96
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 654
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h | 126
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h | 968
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c | 358
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h | 75
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c | 89
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h | 31
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h | 1155
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h | 1181
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c | 666
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c | 175
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c | 213
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c | 110
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c | 595
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h | 219
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c | 445
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c | 121
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c | 161
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c | 419
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h | 1979
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c | 1415
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h | 126
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c | 203
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 1360
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c | 764
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h | 78
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c | 370
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c | 125
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 1009
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h | 327
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c | 168
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h | 36
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c | 175
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h | 46
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c | 1048
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c | 408
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h | 87
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp_common.xml.h | 111
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp_format.c | 183
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp_kms.c | 138
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp_kms.h | 142
-rw-r--r-- drivers/gpu/drm/msm/disp/msm_disp_snapshot.c | 138
-rw-r--r-- drivers/gpu/drm/msm/disp/msm_disp_snapshot.h | 144
-rw-r--r-- drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c | 196
108 files changed, 45303 insertions(+), 0 deletions(-)
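Throughout the catalog headers below, interrupt lines are given as DPU_IRQ_IDX(<register block>, <bit>) pairs rather than raw numbers. As a rough mental model (a sketch only; the authoritative encoding lives in dpu_hw_interrupts.h, added by this series but not shown in this excerpt), the macro flattens a (status register, bit offset) pair into a single index, each status register carrying 32 interrupt bits:

/* Illustrative flattening only; the driver's real DPU_IRQ_IDX in
 * dpu_hw_interrupts.h may differ (e.g. by reserving index 0). */
#define EXAMPLE_IRQ_IDX(reg_idx, offset)	((reg_idx) * 32 + (offset))
#define EXAMPLE_IRQ_REG(irq_idx)		((irq_idx) / 32)	/* which status register */
#define EXAMPLE_IRQ_BIT(irq_idx)		((irq_idx) % 32)	/* which bit within it */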
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
new file mode 100644
index 0000000000..43c47a19cd
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
@@ -0,0 +1,341 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_3_0_MSM8998_H
+#define _DPU_3_0_MSM8998_H
+
+static const struct dpu_caps msm8998_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0x7,
+ .qseed_type = DPU_SSPP_SCALER_QSEED3,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+ .max_hdeci_exp = MAX_HORZ_DECIMATION,
+ .max_vdeci_exp = MAX_VERT_DECIMATION,
+};
+
+static const struct dpu_mdp_cfg msm8998_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x458,
+ .features = BIT(DPU_MDP_VSYNC_SEL),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 12 },
+ [DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 },
+ [DPU_CLK_CTRL_CURSOR1] = { .reg_off = 0x3b0, .bit_off = 16 },
+ },
+};
+
+static const struct dpu_ctl_cfg msm8998_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x94,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x94,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x94,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0x94,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0x94,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ },
+};
+
+static const struct dpu_sspp_cfg msm8998_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1ac,
+ .features = VIG_MSM8998_MASK,
+ .sblk = &msm8998_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x1ac,
+ .features = VIG_MSM8998_MASK,
+ .sblk = &msm8998_vig_sblk_1,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x1ac,
+ .features = VIG_MSM8998_MASK,
+ .sblk = &msm8998_vig_sblk_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG2,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x1ac,
+ .features = VIG_MSM8998_MASK,
+ .sblk = &msm8998_vig_sblk_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG3,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1ac,
+ .features = DMA_MSM8998_MASK,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1ac,
+ .features = DMA_MSM8998_MASK,
+ .sblk = &sdm845_dma_sblk_1,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1ac,
+ .features = DMA_CURSOR_MSM8998_MASK,
+ .sblk = &sdm845_dma_sblk_2,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x1ac,
+ .features = DMA_CURSOR_MSM8998_MASK,
+ .sblk = &sdm845_dma_sblk_3,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA3,
+ },
+};
+
+static const struct dpu_lm_cfg msm8998_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .pingpong = PINGPONG_NONE,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .pingpong = PINGPONG_NONE,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ },
+};
+
+static const struct dpu_pingpong_cfg msm8998_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SDM845_TE2_MASK,
+ .sblk = &sdm845_pp_sblk_te,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x70800, .len = 0xd4,
+ .features = PINGPONG_SDM845_TE2_MASK,
+ .sblk = &sdm845_pp_sblk_te,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x71000, .len = 0xd4,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x71800, .len = 0xd4,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15),
+ },
+};
+
+static const struct dpu_dsc_cfg msm8998_dsc[] = {
+ {
+ .name = "dsc_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x140,
+ }, {
+ .name = "dsc_1", .id = DSC_1,
+ .base = 0x80400, .len = 0x140,
+ },
+};
+
+static const struct dpu_dspp_cfg msm8998_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &msm8998_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &msm8998_dspp_sblk,
+ },
+};
+
+static const struct dpu_intf_cfg msm8998_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x6a000, .len = 0x280,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 21,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x280,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 21,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x6b000, .len = 0x280,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 21,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x6b800, .len = 0x280,
+ .type = INTF_HDMI,
+ .prog_fetch_lines_worst_case = 21,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ .intr_tear_rd_ptr = -1,
+ },
+};
+
+static const struct dpu_perf_cfg msm8998_perf_data = {
+ .max_bw_low = 6700000,
+ .max_bw_high = 6700000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 800000,
+ .min_dram_ib = 800000,
+ .undersized_prefill_lines = 2,
+ .xtra_prefill_lines = 2,
+ .dest_scale_prefill_lines = 3,
+ .macrotile_prefill_lines = 4,
+ .yuv_nv12_prefill_lines = 8,
+ .linear_prefill_lines = 1,
+ .downscaling_prefill_lines = 1,
+ .amortizable_threshold = 25,
+ .min_prefill_lines = 25,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfffc, 0xff00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(msm8998_qos_linear),
+ .entries = msm8998_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_macrotile),
+ .entries = msm8998_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_nrt),
+ .entries = msm8998_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 200,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version msm8998_mdss_ver = {
+ .core_major_ver = 3,
+ .core_minor_ver = 0,
+};
+
+const struct dpu_mdss_cfg dpu_msm8998_cfg = {
+ .mdss_ver = &msm8998_mdss_ver,
+ .caps = &msm8998_dpu_caps,
+ .mdp = &msm8998_mdp,
+ .ctl_count = ARRAY_SIZE(msm8998_ctl),
+ .ctl = msm8998_ctl,
+ .sspp_count = ARRAY_SIZE(msm8998_sspp),
+ .sspp = msm8998_sspp,
+ .mixer_count = ARRAY_SIZE(msm8998_lm),
+ .mixer = msm8998_lm,
+ .dspp_count = ARRAY_SIZE(msm8998_dspp),
+ .dspp = msm8998_dspp,
+ .pingpong_count = ARRAY_SIZE(msm8998_pp),
+ .pingpong = msm8998_pp,
+ .dsc_count = ARRAY_SIZE(msm8998_dsc),
+ .dsc = msm8998_dsc,
+ .intf_count = ARRAY_SIZE(msm8998_intf),
+ .intf = msm8998_intf,
+ .vbif_count = ARRAY_SIZE(msm8998_vbif),
+ .vbif = msm8998_vbif,
+ .perf = &msm8998_perf_data,
+};
+
+#endif
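Each per-SoC header ends in a dpu_mdss_cfg like dpu_msm8998_cfg above, bundling the block arrays together with ARRAY_SIZE() counts so consumers can iterate without hard-coded lengths. A hypothetical lookup (illustrative only; the helper name and its existence are assumptions, not one of the driver's actual functions) would walk such an array by id:

/* Hypothetical sketch: find a CTL block by id in a catalog such as
 * dpu_msm8998_cfg above. */
static const struct dpu_ctl_cfg *example_find_ctl(const struct dpu_mdss_cfg *cfg,
						  u32 id)
{
	unsigned int i;

	for (i = 0; i < cfg->ctl_count; i++)
		if (cfg->ctl[i].id == id)
			return &cfg->ctl[i];

	return NULL;
}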
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
new file mode 100644
index 0000000000..88a5177dfd
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_4_0_SDM845_H
+#define _DPU_4_0_SDM845_H
+
+static const struct dpu_caps sdm845_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .qseed_type = DPU_SSPP_SCALER_QSEED3,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+ .max_hdeci_exp = MAX_HORZ_DECIMATION,
+ .max_vdeci_exp = MAX_VERT_DECIMATION,
+};
+
+static const struct dpu_mdp_cfg sdm845_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x45c,
+ .features = BIT(DPU_MDP_AUDIO_SELECT) | BIT(DPU_MDP_VSYNC_SEL),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ },
+};
+
+static const struct dpu_ctl_cfg sdm845_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0xe4,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0xe4,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0xe4,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0xe4,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0xe4,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ },
+};
+
+static const struct dpu_sspp_cfg sdm845_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1c8,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &sdm845_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x1c8,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &sdm845_vig_sblk_1,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x1c8,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &sdm845_vig_sblk_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG2,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x1c8,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &sdm845_vig_sblk_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG3,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1c8,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1c8,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &sdm845_dma_sblk_1,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1c8,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &sdm845_dma_sblk_2,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x1c8,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &sdm845_dma_sblk_3,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA3,
+ },
+};
+
+static const struct dpu_lm_cfg sdm845_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x0, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .pingpong = PINGPONG_NONE,
+ .dspp = DSPP_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x0, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .pingpong = PINGPONG_NONE,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ },
+};
+
+static const struct dpu_dspp_cfg sdm845_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sdm845_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SDM845_TE2_MASK,
+ .sblk = &sdm845_pp_sblk_te,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x70800, .len = 0xd4,
+ .features = PINGPONG_SDM845_TE2_MASK,
+ .sblk = &sdm845_pp_sblk_te,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x71000, .len = 0xd4,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x71800, .len = 0xd4,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15),
+ },
+};
+
+static const struct dpu_dsc_cfg sdm845_dsc[] = {
+ {
+ .name = "dsc_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x140,
+ }, {
+ .name = "dsc_1", .id = DSC_1,
+ .base = 0x80400, .len = 0x140,
+ }, {
+ .name = "dsc_2", .id = DSC_2,
+ .base = 0x80800, .len = 0x140,
+ }, {
+ .name = "dsc_3", .id = DSC_3,
+ .base = 0x80c00, .len = 0x140,
+ },
+};
+
+static const struct dpu_intf_cfg sdm845_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x6a000, .len = 0x280,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x280,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x6b000, .len = 0x280,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x6b800, .len = 0x280,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ .intr_tear_rd_ptr = -1,
+ },
+};
+
+static const struct dpu_perf_cfg sdm845_perf_data = {
+ .max_bw_low = 6800000,
+ .max_bw_high = 6800000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 800000,
+ .min_dram_ib = 800000,
+ .undersized_prefill_lines = 2,
+ .xtra_prefill_lines = 2,
+ .dest_scale_prefill_lines = 3,
+ .macrotile_prefill_lines = 4,
+ .yuv_nv12_prefill_lines = 8,
+ .linear_prefill_lines = 1,
+ .downscaling_prefill_lines = 1,
+ .amortizable_threshold = 25,
+ .min_prefill_lines = 24,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfff0, 0xf000, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sdm845_qos_linear),
+ .entries = sdm845_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sdm845_qos_macrotile),
+ .entries = sdm845_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sdm845_qos_nrt),
+ .entries = sdm845_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sdm845_mdss_ver = {
+ .core_major_ver = 4,
+ .core_minor_ver = 0,
+};
+
+const struct dpu_mdss_cfg dpu_sdm845_cfg = {
+ .mdss_ver = &sdm845_mdss_ver,
+ .caps = &sdm845_dpu_caps,
+ .mdp = &sdm845_mdp,
+ .ctl_count = ARRAY_SIZE(sdm845_ctl),
+ .ctl = sdm845_ctl,
+ .sspp_count = ARRAY_SIZE(sdm845_sspp),
+ .sspp = sdm845_sspp,
+ .mixer_count = ARRAY_SIZE(sdm845_lm),
+ .mixer = sdm845_lm,
+ .dspp_count = ARRAY_SIZE(sdm845_dspp),
+ .dspp = sdm845_dspp,
+ .pingpong_count = ARRAY_SIZE(sdm845_pp),
+ .pingpong = sdm845_pp,
+ .dsc_count = ARRAY_SIZE(sdm845_dsc),
+ .dsc = sdm845_dsc,
+ .intf_count = ARRAY_SIZE(sdm845_intf),
+ .intf = sdm845_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .perf = &sdm845_perf_data,
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
new file mode 100644
index 0000000000..99acaf917e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
@@ -0,0 +1,403 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_5_0_SM8150_H
+#define _DPU_5_0_SM8150_H
+
+static const struct dpu_caps sm8150_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .qseed_type = DPU_SSPP_SCALER_QSEED3,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 4096,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+ .max_hdeci_exp = MAX_HORZ_DECIMATION,
+ .max_vdeci_exp = MAX_VERT_DECIMATION,
+};
+
+static const struct dpu_mdp_cfg sm8150_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x45c,
+ .features = BIT(DPU_MDP_AUDIO_SELECT),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ },
+};
+
+/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
+static const struct dpu_ctl_cfg sm8150_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a00, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_sspp_cfg sm8150_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &sdm845_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &sdm845_vig_sblk_1,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &sdm845_vig_sblk_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG2,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &sdm845_vig_sblk_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG3,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1f0,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1f0,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_1,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1f0,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_2,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x1f0,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_3,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA3,
+ },
+};
+
+static const struct dpu_lm_cfg sm8150_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ },
+};
+
+static const struct dpu_dspp_cfg sm8150_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sm8150_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x70800, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x71000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x71800, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x72000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x72800, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ .intr_rdptr = -1,
+ },
+};
+
+static const struct dpu_merge_3d_cfg sm8150_merge_3d[] = {
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x83000, .len = 0x8,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x83100, .len = 0x8,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x83200, .len = 0x8,
+ },
+};
+
+static const struct dpu_dsc_cfg sm8150_dsc[] = {
+ {
+ .name = "dsc_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ }, {
+ .name = "dsc_1", .id = DSC_1,
+ .base = 0x80400, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ }, {
+ .name = "dsc_2", .id = DSC_2,
+ .base = 0x80800, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ }, {
+ .name = "dsc_3", .id = DSC_3,
+ .base = 0x80c00, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ },
+};
+
+static const struct dpu_intf_cfg sm8150_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x6a000, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x2bc,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x6b000, .len = 0x2bc,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x6b800, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ .intr_tear_rd_ptr = -1,
+ },
+};
+
+static const struct dpu_perf_cfg sm8150_perf_data = {
+ .max_bw_low = 12800000,
+ .max_bw_high = 12800000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 800000,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 24,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfff8, 0xf000, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sm8150_qos_linear),
+ .entries = sm8150_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sm8150_mdss_ver = {
+ .core_major_ver = 5,
+ .core_minor_ver = 0,
+};
+
+const struct dpu_mdss_cfg dpu_sm8150_cfg = {
+ .mdss_ver = &sm8150_mdss_ver,
+ .caps = &sm8150_dpu_caps,
+ .mdp = &sm8150_mdp,
+ .ctl_count = ARRAY_SIZE(sm8150_ctl),
+ .ctl = sm8150_ctl,
+ .sspp_count = ARRAY_SIZE(sm8150_sspp),
+ .sspp = sm8150_sspp,
+ .mixer_count = ARRAY_SIZE(sm8150_lm),
+ .mixer = sm8150_lm,
+ .dspp_count = ARRAY_SIZE(sm8150_dspp),
+ .dspp = sm8150_dspp,
+ .dsc_count = ARRAY_SIZE(sm8150_dsc),
+ .dsc = sm8150_dsc,
+ .pingpong_count = ARRAY_SIZE(sm8150_pp),
+ .pingpong = sm8150_pp,
+ .merge_3d_count = ARRAY_SIZE(sm8150_merge_3d),
+ .merge_3d = sm8150_merge_3d,
+ .intf_count = ARRAY_SIZE(sm8150_intf),
+ .intf = sm8150_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .perf = &sm8150_perf_data,
+};
+
+#endif
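Note that the lm_pair fields in these catalogs are reciprocal (LM_0 pairs LM_1 and vice versa; LM_2 pairs LM_3 on sm8150), marking which mixers can be ganged into a dual-pipe topology. A hypothetical self-check over a catalog (illustrative only, not part of the driver) states the invariant:

/* Hypothetical consistency check: mixer pairing declared in a
 * catalog must be symmetric. */
static bool example_lm_pairs_symmetric(const struct dpu_mdss_cfg *cfg)
{
	unsigned int i, j;

	for (i = 0; i < cfg->mixer_count; i++) {
		const struct dpu_lm_cfg *lm = &cfg->mixer[i];

		if (!lm->lm_pair)	/* unpaired mixers, e.g. lm_3/lm_4 on msm8998 */
			continue;

		for (j = 0; j < cfg->mixer_count; j++)
			if (cfg->mixer[j].id == lm->lm_pair &&
			    cfg->mixer[j].lm_pair != lm->id)
				return false;
	}

	return true;
}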
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
new file mode 100644
index 0000000000..c92fbf24fb
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
@@ -0,0 +1,431 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_5_1_SC8180X_H
+#define _DPU_5_1_SC8180X_H
+
+static const struct dpu_caps sc8180x_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .qseed_type = DPU_SSPP_SCALER_QSEED3,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 4096,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+ .max_hdeci_exp = MAX_HORZ_DECIMATION,
+ .max_vdeci_exp = MAX_VERT_DECIMATION,
+};
+
+static const struct dpu_mdp_cfg sc8180x_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x45c,
+ .features = BIT(DPU_MDP_AUDIO_SELECT),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ },
+};
+
+static const struct dpu_ctl_cfg sc8180x_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a00, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_sspp_cfg sc8180x_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &sdm845_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &sdm845_vig_sblk_1,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &sdm845_vig_sblk_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG2,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &sdm845_vig_sblk_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG3,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1f0,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1f0,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_1,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1f0,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_2,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x1f0,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_3,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA3,
+ },
+};
+
+static const struct dpu_lm_cfg sc8180x_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ },
+};
+
+static const struct dpu_dspp_cfg sc8180x_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sc8180x_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x70800, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x71000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x71800, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x72000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x72800, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ .intr_rdptr = -1,
+ },
+};
+
+static const struct dpu_merge_3d_cfg sc8180x_merge_3d[] = {
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x83000, .len = 0x8,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x83100, .len = 0x8,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x83200, .len = 0x8,
+ },
+};
+
+static const struct dpu_dsc_cfg sc8180x_dsc[] = {
+ {
+ .name = "dsc_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ }, {
+ .name = "dsc_1", .id = DSC_1,
+ .base = 0x80400, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ }, {
+ .name = "dsc_2", .id = DSC_2,
+ .base = 0x80800, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ }, {
+ .name = "dsc_3", .id = DSC_3,
+ .base = 0x80c00, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ }, {
+ .name = "dsc_4", .id = DSC_4,
+ .base = 0x81000, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ }, {
+ .name = "dsc_5", .id = DSC_5,
+ .base = 0x81400, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ },
+};
+
+static const struct dpu_intf_cfg sc8180x_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x6a000, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x2bc,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x6b000, .len = 0x2bc,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ },
+ /* INTF_3 is for MST, wired to INTF_DP 0 and 1, use dummy index until this is supported */
+ {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x6b800, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = 999,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_4", .id = INTF_4,
+ .base = 0x6c000, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 20),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 21),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_5", .id = INTF_5,
+ .base = 0x6c800, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_2,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 22),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 23),
+ .intr_tear_rd_ptr = -1,
+ },
+};
+
+static const struct dpu_perf_cfg sc8180x_perf_data = {
+ .max_bw_low = 9600000,
+ .max_bw_high = 9600000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 800000,
+ .min_dram_ib = 800000,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfff0, 0xf000, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sc8180x_mdss_ver = {
+ .core_major_ver = 5,
+ .core_minor_ver = 1,
+};
+
+const struct dpu_mdss_cfg dpu_sc8180x_cfg = {
+ .mdss_ver = &sc8180x_mdss_ver,
+ .caps = &sc8180x_dpu_caps,
+ .mdp = &sc8180x_mdp,
+ .ctl_count = ARRAY_SIZE(sc8180x_ctl),
+ .ctl = sc8180x_ctl,
+ .sspp_count = ARRAY_SIZE(sc8180x_sspp),
+ .sspp = sc8180x_sspp,
+ .mixer_count = ARRAY_SIZE(sc8180x_lm),
+ .mixer = sc8180x_lm,
+ .dspp_count = ARRAY_SIZE(sc8180x_dspp),
+ .dspp = sc8180x_dspp,
+ .dsc_count = ARRAY_SIZE(sc8180x_dsc),
+ .dsc = sc8180x_dsc,
+ .pingpong_count = ARRAY_SIZE(sc8180x_pp),
+ .pingpong = sc8180x_pp,
+ .merge_3d_count = ARRAY_SIZE(sc8180x_merge_3d),
+ .merge_3d = sc8180x_merge_3d,
+ .intf_count = ARRAY_SIZE(sc8180x_intf),
+ .intf = sc8180x_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .perf = &sc8180x_perf_data,
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
new file mode 100644
index 0000000000..2491eed100
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
@@ -0,0 +1,223 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 Marijn Suijten <marijn.suijten@somainline.org>. All rights reserved.
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_5_4_SM6125_H
+#define _DPU_5_4_SM6125_H
+
+static const struct dpu_caps sm6125_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0x6,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .max_linewidth = 2160,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+ .max_hdeci_exp = MAX_HORZ_DECIMATION,
+ .max_vdeci_exp = MAX_VERT_DECIMATION,
+};
+
+static const struct dpu_mdp_cfg sm6125_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x45c,
+ .features = 0,
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ },
+};
+
+static const struct dpu_ctl_cfg sm6125_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a00, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_sspp_cfg sm6125_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f0,
+ .features = VIG_SM6125_MASK,
+ .sblk = &sm6125_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1f0,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1f0,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_1,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ },
+};
+
+static const struct dpu_lm_cfg sm6125_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_QCM2290_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ .lm_pair = LM_1,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_QCM2290_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .pingpong = PINGPONG_1,
+ .dspp = 0,
+ .lm_pair = LM_0,
+ },
+};
+
+static const struct dpu_dspp_cfg sm6125_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sm6125_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .merge_3d = 0,
+ .sblk = &sdm845_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x70800, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .merge_3d = 0,
+ .sblk = &sdm845_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ .intr_rdptr = -1,
+ },
+};
+
+static const struct dpu_intf_cfg sm6125_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x6a000, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x2c0,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DSI,
+ .controller_id = 0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ },
+};
+
+static const struct dpu_perf_cfg sm6125_perf_data = {
+ .max_bw_low = 4100000,
+ .max_bw_high = 4100000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 0, /* No LLCC on this SoC */
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 24,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfff8, 0xf000, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sm8150_qos_linear),
+ .entries = sm8150_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sm6125_mdss_ver = {
+ .core_major_ver = 5,
+ .core_minor_ver = 4,
+};
+
+const struct dpu_mdss_cfg dpu_sm6125_cfg = {
+ .mdss_ver = &sm6125_mdss_ver,
+ .caps = &sm6125_dpu_caps,
+ .mdp = &sm6125_mdp,
+ .ctl_count = ARRAY_SIZE(sm6125_ctl),
+ .ctl = sm6125_ctl,
+ .sspp_count = ARRAY_SIZE(sm6125_sspp),
+ .sspp = sm6125_sspp,
+ .mixer_count = ARRAY_SIZE(sm6125_lm),
+ .mixer = sm6125_lm,
+ .dspp_count = ARRAY_SIZE(sm6125_dspp),
+ .dspp = sm6125_dspp,
+ .pingpong_count = ARRAY_SIZE(sm6125_pp),
+ .pingpong = sm6125_pp,
+ .intf_count = ARRAY_SIZE(sm6125_intf),
+ .intf = sm6125_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .perf = &sm6125_perf_data,
+};
+
+#endif
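
Editorial aside: the .intr_* fields throughout these tables pack an interrupt-register index and a bit position into one integer via DPU_IRQ_IDX(), and -1 marks an interrupt the block does not provide (e.g. .intr_rdptr above). A minimal standalone sketch of that packing, assuming the usual 32-bits-per-status-register layout; the register index below is a hypothetical stand-in, not a real MDP_SSPP_TOP0_INTR* value:

#include <stdio.h>

/*
 * Mirrors the driver's DPU_IRQ_IDX() packing under the assumption of
 * 32 interrupt bits per status register; see dpu_hw_interrupts.h for
 * the authoritative definition.
 */
#define DPU_IRQ_IDX(reg_idx, bit) ((reg_idx) * 32 + (bit))

int main(void)
{
	int reg_idx = 1, bit = 9;	/* hypothetical stand-in indices */
	int irq = DPU_IRQ_IDX(reg_idx, bit);

	printf("packed: %d -> reg %d, bit %d\n", irq, irq / 32, irq % 32);
	return 0;
}
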
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
new file mode 100644
index 0000000000..ee781037ad
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
@@ -0,0 +1,419 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_6_0_SM8250_H
+#define _DPU_6_0_SM8250_H
+
+static const struct dpu_caps sm8250_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .qseed_type = DPU_SSPP_SCALER_QSEED4,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 4096,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_mdp_cfg sm8250_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x494,
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
+ },
+};
+
+/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
+static const struct dpu_ctl_cfg sm8250_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a00, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_sspp_cfg sm8250_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f8,
+ .features = VIG_SC7180_MASK_SDMA,
+ .sblk = &sm8250_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x1f8,
+ .features = VIG_SC7180_MASK_SDMA,
+ .sblk = &sm8250_vig_sblk_1,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x1f8,
+ .features = VIG_SC7180_MASK_SDMA,
+ .sblk = &sm8250_vig_sblk_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG2,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x1f8,
+ .features = VIG_SC7180_MASK_SDMA,
+ .sblk = &sm8250_vig_sblk_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG3,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1f8,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1f8,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &sdm845_dma_sblk_1,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1f8,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &sdm845_dma_sblk_2,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x1f8,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &sdm845_dma_sblk_3,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA3,
+ },
+};
+
+static const struct dpu_lm_cfg sm8250_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ },
+};
+
+static const struct dpu_dspp_cfg sm8250_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sm8250_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x70800, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x71000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x71800, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x72000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x72800, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ .intr_rdptr = -1,
+ },
+};
+
+static const struct dpu_merge_3d_cfg sm8250_merge_3d[] = {
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x83000, .len = 0x8,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x83100, .len = 0x8,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x83200, .len = 0x8,
+ },
+};
+
+static const struct dpu_dsc_cfg sm8250_dsc[] = {
+ {
+ .name = "dsc_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ }, {
+ .name = "dsc_1", .id = DSC_1,
+ .base = 0x80400, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ }, {
+ .name = "dsc_2", .id = DSC_2,
+ .base = 0x80800, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ }, {
+ .name = "dsc_3", .id = DSC_3,
+ .base = 0x80c00, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ },
+};
+
+static const struct dpu_intf_cfg sm8250_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x6a000, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x2c0,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x6b000, .len = 0x2c0,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x6b800, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ .intr_tear_rd_ptr = -1,
+ },
+};
+
+static const struct dpu_wb_cfg sm8250_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats,
+ .num_formats = ARRAY_SIZE(wb2_formats),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
+static const struct dpu_perf_cfg sm8250_perf_data = {
+ .max_bw_low = 13700000,
+ .max_bw_high = 16600000,
+ .min_core_ib = 4800000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 35,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfff0, 0xff00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sm8250_mdss_ver = {
+ .core_major_ver = 6,
+ .core_minor_ver = 0,
+};
+
+const struct dpu_mdss_cfg dpu_sm8250_cfg = {
+ .mdss_ver = &sm8250_mdss_ver,
+ .caps = &sm8250_dpu_caps,
+ .mdp = &sm8250_mdp,
+ .ctl_count = ARRAY_SIZE(sm8250_ctl),
+ .ctl = sm8250_ctl,
+ .sspp_count = ARRAY_SIZE(sm8250_sspp),
+ .sspp = sm8250_sspp,
+ .mixer_count = ARRAY_SIZE(sm8250_lm),
+ .mixer = sm8250_lm,
+ .dspp_count = ARRAY_SIZE(sm8250_dspp),
+ .dspp = sm8250_dspp,
+ .dsc_count = ARRAY_SIZE(sm8250_dsc),
+ .dsc = sm8250_dsc,
+ .pingpong_count = ARRAY_SIZE(sm8250_pp),
+ .pingpong = sm8250_pp,
+ .merge_3d_count = ARRAY_SIZE(sm8250_merge_3d),
+ .merge_3d = sm8250_merge_3d,
+ .intf_count = ARRAY_SIZE(sm8250_intf),
+ .intf = sm8250_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .wb_count = ARRAY_SIZE(sm8250_wb),
+ .wb = sm8250_wb,
+ .perf = &sm8250_perf_data,
+};
+
+#endif
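
The .clk_ctrls table above only records where each clock-control bit lives (.reg_off plus .bit_off inside the MDP top block). A hedged, self-contained sketch of the read-modify-write such an entry implies, using an in-memory array in place of MMIO (the real driver goes through its hw helpers and readl()/writel()):

#include <stdint.h>
#include <stdio.h>

/* Simplified mirror of one clk_ctrls entry. */
struct clk_ctrl_reg {
	uint32_t reg_off;
	uint32_t bit_off;
};

/* e.g. sm8250's DPU_CLK_CTRL_WB2 above: register 0x2bc, bit 16 */
static const struct clk_ctrl_reg wb2 = { .reg_off = 0x2bc, .bit_off = 16 };

static uint32_t regs[0x500 / 4];	/* fake register file, not MMIO */

static void force_clk(const struct clk_ctrl_reg *c, int enable)
{
	uint32_t v = regs[c->reg_off / 4];

	if (enable)
		v |= UINT32_C(1) << c->bit_off;
	else
		v &= ~(UINT32_C(1) << c->bit_off);
	regs[c->reg_off / 4] = v;
}

int main(void)
{
	force_clk(&wb2, 1);
	printf("0x%03x = 0x%08x\n", (unsigned)wb2.reg_off,
	       (unsigned)regs[wb2.reg_off / 4]);
	return 0;
}
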
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
new file mode 100644
index 0000000000..69d3f7e5e0
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_6_2_SC7180_H
+#define _DPU_6_2_SC7180_H
+
+static const struct dpu_caps sc7180_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0x9,
+ .qseed_type = DPU_SSPP_SCALER_QSEED4,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .max_linewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_mdp_cfg sc7180_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x494,
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
+ },
+};
+
+static const struct dpu_ctl_cfg sc7180_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x1dc,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x1dc,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x1dc,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ },
+};
+
+static const struct dpu_sspp_cfg sc7180_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f8,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sc7180_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1f8,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1f8,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_1,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1f8,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_2,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ },
+};
+
+static const struct dpu_lm_cfg sc7180_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sc7180_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sc7180_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ },
+};
+
+static const struct dpu_dspp_cfg sc7180_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sc7180_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = 0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x70800, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = 0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ .intr_rdptr = -1,
+ },
+};
+
+static const struct dpu_intf_cfg sc7180_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x6a000, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x2c0,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ },
+};
+
+static const struct dpu_wb_cfg sc7180_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats,
+ .num_formats = ARRAY_SIZE(wb2_formats),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
+static const struct dpu_perf_cfg sc7180_perf_data = {
+ .max_bw_low = 6800000,
+ .max_bw_high = 6800000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 800000,
+ .min_dram_ib = 1600000,
+ .min_prefill_lines = 24,
+ .danger_lut_tbl = {0xff, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfff0, 0xff00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sc7180_mdss_ver = {
+ .core_major_ver = 6,
+ .core_minor_ver = 2,
+};
+
+const struct dpu_mdss_cfg dpu_sc7180_cfg = {
+ .mdss_ver = &sc7180_mdss_ver,
+ .caps = &sc7180_dpu_caps,
+ .mdp = &sc7180_mdp,
+ .ctl_count = ARRAY_SIZE(sc7180_ctl),
+ .ctl = sc7180_ctl,
+ .sspp_count = ARRAY_SIZE(sc7180_sspp),
+ .sspp = sc7180_sspp,
+ .mixer_count = ARRAY_SIZE(sc7180_lm),
+ .mixer = sc7180_lm,
+ .dspp_count = ARRAY_SIZE(sc7180_dspp),
+ .dspp = sc7180_dspp,
+ .pingpong_count = ARRAY_SIZE(sc7180_pp),
+ .pingpong = sc7180_pp,
+ .intf_count = ARRAY_SIZE(sc7180_intf),
+ .intf = sc7180_intf,
+ .wb_count = ARRAY_SIZE(sc7180_wb),
+ .wb = sc7180_wb,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .perf = &sc7180_perf_data,
+};
+
+#endif
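
Each file in this series ends by tying its tables into one exported dpu_<soc>_cfg keyed by a dpu_mdss_version pair. A simplified sketch of the version-keyed lookup idea, with the struct fields trimmed down; in the real driver the match is driven by the devicetree compatible and hardware revision, not a table scan like this:

#include <stddef.h>
#include <stdio.h>

/* Trimmed-down mirrors of the catalog types. */
struct mdss_version { int major, minor; };
struct mdss_cfg { const char *name; struct mdss_version ver; };

static const struct mdss_cfg catalog[] = {
	{ "sm8250", { 6, 0 } },
	{ "sc7180", { 6, 2 } },
	{ "sm6115", { 6, 3 } },
};

static const struct mdss_cfg *find_cfg(int major, int minor)
{
	for (size_t i = 0; i < sizeof(catalog) / sizeof(catalog[0]); i++)
		if (catalog[i].ver.major == major &&
		    catalog[i].ver.minor == minor)
			return &catalog[i];
	return NULL;
}

int main(void)
{
	const struct mdss_cfg *cfg = find_cfg(6, 2);

	printf("DPU 6.2 -> %s\n", cfg ? cfg->name : "(unsupported)");
	return 0;
}
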
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
new file mode 100644
index 0000000000..7e6000167e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_6_3_SM6115_H
+#define _DPU_6_3_SM6115_H
+
+static const struct dpu_caps sm6115_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_LINE_WIDTH,
+ .max_mixer_blendstages = 0x4,
+ .qseed_type = DPU_SSPP_SCALER_QSEED4,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .max_linewidth = 2160,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_mdp_cfg sm6115_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x494,
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ },
+};
+
+static const struct dpu_ctl_cfg sm6115_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x1dc,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ },
+};
+
+static const struct dpu_sspp_cfg sm6115_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f8,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm6115_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1f8,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ },
+};
+
+static const struct dpu_lm_cfg sm6115_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_QCM2290_MASK,
+ .sblk = &qcm2290_lm_sblk,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ },
+};
+
+static const struct dpu_dspp_cfg sm6115_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sm6115_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = 0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = -1,
+ },
+};
+
+static const struct dpu_intf_cfg sm6115_intf[] = {
+ {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x2c0,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ },
+};
+
+static const struct dpu_perf_cfg sm6115_perf_data = {
+ .max_bw_low = 3100000,
+ .max_bw_high = 4000000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 800000,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 24,
+ .danger_lut_tbl = {0xff, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfff0, 0xff00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sm6115_mdss_ver = {
+ .core_major_ver = 6,
+ .core_minor_ver = 3,
+};
+
+const struct dpu_mdss_cfg dpu_sm6115_cfg = {
+ .mdss_ver = &sm6115_mdss_ver,
+ .caps = &sm6115_dpu_caps,
+ .mdp = &sm6115_mdp,
+ .ctl_count = ARRAY_SIZE(sm6115_ctl),
+ .ctl = sm6115_ctl,
+ .sspp_count = ARRAY_SIZE(sm6115_sspp),
+ .sspp = sm6115_sspp,
+ .mixer_count = ARRAY_SIZE(sm6115_lm),
+ .mixer = sm6115_lm,
+ .dspp_count = ARRAY_SIZE(sm6115_dspp),
+ .dspp = sm6115_dspp,
+ .pingpong_count = ARRAY_SIZE(sm6115_pp),
+ .pingpong = sm6115_pp,
+ .intf_count = ARRAY_SIZE(sm6115_intf),
+ .intf = sm6115_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .perf = &sm6115_perf_data,
+};
+
+#endif
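
Caps such as .max_linewidth and .max_mixer_width bound what a single SSPP or LM can handle; sm6115 caps planes at 2160 pixels and, lacking src_split, cannot spread a wider plane across two pipes. A hypothetical sketch of the width checks this implies, assuming DEFAULT_DPU_LINE_WIDTH is 2048 (the real checks live in the plane/crtc atomic_check paths):

#include <stdbool.h>
#include <stdio.h>

struct caps {
	int max_linewidth;
	int max_mixer_width;
	bool has_src_split;
};

/* sm6115 values from the table above (assuming 2048 for the mixer). */
static const struct caps sm6115 = { 2160, 2048, false };

static bool plane_width_ok(const struct caps *c, int width)
{
	return width <= c->max_linewidth;
}

static bool mode_needs_two_lms(const struct caps *c, int hdisplay)
{
	return hdisplay > c->max_mixer_width;
}

int main(void)
{
	printf("1920-wide plane ok: %d\n", plane_width_ok(&sm6115, 1920));
	printf("3840-wide plane ok: %d\n", plane_width_ok(&sm6115, 3840));
	printf("2560-wide mode needs 2 LMs: %d\n",
	       mode_needs_two_lms(&sm6115, 2560));
	return 0;
}
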
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
new file mode 100644
index 0000000000..cf5db6f296
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DPU_6_4_SM6350_H
+#define _DPU_6_4_SM6350_H
+
+static const struct dpu_caps sm6350_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0x7,
+ .qseed_type = DPU_SSPP_SCALER_QSEED4,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .max_linewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_mdp_cfg sm6350_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x494,
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ },
+};
+
+static const struct dpu_ctl_cfg sm6350_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x1dc,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x1dc,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x1dc,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0x1dc,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ },
+};
+
+static const struct dpu_sspp_cfg sm6350_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f8,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sc7180_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1f8,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1f8,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_1,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1f8,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_2,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ },
+};
+
+static const struct dpu_lm_cfg sm6350_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sc7180_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sc7180_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = 0,
+ },
+};
+
+static const struct dpu_dspp_cfg sm6350_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sm6350_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = 0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x70800, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = 0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ .intr_rdptr = -1,
+ },
+};
+
+static const struct dpu_dsc_cfg sm6350_dsc[] = {
+ {
+ .name = "dsc_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ },
+};
+
+static const struct dpu_intf_cfg sm6350_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x6a000, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 35,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x2c0,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 35,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ },
+};
+
+static const struct dpu_perf_cfg sm6350_perf_data = {
+ .max_bw_low = 4200000,
+ .max_bw_high = 5100000,
+ .min_core_ib = 2500000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 1600000,
+ .min_prefill_lines = 35,
+ /* TODO: confirm danger_lut_tbl */
+ .danger_lut_tbl = {0xffff, 0xffff, 0x0},
+ .safe_lut_tbl = {0xff00, 0xff00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sm6350_qos_linear_macrotile),
+ .entries = sm6350_qos_linear_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sm6350_qos_linear_macrotile),
+ .entries = sm6350_qos_linear_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sm6350_mdss_ver = {
+ .core_major_ver = 6,
+ .core_minor_ver = 4,
+};
+
+const struct dpu_mdss_cfg dpu_sm6350_cfg = {
+ .mdss_ver = &sm6350_mdss_ver,
+ .caps = &sm6350_dpu_caps,
+ .mdp = &sm6350_mdp,
+ .ctl_count = ARRAY_SIZE(sm6350_ctl),
+ .ctl = sm6350_ctl,
+ .sspp_count = ARRAY_SIZE(sm6350_sspp),
+ .sspp = sm6350_sspp,
+ .mixer_count = ARRAY_SIZE(sm6350_lm),
+ .mixer = sm6350_lm,
+ .dspp_count = ARRAY_SIZE(sm6350_dspp),
+ .dspp = sm6350_dspp,
+ .dsc_count = ARRAY_SIZE(sm6350_dsc),
+ .dsc = sm6350_dsc,
+ .pingpong_count = ARRAY_SIZE(sm6350_pp),
+ .pingpong = sm6350_pp,
+ .intf_count = ARRAY_SIZE(sm6350_intf),
+ .intf = sm6350_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .perf = &sm6350_perf_data,
+};
+
+#endif
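
The .features fields are bitmasks tested with BIT(): every sm6350 CTL above sets DPU_CTL_ACTIVE_CFG, for example. A small sketch of such a feature probe; the enum values here are stand-ins, not the real dpu_hw_catalog.h constants:

#include <stdio.h>

/* Stand-in bit indices; the real enum lives in dpu_hw_catalog.h. */
enum { DPU_CTL_ACTIVE_CFG, DPU_CTL_SPLIT_DISPLAY };
#define BIT(n) (1UL << (n))

struct ctl_cfg {
	const char *name;
	unsigned long features;
};

static const struct ctl_cfg ctls[] = {
	{ "ctl_0", BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY) },
	{ "ctl_2", BIT(DPU_CTL_ACTIVE_CFG) },
};

int main(void)
{
	for (unsigned long i = 0; i < sizeof(ctls) / sizeof(ctls[0]); i++)
		printf("%s: active_cfg=%d split=%d\n", ctls[i].name,
		       !!(ctls[i].features & BIT(DPU_CTL_ACTIVE_CFG)),
		       !!(ctls[i].features & BIT(DPU_CTL_SPLIT_DISPLAY)));
	return 0;
}
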
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
new file mode 100644
index 0000000000..87a03aa165
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_6_5_QCM2290_H
+#define _DPU_6_5_QCM2290_H
+
+static const struct dpu_caps qcm2290_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_LINE_WIDTH,
+ .max_mixer_blendstages = 0x4,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .max_linewidth = 2160,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_mdp_cfg qcm2290_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x494,
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ },
+};
+
+static const struct dpu_ctl_cfg qcm2290_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x1dc,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ },
+};
+
+static const struct dpu_sspp_cfg qcm2290_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f8,
+ .features = VIG_QCM2290_MASK,
+ .sblk = &qcm2290_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1f8,
+ .features = DMA_SDM845_MASK,
+ .sblk = &qcm2290_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ },
+};
+
+static const struct dpu_lm_cfg qcm2290_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_QCM2290_MASK,
+ .sblk = &qcm2290_lm_sblk,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ },
+};
+
+static const struct dpu_dspp_cfg qcm2290_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg qcm2290_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = 0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = -1,
+ },
+};
+
+static const struct dpu_intf_cfg qcm2290_intf[] = {
+ {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x2c0,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ },
+};
+
+static const struct dpu_perf_cfg qcm2290_perf_data = {
+ .max_bw_low = 2700000,
+ .max_bw_high = 2700000,
+ .min_core_ib = 1300000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 1600000,
+ .min_prefill_lines = 24,
+ .danger_lut_tbl = {0xff, 0x0, 0x0},
+ .safe_lut_tbl = {0xfff0, 0x0, 0x0},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(qcm2290_qos_linear),
+ .entries = qcm2290_qos_linear
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version qcm2290_mdss_ver = {
+ .core_major_ver = 6,
+ .core_minor_ver = 5,
+};
+
+const struct dpu_mdss_cfg dpu_qcm2290_cfg = {
+ .mdss_ver = &qcm2290_mdss_ver,
+ .caps = &qcm2290_dpu_caps,
+ .mdp = &qcm2290_mdp,
+ .ctl_count = ARRAY_SIZE(qcm2290_ctl),
+ .ctl = qcm2290_ctl,
+ .sspp_count = ARRAY_SIZE(qcm2290_sspp),
+ .sspp = qcm2290_sspp,
+ .mixer_count = ARRAY_SIZE(qcm2290_lm),
+ .mixer = qcm2290_lm,
+ .dspp_count = ARRAY_SIZE(qcm2290_dspp),
+ .dspp = qcm2290_dspp,
+ .pingpong_count = ARRAY_SIZE(qcm2290_pp),
+ .pingpong = qcm2290_pp,
+ .intf_count = ARRAY_SIZE(qcm2290_intf),
+ .intf = qcm2290_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .perf = &qcm2290_perf_data,
+};
+
+#endif
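
The perf block's inefficiency factors read as percentages scaled by 100, so a raw bandwidth demand is inflated by factor/100 before being compared with the max_bw_* caps (values in kBps). A sketch of that arithmetic under those assumptions; exactly where dpu_core_perf.c applies the factor may differ:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* qcm2290 values from the table above, all in kBps. */
	const uint64_t max_bw_high = 2700000;
	const uint32_t bw_ineff = 120;		/* a percentage: 1.20x */

	uint64_t raw_kbps = 2000000;		/* hypothetical demand */
	uint64_t adjusted = raw_kbps * bw_ineff / 100;

	printf("adjusted: %llu kBps, fits: %d\n",
	       (unsigned long long)adjusted, adjusted <= max_bw_high);
	return 0;
}
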
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
new file mode 100644
index 0000000000..a327e21c90
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DPU_6_9_SM6375_H
+#define _DPU_6_9_SM6375_H
+
+static const struct dpu_caps sm6375_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_LINE_WIDTH,
+ .max_mixer_blendstages = 0x4,
+ .qseed_type = DPU_SSPP_SCALER_QSEED4,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .max_linewidth = 2160,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_mdp_cfg sm6375_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x494,
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ },
+};
+
+static const struct dpu_ctl_cfg sm6375_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x1dc,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ },
+};
+
+static const struct dpu_sspp_cfg sm6375_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f8,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm6115_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1f8,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ },
+};
+
+static const struct dpu_lm_cfg sm6375_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_QCM2290_MASK,
+ .sblk = &qcm2290_lm_sblk,
+ .lm_pair = 0,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ },
+};
+
+static const struct dpu_dspp_cfg sm6375_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sm6375_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .merge_3d = 0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = -1,
+ },
+};
+
+static const struct dpu_dsc_cfg sm6375_dsc[] = {
+ {
+ .name = "dsc_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x140,
+ .features = BIT(DPU_DSC_OUTPUT_CTRL),
+ },
+};
+
+static const struct dpu_intf_cfg sm6375_intf[] = {
+ {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x2c0,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ },
+};
+
+static const struct dpu_perf_cfg sm6375_perf_data = {
+ .max_bw_low = 5200000,
+ .max_bw_high = 6200000,
+ .min_core_ib = 2500000,
+ .min_llcc_ib = 0, /* No LLCC on this SoC */
+ .min_dram_ib = 1600000,
+ .min_prefill_lines = 24,
+ /* TODO: confirm danger_lut_tbl */
+ .danger_lut_tbl = {0xffff, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sm6350_qos_linear_macrotile),
+ .entries = sm6350_qos_linear_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sm6350_qos_linear_macrotile),
+ .entries = sm6350_qos_linear_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sm6375_mdss_ver = {
+ .core_major_ver = 6,
+ .core_minor_ver = 9,
+};
+
+const struct dpu_mdss_cfg dpu_sm6375_cfg = {
+ .mdss_ver = &sm6375_mdss_ver,
+ .caps = &sm6375_dpu_caps,
+ .mdp = &sm6375_mdp,
+ .ctl_count = ARRAY_SIZE(sm6375_ctl),
+ .ctl = sm6375_ctl,
+ .sspp_count = ARRAY_SIZE(sm6375_sspp),
+ .sspp = sm6375_sspp,
+ .mixer_count = ARRAY_SIZE(sm6375_lm),
+ .mixer = sm6375_lm,
+ .dspp_count = ARRAY_SIZE(sm6375_dspp),
+ .dspp = sm6375_dspp,
+ .dsc_count = ARRAY_SIZE(sm6375_dsc),
+ .dsc = sm6375_dsc,
+ .pingpong_count = ARRAY_SIZE(sm6375_pp),
+ .pingpong = sm6375_pp,
+ .intf_count = ARRAY_SIZE(sm6375_intf),
+ .intf = sm6375_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .perf = &sm6375_perf_data,
+};
+
+#endif
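
The .lm_pair field names the peer mixer usable for dual-pipe and source-split topologies, with 0 meaning "no peer" — hence sm6375's lone LM carries .lm_pair = 0 while sm6350 cross-links LM_0 and LM_1. A simplified sketch of resolving a peer, with stand-in IDs:

#include <stddef.h>
#include <stdio.h>

enum { LM_NONE, LM_0, LM_1 };	/* stand-in IDs; 0 means "no peer" */

struct lm_cfg {
	int id;
	int lm_pair;
	const char *name;
};

/* sm6350-style pairing; sm6375's lone LM would carry .lm_pair = 0. */
static const struct lm_cfg lms[] = {
	{ LM_0, LM_1, "lm_0" },
	{ LM_1, LM_0, "lm_1" },
};

static const struct lm_cfg *peer_of(const struct lm_cfg *lm)
{
	for (size_t i = 0; i < sizeof(lms) / sizeof(lms[0]); i++)
		if (lm->lm_pair != LM_NONE && lms[i].id == lm->lm_pair)
			return &lms[i];
	return NULL;
}

int main(void)
{
	const struct lm_cfg *p = peer_of(&lms[0]);

	printf("peer of lm_0: %s\n", p ? p->name : "(none)");
	return 0;
}
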
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
new file mode 100644
index 0000000000..f8d16f9bf5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
@@ -0,0 +1,411 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_7_0_SM8350_H
+#define _DPU_7_0_SM8350_H
+
+static const struct dpu_caps sm8350_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .qseed_type = DPU_SSPP_SCALER_QSEED4,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 4096,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_mdp_cfg sm8350_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x494,
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ },
+};
+
+/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
+static const struct dpu_ctl_cfg sm8350_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x1e8,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x1e8,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x1e8,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x1e8,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x19000, .len = 0x1e8,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a000, .len = 0x1e8,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_sspp_cfg sm8350_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f8,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8250_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x1f8,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8250_vig_sblk_1,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x1f8,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8250_vig_sblk_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG2,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x1f8,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8250_vig_sblk_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG3,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1f8,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1f8,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_1,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1f8,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_2,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x1f8,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_3,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA3,
+ },
+};
+
+static const struct dpu_lm_cfg sm8350_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ },
+};
+
+static const struct dpu_dspp_cfg sm8350_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sm8350_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x69000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x6a000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x6b000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x6c000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x6d000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x6e000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ .intr_rdptr = -1,
+ },
+};
+
+static const struct dpu_merge_3d_cfg sm8350_merge_3d[] = {
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x4e000, .len = 0x8,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x4f000, .len = 0x8,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x50000, .len = 0x8,
+ },
+};
+
+/*
+ * NOTE: Each display compression engine (DCE) contains two hard-slice
+ * DSC encoders, so both share the same base address but have their
+ * own sub-block addresses.
+ */
+static const struct dpu_dsc_cfg sm8350_dsc[] = {
+ {
+ .name = "dce_0_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_0_1", .id = DSC_1,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_1,
+ }, {
+ .name = "dce_1_0", .id = DSC_2,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_1_1", .id = DSC_3,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_1,
+ },
+};
+
+static const struct dpu_intf_cfg sm8350_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x34000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x35000, .len = 0x2c4,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x36000, .len = 0x2c4,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x37000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ .intr_tear_rd_ptr = -1,
+ },
+};
+
+static const struct dpu_perf_cfg sm8350_perf_data = {
+ .max_bw_low = 11800000,
+ .max_bw_high = 15500000,
+ .min_core_ib = 2500000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 40,
+ /* FIXME: lut tables */
+ .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0},
+ .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sm8350_mdss_ver = {
+ .core_major_ver = 7,
+ .core_minor_ver = 0,
+};
+
+const struct dpu_mdss_cfg dpu_sm8350_cfg = {
+ .mdss_ver = &sm8350_mdss_ver,
+ .caps = &sm8350_dpu_caps,
+ .mdp = &sm8350_mdp,
+ .ctl_count = ARRAY_SIZE(sm8350_ctl),
+ .ctl = sm8350_ctl,
+ .sspp_count = ARRAY_SIZE(sm8350_sspp),
+ .sspp = sm8350_sspp,
+ .mixer_count = ARRAY_SIZE(sm8350_lm),
+ .mixer = sm8350_lm,
+ .dspp_count = ARRAY_SIZE(sm8350_dspp),
+ .dspp = sm8350_dspp,
+ .pingpong_count = ARRAY_SIZE(sm8350_pp),
+ .pingpong = sm8350_pp,
+ .dsc_count = ARRAY_SIZE(sm8350_dsc),
+ .dsc = sm8350_dsc,
+ .merge_3d_count = ARRAY_SIZE(sm8350_merge_3d),
+ .merge_3d = sm8350_merge_3d,
+ .intf_count = ARRAY_SIZE(sm8350_intf),
+ .intf = sm8350_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .perf = &sm8350_perf_data,
+};
+
+#endif
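
To make the shared-base DCE layout noted above concrete: dce_0_0 and dce_0_1 both sit at 0x80000 and are told apart purely by their sub-blocks. A sketch with invented sub-block offsets; the real values live in dsc_sblk_0/dsc_sblk_1 in the DPU catalog sources:

#include <stdio.h>

struct dsc_sblk { unsigned enc_off; };
struct dsc_cfg {
	const char *name;
	unsigned base;
	const struct dsc_sblk *sblk;
};

static const struct dsc_sblk sblk0 = { 0x100 };	/* hypothetical offset */
static const struct dsc_sblk sblk1 = { 0x200 };	/* hypothetical offset */

static const struct dsc_cfg dce0[] = {
	{ "dce_0_0", 0x80000, &sblk0 },
	{ "dce_0_1", 0x80000, &sblk1 },
};

int main(void)
{
	for (int i = 0; i < 2; i++)
		printf("%s: encoder regs at 0x%x\n", dce0[i].name,
		       dce0[i].base + dce0[i].sblk->enc_off);
	return 0;
}
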
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
new file mode 100644
index 0000000000..9195cb996f
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_7_2_SC7280_H
+#define _DPU_7_2_SC7280_H
+
+static const struct dpu_caps sc7280_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0x7,
+ .qseed_type = DPU_SSPP_SCALER_QSEED4,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .max_linewidth = 2400,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_mdp_cfg sc7280_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x2014,
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
+ },
+};
+
+static const struct dpu_ctl_cfg sc7280_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x1e8,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x1e8,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x1e8,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x1e8,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ },
+};
+
+static const struct dpu_sspp_cfg sc7280_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f8,
+ .features = VIG_SC7280_MASK_SDMA,
+ .sblk = &sc7280_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1f8,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1f8,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &sdm845_dma_sblk_1,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1f8,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &sdm845_dma_sblk_2,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ },
+};
+
+static const struct dpu_lm_cfg sc7280_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sc7180_lm_sblk,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sc7180_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sc7180_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ },
+};
+
+static const struct dpu_dspp_cfg sc7280_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sc7280_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x69000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = 0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x6a000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = 0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x6b000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = 0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x6c000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = 0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ .intr_rdptr = -1,
+ },
+};
+
+/* NOTE: sc7280 has only one hard-slice DSC encoder */
+static const struct dpu_dsc_cfg sc7280_dsc[] = {
+ {
+ .name = "dce_0_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_0,
+ },
+};
+
+static const struct dpu_wb_cfg sc7280_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats,
+ .num_formats = ARRAY_SIZE(wb2_formats),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
+static const struct dpu_intf_cfg sc7280_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x34000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x35000, .len = 0x2c4,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_5", .id = INTF_5,
+ .base = 0x39000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 22),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 23),
+ .intr_tear_rd_ptr = -1,
+ },
+};
+
+static const struct dpu_perf_cfg sc7280_perf_data = {
+ .max_bw_low = 4700000,
+ .max_bw_high = 8800000,
+ .min_core_ib = 2500000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 1600000,
+ .min_prefill_lines = 24,
+ .danger_lut_tbl = {0xffff, 0xffff, 0x0},
+ .safe_lut_tbl = {0xff00, 0xff00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sc7280_mdss_ver = {
+ .core_major_ver = 7,
+ .core_minor_ver = 2,
+};
+
+const struct dpu_mdss_cfg dpu_sc7280_cfg = {
+ .mdss_ver = &sc7280_mdss_ver,
+ .caps = &sc7280_dpu_caps,
+ .mdp = &sc7280_mdp,
+ .ctl_count = ARRAY_SIZE(sc7280_ctl),
+ .ctl = sc7280_ctl,
+ .sspp_count = ARRAY_SIZE(sc7280_sspp),
+ .sspp = sc7280_sspp,
+ .dspp_count = ARRAY_SIZE(sc7280_dspp),
+ .dspp = sc7280_dspp,
+ .mixer_count = ARRAY_SIZE(sc7280_lm),
+ .mixer = sc7280_lm,
+ .pingpong_count = ARRAY_SIZE(sc7280_pp),
+ .pingpong = sc7280_pp,
+ .dsc_count = ARRAY_SIZE(sc7280_dsc),
+ .dsc = sc7280_dsc,
+ .wb_count = ARRAY_SIZE(sc7280_wb),
+ .wb = sc7280_wb,
+ .intf_count = ARRAY_SIZE(sc7280_intf),
+ .intf = sc7280_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .perf = &sc7280_perf_data,
+};
+
+#endif
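
Editorial note: the DPU_IRQ_IDX() values used throughout these catalog tables pack an interrupt register index and a bit offset into a single flat index. A minimal sketch of the presumed encoding, assuming 32 status bits per register (the macro itself lives in dpu_hw_interrupts.h):

/* Sketch only: one flat IRQ index per (status register, bit) pair. */
#define DPU_IRQ_IDX(reg_idx, offset)	(reg_idx * 32 + offset)

/* e.g. ctl_2's start interrupt above: MDP_SSPP_TOP0_INTR2, bit 11 */
int ctl2_start_irq = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11);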
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
new file mode 100644
index 0000000000..ff9adb8000
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
@@ -0,0 +1,473 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_8_0_SC8280XP_H
+#define _DPU_8_0_SC8280XP_H
+
+static const struct dpu_caps sc8280xp_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 11,
+ .qseed_type = DPU_SSPP_SCALER_QSEED4,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 5120,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_mdp_cfg sc8280xp_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x494,
+ .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ },
+};
+
+/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
+static const struct dpu_ctl_cfg sc8280xp_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x204,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x204,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x19000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x2ac,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8250_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x2ac,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8250_vig_sblk_1,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x2ac,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8250_vig_sblk_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG2,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x2ac,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8250_vig_sblk_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG3,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x2ac,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x2ac,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_1,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x2ac,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_2,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x2ac,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_3,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA3,
+ },
+};
+
+static const struct dpu_lm_cfg sc8280xp_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ },
+};
+
+static const struct dpu_dspp_cfg sc8280xp_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sc8280xp_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x69000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x6a000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x6b000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x6c000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x6d000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x6e000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ .intr_rdptr = -1,
+ },
+};
+
+static const struct dpu_merge_3d_cfg sc8280xp_merge_3d[] = {
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x4e000, .len = 0x8,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x4f000, .len = 0x8,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x50000, .len = 0x8,
+ },
+};
+
+/*
+ * NOTE: Each display compression engine (DCE) contains dual hard
+ * slice DSC encoders, so both share the same base address but
+ * have their own sub-block addresses.
+ */
+static const struct dpu_dsc_cfg sc8280xp_dsc[] = {
+ {
+ .name = "dce_0_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_0_1", .id = DSC_1,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_1,
+ }, {
+ .name = "dce_1_0", .id = DSC_2,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_1_1", .id = DSC_3,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_1,
+ }, {
+ .name = "dce_2_0", .id = DSC_4,
+ .base = 0x82000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_2_1", .id = DSC_5,
+ .base = 0x82000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_1,
+ },
+};
+
+/* TODO: INTF 3, 7 and 8 are used for MST, marked as INTF_NONE for now */
+static const struct dpu_intf_cfg sc8280xp_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x34000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x35000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x36000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x37000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_NONE,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_4", .id = INTF_4,
+ .base = 0x38000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 20),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 21),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_5", .id = INTF_5,
+ .base = 0x39000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_3,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 22),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 23),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_6", .id = INTF_6,
+ .base = 0x3a000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_2,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_7", .id = INTF_7,
+ .base = 0x3b000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_NONE,
+ .controller_id = MSM_DP_CONTROLLER_2,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 18),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 19),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_8", .id = INTF_8,
+ .base = 0x3c000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_NONE,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
+ .intr_tear_rd_ptr = -1,
+ },
+};
+
+static const struct dpu_perf_cfg sc8280xp_perf_data = {
+ .max_bw_low = 13600000,
+ .max_bw_high = 18200000,
+ .min_core_ib = 2500000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc8180x_qos_linear),
+ .entries = sc8180x_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc8180x_qos_macrotile),
+ .entries = sc8180x_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sc8280xp_mdss_ver = {
+ .core_major_ver = 8,
+ .core_minor_ver = 0,
+};
+
+const struct dpu_mdss_cfg dpu_sc8280xp_cfg = {
+ .mdss_ver = &sc8280xp_mdss_ver,
+ .caps = &sc8280xp_dpu_caps,
+ .mdp = &sc8280xp_mdp,
+ .ctl_count = ARRAY_SIZE(sc8280xp_ctl),
+ .ctl = sc8280xp_ctl,
+ .sspp_count = ARRAY_SIZE(sc8280xp_sspp),
+ .sspp = sc8280xp_sspp,
+ .mixer_count = ARRAY_SIZE(sc8280xp_lm),
+ .mixer = sc8280xp_lm,
+ .dspp_count = ARRAY_SIZE(sc8280xp_dspp),
+ .dspp = sc8280xp_dspp,
+ .pingpong_count = ARRAY_SIZE(sc8280xp_pp),
+ .pingpong = sc8280xp_pp,
+ .dsc_count = ARRAY_SIZE(sc8280xp_dsc),
+ .dsc = sc8280xp_dsc,
+ .merge_3d_count = ARRAY_SIZE(sc8280xp_merge_3d),
+ .merge_3d = sc8280xp_merge_3d,
+ .intf_count = ARRAY_SIZE(sc8280xp_intf),
+ .intf = sc8280xp_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .perf = &sc8280xp_perf_data,
+};
+
+#endif
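
In the sc8280xp mixer table above the lm_pair links are always mutual (LM_0 with LM_1, LM_2 with LM_3, LM_4 with LM_5), which is what allows two mixers to drive a split or extra-wide display. A hypothetical consistency check, not part of this patch, that captures the invariant:

/* Sketch: every mixer naming an lm_pair must be named back by it. */
static bool dpu_lm_pairs_symmetric(const struct dpu_lm_cfg *lm, int count)
{
	int i, j;

	for (i = 0; i < count; i++) {
		if (!lm[i].lm_pair)
			continue;	/* unpaired mixer, e.g. sc7280's lm_0 */
		for (j = 0; j < count; j++) {
			if (lm[j].id == lm[i].lm_pair &&
			    lm[j].lm_pair != lm[i].id)
				return false;	/* one-way pairing */
		}
	}
	return true;
}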
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
new file mode 100644
index 0000000000..1b12178dfb
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
@@ -0,0 +1,433 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_8_1_SM8450_H
+#define _DPU_8_1_SM8450_H
+
+static const struct dpu_caps sm8450_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .qseed_type = DPU_SSPP_SCALER_QSEED4,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 5120,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_mdp_cfg sm8450_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x494,
+ .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ },
+};
+
+/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
+static const struct dpu_ctl_cfg sm8450_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x204,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x204,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x19000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_sspp_cfg sm8450_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x32c,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8250_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x32c,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8250_vig_sblk_1,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x32c,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8250_vig_sblk_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG2,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x32c,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8250_vig_sblk_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG3,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x32c,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x32c,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_1,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x32c,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_2,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x32c,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_3,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA3,
+ },
+};
+
+static const struct dpu_lm_cfg sm8450_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ },
+};
+
+static const struct dpu_dspp_cfg sm8450_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sm8450_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x69000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x6a000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x6b000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x6c000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x6d000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x6e000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_6", .id = PINGPONG_6,
+ .base = 0x65800, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ .intr_done = -1,
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_7", .id = PINGPONG_7,
+ .base = 0x65c00, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ .intr_done = -1,
+ .intr_rdptr = -1,
+ },
+};
+
+static const struct dpu_merge_3d_cfg sm8450_merge_3d[] = {
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x4e000, .len = 0x8,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x4f000, .len = 0x8,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x50000, .len = 0x8,
+ }, {
+ .name = "merge_3d_3", .id = MERGE_3D_3,
+ .base = 0x65f00, .len = 0x8,
+ },
+};
+
+/*
+ * NOTE: Each display compression engine (DCE) contains dual hard
+ * slice DSC encoders, so both share the same base address but
+ * have their own sub-block addresses.
+ */
+static const struct dpu_dsc_cfg sm8450_dsc[] = {
+ {
+ .name = "dce_0_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_0_1", .id = DSC_1,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_1,
+ }, {
+ .name = "dce_1_0", .id = DSC_2,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_1_1", .id = DSC_3,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_1,
+ },
+};
+
+static const struct dpu_intf_cfg sm8450_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x34000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x35000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x36000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x37000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ .intr_tear_rd_ptr = -1,
+ },
+};
+
+static const struct dpu_perf_cfg sm8450_perf_data = {
+ .max_bw_low = 13600000,
+ .max_bw_high = 18200000,
+ .min_core_ib = 2500000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 35,
+ /* FIXME: lut tables */
+ .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0},
+ .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sm8450_mdss_ver = {
+ .core_major_ver = 8,
+ .core_minor_ver = 1,
+};
+
+const struct dpu_mdss_cfg dpu_sm8450_cfg = {
+ .mdss_ver = &sm8450_mdss_ver,
+ .caps = &sm8450_dpu_caps,
+ .mdp = &sm8450_mdp,
+ .ctl_count = ARRAY_SIZE(sm8450_ctl),
+ .ctl = sm8450_ctl,
+ .sspp_count = ARRAY_SIZE(sm8450_sspp),
+ .sspp = sm8450_sspp,
+ .mixer_count = ARRAY_SIZE(sm8450_lm),
+ .mixer = sm8450_lm,
+ .dspp_count = ARRAY_SIZE(sm8450_dspp),
+ .dspp = sm8450_dspp,
+ .pingpong_count = ARRAY_SIZE(sm8450_pp),
+ .pingpong = sm8450_pp,
+ .dsc_count = ARRAY_SIZE(sm8450_dsc),
+ .dsc = sm8450_dsc,
+ .merge_3d_count = ARRAY_SIZE(sm8450_merge_3d),
+ .merge_3d = sm8450_merge_3d,
+ .intf_count = ARRAY_SIZE(sm8450_intf),
+ .intf = sm8450_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .perf = &sm8450_perf_data,
+};
+
+#endif
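
As the NOTE above says, the two hard-slice encoders of one DCE share a base address and differ only in their sub-block. An illustrative helper, with the sub-block offset passed in explicitly because the exact sblk field layout is not shown in this patch:

/* dce_0_0 and dce_0_1 share base 0x80000 yet address distinct
 * encoder registers through their dsc_sblk_0 / dsc_sblk_1 offsets. */
static inline u32 dpu_dsc_enc_reg(const struct dpu_dsc_cfg *cfg,
				  u32 sblk_enc_off)
{
	return cfg->base + sblk_enc_off;
}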
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
new file mode 100644
index 0000000000..7bed819dfc
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
@@ -0,0 +1,448 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_9_0_SM8550_H
+#define _DPU_9_0_SM8550_H
+
+static const struct dpu_caps sm8550_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .qseed_type = DPU_SSPP_SCALER_QSEED4,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 5120,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_mdp_cfg sm8550_mdp = {
+ .name = "top_0",
+ .base = 0, .len = 0x494,
+ .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x4330, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x6330, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG2] = { .reg_off = 0x8330, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG3] = { .reg_off = 0xa330, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x24330, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x26330, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x28330, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2a330, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA4] = { .reg_off = 0x2c330, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA5] = { .reg_off = 0x2e330, .bit_off = 0 },
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ },
+};
+
+/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
+static const struct dpu_ctl_cfg sm8550_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x290,
+ .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x290,
+ .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x19000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_sspp_cfg sm8550_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x344,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8550_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x344,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8550_vig_sblk_1,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x344,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8550_vig_sblk_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG2,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x344,
+ .features = VIG_SC7180_MASK,
+ .sblk = &sm8550_vig_sblk_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG3,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x344,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_0,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x344,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_1,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x344,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_2,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x344,
+ .features = DMA_SDM845_MASK,
+ .sblk = &sdm845_dma_sblk_3,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA3,
+ }, {
+ .name = "sspp_12", .id = SSPP_DMA4,
+ .base = 0x2c000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sm8550_dma_sblk_4,
+ .xin_id = 14,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA4,
+ }, {
+ .name = "sspp_13", .id = SSPP_DMA5,
+ .base = 0x2e000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK,
+ .sblk = &sm8550_dma_sblk_5,
+ .xin_id = 15,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA5,
+ },
+};
+
+static const struct dpu_lm_cfg sm8550_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ },
+};
+
+static const struct dpu_dspp_cfg sm8550_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sm8550_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x69000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x6a000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x6b000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x6c000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x6d000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x6e000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_6", .id = PINGPONG_6,
+ .base = 0x66000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ .intr_done = -1,
+ .intr_rdptr = -1,
+ }, {
+ .name = "pingpong_7", .id = PINGPONG_7,
+ .base = 0x66400, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ .intr_done = -1,
+ .intr_rdptr = -1,
+ },
+};
+
+static const struct dpu_merge_3d_cfg sm8550_merge_3d[] = {
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x4e000, .len = 0x8,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x4f000, .len = 0x8,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x50000, .len = 0x8,
+ }, {
+ .name = "merge_3d_3", .id = MERGE_3D_3,
+ .base = 0x66700, .len = 0x8,
+ },
+};
+
+/*
+ * NOTE: Each display compression engine (DCE) contains dual hard
+ * slice DSC encoders, so both share the same base address but
+ * have their own sub-block addresses.
+ */
+static const struct dpu_dsc_cfg sm8550_dsc[] = {
+ {
+ .name = "dce_0_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_0_1", .id = DSC_1,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_1,
+ }, {
+ .name = "dce_1_0", .id = DSC_2,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_1_1", .id = DSC_3,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_1,
+ },
+};
+
+static const struct dpu_intf_cfg sm8550_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x34000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x35000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x36000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x37000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ .intr_tear_rd_ptr = -1,
+ },
+};
+
+static const struct dpu_perf_cfg sm8550_perf_data = {
+ .max_bw_low = 13600000,
+ .max_bw_high = 18200000,
+ .min_core_ib = 2500000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 35,
+ /* FIXME: lut tables */
+ .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0},
+ .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sm8550_mdss_ver = {
+ .core_major_ver = 9,
+ .core_minor_ver = 0,
+};
+
+const struct dpu_mdss_cfg dpu_sm8550_cfg = {
+ .mdss_ver = &sm8550_mdss_ver,
+ .caps = &sm8550_dpu_caps,
+ .mdp = &sm8550_mdp,
+ .ctl_count = ARRAY_SIZE(sm8550_ctl),
+ .ctl = sm8550_ctl,
+ .sspp_count = ARRAY_SIZE(sm8550_sspp),
+ .sspp = sm8550_sspp,
+ .mixer_count = ARRAY_SIZE(sm8550_lm),
+ .mixer = sm8550_lm,
+ .dspp_count = ARRAY_SIZE(sm8550_dspp),
+ .dspp = sm8550_dspp,
+ .pingpong_count = ARRAY_SIZE(sm8550_pp),
+ .pingpong = sm8550_pp,
+ .dsc_count = ARRAY_SIZE(sm8550_dsc),
+ .dsc = sm8550_dsc,
+ .merge_3d_count = ARRAY_SIZE(sm8550_merge_3d),
+ .merge_3d = sm8550_merge_3d,
+ .intf_count = ARRAY_SIZE(sm8550_intf),
+ .intf = sm8550_intf,
+ .vbif_count = ARRAY_SIZE(sm8550_vbif),
+ .vbif = sm8550_vbif,
+ .perf = &sm8550_perf_data,
+};
+
+#endif
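
An editorial observation on the sm8550 top block above: unlike sm8450, the SSPP clock controls no longer cluster in the MDP top register range; each reg_off instead sits inside the pipe's own register space at what looks like a fixed +0x330 (VIG0: 0x4000 + 0x330 = 0x4330, DMA5: 0x2e000 + 0x330 = 0x2e330). A sketch of that presumed relationship:

/* Presumed sm8550 layout: per-pipe clock control at sspp base + 0x330. */
#define SM8550_SSPP_CLK_CTRL_OFF	0x330

static inline u32 sm8550_sspp_clk_ctrl_reg(const struct dpu_sspp_cfg *sspp)
{
	return sspp->base + SM8550_SSPP_CLK_CTRL_OFF;	/* 0x4000 -> 0x4330 */
}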
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
new file mode 100644
index 0000000000..b5b6e7031f
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DPU_CORE_IRQ_H__
+#define __DPU_CORE_IRQ_H__
+
+#include "dpu_kms.h"
+#include "dpu_hw_interrupts.h"
+
+/**
+ * dpu_core_irq_preinstall - perform pre-installation of core IRQ handler
+ * @kms: MSM KMS handle
+ * @return: none
+ */
+void dpu_core_irq_preinstall(struct msm_kms *kms);
+
+/**
+ * dpu_core_irq_uninstall - uninstall core IRQ handler
+ * @kms: MSM KMS handle
+ * @return: none
+ */
+void dpu_core_irq_uninstall(struct msm_kms *kms);
+
+/**
+ * dpu_core_irq - core IRQ handler
+ * @kms: MSM KMS handle
+ * @return: interrupt handling status
+ */
+irqreturn_t dpu_core_irq(struct msm_kms *kms);
+
+/**
+ * dpu_core_irq_read - IRQ helper function for reading IRQ status
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @return: non-zero if an irq was detected, zero otherwise
+ */
+u32 dpu_core_irq_read(
+ struct dpu_kms *dpu_kms,
+ int irq_idx);
+
+/**
+ * dpu_core_irq_register_callback - For registering callback function on IRQ
+ * interrupt
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @irq_cb: IRQ callback function.
+ * @irq_arg: IRQ callback argument.
+ * @return: 0 for success registering callback, otherwise failure
+ *
+ * This function supports registration of multiple callbacks for each interrupt.
+ */
+int dpu_core_irq_register_callback(
+ struct dpu_kms *dpu_kms,
+ int irq_idx,
+ void (*irq_cb)(void *arg, int irq_idx),
+ void *irq_arg);
+
+/**
+ * dpu_core_irq_unregister_callback - For unregistering callback function on IRQ
+ * interrupt
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @return: 0 for success unregistering callback, otherwise failure
+ *
+ * This function unregisters a callback previously registered for the interrupt.
+ */
+int dpu_core_irq_unregister_callback(
+ struct dpu_kms *dpu_kms,
+ int irq_idx);
+
+/**
+ * dpu_debugfs_core_irq_init - register core irq debugfs
+ * @dpu_kms: pointer to kms
+ * @parent: debugfs directory root
+ */
+void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent);
+
+#endif /* __DPU_CORE_IRQ_H__ */
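
A minimal usage sketch for the callback API declared above; the handler and wrapper names are illustrative, not part of this patch:

/* Illustrative consumer: hook a handler to one flat IRQ index. */
static void example_vsync_cb(void *arg, int irq_idx)
{
	/* runs from dpu_core_irq() when this status bit fires */
}

static int example_enable_vsync(struct dpu_kms *dpu_kms, int vsync_irq_idx,
				void *priv)
{
	/* pairs with dpu_core_irq_unregister_callback() on disable */
	return dpu_core_irq_register_callback(dpu_kms, vsync_irq_idx,
					      example_vsync_cb, priv);
}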
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
new file mode 100644
index 0000000000..ef871239ad
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -0,0 +1,493 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/pm_opp.h>
+#include <linux/sort.h>
+#include <linux/clk.h>
+#include <linux/bitmap.h>
+
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+#include "dpu_crtc.h"
+#include "dpu_core_perf.h"
+
+/**
+ * enum dpu_perf_mode - performance tuning mode
+ * @DPU_PERF_MODE_NORMAL: performance controlled by user mode client
+ * @DPU_PERF_MODE_MINIMUM: performance bounded by minimum setting
+ * @DPU_PERF_MODE_FIXED: performance bounded by fixed setting
+ * @DPU_PERF_MODE_MAX: maximum value, used for error checking
+ */
+enum dpu_perf_mode {
+ DPU_PERF_MODE_NORMAL,
+ DPU_PERF_MODE_MINIMUM,
+ DPU_PERF_MODE_FIXED,
+ DPU_PERF_MODE_MAX
+};
+
+/**
+ * _dpu_core_perf_calc_bw() - calculate the aggregate bandwidth of a crtc
+ * @perf_cfg: performance configuration
+ * @crtc: pointer to a crtc
+ * Return: aggregated bandwidth of all planes attached to the crtc.
+ */
+static u64 _dpu_core_perf_calc_bw(const struct dpu_perf_cfg *perf_cfg,
+ struct drm_crtc *crtc)
+{
+ struct drm_plane *plane;
+ struct dpu_plane_state *pstate;
+ u64 crtc_plane_bw = 0;
+ u32 bw_factor;
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ pstate = to_dpu_plane_state(plane->state);
+ if (!pstate)
+ continue;
+
+ crtc_plane_bw += pstate->plane_fetch_bw;
+ }
+
+ bw_factor = perf_cfg->bw_inefficiency_factor;
+ if (bw_factor) {
+ crtc_plane_bw *= bw_factor;
+ do_div(crtc_plane_bw, 100);
+ }
+
+ return crtc_plane_bw;
+}
+
+/**
+ * _dpu_core_perf_calc_clk() - calculate the core clock rate required by a crtc
+ * @perf_cfg: performance configuration
+ * @crtc: pointer to a crtc
+ * @state: pointer to a crtc state
+ * Return: maximum clock rate required by the crtc's mode and planes.
+ */
+static u64 _dpu_core_perf_calc_clk(const struct dpu_perf_cfg *perf_cfg,
+ struct drm_crtc *crtc, struct drm_crtc_state *state)
+{
+ struct drm_plane *plane;
+ struct dpu_plane_state *pstate;
+ struct drm_display_mode *mode;
+ u64 crtc_clk;
+ u32 clk_factor;
+
+ mode = &state->adjusted_mode;
+
+ crtc_clk = mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode);
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ pstate = to_dpu_plane_state(plane->state);
+ if (!pstate)
+ continue;
+
+ crtc_clk = max(pstate->plane_clk, crtc_clk);
+ }
+
+ clk_factor = perf_cfg->clk_inefficiency_factor;
+ if (clk_factor) {
+ crtc_clk *= clk_factor;
+ do_div(crtc_clk, 100);
+ }
+
+ return crtc_clk;
+}
+
+static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+{
+ struct msm_drm_private *priv;
+
+ priv = crtc->dev->dev_private;
+ return to_dpu_kms(priv->kms);
+}
+
+static void _dpu_core_perf_calc_crtc(const struct dpu_core_perf *core_perf,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *state,
+ struct dpu_core_perf_params *perf)
+{
+ const struct dpu_perf_cfg *perf_cfg = core_perf->perf_cfg;
+
+ if (!perf_cfg || !crtc || !state || !perf) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ memset(perf, 0, sizeof(struct dpu_core_perf_params));
+
+ if (core_perf->perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
+ perf->bw_ctl = 0;
+ perf->max_per_pipe_ib = 0;
+ perf->core_clk_rate = 0;
+ } else if (core_perf->perf_tune.mode == DPU_PERF_MODE_FIXED) {
+ perf->bw_ctl = core_perf->fix_core_ab_vote;
+ perf->max_per_pipe_ib = core_perf->fix_core_ib_vote;
+ perf->core_clk_rate = core_perf->fix_core_clk_rate;
+ } else {
+ perf->bw_ctl = _dpu_core_perf_calc_bw(perf_cfg, crtc);
+ perf->max_per_pipe_ib = perf_cfg->min_dram_ib;
+ perf->core_clk_rate = _dpu_core_perf_calc_clk(perf_cfg, crtc, state);
+ }
+
+ DRM_DEBUG_ATOMIC(
+ "crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu\n",
+ crtc->base.id, perf->core_clk_rate,
+ perf->max_per_pipe_ib, perf->bw_ctl);
+}
+
+int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ u32 bw, threshold;
+ u64 bw_sum_of_intfs = 0;
+ enum dpu_crtc_client_type curr_client_type;
+ struct dpu_crtc_state *dpu_cstate;
+ struct drm_crtc *tmp_crtc;
+ struct dpu_kms *kms;
+
+ if (!crtc || !state) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+
+ kms = _dpu_crtc_get_kms(crtc);
+
+ /* we only need bandwidth check on real-time clients (interfaces) */
+ if (dpu_crtc_get_client_type(crtc) == NRT_CLIENT)
+ return 0;
+
+ dpu_cstate = to_dpu_crtc_state(state);
+
+ /* obtain new values */
+ _dpu_core_perf_calc_crtc(&kms->perf, crtc, state, &dpu_cstate->new_perf);
+
+ bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl;
+ curr_client_type = dpu_crtc_get_client_type(crtc);
+
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if (tmp_crtc->enabled &&
+ dpu_crtc_get_client_type(tmp_crtc) == curr_client_type &&
+ tmp_crtc != crtc) {
+ struct dpu_crtc_state *tmp_cstate =
+ to_dpu_crtc_state(tmp_crtc->state);
+
+ DRM_DEBUG_ATOMIC("crtc:%d bw:%llu ctrl:%d\n",
+ tmp_crtc->base.id, tmp_cstate->new_perf.bw_ctl,
+ tmp_cstate->bw_control);
+
+ bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl;
+ }
+
+ /* convert bandwidth to kb */
+ bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
+ DRM_DEBUG_ATOMIC("calculated bandwidth=%uk\n", bw);
+
+ threshold = kms->perf.perf_cfg->max_bw_high;
+
+ DRM_DEBUG_ATOMIC("final threshold bw limit = %d\n", threshold);
+
+ if (!threshold) {
+ DPU_ERROR("no bandwidth limits specified\n");
+ return -E2BIG;
+ } else if (bw > threshold) {
+ DPU_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw,
+ threshold);
+ return -E2BIG;
+ }
+ }
+
+ return 0;
+}
+
+static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
+ struct drm_crtc *crtc)
+{
+ struct dpu_core_perf_params perf = { 0 };
+ enum dpu_crtc_client_type curr_client_type
+ = dpu_crtc_get_client_type(crtc);
+ struct drm_crtc *tmp_crtc;
+ struct dpu_crtc_state *dpu_cstate;
+ int i, ret = 0;
+ u64 avg_bw;
+
+ if (!kms->num_paths)
+ return 0;
+
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if (tmp_crtc->enabled &&
+ curr_client_type ==
+ dpu_crtc_get_client_type(tmp_crtc)) {
+ dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
+
+ perf.max_per_pipe_ib = max(perf.max_per_pipe_ib,
+ dpu_cstate->new_perf.max_per_pipe_ib);
+
+ perf.bw_ctl += dpu_cstate->new_perf.bw_ctl;
+
+ DRM_DEBUG_ATOMIC("crtc=%d bw=%llu paths:%d\n",
+ tmp_crtc->base.id,
+ dpu_cstate->new_perf.bw_ctl, kms->num_paths);
+ }
+ }
+
+ avg_bw = perf.bw_ctl;
+ do_div(avg_bw, (kms->num_paths * 1000)); /* Bps_to_icc */
+
+ for (i = 0; i < kms->num_paths; i++)
+ icc_set_bw(kms->path[i], avg_bw, perf.max_per_pipe_ib);
+
+ return ret;
+}
+
+/**
+ * dpu_core_perf_crtc_release_bw() - request zero bandwidth
+ * @crtc: pointer to a crtc
+ *
+ * Drops a reference on the device-wide bandwidth refcount. Once all
+ * pending commit requests are done and no more bandwidth is needed,
+ * the bandwidth request is released.
+ */
+void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_kms *kms;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ kms = _dpu_crtc_get_kms(crtc);
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ if (atomic_dec_return(&kms->bandwidth_ref) > 0)
+ return;
+
+ /* Release the bandwidth */
+ if (kms->perf.enable_bw_release) {
+ trace_dpu_cmd_release_bw(crtc->base.id);
+ DRM_DEBUG_ATOMIC("Release BW crtc=%d\n", crtc->base.id);
+ dpu_crtc->cur_perf.bw_ctl = 0;
+ _dpu_core_perf_crtc_update_bus(kms, crtc);
+ }
+}
+
+static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
+{
+ u64 clk_rate;
+ struct drm_crtc *crtc;
+ struct dpu_crtc_state *dpu_cstate;
+
+ if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED)
+ return kms->perf.fix_core_clk_rate;
+
+ if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM)
+ return kms->perf.max_core_clk_rate;
+
+ clk_rate = 0;
+ drm_for_each_crtc(crtc, kms->dev) {
+ if (crtc->enabled) {
+ dpu_cstate = to_dpu_crtc_state(crtc->state);
+ clk_rate = max(dpu_cstate->new_perf.core_clk_rate,
+ clk_rate);
+ }
+ }
+
+ return clk_rate;
+}
+
+int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
+ int params_changed)
+{
+ struct dpu_core_perf_params *new, *old;
+ bool update_bus = false, update_clk = false;
+ u64 clk_rate = 0;
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *dpu_cstate;
+ struct dpu_kms *kms;
+ int ret;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+
+ kms = _dpu_crtc_get_kms(crtc);
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ dpu_cstate = to_dpu_crtc_state(crtc->state);
+
+ DRM_DEBUG_ATOMIC("crtc:%d enabled:%d core_clk:%llu\n",
+ crtc->base.id, crtc->enabled, kms->perf.core_clk_rate);
+
+ old = &dpu_crtc->cur_perf;
+ new = &dpu_cstate->new_perf;
+
+ if (crtc->enabled) {
+ /*
+ * Cases for a bus bandwidth update:
+ * 1. new bandwidth vote - the "ab or ib" vote is higher
+ *    than the current vote, for an update request.
+ * 2. new bandwidth vote - the "ab or ib" vote is lower
+ *    than the current vote, at end of commit or stop.
+ */
+ if ((params_changed && ((new->bw_ctl > old->bw_ctl) ||
+ (new->max_per_pipe_ib > old->max_per_pipe_ib))) ||
+ (!params_changed && ((new->bw_ctl < old->bw_ctl) ||
+ (new->max_per_pipe_ib < old->max_per_pipe_ib)))) {
+ DRM_DEBUG_ATOMIC("crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
+ crtc->base.id, params_changed,
+ new->bw_ctl, old->bw_ctl);
+ old->bw_ctl = new->bw_ctl;
+ old->max_per_pipe_ib = new->max_per_pipe_ib;
+ update_bus = true;
+ }
+
+ if ((params_changed && new->core_clk_rate > old->core_clk_rate) ||
+ (!params_changed && new->core_clk_rate < old->core_clk_rate)) {
+ old->core_clk_rate = new->core_clk_rate;
+ update_clk = true;
+ }
+ } else {
+ DRM_DEBUG_ATOMIC("crtc=%d disable\n", crtc->base.id);
+ memset(old, 0, sizeof(*old));
+ update_bus = true;
+ update_clk = true;
+ }
+
+ trace_dpu_perf_crtc_update(crtc->base.id, new->bw_ctl,
+ new->core_clk_rate, !crtc->enabled, update_bus, update_clk);
+
+ if (update_bus) {
+ ret = _dpu_core_perf_crtc_update_bus(kms, crtc);
+ if (ret) {
+ DPU_ERROR("crtc-%d: failed to update bus bw vote\n",
+ crtc->base.id);
+ return ret;
+ }
+ }
+
+ /*
+ * Update the clock after bandwidth vote to ensure
+ * bandwidth is available before clock rate is increased.
+ */
+ if (update_clk) {
+ clk_rate = _dpu_core_perf_get_core_clk_rate(kms);
+
+ DRM_DEBUG_ATOMIC("clk:%llu\n", clk_rate);
+
+ trace_dpu_core_perf_update_clk(kms->dev, !crtc->enabled, clk_rate);
+
+ clk_rate = min(clk_rate, kms->perf.max_core_clk_rate);
+ ret = dev_pm_opp_set_rate(&kms->pdev->dev, clk_rate);
+ if (ret) {
+ DPU_ERROR("failed to set core clock rate %llu\n", clk_rate);
+ return ret;
+ }
+
+ kms->perf.core_clk_rate = clk_rate;
+ DRM_DEBUG_ATOMIC("update clk rate = %lld HZ\n", clk_rate);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static ssize_t _dpu_core_perf_mode_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dpu_core_perf *perf = file->private_data;
+ u32 perf_mode = 0;
+ int ret;
+
+ ret = kstrtouint_from_user(user_buf, count, 0, &perf_mode);
+ if (ret)
+ return ret;
+
+ if (perf_mode >= DPU_PERF_MODE_MAX)
+ return -EINVAL;
+
+ if (perf_mode == DPU_PERF_MODE_FIXED) {
+ DRM_INFO("fix performance mode\n");
+ } else if (perf_mode == DPU_PERF_MODE_MINIMUM) {
+ /* run the driver with max clk and BW vote */
+ DRM_INFO("minimum performance mode\n");
+ } else if (perf_mode == DPU_PERF_MODE_NORMAL) {
+ /* reset the perf tune params to 0 */
+ DRM_INFO("normal performance mode\n");
+ }
+ perf->perf_tune.mode = perf_mode;
+
+ return count;
+}
+
+static ssize_t _dpu_core_perf_mode_read(struct file *file,
+ char __user *buff, size_t count, loff_t *ppos)
+{
+ struct dpu_core_perf *perf = file->private_data;
+ int len;
+ char buf[128];
+
+ len = scnprintf(buf, sizeof(buf),
+ "mode %d\n",
+ perf->perf_tune.mode);
+
+ return simple_read_from_buffer(buff, count, ppos, buf, len);
+}
+
+static const struct file_operations dpu_core_perf_mode_fops = {
+ .open = simple_open,
+ .read = _dpu_core_perf_mode_read,
+ .write = _dpu_core_perf_mode_write,
+};
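+
+/*
+ * Example of driving this knob from userspace (path is illustrative,
+ * assuming debugfs is mounted and this device is DRM minor 0):
+ *
+ * # echo 1 > /sys/kernel/debug/dri/0/debug/core_perf/perf_mode
+ * # cat /sys/kernel/debug/dri/0/debug/core_perf/perf_mode
+ * mode 1
+ *
+ * where the written value is one of the DPU_PERF_MODE_* constants
+ * accepted by _dpu_core_perf_mode_write() above.
+ */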
+
+int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
+{
+ struct dpu_core_perf *perf = &dpu_kms->perf;
+ struct dentry *entry;
+
+ entry = debugfs_create_dir("core_perf", parent);
+
+ debugfs_create_u64("max_core_clk_rate", 0600, entry,
+ &perf->max_core_clk_rate);
+ debugfs_create_u64("core_clk_rate", 0600, entry,
+ &perf->core_clk_rate);
+ debugfs_create_u32("enable_bw_release", 0600, entry,
+ (u32 *)&perf->enable_bw_release);
+ debugfs_create_u32("threshold_low", 0600, entry,
+ (u32 *)&perf->perf_cfg->max_bw_low);
+ debugfs_create_u32("threshold_high", 0600, entry,
+ (u32 *)&perf->perf_cfg->max_bw_high);
+ debugfs_create_u32("min_core_ib", 0600, entry,
+ (u32 *)&perf->perf_cfg->min_core_ib);
+ debugfs_create_u32("min_llcc_ib", 0600, entry,
+ (u32 *)&perf->perf_cfg->min_llcc_ib);
+ debugfs_create_u32("min_dram_ib", 0600, entry,
+ (u32 *)&perf->perf_cfg->min_dram_ib);
+ debugfs_create_file("perf_mode", 0600, entry,
+ (u32 *)perf, &dpu_core_perf_mode_fops);
+ debugfs_create_u64("fix_core_clk_rate", 0600, entry,
+ &perf->fix_core_clk_rate);
+ debugfs_create_u64("fix_core_ib_vote", 0600, entry,
+ &perf->fix_core_ib_vote);
+ debugfs_create_u64("fix_core_ab_vote", 0600, entry,
+ &perf->fix_core_ab_vote);
+
+ return 0;
+}
+#endif
+
+int dpu_core_perf_init(struct dpu_core_perf *perf,
+ const struct dpu_perf_cfg *perf_cfg,
+ unsigned long max_core_clk_rate)
+{
+ perf->perf_cfg = perf_cfg;
+ perf->max_core_clk_rate = max_core_clk_rate;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
new file mode 100644
index 0000000000..4186977390
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_CORE_PERF_H_
+#define _DPU_CORE_PERF_H_
+
+#include <linux/types.h>
+#include <linux/dcache.h>
+#include <linux/mutex.h>
+#include <drm/drm_crtc.h>
+
+#include "dpu_hw_catalog.h"
+
+/**
+ * struct dpu_core_perf_params - definition of performance parameters
+ * @max_per_pipe_ib: maximum instantaneous bandwidth request
+ * @bw_ctl: arbitrated bandwidth request
+ * @core_clk_rate: core clock rate request
+ */
+struct dpu_core_perf_params {
+ u64 max_per_pipe_ib;
+ u64 bw_ctl;
+ u64 core_clk_rate;
+};
+
+/**
+ * struct dpu_core_perf_tune - definition of performance tuning control
+ * @mode: performance mode
+ */
+struct dpu_core_perf_tune {
+ u32 mode;
+};
+
+/**
+ * struct dpu_core_perf - definition of core performance context
+ * @perf_cfg: Platform-specific performance configuration
+ * @core_clk_rate: current core clock rate
+ * @max_core_clk_rate: maximum allowable core clock rate
+ * @perf_tune: debug control for performance tuning
+ * @enable_bw_release: debug control for bandwidth release
+ * @fix_core_clk_rate: fixed core clock request in Hz used in mode 2
+ * @fix_core_ib_vote: fixed core ib vote in bps used in mode 2
+ * @fix_core_ab_vote: fixed core ab vote in bps used in mode 2
+ */
+struct dpu_core_perf {
+ const struct dpu_perf_cfg *perf_cfg;
+ u64 core_clk_rate;
+ u64 max_core_clk_rate;
+ struct dpu_core_perf_tune perf_tune;
+ u32 enable_bw_release;
+ u64 fix_core_clk_rate;
+ u64 fix_core_ib_vote;
+ u64 fix_core_ab_vote;
+};
+
+/**
+ * dpu_core_perf_crtc_check - validate performance of the given crtc state
+ * @crtc: Pointer to crtc
+ * @state: Pointer to new crtc state
+ * return: zero if success, or error code otherwise
+ */
+int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+
+/**
+ * dpu_core_perf_crtc_update - update performance of the given crtc
+ * @crtc: Pointer to crtc
+ * @params_changed: true if crtc parameters are modified
+ * return: zero if success, or error code otherwise
+ */
+int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
+ int params_changed);
+
+/**
+ * dpu_core_perf_crtc_release_bw - release bandwidth of the given crtc
+ * @crtc: Pointer to crtc
+ */
+void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc);
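+
+/*
+ * A minimal sketch of how the CRTC code is expected to drive this API
+ * over the life of a frame (see dpu_crtc.c for the actual call sites):
+ *
+ * dpu_core_perf_crtc_check(crtc, state); // from atomic_check
+ * dpu_core_perf_crtc_update(crtc, 1); // from atomic_flush, pre-kickoff
+ * dpu_core_perf_crtc_update(crtc, 0); // from complete_commit/disable
+ * dpu_core_perf_crtc_release_bw(crtc); // at frame done, when idle
+ */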
+
+/**
+ * dpu_core_perf_init - initialize the given core performance context
+ * @perf: Pointer to core performance context
+ * @perf_cfg: Pointer to platform performance configuration
+ * @max_core_clk_rate: Maximum core clock rate
+ */
+int dpu_core_perf_init(struct dpu_core_perf *perf,
+ const struct dpu_perf_cfg *perf_cfg,
+ unsigned long max_core_clk_rate);
+
+struct dpu_kms;
+
+/**
+ * dpu_core_perf_debugfs_init - initialize debugfs for core performance context
+ * @dpu_kms: Pointer to the dpu_kms struct
+ * @parent: Pointer to parent debugfs
+ */
+int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent);
+
+#endif /* _DPU_CORE_PERF_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
new file mode 100644
index 0000000000..e238e4e811
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -0,0 +1,1517 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/sort.h>
+#include <linux/debugfs.h>
+#include <linux/ktime.h>
+#include <linux/bits.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_flip_work.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_rect.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_self_refresh_helper.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_dspp.h"
+#include "dpu_crtc.h"
+#include "dpu_plane.h"
+#include "dpu_encoder.h"
+#include "dpu_vbif.h"
+#include "dpu_core_perf.h"
+#include "dpu_trace.h"
+
+/* layer mixer index on dpu_crtc */
+#define LEFT_MIXER 0
+#define RIGHT_MIXER 1
+
+/* timeout in ms waiting for frame done */
+#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS 60
+
+#define CONVERT_S3_15(val) \
+ (((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))
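+
+/*
+ * Example: drm_color_ctm coefficients are sign-magnitude S31.32 fixed
+ * point, so 1.0 is 1ULL << 32; dropping the sign bit and shifting right
+ * by 17 leaves 1 << 15 = 0x8000, i.e. 1.0 in the hardware's 3.15 format.
+ */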
+
+static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+{
+ struct msm_drm_private *priv = crtc->dev->dev_private;
+
+ return to_dpu_kms(priv->kms);
+}
+
+static void dpu_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+ if (!crtc)
+ return;
+
+ drm_crtc_cleanup(crtc);
+ kfree(dpu_crtc);
+}
+
+static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_encoder *encoder;
+
+ drm_for_each_encoder(encoder, dev)
+ if (encoder->crtc == crtc)
+ return encoder;
+
+ return NULL;
+}
+
+static enum dpu_crtc_crc_source dpu_crtc_parse_crc_source(const char *src_name)
+{
+ if (!src_name ||
+ !strcmp(src_name, "none"))
+ return DPU_CRTC_CRC_SOURCE_NONE;
+ if (!strcmp(src_name, "auto") ||
+ !strcmp(src_name, "lm"))
+ return DPU_CRTC_CRC_SOURCE_LAYER_MIXER;
+ if (!strcmp(src_name, "encoder"))
+ return DPU_CRTC_CRC_SOURCE_ENCODER;
+
+ return DPU_CRTC_CRC_SOURCE_INVALID;
+}
+
+static int dpu_crtc_verify_crc_source(struct drm_crtc *crtc,
+ const char *src_name, size_t *values_cnt)
+{
+ enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
+ struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state);
+
+ if (source < 0) {
+ DRM_DEBUG_DRIVER("Invalid source %s for CRTC%d\n", src_name, crtc->index);
+ return -EINVAL;
+ }
+
+ if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER) {
+ *values_cnt = crtc_state->num_mixers;
+ } else if (source == DPU_CRTC_CRC_SOURCE_ENCODER) {
+ struct drm_encoder *drm_enc;
+
+ *values_cnt = 0;
+
+ drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask)
+ *values_cnt += dpu_encoder_get_crc_values_cnt(drm_enc);
+ }
+
+ return 0;
+}
+
+static void dpu_crtc_setup_lm_misr(struct dpu_crtc_state *crtc_state)
+{
+ struct dpu_crtc_mixer *m;
+ int i;
+
+ for (i = 0; i < crtc_state->num_mixers; ++i) {
+ m = &crtc_state->mixers[i];
+
+ if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
+ continue;
+
+ /* Calculate MISR over 1 frame */
+ m->hw_lm->ops.setup_misr(m->hw_lm);
+ }
+}
+
+static void dpu_crtc_setup_encoder_misr(struct drm_crtc *crtc)
+{
+ struct drm_encoder *drm_enc;
+
+ drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask)
+ dpu_encoder_setup_misr(drm_enc);
+}
+
+static int dpu_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
+{
+ enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
+ enum dpu_crtc_crc_source current_source;
+ struct dpu_crtc_state *crtc_state;
+ struct drm_device *drm_dev = crtc->dev;
+
+ bool was_enabled;
+ bool enable = false;
+ int ret = 0;
+
+ if (source < 0) {
+ DRM_DEBUG_DRIVER("Invalid CRC source %s for CRTC%d\n", src_name, crtc->index);
+ return -EINVAL;
+ }
+
+ ret = drm_modeset_lock(&crtc->mutex, NULL);
+
+ if (ret)
+ return ret;
+
+ enable = (source != DPU_CRTC_CRC_SOURCE_NONE);
+ crtc_state = to_dpu_crtc_state(crtc->state);
+
+ spin_lock_irq(&drm_dev->event_lock);
+ current_source = crtc_state->crc_source;
+ spin_unlock_irq(&drm_dev->event_lock);
+
+ was_enabled = (current_source != DPU_CRTC_CRC_SOURCE_NONE);
+
+ if (!was_enabled && enable) {
+ ret = drm_crtc_vblank_get(crtc);
+
+ if (ret)
+ goto cleanup;
+
+ } else if (was_enabled && !enable) {
+ drm_crtc_vblank_put(crtc);
+ }
+
+ spin_lock_irq(&drm_dev->event_lock);
+ crtc_state->crc_source = source;
+ spin_unlock_irq(&drm_dev->event_lock);
+
+ crtc_state->crc_frame_skip_count = 0;
+
+ if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER)
+ dpu_crtc_setup_lm_misr(crtc_state);
+ else if (source == DPU_CRTC_CRC_SOURCE_ENCODER)
+ dpu_crtc_setup_encoder_misr(crtc);
+ else
+ ret = -EINVAL;
+
+cleanup:
+ drm_modeset_unlock(&crtc->mutex);
+
+ return ret;
+}
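+
+/*
+ * These hooks implement the standard DRM CRC debugfs ABI; a typical
+ * capture session from userspace looks like (illustrative path):
+ *
+ * # echo lm > /sys/kernel/debug/dri/0/crtc-0/crc/control
+ * # cat /sys/kernel/debug/dri/0/crtc-0/crc/data
+ *
+ * where "lm"/"auto" select the layer mixer MISRs, and "encoder" or
+ * "none" are the other sources accepted by dpu_crtc_parse_crc_source().
+ */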
+
+static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder = get_encoder_from_crtc(crtc);
+
+ if (!encoder) {
+ DRM_ERROR("no encoder found for crtc %d\n", crtc->index);
+ return 0;
+ }
+
+ return dpu_encoder_get_vsync_count(encoder);
+}
+
+static int dpu_crtc_get_lm_crc(struct drm_crtc *crtc,
+ struct dpu_crtc_state *crtc_state)
+{
+ struct dpu_crtc_mixer *m;
+ u32 crcs[CRTC_DUAL_MIXERS];
+
+ int rc = 0;
+ int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(crcs) != ARRAY_SIZE(crtc_state->mixers));
+
+ for (i = 0; i < crtc_state->num_mixers; ++i) {
+
+ m = &crtc_state->mixers[i];
+
+ if (!m->hw_lm || !m->hw_lm->ops.collect_misr)
+ continue;
+
+ rc = m->hw_lm->ops.collect_misr(m->hw_lm, &crcs[i]);
+
+ if (rc) {
+ if (rc != -ENODATA)
+ DRM_DEBUG_DRIVER("MISR read failed\n");
+ return rc;
+ }
+ }
+
+ return drm_crtc_add_crc_entry(crtc, true,
+ drm_crtc_accurate_vblank_count(crtc), crcs);
+}
+
+static int dpu_crtc_get_encoder_crc(struct drm_crtc *crtc)
+{
+ struct drm_encoder *drm_enc;
+ int rc, pos = 0;
+ u32 crcs[INTF_MAX];
+
+ drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask) {
+ rc = dpu_encoder_get_crc(drm_enc, crcs, pos);
+ if (rc < 0) {
+ if (rc != -ENODATA)
+ DRM_DEBUG_DRIVER("MISR read failed\n");
+
+ return rc;
+ }
+
+ pos += rc;
+ }
+
+ return drm_crtc_add_crc_entry(crtc, true,
+ drm_crtc_accurate_vblank_count(crtc), crcs);
+}
+
+static int dpu_crtc_get_crc(struct drm_crtc *crtc)
+{
+ struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state);
+
+ /* Skip first 2 frames in case of "uncooked" CRCs */
+ if (crtc_state->crc_frame_skip_count < 2) {
+ crtc_state->crc_frame_skip_count++;
+ return 0;
+ }
+
+ if (crtc_state->crc_source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER)
+ return dpu_crtc_get_lm_crc(crtc, crtc_state);
+ else if (crtc_state->crc_source == DPU_CRTC_CRC_SOURCE_ENCODER)
+ return dpu_crtc_get_encoder_crc(crtc);
+
+ return -EINVAL;
+}
+
+static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ unsigned int pipe = crtc->index;
+ struct drm_encoder *encoder;
+ int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
+
+ encoder = get_encoder_from_crtc(crtc);
+ if (!encoder) {
+ DRM_ERROR("no encoder found for crtc %d\n", pipe);
+ return false;
+ }
+
+ vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
+
+ /*
+ * The line counter is 1 at the start of the VSYNC pulse and reaches
+ * VTOTAL at the end of the VFP. Translate the porch values relative
+ * to these line counter positions.
+ */
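+ /*
+ * Worked example with made-up timings: vsw = 5, vbp = 36 and
+ * vdisplay = 1080 give vactive_start = 42 and vactive_end = 1122,
+ * so a raw line count of 42 maps to vpos 0 (first active line),
+ * while counts in the porches map to negative vpos values, as the
+ * DRM vblank timestamping helpers expect.
+ */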
+
+ vactive_start = vsw + vbp + 1;
+ vactive_end = vactive_start + mode->crtc_vdisplay;
+
+ /* last scan line before VSYNC */
+ vfp_end = mode->crtc_vtotal;
+
+ if (stime)
+ *stime = ktime_get();
+
+ line = dpu_encoder_get_linecount(encoder);
+
+ if (line < vactive_start)
+ line -= vactive_start;
+ else if (line > vactive_end)
+ line = line - vfp_end - vactive_start;
+ else
+ line -= vactive_start;
+
+ *vpos = line;
+ *hpos = 0;
+
+ if (etime)
+ *etime = ktime_get();
+
+ return true;
+}
+
+static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
+ struct dpu_plane_state *pstate, struct dpu_format *format)
+{
+ struct dpu_hw_mixer *lm = mixer->hw_lm;
+ uint32_t blend_op;
+ uint32_t fg_alpha, bg_alpha;
+
+ fg_alpha = pstate->base.alpha >> 8;
+ bg_alpha = 0xff - fg_alpha;
+
+ /* default to opaque blending */
+ if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE ||
+ !format->alpha_enable) {
+ blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
+ DPU_BLEND_BG_ALPHA_BG_CONST;
+ } else if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
+ blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
+ DPU_BLEND_BG_ALPHA_FG_PIXEL;
+ if (fg_alpha != 0xff) {
+ bg_alpha = fg_alpha;
+ blend_op |= DPU_BLEND_BG_MOD_ALPHA |
+ DPU_BLEND_BG_INV_MOD_ALPHA;
+ } else {
+ blend_op |= DPU_BLEND_BG_INV_ALPHA;
+ }
+ } else {
+ /* coverage blending */
+ blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
+ DPU_BLEND_BG_ALPHA_FG_PIXEL;
+ if (fg_alpha != 0xff) {
+ bg_alpha = fg_alpha;
+ blend_op |= DPU_BLEND_FG_MOD_ALPHA |
+ DPU_BLEND_FG_INV_MOD_ALPHA |
+ DPU_BLEND_BG_MOD_ALPHA |
+ DPU_BLEND_BG_INV_MOD_ALPHA;
+ } else {
+ blend_op |= DPU_BLEND_BG_INV_ALPHA;
+ }
+ }
+
+ lm->ops.setup_blend_config(lm, pstate->stage,
+ fg_alpha, bg_alpha, blend_op);
+
+ DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
+ &format->base.pixel_format, format->alpha_enable, blend_op);
+}
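+
+/*
+ * Example (hypothetical plane state): a premultiplied plane with plane
+ * alpha 0x8000 yields fg_alpha = 0x80, taking the branch above that also
+ * sets bg_alpha = 0x80 and the BG_MOD_ALPHA | BG_INV_MOD_ALPHA flags, so
+ * the background contribution is modulated by the constant plane alpha
+ * in addition to the per-pixel foreground alpha.
+ */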
+
+static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
+{
+ struct dpu_crtc_state *crtc_state;
+ int lm_idx, lm_horiz_position;
+
+ crtc_state = to_dpu_crtc_state(crtc->state);
+
+ lm_horiz_position = 0;
+ for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
+ const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
+ struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
+ struct dpu_hw_mixer_cfg cfg;
+
+ if (!lm_roi || !drm_rect_visible(lm_roi))
+ continue;
+
+ cfg.out_width = drm_rect_width(lm_roi);
+ cfg.out_height = drm_rect_height(lm_roi);
+ cfg.right_mixer = lm_horiz_position++;
+ cfg.flags = 0;
+ hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
+ }
+}
+
+static void _dpu_crtc_blend_setup_pipe(struct drm_crtc *crtc,
+ struct drm_plane *plane,
+ struct dpu_crtc_mixer *mixer,
+ u32 num_mixers,
+ enum dpu_stage stage,
+ struct dpu_format *format,
+ uint64_t modifier,
+ struct dpu_sw_pipe *pipe,
+ unsigned int stage_idx,
+ struct dpu_hw_stage_cfg *stage_cfg
+ )
+{
+ uint32_t lm_idx;
+ enum dpu_sspp sspp_idx;
+ struct drm_plane_state *state;
+
+ sspp_idx = pipe->sspp->idx;
+
+ state = plane->state;
+
+ trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
+ state, to_dpu_plane_state(state), stage_idx,
+ format->base.pixel_format,
+ modifier);
+
+ DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d multirect_idx %d\n",
+ crtc->base.id,
+ stage,
+ plane->base.id,
+ sspp_idx - SSPP_NONE,
+ state->fb ? state->fb->base.id : -1,
+ pipe->multirect_index);
+
+ stage_cfg->stage[stage][stage_idx] = sspp_idx;
+ stage_cfg->multirect_index[stage][stage_idx] = pipe->multirect_index;
+
+ /* blend config update */
+ for (lm_idx = 0; lm_idx < num_mixers; lm_idx++)
+ mixer[lm_idx].lm_ctl->ops.update_pending_flush_sspp(mixer[lm_idx].lm_ctl, sspp_idx);
+}
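+
+/*
+ * For example (hypothetical setup), a plane fetched through SSPP_VIG0
+ * and blended at DPU_STAGE_2 is recorded above as
+ * stage_cfg->stage[DPU_STAGE_2][0] = SSPP_VIG0, with stage_idx 1 used
+ * for the right-hand pipe of a wide two-pipe plane (see the r_pipe
+ * handling in _dpu_crtc_blend_setup_mixer() below).
+ */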
+
+static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
+ struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer,
+ struct dpu_hw_stage_cfg *stage_cfg)
+{
+ struct drm_plane *plane;
+ struct drm_framebuffer *fb;
+ struct drm_plane_state *state;
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
+ struct dpu_plane_state *pstate = NULL;
+ struct dpu_format *format;
+ struct dpu_hw_ctl *ctl = mixer->lm_ctl;
+
+ uint32_t lm_idx;
+ bool bg_alpha_enable = false;
+ DECLARE_BITMAP(fetch_active, SSPP_MAX);
+
+ memset(fetch_active, 0, sizeof(fetch_active));
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ state = plane->state;
+ if (!state)
+ continue;
+
+ if (!state->visible)
+ continue;
+
+ pstate = to_dpu_plane_state(state);
+ fb = state->fb;
+
+ format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));
+
+ if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
+ bg_alpha_enable = true;
+
+ set_bit(pstate->pipe.sspp->idx, fetch_active);
+ _dpu_crtc_blend_setup_pipe(crtc, plane,
+ mixer, cstate->num_mixers,
+ pstate->stage,
+ format, fb ? fb->modifier : 0,
+ &pstate->pipe, 0, stage_cfg);
+
+ if (pstate->r_pipe.sspp) {
+ set_bit(pstate->r_pipe.sspp->idx, fetch_active);
+ _dpu_crtc_blend_setup_pipe(crtc, plane,
+ mixer, cstate->num_mixers,
+ pstate->stage,
+ format, fb ? fb->modifier : 0,
+ &pstate->r_pipe, 1, stage_cfg);
+ }
+
+ /* blend config update */
+ for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
+ _dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate, format);
+
+ if (bg_alpha_enable && !format->alpha_enable)
+ mixer[lm_idx].mixer_op_mode = 0;
+ else
+ mixer[lm_idx].mixer_op_mode |=
+ 1 << pstate->stage;
+ }
+ }
+
+ if (ctl->ops.set_active_pipes)
+ ctl->ops.set_active_pipes(ctl, fetch_active);
+
+ _dpu_crtc_program_lm_output_roi(crtc);
+}
+
+/**
+ * _dpu_crtc_blend_setup - configure crtc mixers
+ * @crtc: Pointer to drm crtc structure
+ */
+static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
+ struct dpu_crtc_mixer *mixer = cstate->mixers;
+ struct dpu_hw_ctl *ctl;
+ struct dpu_hw_mixer *lm;
+ struct dpu_hw_stage_cfg stage_cfg;
+ int i;
+
+ DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name);
+
+ for (i = 0; i < cstate->num_mixers; i++) {
+ mixer[i].mixer_op_mode = 0;
+ if (mixer[i].lm_ctl->ops.clear_all_blendstages)
+ mixer[i].lm_ctl->ops.clear_all_blendstages(
+ mixer[i].lm_ctl);
+ }
+
+ /* initialize stage cfg */
+ memset(&stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));
+
+ _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer, &stage_cfg);
+
+ for (i = 0; i < cstate->num_mixers; i++) {
+ ctl = mixer[i].lm_ctl;
+ lm = mixer[i].hw_lm;
+
+ lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
+
+ /* stage config flush mask */
+ ctl->ops.update_pending_flush_mixer(ctl,
+ mixer[i].hw_lm->idx);
+
+ DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d\n",
+ mixer[i].hw_lm->idx - LM_0,
+ mixer[i].mixer_op_mode,
+ ctl->idx - CTL_0);
+
+ ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
+ &stage_cfg);
+ }
+}
+
+/**
+ * _dpu_crtc_complete_flip - signal pending page_flip events
+ * Any pending vblank events are added to the vblank_event_list
+ * so that the next vblank interrupt shall signal them.
+ * However, PAGE_FLIP events are not handled through the vblank_event_list.
+ * This function signals any pending PAGE_FLIP events that were requested
+ * through DRM_IOCTL_MODE_PAGE_FLIP and cached in dpu_crtc->event.
+ * @crtc: Pointer to drm crtc structure
+ */
+static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (dpu_crtc->event) {
+ DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
+ dpu_crtc->event);
+ trace_dpu_crtc_complete_flip(DRMID(crtc));
+ drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
+ dpu_crtc->event = NULL;
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+
+ /*
+ * TODO: This function is called from dpu debugfs and as part of atomic
+ * check. When called from debugfs, the crtc->mutex must be held to
+ * read crtc->state. However reading crtc->state from atomic check isn't
+ * allowed (unless you have a good reason, a big comment, and a deep
+ * understanding of how the atomic/modeset locks work (<- and this is
+ * probably not possible)). So we'll keep the WARN_ON here for now, but
+ * really we need to figure out a better way to track our operating mode
+ */
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+ /* TODO: Returns the first INTF_MODE, could there be multiple values? */
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+ return dpu_encoder_get_intf_mode(encoder);
+
+ return INTF_MODE_NONE;
+}
+
+void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+ /* keep statistics on vblank callback - with auto reset via debugfs */
+ if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
+ dpu_crtc->vblank_cb_time = ktime_get();
+ else
+ dpu_crtc->vblank_cb_count++;
+
+ dpu_crtc_get_crc(crtc);
+
+ drm_crtc_handle_vblank(crtc);
+ trace_dpu_crtc_vblank_cb(DRMID(crtc));
+}
+
+static void dpu_crtc_frame_event_work(struct kthread_work *work)
+{
+ struct dpu_crtc_frame_event *fevent = container_of(work,
+ struct dpu_crtc_frame_event, work);
+ struct drm_crtc *crtc = fevent->crtc;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ unsigned long flags;
+ bool frame_done = false;
+
+ DPU_ATRACE_BEGIN("crtc_frame_event");
+
+ DRM_DEBUG_ATOMIC("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
+ ktime_to_ns(fevent->ts));
+
+ if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
+ | DPU_ENCODER_FRAME_EVENT_ERROR
+ | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
+
+ if (atomic_read(&dpu_crtc->frame_pending) < 1) {
+ /* ignore vblank when not pending */
+ } else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
+ /* release bandwidth and other resources */
+ trace_dpu_crtc_frame_event_done(DRMID(crtc),
+ fevent->event);
+ dpu_core_perf_crtc_release_bw(crtc);
+ } else {
+ trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
+ fevent->event);
+ }
+
+ if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
+ | DPU_ENCODER_FRAME_EVENT_ERROR))
+ frame_done = true;
+ }
+
+ if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
+ DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
+ crtc->base.id, ktime_to_ns(fevent->ts));
+
+ if (frame_done)
+ complete_all(&dpu_crtc->frame_done_comp);
+
+ spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
+ list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
+ spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
+ DPU_ATRACE_END("crtc_frame_event");
+}
+
+/*
+ * dpu_crtc_frame_event_cb - crtc frame event callback. The CRTC module
+ * registers this callback with the encoder for all frame event callbacks
+ * like frame_error, frame_done, idle_timeout, etc. The encoder may invoke
+ * it from different contexts - IRQ, user thread, commit_thread, etc. Each
+ * event should be carefully reviewed and processed in the proper task
+ * context to avoid scheduling delay or to properly manage the IRQ
+ * context's bottom-half processing.
+ */
+static void dpu_crtc_frame_event_cb(void *data, u32 event)
+{
+ struct drm_crtc *crtc = (struct drm_crtc *)data;
+ struct dpu_crtc *dpu_crtc;
+ struct msm_drm_private *priv;
+ struct dpu_crtc_frame_event *fevent;
+ unsigned long flags;
+ u32 crtc_id;
+
+ /* Nothing to do on idle event */
+ if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
+ return;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ priv = crtc->dev->dev_private;
+ crtc_id = drm_crtc_index(crtc);
+
+ trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);
+
+ spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
+ fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
+ struct dpu_crtc_frame_event, list);
+ if (fevent)
+ list_del_init(&fevent->list);
+ spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
+
+ if (!fevent) {
+ DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
+ return;
+ }
+
+ fevent->event = event;
+ fevent->crtc = crtc;
+ fevent->ts = ktime_get();
+ kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
+}
+
+void dpu_crtc_complete_commit(struct drm_crtc *crtc)
+{
+ trace_dpu_crtc_complete_commit(DRMID(crtc));
+ dpu_core_perf_crtc_update(crtc, 0);
+ _dpu_crtc_complete_flip(crtc);
+}
+
+static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
+ struct drm_display_mode *adj_mode = &state->adjusted_mode;
+ u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
+ int i;
+
+ for (i = 0; i < cstate->num_mixers; i++) {
+ struct drm_rect *r = &cstate->lm_bounds[i];
+ r->x1 = crtc_split_width * i;
+ r->y1 = 0;
+ r->x2 = r->x1 + crtc_split_width;
+ r->y2 = adj_mode->vdisplay;
+
+ trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
+ }
+}
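+
+/*
+ * E.g. (illustrative mode) a 2160x1080 adjusted mode split across two
+ * mixers yields crtc_split_width = 1080 and lm_bounds of
+ * (0,0)-(1080,1080) and (1080,0)-(2160,1080), one rectangle per mixer.
+ */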
+
+static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
+ struct dpu_hw_pcc_cfg *cfg)
+{
+ struct drm_color_ctm *ctm;
+
+ memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg));
+
+ ctm = (struct drm_color_ctm *)state->ctm->data;
+
+ if (!ctm)
+ return;
+
+ cfg->r.r = CONVERT_S3_15(ctm->matrix[0]);
+ cfg->g.r = CONVERT_S3_15(ctm->matrix[1]);
+ cfg->b.r = CONVERT_S3_15(ctm->matrix[2]);
+
+ cfg->r.g = CONVERT_S3_15(ctm->matrix[3]);
+ cfg->g.g = CONVERT_S3_15(ctm->matrix[4]);
+ cfg->b.g = CONVERT_S3_15(ctm->matrix[5]);
+
+ cfg->r.b = CONVERT_S3_15(ctm->matrix[6]);
+ cfg->g.b = CONVERT_S3_15(ctm->matrix[7]);
+ cfg->b.b = CONVERT_S3_15(ctm->matrix[8]);
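+
+ /*
+ * E.g. an identity CTM (matrix[0] = matrix[4] = matrix[8] =
+ * 1ULL << 32, rest zero) programs cfg->r.r = cfg->g.g = cfg->b.b
+ * to 0x8000 and zeroes every cross term.
+ */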
+}
+
+static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
+{
+ struct drm_crtc_state *state = crtc->state;
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
+ struct dpu_crtc_mixer *mixer = cstate->mixers;
+ struct dpu_hw_pcc_cfg cfg;
+ struct dpu_hw_ctl *ctl;
+ struct dpu_hw_dspp *dspp;
+ int i;
+
+ if (!state->color_mgmt_changed && !drm_atomic_crtc_needs_modeset(state))
+ return;
+
+ for (i = 0; i < cstate->num_mixers; i++) {
+ ctl = mixer[i].lm_ctl;
+ dspp = mixer[i].hw_dspp;
+
+ if (!dspp || !dspp->ops.setup_pcc)
+ continue;
+
+ if (!state->ctm) {
+ dspp->ops.setup_pcc(dspp, NULL);
+ } else {
+ _dpu_crtc_get_pcc_coeff(state, &cfg);
+ dspp->ops.setup_pcc(dspp, &cfg);
+ }
+
+ /* stage config flush mask */
+ ctl->ops.update_pending_flush_dspp(ctl,
+ mixer[i].hw_dspp->idx, DPU_DSPP_PCC);
+ }
+}
+
+static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
+ struct drm_encoder *encoder;
+
+ if (!crtc->state->enable) {
+ DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_begin\n",
+ crtc->base.id, crtc->state->enable);
+ return;
+ }
+
+ DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
+
+ _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
+
+ /* encoder will trigger pending mask now */
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+ dpu_encoder_trigger_kickoff_pending(encoder);
+
+ /*
+ * If no mixers have been allocated in dpu_crtc_atomic_check(),
+ * it means we are trying to flush a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ if (unlikely(!cstate->num_mixers))
+ return;
+
+ _dpu_crtc_blend_setup(crtc);
+
+ _dpu_crtc_setup_cp_blocks(crtc);
+
+ /*
+ * The PP_DONE irq is only used by command mode for now.
+ * It is better to request it as pending before the FLUSH and START
+ * triggers so that no pp_done irq is missed.
+ * This is safe because no pp_done can happen before the SW trigger
+ * in command mode.
+ */
+}
+
+static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct drm_device *dev;
+ struct drm_plane *plane;
+ struct msm_drm_private *priv;
+ unsigned long flags;
+ struct dpu_crtc_state *cstate;
+
+ if (!crtc->state->enable) {
+ DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_flush\n",
+ crtc->base.id, crtc->state->enable);
+ return;
+ }
+
+ DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(crtc->state);
+ dev = crtc->dev;
+ priv = dev->dev_private;
+
+ if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
+ DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
+ return;
+ }
+
+ WARN_ON(dpu_crtc->event);
+ spin_lock_irqsave(&dev->event_lock, flags);
+ dpu_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ /*
+ * If no mixers have been allocated in dpu_crtc_atomic_check(),
+ * it means we are trying to flush a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ if (unlikely(!cstate->num_mixers))
+ return;
+
+ /* update performance setting before crtc kickoff */
+ dpu_core_perf_crtc_update(crtc, 1);
+
+ /*
+ * Final plane updates: Give each plane a chance to complete all
+ * required writes/flushing before crtc's "flush
+ * everything" call below.
+ */
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ if (dpu_crtc->smmu_state.transition_error)
+ dpu_plane_set_error(plane, true);
+ dpu_plane_flush(plane);
+ }
+
+ /* Kickoff will be scheduled by outer layer */
+}
+
+/**
+ * dpu_crtc_destroy_state - state destroy hook
+ * @crtc: drm CRTC
+ * @state: CRTC state object to release
+ */
+static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
+
+ DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
+
+ __drm_atomic_helper_crtc_destroy_state(state);
+
+ kfree(cstate);
+}
+
+static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ int ret, rc = 0;
+
+ if (!atomic_read(&dpu_crtc->frame_pending)) {
+ DRM_DEBUG_ATOMIC("no frames pending\n");
+ return 0;
+ }
+
+ DPU_ATRACE_BEGIN("frame done completion wait");
+ ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
+ msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
+ if (!ret) {
+ DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
+ rc = -ETIMEDOUT;
+ }
+ DPU_ATRACE_END("frame done completion wait");
+
+ return rc;
+}
+
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
+
+ /*
+ * If no mixers have been allocated in dpu_crtc_atomic_check(),
+ * it means we are trying to start a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ if (unlikely(!cstate->num_mixers))
+ return;
+
+ DPU_ATRACE_BEGIN("crtc_commit");
+
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc->state->encoder_mask) {
+ if (!dpu_encoder_is_valid_for_commit(encoder)) {
+ DRM_DEBUG_ATOMIC("invalid FB not kicking off crtc\n");
+ goto end;
+ }
+ }
+ /*
+ * Encoder will flush/start now, unless it has a tx pending. If so, it
+ * may delay and flush at an irq event (e.g. ppdone)
+ */
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc->state->encoder_mask)
+ dpu_encoder_prepare_for_kickoff(encoder);
+
+ if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
+ /* acquire bandwidth and other resources */
+ DRM_DEBUG_ATOMIC("crtc%d first commit\n", crtc->base.id);
+ } else {
+ DRM_DEBUG_ATOMIC("crtc%d commit\n", crtc->base.id);
+ }
+
+ dpu_crtc->play_count++;
+
+ dpu_vbif_clear_errors(dpu_kms);
+
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+ dpu_encoder_kickoff(encoder);
+
+ reinit_completion(&dpu_crtc->frame_done_comp);
+
+end:
+ DPU_ATRACE_END("crtc_commit");
+}
+
+static void dpu_crtc_reset(struct drm_crtc *crtc)
+{
+ struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
+
+ if (crtc->state)
+ dpu_crtc_destroy_state(crtc, crtc->state);
+
+ if (cstate)
+ __drm_atomic_helper_crtc_reset(crtc, &cstate->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
+}
+
+/**
+ * dpu_crtc_duplicate_state - state duplicate hook
+ * @crtc: Pointer to drm crtc structure
+ */
+static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state);
+
+ cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
+ if (!cstate) {
+ DPU_ERROR("failed to allocate state\n");
+ return NULL;
+ }
+
+ /* duplicate base helper */
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
+
+ return &cstate->base;
+}
+
+static void dpu_crtc_atomic_print_state(struct drm_printer *p,
+ const struct drm_crtc_state *state)
+{
+ const struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
+ int i;
+
+ for (i = 0; i < cstate->num_mixers; i++) {
+ drm_printf(p, "\tlm[%d]=%d\n", i, cstate->mixers[i].hw_lm->idx - LM_0);
+ drm_printf(p, "\tctl[%d]=%d\n", i, cstate->mixers[i].lm_ctl->idx - CTL_0);
+ if (cstate->mixers[i].hw_dspp)
+ drm_printf(p, "\tdspp[%d]=%d\n", i, cstate->mixers[i].hw_dspp->idx - DSPP_0);
+ }
+}
+
+static void dpu_crtc_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
+ struct drm_encoder *encoder;
+ unsigned long flags;
+ bool release_bandwidth = false;
+
+ DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
+
+ /*
+ * If disable is triggered while in self-refresh mode, reset the
+ * encoder software state so that enable won't trigger a warning
+ * while assigning the crtc.
+ */
+ if (old_crtc_state->self_refresh_active) {
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ old_crtc_state->encoder_mask) {
+ dpu_encoder_assign_crtc(encoder, NULL);
+ }
+ return;
+ }
+
+ /* Disable/save vblank irq handling */
+ drm_crtc_vblank_off(crtc);
+
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ old_crtc_state->encoder_mask) {
+ /* in video mode, we hold an extra bandwidth reference
+ * as we cannot drop bandwidth at frame-done if any
+ * crtc is being used in video mode.
+ */
+ if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
+ release_bandwidth = true;
+
+ /*
+ * If disable is triggered while PSR is active (e.g. screen dim
+ * in PSR), we need the encoder->crtc connection to process the
+ * device sleep, so preserve it during the PSR sequence.
+ */
+ if (!crtc->state->self_refresh_active)
+ dpu_encoder_assign_crtc(encoder, NULL);
+ }
+
+ /* wait for frame_event_done completion */
+ if (_dpu_crtc_wait_for_frame_done(crtc))
+ DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
+ crtc->base.id,
+ atomic_read(&dpu_crtc->frame_pending));
+
+ trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
+ dpu_crtc->enabled = false;
+
+ if (atomic_read(&dpu_crtc->frame_pending)) {
+ trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
+ atomic_read(&dpu_crtc->frame_pending));
+ if (release_bandwidth)
+ dpu_core_perf_crtc_release_bw(crtc);
+ atomic_set(&dpu_crtc->frame_pending, 0);
+ }
+
+ dpu_core_perf_crtc_update(crtc, 0);
+
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+ dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);
+
+ memset(cstate->mixers, 0, sizeof(cstate->mixers));
+ cstate->num_mixers = 0;
+
+ /* disable clk & bw control until clk & bw properties are set */
+ cstate->bw_control = false;
+ cstate->bw_split_vote = false;
+
+ if (crtc->state->event && !crtc->state->active) {
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
+
+ pm_runtime_put_sync(crtc->dev->dev);
+}
+
+static void dpu_crtc_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct drm_encoder *encoder;
+ bool request_bandwidth = false;
+ struct drm_crtc_state *old_crtc_state;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+
+ pm_runtime_get_sync(crtc->dev->dev);
+
+ DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
+
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
+ /* in video mode, we hold an extra bandwidth reference
+ * as we cannot drop bandwidth at frame-done if any
+ * crtc is being used in video mode.
+ */
+ if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
+ request_bandwidth = true;
+ dpu_encoder_register_frame_event_callback(encoder,
+ dpu_crtc_frame_event_cb, (void *)crtc);
+ }
+
+ if (request_bandwidth)
+ atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);
+
+ trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
+ dpu_crtc->enabled = true;
+
+ if (!old_crtc_state->self_refresh_active) {
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+ dpu_encoder_assign_crtc(encoder, crtc);
+ }
+
+ /* Enable/restore vblank irq handling */
+ drm_crtc_vblank_on(crtc);
+}
+
+static bool dpu_crtc_needs_dirtyfb(struct drm_crtc_state *cstate)
+{
+ struct drm_crtc *crtc = cstate->crtc;
+ struct drm_encoder *encoder;
+
+ if (cstate->self_refresh_active)
+ return true;
+
+ drm_for_each_encoder_mask(encoder, crtc->dev, cstate->encoder_mask) {
+ if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_CMD)
+ return true;
+ }
+
+ return false;
+}
+
+static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc_state);
+
+ const struct drm_plane_state *pstate;
+ struct drm_plane *plane;
+
+ int rc = 0;
+
+ bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state);
+
+ if (!crtc_state->enable || !drm_atomic_crtc_effectively_active(crtc_state)) {
+ DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
+ crtc->base.id, crtc_state->enable,
+ crtc_state->active);
+ memset(&cstate->new_perf, 0, sizeof(cstate->new_perf));
+ return 0;
+ }
+
+ DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name);
+
+ /* force a full mode set if active state changed */
+ if (crtc_state->active_changed)
+ crtc_state->mode_changed = true;
+
+ if (cstate->num_mixers)
+ _dpu_crtc_setup_lm_bounds(crtc, crtc_state);
+
+ /* FIXME: move this to dpu_plane_atomic_check? */
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
+ struct dpu_plane_state *dpu_pstate = to_dpu_plane_state(pstate);
+
+ if (IS_ERR_OR_NULL(pstate)) {
+ rc = PTR_ERR(pstate);
+ DPU_ERROR("%s: failed to get plane%d state, %d\n",
+ dpu_crtc->name, plane->base.id, rc);
+ return rc;
+ }
+
+ if (!pstate->visible)
+ continue;
+
+ dpu_pstate->needs_dirtyfb = needs_dirtyfb;
+ }
+
+ atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);
+
+ rc = dpu_core_perf_crtc_check(crtc, crtc_state);
+ if (rc) {
+ DPU_ERROR("crtc%d failed performance check %d\n",
+ crtc->base.id, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct drm_encoder *enc;
+
+ trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
+
+ /*
+ * Normally we would iterate through encoder_mask in crtc state to find
+ * attached encoders. In this case, we might be disabling vblank _after_
+ * encoder_mask has been cleared.
+ *
+ * Instead, we "assign" a crtc to the encoder in enable and clear it in
+ * disable (which is also after encoder_mask is cleared). So instead of
+ * using encoder mask, we'll ask the encoder to toggle itself iff it's
+ * currently assigned to our crtc.
+ *
+ * Note also that this function cannot be called while crtc is disabled
+ * since we use drm_crtc_vblank_on/off. So we don't need to worry
+ * about the assigned crtcs being inconsistent with the current state
+ * (which means no need to worry about modeset locks).
+ */
+ list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
+ trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
+ dpu_crtc);
+
+ dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_plane_state *pstate = NULL;
+ struct dpu_crtc_mixer *m;
+
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+ struct drm_display_mode *mode;
+ struct drm_framebuffer *fb;
+ struct drm_plane_state *state;
+ struct dpu_crtc_state *cstate;
+
+ int i, out_width;
+
+ dpu_crtc = s->private;
+ crtc = &dpu_crtc->base;
+
+ drm_modeset_lock_all(crtc->dev);
+ cstate = to_dpu_crtc_state(crtc->state);
+
+ mode = &crtc->state->adjusted_mode;
+ out_width = mode->hdisplay / cstate->num_mixers;
+
+ seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
+ mode->hdisplay, mode->vdisplay);
+
+ seq_puts(s, "\n");
+
+ for (i = 0; i < cstate->num_mixers; ++i) {
+ m = &cstate->mixers[i];
+ seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
+ m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
+ out_width, mode->vdisplay);
+ }
+
+ seq_puts(s, "\n");
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ pstate = to_dpu_plane_state(plane->state);
+ state = plane->state;
+
+ if (!pstate || !state)
+ continue;
+
+ seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
+ pstate->stage);
+
+ if (plane->state->fb) {
+ fb = plane->state->fb;
+
+ seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
+ fb->base.id, (char *) &fb->format->format,
+ fb->width, fb->height);
+ for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
+ seq_printf(s, "cpp[%d]:%u ",
+ i, fb->format->cpp[i]);
+ seq_puts(s, "\n\t");
+
+ seq_printf(s, "modifier:%8llu ", fb->modifier);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\t");
+ for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
+ seq_printf(s, "pitches[%d]:%8u ", i,
+ fb->pitches[i]);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\t");
+ for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
+ seq_printf(s, "offsets[%d]:%8u ", i,
+ fb->offsets[i]);
+ seq_puts(s, "\n");
+ }
+
+ seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
+ state->src_x, state->src_y, state->src_w, state->src_h);
+
+ seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
+ state->crtc_x, state->crtc_y, state->crtc_w,
+ state->crtc_h);
+ seq_printf(s, "\tsspp[0]:%s\n",
+ pstate->pipe.sspp->cap->name);
+ seq_printf(s, "\tmultirect[0]: mode: %d index: %d\n",
+ pstate->pipe.multirect_mode, pstate->pipe.multirect_index);
+ if (pstate->r_pipe.sspp) {
+ seq_printf(s, "\tsspp[1]:%s\n",
+ pstate->r_pipe.sspp->cap->name);
+ seq_printf(s, "\tmultirect[1]: mode: %d index: %d\n",
+ pstate->r_pipe.multirect_mode, pstate->r_pipe.multirect_index);
+ }
+
+ seq_puts(s, "\n");
+ }
+ if (dpu_crtc->vblank_cb_count) {
+ ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
+ s64 diff_ms = ktime_to_ms(diff);
+ s64 fps = diff_ms ? div_s64(
+ dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;
+
+ seq_printf(s,
+ "vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
+ fps, dpu_crtc->vblank_cb_count,
+ ktime_to_ms(diff), dpu_crtc->play_count);
+
+ /* reset time & count for next measurement */
+ dpu_crtc->vblank_cb_count = 0;
+ dpu_crtc->vblank_cb_time = ktime_set(0, 0);
+ }
+
+ drm_modeset_unlock_all(crtc->dev);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status);
+
+static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
+{
+ struct drm_crtc *crtc = s->private;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+ seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
+ seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
+ seq_printf(s, "core_clk_rate: %llu\n",
+ dpu_crtc->cur_perf.core_clk_rate);
+ seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
+ seq_printf(s, "max_per_pipe_ib: %llu\n",
+ dpu_crtc->cur_perf.max_per_pipe_ib);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state);
+
+static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+ debugfs_create_file("status", 0400,
+ crtc->debugfs_entry,
+ dpu_crtc, &_dpu_debugfs_status_fops);
+ debugfs_create_file("state", 0600,
+ crtc->debugfs_entry,
+ &dpu_crtc->base,
+ &dpu_crtc_debugfs_state_fops);
+
+ return 0;
+}
+#else
+static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
+{
+ return 0;
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static int dpu_crtc_late_register(struct drm_crtc *crtc)
+{
+ return _dpu_crtc_init_debugfs(crtc);
+}
+
+static const struct drm_crtc_funcs dpu_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = dpu_crtc_destroy,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = dpu_crtc_reset,
+ .atomic_duplicate_state = dpu_crtc_duplicate_state,
+ .atomic_destroy_state = dpu_crtc_destroy_state,
+ .atomic_print_state = dpu_crtc_atomic_print_state,
+ .late_register = dpu_crtc_late_register,
+ .verify_crc_source = dpu_crtc_verify_crc_source,
+ .set_crc_source = dpu_crtc_set_crc_source,
+ .enable_vblank = msm_crtc_enable_vblank,
+ .disable_vblank = msm_crtc_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+ .get_vblank_counter = dpu_crtc_get_vblank_counter,
+};
+
+static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
+ .atomic_disable = dpu_crtc_disable,
+ .atomic_enable = dpu_crtc_enable,
+ .atomic_check = dpu_crtc_atomic_check,
+ .atomic_begin = dpu_crtc_atomic_begin,
+ .atomic_flush = dpu_crtc_atomic_flush,
+ .get_scanout_position = dpu_crtc_get_scanout_position,
+};
+
+/* initialize crtc */
+struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
+ struct drm_plane *cursor)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+ struct drm_crtc *crtc = NULL;
+ struct dpu_crtc *dpu_crtc = NULL;
+ int i, ret;
+
+ dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
+ if (!dpu_crtc)
+ return ERR_PTR(-ENOMEM);
+
+ crtc = &dpu_crtc->base;
+ crtc->dev = dev;
+
+ spin_lock_init(&dpu_crtc->spin_lock);
+ atomic_set(&dpu_crtc->frame_pending, 0);
+
+ init_completion(&dpu_crtc->frame_done_comp);
+
+ INIT_LIST_HEAD(&dpu_crtc->frame_event_list);
+
+ for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
+ INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
+ list_add(&dpu_crtc->frame_events[i].list,
+ &dpu_crtc->frame_event_list);
+ kthread_init_work(&dpu_crtc->frame_events[i].work,
+ dpu_crtc_frame_event_work);
+ }
+
+ drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
+ NULL);
+
+ drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
+
+ if (dpu_kms->catalog->dspp_count)
+ drm_crtc_enable_color_mgmt(crtc, 0, true, 0);
+
+ /* save user-friendly CRTC name for later */
+ snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
+
+ /* initialize event handling */
+ spin_lock_init(&dpu_crtc->event_lock);
+
+ ret = drm_self_refresh_helper_init(crtc);
+ if (ret) {
+ DPU_ERROR("Failed to initialize %s with self-refresh helpers %d\n",
+ crtc->name, ret);
+ return ERR_PTR(ret);
+ }
+
+ DRM_DEBUG_KMS("%s: successfully initialized crtc\n", dpu_crtc->name);
+ return crtc;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
new file mode 100644
index 0000000000..539b68b162
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -0,0 +1,303 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef _DPU_CRTC_H_
+#define _DPU_CRTC_H_
+
+#include <linux/kthread.h>
+#include <drm/drm_crtc.h>
+#include "dpu_kms.h"
+#include "dpu_core_perf.h"
+
+#define DPU_CRTC_NAME_SIZE 12
+
+/* define the maximum number of in-flight frame events */
+#define DPU_CRTC_FRAME_EVENT_SIZE 4
+
+/**
+ * enum dpu_crtc_client_type: crtc client type
+ * @RT_CLIENT: real-time client, e.g. a video/cmd mode display,
+ * voting through apps RSC
+ * @NRT_CLIENT: non-real-time client, e.g. a writeback (WB) display,
+ * voting through apps RSC
+ */
+enum dpu_crtc_client_type {
+ RT_CLIENT,
+ NRT_CLIENT,
+};
+
+/**
+ * enum dpu_crtc_smmu_state: smmu state
+ * @ATTACHED: all the context banks are attached.
+ * @DETACHED: all the context banks are detached.
+ * @ATTACH_ALL_REQ: transient state of attaching context banks.
+ * @DETACH_ALL_REQ: transient state of detaching context banks.
+ */
+enum dpu_crtc_smmu_state {
+ ATTACHED = 0,
+ DETACHED,
+ ATTACH_ALL_REQ,
+ DETACH_ALL_REQ,
+};
+
+/**
+ * enum dpu_crtc_smmu_state_transition_type: state transition type
+ * @NONE: no pending state transitions
+ * @PRE_COMMIT: state transitions should be done before processing the commit
+ * @POST_COMMIT: state transitions to be done after processing the commit.
+ */
+enum dpu_crtc_smmu_state_transition_type {
+ NONE,
+ PRE_COMMIT,
+ POST_COMMIT
+};
+
+/**
+ * struct dpu_crtc_smmu_state_data: stores the smmu state and transition type
+ * @state: current state of smmu context banks
+ * @transition_type: transition request type
+ * @transition_error: whether an error occurred while transitioning the state
+ */
+struct dpu_crtc_smmu_state_data {
+ uint32_t state;
+ uint32_t transition_type;
+ uint32_t transition_error;
+};
+
+/**
+ * enum dpu_crtc_crc_source: CRC source
+ * @DPU_CRTC_CRC_SOURCE_NONE: no source set
+ * @DPU_CRTC_CRC_SOURCE_LAYER_MIXER: CRC in layer mixer
+ * @DPU_CRTC_CRC_SOURCE_ENCODER: CRC in encoder
+ * @DPU_CRTC_CRC_SOURCE_INVALID: Invalid source
+ */
+enum dpu_crtc_crc_source {
+ DPU_CRTC_CRC_SOURCE_NONE = 0,
+ DPU_CRTC_CRC_SOURCE_LAYER_MIXER,
+ DPU_CRTC_CRC_SOURCE_ENCODER,
+ DPU_CRTC_CRC_SOURCE_MAX,
+ DPU_CRTC_CRC_SOURCE_INVALID = -1
+};
+
+/**
+ * struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC
+ * @hw_lm: LM HW Driver context
+ * @lm_ctl: CTL Path HW driver context
+ * @hw_dspp: DSPP HW driver context
+ * @mixer_op_mode: mixer blending operation mode
+ */
+struct dpu_crtc_mixer {
+ struct dpu_hw_mixer *hw_lm;
+ struct dpu_hw_ctl *lm_ctl;
+ struct dpu_hw_dspp *hw_dspp;
+ u32 mixer_op_mode;
+};
+
+/**
+ * struct dpu_crtc_frame_event: stores crtc frame event for crtc processing
+ * @work: base work structure
+ * @crtc: Pointer to crtc handling this event
+ * @list: event list
+ * @ts: timestamp at queue entry
+ * @event: event identifier
+ */
+struct dpu_crtc_frame_event {
+ struct kthread_work work;
+ struct drm_crtc *crtc;
+ struct list_head list;
+ ktime_t ts;
+ u32 event;
+};
+
+/*
+ * Maximum number of free event structures to cache
+ */
+#define DPU_CRTC_MAX_EVENT_COUNT 16
+
+/**
+ * struct dpu_crtc - virtualized CRTC data structure
+ * @base : Base drm crtc structure
+ * @name : ASCII description of this crtc
+ * @event : Pointer to last received drm vblank event. If there is a
+ * pending vblank event, this will be non-null.
+ * @vsync_count : Running count of received vsync events
+ * @vblank_cb_count : count of vblank callback since last reset
+ * @play_count : frame count between crtc enable and disable
+ * @vblank_cb_time : ktime at vblank count reset
+ * @enabled : whether the DPU CRTC is currently enabled. updated in the
+ * commit-thread, not state-swap time which is earlier, so
+ * safe to make decisions on during VBLANK on/off work
+ * @feature_list : list of color processing features supported on a crtc
+ * @active_list : list of color processing features that are active
+ * @dirty_list : list of color processing features that are dirty
+ * @ad_dirty: list containing ad properties that are dirty
+ * @ad_active: list containing ad properties that are active
+ * @frame_pending : Whether or not an update is pending
+ * @frame_events : static allocation of in-flight frame events
+ * @frame_event_list : available frame event list
+ * @spin_lock : spin lock for frame event, transaction status, etc...
+ * @frame_done_comp : for frame_event_done synchronization
+ * @event_lock : Spinlock around event handling code
+ * @cur_perf : current performance committed to clock/bandwidth driver
+ * @smmu_state : smmu state and transition tracking data
+ */
+struct dpu_crtc {
+ struct drm_crtc base;
+ char name[DPU_CRTC_NAME_SIZE];
+
+ struct drm_pending_vblank_event *event;
+ u32 vsync_count;
+
+ u32 vblank_cb_count;
+ u64 play_count;
+ ktime_t vblank_cb_time;
+ bool enabled;
+
+ struct list_head feature_list;
+ struct list_head active_list;
+ struct list_head dirty_list;
+ struct list_head ad_dirty;
+ struct list_head ad_active;
+
+ atomic_t frame_pending;
+ struct dpu_crtc_frame_event frame_events[DPU_CRTC_FRAME_EVENT_SIZE];
+ struct list_head frame_event_list;
+ spinlock_t spin_lock;
+ struct completion frame_done_comp;
+
+ /* for handling internal event thread */
+ spinlock_t event_lock;
+
+ struct dpu_core_perf_params cur_perf;
+
+ struct dpu_crtc_smmu_state_data smmu_state;
+};
+
+#define to_dpu_crtc(x) container_of(x, struct dpu_crtc, base)
+
+/**
+ * struct dpu_crtc_state - dpu container for atomic crtc state
+ * @base: Base drm crtc state structure
+ * @bw_control : true if bw/clk controlled by core bw/clk properties
+ * @bw_split_vote : true if bw controlled by llcc/dram bw properties
+ * @lm_bounds : LM boundaries based on current mode full resolution, no ROI.
+ * Origin top left of CRTC.
+ * @input_fence_timeout_ns : Cached input fence timeout, in ns
+ * @new_perf: new performance state being requested
+ * @num_mixers : Number of mixers in use
+ * @mixers : List of active mixers
+ * @num_ctls : Number of ctl paths in use
+ * @hw_ctls : List of active ctl paths
+ * @crc_source : CRC source
+ * @crc_frame_skip_count: Number of frames skipped before getting CRC
+ */
+struct dpu_crtc_state {
+ struct drm_crtc_state base;
+
+ bool bw_control;
+ bool bw_split_vote;
+ struct drm_rect lm_bounds[CRTC_DUAL_MIXERS];
+
+ uint64_t input_fence_timeout_ns;
+
+ struct dpu_core_perf_params new_perf;
+
+ /* HW Resources reserved for the crtc */
+ u32 num_mixers;
+ struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS];
+
+ u32 num_ctls;
+ struct dpu_hw_ctl *hw_ctls[CRTC_DUAL_MIXERS];
+
+ enum dpu_crtc_crc_source crc_source;
+ int crc_frame_skip_count;
+};
+
+#define to_dpu_crtc_state(x) \
+ container_of(x, struct dpu_crtc_state, base)
+
+/**
+ * dpu_crtc_frame_pending - return the number of pending frames
+ * @crtc: Pointer to drm crtc object
+ */
+static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
+{
+ return crtc ? atomic_read(&to_dpu_crtc(crtc)->frame_pending) : -EINVAL;
+}
+
+/**
+ * dpu_crtc_vblank - enable or disable vblanks for this crtc
+ * @crtc: Pointer to drm crtc object
+ * @en: true to enable vblanks, false to disable
+ */
+int dpu_crtc_vblank(struct drm_crtc *crtc, bool en);
+
+/**
+ * dpu_crtc_vblank_callback - called on vblank irq, issues completion events
+ * @crtc: Pointer to drm crtc object
+ */
+void dpu_crtc_vblank_callback(struct drm_crtc *crtc);
+
+/**
+ * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
+ * @crtc: Pointer to drm crtc object
+ */
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc);
+
+/**
+ * dpu_crtc_complete_commit - callback signalling completion of current commit
+ * @crtc: Pointer to drm crtc object
+ */
+void dpu_crtc_complete_commit(struct drm_crtc *crtc);
+
+/**
+ * dpu_crtc_init - create a new crtc object
+ * @dev: drm device
+ * @plane: base plane
+ * @cursor: cursor plane
+ * Return: new crtc object or error
+ */
+struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
+ struct drm_plane *cursor);
+
+/**
+ * dpu_crtc_register_custom_event - api for enabling/disabling crtc event
+ * @kms: Pointer to dpu_kms
+ * @crtc_drm: Pointer to crtc object
+ * @event: Event that the client is interested in
+ * @en: Flag to enable/disable the event
+ */
+int dpu_crtc_register_custom_event(struct dpu_kms *kms,
+ struct drm_crtc *crtc_drm, u32 event, bool en);
+
+/**
+ * dpu_crtc_get_intf_mode - get interface mode of the given crtc
+ * @crtc: Pointer to crtc
+ */
+enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc);
+
+/**
+ * dpu_crtc_get_client_type - check the crtc type: rt, nrt, etc.
+ * @crtc: Pointer to crtc
+ */
+static inline enum dpu_crtc_client_type dpu_crtc_get_client_type(
+ struct drm_crtc *crtc)
+{
+ return crtc && crtc->state ? RT_CLIENT : NRT_CLIENT;
+}
+
+#endif /* _DPU_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
new file mode 100644
index 0000000000..b02aa2eb6c
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -0,0 +1,2505 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/debugfs.h>
+#include <linux/kthread.h>
+#include <linux/seq_file.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_file.h>
+#include <drm/drm_probe_helper.h>
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_intf.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_dspp.h"
+#include "dpu_hw_dsc.h"
+#include "dpu_hw_merge3d.h"
+#include "dpu_formats.h"
+#include "dpu_encoder_phys.h"
+#include "dpu_crtc.h"
+#include "dpu_trace.h"
+#include "dpu_core_irq.h"
+#include "disp/msm_disp_snapshot.h"
+
+#define DPU_DEBUG_ENC(e, fmt, ...) DRM_DEBUG_ATOMIC("enc%d " fmt,\
+ (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
+ (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
+/*
+ * Two, to anticipate panels that can do cmd/vid dynamic switching;
+ * the plan is to create all possible physical encoder types and switch
+ * between them at runtime.
+ */
+#define NUM_PHYS_ENCODER_TYPES 2
+
+#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
+ (MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
+
+#define MAX_CHANNELS_PER_ENC 2
+
+#define IDLE_SHORT_TIMEOUT 1
+
+#define MAX_HDISPLAY_SPLIT 1080
+
+/* timeout in frames waiting for frame done */
+#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
+
+/**
+ * enum dpu_enc_rc_events - events for resource control state machine
+ * @DPU_ENC_RC_EVENT_KICKOFF:
+ * This event happens at NORMAL priority.
+ * Event that signals the start of the transfer. When this event is
+ * received, enable MDP/DSI core clocks. Regardless of the previous
+ * state, the resource should be in ON state at the end of this event.
+ * @DPU_ENC_RC_EVENT_FRAME_DONE:
+ * This event happens at INTERRUPT level.
+ * Event signals the end of the data transfer after the PP FRAME_DONE
+ * event. At the end of this event, a delayed work is scheduled to go to
+ * IDLE_PC state after IDLE_TIMEOUT time.
+ * @DPU_ENC_RC_EVENT_PRE_STOP:
+ * This event happens at NORMAL priority.
+ * This event, when received during the ON state, leave the RC STATE
+ * in the PRE_OFF state. It should be followed by the STOP event as
+ * part of encoder disable.
+ * If received during IDLE or OFF states, it will do nothing.
+ * @DPU_ENC_RC_EVENT_STOP:
+ * This event happens at NORMAL priority.
+ * When this event is received, disable all the MDP/DSI core clocks, and
+ * disable IRQs. It should be called from the PRE_OFF or IDLE states.
+ * IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
+ * PRE_OFF is expected when PRE_STOP was executed during the ON state.
+ * Resource state should be in OFF at the end of the event.
+ * @DPU_ENC_RC_EVENT_ENTER_IDLE:
+ * This event happens at NORMAL priority from a work item.
+ * Event signals that there were no frame updates for IDLE_TIMEOUT time.
+ * This would disable MDP/DSI core clocks and change the resource state
+ * to IDLE.
+ */
+enum dpu_enc_rc_events {
+ DPU_ENC_RC_EVENT_KICKOFF = 1,
+ DPU_ENC_RC_EVENT_FRAME_DONE,
+ DPU_ENC_RC_EVENT_PRE_STOP,
+ DPU_ENC_RC_EVENT_STOP,
+ DPU_ENC_RC_EVENT_ENTER_IDLE
+};
+
+/*
+ * enum dpu_enc_rc_states - states that the resource control maintains
+ * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
+ * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
+ * @DPU_ENC_RC_STATE_ON: Resource is in ON state
+ * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
+ */
+enum dpu_enc_rc_states {
+ DPU_ENC_RC_STATE_OFF,
+ DPU_ENC_RC_STATE_PRE_OFF,
+ DPU_ENC_RC_STATE_ON,
+ DPU_ENC_RC_STATE_IDLE
+};
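+
+/*
+ * Editorial illustration (not part of the original change): a typical walk
+ * through the event/state machine documented above, assuming a cmd-mode
+ * panel with idle PC supported:
+ *
+ *   OFF --KICKOFF--> ON --FRAME_DONE--> ON (idle-off work queued)
+ *   ON --ENTER_IDLE--> IDLE --KICKOFF--> ON
+ *   ON --PRE_STOP--> PRE_OFF --STOP--> OFF
+ *   IDLE --STOP--> OFF (resources were already disabled at idle)
+ */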
+
+/**
+ * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
+ * encoders. Virtual encoder manages one "logical" display. Physical
+ * encoders manage one intf block, tied to a specific panel/sub-panel.
+ * Virtual encoder defers as much as possible to the physical encoders.
+ * Virtual encoder registers itself with the DRM Framework as the encoder.
+ * @base: drm_encoder base class for registration with DRM
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @enabled: True if the encoder is active, protected by enc_lock
+ * @num_phys_encs: Actual number of physical encoders contained.
+ * @phys_encs: Container of physical encoders managed.
+ * @cur_master: Pointer to the current master in this mode; an
+ * optimization, only valid after enable and cleared at disable.
+ * @cur_slave: As above but for the slave encoder.
+ * @hw_pp: Handle to the pingpong blocks used for the display. The
+ * number of pingpong blocks can differ from num_phys_encs.
+ * @hw_dsc: Handle to the DSC blocks used for the display.
+ * @dsc_mask: Bitmask of used DSC blocks.
+ * @intfs_swapped: Whether or not the phys_enc interfaces have been swapped
+ * for partial update right-only cases, such as pingpong
+ * split where virtual pingpong does not generate IRQs
+ * @crtc: Pointer to the currently assigned crtc. Normally you
+ * would use crtc->state->encoder_mask to determine the
+ * link between encoder/crtc. However in this case we need
+ * to track crtc in the disable() hook which is called
+ * _after_ encoder_mask is cleared.
+ * @connector: If a mode is set, cached pointer to the active connector
+ * @crtc_kickoff_cb: Callback into CRTC that will flush & start
+ * all CTL paths
+ * @crtc_kickoff_cb_data: Opaque user data given to crtc_kickoff_cb
+ * @debugfs_root: Debug file system root file node
+ * @enc_lock: Lock around physical encoder
+ * create/destroy/enable/disable
+ * @frame_busy_mask: Bitmask tracking which phys_encs are still
+ * busy processing the current command.
+ * Bit0 = phys_encs[0] etc.
+ * @crtc_frame_event_cb: callback handler for frame event
+ * @crtc_frame_event_cb_data: callback handler private data
+ * @frame_done_timeout_ms: frame done timeout in ms
+ * @frame_done_timer: watchdog timer for frame done event
+ * @disp_info: local copy of msm_display_info struct
+ * @idle_pc_supported: indicate if idle power collapse is supported
+ * @rc_lock: resource control mutex lock to protect
+ * virt encoder over various state changes
+ * @rc_state: resource controller state
+ * @delayed_off_work: delayed worker to schedule disabling of
+ * clks and resources after IDLE_TIMEOUT time.
+ * @topology: topology of the display
+ * @idle_timeout: idle timeout duration in milliseconds
+ * @wide_bus_en: wide bus is enabled on this interface
+ * @dsc: drm_dsc_config pointer, for DSC-enabled encoders
+ */
+struct dpu_encoder_virt {
+ struct drm_encoder base;
+ spinlock_t enc_spinlock;
+
+ bool enabled;
+
+ unsigned int num_phys_encs;
+ struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
+ struct dpu_encoder_phys *cur_master;
+ struct dpu_encoder_phys *cur_slave;
+ struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
+
+ unsigned int dsc_mask;
+
+ bool intfs_swapped;
+
+ struct drm_crtc *crtc;
+ struct drm_connector *connector;
+
+ struct dentry *debugfs_root;
+ struct mutex enc_lock;
+ DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
+ void (*crtc_frame_event_cb)(void *, u32 event);
+ void *crtc_frame_event_cb_data;
+
+ atomic_t frame_done_timeout_ms;
+ struct timer_list frame_done_timer;
+
+ struct msm_display_info disp_info;
+
+ bool idle_pc_supported;
+ struct mutex rc_lock;
+ enum dpu_enc_rc_states rc_state;
+ struct delayed_work delayed_off_work;
+ struct msm_display_topology topology;
+
+ u32 idle_timeout;
+
+ bool wide_bus_en;
+
+ /* DSC configuration */
+ struct drm_dsc_config *dsc;
+};
+
+#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
+
+static u32 dither_matrix[DITHER_MATRIX_SZ] = {
+ 15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
+};
+
+
+bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
+{
+ const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ return dpu_enc->wide_bus_en;
+}
+
+int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i, num_intf = 0;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys->hw_intf && phys->hw_intf->ops.setup_misr
+ && phys->hw_intf->ops.collect_misr)
+ num_intf++;
+ }
+
+ return num_intf;
+}
+
+void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+
+ int i;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys->hw_intf || !phys->hw_intf->ops.setup_misr)
+ continue;
+
+ phys->hw_intf->ops.setup_misr(phys->hw_intf);
+ }
+}
+
+int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos)
+{
+ struct dpu_encoder_virt *dpu_enc;
+
+ int i, rc = 0, entries_added = 0;
+
+ if (!drm_enc->crtc) {
+ DRM_ERROR("no crtc found for encoder %d\n", drm_enc->index);
+ return -EINVAL;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys->hw_intf || !phys->hw_intf->ops.collect_misr)
+ continue;
+
+ rc = phys->hw_intf->ops.collect_misr(phys->hw_intf, &crcs[pos + entries_added]);
+ if (rc)
+ return rc;
+ entries_added++;
+ }
+
+ return entries_added;
+}
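+
+/*
+ * Hypothetical usage sketch (editorial) for the three MISR/CRC helpers
+ * above, e.g. from a debugfs CRC consumer; the kcalloc sizing and the
+ * frame wait are assumptions, not code from this change:
+ *
+ *	int cnt = dpu_encoder_get_crc_values_cnt(drm_enc);
+ *	u32 *crcs = kcalloc(cnt, sizeof(*crcs), GFP_KERNEL);
+ *
+ *	dpu_encoder_setup_misr(drm_enc);        (arm the MISR blocks once)
+ *	... wait for at least one frame ...
+ *	dpu_encoder_get_crc(drm_enc, crcs, 0);  (one entry per interface)
+ */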
+
+static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
+{
+ struct dpu_hw_dither_cfg dither_cfg = { 0 };
+
+ if (!hw_pp->ops.setup_dither)
+ return;
+
+ switch (bpc) {
+ case 6:
+ dither_cfg.c0_bitdepth = 6;
+ dither_cfg.c1_bitdepth = 6;
+ dither_cfg.c2_bitdepth = 6;
+ dither_cfg.c3_bitdepth = 6;
+ dither_cfg.temporal_en = 0;
+ break;
+ default:
+ hw_pp->ops.setup_dither(hw_pp, NULL);
+ return;
+ }
+
+ memcpy(&dither_cfg.matrix, dither_matrix,
+ sizeof(u32) * DITHER_MATRIX_SZ);
+
+ hw_pp->ops.setup_dither(hw_pp, &dither_cfg);
+}
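+
+/*
+ * Editorial note: only 6 bpc panels get spatial dithering here, with all
+ * four components dithered down to 6 bits and temporal dithering left off;
+ * any other depth resets the dither block by passing a NULL config.
+ */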
+
+static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode intf_mode)
+{
+ switch (intf_mode) {
+ case INTF_MODE_VIDEO:
+ return "INTF_MODE_VIDEO";
+ case INTF_MODE_CMD:
+ return "INTF_MODE_CMD";
+ case INTF_MODE_WB_BLOCK:
+ return "INTF_MODE_WB_BLOCK";
+ case INTF_MODE_WB_LINE:
+ return "INTF_MODE_WB_LINE";
+ default:
+ return "INTF_MODE_UNKNOWN";
+ }
+}
+
+void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx)
+{
+ DRM_ERROR("irq timeout id=%u, intf_mode=%s intf=%d wb=%d, pp=%d, intr=%d\n",
+ DRMID(phys_enc->parent),
+ dpu_encoder_helper_get_intf_type(phys_enc->intf_mode),
+ phys_enc->hw_intf ? phys_enc->hw_intf->idx - INTF_0 : -1,
+ phys_enc->hw_wb ? phys_enc->hw_wb->idx - WB_0 : -1,
+ phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
+
+ dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc,
+ DPU_ENCODER_FRAME_EVENT_ERROR);
+}
+
+static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
+ u32 irq_idx, struct dpu_encoder_wait_info *info);
+
+int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
+ int irq,
+ void (*func)(void *arg, int irq_idx),
+ struct dpu_encoder_wait_info *wait_info)
+{
+ u32 irq_status;
+ int ret;
+
+ if (!wait_info) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+ /* note: do master / slave checking outside */
+
+ /* return EWOULDBLOCK since we know the wait isn't necessary */
+ if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+ DRM_ERROR("encoder is disabled id=%u, callback=%ps, irq=%d\n",
+ DRMID(phys_enc->parent), func,
+ irq);
+ return -EWOULDBLOCK;
+ }
+
+ if (irq < 0) {
+ DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n",
+ DRMID(phys_enc->parent), func);
+ return 0;
+ }
+
+ DRM_DEBUG_KMS("id=%u, callback=%ps, irq=%d, pp=%d, pending_cnt=%d\n",
+ DRMID(phys_enc->parent), func,
+ irq, phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+
+ ret = dpu_encoder_helper_wait_event_timeout(
+ DRMID(phys_enc->parent),
+ irq,
+ wait_info);
+
+ if (ret <= 0) {
+ irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq);
+ if (irq_status) {
+ unsigned long flags;
+
+ DRM_DEBUG_KMS("irq not triggered id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
+ DRMID(phys_enc->parent), func,
+ irq,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+ local_irq_save(flags);
+ func(phys_enc, irq);
+ local_irq_restore(flags);
+ ret = 0;
+ } else {
+ ret = -ETIMEDOUT;
+ DRM_DEBUG_KMS("irq timeout id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
+ DRMID(phys_enc->parent), func,
+ irq,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+ }
+ } else {
+ ret = 0;
+ trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
+ func, irq,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+ }
+
+ return ret;
+}
+
+int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ struct dpu_encoder_phys *phys = dpu_enc ? dpu_enc->cur_master : NULL;
+ return phys ? atomic_read(&phys->vsync_cnt) : 0;
+}
+
+int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ int linecount = 0;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ phys = dpu_enc ? dpu_enc->cur_master : NULL;
+
+ if (phys && phys->ops.get_line_count)
+ linecount = phys->ops.get_line_count(phys);
+
+ return linecount;
+}
+
+static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i = 0;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ mutex_lock(&dpu_enc->enc_lock);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys->ops.destroy) {
+ phys->ops.destroy(phys);
+ --dpu_enc->num_phys_encs;
+ dpu_enc->phys_encs[i] = NULL;
+ }
+ }
+
+ if (dpu_enc->num_phys_encs)
+ DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
+ dpu_enc->num_phys_encs);
+ dpu_enc->num_phys_encs = 0;
+ mutex_unlock(&dpu_enc->enc_lock);
+
+ drm_encoder_cleanup(drm_enc);
+ mutex_destroy(&dpu_enc->enc_lock);
+}
+
+void dpu_encoder_helper_split_config(
+ struct dpu_encoder_phys *phys_enc,
+ enum dpu_intf interface)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct split_pipe_cfg cfg = { 0 };
+ struct dpu_hw_mdp *hw_mdptop;
+ struct msm_display_info *disp_info;
+
+ if (!phys_enc->hw_mdptop || !phys_enc->parent) {
+ DPU_ERROR("invalid mdptop %d or parent %d\n",
+ phys_enc->hw_mdptop != NULL, phys_enc->parent != NULL);
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
+ hw_mdptop = phys_enc->hw_mdptop;
+ disp_info = &dpu_enc->disp_info;
+
+ if (disp_info->intf_type != INTF_DSI)
+ return;
+
+ /*
+ * Disable split modes, since the encoder will be operating as the
+ * only encoder: either for the entire use case (e.g. single DSI),
+ * or for this frame in the case of left/right-only partial update.
+ */
+ if (phys_enc->split_role == ENC_ROLE_SOLO) {
+ if (hw_mdptop->ops.setup_split_pipe)
+ hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
+ return;
+ }
+
+ cfg.en = true;
+ cfg.mode = phys_enc->intf_mode;
+ cfg.intf = interface;
+
+ if (cfg.en && phys_enc->ops.needs_single_flush &&
+ phys_enc->ops.needs_single_flush(phys_enc))
+ cfg.split_flush_en = true;
+
+ if (phys_enc->split_role == ENC_ROLE_MASTER) {
+ DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);
+
+ if (hw_mdptop->ops.setup_split_pipe)
+ hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
+ }
+}
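+
+/*
+ * Illustrative cases (editorial): a single-DSI panel runs as ENC_ROLE_SOLO,
+ * so split modes are simply disabled; a dual-DSI panel programs the split
+ * pipe once from the ENC_ROLE_MASTER instance, with split_flush enabled
+ * when the physical encoders report needs_single_flush().
+ */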
+
+bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ int i, intf_count = 0, num_dsc = 0;
+
+ for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
+ if (dpu_enc->phys_encs[i])
+ intf_count++;
+
+ /* See dpu_encoder_get_topology, we only support 2:2:1 topology */
+ if (dpu_enc->dsc)
+ num_dsc = 2;
+
+ return (num_dsc > 0) && (num_dsc > intf_count);
+}
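+
+/*
+ * Worked example (editorial): in the only supported DSC topology (2 DSC,
+ * 2 LM, 1 INTF), num_dsc = 2 and intf_count = 1, so DSC merge is in use;
+ * without DSC, num_dsc = 0 and the function returns false.
+ */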
+
+static struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
+{
+ struct msm_drm_private *priv = drm_enc->dev->dev_private;
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ int index = dpu_enc->disp_info.h_tile_instance[0];
+
+ if (dpu_enc->disp_info.intf_type == INTF_DSI)
+ return msm_dsi_get_dsc_config(priv->dsi[index]);
+
+ return NULL;
+}
+
+static struct msm_display_topology dpu_encoder_get_topology(
+ struct dpu_encoder_virt *dpu_enc,
+ struct dpu_kms *dpu_kms,
+ struct drm_display_mode *mode,
+ struct drm_crtc_state *crtc_state,
+ struct drm_dsc_config *dsc)
+{
+ struct msm_display_topology topology = {0};
+ int i, intf_count = 0;
+
+ for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
+ if (dpu_enc->phys_encs[i])
+ intf_count++;
+
+ /* Datapath topology selection
+ *
+ * Dual display
+ * 2 LM, 2 INTF ( Split display using 2 interfaces)
+ *
+ * Single display
+ * 1 LM, 1 INTF
+ * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
+ *
+ * Add dspps to the reservation requirements if ctm is requested
+ */
+ if (intf_count == 2)
+ topology.num_lm = 2;
+ else if (!dpu_kms->catalog->caps->has_3d_merge)
+ topology.num_lm = 1;
+ else
+ topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
+
+ if (crtc_state->ctm)
+ topology.num_dspp = topology.num_lm;
+
+ topology.num_intf = intf_count;
+
+ if (dsc) {
+ /*
+ * In case of Display Stream Compression (DSC), we would use
+ * 2 DSC encoders, 2 layer mixers and 1 interface
+ * this is power optimal and can drive up to (including) 4k
+ * screens
+ */
+ topology.num_dsc = 2;
+ topology.num_lm = 2;
+ topology.num_intf = 1;
+ }
+
+ return topology;
+}
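+
+/*
+ * Worked examples (editorial), assuming the catalog sets has_3d_merge:
+ *   single DSI, hdisplay 1080:  1 LM, 1 INTF
+ *   single DSI, hdisplay 1440:  2 LM, 1 INTF (stream merge)
+ *   dual-DSI split display:     2 LM, 2 INTF
+ *   any DSC panel:              forced to 2 DSC, 2 LM, 1 INTF
+ * A CTM request additionally reserves one DSPP per LM.
+ */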
+
+static int dpu_encoder_virt_atomic_check(
+ struct drm_encoder *drm_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct drm_display_mode *adj_mode;
+ struct msm_display_topology topology;
+ struct dpu_global_state *global_state;
+ struct drm_dsc_config *dsc;
+ int i = 0;
+ int ret = 0;
+
+ if (!drm_enc || !crtc_state || !conn_state) {
+ DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
+ drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
+ return -EINVAL;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+ adj_mode = &crtc_state->adjusted_mode;
+ global_state = dpu_kms_get_global_state(crtc_state->state);
+ if (IS_ERR(global_state))
+ return PTR_ERR(global_state);
+
+ trace_dpu_enc_atomic_check(DRMID(drm_enc));
+
+ /* perform atomic check on the first physical encoder (master) */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys->ops.atomic_check)
+ ret = phys->ops.atomic_check(phys, crtc_state,
+ conn_state);
+ if (ret) {
+ DPU_ERROR_ENC(dpu_enc,
+ "mode unsupported, phys idx %d\n", i);
+ return ret;
+ }
+ }
+
+ dsc = dpu_encoder_get_dsc_config(drm_enc);
+
+ topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc);
+
+ /*
+ * Release and allocate resources on every modeset.
+ * Don't allocate when active is false.
+ */
+ if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ dpu_rm_release(global_state, drm_enc);
+
+ if (!crtc_state->active_changed || crtc_state->enable)
+ ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
+ drm_enc, crtc_state, topology);
+ }
+
+ trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);
+
+ return ret;
+}
+
+static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
+ struct msm_display_info *disp_info)
+{
+ struct dpu_vsync_source_cfg vsync_cfg = { 0 };
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct dpu_hw_mdp *hw_mdptop;
+ struct drm_encoder *drm_enc;
+ struct dpu_encoder_phys *phys_enc;
+ int i;
+
+ if (!dpu_enc || !disp_info) {
+ DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
+ dpu_enc != NULL, disp_info != NULL);
+ return;
+ } else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
+ DPU_ERROR("invalid num phys enc %d/%d\n",
+ dpu_enc->num_phys_encs,
+ (int) ARRAY_SIZE(dpu_enc->hw_pp));
+ return;
+ }
+
+ drm_enc = &dpu_enc->base;
+ /* these pointers are checked in virt_enable_helper */
+ priv = drm_enc->dev->dev_private;
+
+ dpu_kms = to_dpu_kms(priv->kms);
+ hw_mdptop = dpu_kms->hw_mdp;
+ if (!hw_mdptop) {
+ DPU_ERROR("invalid mdptop\n");
+ return;
+ }
+
+ if (hw_mdptop->ops.setup_vsync_source &&
+ disp_info->is_cmd_mode) {
+ for (i = 0; i < dpu_enc->num_phys_encs; i++)
+ vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;
+
+ vsync_cfg.pp_count = dpu_enc->num_phys_encs;
+ vsync_cfg.frame_rate = drm_mode_vrefresh(&dpu_enc->base.crtc->state->adjusted_mode);
+
+ if (disp_info->is_te_using_watchdog_timer)
+ vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
+ else
+ vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;
+
+ hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys_enc = dpu_enc->phys_encs[i];
+
+ if (phys_enc->has_intf_te && phys_enc->hw_intf->ops.vsync_sel)
+ phys_enc->hw_intf->ops.vsync_sel(phys_enc->hw_intf,
+ vsync_cfg.vsync_source);
+ }
+ }
+}
+
+static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys->ops.irq_control)
+ phys->ops.irq_control(phys, enable);
+ }
+}
+
+static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
+ bool enable)
+{
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct dpu_encoder_virt *dpu_enc;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);
+
+ if (!dpu_enc->cur_master) {
+ DPU_ERROR("encoder master not set\n");
+ return;
+ }
+
+ if (enable) {
+ /* enable DPU core clks */
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+ /* enable all the irq */
+ _dpu_encoder_irq_control(drm_enc, true);
+
+ } else {
+ /* disable all the irq */
+ _dpu_encoder_irq_control(drm_enc, false);
+
+ /* disable DPU core clks */
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+ }
+}
+
+static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
+ u32 sw_event)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv;
+ bool is_vid_mode = false;
+
+ if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
+ DPU_ERROR("invalid parameters\n");
+ return -EINVAL;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ priv = drm_enc->dev->dev_private;
+ is_vid_mode = !dpu_enc->disp_info.is_cmd_mode;
+
+ /*
+ * when idle_pc is not supported, process only KICKOFF, PRE_STOP and
+ * STOP events and return early for other events (i.e. wb display).
+ */
+ if (!dpu_enc->idle_pc_supported &&
+ (sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
+ sw_event != DPU_ENC_RC_EVENT_STOP &&
+ sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
+ return 0;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
+ dpu_enc->rc_state, "begin");
+
+ switch (sw_event) {
+ case DPU_ENC_RC_EVENT_KICKOFF:
+ /* cancel delayed off work, if any */
+ if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
+ DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
+ sw_event);
+
+ mutex_lock(&dpu_enc->rc_lock);
+
+ /* return if the resource control is already in ON state */
+ if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
+ DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in ON state\n",
+ DRMID(drm_enc), sw_event);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ } else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
+ dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
+ DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in state %d\n",
+ DRMID(drm_enc), sw_event,
+ dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return -EINVAL;
+ }
+
+ if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
+ _dpu_encoder_irq_control(drm_enc, true);
+ else
+ _dpu_encoder_resource_control_helper(drm_enc, true);
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "kickoff");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ case DPU_ENC_RC_EVENT_FRAME_DONE:
+ /*
+ * mutex lock is not used as this event happens at interrupt
+ * context. And locking is not required as, the other events
+ * like KICKOFF and STOP does a wait-for-idle before executing
+ * the resource_control
+ */
+ if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
+ DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
+ DRMID(drm_enc), sw_event,
+ dpu_enc->rc_state);
+ return -EINVAL;
+ }
+
+ /*
+ * schedule off work item only when there are no
+ * frames pending
+ */
+ if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
+ DRM_DEBUG_KMS("id:%d skip schedule work\n",
+ DRMID(drm_enc));
+ return 0;
+ }
+
+ queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
+ msecs_to_jiffies(dpu_enc->idle_timeout));
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "frame done");
+ break;
+
+ case DPU_ENC_RC_EVENT_PRE_STOP:
+ /* cancel delayed off work, if any */
+ if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
+ DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
+ sw_event);
+
+ mutex_lock(&dpu_enc->rc_lock);
+
+ if (is_vid_mode &&
+ dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
+ _dpu_encoder_irq_control(drm_enc, true);
+ }
+ /* skip if is already OFF or IDLE, resources are off already */
+ else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
+ dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
+ DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
+ DRMID(drm_enc), sw_event,
+ dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ }
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "pre stop");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ case DPU_ENC_RC_EVENT_STOP:
+ mutex_lock(&dpu_enc->rc_lock);
+
+ /* return if the resource control is already in OFF state */
+ if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
+ DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
+ DRMID(drm_enc), sw_event);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ } else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
+ DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
+ DRMID(drm_enc), sw_event, dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return -EINVAL;
+ }
+
+ /*
+ * expect to arrive here only if in either idle state or pre-off;
+ * in the IDLE state the resources are already disabled
+ */
+ if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
+ _dpu_encoder_resource_control_helper(drm_enc, false);
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "stop");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ case DPU_ENC_RC_EVENT_ENTER_IDLE:
+ mutex_lock(&dpu_enc->rc_lock);
+
+ if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
+ DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
+ DRMID(drm_enc), sw_event, dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ }
+
+ /*
+ * if we are in ON but a frame was just kicked off,
+ * ignore the IDLE event, it's probably a stale timer event
+ */
+ if (dpu_enc->frame_busy_mask[0]) {
+ DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
+ DRMID(drm_enc), sw_event, dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ }
+
+ if (is_vid_mode)
+ _dpu_encoder_irq_control(drm_enc, false);
+ else
+ _dpu_encoder_resource_control_helper(drm_enc, false);
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "idle");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ default:
+ DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
+ sw_event);
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "error");
+ break;
+ }
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "end");
+ return 0;
+}
+
+void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
+ struct drm_writeback_job *job)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys->ops.prepare_wb_job)
+ phys->ops.prepare_wb_job(phys, job);
+ }
+}
+
+void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
+ struct drm_writeback_job *job)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys->ops.cleanup_wb_job)
+ phys->ops.cleanup_wb_job(phys, job);
+ }
+}
+
+static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct dpu_crtc_state *cstate;
+ struct dpu_global_state *global_state;
+ struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
+ struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
+ int num_lm, num_ctl, num_pp, num_dsc;
+ unsigned int dsc_mask = 0;
+ int i;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ global_state = dpu_kms_get_existing_global_state(dpu_kms);
+ if (IS_ERR_OR_NULL(global_state)) {
+ DPU_ERROR("Failed to get global state");
+ return;
+ }
+
+ trace_dpu_enc_mode_set(DRMID(drm_enc));
+
+ /* Query resources that were reserved in the atomic check step. */
+ num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
+ ARRAY_SIZE(hw_pp));
+ num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
+ num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
+ dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
+ ARRAY_SIZE(hw_dspp));
+
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
+ dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
+ : NULL;
+
+ num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_DSC,
+ hw_dsc, ARRAY_SIZE(hw_dsc));
+ for (i = 0; i < num_dsc; i++) {
+ dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
+ dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0);
+ }
+
+ dpu_enc->dsc_mask = dsc_mask;
+
+ cstate = to_dpu_crtc_state(crtc_state);
+
+ for (i = 0; i < num_lm; i++) {
+ int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
+
+ cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
+ cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
+ cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
+ }
+
+ cstate->num_mixers = num_lm;
+
+ dpu_enc->connector = conn_state->connector;
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!dpu_enc->hw_pp[i]) {
+ DPU_ERROR_ENC(dpu_enc,
+ "no pp block assigned at idx: %d\n", i);
+ return;
+ }
+
+ if (!hw_ctl[i]) {
+ DPU_ERROR_ENC(dpu_enc,
+ "no ctl block assigned at idx: %d\n", i);
+ return;
+ }
+
+ phys->hw_pp = dpu_enc->hw_pp[i];
+ phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
+
+ phys->cached_mode = crtc_state->adjusted_mode;
+ if (phys->ops.atomic_mode_set)
+ phys->ops.atomic_mode_set(phys, crtc_state, conn_state);
+ }
+}
+
+static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i;
+
+ if (!drm_enc || !drm_enc->dev) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ if (!dpu_enc || !dpu_enc->cur_master) {
+ DPU_ERROR("invalid dpu encoder/master\n");
+ return;
+ }
+
+ if (dpu_enc->disp_info.intf_type == INTF_DP &&
+ dpu_enc->cur_master->hw_mdptop &&
+ dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
+ dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
+ dpu_enc->cur_master->hw_mdptop);
+
+ _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
+
+ if (dpu_enc->disp_info.intf_type == INTF_DSI &&
+ !WARN_ON(dpu_enc->num_phys_encs == 0)) {
+ unsigned int bpc = dpu_enc->connector->display_info.bpc;
+
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ if (!dpu_enc->hw_pp[i])
+ continue;
+ _dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc);
+ }
+ }
+}
+
+void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ mutex_lock(&dpu_enc->enc_lock);
+
+ if (!dpu_enc->enabled)
+ goto out;
+
+ if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
+ dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
+ if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
+ dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
+
+ _dpu_encoder_virt_enable_helper(drm_enc);
+
+out:
+ mutex_unlock(&dpu_enc->enc_lock);
+}
+
+static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc,
+ struct drm_atomic_state *state)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int ret = 0;
+ struct drm_display_mode *cur_mode = NULL;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ dpu_enc->dsc = dpu_encoder_get_dsc_config(drm_enc);
+
+ mutex_lock(&dpu_enc->enc_lock);
+ cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
+
+ trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
+ cur_mode->vdisplay);
+
+ /* always enable slave encoder before master */
+ if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable)
+ dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave);
+
+ if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable)
+ dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
+
+ ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
+ if (ret) {
+ DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
+ ret);
+ goto out;
+ }
+
+ _dpu_encoder_virt_enable_helper(drm_enc);
+
+ dpu_enc->enabled = true;
+
+out:
+ mutex_unlock(&dpu_enc->enc_lock);
+}
+
+static void dpu_encoder_virt_atomic_disable(struct drm_encoder *drm_enc,
+ struct drm_atomic_state *state)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_state = NULL;
+ int i = 0;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ crtc = drm_atomic_get_old_crtc_for_encoder(state, drm_enc);
+ if (crtc)
+ old_state = drm_atomic_get_old_crtc_state(state, crtc);
+
+ /*
+ * The encoder is already disabled if self refresh mode was set earlier,
+ * in the old_state for the corresponding crtc.
+ */
+ if (old_state && old_state->self_refresh_active)
+ return;
+
+ mutex_lock(&dpu_enc->enc_lock);
+ dpu_enc->enabled = false;
+
+ trace_dpu_enc_disable(DRMID(drm_enc));
+
+ /* wait for idle */
+ dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
+
+ dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys->ops.disable)
+ phys->ops.disable(phys);
+ }
+
+ /* after phys waits for frame-done, should be no more frames pending */
+ if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
+ DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
+ del_timer_sync(&dpu_enc->frame_done_timer);
+ }
+
+ dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
+
+ dpu_enc->connector = NULL;
+
+ DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
+
+ mutex_unlock(&dpu_enc->enc_lock);
+}
+
+static struct dpu_hw_intf *dpu_encoder_get_intf(const struct dpu_mdss_cfg *catalog,
+ struct dpu_rm *dpu_rm,
+ enum dpu_intf_type type, u32 controller_id)
+{
+ int i = 0;
+
+ if (type == INTF_WB)
+ return NULL;
+
+ for (i = 0; i < catalog->intf_count; i++) {
+ if (catalog->intf[i].type == type
+ && catalog->intf[i].controller_id == controller_id) {
+ return dpu_rm_get_intf(dpu_rm, catalog->intf[i].id);
+ }
+ }
+
+ return NULL;
+}
+
+void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phy_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ unsigned long lock_flags;
+
+ if (!drm_enc || !phy_enc)
+ return;
+
+ DPU_ATRACE_BEGIN("encoder_vblank_callback");
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ atomic_inc(&phy_enc->vsync_cnt);
+
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ if (dpu_enc->crtc)
+ dpu_crtc_vblank_callback(dpu_enc->crtc);
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+
+ DPU_ATRACE_END("encoder_vblank_callback");
+}
+
+void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phy_enc)
+{
+ if (!phy_enc)
+ return;
+
+ DPU_ATRACE_BEGIN("encoder_underrun_callback");
+ atomic_inc(&phy_enc->underrun_cnt);
+
+ /* trigger dump only on the first underrun */
+ if (atomic_read(&phy_enc->underrun_cnt) == 1)
+ msm_disp_snapshot_state(drm_enc->dev);
+
+ trace_dpu_enc_underrun_cb(DRMID(drm_enc),
+ atomic_read(&phy_enc->underrun_cnt));
+ DPU_ATRACE_END("encoder_underrun_callback");
+}
+
+void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ unsigned long lock_flags;
+
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ /* crtc should always be cleared before re-assigning */
+ WARN_ON(crtc && dpu_enc->crtc);
+ dpu_enc->crtc = crtc;
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+}
+
+void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
+ struct drm_crtc *crtc, bool enable)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ unsigned long lock_flags;
+ int i;
+
+ trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
+
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ if (dpu_enc->crtc != crtc) {
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+ return;
+ }
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys->ops.control_vblank_irq)
+ phys->ops.control_vblank_irq(phys, enable);
+ }
+}
+
+void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
+ void (*frame_event_cb)(void *, u32 event),
+ void *frame_event_cb_data)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ unsigned long lock_flags;
+ bool enable = frame_event_cb != NULL;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);
+
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ dpu_enc->crtc_frame_event_cb = frame_event_cb;
+ dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+}
+
+void dpu_encoder_frame_done_callback(
+ struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *ready_phys, u32 event)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ unsigned int i;
+
+ if (event & (DPU_ENCODER_FRAME_EVENT_DONE
+ | DPU_ENCODER_FRAME_EVENT_ERROR
+ | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
+
+ if (!dpu_enc->frame_busy_mask[0]) {
+ /*
+ * suppress frame_done without waiter,
+ * likely autorefresh
+ */
+ trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc), event,
+ dpu_encoder_helper_get_intf_type(ready_phys->intf_mode),
+ ready_phys->hw_intf ? ready_phys->hw_intf->idx : -1,
+ ready_phys->hw_wb ? ready_phys->hw_wb->idx : -1);
+ return;
+ }
+
+ /* One of the physical encoders has become idle */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ if (dpu_enc->phys_encs[i] == ready_phys) {
+ trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
+ dpu_enc->frame_busy_mask[0]);
+ clear_bit(i, dpu_enc->frame_busy_mask);
+ }
+ }
+
+ if (!dpu_enc->frame_busy_mask[0]) {
+ atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
+ del_timer(&dpu_enc->frame_done_timer);
+
+ dpu_encoder_resource_control(drm_enc,
+ DPU_ENC_RC_EVENT_FRAME_DONE);
+
+ if (dpu_enc->crtc_frame_event_cb)
+ dpu_enc->crtc_frame_event_cb(
+ dpu_enc->crtc_frame_event_cb_data,
+ event);
+ }
+ } else {
+ if (dpu_enc->crtc_frame_event_cb)
+ dpu_enc->crtc_frame_event_cb(
+ dpu_enc->crtc_frame_event_cb_data, event);
+ }
+}
+
+static void dpu_encoder_off_work(struct work_struct *work)
+{
+ struct dpu_encoder_virt *dpu_enc = container_of(work,
+ struct dpu_encoder_virt, delayed_off_work.work);
+
+ dpu_encoder_resource_control(&dpu_enc->base,
+ DPU_ENC_RC_EVENT_ENTER_IDLE);
+
+ dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
+ DPU_ENCODER_FRAME_EVENT_IDLE);
+}
+
+/**
+ * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
+ * @drm_enc: Pointer to drm encoder structure
+ * @phys: Pointer to physical encoder structure
+ * @extra_flush_bits: Additional bit mask to include in flush trigger
+ */
+static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
+{
+ struct dpu_hw_ctl *ctl;
+ int pending_kickoff_cnt;
+ u32 ret = UINT_MAX;
+
+ if (!phys->hw_pp) {
+ DPU_ERROR("invalid pingpong hw\n");
+ return;
+ }
+
+ ctl = phys->hw_ctl;
+ if (!ctl->ops.trigger_flush) {
+ DPU_ERROR("missing trigger cb\n");
+ return;
+ }
+
+ pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
+
+ if (extra_flush_bits && ctl->ops.update_pending_flush)
+ ctl->ops.update_pending_flush(ctl, extra_flush_bits);
+
+ ctl->ops.trigger_flush(ctl);
+
+ if (ctl->ops.get_pending_flush)
+ ret = ctl->ops.get_pending_flush(ctl);
+
+ trace_dpu_enc_trigger_flush(DRMID(drm_enc),
+ dpu_encoder_helper_get_intf_type(phys->intf_mode),
+ phys->hw_intf ? phys->hw_intf->idx : -1,
+ phys->hw_wb ? phys->hw_wb->idx : -1,
+ pending_kickoff_cnt, ctl->idx,
+ extra_flush_bits, ret);
+}
+
+/**
+ * _dpu_encoder_trigger_start - trigger start for a physical encoder
+ * @phys: Pointer to physical encoder structure
+ */
+static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
+{
+ if (!phys) {
+ DPU_ERROR("invalid argument(s)\n");
+ return;
+ }
+
+ if (!phys->hw_pp) {
+ DPU_ERROR("invalid pingpong hw\n");
+ return;
+ }
+
+ if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
+ phys->ops.trigger_start(phys);
+}
+
+void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_ctl *ctl;
+
+ ctl = phys_enc->hw_ctl;
+ if (ctl->ops.trigger_start) {
+ ctl->ops.trigger_start(ctl);
+ trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
+ }
+}
+
+static int dpu_encoder_helper_wait_event_timeout(
+ int32_t drm_id,
+ u32 irq_idx,
+ struct dpu_encoder_wait_info *info)
+{
+ int rc = 0;
+ s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
+ s64 wait_jiffies = msecs_to_jiffies(info->timeout_ms);
+ s64 time;
+
+ do {
+ rc = wait_event_timeout(*(info->wq),
+ atomic_read(info->atomic_cnt) == 0, wait_jiffies);
+ time = ktime_to_ms(ktime_get());
+
+ trace_dpu_enc_wait_event_timeout(drm_id, irq_idx, rc, time,
+ expected_time,
+ atomic_read(info->atomic_cnt));
+ /* If we timed out (rc == 0) but the counter is still pending and the time budget remains, wait again */
+ } while (atomic_read(info->atomic_cnt) && (rc == 0) &&
+ (time < expected_time));
+
+ return rc;
+}
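+
+/*
+ * Editorial note on the retry loop above: the jiffies-granular wait can
+ * expire slightly before the full timeout_ms budget has elapsed, so on
+ * rc == 0 the wait is retried while the counter is still pending and
+ * wall-clock budget remains; rc > 0 means the counter drained in time.
+ */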
+
+static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_hw_ctl *ctl;
+ int rc;
+ struct drm_encoder *drm_enc;
+
+ dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
+ ctl = phys_enc->hw_ctl;
+ drm_enc = phys_enc->parent;
+
+ if (!ctl->ops.reset)
+ return;
+
+ DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(drm_enc),
+ ctl->idx);
+
+ rc = ctl->ops.reset(ctl);
+ if (rc) {
+ DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
+ msm_disp_snapshot_state(drm_enc->dev);
+ }
+
+ phys_enc->enable_state = DPU_ENC_ENABLED;
+}
+
+/**
+ * _dpu_encoder_kickoff_phys - handle physical encoder kickoff
+ * Iterate through the physical encoders and perform consolidated flush
+ * and/or control start triggering as needed. This is done in the virtual
+ * encoder rather than the individual physical ones in order to handle
+ * use cases that require visibility into multiple physical encoders at
+ * a time.
+ * @dpu_enc: Pointer to virtual encoder structure
+ */
+static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
+{
+ struct dpu_hw_ctl *ctl;
+ uint32_t i, pending_flush;
+ unsigned long lock_flags;
+
+ pending_flush = 0x0;
+
+ /* update pending counts and trigger kickoff ctl flush atomically */
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+
+ /* don't perform flush/start operations for slave encoders */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys->enable_state == DPU_ENC_DISABLED)
+ continue;
+
+ ctl = phys->hw_ctl;
+
+ /*
+ * This is cleared in frame_done worker, which isn't invoked
+ * for async commits. So don't set this for async, since it'll
+ * roll over to the next commit.
+ */
+ if (phys->split_role != ENC_ROLE_SLAVE)
+ set_bit(i, dpu_enc->frame_busy_mask);
+
+ if (!phys->ops.needs_single_flush ||
+ !phys->ops.needs_single_flush(phys))
+ _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
+ else if (ctl->ops.get_pending_flush)
+ pending_flush |= ctl->ops.get_pending_flush(ctl);
+ }
+
+ /* for split flush, combine pending flush masks and send to master */
+ if (pending_flush && dpu_enc->cur_master) {
+ _dpu_encoder_trigger_flush(
+ &dpu_enc->base,
+ dpu_enc->cur_master,
+ pending_flush);
+ }
+
+ _dpu_encoder_trigger_start(dpu_enc->cur_master);
+
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+}
+
+void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ unsigned int i;
+ struct dpu_hw_ctl *ctl;
+ struct msm_display_info *disp_info;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ disp_info = &dpu_enc->disp_info;
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+
+ ctl = phys->hw_ctl;
+ if (ctl->ops.clear_pending_flush)
+ ctl->ops.clear_pending_flush(ctl);
+
+ /* update only for command mode primary ctl */
+ if ((phys == dpu_enc->cur_master) &&
+ disp_info->is_cmd_mode
+ && ctl->ops.trigger_pending)
+ ctl->ops.trigger_pending(ctl);
+ }
+}
+
+static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
+ struct drm_display_mode *mode)
+{
+ u64 pclk_rate;
+ u32 pclk_period;
+ u32 line_time;
+
+ /*
+ * For linetime calculation, only operate on master encoder.
+ */
+ if (!dpu_enc->cur_master)
+ return 0;
+
+ if (!dpu_enc->cur_master->ops.get_line_count) {
+ DPU_ERROR("get_line_count function not defined\n");
+ return 0;
+ }
+
+ pclk_rate = mode->clock; /* pixel clock in kHz */
+ if (pclk_rate == 0) {
+ DPU_ERROR("pclk is 0, cannot calculate line time\n");
+ return 0;
+ }
+
+ pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
+ if (pclk_period == 0) {
+ DPU_ERROR("pclk period is 0\n");
+ return 0;
+ }
+
+ /*
+ * Line time calculation based on Pixel clock and HTOTAL.
+ * Final unit is in ns.
+ */
+ line_time = (pclk_period * mode->htotal) / 1000;
+ if (line_time == 0) {
+ DPU_ERROR("line time calculation is 0\n");
+ return 0;
+ }
+
+ DPU_DEBUG_ENC(dpu_enc,
+ "clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
+ pclk_rate, pclk_period, line_time);
+
+ return line_time;
+}
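+
+/*
+ * Worked example (editorial): for a 1920x1080@60 mode with a 148500 kHz
+ * pixel clock and htotal = 2200: pclk_period = 1000000000 / 148500 = 6734
+ * and line_time = 6734 * 2200 / 1000 = 14814, i.e. roughly 14.8 us/line.
+ */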
+
+int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time)
+{
+ struct drm_display_mode *mode;
+ struct dpu_encoder_virt *dpu_enc;
+ u32 cur_line;
+ u32 line_time;
+ u32 vtotal, time_to_vsync;
+ ktime_t cur_time;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ if (!drm_enc->crtc || !drm_enc->crtc->state) {
+ DPU_ERROR("crtc/crtc state object is NULL\n");
+ return -EINVAL;
+ }
+ mode = &drm_enc->crtc->state->adjusted_mode;
+
+ line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
+ if (!line_time)
+ return -EINVAL;
+
+ cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);
+
+ vtotal = mode->vtotal;
+ if (cur_line >= vtotal)
+ time_to_vsync = line_time * vtotal;
+ else
+ time_to_vsync = line_time * (vtotal - cur_line);
+
+ if (time_to_vsync == 0) {
+ DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
+ vtotal);
+ return -EINVAL;
+ }
+
+ cur_time = ktime_get();
+ *wakeup_time = ktime_add_ns(cur_time, time_to_vsync);
+
+ DPU_DEBUG_ENC(dpu_enc,
+ "cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
+ cur_line, vtotal, time_to_vsync,
+ ktime_to_ms(cur_time),
+ ktime_to_ms(*wakeup_time));
+ return 0;
+}
+
+static u32
+dpu_encoder_dsc_initial_line_calc(struct drm_dsc_config *dsc,
+ u32 enc_ip_width)
+{
+ int ssm_delay, total_pixels, soft_slice_per_enc;
+
+ soft_slice_per_enc = enc_ip_width / dsc->slice_width;
+
+ /*
+ * minimum number of initial line pixels is a sum of:
+ * 1. sub-stream multiplexer delay (83 groups for 8bpc,
+ * 91 for 10 bpc) * 3
+ * 2. for two soft slice cases, add extra sub-stream multiplexer * 3
+ * 3. the initial xmit delay
+ * 4. total pipeline delay through the "lock step" of encoder (47)
+ * 5. 6 additional pixels as the output of the rate buffer is
+ * 48 bits wide
+ */
+ ssm_delay = ((dsc->bits_per_component < 10) ? 84 : 92);
+ total_pixels = ssm_delay * 3 + dsc->initial_xmit_delay + 47;
+ if (soft_slice_per_enc > 1)
+ total_pixels += (ssm_delay * 3);
+ return DIV_ROUND_UP(total_pixels, dsc->slice_width);
+}
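+
+/*
+ * Worked example (editorial, hypothetical DSC parameters): with 8 bpc
+ * (ssm_delay = 84), slice_width = 540, enc_ip_width = 1080 (two soft
+ * slices) and initial_xmit_delay = 512: total_pixels = 84 * 3 + 512 + 47
+ * + 84 * 3 = 1063, giving DIV_ROUND_UP(1063, 540) = 2 initial lines.
+ */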
+
+static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_ctl *ctl,
+ struct dpu_hw_dsc *hw_dsc,
+ struct dpu_hw_pingpong *hw_pp,
+ struct drm_dsc_config *dsc,
+ u32 common_mode,
+ u32 initial_lines)
+{
+ if (hw_dsc->ops.dsc_config)
+ hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, initial_lines);
+
+ if (hw_dsc->ops.dsc_config_thresh)
+ hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc);
+
+ if (hw_pp->ops.setup_dsc)
+ hw_pp->ops.setup_dsc(hw_pp);
+
+ if (hw_dsc->ops.dsc_bind_pingpong_blk)
+ hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, hw_pp->idx);
+
+ if (hw_pp->ops.enable_dsc)
+ hw_pp->ops.enable_dsc(hw_pp);
+
+ if (ctl->ops.update_pending_flush_dsc)
+ ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx);
+}
+
+static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
+ struct drm_dsc_config *dsc)
+{
+ /* coding only for 2LM, 2enc, 1 dsc config */
+ struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
+ struct dpu_hw_ctl *ctl = enc_master->hw_ctl;
+ struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+ int this_frame_slices;
+ int intf_ip_w, enc_ip_w;
+ int dsc_common_mode;
+ int pic_width;
+ u32 initial_lines;
+ int i;
+
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ hw_pp[i] = dpu_enc->hw_pp[i];
+ hw_dsc[i] = dpu_enc->hw_dsc[i];
+
+ if (!hw_pp[i] || !hw_dsc[i]) {
+ DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n");
+ return;
+ }
+ }
+
+ dsc_common_mode = 0;
+ pic_width = dsc->pic_width;
+
+ dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
+ if (enc_master->intf_mode == INTF_MODE_VIDEO)
+ dsc_common_mode |= DSC_MODE_VIDEO;
+
+ this_frame_slices = pic_width / dsc->slice_width;
+ intf_ip_w = this_frame_slices * dsc->slice_width;
+
+ /*
+ * dsc merge case: when using 2 encoders for the same stream,
+ * no. of slices need to be same on both the encoders.
+ */
+ enc_ip_w = intf_ip_w / 2;
+ initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
+
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
+ dpu_encoder_dsc_pipe_cfg(ctl, hw_dsc[i], hw_pp[i],
+ dsc, dsc_common_mode, initial_lines);
+}
+
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ bool needs_hw_reset = false;
+ unsigned int i;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));
+
+ /* prepare for next kickoff, may include waiting on previous kickoff */
+ DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys->ops.prepare_for_kickoff)
+ phys->ops.prepare_for_kickoff(phys);
+ if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
+ needs_hw_reset = true;
+ }
+ DPU_ATRACE_END("enc_prepare_for_kickoff");
+
+ dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
+
+ /* if any phys needs reset, reset all phys, in-order */
+ if (needs_hw_reset) {
+ trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
+ }
+ }
+
+ if (dpu_enc->dsc)
+ dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc);
+}
+
+bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ unsigned int i;
+ struct dpu_encoder_phys *phys;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL) {
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys->ops.is_valid_for_commit && !phys->ops.is_valid_for_commit(phys)) {
+ DPU_DEBUG("invalid FB not kicking off\n");
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ unsigned long timeout_ms;
+ unsigned int i;
+
+ DPU_ATRACE_BEGIN("encoder_kickoff");
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ trace_dpu_enc_kickoff(DRMID(drm_enc));
+
+ timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
+ drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
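+ /* e.g. a 60 Hz mode yields 5 * 1000 / 60 = 83 ms for the frame_done watchdog */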
+
+ atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
+ mod_timer(&dpu_enc->frame_done_timer,
+ jiffies + msecs_to_jiffies(timeout_ms));
+
+ /* All phys encs are ready to go, trigger the kickoff */
+ _dpu_encoder_kickoff_phys(dpu_enc);
+
+ /* allow phys encs to handle any post-kickoff business */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys->ops.handle_post_kickoff)
+ phys->ops.handle_post_kickoff(phys);
+ }
+
+ DPU_ATRACE_END("encoder_kickoff");
+}
+
+static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_mixer_cfg mixer;
+ int i, num_lm;
+ struct dpu_global_state *global_state;
+ struct dpu_hw_blk *hw_lm[2];
+ struct dpu_hw_mixer *hw_mixer[2];
+ struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
+
+ memset(&mixer, 0, sizeof(mixer));
+
+ /* reset all mixers for this encoder */
+ if (phys_enc->hw_ctl->ops.clear_all_blendstages)
+ phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl);
+
+ global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms);
+
+ num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state,
+ phys_enc->parent->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
+
+ for (i = 0; i < num_lm; i++) {
+ hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]);
+ if (phys_enc->hw_ctl->ops.update_pending_flush_mixer)
+ phys_enc->hw_ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx);
+
+ /* clear all blendstages */
+ if (phys_enc->hw_ctl->ops.setup_blendstage)
+ phys_enc->hw_ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
+ }
+}
+
+static void dpu_encoder_dsc_pipe_clr(struct dpu_hw_ctl *ctl,
+ struct dpu_hw_dsc *hw_dsc,
+ struct dpu_hw_pingpong *hw_pp)
+{
+ if (hw_dsc->ops.dsc_disable)
+ hw_dsc->ops.dsc_disable(hw_dsc);
+
+ if (hw_pp->ops.disable_dsc)
+ hw_pp->ops.disable_dsc(hw_pp);
+
+ if (hw_dsc->ops.dsc_bind_pingpong_blk)
+ hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, PINGPONG_NONE);
+
+ if (ctl->ops.update_pending_flush_dsc)
+ ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx);
+}
+
+static void dpu_encoder_unprep_dsc(struct dpu_encoder_virt *dpu_enc)
+{
+	/* only the 2 LM, 2 encoder, 1 DSC configuration is handled here */
+ struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
+ struct dpu_hw_ctl *ctl = enc_master->hw_ctl;
+ struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+ int i;
+
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ hw_pp[i] = dpu_enc->hw_pp[i];
+ hw_dsc[i] = dpu_enc->hw_dsc[i];
+
+ if (hw_pp[i] && hw_dsc[i])
+ dpu_encoder_dsc_pipe_clr(ctl, hw_dsc[i], hw_pp[i]);
+ }
+}
+
+void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
+ struct dpu_hw_intf_cfg intf_cfg = { 0 };
+ int i;
+ struct dpu_encoder_virt *dpu_enc;
+
+ dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
+
+ phys_enc->hw_ctl->ops.reset(ctl);
+
+ dpu_encoder_helper_reset_mixers(phys_enc);
+
+ /*
+ * TODO: move the once-only operation like CTL flush/trigger
+ * into dpu_encoder_virt_disable() and all operations which need
+ * to be done per phys encoder into the phys_disable() op.
+ */
+ if (phys_enc->hw_wb) {
+ /* disable the PP block */
+ if (phys_enc->hw_wb->ops.bind_pingpong_blk)
+ phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, PINGPONG_NONE);
+
+ /* mark WB flush as pending */
+ if (phys_enc->hw_ctl->ops.update_pending_flush_wb)
+ phys_enc->hw_ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx);
+ } else {
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk)
+ phys_enc->hw_intf->ops.bind_pingpong_blk(
+ dpu_enc->phys_encs[i]->hw_intf,
+ PINGPONG_NONE);
+
+ /* mark INTF flush as pending */
+ if (phys_enc->hw_ctl->ops.update_pending_flush_intf)
+ phys_enc->hw_ctl->ops.update_pending_flush_intf(phys_enc->hw_ctl,
+ dpu_enc->phys_encs[i]->hw_intf->idx);
+ }
+ }
+
+ /* reset the merge 3D HW block */
+ if (phys_enc->hw_pp->merge_3d) {
+ phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
+ BLEND_3D_NONE);
+ if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
+ phys_enc->hw_ctl->ops.update_pending_flush_merge_3d(ctl,
+ phys_enc->hw_pp->merge_3d->idx);
+ }
+
+ if (dpu_enc->dsc) {
+ dpu_encoder_unprep_dsc(dpu_enc);
+ dpu_enc->dsc = NULL;
+ }
+
+ intf_cfg.stream_sel = 0; /* Don't care value for video mode */
+ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+ intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
+
+ if (phys_enc->hw_intf)
+ intf_cfg.intf = phys_enc->hw_intf->idx;
+ if (phys_enc->hw_wb)
+ intf_cfg.wb = phys_enc->hw_wb->idx;
+
+ if (phys_enc->hw_pp->merge_3d)
+ intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
+
+ if (ctl->ops.reset_intf_cfg)
+ ctl->ops.reset_intf_cfg(ctl, &intf_cfg);
+
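+	/*
+	 * Flush the configuration reset above, latch it with a start
+	 * trigger, then clear the software-tracked pending-flush bits so
+	 * the next commit starts from a clean state.
+	 */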
+ ctl->ops.trigger_flush(ctl);
+ ctl->ops.trigger_start(ctl);
+ ctl->ops.clear_pending_flush(ctl);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_encoder_status_show(struct seq_file *s, void *data)
+{
+ struct dpu_encoder_virt *dpu_enc = s->private;
+ int i;
+
+ mutex_lock(&dpu_enc->enc_lock);
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d ",
+ phys->hw_intf ? phys->hw_intf->idx - INTF_0 : -1,
+ phys->hw_wb ? phys->hw_wb->idx - WB_0 : -1,
+ atomic_read(&phys->vsync_cnt),
+ atomic_read(&phys->underrun_cnt));
+
+ seq_printf(s, "mode: %s\n", dpu_encoder_helper_get_intf_type(phys->intf_mode));
+ }
+ mutex_unlock(&dpu_enc->enc_lock);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status);
+
+static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ char name[12];
+
+ if (!drm_enc->dev) {
+ DPU_ERROR("invalid encoder or kms\n");
+ return -EINVAL;
+ }
+
+ snprintf(name, sizeof(name), "encoder%u", drm_enc->base.id);
+
+ /* create overall sub-directory for the encoder */
+ dpu_enc->debugfs_root = debugfs_create_dir(name,
+ drm_enc->dev->primary->debugfs_root);
+
+ /* don't error check these */
+ debugfs_create_file("status", 0600,
+ dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops);
+
+ return 0;
+}
+#else
+static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
+{
+ return 0;
+}
+#endif
+
+static int dpu_encoder_late_register(struct drm_encoder *encoder)
+{
+ return _dpu_encoder_init_debugfs(encoder);
+}
+
+static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
+
+ debugfs_remove_recursive(dpu_enc->debugfs_root);
+}
+
+static int dpu_encoder_virt_add_phys_encs(
+ struct msm_display_info *disp_info,
+ struct dpu_encoder_virt *dpu_enc,
+ struct dpu_enc_phys_init_params *params)
+{
+ struct dpu_encoder_phys *enc = NULL;
+
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ /*
+	 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoders
+	 * in this function, so check the array bounds up-front.
+ */
+ if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
+ ARRAY_SIZE(dpu_enc->phys_encs)) {
+ DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n",
+ dpu_enc->num_phys_encs);
+ return -EINVAL;
+ }
+
+ if (disp_info->intf_type == INTF_WB) {
+ enc = dpu_encoder_phys_wb_init(params);
+
+ if (IS_ERR(enc)) {
+ DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n",
+ PTR_ERR(enc));
+ return PTR_ERR(enc);
+ }
+
+ dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
+ ++dpu_enc->num_phys_encs;
+ } else if (disp_info->is_cmd_mode) {
+ enc = dpu_encoder_phys_cmd_init(params);
+
+ if (IS_ERR(enc)) {
+ DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
+ PTR_ERR(enc));
+ return PTR_ERR(enc);
+ }
+
+ dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
+ ++dpu_enc->num_phys_encs;
+ } else {
+ enc = dpu_encoder_phys_vid_init(params);
+
+ if (IS_ERR(enc)) {
+ DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
+ PTR_ERR(enc));
+ return PTR_ERR(enc);
+ }
+
+ dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
+ ++dpu_enc->num_phys_encs;
+ }
+
+ if (params->split_role == ENC_ROLE_SLAVE)
+ dpu_enc->cur_slave = enc;
+ else
+ dpu_enc->cur_master = enc;
+
+ return 0;
+}
+
+static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
+ struct dpu_kms *dpu_kms,
+ struct msm_display_info *disp_info)
+{
+ int ret = 0;
+ int i = 0;
+ struct dpu_enc_phys_init_params phys_params;
+
+ if (!dpu_enc) {
+ DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL);
+ return -EINVAL;
+ }
+
+ dpu_enc->cur_master = NULL;
+
+ memset(&phys_params, 0, sizeof(phys_params));
+ phys_params.dpu_kms = dpu_kms;
+ phys_params.parent = &dpu_enc->base;
+ phys_params.enc_spinlock = &dpu_enc->enc_spinlock;
+
+ WARN_ON(disp_info->num_of_h_tiles < 1);
+
+	DPU_DEBUG("disp_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
+
+ if (disp_info->intf_type != INTF_WB)
+ dpu_enc->idle_pc_supported =
+ dpu_kms->catalog->caps->has_idle_pc;
+
+ mutex_lock(&dpu_enc->enc_lock);
+ for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
+ /*
+		 * Left-most tile is at index 0, content is the controller id:
+		 * h_tile_instance[2] = {0, 1}; DSI0 = left, DSI1 = right
+		 * h_tile_instance[2] = {1, 0}; DSI1 = left, DSI0 = right
+ */
+ u32 controller_id = disp_info->h_tile_instance[i];
+
+ if (disp_info->num_of_h_tiles > 1) {
+ if (i == 0)
+ phys_params.split_role = ENC_ROLE_MASTER;
+ else
+ phys_params.split_role = ENC_ROLE_SLAVE;
+ } else {
+ phys_params.split_role = ENC_ROLE_SOLO;
+ }
+
+ DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
+ i, controller_id, phys_params.split_role);
+
+ phys_params.hw_intf = dpu_encoder_get_intf(dpu_kms->catalog, &dpu_kms->rm,
+ disp_info->intf_type,
+ controller_id);
+
+ if (disp_info->intf_type == INTF_WB && controller_id < WB_MAX)
+ phys_params.hw_wb = dpu_rm_get_wb(&dpu_kms->rm, controller_id);
+
+ if (!phys_params.hw_intf && !phys_params.hw_wb) {
+ DPU_ERROR_ENC(dpu_enc, "no intf or wb block assigned at idx: %d\n", i);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (phys_params.hw_intf && phys_params.hw_wb) {
+ DPU_ERROR_ENC(dpu_enc,
+ "invalid phys both intf and wb block at idx: %d\n", i);
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = dpu_encoder_virt_add_phys_encs(disp_info,
+ dpu_enc, &phys_params);
+ if (ret) {
+ DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
+ break;
+ }
+ }
+
+ mutex_unlock(&dpu_enc->enc_lock);
+
+ return ret;
+}
+
+static void dpu_encoder_frame_done_timeout(struct timer_list *t)
+{
+ struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
+ frame_done_timer);
+ struct drm_encoder *drm_enc = &dpu_enc->base;
+ u32 event;
+
+ if (!drm_enc->dev) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
+ DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
+ DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
+ return;
+ } else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
+ DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
+ return;
+ }
+
+ DPU_ERROR_ENC(dpu_enc, "frame done timeout\n");
+
+ event = DPU_ENCODER_FRAME_EVENT_ERROR;
+ trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
+ dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
+}
+
+static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
+ .atomic_mode_set = dpu_encoder_virt_atomic_mode_set,
+ .atomic_disable = dpu_encoder_virt_atomic_disable,
+ .atomic_enable = dpu_encoder_virt_atomic_enable,
+ .atomic_check = dpu_encoder_virt_atomic_check,
+};
+
+static const struct drm_encoder_funcs dpu_encoder_funcs = {
+ .destroy = dpu_encoder_destroy,
+ .late_register = dpu_encoder_late_register,
+ .early_unregister = dpu_encoder_early_unregister,
+};
+
+struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
+ int drm_enc_mode,
+ struct msm_display_info *disp_info)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+ struct drm_encoder *drm_enc = NULL;
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int ret = 0;
+
+ dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
+ if (!dpu_enc)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
+ drm_enc_mode, NULL);
+ if (ret) {
+ devm_kfree(dev->dev, dpu_enc);
+ return ERR_PTR(ret);
+ }
+
+ drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
+
+ spin_lock_init(&dpu_enc->enc_spinlock);
+ dpu_enc->enabled = false;
+ mutex_init(&dpu_enc->enc_lock);
+ mutex_init(&dpu_enc->rc_lock);
+
+ ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
+ if (ret)
+ goto fail;
+
+ atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
+ timer_setup(&dpu_enc->frame_done_timer,
+ dpu_encoder_frame_done_timeout, 0);
+
+ if (disp_info->intf_type == INTF_DP)
+ dpu_enc->wide_bus_en = msm_dp_wide_bus_available(
+ priv->dp[disp_info->h_tile_instance[0]]);
+
+ INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
+ dpu_encoder_off_work);
+ dpu_enc->idle_timeout = IDLE_TIMEOUT;
+
+ memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));
+
+ DPU_DEBUG_ENC(dpu_enc, "created\n");
+
+ return &dpu_enc->base;
+
+fail:
+ DPU_ERROR("failed to create encoder\n");
+ if (drm_enc)
+ dpu_encoder_destroy(drm_enc);
+
+ return ERR_PTR(ret);
+}
+
+int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
+ enum msm_event_wait event)
+{
+ int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i, ret = 0;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ switch (event) {
+ case MSM_ENC_COMMIT_DONE:
+ fn_wait = phys->ops.wait_for_commit_done;
+ break;
+ case MSM_ENC_TX_COMPLETE:
+ fn_wait = phys->ops.wait_for_tx_complete;
+ break;
+ case MSM_ENC_VBLANK:
+ fn_wait = phys->ops.wait_for_vblank;
+ break;
+ default:
+ DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
+ event);
+ return -EINVAL;
+ }
+
+ if (fn_wait) {
+ DPU_ATRACE_BEGIN("wait_for_completion_event");
+ ret = fn_wait(phys);
+ DPU_ATRACE_END("wait_for_completion_event");
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+
+ if (!encoder) {
+ DPU_ERROR("invalid encoder\n");
+ return INTF_MODE_NONE;
+ }
+ dpu_enc = to_dpu_encoder_virt(encoder);
+
+ if (dpu_enc->cur_master)
+ return dpu_enc->cur_master->intf_mode;
+
+ if (dpu_enc->num_phys_encs)
+ return dpu_enc->phys_encs[0]->intf_mode;
+
+ return INTF_MODE_NONE;
+}
+
+unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc)
+{
+ struct drm_encoder *encoder = phys_enc->parent;
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
+
+ return dpu_enc->dsc_mask;
+}
+
+void dpu_encoder_phys_init(struct dpu_encoder_phys *phys_enc,
+ struct dpu_enc_phys_init_params *p)
+{
+ int i;
+
+ phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
+ phys_enc->hw_intf = p->hw_intf;
+ phys_enc->hw_wb = p->hw_wb;
+ phys_enc->parent = p->parent;
+ phys_enc->dpu_kms = p->dpu_kms;
+ phys_enc->split_role = p->split_role;
+ phys_enc->enc_spinlock = p->enc_spinlock;
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+
+ for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++)
+ phys_enc->irq[i] = -EINVAL;
+
+ atomic_set(&phys_enc->vblank_refcount, 0);
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
+
+ atomic_set(&phys_enc->vsync_cnt, 0);
+ atomic_set(&phys_enc->underrun_cnt, 0);
+
+ init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
new file mode 100644
index 0000000000..4c05fd5e9e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __DPU_ENCODER_H__
+#define __DPU_ENCODER_H__
+
+#include <drm/drm_crtc.h>
+#include "dpu_hw_mdss.h"
+
+#define DPU_ENCODER_FRAME_EVENT_DONE BIT(0)
+#define DPU_ENCODER_FRAME_EVENT_ERROR BIT(1)
+#define DPU_ENCODER_FRAME_EVENT_PANEL_DEAD BIT(2)
+#define DPU_ENCODER_FRAME_EVENT_IDLE BIT(3)
+
+#define IDLE_TIMEOUT (66 - 16/2)
+
+/**
+ * struct msm_display_info - defines display properties
+ * @intf_type: INTF_ type
+ * @num_of_h_tiles: Number of horizontal tiles in case of split interface
+ * @h_tile_instance: Controller instance used per tile. Number of elements is
+ * based on num_of_h_tiles
+ * @is_cmd_mode: Boolean to indicate if command mode is requested
+ * @is_te_using_watchdog_timer: Boolean to indicate watchdog TE is
+ * used instead of panel TE in cmd mode panels
+ */
+struct msm_display_info {
+ enum dpu_intf_type intf_type;
+ uint32_t num_of_h_tiles;
+ uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
+ bool is_cmd_mode;
+ bool is_te_using_watchdog_timer;
+};
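+
+/*
+ * Example (hypothetical values, for illustration only): a dual-DSI
+ * command mode panel with DSI0 driving the left half would be described as:
+ *
+ *	struct msm_display_info info = {
+ *		.intf_type = INTF_DSI,
+ *		.num_of_h_tiles = 2,
+ *		.h_tile_instance = { 0, 1 },
+ *		.is_cmd_mode = true,
+ *	};
+ */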
+
+/**
+ * dpu_encoder_assign_crtc - Link the encoder to the crtc it's assigned to
+ * @encoder: encoder pointer
+ * @crtc: crtc pointer
+ */
+void dpu_encoder_assign_crtc(struct drm_encoder *encoder,
+ struct drm_crtc *crtc);
+
+/**
+ * dpu_encoder_toggle_vblank_for_crtc - Toggles vblank interrupts on or off if
+ * the encoder is assigned to the given crtc
+ * @encoder: encoder pointer
+ * @crtc: crtc pointer
+ * @enable: true if vblank should be enabled
+ */
+void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *encoder,
+ struct drm_crtc *crtc, bool enable);
+
+/**
+ * dpu_encoder_register_frame_event_callback - provide callback to encoder that
+ * will be called after the request is complete, or other events.
+ * @encoder: encoder pointer
+ * @cb: callback pointer, provide NULL to deregister
+ * @data: user data provided to callback
+ */
+void dpu_encoder_register_frame_event_callback(struct drm_encoder *encoder,
+ void (*cb)(void *, u32), void *data);
+
+/**
+ * dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
+ * path (i.e. ctl flush and start) at next appropriate time.
+ * Immediately: if no previous commit is outstanding.
+ * Delayed: Block until next trigger can be issued.
+ * @encoder: encoder pointer
+ */
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
+ * kickoff and trigger the ctl prepare progress for command mode display.
+ * @encoder: encoder pointer
+ */
+void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
+ * (i.e. ctl flush and start) immediately.
+ * @encoder: encoder pointer
+ */
+void dpu_encoder_kickoff(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_vsync_time - get the time of the next vsync
+ * @drm_enc: Pointer to drm encoder structure
+ * @wakeup_time: Pointer to a ktime_t that receives the next vsync time
+ */
+int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time);
+
+/**
+ * dpu_encoder_wait_for_event - Waits for encoder events
+ * @encoder: encoder pointer
+ * @event: event to wait for
+ * MSM_ENC_COMMIT_DONE - Wait for hardware to have flushed the current pending
+ * frames to hardware at a vblank or ctl_start
+ * Encoders will map this differently depending on the
+ * panel type.
+ * vid mode -> vsync_irq
+ * cmd mode -> ctl_start
+ * MSM_ENC_TX_COMPLETE - Wait for the hardware to transfer all the pixels to
+ * the panel. Encoders will map this differently
+ * depending on the panel type.
+ * vid mode -> vsync_irq
+ * cmd mode -> pp_done
+ * Returns: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
+ */
+int dpu_encoder_wait_for_event(struct drm_encoder *drm_encoder,
+ enum msm_event_wait event);
+
+/*
+ * dpu_encoder_get_intf_mode - get interface mode of the given encoder
+ * @encoder: Pointer to drm encoder object
+ */
+enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_virt_runtime_resume - pm runtime resume the encoder configs
+ * @encoder: encoder pointer
+ */
+void dpu_encoder_virt_runtime_resume(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_init - initialize virtual encoder object
+ * @dev: Pointer to drm device structure
+ * @drm_enc_mode: corresponding DRM_MODE_ENCODER_* constant
+ * @disp_info: Pointer to display information structure
+ * Returns: Pointer to newly created drm encoder
+ */
+struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
+ int drm_enc_mode,
+ struct msm_display_info *disp_info);
+
+/**
+ * dpu_encoder_set_idle_timeout - set the idle timeout for video
+ * and command mode encoders.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * @idle_timeout: idle timeout duration in milliseconds
+ */
+void dpu_encoder_set_idle_timeout(struct drm_encoder *drm_enc,
+		u32 idle_timeout);
+
+/**
+ * dpu_encoder_get_linecount - get interface line count for the encoder.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
+int dpu_encoder_get_linecount(struct drm_encoder *drm_enc);
+
+/**
+ * dpu_encoder_get_vsync_count - get vsync count for the encoder.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
+int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc);
+
+bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc);
+
+/**
+ * dpu_encoder_get_crc_values_cnt - get number of physical encoders contained
+ * in virtual encoder that can collect CRC values
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * Returns: Number of physical encoders for given drm encoder
+ */
+int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc);
+
+/**
+ * dpu_encoder_setup_misr - enable misr calculations
+ * @drm_encoder: Pointer to previously created drm encoder structure
+ */
+void dpu_encoder_setup_misr(const struct drm_encoder *drm_encoder);
+
+/**
+ * dpu_encoder_get_crc - get the crc value from interface blocks
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * Returns: 0 on success, error otherwise
+ */
+int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos);
+
+/**
+ * dpu_encoder_use_dsc_merge - returns true if the encoder uses DSC merge topology.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
+bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc);
+
+/**
+ * dpu_encoder_prepare_wb_job - prepare writeback job for the encoder.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * @job: Pointer to the current drm writeback job
+ */
+void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
+ struct drm_writeback_job *job);
+
+/**
+ * dpu_encoder_cleanup_wb_job - cleanup writeback job for the encoder.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * @job: Pointer to the current drm writeback job
+ */
+void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
+ struct drm_writeback_job *job);
+
+/**
+ * dpu_encoder_is_valid_for_commit - check if the encoder has valid parameters for commit.
+ * @drm_enc: Pointer to drm encoder structure
+ */
+bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc);
+
+#endif /* __DPU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
new file mode 100644
index 0000000000..d48558ede4
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -0,0 +1,406 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DPU_ENCODER_PHYS_H__
+#define __DPU_ENCODER_PHYS_H__
+
+#include <drm/drm_writeback.h>
+#include <linux/jiffies.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_intf.h"
+#include "dpu_hw_wb.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_top.h"
+#include "dpu_encoder.h"
+#include "dpu_crtc.h"
+
+#define DPU_ENCODER_NAME_MAX 16
+
+/* wait for at most 2 vsync for lowest refresh rate (24hz) */
+#define KICKOFF_TIMEOUT_MS 84
+#define KICKOFF_TIMEOUT_JIFFIES msecs_to_jiffies(KICKOFF_TIMEOUT_MS)
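+/*
+ * KICKOFF_TIMEOUT_MS above: 2 vsync periods at 24 Hz
+ * = 2 * 1000 / 24 ~= 83.3 ms, rounded up to 84.
+ */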
+
+/**
+ * enum dpu_enc_split_role - Role this physical encoder will play in a
+ * split-panel configuration, where one panel is master, and others slaves.
+ * Masters have extra responsibilities, like managing the VBLANK IRQ.
+ * @ENC_ROLE_SOLO: This is the one and only panel. This encoder is master.
+ * @ENC_ROLE_MASTER: This encoder is the master of a split panel config.
+ * @ENC_ROLE_SLAVE: This encoder is not the master of a split panel config.
+ */
+enum dpu_enc_split_role {
+ ENC_ROLE_SOLO,
+ ENC_ROLE_MASTER,
+ ENC_ROLE_SLAVE,
+};
+
+/**
+ * enum dpu_enc_enable_state - current enabled state of the physical encoder
+ * @DPU_ENC_DISABLING: Encoder transitioning to disable state
+ * Events bounding transition are encoder type specific
+ * @DPU_ENC_DISABLED: Encoder is disabled
+ * @DPU_ENC_ENABLING: Encoder transitioning to enabled
+ * Events bounding transition are encoder type specific
+ * @DPU_ENC_ENABLED: Encoder is enabled
+ * @DPU_ENC_ERR_NEEDS_HW_RESET: Encoder is enabled, but requires a hw_reset
+ * to recover from a previous error
+ */
+enum dpu_enc_enable_state {
+ DPU_ENC_DISABLING,
+ DPU_ENC_DISABLED,
+ DPU_ENC_ENABLING,
+ DPU_ENC_ENABLED,
+ DPU_ENC_ERR_NEEDS_HW_RESET
+};
+
+struct dpu_encoder_phys;
+
+/**
+ * struct dpu_encoder_phys_ops - Interface the physical encoders provide to
+ * the containing virtual encoder.
+ * @prepare_commit: MSM Atomic Call, start of atomic commit sequence
+ * @is_master: Whether this phys_enc is the current master
+ * encoder. Can be switched at enable time. Based
+ * on split_role and current mode (CMD/VID).
+ * @atomic_mode_set: DRM Call. Set a DRM mode.
+ * This likely caches the mode, for use at enable.
+ * @enable: DRM Call. Enable a DRM mode.
+ * @disable: DRM Call. Disable mode.
+ * @atomic_check: DRM Call. Atomic check new DRM state.
+ * @destroy: DRM Call. Destroy and release resources.
+ * @control_vblank_irq:		Register/Deregister for VBLANK IRQ
+ * @wait_for_commit_done: Wait for hardware to have flushed the
+ * current pending frames to hardware
+ * @wait_for_tx_complete: Wait for hardware to transfer the pixels
+ * to the panel
+ * @wait_for_vblank: Wait for VBLANK, for sub-driver internal use
+ * @prepare_for_kickoff: Do any work necessary prior to a kickoff
+ * For CMD encoder, may wait for previous tx done
+ * @handle_post_kickoff: Do any work necessary post-kickoff work
+ * @trigger_start: Process start event on physical encoder
+ * @needs_single_flush: Whether encoder slaves need to be flushed
+ * @irq_control: Handler to enable/disable all the encoder IRQs
+ * @prepare_idle_pc: phys encoder can update the vsync_enable status
+ * on idle power collapse prepare
+ * @restore: Restore all the encoder configs.
+ * @get_line_count:		Obtain current vertical line count
+ * @get_frame_count:		Obtain current frame count
+ * @prepare_wb_job:		Prepare writeback job for the encoder
+ * @cleanup_wb_job:		Clean up writeback job for the encoder
+ * @is_valid_for_commit:	Check if the encoder has valid parameters for commit
+ */
+struct dpu_encoder_phys_ops {
+ void (*prepare_commit)(struct dpu_encoder_phys *encoder);
+ bool (*is_master)(struct dpu_encoder_phys *encoder);
+ void (*atomic_mode_set)(struct dpu_encoder_phys *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
+ void (*enable)(struct dpu_encoder_phys *encoder);
+ void (*disable)(struct dpu_encoder_phys *encoder);
+ int (*atomic_check)(struct dpu_encoder_phys *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
+ void (*destroy)(struct dpu_encoder_phys *encoder);
+ int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable);
+ int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
+ int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
+ int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc);
+ void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc);
+ void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
+ void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
+ bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
+ void (*irq_control)(struct dpu_encoder_phys *phys, bool enable);
+ void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc);
+ void (*restore)(struct dpu_encoder_phys *phys);
+ int (*get_line_count)(struct dpu_encoder_phys *phys);
+ int (*get_frame_count)(struct dpu_encoder_phys *phys);
+ void (*prepare_wb_job)(struct dpu_encoder_phys *phys_enc,
+ struct drm_writeback_job *job);
+ void (*cleanup_wb_job)(struct dpu_encoder_phys *phys_enc,
+ struct drm_writeback_job *job);
+ bool (*is_valid_for_commit)(struct dpu_encoder_phys *phys_enc);
+};
+
+/**
+ * enum dpu_intr_idx - dpu encoder interrupt index
+ * @INTR_IDX_VSYNC: Vsync interrupt for video mode panel
+ * @INTR_IDX_PINGPONG: Pingpong done interrupt for cmd mode panel
+ * @INTR_IDX_UNDERRUN: Underrun interrupt for video and cmd mode panel
+ * @INTR_IDX_CTL_START: Ctl start interrupt for cmd mode panel
+ * @INTR_IDX_RDPTR: Readpointer done interrupt for cmd mode panel
+ * @INTR_IDX_WB_DONE: Writeback done interrupt for virtual connector
+ */
+enum dpu_intr_idx {
+ INTR_IDX_VSYNC,
+ INTR_IDX_PINGPONG,
+ INTR_IDX_UNDERRUN,
+ INTR_IDX_CTL_START,
+ INTR_IDX_RDPTR,
+ INTR_IDX_WB_DONE,
+ INTR_IDX_MAX,
+};
+
+/**
+ * struct dpu_encoder_phys - physical encoder that drives a single INTF block
+ * tied to a specific panel / sub-panel. Abstract type, sub-classed by
+ * phys_vid or phys_cmd for video mode or command mode encs respectively.
+ * @parent: Pointer to the containing virtual encoder
+ * @ops: Operations exposed to the virtual encoder
+ * @hw_mdptop: Hardware interface to the top registers
+ * @hw_ctl: Hardware interface to the ctl registers
+ * @hw_pp: Hardware interface to the ping pong registers
+ * @hw_intf: Hardware interface to the intf registers
+ * @hw_wb: Hardware interface to the wb registers
+ * @dpu_kms: Pointer to the dpu_kms top level
+ * @cached_mode: DRM mode cached at mode_set time, acted on in enable
+ * @split_role: Role to play in a split-panel configuration
+ * @intf_mode: Interface mode
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @enable_state: Enable state tracking
+ * @vblank_refcount: Reference count of vblank request
+ * @vsync_cnt: Vsync count for the physical encoder
+ * @underrun_cnt: Underrun count for the physical encoder
+ * @pending_kickoff_cnt: Atomic counter tracking the number of kickoffs
+ *				vs. the number of done/vblank irqs. Should hover
+ *				between 0 and 2. Incremented when a new kickoff
+ *				is scheduled, decremented in the irq handler.
+ * @pending_ctlstart_cnt: Atomic counter tracking the number of ctl start
+ * pending.
+ * @pending_kickoff_wq: Wait queue for blocking until kickoff completes
+ * @irq: IRQ indices
+ * @has_intf_te: Interface TE configuration support
+ */
+struct dpu_encoder_phys {
+ struct drm_encoder *parent;
+ struct dpu_encoder_phys_ops ops;
+ struct dpu_hw_mdp *hw_mdptop;
+ struct dpu_hw_ctl *hw_ctl;
+ struct dpu_hw_pingpong *hw_pp;
+ struct dpu_hw_intf *hw_intf;
+ struct dpu_hw_wb *hw_wb;
+ struct dpu_kms *dpu_kms;
+ struct drm_display_mode cached_mode;
+ enum dpu_enc_split_role split_role;
+ enum dpu_intf_mode intf_mode;
+ spinlock_t *enc_spinlock;
+ enum dpu_enc_enable_state enable_state;
+ atomic_t vblank_refcount;
+ atomic_t vsync_cnt;
+ atomic_t underrun_cnt;
+ atomic_t pending_ctlstart_cnt;
+ atomic_t pending_kickoff_cnt;
+ wait_queue_head_t pending_kickoff_wq;
+ int irq[INTR_IDX_MAX];
+ bool has_intf_te;
+};
+
+static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys)
+{
+ atomic_inc_return(&phys->pending_ctlstart_cnt);
+ return atomic_inc_return(&phys->pending_kickoff_cnt);
+}
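+
+/*
+ * Typical pairing, as sketched from the handlers in this patch: the kickoff
+ * path calls dpu_encoder_phys_inc_pending() once per frame, while the
+ * ctl-start and pingpong-done IRQ handlers decrement their respective
+ * counters with atomic_add_unless(..., -1, 0) and wake any waiters on
+ * pending_kickoff_wq.
+ */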
+
+/**
+ * struct dpu_encoder_phys_wb - sub-class of dpu_encoder_phys to handle
+ * writeback specific operations
+ * @base: Baseclass physical encoder structure
+ * @wbirq_refcount: Reference count of writeback interrupt
+ * @wb_done_timeout_cnt: number of wb done irq timeout errors
+ * @wb_cfg: writeback block config to store fb related details
+ * @wb_conn: backpointer to writeback connector
+ * @wb_job: backpointer to current writeback job
+ * @dest: dpu buffer layout for current writeback output buffer
+ */
+struct dpu_encoder_phys_wb {
+ struct dpu_encoder_phys base;
+ atomic_t wbirq_refcount;
+ int wb_done_timeout_cnt;
+ struct dpu_hw_wb_cfg wb_cfg;
+ struct drm_writeback_connector *wb_conn;
+ struct drm_writeback_job *wb_job;
+ struct dpu_hw_fmt_layout dest;
+};
+
+/**
+ * struct dpu_encoder_phys_cmd - sub-class of dpu_encoder_phys to handle command
+ * mode specific operations
+ * @base: Baseclass physical encoder structure
+ * @stream_sel: Stream selection for multi-stream interfaces
+ * @serialize_wait4pp: serialize wait4pp feature waits for pp_done interrupt
+ * after ctl_start instead of before next frame kickoff
+ * @pp_timeout_report_cnt: number of pingpong done irq timeout errors
+ * @pending_vblank_cnt: Atomic counter tracking pending wait for VBLANK
+ * @pending_vblank_wq: Wait queue for blocking until VBLANK received
+ */
+struct dpu_encoder_phys_cmd {
+ struct dpu_encoder_phys base;
+ int stream_sel;
+ bool serialize_wait4pp;
+ int pp_timeout_report_cnt;
+ atomic_t pending_vblank_cnt;
+ wait_queue_head_t pending_vblank_wq;
+};
+
+/**
+ * struct dpu_enc_phys_init_params - initialization parameters for phys encs
+ * @dpu_kms: Pointer to the dpu_kms top level
+ * @parent: Pointer to the containing virtual encoder
+ * @split_role: Role to play in a split-panel configuration
+ * @hw_intf: Hardware interface to the intf registers
+ * @hw_wb: Hardware interface to the wb registers
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ */
+struct dpu_enc_phys_init_params {
+ struct dpu_kms *dpu_kms;
+ struct drm_encoder *parent;
+ enum dpu_enc_split_role split_role;
+ struct dpu_hw_intf *hw_intf;
+ struct dpu_hw_wb *hw_wb;
+ spinlock_t *enc_spinlock;
+};
+
+/**
+ * struct dpu_encoder_wait_info - container for passing arguments to irq wait functions
+ * @wq: wait queue structure
+ * @atomic_cnt: wait until atomic_cnt equals zero
+ * @timeout_ms: timeout value in milliseconds
+ */
+struct dpu_encoder_wait_info {
+ wait_queue_head_t *wq;
+ atomic_t *atomic_cnt;
+ s64 timeout_ms;
+};
+
+/**
+ * dpu_encoder_phys_vid_init - Construct a new video mode physical encoder
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+ struct dpu_enc_phys_init_params *p);
+
+/**
+ * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+ struct dpu_enc_phys_init_params *p);
+
+/**
+ * dpu_encoder_phys_wb_init - Construct a new writeback physical encoder
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
+ struct dpu_enc_phys_init_params *p);
+
+/**
+ * dpu_encoder_helper_trigger_start - control start helper function
+ * This helper function may be optionally specified by physical
+ * encoders if they require ctl_start triggering.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc);
+
+static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_crtc_state *dpu_cstate;
+
+ if (!phys_enc || phys_enc->enable_state == DPU_ENC_DISABLING)
+ return BLEND_3D_NONE;
+
+ dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state);
+
+ /* Use merge_3d unless DSC MERGE topology is used */
+ if (phys_enc->split_role == ENC_ROLE_SOLO &&
+ dpu_cstate->num_mixers == CRTC_DUAL_MIXERS &&
+ !dpu_encoder_use_dsc_merge(phys_enc->parent))
+ return BLEND_3D_H_ROW_INT;
+
+ return BLEND_3D_NONE;
+}
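+
+/*
+ * Summary of the helper above: only a solo encoder driving two layer mixers
+ * without DSC merge needs the 3D mux to interleave the two mixer outputs
+ * (BLEND_3D_H_ROW_INT); split-panel and DSC-merge topologies return
+ * BLEND_3D_NONE.
+ */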
+
+/**
+ * dpu_encoder_helper_get_dsc - get DSC blocks mask for the DPU encoder
+ * This helper function is used by physical encoder to get DSC blocks mask
+ * used for this encoder.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc);
+
+/**
+ * dpu_encoder_helper_split_config - split display configuration helper function
+ * This helper function may be used by physical encoders to configure
+ * the split display related registers.
+ * @phys_enc: Pointer to physical encoder structure
+ * @interface: enum dpu_intf setting
+ */
+void dpu_encoder_helper_split_config(
+ struct dpu_encoder_phys *phys_enc,
+ enum dpu_intf interface);
+
+/**
+ * dpu_encoder_helper_report_irq_timeout - utility to report error that irq has
+ * timed out, including reporting frame error event to crtc and debug dump
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: Failing interrupt index
+ */
+void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx);
+
+/**
+ * dpu_encoder_helper_wait_for_irq - utility to wait on an irq.
+ *	note: will call @func directly on timeout
+ * @phys_enc: Pointer to physical encoder structure
+ * @irq: IRQ index
+ * @func: IRQ callback to be called in case of timeout
+ * @wait_info: wait info struct
+ * Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
+ int irq,
+ void (*func)(void *arg, int irq_idx),
+ struct dpu_encoder_wait_info *wait_info);
+
+/**
+ * dpu_encoder_helper_phys_cleanup - helper to cleanup dpu pipeline
+ * @phys_enc: Pointer to physical encoder structure
+ */
+void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc);
+
+/**
+ * dpu_encoder_vblank_callback - Notify virtual encoder of vblank IRQ reception
+ * @drm_enc: Pointer to drm encoder structure
+ * @phy_enc: Pointer to physical encoder
+ * Note: This is called from IRQ handler context.
+ */
+void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phy_enc);
+
+/**
+ * dpu_encoder_underrun_callback - Notify virtual encoder of underrun IRQ reception
+ * @drm_enc: Pointer to drm encoder structure
+ * @phy_enc: Pointer to physical encoder
+ * Note: This is called from IRQ handler context.
+ */
+void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phy_enc);
+
+/**
+ * dpu_encoder_frame_done_callback - Notify virtual encoder that this physical
+ *	encoder has completed the last requested frame
+ * @drm_enc: Pointer to drm encoder structure
+ * @ready_phys: Pointer to the physical encoder that completed the frame
+ * @event: Event to process
+ */
+void dpu_encoder_frame_done_callback(
+ struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *ready_phys, u32 event);
+
+void dpu_encoder_phys_init(struct dpu_encoder_phys *phys,
+ struct dpu_enc_phys_init_params *p);
+
+#endif /* __DPU_ENCODER_PHYS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
new file mode 100644
index 0000000000..df88358e70
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -0,0 +1,788 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2018, 2020-2021 The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/delay.h>
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_trace.h"
+#include "disp/msm_disp_snapshot.h"
+
+#define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) ? (e)->base.hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_CMDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) ? (e)->base.hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define to_dpu_encoder_phys_cmd(x) \
+ container_of(x, struct dpu_encoder_phys_cmd, base)
+
+#define PP_TIMEOUT_MAX_TRIALS 10
+
+/*
+ * Tearcheck sync start and continue thresholds are empirically found
+ * based on common panels. In the future, we may want to allow panels to
+ * override these default values.
+ */
+#define DEFAULT_TEARCHECK_SYNC_THRESH_START 4
+#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE 4
+
+static void dpu_encoder_phys_cmd_enable_te(struct dpu_encoder_phys *phys_enc);
+
+static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc)
+{
+ return (phys_enc->split_role != ENC_ROLE_SLAVE);
+}
+
+static void _dpu_encoder_phys_cmd_update_intf_cfg(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_hw_ctl *ctl;
+ struct dpu_hw_intf_cfg intf_cfg = { 0 };
+ struct dpu_hw_intf_cmd_mode_cfg cmd_mode_cfg = {};
+
+ ctl = phys_enc->hw_ctl;
+ if (!ctl->ops.setup_intf_cfg)
+ return;
+
+ intf_cfg.intf = phys_enc->hw_intf->idx;
+ intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
+ intf_cfg.stream_sel = cmd_enc->stream_sel;
+ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+ intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
+ ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
+
+ /* setup which pp blk will connect to this intf */
+ if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) && phys_enc->hw_intf->ops.bind_pingpong_blk)
+ phys_enc->hw_intf->ops.bind_pingpong_blk(
+ phys_enc->hw_intf,
+ phys_enc->hw_pp->idx);
+
+ if (intf_cfg.dsc != 0)
+ cmd_mode_cfg.data_compress = true;
+
+ if (phys_enc->hw_intf->ops.program_intf_cmd_cfg)
+ phys_enc->hw_intf->ops.program_intf_cmd_cfg(phys_enc->hw_intf, &cmd_mode_cfg);
+}
+
+static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ unsigned long lock_flags;
+ int new_cnt;
+ u32 event = DPU_ENCODER_FRAME_EVENT_DONE;
+
+ if (!phys_enc->hw_pp)
+ return;
+
+ DPU_ATRACE_BEGIN("pp_done_irq");
+ /* notify all synchronous clients first, then asynchronous clients */
+ dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc, event);
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ trace_dpu_enc_phys_cmd_pp_tx_done(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ new_cnt, event);
+
+ /* Signal any waiting atomic commit thread */
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+ DPU_ATRACE_END("pp_done_irq");
+}
+
+static void dpu_encoder_phys_cmd_te_rd_ptr_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+
+ if (phys_enc->has_intf_te) {
+ if (!phys_enc->hw_intf)
+ return;
+ } else {
+ if (!phys_enc->hw_pp)
+ return;
+ }
+
+ DPU_ATRACE_BEGIN("rd_ptr_irq");
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ dpu_encoder_vblank_callback(phys_enc->parent, phys_enc);
+
+ atomic_add_unless(&cmd_enc->pending_vblank_cnt, -1, 0);
+ wake_up_all(&cmd_enc->pending_vblank_wq);
+ DPU_ATRACE_END("rd_ptr_irq");
+}
+
+static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+
+ DPU_ATRACE_BEGIN("ctl_start_irq");
+
+ atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
+
+ /* Signal any waiting ctl start interrupt */
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+ DPU_ATRACE_END("ctl_start_irq");
+}
+
+static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+
+ dpu_encoder_underrun_callback(phys_enc->parent, phys_enc);
+}
+
+static void dpu_encoder_phys_cmd_atomic_mode_set(
+ struct dpu_encoder_phys *phys_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ phys_enc->irq[INTR_IDX_CTL_START] = phys_enc->hw_ctl->caps->intr_start;
+
+ phys_enc->irq[INTR_IDX_PINGPONG] = phys_enc->hw_pp->caps->intr_done;
+
+ if (phys_enc->has_intf_te)
+ phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_intf->cap->intr_tear_rd_ptr;
+ else
+ phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_pp->caps->intr_rdptr;
+
+ phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
+}
+
+static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
+ bool do_log = false;
+ struct drm_encoder *drm_enc;
+
+ if (!phys_enc->hw_pp)
+ return -EINVAL;
+
+ drm_enc = phys_enc->parent;
+
+ cmd_enc->pp_timeout_report_cnt++;
+ if (cmd_enc->pp_timeout_report_cnt == PP_TIMEOUT_MAX_TRIALS) {
+ frame_event |= DPU_ENCODER_FRAME_EVENT_PANEL_DEAD;
+ do_log = true;
+ } else if (cmd_enc->pp_timeout_report_cnt == 1) {
+ do_log = true;
+ }
+
+ trace_dpu_enc_phys_cmd_pdone_timeout(DRMID(drm_enc),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ cmd_enc->pp_timeout_report_cnt,
+ atomic_read(&phys_enc->pending_kickoff_cnt),
+ frame_event);
+
+ /* to avoid flooding, only log first time, and "dead" time */
+ if (do_log) {
+		DRM_ERROR("id:%d pp:%d ctl:%d kickoff timeout cnt:%d koff_cnt:%d\n",
+ DRMID(drm_enc),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ phys_enc->hw_ctl->idx - CTL_0,
+ cmd_enc->pp_timeout_report_cnt,
+ atomic_read(&phys_enc->pending_kickoff_cnt));
+ msm_disp_snapshot_state(drm_enc->dev);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_RDPTR]);
+ }
+
+ atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+
+ /* request a ctl reset before the next kickoff */
+ phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;
+
+ dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc, frame_event);
+
+ return -ETIMEDOUT;
+}
+
+static int _dpu_encoder_phys_cmd_wait_for_idle(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_encoder_wait_info wait_info;
+ int ret;
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc,
+ phys_enc->irq[INTR_IDX_PINGPONG],
+ dpu_encoder_phys_cmd_pp_tx_done_irq,
+ &wait_info);
+ if (ret == -ETIMEDOUT)
+ _dpu_encoder_phys_cmd_handle_ppdone_timeout(phys_enc);
+ else if (!ret)
+ cmd_enc->pp_timeout_report_cnt = 0;
+
+ return ret;
+}
+
+static int dpu_encoder_phys_cmd_control_vblank_irq(
+ struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ int ret = 0;
+ int refcount;
+
+ if (!phys_enc->hw_pp) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ refcount = atomic_read(&phys_enc->vblank_refcount);
+
+ /* Slave encoders don't report vblank */
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ goto end;
+
+ /* protect against negative */
+ if (!enable && refcount == 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ DRM_DEBUG_KMS("id:%u pp:%d enable=%s/%d\n", DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ enable ? "true" : "false", refcount);
+
+ if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+ ret = dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_RDPTR],
+ dpu_encoder_phys_cmd_te_rd_ptr_irq,
+ phys_enc);
+ else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+ ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_RDPTR]);
+
+end:
+ if (ret) {
+ DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n",
+ DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0, ret,
+ enable ? "true" : "false", refcount);
+ }
+
+ return ret;
+}
+
+static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ enable, atomic_read(&phys_enc->vblank_refcount));
+
+ if (enable) {
+ dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_PINGPONG],
+ dpu_encoder_phys_cmd_pp_tx_done_irq,
+ phys_enc);
+ dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_UNDERRUN],
+ dpu_encoder_phys_cmd_underrun_irq,
+ phys_enc);
+ dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
+
+ if (dpu_encoder_phys_cmd_is_master(phys_enc))
+ dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_CTL_START],
+ dpu_encoder_phys_cmd_ctl_start_irq,
+ phys_enc);
+ } else {
+ if (dpu_encoder_phys_cmd_is_master(phys_enc))
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_CTL_START]);
+
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_UNDERRUN]);
+ dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_PINGPONG]);
+ }
+}
+
+static void dpu_encoder_phys_cmd_tearcheck_config(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_hw_tear_check tc_cfg = { 0 };
+ struct drm_display_mode *mode;
+ bool tc_enable = true;
+ unsigned long vsync_hz;
+ struct dpu_kms *dpu_kms;
+
+ if (phys_enc->has_intf_te) {
+ if (!phys_enc->hw_intf ||
+ !phys_enc->hw_intf->ops.enable_tearcheck) {
+ DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
+ return;
+ }
+
+ DPU_DEBUG_CMDENC(cmd_enc, "");
+ } else {
+ if (!phys_enc->hw_pp ||
+ !phys_enc->hw_pp->ops.enable_tearcheck) {
+ DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
+ return;
+ }
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+ }
+
+ mode = &phys_enc->cached_mode;
+
+ dpu_kms = phys_enc->dpu_kms;
+
+	/*
+	 * TE default: DSI byte clock calculated based on 70 fps;
+	 * around 14 ms to complete a kickoff cycle if TE is disabled;
+	 * vclk_line based on 60 fps; write is faster than read;
+	 * init == start == rdptr.
+	 *
+	 * vsync_count is the ratio of the MDP VSYNC clock frequency to the
+	 * LCD panel refresh rate, divided by the number of rows (lines) in
+	 * the LCD panel.
+	 */
+ vsync_hz = dpu_kms_get_clk_rate(dpu_kms, "vsync");
+ if (!vsync_hz) {
+ DPU_DEBUG_CMDENC(cmd_enc, "invalid - no vsync clock\n");
+ return;
+ }
+
+ tc_cfg.vsync_count = vsync_hz /
+ (mode->vtotal * drm_mode_vrefresh(mode));
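+	/*
+	 * Worked example with hypothetical numbers (not from this patch):
+	 * a 19.2 MHz vsync clock and a mode with vtotal = 2000 lines at
+	 * 60 Hz gives vsync_count = 19200000 / (2000 * 60) = 160 vsync
+	 * ticks per panel line.
+	 */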
+
+ /*
+ * Set the sync_cfg_height to twice vtotal so that if we lose a
+ * TE event coming from the display TE pin we won't stall immediately
+ */
+ tc_cfg.hw_vsync_mode = 1;
+ tc_cfg.sync_cfg_height = mode->vtotal * 2;
+ tc_cfg.vsync_init_val = mode->vdisplay;
+ tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
+ tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
+ tc_cfg.start_pos = mode->vdisplay;
+ tc_cfg.rd_ptr_irq = mode->vdisplay + 1;
+
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc vsync_clk_speed_hz %lu vtotal %u vrefresh %u\n",
+ vsync_hz, mode->vtotal, drm_mode_vrefresh(mode));
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc enable %u start_pos %u rd_ptr_irq %u\n",
+ tc_enable, tc_cfg.start_pos, tc_cfg.rd_ptr_irq);
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
+ tc_cfg.hw_vsync_mode, tc_cfg.vsync_count,
+ tc_cfg.vsync_init_val);
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc cfgheight %u thresh_start %u thresh_cont %u\n",
+ tc_cfg.sync_cfg_height, tc_cfg.sync_threshold_start,
+ tc_cfg.sync_threshold_continue);
+
+ if (phys_enc->has_intf_te)
+ phys_enc->hw_intf->ops.enable_tearcheck(phys_enc->hw_intf, &tc_cfg);
+ else
+ phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, &tc_cfg);
+}
+
+static void _dpu_encoder_phys_cmd_pingpong_config(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+ DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != NULL);
+ return;
+ }
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
+ phys_enc->hw_pp->idx - PINGPONG_0);
+ drm_mode_debug_printmodeline(&phys_enc->cached_mode);
+
+ _dpu_encoder_phys_cmd_update_intf_cfg(phys_enc);
+ dpu_encoder_phys_cmd_tearcheck_config(phys_enc);
+}
+
+static bool dpu_encoder_phys_cmd_needs_single_flush(
+ struct dpu_encoder_phys *phys_enc)
+{
+	/*
+	 * We do a separate flush for each CTL and let
+	 * CTL_START synchronize them.
+	 */
+ return false;
+}
+
+static void dpu_encoder_phys_cmd_enable_helper(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_ctl *ctl;
+
+ if (!phys_enc->hw_pp) {
+ DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
+ return;
+ }
+
+ dpu_encoder_helper_split_config(phys_enc, phys_enc->hw_intf->idx);
+
+ _dpu_encoder_phys_cmd_pingpong_config(phys_enc);
+
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ return;
+
+ ctl = phys_enc->hw_ctl;
+ ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
+}
+
+static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc->hw_pp) {
+ DPU_ERROR("invalid phys encoder\n");
+ return;
+ }
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+ if (phys_enc->enable_state == DPU_ENC_ENABLED) {
+ DPU_ERROR("already enabled\n");
+ return;
+ }
+
+ dpu_encoder_phys_cmd_enable_helper(phys_enc);
+ phys_enc->enable_state = DPU_ENC_ENABLED;
+}
+
+static void _dpu_encoder_phys_cmd_connect_te(
+ struct dpu_encoder_phys *phys_enc, bool enable)
+{
+ if (phys_enc->has_intf_te) {
+ if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.connect_external_te)
+ return;
+
+ trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
+ phys_enc->hw_intf->ops.connect_external_te(phys_enc->hw_intf, enable);
+ } else {
+ if (!phys_enc->hw_pp || !phys_enc->hw_pp->ops.connect_external_te)
+ return;
+
+ trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
+ phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
+ }
+}
+
+static void dpu_encoder_phys_cmd_prepare_idle_pc(
+ struct dpu_encoder_phys *phys_enc)
+{
+ _dpu_encoder_phys_cmd_connect_te(phys_enc, false);
+}
+
+static int dpu_encoder_phys_cmd_get_line_count(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_pingpong *hw_pp;
+ struct dpu_hw_intf *hw_intf;
+
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ return -EINVAL;
+
+ if (phys_enc->has_intf_te) {
+ hw_intf = phys_enc->hw_intf;
+ if (!hw_intf || !hw_intf->ops.get_line_count)
+ return -EINVAL;
+ return hw_intf->ops.get_line_count(hw_intf);
+ }
+
+ hw_pp = phys_enc->hw_pp;
+ if (!hw_pp || !hw_pp->ops.get_line_count)
+ return -EINVAL;
+ return hw_pp->ops.get_line_count(hw_pp);
+}
+
+static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_hw_ctl *ctl;
+
+ if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+ DPU_ERROR_CMDENC(cmd_enc, "already disabled\n");
+ return;
+ }
+
+ if (phys_enc->has_intf_te) {
+ DRM_DEBUG_KMS("id:%u intf:%d state:%d\n", DRMID(phys_enc->parent),
+ phys_enc->hw_intf->idx - INTF_0,
+ phys_enc->enable_state);
+
+ if (phys_enc->hw_intf->ops.disable_tearcheck)
+ phys_enc->hw_intf->ops.disable_tearcheck(phys_enc->hw_intf);
+ } else {
+ if (!phys_enc->hw_pp) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("id:%u pp:%d state:%d\n", DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ phys_enc->enable_state);
+
+ if (phys_enc->hw_pp->ops.disable_tearcheck)
+ phys_enc->hw_pp->ops.disable_tearcheck(phys_enc->hw_pp);
+ }
+
+ if (phys_enc->hw_intf->ops.bind_pingpong_blk) {
+ phys_enc->hw_intf->ops.bind_pingpong_blk(
+ phys_enc->hw_intf,
+ PINGPONG_NONE);
+
+ ctl = phys_enc->hw_ctl;
+ ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
+ }
+
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+}
+
+static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ kfree(cmd_enc);
+}
+
+static void dpu_encoder_phys_cmd_prepare_for_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ int ret;
+
+ if (!phys_enc->hw_pp) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ DRM_DEBUG_KMS("id:%u pp:%d pending_cnt:%d\n", DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(&phys_enc->pending_kickoff_cnt));
+
+ /*
+	 * Mark the kickoff request as outstanding. If more than one is
+	 * outstanding, we have to wait for the previous one to complete.
+ */
+ ret = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
+ if (ret) {
+ /* force pending_kickoff_cnt 0 to discard failed kickoff */
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ DRM_ERROR("failed wait_for_idle: id:%u ret:%d pp:%d\n",
+ DRMID(phys_enc->parent), ret,
+ phys_enc->hw_pp->idx - PINGPONG_0);
+ }
+
+ dpu_encoder_phys_cmd_enable_te(phys_enc);
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n",
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(&phys_enc->pending_kickoff_cnt));
+}
+
+static void dpu_encoder_phys_cmd_enable_te(struct dpu_encoder_phys *phys_enc)
+{
+ if (!phys_enc)
+ return;
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ return;
+
+ if (phys_enc->has_intf_te) {
+ if (!phys_enc->hw_intf->ops.disable_autorefresh)
+ return;
+
+ phys_enc->hw_intf->ops.disable_autorefresh(
+ phys_enc->hw_intf,
+ DRMID(phys_enc->parent),
+ phys_enc->cached_mode.vdisplay);
+ } else {
+ if (!phys_enc->hw_pp ||
+ !phys_enc->hw_pp->ops.disable_autorefresh)
+ return;
+
+ phys_enc->hw_pp->ops.disable_autorefresh(
+ phys_enc->hw_pp,
+ DRMID(phys_enc->parent),
+ phys_enc->cached_mode.vdisplay);
+ }
+}
+
+static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_encoder_wait_info wait_info;
+ int ret;
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc,
+ phys_enc->irq[INTR_IDX_CTL_START],
+ dpu_encoder_phys_cmd_ctl_start_irq,
+ &wait_info);
+	if (ret == -ETIMEDOUT) {
+		DPU_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n");
+		ret = -EINVAL;
+	}
+
+ return ret;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_tx_complete(
+ struct dpu_encoder_phys *phys_enc)
+{
+ int rc;
+
+ rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
+ if (rc) {
+ DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
+ DRMID(phys_enc->parent), rc,
+ phys_enc->hw_intf->idx - INTF_0);
+ }
+
+ return rc;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_commit_done(
+ struct dpu_encoder_phys *phys_enc)
+{
+ /* only required for master controller */
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ return 0;
+
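+	/*
+	 * If the CTL is already running, wait for the in-flight frame
+	 * transfer to complete; otherwise wait for the CTL start
+	 * interrupt of the first frame.
+	 */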
+ if (phys_enc->hw_ctl->ops.is_started(phys_enc->hw_ctl))
+ return dpu_encoder_phys_cmd_wait_for_tx_complete(phys_enc);
+
+ return _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
+}
+
+static int dpu_encoder_phys_cmd_wait_for_vblank(
+ struct dpu_encoder_phys *phys_enc)
+{
+ int rc = 0;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+ struct dpu_encoder_wait_info wait_info;
+
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ /* only required for master controller */
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ return rc;
+
+ wait_info.wq = &cmd_enc->pending_vblank_wq;
+ wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+ atomic_inc(&cmd_enc->pending_vblank_cnt);
+
+ rc = dpu_encoder_helper_wait_for_irq(phys_enc,
+ phys_enc->irq[INTR_IDX_RDPTR],
+ dpu_encoder_phys_cmd_te_rd_ptr_irq,
+ &wait_info);
+
+ return rc;
+}
+
+static void dpu_encoder_phys_cmd_handle_post_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+{
+	/*
+	 * re-enable external TE, either for the first time after enabling
+	 * or if disabled for autorefresh
+	 */
+ _dpu_encoder_phys_cmd_connect_te(phys_enc, true);
+}
+
+static void dpu_encoder_phys_cmd_trigger_start(
+ struct dpu_encoder_phys *phys_enc)
+{
+ dpu_encoder_helper_trigger_start(phys_enc);
+}
+
+static void dpu_encoder_phys_cmd_init_ops(
+ struct dpu_encoder_phys_ops *ops)
+{
+ ops->is_master = dpu_encoder_phys_cmd_is_master;
+ ops->atomic_mode_set = dpu_encoder_phys_cmd_atomic_mode_set;
+ ops->enable = dpu_encoder_phys_cmd_enable;
+ ops->disable = dpu_encoder_phys_cmd_disable;
+ ops->destroy = dpu_encoder_phys_cmd_destroy;
+ ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
+ ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
+ ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
+ ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
+ ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
+ ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
+ ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
+ ops->irq_control = dpu_encoder_phys_cmd_irq_control;
+ ops->restore = dpu_encoder_phys_cmd_enable_helper;
+ ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
+ ops->handle_post_kickoff = dpu_encoder_phys_cmd_handle_post_kickoff;
+ ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
+}
+
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+ struct dpu_enc_phys_init_params *p)
+{
+ struct dpu_encoder_phys *phys_enc = NULL;
+ struct dpu_encoder_phys_cmd *cmd_enc = NULL;
+
+ DPU_DEBUG("intf\n");
+
+ cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
+ if (!cmd_enc) {
+ DPU_ERROR("failed to allocate\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ phys_enc = &cmd_enc->base;
+
+ dpu_encoder_phys_init(phys_enc, p);
+
+ dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
+ phys_enc->intf_mode = INTF_MODE_CMD;
+ cmd_enc->stream_sel = 0;
+
+ phys_enc->has_intf_te = test_bit(DPU_INTF_TE,
+ &phys_enc->hw_intf->cap->features);
+
+ atomic_set(&cmd_enc->pending_vblank_cnt, 0);
+ init_waitqueue_head(&cmd_enc->pending_vblank_wq);
+
+ DPU_DEBUG_CMDENC(cmd_enc, "created\n");
+
+ return phys_enc;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
new file mode 100644
index 0000000000..c2189e58de
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -0,0 +1,723 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, 2020-2021 The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_merge3d.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_trace.h"
+#include "disp/msm_disp_snapshot.h"
+
+#define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
+ (e) && (e)->parent ? \
+ (e)->parent->base.id : -1, \
+ (e) && (e)->hw_intf ? \
+ (e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_VIDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
+ (e) && (e)->parent ? \
+ (e)->parent->base.id : -1, \
+ (e) && (e)->hw_intf ? \
+ (e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define to_dpu_encoder_phys_vid(x) \
+ container_of(x, struct dpu_encoder_phys_vid, base)
+
+static bool dpu_encoder_phys_vid_is_master(
+ struct dpu_encoder_phys *phys_enc)
+{
+ bool ret = false;
+
+ if (phys_enc->split_role != ENC_ROLE_SLAVE)
+ ret = true;
+
+ return ret;
+}
+
+static void drm_mode_to_intf_timing_params(
+ const struct dpu_encoder_phys *phys_enc,
+ const struct drm_display_mode *mode,
+ struct dpu_hw_intf_timing_params *timing)
+{
+ memset(timing, 0, sizeof(*timing));
+
+ if ((mode->htotal < mode->hsync_end)
+ || (mode->hsync_start < mode->hdisplay)
+ || (mode->vtotal < mode->vsync_end)
+ || (mode->vsync_start < mode->vdisplay)
+ || (mode->hsync_end < mode->hsync_start)
+ || (mode->vsync_end < mode->vsync_start)) {
+ DPU_ERROR(
+ "invalid params - hstart:%d,hend:%d,htot:%d,hdisplay:%d\n",
+ mode->hsync_start, mode->hsync_end,
+ mode->htotal, mode->hdisplay);
+ DPU_ERROR("vstart:%d,vend:%d,vtot:%d,vdisplay:%d\n",
+ mode->vsync_start, mode->vsync_end,
+ mode->vtotal, mode->vdisplay);
+ return;
+ }
+
+ /*
+ * https://www.kernel.org/doc/htmldocs/drm/ch02s05.html
+ * Active Region Front Porch Sync Back Porch
+ * <-----------------><------------><-----><----------->
+ * <- [hv]display --->
+ * <--------- [hv]sync_start ------>
+ * <----------------- [hv]sync_end ------->
+ * <---------------------------- [hv]total ------------->
+ */
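+	/*
+	 * Worked example, CEA 1080p60 (hdisplay 1920, hsync_start 2008,
+	 * hsync_end 2052, htotal 2200): h_front_porch = 2008 - 1920 = 88,
+	 * hsync_pulse_width = 2052 - 2008 = 44 and h_back_porch =
+	 * 2200 - 2052 = 148 pixels.
+	 */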
+ timing->width = mode->hdisplay; /* active width */
+ timing->height = mode->vdisplay; /* active height */
+ timing->xres = timing->width;
+ timing->yres = timing->height;
+ timing->h_back_porch = mode->htotal - mode->hsync_end;
+ timing->h_front_porch = mode->hsync_start - mode->hdisplay;
+ timing->v_back_porch = mode->vtotal - mode->vsync_end;
+ timing->v_front_porch = mode->vsync_start - mode->vdisplay;
+ timing->hsync_pulse_width = mode->hsync_end - mode->hsync_start;
+ timing->vsync_pulse_width = mode->vsync_end - mode->vsync_start;
+ timing->hsync_polarity = (mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
+ timing->vsync_polarity = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
+ timing->border_clr = 0;
+ timing->underflow_clr = 0xff;
+ timing->hsync_skew = mode->hskew;
+
+ /* DSI controller cannot handle active-low sync signals. */
+ if (phys_enc->hw_intf->cap->type == INTF_DSI) {
+ timing->hsync_polarity = 0;
+ timing->vsync_polarity = 0;
+ }
+
+	/* For DP/eDP, shift timings to align the active region bottom right */
+ if (phys_enc->hw_intf->cap->type == INTF_DP) {
+ timing->h_back_porch += timing->h_front_porch;
+ timing->h_front_porch = 0;
+ timing->v_back_porch += timing->v_front_porch;
+ timing->v_front_porch = 0;
+ }
+
+ timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);
+
+	/*
+	 * for DP, divide the horizontal parameters by 2 when
+	 * widebus is enabled
+	 */
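+	/*
+	 * E.g. a 3840 pixel wide DP mode is programmed as width 1920,
+	 * since widebus transfers two pixels per pixel clock.
+	 */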
+ if (phys_enc->hw_intf->cap->type == INTF_DP && timing->wide_bus_en) {
+ timing->width = timing->width >> 1;
+ timing->xres = timing->xres >> 1;
+ timing->h_back_porch = timing->h_back_porch >> 1;
+ timing->h_front_porch = timing->h_front_porch >> 1;
+ timing->hsync_pulse_width = timing->hsync_pulse_width >> 1;
+ }
+}
+
+static u32 get_horizontal_total(const struct dpu_hw_intf_timing_params *timing)
+{
+ u32 active = timing->xres;
+ u32 inactive =
+ timing->h_back_porch + timing->h_front_porch +
+ timing->hsync_pulse_width;
+ return active + inactive;
+}
+
+static u32 get_vertical_total(const struct dpu_hw_intf_timing_params *timing)
+{
+ u32 active = timing->yres;
+ u32 inactive =
+ timing->v_back_porch + timing->v_front_porch +
+ timing->vsync_pulse_width;
+ return active + inactive;
+}
+
+/*
+ * programmable_fetch_get_num_lines:
+ * Number of fetch lines in vertical front porch
+ * @timing: Pointer to the intf timing information for the requested mode
+ *
+ * Returns the number of fetch lines in vertical front porch at which mdp
+ * can start fetching the next frame.
+ *
+ * Number of needed prefetch lines is anything that cannot be absorbed in the
+ * start of frame time (back porch + vsync pulse width).
+ *
+ * Some panels have a very large VFP; however, only as many lines as the
+ * chip's worst-case latency requires are used.
+ */
+static u32 programmable_fetch_get_num_lines(
+ struct dpu_encoder_phys *phys_enc,
+ const struct dpu_hw_intf_timing_params *timing)
+{
+ u32 worst_case_needed_lines =
+ phys_enc->hw_intf->cap->prog_fetch_lines_worst_case;
+ u32 start_of_frame_lines =
+ timing->v_back_porch + timing->vsync_pulse_width;
+ u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines;
+ u32 actual_vfp_lines = 0;
+
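+	/*
+	 * Worked example (illustrative numbers): with a worst case of 24
+	 * lines and vbp + vsw = 36 + 5 = 41 lines, the start of frame
+	 * absorbs the fetch and no VFP lines are needed; with vbp + vsw =
+	 * 10 lines, 14 VFP lines are used, clamped to the whole VFP if
+	 * the panel provides fewer.
+	 */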
+ /* Fetch must be outside active lines, otherwise undefined. */
+ if (start_of_frame_lines >= worst_case_needed_lines) {
+ DPU_DEBUG_VIDENC(phys_enc,
+ "prog fetch is not needed, large vbp+vsw\n");
+ actual_vfp_lines = 0;
+ } else if (timing->v_front_porch < needed_vfp_lines) {
+ /* Warn fetch needed, but not enough porch in panel config */
+		pr_warn_once("low vbp+vfp may lead to perf issues in some cases\n");
+ DPU_DEBUG_VIDENC(phys_enc,
+ "less vfp than fetch req, using entire vfp\n");
+ actual_vfp_lines = timing->v_front_porch;
+ } else {
+ DPU_DEBUG_VIDENC(phys_enc, "room in vfp for needed prefetch\n");
+ actual_vfp_lines = needed_vfp_lines;
+ }
+
+ DPU_DEBUG_VIDENC(phys_enc,
+ "v_front_porch %u v_back_porch %u vsync_pulse_width %u\n",
+ timing->v_front_porch, timing->v_back_porch,
+ timing->vsync_pulse_width);
+ DPU_DEBUG_VIDENC(phys_enc,
+ "wc_lines %u needed_vfp_lines %u actual_vfp_lines %u\n",
+ worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines);
+
+ return actual_vfp_lines;
+}
+
+/*
+ * programmable_fetch_config: Programs HW to prefetch lines by offsetting
+ * the start of fetch into the vertical front porch for cases where the
+ * vsync pulse width and vertical back porch time is insufficient
+ *
+ * Gets the number of lines to prefetch, then calculates the VSYNC counter
+ * value. The HW layer requires the VSYNC counter of the first pixel of the
+ * target VFP line.
+ *
+ * @timing: Pointer to the intf timing information for the requested mode
+ */
+static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc,
+ const struct dpu_hw_intf_timing_params *timing)
+{
+ struct dpu_hw_intf_prog_fetch f = { 0 };
+ u32 vfp_fetch_lines = 0;
+ u32 horiz_total = 0;
+ u32 vert_total = 0;
+ u32 vfp_fetch_start_vsync_counter = 0;
+ unsigned long lock_flags;
+
+ if (WARN_ON_ONCE(!phys_enc->hw_intf->ops.setup_prg_fetch))
+ return;
+
+ vfp_fetch_lines = programmable_fetch_get_num_lines(phys_enc, timing);
+ if (vfp_fetch_lines) {
+ vert_total = get_vertical_total(timing);
+ horiz_total = get_horizontal_total(timing);
+ vfp_fetch_start_vsync_counter =
+ (vert_total - vfp_fetch_lines) * horiz_total + 1;
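+		/*
+		 * E.g. 1080p60 (vtotal 1125, htotal 2200) with 14 prefetch
+		 * lines: (1125 - 14) * 2200 + 1 = 2444201, the counter
+		 * value at the first pixel of the 14th line before the
+		 * end of frame.
+		 */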
+ f.enable = 1;
+ f.fetch_start = vfp_fetch_start_vsync_counter;
+ }
+
+ DPU_DEBUG_VIDENC(phys_enc,
+ "vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n",
+ vfp_fetch_lines, vfp_fetch_start_vsync_counter);
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ phys_enc->hw_intf->ops.setup_prg_fetch(phys_enc->hw_intf, &f);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+}
+
+static void dpu_encoder_phys_vid_setup_timing_engine(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct drm_display_mode mode;
+ struct dpu_hw_intf_timing_params timing_params = { 0 };
+ const struct dpu_format *fmt = NULL;
+ u32 fmt_fourcc = DRM_FORMAT_RGB888;
+ unsigned long lock_flags;
+ struct dpu_hw_intf_cfg intf_cfg = { 0 };
+
+ drm_mode_init(&mode, &phys_enc->cached_mode);
+
+ if (!phys_enc->hw_ctl->ops.setup_intf_cfg) {
+		DPU_ERROR("setup_intf_cfg not supported\n");
+ return;
+ }
+
+ if (!phys_enc->hw_intf->ops.setup_timing_gen) {
+ DPU_ERROR("timing engine setup is not supported\n");
+ return;
+ }
+
+ DPU_DEBUG_VIDENC(phys_enc, "enabling mode:\n");
+ drm_mode_debug_printmodeline(&mode);
+
+ if (phys_enc->split_role != ENC_ROLE_SOLO) {
+ mode.hdisplay >>= 1;
+ mode.htotal >>= 1;
+ mode.hsync_start >>= 1;
+ mode.hsync_end >>= 1;
+
+ DPU_DEBUG_VIDENC(phys_enc,
+ "split_role %d, halve horizontal %d %d %d %d\n",
+ phys_enc->split_role,
+ mode.hdisplay, mode.htotal,
+ mode.hsync_start, mode.hsync_end);
+ }
+
+ drm_mode_to_intf_timing_params(phys_enc, &mode, &timing_params);
+
+ fmt = dpu_get_dpu_format(fmt_fourcc);
+ DPU_DEBUG_VIDENC(phys_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
+
+ intf_cfg.intf = phys_enc->hw_intf->idx;
+ intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID;
+ intf_cfg.stream_sel = 0; /* Don't care value for video mode */
+ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+ intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
+ if (phys_enc->hw_pp->merge_3d)
+ intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf,
+ &timing_params, fmt);
+ phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+
+ /* setup which pp blk will connect to this intf */
+ if (phys_enc->hw_intf->ops.bind_pingpong_blk)
+ phys_enc->hw_intf->ops.bind_pingpong_blk(
+ phys_enc->hw_intf,
+ phys_enc->hw_pp->idx);
+
+ if (phys_enc->hw_pp->merge_3d)
+ phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d, intf_cfg.mode_3d);
+
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ programmable_fetch_config(phys_enc, &timing_params);
+}
+
+static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ struct dpu_hw_ctl *hw_ctl;
+ unsigned long lock_flags;
+ u32 flush_register = 0;
+
+ hw_ctl = phys_enc->hw_ctl;
+
+ DPU_ATRACE_BEGIN("vblank_irq");
+
+ dpu_encoder_vblank_callback(phys_enc->parent, phys_enc);
+
+ /*
+ * only decrement the pending flush count if we've actually flushed
+ * hardware. due to sw irq latency, vblank may have already happened
+ * so we need to double-check with hw that it accepted the flush bits
+ */
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ if (hw_ctl->ops.get_flush_register)
+ flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
+
+ if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl)))
+ atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ /* Signal any waiting atomic commit thread */
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+
+ dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc,
+ DPU_ENCODER_FRAME_EVENT_DONE);
+
+ DPU_ATRACE_END("vblank_irq");
+}
+
+static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+
+ dpu_encoder_underrun_callback(phys_enc->parent, phys_enc);
+}
+
+static bool dpu_encoder_phys_vid_needs_single_flush(
+ struct dpu_encoder_phys *phys_enc)
+{
+ return phys_enc->split_role != ENC_ROLE_SOLO;
+}
+
+static void dpu_encoder_phys_vid_atomic_mode_set(
+ struct dpu_encoder_phys *phys_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ phys_enc->irq[INTR_IDX_VSYNC] = phys_enc->hw_intf->cap->intr_vsync;
+
+ phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
+}
+
+static int dpu_encoder_phys_vid_control_vblank_irq(
+ struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ int ret = 0;
+ int refcount;
+
+ refcount = atomic_read(&phys_enc->vblank_refcount);
+
+ /* Slave encoders don't report vblank */
+ if (!dpu_encoder_phys_vid_is_master(phys_enc))
+ goto end;
+
+ /* protect against negative */
+ if (!enable && refcount == 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ DRM_DEBUG_VBL("id:%u enable=%d/%d\n", DRMID(phys_enc->parent), enable,
+ atomic_read(&phys_enc->vblank_refcount));
+
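+	/*
+	 * Register the vblank callback on the first enable and
+	 * unregister it on the last disable.
+	 */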
+ if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+ ret = dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_VSYNC],
+ dpu_encoder_phys_vid_vblank_irq,
+ phys_enc);
+ else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+ ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_VSYNC]);
+
+end:
+ if (ret) {
+ DRM_ERROR("failed: id:%u intf:%d ret:%d enable:%d refcnt:%d\n",
+ DRMID(phys_enc->parent),
+ phys_enc->hw_intf->idx - INTF_0, ret, enable,
+ refcount);
+ }
+ return ret;
+}
+
+static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_ctl *ctl;
+
+ ctl = phys_enc->hw_ctl;
+
+ DPU_DEBUG_VIDENC(phys_enc, "\n");
+
+ if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing))
+ return;
+
+ dpu_encoder_helper_split_config(phys_enc, phys_enc->hw_intf->idx);
+
+ dpu_encoder_phys_vid_setup_timing_engine(phys_enc);
+
+ /*
+ * For single flush cases (dual-ctl or pp-split), skip setting the
+ * flush bit for the slave intf, since both intfs use same ctl
+ * and HW will only flush the master.
+ */
+ if (dpu_encoder_phys_vid_needs_single_flush(phys_enc) &&
+ !dpu_encoder_phys_vid_is_master(phys_enc))
+ goto skip_flush;
+
+ ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
+ if (ctl->ops.update_pending_flush_merge_3d && phys_enc->hw_pp->merge_3d)
+ ctl->ops.update_pending_flush_merge_3d(ctl, phys_enc->hw_pp->merge_3d->idx);
+
+skip_flush:
+ DPU_DEBUG_VIDENC(phys_enc,
+ "update pending flush ctl %d intf %d\n",
+ ctl->idx - CTL_0, phys_enc->hw_intf->idx);
+
+ atomic_set(&phys_enc->underrun_cnt, 0);
+
+ /* ctl_flush & timing engine enable will be triggered by framework */
+ if (phys_enc->enable_state == DPU_ENC_DISABLED)
+ phys_enc->enable_state = DPU_ENC_ENABLING;
+}
+
+static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
+{
+ DPU_DEBUG_VIDENC(phys_enc, "\n");
+ kfree(phys_enc);
+}
+
+static int dpu_encoder_phys_vid_wait_for_vblank(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_wait_info wait_info;
+ int ret;
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+	if (!dpu_encoder_phys_vid_is_master(phys_enc))
+		return 0;
+
+ /* Wait for kickoff to complete */
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc,
+ phys_enc->irq[INTR_IDX_VSYNC],
+ dpu_encoder_phys_vid_vblank_irq,
+ &wait_info);
+
+	if (ret == -ETIMEDOUT)
+		dpu_encoder_helper_report_irq_timeout(phys_enc, INTR_IDX_VSYNC);
+
+ return ret;
+}
+
+static int dpu_encoder_phys_vid_wait_for_commit_done(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_ctl *hw_ctl = phys_enc->hw_ctl;
+ int ret;
+
+ if (!hw_ctl)
+ return 0;
+
+ ret = wait_event_timeout(phys_enc->pending_kickoff_wq,
+ (hw_ctl->ops.get_flush_register(hw_ctl) == 0),
+ msecs_to_jiffies(50));
+ if (ret <= 0) {
+ DPU_ERROR("vblank timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void dpu_encoder_phys_vid_prepare_for_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_ctl *ctl;
+ int rc;
+ struct drm_encoder *drm_enc;
+
+ drm_enc = phys_enc->parent;
+
+ ctl = phys_enc->hw_ctl;
+ if (!ctl->ops.wait_reset_status)
+ return;
+
+	/*
+	 * HW supports hardware-initiated CTL reset, so before we kick off
+	 * a new frame, we need to check for and wait on completion of any
+	 * HW-initiated CTL reset.
+	 */
+ rc = ctl->ops.wait_reset_status(ctl);
+ if (rc) {
+ DPU_ERROR_VIDENC(phys_enc, "ctl %d reset failure: %d\n",
+ ctl->idx, rc);
+ msm_disp_snapshot_state(drm_enc->dev);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_VSYNC]);
+ }
+}
+
+static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
+{
+ unsigned long lock_flags;
+ int ret;
+ struct dpu_hw_intf_status intf_status = {0};
+
+ if (!phys_enc->parent || !phys_enc->parent->dev) {
+ DPU_ERROR("invalid encoder/device\n");
+ return;
+ }
+
+ if (!phys_enc->hw_intf) {
+ DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
+ phys_enc->hw_intf != NULL, phys_enc->hw_ctl != NULL);
+ return;
+ }
+
+ if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing))
+ return;
+
+ if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+ DPU_ERROR("already disabled\n");
+ return;
+ }
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 0);
+ if (dpu_encoder_phys_vid_is_master(phys_enc))
+ dpu_encoder_phys_inc_pending(phys_enc);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	/*
+	 * Wait for a vsync so we know the ENABLE=0 has latched before
+	 * the (connector) source of the vsync gets disabled. Otherwise
+	 * we end up in a funny state if we re-enable before the disable
+	 * latches, with the result that some of the settings for the new
+	 * modeset (like the new scanout buffer) don't latch properly.
+	 */
+ if (dpu_encoder_phys_vid_is_master(phys_enc)) {
+ ret = dpu_encoder_phys_vid_wait_for_vblank(phys_enc);
+ if (ret) {
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
+ DRMID(phys_enc->parent),
+ phys_enc->hw_intf->idx - INTF_0, ret);
+ }
+ }
+
+ if (phys_enc->hw_intf && phys_enc->hw_intf->ops.get_status)
+ phys_enc->hw_intf->ops.get_status(phys_enc->hw_intf, &intf_status);
+
+ /*
+ * Wait for a vsync if timing en status is on after timing engine
+ * is disabled.
+ */
+ if (intf_status.is_en && dpu_encoder_phys_vid_is_master(phys_enc)) {
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ dpu_encoder_phys_inc_pending(phys_enc);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+ ret = dpu_encoder_phys_vid_wait_for_vblank(phys_enc);
+ if (ret) {
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
+ DRMID(phys_enc->parent),
+ phys_enc->hw_intf->idx - INTF_0, ret);
+ }
+ }
+
+ dpu_encoder_helper_phys_cleanup(phys_enc);
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+}
+
+static void dpu_encoder_phys_vid_handle_post_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+{
+ unsigned long lock_flags;
+
+	/*
+	 * Video mode must flush the CTL before enabling the timing engine,
+	 * so video encoders turn on their interfaces now.
+	 */
+ if (phys_enc->enable_state == DPU_ENC_ENABLING) {
+ trace_dpu_enc_phys_vid_post_kickoff(DRMID(phys_enc->parent),
+ phys_enc->hw_intf->idx - INTF_0);
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 1);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+ phys_enc->enable_state = DPU_ENC_ENABLED;
+ }
+}
+
+static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ int ret;
+
+ trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent),
+ phys_enc->hw_intf->idx - INTF_0,
+ enable,
+ atomic_read(&phys_enc->vblank_refcount));
+
+ if (enable) {
+ ret = dpu_encoder_phys_vid_control_vblank_irq(phys_enc, true);
+ if (WARN_ON(ret))
+ return;
+
+ dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_UNDERRUN],
+ dpu_encoder_phys_vid_underrun_irq,
+ phys_enc);
+ } else {
+ dpu_encoder_phys_vid_control_vblank_irq(phys_enc, false);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_UNDERRUN]);
+ }
+}
+
+static int dpu_encoder_phys_vid_get_line_count(
+ struct dpu_encoder_phys *phys_enc)
+{
+ if (!dpu_encoder_phys_vid_is_master(phys_enc))
+ return -EINVAL;
+
+ if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.get_line_count)
+ return -EINVAL;
+
+ return phys_enc->hw_intf->ops.get_line_count(phys_enc->hw_intf);
+}
+
+static int dpu_encoder_phys_vid_get_frame_count(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_intf_status s = {0};
+ u32 fetch_start = 0;
+ struct drm_display_mode mode;
+
+ drm_mode_init(&mode, &phys_enc->cached_mode);
+
+ if (!dpu_encoder_phys_vid_is_master(phys_enc))
+ return -EINVAL;
+
+ if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.get_status)
+ return -EINVAL;
+
+ phys_enc->hw_intf->ops.get_status(phys_enc->hw_intf, &s);
+
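+	/*
+	 * If the timing engine has already started prefetching the next
+	 * frame (the current line is past the programmable fetch start),
+	 * the frame counter has not yet been incremented for it, so
+	 * compensate by one.
+	 */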
+ if (s.is_prog_fetch_en && s.is_en) {
+ fetch_start = mode.vtotal - (mode.vsync_start - mode.vdisplay);
+ if ((s.line_count > fetch_start) &&
+ (s.line_count <= mode.vtotal))
+ return s.frame_count + 1;
+ }
+
+ return s.frame_count;
+}
+
+static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
+{
+ ops->is_master = dpu_encoder_phys_vid_is_master;
+ ops->atomic_mode_set = dpu_encoder_phys_vid_atomic_mode_set;
+ ops->enable = dpu_encoder_phys_vid_enable;
+ ops->disable = dpu_encoder_phys_vid_disable;
+ ops->destroy = dpu_encoder_phys_vid_destroy;
+ ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq;
+ ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_commit_done;
+ ops->wait_for_vblank = dpu_encoder_phys_vid_wait_for_vblank;
+ ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_vblank;
+ ops->irq_control = dpu_encoder_phys_vid_irq_control;
+ ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
+ ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
+ ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
+ ops->get_line_count = dpu_encoder_phys_vid_get_line_count;
+ ops->get_frame_count = dpu_encoder_phys_vid_get_frame_count;
+}
+
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+ struct dpu_enc_phys_init_params *p)
+{
+ struct dpu_encoder_phys *phys_enc = NULL;
+
+ if (!p) {
+ DPU_ERROR("failed to create encoder due to invalid parameter\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ phys_enc = kzalloc(sizeof(*phys_enc), GFP_KERNEL);
+ if (!phys_enc) {
+ DPU_ERROR("failed to create encoder due to memory allocation error\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ DPU_DEBUG_VIDENC(phys_enc, "\n");
+
+ dpu_encoder_phys_init(phys_enc, p);
+
+ dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
+ phys_enc->intf_mode = INTF_MODE_VIDEO;
+
+ DPU_DEBUG_VIDENC(phys_enc, "created intf idx:%d\n", p->hw_intf->idx);
+
+ return phys_enc;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
new file mode 100644
index 0000000000..78037a6976
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -0,0 +1,716 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+
+#include <drm/drm_framebuffer.h>
+
+#include "dpu_encoder_phys.h"
+#include "dpu_formats.h"
+#include "dpu_hw_top.h"
+#include "dpu_hw_wb.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_merge3d.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_core_irq.h"
+#include "dpu_vbif.h"
+#include "dpu_crtc.h"
+#include "disp/msm_disp_snapshot.h"
+
+#define to_dpu_encoder_phys_wb(x) \
+ container_of(x, struct dpu_encoder_phys_wb, base)
+
+/**
+ * dpu_encoder_phys_wb_is_master - report wb always as master encoder
+ * @phys_enc: Pointer to physical encoder
+ */
+static bool dpu_encoder_phys_wb_is_master(struct dpu_encoder_phys *phys_enc)
+{
+ /* there is only one physical enc for dpu_writeback */
+ return true;
+}
+
+/**
+ * dpu_encoder_phys_wb_set_ot_limit - set OT limit for writeback interface
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_set_ot_limit(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
+ struct dpu_vbif_set_ot_params ot_params;
+
+ memset(&ot_params, 0, sizeof(ot_params));
+ ot_params.xin_id = hw_wb->caps->xin_id;
+ ot_params.num = hw_wb->idx - WB_0;
+ ot_params.width = phys_enc->cached_mode.hdisplay;
+ ot_params.height = phys_enc->cached_mode.vdisplay;
+ ot_params.is_wfd = true;
+ ot_params.frame_rate = drm_mode_vrefresh(&phys_enc->cached_mode);
+ ot_params.vbif_idx = hw_wb->caps->vbif_idx;
+ ot_params.clk_ctrl = hw_wb->caps->clk_ctrl;
+ ot_params.rd = false;
+
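+	/* Cap the outstanding VBIF write transactions for this WB client. */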
+ dpu_vbif_set_ot_limit(phys_enc->dpu_kms, &ot_params);
+}
+
+/**
+ * dpu_encoder_phys_wb_set_qos_remap - set QoS remapper for writeback
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_set_qos_remap(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb;
+ struct dpu_vbif_set_qos_params qos_params;
+
+ if (!phys_enc || !phys_enc->parent || !phys_enc->parent->crtc) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+
+ if (!phys_enc->hw_wb || !phys_enc->hw_wb->caps) {
+ DPU_ERROR("invalid writeback hardware\n");
+ return;
+ }
+
+ hw_wb = phys_enc->hw_wb;
+
+ memset(&qos_params, 0, sizeof(qos_params));
+ qos_params.vbif_idx = hw_wb->caps->vbif_idx;
+ qos_params.xin_id = hw_wb->caps->xin_id;
+ qos_params.clk_ctrl = hw_wb->caps->clk_ctrl;
+ qos_params.num = hw_wb->idx - WB_0;
+ qos_params.is_rt = false;
+
+ DPU_DEBUG("[qos_remap] wb:%d vbif:%d xin:%d is_rt:%d\n",
+ qos_params.num,
+ qos_params.vbif_idx,
+ qos_params.xin_id, qos_params.is_rt);
+
+ dpu_vbif_set_qos_remap(phys_enc->dpu_kms, &qos_params);
+}
+
+/**
+ * dpu_encoder_phys_wb_set_qos - set QoS/danger/safe LUTs for writeback
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_set_qos(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb;
+ struct dpu_hw_qos_cfg qos_cfg;
+ const struct dpu_mdss_cfg *catalog;
+ const struct dpu_qos_lut_tbl *qos_lut_tb;
+
+ if (!phys_enc || !phys_enc->dpu_kms || !phys_enc->dpu_kms->catalog) {
+ DPU_ERROR("invalid parameter(s)\n");
+ return;
+ }
+
+ catalog = phys_enc->dpu_kms->catalog;
+
+ hw_wb = phys_enc->hw_wb;
+
+ memset(&qos_cfg, 0, sizeof(struct dpu_hw_qos_cfg));
+ qos_cfg.danger_safe_en = true;
+ qos_cfg.danger_lut =
+ catalog->perf->danger_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
+
+ qos_cfg.safe_lut = catalog->perf->safe_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
+
+ qos_lut_tb = &catalog->perf->qos_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
+ qos_cfg.creq_lut = _dpu_hw_get_qos_lut(qos_lut_tb, 0);
+
+ if (hw_wb->ops.setup_qos_lut)
+ hw_wb->ops.setup_qos_lut(hw_wb, &qos_cfg);
+}
+
+/**
+ * dpu_encoder_phys_wb_setup_fb - setup output framebuffer
+ * @phys_enc: Pointer to physical encoder
+ * @fb: Pointer to output framebuffer
+ */
+static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
+ struct drm_framebuffer *fb)
+{
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+ struct dpu_hw_wb *hw_wb;
+ struct dpu_hw_wb_cfg *wb_cfg;
+
+ if (!phys_enc || !phys_enc->dpu_kms || !phys_enc->dpu_kms->catalog) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ hw_wb = phys_enc->hw_wb;
+ wb_cfg = &wb_enc->wb_cfg;
+
+ wb_cfg->intf_mode = phys_enc->intf_mode;
+ wb_cfg->roi.x1 = 0;
+ wb_cfg->roi.x2 = phys_enc->cached_mode.hdisplay;
+ wb_cfg->roi.y1 = 0;
+ wb_cfg->roi.y2 = phys_enc->cached_mode.vdisplay;
+
+ if (hw_wb->ops.setup_roi)
+ hw_wb->ops.setup_roi(hw_wb, wb_cfg);
+
+ if (hw_wb->ops.setup_outformat)
+ hw_wb->ops.setup_outformat(hw_wb, wb_cfg);
+
+ if (hw_wb->ops.setup_cdp) {
+ const struct dpu_perf_cfg *perf = phys_enc->dpu_kms->catalog->perf;
+
+ hw_wb->ops.setup_cdp(hw_wb, wb_cfg->dest.format,
+ perf->cdp_cfg[DPU_PERF_CDP_USAGE_NRT].wr_enable);
+ }
+
+ if (hw_wb->ops.setup_outaddress)
+ hw_wb->ops.setup_outaddress(hw_wb, wb_cfg);
+}
+
+/**
+ * dpu_encoder_phys_wb_setup_cdp - setup the CTL interface config for writeback
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_setup_cdp(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb;
+ struct dpu_hw_ctl *ctl;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ hw_wb = phys_enc->hw_wb;
+ ctl = phys_enc->hw_ctl;
+
+ if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) &&
+ (phys_enc->hw_ctl &&
+ phys_enc->hw_ctl->ops.setup_intf_cfg)) {
+ struct dpu_hw_intf_cfg intf_cfg = {0};
+ struct dpu_hw_pingpong *hw_pp = phys_enc->hw_pp;
+ enum dpu_3d_blend_mode mode_3d;
+
+ mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+
+ intf_cfg.intf = DPU_NONE;
+ intf_cfg.wb = hw_wb->idx;
+
+ if (mode_3d && hw_pp && hw_pp->merge_3d)
+ intf_cfg.merge_3d = hw_pp->merge_3d->idx;
+
+ if (phys_enc->hw_pp->merge_3d && phys_enc->hw_pp->merge_3d->ops.setup_3d_mode)
+ phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
+ mode_3d);
+
+ /* setup which pp blk will connect to this wb */
+ if (hw_pp && phys_enc->hw_wb->ops.bind_pingpong_blk)
+ phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb,
+ phys_enc->hw_pp->idx);
+
+ phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+ } else if (phys_enc->hw_ctl && phys_enc->hw_ctl->ops.setup_intf_cfg) {
+ struct dpu_hw_intf_cfg intf_cfg = {0};
+
+ intf_cfg.intf = DPU_NONE;
+ intf_cfg.wb = hw_wb->idx;
+ intf_cfg.mode_3d =
+ dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+ phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+ }
+}
+
+/**
+ * dpu_encoder_phys_wb_atomic_check - verify and fixup given atomic states
+ * @phys_enc: Pointer to physical encoder
+ * @crtc_state: Pointer to CRTC atomic state
+ * @conn_state: Pointer to connector atomic state
+ */
+static int dpu_encoder_phys_wb_atomic_check(
+ struct dpu_encoder_phys *phys_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_framebuffer *fb;
+ const struct drm_display_mode *mode = &crtc_state->mode;
+
+ DPU_DEBUG("[atomic_check:%d, \"%s\",%d,%d]\n",
+ phys_enc->hw_wb->idx, mode->name, mode->hdisplay, mode->vdisplay);
+
+ if (!conn_state || !conn_state->connector) {
+ DPU_ERROR("invalid connector state\n");
+ return -EINVAL;
+ } else if (conn_state->connector->status !=
+ connector_status_connected) {
+ DPU_ERROR("connector not connected %d\n",
+ conn_state->connector->status);
+ return -EINVAL;
+ }
+
+ if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ return 0;
+
+ fb = conn_state->writeback_job->fb;
+
+ DPU_DEBUG("[fb_id:%u][fb:%u,%u]\n", fb->base.id,
+ fb->width, fb->height);
+
+ if (fb->width != mode->hdisplay) {
+ DPU_ERROR("invalid fb w=%d, mode w=%d\n", fb->width,
+ mode->hdisplay);
+ return -EINVAL;
+ } else if (fb->height != mode->vdisplay) {
+ DPU_ERROR("invalid fb h=%d, mode h=%d\n", fb->height,
+ mode->vdisplay);
+ return -EINVAL;
+ } else if (fb->width > phys_enc->hw_wb->caps->maxlinewidth) {
+ DPU_ERROR("invalid fb w=%d, maxlinewidth=%u\n",
+ fb->width, phys_enc->hw_wb->caps->maxlinewidth);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * _dpu_encoder_phys_wb_update_flush - flush hardware update
+ * @phys_enc: Pointer to physical encoder
+ */
+static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb;
+ struct dpu_hw_ctl *hw_ctl;
+ struct dpu_hw_pingpong *hw_pp;
+ u32 pending_flush = 0;
+
+ if (!phys_enc)
+ return;
+
+ hw_wb = phys_enc->hw_wb;
+ hw_pp = phys_enc->hw_pp;
+ hw_ctl = phys_enc->hw_ctl;
+
+ DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
+
+ if (!hw_ctl) {
+ DPU_DEBUG("[wb:%d] no ctl assigned\n", hw_wb->idx - WB_0);
+ return;
+ }
+
+ if (hw_ctl->ops.update_pending_flush_wb)
+ hw_ctl->ops.update_pending_flush_wb(hw_ctl, hw_wb->idx);
+
+ if (hw_ctl->ops.update_pending_flush_merge_3d && hw_pp && hw_pp->merge_3d)
+ hw_ctl->ops.update_pending_flush_merge_3d(hw_ctl,
+ hw_pp->merge_3d->idx);
+
+ if (hw_ctl->ops.get_pending_flush)
+ pending_flush = hw_ctl->ops.get_pending_flush(hw_ctl);
+
+ DPU_DEBUG("Pending flush mask for CTL_%d is 0x%x, WB %d\n",
+ hw_ctl->idx - CTL_0, pending_flush,
+ hw_wb->idx - WB_0);
+}
+
+/**
+ * dpu_encoder_phys_wb_setup - setup writeback encoder
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_setup(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
+ struct drm_display_mode mode = phys_enc->cached_mode;
+ struct drm_framebuffer *fb = NULL;
+
+ DPU_DEBUG("[mode_set:%d, \"%s\",%d,%d]\n",
+ hw_wb->idx - WB_0, mode.name,
+ mode.hdisplay, mode.vdisplay);
+
+ dpu_encoder_phys_wb_set_ot_limit(phys_enc);
+
+ dpu_encoder_phys_wb_set_qos_remap(phys_enc);
+
+ dpu_encoder_phys_wb_set_qos(phys_enc);
+
+ dpu_encoder_phys_wb_setup_fb(phys_enc, fb);
+
+	dpu_encoder_phys_wb_setup_cdp(phys_enc);
+}
+
+static void _dpu_encoder_phys_wb_frame_done_helper(void *arg)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+
+ struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
+ unsigned long lock_flags;
+ u32 event = DPU_ENCODER_FRAME_EVENT_DONE;
+
+ DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
+
+ dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc, event);
+
+ dpu_encoder_vblank_callback(phys_enc->parent, phys_enc);
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ if (wb_enc->wb_conn)
+ drm_writeback_signal_completion(wb_enc->wb_conn, 0);
+
+ /* Signal any waiting atomic commit thread */
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+}
+
+/**
+ * dpu_encoder_phys_wb_done_irq - writeback interrupt handler
+ * @arg: Pointer to writeback encoder
+ * @irq_idx: interrupt index
+ */
+static void dpu_encoder_phys_wb_done_irq(void *arg, int irq_idx)
+{
+ _dpu_encoder_phys_wb_frame_done_helper(arg);
+}
+
+/**
+ * dpu_encoder_phys_wb_irq_ctrl - irq control of WB
+ * @phys: Pointer to physical encoder
+ * @enable: indicates enable or disable interrupts
+ */
+static void dpu_encoder_phys_wb_irq_ctrl(
+ struct dpu_encoder_phys *phys, bool enable)
+{
+	struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys);
+
+ if (enable && atomic_inc_return(&wb_enc->wbirq_refcount) == 1)
+ dpu_core_irq_register_callback(phys->dpu_kms,
+ phys->irq[INTR_IDX_WB_DONE], dpu_encoder_phys_wb_done_irq, phys);
+ else if (!enable &&
+ atomic_dec_return(&wb_enc->wbirq_refcount) == 0)
+ dpu_core_irq_unregister_callback(phys->dpu_kms, phys->irq[INTR_IDX_WB_DONE]);
+}
+
+static void dpu_encoder_phys_wb_atomic_mode_set(
+ struct dpu_encoder_phys *phys_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+	phys_enc->irq[INTR_IDX_WB_DONE] = phys_enc->hw_wb->caps->intr_wb_done;
+}
+
+static void _dpu_encoder_phys_wb_handle_wbdone_timeout(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+ u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
+
+ wb_enc->wb_done_timeout_cnt++;
+
+ if (wb_enc->wb_done_timeout_cnt == 1)
+ msm_disp_snapshot_state(phys_enc->parent->dev);
+
+ atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+
+ /* request a ctl reset before the next kickoff */
+ phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;
+
+ if (wb_enc->wb_conn)
+ drm_writeback_signal_completion(wb_enc->wb_conn, 0);
+
+ dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc, frame_event);
+}
+
+/**
+ * dpu_encoder_phys_wb_wait_for_commit_done - wait until request is committed
+ * @phys_enc: Pointer to physical encoder
+ */
+static int dpu_encoder_phys_wb_wait_for_commit_done(
+ struct dpu_encoder_phys *phys_enc)
+{
+	int ret;
+ struct dpu_encoder_wait_info wait_info;
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc,
+ phys_enc->irq[INTR_IDX_WB_DONE],
+ dpu_encoder_phys_wb_done_irq, &wait_info);
+ if (ret == -ETIMEDOUT)
+ _dpu_encoder_phys_wb_handle_wbdone_timeout(phys_enc);
+ else if (!ret)
+ wb_enc->wb_done_timeout_cnt = 0;
+
+ return ret;
+}
+
+/**
+ * dpu_encoder_phys_wb_prepare_for_kickoff - pre-kickoff processing
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_prepare_for_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+ struct drm_connector *drm_conn;
+ struct drm_connector_state *state;
+
+ DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
+
+ if (!wb_enc->wb_conn || !wb_enc->wb_job) {
+ DPU_ERROR("invalid wb_conn or wb_job\n");
+ return;
+ }
+
+	drm_conn = &wb_enc->wb_conn->base;
+	state = drm_conn->state;
+
+	drm_writeback_queue_job(wb_enc->wb_conn, state);
+
+ dpu_encoder_phys_wb_setup(phys_enc);
+
+ _dpu_encoder_phys_wb_update_flush(phys_enc);
+}
+
+/**
+ * dpu_encoder_phys_wb_needs_single_flush - report whether a single flush is needed
+ * @phys_enc: Pointer to physical encoder
+ */
+static bool dpu_encoder_phys_wb_needs_single_flush(struct dpu_encoder_phys *phys_enc)
+{
+ DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
+ return false;
+}
+
+/**
+ * dpu_encoder_phys_wb_handle_post_kickoff - post-kickoff processing
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_handle_post_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+{
+	DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
+}
+
+/**
+ * dpu_encoder_phys_wb_enable - enable writeback encoder
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_enable(struct dpu_encoder_phys *phys_enc)
+{
+ DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
+ phys_enc->enable_state = DPU_ENC_ENABLED;
+}
+
+/**
+ * dpu_encoder_phys_wb_disable - disable writeback encoder
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
+ struct dpu_hw_ctl *hw_ctl = phys_enc->hw_ctl;
+
+ DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
+
+ if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+ DPU_ERROR("encoder is already disabled\n");
+ return;
+ }
+
+ /* reset h/w before final flush */
+ if (phys_enc->hw_ctl->ops.clear_pending_flush)
+ phys_enc->hw_ctl->ops.clear_pending_flush(phys_enc->hw_ctl);
+
+	/*
+	 * New CTL reset sequence from 5.0 MDP onwards. Targets without
+	 * active-CTL support would need the legacy reset sequence instead.
+	 *
+	 * The legacy reset sequence has not been implemented yet: any
+	 * target earlier than SM8150 will need it, so the legacy teardown
+	 * sequence will have to be added when WB support is extended to
+	 * those targets.
+	 */
+ if (hw_ctl->caps->features & BIT(DPU_CTL_ACTIVE_CFG))
+ dpu_encoder_helper_phys_cleanup(phys_enc);
+
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+}
+
+/**
+ * dpu_encoder_phys_wb_destroy - destroy writeback encoder
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_destroy(struct dpu_encoder_phys *phys_enc)
+{
+ if (!phys_enc)
+ return;
+
+ DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
+
+ kfree(phys_enc);
+}
+
+static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc,
+ struct drm_writeback_job *job)
+{
+ const struct msm_format *format;
+ struct msm_gem_address_space *aspace;
+ struct dpu_hw_wb_cfg *wb_cfg;
+ int ret;
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+
+ if (!job->fb)
+ return;
+
+ wb_enc->wb_job = job;
+ wb_enc->wb_conn = job->connector;
+ aspace = phys_enc->dpu_kms->base.aspace;
+
+ wb_cfg = &wb_enc->wb_cfg;
+
+ memset(wb_cfg, 0, sizeof(struct dpu_hw_wb_cfg));
+
+ ret = msm_framebuffer_prepare(job->fb, aspace, false);
+ if (ret) {
+ DPU_ERROR("prep fb failed, %d\n", ret);
+ return;
+ }
+
+ format = msm_framebuffer_format(job->fb);
+
+ wb_cfg->dest.format = dpu_get_dpu_format_ext(
+ format->pixel_format, job->fb->modifier);
+ if (!wb_cfg->dest.format) {
+ /* this error should be detected during atomic_check */
+ DPU_ERROR("failed to get format %x\n", format->pixel_format);
+ return;
+ }
+
+ ret = dpu_format_populate_layout(aspace, job->fb, &wb_cfg->dest);
+ if (ret) {
+ DPU_DEBUG("failed to populate layout %d\n", ret);
+ return;
+ }
+
+ wb_cfg->dest.width = job->fb->width;
+ wb_cfg->dest.height = job->fb->height;
+ wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes;
+
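+	/*
+	 * For planar YUV formats whose first chroma element is Cb, swap
+	 * the two chroma plane addresses to match the order the WB
+	 * hardware expects.
+	 */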
+ if ((wb_cfg->dest.format->fetch_planes == DPU_PLANE_PLANAR) &&
+ (wb_cfg->dest.format->element[0] == C1_B_Cb))
+ swap(wb_cfg->dest.plane_addr[1], wb_cfg->dest.plane_addr[2]);
+
+ DPU_DEBUG("[fb_offset:%8.8x,%8.8x,%8.8x,%8.8x]\n",
+ wb_cfg->dest.plane_addr[0], wb_cfg->dest.plane_addr[1],
+ wb_cfg->dest.plane_addr[2], wb_cfg->dest.plane_addr[3]);
+
+ DPU_DEBUG("[fb_stride:%8.8x,%8.8x,%8.8x,%8.8x]\n",
+ wb_cfg->dest.plane_pitch[0], wb_cfg->dest.plane_pitch[1],
+ wb_cfg->dest.plane_pitch[2], wb_cfg->dest.plane_pitch[3]);
+}
+
+static void dpu_encoder_phys_wb_cleanup_wb_job(struct dpu_encoder_phys *phys_enc,
+ struct drm_writeback_job *job)
+{
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+ struct msm_gem_address_space *aspace;
+
+ if (!job->fb)
+ return;
+
+ aspace = phys_enc->dpu_kms->base.aspace;
+
+ msm_framebuffer_cleanup(job->fb, aspace, false);
+ wb_enc->wb_job = NULL;
+ wb_enc->wb_conn = NULL;
+}
+
+static bool dpu_encoder_phys_wb_is_valid_for_commit(struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+
+	return wb_enc->wb_job != NULL;
+}
+
+/**
+ * dpu_encoder_phys_wb_init_ops - initialize writeback operations
+ * @ops: Pointer to encoder operation table
+ */
+static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops)
+{
+ ops->is_master = dpu_encoder_phys_wb_is_master;
+ ops->atomic_mode_set = dpu_encoder_phys_wb_atomic_mode_set;
+ ops->enable = dpu_encoder_phys_wb_enable;
+ ops->disable = dpu_encoder_phys_wb_disable;
+ ops->destroy = dpu_encoder_phys_wb_destroy;
+ ops->atomic_check = dpu_encoder_phys_wb_atomic_check;
+ ops->wait_for_commit_done = dpu_encoder_phys_wb_wait_for_commit_done;
+ ops->prepare_for_kickoff = dpu_encoder_phys_wb_prepare_for_kickoff;
+ ops->handle_post_kickoff = dpu_encoder_phys_wb_handle_post_kickoff;
+ ops->needs_single_flush = dpu_encoder_phys_wb_needs_single_flush;
+ ops->trigger_start = dpu_encoder_helper_trigger_start;
+ ops->prepare_wb_job = dpu_encoder_phys_wb_prepare_wb_job;
+ ops->cleanup_wb_job = dpu_encoder_phys_wb_cleanup_wb_job;
+ ops->irq_control = dpu_encoder_phys_wb_irq_ctrl;
+	ops->is_valid_for_commit = dpu_encoder_phys_wb_is_valid_for_commit;
+}
+
+/**
+ * dpu_encoder_phys_wb_init - initialize writeback encoder
+ * @p: Pointer to init info structure with initialization params
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
+ struct dpu_enc_phys_init_params *p)
+{
+ struct dpu_encoder_phys *phys_enc = NULL;
+ struct dpu_encoder_phys_wb *wb_enc = NULL;
+
+ DPU_DEBUG("\n");
+
+ if (!p || !p->parent) {
+ DPU_ERROR("invalid params\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ wb_enc = kzalloc(sizeof(*wb_enc), GFP_KERNEL);
+ if (!wb_enc) {
+ DPU_ERROR("failed to allocate wb phys_enc enc\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ phys_enc = &wb_enc->base;
+
+ dpu_encoder_phys_init(phys_enc, p);
+
+ dpu_encoder_phys_wb_init_ops(&phys_enc->ops);
+ phys_enc->intf_mode = INTF_MODE_WB_LINE;
+
+ atomic_set(&wb_enc->wbirq_refcount, 0);
+
+ wb_enc->wb_done_timeout_cnt = 0;
+
+ DPU_DEBUG("Created dpu_encoder_phys for wb %d\n", phys_enc->hw_wb->idx);
+
+ return phys_enc;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
new file mode 100644
index 0000000000..e366ab1342
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -0,0 +1,1068 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <uapi/drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+
+#include "msm_media_info.h"
+#include "dpu_kms.h"
+#include "dpu_formats.h"
+
+#define DPU_UBWC_META_MACRO_W_H 16
+#define DPU_UBWC_META_BLOCK_SIZE 256
+#define DPU_UBWC_PLANE_SIZE_ALIGNMENT 4096
+
+#define DPU_TILE_HEIGHT_DEFAULT 1
+#define DPU_TILE_HEIGHT_TILED 4
+#define DPU_TILE_HEIGHT_UBWC 4
+#define DPU_TILE_HEIGHT_NV12 8
+
+#define DPU_MAX_IMG_WIDTH 0x3FFF
+#define DPU_MAX_IMG_HEIGHT 0x3FFF
+
+/*
+ * DPU supported format packing, bpp, and other format
+ * information.
+ * DPU currently only supports interleaved RGB formats.
+ * UBWC support for a pixel format is indicated by the flag;
+ * such formats carry an additional metadata plane.
+ */
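+/*
+ * E.g. ARGB8888 is a single interleaved plane at 4 bytes per pixel,
+ * while NV12 is pseudo-planar: a Y plane followed by an interleaved
+ * CbCr plane at half resolution in each dimension.
+ */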
+
+#define INTERLEAVED_RGB_FMT(fmt, a, r, g, b, e0, e1, e2, e3, uc, alpha, \
+bp, flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = DPU_CHROMA_RGB, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = uc, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+#define INTERLEAVED_RGB_FMT_TILED(fmt, a, r, g, b, e0, e1, e2, e3, uc, \
+alpha, bp, flg, fm, np, th) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = DPU_CHROMA_RGB, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = uc, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+
+#define INTERLEAVED_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, e3, \
+alpha, chroma, count, bp, flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3)}, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = count, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT_TILED(fmt, a, r, g, b, e0, e1, chroma, \
+flg, fm, np, th) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+#define PSEUDO_YUV_FMT_LOOSE(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np)\
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 1, \
+ .unpack_tight = 0, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT_LOOSE_TILED(fmt, a, r, g, b, e0, e1, chroma, \
+flg, fm, np, th) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 1, \
+ .unpack_tight = 0, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+
+#define PLANAR_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, alpha, chroma, bp, \
+flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PLANAR, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 1, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+/*
+ * struct dpu_media_color_map - maps drm format to media format
+ * @format: DRM base pixel format
+ * @color: Media API color related to DRM format
+ */
+struct dpu_media_color_map {
+ uint32_t format;
+ uint32_t color;
+};
+
+static const struct dpu_format dpu_format_map[] = {
+ INTERLEAVED_RGB_FMT(ARGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGB888,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ false, 3, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGR888,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 3, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGB565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGR565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ PSEUDO_YUV_FMT(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV21,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV16,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_H2V1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV61,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb,
+ DPU_CHROMA_H2V1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT_LOOSE(P010,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(VYUY,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(UYVY,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(YUYV,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(YVYU,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C0_G_Y, C2_R_Cr, C0_G_Y, C1_B_Cb,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PLANAR_YUV_FMT(YUV420,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb, C0_G_Y,
+ false, DPU_CHROMA_420, 1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 3),
+
+ PLANAR_YUV_FMT(YVU420,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr, C0_G_Y,
+ false, DPU_CHROMA_420, 1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 3),
+};
+
+/*
+ * UBWC formats table:
+ * This table lists the UBWC formats supported by the hardware.
+ * If a compression ratio needs to be used for this or any other format,
+ * user-space passes the necessary data.
+ */
+static const struct dpu_format dpu_format_map_ubwc[] = {
+ INTERLEAVED_RGB_FMT_TILED(BGR565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 2, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ /* ARGB8888 and ABGR8888 purposely have the same color
+ * ordering. The hardware only supports ABGR8888 UBWC
+ * natively.
+ */
+ INTERLEAVED_RGB_FMT_TILED(ARGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(XRGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(XRGB2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ /* XRGB2101010 and ARGB2101010 purposely have the same color
+ * ordering. The hardware only supports ARGB2101010 UBWC
+ * natively.
+ */
+ INTERLEAVED_RGB_FMT_TILED(ARGB2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ PSEUDO_YUV_FMT_TILED(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV |
+ DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
+
+ PSEUDO_YUV_FMT_TILED(P010,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_DX |
+ DPU_FORMAT_FLAG_YUV |
+ DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_UBWC),
+};
+
+/* _dpu_get_v_h_subsample_rate - Get subsample rates for all formats we support
+ * Note: not using the drm_format_*_subsampling helpers since we also
+ * support formats they do not describe
+ */
+static void _dpu_get_v_h_subsample_rate(
+ enum dpu_chroma_samp_type chroma_sample,
+ uint32_t *v_sample,
+ uint32_t *h_sample)
+{
+ if (!v_sample || !h_sample)
+ return;
+
+ switch (chroma_sample) {
+ case DPU_CHROMA_H2V1:
+ *v_sample = 1;
+ *h_sample = 2;
+ break;
+ case DPU_CHROMA_H1V2:
+ *v_sample = 2;
+ *h_sample = 1;
+ break;
+ case DPU_CHROMA_420:
+ *v_sample = 2;
+ *h_sample = 2;
+ break;
+ default:
+ *v_sample = 1;
+ *h_sample = 1;
+ break;
+ }
+}
+
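A quick illustration of the helper above (editorial sketch, not part of the patch):
for DPU_CHROMA_420 formats such as NV12 both chroma dimensions are halved.

    uint32_t v_sub, h_sub;

    _dpu_get_v_h_subsample_rate(DPU_CHROMA_420, &v_sub, &h_sub);
    /* v_sub == 2 && h_sub == 2: a 1920x1080 NV12 frame carries a
     * 960x540 grid of CbCr samples. */
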
+static int _dpu_format_get_media_color_ubwc(const struct dpu_format *fmt)
+{
+ static const struct dpu_media_color_map dpu_media_ubwc_map[] = {
+ {DRM_FORMAT_ABGR8888, COLOR_FMT_RGBA8888_UBWC},
+ {DRM_FORMAT_ARGB8888, COLOR_FMT_RGBA8888_UBWC},
+ {DRM_FORMAT_XBGR8888, COLOR_FMT_RGBA8888_UBWC},
+ {DRM_FORMAT_XRGB8888, COLOR_FMT_RGBA8888_UBWC},
+ {DRM_FORMAT_ABGR2101010, COLOR_FMT_RGBA1010102_UBWC},
+ {DRM_FORMAT_ARGB2101010, COLOR_FMT_RGBA1010102_UBWC},
+ {DRM_FORMAT_XRGB2101010, COLOR_FMT_RGBA1010102_UBWC},
+ {DRM_FORMAT_XBGR2101010, COLOR_FMT_RGBA1010102_UBWC},
+ {DRM_FORMAT_BGR565, COLOR_FMT_RGB565_UBWC},
+ };
+ int color_fmt = -1;
+ int i;
+
+ if (fmt->base.pixel_format == DRM_FORMAT_NV12 ||
+ fmt->base.pixel_format == DRM_FORMAT_P010) {
+ if (DPU_FORMAT_IS_DX(fmt)) {
+ if (fmt->unpack_tight)
+ color_fmt = COLOR_FMT_NV12_BPP10_UBWC;
+ else
+ color_fmt = COLOR_FMT_P010_UBWC;
+ } else
+ color_fmt = COLOR_FMT_NV12_UBWC;
+ return color_fmt;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dpu_media_ubwc_map); ++i)
+ if (fmt->base.pixel_format == dpu_media_ubwc_map[i].format) {
+ color_fmt = dpu_media_ubwc_map[i].color;
+ break;
+ }
+ return color_fmt;
+}
+
+static int _dpu_format_get_plane_sizes_ubwc(
+ const struct dpu_format *fmt,
+ const uint32_t width,
+ const uint32_t height,
+ struct dpu_hw_fmt_layout *layout)
+{
+ int i;
+ int color;
+ bool meta = DPU_FORMAT_IS_UBWC(fmt);
+
+ memset(layout, 0, sizeof(struct dpu_hw_fmt_layout));
+ layout->format = fmt;
+ layout->width = width;
+ layout->height = height;
+ layout->num_planes = fmt->num_planes;
+
+ color = _dpu_format_get_media_color_ubwc(fmt);
+ if (color < 0) {
+ DRM_ERROR("UBWC format not supported for fmt: %4.4s\n",
+ (char *)&fmt->base.pixel_format);
+ return -EINVAL;
+ }
+
+ if (DPU_FORMAT_IS_YUV(layout->format)) {
+ uint32_t y_sclines, uv_sclines;
+ uint32_t y_meta_scanlines = 0;
+ uint32_t uv_meta_scanlines = 0;
+
+ layout->num_planes = 2;
+ layout->plane_pitch[0] = VENUS_Y_STRIDE(color, width);
+ y_sclines = VENUS_Y_SCANLINES(color, height);
+ layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
+ y_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ layout->plane_pitch[1] = VENUS_UV_STRIDE(color, width);
+ uv_sclines = VENUS_UV_SCANLINES(color, height);
+ layout->plane_size[1] = MSM_MEDIA_ALIGN(layout->plane_pitch[1] *
+ uv_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ if (!meta)
+ goto done;
+
+ layout->num_planes += 2;
+ layout->plane_pitch[2] = VENUS_Y_META_STRIDE(color, width);
+ y_meta_scanlines = VENUS_Y_META_SCANLINES(color, height);
+ layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
+ y_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ layout->plane_pitch[3] = VENUS_UV_META_STRIDE(color, width);
+ uv_meta_scanlines = VENUS_UV_META_SCANLINES(color, height);
+ layout->plane_size[3] = MSM_MEDIA_ALIGN(layout->plane_pitch[3] *
+ uv_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ } else {
+ uint32_t rgb_scanlines, rgb_meta_scanlines;
+
+ layout->num_planes = 1;
+
+ layout->plane_pitch[0] = VENUS_RGB_STRIDE(color, width);
+ rgb_scanlines = VENUS_RGB_SCANLINES(color, height);
+ layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
+ rgb_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ if (!meta)
+ goto done;
+ layout->num_planes += 2;
+ layout->plane_pitch[2] = VENUS_RGB_META_STRIDE(color, width);
+ rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color, height);
+ layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
+ rgb_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+ }
+
+done:
+ for (i = 0; i < DPU_MAX_PLANES; i++)
+ layout->total_size += layout->plane_size[i];
+
+ return 0;
+}
+
+static int _dpu_format_get_plane_sizes_linear(
+ const struct dpu_format *fmt,
+ const uint32_t width,
+ const uint32_t height,
+ struct dpu_hw_fmt_layout *layout,
+ const uint32_t *pitches)
+{
+ int i;
+
+ memset(layout, 0, sizeof(struct dpu_hw_fmt_layout));
+ layout->format = fmt;
+ layout->width = width;
+ layout->height = height;
+ layout->num_planes = fmt->num_planes;
+
+ /* Due to memset above, only need to set planes of interest */
+ if (fmt->fetch_planes == DPU_PLANE_INTERLEAVED) {
+ layout->num_planes = 1;
+ layout->plane_size[0] = width * height * layout->format->bpp;
+ layout->plane_pitch[0] = width * layout->format->bpp;
+ } else {
+ uint32_t v_subsample, h_subsample;
+ uint32_t chroma_samp;
+ uint32_t bpp = 1;
+
+ chroma_samp = fmt->chroma_sample;
+ _dpu_get_v_h_subsample_rate(chroma_samp, &v_subsample,
+ &h_subsample);
+
+ if (width % h_subsample || height % v_subsample) {
+ DRM_ERROR("mismatch in subsample vs dimensions\n");
+ return -EINVAL;
+ }
+
+ if ((fmt->base.pixel_format == DRM_FORMAT_NV12) &&
+ (DPU_FORMAT_IS_DX(fmt)))
+ bpp = 2;
+ layout->plane_pitch[0] = width * bpp;
+ layout->plane_pitch[1] = layout->plane_pitch[0] / h_subsample;
+ layout->plane_size[0] = layout->plane_pitch[0] * height;
+ layout->plane_size[1] = layout->plane_pitch[1] *
+ (height / v_subsample);
+
+ if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
+ layout->num_planes = 2;
+ layout->plane_size[1] *= 2;
+ layout->plane_pitch[1] *= 2;
+ } else {
+ /* planar */
+ layout->num_planes = 3;
+ layout->plane_size[2] = layout->plane_size[1];
+ layout->plane_pitch[2] = layout->plane_pitch[1];
+ }
+ }
+
+ /*
+ * linear format: allow user allocated pitches if they are greater than
+ * the requirement.
+ * ubwc format: pitch values are computed uniformly across
+ * all the components based on ubwc specifications.
+ */
+ for (i = 0; i < layout->num_planes && i < DPU_MAX_PLANES; ++i) {
+ if (pitches && layout->plane_pitch[i] < pitches[i])
+ layout->plane_pitch[i] = pitches[i];
+ }
+
+ for (i = 0; i < DPU_MAX_PLANES; i++)
+ layout->total_size += layout->plane_size[i];
+
+ return 0;
+}
+
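Worked example for the linear path above (illustrative numbers only, not patch
content): a 1920x1080 DRM_FORMAT_NV12 buffer takes the pseudo-planar branch with
bpp = 1 and 2x2 subsampling.

    /*
     * plane_pitch[0] = 1920 * 1 = 1920
     * plane_pitch[1] = 1920 / 2 = 960, doubled for interleaved CbCr = 1920
     * plane_size[0]  = 1920 * 1080 = 2073600
     * plane_size[1]  = 960 * (1080 / 2) = 518400, doubled = 1036800
     * total_size     = 2073600 + 1036800 = 3110400 bytes, i.e. 1.5 bytes/pixel
     */
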
+static int dpu_format_get_plane_sizes(
+ const struct dpu_format *fmt,
+ const uint32_t w,
+ const uint32_t h,
+ struct dpu_hw_fmt_layout *layout,
+ const uint32_t *pitches)
+{
+ if (!layout || !fmt) {
+ DRM_ERROR("invalid pointer\n");
+ return -EINVAL;
+ }
+
+ if ((w > DPU_MAX_IMG_WIDTH) || (h > DPU_MAX_IMG_HEIGHT)) {
+ DRM_ERROR("image dimensions outside max range\n");
+ return -ERANGE;
+ }
+
+ if (DPU_FORMAT_IS_UBWC(fmt) || DPU_FORMAT_IS_TILE(fmt))
+ return _dpu_format_get_plane_sizes_ubwc(fmt, w, h, layout);
+
+ return _dpu_format_get_plane_sizes_linear(fmt, w, h, layout, pitches);
+}
+
+static int _dpu_format_populate_addrs_ubwc(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
+{
+ uint32_t base_addr = 0;
+ bool meta;
+
+ if (!fb || !layout) {
+ DRM_ERROR("invalid pointers\n");
+ return -EINVAL;
+ }
+
+ if (aspace)
+ base_addr = msm_framebuffer_iova(fb, aspace, 0);
+ if (!base_addr) {
+ DRM_ERROR("failed to retrieve base addr\n");
+ return -EFAULT;
+ }
+
+ meta = DPU_FORMAT_IS_UBWC(layout->format);
+
+ /* Per-format logic for verifying active planes */
+ if (DPU_FORMAT_IS_YUV(layout->format)) {
+ /************************************************/
+ /* UBWC ** */
+ /* buffer ** DPU PLANE */
+ /* format ** */
+ /************************************************/
+ /* ------------------- ** -------------------- */
+ /* | Y meta | ** | Y bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | Y bitstream | ** | CbCr bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | CbCr metadata | ** | Y meta | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | CbCr bitstream | ** | CbCr meta | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /************************************************/
+
+ /* configure Y bitstream plane */
+ layout->plane_addr[0] = base_addr + layout->plane_size[2];
+
+ /* configure CbCr bitstream plane */
+ layout->plane_addr[1] = base_addr + layout->plane_size[0]
+ + layout->plane_size[2] + layout->plane_size[3];
+
+ if (!meta)
+ return 0;
+
+ /* configure Y metadata plane */
+ layout->plane_addr[2] = base_addr;
+
+ /* configure CbCr metadata plane */
+ layout->plane_addr[3] = base_addr + layout->plane_size[0]
+ + layout->plane_size[2];
+
+ } else {
+ /************************************************/
+ /* UBWC ** */
+ /* buffer ** DPU PLANE */
+ /* format ** */
+ /************************************************/
+ /* ------------------- ** -------------------- */
+ /* | RGB meta | ** | RGB bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | RGB bitstream | ** | NONE | */
+ /* | data | ** | | */
+ /* ------------------- ** -------------------- */
+ /* ** | RGB meta | */
+ /* ** | plane | */
+ /* ** -------------------- */
+ /************************************************/
+
+ layout->plane_addr[0] = base_addr + layout->plane_size[2];
+ layout->plane_addr[1] = 0;
+
+ if (!meta)
+ return 0;
+
+ layout->plane_addr[2] = base_addr;
+ layout->plane_addr[3] = 0;
+ }
+ return 0;
+}
+
+static int _dpu_format_populate_addrs_linear(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
+{
+ unsigned int i;
+
+ /* Can now check the pitches given vs pitches expected */
+ for (i = 0; i < layout->num_planes; ++i) {
+ if (layout->plane_pitch[i] > fb->pitches[i]) {
+ DRM_ERROR("plane %u expected pitch %u, fb %u\n",
+ i, layout->plane_pitch[i], fb->pitches[i]);
+ return -EINVAL;
+ }
+ }
+
+ /* Populate addresses for simple formats here */
+ for (i = 0; i < layout->num_planes; ++i) {
+ if (aspace)
+ layout->plane_addr[i] =
+ msm_framebuffer_iova(fb, aspace, i);
+ if (!layout->plane_addr[i]) {
+ DRM_ERROR("failed to retrieve base addr\n");
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+int dpu_format_populate_layout(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
+{
+ int ret;
+
+ if (!fb || !layout) {
+ DRM_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ if ((fb->width > DPU_MAX_IMG_WIDTH) ||
+ (fb->height > DPU_MAX_IMG_HEIGHT)) {
+ DRM_ERROR("image dimensions outside max range\n");
+ return -ERANGE;
+ }
+
+ layout->format = to_dpu_format(msm_framebuffer_format(fb));
+
+ /* Populate the plane sizes etc via get_format */
+ ret = dpu_format_get_plane_sizes(layout->format, fb->width, fb->height,
+ layout, fb->pitches);
+ if (ret)
+ return ret;
+
+ /* Populate the addresses given the fb */
+ if (DPU_FORMAT_IS_UBWC(layout->format) ||
+ DPU_FORMAT_IS_TILE(layout->format))
+ ret = _dpu_format_populate_addrs_ubwc(aspace, fb, layout);
+ else
+ ret = _dpu_format_populate_addrs_linear(aspace, fb, layout);
+
+ return ret;
+}
+
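A minimal caller sketch (hypothetical, with error handling trimmed; 'aspace' and
'fb' stand in for the plane's address space and framebuffer):

    struct dpu_hw_fmt_layout layout;
    int ret;

    ret = dpu_format_populate_layout(aspace, fb, &layout);
    if (ret)
        return ret;
    /* layout.plane_addr[] and layout.plane_pitch[] can now be programmed
     * into the SSPP source-surface registers. */
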
+int dpu_format_check_modified_format(
+ const struct msm_kms *kms,
+ const struct msm_format *msm_fmt,
+ const struct drm_mode_fb_cmd2 *cmd,
+ struct drm_gem_object **bos)
+{
+ const struct drm_format_info *info;
+ const struct dpu_format *fmt;
+ struct dpu_hw_fmt_layout layout;
+ uint32_t bos_total_size = 0;
+ int ret, i;
+
+ if (!msm_fmt || !cmd || !bos) {
+ DRM_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ fmt = to_dpu_format(msm_fmt);
+ info = drm_format_info(fmt->base.pixel_format);
+ if (!info)
+ return -EINVAL;
+
+ ret = dpu_format_get_plane_sizes(fmt, cmd->width, cmd->height,
+ &layout, cmd->pitches);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < info->num_planes; i++) {
+ if (!bos[i]) {
+ DRM_ERROR("invalid handle for plane %d\n", i);
+ return -EINVAL;
+ }
+ if ((i == 0) || (bos[i] != bos[0]))
+ bos_total_size += bos[i]->size;
+ }
+
+ if (bos_total_size < layout.total_size) {
+ DRM_ERROR("buffers total size too small %u expected %u\n",
+ bos_total_size, layout.total_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+const struct dpu_format *dpu_get_dpu_format_ext(
+ const uint32_t format,
+ const uint64_t modifier)
+{
+ uint32_t i = 0;
+ const struct dpu_format *fmt = NULL;
+ const struct dpu_format *map = NULL;
+ ssize_t map_size = 0;
+
+ /*
+ * Currently only support exactly zero or one modifier.
+ * All planes use the same modifier.
+ */
+ DRM_DEBUG_ATOMIC("plane format modifier 0x%llX\n", modifier);
+
+ switch (modifier) {
+ case 0:
+ map = dpu_format_map;
+ map_size = ARRAY_SIZE(dpu_format_map);
+ break;
+ case DRM_FORMAT_MOD_QCOM_COMPRESSED:
+ map = dpu_format_map_ubwc;
+ map_size = ARRAY_SIZE(dpu_format_map_ubwc);
+ DRM_DEBUG_ATOMIC("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
+ (char *)&format);
+ break;
+ default:
+ DPU_ERROR("unsupported format modifier %llX\n", modifier);
+ return NULL;
+ }
+
+ for (i = 0; i < map_size; i++) {
+ if (format == map[i].base.pixel_format) {
+ fmt = &map[i];
+ break;
+ }
+ }
+
+ if (fmt == NULL)
+ DPU_ERROR("unsupported fmt: %4.4s modifier 0x%llX\n",
+ (char *)&format, modifier);
+ else
+ DRM_DEBUG_ATOMIC("fmt %4.4s mod 0x%llX ubwc %d yuv %d\n",
+ (char *)&format, modifier,
+ DPU_FORMAT_IS_UBWC(fmt),
+ DPU_FORMAT_IS_YUV(fmt));
+
+ return fmt;
+}
+
+const struct msm_format *dpu_get_msm_format(
+ struct msm_kms *kms,
+ const uint32_t format,
+ const uint64_t modifiers)
+{
+ const struct dpu_format *fmt = dpu_get_dpu_format_ext(format,
+ modifiers);
+ if (fmt)
+ return &fmt->base;
+ return NULL;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
new file mode 100644
index 0000000000..84b8b3289f
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_FORMATS_H
+#define _DPU_FORMATS_H
+
+#include <drm/drm_fourcc.h>
+#include "msm_gem.h"
+#include "dpu_hw_mdss.h"
+
+/**
+ * dpu_get_dpu_format_ext() - Returns dpu format structure pointer.
+ * @format: DRM FourCC Code
+ * @modifier: format modifier from the client (all planes use the same one)
+ */
+const struct dpu_format *dpu_get_dpu_format_ext(
+ const uint32_t format,
+ const uint64_t modifier);
+
+#define dpu_get_dpu_format(f) dpu_get_dpu_format_ext(f, 0)
+
+/**
+ * dpu_find_format - validate if the pixel format is supported
+ * @format: DRM FourCC code
+ * @supported_formats: formats supported by the DPU HW
+ * @num_formats: total number of formats
+ *
+ * Return: true if the format is supported, false otherwise
+ */
+static inline bool dpu_find_format(u32 format, const u32 *supported_formats,
+ size_t num_formats)
+{
+ int i;
+
+ for (i = 0; i < num_formats; i++) {
+ /* check for valid formats supported */
+ if (format == supported_formats[i])
+ return true;
+ }
+
+ return false;
+}
+
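For example, an atomic check could use this helper to reject a framebuffer whose
FourCC a pipe cannot fetch (a sketch; 'sblk' stands in for a catalog entry such
as a dpu_sspp_sub_blks pointer):

    if (!dpu_find_format(fb->format->format, sblk->format_list,
                         sblk->num_formats))
        return -EINVAL;
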
+/**
+ * dpu_get_msm_format - get a dpu_format by its msm_format base;
+ * callback function registered with the msm_kms layer
+ * @kms: kms driver
+ * @format: DRM FourCC Code
+ * @modifiers: data layout modifier
+ */
+const struct msm_format *dpu_get_msm_format(
+ struct msm_kms *kms,
+ const uint32_t format,
+ const uint64_t modifiers);
+
+/**
+ * dpu_format_check_modified_format - validate format and buffers for
+ * dpu non-standard, i.e. modified format
+ * @kms: kms driver
+ * @msm_fmt: pointer to the msm_format base of a dpu_format
+ * @cmd: fb_cmd2 structure from the user request
+ * @bos: gem buffer object list
+ *
+ * Return: error code on failure, 0 on success
+ */
+int dpu_format_check_modified_format(
+ const struct msm_kms *kms,
+ const struct msm_format *msm_fmt,
+ const struct drm_mode_fb_cmd2 *cmd,
+ struct drm_gem_object **bos);
+
+/**
+ * dpu_format_populate_layout - populate the given format layout based on
+ * mmu, fb, and format found in the fb
+ * @aspace: address space pointer
+ * @fb: framebuffer pointer
+ * @fmtl: format layout structure to populate
+ *
+ * Return: error code on failure or 0 on success
+ */
+int dpu_format_populate_layout(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *fmtl);
+
+#endif /*_DPU_FORMATS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
new file mode 100644
index 0000000000..713dfc0797
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -0,0 +1,676 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_kms.h"
+
+#define VIG_BASE_MASK \
+ (BIT(DPU_SSPP_QOS) |\
+ BIT(DPU_SSPP_CDP) |\
+ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
+
+#define VIG_MASK \
+ (VIG_BASE_MASK | \
+ BIT(DPU_SSPP_CSC_10BIT))
+
+#define VIG_MSM8998_MASK \
+ (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3))
+
+#define VIG_SDM845_MASK \
+ (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3))
+
+#define VIG_SDM845_MASK_SDMA \
+ (VIG_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
+
+#define VIG_SC7180_MASK \
+ (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4))
+
+#define VIG_SM6125_MASK \
+ (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3LITE))
+
+#define VIG_SC7180_MASK_SDMA \
+ (VIG_SC7180_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
+
+#define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL))
+
+#define DMA_MSM8998_MASK \
+ (BIT(DPU_SSPP_QOS) |\
+ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
+ BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
+
+#define VIG_SC7280_MASK \
+ (VIG_SC7180_MASK | BIT(DPU_SSPP_INLINE_ROTATION))
+
+#define VIG_SC7280_MASK_SDMA \
+ (VIG_SC7280_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
+
+#define DMA_SDM845_MASK \
+ (BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
+ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
+ BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
+
+#define DMA_CURSOR_SDM845_MASK \
+ (DMA_SDM845_MASK | BIT(DPU_SSPP_CURSOR))
+
+#define DMA_SDM845_MASK_SDMA \
+ (DMA_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
+
+#define DMA_CURSOR_SDM845_MASK_SDMA \
+ (DMA_CURSOR_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
+
+#define DMA_CURSOR_MSM8998_MASK \
+ (DMA_MSM8998_MASK | BIT(DPU_SSPP_CURSOR))
+
+#define MIXER_MSM8998_MASK \
+ (BIT(DPU_MIXER_SOURCESPLIT))
+
+#define MIXER_SDM845_MASK \
+ (BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
+
+#define MIXER_QCM2290_MASK \
+ (BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
+
+#define PINGPONG_SDM845_MASK \
+ (BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_TE) | BIT(DPU_PINGPONG_DSC))
+
+#define PINGPONG_SDM845_TE2_MASK \
+ (PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
+
+#define PINGPONG_SM8150_MASK \
+ (BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_DSC))
+
+#define CTL_SC7280_MASK \
+ (BIT(DPU_CTL_ACTIVE_CFG) | \
+ BIT(DPU_CTL_FETCH_ACTIVE) | \
+ BIT(DPU_CTL_VM_CFG) | \
+ BIT(DPU_CTL_DSPP_SUB_BLOCK_FLUSH))
+
+#define CTL_SM8550_MASK \
+ (CTL_SC7280_MASK | BIT(DPU_CTL_HAS_LAYER_EXT4))
+
+#define DSPP_SC7180_MASK BIT(DPU_DSPP_PCC)
+
+#define INTF_SC7180_MASK \
+ (BIT(DPU_INTF_INPUT_CTRL) | \
+ BIT(DPU_INTF_TE) | \
+ BIT(DPU_INTF_STATUS_SUPPORTED) | \
+ BIT(DPU_DATA_HCTL_EN))
+
+#define INTF_SC7280_MASK (INTF_SC7180_MASK)
+
+#define WB_SM8250_MASK (BIT(DPU_WB_LINE_MODE) | \
+ BIT(DPU_WB_UBWC) | \
+ BIT(DPU_WB_YUV_CONFIG) | \
+ BIT(DPU_WB_PIPE_ALPHA) | \
+ BIT(DPU_WB_XY_ROI_OFFSET) | \
+ BIT(DPU_WB_QOS) | \
+ BIT(DPU_WB_QOS_8LVL) | \
+ BIT(DPU_WB_CDP) | \
+ BIT(DPU_WB_INPUT_CTRL))
+
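These masks seed the unsigned long 'features' word declared by DPU_HW_BLK_INFO in
each catalog entry; individual capabilities are then tested bit-wise (a sketch;
'sspp_cfg' and 'use_multirect' are placeholders):

    if (test_bit(DPU_SSPP_SMART_DMA_V2, &sspp_cfg->features))
        use_multirect = true; /* pipe supports SmartDMA v2 multirect */
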
+#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)
+#define DEFAULT_DPU_LINE_WIDTH 2048
+#define DEFAULT_DPU_OUTPUT_LINE_WIDTH 2560
+
+#define MAX_HORZ_DECIMATION 4
+#define MAX_VERT_DECIMATION 4
+
+#define MAX_UPSCALE_RATIO 20
+#define MAX_DOWNSCALE_RATIO 4
+#define SSPP_UNITY_SCALE 1
+
+#define STRCAT(X, Y) (X Y)
+
+static const uint32_t plane_formats[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_BGRA4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_XBGR4444,
+ DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_BGRX4444,
+};
+
+static const uint32_t plane_formats_yuv[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_BGRA4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_XBGR4444,
+ DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_BGRX4444,
+
+ DRM_FORMAT_P010,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV61,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YVU420,
+};
+
+static const u32 rotation_v2_formats[] = {
+ DRM_FORMAT_NV12,
+ /* TODO add formats after validation */
+};
+
+static const uint32_t wb2_formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_BGRA4444,
+ DRM_FORMAT_BGRX4444,
+ DRM_FORMAT_XBGR4444,
+};
+
+/*************************************************************
+ * SSPP sub blocks config
+ *************************************************************/
+
+/* SSPP common configuration */
+#define _VIG_SBLK(sdma_pri, qseed_ver) \
+ { \
+ .maxdwnscale = MAX_DOWNSCALE_RATIO, \
+ .maxupscale = MAX_UPSCALE_RATIO, \
+ .smart_dma_priority = sdma_pri, \
+ .scaler_blk = {.name = "scaler", \
+ .id = qseed_ver, \
+ .base = 0xa00, .len = 0xa0,}, \
+ .csc_blk = {.name = "csc", \
+ .id = DPU_SSPP_CSC_10BIT, \
+ .base = 0x1a00, .len = 0x100,}, \
+ .format_list = plane_formats_yuv, \
+ .num_formats = ARRAY_SIZE(plane_formats_yuv), \
+ .virt_format_list = plane_formats, \
+ .virt_num_formats = ARRAY_SIZE(plane_formats), \
+ .rotation_cfg = NULL, \
+ }
+
+#define _VIG_SBLK_ROT(sdma_pri, qseed_ver, rot_cfg) \
+ { \
+ .maxdwnscale = MAX_DOWNSCALE_RATIO, \
+ .maxupscale = MAX_UPSCALE_RATIO, \
+ .smart_dma_priority = sdma_pri, \
+ .scaler_blk = {.name = "scaler", \
+ .id = qseed_ver, \
+ .base = 0xa00, .len = 0xa0,}, \
+ .csc_blk = {.name = "csc", \
+ .id = DPU_SSPP_CSC_10BIT, \
+ .base = 0x1a00, .len = 0x100,}, \
+ .format_list = plane_formats_yuv, \
+ .num_formats = ARRAY_SIZE(plane_formats_yuv), \
+ .virt_format_list = plane_formats, \
+ .virt_num_formats = ARRAY_SIZE(plane_formats), \
+ .rotation_cfg = rot_cfg, \
+ }
+
+#define _DMA_SBLK(sdma_pri) \
+ { \
+ .maxdwnscale = SSPP_UNITY_SCALE, \
+ .maxupscale = SSPP_UNITY_SCALE, \
+ .smart_dma_priority = sdma_pri, \
+ .format_list = plane_formats, \
+ .num_formats = ARRAY_SIZE(plane_formats), \
+ .virt_format_list = plane_formats, \
+ .virt_num_formats = ARRAY_SIZE(plane_formats), \
+ }
+
+static const struct dpu_sspp_sub_blks msm8998_vig_sblk_0 =
+ _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
+static const struct dpu_sspp_sub_blks msm8998_vig_sblk_1 =
+ _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
+static const struct dpu_sspp_sub_blks msm8998_vig_sblk_2 =
+ _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
+static const struct dpu_sspp_sub_blks msm8998_vig_sblk_3 =
+ _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
+
+static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = {
+ .rot_maxheight = 1088,
+ .rot_num_formats = ARRAY_SIZE(rotation_v2_formats),
+ .rot_format_list = rotation_v2_formats,
+};
+
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 =
+ _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED3);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 =
+ _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED3);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 =
+ _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED3);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 =
+ _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED3);
+
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK(1);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK(2);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK(3);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK(4);
+
+static const struct dpu_sspp_sub_blks sc7180_vig_sblk_0 =
+ _VIG_SBLK(4, DPU_SSPP_SCALER_QSEED4);
+
+static const struct dpu_sspp_sub_blks sc7280_vig_sblk_0 =
+ _VIG_SBLK_ROT(4, DPU_SSPP_SCALER_QSEED4, &dpu_rot_sc7280_cfg_v2);
+
+static const struct dpu_sspp_sub_blks sm6115_vig_sblk_0 =
+ _VIG_SBLK(2, DPU_SSPP_SCALER_QSEED4);
+
+static const struct dpu_sspp_sub_blks sm6125_vig_sblk_0 =
+ _VIG_SBLK(3, DPU_SSPP_SCALER_QSEED3LITE);
+
+static const struct dpu_sspp_sub_blks sm8250_vig_sblk_0 =
+ _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED4);
+static const struct dpu_sspp_sub_blks sm8250_vig_sblk_1 =
+ _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED4);
+static const struct dpu_sspp_sub_blks sm8250_vig_sblk_2 =
+ _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4);
+static const struct dpu_sspp_sub_blks sm8250_vig_sblk_3 =
+ _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4);
+
+static const struct dpu_sspp_sub_blks sm8550_vig_sblk_0 =
+ _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4);
+static const struct dpu_sspp_sub_blks sm8550_vig_sblk_1 =
+ _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4);
+static const struct dpu_sspp_sub_blks sm8550_vig_sblk_2 =
+ _VIG_SBLK(9, DPU_SSPP_SCALER_QSEED4);
+static const struct dpu_sspp_sub_blks sm8550_vig_sblk_3 =
+ _VIG_SBLK(10, DPU_SSPP_SCALER_QSEED4);
+static const struct dpu_sspp_sub_blks sm8550_dma_sblk_4 = _DMA_SBLK(5);
+static const struct dpu_sspp_sub_blks sm8550_dma_sblk_5 = _DMA_SBLK(6);
+
+#define _VIG_SBLK_NOSCALE(sdma_pri) \
+ { \
+ .maxdwnscale = SSPP_UNITY_SCALE, \
+ .maxupscale = SSPP_UNITY_SCALE, \
+ .smart_dma_priority = sdma_pri, \
+ .format_list = plane_formats_yuv, \
+ .num_formats = ARRAY_SIZE(plane_formats_yuv), \
+ .virt_format_list = plane_formats, \
+ .virt_num_formats = ARRAY_SIZE(plane_formats), \
+ }
+
+static const struct dpu_sspp_sub_blks qcm2290_vig_sblk_0 = _VIG_SBLK_NOSCALE(2);
+static const struct dpu_sspp_sub_blks qcm2290_dma_sblk_0 = _DMA_SBLK(1);
+
+/*************************************************************
+ * MIXER sub blocks config
+ *************************************************************/
+
+/* MSM8998 */
+
+static const struct dpu_lm_sub_blks msm8998_lm_sblk = {
+ .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .maxblendstages = 7, /* excluding base layer */
+ .blendstage_base = { /* offsets relative to mixer base */
+ 0x20, 0x50, 0x80, 0xb0, 0x230,
+ 0x260, 0x290
+ },
+};
+
+/* SDM845 */
+
+static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
+ .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .maxblendstages = 11, /* excluding base layer */
+ .blendstage_base = { /* offsets relative to mixer base */
+ 0x20, 0x38, 0x50, 0x68, 0x80, 0x98,
+ 0xb0, 0xc8, 0xe0, 0xf8, 0x110
+ },
+};
+
+/* SC7180 */
+
+static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
+ .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .maxblendstages = 7, /* excluding base layer */
+ .blendstage_base = { /* offsets relative to mixer base */
+ 0x20, 0x38, 0x50, 0x68, 0x80, 0x98, 0xb0
+ },
+};
+
+/* QCM2290 */
+
+static const struct dpu_lm_sub_blks qcm2290_lm_sblk = {
+ .maxwidth = DEFAULT_DPU_LINE_WIDTH,
+ .maxblendstages = 4, /* excluding base layer */
+ .blendstage_base = { /* offsets relative to mixer base */
+ 0x20, 0x38, 0x50, 0x68
+ },
+};
+
+/*************************************************************
+ * DSPP sub blocks config
+ *************************************************************/
+static const struct dpu_dspp_sub_blks msm8998_dspp_sblk = {
+ .pcc = {.name = "pcc", .id = DPU_DSPP_PCC, .base = 0x1700,
+ .len = 0x90, .version = 0x10007},
+};
+
+static const struct dpu_dspp_sub_blks sdm845_dspp_sblk = {
+ .pcc = {.name = "pcc", .id = DPU_DSPP_PCC, .base = 0x1700,
+ .len = 0x90, .version = 0x40000},
+};
+
+/*************************************************************
+ * PINGPONG sub blocks config
+ *************************************************************/
+static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = {
+ .te2 = {.name = "te2", .id = DPU_PINGPONG_TE2, .base = 0x2000, .len = 0x0,
+ .version = 0x1},
+ .dither = {.name = "dither", .id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+ .len = 0x20, .version = 0x10000},
+};
+
+static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
+ .dither = {.name = "dither", .id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+ .len = 0x20, .version = 0x10000},
+};
+
+static const struct dpu_pingpong_sub_blks sc7280_pp_sblk = {
+ .dither = {.name = "dither", .id = DPU_PINGPONG_DITHER, .base = 0xe0,
+ .len = 0x20, .version = 0x20000},
+};
+
+/*************************************************************
+ * DSC sub blocks config
+ *************************************************************/
+static const struct dpu_dsc_sub_blks dsc_sblk_0 = {
+ .enc = {.name = "enc", .base = 0x100, .len = 0x9c},
+ .ctl = {.name = "ctl", .base = 0xF00, .len = 0x10},
+};
+
+static const struct dpu_dsc_sub_blks dsc_sblk_1 = {
+ .enc = {.name = "enc", .base = 0x200, .len = 0x9c},
+ .ctl = {.name = "ctl", .base = 0xF80, .len = 0x10},
+};
+
+/*************************************************************
+ * VBIF sub blocks config
+ *************************************************************/
+/* VBIF QOS remap */
+static const u32 msm8998_rt_pri_lvl[] = {1, 2, 2, 2};
+static const u32 msm8998_nrt_pri_lvl[] = {1, 1, 1, 1};
+static const u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6};
+static const u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3};
+
+static const struct dpu_vbif_dynamic_ot_cfg msm8998_ot_rdwr_cfg[] = {
+ {
+ .pps = 1920 * 1080 * 30,
+ .ot_limit = 2,
+ },
+ {
+ .pps = 1920 * 1080 * 60,
+ .ot_limit = 4,
+ },
+ {
+ .pps = 3840 * 2160 * 30,
+ .ot_limit = 16,
+ },
+};
+
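The .pps thresholds above are plain pixel rates (width * height * fps), so a
1080p30 surface (1920 * 1080 * 30 = 62208000) is capped at 2 outstanding
transactions while 4k30 gets 16. A lookup sketch (helper name and out-of-range
fallback are assumptions, not the driver's exact code):

    static u32 pick_ot_limit(const struct dpu_vbif_dynamic_ot_tbl *tbl,
                             u64 pps)
    {
        u32 i;

        /* take the first threshold that covers the requested pixel rate */
        for (i = 0; i < tbl->count; i++)
            if (pps <= tbl->cfg[i].pps)
                return tbl->cfg[i].ot_limit;

        /* beyond the largest threshold, fall back to the last entry */
        return tbl->cfg[tbl->count - 1].ot_limit;
    }
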
+static const struct dpu_vbif_cfg msm8998_vbif[] = {
+ {
+ .name = "vbif_rt", .id = VBIF_RT,
+ .base = 0, .len = 0x1040,
+ .default_ot_rd_limit = 32,
+ .default_ot_wr_limit = 32,
+ .features = BIT(DPU_VBIF_QOS_REMAP) | BIT(DPU_VBIF_QOS_OTLIM),
+ .xin_halt_timeout = 0x4000,
+ .qos_rp_remap_size = 0x20,
+ .dynamic_ot_rd_tbl = {
+ .count = ARRAY_SIZE(msm8998_ot_rdwr_cfg),
+ .cfg = msm8998_ot_rdwr_cfg,
+ },
+ .dynamic_ot_wr_tbl = {
+ .count = ARRAY_SIZE(msm8998_ot_rdwr_cfg),
+ .cfg = msm8998_ot_rdwr_cfg,
+ },
+ .qos_rt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(msm8998_rt_pri_lvl),
+ .priority_lvl = msm8998_rt_pri_lvl,
+ },
+ .qos_nrt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(msm8998_nrt_pri_lvl),
+ .priority_lvl = msm8998_nrt_pri_lvl,
+ },
+ .memtype_count = 14,
+ .memtype = {2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2},
+ },
+};
+
+static const struct dpu_vbif_cfg sdm845_vbif[] = {
+ {
+ .name = "vbif_rt", .id = VBIF_RT,
+ .base = 0, .len = 0x1040,
+ .features = BIT(DPU_VBIF_QOS_REMAP),
+ .xin_halt_timeout = 0x4000,
+ .qos_rp_remap_size = 0x40,
+ .qos_rt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(sdm845_rt_pri_lvl),
+ .priority_lvl = sdm845_rt_pri_lvl,
+ },
+ .qos_nrt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(sdm845_nrt_pri_lvl),
+ .priority_lvl = sdm845_nrt_pri_lvl,
+ },
+ .memtype_count = 14,
+ .memtype = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3},
+ },
+};
+
+static const struct dpu_vbif_cfg sm8550_vbif[] = {
+ {
+ .name = "vbif_rt", .id = VBIF_RT,
+ .base = 0, .len = 0x1040,
+ .features = BIT(DPU_VBIF_QOS_REMAP),
+ .xin_halt_timeout = 0x4000,
+ .qos_rp_remap_size = 0x40,
+ .qos_rt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(sdm845_rt_pri_lvl),
+ .priority_lvl = sdm845_rt_pri_lvl,
+ },
+ .qos_nrt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(sdm845_nrt_pri_lvl),
+ .priority_lvl = sdm845_nrt_pri_lvl,
+ },
+ .memtype_count = 16,
+ .memtype = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3},
+ },
+};
+
+/*************************************************************
+ * PERF data config
+ *************************************************************/
+
+/* SSPP QOS LUTs */
+static const struct dpu_qos_lut_entry msm8998_qos_linear[] = {
+ {.fl = 4, .lut = 0x1b},
+ {.fl = 5, .lut = 0x5b},
+ {.fl = 6, .lut = 0x15b},
+ {.fl = 7, .lut = 0x55b},
+ {.fl = 8, .lut = 0x155b},
+ {.fl = 9, .lut = 0x555b},
+ {.fl = 10, .lut = 0x1555b},
+ {.fl = 11, .lut = 0x5555b},
+ {.fl = 12, .lut = 0x15555b},
+ {.fl = 0, .lut = 0x55555b}
+};
+
+static const struct dpu_qos_lut_entry sdm845_qos_linear[] = {
+ {.fl = 4, .lut = 0x357},
+ {.fl = 5, .lut = 0x3357},
+ {.fl = 6, .lut = 0x23357},
+ {.fl = 7, .lut = 0x223357},
+ {.fl = 8, .lut = 0x2223357},
+ {.fl = 9, .lut = 0x22223357},
+ {.fl = 10, .lut = 0x222223357},
+ {.fl = 11, .lut = 0x2222223357},
+ {.fl = 12, .lut = 0x22222223357},
+ {.fl = 13, .lut = 0x222222223357},
+ {.fl = 14, .lut = 0x1222222223357},
+ {.fl = 0, .lut = 0x11222222223357}
+};
+
+static const struct dpu_qos_lut_entry msm8998_qos_macrotile[] = {
+ {.fl = 10, .lut = 0x1aaff},
+ {.fl = 11, .lut = 0x5aaff},
+ {.fl = 12, .lut = 0x15aaff},
+ {.fl = 0, .lut = 0x55aaff},
+};
+
+static const struct dpu_qos_lut_entry sc7180_qos_linear[] = {
+ {.fl = 0, .lut = 0x0011222222335777},
+};
+
+static const struct dpu_qos_lut_entry sm6350_qos_linear_macrotile[] = {
+ {.fl = 0, .lut = 0x0011223445566777 },
+};
+
+static const struct dpu_qos_lut_entry sm8150_qos_linear[] = {
+ {.fl = 0, .lut = 0x0011222222223357 },
+};
+
+static const struct dpu_qos_lut_entry sc8180x_qos_linear[] = {
+ {.fl = 4, .lut = 0x0000000000000357 },
+};
+
+static const struct dpu_qos_lut_entry qcm2290_qos_linear[] = {
+ {.fl = 0, .lut = 0x0011222222335777},
+};
+
+static const struct dpu_qos_lut_entry sdm845_qos_macrotile[] = {
+ {.fl = 10, .lut = 0x344556677},
+ {.fl = 11, .lut = 0x3344556677},
+ {.fl = 12, .lut = 0x23344556677},
+ {.fl = 13, .lut = 0x223344556677},
+ {.fl = 14, .lut = 0x1223344556677},
+ {.fl = 0, .lut = 0x112233344556677},
+};
+
+static const struct dpu_qos_lut_entry sc7180_qos_macrotile[] = {
+ {.fl = 0, .lut = 0x0011223344556677},
+};
+
+static const struct dpu_qos_lut_entry sc8180x_qos_macrotile[] = {
+ {.fl = 10, .lut = 0x0000000344556677},
+};
+
+static const struct dpu_qos_lut_entry msm8998_qos_nrt[] = {
+ {.fl = 0, .lut = 0x0},
+};
+
+static const struct dpu_qos_lut_entry sdm845_qos_nrt[] = {
+ {.fl = 0, .lut = 0x0},
+};
+
+static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
+ {.fl = 0, .lut = 0x0},
+};
+
+/*************************************************************
+ * Hardware catalog
+ *************************************************************/
+
+#include "catalog/dpu_3_0_msm8998.h"
+
+#include "catalog/dpu_4_0_sdm845.h"
+
+#include "catalog/dpu_5_0_sm8150.h"
+#include "catalog/dpu_5_1_sc8180x.h"
+#include "catalog/dpu_5_4_sm6125.h"
+
+#include "catalog/dpu_6_0_sm8250.h"
+#include "catalog/dpu_6_2_sc7180.h"
+#include "catalog/dpu_6_3_sm6115.h"
+#include "catalog/dpu_6_4_sm6350.h"
+#include "catalog/dpu_6_5_qcm2290.h"
+#include "catalog/dpu_6_9_sm6375.h"
+
+#include "catalog/dpu_7_0_sm8350.h"
+#include "catalog/dpu_7_2_sc7280.h"
+
+#include "catalog/dpu_8_0_sc8280xp.h"
+#include "catalog/dpu_8_1_sm8450.h"
+
+#include "catalog/dpu_9_0_sm8550.h"
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
new file mode 100644
index 0000000000..6c9634209e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -0,0 +1,849 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_CATALOG_H
+#define _DPU_HW_CATALOG_H
+
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/bitmap.h>
+#include <linux/err.h>
+
+/**
+ * Max hardware block count: e.g. at most 12 SSPP pipes or
+ * 5 CTL paths. In all cases the current design caps any one
+ * block type at 12 instances.
+ */
+#define MAX_BLOCKS 12
+
+#define DPU_HW_BLK_NAME_LEN 16
+
+#define MAX_IMG_WIDTH 0x3fff
+#define MAX_IMG_HEIGHT 0x3fff
+
+#define CRTC_DUAL_MIXERS 2
+
+#define MAX_XIN_COUNT 16
+
+/**
+ * MDP TOP BLOCK features
+ * @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
+ * @DPU_MDP_10BIT_SUPPORT Chipset supports 10 bit pixel formats
+ * @DPU_MDP_AUDIO_SELECT Audio select signal is supported
+ * @DPU_MDP_PERIPH_0_REMOVED Indicates that access to periph top0 block results
+ * in a failure
+ * @DPU_MDP_VSYNC_SEL Enables vsync source selection via MDP_VSYNC_SEL register
+ * (moved into INTF block since DPU 5.0.0)
+ * @DPU_MDP_MAX Maximum value
+ */
+enum {
+ DPU_MDP_PANIC_PER_PIPE = 0x1,
+ DPU_MDP_10BIT_SUPPORT,
+ DPU_MDP_AUDIO_SELECT,
+ DPU_MDP_PERIPH_0_REMOVED,
+ DPU_MDP_VSYNC_SEL,
+ DPU_MDP_MAX
+};
+
+/**
+ * SSPP sub-blocks/features
+ * @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support
+ * @DPU_SSPP_SCALER_QSEED3, QSEED3 algorithm support
+ * @DPU_SSPP_SCALER_QSEED3LITE, QSEED3 Lite algorithm support
+ * @DPU_SSPP_SCALER_QSEED4, QSEED4 algorithm support
+ * @DPU_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes
+ * @DPU_SSPP_CSC, Support of color space conversion
+ * @DPU_SSPP_CSC_10BIT, Support of 10-bit Color space conversion
+ * @DPU_SSPP_CURSOR, SSPP can be used as a cursor layer
+ * @DPU_SSPP_QOS, SSPP support QoS control, danger/safe/creq
+ * @DPU_SSPP_QOS_8LVL, SSPP support 8-level QoS control
+ * @DPU_SSPP_EXCL_RECT, SSPP supports exclusion rect
+ * @DPU_SSPP_SMART_DMA_V1, SmartDMA 1.0 support
+ * @DPU_SSPP_SMART_DMA_V2, SmartDMA 2.0 support
+ * @DPU_SSPP_TS_PREFILL Supports prefill with traffic shaper
+ * @DPU_SSPP_TS_PREFILL_REC1 Supports prefill with traffic shaper multirec
+ * @DPU_SSPP_CDP Supports client driven prefetch
+ * @DPU_SSPP_INLINE_ROTATION Support inline rotation
+ * @DPU_SSPP_MAX maximum value
+ */
+enum {
+ DPU_SSPP_SCALER_QSEED2 = 0x1,
+ DPU_SSPP_SCALER_QSEED3,
+ DPU_SSPP_SCALER_QSEED3LITE,
+ DPU_SSPP_SCALER_QSEED4,
+ DPU_SSPP_SCALER_RGB,
+ DPU_SSPP_CSC,
+ DPU_SSPP_CSC_10BIT,
+ DPU_SSPP_CURSOR,
+ DPU_SSPP_QOS,
+ DPU_SSPP_QOS_8LVL,
+ DPU_SSPP_EXCL_RECT,
+ DPU_SSPP_SMART_DMA_V1,
+ DPU_SSPP_SMART_DMA_V2,
+ DPU_SSPP_TS_PREFILL,
+ DPU_SSPP_TS_PREFILL_REC1,
+ DPU_SSPP_CDP,
+ DPU_SSPP_INLINE_ROTATION,
+ DPU_SSPP_MAX
+};
+
+/*
+ * MIXER sub-blocks/features
+ * @DPU_MIXER_LAYER Layer mixer layer blend configuration,
+ * @DPU_MIXER_SOURCESPLIT Layer mixer supports source-split configuration
+ * @DPU_MIXER_GC Gamma correction block
+ * @DPU_DIM_LAYER Layer mixer supports dim layer
+ * @DPU_MIXER_COMBINED_ALPHA Layer mixer has combined alpha register
+ * @DPU_MIXER_MAX maximum value
+ */
+enum {
+ DPU_MIXER_LAYER = 0x1,
+ DPU_MIXER_SOURCESPLIT,
+ DPU_MIXER_GC,
+ DPU_DIM_LAYER,
+ DPU_MIXER_COMBINED_ALPHA,
+ DPU_MIXER_MAX
+};
+
+/**
+ * DSPP sub-blocks
+ * @DPU_DSPP_PCC Panel color correction block
+ */
+enum {
+ DPU_DSPP_PCC = 0x1,
+ DPU_DSPP_MAX
+};
+
+/**
+ * PINGPONG sub-blocks
+ * @DPU_PINGPONG_TE Tear check block
+ * @DPU_PINGPONG_TE2 Additional tear check block for split pipes
+ * @DPU_PINGPONG_SPLIT PP block supports split fifo
+ * @DPU_PINGPONG_SLAVE PP block is a suitable slave for split fifo
+ * @DPU_PINGPONG_DITHER Dither blocks
+ * @DPU_PINGPONG_DSC PP block supports DSC
+ * @DPU_PINGPONG_MAX
+ */
+enum {
+ DPU_PINGPONG_TE = 0x1,
+ DPU_PINGPONG_TE2,
+ DPU_PINGPONG_SPLIT,
+ DPU_PINGPONG_SLAVE,
+ DPU_PINGPONG_DITHER,
+ DPU_PINGPONG_DSC,
+ DPU_PINGPONG_MAX
+};
+
+/**
+ * CTL sub-blocks
+ * @DPU_CTL_SPLIT_DISPLAY: CTL supports video mode split display
+ * @DPU_CTL_ACTIVE_CFG: CTL supports the active-CTL configuration model
+ * @DPU_CTL_FETCH_ACTIVE: Active CTL for fetch HW (SSPPs)
+ * @DPU_CTL_VM_CFG: CTL config to support multiple VMs
+ * @DPU_CTL_HAS_LAYER_EXT4: CTL has the CTL_LAYER_EXT4 register
+ * @DPU_CTL_DSPP_SUB_BLOCK_FLUSH: CTL config to support DSPP sub-block flush
+ * @DPU_CTL_MAX
+ */
+enum {
+ DPU_CTL_SPLIT_DISPLAY = 0x1,
+ DPU_CTL_ACTIVE_CFG,
+ DPU_CTL_FETCH_ACTIVE,
+ DPU_CTL_VM_CFG,
+ DPU_CTL_HAS_LAYER_EXT4,
+ DPU_CTL_DSPP_SUB_BLOCK_FLUSH,
+ DPU_CTL_MAX
+};
+
+/**
+ * INTF sub-blocks
+ * @DPU_INTF_INPUT_CTRL Supports the setting of pp block from which
+ * pixel data arrives to this INTF
+ * @DPU_INTF_TE INTF block has TE configuration support
+ * @DPU_DATA_HCTL_EN Allows data to be transferred at a different rate
+ * than the video timing
+ * @DPU_INTF_STATUS_SUPPORTED INTF block has INTF_STATUS register
+ * @DPU_INTF_MAX
+ */
+enum {
+ DPU_INTF_INPUT_CTRL = 0x1,
+ DPU_INTF_TE,
+ DPU_DATA_HCTL_EN,
+ DPU_INTF_STATUS_SUPPORTED,
+ DPU_INTF_MAX
+};
+
+/**
+ * WB sub-blocks and features
+ * @DPU_WB_LINE_MODE Writeback module supports line/linear mode
+ * @DPU_WB_BLOCK_MODE Writeback module supports block mode read
+ * @DPU_WB_UBWC, Writeback supports Universal Bandwidth Compression
+ * @DPU_WB_YUV_CONFIG Writeback supports output of YUV colorspace
+ * @DPU_WB_PIPE_ALPHA Writeback supports pipe alpha
+ * @DPU_WB_XY_ROI_OFFSET Writeback supports x/y-offset of out ROI in
+ * the destination image
+ * @DPU_WB_QOS, Writeback supports QoS control, danger/safe/creq
+ * @DPU_WB_QOS_8LVL, Writeback supports 8-level QoS control
+ * @DPU_WB_CDP Writeback supports client driven prefetch
+ * @DPU_WB_INPUT_CTRL Writeback supports selecting the pp block from which
+ * input pixel data arrives
+ * @DPU_WB_CROP CWB supports cropping
+ * @DPU_WB_MAX maximum value
+ */
+enum {
+ DPU_WB_LINE_MODE = 0x1,
+ DPU_WB_BLOCK_MODE,
+ DPU_WB_UBWC,
+ DPU_WB_YUV_CONFIG,
+ DPU_WB_PIPE_ALPHA,
+ DPU_WB_XY_ROI_OFFSET,
+ DPU_WB_QOS,
+ DPU_WB_QOS_8LVL,
+ DPU_WB_CDP,
+ DPU_WB_INPUT_CTRL,
+ DPU_WB_CROP,
+ DPU_WB_MAX
+};
+
+/**
+ * VBIF sub-blocks and features
+ * @DPU_VBIF_QOS_OTLIM VBIF supports OT Limit
+ * @DPU_VBIF_QOS_REMAP VBIF supports QoS priority remap
+ * @DPU_VBIF_MAX maximum value
+ */
+enum {
+ DPU_VBIF_QOS_OTLIM = 0x1,
+ DPU_VBIF_QOS_REMAP,
+ DPU_VBIF_MAX
+};
+
+/**
+ * DSC sub-blocks/features
+ * @DPU_DSC_OUTPUT_CTRL Configure which PINGPONG block gets
+ * the pixel output from this DSC.
+ * @DPU_DSC_HW_REV_1_2 DSC block supports DSC 1.1 and 1.2
+ * @DPU_DSC_NATIVE_42x_EN Supports NATIVE_422_EN and NATIVE_420_EN encoding
+ * @DPU_DSC_MAX
+ */
+enum {
+ DPU_DSC_OUTPUT_CTRL = 0x1,
+ DPU_DSC_HW_REV_1_2,
+ DPU_DSC_NATIVE_42x_EN,
+ DPU_DSC_MAX
+};
+
+/**
+ * MACRO DPU_HW_BLK_INFO - information of HW blocks inside DPU
+ * @name: string name for debug purposes
+ * @id: enum identifying this block
+ * @base: register base offset to mdss
+ * @len: length of hardware block
+ * @features bit mask identifying sub-blocks/features
+ */
+#define DPU_HW_BLK_INFO \
+ char name[DPU_HW_BLK_NAME_LEN]; \
+ u32 id; \
+ u32 base; \
+ u32 len; \
+ unsigned long features
+
+/**
+ * MACRO DPU_HW_SUBBLK_INFO - information of HW sub-block inside DPU
+ * @name: string name for debug purposes
+ * @id: enum identifying this sub-block
+ * @base: offset of this sub-block relative to the block
+ * offset
+ * @len register block length of this sub-block
+ */
+#define DPU_HW_SUBBLK_INFO \
+ char name[DPU_HW_BLK_NAME_LEN]; \
+ u32 id; \
+ u32 base; \
+ u32 len
+
+/**
+ * struct dpu_scaler_blk: Scaler information
+ * @info: HW register and features supported by this sub-blk
+ * @version: qseed block revision
+ */
+struct dpu_scaler_blk {
+ DPU_HW_SUBBLK_INFO;
+ u32 version;
+};
+
+struct dpu_csc_blk {
+ DPU_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct dpu_pp_blk : Pixel processing sub-blk information
+ * @info: HW register and features supported by this sub-blk
+ * @version: HW Algorithm version
+ */
+struct dpu_pp_blk {
+ DPU_HW_SUBBLK_INFO;
+ u32 version;
+};
+
+/**
+ * struct dpu_dsc_blk - DSC Encoder sub-blk information
+ * @info: HW register and features supported by this sub-blk
+ */
+struct dpu_dsc_blk {
+ DPU_HW_SUBBLK_INFO;
+};
+
+/**
+ * enum dpu_qos_lut_usage - define QoS LUT use cases
+ */
+enum dpu_qos_lut_usage {
+ DPU_QOS_LUT_USAGE_LINEAR,
+ DPU_QOS_LUT_USAGE_MACROTILE,
+ DPU_QOS_LUT_USAGE_NRT,
+ DPU_QOS_LUT_USAGE_MAX,
+};
+
+/**
+ * struct dpu_qos_lut_entry - define QoS LUT table entry
+ * @fl: fill level, or zero on the last entry to mark it as the default LUT
+ * @lut: LUT to use when the computed fill level is less than or equal to @fl
+ */
+struct dpu_qos_lut_entry {
+ u32 fl;
+ u64 lut;
+};
+
+/**
+ * struct dpu_qos_lut_tbl - define QoS LUT table
+ * @nentry: number of entries in this table
+ * @entries: Pointer to table entries
+ */
+struct dpu_qos_lut_tbl {
+ u32 nentry;
+ const struct dpu_qos_lut_entry *entries;
+};
+
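The selection rule these tables imply (a sketch mirroring how the plane code picks
an entry; helper name assumed): scan in order and take the first entry whose @fl
covers the computed fill level, with the terminating fl == 0 entry acting as the
default.

    static u64 pick_qos_lut(const struct dpu_qos_lut_tbl *tbl, u32 total_fl)
    {
        u32 i;

        for (i = 0; i < tbl->nentry; i++)
            if (total_fl <= tbl->entries[i].fl)
                return tbl->entries[i].lut;

        /* the fl == 0 terminator is the catch-all default */
        if (tbl->nentry && !tbl->entries[tbl->nentry - 1].fl)
            return tbl->entries[tbl->nentry - 1].lut;

        return 0;
    }
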
+/**
+ * struct dpu_rotation_cfg - define inline rotation config
+ * @rot_maxheight: max pre-rotated height allowed for inline rotation
+ * @rot_num_formats: number of elements in @rot_format_list
+ * @rot_format_list: list of supported rotator formats
+ */
+struct dpu_rotation_cfg {
+ u32 rot_maxheight;
+ size_t rot_num_formats;
+ const u32 *rot_format_list;
+};
+
+/**
+ * struct dpu_caps - define DPU capabilities
+ * @max_mixer_width max layer mixer line width support.
+ * @max_mixer_blendstages max layer mixer blend stages or
+ * supported z order
+ * @qseed_type qseed2 or qseed3 support.
+ * @has_src_split source split feature status
+ * @has_dim_layer dim layer feature status
+ * @has_idle_pc indicate if idle power collapse feature is supported
+ * @has_3d_merge indicate if 3D merge is supported
+ * @max_linewidth max linewidth for sspp
+ * @pixel_ram_size size of latency hiding and de-tiling buffer in bytes
+ * @max_hdeci_exp max horizontal decimation supported (max is 2^value)
+ * @max_vdeci_exp max vertical decimation supported (max is 2^value)
+ */
+struct dpu_caps {
+ u32 max_mixer_width;
+ u32 max_mixer_blendstages;
+ u32 qseed_type;
+ bool has_src_split;
+ bool has_dim_layer;
+ bool has_idle_pc;
+ bool has_3d_merge;
+ /* SSPP limits */
+ u32 max_linewidth;
+ u32 pixel_ram_size;
+ u32 max_hdeci_exp;
+ u32 max_vdeci_exp;
+};
+
+/**
+ * struct dpu_sspp_sub_blks : SSPP sub-blocks
+ * @maxdwnscale: max downscale ratio supported (without DECIMATION)
+ * @maxupscale: max upscale ratio supported
+ * @smart_dma_priority: hw priority of rect1 of multirect pipe
+ * @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps
+ * @qseed_ver: qseed version
+ * @scaler_blk: scaler block description
+ * @csc_blk: color space conversion block description
+ * @format_list: Pointer to list of supported formats
+ * @num_formats: Number of supported formats
+ * @virt_format_list: Pointer to list of supported formats for virtual planes
+ * @virt_num_formats: Number of supported formats for virtual planes
+ * @rotation_cfg: inline rotation configuration
+ */
+struct dpu_sspp_sub_blks {
+ u32 maxdwnscale;
+ u32 maxupscale;
+ u32 smart_dma_priority;
+ u32 max_per_pipe_bw;
+ u32 qseed_ver;
+ struct dpu_scaler_blk scaler_blk;
+ struct dpu_pp_blk csc_blk;
+
+ const u32 *format_list;
+ u32 num_formats;
+ const u32 *virt_format_list;
+ u32 virt_num_formats;
+ const struct dpu_rotation_cfg *rotation_cfg;
+};
+
+/**
+ * struct dpu_lm_sub_blks: information of mixer block
+ * @maxwidth: Max pixel width supported by this mixer
+ * @maxblendstages: Max number of blend-stages supported
+ * @blendstage_base: Blend-stage register base offset
+ */
+struct dpu_lm_sub_blks {
+ u32 maxwidth;
+ u32 maxblendstages;
+ u32 blendstage_base[MAX_BLOCKS];
+};
+
+/**
+ * struct dpu_dspp_sub_blks - information of DSPP block
+ * @pcc: pixel color correction block
+ */
+struct dpu_dspp_sub_blks {
+ struct dpu_pp_blk pcc;
+};
+
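+/**
+ * struct dpu_pingpong_sub_blks - PING-PONG sub-blocks
+ * @te: tear-check block
+ * @te2: secondary tear-check block
+ * @dither: dither block
+ */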
+struct dpu_pingpong_sub_blks {
+ struct dpu_pp_blk te;
+ struct dpu_pp_blk te2;
+ struct dpu_pp_blk dither;
+};
+
+/**
+ * struct dpu_dsc_sub_blks - DSC sub-blks
+ * @enc: DSC encoder sub-block
+ * @ctl: DSC controller sub-block
+ */
+struct dpu_dsc_sub_blks {
+ struct dpu_dsc_blk enc;
+ struct dpu_dsc_blk ctl;
+};
+
+/**
+ * enum dpu_clk_ctrl_type - Defines top level clock control signals
+ */
+enum dpu_clk_ctrl_type {
+ DPU_CLK_CTRL_NONE,
+ DPU_CLK_CTRL_VIG0,
+ DPU_CLK_CTRL_VIG1,
+ DPU_CLK_CTRL_VIG2,
+ DPU_CLK_CTRL_VIG3,
+ DPU_CLK_CTRL_VIG4,
+ DPU_CLK_CTRL_RGB0,
+ DPU_CLK_CTRL_RGB1,
+ DPU_CLK_CTRL_RGB2,
+ DPU_CLK_CTRL_RGB3,
+ DPU_CLK_CTRL_DMA0,
+ DPU_CLK_CTRL_DMA1,
+ DPU_CLK_CTRL_DMA2,
+ DPU_CLK_CTRL_DMA3,
+ DPU_CLK_CTRL_DMA4,
+ DPU_CLK_CTRL_DMA5,
+ DPU_CLK_CTRL_CURSOR0,
+ DPU_CLK_CTRL_CURSOR1,
+ DPU_CLK_CTRL_INLINE_ROT0_SSPP,
+ DPU_CLK_CTRL_REG_DMA,
+ DPU_CLK_CTRL_WB2,
+ DPU_CLK_CTRL_MAX,
+};
+
+/**
+ * struct dpu_clk_ctrl_reg - clock control register
+ * @reg_off: register offset
+ * @bit_off: bit offset
+ */
+struct dpu_clk_ctrl_reg {
+ u32 reg_off;
+ u32 bit_off;
+};
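+
+/*
+ * A clock control signal occupies a single bit at @bit_off within the MDP
+ * TOP register at @reg_off. A minimal read-modify-write sketch (assuming
+ * plain readl/writel accessors on the mapped MDP base):
+ *
+ *   u32 val = readl(mdp_base + reg->reg_off);
+ *   val |= BIT(reg->bit_off);
+ *   writel(val, mdp_base + reg->reg_off);
+ */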
+
+/**
+ * struct dpu_mdp_cfg - MDP TOP-BLK instance info
+ * @id: index identifying this block
+ * @base: register base offset to mdss
+ * @features: bit mask identifying sub-blocks/features
+ * @clk_ctrls: clock control register definitions
+ */
+struct dpu_mdp_cfg {
+ DPU_HW_BLK_INFO;
+ struct dpu_clk_ctrl_reg clk_ctrls[DPU_CLK_CTRL_MAX];
+};
+
+/**
+ * struct dpu_ctl_cfg - MDP CTL instance info
+ * @id: index identifying this block
+ * @base: register base offset to mdss
+ * @features: bit mask identifying sub-blocks/features
+ * @intr_start: interrupt index for CTL_START
+ */
+struct dpu_ctl_cfg {
+ DPU_HW_BLK_INFO;
+ s32 intr_start;
+};
+
+/**
+ * struct dpu_sspp_cfg - information of source pipes
+ * @id: index identifying this block
+ * @base: register offset of this block
+ * @features: bit mask identifying sub-blocks/features
+ * @sblk: SSPP sub-blocks information
+ * @xin_id: bus client identifier
+ * @clk_ctrl: clock control identifier
+ * @type: sspp type identifier
+ */
+struct dpu_sspp_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_sspp_sub_blks *sblk;
+ u32 xin_id;
+ enum dpu_clk_ctrl_type clk_ctrl;
+ u32 type;
+};
+
+/**
+ * struct dpu_lm_cfg - information of layer mixer blocks
+ * @id: index identifying this block
+ * @base: register offset of this block
+ * @features: bit mask identifying sub-blocks/features
+ * @sblk: LM sub-blocks information
+ * @pingpong: ID of connected PingPong, PINGPONG_NONE if unsupported
+ * @dspp: ID of connected DSPP, DSPP_NONE if unsupported
+ * @lm_pair: ID of the LM that can be controlled by the same CTL
+ */
+struct dpu_lm_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_lm_sub_blks *sblk;
+ u32 pingpong;
+ u32 dspp;
+ unsigned long lm_pair;
+};
+
+/**
+ * struct dpu_dspp_cfg - information of DSPP blocks
+ * @id: enum identifying this block
+ * @base: register offset of this block
+ * @features: bit mask identifying sub-blocks/features
+ *            supported by this block
+ * @sblk: sub-blocks information
+ */
+struct dpu_dspp_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_dspp_sub_blks *sblk;
+};
+
+/**
+ * struct dpu_pingpong_cfg - information of PING-PONG blocks
+ * @id: enum identifying this block
+ * @base: register offset of this block
+ * @features: bit mask identifying sub-blocks/features
+ * @merge_3d: ID of the connected merge-3d block
+ * @intr_done: index for PINGPONG done interrupt
+ * @intr_rdptr: index for PINGPONG readpointer done interrupt
+ * @sblk: sub-blocks information
+ */
+struct dpu_pingpong_cfg {
+ DPU_HW_BLK_INFO;
+ u32 merge_3d;
+ s32 intr_done;
+ s32 intr_rdptr;
+ const struct dpu_pingpong_sub_blks *sblk;
+};
+
+/**
+ * struct dpu_merge_3d_cfg - information of merge-3d blocks
+ * @id: enum identifying this block
+ * @base: register offset of this block
+ * @features: bit mask identifying sub-blocks/features
+ *            supported by this block
+ * @sblk: sub-blocks information
+ */
+struct dpu_merge_3d_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_merge_3d_sub_blks *sblk;
+};
+
+/**
+ * struct dpu_dsc_cfg - information of DSC blocks
+ * @id: enum identifying this block
+ * @base: register offset of this block
+ * @len: length of hardware block
+ * @features: bit mask identifying sub-blocks/features
+ * @sblk: sub-blocks information
+ */
+struct dpu_dsc_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_dsc_sub_blks *sblk;
+};
+
+/**
+ * struct dpu_intf_cfg - information of timing engine blocks
+ * @id: enum identifying this block
+ * @base: register offset of this block
+ * @features: bit mask identifying sub-blocks/features
+ * @type: interface type (DSI, DP, HDMI)
+ * @controller_id: controller instance ID when multiple interfaces of
+ *                 the same type exist
+ * @prog_fetch_lines_worst_case: worst-case number of lines of prefetch latency
+ * @intr_underrun: index for INTF underrun interrupt
+ * @intr_vsync: index for INTF VSYNC interrupt
+ * @intr_tear_rd_ptr: index for INTF TEAR_RD_PTR interrupt
+ */
+struct dpu_intf_cfg {
+ DPU_HW_BLK_INFO;
+ u32 type; /* interface type */
+ u32 controller_id;
+ u32 prog_fetch_lines_worst_case;
+ s32 intr_underrun;
+ s32 intr_vsync;
+ s32 intr_tear_rd_ptr;
+};
+
+/**
+ * struct dpu_wb_cfg - information of writeback blocks
+ * @DPU_HW_BLK_INFO: refer to the description above for DPU_HW_BLK_INFO
+ * @vbif_idx: vbif client index
+ * @maxlinewidth: max line width supported by writeback block
+ * @xin_id: bus client identifier
+ * @intr_wb_done: interrupt index for WB_DONE
+ * @format_list: list of formats supported by this writeback block
+ * @num_formats: number of formats supported by this writeback block
+ * @clk_ctrl: clock control identifier
+ */
+struct dpu_wb_cfg {
+ DPU_HW_BLK_INFO;
+ u8 vbif_idx;
+ u32 maxlinewidth;
+ u32 xin_id;
+ s32 intr_wb_done;
+ const u32 *format_list;
+ u32 num_formats;
+ enum dpu_clk_ctrl_type clk_ctrl;
+};
+
+/**
+ * struct dpu_vbif_dynamic_ot_cfg - dynamic OT setting
+ * @pps: pixels per second
+ * @ot_limit: OT limit to use up to the specified pixel rate
+ */
+struct dpu_vbif_dynamic_ot_cfg {
+ u64 pps;
+ u32 ot_limit;
+};
+
+/**
+ * struct dpu_vbif_dynamic_ot_tbl - dynamic OT setting table
+ * @count: length of cfg
+ * @cfg: pointer to array of configuration settings with
+ *       ascending requirements
+ */
+struct dpu_vbif_dynamic_ot_tbl {
+ u32 count;
+ const struct dpu_vbif_dynamic_ot_cfg *cfg;
+};
+
+/**
+ * struct dpu_vbif_qos_tbl - QoS priority table
+ * @npriority_lvl: number of priority levels
+ * @priority_lvl: pointer to array of priority levels in ascending order
+ */
+struct dpu_vbif_qos_tbl {
+ u32 npriority_lvl;
+ const u32 *priority_lvl;
+};
+
+/**
+ * struct dpu_vbif_cfg - information of VBIF blocks
+ * @id: enum identifying this block
+ * @base: register offset of this block
+ * @features: bit mask identifying sub-blocks/features
+ * @default_ot_rd_limit: default OT read limit
+ * @default_ot_wr_limit: default OT write limit
+ * @xin_halt_timeout: maximum time (in usec) for xin to halt
+ * @qos_rp_remap_size: size of VBIF_XINL_QOS_RP_REMAP register space
+ * @dynamic_ot_rd_tbl: dynamic OT read configuration table
+ * @dynamic_ot_wr_tbl: dynamic OT write configuration table
+ * @qos_rt_tbl: real-time QoS priority table
+ * @qos_nrt_tbl: non-real-time QoS priority table
+ * @memtype_count: number of defined memtypes
+ * @memtype: array of xin memtype definitions
+ */
+struct dpu_vbif_cfg {
+ DPU_HW_BLK_INFO;
+ u32 default_ot_rd_limit;
+ u32 default_ot_wr_limit;
+ u32 xin_halt_timeout;
+ u32 qos_rp_remap_size;
+ struct dpu_vbif_dynamic_ot_tbl dynamic_ot_rd_tbl;
+ struct dpu_vbif_dynamic_ot_tbl dynamic_ot_wr_tbl;
+ struct dpu_vbif_qos_tbl qos_rt_tbl;
+ struct dpu_vbif_qos_tbl qos_nrt_tbl;
+ u32 memtype_count;
+ u32 memtype[MAX_XIN_COUNT];
+};
+
+/**
+ * Define CDP use cases
+ * @DPU_PERF_CDP_USAGE_RT: real-time use cases
+ * @DPU_PERF_CDP_USAGE_NRT: non-real-time use cases such as WFD
+ */
+enum {
+ DPU_PERF_CDP_USAGE_RT,
+ DPU_PERF_CDP_USAGE_NRT,
+ DPU_PERF_CDP_USAGE_MAX
+};
+
+/**
+ * struct dpu_perf_cdp_cfg - define CDP use case configuration
+ * @rd_enable: true if read pipe CDP is enabled
+ * @wr_enable: true if write pipe CDP is enabled
+ */
+struct dpu_perf_cdp_cfg {
+ bool rd_enable;
+ bool wr_enable;
+};
+
+/**
+ * struct dpu_mdss_version - DPU's major and minor versions
+ * @core_major_ver: DPU core's major version
+ * @core_minor_ver: DPU core's minor version
+ */
+struct dpu_mdss_version {
+ u8 core_major_ver;
+ u8 core_minor_ver;
+};
+
+/**
+ * struct dpu_perf_cfg - performance control settings
+ * @max_bw_low: low threshold of maximum bandwidth (kbps)
+ * @max_bw_high: high threshold of maximum bandwidth (kbps)
+ * @min_core_ib: minimum mnoc ib vote in kbps
+ * @min_llcc_ib: minimum llcc ib vote in kbps
+ * @min_dram_ib: minimum dram ib vote in kbps
+ * @undersized_prefill_lines: undersized prefill in lines
+ * @xtra_prefill_lines: extra prefill latency in lines
+ * @dest_scale_prefill_lines: destination scaler latency in lines
+ * @macrotile_prefill_lines: macrotile latency in lines
+ * @yuv_nv12_prefill_lines: yuv_nv12 latency in lines
+ * @linear_prefill_lines: linear latency in lines
+ * @downscaling_prefill_lines: downscaling latency in lines
+ * @amortizable_threshold: minimum y position for traffic shaping prefill
+ * @min_prefill_lines: minimum pipeline latency in lines
+ * @clk_inefficiency_factor: DPU src clock inefficiency factor
+ * @bw_inefficiency_factor: DPU axi bus bw inefficiency factor
+ * @safe_lut_tbl: LUT tables for safe signals
+ * @danger_lut_tbl: LUT tables for danger signals
+ * @qos_lut_tbl: LUT tables for QoS signals
+ * @cdp_cfg: cdp use case configurations
+ */
+struct dpu_perf_cfg {
+ u32 max_bw_low;
+ u32 max_bw_high;
+ u32 min_core_ib;
+ u32 min_llcc_ib;
+ u32 min_dram_ib;
+ u32 undersized_prefill_lines;
+ u32 xtra_prefill_lines;
+ u32 dest_scale_prefill_lines;
+ u32 macrotile_prefill_lines;
+ u32 yuv_nv12_prefill_lines;
+ u32 linear_prefill_lines;
+ u32 downscaling_prefill_lines;
+ u32 amortizable_threshold;
+ u32 min_prefill_lines;
+ u32 clk_inefficiency_factor;
+ u32 bw_inefficiency_factor;
+ u32 safe_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+ u32 danger_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+ struct dpu_qos_lut_tbl qos_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+ struct dpu_perf_cdp_cfg cdp_cfg[DPU_PERF_CDP_USAGE_MAX];
+};
+
+/**
+ * struct dpu_mdss_cfg - information of MDSS HW
+ * This is the main catalog data structure representing
+ * this HW version. Contains dpu's major and minor versions,
+ * number of instances, register offsets, and capabilities of
+ * all the MDSS HW sub-blocks.
+ *
+ * @dma_formats: supported formats for dma pipe
+ * @cursor_formats: supported formats for cursor pipe
+ * @vig_formats: supported formats for vig pipe
+ */
+struct dpu_mdss_cfg {
+ const struct dpu_mdss_version *mdss_ver;
+
+ const struct dpu_caps *caps;
+
+ const struct dpu_mdp_cfg *mdp;
+
+ u32 ctl_count;
+ const struct dpu_ctl_cfg *ctl;
+
+ u32 sspp_count;
+ const struct dpu_sspp_cfg *sspp;
+
+ u32 mixer_count;
+ const struct dpu_lm_cfg *mixer;
+
+ u32 pingpong_count;
+ const struct dpu_pingpong_cfg *pingpong;
+
+ u32 merge_3d_count;
+ const struct dpu_merge_3d_cfg *merge_3d;
+
+ u32 dsc_count;
+ const struct dpu_dsc_cfg *dsc;
+
+ u32 intf_count;
+ const struct dpu_intf_cfg *intf;
+
+ u32 vbif_count;
+ const struct dpu_vbif_cfg *vbif;
+
+ u32 wb_count;
+ const struct dpu_wb_cfg *wb;
+
+ u32 ad_count;
+
+ u32 dspp_count;
+ const struct dpu_dspp_cfg *dspp;
+
+ /* Add additional block data structures here */
+
+ const struct dpu_perf_cfg *perf;
+ const struct dpu_format_extended *dma_formats;
+ const struct dpu_format_extended *cursor_formats;
+ const struct dpu_format_extended *vig_formats;
+};
+
+extern const struct dpu_mdss_cfg dpu_msm8998_cfg;
+extern const struct dpu_mdss_cfg dpu_sdm845_cfg;
+extern const struct dpu_mdss_cfg dpu_sm8150_cfg;
+extern const struct dpu_mdss_cfg dpu_sc8180x_cfg;
+extern const struct dpu_mdss_cfg dpu_sm8250_cfg;
+extern const struct dpu_mdss_cfg dpu_sc7180_cfg;
+extern const struct dpu_mdss_cfg dpu_sm6115_cfg;
+extern const struct dpu_mdss_cfg dpu_sm6125_cfg;
+extern const struct dpu_mdss_cfg dpu_sm6350_cfg;
+extern const struct dpu_mdss_cfg dpu_qcm2290_cfg;
+extern const struct dpu_mdss_cfg dpu_sm6375_cfg;
+extern const struct dpu_mdss_cfg dpu_sm8350_cfg;
+extern const struct dpu_mdss_cfg dpu_sc7280_cfg;
+extern const struct dpu_mdss_cfg dpu_sc8280xp_cfg;
+extern const struct dpu_mdss_cfg dpu_sm8450_cfg;
+extern const struct dpu_mdss_cfg dpu_sm8550_cfg;
+
+#endif /* _DPU_HW_CATALOG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
new file mode 100644
index 0000000000..86182c7346
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -0,0 +1,709 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include "dpu_hwio.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+
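+/*
+ * Blend configuration registers, one set per layer mixer. LM_0..LM_4 use a
+ * 4-byte stride from offset 0x0; LM_5 is special-cased to 0x024 because its
+ * CTL_LAYER register does not follow that stride on this hardware.
+ */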
+#define CTL_LAYER(lm) \
+ (((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT(lm) \
+ (0x40 + (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT2(lm) \
+ (0x70 + (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT3(lm) \
+ (0xA0 + (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT4(lm) \
+ (0xB8 + (((lm) - LM_0) * 0x004))
+#define CTL_TOP 0x014
+#define CTL_FLUSH 0x018
+#define CTL_START 0x01C
+#define CTL_PREPARE 0x0d0
+#define CTL_SW_RESET 0x030
+#define CTL_LAYER_EXTN_OFFSET 0x40
+#define CTL_MERGE_3D_ACTIVE 0x0E4
+#define CTL_DSC_ACTIVE 0x0E8
+#define CTL_WB_ACTIVE 0x0EC
+#define CTL_INTF_ACTIVE 0x0F4
+#define CTL_FETCH_PIPE_ACTIVE 0x0FC
+#define CTL_MERGE_3D_FLUSH 0x100
+#define CTL_DSC_FLUSH 0x104
+#define CTL_WB_FLUSH 0x108
+#define CTL_INTF_FLUSH 0x110
+#define CTL_INTF_MASTER 0x134
+#define CTL_DSPP_n_FLUSH(n) ((0x13C) + ((n) * 4))
+
+#define CTL_MIXER_BORDER_OUT BIT(24)
+#define CTL_FLUSH_MASK_CTL BIT(17)
+
+#define DPU_REG_RESET_TIMEOUT_US 2000
+#define MERGE_3D_IDX 23
+#define DSC_IDX 22
+#define INTF_IDX 31
+#define WB_IDX 16
+#define DSPP_IDX 29 /* From DPU hw rev 7.x.x */
+#define CTL_INVALID_BIT 0xffff
+#define CTL_DEFAULT_GROUP_ID 0xf
+
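+/*
+ * Per-SSPP bit positions in CTL_FETCH_PIPE_ACTIVE: VIG0..VIG3 map to bits
+ * 16..19 and DMA0..DMA5 to bits 0..5; SSPP_NONE and the RGB pipes have no
+ * fetch-active bit and map to CTL_INVALID_BIT.
+ */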
+static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
+ CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
+ 1, 2, 3, 4, 5};
+
+static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
+ enum dpu_lm lm)
+{
+ int i;
+ int stages = -EINVAL;
+
+ for (i = 0; i < count; i++) {
+ if (lm == mixer[i].id) {
+ stages = mixer[i].sblk->maxblendstages;
+ break;
+ }
+ }
+
+ return stages;
+}
+
+static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ return DPU_REG_READ(c, CTL_FLUSH);
+}
+
+static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
+{
+ trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
+ dpu_hw_ctl_get_flush_register(ctx));
+ DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
+}
+
+static inline bool dpu_hw_ctl_is_started(struct dpu_hw_ctl *ctx)
+{
+ return !!(DPU_REG_READ(&ctx->hw, CTL_START) & BIT(0));
+}
+
+static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
+{
+ trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
+ dpu_hw_ctl_get_flush_register(ctx));
+ DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
+}
+
+static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
+{
+ trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
+ dpu_hw_ctl_get_flush_register(ctx));
+ ctx->pending_flush_mask = 0x0;
+ ctx->pending_intf_flush_mask = 0;
+ ctx->pending_wb_flush_mask = 0;
+ ctx->pending_merge_3d_flush_mask = 0;
+ ctx->pending_dsc_flush_mask = 0;
+
+ memset(ctx->pending_dspp_flush_mask, 0,
+ sizeof(ctx->pending_dspp_flush_mask));
+}
+
+static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
+ u32 flushbits)
+{
+ trace_dpu_hw_ctl_update_pending_flush(flushbits,
+ ctx->pending_flush_mask);
+ ctx->pending_flush_mask |= flushbits;
+}
+
+static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
+{
+ return ctx->pending_flush_mask;
+}
+
+static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
+{
+ int dspp;
+
+ if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX))
+ DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
+ ctx->pending_merge_3d_flush_mask);
+ if (ctx->pending_flush_mask & BIT(INTF_IDX))
+ DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
+ ctx->pending_intf_flush_mask);
+ if (ctx->pending_flush_mask & BIT(WB_IDX))
+ DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
+ ctx->pending_wb_flush_mask);
+
+ if (ctx->pending_flush_mask & BIT(DSPP_IDX))
+ for (dspp = DSPP_0; dspp < DSPP_MAX; dspp++) {
+ if (ctx->pending_dspp_flush_mask[dspp - DSPP_0])
+ DPU_REG_WRITE(&ctx->hw,
+ CTL_DSPP_n_FLUSH(dspp - DSPP_0),
+ ctx->pending_dspp_flush_mask[dspp - DSPP_0]);
+ }
+
+ if (ctx->pending_flush_mask & BIT(DSC_IDX))
+ DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH,
+ ctx->pending_dsc_flush_mask);
+
+ DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
+}
+
+static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
+{
+ trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
+ dpu_hw_ctl_get_flush_register(ctx));
+ DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
+}
+
+static void dpu_hw_ctl_update_pending_flush_sspp(struct dpu_hw_ctl *ctx,
+ enum dpu_sspp sspp)
+{
+ switch (sspp) {
+ case SSPP_VIG0:
+ ctx->pending_flush_mask |= BIT(0);
+ break;
+ case SSPP_VIG1:
+ ctx->pending_flush_mask |= BIT(1);
+ break;
+ case SSPP_VIG2:
+ ctx->pending_flush_mask |= BIT(2);
+ break;
+ case SSPP_VIG3:
+ ctx->pending_flush_mask |= BIT(18);
+ break;
+ case SSPP_RGB0:
+ ctx->pending_flush_mask |= BIT(3);
+ break;
+ case SSPP_RGB1:
+ ctx->pending_flush_mask |= BIT(4);
+ break;
+ case SSPP_RGB2:
+ ctx->pending_flush_mask |= BIT(5);
+ break;
+ case SSPP_RGB3:
+ ctx->pending_flush_mask |= BIT(19);
+ break;
+ case SSPP_DMA0:
+ ctx->pending_flush_mask |= BIT(11);
+ break;
+ case SSPP_DMA1:
+ ctx->pending_flush_mask |= BIT(12);
+ break;
+ case SSPP_DMA2:
+ ctx->pending_flush_mask |= BIT(24);
+ break;
+ case SSPP_DMA3:
+ ctx->pending_flush_mask |= BIT(25);
+ break;
+ case SSPP_DMA4:
+ ctx->pending_flush_mask |= BIT(13);
+ break;
+ case SSPP_DMA5:
+ ctx->pending_flush_mask |= BIT(14);
+ break;
+ case SSPP_CURSOR0:
+ ctx->pending_flush_mask |= BIT(22);
+ break;
+ case SSPP_CURSOR1:
+ ctx->pending_flush_mask |= BIT(23);
+ break;
+ default:
+ break;
+ }
+}
+
+static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx,
+ enum dpu_lm lm)
+{
+ switch (lm) {
+ case LM_0:
+ ctx->pending_flush_mask |= BIT(6);
+ break;
+ case LM_1:
+ ctx->pending_flush_mask |= BIT(7);
+ break;
+ case LM_2:
+ ctx->pending_flush_mask |= BIT(8);
+ break;
+ case LM_3:
+ ctx->pending_flush_mask |= BIT(9);
+ break;
+ case LM_4:
+ ctx->pending_flush_mask |= BIT(10);
+ break;
+ case LM_5:
+ ctx->pending_flush_mask |= BIT(20);
+ break;
+ default:
+ break;
+ }
+
+ ctx->pending_flush_mask |= CTL_FLUSH_MASK_CTL;
+}
+
+static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
+ enum dpu_intf intf)
+{
+ switch (intf) {
+ case INTF_0:
+ ctx->pending_flush_mask |= BIT(31);
+ break;
+ case INTF_1:
+ ctx->pending_flush_mask |= BIT(30);
+ break;
+ case INTF_2:
+ ctx->pending_flush_mask |= BIT(29);
+ break;
+ case INTF_3:
+ ctx->pending_flush_mask |= BIT(28);
+ break;
+ default:
+ break;
+ }
+}
+
+static void dpu_hw_ctl_update_pending_flush_wb(struct dpu_hw_ctl *ctx,
+ enum dpu_wb wb)
+{
+ switch (wb) {
+ case WB_0:
+ case WB_1:
+ case WB_2:
+ ctx->pending_flush_mask |= BIT(WB_IDX);
+ break;
+ default:
+ break;
+ }
+}
+
+static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
+ enum dpu_wb wb)
+{
+ ctx->pending_wb_flush_mask |= BIT(wb - WB_0);
+ ctx->pending_flush_mask |= BIT(WB_IDX);
+}
+
+static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
+ enum dpu_intf intf)
+{
+ ctx->pending_intf_flush_mask |= BIT(intf - INTF_0);
+ ctx->pending_flush_mask |= BIT(INTF_IDX);
+}
+
+static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
+ enum dpu_merge_3d merge_3d)
+{
+ ctx->pending_merge_3d_flush_mask |= BIT(merge_3d - MERGE_3D_0);
+ ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
+}
+
+static void dpu_hw_ctl_update_pending_flush_dsc_v1(struct dpu_hw_ctl *ctx,
+ enum dpu_dsc dsc_num)
+{
+ ctx->pending_dsc_flush_mask |= BIT(dsc_num - DSC_0);
+ ctx->pending_flush_mask |= BIT(DSC_IDX);
+}
+
+static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx,
+ enum dpu_dspp dspp, u32 dspp_sub_blk)
+{
+ switch (dspp) {
+ case DSPP_0:
+ ctx->pending_flush_mask |= BIT(13);
+ break;
+ case DSPP_1:
+ ctx->pending_flush_mask |= BIT(14);
+ break;
+ case DSPP_2:
+ ctx->pending_flush_mask |= BIT(15);
+ break;
+ case DSPP_3:
+ ctx->pending_flush_mask |= BIT(21);
+ break;
+ default:
+ break;
+ }
+}
+
+static void dpu_hw_ctl_update_pending_flush_dspp_sub_blocks(
+ struct dpu_hw_ctl *ctx, enum dpu_dspp dspp, u32 dspp_sub_blk)
+{
+ if (dspp >= DSPP_MAX)
+ return;
+
+ switch (dspp_sub_blk) {
+ case DPU_DSPP_PCC:
+ ctx->pending_dspp_flush_mask[dspp - DSPP_0] |= BIT(4);
+ break;
+ default:
+ return;
+ }
+
+ ctx->pending_flush_mask |= BIT(DSPP_IDX);
+}
+
+static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ ktime_t timeout;
+ u32 status;
+
+ timeout = ktime_add_us(ktime_get(), timeout_us);
+
+ /*
+ * it takes around 30us for the mdp to finish resetting its ctl path;
+ * poll every 50us so that the reset should be complete by the first poll
+ */
+ do {
+ status = DPU_REG_READ(c, CTL_SW_RESET);
+ status &= 0x1;
+ if (status)
+ usleep_range(20, 50);
+ } while (status && ktime_compare_safe(ktime_get(), timeout) < 0);
+
+ return status;
+}
+
+static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
+ DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
+ if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 status;
+
+ status = DPU_REG_READ(c, CTL_SW_RESET);
+ status &= 0x01;
+ if (!status)
+ return 0;
+
+ pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
+ if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
+ pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int i;
+
+ for (i = 0; i < ctx->mixer_count; i++) {
+ enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;
+
+ DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
+ }
+
+ DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
+}
+
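+/*
+ * Describes where a pipe's blend-stage value is programmed: @idx selects
+ * the mixercfg word (0 = CTL_LAYER, with the overflow bit at @ext_shift in
+ * CTL_LAYER_EXT; 1..4 = CTL_LAYER_EXTn), and @shift is the bit position of
+ * the stage field. The second array element covers the pipe's rect1 when
+ * multirect is in use; idx == -1 means no field exists for that case.
+ */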
+struct ctl_blend_config {
+ int idx, shift, ext_shift;
+};
+
+static const struct ctl_blend_config ctl_blend_config[][2] = {
+ [SSPP_NONE] = { { -1 }, { -1 } },
+ [SSPP_MAX] = { { -1 }, { -1 } },
+ [SSPP_VIG0] = { { 0, 0, 0 }, { 3, 0 } },
+ [SSPP_VIG1] = { { 0, 3, 2 }, { 3, 4 } },
+ [SSPP_VIG2] = { { 0, 6, 4 }, { 3, 8 } },
+ [SSPP_VIG3] = { { 0, 26, 6 }, { 3, 12 } },
+ [SSPP_RGB0] = { { 0, 9, 8 }, { -1 } },
+ [SSPP_RGB1] = { { 0, 12, 10 }, { -1 } },
+ [SSPP_RGB2] = { { 0, 15, 12 }, { -1 } },
+ [SSPP_RGB3] = { { 0, 29, 14 }, { -1 } },
+ [SSPP_DMA0] = { { 0, 18, 16 }, { 2, 8 } },
+ [SSPP_DMA1] = { { 0, 21, 18 }, { 2, 12 } },
+ [SSPP_DMA2] = { { 2, 0 }, { 2, 16 } },
+ [SSPP_DMA3] = { { 2, 4 }, { 2, 20 } },
+ [SSPP_DMA4] = { { 4, 0 }, { 4, 8 } },
+ [SSPP_DMA5] = { { 4, 4 }, { 4, 12 } },
+ [SSPP_CURSOR0] = { { 1, 20 }, { -1 } },
+ [SSPP_CURSOR1] = { { 1, 26 }, { -1 } },
+};
+
+static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
+ enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 mix, ext, mix_ext;
+ u32 mixercfg[5] = { 0 };
+ int i, j;
+ int stages;
+ int pipes_per_stage;
+
+ stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
+ if (stages < 0)
+ return;
+
+ if (test_bit(DPU_MIXER_SOURCESPLIT,
+ &ctx->mixer_hw_caps->features))
+ pipes_per_stage = PIPES_PER_STAGE;
+ else
+ pipes_per_stage = 1;
+
+ mixercfg[0] = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */
+
+ if (!stage_cfg)
+ goto exit;
+
+ for (i = 0; i <= stages; i++) {
+ /* overflow to ext register if 'i + 1 > 7' */
+ mix = (i + 1) & 0x7;
+ ext = i >= 7;
+ mix_ext = (i + 1) & 0xf;
+
+ for (j = 0 ; j < pipes_per_stage; j++) {
+ enum dpu_sspp_multirect_index rect_index =
+ stage_cfg->multirect_index[i][j];
+ enum dpu_sspp pipe = stage_cfg->stage[i][j];
+ const struct ctl_blend_config *cfg =
+ &ctl_blend_config[pipe][rect_index == DPU_SSPP_RECT_1];
+
+ /*
+ * CTL_LAYER has 3-bit fields (with extra bits in the EXT registers),
+ * while all EXT registers have 4-bit fields.
+ */
+ if (cfg->idx == -1) {
+ continue;
+ } else if (cfg->idx == 0) {
+ mixercfg[0] |= mix << cfg->shift;
+ mixercfg[1] |= ext << cfg->ext_shift;
+ } else {
+ mixercfg[cfg->idx] |= mix_ext << cfg->shift;
+ }
+ }
+ }
+
+exit:
+ DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg[0]);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg[1]);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg[2]);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg[3]);
+ if ((test_bit(DPU_CTL_HAS_LAYER_EXT4, &ctx->caps->features)))
+ DPU_REG_WRITE(c, CTL_LAYER_EXT4(lm), mixercfg[4]);
+}
+
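+/*
+ * A minimal usage sketch (hypothetical caller): staging VIG0 at blend
+ * stage 0 of LM_0 would look like
+ *
+ *   struct dpu_hw_stage_cfg stage_cfg = { 0 };
+ *
+ *   stage_cfg.stage[0][0] = SSPP_VIG0;
+ *   stage_cfg.multirect_index[0][0] = DPU_SSPP_RECT_SOLO;
+ *   ctl->ops.setup_blendstage(ctl, LM_0, &stage_cfg);
+ *
+ * In this driver the real caller is the CRTC blend-setup path.
+ */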
+
+static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
+ struct dpu_hw_intf_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 intf_active = 0;
+ u32 wb_active = 0;
+ u32 mode_sel = 0;
+
+ /* CTL_TOP[31:28] carries the group_id used to collate CTL paths
+ * per VM. Explicitly disable it until VM support is added in SW,
+ * since the power-on reset value is not the disabled state.
+ */
+ if ((test_bit(DPU_CTL_VM_CFG, &ctx->caps->features)))
+ mode_sel = CTL_DEFAULT_GROUP_ID << 28;
+
+ if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
+ mode_sel |= BIT(17);
+
+ intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
+ wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
+
+ if (cfg->intf)
+ intf_active |= BIT(cfg->intf - INTF_0);
+
+ if (cfg->wb)
+ wb_active |= BIT(cfg->wb - WB_0);
+
+ DPU_REG_WRITE(c, CTL_TOP, mode_sel);
+ DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
+ DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
+
+ if (cfg->merge_3d)
+ DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
+ BIT(cfg->merge_3d - MERGE_3D_0));
+
+ if (cfg->dsc)
+ DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc);
+}
+
+static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
+ struct dpu_hw_intf_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 intf_cfg = 0;
+
+ intf_cfg |= (cfg->intf & 0xF) << 4;
+
+ if (cfg->mode_3d) {
+ intf_cfg |= BIT(19);
+ intf_cfg |= (cfg->mode_3d - 0x1) << 20;
+ }
+
+ if (cfg->wb)
+ intf_cfg |= (cfg->wb & 0x3) + 2;
+
+ switch (cfg->intf_mode_sel) {
+ case DPU_CTL_MODE_SEL_VID:
+ intf_cfg &= ~BIT(17);
+ intf_cfg &= ~(0x3 << 15);
+ break;
+ case DPU_CTL_MODE_SEL_CMD:
+ intf_cfg |= BIT(17);
+ intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
+ break;
+ default:
+ pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
+ return;
+ }
+
+ DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
+}
+
+static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
+ struct dpu_hw_intf_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 intf_active = 0;
+ u32 wb_active = 0;
+ u32 merge3d_active = 0;
+ u32 dsc_active;
+
+ /*
+ * This API resets each portion of the CTL path, namely clearing
+ * the sspps staged on the lm, the merge_3d block, interfaces,
+ * writeback etc., to ensure a clean teardown of the pipeline.
+ * It is used for writeback to begin with, to get a proper teardown
+ * of the writeback session, but upon further validation it can be
+ * extended to all interfaces.
+ */
+ if (cfg->merge_3d) {
+ merge3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);
+ merge3d_active &= ~BIT(cfg->merge_3d - MERGE_3D_0);
+ DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
+ merge3d_active);
+ }
+
+ dpu_hw_ctl_clear_all_blendstages(ctx);
+
+ if (cfg->intf) {
+ intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
+ intf_active &= ~BIT(cfg->intf - INTF_0);
+ DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
+ }
+
+ if (cfg->wb) {
+ wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
+ wb_active &= ~BIT(cfg->wb - WB_0);
+ DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
+ }
+
+ if (cfg->dsc) {
+ dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
+ dsc_active &= ~cfg->dsc;
+ DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
+ }
+}
+
+static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
+ unsigned long *fetch_active)
+{
+ int i;
+ u32 val = 0;
+
+ if (fetch_active) {
+ for (i = 0; i < SSPP_MAX; i++) {
+ if (test_bit(i, fetch_active) &&
+ fetch_tbl[i] != CTL_INVALID_BIT)
+ val |= BIT(fetch_tbl[i]);
+ }
+ }
+
+ DPU_REG_WRITE(&ctx->hw, CTL_FETCH_PIPE_ACTIVE, val);
+}
+
+static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
+ unsigned long cap)
+{
+ if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
+ ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
+ ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
+ ops->reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
+ ops->update_pending_flush_intf =
+ dpu_hw_ctl_update_pending_flush_intf_v1;
+ ops->update_pending_flush_merge_3d =
+ dpu_hw_ctl_update_pending_flush_merge_3d_v1;
+ ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
+ ops->update_pending_flush_dsc =
+ dpu_hw_ctl_update_pending_flush_dsc_v1;
+ } else {
+ ops->trigger_flush = dpu_hw_ctl_trigger_flush;
+ ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
+ ops->update_pending_flush_intf =
+ dpu_hw_ctl_update_pending_flush_intf;
+ ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
+ }
+ ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
+ ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
+ ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
+ ops->get_flush_register = dpu_hw_ctl_get_flush_register;
+ ops->trigger_start = dpu_hw_ctl_trigger_start;
+ ops->is_started = dpu_hw_ctl_is_started;
+ ops->trigger_pending = dpu_hw_ctl_trigger_pending;
+ ops->reset = dpu_hw_ctl_reset_control;
+ ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
+ ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
+ ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
+ ops->update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp;
+ ops->update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer;
+ if (cap & BIT(DPU_CTL_DSPP_SUB_BLOCK_FLUSH))
+ ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp_sub_blocks;
+ else
+ ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;
+
+ if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
+ ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
+}
+
+struct dpu_hw_ctl *dpu_hw_ctl_init(const struct dpu_ctl_cfg *cfg,
+ void __iomem *addr,
+ u32 mixer_count,
+ const struct dpu_lm_cfg *mixer)
+{
+ struct dpu_hw_ctl *c;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ c->hw.blk_addr = addr + cfg->base;
+ c->hw.log_mask = DPU_DBG_MASK_CTL;
+
+ c->caps = cfg;
+ _setup_ctl_ops(&c->ops, c->caps->features);
+ c->idx = cfg->id;
+ c->mixer_count = mixer_count;
+ c->mixer_hw_caps = mixer;
+
+ return c;
+}
+
+void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
+{
+ kfree(ctx);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
new file mode 100644
index 0000000000..1c242298ff
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DPU_HW_CTL_H
+#define _DPU_HW_CTL_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_sspp.h"
+
+/**
+ * dpu_ctl_mode_sel: Interface mode selection
+ * DPU_CTL_MODE_SEL_VID: Video mode interface
+ * DPU_CTL_MODE_SEL_CMD: Command mode interface
+ */
+enum dpu_ctl_mode_sel {
+ DPU_CTL_MODE_SEL_VID = 0,
+ DPU_CTL_MODE_SEL_CMD
+};
+
+struct dpu_hw_ctl;
+/**
+ * struct dpu_hw_stage_cfg - blending stage cfg
+ * @stage : SSPP_ID at each stage
+ * @multirect_index: index of the rectangle of SSPP.
+ */
+struct dpu_hw_stage_cfg {
+ enum dpu_sspp stage[DPU_STAGE_MAX][PIPES_PER_STAGE];
+ enum dpu_sspp_multirect_index multirect_index
+ [DPU_STAGE_MAX][PIPES_PER_STAGE];
+};
+
+/**
+ * struct dpu_hw_intf_cfg - Describes how the DPU writes data to an output interface
+ * @intf : Interface id
+ * @wb : Writeback id
+ * @mode_3d: 3d mux configuration
+ * @merge_3d: 3d merge block used
+ * @intf_mode_sel: Interface mode, cmd / vid
+ * @stream_sel: Stream selection for multi-stream interfaces
+ * @dsc: DSC BIT masks used
+ */
+struct dpu_hw_intf_cfg {
+ enum dpu_intf intf;
+ enum dpu_wb wb;
+ enum dpu_3d_blend_mode mode_3d;
+ enum dpu_merge_3d merge_3d;
+ enum dpu_ctl_mode_sel intf_mode_sel;
+ int stream_sel;
+ unsigned int dsc;
+};
+
+/**
+ * struct dpu_hw_ctl_ops - Interface to the CTL hardware driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_ctl_ops {
+ /**
+ * kickoff hw operation for Sw controlled interfaces
+ * DSI cmd mode and WB interface are SW controlled
+ * @ctx : ctl path ctx pointer
+ */
+ void (*trigger_start)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * check if the ctl is started
+ * @ctx : ctl path ctx pointer
+ * @Return: true if started, false if stopped
+ */
+ bool (*is_started)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * kickoff prepare is in progress hw operation for sw
+ * controlled interfaces: DSI cmd mode and WB interface
+ * are SW controlled
+ * @ctx : ctl path ctx pointer
+ */
+ void (*trigger_pending)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Clear the value of the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ */
+ void (*clear_pending_flush)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Query the value of the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ */
+ u32 (*get_pending_flush)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * OR in the given flushbits to the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @flushbits : module flushmask
+ */
+ void (*update_pending_flush)(struct dpu_hw_ctl *ctx,
+ u32 flushbits);
+
+ /**
+ * OR in the given flushbits to the cached pending_(wb_)flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : writeback block index
+ */
+ void (*update_pending_flush_wb)(struct dpu_hw_ctl *ctx,
+ enum dpu_wb blk);
+
+ /**
+ * OR in the given flushbits to the cached pending_(intf_)flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : interface block index
+ */
+ void (*update_pending_flush_intf)(struct dpu_hw_ctl *ctx,
+ enum dpu_intf blk);
+
+ /**
+ * OR in the given flushbits to the cached pending_(merge_3d_)flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : interface block index
+ */
+ void (*update_pending_flush_merge_3d)(struct dpu_hw_ctl *ctx,
+ enum dpu_merge_3d blk);
+
+ /**
+ * OR in the given flushbits to the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : SSPP block index
+ */
+ void (*update_pending_flush_sspp)(struct dpu_hw_ctl *ctx,
+ enum dpu_sspp blk);
+
+ /**
+ * OR in the given flushbits to the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : LM block index
+ */
+ void (*update_pending_flush_mixer)(struct dpu_hw_ctl *ctx,
+ enum dpu_lm blk);
+
+ /**
+ * OR in the given flushbits to the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : DSPP block index
+ * @dspp_sub_blk : DSPP sub-block index
+ */
+ void (*update_pending_flush_dspp)(struct dpu_hw_ctl *ctx,
+ enum dpu_dspp blk, u32 dspp_sub_blk);
+
+ /**
+ * OR in the given flushbits to the cached pending_(dsc_)flush_mask
+ * No effect on hardware
+ * @ctx: ctl path ctx pointer
+ * @blk: interface block index
+ */
+ void (*update_pending_flush_dsc)(struct dpu_hw_ctl *ctx,
+ enum dpu_dsc blk);
+
+ /**
+ * Write the value of the pending_flush_mask to hardware
+ * @ctx : ctl path ctx pointer
+ */
+ void (*trigger_flush)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Read the value of the flush register
+ * @ctx : ctl path ctx pointer
+ * @Return: value of the ctl flush register.
+ */
+ u32 (*get_flush_register)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Setup ctl_path interface config
+ * @ctx : ctl path ctx pointer
+ * @cfg : interface config structure pointer
+ */
+ void (*setup_intf_cfg)(struct dpu_hw_ctl *ctx,
+ struct dpu_hw_intf_cfg *cfg);
+
+ /**
+ * reset ctl_path interface config
+ * @ctx : ctl path ctx pointer
+ * @cfg : interface config structure pointer
+ */
+ void (*reset_intf_cfg)(struct dpu_hw_ctl *ctx,
+ struct dpu_hw_intf_cfg *cfg);
+
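+ /**
+ * Issue a ctl path sw reset and poll until it completes
+ * @c : ctl path ctx pointer
+ * @Return: 0 on success, -error if reset did not complete in time
+ */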
+ int (*reset)(struct dpu_hw_ctl *c);
+
+ /*
+ * wait_reset_status - checks ctl reset status
+ * @ctx : ctl path ctx pointer
+ *
+ * This function checks the ctl reset status bit.
+ * If the reset bit is set, it keeps polling the status till the hw
+ * reset is complete.
+ * Returns: 0 on success or -error if reset incomplete within interval
+ */
+ int (*wait_reset_status)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Set all blend stages to disabled
+ * @ctx : ctl path ctx pointer
+ */
+ void (*clear_all_blendstages)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Configure layer mixer to pipe configuration
+ * @ctx : ctl path ctx pointer
+ * @lm : layer mixer enumeration
+ * @cfg : blend stage configuration
+ */
+ void (*setup_blendstage)(struct dpu_hw_ctl *ctx,
+ enum dpu_lm lm, struct dpu_hw_stage_cfg *cfg);
+
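+ /**
+ * Program the set of SSPPs whose fetch should be active
+ * @ctx : ctl path ctx pointer
+ * @fetch_active: bitmap of enum dpu_sspp pipes to activate,
+ * NULL to clear all
+ */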
+ void (*set_active_pipes)(struct dpu_hw_ctl *ctx,
+ unsigned long *fetch_active);
+};
+
+/**
+ * struct dpu_hw_ctl : CTL PATH driver object
+ * @base: hardware block base structure
+ * @hw: block register map object
+ * @idx: control path index
+ * @caps: control path capabilities
+ * @mixer_count: number of mixers
+ * @mixer_hw_caps: mixer hardware capabilities
+ * @pending_flush_mask: storage for pending ctl_flush managed via ops
+ * @pending_intf_flush_mask: pending INTF flush
+ * @pending_wb_flush_mask: pending WB flush
+ * @pending_merge_3d_flush_mask: pending merge_3d flush
+ * @pending_dspp_flush_mask: pending DSPP flush, one mask per DSPP
+ * @pending_dsc_flush_mask: pending DSC flush
+ * @ops: operation list
+ */
+struct dpu_hw_ctl {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* ctl path */
+ int idx;
+ const struct dpu_ctl_cfg *caps;
+ int mixer_count;
+ const struct dpu_lm_cfg *mixer_hw_caps;
+ u32 pending_flush_mask;
+ u32 pending_intf_flush_mask;
+ u32 pending_wb_flush_mask;
+ u32 pending_merge_3d_flush_mask;
+ u32 pending_dspp_flush_mask[DSPP_MAX - DSPP_0];
+ u32 pending_dsc_flush_mask;
+
+ /* ops */
+ struct dpu_hw_ctl_ops ops;
+};
+
+/**
+ * dpu_hw_ctl - convert base object dpu_hw_base to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_ctl, base);
+}
+
+/**
+ * dpu_hw_ctl_init() - Initializes the ctl_path hw driver object.
+ * Should be called before accessing any ctl_path register.
+ * @cfg: ctl_path catalog entry for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @mixer_count: Number of mixers in @mixer
+ * @mixer: Pointer to an array of Layer Mixers defined in the catalog
+ */
+struct dpu_hw_ctl *dpu_hw_ctl_init(const struct dpu_ctl_cfg *cfg,
+ void __iomem *addr,
+ u32 mixer_count,
+ const struct dpu_lm_cfg *mixer);
+
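+/*
+ * A minimal usage sketch (hypothetical names; in this driver the lookup
+ * against the catalog is done by the resource manager):
+ *
+ *   struct dpu_hw_ctl *ctl;
+ *
+ *   ctl = dpu_hw_ctl_init(&catalog->ctl[0], mmio_base,
+ *                         catalog->mixer_count, catalog->mixer);
+ *   if (IS_ERR(ctl))
+ *           return PTR_ERR(ctl);
+ */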
+/**
+ * dpu_hw_ctl_destroy() - Destroys the ctl driver context;
+ * should be called to free the context
+ */
+void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx);
+
+#endif /* _DPU_HW_CTL_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
new file mode 100644
index 0000000000..509dbaa51d
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2022, Linaro Limited
+ */
+
+#include <drm/display/drm_dsc_helper.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_dsc.h"
+
+#define DSC_COMMON_MODE 0x000
+#define DSC_ENC 0x004
+#define DSC_PICTURE 0x008
+#define DSC_SLICE 0x00C
+#define DSC_CHUNK_SIZE 0x010
+#define DSC_DELAY 0x014
+#define DSC_SCALE_INITIAL 0x018
+#define DSC_SCALE_DEC_INTERVAL 0x01C
+#define DSC_SCALE_INC_INTERVAL 0x020
+#define DSC_FIRST_LINE_BPG_OFFSET 0x024
+#define DSC_BPG_OFFSET 0x028
+#define DSC_DSC_OFFSET 0x02C
+#define DSC_FLATNESS 0x030
+#define DSC_RC_MODEL_SIZE 0x034
+#define DSC_RC 0x038
+#define DSC_RC_BUF_THRESH 0x03C
+#define DSC_RANGE_MIN_QP 0x074
+#define DSC_RANGE_MAX_QP 0x0B0
+#define DSC_RANGE_BPG_OFFSET 0x0EC
+
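+/*
+ * The pingpong mux registers for successive DSC blocks are laid out
+ * downward in register space: DSC_0 sits at 0x1800 and each following
+ * instance sits 0x3FC below the previous one.
+ */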
+#define DSC_CTL(m) (0x1800 - 0x3FC * ((m) - DSC_0))
+
+static void dpu_hw_dsc_disable(struct dpu_hw_dsc *dsc)
+{
+ struct dpu_hw_blk_reg_map *c = &dsc->hw;
+
+ DPU_REG_WRITE(c, DSC_COMMON_MODE, 0);
+}
+
+static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
+ struct drm_dsc_config *dsc,
+ u32 mode,
+ u32 initial_lines)
+{
+ struct dpu_hw_blk_reg_map *c = &hw_dsc->hw;
+ u32 data;
+ u32 slice_last_group_size;
+ u32 det_thresh_flatness;
+ bool is_cmd_mode = !(mode & DSC_MODE_VIDEO);
+
+ DPU_REG_WRITE(c, DSC_COMMON_MODE, mode);
+
+ if (is_cmd_mode)
+ initial_lines += 1;
+
+ slice_last_group_size = (dsc->slice_width + 2) % 3;
+
+ data = (initial_lines << 20);
+ data |= (slice_last_group_size << 18);
+ /* bpp is 6.4 format, 4 LSBs bits are for fractional part */
+ data |= (dsc->bits_per_pixel << 8);
+ data |= (dsc->block_pred_enable << 7);
+ data |= (dsc->line_buf_depth << 3);
+ data |= (dsc->simple_422 << 2);
+ data |= (dsc->convert_rgb << 1);
+ data |= dsc->bits_per_component;
+
+ DPU_REG_WRITE(c, DSC_ENC, data);
+
+ data = dsc->pic_width << 16;
+ data |= dsc->pic_height;
+ DPU_REG_WRITE(c, DSC_PICTURE, data);
+
+ data = dsc->slice_width << 16;
+ data |= dsc->slice_height;
+ DPU_REG_WRITE(c, DSC_SLICE, data);
+
+ data = dsc->slice_chunk_size << 16;
+ DPU_REG_WRITE(c, DSC_CHUNK_SIZE, data);
+
+ data = dsc->initial_dec_delay << 16;
+ data |= dsc->initial_xmit_delay;
+ DPU_REG_WRITE(c, DSC_DELAY, data);
+
+ data = dsc->initial_scale_value;
+ DPU_REG_WRITE(c, DSC_SCALE_INITIAL, data);
+
+ data = dsc->scale_decrement_interval;
+ DPU_REG_WRITE(c, DSC_SCALE_DEC_INTERVAL, data);
+
+ data = dsc->scale_increment_interval;
+ DPU_REG_WRITE(c, DSC_SCALE_INC_INTERVAL, data);
+
+ data = dsc->first_line_bpg_offset;
+ DPU_REG_WRITE(c, DSC_FIRST_LINE_BPG_OFFSET, data);
+
+ data = dsc->nfl_bpg_offset << 16;
+ data |= dsc->slice_bpg_offset;
+ DPU_REG_WRITE(c, DSC_BPG_OFFSET, data);
+
+ data = dsc->initial_offset << 16;
+ data |= dsc->final_offset;
+ DPU_REG_WRITE(c, DSC_DSC_OFFSET, data);
+
+ det_thresh_flatness = drm_dsc_flatness_det_thresh(dsc);
+ data = det_thresh_flatness << 10;
+ data |= dsc->flatness_max_qp << 5;
+ data |= dsc->flatness_min_qp;
+ DPU_REG_WRITE(c, DSC_FLATNESS, data);
+
+ data = dsc->rc_model_size;
+ DPU_REG_WRITE(c, DSC_RC_MODEL_SIZE, data);
+
+ data = dsc->rc_tgt_offset_low << 18;
+ data |= dsc->rc_tgt_offset_high << 14;
+ data |= dsc->rc_quant_incr_limit1 << 9;
+ data |= dsc->rc_quant_incr_limit0 << 4;
+ data |= dsc->rc_edge_factor;
+ DPU_REG_WRITE(c, DSC_RC, data);
+}
+
+static void dpu_hw_dsc_config_thresh(struct dpu_hw_dsc *hw_dsc,
+ struct drm_dsc_config *dsc)
+{
+ struct drm_dsc_rc_range_parameters *rc = dsc->rc_range_params;
+ struct dpu_hw_blk_reg_map *c = &hw_dsc->hw;
+ u32 off;
+ int i;
+
+ off = DSC_RC_BUF_THRESH;
+ for (i = 0; i < DSC_NUM_BUF_RANGES - 1 ; i++) {
+ DPU_REG_WRITE(c, off, dsc->rc_buf_thresh[i]);
+ off += 4;
+ }
+
+ off = DSC_RANGE_MIN_QP;
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ DPU_REG_WRITE(c, off, rc[i].range_min_qp);
+ off += 4;
+ }
+
+ off = DSC_RANGE_MAX_QP;
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ DPU_REG_WRITE(c, off, rc[i].range_max_qp);
+ off += 4;
+ }
+
+ off = DSC_RANGE_BPG_OFFSET;
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ DPU_REG_WRITE(c, off, rc[i].range_bpg_offset);
+ off += 4;
+ }
+}
+
+static void dpu_hw_dsc_bind_pingpong_blk(
+ struct dpu_hw_dsc *hw_dsc,
+ const enum dpu_pingpong pp)
+{
+ struct dpu_hw_blk_reg_map *c = &hw_dsc->hw;
+ int mux_cfg = 0xF;
+ u32 dsc_ctl_offset;
+
+ dsc_ctl_offset = DSC_CTL(hw_dsc->idx);
+
+ if (pp) {
+ mux_cfg = (pp - PINGPONG_0) & 0x7;
+ DRM_DEBUG_KMS("Binding dsc:%d to pp:%d\n",
+ hw_dsc->idx - DSC_0, pp - PINGPONG_0);
+ } else {
+ DRM_DEBUG_KMS("Unbinding dsc:%d from any pp\n",
+ hw_dsc->idx - DSC_0);
+ }
+
+ DPU_REG_WRITE(c, dsc_ctl_offset, mux_cfg);
+}
+
+static void _setup_dsc_ops(struct dpu_hw_dsc_ops *ops,
+ unsigned long cap)
+{
+ ops->dsc_disable = dpu_hw_dsc_disable;
+ ops->dsc_config = dpu_hw_dsc_config;
+ ops->dsc_config_thresh = dpu_hw_dsc_config_thresh;
+ if (cap & BIT(DPU_DSC_OUTPUT_CTRL))
+ ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk;
+}
+
+struct dpu_hw_dsc *dpu_hw_dsc_init(const struct dpu_dsc_cfg *cfg,
+ void __iomem *addr)
+{
+ struct dpu_hw_dsc *c;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ c->hw.blk_addr = addr + cfg->base;
+ c->hw.log_mask = DPU_DBG_MASK_DSC;
+
+ c->idx = cfg->id;
+ c->caps = cfg;
+ _setup_dsc_ops(&c->ops, c->caps->features);
+
+ return c;
+}
+
+void dpu_hw_dsc_destroy(struct dpu_hw_dsc *dsc)
+{
+ kfree(dsc);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
new file mode 100644
index 0000000000..d5b597ab8c
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020-2022, Linaro Limited
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved
+ */
+
+#ifndef _DPU_HW_DSC_H
+#define _DPU_HW_DSC_H
+
+#include <drm/display/drm_dsc.h>
+
+#define DSC_MODE_SPLIT_PANEL BIT(0)
+#define DSC_MODE_MULTIPLEX BIT(1)
+#define DSC_MODE_VIDEO BIT(2)
+
+struct dpu_hw_dsc;
+
+/**
+ * struct dpu_hw_dsc_ops - interface to the dsc hardware driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_dsc_ops {
+ /**
+ * dsc_disable - disable dsc
+ * @hw_dsc: Pointer to dsc context
+ */
+ void (*dsc_disable)(struct dpu_hw_dsc *hw_dsc);
+
+ /**
+ * dsc_config - configures dsc encoder
+ * @hw_dsc: Pointer to dsc context
+ * @dsc: panel dsc parameters
+ * @mode: dsc topology mode to be set
+ * @initial_lines: amount of initial lines to be used
+ */
+ void (*dsc_config)(struct dpu_hw_dsc *hw_dsc,
+ struct drm_dsc_config *dsc,
+ u32 mode,
+ u32 initial_lines);
+
+ /**
+ * dsc_config_thresh - programs panel thresholds
+ * @hw_dsc: Pointer to dsc context
+ * @dsc: panel dsc parameters
+ */
+ void (*dsc_config_thresh)(struct dpu_hw_dsc *hw_dsc,
+ struct drm_dsc_config *dsc);
+
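+ /**
+ * dsc_bind_pingpong_blk - binds the dsc block to a pingpong block
+ * @hw_dsc: Pointer to dsc context
+ * @pp: pingpong block to bind to; a zero value unbinds the dsc
+ */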
+ void (*dsc_bind_pingpong_blk)(struct dpu_hw_dsc *hw_dsc,
+ enum dpu_pingpong pp);
+};
+
+struct dpu_hw_dsc {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* dsc */
+ enum dpu_dsc idx;
+ const struct dpu_dsc_cfg *caps;
+
+ /* ops */
+ struct dpu_hw_dsc_ops ops;
+};
+
+/**
+ * dpu_hw_dsc_init() - Initializes the DSC hw driver object.
+ * @cfg: DSC catalog entry for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * Return: Error code or allocated dpu_hw_dsc context
+ */
+struct dpu_hw_dsc *dpu_hw_dsc_init(const struct dpu_dsc_cfg *cfg,
+ void __iomem *addr);
+
+/**
+ * dpu_hw_dsc_init_1_2() - initializes the v1.2 DSC hw driver object
+ * @cfg: DSC catalog entry for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * Return: Error code or allocated dpu_hw_dsc context
+ */
+struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(const struct dpu_dsc_cfg *cfg,
+ void __iomem *addr);
+
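+/*
+ * A sketch of how a caller might choose between the two init variants
+ * (assuming the catalog marks 1.2 hardware with a feature bit, as the
+ * resource manager does):
+ *
+ *   if (test_bit(DPU_DSC_HW_REV_1_2, &dsc_cfg->features))
+ *           hw_dsc = dpu_hw_dsc_init_1_2(dsc_cfg, mmio_base);
+ *   else
+ *           hw_dsc = dpu_hw_dsc_init(dsc_cfg, mmio_base);
+ */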
+/**
+ * dpu_hw_dsc_destroy - destroys dsc driver context
+ * @dsc: Pointer to dsc driver context returned by dpu_hw_dsc_init
+ */
+void dpu_hw_dsc_destroy(struct dpu_hw_dsc *dsc);
+
+static inline struct dpu_hw_dsc *to_dpu_hw_dsc(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_dsc, base);
+}
+
+#endif /* _DPU_HW_DSC_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c
new file mode 100644
index 0000000000..24fe1d98eb
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved
+ */
+
+#include <drm/display/drm_dsc_helper.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_dsc.h"
+
+#define DSC_CMN_MAIN_CNF 0x00
+
+/* DPU_DSC_ENC register offsets */
+#define ENC_DF_CTRL 0x00
+#define ENC_GENERAL_STATUS 0x04
+#define ENC_HSLICE_STATUS 0x08
+#define ENC_OUT_STATUS 0x0C
+#define ENC_INT_STAT 0x10
+#define ENC_INT_CLR 0x14
+#define ENC_INT_MASK 0x18
+#define DSC_MAIN_CONF 0x30
+#define DSC_PICTURE_SIZE 0x34
+#define DSC_SLICE_SIZE 0x38
+#define DSC_MISC_SIZE 0x3C
+#define DSC_HRD_DELAYS 0x40
+#define DSC_RC_SCALE 0x44
+#define DSC_RC_SCALE_INC_DEC 0x48
+#define DSC_RC_OFFSETS_1 0x4C
+#define DSC_RC_OFFSETS_2 0x50
+#define DSC_RC_OFFSETS_3 0x54
+#define DSC_RC_OFFSETS_4 0x58
+#define DSC_FLATNESS_QP 0x5C
+#define DSC_RC_MODEL_SIZE 0x60
+#define DSC_RC_CONFIG 0x64
+#define DSC_RC_BUF_THRESH_0 0x68
+#define DSC_RC_BUF_THRESH_1 0x6C
+#define DSC_RC_BUF_THRESH_2 0x70
+#define DSC_RC_BUF_THRESH_3 0x74
+#define DSC_RC_MIN_QP_0 0x78
+#define DSC_RC_MIN_QP_1 0x7C
+#define DSC_RC_MIN_QP_2 0x80
+#define DSC_RC_MAX_QP_0 0x84
+#define DSC_RC_MAX_QP_1 0x88
+#define DSC_RC_MAX_QP_2 0x8C
+#define DSC_RC_RANGE_BPG_OFFSETS_0 0x90
+#define DSC_RC_RANGE_BPG_OFFSETS_1 0x94
+#define DSC_RC_RANGE_BPG_OFFSETS_2 0x98
+
+/* DPU_DSC_CTL register offsets */
+#define DSC_CTL 0x00
+#define DSC_CFG 0x04
+#define DSC_DATA_IN_SWAP 0x08
+#define DSC_CLK_CTRL 0x0C
+
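+/*
+ * Divide the encoder output buffer evenly between the active soft slices
+ * and return the highest usable address per slice. The 2400-entry depth
+ * and the halving for native 4:2:x modes are hardware programming
+ * requirements taken as given here.
+ */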
+static int _dsc_calc_output_buf_max_addr(struct dpu_hw_dsc *hw_dsc, int num_softslice)
+{
+ int max_addr = 2400 / num_softslice;
+
+ if (hw_dsc->caps->features & BIT(DPU_DSC_NATIVE_42x_EN))
+ max_addr /= 2;
+
+ return max_addr - 1;
+}
+
+static void dpu_hw_dsc_disable_1_2(struct dpu_hw_dsc *hw_dsc)
+{
+ struct dpu_hw_blk_reg_map *hw;
+ const struct dpu_dsc_sub_blks *sblk;
+
+ if (!hw_dsc)
+ return;
+
+ hw = &hw_dsc->hw;
+ sblk = hw_dsc->caps->sblk;
+ DPU_REG_WRITE(hw, sblk->ctl.base + DSC_CFG, 0);
+
+ DPU_REG_WRITE(hw, sblk->enc.base + ENC_DF_CTRL, 0);
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_MAIN_CONF, 0);
+}
+
+static void dpu_hw_dsc_config_1_2(struct dpu_hw_dsc *hw_dsc,
+ struct drm_dsc_config *dsc,
+ u32 mode,
+ u32 initial_lines)
+{
+ struct dpu_hw_blk_reg_map *hw;
+ const struct dpu_dsc_sub_blks *sblk;
+ u32 data = 0;
+ u32 det_thresh_flatness;
+ u32 num_active_slice_per_enc;
+ u32 bpp;
+
+ if (!hw_dsc || !dsc)
+ return;
+
+ hw = &hw_dsc->hw;
+
+ sblk = hw_dsc->caps->sblk;
+
+ if (mode & DSC_MODE_SPLIT_PANEL)
+ data |= BIT(0);
+
+ if (mode & DSC_MODE_MULTIPLEX)
+ data |= BIT(1);
+
+ num_active_slice_per_enc = dsc->slice_count;
+ if (mode & DSC_MODE_MULTIPLEX)
+ num_active_slice_per_enc = dsc->slice_count / 2;
+
+ data |= (num_active_slice_per_enc & 0x3) << 7;
+
+ DPU_REG_WRITE(hw, DSC_CMN_MAIN_CNF, data);
+
+ data = (initial_lines & 0xff);
+
+ if (mode & DSC_MODE_VIDEO)
+ data |= BIT(9);
+
+ data |= (_dsc_calc_output_buf_max_addr(hw_dsc, num_active_slice_per_enc) << 18);
+
+ DPU_REG_WRITE(hw, sblk->enc.base + ENC_DF_CTRL, data);
+
+ data = (dsc->dsc_version_minor & 0xf) << 28;
+ if (dsc->dsc_version_minor == 0x2) {
+ if (dsc->native_422)
+ data |= BIT(22);
+ if (dsc->native_420)
+ data |= BIT(21);
+ }
+
+ bpp = dsc->bits_per_pixel;
+ /*
+ * as per hw requirement, bpp should be programmed at twice the
+ * actual value in case of 420 or 422 encoding
+ */
+ if (dsc->native_422 || dsc->native_420)
+ bpp = 2 * bpp;
+
+ data |= bpp << 10;
+
+ if (dsc->block_pred_enable)
+ data |= BIT(20);
+
+ if (dsc->convert_rgb)
+ data |= BIT(4);
+
+ data |= (dsc->line_buf_depth & 0xf) << 6;
+ data |= dsc->bits_per_component & 0xf;
+
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_MAIN_CONF, data);
+
+ data = (dsc->pic_width & 0xffff) |
+ ((dsc->pic_height & 0xffff) << 16);
+
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_PICTURE_SIZE, data);
+
+ data = (dsc->slice_width & 0xffff) |
+ ((dsc->slice_height & 0xffff) << 16);
+
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_SLICE_SIZE, data);
+
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_MISC_SIZE,
+ (dsc->slice_chunk_size) & 0xffff);
+
+ data = (dsc->initial_xmit_delay & 0xffff) |
+ ((dsc->initial_dec_delay & 0xffff) << 16);
+
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_HRD_DELAYS, data);
+
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_SCALE,
+ dsc->initial_scale_value & 0x3f);
+
+ data = (dsc->scale_increment_interval & 0xffff) |
+ ((dsc->scale_decrement_interval & 0x7ff) << 16);
+
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_SCALE_INC_DEC, data);
+
+ data = (dsc->first_line_bpg_offset & 0x1f) |
+ ((dsc->second_line_bpg_offset & 0x1f) << 5);
+
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_OFFSETS_1, data);
+
+ data = (dsc->nfl_bpg_offset & 0xffff) |
+ ((dsc->slice_bpg_offset & 0xffff) << 16);
+
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_OFFSETS_2, data);
+
+ data = (dsc->initial_offset & 0xffff) |
+ ((dsc->final_offset & 0xffff) << 16);
+
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_OFFSETS_3, data);
+
+ data = (dsc->nsl_bpg_offset & 0xffff) |
+ ((dsc->second_line_offset_adj & 0xffff) << 16);
+
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_OFFSETS_4, data);
+
+ det_thresh_flatness = drm_dsc_flatness_det_thresh(dsc);
+ data = (dsc->flatness_min_qp & 0x1f) |
+ ((dsc->flatness_max_qp & 0x1f) << 5) |
+ ((det_thresh_flatness & 0xff) << 10);
+
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_FLATNESS_QP, data);
+
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MODEL_SIZE,
+ (dsc->rc_model_size) & 0xffff);
+
+ data = dsc->rc_edge_factor & 0xf;
+ data |= (dsc->rc_quant_incr_limit0 & 0x1f) << 8;
+ data |= (dsc->rc_quant_incr_limit1 & 0x1f) << 13;
+ data |= (dsc->rc_tgt_offset_high & 0xf) << 20;
+ data |= (dsc->rc_tgt_offset_low & 0xf) << 24;
+
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_CONFIG, data);
+
+ /* program the dsc wrapper */
+ data = BIT(0); /* encoder enable */
+ if (dsc->native_422)
+ data |= BIT(8);
+ else if (dsc->native_420)
+ data |= BIT(9);
+ if (!dsc->convert_rgb)
+ data |= BIT(10);
+ if (dsc->bits_per_component == 8)
+ data |= BIT(11);
+ if (mode & DSC_MODE_SPLIT_PANEL)
+ data |= BIT(12);
+ if (mode & DSC_MODE_MULTIPLEX)
+ data |= BIT(13);
+ if (!(mode & DSC_MODE_VIDEO))
+ data |= BIT(17);
+
+ DPU_REG_WRITE(hw, sblk->ctl.base + DSC_CFG, data);
+}
+
+static void dpu_hw_dsc_config_thresh_1_2(struct dpu_hw_dsc *hw_dsc,
+ struct drm_dsc_config *dsc)
+{
+ struct dpu_hw_blk_reg_map *hw;
+ const struct dpu_dsc_sub_blks *sblk;
+ struct drm_dsc_rc_range_parameters *rc;
+
+ if (!hw_dsc || !dsc)
+ return;
+
+ hw = &hw_dsc->hw;
+
+ sblk = hw_dsc->caps->sblk;
+
+ rc = dsc->rc_range_params;
+
+ /*
+ * There are 14 BUF_THRESH values in total; each register holds
+ * four of them, with the last register holding only two.
+ */
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_BUF_THRESH_0,
+ (dsc->rc_buf_thresh[0] << 0) |
+ (dsc->rc_buf_thresh[1] << 8) |
+ (dsc->rc_buf_thresh[2] << 16) |
+ (dsc->rc_buf_thresh[3] << 24));
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_BUF_THRESH_1,
+ (dsc->rc_buf_thresh[4] << 0) |
+ (dsc->rc_buf_thresh[5] << 8) |
+ (dsc->rc_buf_thresh[6] << 16) |
+ (dsc->rc_buf_thresh[7] << 24));
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_BUF_THRESH_2,
+ (dsc->rc_buf_thresh[8] << 0) |
+ (dsc->rc_buf_thresh[9] << 8) |
+ (dsc->rc_buf_thresh[10] << 16) |
+ (dsc->rc_buf_thresh[11] << 24));
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_BUF_THRESH_3,
+ (dsc->rc_buf_thresh[12] << 0) |
+ (dsc->rc_buf_thresh[13] << 8));
+
+ /*
+ * min/max_QP are 5-bit fields; each register holds five min_qp or
+ * max_qp values, for a total of 15.
+ *
+ * BPG_OFFSET is a 6-bit field; each register holds five BPG_offset
+ * values, for a total of 15.
+ */
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MIN_QP_0,
+ (rc[0].range_min_qp << 0) |
+ (rc[1].range_min_qp << 5) |
+ (rc[2].range_min_qp << 10) |
+ (rc[3].range_min_qp << 15) |
+ (rc[4].range_min_qp << 20));
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MAX_QP_0,
+ (rc[0].range_max_qp << 0) |
+ (rc[1].range_max_qp << 5) |
+ (rc[2].range_max_qp << 10) |
+ (rc[3].range_max_qp << 15) |
+ (rc[4].range_max_qp << 20));
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_RANGE_BPG_OFFSETS_0,
+ (rc[0].range_bpg_offset << 0) |
+ (rc[1].range_bpg_offset << 6) |
+ (rc[2].range_bpg_offset << 12) |
+ (rc[3].range_bpg_offset << 18) |
+ (rc[4].range_bpg_offset << 24));
+
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MIN_QP_1,
+ (rc[5].range_min_qp << 0) |
+ (rc[6].range_min_qp << 5) |
+ (rc[7].range_min_qp << 10) |
+ (rc[8].range_min_qp << 15) |
+ (rc[9].range_min_qp << 20));
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MAX_QP_1,
+ (rc[5].range_max_qp << 0) |
+ (rc[6].range_max_qp << 5) |
+ (rc[7].range_max_qp << 10) |
+ (rc[8].range_max_qp << 15) |
+ (rc[9].range_max_qp << 20));
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_RANGE_BPG_OFFSETS_1,
+ (rc[5].range_bpg_offset << 0) |
+ (rc[6].range_bpg_offset << 6) |
+ (rc[7].range_bpg_offset << 12) |
+ (rc[8].range_bpg_offset << 18) |
+ (rc[9].range_bpg_offset << 24));
+
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MIN_QP_2,
+ (rc[10].range_min_qp << 0) |
+ (rc[11].range_min_qp << 5) |
+ (rc[12].range_min_qp << 10) |
+ (rc[13].range_min_qp << 15) |
+ (rc[14].range_min_qp << 20));
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_MAX_QP_2,
+ (rc[10].range_max_qp << 0) |
+ (rc[11].range_max_qp << 5) |
+ (rc[12].range_max_qp << 10) |
+ (rc[13].range_max_qp << 15) |
+ (rc[14].range_max_qp << 20));
+ DPU_REG_WRITE(hw, sblk->enc.base + DSC_RC_RANGE_BPG_OFFSETS_2,
+ (rc[10].range_bpg_offset << 0) |
+ (rc[11].range_bpg_offset << 6) |
+ (rc[12].range_bpg_offset << 12) |
+ (rc[13].range_bpg_offset << 18) |
+ (rc[14].range_bpg_offset << 24));
+}
+
+static void dpu_hw_dsc_bind_pingpong_blk_1_2(struct dpu_hw_dsc *hw_dsc,
+ const enum dpu_pingpong pp)
+{
+ struct dpu_hw_blk_reg_map *hw;
+ const struct dpu_dsc_sub_blks *sblk;
+ int mux_cfg = 0xf; /* Disabled */
+
+ hw = &hw_dsc->hw;
+
+ sblk = hw_dsc->caps->sblk;
+
+ if (pp)
+ mux_cfg = (pp - PINGPONG_0) & 0x7;
+
+ DPU_REG_WRITE(hw, sblk->ctl.base + DSC_CTL, mux_cfg);
+}
+
+static void _setup_dsc_ops_1_2(struct dpu_hw_dsc_ops *ops,
+ const unsigned long features)
+{
+ ops->dsc_disable = dpu_hw_dsc_disable_1_2;
+ ops->dsc_config = dpu_hw_dsc_config_1_2;
+ ops->dsc_config_thresh = dpu_hw_dsc_config_thresh_1_2;
+ ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk_1_2;
+}
+
+struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(const struct dpu_dsc_cfg *cfg,
+ void __iomem *addr)
+{
+ struct dpu_hw_dsc *c;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ c->hw.blk_addr = addr + cfg->base;
+ c->hw.log_mask = DPU_DBG_MASK_DSC;
+
+ c->idx = cfg->id;
+ c->caps = cfg;
+ _setup_dsc_ops_1_2(&c->ops, c->caps->features);
+
+ return c;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
new file mode 100644
index 0000000000..9419b2209a
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_dspp.h"
+#include "dpu_kms.h"
+
+
+/* DSPP_PCC */
+#define PCC_EN BIT(0)
+#define PCC_DIS 0
+#define PCC_RED_R_OFF 0x10
+#define PCC_RED_G_OFF 0x1C
+#define PCC_RED_B_OFF 0x28
+#define PCC_GREEN_R_OFF 0x14
+#define PCC_GREEN_G_OFF 0x20
+#define PCC_GREEN_B_OFF 0x2C
+#define PCC_BLUE_R_OFF 0x18
+#define PCC_BLUE_G_OFF 0x24
+#define PCC_BLUE_B_OFF 0x30
+
+static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx,
+ struct dpu_hw_pcc_cfg *cfg)
+{
+ u32 base;
+
+ if (!ctx) {
+ DRM_ERROR("invalid ctx %pK\n", ctx);
+ return;
+ }
+
+ base = ctx->cap->sblk->pcc.base;
+
+ if (!base) {
+ DRM_ERROR("invalid ctx %pK pcc base 0x%x\n", ctx, base);
+ return;
+ }
+
+ if (!cfg) {
+ DRM_DEBUG_DRIVER("disable pcc feature\n");
+ DPU_REG_WRITE(&ctx->hw, base, PCC_DIS);
+ return;
+ }
+
+ DPU_REG_WRITE(&ctx->hw, base + PCC_RED_R_OFF, cfg->r.r);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_RED_G_OFF, cfg->r.g);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_RED_B_OFF, cfg->r.b);
+
+ DPU_REG_WRITE(&ctx->hw, base + PCC_GREEN_R_OFF, cfg->g.r);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_GREEN_G_OFF, cfg->g.g);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_GREEN_B_OFF, cfg->g.b);
+
+ DPU_REG_WRITE(&ctx->hw, base + PCC_BLUE_R_OFF, cfg->b.r);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_BLUE_G_OFF, cfg->b.g);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_BLUE_B_OFF, cfg->b.b);
+
+ DPU_REG_WRITE(&ctx->hw, base, PCC_EN);
+}
+
+static void _setup_dspp_ops(struct dpu_hw_dspp *c,
+ unsigned long features)
+{
+ if (test_bit(DPU_DSPP_PCC, &features))
+ c->ops.setup_pcc = dpu_setup_dspp_pcc;
+}
+
+struct dpu_hw_dspp *dpu_hw_dspp_init(const struct dpu_dspp_cfg *cfg,
+ void __iomem *addr)
+{
+ struct dpu_hw_dspp *c;
+
+ if (!addr)
+ return ERR_PTR(-EINVAL);
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ c->hw.blk_addr = addr + cfg->base;
+ c->hw.log_mask = DPU_DBG_MASK_DSPP;
+
+ /* Assign ops */
+ c->idx = cfg->id;
+ c->cap = cfg;
+ _setup_dspp_ops(c, c->cap->features);
+
+ return c;
+}
+
+void dpu_hw_dspp_destroy(struct dpu_hw_dspp *dspp)
+{
+ kfree(dspp);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
new file mode 100644
index 0000000000..bea9656813
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_DSPP_H
+#define _DPU_HW_DSPP_H
+
+struct dpu_hw_dspp;
+
+/**
+ * struct dpu_hw_pcc_coeff - PCC coefficient structure for each color
+ * component.
+ * @r: red coefficient.
+ * @g: green coefficient.
+ * @b: blue coefficient.
+ */
+struct dpu_hw_pcc_coeff {
+ __u32 r;
+ __u32 g;
+ __u32 b;
+};
+
+/**
+ * struct dpu_hw_pcc_cfg - pcc feature structure
+ * @r: red coefficients.
+ * @g: green coefficients.
+ * @b: blue coefficients.
+ */
+struct dpu_hw_pcc_cfg {
+ struct dpu_hw_pcc_coeff r;
+ struct dpu_hw_pcc_coeff g;
+ struct dpu_hw_pcc_coeff b;
+};
+
+/**
+ * struct dpu_hw_dspp_ops - interface to the dspp hardware driver functions
+ * Caller must call the init function to get the dspp context for each dspp
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_dspp_ops {
+ /**
+ * setup_pcc - setup dspp pcc
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_pcc)(struct dpu_hw_dspp *ctx, struct dpu_hw_pcc_cfg *cfg);
+
+};
+
+/**
+ * struct dpu_hw_dspp - dspp description
+ * @base: Hardware block base structure
+ * @hw: Block hardware details
+ * @idx: DSPP index
+ * @cap: Pointer to layer_cfg
+ * @ops: Pointer to operations possible for this DSPP
+ */
+struct dpu_hw_dspp {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* dspp */
+ int idx;
+ const struct dpu_dspp_cfg *cap;
+
+ /* Ops */
+ struct dpu_hw_dspp_ops ops;
+};
+
+/**
+ * dpu_hw_dspp - convert base object dpu_hw_base to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_dspp *to_dpu_hw_dspp(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_dspp, base);
+}
+
+/**
+ * dpu_hw_dspp_init() - Initializes the DSPP hw driver object.
+ * should be called once before accessing every DSPP.
+ * @cfg: DSPP catalog entry for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * Return: pointer to structure or ERR_PTR
+ */
+struct dpu_hw_dspp *dpu_hw_dspp_init(const struct dpu_dspp_cfg *cfg,
+ void __iomem *addr);
+
+/**
+ * dpu_hw_dspp_destroy(): Destroys DSPP driver context
+ * @dspp: Pointer to DSPP driver context
+ */
+void dpu_hw_dspp_destroy(struct dpu_hw_dspp *dspp);
+
+#endif /*_DPU_HW_DSPP_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
new file mode 100644
index 0000000000..e3c50439f8
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -0,0 +1,644 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+
+#include "dpu_core_irq.h"
+#include "dpu_kms.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_trace.h"
+
+/*
+ * Register offsets in MDSS register file for the interrupt registers
+ * w.r.t. the MDP base
+ */
+#define MDP_INTF_OFF(intf) (0x6A000 + 0x800 * (intf))
+#define MDP_INTF_INTR_EN(intf) (MDP_INTF_OFF(intf) + 0x1c0)
+#define MDP_INTF_INTR_STATUS(intf) (MDP_INTF_OFF(intf) + 0x1c4)
+#define MDP_INTF_INTR_CLEAR(intf) (MDP_INTF_OFF(intf) + 0x1c8)
+#define MDP_INTF_TEAR_OFF(intf) (0x6D700 + 0x100 * (intf))
+#define MDP_INTF_INTR_TEAR_EN(intf) (MDP_INTF_TEAR_OFF(intf) + 0x000)
+#define MDP_INTF_INTR_TEAR_STATUS(intf) (MDP_INTF_TEAR_OFF(intf) + 0x004)
+#define MDP_INTF_INTR_TEAR_CLEAR(intf) (MDP_INTF_TEAR_OFF(intf) + 0x008)
+#define MDP_AD4_OFF(ad4) (0x7C000 + 0x1000 * (ad4))
+#define MDP_AD4_INTR_EN_OFF(ad4) (MDP_AD4_OFF(ad4) + 0x41c)
+#define MDP_AD4_INTR_CLEAR_OFF(ad4) (MDP_AD4_OFF(ad4) + 0x424)
+#define MDP_AD4_INTR_STATUS_OFF(ad4) (MDP_AD4_OFF(ad4) + 0x420)
+#define MDP_INTF_REV_7xxx_OFF(intf) (0x34000 + 0x1000 * (intf))
+#define MDP_INTF_REV_7xxx_INTR_EN(intf) (MDP_INTF_REV_7xxx_OFF(intf) + 0x1c0)
+#define MDP_INTF_REV_7xxx_INTR_STATUS(intf) (MDP_INTF_REV_7xxx_OFF(intf) + 0x1c4)
+#define MDP_INTF_REV_7xxx_INTR_CLEAR(intf) (MDP_INTF_REV_7xxx_OFF(intf) + 0x1c8)
+#define MDP_INTF_REV_7xxx_TEAR_OFF(intf) (0x34800 + 0x1000 * (intf))
+#define MDP_INTF_REV_7xxx_INTR_TEAR_EN(intf) (MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x000)
+#define MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(intf) (MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x004)
+#define MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(intf) (MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x008)
+
+/**
+ * struct dpu_intr_reg - array of DPU register sets
+ * @clr_off: offset to CLEAR reg
+ * @en_off: offset to ENABLE reg
+ * @status_off: offset to STATUS reg
+ */
+struct dpu_intr_reg {
+ u32 clr_off;
+ u32 en_off;
+ u32 status_off;
+};
+
+/*
+ * dpu_intr_set_legacy - List of DPU interrupt registers for DPU <= 6.x
+ */
+static const struct dpu_intr_reg dpu_intr_set_legacy[] = {
+ [MDP_SSPP_TOP0_INTR] = {
+ INTR_CLEAR,
+ INTR_EN,
+ INTR_STATUS
+ },
+ [MDP_SSPP_TOP0_INTR2] = {
+ INTR2_CLEAR,
+ INTR2_EN,
+ INTR2_STATUS
+ },
+ [MDP_SSPP_TOP0_HIST_INTR] = {
+ HIST_INTR_CLEAR,
+ HIST_INTR_EN,
+ HIST_INTR_STATUS
+ },
+ [MDP_INTF0_INTR] = {
+ MDP_INTF_INTR_CLEAR(0),
+ MDP_INTF_INTR_EN(0),
+ MDP_INTF_INTR_STATUS(0)
+ },
+ [MDP_INTF1_INTR] = {
+ MDP_INTF_INTR_CLEAR(1),
+ MDP_INTF_INTR_EN(1),
+ MDP_INTF_INTR_STATUS(1)
+ },
+ [MDP_INTF2_INTR] = {
+ MDP_INTF_INTR_CLEAR(2),
+ MDP_INTF_INTR_EN(2),
+ MDP_INTF_INTR_STATUS(2)
+ },
+ [MDP_INTF3_INTR] = {
+ MDP_INTF_INTR_CLEAR(3),
+ MDP_INTF_INTR_EN(3),
+ MDP_INTF_INTR_STATUS(3)
+ },
+ [MDP_INTF4_INTR] = {
+ MDP_INTF_INTR_CLEAR(4),
+ MDP_INTF_INTR_EN(4),
+ MDP_INTF_INTR_STATUS(4)
+ },
+ [MDP_INTF5_INTR] = {
+ MDP_INTF_INTR_CLEAR(5),
+ MDP_INTF_INTR_EN(5),
+ MDP_INTF_INTR_STATUS(5)
+ },
+ [MDP_INTF1_TEAR_INTR] = {
+ MDP_INTF_INTR_TEAR_CLEAR(1),
+ MDP_INTF_INTR_TEAR_EN(1),
+ MDP_INTF_INTR_TEAR_STATUS(1)
+ },
+ [MDP_INTF2_TEAR_INTR] = {
+ MDP_INTF_INTR_TEAR_CLEAR(2),
+ MDP_INTF_INTR_TEAR_EN(2),
+ MDP_INTF_INTR_TEAR_STATUS(2)
+ },
+ [MDP_AD4_0_INTR] = {
+ MDP_AD4_INTR_CLEAR_OFF(0),
+ MDP_AD4_INTR_EN_OFF(0),
+ MDP_AD4_INTR_STATUS_OFF(0),
+ },
+ [MDP_AD4_1_INTR] = {
+ MDP_AD4_INTR_CLEAR_OFF(1),
+ MDP_AD4_INTR_EN_OFF(1),
+ MDP_AD4_INTR_STATUS_OFF(1),
+ },
+};
+
+/*
+ * dpu_intr_set_7xxx - List of DPU interrupt registers for DPU >= 7.0
+ */
+static const struct dpu_intr_reg dpu_intr_set_7xxx[] = {
+ [MDP_SSPP_TOP0_INTR] = {
+ INTR_CLEAR,
+ INTR_EN,
+ INTR_STATUS
+ },
+ [MDP_SSPP_TOP0_INTR2] = {
+ INTR2_CLEAR,
+ INTR2_EN,
+ INTR2_STATUS
+ },
+ [MDP_SSPP_TOP0_HIST_INTR] = {
+ HIST_INTR_CLEAR,
+ HIST_INTR_EN,
+ HIST_INTR_STATUS
+ },
+ [MDP_INTF0_INTR] = {
+ MDP_INTF_REV_7xxx_INTR_CLEAR(0),
+ MDP_INTF_REV_7xxx_INTR_EN(0),
+ MDP_INTF_REV_7xxx_INTR_STATUS(0)
+ },
+ [MDP_INTF1_INTR] = {
+ MDP_INTF_REV_7xxx_INTR_CLEAR(1),
+ MDP_INTF_REV_7xxx_INTR_EN(1),
+ MDP_INTF_REV_7xxx_INTR_STATUS(1)
+ },
+ [MDP_INTF1_TEAR_INTR] = {
+ MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(1),
+ MDP_INTF_REV_7xxx_INTR_TEAR_EN(1),
+ MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(1)
+ },
+ [MDP_INTF2_INTR] = {
+ MDP_INTF_REV_7xxx_INTR_CLEAR(2),
+ MDP_INTF_REV_7xxx_INTR_EN(2),
+ MDP_INTF_REV_7xxx_INTR_STATUS(2)
+ },
+ [MDP_INTF2_TEAR_INTR] = {
+ MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(2),
+ MDP_INTF_REV_7xxx_INTR_TEAR_EN(2),
+ MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(2)
+ },
+ [MDP_INTF3_INTR] = {
+ MDP_INTF_REV_7xxx_INTR_CLEAR(3),
+ MDP_INTF_REV_7xxx_INTR_EN(3),
+ MDP_INTF_REV_7xxx_INTR_STATUS(3)
+ },
+ [MDP_INTF4_INTR] = {
+ MDP_INTF_REV_7xxx_INTR_CLEAR(4),
+ MDP_INTF_REV_7xxx_INTR_EN(4),
+ MDP_INTF_REV_7xxx_INTR_STATUS(4)
+ },
+ [MDP_INTF5_INTR] = {
+ MDP_INTF_REV_7xxx_INTR_CLEAR(5),
+ MDP_INTF_REV_7xxx_INTR_EN(5),
+ MDP_INTF_REV_7xxx_INTR_STATUS(5)
+ },
+ [MDP_INTF6_INTR] = {
+ MDP_INTF_REV_7xxx_INTR_CLEAR(6),
+ MDP_INTF_REV_7xxx_INTR_EN(6),
+ MDP_INTF_REV_7xxx_INTR_STATUS(6)
+ },
+ [MDP_INTF7_INTR] = {
+ MDP_INTF_REV_7xxx_INTR_CLEAR(7),
+ MDP_INTF_REV_7xxx_INTR_EN(7),
+ MDP_INTF_REV_7xxx_INTR_STATUS(7)
+ },
+ [MDP_INTF8_INTR] = {
+ MDP_INTF_REV_7xxx_INTR_CLEAR(8),
+ MDP_INTF_REV_7xxx_INTR_EN(8),
+ MDP_INTF_REV_7xxx_INTR_STATUS(8)
+ },
+};
+
+#define DPU_IRQ_REG(irq_idx) (irq_idx / 32)
+#define DPU_IRQ_MASK(irq_idx) (BIT(irq_idx % 32))
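+
+/*
+ * Illustrative example: irq_idx 37 decodes to register index 1 and bit 5,
+ * i.e. DPU_IRQ_REG(37) == 1 and DPU_IRQ_MASK(37) == BIT(5).
+ */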
+
+/**
+ * dpu_core_irq_callback_handler - dispatch core interrupts
+ * @dpu_kms: Pointer to DPU's KMS structure
+ * @irq_idx: interrupt index
+ */
+static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
+{
+ VERB("irq_idx=%d\n", irq_idx);
+
+ if (!dpu_kms->hw_intr->irq_tbl[irq_idx].cb) {
+ DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
+ return;
+ }
+
+ atomic_inc(&dpu_kms->hw_intr->irq_tbl[irq_idx].count);
+
+ /*
+ * Perform registered function callback
+ */
+ dpu_kms->hw_intr->irq_tbl[irq_idx].cb(dpu_kms->hw_intr->irq_tbl[irq_idx].arg, irq_idx);
+}
+
+irqreturn_t dpu_core_irq(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ struct dpu_hw_intr *intr = dpu_kms->hw_intr;
+ int reg_idx;
+ int irq_idx;
+ u32 irq_status;
+ u32 enable_mask;
+ int bit;
+ unsigned long irq_flags;
+
+ if (!intr)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ for (reg_idx = 0; reg_idx < MDP_INTR_MAX; reg_idx++) {
+ if (!test_bit(reg_idx, &intr->irq_mask))
+ continue;
+
+ /* Read interrupt status */
+ irq_status = DPU_REG_READ(&intr->hw, intr->intr_set[reg_idx].status_off);
+
+ /* Read enable mask */
+ enable_mask = DPU_REG_READ(&intr->hw, intr->intr_set[reg_idx].en_off);
+
+ /* and clear the interrupt */
+ if (irq_status)
+ DPU_REG_WRITE(&intr->hw, intr->intr_set[reg_idx].clr_off,
+ irq_status);
+
+ /* Finally update IRQ status based on enable mask */
+ irq_status &= enable_mask;
+
+ if (!irq_status)
+ continue;
+
+ /*
+ * Search through matching intr status.
+ */
+ while ((bit = ffs(irq_status)) != 0) {
+ irq_idx = DPU_IRQ_IDX(reg_idx, bit - 1);
+
+ dpu_core_irq_callback_handler(dpu_kms, irq_idx);
+
+ /*
+ * When callback finish, clear the irq_status
+ * with the matching mask. Once irq_status
+ * is all cleared, the search can be stopped.
+ */
+ irq_status &= ~BIT(bit - 1);
+ }
+ }
+
+ /* ensure register writes go through */
+ wmb();
+
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+ return IRQ_HANDLED;
+}
+
+static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
+{
+ int reg_idx;
+ const struct dpu_intr_reg *reg;
+ const char *dbgstr = NULL;
+ uint32_t cache_irq_mask;
+
+ if (!intr)
+ return -EINVAL;
+
+ if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ /*
+ * The cache_irq_mask and hardware RMW operations need to be done
+ * under irq_lock and it's the caller's responsibility to ensure it is
+ * held.
+ */
+ assert_spin_locked(&intr->irq_lock);
+
+ reg_idx = DPU_IRQ_REG(irq_idx);
+ reg = &intr->intr_set[reg_idx];
+
+ /* Is this interrupt register supported on the platform */
+ if (WARN_ON(!reg->en_off))
+ return -EINVAL;
+
+ cache_irq_mask = intr->cache_irq_mask[reg_idx];
+ if (cache_irq_mask & DPU_IRQ_MASK(irq_idx)) {
+ dbgstr = "already ";
+ } else {
+ dbgstr = "";
+
+ cache_irq_mask |= DPU_IRQ_MASK(irq_idx);
+ /* Cleaning any pending interrupt */
+ DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
+ /* Enabling interrupts with the new mask */
+ DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+
+ /* ensure register write goes through */
+ wmb();
+
+ intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+ }
+
+ pr_debug("DPU IRQ %d %senabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", irq_idx, dbgstr,
+ DPU_IRQ_MASK(irq_idx), cache_irq_mask);
+
+ return 0;
+}
+
+static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
+{
+ int reg_idx;
+ const struct dpu_intr_reg *reg;
+ const char *dbgstr = NULL;
+ uint32_t cache_irq_mask;
+
+ if (!intr)
+ return -EINVAL;
+
+ if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ /*
+ * The cache_irq_mask and hardware RMW operations need to be done
+ * under irq_lock and it's the caller's responsibility to ensure it is
+ * held.
+ */
+ assert_spin_locked(&intr->irq_lock);
+
+ reg_idx = DPU_IRQ_REG(irq_idx);
+ reg = &intr->intr_set[reg_idx];
+
+ cache_irq_mask = intr->cache_irq_mask[reg_idx];
+ if ((cache_irq_mask & DPU_IRQ_MASK(irq_idx)) == 0) {
+ dbgstr = "already ";
+ } else {
+ dbgstr = "";
+
+ cache_irq_mask &= ~DPU_IRQ_MASK(irq_idx);
+ /* Disable interrupts based on the new mask */
+ DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+ /* Cleaning any pending interrupt */
+ DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
+
+ /* ensure register write goes through */
+ wmb();
+
+ intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+ }
+
+ pr_debug("DPU IRQ %d %sdisabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", irq_idx, dbgstr,
+ DPU_IRQ_MASK(irq_idx), cache_irq_mask);
+
+ return 0;
+}
+
+static void dpu_clear_irqs(struct dpu_kms *dpu_kms)
+{
+ struct dpu_hw_intr *intr = dpu_kms->hw_intr;
+ int i;
+
+ if (!intr)
+ return;
+
+ for (i = 0; i < MDP_INTR_MAX; i++) {
+ if (test_bit(i, &intr->irq_mask))
+ DPU_REG_WRITE(&intr->hw,
+ intr->intr_set[i].clr_off, 0xffffffff);
+ }
+
+ /* ensure register writes go through */
+ wmb();
+}
+
+static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
+{
+ struct dpu_hw_intr *intr = dpu_kms->hw_intr;
+ int i;
+
+ if (!intr)
+ return;
+
+ for (i = 0; i < MDP_INTR_MAX; i++) {
+ if (test_bit(i, &intr->irq_mask))
+ DPU_REG_WRITE(&intr->hw,
+ intr->intr_set[i].en_off, 0x00000000);
+ }
+
+ /* ensure register writes go through */
+ wmb();
+}
+
+u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx)
+{
+ struct dpu_hw_intr *intr = dpu_kms->hw_intr;
+ int reg_idx;
+ unsigned long irq_flags;
+ u32 intr_status;
+
+ if (!intr)
+ return 0;
+
+ if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
+ DPU_ERROR("[%pS] invalid irq_idx=%d\n",
+ __builtin_return_address(0), irq_idx);
+ return 0;
+ }
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+
+ reg_idx = DPU_IRQ_REG(irq_idx);
+ intr_status = DPU_REG_READ(&intr->hw,
+ intr->intr_set[reg_idx].status_off) &
+ DPU_IRQ_MASK(irq_idx);
+ if (intr_status)
+ DPU_REG_WRITE(&intr->hw, intr->intr_set[reg_idx].clr_off,
+ intr_status);
+
+ /* ensure register writes go through */
+ wmb();
+
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+ return intr_status;
+}
+
+struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_intr *intr;
+ int nirq = MDP_INTR_MAX * 32;
+ unsigned int i;
+
+ if (!addr || !m)
+ return ERR_PTR(-EINVAL);
+
+ intr = kzalloc(struct_size(intr, irq_tbl, nirq), GFP_KERNEL);
+ if (!intr)
+ return ERR_PTR(-ENOMEM);
+
+ if (m->mdss_ver->core_major_ver >= 7)
+ intr->intr_set = dpu_intr_set_7xxx;
+ else
+ intr->intr_set = dpu_intr_set_legacy;
+
+ intr->hw.blk_addr = addr + m->mdp[0].base;
+
+ intr->total_irqs = nirq;
+
+ intr->irq_mask = BIT(MDP_SSPP_TOP0_INTR) |
+ BIT(MDP_SSPP_TOP0_INTR2) |
+ BIT(MDP_SSPP_TOP0_HIST_INTR);
+ for (i = 0; i < m->intf_count; i++) {
+ const struct dpu_intf_cfg *intf = &m->intf[i];
+
+ if (intf->type == INTF_NONE)
+ continue;
+
+ intr->irq_mask |= BIT(MDP_INTFn_INTR(intf->id));
+
+ if (intf->intr_tear_rd_ptr != -1)
+ intr->irq_mask |= BIT(DPU_IRQ_REG(intf->intr_tear_rd_ptr));
+ }
+
+ spin_lock_init(&intr->irq_lock);
+
+ return intr;
+}
+
+void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
+{
+ kfree(intr);
+}
+
+int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
+ void (*irq_cb)(void *arg, int irq_idx),
+ void *irq_arg)
+{
+ unsigned long irq_flags;
+ int ret;
+
+ if (!irq_cb) {
+ DPU_ERROR("invalid ird_idx:%d irq_cb:%ps\n", irq_idx, irq_cb);
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+ spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
+
+ if (unlikely(WARN_ON(dpu_kms->hw_intr->irq_tbl[irq_idx].cb))) {
+ spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+
+ return -EBUSY;
+ }
+
+ trace_dpu_core_irq_register_callback(irq_idx, irq_cb);
+ dpu_kms->hw_intr->irq_tbl[irq_idx].arg = irq_arg;
+ dpu_kms->hw_intr->irq_tbl[irq_idx].cb = irq_cb;
+
+ ret = dpu_hw_intr_enable_irq_locked(
+ dpu_kms->hw_intr,
+ irq_idx);
+ if (ret)
+ DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
+ irq_idx);
+ spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+
+ trace_dpu_irq_register_success(irq_idx);
+
+ return 0;
+}
+
+int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
+{
+ unsigned long irq_flags;
+ int ret;
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+ spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
+ trace_dpu_core_irq_unregister_callback(irq_idx);
+
+ ret = dpu_hw_intr_disable_irq_locked(dpu_kms->hw_intr, irq_idx);
+ if (ret)
+ DPU_ERROR("Fail to disable IRQ for irq_idx:%d: %d\n",
+ irq_idx, ret);
+
+ dpu_kms->hw_intr->irq_tbl[irq_idx].cb = NULL;
+ dpu_kms->hw_intr->irq_tbl[irq_idx].arg = NULL;
+
+ spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+
+ trace_dpu_irq_unregister_success(irq_idx);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
+{
+ struct dpu_kms *dpu_kms = s->private;
+ unsigned long irq_flags;
+ int i, irq_count;
+ void *cb;
+
+ for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
+ spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
+ irq_count = atomic_read(&dpu_kms->hw_intr->irq_tbl[i].count);
+ cb = dpu_kms->hw_intr->irq_tbl[i].cb;
+ spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+
+ if (irq_count || cb)
+ seq_printf(s, "idx:%d irq:%d cb:%ps\n", i, irq_count, cb);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);
+
+void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent)
+{
+ debugfs_create_file("core_irq", 0600, parent, dpu_kms,
+ &dpu_debugfs_core_irq_fops);
+}
+#endif
+
+void dpu_core_irq_preinstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ int i;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ dpu_clear_irqs(dpu_kms);
+ dpu_disable_all_irqs(dpu_kms);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
+ atomic_set(&dpu_kms->hw_intr->irq_tbl[i].count, 0);
+}
+
+void dpu_core_irq_uninstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ int i;
+
+ if (!dpu_kms->hw_intr)
+ return;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
+ if (dpu_kms->hw_intr->irq_tbl[i].cb)
+ DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
+
+ dpu_clear_irqs(dpu_kms);
+ dpu_disable_all_irqs(dpu_kms);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
new file mode 100644
index 0000000000..dab761e548
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_INTERRUPTS_H
+#define _DPU_HW_INTERRUPTS_H
+
+#include <linux/types.h>
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_mdss.h"
+
+/* When making changes be sure to sync with dpu_intr_set */
+enum dpu_hw_intr_reg {
+ MDP_SSPP_TOP0_INTR,
+ MDP_SSPP_TOP0_INTR2,
+ MDP_SSPP_TOP0_HIST_INTR,
+ /* All MDP_INTFn_INTR should come sequentially */
+ MDP_INTF0_INTR,
+ MDP_INTF1_INTR,
+ MDP_INTF2_INTR,
+ MDP_INTF3_INTR,
+ MDP_INTF4_INTR,
+ MDP_INTF5_INTR,
+ MDP_INTF6_INTR,
+ MDP_INTF7_INTR,
+ MDP_INTF8_INTR,
+ MDP_INTF1_TEAR_INTR,
+ MDP_INTF2_TEAR_INTR,
+ MDP_AD4_0_INTR,
+ MDP_AD4_1_INTR,
+ MDP_INTR_MAX,
+};
+
+#define MDP_INTFn_INTR(intf) (MDP_INTF0_INTR + (intf - INTF_0))
+
+#define DPU_IRQ_IDX(reg_idx, offset) (reg_idx * 32 + offset)
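+
+/* e.g. DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 3) == 1 * 32 + 3 == 35 */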
+
+/**
+ * struct dpu_hw_intr: hw interrupts handling data structure
+ * @hw: virtual address mapping
+ * @cache_irq_mask: array of IRQ enable masks reg storage created during init
+ * @save_irq_status: array of IRQ status reg storage created during init
+ * @total_irqs: total number of irq_idx mapped in the hw_interrupts
+ * @irq_lock: spinlock for accessing IRQ resources
+ * @irq_mask: mask of interrupt register sets enabled on this platform
+ * @intr_set: interrupt register set matching the DPU core revision
+ * @irq_tbl: array of IRQ callbacks
+ */
+struct dpu_hw_intr {
+ struct dpu_hw_blk_reg_map hw;
+ u32 cache_irq_mask[MDP_INTR_MAX];
+ u32 *save_irq_status;
+ u32 total_irqs;
+ spinlock_t irq_lock;
+ unsigned long irq_mask;
+ const struct dpu_intr_reg *intr_set;
+
+ struct {
+ void (*cb)(void *arg, int irq_idx);
+ void *arg;
+ atomic_t count;
+ } irq_tbl[];
+};
+
+/**
+ * dpu_hw_intr_init(): Initializes the interrupts hw object
+ * @addr: mapped register io address of MDP
+ * @m: pointer to MDSS catalog data
+ */
+struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_intr_destroy(): Cleanup interrupts hw object
+ * @intr: pointer to interrupts hw object
+ */
+void dpu_hw_intr_destroy(struct dpu_hw_intr *intr);
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
new file mode 100644
index 0000000000..da071b1c02
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -0,0 +1,583 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_intf.h"
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+
+#include <linux/iopoll.h>
+
+#define INTF_TIMING_ENGINE_EN 0x000
+#define INTF_CONFIG 0x004
+#define INTF_HSYNC_CTL 0x008
+#define INTF_VSYNC_PERIOD_F0 0x00C
+#define INTF_VSYNC_PERIOD_F1 0x010
+#define INTF_VSYNC_PULSE_WIDTH_F0 0x014
+#define INTF_VSYNC_PULSE_WIDTH_F1 0x018
+#define INTF_DISPLAY_V_START_F0 0x01C
+#define INTF_DISPLAY_V_START_F1 0x020
+#define INTF_DISPLAY_V_END_F0 0x024
+#define INTF_DISPLAY_V_END_F1 0x028
+#define INTF_ACTIVE_V_START_F0 0x02C
+#define INTF_ACTIVE_V_START_F1 0x030
+#define INTF_ACTIVE_V_END_F0 0x034
+#define INTF_ACTIVE_V_END_F1 0x038
+#define INTF_DISPLAY_HCTL 0x03C
+#define INTF_ACTIVE_HCTL 0x040
+#define INTF_BORDER_COLOR 0x044
+#define INTF_UNDERFLOW_COLOR 0x048
+#define INTF_HSYNC_SKEW 0x04C
+#define INTF_POLARITY_CTL 0x050
+#define INTF_TEST_CTL 0x054
+#define INTF_TP_COLOR0 0x058
+#define INTF_TP_COLOR1 0x05C
+#define INTF_CONFIG2 0x060
+#define INTF_DISPLAY_DATA_HCTL 0x064
+#define INTF_ACTIVE_DATA_HCTL 0x068
+
+#define INTF_DSI_CMD_MODE_TRIGGER_EN 0x084
+#define INTF_PANEL_FORMAT 0x090
+
+#define INTF_FRAME_LINE_COUNT_EN 0x0A8
+#define INTF_FRAME_COUNT 0x0AC
+#define INTF_LINE_COUNT 0x0B0
+
+#define INTF_DEFLICKER_CONFIG 0x0F0
+#define INTF_DEFLICKER_STRNG_COEFF 0x0F4
+#define INTF_DEFLICKER_WEAK_COEFF 0x0F8
+
+#define INTF_TPG_ENABLE 0x100
+#define INTF_TPG_MAIN_CONTROL 0x104
+#define INTF_TPG_VIDEO_CONFIG 0x108
+#define INTF_TPG_COMPONENT_LIMITS 0x10C
+#define INTF_TPG_RECTANGLE 0x110
+#define INTF_TPG_INITIAL_VALUE 0x114
+#define INTF_TPG_BLK_WHITE_PATTERN_FRAMES 0x118
+#define INTF_TPG_RGB_MAPPING 0x11C
+#define INTF_PROG_FETCH_START 0x170
+#define INTF_PROG_ROT_START 0x174
+
+#define INTF_MISR_CTRL 0x180
+#define INTF_MISR_SIGNATURE 0x184
+
+#define INTF_MUX 0x25C
+#define INTF_STATUS 0x26C
+#define INTF_AVR_CONTROL 0x270
+#define INTF_AVR_MODE 0x274
+#define INTF_AVR_TRIGGER 0x278
+#define INTF_AVR_VTOTAL 0x27C
+#define INTF_TEAR_MDP_VSYNC_SEL 0x280
+#define INTF_TEAR_TEAR_CHECK_EN 0x284
+#define INTF_TEAR_SYNC_CONFIG_VSYNC 0x288
+#define INTF_TEAR_SYNC_CONFIG_HEIGHT 0x28C
+#define INTF_TEAR_SYNC_WRCOUNT 0x290
+#define INTF_TEAR_VSYNC_INIT_VAL 0x294
+#define INTF_TEAR_INT_COUNT_VAL 0x298
+#define INTF_TEAR_SYNC_THRESH 0x29C
+#define INTF_TEAR_START_POS 0x2A0
+#define INTF_TEAR_RD_PTR_IRQ 0x2A4
+#define INTF_TEAR_WR_PTR_IRQ 0x2A8
+#define INTF_TEAR_OUT_LINE_COUNT 0x2AC
+#define INTF_TEAR_LINE_COUNT 0x2B0
+#define INTF_TEAR_AUTOREFRESH_CONFIG 0x2B4
+
+#define INTF_CFG_ACTIVE_H_EN BIT(29)
+#define INTF_CFG_ACTIVE_V_EN BIT(30)
+
+#define INTF_CFG2_DATABUS_WIDEN BIT(0)
+#define INTF_CFG2_DATA_HCTL_EN BIT(4)
+#define INTF_CFG2_DCE_DATA_COMPRESS BIT(12)
+
+
+static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
+ const struct dpu_hw_intf_timing_params *p,
+ const struct dpu_format *fmt)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 hsync_period, vsync_period;
+ u32 display_v_start, display_v_end;
+ u32 hsync_start_x, hsync_end_x;
+ u32 hsync_data_start_x, hsync_data_end_x;
+ u32 active_h_start, active_h_end;
+ u32 active_v_start, active_v_end;
+ u32 active_hctl, display_hctl, hsync_ctl;
+ u32 polarity_ctl, den_polarity;
+ u32 panel_format;
+ u32 intf_cfg, intf_cfg2 = 0;
+ u32 display_data_hctl = 0, active_data_hctl = 0;
+ u32 data_width;
+ bool dp_intf = false;
+
+ /* read interface_cfg */
+ intf_cfg = DPU_REG_READ(c, INTF_CONFIG);
+
+ if (ctx->cap->type == INTF_DP)
+ dp_intf = true;
+
+ hsync_period = p->hsync_pulse_width + p->h_back_porch + p->width +
+ p->h_front_porch;
+ vsync_period = p->vsync_pulse_width + p->v_back_porch + p->height +
+ p->v_front_porch;
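+
+ /*
+ * Illustrative example: a CEA 1080p60 mode (1920x1080 with
+ * hfp/hsync/hbp = 88/44/148 and vfp/vsync/vbp = 4/5/36) gives
+ * hsync_period = 44 + 148 + 1920 + 88 = 2200 and
+ * vsync_period = 5 + 36 + 1080 + 4 = 1125.
+ */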
+
+ display_v_start = ((p->vsync_pulse_width + p->v_back_porch) *
+ hsync_period) + p->hsync_skew;
+ display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
+ p->hsync_skew - 1;
+
+ hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
+ hsync_end_x = hsync_period - p->h_front_porch - 1;
+
+ if (p->width != p->xres) { /* border fill added */
+ active_h_start = hsync_start_x;
+ active_h_end = active_h_start + p->xres - 1;
+ } else {
+ active_h_start = 0;
+ active_h_end = 0;
+ }
+
+ if (p->height != p->yres) { /* border fill added */
+ active_v_start = display_v_start;
+ active_v_end = active_v_start + (p->yres * hsync_period) - 1;
+ } else {
+ active_v_start = 0;
+ active_v_end = 0;
+ }
+
+ if (active_h_end) {
+ active_hctl = (active_h_end << 16) | active_h_start;
+ intf_cfg |= INTF_CFG_ACTIVE_H_EN;
+ } else {
+ active_hctl = 0;
+ }
+
+ if (active_v_end)
+ intf_cfg |= INTF_CFG_ACTIVE_V_EN;
+
+ hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
+ display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+ /*
+ * DATA_HCTL_EN controls data timing which can be different from
+ * video timing. It is recommended to enable it for all cases, except
+ * if compression is enabled in 1 pixel per clock mode
+ */
+ if (p->wide_bus_en)
+ intf_cfg2 |= INTF_CFG2_DATABUS_WIDEN | INTF_CFG2_DATA_HCTL_EN;
+
+ data_width = p->width;
+
+ hsync_data_start_x = hsync_start_x;
+ hsync_data_end_x = hsync_start_x + data_width - 1;
+
+ display_data_hctl = (hsync_data_end_x << 16) | hsync_data_start_x;
+
+ if (dp_intf) {
+ /* DP timing adjustment */
+ display_v_start += p->hsync_pulse_width + p->h_back_porch;
+ display_v_end -= p->h_front_porch;
+
+ active_h_start = hsync_start_x;
+ active_h_end = active_h_start + p->xres - 1;
+ active_v_start = display_v_start;
+ active_v_end = active_v_start + (p->yres * hsync_period) - 1;
+
+ active_hctl = (active_h_end << 16) | active_h_start;
+ display_hctl = active_hctl;
+
+ intf_cfg |= INTF_CFG_ACTIVE_H_EN | INTF_CFG_ACTIVE_V_EN;
+ }
+
+ den_polarity = 0;
+ polarity_ctl = (den_polarity << 2) | /* DEN Polarity */
+ (p->vsync_polarity << 1) | /* VSYNC Polarity */
+ (p->hsync_polarity << 0); /* HSYNC Polarity */
+
+ if (!DPU_FORMAT_IS_YUV(fmt))
+ panel_format = (fmt->bits[C0_G_Y] |
+ (fmt->bits[C1_B_Cb] << 2) |
+ (fmt->bits[C2_R_Cr] << 4) |
+ (0x21 << 8));
+ else
+ /* Interface treats all the pixel data in RGB888 format */
+ panel_format = (COLOR_8BIT |
+ (COLOR_8BIT << 2) |
+ (COLOR_8BIT << 4) |
+ (0x21 << 8));
+
+ DPU_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl);
+ DPU_REG_WRITE(c, INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period);
+ DPU_REG_WRITE(c, INTF_VSYNC_PULSE_WIDTH_F0,
+ p->vsync_pulse_width * hsync_period);
+ DPU_REG_WRITE(c, INTF_DISPLAY_HCTL, display_hctl);
+ DPU_REG_WRITE(c, INTF_DISPLAY_V_START_F0, display_v_start);
+ DPU_REG_WRITE(c, INTF_DISPLAY_V_END_F0, display_v_end);
+ DPU_REG_WRITE(c, INTF_ACTIVE_HCTL, active_hctl);
+ DPU_REG_WRITE(c, INTF_ACTIVE_V_START_F0, active_v_start);
+ DPU_REG_WRITE(c, INTF_ACTIVE_V_END_F0, active_v_end);
+ DPU_REG_WRITE(c, INTF_BORDER_COLOR, p->border_clr);
+ DPU_REG_WRITE(c, INTF_UNDERFLOW_COLOR, p->underflow_clr);
+ DPU_REG_WRITE(c, INTF_HSYNC_SKEW, p->hsync_skew);
+ DPU_REG_WRITE(c, INTF_POLARITY_CTL, polarity_ctl);
+ DPU_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3);
+ DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg);
+ DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
+ if (ctx->cap->features & BIT(DPU_DATA_HCTL_EN)) {
+ DPU_REG_WRITE(c, INTF_CONFIG2, intf_cfg2);
+ DPU_REG_WRITE(c, INTF_DISPLAY_DATA_HCTL, display_data_hctl);
+ DPU_REG_WRITE(c, INTF_ACTIVE_DATA_HCTL, active_data_hctl);
+ }
+}
+
+static void dpu_hw_intf_enable_timing_engine(
+ struct dpu_hw_intf *intf,
+ u8 enable)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+ /* Note: Display interface select is handled in top block hw layer */
+ DPU_REG_WRITE(c, INTF_TIMING_ENGINE_EN, enable != 0);
+}
+
+static void dpu_hw_intf_setup_prg_fetch(
+ struct dpu_hw_intf *intf,
+ const struct dpu_hw_intf_prog_fetch *fetch)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+ int fetch_enable;
+
+ /*
+ * Fetch should always be outside the active lines. If the fetching
+ * is programmed within active region, hardware behavior is unknown.
+ */
+
+ fetch_enable = DPU_REG_READ(c, INTF_CONFIG);
+ if (fetch->enable) {
+ fetch_enable |= BIT(31);
+ DPU_REG_WRITE(c, INTF_PROG_FETCH_START,
+ fetch->fetch_start);
+ } else {
+ fetch_enable &= ~BIT(31);
+ }
+
+ DPU_REG_WRITE(c, INTF_CONFIG, fetch_enable);
+}
+
+static void dpu_hw_intf_bind_pingpong_blk(
+ struct dpu_hw_intf *intf,
+ const enum dpu_pingpong pp)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+ u32 mux_cfg;
+
+ mux_cfg = DPU_REG_READ(c, INTF_MUX);
+ mux_cfg &= ~0xf;
+
+ if (pp)
+ mux_cfg |= (pp - PINGPONG_0) & 0x7;
+ else
+ mux_cfg |= 0xf;
+
+ DPU_REG_WRITE(c, INTF_MUX, mux_cfg);
+}
+
+static void dpu_hw_intf_get_status(
+ struct dpu_hw_intf *intf,
+ struct dpu_hw_intf_status *s)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+ unsigned long cap = intf->cap->features;
+
+ if (cap & BIT(DPU_INTF_STATUS_SUPPORTED))
+ s->is_en = DPU_REG_READ(c, INTF_STATUS) & BIT(0);
+ else
+ s->is_en = DPU_REG_READ(c, INTF_TIMING_ENGINE_EN);
+
+ s->is_prog_fetch_en = !!(DPU_REG_READ(c, INTF_CONFIG) & BIT(31));
+ if (s->is_en) {
+ s->frame_count = DPU_REG_READ(c, INTF_FRAME_COUNT);
+ s->line_count = DPU_REG_READ(c, INTF_LINE_COUNT);
+ } else {
+ s->line_count = 0;
+ s->frame_count = 0;
+ }
+}
+
+static u32 dpu_hw_intf_get_line_count(struct dpu_hw_intf *intf)
+{
+ struct dpu_hw_blk_reg_map *c;
+
+ if (!intf)
+ return 0;
+
+ c = &intf->hw;
+
+ return DPU_REG_READ(c, INTF_LINE_COUNT);
+}
+
+static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf)
+{
+ dpu_hw_setup_misr(&intf->hw, INTF_MISR_CTRL, 0x1);
+}
+
+static int dpu_hw_intf_collect_misr(struct dpu_hw_intf *intf, u32 *misr_value)
+{
+ return dpu_hw_collect_misr(&intf->hw, INTF_MISR_CTRL, INTF_MISR_SIGNATURE, misr_value);
+}
+
+static int dpu_hw_intf_enable_te(struct dpu_hw_intf *intf,
+ struct dpu_hw_tear_check *te)
+{
+ struct dpu_hw_blk_reg_map *c;
+ int cfg;
+
+ if (!intf)
+ return -EINVAL;
+
+ c = &intf->hw;
+
+ cfg = BIT(19); /* VSYNC_COUNTER_EN */
+ if (te->hw_vsync_mode)
+ cfg |= BIT(20);
+
+ cfg |= te->vsync_count;
+
+ DPU_REG_WRITE(c, INTF_TEAR_SYNC_CONFIG_VSYNC, cfg);
+ DPU_REG_WRITE(c, INTF_TEAR_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
+ DPU_REG_WRITE(c, INTF_TEAR_VSYNC_INIT_VAL, te->vsync_init_val);
+ DPU_REG_WRITE(c, INTF_TEAR_RD_PTR_IRQ, te->rd_ptr_irq);
+ DPU_REG_WRITE(c, INTF_TEAR_START_POS, te->start_pos);
+ DPU_REG_WRITE(c, INTF_TEAR_SYNC_THRESH,
+ ((te->sync_threshold_continue << 16) |
+ te->sync_threshold_start));
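+ /*
+ * Illustrative example: with start_pos = yres and a hypothetical
+ * sync_threshold_start of 4, the write pointer counter starts at
+ * yres + 5, i.e. just past the start-threshold window.
+ */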
+ DPU_REG_WRITE(c, INTF_TEAR_SYNC_WRCOUNT,
+ (te->start_pos + te->sync_threshold_start + 1));
+
+ DPU_REG_WRITE(c, INTF_TEAR_TEAR_CHECK_EN, 1);
+
+ return 0;
+}
+
+static void dpu_hw_intf_setup_autorefresh_config(struct dpu_hw_intf *intf,
+ u32 frame_count, bool enable)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 refresh_cfg;
+
+ c = &intf->hw;
+ refresh_cfg = DPU_REG_READ(c, INTF_TEAR_AUTOREFRESH_CONFIG);
+ if (enable)
+ refresh_cfg = BIT(31) | frame_count;
+ else
+ refresh_cfg &= ~BIT(31);
+
+ DPU_REG_WRITE(c, INTF_TEAR_AUTOREFRESH_CONFIG, refresh_cfg);
+}
+
+/*
+ * dpu_hw_intf_get_autorefresh_config - Get autorefresh config from HW
+ * @intf: DPU intf structure
+ * @frame_count: Used to return the current frame count from hw
+ *
+ * Returns: True if autorefresh enabled, false if disabled.
+ */
+static bool dpu_hw_intf_get_autorefresh_config(struct dpu_hw_intf *intf,
+ u32 *frame_count)
+{
+ u32 val = DPU_REG_READ(&intf->hw, INTF_TEAR_AUTOREFRESH_CONFIG);
+
+ if (frame_count != NULL)
+ *frame_count = val & 0xffff;
+ return !!(val & BIT(31));
+}
+
+static int dpu_hw_intf_disable_te(struct dpu_hw_intf *intf)
+{
+ struct dpu_hw_blk_reg_map *c;
+
+ if (!intf)
+ return -EINVAL;
+
+ c = &intf->hw;
+ DPU_REG_WRITE(c, INTF_TEAR_TEAR_CHECK_EN, 0);
+ return 0;
+}
+
+static int dpu_hw_intf_connect_external_te(struct dpu_hw_intf *intf,
+ bool enable_external_te)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 cfg;
+ int orig;
+
+ if (!intf)
+ return -EINVAL;
+
+ c = &intf->hw;
+ cfg = DPU_REG_READ(c, INTF_TEAR_SYNC_CONFIG_VSYNC);
+ orig = (bool)(cfg & BIT(20));
+ if (enable_external_te)
+ cfg |= BIT(20);
+ else
+ cfg &= ~BIT(20);
+ DPU_REG_WRITE(c, INTF_TEAR_SYNC_CONFIG_VSYNC, cfg);
+ trace_dpu_intf_connect_ext_te(intf->idx - INTF_0, cfg);
+
+ return orig;
+}
+
+static int dpu_hw_intf_get_vsync_info(struct dpu_hw_intf *intf,
+ struct dpu_hw_pp_vsync_info *info)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 val;
+
+ if (!intf || !info)
+ return -EINVAL;
+
+ c = &intf->hw;
+
+ val = DPU_REG_READ(c, INTF_TEAR_VSYNC_INIT_VAL);
+ info->rd_ptr_init_val = val & 0xffff;
+
+ val = DPU_REG_READ(c, INTF_TEAR_INT_COUNT_VAL);
+ info->rd_ptr_frame_count = (val & 0xffff0000) >> 16;
+ info->rd_ptr_line_count = val & 0xffff;
+
+ val = DPU_REG_READ(c, INTF_TEAR_LINE_COUNT);
+ info->wr_ptr_line_count = val & 0xffff;
+
+ val = DPU_REG_READ(c, INTF_FRAME_COUNT);
+ info->intf_frame_count = val;
+
+ return 0;
+}
+
+static void dpu_hw_intf_vsync_sel(struct dpu_hw_intf *intf,
+ u32 vsync_source)
+{
+ struct dpu_hw_blk_reg_map *c;
+
+ if (!intf)
+ return;
+
+ c = &intf->hw;
+
+ DPU_REG_WRITE(c, INTF_TEAR_MDP_VSYNC_SEL, (vsync_source & 0xf));
+}
+
+static void dpu_hw_intf_disable_autorefresh(struct dpu_hw_intf *intf,
+ uint32_t encoder_id, u16 vdisplay)
+{
+ struct dpu_hw_pp_vsync_info info;
+ int trial = 0;
+
+ /* If autorefresh is already disabled, we have nothing to do */
+ if (!dpu_hw_intf_get_autorefresh_config(intf, NULL))
+ return;
+
+ /*
+ * If autorefresh is enabled, disable it and make sure it is safe to
+ * proceed with current frame commit/push. The sequence followed is:
+ * 1. Disable TE
+ * 2. Disable autorefresh config
+ * 3. Poll for frame transfer ongoing to be false
+ * 4. Enable TE back
+ */
+
+ dpu_hw_intf_connect_external_te(intf, false);
+ dpu_hw_intf_setup_autorefresh_config(intf, 0, false);
+
+ do {
+ udelay(DPU_ENC_MAX_POLL_TIMEOUT_US);
+ if ((trial * DPU_ENC_MAX_POLL_TIMEOUT_US)
+ > (KICKOFF_TIMEOUT_MS * USEC_PER_MSEC)) {
+ DPU_ERROR("enc%d intf%d disable autorefresh failed\n",
+ encoder_id, intf->idx - INTF_0);
+ break;
+ }
+
+ trial++;
+
+ dpu_hw_intf_get_vsync_info(intf, &info);
+ } while (info.wr_ptr_line_count > 0 &&
+ info.wr_ptr_line_count < vdisplay);
+
+ dpu_hw_intf_connect_external_te(intf, true);
+
+ DPU_DEBUG("enc%d intf%d disabled autorefresh\n",
+ encoder_id, intf->idx - INTF_0);
+}
+
+static void dpu_hw_intf_program_intf_cmd_cfg(struct dpu_hw_intf *ctx,
+ struct dpu_hw_intf_cmd_mode_cfg *cmd_mode_cfg)
+{
+ u32 intf_cfg2 = DPU_REG_READ(&ctx->hw, INTF_CONFIG2);
+
+ if (cmd_mode_cfg->data_compress)
+ intf_cfg2 |= INTF_CFG2_DCE_DATA_COMPRESS;
+
+ DPU_REG_WRITE(&ctx->hw, INTF_CONFIG2, intf_cfg2);
+}
+
+static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
+ unsigned long cap, const struct dpu_mdss_version *mdss_rev)
+{
+ ops->setup_timing_gen = dpu_hw_intf_setup_timing_engine;
+ ops->setup_prg_fetch = dpu_hw_intf_setup_prg_fetch;
+ ops->get_status = dpu_hw_intf_get_status;
+ ops->enable_timing = dpu_hw_intf_enable_timing_engine;
+ ops->get_line_count = dpu_hw_intf_get_line_count;
+ if (cap & BIT(DPU_INTF_INPUT_CTRL))
+ ops->bind_pingpong_blk = dpu_hw_intf_bind_pingpong_blk;
+ ops->setup_misr = dpu_hw_intf_setup_misr;
+ ops->collect_misr = dpu_hw_intf_collect_misr;
+
+ if (cap & BIT(DPU_INTF_TE)) {
+ ops->enable_tearcheck = dpu_hw_intf_enable_te;
+ ops->disable_tearcheck = dpu_hw_intf_disable_te;
+ ops->connect_external_te = dpu_hw_intf_connect_external_te;
+ ops->vsync_sel = dpu_hw_intf_vsync_sel;
+ ops->disable_autorefresh = dpu_hw_intf_disable_autorefresh;
+ }
+
+ if (mdss_rev->core_major_ver >= 7)
+ ops->program_intf_cmd_cfg = dpu_hw_intf_program_intf_cmd_cfg;
+}
+
+struct dpu_hw_intf *dpu_hw_intf_init(const struct dpu_intf_cfg *cfg,
+ void __iomem *addr, const struct dpu_mdss_version *mdss_rev)
+{
+ struct dpu_hw_intf *c;
+
+ if (cfg->type == INTF_NONE) {
+ DPU_DEBUG("Skip intf %d with type NONE\n", cfg->id - INTF_0);
+ return NULL;
+ }
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ c->hw.blk_addr = addr + cfg->base;
+ c->hw.log_mask = DPU_DBG_MASK_INTF;
+
+ /*
+ * Assign ops
+ */
+ c->idx = cfg->id;
+ c->cap = cfg;
+ _setup_intf_ops(&c->ops, c->cap->features, mdss_rev);
+
+ return c;
+}
+
+void dpu_hw_intf_destroy(struct dpu_hw_intf *intf)
+{
+ kfree(intf);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
new file mode 100644
index 0000000000..4e86108bee
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_INTF_H
+#define _DPU_HW_INTF_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+struct dpu_hw_intf;
+
+/* intf timing settings */
+struct dpu_hw_intf_timing_params {
+ u32 width; /* active width */
+ u32 height; /* active height */
+ u32 xres; /* Display panel width */
+ u32 yres; /* Display panel height */
+
+ u32 h_back_porch;
+ u32 h_front_porch;
+ u32 v_back_porch;
+ u32 v_front_porch;
+ u32 hsync_pulse_width;
+ u32 vsync_pulse_width;
+ u32 hsync_polarity;
+ u32 vsync_polarity;
+ u32 border_clr;
+ u32 underflow_clr;
+ u32 hsync_skew;
+
+ bool wide_bus_en;
+};
+
+struct dpu_hw_intf_prog_fetch {
+ u8 enable;
+ /* vsync counter for the front porch pixel line */
+ u32 fetch_start;
+};
+
+struct dpu_hw_intf_status {
+ u8 is_en; /* interface timing engine is enabled or not */
+ u8 is_prog_fetch_en; /* interface prog fetch counter is enabled or not */
+ u32 frame_count; /* frame count since timing engine enabled */
+ u32 line_count; /* current line count including blanking */
+};
+
+struct dpu_hw_intf_cmd_mode_cfg {
+ u8 data_compress; /* enable data compress between dpu and dsi */
+};
+
+/**
+ * struct dpu_hw_intf_ops : Interface to the interface Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ * @ setup_timing_gen : programs the timing engine
+ * @ setup_prg_fetch : enables/disables the programmable fetch logic
+ * @ enable_timing: enable/disable timing engine
+ * @ get_status: returns if timing engine is enabled or not
+ * @ get_line_count: reads current vertical line counter
+ * @bind_pingpong_blk: enable/disable the connection with pingpong which will
+ * feed pixels to this interface
+ * @setup_misr: enable/disable MISR
+ * @collect_misr: read MISR signature
+ * @enable_tearcheck: Enables vsync generation and sets up init value of read
+ * pointer and programs the tear check configuration
+ * @disable_tearcheck: Disables tearcheck block
+ * @connect_external_te: Read, modify, write to either set or clear listening to external TE
+ * Return: 1 if TE was originally connected, 0 if not, or -ERROR
+ * @disable_autorefresh: Disable autorefresh if it is enabled
+ * @vsync_sel: Select vsync signal for tear-effect configuration
+ * @program_intf_cmd_cfg: Program the DPU to interface datapath for command mode
+ */
+struct dpu_hw_intf_ops {
+ void (*setup_timing_gen)(struct dpu_hw_intf *intf,
+ const struct dpu_hw_intf_timing_params *p,
+ const struct dpu_format *fmt);
+
+ void (*setup_prg_fetch)(struct dpu_hw_intf *intf,
+ const struct dpu_hw_intf_prog_fetch *fetch);
+
+ void (*enable_timing)(struct dpu_hw_intf *intf,
+ u8 enable);
+
+ void (*get_status)(struct dpu_hw_intf *intf,
+ struct dpu_hw_intf_status *status);
+
+ u32 (*get_line_count)(struct dpu_hw_intf *intf);
+
+ void (*bind_pingpong_blk)(struct dpu_hw_intf *intf,
+ const enum dpu_pingpong pp);
+ void (*setup_misr)(struct dpu_hw_intf *intf);
+ int (*collect_misr)(struct dpu_hw_intf *intf, u32 *misr_value);
+
+ // Tearcheck on INTF since DPU 5.0.0
+
+ int (*enable_tearcheck)(struct dpu_hw_intf *intf, struct dpu_hw_tear_check *cfg);
+
+ int (*disable_tearcheck)(struct dpu_hw_intf *intf);
+
+ int (*connect_external_te)(struct dpu_hw_intf *intf, bool enable_external_te);
+
+ void (*vsync_sel)(struct dpu_hw_intf *intf, u32 vsync_source);
+
+ /**
+ * Disable autorefresh if enabled
+ */
+ void (*disable_autorefresh)(struct dpu_hw_intf *intf, uint32_t encoder_id, u16 vdisplay);
+
+ void (*program_intf_cmd_cfg)(struct dpu_hw_intf *intf,
+ struct dpu_hw_intf_cmd_mode_cfg *cmd_mode_cfg);
+};
+
+struct dpu_hw_intf {
+ struct dpu_hw_blk_reg_map hw;
+
+ /* intf */
+ enum dpu_intf idx;
+ const struct dpu_intf_cfg *cap;
+
+ /* ops */
+ struct dpu_hw_intf_ops ops;
+};
+
+/**
+ * dpu_hw_intf_init() - Initializes the INTF driver for the passed
+ * interface catalog entry.
+ * @cfg: interface catalog entry for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @mdss_rev: dpu core's major and minor versions
+ */
+struct dpu_hw_intf *dpu_hw_intf_init(const struct dpu_intf_cfg *cfg,
+ void __iomem *addr, const struct dpu_mdss_version *mdss_rev);
+
+/**
+ * dpu_hw_intf_destroy(): Destroys INTF driver context
+ * @intf: Pointer to INTF driver context
+ */
+void dpu_hw_intf_destroy(struct dpu_hw_intf *intf);
+
+#endif /*_DPU_HW_INTF_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
new file mode 100644
index 0000000000..a590c1f746
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
+ */
+
+#include "dpu_kms.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_mdss.h"
+
+#define LM_OP_MODE 0x00
+#define LM_OUT_SIZE 0x04
+#define LM_BORDER_COLOR_0 0x08
+#define LM_BORDER_COLOR_1 0x010
+
+/* These register are offset to mixer base + stage base */
+#define LM_BLEND0_OP 0x00
+#define LM_BLEND0_CONST_ALPHA 0x04
+#define LM_FG_COLOR_FILL_COLOR_0 0x08
+#define LM_FG_COLOR_FILL_COLOR_1 0x0C
+#define LM_FG_COLOR_FILL_SIZE 0x10
+#define LM_FG_COLOR_FILL_XY 0x14
+
+#define LM_BLEND0_FG_ALPHA 0x04
+#define LM_BLEND0_BG_ALPHA 0x08
+
+#define LM_MISR_CTRL 0x310
+#define LM_MISR_SIGNATURE 0x314
+
+
+/**
+ * _stage_offset(): returns the relative offset of the blend registers
+ * for the stage to be setup
+ * @ctx: mixer ctx contains the mixer to be programmed
+ * @stage: stage index to setup
+ */
+static inline int _stage_offset(struct dpu_hw_mixer *ctx, enum dpu_stage stage)
+{
+ const struct dpu_lm_sub_blks *sblk = ctx->cap->sblk;
+
+ if (stage != DPU_STAGE_BASE && stage <= sblk->maxblendstages)
+ return sblk->blendstage_base[stage - DPU_STAGE_0];
+
+ return -EINVAL;
+}
+
+static void dpu_hw_lm_setup_out(struct dpu_hw_mixer *ctx,
+ struct dpu_hw_mixer_cfg *mixer)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 outsize;
+ u32 op_mode;
+
+ op_mode = DPU_REG_READ(c, LM_OP_MODE);
+
+ outsize = mixer->out_height << 16 | mixer->out_width;
+ DPU_REG_WRITE(c, LM_OUT_SIZE, outsize);
+
+ /* SPLIT_LEFT_RIGHT */
+ if (mixer->right_mixer)
+ op_mode |= BIT(31);
+ else
+ op_mode &= ~BIT(31);
+ DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+static void dpu_hw_lm_setup_border_color(struct dpu_hw_mixer *ctx,
+ struct dpu_mdss_color *color,
+ u8 border_en)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ if (border_en) {
+ DPU_REG_WRITE(c, LM_BORDER_COLOR_0,
+ (color->color_0 & 0xFFF) |
+ ((color->color_1 & 0xFFF) << 0x10));
+ DPU_REG_WRITE(c, LM_BORDER_COLOR_1,
+ (color->color_2 & 0xFFF) |
+ ((color->color_3 & 0xFFF) << 0x10));
+ }
+}
+
+static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx)
+{
+ dpu_hw_setup_misr(&ctx->hw, LM_MISR_CTRL, 0x0);
+}
+
+static int dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx, u32 *misr_value)
+{
+ return dpu_hw_collect_misr(&ctx->hw, LM_MISR_CTRL, LM_MISR_SIGNATURE, misr_value);
+}
+
+static void dpu_hw_lm_setup_blend_config_combined_alpha(struct dpu_hw_mixer *ctx,
+ u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int stage_off;
+ u32 const_alpha;
+
+ if (stage == DPU_STAGE_BASE)
+ return;
+
+ stage_off = _stage_offset(ctx, stage);
+ if (WARN_ON(stage_off < 0))
+ return;
+
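+ /* e.g. fg_alpha = 0xff with bg_alpha = 0x00 packs to 0x00ff0000 */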
+ const_alpha = (bg_alpha & 0xFF) | ((fg_alpha & 0xFF) << 16);
+ DPU_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, const_alpha);
+ DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
+static void dpu_hw_lm_setup_blend_config(struct dpu_hw_mixer *ctx,
+ u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int stage_off;
+
+ if (stage == DPU_STAGE_BASE)
+ return;
+
+ stage_off = _stage_offset(ctx, stage);
+ if (WARN_ON(stage_off < 0))
+ return;
+
+ DPU_REG_WRITE(c, LM_BLEND0_FG_ALPHA + stage_off, fg_alpha);
+ DPU_REG_WRITE(c, LM_BLEND0_BG_ALPHA + stage_off, bg_alpha);
+ DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
+static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
+ uint32_t mixer_op_mode)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int op_mode;
+
+ /* read the existing op_mode configuration */
+ op_mode = DPU_REG_READ(c, LM_OP_MODE);
+
+ op_mode = (op_mode & (BIT(31) | BIT(30))) | mixer_op_mode;
+
+ DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+static void _setup_mixer_ops(struct dpu_hw_lm_ops *ops,
+ unsigned long features)
+{
+ ops->setup_mixer_out = dpu_hw_lm_setup_out;
+ if (test_bit(DPU_MIXER_COMBINED_ALPHA, &features))
+ ops->setup_blend_config = dpu_hw_lm_setup_blend_config_combined_alpha;
+ else
+ ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
+ ops->setup_alpha_out = dpu_hw_lm_setup_color3;
+ ops->setup_border_color = dpu_hw_lm_setup_border_color;
+ ops->setup_misr = dpu_hw_lm_setup_misr;
+ ops->collect_misr = dpu_hw_lm_collect_misr;
+}
+
+struct dpu_hw_mixer *dpu_hw_lm_init(const struct dpu_lm_cfg *cfg,
+ void __iomem *addr)
+{
+ struct dpu_hw_mixer *c;
+
+ if (cfg->pingpong == PINGPONG_NONE) {
+ DPU_DEBUG("skip mixer %d without pingpong\n", cfg->id);
+ return NULL;
+ }
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ c->hw.blk_addr = addr + cfg->base;
+ c->hw.log_mask = DPU_DBG_MASK_LM;
+
+ /* Assign ops */
+ c->idx = cfg->id;
+ c->cap = cfg;
+ _setup_mixer_ops(&c->ops, c->cap->features);
+
+ return c;
+}
+
+void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm)
+{
+ kfree(lm);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
new file mode 100644
index 0000000000..98b77cda65
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_LM_H
+#define _DPU_HW_LM_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+struct dpu_hw_mixer;
+
+struct dpu_hw_mixer_cfg {
+ u32 out_width;
+ u32 out_height;
+ bool right_mixer;
+ int flags;
+};
+
+struct dpu_hw_color3_cfg {
+ u8 keep_fg[DPU_STAGE_MAX];
+};
+
+/**
+ * struct dpu_hw_lm_ops : Interface to the mixer Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_lm_ops {
+ /*
+ * Sets up mixer output width and height
+ * and border color if enabled
+ */
+ void (*setup_mixer_out)(struct dpu_hw_mixer *ctx,
+ struct dpu_hw_mixer_cfg *cfg);
+
+ /*
+ * Alpha blending configuration
+ * for the specified stage
+ */
+ void (*setup_blend_config)(struct dpu_hw_mixer *ctx, uint32_t stage,
+ uint32_t fg_alpha, uint32_t bg_alpha, uint32_t blend_op);
+
+ /*
+ * Alpha color component selection from either fg or bg
+ */
+ void (*setup_alpha_out)(struct dpu_hw_mixer *ctx, uint32_t mixer_op);
+
+ /**
+ * setup_border_color : enable/disable border color
+ */
+ void (*setup_border_color)(struct dpu_hw_mixer *ctx,
+ struct dpu_mdss_color *color,
+ u8 border_en);
+
+ /**
+ * setup_misr: Enable/disable MISR
+ */
+ void (*setup_misr)(struct dpu_hw_mixer *ctx);
+
+ /**
+ * collect_misr: Read MISR signature
+ */
+ int (*collect_misr)(struct dpu_hw_mixer *ctx, u32 *misr_value);
+};
+
+struct dpu_hw_mixer {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* lm */
+ enum dpu_lm idx;
+ const struct dpu_lm_cfg *cap;
+ const struct dpu_mdp_cfg *mdp;
+ const struct dpu_ctl_cfg *ctl;
+
+ /* ops */
+ struct dpu_hw_lm_ops ops;
+
+ /* store mixer info specific to display */
+ struct dpu_hw_mixer_cfg cfg;
+};
+
+/**
+ * to_dpu_hw_mixer - convert base object dpu_hw_base to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_mixer, base);
+}
+
+/**
+ * dpu_hw_lm_init() - Initializes the mixer hw driver object.
+ * should be called once before accessing every mixer.
+ * @cfg: mixer catalog entry for which driver object is required
+ * @addr: mapped register io address of MDP
+ */
+struct dpu_hw_mixer *dpu_hw_lm_init(const struct dpu_lm_cfg *cfg,
+ void __iomem *addr);
+
+/**
+ * dpu_hw_lm_destroy(): Destroys layer mixer driver context
+ * @lm: Pointer to LM driver context
+ */
+void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm);
+
+#endif /*_DPU_HW_LM_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
new file mode 100644
index 0000000000..d85157acfb
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -0,0 +1,510 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_MDSS_H
+#define _DPU_HW_MDSS_H
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+#include "msm_drv.h"
+
+#define DPU_DBG_NAME "dpu"
+
+#define DPU_NONE 0
+
+#ifndef DPU_CSC_MATRIX_COEFF_SIZE
+#define DPU_CSC_MATRIX_COEFF_SIZE 9
+#endif
+
+#ifndef DPU_CSC_CLAMP_SIZE
+#define DPU_CSC_CLAMP_SIZE 6
+#endif
+
+#ifndef DPU_CSC_BIAS_SIZE
+#define DPU_CSC_BIAS_SIZE 3
+#endif
+
+#ifndef DPU_MAX_PLANES
+#define DPU_MAX_PLANES 4
+#endif
+
+#define PIPES_PER_STAGE 2
+#ifndef DPU_MAX_DE_CURVES
+#define DPU_MAX_DE_CURVES 3
+#endif
+
+enum dpu_format_flags {
+ DPU_FORMAT_FLAG_YUV_BIT,
+ DPU_FORMAT_FLAG_DX_BIT,
+ DPU_FORMAT_FLAG_COMPRESSED_BIT,
+ DPU_FORMAT_FLAG_BIT_MAX,
+};
+
+#define DPU_FORMAT_FLAG_YUV BIT(DPU_FORMAT_FLAG_YUV_BIT)
+#define DPU_FORMAT_FLAG_DX BIT(DPU_FORMAT_FLAG_DX_BIT)
+#define DPU_FORMAT_FLAG_COMPRESSED BIT(DPU_FORMAT_FLAG_COMPRESSED_BIT)
+#define DPU_FORMAT_IS_YUV(X) \
+ (test_bit(DPU_FORMAT_FLAG_YUV_BIT, (X)->flag))
+#define DPU_FORMAT_IS_DX(X) \
+ (test_bit(DPU_FORMAT_FLAG_DX_BIT, (X)->flag))
+#define DPU_FORMAT_IS_LINEAR(X) ((X)->fetch_mode == DPU_FETCH_LINEAR)
+#define DPU_FORMAT_IS_TILE(X) \
+ (((X)->fetch_mode == DPU_FETCH_UBWC) && \
+ !test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+#define DPU_FORMAT_IS_UBWC(X) \
+ (((X)->fetch_mode == DPU_FETCH_UBWC) && \
+ test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+
+#define DPU_BLEND_FG_ALPHA_FG_CONST (0 << 0)
+#define DPU_BLEND_FG_ALPHA_BG_CONST (1 << 0)
+#define DPU_BLEND_FG_ALPHA_FG_PIXEL (2 << 0)
+#define DPU_BLEND_FG_ALPHA_BG_PIXEL (3 << 0)
+#define DPU_BLEND_FG_INV_ALPHA (1 << 2)
+#define DPU_BLEND_FG_MOD_ALPHA (1 << 3)
+#define DPU_BLEND_FG_INV_MOD_ALPHA (1 << 4)
+#define DPU_BLEND_FG_TRANSP_EN (1 << 5)
+#define DPU_BLEND_BG_ALPHA_FG_CONST (0 << 8)
+#define DPU_BLEND_BG_ALPHA_BG_CONST (1 << 8)
+#define DPU_BLEND_BG_ALPHA_FG_PIXEL (2 << 8)
+#define DPU_BLEND_BG_ALPHA_BG_PIXEL (3 << 8)
+#define DPU_BLEND_BG_INV_ALPHA (1 << 10)
+#define DPU_BLEND_BG_MOD_ALPHA (1 << 11)
+#define DPU_BLEND_BG_INV_MOD_ALPHA (1 << 12)
+#define DPU_BLEND_BG_TRANSP_EN (1 << 13)
+
+#define DPU_VSYNC0_SOURCE_GPIO 0
+#define DPU_VSYNC1_SOURCE_GPIO 1
+#define DPU_VSYNC2_SOURCE_GPIO 2
+#define DPU_VSYNC_SOURCE_INTF_0 3
+#define DPU_VSYNC_SOURCE_INTF_1 4
+#define DPU_VSYNC_SOURCE_INTF_2 5
+#define DPU_VSYNC_SOURCE_INTF_3 6
+#define DPU_VSYNC_SOURCE_WD_TIMER_4 11
+#define DPU_VSYNC_SOURCE_WD_TIMER_3 12
+#define DPU_VSYNC_SOURCE_WD_TIMER_2 13
+#define DPU_VSYNC_SOURCE_WD_TIMER_1 14
+#define DPU_VSYNC_SOURCE_WD_TIMER_0 15
+
+enum dpu_hw_blk_type {
+ DPU_HW_BLK_TOP = 0,
+ DPU_HW_BLK_SSPP,
+ DPU_HW_BLK_LM,
+ DPU_HW_BLK_CTL,
+ DPU_HW_BLK_PINGPONG,
+ DPU_HW_BLK_INTF,
+ DPU_HW_BLK_WB,
+ DPU_HW_BLK_DSPP,
+ DPU_HW_BLK_MERGE_3D,
+ DPU_HW_BLK_DSC,
+ DPU_HW_BLK_MAX,
+};
+
+enum dpu_sspp {
+ SSPP_NONE,
+ SSPP_VIG0,
+ SSPP_VIG1,
+ SSPP_VIG2,
+ SSPP_VIG3,
+ SSPP_RGB0,
+ SSPP_RGB1,
+ SSPP_RGB2,
+ SSPP_RGB3,
+ SSPP_DMA0,
+ SSPP_DMA1,
+ SSPP_DMA2,
+ SSPP_DMA3,
+ SSPP_DMA4,
+ SSPP_DMA5,
+ SSPP_CURSOR0,
+ SSPP_CURSOR1,
+ SSPP_MAX
+};
+
+enum dpu_sspp_type {
+ SSPP_TYPE_VIG,
+ SSPP_TYPE_RGB,
+ SSPP_TYPE_DMA,
+ SSPP_TYPE_CURSOR,
+ SSPP_TYPE_MAX
+};
+
+enum dpu_lm {
+ LM_0 = 1,
+ LM_1,
+ LM_2,
+ LM_3,
+ LM_4,
+ LM_5,
+ LM_6,
+ LM_MAX
+};
+
+enum dpu_stage {
+ DPU_STAGE_BASE = 0,
+ DPU_STAGE_0,
+ DPU_STAGE_1,
+ DPU_STAGE_2,
+ DPU_STAGE_3,
+ DPU_STAGE_4,
+ DPU_STAGE_5,
+ DPU_STAGE_6,
+ DPU_STAGE_7,
+ DPU_STAGE_8,
+ DPU_STAGE_9,
+ DPU_STAGE_10,
+ DPU_STAGE_MAX
+};
+enum dpu_dspp {
+ DSPP_0 = 1,
+ DSPP_1,
+ DSPP_2,
+ DSPP_3,
+ DSPP_MAX
+};
+
+enum dpu_ctl {
+ CTL_0 = 1,
+ CTL_1,
+ CTL_2,
+ CTL_3,
+ CTL_4,
+ CTL_5,
+ CTL_MAX
+};
+
+enum dpu_dsc {
+ DSC_NONE = 0,
+ DSC_0,
+ DSC_1,
+ DSC_2,
+ DSC_3,
+ DSC_4,
+ DSC_5,
+ DSC_MAX
+};
+
+enum dpu_pingpong {
+ PINGPONG_NONE,
+ PINGPONG_0,
+ PINGPONG_1,
+ PINGPONG_2,
+ PINGPONG_3,
+ PINGPONG_4,
+ PINGPONG_5,
+ PINGPONG_6,
+ PINGPONG_7,
+ PINGPONG_S0,
+ PINGPONG_MAX
+};
+
+enum dpu_merge_3d {
+ MERGE_3D_0 = 1,
+ MERGE_3D_1,
+ MERGE_3D_2,
+ MERGE_3D_3,
+ MERGE_3D_MAX
+};
+
+enum dpu_intf {
+ INTF_0 = 1,
+ INTF_1,
+ INTF_2,
+ INTF_3,
+ INTF_4,
+ INTF_5,
+ INTF_6,
+ INTF_7,
+ INTF_8,
+ INTF_MAX
+};
+
+/*
+ * Historically these values correspond to the values written to the
+ * DISP_INTF_SEL register, which had to be programmed manually. On newer MDP
+ * generations this register is a NOP, but we keep the values for historical
+ * reasons.
+ */
+enum dpu_intf_type {
+ INTF_NONE = 0x0,
+ INTF_DSI = 0x1,
+ INTF_HDMI = 0x3,
+ INTF_LCDC = 0x5,
+ /* old eDP found on 8x74 and 8x84 */
+ INTF_EDP = 0x9,
+ /* both DP and eDP, handled by the new DP driver */
+ INTF_DP = 0xa,
+
+ /* virtual interfaces */
+ INTF_WB = 0x100,
+};
+
+enum dpu_intf_mode {
+ INTF_MODE_NONE = 0,
+ INTF_MODE_CMD,
+ INTF_MODE_VIDEO,
+ INTF_MODE_WB_BLOCK,
+ INTF_MODE_WB_LINE,
+ INTF_MODE_MAX
+};
+
+enum dpu_wb {
+ WB_0 = 1,
+ WB_1,
+ WB_2,
+ WB_3,
+ WB_MAX
+};
+
+enum dpu_cwb {
+ CWB_0 = 0x1,
+ CWB_1,
+ CWB_2,
+ CWB_3,
+ CWB_MAX
+};
+
+enum dpu_wd_timer {
+ WD_TIMER_0 = 0x1,
+ WD_TIMER_1,
+ WD_TIMER_2,
+ WD_TIMER_3,
+ WD_TIMER_4,
+ WD_TIMER_5,
+ WD_TIMER_MAX
+};
+
+enum dpu_vbif {
+ VBIF_RT,
+ VBIF_NRT,
+ VBIF_MAX,
+};
+
+/**
+ * DPU HW component order color map
+ */
+enum {
+ C0_G_Y = 0,
+ C1_B_Cb = 1,
+ C2_R_Cr = 2,
+ C3_ALPHA = 3
+};
+
+/**
+ * enum dpu_plane_type - defines how the color components are packed
+ * @DPU_PLANE_INTERLEAVED : Color components in single plane
+ * @DPU_PLANE_PLANAR : Color component in separate planes
+ * @DPU_PLANE_PSEUDO_PLANAR : Chroma components interleaved in separate plane
+ */
+enum dpu_plane_type {
+ DPU_PLANE_INTERLEAVED,
+ DPU_PLANE_PLANAR,
+ DPU_PLANE_PSEUDO_PLANAR,
+};
+
+/**
+ * enum dpu_chroma_samp_type - chroma sub-sampling type
+ * @DPU_CHROMA_RGB : No chroma subsampling
+ * @DPU_CHROMA_H2V1 : Chroma pixels are horizontally subsampled
+ * @DPU_CHROMA_H1V2 : Chroma pixels are vertically subsampled
+ * @DPU_CHROMA_420 : 420 subsampling
+ */
+enum dpu_chroma_samp_type {
+ DPU_CHROMA_RGB,
+ DPU_CHROMA_H2V1,
+ DPU_CHROMA_H1V2,
+ DPU_CHROMA_420
+};
+
+/**
+ * enum dpu_fetch_type - defines how DPU HW fetches data
+ * @DPU_FETCH_LINEAR : fetch is line by line
+ * @DPU_FETCH_TILE : fetches data in Z order from a tile
+ * @DPU_FETCH_UBWC : fetch and decompress data
+ */
+enum dpu_fetch_type {
+ DPU_FETCH_LINEAR,
+ DPU_FETCH_TILE,
+ DPU_FETCH_UBWC
+};
+
+/**
+ * Enum values are chosen to fit the number of bits
+ * expected by the HW programming.
+ */
+enum {
+ COLOR_ALPHA_1BIT = 0,
+ COLOR_ALPHA_4BIT = 1,
+ COLOR_4BIT = 0,
+ COLOR_5BIT = 1, /* No 5-bit Alpha */
+ COLOR_6BIT = 2, /* 6-Bit Alpha also = 2 */
+ COLOR_8BIT = 3, /* 8-Bit Alpha also = 3 */
+};
+
+/**
+ * enum dpu_3d_blend_mode
+ * Describes how the 3D data is blended
+ * @BLEND_3D_NONE : 3D blending not enabled
+ * @BLEND_3D_FRAME_INT : Frame interleaving
+ * @BLEND_3D_H_ROW_INT : Horizontal row interleaving
+ * @BLEND_3D_V_ROW_INT : Vertical row interleaving
+ * @BLEND_3D_COL_INT : Column interleaving
+ * @BLEND_3D_MAX : Sentinel value
+ */
+enum dpu_3d_blend_mode {
+ BLEND_3D_NONE = 0,
+ BLEND_3D_FRAME_INT,
+ BLEND_3D_H_ROW_INT,
+ BLEND_3D_V_ROW_INT,
+ BLEND_3D_COL_INT,
+ BLEND_3D_MAX
+};
+
+/** struct dpu_format - defines the format configuration which
+ * allows DPU HW to correctly fetch and decode the format
+ * @base: base msm_format structure containing fourcc code
+ * @fetch_planes: how the color components are packed in pixel format
+ * @element: element color ordering
+ * @bits: element bit widths
+ * @chroma_sample: chroma sub-sampling type
+ * @unpack_align_msb: unpack aligned, 0 to LSB, 1 to MSB
+ * @unpack_tight: 0 for loose, 1 for tight
+ * @unpack_count: 0 = 1 component, 1 = 2 component
+ * @bpp: bytes per pixel
+ * @alpha_enable: whether the format has an alpha channel
+ * @num_planes: number of planes (including meta data planes)
+ * @fetch_mode: linear, tiled, or ubwc hw fetch behavior
+ * @flag: usage bit flags
+ * @tile_width: format tile width
+ * @tile_height: format tile height
+ */
+struct dpu_format {
+ struct msm_format base;
+ enum dpu_plane_type fetch_planes;
+ u8 element[DPU_MAX_PLANES];
+ u8 bits[DPU_MAX_PLANES];
+ enum dpu_chroma_samp_type chroma_sample;
+ u8 unpack_align_msb;
+ u8 unpack_tight;
+ u8 unpack_count;
+ u8 bpp;
+ u8 alpha_enable;
+ u8 num_planes;
+ enum dpu_fetch_type fetch_mode;
+ DECLARE_BITMAP(flag, DPU_FORMAT_FLAG_BIT_MAX);
+ u16 tile_width;
+ u16 tile_height;
+};
+#define to_dpu_format(x) container_of(x, struct dpu_format, base)
+
+/**
+ * struct dpu_hw_fmt_layout - format information of the source pixel data
+ * @format: pixel format parameters
+ * @num_planes: number of planes (including meta data planes)
+ * @width: image width
+ * @height: image height
+ * @total_size: total size in bytes
+ * @plane_addr: address of each plane
+ * @plane_size: length of each plane
+ * @plane_pitch: pitch of each plane
+ */
+struct dpu_hw_fmt_layout {
+ const struct dpu_format *format;
+ uint32_t num_planes;
+ uint32_t width;
+ uint32_t height;
+ uint32_t total_size;
+ uint32_t plane_addr[DPU_MAX_PLANES];
+ uint32_t plane_size[DPU_MAX_PLANES];
+ uint32_t plane_pitch[DPU_MAX_PLANES];
+};
+
+struct dpu_csc_cfg {
+ /* matrix coefficients in S15.16 format */
+ uint32_t csc_mv[DPU_CSC_MATRIX_COEFF_SIZE];
+ uint32_t csc_pre_bv[DPU_CSC_BIAS_SIZE];
+ uint32_t csc_post_bv[DPU_CSC_BIAS_SIZE];
+ uint32_t csc_pre_lv[DPU_CSC_CLAMP_SIZE];
+ uint32_t csc_post_lv[DPU_CSC_CLAMP_SIZE];
+};
+
+/**
+ * struct dpu_mdss_color - mdss color description
+ * color 0 : green
+ * color 1 : blue
+ * color 2 : red
+ * color 3 : alpha
+ */
+struct dpu_mdss_color {
+ u32 color_0;
+ u32 color_1;
+ u32 color_2;
+ u32 color_3;
+};
+
+/*
+ * Define bit masks for h/w logging.
+ */
+#define DPU_DBG_MASK_NONE (1 << 0)
+#define DPU_DBG_MASK_INTF (1 << 1)
+#define DPU_DBG_MASK_LM (1 << 2)
+#define DPU_DBG_MASK_CTL (1 << 3)
+#define DPU_DBG_MASK_PINGPONG (1 << 4)
+#define DPU_DBG_MASK_SSPP (1 << 5)
+#define DPU_DBG_MASK_WB (1 << 6)
+#define DPU_DBG_MASK_TOP (1 << 7)
+#define DPU_DBG_MASK_VBIF (1 << 8)
+#define DPU_DBG_MASK_ROT (1 << 9)
+#define DPU_DBG_MASK_DSPP (1 << 10)
+#define DPU_DBG_MASK_DSC (1 << 11)
+
+/**
+ * struct dpu_hw_tear_check - Struct contains parameters to configure
+ * tear-effect module. This structure is used to configure tear-check
+ * logic present either in ping-pong or in interface module.
+ * @vsync_count: Ratio of MDP VSYNC clk freq (Hz) to refresh rate,
+ * divided by the number of lines
+ * @sync_cfg_height: Total vertical lines (display height - 1)
+ * @vsync_init_val: Init value to which the read pointer gets loaded at
+ * vsync edge
+ * @sync_threshold_start: Read pointer threshold start ROI for write operation
+ * @sync_threshold_continue: The minimum number of lines the write pointer
+ * needs to be above the read pointer
+ * @start_pos: The position from which the start_threshold value is added
+ * @rd_ptr_irq: The read pointer line at which interrupt has to be generated
+ * @hw_vsync_mode: Sync with external frame sync input
+ */
+struct dpu_hw_tear_check {
+ /*
+ * This is the ratio of the MDP VSYNC clk freq (Hz) to the
+ * refresh rate, divided by the number of lines
+ */
+ u32 vsync_count;
+ u32 sync_cfg_height;
+ u32 vsync_init_val;
+ u32 sync_threshold_start;
+ u32 sync_threshold_continue;
+ u32 start_pos;
+ u32 rd_ptr_irq;
+ u8 hw_vsync_mode;
+};
+
+/**
+ * struct dpu_hw_pp_vsync_info - Struct contains parameters to configure
+ * read and write pointers for command mode panels
+ * @rd_ptr_init_val: Value of rd pointer at vsync edge
+ * @rd_ptr_frame_count: Num frames sent since enabling interface
+ * @rd_ptr_line_count: Current line on panel (rd ptr)
+ * @wr_ptr_line_count: Current line within pp fifo (wr ptr)
+ * @intf_frame_count: Frames read from intf
+ */
+struct dpu_hw_pp_vsync_info {
+ u32 rd_ptr_init_val;
+ u32 rd_ptr_frame_count;
+ u32 rd_ptr_line_count;
+ u32 wr_ptr_line_count;
+ u32 intf_frame_count;
+};
+
+#endif /* _DPU_HW_MDSS_H */
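To show how the DPU_BLEND_* bits above compose into a single blend_op word, a sketch loosely modeled on premultiplied-alpha blending (lm and stage are assumed to exist; the exact recipe a given compositor path uses may differ):

	/* constant FG alpha; BG scaled by the inverted FG pixel alpha */
	u32 blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
		       DPU_BLEND_BG_ALPHA_FG_PIXEL |
		       DPU_BLEND_BG_INV_ALPHA;

	lm->ops.setup_blend_config(lm, stage, 0xff, 0x00, blend_op);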
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
new file mode 100644
index 0000000000..90e0e05eff
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/iopoll.h>
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_merge3d.h"
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+
+#define MERGE_3D_MUX 0x000
+#define MERGE_3D_MODE 0x004
+
+static void dpu_hw_merge_3d_setup_3d_mode(struct dpu_hw_merge_3d *merge_3d,
+ enum dpu_3d_blend_mode mode_3d)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 data;
+
+ c = &merge_3d->hw;
+ if (mode_3d == BLEND_3D_NONE) {
+ DPU_REG_WRITE(c, MERGE_3D_MODE, 0);
+ DPU_REG_WRITE(c, MERGE_3D_MUX, 0);
+ } else {
+ data = BIT(0) | ((mode_3d - 1) << 1);
+ DPU_REG_WRITE(c, MERGE_3D_MODE, data);
+ }
+}
+
+static void _setup_merge_3d_ops(struct dpu_hw_merge_3d *c,
+ unsigned long features)
+{
+ c->ops.setup_3d_mode = dpu_hw_merge_3d_setup_3d_mode;
+}
+
+struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(const struct dpu_merge_3d_cfg *cfg,
+ void __iomem *addr)
+{
+ struct dpu_hw_merge_3d *c;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ c->hw.blk_addr = addr + cfg->base;
+ c->hw.log_mask = DPU_DBG_MASK_PINGPONG;
+
+ c->idx = cfg->id;
+ c->caps = cfg;
+ _setup_merge_3d_ops(c, c->caps->features);
+
+ return c;
+}
+
+void dpu_hw_merge_3d_destroy(struct dpu_hw_merge_3d *hw)
+{
+ kfree(hw);
+}
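A worked example of the MERGE_3D_MODE encoding above: BIT(0) is the enable bit, and the mode value, minus one because BLEND_3D_NONE never reaches this write, lands in the next bits:

	enum dpu_3d_blend_mode mode = BLEND_3D_H_ROW_INT;	/* == 2 */
	u32 data = BIT(0) | ((mode - 1) << 1);			/* 0x1 | 0x2 == 0x3 */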
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
new file mode 100644
index 0000000000..19cec5e887
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_MERGE3D_H
+#define _DPU_HW_MERGE3D_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+struct dpu_hw_merge_3d;
+
+/**
+ *
+ * struct dpu_hw_merge_3d_ops : Interface to the merge_3d Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ * @setup_3d_mode : enable 3D merge
+ */
+struct dpu_hw_merge_3d_ops {
+ void (*setup_3d_mode)(struct dpu_hw_merge_3d *merge_3d,
+ enum dpu_3d_blend_mode mode_3d);
+
+};
+
+struct dpu_hw_merge_3d {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* merge_3d */
+ enum dpu_merge_3d idx;
+ const struct dpu_merge_3d_cfg *caps;
+
+ /* ops */
+ struct dpu_hw_merge_3d_ops ops;
+};
+
+/**
+ * to_dpu_hw_merge_3d - convert base object dpu_hw_base to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_merge_3d *to_dpu_hw_merge_3d(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_merge_3d, base);
+}
+
+/**
+ * dpu_hw_merge_3d_init() - Initializes the merge_3d driver for the passed
+ * merge3d catalog entry.
+ * @cfg: merge_3d catalog entry for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * Return: Error code or allocated dpu_hw_merge_3d context
+ */
+struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(const struct dpu_merge_3d_cfg *cfg,
+ void __iomem *addr);
+
+/**
+ * dpu_hw_merge_3d_destroy - destroys merge_3d driver context
+ * should be called to free the context
+ * @pp: Pointer to merge_3d driver context returned by dpu_hw_merge_3d_init
+ */
+void dpu_hw_merge_3d_destroy(struct dpu_hw_merge_3d *pp);
+
+#endif /*_DPU_HW_MERGE3D_H */
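A usage sketch (hypothetical caller; merge3d_catalog_entry and mdp_base are assumed stand-ins):

	struct dpu_hw_merge_3d *merge_3d;

	merge_3d = dpu_hw_merge_3d_init(merge3d_catalog_entry, mdp_base);
	if (IS_ERR(merge_3d))
		return PTR_ERR(merge_3d);

	merge_3d->ops.setup_3d_mode(merge_3d, BLEND_3D_H_ROW_INT);
	/* ... scanout ... */
	merge_3d->ops.setup_3d_mode(merge_3d, BLEND_3D_NONE);	/* disable */
	dpu_hw_merge_3d_destroy(merge_3d);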
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
new file mode 100644
index 0000000000..437d9e62a8
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -0,0 +1,327 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/iopoll.h>
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+
+#define PP_TEAR_CHECK_EN 0x000
+#define PP_SYNC_CONFIG_VSYNC 0x004
+#define PP_SYNC_CONFIG_HEIGHT 0x008
+#define PP_SYNC_WRCOUNT 0x00C
+#define PP_VSYNC_INIT_VAL 0x010
+#define PP_INT_COUNT_VAL 0x014
+#define PP_SYNC_THRESH 0x018
+#define PP_START_POS 0x01C
+#define PP_RD_PTR_IRQ 0x020
+#define PP_WR_PTR_IRQ 0x024
+#define PP_OUT_LINE_COUNT 0x028
+#define PP_LINE_COUNT 0x02C
+#define PP_AUTOREFRESH_CONFIG 0x030
+
+#define PP_FBC_MODE 0x034
+#define PP_FBC_BUDGET_CTL 0x038
+#define PP_FBC_LOSSY_MODE 0x03C
+#define PP_DSC_MODE 0x0a0
+#define PP_DCE_DATA_IN_SWAP 0x0ac
+#define PP_DCE_DATA_OUT_SWAP 0x0c8
+
+#define PP_DITHER_EN 0x000
+#define PP_DITHER_BITDEPTH 0x004
+#define PP_DITHER_MATRIX 0x008
+
+#define DITHER_DEPTH_MAP_INDEX 9
+
+static u32 dither_depth_map[DITHER_DEPTH_MAP_INDEX] = {
+ 0, 0, 0, 0, 0, 0, 0, 1, 2
+};
+
+static void dpu_hw_pp_setup_dither(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_dither_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 i, base, data = 0;
+
+ c = &pp->hw;
+ base = pp->caps->sblk->dither.base;
+ if (!cfg) {
+ DPU_REG_WRITE(c, base + PP_DITHER_EN, 0);
+ return;
+ }
+
+ data = dither_depth_map[cfg->c0_bitdepth] & REG_MASK(2);
+ data |= (dither_depth_map[cfg->c1_bitdepth] & REG_MASK(2)) << 2;
+ data |= (dither_depth_map[cfg->c2_bitdepth] & REG_MASK(2)) << 4;
+ data |= (dither_depth_map[cfg->c3_bitdepth] & REG_MASK(2)) << 6;
+ data |= (cfg->temporal_en) ? (1 << 8) : 0;
+
+ DPU_REG_WRITE(c, base + PP_DITHER_BITDEPTH, data);
+
+ for (i = 0; i < DITHER_MATRIX_SZ - 3; i += 4) {
+ data = (cfg->matrix[i] & REG_MASK(4)) |
+ ((cfg->matrix[i + 1] & REG_MASK(4)) << 4) |
+ ((cfg->matrix[i + 2] & REG_MASK(4)) << 8) |
+ ((cfg->matrix[i + 3] & REG_MASK(4)) << 12);
+ DPU_REG_WRITE(c, base + PP_DITHER_MATRIX + i, data);
+ }
+ DPU_REG_WRITE(c, base + PP_DITHER_EN, 1);
+}
+
+static int dpu_hw_pp_enable_te(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_tear_check *te)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 cfg;
+
+ if (!pp || !te)
+ return -EINVAL;
+ c = &pp->hw;
+
+ cfg = BIT(19); /* VSYNC_COUNTER_EN */
+ if (te->hw_vsync_mode)
+ cfg |= BIT(20);
+
+ cfg |= te->vsync_count;
+
+ DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
+ DPU_REG_WRITE(c, PP_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
+ DPU_REG_WRITE(c, PP_VSYNC_INIT_VAL, te->vsync_init_val);
+ DPU_REG_WRITE(c, PP_RD_PTR_IRQ, te->rd_ptr_irq);
+ DPU_REG_WRITE(c, PP_START_POS, te->start_pos);
+ DPU_REG_WRITE(c, PP_SYNC_THRESH,
+ ((te->sync_threshold_continue << 16) |
+ te->sync_threshold_start));
+ DPU_REG_WRITE(c, PP_SYNC_WRCOUNT,
+ (te->start_pos + te->sync_threshold_start + 1));
+
+ DPU_REG_WRITE(c, PP_TEAR_CHECK_EN, 1);
+
+ return 0;
+}
+
+static void dpu_hw_pp_setup_autorefresh_config(struct dpu_hw_pingpong *pp,
+ u32 frame_count, bool enable)
+{
+ DPU_REG_WRITE(&pp->hw, PP_AUTOREFRESH_CONFIG,
+ enable ? (BIT(31) | frame_count) : 0);
+}
+
+/*
+ * dpu_hw_pp_get_autorefresh_config - Get autorefresh config from HW
+ * @pp: DPU pingpong structure
+ * @frame_count: Used to return the current frame count from hw
+ *
+ * Returns: True if autorefresh enabled, false if disabled.
+ */
+static bool dpu_hw_pp_get_autorefresh_config(struct dpu_hw_pingpong *pp,
+ u32 *frame_count)
+{
+ u32 val = DPU_REG_READ(&pp->hw, PP_AUTOREFRESH_CONFIG);
+ if (frame_count != NULL)
+ *frame_count = val & 0xffff;
+ return !!((val & BIT(31)) >> 31);
+}
+
+static int dpu_hw_pp_disable_te(struct dpu_hw_pingpong *pp)
+{
+ struct dpu_hw_blk_reg_map *c;
+
+ if (!pp)
+ return -EINVAL;
+ c = &pp->hw;
+
+ DPU_REG_WRITE(c, PP_TEAR_CHECK_EN, 0);
+ return 0;
+}
+
+static int dpu_hw_pp_connect_external_te(struct dpu_hw_pingpong *pp,
+ bool enable_external_te)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 cfg;
+ int orig;
+
+ if (!pp)
+ return -EINVAL;
+
+ c = &pp->hw;
+ cfg = DPU_REG_READ(c, PP_SYNC_CONFIG_VSYNC);
+ orig = (bool)(cfg & BIT(20));
+ if (enable_external_te)
+ cfg |= BIT(20);
+ else
+ cfg &= ~BIT(20);
+ DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
+ trace_dpu_pp_connect_ext_te(pp->idx - PINGPONG_0, cfg);
+
+ return orig;
+}
+
+static int dpu_hw_pp_get_vsync_info(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_pp_vsync_info *info)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 val;
+
+ if (!pp || !info)
+ return -EINVAL;
+ c = &pp->hw;
+
+ val = DPU_REG_READ(c, PP_VSYNC_INIT_VAL);
+ info->rd_ptr_init_val = val & 0xffff;
+
+ val = DPU_REG_READ(c, PP_INT_COUNT_VAL);
+ info->rd_ptr_frame_count = (val & 0xffff0000) >> 16;
+ info->rd_ptr_line_count = val & 0xffff;
+
+ val = DPU_REG_READ(c, PP_LINE_COUNT);
+ info->wr_ptr_line_count = val & 0xffff;
+
+ return 0;
+}
+
+static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 height, init;
+ u32 line = 0xFFFF;
+
+ if (!pp)
+ return 0;
+ c = &pp->hw;
+
+ init = DPU_REG_READ(c, PP_VSYNC_INIT_VAL) & 0xFFFF;
+ height = DPU_REG_READ(c, PP_SYNC_CONFIG_HEIGHT) & 0xFFFF;
+
+ if (height < init)
+ return line;
+
+ line = DPU_REG_READ(c, PP_INT_COUNT_VAL) & 0xFFFF;
+
+ if (line < init)
+ line += (0xFFFF - init);
+ else
+ line -= init;
+
+ return line;
+}
+
+static void dpu_hw_pp_disable_autorefresh(struct dpu_hw_pingpong *pp,
+ uint32_t encoder_id, u16 vdisplay)
+{
+ struct dpu_hw_pp_vsync_info info;
+ int trial = 0;
+
+ /* If autorefresh is already disabled, we have nothing to do */
+ if (!dpu_hw_pp_get_autorefresh_config(pp, NULL))
+ return;
+
+ /*
+ * If autorefresh is enabled, disable it and make sure it is safe to
+ * proceed with the current frame commit/push. The sequence is:
+ * 1. Disable TE
+ * 2. Disable autorefresh config
+ * 3. Poll for frame transfer ongoing to be false
+ * 4. Enable TE back
+ */
+
+ dpu_hw_pp_connect_external_te(pp, false);
+ dpu_hw_pp_setup_autorefresh_config(pp, 0, false);
+
+ do {
+ udelay(DPU_ENC_MAX_POLL_TIMEOUT_US);
+ if ((trial * DPU_ENC_MAX_POLL_TIMEOUT_US)
+ > (KICKOFF_TIMEOUT_MS * USEC_PER_MSEC)) {
+ DPU_ERROR("enc%d pp%d disable autorefresh failed\n",
+ encoder_id, pp->idx - PINGPONG_0);
+ break;
+ }
+
+ trial++;
+
+ dpu_hw_pp_get_vsync_info(pp, &info);
+ } while (info.wr_ptr_line_count > 0 &&
+ info.wr_ptr_line_count < vdisplay);
+
+ dpu_hw_pp_connect_external_te(pp, true);
+
+ DPU_DEBUG("enc%d pp%d disabled autorefresh\n",
+ encoder_id, pp->idx - PINGPONG_0);
+}
+
+static int dpu_hw_pp_dsc_enable(struct dpu_hw_pingpong *pp)
+{
+ struct dpu_hw_blk_reg_map *c = &pp->hw;
+
+ DPU_REG_WRITE(c, PP_DSC_MODE, 1);
+ return 0;
+}
+
+static void dpu_hw_pp_dsc_disable(struct dpu_hw_pingpong *pp)
+{
+ struct dpu_hw_blk_reg_map *c = &pp->hw;
+
+ DPU_REG_WRITE(c, PP_DSC_MODE, 0);
+}
+
+static int dpu_hw_pp_setup_dsc(struct dpu_hw_pingpong *pp)
+{
+ struct dpu_hw_blk_reg_map *pp_c = &pp->hw;
+ int data;
+
+ data = DPU_REG_READ(pp_c, PP_DCE_DATA_OUT_SWAP);
+ data |= BIT(18); /* endian flip */
+ DPU_REG_WRITE(pp_c, PP_DCE_DATA_OUT_SWAP, data);
+ return 0;
+}
+
+static void _setup_pingpong_ops(struct dpu_hw_pingpong *c,
+ unsigned long features)
+{
+ if (test_bit(DPU_PINGPONG_TE, &features)) {
+ c->ops.enable_tearcheck = dpu_hw_pp_enable_te;
+ c->ops.disable_tearcheck = dpu_hw_pp_disable_te;
+ c->ops.connect_external_te = dpu_hw_pp_connect_external_te;
+ c->ops.get_line_count = dpu_hw_pp_get_line_count;
+ c->ops.disable_autorefresh = dpu_hw_pp_disable_autorefresh;
+ }
+
+ if (test_bit(DPU_PINGPONG_DSC, &features)) {
+ c->ops.setup_dsc = dpu_hw_pp_setup_dsc;
+ c->ops.enable_dsc = dpu_hw_pp_dsc_enable;
+ c->ops.disable_dsc = dpu_hw_pp_dsc_disable;
+ }
+
+ if (test_bit(DPU_PINGPONG_DITHER, &features))
+ c->ops.setup_dither = dpu_hw_pp_setup_dither;
+}
+
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(const struct dpu_pingpong_cfg *cfg,
+ void __iomem *addr)
+{
+ struct dpu_hw_pingpong *c;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ c->hw.blk_addr = addr + cfg->base;
+ c->hw.log_mask = DPU_DBG_MASK_PINGPONG;
+
+ c->idx = cfg->id;
+ c->caps = cfg;
+ _setup_pingpong_ops(c, c->caps->features);
+
+ return c;
+}
+
+void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp)
+{
+ kfree(pp);
+}
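A hedged sketch of programming tear check through the ops set up above. The numbers are hypothetical (19.2 MHz vsync counter clock, 60 Hz refresh, 2500 total lines) and mode stands in for a drm_display_mode; real encoders derive these values from the panel timings:

	struct dpu_hw_tear_check tc = {
		/* 19200000 / (60 * 2500) == 128 counter ticks per line */
		.vsync_count		 = 19200000 / (60 * 2500),
		.sync_cfg_height	 = 0xfff0,
		.vsync_init_val		 = mode->vdisplay,
		.sync_threshold_start	 = 4,
		.sync_threshold_continue = 4,
		.start_pos		 = mode->vdisplay,
		.rd_ptr_irq		 = mode->vdisplay + 1,
		.hw_vsync_mode		 = 1,
	};

	if (pp->ops.enable_tearcheck)
		pp->ops.enable_tearcheck(pp, &tc);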
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
new file mode 100644
index 0000000000..d3246a9a58
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_PINGPONG_H
+#define _DPU_HW_PINGPONG_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+#define DITHER_MATRIX_SZ 16
+
+struct dpu_hw_pingpong;
+
+/**
+ * struct dpu_hw_dither_cfg - dither feature structure
+ * @flags: for customizing operations
+ * @temporal_en: temporal dither enable
+ * @c0_bitdepth: c0 component bit depth
+ * @c1_bitdepth: c1 component bit depth
+ * @c2_bitdepth: c2 component bit depth
+ * @c3_bitdepth: c3 component bit depth
+ * @matrix: dither strength matrix
+ */
+struct dpu_hw_dither_cfg {
+ u64 flags;
+ u32 temporal_en;
+ u32 c0_bitdepth;
+ u32 c1_bitdepth;
+ u32 c2_bitdepth;
+ u32 c3_bitdepth;
+ u32 matrix[DITHER_MATRIX_SZ];
+};
+
+/**
+ *
+ * struct dpu_hw_pingpong_ops : Interface to the pingpong Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ * @enable_tearcheck: program and enable tear check block
+ * @disable_tearcheck: disable tear check block
+ * @setup_dither : function to program the dither hw block
+ * @get_line_count: obtain current vertical line counter
+ */
+struct dpu_hw_pingpong_ops {
+ /**
+ * enables vsync generation, sets up the init value of the
+ * read pointer and programs the tear check configuration
+ */
+ int (*enable_tearcheck)(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_tear_check *cfg);
+
+ /**
+ * disables tear check block
+ */
+ int (*disable_tearcheck)(struct dpu_hw_pingpong *pp);
+
+ /**
+ * read, modify, write to either set or clear listening to external TE
+ * Return: 1 if TE was originally connected, 0 if not, or -ERROR
+ */
+ int (*connect_external_te)(struct dpu_hw_pingpong *pp,
+ bool enable_external_te);
+
+ /**
+ * Obtain current vertical line counter
+ */
+ u32 (*get_line_count)(struct dpu_hw_pingpong *pp);
+
+ /**
+ * Disable autorefresh if enabled
+ */
+ void (*disable_autorefresh)(struct dpu_hw_pingpong *pp, uint32_t encoder_id, u16 vdisplay);
+
+ /**
+ * Set up the dither matrix for the pingpong block
+ */
+ void (*setup_dither)(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_dither_cfg *cfg);
+ /**
+ * Enable DSC
+ */
+ int (*enable_dsc)(struct dpu_hw_pingpong *pp);
+
+ /**
+ * Disable DSC
+ */
+ void (*disable_dsc)(struct dpu_hw_pingpong *pp);
+
+ /**
+ * Setup DSC
+ */
+ int (*setup_dsc)(struct dpu_hw_pingpong *pp);
+};
+
+struct dpu_hw_merge_3d;
+
+struct dpu_hw_pingpong {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* pingpong */
+ enum dpu_pingpong idx;
+ const struct dpu_pingpong_cfg *caps;
+ struct dpu_hw_merge_3d *merge_3d;
+
+ /* ops */
+ struct dpu_hw_pingpong_ops ops;
+};
+
+/**
+ * to_dpu_hw_pingpong - convert base object dpu_hw_base to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_pingpong, base);
+}
+
+/**
+ * dpu_hw_pingpong_init() - initializes the pingpong driver for the passed
+ * pingpong catalog entry.
+ * @cfg: Pingpong catalog entry for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * Return: Error code or allocated dpu_hw_pingpong context
+ */
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(const struct dpu_pingpong_cfg *cfg,
+ void __iomem *addr);
+
+/**
+ * dpu_hw_pingpong_destroy - destroys pingpong driver context
+ * should be called to free the context
+ * @pp: Pointer to PP driver context returned by dpu_hw_pingpong_init
+ */
+void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp);
+
+#endif /*_DPU_HW_PINGPONG_H */
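As an illustrative dither configuration (assumptions: target component depths of 6 bits and a classic 4x4 Bayer-style matrix; dither_depth_map in dpu_hw_pingpong.c translates these depth values into the 2-bit register fields):

	struct dpu_hw_dither_cfg dither = {
		.c0_bitdepth = 6,
		.c1_bitdepth = 6,
		.c2_bitdepth = 6,
		.c3_bitdepth = 6,
		.temporal_en = 0,
		/* packed four entries per register by setup_dither */
		.matrix = {
			15,  7, 13,  5,
			 3, 11,  1,  9,
			12,  4, 14,  6,
			 0,  8,  2, 10,
		},
	};

	if (pp->ops.setup_dither)
		pp->ops.setup_dither(pp, &dither);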
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
new file mode 100644
index 0000000000..f2192de937
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -0,0 +1,702 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_kms.h"
+
+#include "msm_mdss.h"
+
+#include <drm/drm_file.h>
+
+#define DPU_FETCH_CONFIG_RESET_VALUE 0x00000087
+
+/* SSPP registers */
+#define SSPP_SRC_SIZE 0x00
+#define SSPP_SRC_XY 0x08
+#define SSPP_OUT_SIZE 0x0c
+#define SSPP_OUT_XY 0x10
+#define SSPP_SRC0_ADDR 0x14
+#define SSPP_SRC1_ADDR 0x18
+#define SSPP_SRC2_ADDR 0x1C
+#define SSPP_SRC3_ADDR 0x20
+#define SSPP_SRC_YSTRIDE0 0x24
+#define SSPP_SRC_YSTRIDE1 0x28
+#define SSPP_SRC_FORMAT 0x30
+#define SSPP_SRC_UNPACK_PATTERN 0x34
+#define SSPP_SRC_OP_MODE 0x38
+#define SSPP_SRC_CONSTANT_COLOR 0x3c
+#define SSPP_EXCL_REC_CTL 0x40
+#define SSPP_UBWC_STATIC_CTRL 0x44
+#define SSPP_FETCH_CONFIG 0x48
+#define SSPP_DANGER_LUT 0x60
+#define SSPP_SAFE_LUT 0x64
+#define SSPP_CREQ_LUT 0x68
+#define SSPP_QOS_CTRL 0x6C
+#define SSPP_SRC_ADDR_SW_STATUS 0x70
+#define SSPP_CREQ_LUT_0 0x74
+#define SSPP_CREQ_LUT_1 0x78
+#define SSPP_DECIMATION_CONFIG 0xB4
+#define SSPP_SW_PIX_EXT_C0_LR 0x100
+#define SSPP_SW_PIX_EXT_C0_TB 0x104
+#define SSPP_SW_PIX_EXT_C0_REQ_PIXELS 0x108
+#define SSPP_SW_PIX_EXT_C1C2_LR 0x110
+#define SSPP_SW_PIX_EXT_C1C2_TB 0x114
+#define SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS 0x118
+#define SSPP_SW_PIX_EXT_C3_LR 0x120
+#define SSPP_SW_PIX_EXT_C3_TB 0x124
+#define SSPP_SW_PIX_EXT_C3_REQ_PIXELS 0x128
+#define SSPP_TRAFFIC_SHAPER 0x130
+#define SSPP_CDP_CNTL 0x134
+#define SSPP_UBWC_ERROR_STATUS 0x138
+#define SSPP_CDP_CNTL_REC1 0x13c
+#define SSPP_TRAFFIC_SHAPER_PREFILL 0x150
+#define SSPP_TRAFFIC_SHAPER_REC1_PREFILL 0x154
+#define SSPP_TRAFFIC_SHAPER_REC1 0x158
+#define SSPP_OUT_SIZE_REC1 0x160
+#define SSPP_OUT_XY_REC1 0x164
+#define SSPP_SRC_XY_REC1 0x168
+#define SSPP_SRC_SIZE_REC1 0x16C
+#define SSPP_MULTIRECT_OPMODE 0x170
+#define SSPP_SRC_FORMAT_REC1 0x174
+#define SSPP_SRC_UNPACK_PATTERN_REC1 0x178
+#define SSPP_SRC_OP_MODE_REC1 0x17C
+#define SSPP_SRC_CONSTANT_COLOR_REC1 0x180
+#define SSPP_EXCL_REC_SIZE_REC1 0x184
+#define SSPP_EXCL_REC_XY_REC1 0x188
+#define SSPP_EXCL_REC_SIZE 0x1B4
+#define SSPP_EXCL_REC_XY 0x1B8
+
+/* SSPP_SRC_OP_MODE & OP_MODE_REC1 */
+#define MDSS_MDP_OP_DEINTERLACE BIT(22)
+#define MDSS_MDP_OP_DEINTERLACE_ODD BIT(23)
+#define MDSS_MDP_OP_IGC_ROM_1 BIT(18)
+#define MDSS_MDP_OP_IGC_ROM_0 BIT(17)
+#define MDSS_MDP_OP_IGC_EN BIT(16)
+#define MDSS_MDP_OP_FLIP_UD BIT(14)
+#define MDSS_MDP_OP_FLIP_LR BIT(13)
+#define MDSS_MDP_OP_BWC_EN BIT(0)
+#define MDSS_MDP_OP_PE_OVERRIDE BIT(31)
+#define MDSS_MDP_OP_BWC_LOSSLESS (0 << 1)
+#define MDSS_MDP_OP_BWC_Q_HIGH (1 << 1)
+#define MDSS_MDP_OP_BWC_Q_MED (2 << 1)
+
+/* SSPP_QOS_CTRL */
+#define SSPP_QOS_CTRL_VBLANK_EN BIT(16)
+#define SSPP_QOS_CTRL_DANGER_SAFE_EN BIT(0)
+#define SSPP_QOS_CTRL_DANGER_VBLANK_MASK 0x3
+#define SSPP_QOS_CTRL_DANGER_VBLANK_OFF 4
+#define SSPP_QOS_CTRL_CREQ_VBLANK_MASK 0x3
+#define SSPP_QOS_CTRL_CREQ_VBLANK_OFF 20
+
+/* DPU_SSPP_SCALER_QSEED2 */
+#define SSPP_VIG_OP_MODE 0x0
+#define SCALE_CONFIG 0x04
+#define COMP0_3_PHASE_STEP_X 0x10
+#define COMP0_3_PHASE_STEP_Y 0x14
+#define COMP1_2_PHASE_STEP_X 0x18
+#define COMP1_2_PHASE_STEP_Y 0x1c
+#define COMP0_3_INIT_PHASE_X 0x20
+#define COMP0_3_INIT_PHASE_Y 0x24
+#define COMP1_2_INIT_PHASE_X 0x28
+#define COMP1_2_INIT_PHASE_Y 0x2C
+#define VIG_0_QSEED2_SHARP 0x30
+
+/* SSPP_TRAFFIC_SHAPER and _REC1 */
+#define SSPP_TRAFFIC_SHAPER_BPC_MAX 0xFF
+
+/*
+ * Definitions for ViG op modes
+ */
+#define VIG_OP_CSC_DST_DATAFMT BIT(19)
+#define VIG_OP_CSC_SRC_DATAFMT BIT(18)
+#define VIG_OP_CSC_EN BIT(17)
+#define VIG_OP_MEM_PROT_CONT BIT(15)
+#define VIG_OP_MEM_PROT_VAL BIT(14)
+#define VIG_OP_MEM_PROT_SAT BIT(13)
+#define VIG_OP_MEM_PROT_HUE BIT(12)
+#define VIG_OP_HIST BIT(8)
+#define VIG_OP_SKY_COL BIT(7)
+#define VIG_OP_FOIL BIT(6)
+#define VIG_OP_SKIN_COL BIT(5)
+#define VIG_OP_PA_EN BIT(4)
+#define VIG_OP_PA_SAT_ZERO_EXP BIT(2)
+#define VIG_OP_MEM_PROT_BLEND BIT(1)
+
+/*
+ * Definitions for CSC 10 op modes
+ */
+#define SSPP_VIG_CSC_10_OP_MODE 0x0
+#define VIG_CSC_10_SRC_DATAFMT BIT(1)
+#define VIG_CSC_10_EN BIT(0)
+#define CSC_10BIT_OFFSET 4
+
+/* traffic shaper clock in Hz */
+#define TS_CLK 19200000
+
+
+static void dpu_hw_sspp_setup_multirect(struct dpu_sw_pipe *pipe)
+{
+ struct dpu_hw_sspp *ctx = pipe->sspp;
+ u32 mode_mask;
+
+ if (!ctx)
+ return;
+
+ if (pipe->multirect_index == DPU_SSPP_RECT_SOLO) {
+ /*
+ * If the rect index is RECT_SOLO, we cannot expect a
+ * virtual plane sharing the same SSPP id, so disable
+ * multirect.
+ */
+ mode_mask = 0;
+ } else {
+ mode_mask = DPU_REG_READ(&ctx->hw, SSPP_MULTIRECT_OPMODE);
+ mode_mask |= pipe->multirect_index;
+ if (pipe->multirect_mode == DPU_SSPP_MULTIRECT_TIME_MX)
+ mode_mask |= BIT(2);
+ else
+ mode_mask &= ~BIT(2);
+ }
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_MULTIRECT_OPMODE, mode_mask);
+}
+
+static void _sspp_setup_opmode(struct dpu_hw_sspp *ctx,
+ u32 mask, u8 en)
+{
+ const struct dpu_sspp_sub_blks *sblk = ctx->cap->sblk;
+ u32 opmode;
+
+ if (!test_bit(DPU_SSPP_SCALER_QSEED2, &ctx->cap->features) ||
+ !test_bit(DPU_SSPP_CSC, &ctx->cap->features))
+ return;
+
+ opmode = DPU_REG_READ(&ctx->hw, sblk->scaler_blk.base + SSPP_VIG_OP_MODE);
+
+ if (en)
+ opmode |= mask;
+ else
+ opmode &= ~mask;
+
+ DPU_REG_WRITE(&ctx->hw, sblk->scaler_blk.base + SSPP_VIG_OP_MODE, opmode);
+}
+
+static void _sspp_setup_csc10_opmode(struct dpu_hw_sspp *ctx,
+ u32 mask, u8 en)
+{
+ const struct dpu_sspp_sub_blks *sblk = ctx->cap->sblk;
+ u32 opmode;
+
+ opmode = DPU_REG_READ(&ctx->hw, sblk->csc_blk.base + SSPP_VIG_CSC_10_OP_MODE);
+ if (en)
+ opmode |= mask;
+ else
+ opmode &= ~mask;
+
+ DPU_REG_WRITE(&ctx->hw, sblk->csc_blk.base + SSPP_VIG_CSC_10_OP_MODE, opmode);
+}
+
+/*
+ * Set up the source pixel format and flip
+ */
+static void dpu_hw_sspp_setup_format(struct dpu_sw_pipe *pipe,
+ const struct dpu_format *fmt, u32 flags)
+{
+ struct dpu_hw_sspp *ctx = pipe->sspp;
+ struct dpu_hw_blk_reg_map *c;
+ u32 chroma_samp, unpack, src_format;
+ u32 opmode = 0;
+ u32 fast_clear = 0;
+ u32 op_mode_off, unpack_pat_off, format_off;
+
+ if (!ctx || !fmt)
+ return;
+
+ if (pipe->multirect_index == DPU_SSPP_RECT_SOLO ||
+ pipe->multirect_index == DPU_SSPP_RECT_0) {
+ op_mode_off = SSPP_SRC_OP_MODE;
+ unpack_pat_off = SSPP_SRC_UNPACK_PATTERN;
+ format_off = SSPP_SRC_FORMAT;
+ } else {
+ op_mode_off = SSPP_SRC_OP_MODE_REC1;
+ unpack_pat_off = SSPP_SRC_UNPACK_PATTERN_REC1;
+ format_off = SSPP_SRC_FORMAT_REC1;
+ }
+
+ c = &ctx->hw;
+ opmode = DPU_REG_READ(c, op_mode_off);
+ opmode &= ~(MDSS_MDP_OP_FLIP_LR | MDSS_MDP_OP_FLIP_UD |
+ MDSS_MDP_OP_BWC_EN | MDSS_MDP_OP_PE_OVERRIDE);
+
+ if (flags & DPU_SSPP_FLIP_LR)
+ opmode |= MDSS_MDP_OP_FLIP_LR;
+ if (flags & DPU_SSPP_FLIP_UD)
+ opmode |= MDSS_MDP_OP_FLIP_UD;
+
+ chroma_samp = fmt->chroma_sample;
+ if (flags & DPU_SSPP_SOURCE_ROTATED_90) {
+ if (chroma_samp == DPU_CHROMA_H2V1)
+ chroma_samp = DPU_CHROMA_H1V2;
+ else if (chroma_samp == DPU_CHROMA_H1V2)
+ chroma_samp = DPU_CHROMA_H2V1;
+ }
+
+ src_format = (chroma_samp << 23) | (fmt->fetch_planes << 19) |
+ (fmt->bits[C3_ALPHA] << 6) | (fmt->bits[C2_R_Cr] << 4) |
+ (fmt->bits[C1_B_Cb] << 2) | (fmt->bits[C0_G_Y] << 0);
+
+ if (flags & DPU_SSPP_ROT_90)
+ src_format |= BIT(11); /* ROT90 */
+
+ if (fmt->alpha_enable && fmt->fetch_planes == DPU_PLANE_INTERLEAVED)
+ src_format |= BIT(8); /* SRCC3_EN */
+
+ if (flags & DPU_SSPP_SOLID_FILL)
+ src_format |= BIT(22);
+
+ unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
+ (fmt->element[1] << 8) | (fmt->element[0] << 0);
+ src_format |= ((fmt->unpack_count - 1) << 12) |
+ (fmt->unpack_tight << 17) |
+ (fmt->unpack_align_msb << 18) |
+ ((fmt->bpp - 1) << 9);
+
+ if (fmt->fetch_mode != DPU_FETCH_LINEAR) {
+ if (DPU_FORMAT_IS_UBWC(fmt))
+ opmode |= MDSS_MDP_OP_BWC_EN;
+ src_format |= (fmt->fetch_mode & 3) << 30; /* FRAME_FORMAT */
+ DPU_REG_WRITE(c, SSPP_FETCH_CONFIG,
+ DPU_FETCH_CONFIG_RESET_VALUE |
+ ctx->ubwc->highest_bank_bit << 18);
+ switch (ctx->ubwc->ubwc_enc_version) {
+ case UBWC_1_0:
+ fast_clear = fmt->alpha_enable ? BIT(31) : 0;
+ DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
+ fast_clear | (ctx->ubwc->ubwc_swizzle & 0x1) |
+ BIT(8) |
+ (ctx->ubwc->highest_bank_bit << 4));
+ break;
+ case UBWC_2_0:
+ fast_clear = fmt->alpha_enable ? BIT(31) : 0;
+ DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
+ fast_clear | (ctx->ubwc->ubwc_swizzle) |
+ (ctx->ubwc->highest_bank_bit << 4));
+ break;
+ case UBWC_3_0:
+ DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
+ BIT(30) | (ctx->ubwc->ubwc_swizzle) |
+ (ctx->ubwc->highest_bank_bit << 4));
+ break;
+ case UBWC_4_0:
+ DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
+ DPU_FORMAT_IS_YUV(fmt) ? 0 : BIT(30));
+ break;
+ }
+ }
+
+ opmode |= MDSS_MDP_OP_PE_OVERRIDE;
+
+ /* if this is YUV pixel format, enable CSC */
+ if (DPU_FORMAT_IS_YUV(fmt))
+ src_format |= BIT(15);
+
+ if (DPU_FORMAT_IS_DX(fmt))
+ src_format |= BIT(14);
+
+ /* update scaler opmode, if appropriate */
+ if (test_bit(DPU_SSPP_CSC, &ctx->cap->features))
+ _sspp_setup_opmode(ctx, VIG_OP_CSC_EN | VIG_OP_CSC_SRC_DATAFMT,
+ DPU_FORMAT_IS_YUV(fmt));
+ else if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features))
+ _sspp_setup_csc10_opmode(ctx,
+ VIG_CSC_10_EN | VIG_CSC_10_SRC_DATAFMT,
+ DPU_FORMAT_IS_YUV(fmt));
+
+ DPU_REG_WRITE(c, format_off, src_format);
+ DPU_REG_WRITE(c, unpack_pat_off, unpack);
+ DPU_REG_WRITE(c, op_mode_off, opmode);
+
+ /* clear previous UBWC error */
+ DPU_REG_WRITE(c, SSPP_UBWC_ERROR_STATUS, BIT(31));
+}
+
+static void dpu_hw_sspp_setup_pe_config(struct dpu_hw_sspp *ctx,
+ struct dpu_hw_pixel_ext *pe_ext)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u8 color;
+ u32 lr_pe[4], tb_pe[4], tot_req_pixels[4];
+ const u32 bytemask = 0xff;
+ const u32 shortmask = 0xffff;
+
+ if (!ctx || !pe_ext)
+ return;
+
+ c = &ctx->hw;
+
+ /* program SW pixel extension override for all pipes */
+ for (color = 0; color < DPU_MAX_PLANES; color++) {
+ /* color 2 has the same set of registers as color 1 */
+ if (color == 2)
+ continue;
+
+ lr_pe[color] = ((pe_ext->right_ftch[color] & bytemask) << 24)|
+ ((pe_ext->right_rpt[color] & bytemask) << 16)|
+ ((pe_ext->left_ftch[color] & bytemask) << 8)|
+ (pe_ext->left_rpt[color] & bytemask);
+
+ tb_pe[color] = ((pe_ext->btm_ftch[color] & bytemask) << 24)|
+ ((pe_ext->btm_rpt[color] & bytemask) << 16)|
+ ((pe_ext->top_ftch[color] & bytemask) << 8)|
+ (pe_ext->top_rpt[color] & bytemask);
+
+ tot_req_pixels[color] = (((pe_ext->roi_h[color] +
+ pe_ext->num_ext_pxls_top[color] +
+ pe_ext->num_ext_pxls_btm[color]) & shortmask) << 16) |
+ ((pe_ext->roi_w[color] +
+ pe_ext->num_ext_pxls_left[color] +
+ pe_ext->num_ext_pxls_right[color]) & shortmask);
+ }
+
+ /* color 0 */
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_LR, lr_pe[0]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_TB, tb_pe[0]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_REQ_PIXELS,
+ tot_req_pixels[0]);
+
+ /* color 1 and color 2 */
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_LR, lr_pe[1]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_TB, tb_pe[1]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS,
+ tot_req_pixels[1]);
+
+ /* color 3 */
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_LR, lr_pe[3]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_TB, tb_pe[3]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_REQ_PIXELS,
+ tot_req_pixels[3]);
+}
+
+static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_sspp *ctx,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg,
+ const struct dpu_format *format)
+{
+ if (!ctx || !scaler3_cfg)
+ return;
+
+ dpu_hw_setup_scaler3(&ctx->hw, scaler3_cfg,
+ ctx->cap->sblk->scaler_blk.base,
+ ctx->cap->sblk->scaler_blk.version,
+ format);
+}
+
+static u32 _dpu_hw_sspp_get_scaler3_ver(struct dpu_hw_sspp *ctx)
+{
+ if (!ctx)
+ return 0;
+
+ return dpu_hw_get_scaler3_ver(&ctx->hw,
+ ctx->cap->sblk->scaler_blk.base);
+}
+
+/*
+ * dpu_hw_sspp_setup_rects()
+ */
+static void dpu_hw_sspp_setup_rects(struct dpu_sw_pipe *pipe,
+ struct dpu_sw_pipe_cfg *cfg)
+{
+ struct dpu_hw_sspp *ctx = pipe->sspp;
+ struct dpu_hw_blk_reg_map *c;
+ u32 src_size, src_xy, dst_size, dst_xy;
+ u32 src_size_off, src_xy_off, out_size_off, out_xy_off;
+
+ if (!ctx || !cfg)
+ return;
+
+ c = &ctx->hw;
+
+ if (pipe->multirect_index == DPU_SSPP_RECT_SOLO ||
+ pipe->multirect_index == DPU_SSPP_RECT_0) {
+ src_size_off = SSPP_SRC_SIZE;
+ src_xy_off = SSPP_SRC_XY;
+ out_size_off = SSPP_OUT_SIZE;
+ out_xy_off = SSPP_OUT_XY;
+ } else {
+ src_size_off = SSPP_SRC_SIZE_REC1;
+ src_xy_off = SSPP_SRC_XY_REC1;
+ out_size_off = SSPP_OUT_SIZE_REC1;
+ out_xy_off = SSPP_OUT_XY_REC1;
+ }
+
+
+ /* src and dest rect programming */
+ src_xy = (cfg->src_rect.y1 << 16) | cfg->src_rect.x1;
+ src_size = (drm_rect_height(&cfg->src_rect) << 16) |
+ drm_rect_width(&cfg->src_rect);
+ dst_xy = (cfg->dst_rect.y1 << 16) | cfg->dst_rect.x1;
+ dst_size = (drm_rect_height(&cfg->dst_rect) << 16) |
+ drm_rect_width(&cfg->dst_rect);
+
+ /* rectangle register programming */
+ DPU_REG_WRITE(c, src_size_off, src_size);
+ DPU_REG_WRITE(c, src_xy_off, src_xy);
+ DPU_REG_WRITE(c, out_size_off, dst_size);
+ DPU_REG_WRITE(c, out_xy_off, dst_xy);
+}
+
+static void dpu_hw_sspp_setup_sourceaddress(struct dpu_sw_pipe *pipe,
+ struct dpu_hw_fmt_layout *layout)
+{
+ struct dpu_hw_sspp *ctx = pipe->sspp;
+ u32 ystride0, ystride1;
+ int i;
+
+ if (!ctx)
+ return;
+
+ if (pipe->multirect_index == DPU_SSPP_RECT_SOLO) {
+ for (i = 0; i < ARRAY_SIZE(layout->plane_addr); i++)
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + i * 0x4,
+ layout->plane_addr[i]);
+ } else if (pipe->multirect_index == DPU_SSPP_RECT_0) {
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR,
+ layout->plane_addr[0]);
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC2_ADDR,
+ layout->plane_addr[2]);
+ } else {
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC1_ADDR,
+ layout->plane_addr[0]);
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC3_ADDR,
+ layout->plane_addr[2]);
+ }
+
+ if (pipe->multirect_index == DPU_SSPP_RECT_SOLO) {
+ ystride0 = (layout->plane_pitch[0]) |
+ (layout->plane_pitch[1] << 16);
+ ystride1 = (layout->plane_pitch[2]) |
+ (layout->plane_pitch[3] << 16);
+ } else {
+ ystride0 = DPU_REG_READ(&ctx->hw, SSPP_SRC_YSTRIDE0);
+ ystride1 = DPU_REG_READ(&ctx->hw, SSPP_SRC_YSTRIDE1);
+
+ if (pipe->multirect_index == DPU_SSPP_RECT_0) {
+ ystride0 = (ystride0 & 0xFFFF0000) |
+ (layout->plane_pitch[0] & 0x0000FFFF);
+ ystride1 = (ystride1 & 0xFFFF0000)|
+ (layout->plane_pitch[2] & 0x0000FFFF);
+ } else {
+ ystride0 = (ystride0 & 0x0000FFFF) |
+ ((layout->plane_pitch[0] << 16) &
+ 0xFFFF0000);
+ ystride1 = (ystride1 & 0x0000FFFF) |
+ ((layout->plane_pitch[2] << 16) &
+ 0xFFFF0000);
+ }
+ }
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC_YSTRIDE0, ystride0);
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC_YSTRIDE1, ystride1);
+}
+
+static void dpu_hw_sspp_setup_csc(struct dpu_hw_sspp *ctx,
+ const struct dpu_csc_cfg *data)
+{
+ u32 offset;
+ bool csc10 = false;
+
+ if (!ctx || !data)
+ return;
+
+ offset = ctx->cap->sblk->csc_blk.base;
+
+ if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features)) {
+ offset += CSC_10BIT_OFFSET;
+ csc10 = true;
+ }
+
+ dpu_hw_csc_setup(&ctx->hw, offset, data, csc10);
+}
+
+static void dpu_hw_sspp_setup_solidfill(struct dpu_sw_pipe *pipe, u32 color)
+{
+ struct dpu_hw_sspp *ctx = pipe->sspp;
+ struct dpu_hw_fmt_layout cfg;
+
+ if (!ctx)
+ return;
+
+ /* cleanup source addresses */
+ memset(&cfg, 0, sizeof(cfg));
+ ctx->ops.setup_sourceaddress(pipe, &cfg);
+
+ if (pipe->multirect_index == DPU_SSPP_RECT_SOLO ||
+ pipe->multirect_index == DPU_SSPP_RECT_0)
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR, color);
+ else
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR_REC1,
+ color);
+}
+
+static void dpu_hw_sspp_setup_qos_lut(struct dpu_hw_sspp *ctx,
+ struct dpu_hw_qos_cfg *cfg)
+{
+ if (!ctx || !cfg)
+ return;
+
+ _dpu_hw_setup_qos_lut(&ctx->hw, SSPP_DANGER_LUT,
+ test_bit(DPU_SSPP_QOS_8LVL, &ctx->cap->features),
+ cfg);
+}
+
+static void dpu_hw_sspp_setup_qos_ctrl(struct dpu_hw_sspp *ctx,
+ bool danger_safe_en)
+{
+ if (!ctx)
+ return;
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_QOS_CTRL,
+ danger_safe_en ? SSPP_QOS_CTRL_DANGER_SAFE_EN : 0);
+}
+
+static void dpu_hw_sspp_setup_cdp(struct dpu_sw_pipe *pipe,
+ const struct dpu_format *fmt,
+ bool enable)
+{
+ struct dpu_hw_sspp *ctx = pipe->sspp;
+ u32 cdp_cntl_offset = 0;
+
+ if (!ctx)
+ return;
+
+ if (pipe->multirect_index == DPU_SSPP_RECT_SOLO ||
+ pipe->multirect_index == DPU_SSPP_RECT_0)
+ cdp_cntl_offset = SSPP_CDP_CNTL;
+ else
+ cdp_cntl_offset = SSPP_CDP_CNTL_REC1;
+
+ dpu_setup_cdp(&ctx->hw, cdp_cntl_offset, fmt, enable);
+}
+
+static void _setup_layer_ops(struct dpu_hw_sspp *c,
+ unsigned long features)
+{
+ c->ops.setup_format = dpu_hw_sspp_setup_format;
+ c->ops.setup_rects = dpu_hw_sspp_setup_rects;
+ c->ops.setup_sourceaddress = dpu_hw_sspp_setup_sourceaddress;
+ c->ops.setup_solidfill = dpu_hw_sspp_setup_solidfill;
+ c->ops.setup_pe = dpu_hw_sspp_setup_pe_config;
+
+ if (test_bit(DPU_SSPP_QOS, &features)) {
+ c->ops.setup_qos_lut = dpu_hw_sspp_setup_qos_lut;
+ c->ops.setup_qos_ctrl = dpu_hw_sspp_setup_qos_ctrl;
+ }
+
+ if (test_bit(DPU_SSPP_CSC, &features) ||
+ test_bit(DPU_SSPP_CSC_10BIT, &features))
+ c->ops.setup_csc = dpu_hw_sspp_setup_csc;
+
+ if (test_bit(DPU_SSPP_SMART_DMA_V1, &c->cap->features) ||
+ test_bit(DPU_SSPP_SMART_DMA_V2, &c->cap->features))
+ c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
+
+ if (test_bit(DPU_SSPP_SCALER_QSEED3, &features) ||
+ test_bit(DPU_SSPP_SCALER_QSEED3LITE, &features) ||
+ test_bit(DPU_SSPP_SCALER_QSEED4, &features)) {
+ c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3;
+ c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver;
+ }
+
+ if (test_bit(DPU_SSPP_CDP, &features))
+ c->ops.setup_cdp = dpu_hw_sspp_setup_cdp;
+}
+
+#ifdef CONFIG_DEBUG_FS
+int _dpu_hw_sspp_init_debugfs(struct dpu_hw_sspp *hw_pipe, struct dpu_kms *kms,
+ struct dentry *entry)
+{
+ const struct dpu_sspp_cfg *cfg = hw_pipe->cap;
+ const struct dpu_sspp_sub_blks *sblk = cfg->sblk;
+ struct dentry *debugfs_root;
+ char sspp_name[32];
+
+ snprintf(sspp_name, sizeof(sspp_name), "%d", hw_pipe->idx);
+
+ /* create overall sub-directory for the pipe */
+ debugfs_root =
+ debugfs_create_dir(sspp_name, entry);
+
+ /* don't error check these */
+ debugfs_create_xul("features", 0600,
+ debugfs_root, (unsigned long *)&hw_pipe->cap->features);
+
+ /* add register dump support */
+ dpu_debugfs_create_regset32("src_blk", 0400,
+ debugfs_root,
+ cfg->base,
+ cfg->len,
+ kms);
+
+ if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
+ cfg->features & BIT(DPU_SSPP_SCALER_QSEED3LITE) ||
+ cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) ||
+ cfg->features & BIT(DPU_SSPP_SCALER_QSEED4))
+ dpu_debugfs_create_regset32("scaler_blk", 0400,
+ debugfs_root,
+ sblk->scaler_blk.base + cfg->base,
+ sblk->scaler_blk.len,
+ kms);
+
+ if (cfg->features & BIT(DPU_SSPP_CSC) ||
+ cfg->features & BIT(DPU_SSPP_CSC_10BIT))
+ dpu_debugfs_create_regset32("csc_blk", 0400,
+ debugfs_root,
+ sblk->csc_blk.base + cfg->base,
+ sblk->csc_blk.len,
+ kms);
+
+ debugfs_create_u32("xin_id",
+ 0400,
+ debugfs_root,
+ (u32 *) &cfg->xin_id);
+ debugfs_create_u32("clk_ctrl",
+ 0400,
+ debugfs_root,
+ (u32 *) &cfg->clk_ctrl);
+
+ return 0;
+}
+#endif
+
+struct dpu_hw_sspp *dpu_hw_sspp_init(const struct dpu_sspp_cfg *cfg,
+ void __iomem *addr, const struct msm_mdss_data *mdss_data)
+{
+ struct dpu_hw_sspp *hw_pipe;
+
+ if (!addr)
+ return ERR_PTR(-EINVAL);
+
+ hw_pipe = kzalloc(sizeof(*hw_pipe), GFP_KERNEL);
+ if (!hw_pipe)
+ return ERR_PTR(-ENOMEM);
+
+ hw_pipe->hw.blk_addr = addr + cfg->base;
+ hw_pipe->hw.log_mask = DPU_DBG_MASK_SSPP;
+
+ /* Assign ops */
+ hw_pipe->ubwc = mdss_data;
+ hw_pipe->idx = cfg->id;
+ hw_pipe->cap = cfg;
+ _setup_layer_ops(hw_pipe, hw_pipe->cap->features);
+
+ return hw_pipe;
+}
+
+void dpu_hw_sspp_destroy(struct dpu_hw_sspp *ctx)
+{
+ kfree(ctx);
+}
+
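A worked example of the YSTRIDE sharing in dpu_hw_sspp_setup_sourceaddress() above: in multirect mode REC0 owns the low 16 bits of each stride register and REC1 the high 16 bits, so updating one rectangle preserves the other's half (pitch values hypothetical):

	u32 ystride0 = 0;

	/* REC0 writes a 4096-byte pitch into the low half */
	ystride0 = (ystride0 & 0xFFFF0000) | (4096 & 0x0000FFFF);
	/* REC1 writes a 2048-byte pitch into the high half */
	ystride0 = (ystride0 & 0x0000FFFF) | ((2048 << 16) & 0xFFFF0000);
	/* ystride0 == 0x08001000 */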
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
new file mode 100644
index 0000000000..cbf4f95ff0
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -0,0 +1,352 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_SSPP_H
+#define _DPU_HW_SSPP_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_formats.h"
+
+struct dpu_hw_sspp;
+
+/**
+ * Flags
+ */
+#define DPU_SSPP_FLIP_LR BIT(0)
+#define DPU_SSPP_FLIP_UD BIT(1)
+#define DPU_SSPP_SOURCE_ROTATED_90 BIT(2)
+#define DPU_SSPP_ROT_90 BIT(3)
+#define DPU_SSPP_SOLID_FILL BIT(4)
+
+/**
+ * Define all scaler feature bits in catalog
+ */
+#define DPU_SSPP_SCALER (BIT(DPU_SSPP_SCALER_RGB) | \
+ BIT(DPU_SSPP_SCALER_QSEED2) | \
+ BIT(DPU_SSPP_SCALER_QSEED3) | \
+ BIT(DPU_SSPP_SCALER_QSEED3LITE) | \
+ BIT(DPU_SSPP_SCALER_QSEED4))
+
+/*
+ * Define all CSC feature bits in catalog
+ */
+#define DPU_SSPP_CSC_ANY (BIT(DPU_SSPP_CSC) | \
+ BIT(DPU_SSPP_CSC_10BIT))
+
+/**
+ * Component indices
+ */
+enum {
+ DPU_SSPP_COMP_0,
+ DPU_SSPP_COMP_1_2,
+ DPU_SSPP_COMP_2,
+ DPU_SSPP_COMP_3,
+
+ DPU_SSPP_COMP_MAX
+};
+
+/**
+ * DPU_SSPP_RECT_SOLO - multirect disabled
+ * DPU_SSPP_RECT_0 - rect0 of a multirect pipe
+ * DPU_SSPP_RECT_1 - rect1 of a multirect pipe
+ *
+ * Note: HW supports multirect with either RECT0 or
+ * RECT1. Considering no benefit of such configs over
+ * SOLO mode and to keep the plane management simple,
+ * we don't support single rect multirect configs.
+ */
+enum dpu_sspp_multirect_index {
+ DPU_SSPP_RECT_SOLO = 0,
+ DPU_SSPP_RECT_0,
+ DPU_SSPP_RECT_1,
+};
+
+enum dpu_sspp_multirect_mode {
+ DPU_SSPP_MULTIRECT_NONE = 0,
+ DPU_SSPP_MULTIRECT_PARALLEL,
+ DPU_SSPP_MULTIRECT_TIME_MX,
+};
+
+enum {
+ DPU_FRAME_LINEAR,
+ DPU_FRAME_TILE_A4X,
+ DPU_FRAME_TILE_A5X,
+};
+
+enum dpu_hw_filter {
+ DPU_SCALE_FILTER_NEAREST = 0,
+ DPU_SCALE_FILTER_BIL,
+ DPU_SCALE_FILTER_PCMN,
+ DPU_SCALE_FILTER_CA,
+ DPU_SCALE_FILTER_MAX
+};
+
+enum dpu_hw_filter_alpa {
+ DPU_SCALE_ALPHA_PIXEL_REP,
+ DPU_SCALE_ALPHA_BIL
+};
+
+enum dpu_hw_filter_yuv {
+ DPU_SCALE_2D_4X4,
+ DPU_SCALE_2D_CIR,
+ DPU_SCALE_1D_SEP,
+ DPU_SCALE_BIL
+};
+
+struct dpu_hw_sharp_cfg {
+ u32 strength;
+ u32 edge_thr;
+ u32 smooth_thr;
+ u32 noise_thr;
+};
+
+struct dpu_hw_pixel_ext {
+ /* scaling factors are enabled for this input layer */
+ uint8_t enable_pxl_ext;
+
+ int init_phase_x[DPU_MAX_PLANES];
+ int phase_step_x[DPU_MAX_PLANES];
+ int init_phase_y[DPU_MAX_PLANES];
+ int phase_step_y[DPU_MAX_PLANES];
+
+ /*
+ * Number of pixels extension in left, right, top and bottom direction
+ * for all color components. This pixel value for each color component
+ * should be sum of fetch + repeat pixels.
+ */
+ int num_ext_pxls_left[DPU_MAX_PLANES];
+ int num_ext_pxls_right[DPU_MAX_PLANES];
+ int num_ext_pxls_top[DPU_MAX_PLANES];
+ int num_ext_pxls_btm[DPU_MAX_PLANES];
+
+ /*
+ * Number of pixels needs to be overfetched in left, right, top and
+ * bottom directions from source image for scaling.
+ */
+ int left_ftch[DPU_MAX_PLANES];
+ int right_ftch[DPU_MAX_PLANES];
+ int top_ftch[DPU_MAX_PLANES];
+ int btm_ftch[DPU_MAX_PLANES];
+
+ /*
+ * Number of pixels needs to be repeated in left, right, top and
+ * bottom directions for scaling.
+ */
+ int left_rpt[DPU_MAX_PLANES];
+ int right_rpt[DPU_MAX_PLANES];
+ int top_rpt[DPU_MAX_PLANES];
+ int btm_rpt[DPU_MAX_PLANES];
+
+ uint32_t roi_w[DPU_MAX_PLANES];
+ uint32_t roi_h[DPU_MAX_PLANES];
+
+ /*
+ * Filter type to be used for scaling in horizontal and vertical
+ * directions
+ */
+ enum dpu_hw_filter horz_filter[DPU_MAX_PLANES];
+ enum dpu_hw_filter vert_filter[DPU_MAX_PLANES];
+
+};
+
+/**
+ * struct dpu_sw_pipe_cfg : software pipe configuration
+ * @src_rect: src ROI, caller takes into account the different operations
+ * such as decimation, flip etc to program this field
+ * @dst_rect: destination ROI.
+ */
+struct dpu_sw_pipe_cfg {
+ struct drm_rect src_rect;
+ struct drm_rect dst_rect;
+};
+
+/**
+ * struct dpu_hw_pipe_ts_cfg - traffic shaper configuration
+ * @size: size to prefill in bytes, or zero to disable
+ * @time: time to prefill in usec, or zero to disable
+ */
+struct dpu_hw_pipe_ts_cfg {
+ u64 size;
+ u64 time;
+};
+
+/**
+ * struct dpu_sw_pipe - software pipe description
+ * @sspp: backing SSPP pipe
+ * @multirect_index: index of the rectangle of SSPP
+ * @multirect_mode: parallel or time multiplex multirect mode
+ */
+struct dpu_sw_pipe {
+ struct dpu_hw_sspp *sspp;
+ enum dpu_sspp_multirect_index multirect_index;
+ enum dpu_sspp_multirect_mode multirect_mode;
+};
+
+/**
+ * struct dpu_hw_sspp_ops - interface to the SSPP Hw driver functions
+ * Caller must call the init function to get the pipe context for each pipe
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_sspp_ops {
+ /**
+ * setup_format - setup pixel format, cropping rectangle and flip
+ * @pipe: Pointer to software pipe context
+ * @fmt: Pointer to DPU format description
+ * @flags: Extra flags for format config
+ */
+ void (*setup_format)(struct dpu_sw_pipe *pipe,
+ const struct dpu_format *fmt, u32 flags);
+
+ /**
+ * setup_rects - setup pipe ROI rectangles
+ * @pipe: Pointer to software pipe context
+ * @cfg: Pointer to pipe config structure
+ */
+ void (*setup_rects)(struct dpu_sw_pipe *pipe,
+ struct dpu_sw_pipe_cfg *cfg);
+
+ /**
+ * setup_pe - setup pipe pixel extension
+ * @ctx: Pointer to pipe context
+ * @pe_ext: Pointer to pixel ext settings
+ */
+ void (*setup_pe)(struct dpu_hw_sspp *ctx,
+ struct dpu_hw_pixel_ext *pe_ext);
+
+ /**
+ * setup_sourceaddress - setup pipe source addresses
+ * @pipe: Pointer to software pipe context
+ * @layout: format layout information for programming buffer to hardware
+ */
+ void (*setup_sourceaddress)(struct dpu_sw_pipe *pipe,
+ struct dpu_hw_fmt_layout *layout);
+
+ /**
+ * setup_csc - setup color space conversion
+ * @ctx: Pointer to pipe context
+ * @data: Pointer to config structure
+ */
+ void (*setup_csc)(struct dpu_hw_sspp *ctx, const struct dpu_csc_cfg *data);
+
+ /**
+ * setup_solidfill - enable/disable colorfill
+ * @pipe: Pointer to software pipe context
+ * @color: Fill color value
+ */
+ void (*setup_solidfill)(struct dpu_sw_pipe *pipe, u32 color);
+
+ /**
+ * setup_multirect - setup multirect configuration
+ * @pipe: Pointer to software pipe context
+ */
+ void (*setup_multirect)(struct dpu_sw_pipe *pipe);
+
+ /**
+ * setup_sharpening - setup sharpening
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to config structure
+ */
+ void (*setup_sharpening)(struct dpu_hw_sspp *ctx,
+ struct dpu_hw_sharp_cfg *cfg);
+
+
+ /**
+ * setup_qos_lut - setup QoS LUTs
+ * @ctx: Pointer to pipe context
+ * @cfg: LUT configuration
+ */
+ void (*setup_qos_lut)(struct dpu_hw_sspp *ctx,
+ struct dpu_hw_qos_cfg *cfg);
+
+ /**
+ * setup_qos_ctrl - setup QoS control
+ * @ctx: Pointer to pipe context
+ * @danger_safe_en: flags controlling enabling of danger/safe QoS/LUT
+ */
+ void (*setup_qos_ctrl)(struct dpu_hw_sspp *ctx,
+ bool danger_safe_en);
+
+ /**
+ * setup_histogram - setup histograms
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to histogram configuration
+ */
+ void (*setup_histogram)(struct dpu_hw_sspp *ctx,
+ void *cfg);
+
+ /**
+ * setup_scaler - setup scaler
+ * @ctx: Pointer to pipe context
+ * @scaler3_cfg: Pointer to scaler configuration
+ * @format: pixel format parameters
+ */
+ void (*setup_scaler)(struct dpu_hw_sspp *ctx,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg,
+ const struct dpu_format *format);
+
+ /**
+ * get_scaler_ver - get scaler h/w version
+ * @ctx: Pointer to pipe context
+ */
+ u32 (*get_scaler_ver)(struct dpu_hw_sspp *ctx);
+
+ /**
+ * setup_cdp - setup client driven prefetch
+ * @pipe: Pointer to software pipe context
+ * @fmt: format used by the sw pipe
+ * @enable: whether the CDP should be enabled for this pipe
+ */
+ void (*setup_cdp)(struct dpu_sw_pipe *pipe,
+ const struct dpu_format *fmt,
+ bool enable);
+};
+
+/**
+ * struct dpu_hw_sspp - pipe description
+ * @base: hardware block base structure
+ * @hw: block hardware details
+ * @ubwc: UBWC configuration data
+ * @idx: pipe index
+ * @cap: pointer to layer_cfg
+ * @ops: pointer to operations possible for this pipe
+ */
+struct dpu_hw_sspp {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+ const struct msm_mdss_data *ubwc;
+
+ /* Pipe */
+ enum dpu_sspp idx;
+ const struct dpu_sspp_cfg *cap;
+
+ /* Ops */
+ struct dpu_hw_sspp_ops ops;
+};
+
+struct dpu_kms;
+/**
+ * dpu_hw_sspp_init() - Initializes the sspp hw driver object.
+ * Should be called once per pipe, before the pipe is accessed.
+ * @cfg: Pipe catalog entry for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @mdss_data: UBWC / MDSS configuration data
+ */
+struct dpu_hw_sspp *dpu_hw_sspp_init(const struct dpu_sspp_cfg *cfg,
+ void __iomem *addr, const struct msm_mdss_data *mdss_data);
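+
+/*
+ * Illustrative usage sketch (not part of the driver itself), assuming
+ * "sspp_cfg", "mmio" and "mdss_data" were already obtained from the
+ * catalog and the mapped MDP region:
+ *
+ *	struct dpu_hw_sspp *sspp;
+ *
+ *	sspp = dpu_hw_sspp_init(sspp_cfg, mmio, mdss_data);
+ *	if (IS_ERR(sspp))
+ *		return PTR_ERR(sspp);
+ *	...
+ *	dpu_hw_sspp_destroy(sspp);
+ */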
+
+/**
+ * dpu_hw_sspp_destroy(): Destroys SSPP driver context
+ * Should be called during HW pipe cleanup.
+ * @ctx: Pointer to SSPP driver context returned by dpu_hw_sspp_init
+ */
+void dpu_hw_sspp_destroy(struct dpu_hw_sspp *ctx);
+
+int _dpu_hw_sspp_init_debugfs(struct dpu_hw_sspp *hw_pipe, struct dpu_kms *kms,
+ struct dentry *entry);
+
+#endif /*_DPU_HW_SSPP_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
new file mode 100644
index 0000000000..cff48763ce
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_top.h"
+#include "dpu_kms.h"
+
+#define FLD_SPLIT_DISPLAY_CMD BIT(1)
+#define FLD_SMART_PANEL_FREE_RUN BIT(2)
+#define FLD_INTF_1_SW_TRG_MUX BIT(4)
+#define FLD_INTF_2_SW_TRG_MUX BIT(8)
+#define FLD_TE_LINE_INTER_WATERLEVEL_MASK 0xFFFF
+
+#define TRAFFIC_SHAPER_EN BIT(31)
+#define TRAFFIC_SHAPER_RD_CLIENT(num) (0x030 + (num * 4))
+#define TRAFFIC_SHAPER_WR_CLIENT(num) (0x060 + (num * 4))
+#define TRAFFIC_SHAPER_FIXPOINT_FACTOR 4
+
+#define MDP_TICK_COUNT 16
+#define XO_CLK_RATE 19200
+#define MS_TICKS_IN_SEC 1000
+
+#define CALCULATE_WD_LOAD_VALUE(fps) \
+ ((uint32_t)((MS_TICKS_IN_SEC * XO_CLK_RATE)/(MDP_TICK_COUNT * fps)))
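+
+/*
+ * Worked example: for a 60 fps panel the timer load value is
+ * (1000 * 19200) / (16 * 60) = 20000 XO-clock ticks per frame.
+ */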
+
+static void dpu_hw_setup_split_pipe(struct dpu_hw_mdp *mdp,
+ struct split_pipe_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 upper_pipe = 0;
+ u32 lower_pipe = 0;
+
+ if (!mdp || !cfg)
+ return;
+
+ c = &mdp->hw;
+
+ if (cfg->en) {
+ if (cfg->mode == INTF_MODE_CMD) {
+ lower_pipe = FLD_SPLIT_DISPLAY_CMD;
+ /* interface controlling sw trigger */
+ if (cfg->intf == INTF_2)
+ lower_pipe |= FLD_INTF_1_SW_TRG_MUX;
+ else
+ lower_pipe |= FLD_INTF_2_SW_TRG_MUX;
+ upper_pipe = lower_pipe;
+ } else {
+ if (cfg->intf == INTF_2) {
+ lower_pipe = FLD_INTF_1_SW_TRG_MUX;
+ upper_pipe = FLD_INTF_2_SW_TRG_MUX;
+ } else {
+ lower_pipe = FLD_INTF_2_SW_TRG_MUX;
+ upper_pipe = FLD_INTF_1_SW_TRG_MUX;
+ }
+ }
+ }
+
+ DPU_REG_WRITE(c, SSPP_SPARE, cfg->split_flush_en ? 0x1 : 0x0);
+ DPU_REG_WRITE(c, SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower_pipe);
+ DPU_REG_WRITE(c, SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper_pipe);
+ DPU_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
+}
+
+static bool dpu_hw_setup_clk_force_ctrl(struct dpu_hw_mdp *mdp,
+ enum dpu_clk_ctrl_type clk_ctrl, bool enable)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_off, bit_off;
+ u32 reg_val, new_val;
+ bool clk_forced_on;
+
+ if (!mdp)
+ return false;
+
+ c = &mdp->hw;
+
+ if (clk_ctrl <= DPU_CLK_CTRL_NONE || clk_ctrl >= DPU_CLK_CTRL_MAX)
+ return false;
+
+ reg_off = mdp->caps->clk_ctrls[clk_ctrl].reg_off;
+ bit_off = mdp->caps->clk_ctrls[clk_ctrl].bit_off;
+
+ reg_val = DPU_REG_READ(c, reg_off);
+
+ if (enable)
+ new_val = reg_val | BIT(bit_off);
+ else
+ new_val = reg_val & ~BIT(bit_off);
+
+ DPU_REG_WRITE(c, reg_off, new_val);
+
+ clk_forced_on = !(reg_val & BIT(bit_off));
+
+ return clk_forced_on;
+}
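+
+/*
+ * Hypothetical caller sketch (for illustration only): the return value
+ * reports whether this call transitioned the clock from gated to
+ * forced-on, so the caller can drop the force again when done:
+ *
+ *	bool forced_on;
+ *
+ *	forced_on = mdp->ops.setup_clk_force_ctrl(mdp, DPU_CLK_CTRL_VIG0, true);
+ *	... program the pipe ...
+ *	if (forced_on)
+ *		mdp->ops.setup_clk_force_ctrl(mdp, DPU_CLK_CTRL_VIG0, false);
+ */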
+
+static void dpu_hw_get_danger_status(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 value;
+
+ if (!mdp || !status)
+ return;
+
+ c = &mdp->hw;
+
+ value = DPU_REG_READ(c, DANGER_STATUS);
+ status->mdp = (value >> 0) & 0x3;
+ status->sspp[SSPP_VIG0] = (value >> 4) & 0x3;
+ status->sspp[SSPP_VIG1] = (value >> 6) & 0x3;
+ status->sspp[SSPP_VIG2] = (value >> 8) & 0x3;
+ status->sspp[SSPP_VIG3] = (value >> 10) & 0x3;
+ status->sspp[SSPP_RGB0] = (value >> 12) & 0x3;
+ status->sspp[SSPP_RGB1] = (value >> 14) & 0x3;
+ status->sspp[SSPP_RGB2] = (value >> 16) & 0x3;
+ status->sspp[SSPP_RGB3] = (value >> 18) & 0x3;
+ status->sspp[SSPP_DMA0] = (value >> 20) & 0x3;
+ status->sspp[SSPP_DMA1] = (value >> 22) & 0x3;
+ status->sspp[SSPP_DMA2] = (value >> 28) & 0x3;
+ status->sspp[SSPP_DMA3] = (value >> 30) & 0x3;
+ status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x3;
+ status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x3;
+}
+
+static void dpu_hw_setup_vsync_source(struct dpu_hw_mdp *mdp,
+ struct dpu_vsync_source_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg, wd_load_value, wd_ctl, wd_ctl2;
+
+ if (!mdp || !cfg)
+ return;
+
+ c = &mdp->hw;
+
+ if (cfg->vsync_source >= DPU_VSYNC_SOURCE_WD_TIMER_4 &&
+ cfg->vsync_source <= DPU_VSYNC_SOURCE_WD_TIMER_0) {
+ switch (cfg->vsync_source) {
+ case DPU_VSYNC_SOURCE_WD_TIMER_4:
+ wd_load_value = MDP_WD_TIMER_4_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_4_CTL;
+ wd_ctl2 = MDP_WD_TIMER_4_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_3:
+ wd_load_value = MDP_WD_TIMER_3_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_3_CTL;
+ wd_ctl2 = MDP_WD_TIMER_3_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_2:
+ wd_load_value = MDP_WD_TIMER_2_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_2_CTL;
+ wd_ctl2 = MDP_WD_TIMER_2_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_1:
+ wd_load_value = MDP_WD_TIMER_1_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_1_CTL;
+ wd_ctl2 = MDP_WD_TIMER_1_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_0:
+ default:
+ wd_load_value = MDP_WD_TIMER_0_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_0_CTL;
+ wd_ctl2 = MDP_WD_TIMER_0_CTL2;
+ break;
+ }
+
+ DPU_REG_WRITE(c, wd_load_value,
+ CALCULATE_WD_LOAD_VALUE(cfg->frame_rate));
+
+ DPU_REG_WRITE(c, wd_ctl, BIT(0)); /* clear timer */
+ reg = DPU_REG_READ(c, wd_ctl2);
+ reg |= BIT(8); /* enable heartbeat timer */
+ reg |= BIT(0); /* enable WD timer */
+ DPU_REG_WRITE(c, wd_ctl2, reg);
+
+ /* make sure that timers are enabled/disabled for vsync state */
+ wmb();
+ }
+}
+
+static void dpu_hw_setup_vsync_source_and_vsync_sel(struct dpu_hw_mdp *mdp,
+ struct dpu_vsync_source_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg, i;
+ static const u32 pp_offset[PINGPONG_MAX] = {0xC, 0x8, 0x4, 0x13, 0x18};
+
+ if (!mdp || !cfg || (cfg->pp_count > ARRAY_SIZE(cfg->ppnumber)))
+ return;
+
+ c = &mdp->hw;
+
+ reg = DPU_REG_READ(c, MDP_VSYNC_SEL);
+ for (i = 0; i < cfg->pp_count; i++) {
+ int pp_idx = cfg->ppnumber[i] - PINGPONG_0;
+
+ if (pp_idx >= ARRAY_SIZE(pp_offset))
+ continue;
+
+ reg &= ~(0xf << pp_offset[pp_idx]);
+ reg |= (cfg->vsync_source & 0xf) << pp_offset[pp_idx];
+ }
+ DPU_REG_WRITE(c, MDP_VSYNC_SEL, reg);
+
+ dpu_hw_setup_vsync_source(mdp, cfg);
+}
+
+static void dpu_hw_get_safe_status(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 value;
+
+ if (!mdp || !status)
+ return;
+
+ c = &mdp->hw;
+
+ value = DPU_REG_READ(c, SAFE_STATUS);
+ status->mdp = (value >> 0) & 0x1;
+ status->sspp[SSPP_VIG0] = (value >> 4) & 0x1;
+ status->sspp[SSPP_VIG1] = (value >> 6) & 0x1;
+ status->sspp[SSPP_VIG2] = (value >> 8) & 0x1;
+ status->sspp[SSPP_VIG3] = (value >> 10) & 0x1;
+ status->sspp[SSPP_RGB0] = (value >> 12) & 0x1;
+ status->sspp[SSPP_RGB1] = (value >> 14) & 0x1;
+ status->sspp[SSPP_RGB2] = (value >> 16) & 0x1;
+ status->sspp[SSPP_RGB3] = (value >> 18) & 0x1;
+ status->sspp[SSPP_DMA0] = (value >> 20) & 0x1;
+ status->sspp[SSPP_DMA1] = (value >> 22) & 0x1;
+ status->sspp[SSPP_DMA2] = (value >> 28) & 0x1;
+ status->sspp[SSPP_DMA3] = (value >> 30) & 0x1;
+ status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x1;
+ status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x1;
+}
+
+static void dpu_hw_intf_audio_select(struct dpu_hw_mdp *mdp)
+{
+ struct dpu_hw_blk_reg_map *c;
+
+ if (!mdp)
+ return;
+
+ c = &mdp->hw;
+
+ DPU_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1);
+}
+
+static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
+ unsigned long cap)
+{
+ ops->setup_split_pipe = dpu_hw_setup_split_pipe;
+ ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl;
+ ops->get_danger_status = dpu_hw_get_danger_status;
+
+ if (cap & BIT(DPU_MDP_VSYNC_SEL))
+ ops->setup_vsync_source = dpu_hw_setup_vsync_source_and_vsync_sel;
+ else
+ ops->setup_vsync_source = dpu_hw_setup_vsync_source;
+
+ ops->get_safe_status = dpu_hw_get_safe_status;
+
+ if (cap & BIT(DPU_MDP_AUDIO_SELECT))
+ ops->intf_audio_select = dpu_hw_intf_audio_select;
+}
+
+struct dpu_hw_mdp *dpu_hw_mdptop_init(const struct dpu_mdp_cfg *cfg,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_mdp *mdp;
+
+ if (!addr)
+ return ERR_PTR(-EINVAL);
+
+ mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
+ if (!mdp)
+ return ERR_PTR(-ENOMEM);
+
+ mdp->hw.blk_addr = addr + cfg->base;
+ mdp->hw.log_mask = DPU_DBG_MASK_TOP;
+
+ /*
+ * Assign ops
+ */
+ mdp->caps = cfg;
+ _setup_mdp_ops(&mdp->ops, mdp->caps->features);
+
+ return mdp;
+}
+
+void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp)
+{
+ kfree(mdp);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
new file mode 100644
index 0000000000..8b1463d2b2
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_TOP_H
+#define _DPU_HW_TOP_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+struct dpu_hw_mdp;
+
+/**
+ * struct traffic_shaper_cfg: traffic shaper configuration
+ * @en : enable/disable traffic shaper
+ * @rd_client : true if read client; false if write client
+ * @client_id : client identifier
+ * @bpc_denom : denominator of byte per clk
+ * @bpc_numer : numerator of byte per clk
+ */
+struct traffic_shaper_cfg {
+ bool en;
+ bool rd_client;
+ u32 client_id;
+ u32 bpc_denom;
+ u64 bpc_numer;
+};
+
+/**
+ * struct split_pipe_cfg - pipe configuration for dual display panels
+ * @en : Enable/disable dual pipe configuration
+ * @mode : Panel interface mode
+ * @intf : Interface id for main control path
+ * @split_flush_en: Allows both the paths to be flushed when master path is
+ * flushed
+ */
+struct split_pipe_cfg {
+ bool en;
+ enum dpu_intf_mode mode;
+ enum dpu_intf intf;
+ bool split_flush_en;
+};
+
+/**
+ * struct dpu_danger_safe_status: danger and safe status signals
+ * @mdp: top level status
+ * @sspp: source pipe status
+ */
+struct dpu_danger_safe_status {
+ u8 mdp;
+ u8 sspp[SSPP_MAX];
+};
+
+/**
+ * struct dpu_vsync_source_cfg - configure the vsync source and, if
+ * required, the watchdog timers
+ * @pp_count: number of ping pongs active
+ * @frame_rate: Display frame rate
+ * @ppnumber: ping pong index array
+ * @vsync_source: vsync source selection
+ */
+struct dpu_vsync_source_cfg {
+ u32 pp_count;
+ u32 frame_rate;
+ u32 ppnumber[PINGPONG_MAX];
+ u32 vsync_source;
+};
+
+/**
+ * struct dpu_hw_mdp_ops - interface to the MDP TOP Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled.
+ * @setup_split_pipe : Programs the pipe control registers
+ * @setup_traffic_shaper : Programs traffic shaper control
+ */
+struct dpu_hw_mdp_ops {
+ /**
+ * setup_split_pipe() : Registers are not double buffered, this
+ * function should be called before timing control enable
+ * @mdp : mdp top context driver
+ * @cfg : upper and lower part of pipe configuration
+ */
+ void (*setup_split_pipe)(struct dpu_hw_mdp *mdp,
+ struct split_pipe_cfg *cfg);
+
+ /**
+ * setup_traffic_shaper() : Setup traffic shaper control
+ * @mdp : mdp top context driver
+ * @cfg : traffic shaper configuration
+ */
+ void (*setup_traffic_shaper)(struct dpu_hw_mdp *mdp,
+ struct traffic_shaper_cfg *cfg);
+
+ /**
+ * setup_clk_force_ctrl - set clock force control
+ * @mdp: mdp top context driver
+ * @clk_ctrl: clock to be controlled
+ * @enable: force on enable
+ * @return: if the clock is forced-on by this function
+ */
+ bool (*setup_clk_force_ctrl)(struct dpu_hw_mdp *mdp,
+ enum dpu_clk_ctrl_type clk_ctrl, bool enable);
+
+ /**
+ * get_danger_status - get danger status
+ * @mdp: mdp top context driver
+ * @status: Pointer to danger safe status
+ */
+ void (*get_danger_status)(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status);
+
+ /**
+ * setup_vsync_source - setup vsync source configuration details
+ * @mdp: mdp top context driver
+ * @cfg: vsync source selection configuration
+ */
+ void (*setup_vsync_source)(struct dpu_hw_mdp *mdp,
+ struct dpu_vsync_source_cfg *cfg);
+
+ /**
+ * get_safe_status - get safe status
+ * @mdp: mdp top context driver
+ * @status: Pointer to danger safe status
+ */
+ void (*get_safe_status)(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status);
+
+ /**
+ * intf_audio_select - select the external interface for audio
+ * @mdp: mdp top context driver
+ */
+ void (*intf_audio_select)(struct dpu_hw_mdp *mdp);
+};
+
+struct dpu_hw_mdp {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* top */
+ const struct dpu_mdp_cfg *caps;
+
+ /* ops */
+ struct dpu_hw_mdp_ops ops;
+};
+
+/**
+ * dpu_hw_mdptop_init - initializes the top driver for the passed config
+ * @cfg: MDP TOP configuration from catalog
+ * @addr: Mapped register io address of MDP
+ * @m: Pointer to mdss catalog data
+ */
+struct dpu_hw_mdp *dpu_hw_mdptop_init(const struct dpu_mdp_cfg *cfg,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
+
+void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp);
+
+#endif /*_DPU_HW_TOP_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
new file mode 100644
index 0000000000..6eee9f68ab
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+/* file-static variable for debugfs access */
+static u32 dpu_hw_util_log_mask = DPU_DBG_MASK_NONE;
+
+/* DPU_SCALER_QSEED3 */
+#define QSEED3_HW_VERSION 0x00
+#define QSEED3_OP_MODE 0x04
+#define QSEED3_RGB2Y_COEFF 0x08
+#define QSEED3_PHASE_INIT 0x0C
+#define QSEED3_PHASE_STEP_Y_H 0x10
+#define QSEED3_PHASE_STEP_Y_V 0x14
+#define QSEED3_PHASE_STEP_UV_H 0x18
+#define QSEED3_PHASE_STEP_UV_V 0x1C
+#define QSEED3_PRELOAD 0x20
+#define QSEED3_DE_SHARPEN 0x24
+#define QSEED3_DE_SHARPEN_CTL 0x28
+#define QSEED3_DE_SHAPE_CTL 0x2C
+#define QSEED3_DE_THRESHOLD 0x30
+#define QSEED3_DE_ADJUST_DATA_0 0x34
+#define QSEED3_DE_ADJUST_DATA_1 0x38
+#define QSEED3_DE_ADJUST_DATA_2 0x3C
+#define QSEED3_SRC_SIZE_Y_RGB_A 0x40
+#define QSEED3_SRC_SIZE_UV 0x44
+#define QSEED3_DST_SIZE 0x48
+#define QSEED3_COEF_LUT_CTRL 0x4C
+#define QSEED3_COEF_LUT_SWAP_BIT 0
+#define QSEED3_COEF_LUT_DIR_BIT 1
+#define QSEED3_COEF_LUT_Y_CIR_BIT 2
+#define QSEED3_COEF_LUT_UV_CIR_BIT 3
+#define QSEED3_COEF_LUT_Y_SEP_BIT 4
+#define QSEED3_COEF_LUT_UV_SEP_BIT 5
+#define QSEED3_BUFFER_CTRL 0x50
+#define QSEED3_CLK_CTRL0 0x54
+#define QSEED3_CLK_CTRL1 0x58
+#define QSEED3_CLK_STATUS 0x5C
+#define QSEED3_PHASE_INIT_Y_H 0x90
+#define QSEED3_PHASE_INIT_Y_V 0x94
+#define QSEED3_PHASE_INIT_UV_H 0x98
+#define QSEED3_PHASE_INIT_UV_V 0x9C
+#define QSEED3_COEF_LUT 0x100
+#define QSEED3_FILTERS 5
+#define QSEED3_LUT_REGIONS 4
+#define QSEED3_CIRCULAR_LUTS 9
+#define QSEED3_SEPARABLE_LUTS 10
+#define QSEED3_LUT_SIZE 60
+#define QSEED3_ENABLE 2
+#define QSEED3_DIR_LUT_SIZE (200 * sizeof(u32))
+#define QSEED3_CIR_LUT_SIZE \
+ (QSEED3_LUT_SIZE * QSEED3_CIRCULAR_LUTS * sizeof(u32))
+#define QSEED3_SEP_LUT_SIZE \
+ (QSEED3_LUT_SIZE * QSEED3_SEPARABLE_LUTS * sizeof(u32))
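+
+/*
+ * For reference, the sizes above work out to:
+ * QSEED3_CIR_LUT_SIZE = 60 * 9 * sizeof(u32) = 2160 bytes,
+ * QSEED3_SEP_LUT_SIZE = 60 * 10 * sizeof(u32) = 2400 bytes.
+ */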
+
+/* DPU_SCALER_QSEED3LITE */
+#define QSEED3LITE_COEF_LUT_Y_SEP_BIT 4
+#define QSEED3LITE_COEF_LUT_UV_SEP_BIT 5
+#define QSEED3LITE_COEF_LUT_CTRL 0x4C
+#define QSEED3LITE_COEF_LUT_SWAP_BIT 0
+#define QSEED3LITE_DIR_FILTER_WEIGHT 0x60
+#define QSEED3LITE_FILTERS 2
+#define QSEED3LITE_SEPARABLE_LUTS 10
+#define QSEED3LITE_LUT_SIZE 33
+#define QSEED3LITE_SEP_LUT_SIZE \
+ (QSEED3LITE_LUT_SIZE * QSEED3LITE_SEPARABLE_LUTS * sizeof(u32))
+
+/* QOS_LUT */
+#define QOS_DANGER_LUT 0x00
+#define QOS_SAFE_LUT 0x04
+#define QOS_CREQ_LUT 0x08
+#define QOS_QOS_CTRL 0x0C
+#define QOS_CREQ_LUT_0 0x14
+#define QOS_CREQ_LUT_1 0x18
+
+/* QOS_QOS_CTRL */
+#define QOS_QOS_CTRL_DANGER_SAFE_EN BIT(0)
+#define QOS_QOS_CTRL_DANGER_VBLANK_MASK GENMASK(5, 4)
+#define QOS_QOS_CTRL_VBLANK_EN BIT(16)
+#define QOS_QOS_CTRL_CREQ_VBLANK_MASK GENMASK(21, 20)
+
+void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
+ u32 reg_off,
+ u32 val,
+ const char *name)
+{
+ /* don't need to mutex protect this */
+ if (c->log_mask & dpu_hw_util_log_mask)
+ DPU_DEBUG_DRIVER("[%s:0x%X] <= 0x%X\n",
+ name, reg_off, val);
+ writel_relaxed(val, c->blk_addr + reg_off);
+}
+
+int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off)
+{
+ return readl_relaxed(c->blk_addr + reg_off);
+}
+
+u32 *dpu_hw_util_get_log_mask_ptr(void)
+{
+ return &dpu_hw_util_log_mask;
+}
+
+static void _dpu_hw_setup_scaler3_lut(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg, u32 offset)
+{
+ int i, j, filter;
+ int config_lut = 0x0;
+ unsigned long lut_flags;
+ u32 lut_addr, lut_offset, lut_len;
+ u32 *lut[QSEED3_FILTERS] = {NULL, NULL, NULL, NULL, NULL};
+ static const uint32_t off_tbl[QSEED3_FILTERS][QSEED3_LUT_REGIONS][2] = {
+ {{18, 0x000}, {12, 0x120}, {12, 0x1E0}, {8, 0x2A0} },
+ {{6, 0x320}, {3, 0x3E0}, {3, 0x440}, {3, 0x4A0} },
+ {{6, 0x500}, {3, 0x5c0}, {3, 0x620}, {3, 0x680} },
+ {{6, 0x380}, {3, 0x410}, {3, 0x470}, {3, 0x4d0} },
+ {{6, 0x560}, {3, 0x5f0}, {3, 0x650}, {3, 0x6b0} },
+ };
+
+ lut_flags = (unsigned long) scaler3_cfg->lut_flag;
+ if (test_bit(QSEED3_COEF_LUT_DIR_BIT, &lut_flags) &&
+ (scaler3_cfg->dir_len == QSEED3_DIR_LUT_SIZE)) {
+ lut[0] = scaler3_cfg->dir_lut;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_Y_CIR_BIT, &lut_flags) &&
+ (scaler3_cfg->y_rgb_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+ (scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+ lut[1] = scaler3_cfg->cir_lut +
+ scaler3_cfg->y_rgb_cir_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_UV_CIR_BIT, &lut_flags) &&
+ (scaler3_cfg->uv_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+ (scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+ lut[2] = scaler3_cfg->cir_lut +
+ scaler3_cfg->uv_cir_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
+ (scaler3_cfg->y_rgb_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+ (scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+ lut[3] = scaler3_cfg->sep_lut +
+ scaler3_cfg->y_rgb_sep_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
+ (scaler3_cfg->uv_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+ (scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+ lut[4] = scaler3_cfg->sep_lut +
+ scaler3_cfg->uv_sep_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+
+ if (config_lut) {
+ for (filter = 0; filter < QSEED3_FILTERS; filter++) {
+ if (!lut[filter])
+ continue;
+ lut_offset = 0;
+ for (i = 0; i < QSEED3_LUT_REGIONS; i++) {
+ lut_addr = QSEED3_COEF_LUT + offset
+ + off_tbl[filter][i][1];
+ lut_len = off_tbl[filter][i][0] << 2;
+ for (j = 0; j < lut_len; j++) {
+ DPU_REG_WRITE(c,
+ lut_addr,
+ (lut[filter])[lut_offset++]);
+ lut_addr += 4;
+ }
+ }
+ }
+ }
+
+ if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags))
+ DPU_REG_WRITE(c, QSEED3_COEF_LUT_CTRL + offset, BIT(0));
+}
+
+static void _dpu_hw_setup_scaler3lite_lut(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg, u32 offset)
+{
+ int j, filter;
+ int config_lut = 0x0;
+ unsigned long lut_flags;
+ u32 lut_addr, lut_offset;
+ u32 *lut[QSEED3LITE_FILTERS] = {NULL, NULL};
+ static const uint32_t off_tbl[QSEED3LITE_FILTERS] = { 0x000, 0x200 };
+
+ DPU_REG_WRITE(c, QSEED3LITE_DIR_FILTER_WEIGHT + offset, scaler3_cfg->dir_weight);
+
+ if (!scaler3_cfg->sep_lut)
+ return;
+
+ lut_flags = (unsigned long) scaler3_cfg->lut_flag;
+ if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
+ (scaler3_cfg->y_rgb_sep_lut_idx < QSEED3LITE_SEPARABLE_LUTS) &&
+ (scaler3_cfg->sep_len == QSEED3LITE_SEP_LUT_SIZE)) {
+ lut[0] = scaler3_cfg->sep_lut +
+ scaler3_cfg->y_rgb_sep_lut_idx * QSEED3LITE_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
+ (scaler3_cfg->uv_sep_lut_idx < QSEED3LITE_SEPARABLE_LUTS) &&
+ (scaler3_cfg->sep_len == QSEED3LITE_SEP_LUT_SIZE)) {
+ lut[1] = scaler3_cfg->sep_lut +
+ scaler3_cfg->uv_sep_lut_idx * QSEED3LITE_LUT_SIZE;
+ config_lut = 1;
+ }
+
+ if (config_lut) {
+ for (filter = 0; filter < QSEED3LITE_FILTERS; filter++) {
+ if (!lut[filter])
+ continue;
+ lut_offset = 0;
+ lut_addr = QSEED3_COEF_LUT + offset + off_tbl[filter];
+ for (j = 0; j < QSEED3LITE_LUT_SIZE; j++) {
+ DPU_REG_WRITE(c,
+ lut_addr,
+ (lut[filter])[lut_offset++]);
+ lut_addr += 4;
+ }
+ }
+ }
+
+ if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags))
+ DPU_REG_WRITE(c, QSEED3_COEF_LUT_CTRL + offset, BIT(0));
+}
+
+static void _dpu_hw_setup_scaler3_de(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_de_cfg *de_cfg, u32 offset)
+{
+ u32 sharp_lvl, sharp_ctl, shape_ctl, de_thr;
+ u32 adjust_a, adjust_b, adjust_c;
+
+ if (!de_cfg->enable)
+ return;
+
+ sharp_lvl = (de_cfg->sharpen_level1 & 0x1FF) |
+ ((de_cfg->sharpen_level2 & 0x1FF) << 16);
+
+ sharp_ctl = ((de_cfg->limit & 0xF) << 9) |
+ ((de_cfg->prec_shift & 0x7) << 13) |
+ ((de_cfg->clip & 0x7) << 16);
+
+ shape_ctl = (de_cfg->thr_quiet & 0xFF) |
+ ((de_cfg->thr_dieout & 0x3FF) << 16);
+
+ de_thr = (de_cfg->thr_low & 0x3FF) |
+ ((de_cfg->thr_high & 0x3FF) << 16);
+
+ adjust_a = (de_cfg->adjust_a[0] & 0x3FF) |
+ ((de_cfg->adjust_a[1] & 0x3FF) << 10) |
+ ((de_cfg->adjust_a[2] & 0x3FF) << 20);
+
+ adjust_b = (de_cfg->adjust_b[0] & 0x3FF) |
+ ((de_cfg->adjust_b[1] & 0x3FF) << 10) |
+ ((de_cfg->adjust_b[2] & 0x3FF) << 20);
+
+ adjust_c = (de_cfg->adjust_c[0] & 0x3FF) |
+ ((de_cfg->adjust_c[1] & 0x3FF) << 10) |
+ ((de_cfg->adjust_c[2] & 0x3FF) << 20);
+
+ DPU_REG_WRITE(c, QSEED3_DE_SHARPEN + offset, sharp_lvl);
+ DPU_REG_WRITE(c, QSEED3_DE_SHARPEN_CTL + offset, sharp_ctl);
+ DPU_REG_WRITE(c, QSEED3_DE_SHAPE_CTL + offset, shape_ctl);
+ DPU_REG_WRITE(c, QSEED3_DE_THRESHOLD + offset, de_thr);
+ DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_0 + offset, adjust_a);
+ DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_1 + offset, adjust_b);
+ DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_2 + offset, adjust_c);
+}
+
+void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg,
+ u32 scaler_offset, u32 scaler_version,
+ const struct dpu_format *format)
+{
+ u32 op_mode = 0;
+ u32 phase_init, preload, src_y_rgb, src_uv, dst;
+
+ if (!scaler3_cfg->enable)
+ goto end;
+
+ op_mode |= BIT(0);
+ op_mode |= (scaler3_cfg->y_rgb_filter_cfg & 0x3) << 16;
+
+ if (format && DPU_FORMAT_IS_YUV(format)) {
+ op_mode |= BIT(12);
+ op_mode |= (scaler3_cfg->uv_filter_cfg & 0x3) << 24;
+ }
+
+ op_mode |= (scaler3_cfg->blend_cfg & 1) << 31;
+ op_mode |= (scaler3_cfg->dir_en) ? BIT(4) : 0;
+
+ preload =
+ ((scaler3_cfg->preload_x[0] & 0x7F) << 0) |
+ ((scaler3_cfg->preload_y[0] & 0x7F) << 8) |
+ ((scaler3_cfg->preload_x[1] & 0x7F) << 16) |
+ ((scaler3_cfg->preload_y[1] & 0x7F) << 24);
+
+ src_y_rgb = (scaler3_cfg->src_width[0] & 0x1FFFF) |
+ ((scaler3_cfg->src_height[0] & 0x1FFFF) << 16);
+
+ src_uv = (scaler3_cfg->src_width[1] & 0x1FFFF) |
+ ((scaler3_cfg->src_height[1] & 0x1FFFF) << 16);
+
+ dst = (scaler3_cfg->dst_width & 0x1FFFF) |
+ ((scaler3_cfg->dst_height & 0x1FFFF) << 16);
+
+ if (scaler3_cfg->de.enable) {
+ _dpu_hw_setup_scaler3_de(c, &scaler3_cfg->de, scaler_offset);
+ op_mode |= BIT(8);
+ }
+
+ if (scaler3_cfg->lut_flag) {
+ if (scaler_version < 0x2004)
+ _dpu_hw_setup_scaler3_lut(c, scaler3_cfg, scaler_offset);
+ else
+ _dpu_hw_setup_scaler3lite_lut(c, scaler3_cfg, scaler_offset);
+ }
+
+ if (scaler_version == 0x1002) {
+ phase_init =
+ ((scaler3_cfg->init_phase_x[0] & 0x3F) << 0) |
+ ((scaler3_cfg->init_phase_y[0] & 0x3F) << 8) |
+ ((scaler3_cfg->init_phase_x[1] & 0x3F) << 16) |
+ ((scaler3_cfg->init_phase_y[1] & 0x3F) << 24);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT + scaler_offset, phase_init);
+ } else {
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_Y_H + scaler_offset,
+ scaler3_cfg->init_phase_x[0] & 0x1FFFFF);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_Y_V + scaler_offset,
+ scaler3_cfg->init_phase_y[0] & 0x1FFFFF);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_UV_H + scaler_offset,
+ scaler3_cfg->init_phase_x[1] & 0x1FFFFF);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_UV_V + scaler_offset,
+ scaler3_cfg->init_phase_y[1] & 0x1FFFFF);
+ }
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_Y_H + scaler_offset,
+ scaler3_cfg->phase_step_x[0] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_Y_V + scaler_offset,
+ scaler3_cfg->phase_step_y[0] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_UV_H + scaler_offset,
+ scaler3_cfg->phase_step_x[1] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_UV_V + scaler_offset,
+ scaler3_cfg->phase_step_y[1] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PRELOAD + scaler_offset, preload);
+
+ DPU_REG_WRITE(c, QSEED3_SRC_SIZE_Y_RGB_A + scaler_offset, src_y_rgb);
+
+ DPU_REG_WRITE(c, QSEED3_SRC_SIZE_UV + scaler_offset, src_uv);
+
+ DPU_REG_WRITE(c, QSEED3_DST_SIZE + scaler_offset, dst);
+
+end:
+ if (format && !DPU_FORMAT_IS_DX(format))
+ op_mode |= BIT(14);
+
+ if (format && format->alpha_enable) {
+ op_mode |= BIT(10);
+ if (scaler_version == 0x1002)
+ op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x1) << 30;
+ else
+ op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x3) << 29;
+ }
+
+ DPU_REG_WRITE(c, QSEED3_OP_MODE + scaler_offset, op_mode);
+}
+
+u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
+ u32 scaler_offset)
+{
+ return DPU_REG_READ(c, QSEED3_HW_VERSION + scaler_offset);
+}
+
+void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
+ u32 csc_reg_off,
+ const struct dpu_csc_cfg *data, bool csc10)
+{
+ static const u32 matrix_shift = 7;
+ u32 clamp_shift = csc10 ? 16 : 8;
+ u32 val;
+
+ /* matrix coeff - convert S15.16 to S4.9 */
+ val = ((data->csc_mv[0] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[1] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off, val);
+ val = ((data->csc_mv[2] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[3] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off + 0x4, val);
+ val = ((data->csc_mv[4] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[5] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off + 0x8, val);
+ val = ((data->csc_mv[6] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[7] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off + 0xc, val);
+ val = (data->csc_mv[8] >> matrix_shift) & 0x1FFF;
+ DPU_REG_WRITE(c, csc_reg_off + 0x10, val);
+
+ /* Pre clamp */
+ val = (data->csc_pre_lv[0] << clamp_shift) | data->csc_pre_lv[1];
+ DPU_REG_WRITE(c, csc_reg_off + 0x14, val);
+ val = (data->csc_pre_lv[2] << clamp_shift) | data->csc_pre_lv[3];
+ DPU_REG_WRITE(c, csc_reg_off + 0x18, val);
+ val = (data->csc_pre_lv[4] << clamp_shift) | data->csc_pre_lv[5];
+ DPU_REG_WRITE(c, csc_reg_off + 0x1c, val);
+
+ /* Post clamp */
+ val = (data->csc_post_lv[0] << clamp_shift) | data->csc_post_lv[1];
+ DPU_REG_WRITE(c, csc_reg_off + 0x20, val);
+ val = (data->csc_post_lv[2] << clamp_shift) | data->csc_post_lv[3];
+ DPU_REG_WRITE(c, csc_reg_off + 0x24, val);
+ val = (data->csc_post_lv[4] << clamp_shift) | data->csc_post_lv[5];
+ DPU_REG_WRITE(c, csc_reg_off + 0x28, val);
+
+ /* Pre-Bias */
+ DPU_REG_WRITE(c, csc_reg_off + 0x2c, data->csc_pre_bv[0]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x30, data->csc_pre_bv[1]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x34, data->csc_pre_bv[2]);
+
+ /* Post-Bias */
+ DPU_REG_WRITE(c, csc_reg_off + 0x38, data->csc_post_bv[0]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x3c, data->csc_post_bv[1]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x40, data->csc_post_bv[2]);
+}
+
+/**
+ * _dpu_hw_get_qos_lut - get LUT mapping based on fill level
+ * @tbl: Pointer to LUT table
+ * @total_fl: fill level
+ * Return: LUT setting corresponding to the fill level
+ */
+u64 _dpu_hw_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
+ u32 total_fl)
+{
+ int i;
+
+ if (!tbl || !tbl->nentry || !tbl->entries)
+ return 0;
+
+ for (i = 0; i < tbl->nentry; i++)
+ if (total_fl <= tbl->entries[i].fl)
+ return tbl->entries[i].lut;
+
+ /* if last fl is zero, use as default */
+ if (!tbl->entries[i-1].fl)
+ return tbl->entries[i-1].lut;
+
+ return 0;
+}
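+
+/*
+ * Illustration with a hypothetical three-entry table
+ * {.fl = 4, .lut = A}, {.fl = 10, .lut = B}, {.fl = 0, .lut = C}:
+ * a total fill level of 8 matches the second entry and returns B, while
+ * a fill level of 12 falls through to the zero-fl catch-all and returns C.
+ */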
+
+void _dpu_hw_setup_qos_lut(struct dpu_hw_blk_reg_map *c, u32 offset,
+ bool qos_8lvl,
+ const struct dpu_hw_qos_cfg *cfg)
+{
+ DPU_REG_WRITE(c, offset + QOS_DANGER_LUT, cfg->danger_lut);
+ DPU_REG_WRITE(c, offset + QOS_SAFE_LUT, cfg->safe_lut);
+
+ if (qos_8lvl) {
+ DPU_REG_WRITE(c, offset + QOS_CREQ_LUT_0, cfg->creq_lut);
+ DPU_REG_WRITE(c, offset + QOS_CREQ_LUT_1, cfg->creq_lut >> 32);
+ } else {
+ DPU_REG_WRITE(c, offset + QOS_CREQ_LUT, cfg->creq_lut);
+ }
+
+ DPU_REG_WRITE(c, offset + QOS_QOS_CTRL,
+ cfg->danger_safe_en ? QOS_QOS_CTRL_DANGER_SAFE_EN : 0);
+}
+
+/*
+ * note: Aside from encoders, input_sel should be set to 0x0 by default
+ */
+void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c,
+ u32 misr_ctrl_offset, u8 input_sel)
+{
+ u32 config = 0;
+
+ DPU_REG_WRITE(c, misr_ctrl_offset, MISR_CTRL_STATUS_CLEAR);
+
+ /* Clear old MISR value (in case it's read before a new value is calculated) */
+ wmb();
+
+ config = MISR_FRAME_COUNT | MISR_CTRL_ENABLE | MISR_CTRL_FREE_RUN_MASK |
+ ((input_sel & 0xF) << 24);
+ DPU_REG_WRITE(c, misr_ctrl_offset, config);
+}
+
+int dpu_hw_collect_misr(struct dpu_hw_blk_reg_map *c,
+ u32 misr_ctrl_offset,
+ u32 misr_signature_offset,
+ u32 *misr_value)
+{
+ u32 ctrl = 0;
+
+ if (!misr_value)
+ return -EINVAL;
+
+ ctrl = DPU_REG_READ(c, misr_ctrl_offset);
+
+ if (!(ctrl & MISR_CTRL_ENABLE))
+ return -ENODATA;
+
+ if (!(ctrl & MISR_CTRL_STATUS))
+ return -EINVAL;
+
+ *misr_value = DPU_REG_READ(c, misr_signature_offset);
+
+ return 0;
+}
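+
+/*
+ * Typical usage sketch (illustrative): MISR collection is a two-step
+ * sequence, where -ENODATA means the MISR was never enabled and -EINVAL
+ * means the signature is not ready yet:
+ *
+ *	dpu_hw_setup_misr(c, ctrl_off, 0);
+ *	... wait for at least one complete frame ...
+ *	ret = dpu_hw_collect_misr(c, ctrl_off, sig_off, &misr);
+ */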
+
+#define CDP_ENABLE BIT(0)
+#define CDP_UBWC_META_ENABLE BIT(1)
+#define CDP_TILE_AMORTIZE_ENABLE BIT(2)
+#define CDP_PRELOAD_AHEAD_64 BIT(3)
+
+void dpu_setup_cdp(struct dpu_hw_blk_reg_map *c, u32 offset,
+ const struct dpu_format *fmt, bool enable)
+{
+ u32 cdp_cntl = CDP_PRELOAD_AHEAD_64;
+
+ if (enable)
+ cdp_cntl |= CDP_ENABLE;
+ if (DPU_FORMAT_IS_UBWC(fmt))
+ cdp_cntl |= CDP_UBWC_META_ENABLE;
+ if (DPU_FORMAT_IS_UBWC(fmt) ||
+ DPU_FORMAT_IS_TILE(fmt))
+ cdp_cntl |= CDP_TILE_AMORTIZE_ENABLE;
+
+ DPU_REG_WRITE(c, offset, cdp_cntl);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
new file mode 100644
index 0000000000..0aed54d7f6
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -0,0 +1,368 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_UTIL_H
+#define _DPU_HW_UTIL_H
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_catalog.h"
+
+#define REG_MASK(n) ((BIT(n)) - 1)
+#define MISR_FRAME_COUNT 0x1
+#define MISR_CTRL_ENABLE BIT(8)
+#define MISR_CTRL_STATUS BIT(9)
+#define MISR_CTRL_STATUS_CLEAR BIT(10)
+#define MISR_CTRL_FREE_RUN_MASK BIT(31)
+
+/*
+ * This is the common struct maintained by each sub block
+ * for mapping the register offsets in this block to the
+ * absolute IO address
+ * @blk_addr: hw block register mapped address
+ * @log_mask: log mask for this block
+ */
+struct dpu_hw_blk_reg_map {
+ void __iomem *blk_addr;
+ u32 log_mask;
+};
+
+/**
+ * struct dpu_hw_blk - opaque hardware block object
+ */
+struct dpu_hw_blk {
+ /* opaque */
+};
+
+/**
+ * struct dpu_hw_scaler3_de_cfg : QSEEDv3 detail enhancer configuration
+ * @enable: detail enhancer enable/disable
+ * @sharpen_level1: sharpening strength for noise
+ * @sharpen_level2: sharpening strength for signal
+ * @clip: clip shift
+ * @limit: limit value
+ * @thr_quiet: quiet threshold
+ * @thr_dieout: dieout threshold
+ * @thr_low: low threshold
+ * @thr_high: high threshold
+ * @prec_shift: precision shift
+ * @adjust_a: A-coefficients for mapping curve
+ * @adjust_b: B-coefficients for mapping curve
+ * @adjust_c: C-coefficients for mapping curve
+ */
+struct dpu_hw_scaler3_de_cfg {
+ u32 enable;
+ int16_t sharpen_level1;
+ int16_t sharpen_level2;
+ uint16_t clip;
+ uint16_t limit;
+ uint16_t thr_quiet;
+ uint16_t thr_dieout;
+ uint16_t thr_low;
+ uint16_t thr_high;
+ uint16_t prec_shift;
+ int16_t adjust_a[DPU_MAX_DE_CURVES];
+ int16_t adjust_b[DPU_MAX_DE_CURVES];
+ int16_t adjust_c[DPU_MAX_DE_CURVES];
+};
+
+
+/**
+ * struct dpu_hw_scaler3_cfg : QSEEDv3 configuration
+ * @enable: scaler enable
+ * @dir_en: direction detection block enable
+ * @init_phase_x: horizontal initial phase
+ * @phase_step_x: horizontal phase step
+ * @init_phase_y: vertical initial phase
+ * @phase_step_y: vertical phase step
+ * @preload_x: horizontal preload value
+ * @preload_y: vertical preload value
+ * @src_width: source width
+ * @src_height: source height
+ * @dst_width: destination width
+ * @dst_height: destination height
+ * @y_rgb_filter_cfg: y/rgb plane filter configuration
+ * @uv_filter_cfg: uv plane filter configuration
+ * @alpha_filter_cfg: alpha filter configuration
+ * @blend_cfg: blend coefficients configuration
+ * @lut_flag: scaler LUT update flags
+ *	0x1 swap LUT bank
+ *	0x2 update 2D filter LUT
+ *	0x4 update y circular filter LUT
+ *	0x8 update uv circular filter LUT
+ *	0x10 update y separable filter LUT
+ *	0x20 update uv separable filter LUT
+ * @dir_lut_idx: 2D filter LUT index
+ * @y_rgb_cir_lut_idx: y circular filter LUT index
+ * @uv_cir_lut_idx: uv circular filter LUT index
+ * @y_rgb_sep_lut_idx: y separable filter LUT index
+ * @uv_sep_lut_idx: uv separable filter LUT index
+ * @dir_lut: pointer to 2D LUT
+ * @cir_lut: pointer to circular filter LUT
+ * @sep_lut: pointer to separable filter LUT
+ * @de: detail enhancer configuration
+ * @dir_weight: Directional weight
+ */
+struct dpu_hw_scaler3_cfg {
+ u32 enable;
+ u32 dir_en;
+ int32_t init_phase_x[DPU_MAX_PLANES];
+ int32_t phase_step_x[DPU_MAX_PLANES];
+ int32_t init_phase_y[DPU_MAX_PLANES];
+ int32_t phase_step_y[DPU_MAX_PLANES];
+
+ u32 preload_x[DPU_MAX_PLANES];
+ u32 preload_y[DPU_MAX_PLANES];
+ u32 src_width[DPU_MAX_PLANES];
+ u32 src_height[DPU_MAX_PLANES];
+
+ u32 dst_width;
+ u32 dst_height;
+
+ u32 y_rgb_filter_cfg;
+ u32 uv_filter_cfg;
+ u32 alpha_filter_cfg;
+ u32 blend_cfg;
+
+ u32 lut_flag;
+ u32 dir_lut_idx;
+
+ u32 y_rgb_cir_lut_idx;
+ u32 uv_cir_lut_idx;
+ u32 y_rgb_sep_lut_idx;
+ u32 uv_sep_lut_idx;
+ u32 *dir_lut;
+ size_t dir_len;
+ u32 *cir_lut;
+ size_t cir_len;
+ u32 *sep_lut;
+ size_t sep_len;
+
+ /*
+ * Detail enhancer settings
+ */
+ struct dpu_hw_scaler3_de_cfg de;
+
+ u32 dir_weight;
+};
+
+/**
+ * struct dpu_drm_pix_ext_v1 - version 1 of pixel ext structure
+ * @num_ext_pxls_lr: Number of total horizontal pixels
+ * @num_ext_pxls_tb: Number of total vertical lines
+ * @left_ftch: Number of extra pixels to overfetch from left
+ * @right_ftch: Number of extra pixels to overfetch from right
+ * @top_ftch: Number of extra lines to overfetch from top
+ * @btm_ftch: Number of extra lines to overfetch from bottom
+ * @left_rpt: Number of extra pixels to repeat from left
+ * @right_rpt: Number of extra pixels to repeat from right
+ * @top_rpt: Number of extra lines to repeat from top
+ * @btm_rpt: Number of extra lines to repeat from bottom
+ */
+struct dpu_drm_pix_ext_v1 {
+ /*
+ * Number of pixels ext in left, right, top and bottom direction
+ * for all color components.
+ */
+ int32_t num_ext_pxls_lr[DPU_MAX_PLANES];
+ int32_t num_ext_pxls_tb[DPU_MAX_PLANES];
+
+ /*
+ * Number of pixels that need to be overfetched in left, right, top
+ * and bottom directions from source image for scaling.
+ */
+ int32_t left_ftch[DPU_MAX_PLANES];
+ int32_t right_ftch[DPU_MAX_PLANES];
+ int32_t top_ftch[DPU_MAX_PLANES];
+ int32_t btm_ftch[DPU_MAX_PLANES];
+ /*
+ * Number of pixels that need to be repeated in left, right, top and
+ * bottom directions for scaling.
+ */
+ int32_t left_rpt[DPU_MAX_PLANES];
+ int32_t right_rpt[DPU_MAX_PLANES];
+ int32_t top_rpt[DPU_MAX_PLANES];
+ int32_t btm_rpt[DPU_MAX_PLANES];
+};
+
+/**
+ * struct dpu_drm_de_v1 - version 1 of detail enhancer structure
+ * @enable: Enables/disables detail enhancer
+ * @sharpen_level1: Sharpening strength for noise
+ * @sharpen_level2: Sharpening strength for context
+ * @clip: Clip coefficient
+ * @limit: Detail enhancer limit factor
+ * @thr_quiet: Quiet zone threshold
+ * @thr_dieout: Die-out zone threshold
+ * @thr_low: Linear zone left threshold
+ * @thr_high: Linear zone right threshold
+ * @prec_shift: Detail enhancer precision
+ * @adjust_a: Mapping curves A coefficients
+ * @adjust_b: Mapping curves B coefficients
+ * @adjust_c: Mapping curves C coefficients
+ */
+struct dpu_drm_de_v1 {
+ uint32_t enable;
+ int16_t sharpen_level1;
+ int16_t sharpen_level2;
+ uint16_t clip;
+ uint16_t limit;
+ uint16_t thr_quiet;
+ uint16_t thr_dieout;
+ uint16_t thr_low;
+ uint16_t thr_high;
+ uint16_t prec_shift;
+ int16_t adjust_a[DPU_MAX_DE_CURVES];
+ int16_t adjust_b[DPU_MAX_DE_CURVES];
+ int16_t adjust_c[DPU_MAX_DE_CURVES];
+};
+
+/**
+ * struct dpu_drm_scaler_v2 - version 2 of struct dpu_drm_scaler
+ * @enable: Scaler enable
+ * @dir_en: Direction detection enable
+ * @pe: Pixel extension settings
+ * @horz_decimate: Horizontal decimation factor
+ * @vert_decimate: Vertical decimation factor
+ * @init_phase_x: Initial scaler phase values for x
+ * @phase_step_x: Phase step values for x
+ * @init_phase_y: Initial scaler phase values for y
+ * @phase_step_y: Phase step values for y
+ * @preload_x: Horizontal preload value
+ * @preload_y: Vertical preload value
+ * @src_width: Source width
+ * @src_height: Source height
+ * @dst_width: Destination width
+ * @dst_height: Destination height
+ * @y_rgb_filter_cfg: Y/RGB plane filter configuration
+ * @uv_filter_cfg: UV plane filter configuration
+ * @alpha_filter_cfg: Alpha filter configuration
+ * @blend_cfg: Selection of blend coefficients
+ * @lut_flag: LUT configuration flags
+ * @dir_lut_idx: 2d 4x4 LUT index
+ * @y_rgb_cir_lut_idx: Y/RGB circular LUT index
+ * @uv_cir_lut_idx: UV circular LUT index
+ * @y_rgb_sep_lut_idx: Y/RGB separable LUT index
+ * @uv_sep_lut_idx: UV separable LUT index
+ * @de: Detail enhancer settings
+ */
+struct dpu_drm_scaler_v2 {
+ /*
+ * General definitions
+ */
+ uint32_t enable;
+ uint32_t dir_en;
+
+ /*
+ * Pix ext settings
+ */
+ struct dpu_drm_pix_ext_v1 pe;
+
+ /*
+ * Decimation settings
+ */
+ uint32_t horz_decimate;
+ uint32_t vert_decimate;
+
+ /*
+ * Phase settings
+ */
+ int32_t init_phase_x[DPU_MAX_PLANES];
+ int32_t phase_step_x[DPU_MAX_PLANES];
+ int32_t init_phase_y[DPU_MAX_PLANES];
+ int32_t phase_step_y[DPU_MAX_PLANES];
+
+ uint32_t preload_x[DPU_MAX_PLANES];
+ uint32_t preload_y[DPU_MAX_PLANES];
+ uint32_t src_width[DPU_MAX_PLANES];
+ uint32_t src_height[DPU_MAX_PLANES];
+
+ uint32_t dst_width;
+ uint32_t dst_height;
+
+ uint32_t y_rgb_filter_cfg;
+ uint32_t uv_filter_cfg;
+ uint32_t alpha_filter_cfg;
+ uint32_t blend_cfg;
+
+ uint32_t lut_flag;
+ uint32_t dir_lut_idx;
+
+ /* for Y(RGB) and UV planes */
+ uint32_t y_rgb_cir_lut_idx;
+ uint32_t uv_cir_lut_idx;
+ uint32_t y_rgb_sep_lut_idx;
+ uint32_t uv_sep_lut_idx;
+
+ /*
+ * Detail enhancer settings
+ */
+ struct dpu_drm_de_v1 de;
+};
+
+/**
+ * struct dpu_hw_qos_cfg: pipe QoS configuration
+ * @danger_lut: LUT for generating danger level based on fill level
+ * @safe_lut: LUT for generating safe level based on fill level
+ * @creq_lut: LUT for generating creq level based on fill level
+ * @danger_safe_en: enable danger/safe level generation
+ */
+struct dpu_hw_qos_cfg {
+ u32 danger_lut;
+ u32 safe_lut;
+ u64 creq_lut;
+ bool danger_safe_en;
+};
+
+u32 *dpu_hw_util_get_log_mask_ptr(void);
+
+void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
+ u32 reg_off,
+ u32 val,
+ const char *name);
+int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off);
+
+#define DPU_REG_WRITE(c, off, val) dpu_reg_write(c, off, val, #off)
+#define DPU_REG_READ(c, off) dpu_reg_read(c, off)
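+
+/*
+ * DPU_REG_WRITE stringizes the offset macro, so, assuming the block's log
+ * mask is enabled, e.g. DPU_REG_WRITE(c, QSEED3_OP_MODE, 0x1) logs
+ * "[QSEED3_OP_MODE:0x4] <= 0x1" before performing the write.
+ */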
+
+void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg,
+ u32 scaler_offset, u32 scaler_version,
+ const struct dpu_format *format);
+
+u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
+ u32 scaler_offset);
+
+void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
+ u32 csc_reg_off,
+ const struct dpu_csc_cfg *data, bool csc10);
+
+void dpu_setup_cdp(struct dpu_hw_blk_reg_map *c, u32 offset,
+ const struct dpu_format *fmt, bool enable);
+
+u64 _dpu_hw_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
+ u32 total_fl);
+
+void _dpu_hw_setup_qos_lut(struct dpu_hw_blk_reg_map *c, u32 offset,
+ bool qos_8lvl,
+ const struct dpu_hw_qos_cfg *cfg);
+
+void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c,
+ u32 misr_ctrl_offset, u8 input_sel);
+
+int dpu_hw_collect_misr(struct dpu_hw_blk_reg_map *c,
+ u32 misr_ctrl_offset,
+ u32 misr_signature_offset,
+ u32 *misr_value);
+
+#endif /* _DPU_HW_UTIL_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
new file mode 100644
index 0000000000..a5121a50b2
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_vbif.h"
+
+#define VBIF_VERSION 0x0000
+#define VBIF_CLK_FORCE_CTRL0 0x0008
+#define VBIF_CLK_FORCE_CTRL1 0x000C
+#define VBIF_QOS_REMAP_00 0x0020
+#define VBIF_QOS_REMAP_01 0x0024
+#define VBIF_QOS_REMAP_10 0x0028
+#define VBIF_QOS_REMAP_11 0x002C
+#define VBIF_WRITE_GATHER_EN 0x00AC
+#define VBIF_IN_RD_LIM_CONF0 0x00B0
+#define VBIF_IN_RD_LIM_CONF1 0x00B4
+#define VBIF_IN_RD_LIM_CONF2 0x00B8
+#define VBIF_IN_WR_LIM_CONF0 0x00C0
+#define VBIF_IN_WR_LIM_CONF1 0x00C4
+#define VBIF_IN_WR_LIM_CONF2 0x00C8
+#define VBIF_OUT_RD_LIM_CONF0 0x00D0
+#define VBIF_OUT_WR_LIM_CONF0 0x00D4
+#define VBIF_OUT_AXI_AMEMTYPE_CONF0 0x0160
+#define VBIF_OUT_AXI_AMEMTYPE_CONF1 0x0164
+#define VBIF_XIN_PND_ERR 0x0190
+#define VBIF_XIN_SRC_ERR 0x0194
+#define VBIF_XIN_CLR_ERR 0x019C
+#define VBIF_XIN_HALT_CTRL0 0x0200
+#define VBIF_XIN_HALT_CTRL1 0x0204
+#define VBIF_XINL_QOS_RP_REMAP_000 0x0550
+#define VBIF_XINL_QOS_LVL_REMAP_000(vbif) (VBIF_XINL_QOS_RP_REMAP_000 + (vbif)->cap->qos_rp_remap_size)
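+
+/*
+ * Illustration: with a hypothetical catalog qos_rp_remap_size of 0x20,
+ * the level-remap registers start at 0x550 + 0x20 = 0x570, directly
+ * after the priority-remap bank.
+ */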
+
+static void dpu_hw_clear_errors(struct dpu_hw_vbif *vbif,
+ u32 *pnd_errors, u32 *src_errors)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 pnd, src;
+
+ if (!vbif)
+ return;
+ c = &vbif->hw;
+ pnd = DPU_REG_READ(c, VBIF_XIN_PND_ERR);
+ src = DPU_REG_READ(c, VBIF_XIN_SRC_ERR);
+
+ if (pnd_errors)
+ *pnd_errors = pnd;
+ if (src_errors)
+ *src_errors = src;
+
+ DPU_REG_WRITE(c, VBIF_XIN_CLR_ERR, pnd | src);
+}
+
+static void dpu_hw_set_mem_type(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 value)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_off;
+ u32 bit_off;
+ u32 reg_val;
+
+ /*
+ * Assume 4 bits per bit field, 8 fields per 32-bit register so
+ * 16 bit fields maximum across two registers
+ */
+ if (!vbif || xin_id >= MAX_XIN_COUNT || xin_id >= 16)
+ return;
+
+ c = &vbif->hw;
+
+ if (xin_id >= 8) {
+ xin_id -= 8;
+ reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1;
+ } else {
+ reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF0;
+ }
+ bit_off = (xin_id & 0x7) * 4;
+ reg_val = DPU_REG_READ(c, reg_off);
+ reg_val &= ~(0x7 << bit_off);
+ reg_val |= (value & 0x7) << bit_off;
+ DPU_REG_WRITE(c, reg_off, reg_val);
+}
+
+static void dpu_hw_set_limit_conf(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd, u32 limit)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+ u32 reg_off;
+ u32 bit_off;
+
+ if (rd)
+ reg_off = VBIF_IN_RD_LIM_CONF0;
+ else
+ reg_off = VBIF_IN_WR_LIM_CONF0;
+
+ reg_off += (xin_id / 4) * 4;
+ bit_off = (xin_id % 4) * 8;
+ reg_val = DPU_REG_READ(c, reg_off);
+ reg_val &= ~(0xFF << bit_off);
+ reg_val |= (limit) << bit_off;
+ DPU_REG_WRITE(c, reg_off, reg_val);
+}
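+
+/*
+ * Worked example: the limits are packed as 8-bit fields, four clients
+ * per 32-bit register, so xin_id = 5 lands in VBIF_IN_*_LIM_CONF1
+ * (reg_off = base + 4) at bit offset (5 % 4) * 8 = 8.
+ */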
+
+static u32 dpu_hw_get_limit_conf(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+ u32 reg_off;
+ u32 bit_off;
+ u32 limit;
+
+ if (rd)
+ reg_off = VBIF_IN_RD_LIM_CONF0;
+ else
+ reg_off = VBIF_IN_WR_LIM_CONF0;
+
+ reg_off += (xin_id / 4) * 4;
+ bit_off = (xin_id % 4) * 8;
+ reg_val = DPU_REG_READ(c, reg_off);
+ limit = (reg_val >> bit_off) & 0xFF;
+
+ return limit;
+}
+
+static void dpu_hw_set_halt_ctrl(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool enable)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+
+ reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL0);
+
+ if (enable)
+ reg_val |= BIT(xin_id);
+ else
+ reg_val &= ~BIT(xin_id);
+
+ DPU_REG_WRITE(c, VBIF_XIN_HALT_CTRL0, reg_val);
+}
+
+static bool dpu_hw_get_halt_ctrl(struct dpu_hw_vbif *vbif,
+ u32 xin_id)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+
+ reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL1);
+
+ return !!(reg_val & BIT(xin_id));
+}
+
+static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 level, u32 remap_level)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_lvl, reg_val, reg_val_lvl, mask, reg_high, reg_shift;
+
+ if (!vbif)
+ return;
+
+ c = &vbif->hw;
+
+ reg_lvl = VBIF_XINL_QOS_LVL_REMAP_000(vbif);
+ reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
+ reg_shift = (xin_id & 0x7) * 4;
+
+ reg_val = DPU_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
+ reg_val_lvl = DPU_REG_READ(c, reg_lvl + reg_high);
+
+ mask = 0x7 << reg_shift;
+
+ reg_val &= ~mask;
+ reg_val |= (remap_level << reg_shift) & mask;
+
+ reg_val_lvl &= ~mask;
+ reg_val_lvl |= (remap_level << reg_shift) & mask;
+
+ DPU_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
+ DPU_REG_WRITE(c, reg_lvl + reg_high, reg_val_lvl);
+}
+
+static void dpu_hw_set_write_gather_en(struct dpu_hw_vbif *vbif, u32 xin_id)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_val;
+
+ if (!vbif || xin_id >= MAX_XIN_COUNT)
+ return;
+
+ c = &vbif->hw;
+
+ reg_val = DPU_REG_READ(c, VBIF_WRITE_GATHER_EN);
+ reg_val |= BIT(xin_id);
+ DPU_REG_WRITE(c, VBIF_WRITE_GATHER_EN, reg_val);
+}
+
+static void _setup_vbif_ops(struct dpu_hw_vbif_ops *ops,
+ unsigned long cap)
+{
+ ops->set_limit_conf = dpu_hw_set_limit_conf;
+ ops->get_limit_conf = dpu_hw_get_limit_conf;
+ ops->set_halt_ctrl = dpu_hw_set_halt_ctrl;
+ ops->get_halt_ctrl = dpu_hw_get_halt_ctrl;
+ if (test_bit(DPU_VBIF_QOS_REMAP, &cap))
+ ops->set_qos_remap = dpu_hw_set_qos_remap;
+ ops->set_mem_type = dpu_hw_set_mem_type;
+ ops->clear_errors = dpu_hw_clear_errors;
+ ops->set_write_gather_en = dpu_hw_set_write_gather_en;
+}
+
+struct dpu_hw_vbif *dpu_hw_vbif_init(const struct dpu_vbif_cfg *cfg,
+ void __iomem *addr)
+{
+ struct dpu_hw_vbif *c;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ c->hw.blk_addr = addr + cfg->base;
+ c->hw.log_mask = DPU_DBG_MASK_VBIF;
+
+ /*
+ * Assign ops
+ */
+ c->idx = cfg->id;
+ c->cap = cfg;
+ _setup_vbif_ops(&c->ops, c->cap->features);
+
+ /* no need to register sub-range in dpu dbg, dump entire vbif io base */
+
+ return c;
+}
+
+void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif)
+{
+ kfree(vbif);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
new file mode 100644
index 0000000000..7e10d2a172
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_VBIF_H
+#define _DPU_HW_VBIF_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+struct dpu_hw_vbif;
+
+/**
+ * struct dpu_hw_vbif_ops : Interface to the VBIF hardware driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_vbif_ops {
+ /**
+ * set_limit_conf - set transaction limit config
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @rd: true for read limit; false for write limit
+ * @limit: outstanding transaction limit
+ */
+ void (*set_limit_conf)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd, u32 limit);
+
+ /**
+ * get_limit_conf - get transaction limit config
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @rd: true for read limit; false for write limit
+ * @return: outstanding transaction limit
+ */
+ u32 (*get_limit_conf)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd);
+
+ /**
+ * set_halt_ctrl - set halt control
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @enable: halt control enable
+ */
+ void (*set_halt_ctrl)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool enable);
+
+ /**
+ * get_halt_ctrl - get halt control
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @return: halt control enable
+ */
+ bool (*get_halt_ctrl)(struct dpu_hw_vbif *vbif,
+ u32 xin_id);
+
+ /**
+ * set_qos_remap - set QoS priority remap
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @level: priority level
+ * @remap_level: remapped level
+ */
+ void (*set_qos_remap)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 level, u32 remap_level);
+
+ /**
+ * set_mem_type - set memory type
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @value: memory type value
+ */
+ void (*set_mem_type)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 value);
+
+ /**
+ * clear_errors - clear any vbif errors
+ * This function clears any detected pending/source errors
+ * on the VBIF interface, and optionally returns the detected
+ * error mask(s).
+ * @vbif: vbif context driver
+ * @pnd_errors: pointer to pending error reporting variable
+ * @src_errors: pointer to source error reporting variable
+ */
+ void (*clear_errors)(struct dpu_hw_vbif *vbif,
+ u32 *pnd_errors, u32 *src_errors);
+
+ /**
+ * set_write_gather_en - set write_gather enable
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ */
+ void (*set_write_gather_en)(struct dpu_hw_vbif *vbif, u32 xin_id);
+};
+
+struct dpu_hw_vbif {
+ /* base */
+ struct dpu_hw_blk_reg_map hw;
+
+ /* vbif */
+ enum dpu_vbif idx;
+ const struct dpu_vbif_cfg *cap;
+
+ /* ops */
+ struct dpu_hw_vbif_ops ops;
+};
+
+/**
+ * dpu_hw_vbif_init() - Initializes the VBIF driver for the passed
+ * VBIF catalog entry.
+ * @cfg: VBIF catalog entry for which driver object is required
+ * @addr: Mapped register io address of MDSS
+ */
+struct dpu_hw_vbif *dpu_hw_vbif_init(const struct dpu_vbif_cfg *cfg,
+ void __iomem *addr);
+
+void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif);
+
+#endif /*_DPU_HW_VBIF_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
new file mode 100644
index 0000000000..ebc4164003
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved
+ */
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_wb.h"
+#include "dpu_formats.h"
+#include "dpu_kms.h"
+
+#define WB_DST_FORMAT 0x000
+#define WB_DST_OP_MODE 0x004
+#define WB_DST_PACK_PATTERN 0x008
+#define WB_DST0_ADDR 0x00C
+#define WB_DST1_ADDR 0x010
+#define WB_DST2_ADDR 0x014
+#define WB_DST3_ADDR 0x018
+#define WB_DST_YSTRIDE0 0x01C
+#define WB_DST_YSTRIDE1 0x020
+#define WB_DST_DITHER_BITDEPTH 0x024
+#define WB_DST_MATRIX_ROW0 0x030
+#define WB_DST_MATRIX_ROW1 0x034
+#define WB_DST_MATRIX_ROW2 0x038
+#define WB_DST_MATRIX_ROW3 0x03C
+#define WB_DST_WRITE_CONFIG 0x048
+#define WB_ROTATION_DNSCALER 0x050
+#define WB_ROTATOR_PIPE_DOWNSCALER 0x054
+#define WB_N16_INIT_PHASE_X_C03 0x060
+#define WB_N16_INIT_PHASE_X_C12 0x064
+#define WB_N16_INIT_PHASE_Y_C03 0x068
+#define WB_N16_INIT_PHASE_Y_C12 0x06C
+#define WB_OUT_SIZE 0x074
+#define WB_ALPHA_X_VALUE 0x078
+#define WB_DANGER_LUT 0x084
+#define WB_SAFE_LUT 0x088
+#define WB_QOS_CTRL 0x090
+#define WB_CREQ_LUT_0 0x098
+#define WB_CREQ_LUT_1 0x09C
+#define WB_UBWC_STATIC_CTRL 0x144
+#define WB_MUX 0x150
+#define WB_CROP_CTRL 0x154
+#define WB_CROP_OFFSET 0x158
+#define WB_CSC_BASE 0x260
+#define WB_DST_ADDR_SW_STATUS 0x2B0
+#define WB_CDP_CNTL 0x2B4
+#define WB_OUT_IMAGE_SIZE 0x2C0
+#define WB_OUT_XY 0x2C4
+
+static void dpu_hw_wb_setup_outaddress(struct dpu_hw_wb *ctx,
+ struct dpu_hw_wb_cfg *data)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ DPU_REG_WRITE(c, WB_DST0_ADDR, data->dest.plane_addr[0]);
+ DPU_REG_WRITE(c, WB_DST1_ADDR, data->dest.plane_addr[1]);
+ DPU_REG_WRITE(c, WB_DST2_ADDR, data->dest.plane_addr[2]);
+ DPU_REG_WRITE(c, WB_DST3_ADDR, data->dest.plane_addr[3]);
+}
+
+static void dpu_hw_wb_setup_format(struct dpu_hw_wb *ctx,
+ struct dpu_hw_wb_cfg *data)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ const struct dpu_format *fmt = data->dest.format;
+ u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
+ u32 write_config = 0;
+ u32 opmode = 0;
+ u32 dst_addr_sw = 0;
+
+ chroma_samp = fmt->chroma_sample;
+
+ dst_format = (chroma_samp << 23) |
+ (fmt->fetch_planes << 19) |
+ (fmt->bits[C3_ALPHA] << 6) |
+ (fmt->bits[C2_R_Cr] << 4) |
+ (fmt->bits[C1_B_Cb] << 2) |
+ (fmt->bits[C0_G_Y] << 0);
+
+ if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
+ dst_format |= BIT(8); /* DSTC3_EN */
+ if (!fmt->alpha_enable ||
+ !(ctx->caps->features & BIT(DPU_WB_PIPE_ALPHA)))
+ dst_format |= BIT(14); /* DST_ALPHA_X */
+ }
+
+ pattern = (fmt->element[3] << 24) |
+ (fmt->element[2] << 16) |
+ (fmt->element[1] << 8) |
+ (fmt->element[0] << 0);
+
+ dst_format |= (fmt->unpack_align_msb << 18) |
+ (fmt->unpack_tight << 17) |
+ ((fmt->unpack_count - 1) << 12) |
+ ((fmt->bpp - 1) << 9);
+
+ ystride0 = data->dest.plane_pitch[0] |
+ (data->dest.plane_pitch[1] << 16);
+ ystride1 = data->dest.plane_pitch[2] |
+ (data->dest.plane_pitch[3] << 16);
+
+ if (drm_rect_height(&data->roi) && drm_rect_width(&data->roi))
+ outsize = (drm_rect_height(&data->roi) << 16) | drm_rect_width(&data->roi);
+ else
+ outsize = (data->dest.height << 16) | data->dest.width;
+
+ DPU_REG_WRITE(c, WB_ALPHA_X_VALUE, 0xFF);
+ DPU_REG_WRITE(c, WB_DST_FORMAT, dst_format);
+ DPU_REG_WRITE(c, WB_DST_OP_MODE, opmode);
+ DPU_REG_WRITE(c, WB_DST_PACK_PATTERN, pattern);
+ DPU_REG_WRITE(c, WB_DST_YSTRIDE0, ystride0);
+ DPU_REG_WRITE(c, WB_DST_YSTRIDE1, ystride1);
+ DPU_REG_WRITE(c, WB_OUT_SIZE, outsize);
+ DPU_REG_WRITE(c, WB_DST_WRITE_CONFIG, write_config);
+ DPU_REG_WRITE(c, WB_DST_ADDR_SW_STATUS, dst_addr_sw);
+}
+
+static void dpu_hw_wb_roi(struct dpu_hw_wb *ctx, struct dpu_hw_wb_cfg *wb)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 image_size, out_size, out_xy;
+
+ image_size = (wb->dest.height << 16) | wb->dest.width;
+ out_xy = 0;
+ out_size = (drm_rect_height(&wb->roi) << 16) | drm_rect_width(&wb->roi);
+
+ DPU_REG_WRITE(c, WB_OUT_IMAGE_SIZE, image_size);
+ DPU_REG_WRITE(c, WB_OUT_XY, out_xy);
+ DPU_REG_WRITE(c, WB_OUT_SIZE, out_size);
+}
+
+static void dpu_hw_wb_setup_qos_lut(struct dpu_hw_wb *ctx,
+ struct dpu_hw_qos_cfg *cfg)
+{
+ if (!ctx || !cfg)
+ return;
+
+ _dpu_hw_setup_qos_lut(&ctx->hw, WB_DANGER_LUT,
+ test_bit(DPU_WB_QOS_8LVL, &ctx->caps->features),
+ cfg);
+}
+
+static void dpu_hw_wb_setup_cdp(struct dpu_hw_wb *ctx,
+ const struct dpu_format *fmt,
+ bool enable)
+{
+ if (!ctx)
+ return;
+
+ dpu_setup_cdp(&ctx->hw, WB_CDP_CNTL, fmt, enable);
+}
+
+static void dpu_hw_wb_bind_pingpong_blk(
+ struct dpu_hw_wb *ctx,
+ const enum dpu_pingpong pp)
+{
+ struct dpu_hw_blk_reg_map *c;
+ int mux_cfg;
+
+ if (!ctx)
+ return;
+
+ c = &ctx->hw;
+
+ mux_cfg = DPU_REG_READ(c, WB_MUX);
+ mux_cfg &= ~0xf;
+
+ if (pp)
+ mux_cfg |= (pp - PINGPONG_0) & 0x7;
+ else
+ mux_cfg |= 0xf;
+
+ DPU_REG_WRITE(c, WB_MUX, mux_cfg);
+}
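+
+/*
+ * For example, binding PINGPONG_2 programs a mux value of 0x2, while
+ * passing zero (no pingpong) writes 0xf to detach the writeback block.
+ */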
+
+static void _setup_wb_ops(struct dpu_hw_wb_ops *ops,
+ unsigned long features)
+{
+ ops->setup_outaddress = dpu_hw_wb_setup_outaddress;
+ ops->setup_outformat = dpu_hw_wb_setup_format;
+
+ if (test_bit(DPU_WB_XY_ROI_OFFSET, &features))
+ ops->setup_roi = dpu_hw_wb_roi;
+
+ if (test_bit(DPU_WB_QOS, &features))
+ ops->setup_qos_lut = dpu_hw_wb_setup_qos_lut;
+
+ if (test_bit(DPU_WB_CDP, &features))
+ ops->setup_cdp = dpu_hw_wb_setup_cdp;
+
+ if (test_bit(DPU_WB_INPUT_CTRL, &features))
+ ops->bind_pingpong_blk = dpu_hw_wb_bind_pingpong_blk;
+}
+
+struct dpu_hw_wb *dpu_hw_wb_init(const struct dpu_wb_cfg *cfg,
+ void __iomem *addr)
+{
+ struct dpu_hw_wb *c;
+
+ if (!addr)
+ return ERR_PTR(-EINVAL);
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ c->hw.blk_addr = addr + cfg->base;
+ c->hw.log_mask = DPU_DBG_MASK_WB;
+
+ /* Assign ops */
+ c->idx = cfg->id;
+ c->caps = cfg;
+ _setup_wb_ops(&c->ops, c->caps->features);
+
+ return c;
+}
+
+void dpu_hw_wb_destroy(struct dpu_hw_wb *hw_wb)
+{
+ kfree(hw_wb);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
new file mode 100644
index 0000000000..2d7db2efa3
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved
+ */
+
+#ifndef _DPU_HW_WB_H
+#define _DPU_HW_WB_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_top.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_pingpong.h"
+
+struct dpu_hw_wb;
+
+struct dpu_hw_wb_cfg {
+ struct dpu_hw_fmt_layout dest;
+ enum dpu_intf_mode intf_mode;
+ struct drm_rect roi;
+ struct drm_rect crop;
+};
+
+/**
+ * struct dpu_hw_wb_ops - Interface to the wb hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ * @setup_outaddress: setup output address from the writeback job
+ * @setup_outformat: setup output format of writeback block from writeback job
+ * @setup_qos_lut: setup qos LUT for writeback block based on input
+ * @setup_cdp: setup chroma down prefetch block for writeback block
+ * @bind_pingpong_blk: enable/disable the connection with ping-pong block
+ */
+struct dpu_hw_wb_ops {
+ void (*setup_outaddress)(struct dpu_hw_wb *ctx,
+ struct dpu_hw_wb_cfg *wb);
+
+ void (*setup_outformat)(struct dpu_hw_wb *ctx,
+ struct dpu_hw_wb_cfg *wb);
+
+ void (*setup_roi)(struct dpu_hw_wb *ctx,
+ struct dpu_hw_wb_cfg *wb);
+
+ void (*setup_qos_lut)(struct dpu_hw_wb *ctx,
+ struct dpu_hw_qos_cfg *cfg);
+
+ void (*setup_cdp)(struct dpu_hw_wb *ctx,
+ const struct dpu_format *fmt,
+ bool enable);
+
+ void (*bind_pingpong_blk)(struct dpu_hw_wb *ctx,
+ const enum dpu_pingpong pp);
+};
+
+/**
+ * struct dpu_hw_wb - WB driver object
+ * @hw: block hardware details
+ * @idx: hardware index number within type
+ * @caps: hardware capabilities
+ * @ops: function pointers
+ */
+struct dpu_hw_wb {
+ struct dpu_hw_blk_reg_map hw;
+
+ /* wb path */
+ int idx;
+ const struct dpu_wb_cfg *caps;
+
+ /* ops */
+ struct dpu_hw_wb_ops ops;
+};
+
+/**
+ * dpu_hw_wb_init() - Initializes the writeback hw driver object.
+ * @cfg: wb_path catalog entry for which driver object is required
+ * @addr: mapped register io address of MDP
+ * Return: Error code or allocated dpu_hw_wb context
+ */
+struct dpu_hw_wb *dpu_hw_wb_init(const struct dpu_wb_cfg *cfg,
+ void __iomem *addr);
+
+/**
+ * dpu_hw_wb_destroy() - Destroy writeback hw driver object.
+ * @hw_wb: Pointer to writeback hw driver object
+ */
+void dpu_hw_wb_destroy(struct dpu_hw_wb *hw_wb);
+
+#endif /*_DPU_HW_WB_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
new file mode 100644
index 0000000000..5acd5683d2
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HWIO_H
+#define _DPU_HWIO_H
+
+#include "dpu_hw_util.h"
+
+/*
+ * MDP TOP block register offsets and bit field definitions
+ */
+#define DISP_INTF_SEL 0x004
+#define INTR_EN 0x010
+#define INTR_STATUS 0x014
+#define INTR_CLEAR 0x018
+#define INTR2_EN 0x008
+#define INTR2_STATUS 0x00c
+#define SSPP_SPARE 0x028
+#define INTR2_CLEAR 0x02c
+#define HIST_INTR_EN 0x01c
+#define HIST_INTR_STATUS 0x020
+#define HIST_INTR_CLEAR 0x024
+#define SPLIT_DISPLAY_EN 0x2F4
+#define SPLIT_DISPLAY_UPPER_PIPE_CTRL 0x2F8
+#define DSPP_IGC_COLOR0_RAM_LUTN 0x300
+#define DSPP_IGC_COLOR1_RAM_LUTN 0x304
+#define DSPP_IGC_COLOR2_RAM_LUTN 0x308
+#define DANGER_STATUS 0x360
+#define SAFE_STATUS 0x364
+#define HW_EVENTS_CTL 0x37C
+#define MDP_WD_TIMER_0_CTL 0x380
+#define MDP_WD_TIMER_0_CTL2 0x384
+#define MDP_WD_TIMER_0_LOAD_VALUE 0x388
+#define MDP_WD_TIMER_1_CTL 0x390
+#define MDP_WD_TIMER_1_CTL2 0x394
+#define MDP_WD_TIMER_1_LOAD_VALUE 0x398
+#define CLK_CTRL3 0x3A8
+#define CLK_STATUS3 0x3AC
+#define CLK_CTRL4 0x3B0
+#define CLK_STATUS4 0x3B4
+#define CLK_CTRL5 0x3B8
+#define CLK_STATUS5 0x3BC
+#define CLK_CTRL7 0x3D0
+#define CLK_STATUS7 0x3D4
+#define SPLIT_DISPLAY_LOWER_PIPE_CTRL 0x3F0
+#define SPLIT_DISPLAY_TE_LINE_INTERVAL 0x3F4
+#define INTF_SW_RESET_MASK 0x3FC
+#define HDMI_DP_CORE_SELECT 0x408
+#define MDP_OUT_CTL_0 0x410
+#define MDP_VSYNC_SEL 0x414
+#define MDP_WD_TIMER_2_CTL 0x420
+#define MDP_WD_TIMER_2_CTL2 0x424
+#define MDP_WD_TIMER_2_LOAD_VALUE 0x428
+#define MDP_WD_TIMER_3_CTL 0x430
+#define MDP_WD_TIMER_3_CTL2 0x434
+#define MDP_WD_TIMER_3_LOAD_VALUE 0x438
+#define MDP_WD_TIMER_4_CTL 0x440
+#define MDP_WD_TIMER_4_CTL2 0x444
+#define MDP_WD_TIMER_4_LOAD_VALUE 0x448
+#define DCE_SEL 0x450
+
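+/*
+ * Bounds of the PERIPH_0 (watchdog timer) register range that the snapshot
+ * code skips when the DPU_MDP_PERIPH_0_REMOVED feature bit is set.
+ */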
+#define MDP_PERIPH_TOP0 MDP_WD_TIMER_0_CTL
+#define MDP_PERIPH_TOP0_END CLK_CTRL3
+
+#endif /*_DPU_HWIO_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
new file mode 100644
index 0000000000..aa6ba2cf4b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -0,0 +1,1402 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+#include <linux/of_irq.h>
+#include <linux/pm_opp.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_file.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_writeback.h>
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "msm_mdss.h"
+#include "msm_gem.h"
+#include "disp/msm_disp_snapshot.h"
+
+#include "dpu_core_irq.h"
+#include "dpu_crtc.h"
+#include "dpu_encoder.h"
+#include "dpu_formats.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_kms.h"
+#include "dpu_plane.h"
+#include "dpu_vbif.h"
+#include "dpu_writeback.h"
+
+#define CREATE_TRACE_POINTS
+#include "dpu_trace.h"
+
+/*
+ * To enable overall DRM driver logging
+ * # echo 0x2 > /sys/module/drm/parameters/debug
+ *
+ * To enable DRM driver h/w logging
+ * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
+ *
+ * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
+ */
+#define DPU_DEBUGFS_DIR "msm_dpu"
+#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
+
+static int dpu_kms_hw_init(struct msm_kms *kms);
+static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_danger_signal_status(struct seq_file *s,
+ bool danger_status)
+{
+ struct dpu_danger_safe_status status;
+ struct dpu_kms *kms = s->private;
+ int i;
+
+ if (!kms->hw_mdp) {
+ DPU_ERROR("invalid arg(s)\n");
+ return 0;
+ }
+
+ memset(&status, 0, sizeof(struct dpu_danger_safe_status));
+
+ pm_runtime_get_sync(&kms->pdev->dev);
+ if (danger_status) {
+ seq_puts(s, "\nDanger signal status:\n");
+ if (kms->hw_mdp->ops.get_danger_status)
+ kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
+ &status);
+ } else {
+ seq_puts(s, "\nSafe signal status:\n");
+ if (kms->hw_mdp->ops.get_safe_status)
+ kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
+ &status);
+ }
+ pm_runtime_put_sync(&kms->pdev->dev);
+
+ seq_printf(s, "MDP : 0x%x\n", status.mdp);
+
+ for (i = SSPP_VIG0; i < SSPP_MAX; i++)
+ seq_printf(s, "SSPP%d : 0x%x \n", i - SSPP_VIG0,
+ status.sspp[i]);
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
+{
+ return _dpu_danger_signal_status(s, true);
+}
+DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_danger_stats);
+
+static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
+{
+ return _dpu_danger_signal_status(s, false);
+}
+DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_safe_stats);
+
+static ssize_t _dpu_plane_danger_read(struct file *file,
+ char __user *buff, size_t count, loff_t *ppos)
+{
+ struct dpu_kms *kms = file->private_data;
+ int len;
+ char buf[40];
+
+ len = scnprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
+
+ return simple_read_from_buffer(buff, count, ppos, buf, len);
+}
+
+static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
+{
+ struct drm_plane *plane;
+
+ drm_for_each_plane(plane, kms->dev) {
+ if (plane->fb && plane->state) {
+ dpu_plane_danger_signal_ctrl(plane, enable);
+ DPU_DEBUG("plane:%d img:%dx%d ",
+ plane->base.id, plane->fb->width,
+ plane->fb->height);
+ DPU_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->crtc_x, plane->state->crtc_y,
+ plane->state->crtc_w, plane->state->crtc_h);
+ } else {
+ DPU_DEBUG("Inactive plane:%d\n", plane->base.id);
+ }
+ }
+}
+
+static ssize_t _dpu_plane_danger_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dpu_kms *kms = file->private_data;
+	unsigned int disable_panic;
+ int ret;
+
+ ret = kstrtouint_from_user(user_buf, count, 0, &disable_panic);
+ if (ret)
+ return ret;
+
+ if (disable_panic) {
+ /* Disable panic signal for all active pipes */
+ DPU_DEBUG("Disabling danger:\n");
+ _dpu_plane_set_danger_state(kms, false);
+ kms->has_danger_ctrl = false;
+ } else {
+ /* Enable panic signal for all active pipes */
+ DPU_DEBUG("Enabling danger:\n");
+ kms->has_danger_ctrl = true;
+ _dpu_plane_set_danger_state(kms, true);
+ }
+
+ return count;
+}
+
+static const struct file_operations dpu_plane_danger_enable = {
+ .open = simple_open,
+ .read = _dpu_plane_danger_read,
+ .write = _dpu_plane_danger_write,
+};
+
+static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent)
+{
+ struct dentry *entry = debugfs_create_dir("danger", parent);
+
+ debugfs_create_file("danger_status", 0600, entry,
+ dpu_kms, &dpu_debugfs_danger_stats_fops);
+ debugfs_create_file("safe_status", 0600, entry,
+ dpu_kms, &dpu_debugfs_safe_stats_fops);
+ debugfs_create_file("disable_danger", 0600, entry,
+ dpu_kms, &dpu_plane_danger_enable);
+}
+
+/*
+ * Companion structure for dpu_debugfs_create_regset32.
+ */
+struct dpu_debugfs_regset32 {
+ uint32_t offset;
+ uint32_t blk_len;
+ struct dpu_kms *dpu_kms;
+};
+
+static int dpu_regset32_show(struct seq_file *s, void *data)
+{
+ struct dpu_debugfs_regset32 *regset = s->private;
+ struct dpu_kms *dpu_kms = regset->dpu_kms;
+ void __iomem *base;
+ uint32_t i, addr;
+
+ if (!dpu_kms->mmio)
+ return 0;
+
+ base = dpu_kms->mmio + regset->offset;
+
+ /* insert padding spaces, if needed */
+ if (regset->offset & 0xF) {
+ seq_printf(s, "[%x]", regset->offset & ~0xF);
+ for (i = 0; i < (regset->offset & 0xF); i += 4)
+ seq_puts(s, " ");
+ }
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+ /* main register output */
+ for (i = 0; i < regset->blk_len; i += 4) {
+ addr = regset->offset + i;
+ if ((addr & 0xF) == 0x0)
+ seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
+ seq_printf(s, " %08x", readl_relaxed(base + i));
+ }
+ seq_puts(s, "\n");
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dpu_regset32);
+
+void dpu_debugfs_create_regset32(const char *name, umode_t mode,
+ void *parent,
+ uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
+{
+ struct dpu_debugfs_regset32 *regset;
+
+ if (WARN_ON(!name || !dpu_kms || !length))
+ return;
+
+ regset = devm_kzalloc(&dpu_kms->pdev->dev, sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+ return;
+
+ /* make sure offset is a multiple of 4 */
+ regset->offset = round_down(offset, 4);
+ regset->blk_len = length;
+ regset->dpu_kms = dpu_kms;
+
+ debugfs_create_file(name, mode, parent, regset, &dpu_regset32_fops);
+}
+
+static void dpu_debugfs_sspp_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
+{
+ struct dentry *entry = debugfs_create_dir("sspp", debugfs_root);
+ int i;
+
+ if (IS_ERR(entry))
+ return;
+
+ for (i = SSPP_NONE; i < SSPP_MAX; i++) {
+ struct dpu_hw_sspp *hw = dpu_rm_get_sspp(&dpu_kms->rm, i);
+
+ if (!hw)
+ continue;
+
+ _dpu_hw_sspp_init_debugfs(hw, dpu_kms, entry);
+ }
+}
+
+static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ void *p = dpu_hw_util_get_log_mask_ptr();
+ struct dentry *entry;
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
+ int i;
+
+ if (!p)
+ return -EINVAL;
+
+ /* Only create a set of debugfs for the primary node, ignore render nodes */
+ if (minor->type != DRM_MINOR_PRIMARY)
+ return 0;
+
+ dev = dpu_kms->dev;
+ priv = dev->dev_private;
+
+ entry = debugfs_create_dir("debug", minor->debugfs_root);
+
+ debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);
+
+ dpu_debugfs_danger_init(dpu_kms, entry);
+ dpu_debugfs_vbif_init(dpu_kms, entry);
+ dpu_debugfs_core_irq_init(dpu_kms, entry);
+ dpu_debugfs_sspp_init(dpu_kms, entry);
+
+ for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
+ if (priv->dp[i])
+ msm_dp_debugfs_init(priv->dp[i], minor);
+ }
+
+ return dpu_core_perf_debugfs_init(dpu_kms, entry);
+}
+#endif
+
+/* Global/shared object state funcs */
+
+/*
+ * This is a helper that returns the private state currently in operation.
+ * Note that this would return the "old_state" if called in the atomic check
+ * path, and the "new_state" after the atomic swap has been done.
+ */
+struct dpu_global_state *
+dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms)
+{
+ return to_dpu_global_state(dpu_kms->global_state.state);
+}
+
+/*
+ * This acquires the modeset lock set aside for global state, creates
+ * a new duplicated private object state.
+ */
+struct dpu_global_state *dpu_kms_get_global_state(struct drm_atomic_state *s)
+{
+ struct msm_drm_private *priv = s->dev->dev_private;
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+ struct drm_private_state *priv_state;
+ int ret;
+
+ ret = drm_modeset_lock(&dpu_kms->global_state_lock, s->acquire_ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
+ priv_state = drm_atomic_get_private_obj_state(s,
+ &dpu_kms->global_state);
+ if (IS_ERR(priv_state))
+ return ERR_CAST(priv_state);
+
+ return to_dpu_global_state(priv_state);
+}
+
+static struct drm_private_state *
+dpu_kms_global_duplicate_state(struct drm_private_obj *obj)
+{
+ struct dpu_global_state *state;
+
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+ return &state->base;
+}
+
+static void dpu_kms_global_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct dpu_global_state *dpu_state = to_dpu_global_state(state);
+
+ kfree(dpu_state);
+}
+
+static const struct drm_private_state_funcs dpu_kms_global_state_funcs = {
+ .atomic_duplicate_state = dpu_kms_global_duplicate_state,
+ .atomic_destroy_state = dpu_kms_global_destroy_state,
+};
+
+static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
+{
+ struct dpu_global_state *state;
+
+ drm_modeset_lock_init(&dpu_kms->global_state_lock);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state,
+ &state->base,
+ &dpu_kms_global_state_funcs);
+ return 0;
+}
+
+static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
+{
+ struct icc_path *path0;
+ struct icc_path *path1;
+ struct drm_device *dev = dpu_kms->dev;
+ struct device *dpu_dev = dev->dev;
+
+ path0 = msm_icc_get(dpu_dev, "mdp0-mem");
+ path1 = msm_icc_get(dpu_dev, "mdp1-mem");
+
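+	/* the mdp0-mem path is required; mdp1-mem is used only when present */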
+ if (IS_ERR_OR_NULL(path0))
+ return PTR_ERR_OR_ZERO(path0);
+
+ dpu_kms->path[0] = path0;
+ dpu_kms->num_paths = 1;
+
+ if (!IS_ERR_OR_NULL(path1)) {
+ dpu_kms->path[1] = path1;
+ dpu_kms->num_paths++;
+ }
+ return 0;
+}
+
+static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ return dpu_crtc_vblank(crtc, true);
+}
+
+static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ dpu_crtc_vblank(crtc, false);
+}
+
+static void dpu_kms_enable_commit(struct msm_kms *kms)
+{
+	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+	pm_runtime_get_sync(&dpu_kms->pdev->dev);
+}
+
+static void dpu_kms_disable_commit(struct msm_kms *kms)
+{
+	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+}
+
+static void dpu_kms_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ struct drm_crtc *crtc;
+
+ for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask) {
+ if (!crtc->state->active)
+ continue;
+
+ trace_dpu_kms_commit(DRMID(crtc));
+ dpu_crtc_commit_kickoff(crtc);
+ }
+}
+
+static void dpu_kms_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ struct drm_crtc *crtc;
+
+ DPU_ATRACE_BEGIN("kms_complete_commit");
+
+ for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
+ dpu_crtc_complete_commit(crtc);
+
+ DPU_ATRACE_END("kms_complete_commit");
+}
+
+static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
+ struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct drm_device *dev;
+ int ret;
+
+ if (!kms || !crtc || !crtc->state) {
+ DPU_ERROR("invalid params\n");
+ return;
+ }
+
+ dev = crtc->dev;
+
+ if (!crtc->state->enable) {
+ DPU_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
+ return;
+ }
+
+ if (!drm_atomic_crtc_effectively_active(crtc->state)) {
+ DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
+ return;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+ /*
+ * Wait for post-flush if necessary to delay before
+ * plane_cleanup. For example, wait for vsync in case of video
+ * mode panels. This may be a no-op for command mode panels.
+ */
+ trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
+ ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
+ if (ret && ret != -EWOULDBLOCK) {
+ DPU_ERROR("wait for commit done returned %d\n", ret);
+ break;
+ }
+ }
+}
+
+static void dpu_kms_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ struct drm_crtc *crtc;
+
+ for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
+ dpu_kms_wait_for_commit_done(kms, crtc);
+}
+
+static int _dpu_kms_initialize_dsi(struct drm_device *dev,
+ struct msm_drm_private *priv,
+ struct dpu_kms *dpu_kms)
+{
+ struct drm_encoder *encoder = NULL;
+ struct msm_display_info info;
+ int i, rc = 0;
+
+ if (!(priv->dsi[0] || priv->dsi[1]))
+ return rc;
+
+ /*
+	 * We support the following configurations:
+ * - Single DSI host (dsi0 or dsi1)
+ * - Two independent DSI hosts
+ * - Bonded DSI0 and DSI1 hosts
+ *
+ * TODO: Support swapping DSI0 and DSI1 in the bonded setup.
+ */
+ for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+ int other = (i + 1) % 2;
+
+ if (!priv->dsi[i])
+ continue;
+
+ if (msm_dsi_is_bonded_dsi(priv->dsi[i]) &&
+ !msm_dsi_is_master_dsi(priv->dsi[i]))
+ continue;
+
+ memset(&info, 0, sizeof(info));
+ info.intf_type = INTF_DSI;
+
+ info.h_tile_instance[info.num_of_h_tiles++] = i;
+ if (msm_dsi_is_bonded_dsi(priv->dsi[i]))
+ info.h_tile_instance[info.num_of_h_tiles++] = other;
+
+ info.is_cmd_mode = msm_dsi_is_cmd_mode(priv->dsi[i]);
+
+ encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI, &info);
+ if (IS_ERR(encoder)) {
+ DPU_ERROR("encoder init failed for dsi display\n");
+ return PTR_ERR(encoder);
+ }
+
+ rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
+ if (rc) {
+ DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
+ i, rc);
+ break;
+ }
+
+ if (msm_dsi_is_bonded_dsi(priv->dsi[i]) && priv->dsi[other]) {
+ rc = msm_dsi_modeset_init(priv->dsi[other], dev, encoder);
+ if (rc) {
+ DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
+ other, rc);
+ break;
+ }
+ }
+ }
+
+ return rc;
+}
+
+static int _dpu_kms_initialize_displayport(struct drm_device *dev,
+ struct msm_drm_private *priv,
+ struct dpu_kms *dpu_kms)
+{
+ struct drm_encoder *encoder = NULL;
+ struct msm_display_info info;
+ int rc;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
+ if (!priv->dp[i])
+ continue;
+
+ memset(&info, 0, sizeof(info));
+ info.num_of_h_tiles = 1;
+ info.h_tile_instance[0] = i;
+ info.intf_type = INTF_DP;
+
+ encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS, &info);
+ if (IS_ERR(encoder)) {
+ DPU_ERROR("encoder init failed for dsi display\n");
+ return PTR_ERR(encoder);
+ }
+
+ rc = msm_dp_modeset_init(priv->dp[i], dev, encoder);
+ if (rc) {
+ DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
+ drm_encoder_cleanup(encoder);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int _dpu_kms_initialize_hdmi(struct drm_device *dev,
+ struct msm_drm_private *priv,
+ struct dpu_kms *dpu_kms)
+{
+ struct drm_encoder *encoder = NULL;
+ struct msm_display_info info;
+ int rc;
+
+ if (!priv->hdmi)
+ return 0;
+
+ memset(&info, 0, sizeof(info));
+ info.num_of_h_tiles = 1;
+ info.h_tile_instance[0] = 0;
+ info.intf_type = INTF_HDMI;
+
+ encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS, &info);
+ if (IS_ERR(encoder)) {
+ DPU_ERROR("encoder init failed for HDMI display\n");
+ return PTR_ERR(encoder);
+ }
+
+ rc = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
+ if (rc) {
+ DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
+ drm_encoder_cleanup(encoder);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int _dpu_kms_initialize_writeback(struct drm_device *dev,
+ struct msm_drm_private *priv, struct dpu_kms *dpu_kms,
+ const u32 *wb_formats, int n_formats)
+{
+ struct drm_encoder *encoder = NULL;
+ struct msm_display_info info;
+ int rc;
+
+ memset(&info, 0, sizeof(info));
+
+ info.num_of_h_tiles = 1;
+ /* use only WB idx 2 instance for DPU */
+ info.h_tile_instance[0] = WB_2;
+ info.intf_type = INTF_WB;
+
+ encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_VIRTUAL, &info);
+ if (IS_ERR(encoder)) {
+ DPU_ERROR("encoder init failed for dsi display\n");
+ return PTR_ERR(encoder);
+ }
+
+ rc = dpu_writeback_init(dev, encoder, wb_formats,
+ n_formats);
+ if (rc) {
+ DPU_ERROR("dpu_writeback_init, rc = %d\n", rc);
+ drm_encoder_cleanup(encoder);
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * _dpu_kms_setup_displays - create encoders, bridges and connectors
+ * for underlying displays
+ * @dev: Pointer to drm device structure
+ * @priv: Pointer to private drm device data
+ * @dpu_kms: Pointer to dpu kms structure
+ * Returns: Zero on success
+ */
+static int _dpu_kms_setup_displays(struct drm_device *dev,
+ struct msm_drm_private *priv,
+ struct dpu_kms *dpu_kms)
+{
+ int rc = 0;
+ int i;
+
+ rc = _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
+ if (rc) {
+ DPU_ERROR("initialize_dsi failed, rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = _dpu_kms_initialize_displayport(dev, priv, dpu_kms);
+ if (rc) {
+ DPU_ERROR("initialize_DP failed, rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = _dpu_kms_initialize_hdmi(dev, priv, dpu_kms);
+ if (rc) {
+ DPU_ERROR("initialize HDMI failed, rc = %d\n", rc);
+ return rc;
+ }
+
+	/* Since WB isn't a driver, check the catalog before initializing */
+ if (dpu_kms->catalog->wb_count) {
+ for (i = 0; i < dpu_kms->catalog->wb_count; i++) {
+ if (dpu_kms->catalog->wb[i].id == WB_2) {
+ rc = _dpu_kms_initialize_writeback(dev, priv, dpu_kms,
+ dpu_kms->catalog->wb[i].format_list,
+ dpu_kms->catalog->wb[i].num_formats);
+ if (rc) {
+ DPU_ERROR("initialize_WB failed, rc = %d\n", rc);
+ return rc;
+ }
+ }
+ }
+ }
+
+ return rc;
+}
+
+#define MAX_PLANES 20
+static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
+{
+ struct drm_device *dev;
+ struct drm_plane *primary_planes[MAX_PLANES], *plane;
+ struct drm_plane *cursor_planes[MAX_PLANES] = { NULL };
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ unsigned int num_encoders;
+
+ struct msm_drm_private *priv;
+ const struct dpu_mdss_cfg *catalog;
+
+ int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret;
+ int max_crtc_count;
+ dev = dpu_kms->dev;
+ priv = dev->dev_private;
+ catalog = dpu_kms->catalog;
+
+ /*
+ * Create encoder and query display drivers to create
+ * bridges and connectors
+ */
+ ret = _dpu_kms_setup_displays(dev, priv, dpu_kms);
+ if (ret)
+ return ret;
+
+ num_encoders = 0;
+ drm_for_each_encoder(encoder, dev)
+ num_encoders++;
+
+ max_crtc_count = min(catalog->mixer_count, num_encoders);
+
+ /* Create the planes, keeping track of one primary/cursor per crtc */
+ for (i = 0; i < catalog->sspp_count; i++) {
+ enum drm_plane_type type;
+
+ if ((catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR))
+ && cursor_planes_idx < max_crtc_count)
+ type = DRM_PLANE_TYPE_CURSOR;
+ else if (primary_planes_idx < max_crtc_count)
+ type = DRM_PLANE_TYPE_PRIMARY;
+ else
+ type = DRM_PLANE_TYPE_OVERLAY;
+
+ DPU_DEBUG("Create plane type %d with features %lx (cur %lx)\n",
+ type, catalog->sspp[i].features,
+ catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));
+
+ plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
+ (1UL << max_crtc_count) - 1);
+ if (IS_ERR(plane)) {
+ DPU_ERROR("dpu_plane_init failed\n");
+ ret = PTR_ERR(plane);
+ return ret;
+ }
+
+ if (type == DRM_PLANE_TYPE_CURSOR)
+ cursor_planes[cursor_planes_idx++] = plane;
+ else if (type == DRM_PLANE_TYPE_PRIMARY)
+ primary_planes[primary_planes_idx++] = plane;
+ }
+
+ max_crtc_count = min(max_crtc_count, primary_planes_idx);
+
+ /* Create one CRTC per encoder */
+ for (i = 0; i < max_crtc_count; i++) {
+ crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]);
+ if (IS_ERR(crtc)) {
+ ret = PTR_ERR(crtc);
+ return ret;
+ }
+ priv->num_crtcs++;
+ }
+
+ /* All CRTCs are compatible with all encoders */
+ drm_for_each_encoder(encoder, dev)
+ encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
+
+ return 0;
+}
+
+static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
+{
+ int i;
+
+ if (dpu_kms->hw_intr)
+ dpu_hw_intr_destroy(dpu_kms->hw_intr);
+ dpu_kms->hw_intr = NULL;
+
+ /* safe to call these more than once during shutdown */
+ _dpu_kms_mmu_destroy(dpu_kms);
+
+ if (dpu_kms->catalog) {
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ if (dpu_kms->hw_vbif[i]) {
+ dpu_hw_vbif_destroy(dpu_kms->hw_vbif[i]);
+ dpu_kms->hw_vbif[i] = NULL;
+ }
+ }
+ }
+
+ if (dpu_kms->rm_init)
+ dpu_rm_destroy(&dpu_kms->rm);
+ dpu_kms->rm_init = false;
+
+ dpu_kms->catalog = NULL;
+
+ if (dpu_kms->vbif[VBIF_NRT])
+ devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
+ dpu_kms->vbif[VBIF_NRT] = NULL;
+
+ if (dpu_kms->vbif[VBIF_RT])
+ devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
+ dpu_kms->vbif[VBIF_RT] = NULL;
+
+ if (dpu_kms->hw_mdp)
+ dpu_hw_mdp_destroy(dpu_kms->hw_mdp);
+ dpu_kms->hw_mdp = NULL;
+
+ if (dpu_kms->mmio)
+ devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
+ dpu_kms->mmio = NULL;
+}
+
+static void dpu_kms_destroy(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms;
+
+ if (!kms) {
+ DPU_ERROR("invalid kms\n");
+ return;
+ }
+
+ dpu_kms = to_dpu_kms(kms);
+
+ _dpu_kms_hw_destroy(dpu_kms);
+
+ msm_kms_destroy(&dpu_kms->base);
+
+ if (dpu_kms->rpm_enabled)
+ pm_runtime_disable(&dpu_kms->pdev->dev);
+}
+
+static int dpu_irq_postinstall(struct msm_kms *kms)
+{
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ int i;
+
+ if (!dpu_kms || !dpu_kms->dev)
+ return -EINVAL;
+
+ priv = dpu_kms->dev->dev_private;
+ if (!priv)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(priv->dp); i++)
+ msm_dp_irq_postinstall(priv->dp[i]);
+
+ return 0;
+}
+
+static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_kms *kms)
+{
+ int i;
+ struct dpu_kms *dpu_kms;
+ const struct dpu_mdss_cfg *cat;
+ void __iomem *base;
+
+ dpu_kms = to_dpu_kms(kms);
+
+ cat = dpu_kms->catalog;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+ /* dump CTL sub-blocks HW regs info */
+ for (i = 0; i < cat->ctl_count; i++)
+ msm_disp_snapshot_add_block(disp_state, cat->ctl[i].len,
+ dpu_kms->mmio + cat->ctl[i].base, cat->ctl[i].name);
+
+ /* dump DSPP sub-blocks HW regs info */
+ for (i = 0; i < cat->dspp_count; i++) {
+ base = dpu_kms->mmio + cat->dspp[i].base;
+ msm_disp_snapshot_add_block(disp_state, cat->dspp[i].len, base, cat->dspp[i].name);
+
+ if (cat->dspp[i].sblk && cat->dspp[i].sblk->pcc.len > 0)
+ msm_disp_snapshot_add_block(disp_state, cat->dspp[i].sblk->pcc.len,
+ base + cat->dspp[i].sblk->pcc.base, "%s_%s",
+ cat->dspp[i].name,
+ cat->dspp[i].sblk->pcc.name);
+ }
+
+ /* dump INTF sub-blocks HW regs info */
+ for (i = 0; i < cat->intf_count; i++)
+ msm_disp_snapshot_add_block(disp_state, cat->intf[i].len,
+ dpu_kms->mmio + cat->intf[i].base, cat->intf[i].name);
+
+ /* dump PP sub-blocks HW regs info */
+ for (i = 0; i < cat->pingpong_count; i++) {
+ base = dpu_kms->mmio + cat->pingpong[i].base;
+ msm_disp_snapshot_add_block(disp_state, cat->pingpong[i].len, base,
+ cat->pingpong[i].name);
+
+		/* the TE2 sub-block has a length of 0, so it is not dumped */
+
+ if (cat->pingpong[i].sblk && cat->pingpong[i].sblk->dither.len > 0)
+ msm_disp_snapshot_add_block(disp_state, cat->pingpong[i].sblk->dither.len,
+ base + cat->pingpong[i].sblk->dither.base,
+ "%s_%s", cat->pingpong[i].name,
+ cat->pingpong[i].sblk->dither.name);
+ }
+
+ /* dump SSPP sub-blocks HW regs info */
+ for (i = 0; i < cat->sspp_count; i++) {
+ base = dpu_kms->mmio + cat->sspp[i].base;
+ msm_disp_snapshot_add_block(disp_state, cat->sspp[i].len, base, cat->sspp[i].name);
+
+ if (cat->sspp[i].sblk && cat->sspp[i].sblk->scaler_blk.len > 0)
+ msm_disp_snapshot_add_block(disp_state, cat->sspp[i].sblk->scaler_blk.len,
+ base + cat->sspp[i].sblk->scaler_blk.base,
+ "%s_%s", cat->sspp[i].name,
+ cat->sspp[i].sblk->scaler_blk.name);
+
+ if (cat->sspp[i].sblk && cat->sspp[i].sblk->csc_blk.len > 0)
+ msm_disp_snapshot_add_block(disp_state, cat->sspp[i].sblk->csc_blk.len,
+ base + cat->sspp[i].sblk->csc_blk.base,
+ "%s_%s", cat->sspp[i].name,
+ cat->sspp[i].sblk->csc_blk.name);
+ }
+
+ /* dump LM sub-blocks HW regs info */
+ for (i = 0; i < cat->mixer_count; i++)
+ msm_disp_snapshot_add_block(disp_state, cat->mixer[i].len,
+ dpu_kms->mmio + cat->mixer[i].base, cat->mixer[i].name);
+
+ /* dump WB sub-blocks HW regs info */
+ for (i = 0; i < cat->wb_count; i++)
+ msm_disp_snapshot_add_block(disp_state, cat->wb[i].len,
+ dpu_kms->mmio + cat->wb[i].base, cat->wb[i].name);
+
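+	/* on chips without PERIPH_0, dump the top block around the removed range */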
+ if (cat->mdp[0].features & BIT(DPU_MDP_PERIPH_0_REMOVED)) {
+ msm_disp_snapshot_add_block(disp_state, MDP_PERIPH_TOP0,
+ dpu_kms->mmio + cat->mdp[0].base, "top");
+ msm_disp_snapshot_add_block(disp_state, cat->mdp[0].len - MDP_PERIPH_TOP0_END,
+ dpu_kms->mmio + cat->mdp[0].base + MDP_PERIPH_TOP0_END, "top_2");
+ } else {
+ msm_disp_snapshot_add_block(disp_state, cat->mdp[0].len,
+ dpu_kms->mmio + cat->mdp[0].base, "top");
+ }
+
+ /* dump DSC sub-blocks HW regs info */
+ for (i = 0; i < cat->dsc_count; i++) {
+ base = dpu_kms->mmio + cat->dsc[i].base;
+ msm_disp_snapshot_add_block(disp_state, cat->dsc[i].len, base, cat->dsc[i].name);
+
+ if (cat->dsc[i].features & BIT(DPU_DSC_HW_REV_1_2)) {
+ struct dpu_dsc_blk enc = cat->dsc[i].sblk->enc;
+ struct dpu_dsc_blk ctl = cat->dsc[i].sblk->ctl;
+
+ msm_disp_snapshot_add_block(disp_state, enc.len, base + enc.base, "%s_%s",
+ cat->dsc[i].name, enc.name);
+ msm_disp_snapshot_add_block(disp_state, ctl.len, base + ctl.base, "%s_%s",
+ cat->dsc[i].name, ctl.name);
+ }
+ }
+
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+}
+
+static const struct msm_kms_funcs kms_funcs = {
+ .hw_init = dpu_kms_hw_init,
+ .irq_preinstall = dpu_core_irq_preinstall,
+ .irq_postinstall = dpu_irq_postinstall,
+ .irq_uninstall = dpu_core_irq_uninstall,
+ .irq = dpu_core_irq,
+ .enable_commit = dpu_kms_enable_commit,
+ .disable_commit = dpu_kms_disable_commit,
+ .flush_commit = dpu_kms_flush_commit,
+ .wait_flush = dpu_kms_wait_flush,
+ .complete_commit = dpu_kms_complete_commit,
+ .enable_vblank = dpu_kms_enable_vblank,
+ .disable_vblank = dpu_kms_disable_vblank,
+ .check_modified_format = dpu_format_check_modified_format,
+ .get_format = dpu_get_msm_format,
+ .destroy = dpu_kms_destroy,
+ .snapshot = dpu_kms_mdp_snapshot,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = dpu_kms_debugfs_init,
+#endif
+};
+
+static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
+{
+ struct msm_mmu *mmu;
+
+ if (!dpu_kms->base.aspace)
+ return;
+
+ mmu = dpu_kms->base.aspace->mmu;
+
+ mmu->funcs->detach(mmu);
+ msm_gem_address_space_put(dpu_kms->base.aspace);
+
+ dpu_kms->base.aspace = NULL;
+}
+
+static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
+{
+ struct msm_gem_address_space *aspace;
+
+ aspace = msm_kms_init_aspace(dpu_kms->dev);
+ if (IS_ERR(aspace))
+ return PTR_ERR(aspace);
+
+ dpu_kms->base.aspace = aspace;
+
+ return 0;
+}
+
+unsigned long dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
+{
+ struct clk *clk;
+
+ clk = msm_clk_bulk_get_clock(dpu_kms->clocks, dpu_kms->num_clocks, clock_name);
+ if (!clk)
+ return 0;
+
+ return clk_get_rate(clk);
+}
+
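+/* fallback used when the "core" clock rate cannot be determined */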
+#define DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE 412500000
+
+static int dpu_kms_hw_init(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms;
+ struct drm_device *dev;
+ int i, rc = -EINVAL;
+ unsigned long max_core_clk_rate;
+ u32 core_rev;
+
+ if (!kms) {
+ DPU_ERROR("invalid kms\n");
+ return rc;
+ }
+
+ dpu_kms = to_dpu_kms(kms);
+ dev = dpu_kms->dev;
+
+ dev->mode_config.cursor_width = 512;
+ dev->mode_config.cursor_height = 512;
+
+ rc = dpu_kms_global_obj_init(dpu_kms);
+ if (rc)
+ return rc;
+
+ atomic_set(&dpu_kms->bandwidth_ref, 0);
+
+ dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp");
+ if (IS_ERR(dpu_kms->mmio)) {
+ rc = PTR_ERR(dpu_kms->mmio);
+ DPU_ERROR("mdp register memory map failed: %d\n", rc);
+ dpu_kms->mmio = NULL;
+ goto error;
+ }
+ DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
+
+ dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif");
+ if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
+ rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
+ DPU_ERROR("vbif register memory map failed: %d\n", rc);
+ dpu_kms->vbif[VBIF_RT] = NULL;
+ goto error;
+ }
+ dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(dpu_kms->pdev, "vbif_nrt");
+ if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
+ dpu_kms->vbif[VBIF_NRT] = NULL;
+ DPU_DEBUG("VBIF NRT is not defined");
+ }
+
+ dpu_kms_parse_data_bus_icc_path(dpu_kms);
+
+ rc = pm_runtime_resume_and_get(&dpu_kms->pdev->dev);
+ if (rc < 0)
+ goto error;
+
+ core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
+
+ pr_info("dpu hardware revision:0x%x\n", core_rev);
+
+ dpu_kms->catalog = of_device_get_match_data(dev->dev);
+ if (!dpu_kms->catalog) {
+ DPU_ERROR("device config not known!\n");
+ rc = -EINVAL;
+ goto power_error;
+ }
+
+ /*
+ * Now we need to read the HW catalog and initialize resources such as
+	 * clocks, regulators, GDSC/MMAGIC, ioremap of the register ranges, etc.
+ */
+ rc = _dpu_kms_mmu_init(dpu_kms);
+ if (rc) {
+ DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
+ goto power_error;
+ }
+
+ dpu_kms->mdss = msm_mdss_get_mdss_data(dpu_kms->pdev->dev.parent);
+ if (IS_ERR(dpu_kms->mdss)) {
+ rc = PTR_ERR(dpu_kms->mdss);
+ DPU_ERROR("failed to get MDSS data: %d\n", rc);
+ goto power_error;
+ }
+
+ if (!dpu_kms->mdss) {
+ rc = -EINVAL;
+ DPU_ERROR("NULL MDSS data\n");
+ goto power_error;
+ }
+
+ rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mdss, dpu_kms->mmio);
+ if (rc) {
+ DPU_ERROR("rm init failed: %d\n", rc);
+ goto power_error;
+ }
+
+ dpu_kms->rm_init = true;
+
+ dpu_kms->hw_mdp = dpu_hw_mdptop_init(dpu_kms->catalog->mdp,
+ dpu_kms->mmio,
+ dpu_kms->catalog);
+ if (IS_ERR(dpu_kms->hw_mdp)) {
+ rc = PTR_ERR(dpu_kms->hw_mdp);
+ DPU_ERROR("failed to get hw_mdp: %d\n", rc);
+ dpu_kms->hw_mdp = NULL;
+ goto power_error;
+ }
+
+ for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+ struct dpu_hw_vbif *hw;
+ const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
+
+ hw = dpu_hw_vbif_init(vbif, dpu_kms->vbif[vbif->id]);
+ if (IS_ERR(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed to init vbif %d: %d\n", vbif->id, rc);
+ goto power_error;
+ }
+
+ dpu_kms->hw_vbif[vbif->id] = hw;
+ }
+
+	/* TODO: use the same max_freq as in dpu_kms_init */
+ max_core_clk_rate = dpu_kms_get_clk_rate(dpu_kms, "core");
+ if (!max_core_clk_rate) {
+ DPU_DEBUG("max core clk rate not determined, using default\n");
+ max_core_clk_rate = DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE;
+ }
+
+ rc = dpu_core_perf_init(&dpu_kms->perf, dpu_kms->catalog->perf, max_core_clk_rate);
+ if (rc) {
+ DPU_ERROR("failed to init perf %d\n", rc);
+ goto perf_err;
+ }
+
+ dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
+ if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
+ rc = PTR_ERR(dpu_kms->hw_intr);
+ DPU_ERROR("hw_intr init failed: %d\n", rc);
+ dpu_kms->hw_intr = NULL;
+ goto hw_intr_init_err;
+ }
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ /*
+	 * max crtc width is equal to the max mixer width * 2 and max height
+	 * is 4K
+ */
+ dev->mode_config.max_width =
+ dpu_kms->catalog->caps->max_mixer_width * 2;
+ dev->mode_config.max_height = 4096;
+
+ dev->max_vblank_count = 0xffffffff;
+ /* Disable vblank irqs aggressively for power-saving */
+ dev->vblank_disable_immediate = true;
+
+ /*
+ * _dpu_kms_drm_obj_init should create the DRM related objects
+ * i.e. CRTCs, planes, encoders, connectors and so forth
+ */
+ rc = _dpu_kms_drm_obj_init(dpu_kms);
+ if (rc) {
+ DPU_ERROR("modeset init failed: %d\n", rc);
+ goto drm_obj_init_err;
+ }
+
+ dpu_vbif_init_memtypes(dpu_kms);
+
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ return 0;
+
+drm_obj_init_err:
+hw_intr_init_err:
+perf_err:
+power_error:
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+error:
+ _dpu_kms_hw_destroy(dpu_kms);
+
+ return rc;
+}
+
+static int dpu_kms_init(struct drm_device *ddev)
+{
+ struct msm_drm_private *priv = ddev->dev_private;
+ struct device *dev = ddev->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dpu_kms *dpu_kms;
+ int irq;
+ struct dev_pm_opp *opp;
+ int ret = 0;
+ unsigned long max_freq = ULONG_MAX;
+
+ dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
+ if (!dpu_kms)
+ return -ENOMEM;
+
+ ret = devm_pm_opp_set_clkname(dev, "core");
+ if (ret)
+ return ret;
+ /* OPP table is optional */
+ ret = devm_pm_opp_of_add_table(dev);
+ if (ret && ret != -ENODEV) {
+ dev_err(dev, "invalid OPP table in device tree\n");
+ return ret;
+ }
+
+ ret = devm_clk_bulk_get_all(&pdev->dev, &dpu_kms->clocks);
+ if (ret < 0) {
+ DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
+ return ret;
+ }
+ dpu_kms->num_clocks = ret;
+
+ opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
+ if (!IS_ERR(opp))
+ dev_pm_opp_put(opp);
+
+ dev_pm_opp_set_rate(dev, max_freq);
+
+ ret = msm_kms_init(&dpu_kms->base, &kms_funcs);
+ if (ret) {
+ DPU_ERROR("failed to init kms, ret=%d\n", ret);
+ return ret;
+ }
+ dpu_kms->dev = ddev;
+ dpu_kms->pdev = pdev;
+
+ pm_runtime_enable(&pdev->dev);
+ dpu_kms->rpm_enabled = true;
+
+ priv->kms = &dpu_kms->base;
+
+ irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
+ if (!irq) {
+ DPU_ERROR("failed to get irq\n");
+ return -EINVAL;
+ }
+ dpu_kms->base.irq = irq;
+
+ return 0;
+}
+
+static int dpu_dev_probe(struct platform_device *pdev)
+{
+ return msm_drv_probe(&pdev->dev, dpu_kms_init);
+}
+
+static int dpu_dev_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &msm_drm_ops);
+
+ return 0;
+}
+
+static int __maybe_unused dpu_runtime_suspend(struct device *dev)
+{
+ int i;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+
+ /* Drop the performance state vote */
+ dev_pm_opp_set_rate(dev, 0);
+ clk_bulk_disable_unprepare(dpu_kms->num_clocks, dpu_kms->clocks);
+
+ for (i = 0; i < dpu_kms->num_paths; i++)
+ icc_set_bw(dpu_kms->path[i], 0, 0);
+
+ return 0;
+}
+
+static int __maybe_unused dpu_runtime_resume(struct device *dev)
+{
+	int rc;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+ struct drm_encoder *encoder;
+ struct drm_device *ddev;
+
+ ddev = dpu_kms->dev;
+
+ rc = clk_bulk_prepare_enable(dpu_kms->num_clocks, dpu_kms->clocks);
+ if (rc) {
+ DPU_ERROR("clock enable failed rc:%d\n", rc);
+ return rc;
+ }
+
+ dpu_vbif_init_memtypes(dpu_kms);
+
+ drm_for_each_encoder(encoder, ddev)
+ dpu_encoder_virt_runtime_resume(encoder);
+
+ return rc;
+}
+
+static const struct dev_pm_ops dpu_pm_ops = {
+ SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ .prepare = msm_pm_prepare,
+ .complete = msm_pm_complete,
+};
+
+static const struct of_device_id dpu_dt_match[] = {
+ { .compatible = "qcom,msm8998-dpu", .data = &dpu_msm8998_cfg, },
+ { .compatible = "qcom,qcm2290-dpu", .data = &dpu_qcm2290_cfg, },
+ { .compatible = "qcom,sdm845-dpu", .data = &dpu_sdm845_cfg, },
+ { .compatible = "qcom,sc7180-dpu", .data = &dpu_sc7180_cfg, },
+ { .compatible = "qcom,sc7280-dpu", .data = &dpu_sc7280_cfg, },
+ { .compatible = "qcom,sc8180x-dpu", .data = &dpu_sc8180x_cfg, },
+ { .compatible = "qcom,sc8280xp-dpu", .data = &dpu_sc8280xp_cfg, },
+ { .compatible = "qcom,sm6115-dpu", .data = &dpu_sm6115_cfg, },
+ { .compatible = "qcom,sm6125-dpu", .data = &dpu_sm6125_cfg, },
+ { .compatible = "qcom,sm6350-dpu", .data = &dpu_sm6350_cfg, },
+ { .compatible = "qcom,sm6375-dpu", .data = &dpu_sm6375_cfg, },
+ { .compatible = "qcom,sm8150-dpu", .data = &dpu_sm8150_cfg, },
+ { .compatible = "qcom,sm8250-dpu", .data = &dpu_sm8250_cfg, },
+ { .compatible = "qcom,sm8350-dpu", .data = &dpu_sm8350_cfg, },
+ { .compatible = "qcom,sm8450-dpu", .data = &dpu_sm8450_cfg, },
+ { .compatible = "qcom,sm8550-dpu", .data = &dpu_sm8550_cfg, },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dpu_dt_match);
+
+static struct platform_driver dpu_driver = {
+ .probe = dpu_dev_probe,
+ .remove = dpu_dev_remove,
+ .shutdown = msm_drv_shutdown,
+ .driver = {
+ .name = "msm_dpu",
+ .of_match_table = dpu_dt_match,
+ .pm = &dpu_pm_ops,
+ },
+};
+
+void __init msm_dpu_register(void)
+{
+ platform_driver_register(&dpu_driver);
+}
+
+void __exit msm_dpu_unregister(void)
+{
+ platform_driver_unregister(&dpu_driver);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
new file mode 100644
index 0000000000..b6f53ca6e9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __DPU_KMS_H__
+#define __DPU_KMS_H__
+
+#include <linux/interconnect.h>
+
+#include <drm/drm_drv.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "msm_mmu.h"
+#include "msm_gem.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_top.h"
+#include "dpu_rm.h"
+#include "dpu_core_perf.h"
+
+#define DRMID(x) ((x) ? (x)->base.id : -1)
+
+/**
+ * DPU_DEBUG - macro for kms/plane/crtc/encoder/connector logs
+ * @fmt: Pointer to format string
+ */
+#define DPU_DEBUG(fmt, ...) \
+ do { \
+ if (drm_debug_enabled(DRM_UT_KMS)) \
+ DRM_DEBUG(fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+/**
+ * DPU_DEBUG_DRIVER - macro for hardware driver logging
+ * @fmt: Pointer to format string
+ */
+#define DPU_DEBUG_DRIVER(fmt, ...) \
+ do { \
+ if (drm_debug_enabled(DRM_UT_DRIVER)) \
+ DRM_ERROR(fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define DPU_ERROR(fmt, ...) pr_err("[dpu error]" fmt, ##__VA_ARGS__)
+
+/**
+ * ktime_compare_safe - compare two ktime structures
+ * This macro is similar to the standard ktime_compare() function, but
+ * attempts to also handle ktime overflows.
+ * @A: First ktime value
+ * @B: Second ktime value
+ * Returns: -1 if A < B, 0 if A == B, 1 if A > B
+ */
+#define ktime_compare_safe(A, B) \
+ ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0))
+
+struct dpu_kms {
+ struct msm_kms base;
+ struct drm_device *dev;
+ const struct dpu_mdss_cfg *catalog;
+ const struct msm_mdss_data *mdss;
+
+ /* io/register spaces: */
+ void __iomem *mmio, *vbif[VBIF_MAX];
+
+ struct regulator *vdd;
+ struct regulator *mmagic;
+ struct regulator *venus;
+
+ struct dpu_hw_intr *hw_intr;
+
+ struct dpu_core_perf perf;
+
+ /*
+ * Global private object state, Do not access directly, use
+ * dpu_kms_global_get_state()
+ */
+ struct drm_modeset_lock global_state_lock;
+ struct drm_private_obj global_state;
+
+ struct dpu_rm rm;
+ bool rm_init;
+
+ struct dpu_hw_vbif *hw_vbif[VBIF_MAX];
+ struct dpu_hw_mdp *hw_mdp;
+
+ bool has_danger_ctrl;
+
+ struct platform_device *pdev;
+ bool rpm_enabled;
+
+ struct clk_bulk_data *clocks;
+ size_t num_clocks;
+
+ /* reference count bandwidth requests, so we know when we can
+ * release bandwidth. Each atomic update increments, and frame-
+ * done event decrements. Additionally, for video mode, the
+ * reference is incremented when crtc is enabled, and decremented
+ * when disabled.
+ */
+ atomic_t bandwidth_ref;
+ struct icc_path *path[2];
+ u32 num_paths;
+};
+
+struct vsync_info {
+ u32 frame_count;
+ u32 line_count;
+};
+
+#define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000
+
+#define DPU_ENC_MAX_POLL_TIMEOUT_US 2000
+
+#define to_dpu_kms(x) container_of(x, struct dpu_kms, base)
+
+#define to_dpu_global_state(x) container_of(x, struct dpu_global_state, base)
+
+/* Global private object state for tracking resources that are shared across
+ * multiple kms objects (planes/crtcs/etc).
+ */
+struct dpu_global_state {
+ struct drm_private_state base;
+
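+	/* per-instance maps from each HW block to the encoder id that reserved it */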
+ uint32_t pingpong_to_enc_id[PINGPONG_MAX - PINGPONG_0];
+ uint32_t mixer_to_enc_id[LM_MAX - LM_0];
+ uint32_t ctl_to_enc_id[CTL_MAX - CTL_0];
+ uint32_t dspp_to_enc_id[DSPP_MAX - DSPP_0];
+ uint32_t dsc_to_enc_id[DSC_MAX - DSC_0];
+};
+
+struct dpu_global_state
+ *dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms);
+struct dpu_global_state
+ *__must_check dpu_kms_get_global_state(struct drm_atomic_state *s);
+
+/**
+ * Debugfs functions - extra helper functions for debugfs support
+ *
+ * Main debugfs documentation is located at,
+ *
+ * Documentation/filesystems/debugfs.rst
+ *
+ * @dpu_debugfs_create_regset32: Create 32-bit register dump file
+ */
+
+/**
+ * dpu_debugfs_create_regset32 - Create register read back file for debugfs
+ *
+ * This function is almost identical to the standard debugfs_create_regset32()
+ * function, with the main difference being that a list of register
+ * names/offsets do not need to be provided. The 'read' function simply outputs
+ * sequential register values over a specified range.
+ *
+ * @name: File name within debugfs
+ * @mode: File mode within debugfs
+ * @parent: Parent directory entry within debugfs, can be NULL
+ * @offset: sub-block offset
+ * @length: sub-block length, in bytes
+ * @dpu_kms: pointer to dpu kms structure
+ */
+void dpu_debugfs_create_regset32(const char *name, umode_t mode,
+ void *parent,
+ uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_debugfs_get_root - Return root directory entry for KMS's debugfs
+ *
+ * The return value should be passed as the 'parent' argument to subsequent
+ * debugfs create calls.
+ *
+ * @dpu_kms: Pointer to DPU's KMS structure
+ *
+ * Return: dentry pointer for DPU's debugfs location
+ */
+void *dpu_debugfs_get_root(struct dpu_kms *dpu_kms);
+
+/**
+ * DPU info management functions
+ * These functions/definitions allow for building up a 'dpu_info' structure
+ * containing one or more "key=value\n" entries.
+ */
+#define DPU_KMS_INFO_MAX_SIZE 4096
+
+/**
+ * Vblank enable/disable functions
+ */
+int dpu_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void dpu_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
+/**
+ * dpu_kms_get_clk_rate() - get the clock rate
+ * @dpu_kms: pointer to dpu_kms structure
+ * @clock_name: clock name to get the rate
+ *
+ * Return: current clock rate
+ */
+unsigned long dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name);
+
+#endif /* __DPU_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
new file mode 100644
index 0000000000..0be195f914
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -0,0 +1,1468 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include "dpu_formats.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_trace.h"
+#include "dpu_crtc.h"
+#include "dpu_vbif.h"
+#include "dpu_plane.h"
+
+#define DPU_DEBUG_PLANE(pl, fmt, ...) DRM_DEBUG_ATOMIC("plane%d " fmt,\
+ (pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_PLANE(pl, fmt, ...) DPU_ERROR("plane%d " fmt,\
+ (pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DECIMATED_DIMENSION(dim, deci) (((dim) + ((1 << (deci)) - 1)) >> (deci))
+#define PHASE_STEP_SHIFT 21
+#define PHASE_STEP_UNIT_SCALE ((int) (1 << PHASE_STEP_SHIFT))
+#define PHASE_RESIDUAL 15
+
+#define SHARP_STRENGTH_DEFAULT 32
+#define SHARP_EDGE_THR_DEFAULT 112
+#define SHARP_SMOOTH_THR_DEFAULT 8
+#define SHARP_NOISE_THR_DEFAULT 2
+
+#define DPU_PLANE_COLOR_FILL_FLAG BIT(31)
+#define DPU_ZPOS_MAX 255
+
+/*
+ * Default Preload Values
+ */
+#define DPU_QSEED3_DEFAULT_PRELOAD_H 0x4
+#define DPU_QSEED3_DEFAULT_PRELOAD_V 0x3
+#define DPU_QSEED4_DEFAULT_PRELOAD_V 0x2
+#define DPU_QSEED4_DEFAULT_PRELOAD_H 0x4
+
+#define DEFAULT_REFRESH_RATE 60
+
+static const uint32_t qcom_compressed_supported_formats[] = {
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_BGR565,
+
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_P010,
+};
+
+/*
+ * struct dpu_plane - local dpu plane structure
+ * @base: base drm plane structure
+ * @lock: mutex serializing plane updates
+ * @pipe: SSPP instance backing this plane
+ * @color_fill: color fill value, qualified by DPU_PLANE_COLOR_FILL_FLAG
+ * @is_error: plane is in an error state
+ * @is_rt_pipe: pipe is on the real-time path
+ * @catalog: Points to dpu catalog structure
+ */
+struct dpu_plane {
+ struct drm_plane base;
+
+ struct mutex lock;
+
+ enum dpu_sspp pipe;
+
+ uint32_t color_fill;
+ bool is_error;
+ bool is_rt_pipe;
+ const struct dpu_mdss_cfg *catalog;
+};
+
+static const uint64_t supported_format_modifiers[] = {
+ DRM_FORMAT_MOD_QCOM_COMPRESSED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+#define to_dpu_plane(x) container_of(x, struct dpu_plane, base)
+
+static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
+{
+ struct msm_drm_private *priv = plane->dev->dev_private;
+
+ return to_dpu_kms(priv->kms);
+}
+
+/**
+ * _dpu_plane_calc_bw - calculate bandwidth required for a plane
+ * @catalog: Points to dpu catalog structure
+ * @fmt: Pointer to source buffer format
+ * @mode: Pointer to drm display mode
+ * @pipe_cfg: Pointer to pipe configuration
+ * Return: the larger of the plane's average and prefill bandwidth
+ * BW Equation: src_w * src_h * bpp * fps * (v_total / v_dest)
+ * Prefill BW Equation: line src bytes * line_time
+ */
+static u64 _dpu_plane_calc_bw(const struct dpu_mdss_cfg *catalog,
+ const struct dpu_format *fmt,
+ const struct drm_display_mode *mode,
+ struct dpu_sw_pipe_cfg *pipe_cfg)
+{
+ int src_width, src_height, dst_height, fps;
+ u64 plane_pixel_rate, plane_bit_rate;
+ u64 plane_prefill_bw;
+ u64 plane_bw;
+ u32 hw_latency_lines;
+ u64 scale_factor;
+ int vbp, vpw, vfp;
+
+ src_width = drm_rect_width(&pipe_cfg->src_rect);
+ src_height = drm_rect_height(&pipe_cfg->src_rect);
+ dst_height = drm_rect_height(&pipe_cfg->dst_rect);
+ fps = drm_mode_vrefresh(mode);
+ vbp = mode->vtotal - mode->vsync_end;
+ vpw = mode->vsync_end - mode->vsync_start;
+ vfp = mode->vsync_start - mode->vdisplay;
+ hw_latency_lines = catalog->perf->min_prefill_lines;
+ scale_factor = src_height > dst_height ?
+ mult_frac(src_height, 1, dst_height) : 1;
+
+ plane_pixel_rate = src_width * mode->vtotal * fps;
+ plane_bit_rate = plane_pixel_rate * fmt->bpp;
+
+ plane_bw = plane_bit_rate * scale_factor;
+
+ plane_prefill_bw = plane_bw * hw_latency_lines;
+
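+	/* spread the prefill cost over the usable vertical blanking lines */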
+	if ((vbp + vpw) > hw_latency_lines)
+		do_div(plane_prefill_bw, (vbp + vpw));
+	else if ((vbp + vpw + vfp) < hw_latency_lines)
+		do_div(plane_prefill_bw, (vbp + vpw + vfp));
+	else
+		do_div(plane_prefill_bw, hw_latency_lines);
+
+ return max(plane_bw, plane_prefill_bw);
+}
+
+/**
+ * _dpu_plane_calc_clk - calculate clock required for a plane
+ * @mode: Pointer to drm display mode
+ * @pipe_cfg: Pointer to pipe configuration
+ * Return: calculated clock rate required for the plane
+ * Clock equation: dst_w * v_total * fps * (src_h / dst_h)
+ */
+static u64 _dpu_plane_calc_clk(const struct drm_display_mode *mode,
+ struct dpu_sw_pipe_cfg *pipe_cfg)
+{
+ int dst_width, src_height, dst_height, fps;
+ u64 plane_clk;
+
+ src_height = drm_rect_height(&pipe_cfg->src_rect);
+ dst_width = drm_rect_width(&pipe_cfg->dst_rect);
+ dst_height = drm_rect_height(&pipe_cfg->dst_rect);
+ fps = drm_mode_vrefresh(mode);
+
+	plane_clk = dst_width * mode->vtotal * fps;
+
+ if (src_height > dst_height) {
+ plane_clk *= src_height;
+ do_div(plane_clk, dst_height);
+ }
+
+ return plane_clk;
+}
+
+/**
+ * _dpu_plane_calc_fill_level - calculate fill level of the given source format
+ * @plane: Pointer to drm plane
+ * @pipe: Pointer to software pipe
+ * @lut_usage: LUT usecase
+ * @fmt: Pointer to source buffer format
+ * @src_width: width of source buffer
+ * Return: fill level corresponding to the source buffer/format or 0 if error
+ */
+static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
+ struct dpu_sw_pipe *pipe,
+ enum dpu_qos_lut_usage lut_usage,
+ const struct dpu_format *fmt, u32 src_width)
+{
+ struct dpu_plane *pdpu;
+ u32 fixed_buff_size;
+ u32 total_fl;
+
+ if (!fmt || !pipe || !src_width || !fmt->bpp) {
+ DPU_ERROR("invalid arguments\n");
+ return 0;
+ }
+
+ if (lut_usage == DPU_QOS_LUT_USAGE_NRT)
+ return 0;
+
+ pdpu = to_dpu_plane(plane);
+ fixed_buff_size = pdpu->catalog->caps->pixel_ram_size;
+
+ /* FIXME: in multirect case account for the src_width of all the planes */
+
+ if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
+ if (fmt->chroma_sample == DPU_CHROMA_420) {
+ /* NV12 */
+ total_fl = (fixed_buff_size / 2) /
+ ((src_width + 32) * fmt->bpp);
+ } else {
+ /* non NV12 */
+ total_fl = (fixed_buff_size / 2) * 2 /
+ ((src_width + 32) * fmt->bpp);
+ }
+ } else {
+ if (pipe->multirect_mode == DPU_SSPP_MULTIRECT_PARALLEL) {
+ total_fl = (fixed_buff_size / 2) * 2 /
+ ((src_width + 32) * fmt->bpp);
+ } else {
+ total_fl = (fixed_buff_size) * 2 /
+ ((src_width + 32) * fmt->bpp);
+ }
+ }
+
+ DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %4.4s w:%u fl:%u\n",
+ pipe->sspp->idx - SSPP_VIG0,
+ (char *)&fmt->base.pixel_format,
+ src_width, total_fl);
+
+ return total_fl;
+}
+
+/**
+ * _dpu_plane_set_qos_lut - set QoS LUT of the given plane
+ * @plane: Pointer to drm plane
+ * @pipe: Pointer to software pipe
+ * @fmt: Pointer to source buffer format
+ * @pipe_cfg: Pointer to pipe configuration
+ */
+static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
+ struct dpu_sw_pipe *pipe,
+ const struct dpu_format *fmt, struct dpu_sw_pipe_cfg *pipe_cfg)
+{
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_hw_qos_cfg cfg;
+ u32 total_fl, lut_usage;
+
+ if (!pdpu->is_rt_pipe) {
+ lut_usage = DPU_QOS_LUT_USAGE_NRT;
+ } else {
+ if (fmt && DPU_FORMAT_IS_LINEAR(fmt))
+ lut_usage = DPU_QOS_LUT_USAGE_LINEAR;
+ else
+ lut_usage = DPU_QOS_LUT_USAGE_MACROTILE;
+ }
+
+ total_fl = _dpu_plane_calc_fill_level(plane, pipe, lut_usage, fmt,
+ drm_rect_width(&pipe_cfg->src_rect));
+
+ cfg.creq_lut = _dpu_hw_get_qos_lut(&pdpu->catalog->perf->qos_lut_tbl[lut_usage], total_fl);
+ cfg.danger_lut = pdpu->catalog->perf->danger_lut_tbl[lut_usage];
+ cfg.safe_lut = pdpu->catalog->perf->safe_lut_tbl[lut_usage];
+
+ if (pipe->sspp->idx != SSPP_CURSOR0 &&
+ pipe->sspp->idx != SSPP_CURSOR1 &&
+ pdpu->is_rt_pipe)
+ cfg.danger_safe_en = true;
+
+ DPU_DEBUG_PLANE(pdpu, "pnum:%d ds:%d is_rt:%d\n",
+ pdpu->pipe - SSPP_VIG0,
+ cfg.danger_safe_en,
+ pdpu->is_rt_pipe);
+
+ trace_dpu_perf_set_qos_luts(pipe->sspp->idx - SSPP_VIG0,
+ (fmt) ? fmt->base.pixel_format : 0,
+ pdpu->is_rt_pipe, total_fl, cfg.creq_lut, lut_usage);
+
+ DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %4.4s rt:%d fl:%u lut:0x%llx\n",
+ pdpu->pipe - SSPP_VIG0,
+ fmt ? (char *)&fmt->base.pixel_format : NULL,
+ pdpu->is_rt_pipe, total_fl, cfg.creq_lut);
+
+ trace_dpu_perf_set_danger_luts(pdpu->pipe - SSPP_VIG0,
+ (fmt) ? fmt->base.pixel_format : 0,
+ (fmt) ? fmt->fetch_mode : 0,
+ cfg.danger_lut,
+ cfg.safe_lut);
+
+ DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %4.4s mode:%d luts[0x%x, 0x%x]\n",
+ pdpu->pipe - SSPP_VIG0,
+ fmt ? (char *)&fmt->base.pixel_format : NULL,
+ fmt ? fmt->fetch_mode : -1,
+ cfg.danger_lut,
+ cfg.safe_lut);
+
+ pipe->sspp->ops.setup_qos_lut(pipe->sspp, &cfg);
+}
+
+/**
+ * _dpu_plane_set_qos_ctrl - set QoS control of the given plane
+ * @plane: Pointer to drm plane
+ * @pipe: Pointer to software pipe
+ * @enable: true to enable QoS control
+ */
+static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
+ struct dpu_sw_pipe *pipe,
+ bool enable)
+{
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+
+ if (!pdpu->is_rt_pipe)
+ enable = false;
+
+ DPU_DEBUG_PLANE(pdpu, "pnum:%d ds:%d is_rt:%d\n",
+ pdpu->pipe - SSPP_VIG0,
+ enable,
+ pdpu->is_rt_pipe);
+
+ pipe->sspp->ops.setup_qos_ctrl(pipe->sspp,
+ enable);
+}
+
+/**
+ * _dpu_plane_set_ot_limit - set OT limit for the given plane
+ * @plane: Pointer to drm plane
+ * @pipe: Pointer to software pipe
+ * @pipe_cfg: Pointer to pipe configuration
+ * @frame_rate: CRTC's frame rate
+ */
+static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
+ struct dpu_sw_pipe *pipe,
+ struct dpu_sw_pipe_cfg *pipe_cfg,
+ int frame_rate)
+{
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_vbif_set_ot_params ot_params;
+ struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
+
+ memset(&ot_params, 0, sizeof(ot_params));
+ ot_params.xin_id = pipe->sspp->cap->xin_id;
+ ot_params.num = pipe->sspp->idx - SSPP_NONE;
+ ot_params.width = drm_rect_width(&pipe_cfg->src_rect);
+ ot_params.height = drm_rect_height(&pipe_cfg->src_rect);
+ ot_params.is_wfd = !pdpu->is_rt_pipe;
+ ot_params.frame_rate = frame_rate;
+ ot_params.vbif_idx = VBIF_RT;
+ ot_params.clk_ctrl = pipe->sspp->cap->clk_ctrl;
+ ot_params.rd = true;
+
+ dpu_vbif_set_ot_limit(dpu_kms, &ot_params);
+}
+
+/**
+ * _dpu_plane_set_qos_remap - set vbif QoS for the given plane
+ * @plane: Pointer to drm plane
+ * @pipe: Pointer to software pipe
+ */
+static void _dpu_plane_set_qos_remap(struct drm_plane *plane,
+ struct dpu_sw_pipe *pipe)
+{
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_vbif_set_qos_params qos_params;
+ struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
+
+ memset(&qos_params, 0, sizeof(qos_params));
+ qos_params.vbif_idx = VBIF_RT;
+ qos_params.clk_ctrl = pipe->sspp->cap->clk_ctrl;
+ qos_params.xin_id = pipe->sspp->cap->xin_id;
+ qos_params.num = pipe->sspp->idx - SSPP_VIG0;
+ qos_params.is_rt = pdpu->is_rt_pipe;
+
+ DPU_DEBUG_PLANE(pdpu, "pipe:%d vbif:%d xin:%d rt:%d, clk_ctrl:%d\n",
+ qos_params.num,
+ qos_params.vbif_idx,
+ qos_params.xin_id, qos_params.is_rt,
+ qos_params.clk_ctrl);
+
+ dpu_vbif_set_qos_remap(dpu_kms, &qos_params);
+}
+
+static void _dpu_plane_setup_scaler3(struct dpu_hw_sspp *pipe_hw,
+ uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h,
+ struct dpu_hw_scaler3_cfg *scale_cfg,
+ const struct dpu_format *fmt,
+ uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v,
+ unsigned int rotation)
+{
+ uint32_t i;
+ bool inline_rotation = rotation & DRM_MODE_ROTATE_90;
+
+ /*
+ * For inline rotation cases, scaler config is post-rotation,
+ * so swap the dimensions here. However, pixel extension will
+ * need pre-rotation settings.
+ */
+ if (inline_rotation)
+ swap(src_w, src_h);
+
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_0] =
+ mult_frac((1 << PHASE_STEP_SHIFT), src_w, dst_w);
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_0] =
+ mult_frac((1 << PHASE_STEP_SHIFT), src_h, dst_h);
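+ /* e.g. a 2:1 downscale (src_w == 2 * dst_w) yields a phase step of 2.0 */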
+
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_1_2] =
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_0] / chroma_subsmpl_v;
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_1_2] =
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_0] / chroma_subsmpl_h;
+
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_2] =
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_1_2];
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_2] =
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_1_2];
+
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_3] =
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_0];
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_3] =
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_0];
+
+ for (i = 0; i < DPU_MAX_PLANES; i++) {
+ scale_cfg->src_width[i] = src_w;
+ scale_cfg->src_height[i] = src_h;
+ if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) {
+ scale_cfg->src_width[i] /= chroma_subsmpl_h;
+ scale_cfg->src_height[i] /= chroma_subsmpl_v;
+ }
+
+ if (pipe_hw->cap->features &
+ BIT(DPU_SSPP_SCALER_QSEED4)) {
+ scale_cfg->preload_x[i] = DPU_QSEED4_DEFAULT_PRELOAD_H;
+ scale_cfg->preload_y[i] = DPU_QSEED4_DEFAULT_PRELOAD_V;
+ } else {
+ scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H;
+ scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
+ }
+ }
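+ /*
+ * Leave the scaler disabled (bypassed) for non-YUV formats at 1:1
+ * size; YUV always goes through it for chroma upsampling.
+ */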
+ if (!(DPU_FORMAT_IS_YUV(fmt)) && (src_h == dst_h)
+ && (src_w == dst_w))
+ return;
+
+ scale_cfg->dst_width = dst_w;
+ scale_cfg->dst_height = dst_h;
+ scale_cfg->y_rgb_filter_cfg = DPU_SCALE_BIL;
+ scale_cfg->uv_filter_cfg = DPU_SCALE_BIL;
+ scale_cfg->alpha_filter_cfg = DPU_SCALE_ALPHA_BIL;
+ scale_cfg->lut_flag = 0;
+ scale_cfg->blend_cfg = 1;
+ scale_cfg->enable = 1;
+}
+
+static void _dpu_plane_setup_pixel_ext(struct dpu_hw_scaler3_cfg *scale_cfg,
+ struct dpu_hw_pixel_ext *pixel_ext,
+ uint32_t src_w, uint32_t src_h,
+ uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v)
+{
+ int i;
+
+ for (i = 0; i < DPU_MAX_PLANES; i++) {
+ if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) {
+ src_w /= chroma_subsmpl_h;
+ src_h /= chroma_subsmpl_v;
+ }
+
+ pixel_ext->num_ext_pxls_top[i] = src_h;
+ pixel_ext->num_ext_pxls_left[i] = src_w;
+ }
+}
+
+static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
+ {
+ /* S15.16 format */
+ 0x00012A00, 0x00000000, 0x00019880,
+ 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+ 0x00012A00, 0x00020480, 0x00000000,
+ },
+ /* signed bias */
+ { 0xfff0, 0xff80, 0xff80,},
+ { 0x0, 0x0, 0x0,},
+ /* unsigned clamp */
+ { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
+ { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
+};
+
+static const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = {
+ {
+ /* S15.16 format */
+ 0x00012A00, 0x00000000, 0x00019880,
+ 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+ 0x00012A00, 0x00020480, 0x00000000,
+ },
+ /* signed bias */
+ { 0xffc0, 0xfe00, 0xfe00,},
+ { 0x0, 0x0, 0x0,},
+ /* unsigned clamp */
+ { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
+ { 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
+};
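+
+/*
+ * Worked example of the S15.16 encoding above: 0x00012A00 is
+ * 76288 / 65536 ~= 1.164 and 0x00019880 is ~1.596, i.e. the BT.601
+ * limited-range luma gain and Cr-to-R coefficient.
+ */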
+
+static const struct dpu_csc_cfg *_dpu_plane_get_csc(struct dpu_sw_pipe *pipe,
+ const struct dpu_format *fmt)
+{
+ const struct dpu_csc_cfg *csc_ptr;
+
+ if (!DPU_FORMAT_IS_YUV(fmt))
+ return NULL;
+
+ if (BIT(DPU_SSPP_CSC_10BIT) & pipe->sspp->cap->features)
+ csc_ptr = &dpu_csc10_YUV2RGB_601L;
+ else
+ csc_ptr = &dpu_csc_YUV2RGB_601L;
+
+ return csc_ptr;
+}
+
+static void _dpu_plane_setup_scaler(struct dpu_sw_pipe *pipe,
+ const struct dpu_format *fmt, bool color_fill,
+ struct dpu_sw_pipe_cfg *pipe_cfg,
+ unsigned int rotation)
+{
+ struct dpu_hw_sspp *pipe_hw = pipe->sspp;
+ const struct drm_format_info *info = drm_format_info(fmt->base.pixel_format);
+ struct dpu_hw_scaler3_cfg scaler3_cfg;
+ struct dpu_hw_pixel_ext pixel_ext;
+ u32 src_width = drm_rect_width(&pipe_cfg->src_rect);
+ u32 src_height = drm_rect_height(&pipe_cfg->src_rect);
+ u32 dst_width = drm_rect_width(&pipe_cfg->dst_rect);
+ u32 dst_height = drm_rect_height(&pipe_cfg->dst_rect);
+
+ memset(&scaler3_cfg, 0, sizeof(scaler3_cfg));
+ memset(&pixel_ext, 0, sizeof(pixel_ext));
+
+ /* don't chroma subsample if decimating */
+ /* update scaler. calculate default config for QSEED3 */
+ _dpu_plane_setup_scaler3(pipe_hw,
+ src_width,
+ src_height,
+ dst_width,
+ dst_height,
+ &scaler3_cfg, fmt,
+ info->hsub, info->vsub,
+ rotation);
+
+ /* configure pixel extension based on scaler config */
+ _dpu_plane_setup_pixel_ext(&scaler3_cfg, &pixel_ext,
+ src_width, src_height, info->hsub, info->vsub);
+
+ if (pipe_hw->ops.setup_pe)
+ pipe_hw->ops.setup_pe(pipe_hw,
+ &pixel_ext);
+
+ /*
+ * When programmed in multirect mode, the scaler block is bypassed.
+ * Alpha and bitwidth still need to be updated, but ONLY for RECT0.
+ */
+ if (pipe_hw->ops.setup_scaler &&
+ pipe->multirect_index != DPU_SSPP_RECT_1)
+ pipe_hw->ops.setup_scaler(pipe_hw,
+ &scaler3_cfg,
+ fmt);
+}
+
+static void _dpu_plane_color_fill_pipe(struct dpu_plane_state *pstate,
+ struct dpu_sw_pipe *pipe,
+ struct drm_rect *dst_rect,
+ u32 fill_color,
+ const struct dpu_format *fmt)
+{
+ struct dpu_sw_pipe_cfg pipe_cfg;
+
+ /* update sspp */
+ if (!pipe->sspp->ops.setup_solidfill)
+ return;
+
+ pipe->sspp->ops.setup_solidfill(pipe, fill_color);
+
+ /* override scaler/decimation if solid fill */
+ pipe_cfg.dst_rect = *dst_rect;
+
+ pipe_cfg.src_rect.x1 = 0;
+ pipe_cfg.src_rect.y1 = 0;
+ pipe_cfg.src_rect.x2 =
+ drm_rect_width(&pipe_cfg.dst_rect);
+ pipe_cfg.src_rect.y2 =
+ drm_rect_height(&pipe_cfg.dst_rect);
+
+ if (pipe->sspp->ops.setup_format)
+ pipe->sspp->ops.setup_format(pipe, fmt, DPU_SSPP_SOLID_FILL);
+
+ if (pipe->sspp->ops.setup_rects)
+ pipe->sspp->ops.setup_rects(pipe, &pipe_cfg);
+
+ _dpu_plane_setup_scaler(pipe, fmt, true, &pipe_cfg, pstate->rotation);
+}
+
+/**
+ * _dpu_plane_color_fill - enables color fill on plane
+ * @pdpu: Pointer to DPU plane object
+ * @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha: 8-bit fill alpha value, 255 selects 100% alpha
+ */
+static void _dpu_plane_color_fill(struct dpu_plane *pdpu,
+ uint32_t color, uint32_t alpha)
+{
+ const struct dpu_format *fmt;
+ const struct drm_plane *plane = &pdpu->base;
+ struct dpu_plane_state *pstate = to_dpu_plane_state(plane->state);
+ u32 fill_color = (color & 0xFFFFFF) | ((alpha & 0xFF) << 24);
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ /*
+ * select fill format to match user property expectation,
+ * h/w only supports RGB variants
+ */
+ fmt = dpu_get_dpu_format(DRM_FORMAT_ABGR8888);
+ /* should not happen ever */
+ if (!fmt)
+ return;
+
+ /* update sspp */
+ _dpu_plane_color_fill_pipe(pstate, &pstate->pipe, &pstate->pipe_cfg.dst_rect,
+ fill_color, fmt);
+
+ if (pstate->r_pipe.sspp)
+ _dpu_plane_color_fill_pipe(pstate, &pstate->r_pipe, &pstate->r_pipe_cfg.dst_rect,
+ fill_color, fmt);
+}
+
+static int dpu_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_framebuffer *fb = new_state->fb;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_plane_state *pstate = to_dpu_plane_state(new_state);
+ struct dpu_hw_fmt_layout layout;
+ struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
+ int ret;
+
+ if (!new_state->fb)
+ return 0;
+
+ DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id);
+
+ /* cache aspace */
+ pstate->aspace = kms->base.aspace;
+
+ /*
+ * TODO: Need to sort out the msm_framebuffer_prepare() call below so
+ * we can use msm_atomic_prepare_fb() instead of doing the
+ * implicit fence and fb prepare by hand here.
+ */
+ drm_gem_plane_helper_prepare_fb(plane, new_state);
+
+ if (pstate->aspace) {
+ ret = msm_framebuffer_prepare(new_state->fb,
+ pstate->aspace, pstate->needs_dirtyfb);
+ if (ret) {
+ DPU_ERROR("failed to prepare framebuffer\n");
+ return ret;
+ }
+ }
+
+ /* validate framebuffer layout before commit */
+ ret = dpu_format_populate_layout(pstate->aspace,
+ new_state->fb, &layout);
+ if (ret) {
+ DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void dpu_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_plane_state *old_pstate;
+
+ if (!old_state || !old_state->fb)
+ return;
+
+ old_pstate = to_dpu_plane_state(old_state);
+
+ DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", old_state->fb->base.id);
+
+ msm_framebuffer_cleanup(old_state->fb, old_pstate->aspace,
+ old_pstate->needs_dirtyfb);
+}
+
+static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu,
+ const struct dpu_sspp_sub_blks *sblk,
+ struct drm_rect src, const struct dpu_format *fmt)
+{
+ size_t num_formats;
+ const u32 *supported_formats;
+
+ if (!sblk->rotation_cfg) {
+ DPU_ERROR("invalid rotation cfg\n");
+ return -EINVAL;
+ }
+
+ /* the source width becomes the output height after the 90 deg rotation */
+ if (drm_rect_width(&src) > sblk->rotation_cfg->rot_maxheight) {
+ DPU_DEBUG_PLANE(pdpu, "invalid height for inline rot:%d max:%d\n",
+ drm_rect_width(&src), sblk->rotation_cfg->rot_maxheight);
+ return -EINVAL;
+ }
+
+ supported_formats = sblk->rotation_cfg->rot_format_list;
+ num_formats = sblk->rotation_cfg->rot_num_formats;
+
+ if (!DPU_FORMAT_IS_UBWC(fmt) ||
+ !dpu_find_format(fmt->base.pixel_format, supported_formats, num_formats))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
+ struct dpu_sw_pipe *pipe,
+ struct dpu_sw_pipe_cfg *pipe_cfg,
+ const struct dpu_format *fmt,
+ const struct drm_display_mode *mode)
+{
+ uint32_t min_src_size;
+ struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
+
+ min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
+
+ if (DPU_FORMAT_IS_YUV(fmt) &&
+ (!(pipe->sspp->cap->features & DPU_SSPP_SCALER) ||
+ !(pipe->sspp->cap->features & DPU_SSPP_CSC_ANY))) {
+ DPU_DEBUG_PLANE(pdpu,
+ "plane doesn't have scaler/csc for yuv\n");
+ return -EINVAL;
+ }
+
+ /* check src bounds */
+ if (drm_rect_width(&pipe_cfg->src_rect) < min_src_size ||
+ drm_rect_height(&pipe_cfg->src_rect) < min_src_size) {
+ DPU_DEBUG_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&pipe_cfg->src_rect));
+ return -E2BIG;
+ }
+
+ /* YUV sources need even offsets and dimensions */
+ if (DPU_FORMAT_IS_YUV(fmt) &&
+ (pipe_cfg->src_rect.x1 & 0x1 ||
+ pipe_cfg->src_rect.y1 & 0x1 ||
+ drm_rect_width(&pipe_cfg->src_rect) & 0x1 ||
+ drm_rect_height(&pipe_cfg->src_rect) & 0x1)) {
+ DPU_DEBUG_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&pipe_cfg->src_rect));
+ return -EINVAL;
+ }
+
+ /* min dst support */
+ if (drm_rect_width(&pipe_cfg->dst_rect) < 0x1 ||
+ drm_rect_height(&pipe_cfg->dst_rect) < 0x1) {
+ DPU_DEBUG_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&pipe_cfg->dst_rect));
+ return -EINVAL;
+ }
+
+ /* max clk check */
+ if (_dpu_plane_calc_clk(mode, pipe_cfg) > kms->perf.max_core_clk_rate) {
+ DPU_DEBUG_PLANE(pdpu, "plane exceeds max mdp core clk limits\n");
+ return -E2BIG;
+ }
+
+ return 0;
+}
+
+static int dpu_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ int ret = 0, min_scale;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state);
+ struct dpu_sw_pipe *pipe = &pstate->pipe;
+ struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
+ const struct drm_crtc_state *crtc_state = NULL;
+ const struct dpu_format *fmt;
+ struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
+ struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
+ struct drm_rect fb_rect = { 0 };
+ uint32_t max_linewidth;
+ unsigned int rotation;
+ uint32_t supported_rotations;
+ const struct dpu_sspp_cfg *pipe_hw_caps = pstate->pipe.sspp->cap;
+ const struct dpu_sspp_sub_blks *sblk = pstate->pipe.sspp->cap->sblk;
+
+ if (new_plane_state->crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_plane_state->crtc);
+
+ min_scale = FRAC_16_16(1, sblk->maxupscale);
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
+ min_scale,
+ sblk->maxdwnscale << 16,
+ true, true);
+ if (ret) {
+ DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
+ return ret;
+ }
+ if (!new_plane_state->visible)
+ return 0;
+
+ pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+ r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+ r_pipe->sspp = NULL;
+
+ pstate->stage = DPU_STAGE_0 + pstate->base.normalized_zpos;
+ if (pstate->stage >= pdpu->catalog->caps->max_mixer_blendstages) {
+ DPU_ERROR("> %d plane stages assigned\n",
+ pdpu->catalog->caps->max_mixer_blendstages - DPU_STAGE_0);
+ return -EINVAL;
+ }
+
+ pipe_cfg->src_rect = new_plane_state->src;
+
+ /* state->src is 16.16, src_rect is not */
+ pipe_cfg->src_rect.x1 >>= 16;
+ pipe_cfg->src_rect.x2 >>= 16;
+ pipe_cfg->src_rect.y1 >>= 16;
+ pipe_cfg->src_rect.y2 >>= 16;
+
+ pipe_cfg->dst_rect = new_plane_state->dst;
+
+ fb_rect.x2 = new_plane_state->fb->width;
+ fb_rect.y2 = new_plane_state->fb->height;
+
+ /* Ensure fb size is supported */
+ if (drm_rect_width(&fb_rect) > MAX_IMG_WIDTH ||
+ drm_rect_height(&fb_rect) > MAX_IMG_HEIGHT) {
+ DPU_DEBUG_PLANE(pdpu, "invalid framebuffer " DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&fb_rect));
+ return -E2BIG;
+ }
+
+ fmt = to_dpu_format(msm_framebuffer_format(new_plane_state->fb));
+
+ max_linewidth = pdpu->catalog->caps->max_linewidth;
+
+ if (drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) {
+ /*
+ * In parallel multirect case only the half of the usual width
+ * is supported for tiled formats. If we are here, we know that
+ * full width is more than max_linewidth, thus each rect is
+ * wider than allowed.
+ */
+ if (DPU_FORMAT_IS_UBWC(fmt)) {
+ DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, tiled format\n",
+ DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
+ return -E2BIG;
+ }
+
+ if (drm_rect_width(&pipe_cfg->src_rect) > 2 * max_linewidth) {
+ DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
+ DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
+ return -E2BIG;
+ }
+
+ if (drm_rect_width(&pipe_cfg->src_rect) != drm_rect_width(&pipe_cfg->dst_rect) ||
+ drm_rect_height(&pipe_cfg->src_rect) != drm_rect_height(&pipe_cfg->dst_rect) ||
+ (!test_bit(DPU_SSPP_SMART_DMA_V1, &pipe->sspp->cap->features) &&
+ !test_bit(DPU_SSPP_SMART_DMA_V2, &pipe->sspp->cap->features)) ||
+ DPU_FORMAT_IS_YUV(fmt)) {
+ DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, can't use split source\n",
+ DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
+ return -E2BIG;
+ }
+
+ /*
+ * Use multirect for wide plane. We do not support dynamic
+ * assignment of SSPPs, so we know the configuration.
+ */
+ pipe->multirect_index = DPU_SSPP_RECT_0;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+
+ r_pipe->sspp = pipe->sspp;
+ r_pipe->multirect_index = DPU_SSPP_RECT_1;
+ r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+
+ *r_pipe_cfg = *pipe_cfg;
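+ /*
+ * Split evenly between the two rects: e.g. a 5120px-wide plane on
+ * hardware with a max_linewidth of 2560px becomes two 2560px-wide
+ * rects fetched by the same SSPP.
+ */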
+ pipe_cfg->src_rect.x2 = (pipe_cfg->src_rect.x1 + pipe_cfg->src_rect.x2) >> 1;
+ pipe_cfg->dst_rect.x2 = (pipe_cfg->dst_rect.x1 + pipe_cfg->dst_rect.x2) >> 1;
+ r_pipe_cfg->src_rect.x1 = pipe_cfg->src_rect.x2;
+ r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2;
+ }
+
+ ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt, &crtc_state->adjusted_mode);
+ if (ret)
+ return ret;
+
+ if (r_pipe->sspp) {
+ ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt,
+ &crtc_state->adjusted_mode);
+ if (ret)
+ return ret;
+ }
+
+ supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0;
+
+ if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION))
+ supported_rotations |= DRM_MODE_ROTATE_90;
+
+ rotation = drm_rotation_simplify(new_plane_state->rotation,
+ supported_rotations);
+
+ if ((pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION)) &&
+ (rotation & DRM_MODE_ROTATE_90)) {
+ ret = dpu_plane_check_inline_rotation(pdpu, sblk, pipe_cfg->src_rect, fmt);
+ if (ret)
+ return ret;
+ }
+
+ pstate->rotation = rotation;
+ pstate->needs_qos_remap = drm_atomic_crtc_needs_modeset(crtc_state);
+
+ return 0;
+}
+
+static void dpu_plane_flush_csc(struct dpu_plane *pdpu, struct dpu_sw_pipe *pipe)
+{
+ const struct dpu_format *format =
+ to_dpu_format(msm_framebuffer_format(pdpu->base.state->fb));
+ const struct dpu_csc_cfg *csc_ptr;
+
+ if (!pipe->sspp || !pipe->sspp->ops.setup_csc)
+ return;
+
+ csc_ptr = _dpu_plane_get_csc(pipe, format);
+ if (!csc_ptr)
+ return;
+
+ DPU_DEBUG_PLANE(pdpu, "using 0x%X 0x%X 0x%X...\n",
+ csc_ptr->csc_mv[0],
+ csc_ptr->csc_mv[1],
+ csc_ptr->csc_mv[2]);
+
+ pipe->sspp->ops.setup_csc(pipe->sspp, csc_ptr);
+}
+
+void dpu_plane_flush(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_plane_state *pstate;
+
+ if (!plane || !plane->state) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ pstate = to_dpu_plane_state(plane->state);
+
+ /*
+ * These updates have to be done immediately before the plane flush
+ * timing, and may not be moved to the atomic_update/mode_set functions.
+ */
+ if (pdpu->is_error)
+ /* force white frame with 100% alpha pipe output on error */
+ _dpu_plane_color_fill(pdpu, 0xFFFFFF, 0xFF);
+ else if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG)
+ /* force 100% alpha */
+ _dpu_plane_color_fill(pdpu, pdpu->color_fill, 0xFF);
+ else {
+ dpu_plane_flush_csc(pdpu, &pstate->pipe);
+ dpu_plane_flush_csc(pdpu, &pstate->r_pipe);
+ }
+
+ /* flag h/w flush complete */
+ if (plane->state)
+ pstate->pending = false;
+}
+
+/**
+ * dpu_plane_set_error - enable/disable error condition
+ * @plane: pointer to drm_plane structure
+ * @error: error value to set
+ */
+void dpu_plane_set_error(struct drm_plane *plane, bool error)
+{
+ struct dpu_plane *pdpu;
+
+ if (!plane)
+ return;
+
+ pdpu = to_dpu_plane(plane);
+ pdpu->is_error = error;
+}
+
+static void dpu_plane_sspp_update_pipe(struct drm_plane *plane,
+ struct dpu_sw_pipe *pipe,
+ struct dpu_sw_pipe_cfg *pipe_cfg,
+ const struct dpu_format *fmt,
+ int frame_rate,
+ struct dpu_hw_fmt_layout *layout)
+{
+ uint32_t src_flags;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct drm_plane_state *state = plane->state;
+ struct dpu_plane_state *pstate = to_dpu_plane_state(state);
+
+ if (layout && pipe->sspp->ops.setup_sourceaddress) {
+ trace_dpu_plane_set_scanout(pipe, layout);
+ pipe->sspp->ops.setup_sourceaddress(pipe, layout);
+ }
+
+ /* override for color fill */
+ if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG) {
+ _dpu_plane_set_qos_ctrl(plane, pipe, false);
+
+ /* skip remaining processing on color fill */
+ return;
+ }
+
+ if (pipe->sspp->ops.setup_rects)
+ pipe->sspp->ops.setup_rects(pipe, pipe_cfg);
+
+ _dpu_plane_setup_scaler(pipe, fmt, false, pipe_cfg, pstate->rotation);
+
+ if (pipe->sspp->ops.setup_multirect)
+ pipe->sspp->ops.setup_multirect(pipe);
+
+ if (pipe->sspp->ops.setup_format) {
+ unsigned int rotation = pstate->rotation;
+
+ src_flags = 0x0;
+
+ if (rotation & DRM_MODE_REFLECT_X)
+ src_flags |= DPU_SSPP_FLIP_LR;
+
+ if (rotation & DRM_MODE_REFLECT_Y)
+ src_flags |= DPU_SSPP_FLIP_UD;
+
+ if (rotation & DRM_MODE_ROTATE_90)
+ src_flags |= DPU_SSPP_ROT_90;
+
+ /* update format */
+ pipe->sspp->ops.setup_format(pipe, fmt, src_flags);
+
+ if (pipe->sspp->ops.setup_cdp) {
+ const struct dpu_perf_cfg *perf = pdpu->catalog->perf;
+
+ pipe->sspp->ops.setup_cdp(pipe, fmt,
+ perf->cdp_cfg[DPU_PERF_CDP_USAGE_RT].rd_enable);
+ }
+ }
+
+ _dpu_plane_set_qos_lut(plane, pipe, fmt, pipe_cfg);
+
+ if (pipe->sspp->idx != SSPP_CURSOR0 &&
+ pipe->sspp->idx != SSPP_CURSOR1)
+ _dpu_plane_set_ot_limit(plane, pipe, pipe_cfg, frame_rate);
+
+ if (pstate->needs_qos_remap)
+ _dpu_plane_set_qos_remap(plane, pipe);
+}
+
+static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct drm_plane_state *state = plane->state;
+ struct dpu_plane_state *pstate = to_dpu_plane_state(state);
+ struct dpu_sw_pipe *pipe = &pstate->pipe;
+ struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_framebuffer *fb = state->fb;
+ bool is_rt_pipe;
+ const struct dpu_format *fmt =
+ to_dpu_format(msm_framebuffer_format(fb));
+ struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
+ struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
+ struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
+ struct msm_gem_address_space *aspace = kms->base.aspace;
+ struct dpu_hw_fmt_layout layout;
+ bool layout_valid = false;
+ int ret;
+
+ ret = dpu_format_populate_layout(aspace, fb, &layout);
+ if (ret)
+ DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
+ else
+ layout_valid = true;
+
+ pstate->pending = true;
+
+ is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT);
+ pstate->needs_qos_remap |= (is_rt_pipe != pdpu->is_rt_pipe);
+ pdpu->is_rt_pipe = is_rt_pipe;
+
+ DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT
+ ", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_FP_ARG(&state->src),
+ crtc->base.id, DRM_RECT_ARG(&state->dst),
+ (char *)&fmt->base.pixel_format, DPU_FORMAT_IS_UBWC(fmt));
+
+ dpu_plane_sspp_update_pipe(plane, pipe, pipe_cfg, fmt,
+ drm_mode_vrefresh(&crtc->mode),
+ layout_valid ? &layout : NULL);
+
+ if (r_pipe->sspp) {
+ dpu_plane_sspp_update_pipe(plane, r_pipe, r_pipe_cfg, fmt,
+ drm_mode_vrefresh(&crtc->mode),
+ layout_valid ? &layout : NULL);
+ }
+
+ if (pstate->needs_qos_remap)
+ pstate->needs_qos_remap = false;
+
+ pstate->plane_fetch_bw = _dpu_plane_calc_bw(pdpu->catalog, fmt,
+ &crtc->mode, pipe_cfg);
+
+ pstate->plane_clk = _dpu_plane_calc_clk(&crtc->mode, pipe_cfg);
+
+ if (r_pipe->sspp) {
+ pstate->plane_fetch_bw += _dpu_plane_calc_bw(pdpu->catalog, fmt, &crtc->mode, r_pipe_cfg);
+
+ pstate->plane_clk = max(pstate->plane_clk, _dpu_plane_calc_clk(&crtc->mode, r_pipe_cfg));
+ }
+}
+
+static void _dpu_plane_atomic_disable(struct drm_plane *plane)
+{
+ struct drm_plane_state *state = plane->state;
+ struct dpu_plane_state *pstate = to_dpu_plane_state(state);
+ struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
+
+ trace_dpu_plane_disable(DRMID(plane), false,
+ pstate->pipe.multirect_mode);
+
+ if (r_pipe->sspp) {
+ r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+
+ if (r_pipe->sspp->ops.setup_multirect)
+ r_pipe->sspp->ops.setup_multirect(r_pipe);
+ }
+
+ pstate->pending = true;
+}
+
+static void dpu_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+
+ pdpu->is_error = false;
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ if (!new_state->visible) {
+ _dpu_plane_atomic_disable(plane);
+ } else {
+ dpu_plane_sspp_atomic_update(plane);
+ }
+}
+
+static void dpu_plane_destroy(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu = plane ? to_dpu_plane(plane) : NULL;
+ struct dpu_plane_state *pstate;
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ if (pdpu) {
+ pstate = to_dpu_plane_state(plane->state);
+ _dpu_plane_set_qos_ctrl(plane, &pstate->pipe, false);
+
+ if (pstate->r_pipe.sspp)
+ _dpu_plane_set_qos_ctrl(plane, &pstate->r_pipe, false);
+
+ mutex_destroy(&pdpu->lock);
+
+ /* this will destroy the states as well */
+ drm_plane_cleanup(plane);
+
+ kfree(pdpu);
+ }
+}
+
+static void dpu_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ __drm_atomic_helper_plane_destroy_state(state);
+ kfree(to_dpu_plane_state(state));
+}
+
+static struct drm_plane_state *
+dpu_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_plane_state *pstate;
+ struct dpu_plane_state *old_state;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return NULL;
+ } else if (!plane->state) {
+ DPU_ERROR("invalid plane state\n");
+ return NULL;
+ }
+
+ old_state = to_dpu_plane_state(plane->state);
+ pdpu = to_dpu_plane(plane);
+ pstate = kmemdup(old_state, sizeof(*old_state), GFP_KERNEL);
+ if (!pstate) {
+ DPU_ERROR_PLANE(pdpu, "failed to allocate state\n");
+ return NULL;
+ }
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ pstate->pending = false;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &pstate->base);
+
+ return &pstate->base;
+}
+
+static const char * const multirect_mode_name[] = {
+ [DPU_SSPP_MULTIRECT_NONE] = "none",
+ [DPU_SSPP_MULTIRECT_PARALLEL] = "parallel",
+ [DPU_SSPP_MULTIRECT_TIME_MX] = "time_mx",
+};
+
+static const char * const multirect_index_name[] = {
+ [DPU_SSPP_RECT_SOLO] = "solo",
+ [DPU_SSPP_RECT_0] = "rect_0",
+ [DPU_SSPP_RECT_1] = "rect_1",
+};
+
+static const char *dpu_get_multirect_mode(enum dpu_sspp_multirect_mode mode)
+{
+ if (WARN_ON(mode >= ARRAY_SIZE(multirect_mode_name)))
+ return "unknown";
+
+ return multirect_mode_name[mode];
+}
+
+static const char *dpu_get_multirect_index(enum dpu_sspp_multirect_index index)
+{
+ if (WARN_ON(index >= ARRAY_SIZE(multirect_index_name)))
+ return "unknown";
+
+ return multirect_index_name[index];
+}
+
+static void dpu_plane_atomic_print_state(struct drm_printer *p,
+ const struct drm_plane_state *state)
+{
+ const struct dpu_plane_state *pstate = to_dpu_plane_state(state);
+ const struct dpu_sw_pipe *pipe = &pstate->pipe;
+ const struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
+ const struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
+ const struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
+
+ drm_printf(p, "\tstage=%d\n", pstate->stage);
+
+ drm_printf(p, "\tsspp[0]=%s\n", pipe->sspp->cap->name);
+ drm_printf(p, "\tmultirect_mode[0]=%s\n", dpu_get_multirect_mode(pipe->multirect_mode));
+ drm_printf(p, "\tmultirect_index[0]=%s\n",
+ dpu_get_multirect_index(pipe->multirect_index));
+ drm_printf(p, "\tsrc[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->src_rect));
+ drm_printf(p, "\tdst[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->dst_rect));
+
+ if (r_pipe->sspp) {
+ drm_printf(p, "\tsspp[1]=%s\n", r_pipe->sspp->cap->name);
+ drm_printf(p, "\tmultirect_mode[1]=%s\n",
+ dpu_get_multirect_mode(r_pipe->multirect_mode));
+ drm_printf(p, "\tmultirect_index[1]=%s\n",
+ dpu_get_multirect_index(r_pipe->multirect_index));
+ drm_printf(p, "\tsrc[1]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&r_pipe_cfg->src_rect));
+ drm_printf(p, "\tdst[1]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&r_pipe_cfg->dst_rect));
+ }
+}
+
+static void dpu_plane_reset(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_plane_state *pstate;
+ struct dpu_kms *dpu_kms;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ dpu_kms = _dpu_plane_get_kms(plane);
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ /* remove previous state, if present */
+ if (plane->state) {
+ dpu_plane_destroy_state(plane, plane->state);
+ plane->state = NULL;
+ }
+
+ pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
+ if (!pstate) {
+ DPU_ERROR_PLANE(pdpu, "failed to allocate state\n");
+ return;
+ }
+
+ /*
+ * Set the SSPP here until we have proper virtualized DPU planes.
+ * This is the place where the state is allocated, so fill it fully.
+ */
+ pstate->pipe.sspp = dpu_rm_get_sspp(&dpu_kms->rm, pdpu->pipe);
+ pstate->pipe.multirect_index = DPU_SSPP_RECT_SOLO;
+ pstate->pipe.multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+
+ pstate->r_pipe.sspp = NULL;
+
+ __drm_atomic_helper_plane_reset(plane, &pstate->base);
+}
+
+#ifdef CONFIG_DEBUG_FS
+void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
+{
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_plane_state *pstate = to_dpu_plane_state(plane->state);
+ struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
+
+ if (!pdpu->is_rt_pipe)
+ return;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ _dpu_plane_set_qos_ctrl(plane, &pstate->pipe, enable);
+ if (pstate->r_pipe.sspp)
+ _dpu_plane_set_qos_ctrl(plane, &pstate->r_pipe, enable);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+}
+#endif
+
+static bool dpu_plane_format_mod_supported(struct drm_plane *plane,
+ uint32_t format, uint64_t modifier)
+{
+ if (modifier == DRM_FORMAT_MOD_LINEAR)
+ return true;
+
+ if (modifier == DRM_FORMAT_MOD_QCOM_COMPRESSED)
+ return dpu_find_format(format, qcom_compressed_supported_formats,
+ ARRAY_SIZE(qcom_compressed_supported_formats));
+
+ return false;
+}
+
+static const struct drm_plane_funcs dpu_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = dpu_plane_destroy,
+ .reset = dpu_plane_reset,
+ .atomic_duplicate_state = dpu_plane_duplicate_state,
+ .atomic_destroy_state = dpu_plane_destroy_state,
+ .atomic_print_state = dpu_plane_atomic_print_state,
+ .format_mod_supported = dpu_plane_format_mod_supported,
+};
+
+static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = {
+ .prepare_fb = dpu_plane_prepare_fb,
+ .cleanup_fb = dpu_plane_cleanup_fb,
+ .atomic_check = dpu_plane_atomic_check,
+ .atomic_update = dpu_plane_atomic_update,
+};
+
+/* initialize plane */
+struct drm_plane *dpu_plane_init(struct drm_device *dev,
+ uint32_t pipe, enum drm_plane_type type,
+ unsigned long possible_crtcs)
+{
+ struct drm_plane *plane = NULL;
+ const uint32_t *format_list;
+ struct dpu_plane *pdpu;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_kms *kms = to_dpu_kms(priv->kms);
+ struct dpu_hw_sspp *pipe_hw;
+ uint32_t num_formats;
+ uint32_t supported_rotations;
+ int ret = -EINVAL;
+
+ /* create and zero local structure */
+ pdpu = kzalloc(sizeof(*pdpu), GFP_KERNEL);
+ if (!pdpu) {
+ DPU_ERROR("[%u]failed to allocate local plane struct\n", pipe);
+ ret = -ENOMEM;
+ return ERR_PTR(ret);
+ }
+
+ /* cache local stuff for later */
+ plane = &pdpu->base;
+ pdpu->pipe = pipe;
+
+ /* initialize underlying h/w driver */
+ pipe_hw = dpu_rm_get_sspp(&kms->rm, pipe);
+ if (!pipe_hw || !pipe_hw->cap || !pipe_hw->cap->sblk) {
+ DPU_ERROR("[%u]SSPP is invalid\n", pipe);
+ goto clean_plane;
+ }
+
+ format_list = pipe_hw->cap->sblk->format_list;
+ num_formats = pipe_hw->cap->sblk->num_formats;
+
+ ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
+ format_list, num_formats,
+ supported_format_modifiers, type, NULL);
+ if (ret)
+ goto clean_plane;
+
+ pdpu->catalog = kms->catalog;
+
+ ret = drm_plane_create_zpos_property(plane, 0, 0, DPU_ZPOS_MAX);
+ if (ret)
+ DPU_ERROR("failed to install zpos property, rc = %d\n", ret);
+
+ drm_plane_create_alpha_property(plane);
+ drm_plane_create_blend_mode_property(plane,
+ BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
+
+ supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
+
+ if (pipe_hw->cap->features & BIT(DPU_SSPP_INLINE_ROTATION))
+ supported_rotations |= DRM_MODE_ROTATE_MASK;
+
+ drm_plane_create_rotation_property(plane,
+ DRM_MODE_ROTATE_0, supported_rotations);
+
+ drm_plane_enable_fb_damage_clips(plane);
+
+ /* success! finalize initialization */
+ drm_plane_helper_add(plane, &dpu_plane_helper_funcs);
+
+ mutex_init(&pdpu->lock);
+
+ DPU_DEBUG("%s created for pipe:%u id:%u\n", plane->name,
+ pipe, plane->base.id);
+ return plane;
+
+clean_plane:
+ kfree(pdpu);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
new file mode 100644
index 0000000000..abd6b21a04
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef _DPU_PLANE_H_
+#define _DPU_PLANE_H_
+
+#include <drm/drm_crtc.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_sspp.h"
+
+/**
+ * struct dpu_plane_state - DPU extension of the drm plane state object
+ * @base: base drm plane state object
+ * @aspace: pointer to address space for input/output buffers
+ * @pipe: software pipe description
+ * @r_pipe: software pipe description of the second pipe
+ * @pipe_cfg: software pipe configuration
+ * @r_pipe_cfg: software pipe configuration for the second pipe
+ * @stage: assigned by crtc blender
+ * @needs_qos_remap: qos remap settings need to be updated
+ * @multirect_index: index of the rectangle of SSPP
+ * @multirect_mode: parallel or time multiplex multirect mode
+ * @pending: whether the current update is still pending
+ * @plane_fetch_bw: calculated BW per plane
+ * @plane_clk: calculated clk per plane
+ * @needs_dirtyfb: whether attached CRTC needs pixel data explicitly flushed
+ * @rotation: simplified drm rotation hint
+ */
+struct dpu_plane_state {
+ struct drm_plane_state base;
+ struct msm_gem_address_space *aspace;
+ struct dpu_sw_pipe pipe;
+ struct dpu_sw_pipe r_pipe;
+ struct dpu_sw_pipe_cfg pipe_cfg;
+ struct dpu_sw_pipe_cfg r_pipe_cfg;
+ enum dpu_stage stage;
+ bool needs_qos_remap;
+ bool pending;
+
+ u64 plane_fetch_bw;
+ u64 plane_clk;
+
+ bool needs_dirtyfb;
+ unsigned int rotation;
+};
+
+#define to_dpu_plane_state(x) \
+ container_of(x, struct dpu_plane_state, base)
+
+/**
+ * dpu_plane_flush - final plane operations before commit flush
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_flush(struct drm_plane *plane);
+
+/**
+ * dpu_plane_set_error - enable/disable error condition
+ * @plane: pointer to drm_plane structure
+ * @error: error value to set
+ */
+void dpu_plane_set_error(struct drm_plane *plane, bool error);
+
+/**
+ * dpu_plane_init - create new dpu plane for the given pipe
+ * @dev: Pointer to DRM device
+ * @pipe: dpu hardware pipe identifier
+ * @type: Plane type - PRIMARY/OVERLAY/CURSOR
+ * @possible_crtcs: bitmask of crtc that can be attached to the given pipe
+ *
+ * Return: pointer to the new plane, or an ERR_PTR on failure
+ */
+struct drm_plane *dpu_plane_init(struct drm_device *dev,
+ uint32_t pipe, enum drm_plane_type type,
+ unsigned long possible_crtcs);
+
+#ifdef CONFIG_DEBUG_FS
+void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable);
+#else
+static inline void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable) {}
+#endif
+
+#endif /* _DPU_PLANE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
new file mode 100644
index 0000000000..f9215643c7
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -0,0 +1,654 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm:%s] " fmt, __func__
+#include "dpu_kms.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_hw_intf.h"
+#include "dpu_hw_wb.h"
+#include "dpu_hw_dspp.h"
+#include "dpu_hw_merge3d.h"
+#include "dpu_hw_dsc.h"
+#include "dpu_encoder.h"
+#include "dpu_trace.h"
+
+
+static inline bool reserved_by_other(uint32_t *res_map, int idx,
+ uint32_t enc_id)
+{
+ return res_map[idx] && res_map[idx] != enc_id;
+}
+
+/**
+ * struct dpu_rm_requirements - Reservation requirements parameter bundle
+ * @topology: selected topology for the display
+ * @hw_res: Hardware resources required as reported by the encoders
+ */
+struct dpu_rm_requirements {
+ struct msm_display_topology topology;
+};
+
+int dpu_rm_destroy(struct dpu_rm *rm)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rm->dspp_blks); i++) {
+ struct dpu_hw_dspp *hw;
+
+ if (rm->dspp_blks[i]) {
+ hw = to_dpu_hw_dspp(rm->dspp_blks[i]);
+ dpu_hw_dspp_destroy(hw);
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) {
+ struct dpu_hw_pingpong *hw;
+
+ if (rm->pingpong_blks[i]) {
+ hw = to_dpu_hw_pingpong(rm->pingpong_blks[i]);
+ dpu_hw_pingpong_destroy(hw);
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(rm->merge_3d_blks); i++) {
+ struct dpu_hw_merge_3d *hw;
+
+ if (rm->merge_3d_blks[i]) {
+ hw = to_dpu_hw_merge_3d(rm->merge_3d_blks[i]);
+ dpu_hw_merge_3d_destroy(hw);
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) {
+ struct dpu_hw_mixer *hw;
+
+ if (rm->mixer_blks[i]) {
+ hw = to_dpu_hw_mixer(rm->mixer_blks[i]);
+ dpu_hw_lm_destroy(hw);
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(rm->ctl_blks); i++) {
+ struct dpu_hw_ctl *hw;
+
+ if (rm->ctl_blks[i]) {
+ hw = to_dpu_hw_ctl(rm->ctl_blks[i]);
+ dpu_hw_ctl_destroy(hw);
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(rm->hw_intf); i++)
+ dpu_hw_intf_destroy(rm->hw_intf[i]);
+
+ for (i = 0; i < ARRAY_SIZE(rm->dsc_blks); i++) {
+ struct dpu_hw_dsc *hw;
+
+ if (rm->dsc_blks[i]) {
+ hw = to_dpu_hw_dsc(rm->dsc_blks[i]);
+ dpu_hw_dsc_destroy(hw);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rm->hw_wb); i++)
+ dpu_hw_wb_destroy(rm->hw_wb[i]);
+
+ for (i = 0; i < ARRAY_SIZE(rm->hw_sspp); i++)
+ dpu_hw_sspp_destroy(rm->hw_sspp[i]);
+
+ return 0;
+}
+
+int dpu_rm_init(struct dpu_rm *rm,
+ const struct dpu_mdss_cfg *cat,
+ const struct msm_mdss_data *mdss_data,
+ void __iomem *mmio)
+{
+ int rc, i;
+
+ if (!rm || !cat || !mmio) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ /* Clear, setup lists */
+ memset(rm, 0, sizeof(*rm));
+
+ /* Interrogate HW catalog and create tracking items for hw blocks */
+ for (i = 0; i < cat->mixer_count; i++) {
+ struct dpu_hw_mixer *hw;
+ const struct dpu_lm_cfg *lm = &cat->mixer[i];
+
+ hw = dpu_hw_lm_init(lm, mmio);
+ if (IS_ERR(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed lm object creation: err %d\n", rc);
+ goto fail;
+ }
+ rm->mixer_blks[lm->id - LM_0] = &hw->base;
+ }
+
+ for (i = 0; i < cat->merge_3d_count; i++) {
+ struct dpu_hw_merge_3d *hw;
+ const struct dpu_merge_3d_cfg *merge_3d = &cat->merge_3d[i];
+
+ hw = dpu_hw_merge_3d_init(merge_3d, mmio);
+ if (IS_ERR(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed merge_3d object creation: err %d\n",
+ rc);
+ goto fail;
+ }
+ rm->merge_3d_blks[merge_3d->id - MERGE_3D_0] = &hw->base;
+ }
+
+ for (i = 0; i < cat->pingpong_count; i++) {
+ struct dpu_hw_pingpong *hw;
+ const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];
+
+ hw = dpu_hw_pingpong_init(pp, mmio);
+ if (IS_ERR(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed pingpong object creation: err %d\n",
+ rc);
+ goto fail;
+ }
+ if (pp->merge_3d && pp->merge_3d < MERGE_3D_MAX)
+ hw->merge_3d = to_dpu_hw_merge_3d(rm->merge_3d_blks[pp->merge_3d - MERGE_3D_0]);
+ rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base;
+ }
+
+ for (i = 0; i < cat->intf_count; i++) {
+ struct dpu_hw_intf *hw;
+ const struct dpu_intf_cfg *intf = &cat->intf[i];
+
+ hw = dpu_hw_intf_init(intf, mmio, cat->mdss_ver);
+ if (IS_ERR(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed intf object creation: err %d\n", rc);
+ goto fail;
+ }
+ rm->hw_intf[intf->id - INTF_0] = hw;
+ }
+
+ for (i = 0; i < cat->wb_count; i++) {
+ struct dpu_hw_wb *hw;
+ const struct dpu_wb_cfg *wb = &cat->wb[i];
+
+ hw = dpu_hw_wb_init(wb, mmio);
+ if (IS_ERR(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed wb object creation: err %d\n", rc);
+ goto fail;
+ }
+ rm->hw_wb[wb->id - WB_0] = hw;
+ }
+
+ for (i = 0; i < cat->ctl_count; i++) {
+ struct dpu_hw_ctl *hw;
+ const struct dpu_ctl_cfg *ctl = &cat->ctl[i];
+
+ hw = dpu_hw_ctl_init(ctl, mmio, cat->mixer_count, cat->mixer);
+ if (IS_ERR(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed ctl object creation: err %d\n", rc);
+ goto fail;
+ }
+ rm->ctl_blks[ctl->id - CTL_0] = &hw->base;
+ }
+
+ for (i = 0; i < cat->dspp_count; i++) {
+ struct dpu_hw_dspp *hw;
+ const struct dpu_dspp_cfg *dspp = &cat->dspp[i];
+
+ hw = dpu_hw_dspp_init(dspp, mmio);
+ if (IS_ERR(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed dspp object creation: err %d\n", rc);
+ goto fail;
+ }
+ rm->dspp_blks[dspp->id - DSPP_0] = &hw->base;
+ }
+
+ for (i = 0; i < cat->dsc_count; i++) {
+ struct dpu_hw_dsc *hw;
+ const struct dpu_dsc_cfg *dsc = &cat->dsc[i];
+
+ if (test_bit(DPU_DSC_HW_REV_1_2, &dsc->features))
+ hw = dpu_hw_dsc_init_1_2(dsc, mmio);
+ else
+ hw = dpu_hw_dsc_init(dsc, mmio);
+
+ if (IS_ERR(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed dsc object creation: err %d\n", rc);
+ goto fail;
+ }
+ rm->dsc_blks[dsc->id - DSC_0] = &hw->base;
+ }
+
+ for (i = 0; i < cat->sspp_count; i++) {
+ struct dpu_hw_sspp *hw;
+ const struct dpu_sspp_cfg *sspp = &cat->sspp[i];
+
+ hw = dpu_hw_sspp_init(sspp, mmio, mdss_data);
+ if (IS_ERR(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed sspp object creation: err %d\n", rc);
+ goto fail;
+ }
+ rm->hw_sspp[sspp->id - SSPP_NONE] = hw;
+ }
+
+ return 0;
+
+fail:
+ dpu_rm_destroy(rm);
+
+ return rc ? rc : -EFAULT;
+}
+
+static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
+{
+ return top->num_intf > 1;
+}
+
+/**
+ * _dpu_rm_get_lm_peer - get the id of a mixer which is a peer of the primary
+ * @rm: dpu resource manager handle
+ * @primary_idx: index of primary mixer in rm->mixer_blks[]
+ *
+ * Return: index of the peer mixer in rm->mixer_blks[], or -EINVAL if none
+ */
+static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
+{
+ const struct dpu_lm_cfg *prim_lm_cfg;
+
+ prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap;
+
+ if (prim_lm_cfg->lm_pair >= LM_0 && prim_lm_cfg->lm_pair < LM_MAX)
+ return prim_lm_cfg->lm_pair - LM_0;
+ return -EINVAL;
+}
+
+/**
+ * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
+ * proposed use case requirements, incl. hardwired dependent blocks like
+ * pingpong
+ * @rm: dpu resource manager handle
+ * @global_state: resources shared across multiple kms objects
+ * @enc_id: encoder id requesting for allocation
+ * @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks
+ *	if the lm, and all other hardwired blocks connected to it (pp), are
+ *	available and appropriate
+ * @pp_idx: output parameter, index of pingpong block attached to the layer
+ * mixer in rm->pingpong_blks[].
+ * @dspp_idx: output parameter, index of dspp block attached to the layer
+ * mixer in rm->dspp_blks[].
+ * @reqs: input parameter, rm requirements for HW blocks needed in the
+ * datapath.
+ * Return: true if lm matches all requirements, false otherwise
+ */
+static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx,
+ struct dpu_rm_requirements *reqs)
+{
+ const struct dpu_lm_cfg *lm_cfg;
+ int idx;
+
+ /* Already reserved? */
+ if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) {
+ DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0);
+ return false;
+ }
+
+ lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap;
+ idx = lm_cfg->pingpong - PINGPONG_0;
+ if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks)) {
+ DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
+ return false;
+ }
+
+ if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) {
+ DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id,
+ lm_cfg->pingpong);
+ return false;
+ }
+ *pp_idx = idx;
+
+ if (!reqs->topology.num_dspp)
+ return true;
+
+ idx = lm_cfg->dspp - DSPP_0;
+ if (idx < 0 || idx >= ARRAY_SIZE(rm->dspp_blks)) {
+ DPU_ERROR("failed to get dspp on lm %d\n", lm_cfg->dspp);
+ return false;
+ }
+
+ if (reserved_by_other(global_state->dspp_to_enc_id, idx, enc_id)) {
+ DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id,
+ lm_cfg->dspp);
+ return false;
+ }
+ *dspp_idx = idx;
+
+ return true;
+}
+
+static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ uint32_t enc_id,
+ struct dpu_rm_requirements *reqs)
+
+{
+ int lm_idx[MAX_BLOCKS];
+ int pp_idx[MAX_BLOCKS];
+ int dspp_idx[MAX_BLOCKS] = {0};
+ int i, lm_count = 0;
+
+ if (!reqs->topology.num_lm) {
+ DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
+ return -EINVAL;
+ }
+
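+ /*
+ * Mixers come in fixed hardware pairs; for dual-LM topologies the
+ * second mixer must be the designated peer of the primary one.
+ */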
+ /* Find a primary mixer */
+ for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
+ lm_count < reqs->topology.num_lm; i++) {
+ if (!rm->mixer_blks[i])
+ continue;
+
+ lm_count = 0;
+ lm_idx[lm_count] = i;
+
+ if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
+ enc_id, i, &pp_idx[lm_count],
+ &dspp_idx[lm_count], reqs)) {
+ continue;
+ }
+
+ ++lm_count;
+
+ /* Valid primary mixer found, find matching peers */
+ if (lm_count < reqs->topology.num_lm) {
+ int j = _dpu_rm_get_lm_peer(rm, i);
+
+ /* ignore the peer if there is an error or if the peer was already processed */
+ if (j < 0 || j < i)
+ continue;
+
+ if (!rm->mixer_blks[j])
+ continue;
+
+ if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
+ global_state, enc_id, j,
+ &pp_idx[lm_count], &dspp_idx[lm_count],
+ reqs)) {
+ continue;
+ }
+
+ lm_idx[lm_count] = j;
+ ++lm_count;
+ }
+ }
+
+ if (lm_count != reqs->topology.num_lm) {
+ DPU_DEBUG("unable to find appropriate mixers\n");
+ return -ENAVAIL;
+ }
+
+ for (i = 0; i < lm_count; i++) {
+ global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
+ global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
+ global_state->dspp_to_enc_id[dspp_idx[i]] =
+ reqs->topology.num_dspp ? enc_id : 0;
+
+ trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
+ pp_idx[i] + PINGPONG_0);
+ }
+
+ return 0;
+}
+
+static int _dpu_rm_reserve_ctls(
+ struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ uint32_t enc_id,
+ const struct msm_display_topology *top)
+{
+ int ctl_idx[MAX_BLOCKS];
+ int i = 0, j, num_ctls;
+ bool needs_split_display;
+
+ /* each hw_intf needs its own hw_ctl to program its control path */
+ num_ctls = top->num_intf;
+
+ needs_split_display = _dpu_rm_needs_split_display(top);
+
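+ /*
+ * A multi-interface display needs CTLs with the split-display
+ * feature, while a single-interface display must not occupy one,
+ * hence the exact feature match in the loop below.
+ */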
+ for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) {
+ const struct dpu_hw_ctl *ctl;
+ unsigned long features;
+ bool has_split_display;
+
+ if (!rm->ctl_blks[j])
+ continue;
+ if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id))
+ continue;
+
+ ctl = to_dpu_hw_ctl(rm->ctl_blks[j]);
+ features = ctl->caps->features;
+ has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;
+
+ DPU_DEBUG("ctl %d caps 0x%lX\n", j + CTL_0, features);
+
+ if (needs_split_display != has_split_display)
+ continue;
+
+ ctl_idx[i] = j;
+ DPU_DEBUG("ctl %d match\n", j + CTL_0);
+
+ if (++i == num_ctls)
+ break;
+
+ }
+
+ if (i != num_ctls)
+ return -ENAVAIL;
+
+ for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) {
+ global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id;
+ trace_dpu_rm_reserve_ctls(i + CTL_0, enc_id);
+ }
+
+ return 0;
+}
+
+static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ struct drm_encoder *enc,
+ const struct msm_display_topology *top)
+{
+ int num_dsc = top->num_dsc;
+ int i;
+
+ /* check that the required DSC blocks exist and are not already allocated */
+ for (i = 0; i < num_dsc; i++) {
+ if (!rm->dsc_blks[i]) {
+ DPU_ERROR("DSC %d does not exist\n", i);
+ return -EIO;
+ }
+
+ if (global_state->dsc_to_enc_id[i]) {
+ DPU_ERROR("DSC %d is already allocated\n", i);
+ return -EIO;
+ }
+ }
+
+ for (i = 0; i < num_dsc; i++)
+ global_state->dsc_to_enc_id[i] = enc->base.id;
+
+ return 0;
+}
+
+static int _dpu_rm_make_reservation(
+ struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ struct drm_encoder *enc,
+ struct dpu_rm_requirements *reqs)
+{
+ int ret;
+
+ ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs);
+ if (ret) {
+ DPU_ERROR("unable to find appropriate mixers\n");
+ return ret;
+ }
+
+ ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
+ &reqs->topology);
+ if (ret) {
+ DPU_ERROR("unable to find appropriate CTL\n");
+ return ret;
+ }
+
+ return _dpu_rm_reserve_dsc(rm, global_state, enc, &reqs->topology);
+}
+
+static int _dpu_rm_populate_requirements(
+ struct drm_encoder *enc,
+ struct dpu_rm_requirements *reqs,
+ struct msm_display_topology req_topology)
+{
+ reqs->topology = req_topology;
+
+ DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d\n",
+ reqs->topology.num_lm, reqs->topology.num_dsc,
+ reqs->topology.num_intf);
+
+ return 0;
+}
+
+static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
+ uint32_t enc_id)
+{
+ int i;
+
+ for (i = 0; i < cnt; i++) {
+ if (res_mapping[i] == enc_id)
+ res_mapping[i] = 0;
+ }
+}
+
+void dpu_rm_release(struct dpu_global_state *global_state,
+ struct drm_encoder *enc)
+{
+ _dpu_rm_clear_mapping(global_state->pingpong_to_enc_id,
+ ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id);
+ _dpu_rm_clear_mapping(global_state->mixer_to_enc_id,
+ ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id);
+ _dpu_rm_clear_mapping(global_state->ctl_to_enc_id,
+ ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
+ _dpu_rm_clear_mapping(global_state->dsc_to_enc_id,
+ ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id);
+ _dpu_rm_clear_mapping(global_state->dspp_to_enc_id,
+ ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id);
+}
+
+int dpu_rm_reserve(
+ struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ struct drm_encoder *enc,
+ struct drm_crtc_state *crtc_state,
+ struct msm_display_topology topology)
+{
+ struct dpu_rm_requirements reqs;
+ int ret;
+
+ /* Check if this is just a page-flip */
+ if (!drm_atomic_crtc_needs_modeset(crtc_state))
+ return 0;
+
+ if (IS_ERR(global_state)) {
+ DPU_ERROR("failed to global state\n");
+ return PTR_ERR(global_state);
+ }
+
+ DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
+ enc->base.id, crtc_state->crtc->base.id);
+
+ ret = _dpu_rm_populate_requirements(enc, &reqs, topology);
+ if (ret) {
+ DPU_ERROR("failed to populate hw requirements\n");
+ return ret;
+ }
+
+ ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs);
+ if (ret)
+ DPU_ERROR("failed to reserve hw resources: %d\n", ret);
+
+ return ret;
+}
+
+int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
+ struct dpu_global_state *global_state, uint32_t enc_id,
+ enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
+{
+ struct dpu_hw_blk **hw_blks;
+ uint32_t *hw_to_enc_id;
+ int i, num_blks, max_blks;
+
+ switch (type) {
+ case DPU_HW_BLK_PINGPONG:
+ hw_blks = rm->pingpong_blks;
+ hw_to_enc_id = global_state->pingpong_to_enc_id;
+ max_blks = ARRAY_SIZE(rm->pingpong_blks);
+ break;
+ case DPU_HW_BLK_LM:
+ hw_blks = rm->mixer_blks;
+ hw_to_enc_id = global_state->mixer_to_enc_id;
+ max_blks = ARRAY_SIZE(rm->mixer_blks);
+ break;
+ case DPU_HW_BLK_CTL:
+ hw_blks = rm->ctl_blks;
+ hw_to_enc_id = global_state->ctl_to_enc_id;
+ max_blks = ARRAY_SIZE(rm->ctl_blks);
+ break;
+ case DPU_HW_BLK_DSPP:
+ hw_blks = rm->dspp_blks;
+ hw_to_enc_id = global_state->dspp_to_enc_id;
+ max_blks = ARRAY_SIZE(rm->dspp_blks);
+ break;
+ case DPU_HW_BLK_DSC:
+ hw_blks = rm->dsc_blks;
+ hw_to_enc_id = global_state->dsc_to_enc_id;
+ max_blks = ARRAY_SIZE(rm->dsc_blks);
+ break;
+ default:
+ DPU_ERROR("blk type %d not managed by rm\n", type);
+ return 0;
+ }
+
+ num_blks = 0;
+ for (i = 0; i < max_blks; i++) {
+ if (hw_to_enc_id[i] != enc_id)
+ continue;
+
+ if (num_blks == blks_size) {
+ DPU_ERROR("More than %d resources assigned to enc %d\n",
+ blks_size, enc_id);
+ break;
+ }
+ if (!hw_blks[i]) {
+ DPU_ERROR("Allocated resource %d unavailable to assign to enc %d\n",
+ type, enc_id);
+ break;
+ }
+ blks[num_blks++] = hw_blks[i];
+ }
+
+ return num_blks;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
new file mode 100644
index 0000000000..2b551566cb
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DPU_RM_H__
+#define __DPU_RM_H__
+
+#include <linux/list.h>
+
+#include "msm_kms.h"
+#include "dpu_hw_top.h"
+
+struct dpu_global_state;
+
+/**
+ * struct dpu_rm - DPU dynamic hardware resource manager
+ * @pingpong_blks: array of pingpong hardware resources
+ * @mixer_blks: array of layer mixer hardware resources
+ * @ctl_blks: array of ctl hardware resources
+ * @hw_intf: array of intf hardware resources
+ * @hw_wb: array of wb hardware resources
+ * @dspp_blks: array of dspp hardware resources
+ * @merge_3d_blks: array of merge_3d hardware resources
+ * @dsc_blks: array of dsc hardware resources
+ * @hw_sspp: array of sspp hardware resources
+ */
+struct dpu_rm {
+ struct dpu_hw_blk *pingpong_blks[PINGPONG_MAX - PINGPONG_0];
+ struct dpu_hw_blk *mixer_blks[LM_MAX - LM_0];
+ struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0];
+ struct dpu_hw_intf *hw_intf[INTF_MAX - INTF_0];
+ struct dpu_hw_wb *hw_wb[WB_MAX - WB_0];
+ struct dpu_hw_blk *dspp_blks[DSPP_MAX - DSPP_0];
+ struct dpu_hw_blk *merge_3d_blks[MERGE_3D_MAX - MERGE_3D_0];
+ struct dpu_hw_blk *dsc_blks[DSC_MAX - DSC_0];
+ struct dpu_hw_sspp *hw_sspp[SSPP_MAX - SSPP_NONE];
+};
+
+/**
+ * dpu_rm_init - Read hardware catalog and create reservation tracking objects
+ * for all HW blocks.
+ * @rm: DPU Resource Manager handle
+ * @cat: Pointer to hardware catalog
+ * @mdss_data: Pointer to MDSS / UBWC configuration
+ * @mmio: mapped register io address of MDP
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_init(struct dpu_rm *rm,
+ const struct dpu_mdss_cfg *cat,
+ const struct msm_mdss_data *mdss_data,
+ void __iomem *mmio);
+
+/**
+ * dpu_rm_destroy - Free all memory allocated by dpu_rm_init
+ * @rm: DPU Resource Manager handle
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_destroy(struct dpu_rm *rm);
+
+/**
+ * dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
+ * the connections in use and the user requirements, specified through
+ * related topology control properties, and reserve hardware blocks for
+ * that display chain.
+ * HW blocks can then be accessed through dpu_rm_get_* functions.
+ * HW reservations should be released via dpu_rm_release.
+ * @rm: DPU Resource Manager handle
+ * @global_state: resources shared across multiple kms objects
+ * @drm_enc: DRM Encoder handle
+ * @crtc_state: Proposed Atomic DRM CRTC State handle
+ * @topology: Topology of the display chain
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_reserve(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ struct drm_encoder *drm_enc,
+ struct drm_crtc_state *crtc_state,
+ struct msm_display_topology topology);
+
+/**
+ * dpu_rm_release - Given the encoder for the display chain, release any
+ * HW blocks previously reserved for that use case.
+ * @global_state: resources shared across multiple kms objects
+ * @enc: DRM Encoder handle
+ */
+void dpu_rm_release(struct dpu_global_state *global_state,
+ struct drm_encoder *enc);
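+
+/*
+ * Illustrative reserve/release flow (a hedged sketch: dpu_kms_get_global_state
+ * and the surrounding atomic-check variables are assumptions, not part of
+ * this header):
+ *
+ *     global_state = dpu_kms_get_global_state(crtc_state->state);
+ *     ret = dpu_rm_reserve(&dpu_kms->rm, global_state, drm_enc,
+ *                          crtc_state, topology);
+ *     if (ret)
+ *             return ret;
+ *     ...
+ *     dpu_rm_release(global_state, drm_enc);
+ */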
+
+/**
+ * dpu_rm_get_assigned_resources - Get hw resources of the given type that are
+ * assigned to this encoder
+ * @rm: DPU Resource Manager handle
+ * @global_state: resources shared across multiple kms objects
+ * @enc_id: DRM ID of the requesting encoder
+ * @type: resource type to return data for
+ * @blks: pointer to the array to be filled by the assigned hw blocks
+ * @blks_size: size of the @blks array
+ */
+int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
+ struct dpu_global_state *global_state, uint32_t enc_id,
+ enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size);
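+
+/*
+ * Illustrative usage sketch (MAX_CHANNELS_PER_ENC is an assumption borrowed
+ * from the encoder code): fetch the pingpong blocks reserved for an encoder
+ * after a successful dpu_rm_reserve():
+ *
+ *     struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
+ *     int num_pp;
+ *
+ *     num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ *                     drm_enc->base.id, DPU_HW_BLK_PINGPONG,
+ *                     hw_pp, ARRAY_SIZE(hw_pp));
+ */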
+
+/**
+ * dpu_rm_get_intf - Return a struct dpu_hw_intf instance given its index.
+ * @rm: DPU Resource Manager handle
+ * @intf_idx: INTF's index
+ */
+static inline struct dpu_hw_intf *dpu_rm_get_intf(struct dpu_rm *rm, enum dpu_intf intf_idx)
+{
+ return rm->hw_intf[intf_idx - INTF_0];
+}
+
+/**
+ * dpu_rm_get_wb - Return a struct dpu_hw_wb instance given its index.
+ * @rm: DPU Resource Manager handle
+ * @wb_idx: WB index
+ */
+static inline struct dpu_hw_wb *dpu_rm_get_wb(struct dpu_rm *rm, enum dpu_wb wb_idx)
+{
+ return rm->hw_wb[wb_idx - WB_0];
+}
+
+/**
+ * dpu_rm_get_sspp - Return a struct dpu_hw_sspp instance given its index.
+ * @rm: DPU Resource Manager handle
+ * @sspp_idx: SSPP index
+ */
+static inline struct dpu_hw_sspp *dpu_rm_get_sspp(struct dpu_rm *rm, enum dpu_sspp sspp_idx)
+{
+ return rm->hw_sspp[sspp_idx - SSPP_NONE];
+}
+
+#endif /* __DPU_RM_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
new file mode 100644
index 0000000000..c74b9be25e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -0,0 +1,968 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+#if !defined(_DPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _DPU_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drm_rect.h>
+#include "dpu_crtc.h"
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_plane.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dpu
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE dpu_trace
+
+TRACE_EVENT(dpu_perf_set_qos_luts,
+ TP_PROTO(u32 pnum, u32 fmt, bool rt, u32 fl,
+ u32 lut, u32 lut_usage),
+ TP_ARGS(pnum, fmt, rt, fl, lut, lut_usage),
+ TP_STRUCT__entry(
+ __field(u32, pnum)
+ __field(u32, fmt)
+ __field(bool, rt)
+ __field(u32, fl)
+ __field(u64, lut)
+ __field(u32, lut_usage)
+ ),
+ TP_fast_assign(
+ __entry->pnum = pnum;
+ __entry->fmt = fmt;
+ __entry->rt = rt;
+ __entry->fl = fl;
+ __entry->lut = lut;
+ __entry->lut_usage = lut_usage;
+ ),
+ TP_printk("pnum=%d fmt=%x rt=%d fl=%d lut=0x%llx lut_usage=%d",
+ __entry->pnum, __entry->fmt,
+ __entry->rt, __entry->fl,
+ __entry->lut, __entry->lut_usage)
+);
+
+TRACE_EVENT(dpu_perf_set_danger_luts,
+ TP_PROTO(u32 pnum, u32 fmt, u32 mode, u32 danger_lut,
+ u32 safe_lut),
+ TP_ARGS(pnum, fmt, mode, danger_lut, safe_lut),
+ TP_STRUCT__entry(
+ __field(u32, pnum)
+ __field(u32, fmt)
+ __field(u32, mode)
+ __field(u32, danger_lut)
+ __field(u32, safe_lut)
+ ),
+ TP_fast_assign(
+ __entry->pnum = pnum;
+ __entry->fmt = fmt;
+ __entry->mode = mode;
+ __entry->danger_lut = danger_lut;
+ __entry->safe_lut = safe_lut;
+ ),
+ TP_printk("pnum=%d fmt=%x mode=%d luts[0x%x, 0x%x]",
+ __entry->pnum, __entry->fmt,
+ __entry->mode, __entry->danger_lut,
+ __entry->safe_lut)
+);
+
+TRACE_EVENT(dpu_perf_set_ot,
+ TP_PROTO(u32 pnum, u32 xin_id, u32 rd_lim, u32 vbif_idx),
+ TP_ARGS(pnum, xin_id, rd_lim, vbif_idx),
+ TP_STRUCT__entry(
+ __field(u32, pnum)
+ __field(u32, xin_id)
+ __field(u32, rd_lim)
+ __field(u32, vbif_idx)
+ ),
+ TP_fast_assign(
+ __entry->pnum = pnum;
+ __entry->xin_id = xin_id;
+ __entry->rd_lim = rd_lim;
+ __entry->vbif_idx = vbif_idx;
+ ),
+ TP_printk("pnum:%d xin_id:%d ot:%d vbif:%d",
+ __entry->pnum, __entry->xin_id, __entry->rd_lim,
+ __entry->vbif_idx)
+);
+
+TRACE_EVENT(dpu_cmd_release_bw,
+ TP_PROTO(u32 crtc_id),
+ TP_ARGS(crtc_id),
+ TP_STRUCT__entry(
+ __field(u32, crtc_id)
+ ),
+ TP_fast_assign(
+ __entry->crtc_id = crtc_id;
+ ),
+ TP_printk("crtc:%d", __entry->crtc_id)
+);
+
+TRACE_EVENT(tracing_mark_write,
+ TP_PROTO(int pid, const char *name, bool trace_begin),
+ TP_ARGS(pid, name, trace_begin),
+ TP_STRUCT__entry(
+ __field(int, pid)
+ __string(trace_name, name)
+ __field(bool, trace_begin)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid;
+ __assign_str(trace_name, name);
+ __entry->trace_begin = trace_begin;
+ ),
+ TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
+ __entry->pid, __get_str(trace_name))
+);
+
+TRACE_EVENT(dpu_trace_counter,
+ TP_PROTO(int pid, char *name, int value),
+ TP_ARGS(pid, name, value),
+ TP_STRUCT__entry(
+ __field(int, pid)
+ __string(counter_name, name)
+ __field(int, value)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid;
+ __assign_str(counter_name, name);
+ __entry->value = value;
+ ),
+ TP_printk("%d|%s|%d", __entry->pid,
+ __get_str(counter_name), __entry->value)
+);
+
+TRACE_EVENT(dpu_perf_crtc_update,
+ TP_PROTO(u32 crtc, u64 bw_ctl, u32 core_clk_rate,
+ bool stop_req, bool update_bus, bool update_clk),
+ TP_ARGS(crtc, bw_ctl, core_clk_rate, stop_req, update_bus, update_clk),
+ TP_STRUCT__entry(
+ __field(u32, crtc)
+ __field(u64, bw_ctl)
+ __field(u32, core_clk_rate)
+ __field(bool, stop_req)
+ __field(bool, update_bus)
+ __field(bool, update_clk)
+ ),
+ TP_fast_assign(
+ __entry->crtc = crtc;
+ __entry->bw_ctl = bw_ctl;
+ __entry->core_clk_rate = core_clk_rate;
+ __entry->stop_req = stop_req;
+ __entry->update_bus = update_bus;
+ __entry->update_clk = update_clk;
+ ),
+ TP_printk(
+ "crtc=%d bw_ctl=%llu clk_rate=%u stop_req=%d u_bus=%d u_clk=%d",
+ __entry->crtc,
+ __entry->bw_ctl,
+ __entry->core_clk_rate,
+ __entry->stop_req,
+ __entry->update_bus,
+ __entry->update_clk)
+);
+
+DECLARE_EVENT_CLASS(dpu_irq_template,
+ TP_PROTO(int irq_idx),
+ TP_ARGS(irq_idx),
+ TP_STRUCT__entry(
+ __field( int, irq_idx )
+ ),
+ TP_fast_assign(
+ __entry->irq_idx = irq_idx;
+ ),
+ TP_printk("irq=%d", __entry->irq_idx)
+);
+DEFINE_EVENT(dpu_irq_template, dpu_irq_register_success,
+ TP_PROTO(int irq_idx),
+ TP_ARGS(irq_idx)
+);
+DEFINE_EVENT(dpu_irq_template, dpu_irq_unregister_success,
+ TP_PROTO(int irq_idx),
+ TP_ARGS(irq_idx)
+);
+
+TRACE_EVENT(dpu_enc_irq_wait_success,
+ TP_PROTO(uint32_t drm_id, void *func,
+ int irq_idx, enum dpu_pingpong pp_idx, int atomic_cnt),
+ TP_ARGS(drm_id, func, irq_idx, pp_idx, atomic_cnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( void *, func )
+ __field( int, irq_idx )
+ __field( enum dpu_pingpong, pp_idx )
+ __field( int, atomic_cnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->func = func;
+ __entry->irq_idx = irq_idx;
+ __entry->pp_idx = pp_idx;
+ __entry->atomic_cnt = atomic_cnt;
+ ),
+ TP_printk("id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d",
+ __entry->drm_id, __entry->func,
+ __entry->irq_idx, __entry->pp_idx, __entry->atomic_cnt)
+);
+
+DECLARE_EVENT_CLASS(dpu_drm_obj_template,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ ),
+ TP_printk("id=%u", __entry->drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_atomic_check,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_mode_set,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_disable,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_kickoff,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_prepare_kickoff,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_prepare_kickoff_reset,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_complete_flip,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_vblank_cb,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_complete_commit,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_commit,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_wait_for_commit_done,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_runtime_resume,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+
+TRACE_EVENT(dpu_enc_enable,
+ TP_PROTO(uint32_t drm_id, int hdisplay, int vdisplay),
+ TP_ARGS(drm_id, hdisplay, vdisplay),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, hdisplay )
+ __field( int, vdisplay )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->hdisplay = hdisplay;
+ __entry->vdisplay = vdisplay;
+ ),
+ TP_printk("id=%u, mode=%dx%d",
+ __entry->drm_id, __entry->hdisplay, __entry->vdisplay)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_keyval_template,
+ TP_PROTO(uint32_t drm_id, int val),
+ TP_ARGS(drm_id, val),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, val )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->val = val;
+ ),
+ TP_printk("id=%u, val=%d", __entry->drm_id, __entry->val)
+);
+DEFINE_EVENT(dpu_enc_keyval_template, dpu_enc_underrun_cb,
+ TP_PROTO(uint32_t drm_id, int count),
+ TP_ARGS(drm_id, count)
+);
+DEFINE_EVENT(dpu_enc_keyval_template, dpu_enc_trigger_start,
+ TP_PROTO(uint32_t drm_id, int ctl_idx),
+ TP_ARGS(drm_id, ctl_idx)
+);
+
+TRACE_EVENT(dpu_enc_atomic_check_flags,
+ TP_PROTO(uint32_t drm_id, unsigned int flags),
+ TP_ARGS(drm_id, flags),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( unsigned int, flags )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->flags = flags;
+ ),
+ TP_printk("id=%u, flags=%u",
+ __entry->drm_id, __entry->flags)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_id_enable_template,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( bool, enable )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->enable = enable;
+ ),
+ TP_printk("id=%u, enable=%s",
+ __entry->drm_id, __entry->enable ? "true" : "false")
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_rc_helper,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_vblank_cb,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_frame_event_cb,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_phys_cmd_connect_te,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+
+TRACE_EVENT(dpu_enc_rc,
+ TP_PROTO(uint32_t drm_id, u32 sw_event, bool idle_pc_supported,
+ int rc_state, const char *stage),
+ TP_ARGS(drm_id, sw_event, idle_pc_supported, rc_state, stage),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( u32, sw_event )
+ __field( bool, idle_pc_supported )
+ __field( int, rc_state )
+ __string( stage_str, stage )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->sw_event = sw_event;
+ __entry->idle_pc_supported = idle_pc_supported;
+ __entry->rc_state = rc_state;
+ __assign_str(stage_str, stage);
+ ),
+ TP_printk("%s: id:%u, sw_event:%d, idle_pc_supported:%s, rc_state:%d",
+ __get_str(stage_str), __entry->drm_id, __entry->sw_event,
+ __entry->idle_pc_supported ? "true" : "false",
+ __entry->rc_state)
+);
+
+TRACE_EVENT(dpu_enc_frame_done_cb_not_busy,
+ TP_PROTO(uint32_t drm_id, u32 event, char *intf_mode, enum dpu_intf intf_idx,
+ enum dpu_wb wb_idx),
+ TP_ARGS(drm_id, event, intf_mode, intf_idx, wb_idx),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( u32, event )
+ __string( intf_mode_str, intf_mode )
+ __field( enum dpu_intf, intf_idx )
+ __field( enum dpu_wb, wb_idx )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->event = event;
+ __assign_str(intf_mode_str, intf_mode);
+ __entry->intf_idx = intf_idx;
+ __entry->wb_idx = wb_idx;
+ ),
+ TP_printk("id=%u, event=%u, intf_mode=%s intf=%d wb=%d", __entry->drm_id,
+ __entry->event, __get_str(intf_mode_str),
+ __entry->intf_idx, __entry->wb_idx)
+);
+
+TRACE_EVENT(dpu_enc_frame_done_cb,
+ TP_PROTO(uint32_t drm_id, unsigned int idx,
+ unsigned long frame_busy_mask),
+ TP_ARGS(drm_id, idx, frame_busy_mask),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( unsigned int, idx )
+ __field( unsigned long, frame_busy_mask )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->idx = idx;
+ __entry->frame_busy_mask = frame_busy_mask;
+ ),
+ TP_printk("id=%u, idx=%u, frame_busy_mask=%lx", __entry->drm_id,
+ __entry->idx, __entry->frame_busy_mask)
+);
+
+TRACE_EVENT(dpu_enc_trigger_flush,
+ TP_PROTO(uint32_t drm_id, char *intf_mode, enum dpu_intf intf_idx, enum dpu_wb wb_idx,
+ int pending_kickoff_cnt, int ctl_idx, u32 extra_flush_bits,
+ u32 pending_flush_ret),
+ TP_ARGS(drm_id, intf_mode, intf_idx, wb_idx, pending_kickoff_cnt, ctl_idx,
+ extra_flush_bits, pending_flush_ret),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __string( intf_mode_str, intf_mode )
+ __field( enum dpu_intf, intf_idx )
+ __field( enum dpu_wb, wb_idx )
+ __field( int, pending_kickoff_cnt )
+ __field( int, ctl_idx )
+ __field( u32, extra_flush_bits )
+ __field( u32, pending_flush_ret )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __assign_str(intf_mode_str, intf_mode);
+ __entry->intf_idx = intf_idx;
+ __entry->wb_idx = wb_idx;
+ __entry->pending_kickoff_cnt = pending_kickoff_cnt;
+ __entry->ctl_idx = ctl_idx;
+ __entry->extra_flush_bits = extra_flush_bits;
+ __entry->pending_flush_ret = pending_flush_ret;
+ ),
+ TP_printk("id=%u, intf_mode=%s, intf_idx=%d, wb_idx=%d, pending_kickoff_cnt=%d ctl_idx=%d "
+ "extra_flush_bits=0x%x pending_flush_ret=0x%x",
+ __entry->drm_id, __get_str(intf_mode_str), __entry->intf_idx, __entry->wb_idx,
+ __entry->pending_kickoff_cnt, __entry->ctl_idx,
+ __entry->extra_flush_bits, __entry->pending_flush_ret)
+);
+
+DECLARE_EVENT_CLASS(dpu_id_event_template,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( u32, event )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->event = event;
+ ),
+ TP_printk("id=%u, event=%u", __entry->drm_id, __entry->event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_enc_frame_done_timeout,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_cb,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_done,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_more_pending,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+
+TRACE_EVENT(dpu_enc_wait_event_timeout,
+ TP_PROTO(uint32_t drm_id, int irq_idx, int rc, s64 time,
+ s64 expected_time, int atomic_cnt),
+ TP_ARGS(drm_id, irq_idx, rc, time, expected_time, atomic_cnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, irq_idx )
+ __field( int, rc )
+ __field( s64, time )
+ __field( s64, expected_time )
+ __field( int, atomic_cnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->irq_idx = irq_idx;
+ __entry->rc = rc;
+ __entry->time = time;
+ __entry->expected_time = expected_time;
+ __entry->atomic_cnt = atomic_cnt;
+ ),
+ TP_printk("id=%u, irq_idx=%d, rc=%d, time=%lld, expected=%lld cnt=%d",
+ __entry->drm_id, __entry->irq_idx, __entry->rc, __entry->time,
+ __entry->expected_time, __entry->atomic_cnt)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_irq_ctrl,
+ TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, bool enable,
+ int refcnt),
+ TP_ARGS(drm_id, pp, enable, refcnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_pingpong, pp )
+ __field( bool, enable )
+ __field( int, refcnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->pp = pp;
+ __entry->enable = enable;
+ __entry->refcnt = refcnt;
+ ),
+ TP_printk("id=%u, pp=%d, enable=%s, refcnt=%d", __entry->drm_id,
+ __entry->pp, __entry->enable ? "true" : "false",
+ __entry->refcnt)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_pp_tx_done,
+ TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, int new_count,
+ u32 event),
+ TP_ARGS(drm_id, pp, new_count, event),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_pingpong, pp )
+ __field( int, new_count )
+ __field( u32, event )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->pp = pp;
+ __entry->new_count = new_count;
+ __entry->event = event;
+ ),
+ TP_printk("id=%u, pp=%d, new_count=%d, event=%u", __entry->drm_id,
+ __entry->pp, __entry->new_count, __entry->event)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_pdone_timeout,
+ TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, int timeout_count,
+ int kickoff_count, u32 event),
+ TP_ARGS(drm_id, pp, timeout_count, kickoff_count, event),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_pingpong, pp )
+ __field( int, timeout_count )
+ __field( int, kickoff_count )
+ __field( u32, event )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->pp = pp;
+ __entry->timeout_count = timeout_count;
+ __entry->kickoff_count = kickoff_count;
+ __entry->event = event;
+ ),
+ TP_printk("id=%u, pp=%d, timeout_count=%d, kickoff_count=%d, event=%u",
+ __entry->drm_id, __entry->pp, __entry->timeout_count,
+ __entry->kickoff_count, __entry->event)
+);
+
+TRACE_EVENT(dpu_enc_phys_vid_post_kickoff,
+ TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx),
+ TP_ARGS(drm_id, intf_idx),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intf, intf_idx )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intf_idx = intf_idx;
+ ),
+ TP_printk("id=%u, intf_idx=%d", __entry->drm_id, __entry->intf_idx)
+);
+
+TRACE_EVENT(dpu_enc_phys_vid_irq_ctrl,
+ TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx, bool enable,
+ int refcnt),
+ TP_ARGS(drm_id, intf_idx, enable, refcnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intf, intf_idx )
+ __field( bool, enable )
+ __field( int, refcnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intf_idx = intf_idx;
+ __entry->enable = enable;
+ __entry->refcnt = refcnt;
+ ),
+ TP_printk("id=%u, intf_idx=%d enable=%s refcnt=%d", __entry->drm_id,
+ __entry->intf_idx, __entry->enable ? "true" : "false",
+ __entry->refcnt)
+);
+
+TRACE_EVENT(dpu_crtc_setup_mixer,
+ TP_PROTO(uint32_t crtc_id, uint32_t plane_id,
+ struct drm_plane_state *state, struct dpu_plane_state *pstate,
+ uint32_t stage_idx, uint32_t pixel_format,
+ uint64_t modifier),
+ TP_ARGS(crtc_id, plane_id, state, pstate, stage_idx,
+ pixel_format, modifier),
+ TP_STRUCT__entry(
+ __field( uint32_t, crtc_id )
+ __field( uint32_t, plane_id )
+ __field( uint32_t, fb_id )
+ __field_struct( struct drm_rect, src_rect )
+ __field_struct( struct drm_rect, dst_rect )
+ __field( uint32_t, stage_idx )
+ __field( enum dpu_stage, stage )
+ __field( enum dpu_sspp, sspp )
+ __field( uint32_t, multirect_idx )
+ __field( uint32_t, multirect_mode )
+ __field( uint32_t, pixel_format )
+ __field( uint64_t, modifier )
+ ),
+ TP_fast_assign(
+ __entry->crtc_id = crtc_id;
+ __entry->plane_id = plane_id;
+ __entry->fb_id = state ? state->fb->base.id : 0;
+ __entry->src_rect = drm_plane_state_src(state);
+ __entry->dst_rect = drm_plane_state_dest(state);
+ __entry->stage_idx = stage_idx;
+ __entry->stage = pstate->stage;
+ __entry->sspp = pstate->pipe.sspp->idx;
+ __entry->multirect_idx = pstate->pipe.multirect_index;
+ __entry->multirect_mode = pstate->pipe.multirect_mode;
+ __entry->pixel_format = pixel_format;
+ __entry->modifier = modifier;
+ ),
+ TP_printk("crtc_id:%u plane_id:%u fb_id:%u src:" DRM_RECT_FP_FMT
+ " dst:" DRM_RECT_FMT " stage_idx:%u stage:%d, sspp:%d "
+ "multirect_index:%d multirect_mode:%u pix_format:%u "
+ "modifier:%llu",
+ __entry->crtc_id, __entry->plane_id, __entry->fb_id,
+ DRM_RECT_FP_ARG(&__entry->src_rect),
+ DRM_RECT_ARG(&__entry->dst_rect),
+ __entry->stage_idx, __entry->stage, __entry->sspp,
+ __entry->multirect_idx, __entry->multirect_mode,
+ __entry->pixel_format, __entry->modifier)
+);
+
+TRACE_EVENT(dpu_crtc_setup_lm_bounds,
+ TP_PROTO(uint32_t drm_id, int mixer, struct drm_rect *bounds),
+ TP_ARGS(drm_id, mixer, bounds),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, mixer )
+ __field_struct( struct drm_rect, bounds )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->mixer = mixer;
+ __entry->bounds = *bounds;
+ ),
+ TP_printk("id:%u mixer:%d bounds:" DRM_RECT_FMT, __entry->drm_id,
+ __entry->mixer, DRM_RECT_ARG(&__entry->bounds))
+);
+
+TRACE_EVENT(dpu_crtc_vblank_enable,
+ TP_PROTO(uint32_t drm_id, uint32_t enc_id, bool enable,
+ struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enc_id, enable, crtc),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( uint32_t, enc_id )
+ __field( bool, enable )
+ __field( bool, enabled )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->enc_id = enc_id;
+ __entry->enable = enable;
+ __entry->enabled = crtc->enabled;
+ ),
+ TP_printk("id:%u encoder:%u enable:%s state{enabled:%s}",
+ __entry->drm_id, __entry->enc_id,
+ __entry->enable ? "true" : "false",
+ __entry->enabled ? "true" : "false")
+);
+
+DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( bool, enable )
+ __field( bool, enabled )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->enable = enable;
+ __entry->enabled = crtc->enabled;
+ ),
+ TP_printk("id:%u enable:%s state{enabled:%s}",
+ __entry->drm_id, __entry->enable ? "true" : "false",
+ __entry->enabled ? "true" : "false")
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_enable,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_disable,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_vblank,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc)
+);
+
+TRACE_EVENT(dpu_crtc_disable_frame_pending,
+ TP_PROTO(uint32_t drm_id, int frame_pending),
+ TP_ARGS(drm_id, frame_pending),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, frame_pending )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->frame_pending = frame_pending;
+ ),
+ TP_printk("id:%u frame_pending:%d", __entry->drm_id,
+ __entry->frame_pending)
+);
+
+TRACE_EVENT(dpu_plane_set_scanout,
+ TP_PROTO(struct dpu_sw_pipe *pipe, struct dpu_hw_fmt_layout *layout),
+ TP_ARGS(pipe, layout),
+ TP_STRUCT__entry(
+ __field( enum dpu_sspp, index )
+ __field_struct( struct dpu_hw_fmt_layout, layout )
+ __field( enum dpu_sspp_multirect_index, multirect_index)
+ ),
+ TP_fast_assign(
+ __entry->index = pipe->sspp->idx;
+ __entry->layout = *layout;
+ __entry->multirect_index = pipe->multirect_index;
+ ),
+ TP_printk("index:%d layout:{%ux%u @ [%u/%u, %u/%u, %u/%u, %u/%u]} "
+ "multirect_index:%d", __entry->index, __entry->layout.width,
+ __entry->layout.height, __entry->layout.plane_addr[0],
+ __entry->layout.plane_size[0],
+ __entry->layout.plane_addr[1],
+ __entry->layout.plane_size[1],
+ __entry->layout.plane_addr[2],
+ __entry->layout.plane_size[2],
+ __entry->layout.plane_addr[3],
+ __entry->layout.plane_size[3], __entry->multirect_index)
+);
+
+TRACE_EVENT(dpu_plane_disable,
+ TP_PROTO(uint32_t drm_id, bool is_virtual, uint32_t multirect_mode),
+ TP_ARGS(drm_id, is_virtual, multirect_mode),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( bool, is_virtual )
+ __field( uint32_t, multirect_mode )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->is_virtual = is_virtual;
+ __entry->multirect_mode = multirect_mode;
+ ),
+ TP_printk("id:%u is_virtual:%s multirect_mode:%u", __entry->drm_id,
+ __entry->is_virtual ? "true" : "false",
+ __entry->multirect_mode)
+);
+
+DECLARE_EVENT_CLASS(dpu_rm_iter_template,
+ TP_PROTO(uint32_t id, uint32_t enc_id),
+ TP_ARGS(id, enc_id),
+ TP_STRUCT__entry(
+ __field( uint32_t, id )
+ __field( uint32_t, enc_id )
+ ),
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->enc_id = enc_id;
+ ),
+ TP_printk("id:%d enc_id:%u", __entry->id, __entry->enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_intf,
+ TP_PROTO(uint32_t id, uint32_t enc_id),
+ TP_ARGS(id, enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_ctls,
+ TP_PROTO(uint32_t id, uint32_t enc_id),
+ TP_ARGS(id, enc_id)
+);
+
+TRACE_EVENT(dpu_rm_reserve_lms,
+ TP_PROTO(uint32_t id, uint32_t enc_id, uint32_t pp_id),
+ TP_ARGS(id, enc_id, pp_id),
+ TP_STRUCT__entry(
+ __field( uint32_t, id )
+ __field( uint32_t, enc_id )
+ __field( uint32_t, pp_id )
+ ),
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->enc_id = enc_id;
+ __entry->pp_id = pp_id;
+ ),
+ TP_printk("id:%d enc_id:%u pp_id:%u", __entry->id,
+ __entry->enc_id, __entry->pp_id)
+);
+
+TRACE_EVENT(dpu_vbif_wait_xin_halt_fail,
+ TP_PROTO(enum dpu_vbif index, u32 xin_id),
+ TP_ARGS(index, xin_id),
+ TP_STRUCT__entry(
+ __field( enum dpu_vbif, index )
+ __field( u32, xin_id )
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->xin_id = xin_id;
+ ),
+ TP_printk("index:%d xin_id:%u", __entry->index, __entry->xin_id)
+);
+
+TRACE_EVENT(dpu_pp_connect_ext_te,
+ TP_PROTO(enum dpu_pingpong pp, u32 cfg),
+ TP_ARGS(pp, cfg),
+ TP_STRUCT__entry(
+ __field( enum dpu_pingpong, pp )
+ __field( u32, cfg )
+ ),
+ TP_fast_assign(
+ __entry->pp = pp;
+ __entry->cfg = cfg;
+ ),
+ TP_printk("pp:%d cfg:%u", __entry->pp, __entry->cfg)
+);
+
+TRACE_EVENT(dpu_intf_connect_ext_te,
+ TP_PROTO(enum dpu_intf intf, u32 cfg),
+ TP_ARGS(intf, cfg),
+ TP_STRUCT__entry(
+ __field( enum dpu_intf, intf )
+ __field( u32, cfg )
+ ),
+ TP_fast_assign(
+ __entry->intf = intf;
+ __entry->cfg = cfg;
+ ),
+ TP_printk("intf:%d cfg:%u", __entry->intf, __entry->cfg)
+);
+
+TRACE_EVENT(dpu_core_irq_register_callback,
+ TP_PROTO(int irq_idx, void *callback),
+ TP_ARGS(irq_idx, callback),
+ TP_STRUCT__entry(
+ __field( int, irq_idx )
+ __field( void *, callback)
+ ),
+ TP_fast_assign(
+ __entry->irq_idx = irq_idx;
+ __entry->callback = callback;
+ ),
+ TP_printk("irq_idx:%d callback:%ps", __entry->irq_idx,
+ __entry->callback)
+);
+
+TRACE_EVENT(dpu_core_irq_unregister_callback,
+ TP_PROTO(int irq_idx),
+ TP_ARGS(irq_idx),
+ TP_STRUCT__entry(
+ __field( int, irq_idx )
+ ),
+ TP_fast_assign(
+ __entry->irq_idx = irq_idx;
+ ),
+ TP_printk("irq_idx:%d", __entry->irq_idx)
+);
+
+TRACE_EVENT(dpu_core_perf_update_clk,
+ TP_PROTO(struct drm_device *dev, bool stop_req, u64 clk_rate),
+ TP_ARGS(dev, stop_req, clk_rate),
+ TP_STRUCT__entry(
+ __string( dev_name, dev->unique )
+ __field( bool, stop_req )
+ __field( u64, clk_rate )
+ ),
+ TP_fast_assign(
+ __assign_str(dev_name, dev->unique);
+ __entry->stop_req = stop_req;
+ __entry->clk_rate = clk_rate;
+ ),
+ TP_printk("dev:%s stop_req:%s clk_rate:%llu", __get_str(dev_name),
+ __entry->stop_req ? "true" : "false", __entry->clk_rate)
+);
+
+TRACE_EVENT(dpu_hw_ctl_update_pending_flush,
+ TP_PROTO(u32 new_bits, u32 pending_mask),
+ TP_ARGS(new_bits, pending_mask),
+ TP_STRUCT__entry(
+ __field( u32, new_bits )
+ __field( u32, pending_mask )
+ ),
+ TP_fast_assign(
+ __entry->new_bits = new_bits;
+ __entry->pending_mask = pending_mask;
+ ),
+ TP_printk("new=%x existing=%x", __entry->new_bits,
+ __entry->pending_mask)
+);
+
+DECLARE_EVENT_CLASS(dpu_hw_ctl_pending_flush_template,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush),
+ TP_STRUCT__entry(
+ __field( u32, pending_mask )
+ __field( u32, ctl_flush )
+ ),
+ TP_fast_assign(
+ __entry->pending_mask = pending_mask;
+ __entry->ctl_flush = ctl_flush;
+ ),
+ TP_printk("pending_mask=%x CTL_FLUSH=%x", __entry->pending_mask,
+ __entry->ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_clear_pending_flush,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template,
+ dpu_hw_ctl_trigger_pending_flush,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_trigger_prepare,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_trigger_start,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush)
+);
+
+#define DPU_ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0)
+#define DPU_ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1)
+#define DPU_ATRACE_FUNC() DPU_ATRACE_BEGIN(__func__)
+
+#define DPU_ATRACE_INT(name, value) \
+ trace_dpu_trace_counter(current->tgid, name, value)
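+
+/*
+ * Illustrative usage sketch ("crtc_commit" and frame_pending are made-up
+ * names): bracket a code path so it shows up as a named slice in an
+ * atrace/perfetto capture, and emit a counter alongside it:
+ *
+ *     DPU_ATRACE_BEGIN("crtc_commit");
+ *     ...
+ *     DPU_ATRACE_END("crtc_commit");
+ *     DPU_ATRACE_INT("frame_pending", frame_pending);
+ */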
+
+#endif /* _DPU_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
new file mode 100644
index 0000000000..1305e250b7
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+
+#include "dpu_vbif.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_trace.h"
+
+static struct dpu_hw_vbif *dpu_get_vbif(struct dpu_kms *dpu_kms, enum dpu_vbif vbif_idx)
+{
+ if (vbif_idx < ARRAY_SIZE(dpu_kms->hw_vbif))
+ return dpu_kms->hw_vbif[vbif_idx];
+
+ return NULL;
+}
+
+static const char *dpu_vbif_name(enum dpu_vbif idx)
+{
+ switch (idx) {
+ case VBIF_RT:
+ return "VBIF_RT";
+ case VBIF_NRT:
+ return "VBIF_NRT";
+ default:
+ return "??";
+ }
+}
+
+/**
+ * _dpu_vbif_wait_for_xin_halt - wait for the xin to halt
+ * @vbif: Pointer to hardware vbif driver
+ * @xin_id: Client interface identifier
+ * @return: 0 if success; error code otherwise
+ */
+static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
+{
+ ktime_t timeout;
+ bool status;
+ int rc;
+
+ if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
+ DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
+ return -EINVAL;
+ }
+
+ timeout = ktime_add_us(ktime_get(), vbif->cap->xin_halt_timeout);
+ for (;;) {
+ status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+ if (status)
+ break;
+ if (ktime_compare_safe(ktime_get(), timeout) > 0) {
+ status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+ break;
+ }
+ usleep_range(501, 1000);
+ }
+
+ if (!status) {
+ rc = -ETIMEDOUT;
+ DPU_ERROR("%s client %d not halting. TIMEDOUT.\n",
+ dpu_vbif_name(vbif->idx), xin_id);
+ } else {
+ rc = 0;
+ DRM_DEBUG_ATOMIC("%s client %d is halted\n",
+ dpu_vbif_name(vbif->idx), xin_id);
+ }
+
+ return rc;
+}
+
+/**
+ * _dpu_vbif_apply_dynamic_ot_limit - determine OT based on usecase parameters
+ * @vbif: Pointer to hardware vbif driver
+ * @ot_lim: Pointer to OT limit to be modified
+ * @params: Pointer to usecase parameters
+ */
+static void _dpu_vbif_apply_dynamic_ot_limit(struct dpu_hw_vbif *vbif,
+ u32 *ot_lim, struct dpu_vbif_set_ot_params *params)
+{
+ u64 pps;
+ const struct dpu_vbif_dynamic_ot_tbl *tbl;
+ u32 i;
+
+ if (!vbif || !(vbif->cap->features & BIT(DPU_VBIF_QOS_OTLIM)))
+ return;
+
+ /* Dynamic OT setting done only for WFD */
+ if (!params->is_wfd)
+ return;
+
+ pps = params->frame_rate;
+ pps *= params->width;
+ pps *= params->height;
+
+ tbl = params->rd ? &vbif->cap->dynamic_ot_rd_tbl :
+ &vbif->cap->dynamic_ot_wr_tbl;
+
+ for (i = 0; i < tbl->count; i++) {
+ if (pps <= tbl->cfg[i].pps) {
+ *ot_lim = tbl->cfg[i].ot_limit;
+ break;
+ }
+ }
+
+ DRM_DEBUG_ATOMIC("%s xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
+ dpu_vbif_name(vbif->idx), params->xin_id,
+ params->width, params->height, params->frame_rate,
+ pps, *ot_lim);
+}
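+
+/*
+ * Worked example (illustrative): a 1920x1080 WFD read client at 60 fps gives
+ * pps = 60 * 1920 * 1080 = 124416000; the first dynamic_ot_rd_tbl entry whose
+ * pps threshold is >= that value supplies the OT limit.
+ */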
+
+/**
+ * _dpu_vbif_get_ot_limit - get OT based on usecase & configuration parameters
+ * @vbif: Pointer to hardware vbif driver
+ * @params: Pointer to usecase parameters
+ * @return: OT limit
+ */
+static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
+ struct dpu_vbif_set_ot_params *params)
+{
+ u32 ot_lim = 0;
+ u32 val;
+
+ if (!vbif || !vbif->cap) {
+ DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
+ /* this function returns a u32; 0 means no OT limit is configured */
+ return 0;
+ }
+
+ if (vbif->cap->default_ot_wr_limit && !params->rd)
+ ot_lim = vbif->cap->default_ot_wr_limit;
+ else if (vbif->cap->default_ot_rd_limit && params->rd)
+ ot_lim = vbif->cap->default_ot_rd_limit;
+
+ /*
+ * If default ot is not set from dt/catalog,
+ * then do not configure it.
+ */
+ if (ot_lim == 0)
+ goto exit;
+
+ /* Modify the limits if the target and the use case requires it */
+ _dpu_vbif_apply_dynamic_ot_limit(vbif, &ot_lim, params);
+
+ if (vbif && vbif->ops.get_limit_conf) {
+ val = vbif->ops.get_limit_conf(vbif,
+ params->xin_id, params->rd);
+ if (val == ot_lim)
+ ot_lim = 0;
+ }
+
+exit:
+ DRM_DEBUG_ATOMIC("%s xin:%d ot_lim:%d\n",
+ dpu_vbif_name(vbif->idx), params->xin_id, ot_lim);
+ return ot_lim;
+}
+
+/**
+ * dpu_vbif_set_ot_limit - set OT based on usecase & configuration parameters
+ * @dpu_kms: DPU handler
+ * @params: Pointer to usecase parameters
+ *
+ * Note this function would block waiting for bus halt.
+ */
+void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_ot_params *params)
+{
+ struct dpu_hw_vbif *vbif;
+ struct dpu_hw_mdp *mdp;
+ bool forced_on = false;
+ u32 ot_lim;
+ int ret;
+
+ mdp = dpu_kms->hw_mdp;
+
+ vbif = dpu_get_vbif(dpu_kms, params->vbif_idx);
+ if (!vbif || !mdp) {
+ DRM_DEBUG_ATOMIC("invalid arguments vbif %d mdp %d\n",
+ vbif != NULL, mdp != NULL);
+ return;
+ }
+
+ if (!mdp->ops.setup_clk_force_ctrl ||
+ !vbif->ops.set_limit_conf ||
+ !vbif->ops.set_halt_ctrl)
+ return;
+
+ /* set write_gather_en for all write clients */
+ if (vbif->ops.set_write_gather_en && !params->rd)
+ vbif->ops.set_write_gather_en(vbif, params->xin_id);
+
+ ot_lim = _dpu_vbif_get_ot_limit(vbif, params) & 0xFF;
+
+ if (ot_lim == 0)
+ return;
+
+ trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim,
+ params->vbif_idx);
+
+ forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+ vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim);
+
+ vbif->ops.set_halt_ctrl(vbif, params->xin_id, true);
+
+ ret = _dpu_vbif_wait_for_xin_halt(vbif, params->xin_id);
+ if (ret)
+ trace_dpu_vbif_wait_xin_halt_fail(vbif->idx, params->xin_id);
+
+ vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
+
+ if (forced_on)
+ mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+}
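+
+/*
+ * Illustrative caller sketch (sspp, pipe_cfg and crtc are assumptions from
+ * the plane code, not defined here): program the read OT limit for an SSPP
+ * xin client:
+ *
+ *     struct dpu_vbif_set_ot_params ot_params = {
+ *             .xin_id = sspp->cap->xin_id,
+ *             .num = sspp->idx - SSPP_NONE,
+ *             .width = drm_rect_width(&pipe_cfg->src_rect),
+ *             .height = drm_rect_height(&pipe_cfg->src_rect),
+ *             .frame_rate = drm_mode_vrefresh(&crtc->mode),
+ *             .rd = true,
+ *             .vbif_idx = VBIF_RT,
+ *             .clk_ctrl = sspp->cap->clk_ctrl,
+ *     };
+ *
+ *     dpu_vbif_set_ot_limit(dpu_kms, &ot_params);
+ */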
+
+void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_qos_params *params)
+{
+ struct dpu_hw_vbif *vbif;
+ struct dpu_hw_mdp *mdp;
+ bool forced_on = false;
+ const struct dpu_vbif_qos_tbl *qos_tbl;
+ int i;
+
+ if (!params || !dpu_kms->hw_mdp) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+ mdp = dpu_kms->hw_mdp;
+
+ vbif = dpu_get_vbif(dpu_kms, params->vbif_idx);
+
+ if (!vbif || !vbif->cap) {
+ DPU_ERROR("invalid vbif %d\n", params->vbif_idx);
+ return;
+ }
+
+ if (!vbif->ops.set_qos_remap || !mdp->ops.setup_clk_force_ctrl) {
+ DRM_DEBUG_ATOMIC("qos remap not supported\n");
+ return;
+ }
+
+ qos_tbl = params->is_rt ? &vbif->cap->qos_rt_tbl :
+ &vbif->cap->qos_nrt_tbl;
+
+ if (!qos_tbl->npriority_lvl || !qos_tbl->priority_lvl) {
+ DRM_DEBUG_ATOMIC("qos tbl not defined\n");
+ return;
+ }
+
+ forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+ for (i = 0; i < qos_tbl->npriority_lvl; i++) {
+ DRM_DEBUG_ATOMIC("%s xin:%d lvl:%d/%d\n",
+ dpu_vbif_name(params->vbif_idx), params->xin_id, i,
+ qos_tbl->priority_lvl[i]);
+ vbif->ops.set_qos_remap(vbif, params->xin_id, i,
+ qos_tbl->priority_lvl[i]);
+ }
+
+ if (forced_on)
+ mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+}
+
+void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)
+{
+ struct dpu_hw_vbif *vbif;
+ u32 i, pnd, src;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ vbif = dpu_kms->hw_vbif[i];
+ if (vbif && vbif->ops.clear_errors) {
+ vbif->ops.clear_errors(vbif, &pnd, &src);
+ if (pnd || src) {
+ DRM_DEBUG_KMS("%s: pnd 0x%X, src 0x%X\n",
+ dpu_vbif_name(vbif->idx), pnd, src);
+ }
+ }
+ }
+}
+
+void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms)
+{
+ struct dpu_hw_vbif *vbif;
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ vbif = dpu_kms->hw_vbif[i];
+ if (vbif && vbif->cap && vbif->ops.set_mem_type) {
+ for (j = 0; j < vbif->cap->memtype_count; j++)
+ vbif->ops.set_mem_type(
+ vbif, j, vbif->cap->memtype[j]);
+ }
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
+{
+ char vbif_name[32];
+ struct dentry *entry, *debugfs_vbif;
+ int i, j;
+
+ entry = debugfs_create_dir("vbif", debugfs_root);
+
+ for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+ const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
+
+ snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
+
+ debugfs_vbif = debugfs_create_dir(vbif_name, entry);
+
+ debugfs_create_u32("features", 0600, debugfs_vbif,
+ (u32 *)&vbif->features);
+
+ debugfs_create_u32("xin_halt_timeout", 0400, debugfs_vbif,
+ (u32 *)&vbif->xin_halt_timeout);
+
+ debugfs_create_u32("default_rd_ot_limit", 0400, debugfs_vbif,
+ (u32 *)&vbif->default_ot_rd_limit);
+
+ debugfs_create_u32("default_wr_ot_limit", 0400, debugfs_vbif,
+ (u32 *)&vbif->default_ot_wr_limit);
+
+ for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
+ const struct dpu_vbif_dynamic_ot_cfg *cfg =
+ &vbif->dynamic_ot_rd_tbl.cfg[j];
+
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_rd_%d_pps", j);
+ debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
+ (u64 *)&cfg->pps);
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_rd_%d_ot_limit", j);
+ debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
+ (u32 *)&cfg->ot_limit);
+ }
+
+ for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
+ const struct dpu_vbif_dynamic_ot_cfg *cfg =
+ &vbif->dynamic_ot_wr_tbl.cfg[j];
+
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_wr_%d_pps", j);
+ debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
+ (u64 *)&cfg->pps);
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_wr_%d_ot_limit", j);
+ debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
+ (u32 *)&cfg->ot_limit);
+ }
+ }
+}
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
new file mode 100644
index 0000000000..ab490177d8
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DPU_VBIF_H__
+#define __DPU_VBIF_H__
+
+#include "dpu_kms.h"
+
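+/**
+ * struct dpu_vbif_set_ot_params - OT configuration parameters
+ * @xin_id: client interface identifier
+ * @num: pipe identifier (debug only)
+ * @width: source width in pixels
+ * @height: source height in pixels
+ * @frame_rate: frames per second
+ * @rd: true for a read client, false for a write client
+ * @is_wfd: true if the client is used in a WFD use case
+ * @vbif_idx: vbif identifier
+ * @clk_ctrl: clock control identifier of the xin
+ */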
+struct dpu_vbif_set_ot_params {
+ u32 xin_id;
+ u32 num;
+ u32 width;
+ u32 height;
+ u32 frame_rate;
+ bool rd;
+ bool is_wfd;
+ u32 vbif_idx;
+ u32 clk_ctrl;
+};
+
+struct dpu_vbif_set_memtype_params {
+ u32 xin_id;
+ u32 vbif_idx;
+ u32 clk_ctrl;
+ bool is_cacheable;
+};
+
+/**
+ * struct dpu_vbif_set_qos_params - QoS remapper parameter
+ * @vbif_idx: vbif identifier
+ * @xin_id: client interface identifier
+ * @clk_ctrl: clock control identifier of the xin
+ * @num: pipe identifier (debug only)
+ * @is_rt: true if pipe is used in real-time use case
+ */
+struct dpu_vbif_set_qos_params {
+ u32 vbif_idx;
+ u32 xin_id;
+ u32 clk_ctrl;
+ u32 num;
+ bool is_rt;
+};
+
+/**
+ * dpu_vbif_set_ot_limit - set OT limit for vbif client
+ * @dpu_kms: DPU handler
+ * @params: Pointer to OT configuration parameters
+ */
+void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_ot_params *params);
+
+/**
+ * dpu_vbif_set_qos_remap - set QoS priority level remap
+ * @dpu_kms: DPU handler
+ * @params: Pointer to QoS configuration parameters
+ */
+void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_qos_params *params);
+
+/**
+ * dpu_vbif_clear_errors - clear any vbif errors
+ * @dpu_kms: DPU handler
+ */
+void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_vbif_init_memtypes - initialize xin memory types for vbif
+ * @dpu_kms: DPU handler
+ */
+void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms);
+
+void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
+
+#endif /* __DPU_VBIF_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
new file mode 100644
index 0000000000..2a5a683665
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <drm/drm_edid.h>
+
+#include "dpu_writeback.h"
+
+static int dpu_wb_conn_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+
+ /*
+ * We should ideally limit the modes only to the maxlinewidth, but on some
+ * chipsets that would allow even 4k modes to be added, and those would
+ * then fail the per-SSPP bandwidth checks. So, until dual-SSPP and source
+ * split support are added, limit the modes based on max_mixer_width; 4K
+ * modes can be supported after that.
+ */
+ return drm_add_modes_noedid(connector, dpu_kms->catalog->caps->max_mixer_width,
+ dev->mode_config.max_height);
+}
+
+static const struct drm_connector_funcs dpu_wb_conn_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int dpu_wb_conn_prepare_job(struct drm_writeback_connector *connector,
+ struct drm_writeback_job *job)
+{
+ struct dpu_wb_connector *dpu_wb_conn = to_dpu_wb_conn(connector);
+
+ if (!job->fb)
+ return 0;
+
+ dpu_encoder_prepare_wb_job(dpu_wb_conn->wb_enc, job);
+
+ return 0;
+}
+
+static void dpu_wb_conn_cleanup_job(struct drm_writeback_connector *connector,
+ struct drm_writeback_job *job)
+{
+ struct dpu_wb_connector *dpu_wb_conn = to_dpu_wb_conn(connector);
+
+ if (!job->fb)
+ return;
+
+ dpu_encoder_cleanup_wb_job(dpu_wb_conn->wb_enc, job);
+}
+
+static const struct drm_connector_helper_funcs dpu_wb_conn_helper_funcs = {
+ .get_modes = dpu_wb_conn_get_modes,
+ .prepare_writeback_job = dpu_wb_conn_prepare_job,
+ .cleanup_writeback_job = dpu_wb_conn_cleanup_job,
+};
+
+int dpu_writeback_init(struct drm_device *dev, struct drm_encoder *enc,
+ const u32 *format_list, u32 num_formats)
+{
+ struct dpu_wb_connector *dpu_wb_conn;
+ int rc = 0;
+
+ dpu_wb_conn = devm_kzalloc(dev->dev, sizeof(*dpu_wb_conn), GFP_KERNEL);
+ if (!dpu_wb_conn)
+ return -ENOMEM;
+
+ drm_connector_helper_add(&dpu_wb_conn->base.base, &dpu_wb_conn_helper_funcs);
+
+ /*
+ * DPU initializes the encoder and sets it up completely for writeback
+ * cases and hence should use the new API
+ * drm_writeback_connector_init_with_encoder to initialize the writeback
+ * connector.
+ */
+ rc = drm_writeback_connector_init_with_encoder(dev, &dpu_wb_conn->base, enc,
+ &dpu_wb_conn_funcs, format_list, num_formats);
+
+ if (!rc)
+ dpu_wb_conn->wb_enc = enc;
+
+ return rc;
+}
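+
+/*
+ * Illustrative usage sketch (wb_formats and the encoder variable are
+ * assumptions from the calling KMS code): called once the writeback encoder
+ * has been fully set up:
+ *
+ *     ret = dpu_writeback_init(dev, encoder, wb_formats,
+ *                              ARRAY_SIZE(wb_formats));
+ *     if (ret)
+ *             DPU_ERROR("writeback connector init failed, ret = %d\n", ret);
+ */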
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h
new file mode 100644
index 0000000000..5a75ea9161
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DPU_WRITEBACK_H
+#define _DPU_WRITEBACK_H
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_file.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_writeback.h>
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include "dpu_encoder_phys.h"
+
+struct dpu_wb_connector {
+ struct drm_writeback_connector base;
+ struct drm_encoder *wb_enc;
+};
+
+static inline struct dpu_wb_connector *to_dpu_wb_conn(struct drm_writeback_connector *conn)
+{
+ return container_of(conn, struct dpu_wb_connector, base);
+}
+
+int dpu_writeback_init(struct drm_device *dev, struct drm_encoder *enc,
+ const u32 *format_list, u32 num_formats);
+
+#endif /*_DPU_WRITEBACK_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
new file mode 100644
index 0000000000..9fc9dbde8a
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
@@ -0,0 +1,1155 @@
+#ifndef __MEDIA_INFO_H__
+#define __MEDIA_INFO_H__
+
+#ifndef MSM_MEDIA_ALIGN
+#define MSM_MEDIA_ALIGN(__sz, __align) (((__align) & ((__align) - 1)) ?\
+ ((((__sz) + (__align) - 1) / (__align)) * (__align)) :\
+ (((__sz) + (__align) - 1) & (~((__align) - 1))))
+#endif
+
+#ifndef MSM_MEDIA_ROUNDUP
+#define MSM_MEDIA_ROUNDUP(__sz, __r) (((__sz) + ((__r) - 1)) / (__r))
+#endif
+
+#ifndef MSM_MEDIA_MAX
+#define MSM_MEDIA_MAX(__a, __b) ((__a) > (__b)?(__a):(__b))
+#endif
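+
+/*
+ * Worked examples (illustrative): MSM_MEDIA_ALIGN(1080, 32) = 1088 (a
+ * power-of-two alignment takes the mask path), MSM_MEDIA_ALIGN(67, 6) = 72
+ * (a non-power-of-two alignment takes the divide/multiply path), and
+ * MSM_MEDIA_ROUNDUP(1080, 2) = 540.
+ */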
+
+enum color_fmts {
+ /* Venus NV12:
+ * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+ * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * U V U V U V U V U V U V . . . . ^
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . --> Buffer size alignment
+ *
+ * Y_Stride : Width aligned to 128
+ * UV_Stride : Width aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * Extradata: Arbitrary (software-imposed) padding
+ * Total size = align(Y_Stride * Y_Scanlines
+ * + UV_Stride * UV_Scanlines
+ * + max(Extradata, Y_Stride * 8), 4096)
+ */
+ COLOR_FMT_NV12,
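+
+ /* Worked example (illustrative) for 1920x1080 NV12, taking Extradata
+ * as 0:
+ * Y_Stride = UV_Stride = align(1920, 128) = 1920
+ * Y_Scanlines = align(1080, 32) = 1088
+ * UV_Scanlines = align(540, 16) = 544
+ * Total size = align(1920 * 1088 + 1920 * 544
+ * + max(0, 1920 * 8), 4096)
+ * = align(3148800, 4096) = 3149824 bytes
+ */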
+
+ /* Venus NV21:
+ * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+ * by an interleaved V/U plane containing 8 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * V U V U V U V U V U V U . . . . ^
+ * V U V U V U V U V U V U . . . . |
+ * V U V U V U V U V U V U . . . . |
+ * V U V U V U V U V U V U . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . --> Padding & Buffer size alignment
+ *
+ * Y_Stride : Width aligned to 128
+ * UV_Stride : Width aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * Extradata: Arbitrary (software-imposed) padding
+ * Total size = align(Y_Stride * Y_Scanlines
+ * + UV_Stride * UV_Scanlines
+ * + max(Extradata, Y_Stride * 8), 4096)
+ */
+ COLOR_FMT_NV21,
+ /* Venus NV12_MVTB:
+ * Two YUV 4:2:0 images/views one after the other
+ * in a top-bottom layout, same as NV12
+ * with a plane of 8 bit Y samples followed
+ * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | |
+ * . . . . . . . . . . . . . . . . | View_1
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V |
+ * U V U V U V U V U V U V . . . . ^ |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . UV_Scanlines |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V V
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | |
+ * . . . . . . . . . . . . . . . . | View_2
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V |
+ * U V U V U V U V U V U V . . . . ^ |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . UV_Scanlines |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V V
+ * . . . . . . . . . . . . . . . . --> Buffer size alignment
+ *
+ * Y_Stride : Width aligned to 128
+ * UV_Stride : Width aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * View_1 begin at: 0 (zero)
+ * View_2 begin at: Y_Stride * Y_Scanlines + UV_Stride * UV_Scanlines
+ * Extradata: Arbitrary (software-imposed) padding
+ * Total size = align((2*(Y_Stride * Y_Scanlines)
+ * + 2*(UV_Stride * UV_Scanlines) + Extradata), 4096)
+ */
+ COLOR_FMT_NV12_MVTB,
+ /*
+ * The buffer can be of 2 types:
+ * (1) Venus NV12 UBWC Progressive
+ * (2) Venus NV12 UBWC Interlaced
+ *
+ * (1) Venus NV12 UBWC Progressive Buffer Format:
+ * Compressed Macro-tile format for NV12.
+ * Contains 4 planes in the following order -
+ * (A) Y_Meta_Plane
+ * (B) Y_UBWC_Plane
+ * (C) UV_Meta_Plane
+ * (D) UV_UBWC_Plane
+ *
+ * Y_Meta_Plane consists of meta information to decode compressed
+ * tile data in Y_UBWC_Plane.
+ * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+ * UBWC decoder block will use the Y_Meta_Plane data together with
+ * Y_UBWC_Plane data to produce loss-less uncompressed 8 bit Y samples.
+ *
+ * UV_Meta_Plane consists of meta information to decode compressed
+ * tile data in UV_UBWC_Plane.
+ * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+ * UBWC decoder block will use UV_Meta_Plane data together with
+ * UV_UBWC_Plane data to produce loss-less uncompressed 8 bit 2x2
+ * subsampled color difference samples.
+ *
+ * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+ * and randomly accessible. There is no dependency between tiles.
+ *
+ * <----- Y_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_Y_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <--Compressed tile Y Stride--->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----- UV_Meta_Stride ---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <--Compressed tile UV Stride--->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ * Y_Stride = align(Width, 128)
+ * UV_Stride = align(Width, 128)
+ * Y_Scanlines = align(Height, 32)
+ * UV_Scanlines = align(Height/2, 16)
+ * Y_UBWC_Plane_size = align(Y_Stride * Y_Scanlines, 4096)
+ * UV_UBWC_Plane_size = align(UV_Stride * UV_Scanlines, 4096)
+ * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+ * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+ * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+ * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align( Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+ * Y_Meta_Plane_size + UV_Meta_Plane_size
+ * + max(Extradata, Y_Stride * 48), 4096)
+ *
+ *
+ * (2) Venus NV12 UBWC Interlaced Buffer Format:
+ * Compressed Macro-tile format for NV12 interlaced.
+ * Contains 8 planes in the following order -
+ * (A) Y_Meta_Top_Field_Plane
+ * (B) Y_UBWC_Top_Field_Plane
+ * (C) UV_Meta_Top_Field_Plane
+ * (D) UV_UBWC_Top_Field_Plane
+ * (E) Y_Meta_Bottom_Field_Plane
+ * (F) Y_UBWC_Bottom_Field_Plane
+ * (G) UV_Meta_Bottom_Field_Plane
+ * (H) UV_UBWC_Bottom_Field_Plane
+ * Y_Meta_Top_Field_Plane consists of meta information to decode
+ * compressed tile data for Y_UBWC_Top_Field_Plane.
+ * Y_UBWC_Top_Field_Plane consists of Y data in compressed macro-tile
+ * format for top field of an interlaced frame.
+ * UBWC decoder block will use the Y_Meta_Top_Field_Plane data together
+ * with Y_UBWC_Top_Field_Plane data to produce loss-less uncompressed
+ * 8 bit Y samples for top field of an interlaced frame.
+ *
+ * UV_Meta_Top_Field_Plane consists of meta information to decode
+ * compressed tile data in UV_UBWC_Top_Field_Plane.
+ * UV_UBWC_Top_Field_Plane consists of UV data in compressed macro-tile
+ * format for top field of an interlaced frame.
+ * UBWC decoder block will use UV_Meta_Top_Field_Plane data together
+ * with UV_UBWC_Top_Field_Plane data to produce loss-less uncompressed
+ * 8 bit subsampled color difference samples for top field of an
+ * interlaced frame.
+ *
+ * Each tile in Y_UBWC_Top_Field_Plane/UV_UBWC_Top_Field_Plane is
+ * independently decodable and randomly accessible. There is no
+ * dependency between tiles.
+ *
+ * Y_Meta_Bottom_Field_Plane consists of meta information to decode
+ * compressed tile data for Y_UBWC_Bottom_Field_Plane.
+ * Y_UBWC_Bottom_Field_Plane consists of Y data in compressed macro-tile
+ * format for bottom field of an interlaced frame.
+ * UBWC decoder block will use the Y_Meta_Bottom_Field_Plane data
+ * together with Y_UBWC_Bottom_Field_Plane data to produce loss-less
+ * uncompressed 8 bit Y samples for bottom field of an interlaced frame.
+ *
+ * UV_Meta_Bottom_Field_Plane consists of meta information to decode
+ * compressed tile data in UV_UBWC_Bottom_Field_Plane.
+ * UV_UBWC_Bottom_Field_Plane consists of UV data in compressed
+ * macro-tile format for bottom field of an interlaced frame.
+ * UBWC decoder block will use UV_Meta_Bottom_Field_Plane data together
+ * with UV_UBWC_Bottom_Field_Plane data to produce loss-less
+ * uncompressed 8 bit subsampled color difference samples for bottom
+ * field of an interlaced frame.
+ *
+ * Each tile in Y_UBWC_Bottom_Field_Plane/UV_UBWC_Bottom_Field_Plane is
+ * independently decodable and randomly accessible. There is no
+ * dependency between tiles.
+ *
+ * <-----Y_TF_Meta_Stride---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Half_height |
+ * M M M M M M M M M M M M . . | Meta_Y_TF_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-Compressed tile Y_TF Stride->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_TF_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----UV_TF_Meta_Stride---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_TF_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <-Compressed tile UV_TF Stride->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_TF_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <-----Y_BF_Meta_Stride---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Half_height |
+ * M M M M M M M M M M M M . . | Meta_Y_BF_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-Compressed tile Y_BF Stride->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_BF_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----UV_BF_Meta_Stride---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_BF_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <-Compressed tile UV_BF Stride->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_BF_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ * Half_height = (Height+1)>>1
+ * Y_TF_Stride = align(Width, 128)
+ * UV_TF_Stride = align(Width, 128)
+ * Y_TF_Scanlines = align(Half_height, 32)
+ * UV_TF_Scanlines = align((Half_height+1)/2, 32)
+ * Y_UBWC_TF_Plane_size = align(Y_TF_Stride * Y_TF_Scanlines, 4096)
+ * UV_UBWC_TF_Plane_size = align(UV_TF_Stride * UV_TF_Scanlines, 4096)
+ * Y_TF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_TF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+ * Y_TF_Meta_Plane_size =
+ * align(Y_TF_Meta_Stride * Y_TF_Meta_Scanlines, 4096)
+ * UV_TF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_TF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+ * UV_TF_Meta_Plane_size =
+ * align(UV_TF_Meta_Stride * UV_TF_Meta_Scanlines, 4096)
+ * Y_BF_Stride = align(Width, 128)
+ * UV_BF_Stride = align(Width, 128)
+ * Y_BF_Scanlines = align(Half_height, 32)
+ * UV_BF_Scanlines = align((Half_height+1)/2, 32)
+ * Y_UBWC_BF_Plane_size = align(Y_BF_Stride * Y_BF_Scanlines, 4096)
+ * UV_UBWC_BF_Plane_size = align(UV_BF_Stride * UV_BF_Scanlines, 4096)
+ * Y_BF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_BF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+ * Y_BF_Meta_Plane_size =
+ * align(Y_BF_Meta_Stride * Y_BF_Meta_Scanlines, 4096)
+ * UV_BF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_BF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+ * UV_BF_Meta_Plane_size =
+ * align(UV_BF_Meta_Stride * UV_BF_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align( Y_UBWC_TF_Plane_size + UV_UBWC_TF_Plane_size +
+ * Y_TF_Meta_Plane_size + UV_TF_Meta_Plane_size +
+ * Y_UBWC_BF_Plane_size + UV_UBWC_BF_Plane_size +
+ * Y_BF_Meta_Plane_size + UV_BF_Meta_Plane_size +
+ * max(Extradata, Y_TF_Stride * 48), 4096)
+ */
+ COLOR_FMT_NV12_UBWC,
+ /* Venus NV12 10-bit UBWC:
+ * Compressed Macro-tile format for NV12.
+ * Contains 4 planes in the following order -
+ * (A) Y_Meta_Plane
+ * (B) Y_UBWC_Plane
+ * (C) UV_Meta_Plane
+ * (D) UV_UBWC_Plane
+ *
+ * Y_Meta_Plane consists of meta information to decode compressed
+ * tile data in Y_UBWC_Plane.
+ * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+ * UBWC decoder block will use the Y_Meta_Plane data together with
+ * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+ *
+ * UV_Meta_Plane consists of meta information to decode compressed
+ * tile data in UV_UBWC_Plane.
+ * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+ * UBWC decoder block will use UV_Meta_Plane data together with
+ * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+ * subsampled color difference samples.
+ *
+ * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+ * and randomly accessible. There is no dependency between tiles.
+ *
+ * <----- Y_Meta_Stride ----->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_Y_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <--Compressed tile Y Stride--->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----- UV_Meta_Stride ---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <--Compressed tile UV Stride--->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ *
+ * Y_Stride = align(align(Width, 192) * 4/3, 256)
+ * UV_Stride = align(align(Width, 192) * 4/3, 256)
+ * Y_Scanlines = align(Height, 16)
+ * UV_Scanlines = align((Height+1)/2, 16)
+ * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+ * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+ * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+ * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+ * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+ * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+ * Y_Meta_Plane_size + UV_Meta_Plane_size
+ * + max(Extradata, Y_Stride * 48), 4096)
+ */
+ COLOR_FMT_NV12_BPP10_UBWC,
+ /* Venus RGBA8888 format:
+ * Contains 1 plane in the following order -
+ * (A) RGBA plane
+ *
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 4, 128)
+ * RGB_Scanlines = align(Height, 32)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Plane_size + Extradata, 4096)
+ */
+ COLOR_FMT_RGBA8888,
+ /* Venus RGBA8888 UBWC format:
+ * Contains 2 planes in the following order -
+ * (A) Meta plane
+ * (B) RGBA plane
+ *
+ * <--- RGB_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_RGB_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 4, 256)
+ * RGB_Scanlines = align(Height, 16)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+ * RGB_Meta_Scanlines = align(roundup(Height, RGB_TileHeight), 16)
+ * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+ * RGB_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+ * Extradata, 4096)
+ */
+ COLOR_FMT_RGBA8888_UBWC,
+ /* Venus RGBA1010102 UBWC format:
+ * Contains 2 planes in the following order -
+ * (A) Meta plane
+ * (B) RGBA plane
+ *
+ * <--- RGB_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_RGB_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 4, 256)
+ * RGB_Scanlines = align(Height, 16)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+ * RGB_Meta_Scanlines = align(roundup(Height, RGB_TileHeight), 16)
+ * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+ * RGB_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+ * Extradata, 4096)
+ */
+ COLOR_FMT_RGBA1010102_UBWC,
+ /* Venus RGB565 UBWC format:
+ * Contains 2 planes in the following order -
+ * (A) Meta plane
+ * (B) RGB plane
+ *
+ * <--- RGB_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_RGB_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 2, 256)
+ * RGB_Scanlines = align(Height, 16)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+ * RGB_Meta_Scanlines = align(roundup(Height, RGB_TileHeight), 16)
+ * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+ * RGB_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+ * Extradata, 4096)
+ */
+ COLOR_FMT_RGB565_UBWC,
+ /* P010 UBWC:
+ * Compressed Macro-tile format for P010.
+ * Contains 4 planes in the following order -
+ * (A) Y_Meta_Plane
+ * (B) Y_UBWC_Plane
+ * (C) UV_Meta_Plane
+ * (D) UV_UBWC_Plane
+ *
+ * Y_Meta_Plane consists of meta information to decode compressed
+ * tile data in Y_UBWC_Plane.
+ * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+ * UBWC decoder block will use the Y_Meta_Plane data together with
+ * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+ *
+ * UV_Meta_Plane consists of meta information to decode compressed
+ * tile data in UV_UBWC_Plane.
+ * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+ * UBWC decoder block will use UV_Meta_Plane data together with
+ * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+ * subsampled color difference samples.
+ *
+ * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+ * and randomly accessible. There is no dependency between tiles.
+ *
+ * <----- Y_Meta_Stride ----->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_Y_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <--Compressed tile Y Stride--->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----- UV_Meta_Stride ---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <--Compressed tile UV Stride--->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ *
+ * Y_Stride = align(Width * 2, 256)
+ * UV_Stride = align(Width * 2, 256)
+ * Y_Scanlines = align(Height, 16)
+ * UV_Scanlines = align((Height+1)/2, 16)
+ * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+ * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+ * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+ * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+ * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+ * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+ * Y_Meta_Plane_size + UV_Meta_Plane_size
+ * + max(Extradata, Y_Stride * 48), 4096)
+ */
+ COLOR_FMT_P010_UBWC,
+ /* Venus P010:
+ * YUV 4:2:0 image with a plane of 10 bit Y samples followed
+ * by an interleaved U/V plane containing 10 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * U V U V U V U V U V U V . . . . ^
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . --> Buffer size alignment
+ *
+ * Y_Stride : Width * 2 aligned to 128
+ * UV_Stride : Width * 2 aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: (Height+1)/2 aligned to 16
+ * Extradata: Arbitrary (software-imposed) padding
+ * Total size = align(Y_Stride * Y_Scanlines
+ * + UV_Stride * UV_Scanlines
+ * + max(Extradata, Y_Stride * 8), 4096)
+ */
+ COLOR_FMT_P010,
+};
+
+#define COLOR_FMT_RGBA1010102_UBWC COLOR_FMT_RGBA1010102_UBWC
+#define COLOR_FMT_RGB565_UBWC COLOR_FMT_RGB565_UBWC
+#define COLOR_FMT_P010_UBWC COLOR_FMT_P010_UBWC
+#define COLOR_FMT_P010 COLOR_FMT_P010
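The helpers below lean on two alignment macros defined earlier in this header (outside the portion shown in this hunk). As a reading aid, here is a minimal sketch of their assumed behavior; treat it as an illustrative equivalent, not the verbatim definitions:

/* Assumed behavior of the alignment helpers used throughout this file:
 * MSM_MEDIA_ALIGN rounds __sz up to the next multiple of __align, and
 * MSM_MEDIA_ROUNDUP is a ceiling division (__sz expressed in units of
 * __r, e.g. a pixel width converted into a count of tiles).
 */
#define MSM_MEDIA_ALIGN(__sz, __align) \
	((((__sz) + (__align) - 1) / (__align)) * (__align))
#define MSM_MEDIA_ROUNDUP(__sz, __r) (((__sz) + ((__r) - 1)) / (__r))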
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
+{
+ unsigned int stride = 0;
+
+ if (!width)
+ return 0;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_UBWC:
+ stride = MSM_MEDIA_ALIGN(width, 128);
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ stride = MSM_MEDIA_ALIGN(width, 192);
+ stride = MSM_MEDIA_ALIGN(stride * 4 / 3, 256);
+ break;
+ case COLOR_FMT_P010_UBWC:
+ stride = MSM_MEDIA_ALIGN(width * 2, 256);
+ break;
+ case COLOR_FMT_P010:
+ stride = MSM_MEDIA_ALIGN(width * 2, 128);
+ break;
+ }
+
+ return stride;
+}
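A quick worked example of the COLOR_FMT_NV12_BPP10_UBWC branch above, for a hypothetical 1920-pixel-wide frame: aligning the width to 192 first guarantees that the 4/3 byte expansion (three 10-bit samples packed per 4 bytes) lands on a 256-byte boundary, since 192 * 4/3 = 256.

	width                       = 1920
	MSM_MEDIA_ALIGN(1920, 192)  = 1920   (already a multiple of 192)
	1920 * 4 / 3                = 2560   bytes of packed 10-bit data
	MSM_MEDIA_ALIGN(2560, 256)  = 2560   -> stride returned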
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static unsigned int VENUS_UV_STRIDE(int color_fmt, int width)
+{
+ unsigned int stride = 0;
+
+ if (!width)
+ return 0;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_UBWC:
+ stride = MSM_MEDIA_ALIGN(width, 128);
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ stride = MSM_MEDIA_ALIGN(width, 192);
+ stride = MSM_MEDIA_ALIGN(stride * 4 / 3, 256);
+ break;
+ case COLOR_FMT_P010_UBWC:
+ stride = MSM_MEDIA_ALIGN(width * 2, 256);
+ break;
+ case COLOR_FMT_P010:
+ stride = MSM_MEDIA_ALIGN(width * 2, 128);
+ break;
+ }
+
+ return stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
+{
+ unsigned int sclines = 0;
+
+ if (!height)
+ return 0;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_UBWC:
+ case COLOR_FMT_P010:
+ sclines = MSM_MEDIA_ALIGN(height, 32);
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ sclines = MSM_MEDIA_ALIGN(height, 16);
+ break;
+ }
+
+ return sclines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
+{
+ unsigned int sclines = 0;
+
+ if (!height)
+ return 0;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ case COLOR_FMT_P010:
+ sclines = MSM_MEDIA_ALIGN((height + 1) >> 1, 16);
+ break;
+ case COLOR_FMT_NV12_UBWC:
+ sclines = MSM_MEDIA_ALIGN((height + 1) >> 1, 32);
+ break;
+ }
+
+ return sclines;
+}
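Per the Progressive/Interlaced notes on these helpers, interlaced buffers are sized one field at a time by passing the per-field height. A minimal sketch of that convention (the function name is hypothetical):

static void example_field_scanlines(int height, unsigned int *y_sc,
				    unsigned int *uv_sc)
{
	int half_h = (height + 1) >> 1;	/* per-field height */

	/* matches Y_TF_Scanlines / UV_TF_Scanlines in the format comment */
	*y_sc = VENUS_Y_SCANLINES(COLOR_FMT_NV12_UBWC, half_h);
	*uv_sc = VENUS_UV_SCANLINES(COLOR_FMT_NV12_UBWC, half_h);
}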
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
+{
+ int y_tile_width = 0, y_meta_stride;
+
+ if (!width)
+ return 0;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ y_tile_width = 32;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ y_tile_width = 48;
+ break;
+ default:
+ return 0;
+ }
+
+ y_meta_stride = MSM_MEDIA_ROUNDUP(width, y_tile_width);
+ return MSM_MEDIA_ALIGN(y_meta_stride, 64);
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
+{
+ int y_tile_height = 0, y_meta_scanlines;
+
+ if (!height)
+ return 0;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ y_tile_height = 8;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ y_tile_height = 4;
+ break;
+ default:
+ return 0;
+ }
+
+ y_meta_scanlines = MSM_MEDIA_ROUNDUP(height, y_tile_height);
+ return MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
+{
+ int uv_tile_width = 0, uv_meta_stride;
+
+ if (!width)
+ return 0;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ uv_tile_width = 16;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ uv_tile_width = 24;
+ break;
+ default:
+ return 0;
+ }
+
+ uv_meta_stride = MSM_MEDIA_ROUNDUP((width+1)>>1, uv_tile_width);
+ return MSM_MEDIA_ALIGN(uv_meta_stride, 64);
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
+{
+ int uv_tile_height = 0, uv_meta_scanlines;
+
+ if (!height)
+ return 0;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ uv_tile_height = 8;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ uv_tile_height = 4;
+ break;
+ default:
+ return 0;
+ }
+
+ uv_meta_scanlines = MSM_MEDIA_ROUNDUP((height+1)>>1, uv_tile_height);
+ return MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
+}
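With the stride, scanline and meta helpers all in place, the NV12 UBWC "Total size" expression from the format comment can be evaluated directly. A minimal sketch, not part of this header (the function name and the open-coded max() are illustrative):

static unsigned int example_nv12_ubwc_buffer_size(int width, int height)
{
	int fmt = COLOR_FMT_NV12_UBWC;
	unsigned int y_stride = VENUS_Y_STRIDE(fmt, width);
	unsigned int y_plane, uv_plane, y_meta, uv_meta, extra;

	y_plane = MSM_MEDIA_ALIGN(y_stride *
				  VENUS_Y_SCANLINES(fmt, height), 4096);
	uv_plane = MSM_MEDIA_ALIGN(VENUS_UV_STRIDE(fmt, width) *
				   VENUS_UV_SCANLINES(fmt, height), 4096);
	y_meta = MSM_MEDIA_ALIGN(VENUS_Y_META_STRIDE(fmt, width) *
				 VENUS_Y_META_SCANLINES(fmt, height), 4096);
	uv_meta = MSM_MEDIA_ALIGN(VENUS_UV_META_STRIDE(fmt, width) *
				  VENUS_UV_META_SCANLINES(fmt, height), 4096);
	/* Extradata = 8k per the format comment */
	extra = (y_stride * 48 > 8192) ? y_stride * 48 : 8192;

	return MSM_MEDIA_ALIGN(y_plane + uv_plane + y_meta + uv_meta + extra,
			       4096);
}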
+
+static unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
+{
+ unsigned int alignment = 0, bpp = 4;
+
+ if (!width)
+ return 0;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888:
+ alignment = 128;
+ break;
+ case COLOR_FMT_RGB565_UBWC:
+ alignment = 256;
+ bpp = 2;
+ break;
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ alignment = 256;
+ break;
+ default:
+ return 0;
+ }
+
+ return MSM_MEDIA_ALIGN(width * bpp, alignment);
+}
+
+static unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
+{
+ unsigned int alignment = 0;
+
+ if (!height)
+ return 0;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888:
+ alignment = 32;
+ break;
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ alignment = 16;
+ break;
+ default:
+ return 0;
+ }
+
+ return MSM_MEDIA_ALIGN(height, alignment);
+}
+
+static unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width)
+{
+ int rgb_meta_stride;
+
+ if (!width)
+ return 0;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, 16);
+ return MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
+ }
+
+ return 0;
+}
+
+static unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height)
+{
+ int rgb_meta_scanlines;
+
+ if (!height)
+ return 0;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, 4);
+ return MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
+ }
+
+ return 0;
+}
+
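The RGB variants compose the same way; a minimal sketch of the documented RGBA8888 UBWC total size, again with a hypothetical function name:

static unsigned int example_rgba8888_ubwc_buffer_size(int width, int height)
{
	int fmt = COLOR_FMT_RGBA8888_UBWC;
	unsigned int rgb_plane, meta_plane;

	rgb_plane = MSM_MEDIA_ALIGN(VENUS_RGB_STRIDE(fmt, width) *
				    VENUS_RGB_SCANLINES(fmt, height), 4096);
	meta_plane = MSM_MEDIA_ALIGN(VENUS_RGB_META_STRIDE(fmt, width) *
				     VENUS_RGB_META_SCANLINES(fmt, height),
				     4096);

	/* Extradata = 8k per the format comment */
	return MSM_MEDIA_ALIGN(meta_plane + rgb_plane + 8192, 4096);
}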
+#endif
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
new file mode 100644
index 0000000000..cc8fde4508
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
@@ -0,0 +1,1181 @@
+#ifndef MDP4_XML
+#define MDP4_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42)
+
+Copyright (C) 2013-2022 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum mdp4_pipe {
+ VG1 = 0,
+ VG2 = 1,
+ RGB1 = 2,
+ RGB2 = 3,
+ RGB3 = 4,
+ VG3 = 5,
+ VG4 = 6,
+};
+
+enum mdp4_mixer {
+ MIXER0 = 0,
+ MIXER1 = 1,
+ MIXER2 = 2,
+};
+
+enum mdp4_intf {
+ INTF_LCDC_DTV = 0,
+ INTF_DSI_VIDEO = 1,
+ INTF_DSI_CMD = 2,
+ INTF_EBI2_TV = 3,
+};
+
+enum mdp4_cursor_format {
+ CURSOR_ARGB = 1,
+ CURSOR_XRGB = 2,
+};
+
+enum mdp4_frame_format {
+ FRAME_LINEAR = 0,
+ FRAME_TILE_ARGB_4X4 = 1,
+ FRAME_TILE_YCBCR_420 = 2,
+};
+
+enum mdp4_scale_unit {
+ SCALE_FIR = 0,
+ SCALE_MN_PHASE = 1,
+ SCALE_PIXEL_RPT = 2,
+};
+
+enum mdp4_dma {
+ DMA_P = 0,
+ DMA_S = 1,
+ DMA_E = 2,
+};
+
+#define MDP4_IRQ_OVERLAY0_DONE 0x00000001
+#define MDP4_IRQ_OVERLAY1_DONE 0x00000002
+#define MDP4_IRQ_DMA_S_DONE 0x00000004
+#define MDP4_IRQ_DMA_E_DONE 0x00000008
+#define MDP4_IRQ_DMA_P_DONE 0x00000010
+#define MDP4_IRQ_VG1_HISTOGRAM 0x00000020
+#define MDP4_IRQ_VG2_HISTOGRAM 0x00000040
+#define MDP4_IRQ_PRIMARY_VSYNC 0x00000080
+#define MDP4_IRQ_PRIMARY_INTF_UDERRUN 0x00000100
+#define MDP4_IRQ_EXTERNAL_VSYNC 0x00000200
+#define MDP4_IRQ_EXTERNAL_INTF_UDERRUN 0x00000400
+#define MDP4_IRQ_PRIMARY_RDPTR 0x00000800
+#define MDP4_IRQ_DMA_P_HISTOGRAM 0x00020000
+#define MDP4_IRQ_DMA_S_HISTOGRAM 0x04000000
+#define MDP4_IRQ_OVERLAY2_DONE 0x40000000
+#define REG_MDP4_VERSION 0x00000000
+#define MDP4_VERSION_MINOR__MASK 0x00ff0000
+#define MDP4_VERSION_MINOR__SHIFT 16
+static inline uint32_t MDP4_VERSION_MINOR(uint32_t val)
+{
+ return ((val) << MDP4_VERSION_MINOR__SHIFT) & MDP4_VERSION_MINOR__MASK;
+}
+#define MDP4_VERSION_MAJOR__MASK 0xff000000
+#define MDP4_VERSION_MAJOR__SHIFT 24
+static inline uint32_t MDP4_VERSION_MAJOR(uint32_t val)
+{
+ return ((val) << MDP4_VERSION_MAJOR__SHIFT) & MDP4_VERSION_MAJOR__MASK;
+}
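These inline helpers pack a field value for writing; reading a field back is the mirror operation with the same mask and shift. A hypothetical decode sketch (not part of the generated header):

static inline uint32_t mdp4_version_major(uint32_t reg_val)
{
	/* reg_val is the raw contents of REG_MDP4_VERSION */
	return (reg_val & MDP4_VERSION_MAJOR__MASK) >>
	       MDP4_VERSION_MAJOR__SHIFT;
}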
+
+#define REG_MDP4_OVLP0_KICK 0x00000004
+
+#define REG_MDP4_OVLP1_KICK 0x00000008
+
+#define REG_MDP4_OVLP2_KICK 0x000000d0
+
+#define REG_MDP4_DMA_P_KICK 0x0000000c
+
+#define REG_MDP4_DMA_S_KICK 0x00000010
+
+#define REG_MDP4_DMA_E_KICK 0x00000014
+
+#define REG_MDP4_DISP_STATUS 0x00000018
+
+#define REG_MDP4_DISP_INTF_SEL 0x00000038
+#define MDP4_DISP_INTF_SEL_PRIM__MASK 0x00000003
+#define MDP4_DISP_INTF_SEL_PRIM__SHIFT 0
+static inline uint32_t MDP4_DISP_INTF_SEL_PRIM(enum mdp4_intf val)
+{
+ return ((val) << MDP4_DISP_INTF_SEL_PRIM__SHIFT) & MDP4_DISP_INTF_SEL_PRIM__MASK;
+}
+#define MDP4_DISP_INTF_SEL_SEC__MASK 0x0000000c
+#define MDP4_DISP_INTF_SEL_SEC__SHIFT 2
+static inline uint32_t MDP4_DISP_INTF_SEL_SEC(enum mdp4_intf val)
+{
+ return ((val) << MDP4_DISP_INTF_SEL_SEC__SHIFT) & MDP4_DISP_INTF_SEL_SEC__MASK;
+}
+#define MDP4_DISP_INTF_SEL_EXT__MASK 0x00000030
+#define MDP4_DISP_INTF_SEL_EXT__SHIFT 4
+static inline uint32_t MDP4_DISP_INTF_SEL_EXT(enum mdp4_intf val)
+{
+ return ((val) << MDP4_DISP_INTF_SEL_EXT__SHIFT) & MDP4_DISP_INTF_SEL_EXT__MASK;
+}
+#define MDP4_DISP_INTF_SEL_DSI_VIDEO 0x00000040
+#define MDP4_DISP_INTF_SEL_DSI_CMD 0x00000080
+
+#define REG_MDP4_RESET_STATUS 0x0000003c
+
+#define REG_MDP4_READ_CNFG 0x0000004c
+
+#define REG_MDP4_INTR_ENABLE 0x00000050
+
+#define REG_MDP4_INTR_STATUS 0x00000054
+
+#define REG_MDP4_INTR_CLEAR 0x00000058
+
+#define REG_MDP4_EBI2_LCD0 0x00000060
+
+#define REG_MDP4_EBI2_LCD1 0x00000064
+
+#define REG_MDP4_PORTMAP_MODE 0x00000070
+
+#define REG_MDP4_CS_CONTROLLER0 0x000000c0
+
+#define REG_MDP4_CS_CONTROLLER1 0x000000c4
+
+#define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE7_MIXER1 0x80000000
+
+#define REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD 0x000100fc
+
+#define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100
+#define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007
+#define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008
+#define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070
+#define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080
+#define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700
+#define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800
+#define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE7_MIXER1 0x80000000
+
+#define REG_MDP4_VG2_SRC_FORMAT 0x00030050
+
+#define REG_MDP4_VG2_CONST_COLOR 0x00031008
+
+#define REG_MDP4_OVERLAY_FLUSH 0x00018000
+#define MDP4_OVERLAY_FLUSH_OVLP0 0x00000001
+#define MDP4_OVERLAY_FLUSH_OVLP1 0x00000002
+#define MDP4_OVERLAY_FLUSH_VG1 0x00000004
+#define MDP4_OVERLAY_FLUSH_VG2 0x00000008
+#define MDP4_OVERLAY_FLUSH_RGB1 0x00000010
+#define MDP4_OVERLAY_FLUSH_RGB2 0x00000020
+
+static inline uint32_t __offset_OVLP(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00010000;
+ case 1: return 0x00018000;
+ case 2: return 0x00088000;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP4_OVLP(uint32_t i0) { return 0x00000000 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_CFG(uint32_t i0) { return 0x00000004 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_SIZE(uint32_t i0) { return 0x00000008 + __offset_OVLP(i0); }
+#define MDP4_OVLP_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_OVLP_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_OVLP_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_OVLP_SIZE_HEIGHT__SHIFT) & MDP4_OVLP_SIZE_HEIGHT__MASK;
+}
+#define MDP4_OVLP_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_OVLP_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_OVLP_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_OVLP_SIZE_WIDTH__SHIFT) & MDP4_OVLP_SIZE_WIDTH__MASK;
+}
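Field helpers like these are meant to be OR'd together into a single register value; a hypothetical usage sketch for the overlay size register (the function name is illustrative):

static inline uint32_t mdp4_ovlp_size_val(uint32_t width, uint32_t height)
{
	/* width packs into bits [15:0], height into bits [31:16];
	 * a driver would write the result to REG_MDP4_OVLP_SIZE(i).
	 */
	return MDP4_OVLP_SIZE_WIDTH(width) | MDP4_OVLP_SIZE_HEIGHT(height);
}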
+
+static inline uint32_t REG_MDP4_OVLP_BASE(uint32_t i0) { return 0x0000000c + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_STRIDE(uint32_t i0) { return 0x00000010 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_OPMODE(uint32_t i0) { return 0x00000014 + __offset_OVLP(i0); }
+
+static inline uint32_t __offset_STAGE(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00000104;
+ case 1: return 0x00000124;
+ case 2: return 0x00000144;
+ case 3: return 0x00000160;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP4_OVLP_STAGE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+#define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003
+#define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0
+static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp_alpha_type val)
+{
+ return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK;
+}
+#define MDP4_OVLP_STAGE_OP_FG_INV_ALPHA 0x00000004
+#define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008
+#define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030
+#define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4
+static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mdp_alpha_type val)
+{
+ return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK;
+}
+#define MDP4_OVLP_STAGE_OP_BG_INV_ALPHA 0x00000040
+#define MDP4_OVLP_STAGE_OP_BG_MOD_ALPHA 0x00000080
+#define MDP4_OVLP_STAGE_OP_FG_TRANSP 0x00000100
+#define MDP4_OVLP_STAGE_OP_BG_TRANSP 0x00000200
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t __offset_STAGE_CO3(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00001004;
+ case 1: return 0x00001404;
+ case 2: return 0x00001804;
+ case 3: return 0x00001b84;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP4_OVLP_STAGE_CO3(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_CO3_SEL(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); }
+#define MDP4_OVLP_STAGE_CO3_SEL_FG_ALPHA 0x00000001
+
+static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW0(uint32_t i0) { return 0x00000180 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW1(uint32_t i0) { return 0x00000184 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH0(uint32_t i0) { return 0x00000188 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH1(uint32_t i0) { return 0x0000018c + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_CONFIG(uint32_t i0) { return 0x00000200 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_CSC(uint32_t i0) { return 0x00002000 + __offset_OVLP(i0); }
+
+
+static inline uint32_t REG_MDP4_OVLP_CSC_MV(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_MV_VAL(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; }
+
+#define REG_MDP4_DMA_P_OP_MODE 0x00090070
+
+static inline uint32_t REG_MDP4_LUTN(uint32_t i0) { return 0x00094800 + 0x400*i0; }
+
+static inline uint32_t REG_MDP4_LUTN_LUT(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_LUTN_LUT_VAL(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; }
+
+#define REG_MDP4_DMA_S_OP_MODE 0x000a0028
+
+static inline uint32_t REG_MDP4_DMA_E_QUANT(uint32_t i0) { return 0x000b0070 + 0x4*i0; }
+
+static inline uint32_t __offset_DMA(enum mdp4_dma idx)
+{
+ switch (idx) {
+ case DMA_P: return 0x00090000;
+ case DMA_S: return 0x000a0000;
+ case DMA_E: return 0x000b0000;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP4_DMA(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
+#define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003
+#define MDP4_DMA_CONFIG_G_BPC__SHIFT 0
+static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK;
+}
+#define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c
+#define MDP4_DMA_CONFIG_B_BPC__SHIFT 2
+static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK;
+}
+#define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030
+#define MDP4_DMA_CONFIG_R_BPC__SHIFT 4
+static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK;
+}
+#define MDP4_DMA_CONFIG_PACK_ALIGN_MSB 0x00000080
+#define MDP4_DMA_CONFIG_PACK__MASK 0x0000ff00
+#define MDP4_DMA_CONFIG_PACK__SHIFT 8
+static inline uint32_t MDP4_DMA_CONFIG_PACK(uint32_t val)
+{
+ return ((val) << MDP4_DMA_CONFIG_PACK__SHIFT) & MDP4_DMA_CONFIG_PACK__MASK;
+}
+#define MDP4_DMA_CONFIG_DEFLKR_EN 0x01000000
+#define MDP4_DMA_CONFIG_DITHER_EN 0x01000000
+
+static inline uint32_t REG_MDP4_DMA_SRC_SIZE(enum mdp4_dma i0) { return 0x00000004 + __offset_DMA(i0); }
+#define MDP4_DMA_SRC_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_DMA_SRC_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT) & MDP4_DMA_SRC_SIZE_HEIGHT__MASK;
+}
+#define MDP4_DMA_SRC_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_DMA_SRC_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_DMA_SRC_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_DMA_SRC_SIZE_WIDTH__SHIFT) & MDP4_DMA_SRC_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_DMA_SRC_BASE(enum mdp4_dma i0) { return 0x00000008 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_SRC_STRIDE(enum mdp4_dma i0) { return 0x0000000c + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_DST_SIZE(enum mdp4_dma i0) { return 0x00000010 + __offset_DMA(i0); }
+#define MDP4_DMA_DST_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_DMA_DST_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_DMA_DST_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_DMA_DST_SIZE_HEIGHT__SHIFT) & MDP4_DMA_DST_SIZE_HEIGHT__MASK;
+}
+#define MDP4_DMA_DST_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_DMA_DST_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_DMA_DST_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_DMA_DST_SIZE_WIDTH__SHIFT) & MDP4_DMA_DST_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_SIZE(enum mdp4_dma i0) { return 0x00000044 + __offset_DMA(i0); }
+#define MDP4_DMA_CURSOR_SIZE_WIDTH__MASK 0x0000007f
+#define MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_DMA_CURSOR_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT) & MDP4_DMA_CURSOR_SIZE_WIDTH__MASK;
+}
+#define MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK 0x007f0000
+#define MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_DMA_CURSOR_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT) & MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK;
+}
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_BASE(enum mdp4_dma i0) { return 0x00000048 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_POS(enum mdp4_dma i0) { return 0x0000004c + __offset_DMA(i0); }
+#define MDP4_DMA_CURSOR_POS_X__MASK 0x0000ffff
+#define MDP4_DMA_CURSOR_POS_X__SHIFT 0
+static inline uint32_t MDP4_DMA_CURSOR_POS_X(uint32_t val)
+{
+ return ((val) << MDP4_DMA_CURSOR_POS_X__SHIFT) & MDP4_DMA_CURSOR_POS_X__MASK;
+}
+#define MDP4_DMA_CURSOR_POS_Y__MASK 0xffff0000
+#define MDP4_DMA_CURSOR_POS_Y__SHIFT 16
+static inline uint32_t MDP4_DMA_CURSOR_POS_Y(uint32_t val)
+{
+ return ((val) << MDP4_DMA_CURSOR_POS_Y__SHIFT) & MDP4_DMA_CURSOR_POS_Y__MASK;
+}
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_CONFIG(enum mdp4_dma i0) { return 0x00000060 + __offset_DMA(i0); }
+#define MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN 0x00000001
+#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK 0x00000006
+#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT 1
+static inline uint32_t MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(enum mdp4_cursor_format val)
+{
+ return ((val) << MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT) & MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK;
+}
+#define MDP4_DMA_CURSOR_BLEND_CONFIG_TRANSP_EN 0x00000008
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_PARAM(enum mdp4_dma i0) { return 0x00000064 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_LOW(enum mdp4_dma i0) { return 0x00000068 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_HIGH(enum mdp4_dma i0) { return 0x0000006c + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_FETCH_CONFIG(enum mdp4_dma i0) { return 0x00001004 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_CSC(enum mdp4_dma i0) { return 0x00003000 + __offset_DMA(i0); }
+
+
+static inline uint32_t REG_MDP4_DMA_CSC_MV(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_MV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_POST_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_POST_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_POST_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_POST_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_SRC_SIZE_HEIGHT__MASK;
+}
+#define MDP4_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SRC_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mdp4_pipe i0) { return 0x00020004 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_XY_Y__MASK 0xffff0000
+#define MDP4_PIPE_SRC_XY_Y__SHIFT 16
+static inline uint32_t MDP4_PIPE_SRC_XY_Y(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_XY_Y__SHIFT) & MDP4_PIPE_SRC_XY_Y__MASK;
+}
+#define MDP4_PIPE_SRC_XY_X__MASK 0x0000ffff
+#define MDP4_PIPE_SRC_XY_X__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_XY_X(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_XY_X__SHIFT) & MDP4_PIPE_SRC_XY_X__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_DST_SIZE(enum mdp4_pipe i0) { return 0x00020008 + 0x10000*i0; }
+#define MDP4_PIPE_DST_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_PIPE_DST_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_DST_SIZE_HEIGHT__MASK;
+}
+#define MDP4_PIPE_DST_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_PIPE_DST_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_PIPE_DST_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_DST_SIZE_WIDTH__SHIFT) & MDP4_PIPE_DST_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_DST_XY(enum mdp4_pipe i0) { return 0x0002000c + 0x10000*i0; }
+#define MDP4_PIPE_DST_XY_Y__MASK 0xffff0000
+#define MDP4_PIPE_DST_XY_Y__SHIFT 16
+static inline uint32_t MDP4_PIPE_DST_XY_Y(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_DST_XY_Y__SHIFT) & MDP4_PIPE_DST_XY_Y__MASK;
+}
+#define MDP4_PIPE_DST_XY_X__MASK 0x0000ffff
+#define MDP4_PIPE_DST_XY_X__SHIFT 0
+static inline uint32_t MDP4_PIPE_DST_XY_X(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_DST_XY_X__SHIFT) & MDP4_PIPE_DST_XY_X__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_SRCP0_BASE(enum mdp4_pipe i0) { return 0x00020010 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mdp4_pipe i0) { return 0x00020014 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mdp4_pipe i0) { return 0x00020018 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SRCP3_BASE(enum mdp4_pipe i0) { return 0x0002001c + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mdp4_pipe i0) { return 0x00020040 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff
+#define MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P0(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P0__MASK;
+}
+#define MDP4_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000
+#define MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT 16
+static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P1(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P1__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_B(enum mdp4_pipe i0) { return 0x00020044 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff
+#define MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P2(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P2__MASK;
+}
+#define MDP4_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000
+#define MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT 16
+static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P3(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P3__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_SSTILE_FRAME_SIZE(enum mdp4_pipe i0) { return 0x00020048 + 0x10000*i0; }
+#define MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__MASK;
+}
+#define MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mdp4_pipe i0) { return 0x00020050 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
+#define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c
+#define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030
+#define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0
+#define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100
+#define MDP4_PIPE_SRC_FORMAT_CPP__MASK 0x00000600
+#define MDP4_PIPE_SRC_FORMAT_CPP__SHIFT 9
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_CPP(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP4_PIPE_SRC_FORMAT_CPP__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_ROTATED_90 0x00001000
+#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00006000
+#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 13
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000
+#define MDP4_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
+#define MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__MASK 0x00180000
+#define MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__SHIFT 19
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_FETCH_PLANES(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__SHIFT) & MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_SOLID_FILL 0x00400000
+#define MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x0c000000
+#define MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 26
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp_chroma_samp_type val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__MASK 0x60000000
+#define MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__SHIFT 29
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT(enum mdp4_frame_format val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__SHIFT) & MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mdp4_pipe i0) { return 0x00020054 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff
+#define MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM0__MASK;
+}
+#define MDP4_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00
+#define MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT 8
+static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM1(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM1__MASK;
+}
+#define MDP4_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000
+#define MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT 16
+static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM2(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM2__MASK;
+}
+#define MDP4_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000
+#define MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT 24
+static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM3__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mdp4_pipe i0) { return 0x00020058 + 0x10000*i0; }
+#define MDP4_PIPE_OP_MODE_SCALEX_EN 0x00000001
+#define MDP4_PIPE_OP_MODE_SCALEY_EN 0x00000002
+#define MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__MASK 0x0000000c
+#define MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__SHIFT 2
+static inline uint32_t MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL(enum mdp4_scale_unit val)
+{
+ return ((val) << MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__SHIFT) & MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__MASK;
+}
+#define MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__MASK 0x00000030
+#define MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__SHIFT 4
+static inline uint32_t MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL(enum mdp4_scale_unit val)
+{
+ return ((val) << MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__SHIFT) & MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__MASK;
+}
+#define MDP4_PIPE_OP_MODE_SRC_YCBCR 0x00000200
+#define MDP4_PIPE_OP_MODE_DST_YCBCR 0x00000400
+#define MDP4_PIPE_OP_MODE_CSC_EN 0x00000800
+#define MDP4_PIPE_OP_MODE_FLIP_LR 0x00002000
+#define MDP4_PIPE_OP_MODE_FLIP_UD 0x00004000
+#define MDP4_PIPE_OP_MODE_DITHER_EN 0x00008000
+#define MDP4_PIPE_OP_MODE_IGC_LUT_EN 0x00010000
+#define MDP4_PIPE_OP_MODE_DEINT_EN 0x00040000
+#define MDP4_PIPE_OP_MODE_DEINT_ODD_REF 0x00080000
+
+static inline uint32_t REG_MDP4_PIPE_PHASEX_STEP(enum mdp4_pipe i0) { return 0x0002005c + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_PHASEY_STEP(enum mdp4_pipe i0) { return 0x00020060 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_FETCH_CONFIG(enum mdp4_pipe i0) { return 0x00021004 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SOLID_COLOR(enum mdp4_pipe i0) { return 0x00021008 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC(enum mdp4_pipe i0) { return 0x00024000 + 0x10000*i0; }
+
+
+static inline uint32_t REG_MDP4_PIPE_CSC_MV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_MV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
+
+#define REG_MDP4_LCDC 0x000c0000
+
+#define REG_MDP4_LCDC_ENABLE 0x000c0000
+
+#define REG_MDP4_LCDC_HSYNC_CTRL 0x000c0004
+#define MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
+#define MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT 0
+static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PULSEW(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK;
+}
+#define MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK 0xffff0000
+#define MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT 16
+static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PERIOD(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK;
+}
+
+#define REG_MDP4_LCDC_VSYNC_PERIOD 0x000c0008
+
+#define REG_MDP4_LCDC_VSYNC_LEN 0x000c000c
+
+#define REG_MDP4_LCDC_DISPLAY_HCTRL 0x000c0010
+#define MDP4_LCDC_DISPLAY_HCTRL_START__MASK 0x0000ffff
+#define MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT 0
+static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_START(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_START__MASK;
+}
+#define MDP4_LCDC_DISPLAY_HCTRL_END__MASK 0xffff0000
+#define MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT 16
+static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_END(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_END__MASK;
+}
+
+#define REG_MDP4_LCDC_DISPLAY_VSTART 0x000c0014
+
+#define REG_MDP4_LCDC_DISPLAY_VEND 0x000c0018
+
+#define REG_MDP4_LCDC_ACTIVE_HCTL 0x000c001c
+#define MDP4_LCDC_ACTIVE_HCTL_START__MASK 0x00007fff
+#define MDP4_LCDC_ACTIVE_HCTL_START__SHIFT 0
+static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_START(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_ACTIVE_HCTL_START__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_START__MASK;
+}
+#define MDP4_LCDC_ACTIVE_HCTL_END__MASK 0x7fff0000
+#define MDP4_LCDC_ACTIVE_HCTL_END__SHIFT 16
+static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_END(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_ACTIVE_HCTL_END__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_END__MASK;
+}
+#define MDP4_LCDC_ACTIVE_HCTL_ACTIVE_START_X 0x80000000
+
+#define REG_MDP4_LCDC_ACTIVE_VSTART 0x000c0020
+
+#define REG_MDP4_LCDC_ACTIVE_VEND 0x000c0024
+
+#define REG_MDP4_LCDC_BORDER_CLR 0x000c0028
+
+#define REG_MDP4_LCDC_UNDERFLOW_CLR 0x000c002c
+#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
+#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT 0
+static inline uint32_t MDP4_LCDC_UNDERFLOW_CLR_COLOR(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK;
+}
+#define MDP4_LCDC_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000
+
+#define REG_MDP4_LCDC_HSYNC_SKEW 0x000c0030
+
+#define REG_MDP4_LCDC_TEST_CNTL 0x000c0034
+
+#define REG_MDP4_LCDC_CTRL_POLARITY 0x000c0038
+#define MDP4_LCDC_CTRL_POLARITY_HSYNC_LOW 0x00000001
+#define MDP4_LCDC_CTRL_POLARITY_VSYNC_LOW 0x00000002
+#define MDP4_LCDC_CTRL_POLARITY_DATA_EN_LOW 0x00000004
+
+#define REG_MDP4_LCDC_LVDS_INTF_CTL 0x000c2000
+#define MDP4_LCDC_LVDS_INTF_CTL_MODE_SEL 0x00000004
+#define MDP4_LCDC_LVDS_INTF_CTL_RGB_OUT 0x00000008
+#define MDP4_LCDC_LVDS_INTF_CTL_CH_SWAP 0x00000010
+#define MDP4_LCDC_LVDS_INTF_CTL_CH1_RES_BIT 0x00000020
+#define MDP4_LCDC_LVDS_INTF_CTL_CH2_RES_BIT 0x00000040
+#define MDP4_LCDC_LVDS_INTF_CTL_ENABLE 0x00000080
+#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN 0x00000100
+#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN 0x00000200
+#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN 0x00000400
+#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE3_EN 0x00000800
+#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE0_EN 0x00001000
+#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE1_EN 0x00002000
+#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE2_EN 0x00004000
+#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE3_EN 0x00008000
+#define MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN 0x00010000
+#define MDP4_LCDC_LVDS_INTF_CTL_CH2_CLK_LANE_EN 0x00020000
+
+static inline uint32_t REG_MDP4_LCDC_LVDS_MUX_CTL(uint32_t i0) { return 0x000c2014 + 0x8*i0; }
+
+static inline uint32_t REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(uint32_t i0) { return 0x000c2014 + 0x8*i0; }
+#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__MASK 0x000000ff
+#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__SHIFT 0
+static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__MASK;
+}
+#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__MASK 0x0000ff00
+#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__SHIFT 8
+static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__MASK;
+}
+#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__MASK 0x00ff0000
+#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__SHIFT 16
+static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__MASK;
+}
+#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__MASK 0xff000000
+#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__SHIFT 24
+static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__MASK;
+}
+
+static inline uint32_t REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(uint32_t i0) { return 0x000c2018 + 0x8*i0; }
+#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__MASK 0x000000ff
+#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__SHIFT 0
+static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__MASK;
+}
+#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__MASK 0x0000ff00
+#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__SHIFT 8
+static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__MASK;
+}
+#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__MASK 0x00ff0000
+#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__SHIFT 16
+static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__MASK;
+}
+
+#define REG_MDP4_LCDC_LVDS_PHY_RESET 0x000c2034
+
+#define REG_MDP4_LVDS_PHY_PLL_CTRL_0 0x000c3000
+
+#define REG_MDP4_LVDS_PHY_PLL_CTRL_1 0x000c3004
+
+#define REG_MDP4_LVDS_PHY_PLL_CTRL_2 0x000c3008
+
+#define REG_MDP4_LVDS_PHY_PLL_CTRL_3 0x000c300c
+
+#define REG_MDP4_LVDS_PHY_PLL_CTRL_5 0x000c3014
+
+#define REG_MDP4_LVDS_PHY_PLL_CTRL_6 0x000c3018
+
+#define REG_MDP4_LVDS_PHY_PLL_CTRL_7 0x000c301c
+
+#define REG_MDP4_LVDS_PHY_PLL_CTRL_8 0x000c3020
+
+#define REG_MDP4_LVDS_PHY_PLL_CTRL_9 0x000c3024
+
+#define REG_MDP4_LVDS_PHY_PLL_LOCKED 0x000c3080
+
+#define REG_MDP4_LVDS_PHY_CFG2 0x000c3108
+
+#define REG_MDP4_LVDS_PHY_CFG0 0x000c3100
+#define MDP4_LVDS_PHY_CFG0_SERIALIZATION_ENBLE 0x00000010
+#define MDP4_LVDS_PHY_CFG0_CHANNEL0 0x00000040
+#define MDP4_LVDS_PHY_CFG0_CHANNEL1 0x00000080
+
+#define REG_MDP4_DTV 0x000d0000
+
+#define REG_MDP4_DTV_ENABLE 0x000d0000
+
+#define REG_MDP4_DTV_HSYNC_CTRL 0x000d0004
+#define MDP4_DTV_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
+#define MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT 0
+static inline uint32_t MDP4_DTV_HSYNC_CTRL_PULSEW(uint32_t val)
+{
+ return ((val) << MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DTV_HSYNC_CTRL_PULSEW__MASK;
+}
+#define MDP4_DTV_HSYNC_CTRL_PERIOD__MASK 0xffff0000
+#define MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT 16
+static inline uint32_t MDP4_DTV_HSYNC_CTRL_PERIOD(uint32_t val)
+{
+ return ((val) << MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DTV_HSYNC_CTRL_PERIOD__MASK;
+}
+
+#define REG_MDP4_DTV_VSYNC_PERIOD 0x000d0008
+
+#define REG_MDP4_DTV_VSYNC_LEN 0x000d000c
+
+#define REG_MDP4_DTV_DISPLAY_HCTRL 0x000d0018
+#define MDP4_DTV_DISPLAY_HCTRL_START__MASK 0x0000ffff
+#define MDP4_DTV_DISPLAY_HCTRL_START__SHIFT 0
+static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_START(uint32_t val)
+{
+ return ((val) << MDP4_DTV_DISPLAY_HCTRL_START__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_START__MASK;
+}
+#define MDP4_DTV_DISPLAY_HCTRL_END__MASK 0xffff0000
+#define MDP4_DTV_DISPLAY_HCTRL_END__SHIFT 16
+static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_END(uint32_t val)
+{
+ return ((val) << MDP4_DTV_DISPLAY_HCTRL_END__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_END__MASK;
+}
+
+#define REG_MDP4_DTV_DISPLAY_VSTART 0x000d001c
+
+#define REG_MDP4_DTV_DISPLAY_VEND 0x000d0020
+
+#define REG_MDP4_DTV_ACTIVE_HCTL 0x000d002c
+#define MDP4_DTV_ACTIVE_HCTL_START__MASK 0x00007fff
+#define MDP4_DTV_ACTIVE_HCTL_START__SHIFT 0
+static inline uint32_t MDP4_DTV_ACTIVE_HCTL_START(uint32_t val)
+{
+ return ((val) << MDP4_DTV_ACTIVE_HCTL_START__SHIFT) & MDP4_DTV_ACTIVE_HCTL_START__MASK;
+}
+#define MDP4_DTV_ACTIVE_HCTL_END__MASK 0x7fff0000
+#define MDP4_DTV_ACTIVE_HCTL_END__SHIFT 16
+static inline uint32_t MDP4_DTV_ACTIVE_HCTL_END(uint32_t val)
+{
+ return ((val) << MDP4_DTV_ACTIVE_HCTL_END__SHIFT) & MDP4_DTV_ACTIVE_HCTL_END__MASK;
+}
+#define MDP4_DTV_ACTIVE_HCTL_ACTIVE_START_X 0x80000000
+
+#define REG_MDP4_DTV_ACTIVE_VSTART 0x000d0030
+
+#define REG_MDP4_DTV_ACTIVE_VEND 0x000d0038
+
+#define REG_MDP4_DTV_BORDER_CLR 0x000d0040
+
+#define REG_MDP4_DTV_UNDERFLOW_CLR 0x000d0044
+#define MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
+#define MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT 0
+static inline uint32_t MDP4_DTV_UNDERFLOW_CLR_COLOR(uint32_t val)
+{
+ return ((val) << MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK;
+}
+#define MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000
+
+#define REG_MDP4_DTV_HSYNC_SKEW 0x000d0048
+
+#define REG_MDP4_DTV_TEST_CNTL 0x000d004c
+
+#define REG_MDP4_DTV_CTRL_POLARITY 0x000d0050
+#define MDP4_DTV_CTRL_POLARITY_HSYNC_LOW 0x00000001
+#define MDP4_DTV_CTRL_POLARITY_VSYNC_LOW 0x00000002
+#define MDP4_DTV_CTRL_POLARITY_DATA_EN_LOW 0x00000004
+
+#define REG_MDP4_DSI 0x000e0000
+
+#define REG_MDP4_DSI_ENABLE 0x000e0000
+
+#define REG_MDP4_DSI_HSYNC_CTRL 0x000e0004
+#define MDP4_DSI_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
+#define MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT 0
+static inline uint32_t MDP4_DSI_HSYNC_CTRL_PULSEW(uint32_t val)
+{
+ return ((val) << MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DSI_HSYNC_CTRL_PULSEW__MASK;
+}
+#define MDP4_DSI_HSYNC_CTRL_PERIOD__MASK 0xffff0000
+#define MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT 16
+static inline uint32_t MDP4_DSI_HSYNC_CTRL_PERIOD(uint32_t val)
+{
+ return ((val) << MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DSI_HSYNC_CTRL_PERIOD__MASK;
+}
+
+#define REG_MDP4_DSI_VSYNC_PERIOD 0x000e0008
+
+#define REG_MDP4_DSI_VSYNC_LEN 0x000e000c
+
+#define REG_MDP4_DSI_DISPLAY_HCTRL 0x000e0010
+#define MDP4_DSI_DISPLAY_HCTRL_START__MASK 0x0000ffff
+#define MDP4_DSI_DISPLAY_HCTRL_START__SHIFT 0
+static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_START(uint32_t val)
+{
+ return ((val) << MDP4_DSI_DISPLAY_HCTRL_START__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_START__MASK;
+}
+#define MDP4_DSI_DISPLAY_HCTRL_END__MASK 0xffff0000
+#define MDP4_DSI_DISPLAY_HCTRL_END__SHIFT 16
+static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_END(uint32_t val)
+{
+ return ((val) << MDP4_DSI_DISPLAY_HCTRL_END__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_END__MASK;
+}
+
+#define REG_MDP4_DSI_DISPLAY_VSTART 0x000e0014
+
+#define REG_MDP4_DSI_DISPLAY_VEND 0x000e0018
+
+#define REG_MDP4_DSI_ACTIVE_HCTL 0x000e001c
+#define MDP4_DSI_ACTIVE_HCTL_START__MASK 0x00007fff
+#define MDP4_DSI_ACTIVE_HCTL_START__SHIFT 0
+static inline uint32_t MDP4_DSI_ACTIVE_HCTL_START(uint32_t val)
+{
+ return ((val) << MDP4_DSI_ACTIVE_HCTL_START__SHIFT) & MDP4_DSI_ACTIVE_HCTL_START__MASK;
+}
+#define MDP4_DSI_ACTIVE_HCTL_END__MASK 0x7fff0000
+#define MDP4_DSI_ACTIVE_HCTL_END__SHIFT 16
+static inline uint32_t MDP4_DSI_ACTIVE_HCTL_END(uint32_t val)
+{
+ return ((val) << MDP4_DSI_ACTIVE_HCTL_END__SHIFT) & MDP4_DSI_ACTIVE_HCTL_END__MASK;
+}
+#define MDP4_DSI_ACTIVE_HCTL_ACTIVE_START_X 0x80000000
+
+#define REG_MDP4_DSI_ACTIVE_VSTART 0x000e0020
+
+#define REG_MDP4_DSI_ACTIVE_VEND 0x000e0024
+
+#define REG_MDP4_DSI_BORDER_CLR 0x000e0028
+
+#define REG_MDP4_DSI_UNDERFLOW_CLR 0x000e002c
+#define MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
+#define MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT 0
+static inline uint32_t MDP4_DSI_UNDERFLOW_CLR_COLOR(uint32_t val)
+{
+ return ((val) << MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK;
+}
+#define MDP4_DSI_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000
+
+#define REG_MDP4_DSI_HSYNC_SKEW 0x000e0030
+
+#define REG_MDP4_DSI_TEST_CNTL 0x000e0034
+
+#define REG_MDP4_DSI_CTRL_POLARITY 0x000e0038
+#define MDP4_DSI_CTRL_POLARITY_HSYNC_LOW 0x00000001
+#define MDP4_DSI_CTRL_POLARITY_VSYNC_LOW 0x00000002
+#define MDP4_DSI_CTRL_POLARITY_DATA_EN_LOW 0x00000004
+
+
+#endif /* MDP4_XML */
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
new file mode 100644
index 0000000000..3100957225
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -0,0 +1,666 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_flip_work.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "mdp4_kms.h"
+#include "msm_gem.h"
+
+struct mdp4_crtc {
+ struct drm_crtc base;
+ char name[8];
+ int id;
+ int ovlp;
+ enum mdp4_dma dma;
+ bool enabled;
+
+ /* which mixer/encoder we route output to: */
+ int mixer;
+
+ struct {
+ spinlock_t lock;
+ bool stale;
+ uint32_t width, height;
+ uint32_t x, y;
+
+ /* next cursor to scan-out: */
+ uint32_t next_iova;
+ struct drm_gem_object *next_bo;
+
+ /* current cursor being scanned out: */
+ struct drm_gem_object *scanout_bo;
+ } cursor;
+
+ /* if there is a pending flip, this will be non-null: */
+ struct drm_pending_vblank_event *event;
+
+ /* Bits that were flushed at the last commit; used to decide
+ * whether a vsync has happened since the last commit.
+ */
+ u32 flushed_mask;
+
+#define PENDING_CURSOR 0x1
+#define PENDING_FLIP 0x2
+ atomic_t pending;
+
+ /* for unref'ing cursor bo's after scanout completes: */
+ struct drm_flip_work unref_cursor_work;
+
+ struct mdp_irq vblank;
+ struct mdp_irq err;
+};
+#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)
+
+static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
+{
+ struct msm_drm_private *priv = crtc->dev->dev_private;
+ return to_mdp4_kms(to_mdp_kms(priv->kms));
+}
+
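+/* Request the given work (PENDING_CURSOR and/or PENDING_FLIP) to be
+ * handled from the next vblank: set the pending bits and make sure our
+ * vblank irq is registered so mdp4_crtc_vblank_irq() runs.
+ */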
+static void request_pending(struct drm_crtc *crtc, uint32_t pending)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+ atomic_or(pending, &mdp4_crtc->pending);
+ mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
+}
+
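+/* Collect the flush bits for each plane attached to the crtc plus the
+ * overlay engine itself, and kick the flush of the double-buffered
+ * registers. The written mask is remembered so that
+ * mdp4_crtc_wait_for_flush_done() can poll for the hw to clear it.
+ */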
+static void crtc_flush(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ struct drm_plane *plane;
+ uint32_t flush = 0;
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+ flush |= pipe2flush(pipe_id);
+ }
+
+ flush |= ovlp2flush(mdp4_crtc->ovlp);
+
+ DBG("%s: flush=%08x", mdp4_crtc->name, flush);
+
+ mdp4_crtc->flushed_mask = flush;
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
+}
+
+/* if file != NULL, this is the potential cancel-flip path from preclose */
+static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = mdp4_crtc->event;
+ if (event) {
+ mdp4_crtc->event = NULL;
+ DBG("%s: send event: %p", mdp4_crtc->name, event);
+ drm_crtc_send_vblank_event(crtc, event);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static void unref_cursor_worker(struct drm_flip_work *work, void *val)
+{
+ struct mdp4_crtc *mdp4_crtc =
+ container_of(work, struct mdp4_crtc, unref_cursor_work);
+ struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
+ struct msm_kms *kms = &mdp4_kms->base.base;
+
+ msm_gem_unpin_iova(val, kms->aspace);
+ drm_gem_object_put(val);
+}
+
+static void mdp4_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
+
+ kfree(mdp4_crtc);
+}
+
+/* statically (for now) map planes to mixer stage (z-order): */
+static const int idxs[] = {
+ [VG1] = 1,
+ [VG2] = 2,
+ [RGB1] = 0,
+ [RGB2] = 0,
+ [RGB3] = 0,
+ [VG3] = 3,
+ [VG4] = 4,
+};
+
+/* setup mixer config, for which we need to consider all crtcs and
+ * the planes attached to them
+ *
+ * TODO: may need some extra locking here
+ */
+static void setup_mixer(struct mdp4_kms *mdp4_kms)
+{
+ struct drm_mode_config *config = &mdp4_kms->dev->mode_config;
+ struct drm_crtc *crtc;
+ uint32_t mixer_cfg = 0;
+ static const enum mdp_mixer_stage_id stages[] = {
+ STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
+ };
+
+ list_for_each_entry(crtc, &config->crtc_list, head) {
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct drm_plane *plane;
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+ int idx = idxs[pipe_id];
+ mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
+ pipe_id, stages[idx]);
+ }
+ }
+
+ mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
+}
+
+static void blend_setup(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ struct drm_plane *plane;
+ int i, ovlp = mdp4_crtc->ovlp;
+ bool alpha[4] = { false, false, false, false };
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+ int idx = idxs[pipe_id];
+ if (idx > 0) {
+ const struct mdp_format *format =
+ to_mdp_format(msm_framebuffer_format(plane->state->fb));
+ alpha[idx-1] = format->alpha_enable;
+ }
+ }
+
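+ /* Stages whose attached plane format has per-pixel alpha blend with
+ * the foreground pixel alpha; all other stages use constant alpha
+ * (opaque FG over BG).
+ */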
+ for (i = 0; i < 4; i++) {
+ uint32_t op;
+
+ if (alpha[i]) {
+ op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_PIXEL) |
+ MDP4_OVLP_STAGE_OP_BG_ALPHA(FG_PIXEL) |
+ MDP4_OVLP_STAGE_OP_BG_INV_ALPHA;
+ } else {
+ op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
+ MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST);
+ }
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0xff);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0x00);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), op);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 1);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
+ }
+
+ setup_mixer(mdp4_kms);
+}
+
+static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ enum mdp4_dma dma = mdp4_crtc->dma;
+ int ovlp = mdp4_crtc->ovlp;
+ struct drm_display_mode *mode;
+
+ if (WARN_ON(!crtc->state))
+ return;
+
+ mode = &crtc->state->adjusted_mode;
+
+ DBG("%s: set mode: " DRM_MODE_FMT,
+ mdp4_crtc->name, DRM_MODE_ARG(mode));
+
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
+ MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
+ MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
+
+ /* take data from pipe: */
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
+ MDP4_DMA_DST_SIZE_WIDTH(0) |
+ MDP4_DMA_DST_SIZE_HEIGHT(0));
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
+ MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
+ MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), 0);
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
+
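+ /* DMA_E feeds the external (TV) interface; these presumably program
+ * full-range (0x00..0xff) quantization for each component.
+ */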
+ if (dma == DMA_E) {
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
+ }
+}
+
+static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ unsigned long flags;
+
+ DBG("%s", mdp4_crtc->name);
+
+ if (WARN_ON(!mdp4_crtc->enabled))
+ return;
+
+ /* Disable/save vblank irq handling before power is disabled */
+ drm_crtc_vblank_off(crtc);
+
+ mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
+ mdp4_disable(mdp4_kms);
+
+ if (crtc->state->event && !crtc->state->active) {
+ WARN_ON(mdp4_crtc->event);
+ spin_lock_irqsave(&mdp4_kms->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&mdp4_kms->dev->event_lock, flags);
+ }
+
+ mdp4_crtc->enabled = false;
+}
+
+static void mdp4_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+
+ DBG("%s", mdp4_crtc->name);
+
+ if (WARN_ON(mdp4_crtc->enabled))
+ return;
+
+ mdp4_enable(mdp4_kms);
+
+ /* Restore vblank irq handling after power is enabled */
+ drm_crtc_vblank_on(crtc);
+
+ mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);
+
+ crtc_flush(crtc);
+
+ mdp4_crtc->enabled = true;
+}
+
+static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ DBG("%s: check", mdp4_crtc->name);
+ /* TODO: anything else to check? */
+ return 0;
+}
+
+static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ DBG("%s: begin", mdp4_crtc->name);
+}
+
+static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event);
+
+ WARN_ON(mdp4_crtc->event);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ mdp4_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ blend_setup(crtc);
+ crtc_flush(crtc);
+ request_pending(crtc, PENDING_FLIP);
+}
+
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+
+/* called from IRQ to update cursor related registers (if needed). The
+ * cursor registers, other than x/y position, appear not to be double
+ * buffered, and changing them other than from vblank seems to trigger
+ * underflow.
+ */
+static void update_cursor(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ struct msm_kms *kms = &mdp4_kms->base.base;
+ enum mdp4_dma dma = mdp4_crtc->dma;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
+ if (mdp4_crtc->cursor.stale) {
+ struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
+ struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
+ uint64_t iova = mdp4_crtc->cursor.next_iova;
+
+ if (next_bo) {
+ /* take an obj ref + iova ref when we start scanning out: */
+ drm_gem_object_get(next_bo);
+ msm_gem_get_and_pin_iova(next_bo, kms->aspace, &iova);
+
+ /* enable cursor: */
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
+ MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
+ MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
+ MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
+ MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
+ } else {
+ /* disable cursor: */
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
+ mdp4_kms->blank_cursor_iova);
+ }
+
+ /* and drop the iova ref + obj ref when done scanning out: */
+ if (prev_bo)
+ drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);
+
+ mdp4_crtc->cursor.scanout_bo = next_bo;
+ mdp4_crtc->cursor.stale = false;
+ }
+
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
+ MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
+ MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));
+
+ spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
+}
+
+static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv, uint32_t handle,
+ uint32_t width, uint32_t height)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ struct msm_kms *kms = &mdp4_kms->base.base;
+ struct drm_device *dev = crtc->dev;
+ struct drm_gem_object *cursor_bo, *old_bo;
+ unsigned long flags;
+ uint64_t iova;
+ int ret;
+
+ if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
+ DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
+ return -EINVAL;
+ }
+
+ if (handle) {
+ cursor_bo = drm_gem_object_lookup(file_priv, handle);
+ if (!cursor_bo)
+ return -ENOENT;
+ } else {
+ cursor_bo = NULL;
+ }
+
+ if (cursor_bo) {
+ ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace, &iova);
+ if (ret)
+ goto fail;
+ } else {
+ iova = 0;
+ }
+
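+ /* Just stash the new cursor state here; the actual register writes
+ * happen from vblank in update_cursor(), since (other than x/y) the
+ * cursor registers are not double buffered.
+ */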
+ spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
+ old_bo = mdp4_crtc->cursor.next_bo;
+ mdp4_crtc->cursor.next_bo = cursor_bo;
+ mdp4_crtc->cursor.next_iova = iova;
+ mdp4_crtc->cursor.width = width;
+ mdp4_crtc->cursor.height = height;
+ mdp4_crtc->cursor.stale = true;
+ spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
+
+ if (old_bo) {
+ /* drop our previous reference: */
+ drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);
+ }
+
+ request_pending(crtc, PENDING_CURSOR);
+
+ return 0;
+
+fail:
+ drm_gem_object_put(cursor_bo);
+ return ret;
+}
+
+static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
+ mdp4_crtc->cursor.x = x;
+ mdp4_crtc->cursor.y = y;
+ spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
+
+ crtc_flush(crtc);
+ request_pending(crtc, PENDING_CURSOR);
+
+ return 0;
+}
+
+static const struct drm_crtc_funcs mdp4_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = mdp4_crtc_destroy,
+ .page_flip = drm_atomic_helper_page_flip,
+ .cursor_set = mdp4_crtc_cursor_set,
+ .cursor_move = mdp4_crtc_cursor_move,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = msm_crtc_enable_vblank,
+ .disable_vblank = msm_crtc_disable_vblank,
+};
+
+static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
+ .mode_set_nofb = mdp4_crtc_mode_set_nofb,
+ .atomic_check = mdp4_crtc_atomic_check,
+ .atomic_begin = mdp4_crtc_atomic_begin,
+ .atomic_flush = mdp4_crtc_atomic_flush,
+ .atomic_enable = mdp4_crtc_atomic_enable,
+ .atomic_disable = mdp4_crtc_atomic_disable,
+};
+
+static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
+ struct drm_crtc *crtc = &mdp4_crtc->base;
+ struct msm_drm_private *priv = crtc->dev->dev_private;
+ unsigned pending;
+
+ mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank);
+
+ pending = atomic_xchg(&mdp4_crtc->pending, 0);
+
+ if (pending & PENDING_FLIP) {
+ complete_flip(crtc, NULL);
+ }
+
+ if (pending & PENDING_CURSOR) {
+ update_cursor(crtc);
+ drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
+ }
+}
+
+static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
+ struct drm_crtc *crtc = &mdp4_crtc->base;
+ DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
+ crtc_flush(crtc);
+}
+
+static void mdp4_crtc_wait_for_flush_done(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ int ret;
+
+ ret = drm_crtc_vblank_get(crtc);
+ if (ret)
+ return;
+
+ ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
+ !(mdp4_read(mdp4_kms, REG_MDP4_OVERLAY_FLUSH) &
+ mdp4_crtc->flushed_mask),
+ msecs_to_jiffies(50));
+ if (ret <= 0)
+ dev_warn(dev->dev, "vblank timeout, crtc=%d\n", mdp4_crtc->id);
+
+ mdp4_crtc->flushed_mask = 0;
+
+ drm_crtc_vblank_put(crtc);
+}
+
+uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ return mdp4_crtc->vblank.irqmask;
+}
+
+/* set dma config, i.e. the format the encoder wants. */
+void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config);
+}
+
+/* set interface for routing crtc->encoder: */
+void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ uint32_t intf_sel;
+
+ intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL);
+
+ switch (mdp4_crtc->dma) {
+ case DMA_P:
+ intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK;
+ intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf);
+ break;
+ case DMA_S:
+ intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK;
+ intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf);
+ break;
+ case DMA_E:
+ intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK;
+ intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf);
+ break;
+ }
+
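+ /* DSI video and command mode are mutually exclusive in INTF_SEL: */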
+ if (intf == INTF_DSI_VIDEO) {
+ intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD;
+ intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO;
+ } else if (intf == INTF_DSI_CMD) {
+ intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO;
+ intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD;
+ }
+
+ mdp4_crtc->mixer = mixer;
+
+ blend_setup(crtc);
+
+ DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
+}
+
+void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc)
+{
+ /* wait_for_flush_done is the only case for now;
+ * later, command-mode CRTCs will have other events
+ * to wait for.
+ */
+ mdp4_crtc_wait_for_flush_done(crtc);
+}
+
+static const char *dma_names[] = {
+ "DMA_P", "DMA_S", "DMA_E",
+};
+
+/* initialize crtc */
+struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
+ struct drm_plane *plane, int id, int ovlp_id,
+ enum mdp4_dma dma_id)
+{
+ struct drm_crtc *crtc = NULL;
+ struct mdp4_crtc *mdp4_crtc;
+
+ mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
+ if (!mdp4_crtc)
+ return ERR_PTR(-ENOMEM);
+
+ crtc = &mdp4_crtc->base;
+
+ mdp4_crtc->id = id;
+
+ mdp4_crtc->ovlp = ovlp_id;
+ mdp4_crtc->dma = dma_id;
+
+ mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma);
+ mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq;
+
+ mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma);
+ mdp4_crtc->err.irq = mdp4_crtc_err_irq;
+
+ snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d",
+ dma_names[dma_id], ovlp_id);
+
+ spin_lock_init(&mdp4_crtc->cursor.lock);
+
+ drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
+ "unref cursor", unref_cursor_worker);
+
+ drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs,
+ NULL);
+ drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
+
+ return crtc;
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
new file mode 100644
index 0000000000..39b8fe53c2
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, Inforce Computing. All rights reserved.
+ *
+ * Author: Vinay Simha <vinaysimha@inforcecomputing.com>
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_probe_helper.h>
+
+#include "mdp4_kms.h"
+
+#ifdef CONFIG_DRM_MSM_DSI
+
+struct mdp4_dsi_encoder {
+ struct drm_encoder base;
+ struct drm_panel *panel;
+ bool enabled;
+};
+#define to_mdp4_dsi_encoder(x) container_of(x, struct mdp4_dsi_encoder, base)
+
+static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv = encoder->dev->dev_private;
+ return to_mdp4_kms(to_mdp_kms(priv->kms));
+}
+
+static void mdp4_dsi_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(mdp4_dsi_encoder);
+}
+
+static const struct drm_encoder_funcs mdp4_dsi_encoder_funcs = {
+ .destroy = mdp4_dsi_encoder_destroy,
+};
+
+static void mdp4_dsi_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+ uint32_t dsi_hsync_skew, vsync_period, vsync_len, ctrl_pol;
+ uint32_t display_v_start, display_v_end;
+ uint32_t hsync_start_x, hsync_end_x;
+
+ mode = adjusted_mode;
+
+ DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
+
+ ctrl_pol = 0;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ctrl_pol |= MDP4_DSI_CTRL_POLARITY_HSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ctrl_pol |= MDP4_DSI_CTRL_POLARITY_VSYNC_LOW;
+ /* probably need to get DATA_EN polarity from panel.. */
+
+ dsi_hsync_skew = 0; /* get this from panel? */
+
+ hsync_start_x = (mode->htotal - mode->hsync_start);
+ hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
+
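+ /* The vertical timing registers apparently count in pixel-clock
+ * ticks rather than lines, hence the multiplications by htotal.
+ */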
+ vsync_period = mode->vtotal * mode->htotal;
+ vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
+ display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dsi_hsync_skew;
+ display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dsi_hsync_skew - 1;
+
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_HSYNC_CTRL,
+ MDP4_DSI_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) |
+ MDP4_DSI_HSYNC_CTRL_PERIOD(mode->htotal));
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_VSYNC_PERIOD, vsync_period);
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_VSYNC_LEN, vsync_len);
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_HCTRL,
+ MDP4_DSI_DISPLAY_HCTRL_START(hsync_start_x) |
+ MDP4_DSI_DISPLAY_HCTRL_END(hsync_end_x));
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_VSTART, display_v_start);
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_VEND, display_v_end);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_CTRL_POLARITY, ctrl_pol);
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_UNDERFLOW_CLR,
+ MDP4_DSI_UNDERFLOW_CLR_ENABLE_RECOVERY |
+ MDP4_DSI_UNDERFLOW_CLR_COLOR(0xff));
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_HCTL,
+ MDP4_DSI_ACTIVE_HCTL_START(0) |
+ MDP4_DSI_ACTIVE_HCTL_END(0));
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_HSYNC_SKEW, dsi_hsync_skew);
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_BORDER_CLR, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_VSTART, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_VEND, 0);
+}
+
+static void mdp4_dsi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder);
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+
+ if (!mdp4_dsi_encoder->enabled)
+ return;
+
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
+
+ /*
+ * Wait for a vsync so we know ENABLE=0 has latched before the
+ * (connector) source of the vsyncs gets disabled. Otherwise we
+ * end up in a funny state if we re-enable before the disable
+ * latches, with the result that some of the settings changes
+ * for the new modeset (like the new scanout buffer) don't
+ * latch properly..
+ */
+ mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_PRIMARY_VSYNC);
+
+ mdp4_dsi_encoder->enabled = false;
+}
+
+static void mdp4_dsi_encoder_enable(struct drm_encoder *encoder)
+{
+ struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder);
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+
+ if (mdp4_dsi_encoder->enabled)
+ return;
+
+ mdp4_crtc_set_config(encoder->crtc,
+ MDP4_DMA_CONFIG_PACK_ALIGN_MSB |
+ MDP4_DMA_CONFIG_DEFLKR_EN |
+ MDP4_DMA_CONFIG_DITHER_EN |
+ MDP4_DMA_CONFIG_R_BPC(BPC8) |
+ MDP4_DMA_CONFIG_G_BPC(BPC8) |
+ MDP4_DMA_CONFIG_B_BPC(BPC8) |
+ MDP4_DMA_CONFIG_PACK(0x21));
+
+ mdp4_crtc_set_intf(encoder->crtc, INTF_DSI_VIDEO, 0);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 1);
+
+ mdp4_dsi_encoder->enabled = true;
+}
+
+static const struct drm_encoder_helper_funcs mdp4_dsi_encoder_helper_funcs = {
+ .mode_set = mdp4_dsi_encoder_mode_set,
+ .disable = mdp4_dsi_encoder_disable,
+ .enable = mdp4_dsi_encoder_enable,
+};
+
+/* initialize encoder */
+struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev)
+{
+ struct drm_encoder *encoder = NULL;
+ struct mdp4_dsi_encoder *mdp4_dsi_encoder;
+ int ret;
+
+ mdp4_dsi_encoder = kzalloc(sizeof(*mdp4_dsi_encoder), GFP_KERNEL);
+ if (!mdp4_dsi_encoder) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ encoder = &mdp4_dsi_encoder->base;
+
+ drm_encoder_init(dev, encoder, &mdp4_dsi_encoder_funcs,
+ DRM_MODE_ENCODER_DSI, NULL);
+ drm_encoder_helper_add(encoder, &mdp4_dsi_encoder_helper_funcs);
+
+ return encoder;
+
+fail:
+ if (encoder)
+ mdp4_dsi_encoder_destroy(encoder);
+
+ return ERR_PTR(ret);
+}
+#endif /* CONFIG_DRM_MSM_DSI */
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
new file mode 100644
index 0000000000..88645dbc37
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_probe_helper.h>
+
+#include "mdp4_kms.h"
+
+struct mdp4_dtv_encoder {
+ struct drm_encoder base;
+ struct clk *hdmi_clk;
+ struct clk *mdp_clk;
+ unsigned long pixclock;
+ bool enabled;
+ uint32_t bsc;
+};
+#define to_mdp4_dtv_encoder(x) container_of(x, struct mdp4_dtv_encoder, base)
+
+static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv = encoder->dev->dev_private;
+ return to_mdp4_kms(to_mdp_kms(priv->kms));
+}
+
+static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(mdp4_dtv_encoder);
+}
+
+static const struct drm_encoder_funcs mdp4_dtv_encoder_funcs = {
+ .destroy = mdp4_dtv_encoder_destroy,
+};
+
+static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+ uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
+ uint32_t display_v_start, display_v_end;
+ uint32_t hsync_start_x, hsync_end_x;
+
+ mode = adjusted_mode;
+
+ DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
+
+ mdp4_dtv_encoder->pixclock = mode->clock * 1000;
+
+ DBG("pixclock=%lu", mdp4_dtv_encoder->pixclock);
+
+ ctrl_pol = 0;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ctrl_pol |= MDP4_DTV_CTRL_POLARITY_HSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ctrl_pol |= MDP4_DTV_CTRL_POLARITY_VSYNC_LOW;
+ /* probably need to get DATA_EN polarity from panel.. */
+
+ dtv_hsync_skew = 0; /* get this from panel? */
+
+ hsync_start_x = (mode->htotal - mode->hsync_start);
+ hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
+
+ vsync_period = mode->vtotal * mode->htotal;
+ vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
+ display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
+ display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
+
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_CTRL,
+ MDP4_DTV_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) |
+ MDP4_DTV_HSYNC_CTRL_PERIOD(mode->htotal));
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_PERIOD, vsync_period);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_LEN, vsync_len);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_HCTRL,
+ MDP4_DTV_DISPLAY_HCTRL_START(hsync_start_x) |
+ MDP4_DTV_DISPLAY_HCTRL_END(hsync_end_x));
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VSTART, display_v_start);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VEND, display_v_end);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_BORDER_CLR, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_UNDERFLOW_CLR,
+ MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY |
+ MDP4_DTV_UNDERFLOW_CLR_COLOR(0xff));
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_SKEW, dtv_hsync_skew);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_CTRL_POLARITY, ctrl_pol);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_HCTL,
+ MDP4_DTV_ACTIVE_HCTL_START(0) |
+ MDP4_DTV_ACTIVE_HCTL_END(0));
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VSTART, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VEND, 0);
+}
+
+static void mdp4_dtv_encoder_disable(struct drm_encoder *encoder)
+{
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+
+ if (WARN_ON(!mdp4_dtv_encoder->enabled))
+ return;
+
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
+
+ /*
+ * Wait for a vsync so we know ENABLE=0 has latched before the
+ * (connector) source of the vsyncs gets disabled. Otherwise we
+ * end up in a funny state if we re-enable before the disable
+ * latches, with the result that some of the settings changes
+ * for the new modeset (like the new scanout buffer) don't
+ * latch properly..
+ */
+ mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC);
+
+ clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
+ clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk);
+
+ mdp4_dtv_encoder->enabled = false;
+}
+
+static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+ unsigned long pc = mdp4_dtv_encoder->pixclock;
+ int ret;
+
+ if (WARN_ON(mdp4_dtv_encoder->enabled))
+ return;
+
+ mdp4_crtc_set_config(encoder->crtc,
+ MDP4_DMA_CONFIG_R_BPC(BPC8) |
+ MDP4_DMA_CONFIG_G_BPC(BPC8) |
+ MDP4_DMA_CONFIG_B_BPC(BPC8) |
+ MDP4_DMA_CONFIG_PACK(0x21));
+ mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 1);
+
+ DBG("setting mdp_clk=%lu", pc);
+
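+ /* The pixel clock captured in mode_set is applied here, while the
+ * clocks are being (re)enabled.
+ */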
+ ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to set mdp_clk to %lu: %d\n",
+ pc, ret);
+
+ ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
+
+ ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
+
+ mdp4_dtv_encoder->enabled = true;
+}
+
+static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = {
+ .mode_set = mdp4_dtv_encoder_mode_set,
+ .enable = mdp4_dtv_encoder_enable,
+ .disable = mdp4_dtv_encoder_disable,
+};
+
+long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
+{
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+ return clk_round_rate(mdp4_dtv_encoder->mdp_clk, rate);
+}
+
+/* initialize encoder */
+struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
+{
+ struct drm_encoder *encoder = NULL;
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder;
+ int ret;
+
+ mdp4_dtv_encoder = kzalloc(sizeof(*mdp4_dtv_encoder), GFP_KERNEL);
+ if (!mdp4_dtv_encoder) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ encoder = &mdp4_dtv_encoder->base;
+
+ drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs);
+
+ mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
+ if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
+ DRM_DEV_ERROR(dev->dev, "failed to get hdmi_clk\n");
+ ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk);
+ goto fail;
+ }
+
+ mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk");
+ if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
+ DRM_DEV_ERROR(dev->dev, "failed to get tv_clk\n");
+ ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
+ goto fail;
+ }
+
+ return encoder;
+
+fail:
+ if (encoder)
+ mdp4_dtv_encoder_destroy(encoder);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c
new file mode 100644
index 0000000000..ddcdd5e878
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
+
+#include "msm_drv.h"
+#include "mdp4_kms.h"
+
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+ uint32_t old_irqmask)
+{
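+ /* irqmask ^ (irqmask & old_irqmask) is the set of bits being newly
+ * enabled; clear any stale latched status for those first so we
+ * don't immediately take an old interrupt.
+ */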
+ mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_CLEAR,
+ irqmask ^ (irqmask & old_irqmask));
+ mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_ENABLE, irqmask);
+}
+
+static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp4_kms *mdp4_kms = container_of(irq, struct mdp4_kms, error_handler);
+ static DEFINE_RATELIMIT_STATE(rs, 5*HZ, 1);
+ extern bool dumpstate;
+
+ DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus);
+
+ if (dumpstate && __ratelimit(&rs)) {
+ struct drm_printer p = drm_info_printer(mdp4_kms->dev->dev);
+ drm_state_dump(mdp4_kms->dev, &p);
+ }
+}
+
+void mdp4_irq_preinstall(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ mdp4_enable(mdp4_kms);
+ mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
+ mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
+ mdp4_disable(mdp4_kms);
+}
+
+int mdp4_irq_postinstall(struct msm_kms *kms)
+{
+ struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms);
+ struct mdp_irq *error_handler = &mdp4_kms->error_handler;
+
+ error_handler->irq = mdp4_irq_error_handler;
+ error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN |
+ MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
+
+ mdp_irq_register(mdp_kms, error_handler);
+
+ return 0;
+}
+
+void mdp4_irq_uninstall(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ mdp4_enable(mdp4_kms);
+ mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
+ mdp4_disable(mdp4_kms);
+}
+
+irqreturn_t mdp4_irq(struct msm_kms *kms)
+{
+ struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms);
+ struct drm_device *dev = mdp4_kms->dev;
+ struct drm_crtc *crtc;
+ uint32_t status, enable;
+
+ enable = mdp4_read(mdp4_kms, REG_MDP4_INTR_ENABLE);
+ status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS) & enable;
+ mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);
+
+ VERB("status=%08x", status);
+
+ mdp_dispatch_irqs(mdp_kms, status);
+
+ drm_for_each_crtc(crtc, dev)
+ if (status & mdp4_crtc_vblank(crtc))
+ drm_crtc_handle_vblank(crtc);
+
+ return IRQ_HANDLED;
+}
+
+int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+
+ mdp4_enable(mdp4_kms);
+ mdp_update_vblank_mask(to_mdp_kms(kms),
+ mdp4_crtc_vblank(crtc), true);
+ mdp4_disable(mdp4_kms);
+
+ return 0;
+}
+
+void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+
+ mdp4_enable(mdp4_kms);
+ mdp_update_vblank_mask(to_mdp_kms(kms),
+ mdp4_crtc_vblank(crtc), false);
+ mdp4_disable(mdp4_kms);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
new file mode 100644
index 0000000000..700df4040e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -0,0 +1,595 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/delay.h>
+
+#include <drm/drm_vblank.h>
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+#include "mdp4_kms.h"
+
+static int mdp4_hw_init(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ struct drm_device *dev = mdp4_kms->dev;
+ u32 dmap_cfg, vg_cfg;
+ unsigned long clk;
+
+ pm_runtime_get_sync(dev->dev);
+
+ if (mdp4_kms->rev > 1) {
+ mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
+ mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
+ }
+
+ mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3);
+
+ /* max read pending cmd config, 3 pending requests: */
+ mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222);
+
+ clk = clk_get_rate(mdp4_kms->clk);
+
+ if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) {
+ dmap_cfg = 0x47; /* 16 bytes-burst x 8 req */
+ vg_cfg = 0x47; /* 16 bytes-burst x 8 req */
+ } else {
+ dmap_cfg = 0x27; /* 8 bytes-burst x 8 req */
+ vg_cfg = 0x43; /* 16 bytes-burst x 4 req */
+ }
+
+ DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg);
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg);
+
+ if (mdp4_kms->rev >= 2)
+ mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1);
+ mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, 0);
+
+ /* disable CSC matrix / YUV by default: */
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0);
+
+ if (mdp4_kms->rev > 1)
+ mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);
+
+ pm_runtime_put_sync(dev->dev);
+
+ return 0;
+}
+
+static void mdp4_enable_commit(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ mdp4_enable(mdp4_kms);
+}
+
+static void mdp4_disable_commit(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ mdp4_disable(mdp4_kms);
+}
+
+static void mdp4_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
+{
+ /* TODO */
+}
+
+static void mdp4_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ struct drm_crtc *crtc;
+
+ for_each_crtc_mask(mdp4_kms->dev, crtc, crtc_mask)
+ mdp4_crtc_wait_for_commit_done(crtc);
+}
+
+static void mdp4_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
+{
+}
+
+static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
+ struct drm_encoder *encoder)
+{
+ /* if we had >1 encoder, we'd need something more clever: */
+ switch (encoder->encoder_type) {
+ case DRM_MODE_ENCODER_TMDS:
+ return mdp4_dtv_round_pixclk(encoder, rate);
+ case DRM_MODE_ENCODER_LVDS:
+ case DRM_MODE_ENCODER_DSI:
+ default:
+ return rate;
+ }
+}
+
+static void mdp4_destroy(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ struct device *dev = mdp4_kms->dev->dev;
+ struct msm_gem_address_space *aspace = kms->aspace;
+
+ if (mdp4_kms->blank_cursor_iova)
+ msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
+ drm_gem_object_put(mdp4_kms->blank_cursor_bo);
+
+ if (aspace) {
+ aspace->mmu->funcs->detach(aspace->mmu);
+ msm_gem_address_space_put(aspace);
+ }
+
+ if (mdp4_kms->rpm_enabled)
+ pm_runtime_disable(dev);
+
+ mdp_kms_destroy(&mdp4_kms->base);
+
+ kfree(mdp4_kms);
+}
+
+static const struct mdp_kms_funcs kms_funcs = {
+ .base = {
+ .hw_init = mdp4_hw_init,
+ .irq_preinstall = mdp4_irq_preinstall,
+ .irq_postinstall = mdp4_irq_postinstall,
+ .irq_uninstall = mdp4_irq_uninstall,
+ .irq = mdp4_irq,
+ .enable_vblank = mdp4_enable_vblank,
+ .disable_vblank = mdp4_disable_vblank,
+ .enable_commit = mdp4_enable_commit,
+ .disable_commit = mdp4_disable_commit,
+ .flush_commit = mdp4_flush_commit,
+ .wait_flush = mdp4_wait_flush,
+ .complete_commit = mdp4_complete_commit,
+ .get_format = mdp_get_format,
+ .round_pixclk = mdp4_round_pixclk,
+ .destroy = mdp4_destroy,
+ },
+ .set_irqmask = mdp4_set_irqmask,
+};
+
+int mdp4_disable(struct mdp4_kms *mdp4_kms)
+{
+ DBG("");
+
+ clk_disable_unprepare(mdp4_kms->clk);
+ clk_disable_unprepare(mdp4_kms->pclk);
+ clk_disable_unprepare(mdp4_kms->lut_clk);
+ clk_disable_unprepare(mdp4_kms->axi_clk);
+
+ return 0;
+}
+
+int mdp4_enable(struct mdp4_kms *mdp4_kms)
+{
+ DBG("");
+
+ clk_prepare_enable(mdp4_kms->clk);
+ clk_prepare_enable(mdp4_kms->pclk);
+ clk_prepare_enable(mdp4_kms->lut_clk);
+ clk_prepare_enable(mdp4_kms->axi_clk);
+
+ return 0;
+}
+
+
+static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
+ int intf_type)
+{
+ struct drm_device *dev = mdp4_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct device_node *panel_node;
+ int dsi_id;
+ int ret;
+
+ switch (intf_type) {
+ case DRM_MODE_ENCODER_LVDS:
+ /*
+ * bail out early if there is no panel node (no need to
+ * initialize LCDC encoder and LVDS connector)
+ */
+ panel_node = of_graph_get_remote_node(dev->dev->of_node, 0, 0);
+ if (!panel_node)
+ return 0;
+
+ encoder = mdp4_lcdc_encoder_init(dev, panel_node);
+ if (IS_ERR(encoder)) {
+ DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
+ of_node_put(panel_node);
+ return PTR_ERR(encoder);
+ }
+
+ /* LCDC can be hooked to DMA_P (TODO: Add DMA_S later?) */
+ encoder->possible_crtcs = 1 << DMA_P;
+
+ connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
+ if (IS_ERR(connector)) {
+ DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
+ of_node_put(panel_node);
+ return PTR_ERR(connector);
+ }
+
+ break;
+ case DRM_MODE_ENCODER_TMDS:
+ encoder = mdp4_dtv_encoder_init(dev);
+ if (IS_ERR(encoder)) {
+ DRM_DEV_ERROR(dev->dev, "failed to construct DTV encoder\n");
+ return PTR_ERR(encoder);
+ }
+
+ /* DTV can be hooked to DMA_E (the second crtc registered, hence bit 1): */
+ encoder->possible_crtcs = 1 << 1;
+
+ if (priv->hdmi) {
+ /* Construct bridge/connector for HDMI: */
+ ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to initialize HDMI: %d\n", ret);
+ return ret;
+ }
+ }
+
+ break;
+ case DRM_MODE_ENCODER_DSI:
+ /* only DSI1 supported for now */
+ dsi_id = 0;
+
+ if (!priv->dsi[dsi_id])
+ break;
+
+ encoder = mdp4_dsi_encoder_init(dev);
+ if (IS_ERR(encoder)) {
+ ret = PTR_ERR(encoder);
+ DRM_DEV_ERROR(dev->dev,
+ "failed to construct DSI encoder: %d\n", ret);
+ return ret;
+ }
+
+ /* TODO: Add DMA_S later? */
+ encoder->possible_crtcs = 1 << DMA_P;
+
+ ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to initialize DSI: %d\n",
+ ret);
+ return ret;
+ }
+
+ break;
+ default:
+ DRM_DEV_ERROR(dev->dev, "Invalid or unsupported interface\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int modeset_init(struct mdp4_kms *mdp4_kms)
+{
+ struct drm_device *dev = mdp4_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ int i, ret;
+ static const enum mdp4_pipe rgb_planes[] = {
+ RGB1, RGB2,
+ };
+ static const enum mdp4_pipe vg_planes[] = {
+ VG1, VG2,
+ };
+ static const enum mdp4_dma mdp4_crtcs[] = {
+ DMA_P, DMA_E,
+ };
+ static const char * const mdp4_crtc_names[] = {
+ "DMA_P", "DMA_E",
+ };
+ static const int mdp4_intfs[] = {
+ DRM_MODE_ENCODER_LVDS,
+ DRM_MODE_ENCODER_DSI,
+ DRM_MODE_ENCODER_TMDS,
+ };
+
+ /* construct non-private planes: */
+ for (i = 0; i < ARRAY_SIZE(vg_planes); i++) {
+ plane = mdp4_plane_init(dev, vg_planes[i], false);
+ if (IS_ERR(plane)) {
+ DRM_DEV_ERROR(dev->dev,
+ "failed to construct plane for VG%d\n", i + 1);
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) {
+ plane = mdp4_plane_init(dev, rgb_planes[i], true);
+ if (IS_ERR(plane)) {
+ DRM_DEV_ERROR(dev->dev,
+ "failed to construct plane for RGB%d\n", i + 1);
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+
+ crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, i,
+ mdp4_crtcs[i]);
+ if (IS_ERR(crtc)) {
+ DRM_DEV_ERROR(dev->dev, "failed to construct crtc for %s\n",
+ mdp4_crtc_names[i]);
+ ret = PTR_ERR(crtc);
+ goto fail;
+ }
+
+ priv->num_crtcs++;
+ }
+
+ /*
+ * we currently set up two relatively fixed paths:
+ *
+ * LCDC/LVDS path: RGB1 -> DMA_P -> LCDC -> LVDS
+ * or
+ * DSI path: RGB1 -> DMA_P -> DSI1 -> DSI Panel
+ *
+ * DTV/HDMI path: RGB2 -> DMA_E -> DTV -> HDMI
+ */
+
+ for (i = 0; i < ARRAY_SIZE(mdp4_intfs); i++) {
+ ret = mdp4_modeset_init_intf(mdp4_kms, mdp4_intfs[i]);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to initialize intf: %d, %d\n",
+ i, ret);
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ return ret;
+}
+
+static void read_mdp_hw_revision(struct mdp4_kms *mdp4_kms,
+ u32 *major, u32 *minor)
+{
+ struct drm_device *dev = mdp4_kms->dev;
+ u32 version;
+
+ mdp4_enable(mdp4_kms);
+ version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
+ mdp4_disable(mdp4_kms);
+
+ *major = FIELD(version, MDP4_VERSION_MAJOR);
+ *minor = FIELD(version, MDP4_VERSION_MINOR);
+
+ DRM_DEV_INFO(dev->dev, "MDP4 version v%d.%d\n", *major, *minor);
+}
+
+static int mdp4_kms_init(struct drm_device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev->dev);
+ struct msm_drm_private *priv = dev->dev_private;
+ struct mdp4_kms *mdp4_kms;
+ struct msm_kms *kms = NULL;
+ struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
+ int irq, ret;
+ u32 major, minor;
+ unsigned long max_clk;
+
+ /* TODO: Chips that aren't apq8064 have a 200 MHz max_clk */
+ max_clk = 266667000;
+
+ mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
+ if (!mdp4_kms) {
+ DRM_DEV_ERROR(dev->dev, "failed to allocate kms\n");
+ return -ENOMEM;
+ }
+
+ ret = mdp_kms_init(&mdp4_kms->base, &kms_funcs);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to init kms\n");
+ goto fail;
+ }
+
+ priv->kms = &mdp4_kms->base.base;
+ kms = priv->kms;
+
+ mdp4_kms->dev = dev;
+
+ mdp4_kms->mmio = msm_ioremap(pdev, NULL);
+ if (IS_ERR(mdp4_kms->mmio)) {
+ ret = PTR_ERR(mdp4_kms->mmio);
+ goto fail;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto fail;
+ }
+
+ kms->irq = irq;
+
+ /* NOTE: driver for this regulator still missing upstream.. use
+ * _get_exclusive() and ignore the error if it does not exist
+ * (and hope that the bootloader left it on for us)
+ */
+ mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
+ if (IS_ERR(mdp4_kms->vdd))
+ mdp4_kms->vdd = NULL;
+
+ if (mdp4_kms->vdd) {
+ ret = regulator_enable(mdp4_kms->vdd);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n", ret);
+ goto fail;
+ }
+ }
+
+ mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
+ if (IS_ERR(mdp4_kms->clk)) {
+ DRM_DEV_ERROR(dev->dev, "failed to get core_clk\n");
+ ret = PTR_ERR(mdp4_kms->clk);
+ goto fail;
+ }
+
+ mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
+ if (IS_ERR(mdp4_kms->pclk))
+ mdp4_kms->pclk = NULL;
+
+ mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
+ if (IS_ERR(mdp4_kms->axi_clk)) {
+ DRM_DEV_ERROR(dev->dev, "failed to get axi_clk\n");
+ ret = PTR_ERR(mdp4_kms->axi_clk);
+ goto fail;
+ }
+
+ clk_set_rate(mdp4_kms->clk, max_clk);
+
+ read_mdp_hw_revision(mdp4_kms, &major, &minor);
+
+ if (major != 4) {
+ DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
+ major, minor);
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ mdp4_kms->rev = minor;
+
+ if (mdp4_kms->rev >= 2) {
+ mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
+ if (IS_ERR(mdp4_kms->lut_clk)) {
+ DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
+ ret = PTR_ERR(mdp4_kms->lut_clk);
+ goto fail;
+ }
+ clk_set_rate(mdp4_kms->lut_clk, max_clk);
+ }
+
+ pm_runtime_enable(dev->dev);
+ mdp4_kms->rpm_enabled = true;
+
+ /* make sure things are off before attaching iommu (bootloader could
+ * have left things on, in which case we'll start getting faults if
+ * we don't disable):
+ */
+ mdp4_enable(mdp4_kms);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
+ mdp4_disable(mdp4_kms);
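+ /* ~16ms is roughly one frame @ 60Hz; presumably enough time for any
+ * in-flight scanout to stop before the iommu is attached below: */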
+ mdelay(16);
+
+ mmu = msm_iommu_new(&pdev->dev, 0);
+ if (IS_ERR(mmu)) {
+ ret = PTR_ERR(mmu);
+ goto fail;
+ } else if (!mmu) {
+ DRM_DEV_INFO(dev->dev, "no iommu, falling back to phys "
+ "contig buffers for scanout\n");
+ aspace = NULL;
+ } else {
+ aspace = msm_gem_address_space_create(mmu,
+ "mdp4", 0x1000, 0x100000000 - 0x1000);
+
+ if (IS_ERR(aspace)) {
+ if (!IS_ERR(mmu))
+ mmu->funcs->destroy(mmu);
+ ret = PTR_ERR(aspace);
+ goto fail;
+ }
+
+ kms->aspace = aspace;
+ }
+
+ ret = modeset_init(mdp4_kms);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "modeset_init failed: %d\n", ret);
+ goto fail;
+ }
+
+ mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC | MSM_BO_SCANOUT);
+ if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
+ ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
+ DRM_DEV_ERROR(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
+ mdp4_kms->blank_cursor_bo = NULL;
+ goto fail;
+ }
+
+ ret = msm_gem_get_and_pin_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
+ &mdp4_kms->blank_cursor_iova);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
+ goto fail;
+ }
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+
+ return 0;
+
+fail:
+ if (kms)
+ mdp4_destroy(kms);
+
+ return ret;
+}
+
+static const struct dev_pm_ops mdp4_pm_ops = {
+ .prepare = msm_pm_prepare,
+ .complete = msm_pm_complete,
+};
+
+static int mdp4_probe(struct platform_device *pdev)
+{
+ return msm_drv_probe(&pdev->dev, mdp4_kms_init);
+}
+
+static int mdp4_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &msm_drm_ops);
+
+ return 0;
+}
+
+static const struct of_device_id mdp4_dt_match[] = {
+ { .compatible = "qcom,mdp4" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mdp4_dt_match);
+
+static struct platform_driver mdp4_platform_driver = {
+ .probe = mdp4_probe,
+ .remove = mdp4_remove,
+ .shutdown = msm_drv_shutdown,
+ .driver = {
+ .name = "mdp4",
+ .of_match_table = mdp4_dt_match,
+ .pm = &mdp4_pm_ops,
+ },
+};
+
+void __init msm_mdp4_register(void)
+{
+ platform_driver_register(&mdp4_platform_driver);
+}
+
+void __exit msm_mdp4_unregister(void)
+{
+ platform_driver_unregister(&mdp4_platform_driver);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
new file mode 100644
index 0000000000..01179e764a
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __MDP4_KMS_H__
+#define __MDP4_KMS_H__
+
+#include <drm/drm_panel.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "disp/mdp_kms.h"
+#include "mdp4.xml.h"
+
+struct device_node;
+
+struct mdp4_kms {
+ struct mdp_kms base;
+
+ struct drm_device *dev;
+
+ int rev;
+
+ void __iomem *mmio;
+
+ struct regulator *vdd;
+
+ struct clk *clk;
+ struct clk *pclk;
+ struct clk *lut_clk;
+ struct clk *axi_clk;
+
+ struct mdp_irq error_handler;
+
+ bool rpm_enabled;
+
+ /* empty/blank cursor bo to use when cursor is "disabled" */
+ struct drm_gem_object *blank_cursor_bo;
+ uint64_t blank_cursor_iova;
+};
+#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
+
+static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data)
+{
+ msm_writel(data, mdp4_kms->mmio + reg);
+}
+
+static inline u32 mdp4_read(struct mdp4_kms *mdp4_kms, u32 reg)
+{
+ return msm_readl(mdp4_kms->mmio + reg);
+}
+
+static inline uint32_t pipe2flush(enum mdp4_pipe pipe)
+{
+ switch (pipe) {
+ case VG1: return MDP4_OVERLAY_FLUSH_VG1;
+ case VG2: return MDP4_OVERLAY_FLUSH_VG2;
+ case RGB1: return MDP4_OVERLAY_FLUSH_RGB1;
+ case RGB2: return MDP4_OVERLAY_FLUSH_RGB2;
+ default: return 0;
+ }
+}
+
+static inline uint32_t ovlp2flush(int ovlp)
+{
+ switch (ovlp) {
+ case 0: return MDP4_OVERLAY_FLUSH_OVLP0;
+ case 1: return MDP4_OVERLAY_FLUSH_OVLP1;
+ default: return 0;
+ }
+}
+
+static inline uint32_t dma2irq(enum mdp4_dma dma)
+{
+ switch (dma) {
+ case DMA_P: return MDP4_IRQ_DMA_P_DONE;
+ case DMA_S: return MDP4_IRQ_DMA_S_DONE;
+ case DMA_E: return MDP4_IRQ_DMA_E_DONE;
+ default: return 0;
+ }
+}
+
+static inline uint32_t dma2err(enum mdp4_dma dma)
+{
+ switch (dma) {
+ case DMA_P: return MDP4_IRQ_PRIMARY_INTF_UDERRUN;
+ case DMA_S: return 0; // ???
+ case DMA_E: return MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
+ default: return 0;
+ }
+}
+
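+/*
+ * Merge one pipe's stage assignment into the LAYERMIXER_IN_CFG value:
+ * each pipe has a stage field plus a bit selecting mixer 1 instead of
+ * mixer 0.
+ */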
+static inline uint32_t mixercfg(uint32_t mixer_cfg, int mixer,
+ enum mdp4_pipe pipe, enum mdp_mixer_stage_id stage)
+{
+ switch (pipe) {
+ case VG1:
+ mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK |
+ MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
+ mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE0(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
+ break;
+ case VG2:
+ mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK |
+ MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
+ mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE1(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
+ break;
+ case RGB1:
+ mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK |
+ MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
+ mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE2(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
+ break;
+ case RGB2:
+ mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK |
+ MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
+ mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE3(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
+ break;
+ case RGB3:
+ mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK |
+ MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
+ mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE4(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
+ break;
+ case VG3:
+ mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK |
+ MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
+ mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE5(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
+ break;
+ case VG4:
+ mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK |
+ MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
+ mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE6(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
+ break;
+ default:
+ WARN(1, "invalid pipe");
+ break;
+ }
+
+ return mixer_cfg;
+}
+
+int mdp4_disable(struct mdp4_kms *mdp4_kms);
+int mdp4_enable(struct mdp4_kms *mdp4_kms);
+
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+ uint32_t old_irqmask);
+void mdp4_irq_preinstall(struct msm_kms *kms);
+int mdp4_irq_postinstall(struct msm_kms *kms);
+void mdp4_irq_uninstall(struct msm_kms *kms);
+irqreturn_t mdp4_irq(struct msm_kms *kms);
+int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
+static inline uint32_t mdp4_pipe_caps(enum mdp4_pipe pipe)
+{
+ switch (pipe) {
+ case VG1:
+ case VG2:
+ case VG3:
+ case VG4:
+ return MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC;
+ case RGB1:
+ case RGB2:
+ case RGB3:
+ return MDP_PIPE_CAP_SCALE;
+ default:
+ return 0;
+ }
+}
+
+enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane);
+struct drm_plane *mdp4_plane_init(struct drm_device *dev,
+ enum mdp4_pipe pipe_id, bool private_plane);
+
+uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
+void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
+void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer);
+void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc);
+struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
+ struct drm_plane *plane, int id, int ovlp_id,
+ enum mdp4_dma dma_id);
+
+long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
+struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev);
+
+long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
+struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
+ struct device_node *panel_node);
+
+struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
+ struct device_node *panel_node, struct drm_encoder *encoder);
+
+#ifdef CONFIG_DRM_MSM_DSI
+struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev);
+#else
+static inline struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
+
+#ifdef CONFIG_COMMON_CLK
+struct clk *mpd4_lvds_pll_init(struct drm_device *dev);
+#else
+static inline struct clk *mpd4_lvds_pll_init(struct drm_device *dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
+
+#endif /* __MDP4_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
new file mode 100644
index 0000000000..10eb3e5b21
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ * Author: Vinay Simha <vinaysimha@inforcecomputing.com>
+ */
+
+#include <linux/delay.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_probe_helper.h>
+
+#include "mdp4_kms.h"
+
+struct mdp4_lcdc_encoder {
+ struct drm_encoder base;
+ struct device_node *panel_node;
+ struct drm_panel *panel;
+ struct clk *lcdc_clk;
+ unsigned long pixclock;
+ struct regulator *regs[3];
+ bool enabled;
+ uint32_t bsc;
+};
+#define to_mdp4_lcdc_encoder(x) container_of(x, struct mdp4_lcdc_encoder, base)
+
+static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv = encoder->dev->dev_private;
+ return to_mdp4_kms(to_mdp_kms(priv->kms));
+}
+
+static void mdp4_lcdc_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
+ to_mdp4_lcdc_encoder(encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(mdp4_lcdc_encoder);
+}
+
+static const struct drm_encoder_funcs mdp4_lcdc_encoder_funcs = {
+ .destroy = mdp4_lcdc_encoder_destroy,
+};
+
+/* this should probably be a helper: */
+static struct drm_connector *get_connector(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder)
+ return connector;
+
+ return NULL;
+}
+
+static void setup_phy(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_connector *connector = get_connector(encoder);
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+ uint32_t lvds_intf = 0, lvds_phy_cfg0 = 0;
+ int bpp, nchan, swap;
+
+ if (!connector)
+ return;
+
+ bpp = 3 * connector->display_info.bpc;
+
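+ /* a bpc of zero means the panel didn't report it; assume 18bpp: */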
+ if (!bpp)
+ bpp = 18;
+
+ /* TODO, these should come from panel somehow: */
+ nchan = 1;
+ swap = 0;
+
+ switch (bpp) {
+ case 24:
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(0),
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x08) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x05) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x04) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x03));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(0),
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x02) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x01) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x00));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(1),
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x11) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x10) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x0d) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x0c));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(1),
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x0b) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x0a) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x09));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(2),
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x1a) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x19) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x18) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x15));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(2),
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x14) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x13) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x12));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(3),
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x1b) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x17) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x16) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x0f));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(3),
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x0e) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x07) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x06));
+ if (nchan == 2) {
+ lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE3_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE2_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE1_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE0_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE3_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN;
+ } else {
+ lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE3_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN;
+ }
+ break;
+
+ case 18:
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(0),
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x0a) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x07) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x06) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x05));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(0),
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x04) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x03) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x02));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(1),
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x13) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x12) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x0f) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x0e));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(1),
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x0d) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x0c) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x0b));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(2),
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x1a) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x19) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x18) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x17));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(2),
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x16) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x15) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x14));
+ if (nchan == 2) {
+ lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE2_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE1_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE0_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN;
+ } else {
+ lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN;
+ }
+ lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_RGB_OUT;
+ break;
+
+ default:
+ DRM_DEV_ERROR(dev->dev, "unknown bpp: %d\n", bpp);
+ return;
+ }
+
+ switch (nchan) {
+ case 1:
+ lvds_phy_cfg0 = MDP4_LVDS_PHY_CFG0_CHANNEL0;
+ lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_MODE_SEL;
+ break;
+ case 2:
+ lvds_phy_cfg0 = MDP4_LVDS_PHY_CFG0_CHANNEL0 |
+ MDP4_LVDS_PHY_CFG0_CHANNEL1;
+ lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH2_CLK_LANE_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN;
+ break;
+ default:
+ DRM_DEV_ERROR(dev->dev, "unknown # of channels: %d\n", nchan);
+ return;
+ }
+
+ if (swap)
+ lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH_SWAP;
+
+ lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_ENABLE;
+
+ mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG0, lvds_phy_cfg0);
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_INTF_CTL, lvds_intf);
+ mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG2, 0x30);
+
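+ /* make sure the config writes have landed before enabling serialization: */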
+ mb();
+ udelay(1);
+ lvds_phy_cfg0 |= MDP4_LVDS_PHY_CFG0_SERIALIZATION_ENBLE;
+ mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG0, lvds_phy_cfg0);
+}
+
+static void mdp4_lcdc_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
+ to_mdp4_lcdc_encoder(encoder);
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+ uint32_t lcdc_hsync_skew, vsync_period, vsync_len, ctrl_pol;
+ uint32_t display_v_start, display_v_end;
+ uint32_t hsync_start_x, hsync_end_x;
+
+ mode = adjusted_mode;
+
+ DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
+
+ mdp4_lcdc_encoder->pixclock = mode->clock * 1000;
+
+ DBG("pixclock=%lu", mdp4_lcdc_encoder->pixclock);
+
+ ctrl_pol = 0;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ctrl_pol |= MDP4_LCDC_CTRL_POLARITY_HSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ctrl_pol |= MDP4_LCDC_CTRL_POLARITY_VSYNC_LOW;
+ /* probably need to get DATA_EN polarity from panel.. */
+
+ lcdc_hsync_skew = 0; /* get this from panel? */
+
+ hsync_start_x = (mode->htotal - mode->hsync_start);
+ hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
+
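+ /*
+ * Vertical timing registers are programmed in pixel-clock ticks
+ * (lines scaled by htotal) rather than in lines, hence the htotal
+ * multiplications below:
+ */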
+ vsync_period = mode->vtotal * mode->htotal;
+ vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
+ display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + lcdc_hsync_skew;
+ display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + lcdc_hsync_skew - 1;
+
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_HSYNC_CTRL,
+ MDP4_LCDC_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) |
+ MDP4_LCDC_HSYNC_CTRL_PERIOD(mode->htotal));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_VSYNC_PERIOD, vsync_period);
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_VSYNC_LEN, vsync_len);
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_DISPLAY_HCTRL,
+ MDP4_LCDC_DISPLAY_HCTRL_START(hsync_start_x) |
+ MDP4_LCDC_DISPLAY_HCTRL_END(hsync_end_x));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_DISPLAY_VSTART, display_v_start);
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_DISPLAY_VEND, display_v_end);
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_BORDER_CLR, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_UNDERFLOW_CLR,
+ MDP4_LCDC_UNDERFLOW_CLR_ENABLE_RECOVERY |
+ MDP4_LCDC_UNDERFLOW_CLR_COLOR(0xff));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_HSYNC_SKEW, lcdc_hsync_skew);
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_CTRL_POLARITY, ctrl_pol);
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_ACTIVE_HCTL,
+ MDP4_LCDC_ACTIVE_HCTL_START(0) |
+ MDP4_LCDC_ACTIVE_HCTL_END(0));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_ACTIVE_VSTART, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_ACTIVE_VEND, 0);
+}
+
+static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
+ to_mdp4_lcdc_encoder(encoder);
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+ struct drm_panel *panel;
+ int i, ret;
+
+ if (WARN_ON(!mdp4_lcdc_encoder->enabled))
+ return;
+
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
+
+ panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
+ if (!IS_ERR(panel)) {
+ drm_panel_disable(panel);
+ drm_panel_unprepare(panel);
+ }
+
+ /*
+ * Wait for a vsync so we know the ENABLE=0 has latched before
+ * the (connector) source of the vsyncs gets disabled; otherwise
+ * we end up in a funny state if we re-enable before the disable
+ * latches, with the result that some of the settings changes for
+ * the new modeset (like the new scanout buffer) don't latch
+ * properly.
+ */
+ mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_PRIMARY_VSYNC);
+
+ clk_disable_unprepare(mdp4_lcdc_encoder->lcdc_clk);
+
+ for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
+ ret = regulator_disable(mdp4_lcdc_encoder->regs[i]);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to disable regulator: %d\n", ret);
+ }
+
+ mdp4_lcdc_encoder->enabled = false;
+}
+
+static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
+ to_mdp4_lcdc_encoder(encoder);
+ unsigned long pc = mdp4_lcdc_encoder->pixclock;
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+ struct drm_panel *panel;
+ uint32_t config;
+ int i, ret;
+
+ if (WARN_ON(mdp4_lcdc_encoder->enabled))
+ return;
+
+ /* TODO: hard-coded for 18bpp: */
+ config =
+ MDP4_DMA_CONFIG_R_BPC(BPC6) |
+ MDP4_DMA_CONFIG_G_BPC(BPC6) |
+ MDP4_DMA_CONFIG_B_BPC(BPC6) |
+ MDP4_DMA_CONFIG_PACK(0x21) |
+ MDP4_DMA_CONFIG_DEFLKR_EN |
+ MDP4_DMA_CONFIG_DITHER_EN;
+
+ if (!of_property_read_bool(dev->dev->of_node, "qcom,lcdc-align-lsb"))
+ config |= MDP4_DMA_CONFIG_PACK_ALIGN_MSB;
+
+ mdp4_crtc_set_config(encoder->crtc, config);
+ mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0);
+
+ for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
+ ret = regulator_enable(mdp4_lcdc_encoder->regs[i]);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to enable regulator: %d\n", ret);
+ }
+
+ DBG("setting lcdc_clk=%lu", pc);
+ ret = clk_set_rate(mdp4_lcdc_encoder->lcdc_clk, pc);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to configure lcdc_clk: %d\n", ret);
+ ret = clk_prepare_enable(mdp4_lcdc_encoder->lcdc_clk);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
+
+ panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
+ if (!IS_ERR(panel)) {
+ drm_panel_prepare(panel);
+ drm_panel_enable(panel);
+ }
+
+ setup_phy(encoder);
+
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 1);
+
+ mdp4_lcdc_encoder->enabled = true;
+}
+
+static const struct drm_encoder_helper_funcs mdp4_lcdc_encoder_helper_funcs = {
+ .mode_set = mdp4_lcdc_encoder_mode_set,
+ .disable = mdp4_lcdc_encoder_disable,
+ .enable = mdp4_lcdc_encoder_enable,
+};
+
+long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
+{
+ struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
+ to_mdp4_lcdc_encoder(encoder);
+ return clk_round_rate(mdp4_lcdc_encoder->lcdc_clk, rate);
+}
+
+/* initialize encoder */
+struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
+ struct device_node *panel_node)
+{
+ struct drm_encoder *encoder = NULL;
+ struct mdp4_lcdc_encoder *mdp4_lcdc_encoder;
+ struct regulator *reg;
+ int ret;
+
+ mdp4_lcdc_encoder = kzalloc(sizeof(*mdp4_lcdc_encoder), GFP_KERNEL);
+ if (!mdp4_lcdc_encoder) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mdp4_lcdc_encoder->panel_node = panel_node;
+
+ encoder = &mdp4_lcdc_encoder->base;
+
+ drm_encoder_init(dev, encoder, &mdp4_lcdc_encoder_funcs,
+ DRM_MODE_ENCODER_LVDS, NULL);
+ drm_encoder_helper_add(encoder, &mdp4_lcdc_encoder_helper_funcs);
+
+ /* TODO: do we need different pll in other cases? */
+ mdp4_lcdc_encoder->lcdc_clk = mpd4_lvds_pll_init(dev);
+ if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) {
+ DRM_DEV_ERROR(dev->dev, "failed to get lvds_clk\n");
+ ret = PTR_ERR(mdp4_lcdc_encoder->lcdc_clk);
+ goto fail;
+ }
+
+ /* TODO: different regulators in other cases? */
+ reg = devm_regulator_get(dev->dev, "lvds-vccs-3p3v");
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+ DRM_DEV_ERROR(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret);
+ goto fail;
+ }
+ mdp4_lcdc_encoder->regs[0] = reg;
+
+ reg = devm_regulator_get(dev->dev, "lvds-pll-vdda");
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+ DRM_DEV_ERROR(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret);
+ goto fail;
+ }
+ mdp4_lcdc_encoder->regs[1] = reg;
+
+ reg = devm_regulator_get(dev->dev, "lvds-vdda");
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+ DRM_DEV_ERROR(dev->dev, "failed to get lvds-vdda: %d\n", ret);
+ goto fail;
+ }
+ mdp4_lcdc_encoder->regs[2] = reg;
+
+ return encoder;
+
+fail:
+ if (encoder)
+ mdp4_lcdc_encoder_destroy(encoder);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
new file mode 100644
index 0000000000..7444b75c42
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ * Author: Vinay Simha <vinaysimha@inforcecomputing.com>
+ */
+
+#include "mdp4_kms.h"
+
+struct mdp4_lvds_connector {
+ struct drm_connector base;
+ struct drm_encoder *encoder;
+ struct device_node *panel_node;
+ struct drm_panel *panel;
+};
+#define to_mdp4_lvds_connector(x) container_of(x, struct mdp4_lvds_connector, base)
+
+static enum drm_connector_status mdp4_lvds_connector_detect(
+ struct drm_connector *connector, bool force)
+{
+ struct mdp4_lvds_connector *mdp4_lvds_connector =
+ to_mdp4_lvds_connector(connector);
+
+ if (!mdp4_lvds_connector->panel) {
+ mdp4_lvds_connector->panel =
+ of_drm_find_panel(mdp4_lvds_connector->panel_node);
+ if (IS_ERR(mdp4_lvds_connector->panel))
+ mdp4_lvds_connector->panel = NULL;
+ }
+
+ return mdp4_lvds_connector->panel ?
+ connector_status_connected :
+ connector_status_disconnected;
+}
+
+static void mdp4_lvds_connector_destroy(struct drm_connector *connector)
+{
+ struct mdp4_lvds_connector *mdp4_lvds_connector =
+ to_mdp4_lvds_connector(connector);
+
+ drm_connector_cleanup(connector);
+
+ kfree(mdp4_lvds_connector);
+}
+
+static int mdp4_lvds_connector_get_modes(struct drm_connector *connector)
+{
+ struct mdp4_lvds_connector *mdp4_lvds_connector =
+ to_mdp4_lvds_connector(connector);
+ struct drm_panel *panel = mdp4_lvds_connector->panel;
+ int ret = 0;
+
+ if (panel)
+ ret = drm_panel_get_modes(panel, connector);
+
+ return ret;
+}
+
+static enum drm_mode_status
+mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct mdp4_lvds_connector *mdp4_lvds_connector =
+ to_mdp4_lvds_connector(connector);
+ struct drm_encoder *encoder = mdp4_lvds_connector->encoder;
+ long actual, requested;
+
+ requested = 1000 * mode->clock;
+ actual = mdp4_lcdc_round_pixclk(encoder, requested);
+
+ DBG("requested=%ld, actual=%ld", requested, actual);
+
+ if (actual != requested)
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
+
+static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
+ .detect = mdp4_lvds_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = mdp4_lvds_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = {
+ .get_modes = mdp4_lvds_connector_get_modes,
+ .mode_valid = mdp4_lvds_connector_mode_valid,
+};
+
+/* initialize connector */
+struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
+ struct device_node *panel_node, struct drm_encoder *encoder)
+{
+ struct drm_connector *connector = NULL;
+ struct mdp4_lvds_connector *mdp4_lvds_connector;
+
+ mdp4_lvds_connector = kzalloc(sizeof(*mdp4_lvds_connector), GFP_KERNEL);
+ if (!mdp4_lvds_connector)
+ return ERR_PTR(-ENOMEM);
+
+ mdp4_lvds_connector->encoder = encoder;
+ mdp4_lvds_connector->panel_node = panel_node;
+
+ connector = &mdp4_lvds_connector->base;
+
+ drm_connector_init(dev, connector, &mdp4_lvds_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ drm_connector_helper_add(connector, &mdp4_lvds_connector_helper_funcs);
+
+ connector->polled = 0;
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ drm_connector_attach_encoder(connector, encoder);
+
+ return connector;
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
new file mode 100644
index 0000000000..ab8c0c187f
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
+#include "mdp4_kms.h"
+
+struct mdp4_lvds_pll {
+ struct clk_hw pll_hw;
+ struct drm_device *dev;
+ unsigned long pixclk;
+};
+#define to_mdp4_lvds_pll(x) container_of(x, struct mdp4_lvds_pll, pll_hw)
+
+static struct mdp4_kms *get_kms(struct mdp4_lvds_pll *lvds_pll)
+{
+ struct msm_drm_private *priv = lvds_pll->dev->dev_private;
+ return to_mdp4_kms(to_mdp_kms(priv->kms));
+}
+
+struct pll_rate {
+ unsigned long rate;
+ struct {
+ uint32_t val;
+ uint32_t reg;
+ } conf[32];
+};
+
+/* NOTE: keep sorted highest freq to lowest: */
+static const struct pll_rate freqtbl[] = {
+ { 72000000, {
+ { 0x8f, REG_MDP4_LVDS_PHY_PLL_CTRL_1 },
+ { 0x30, REG_MDP4_LVDS_PHY_PLL_CTRL_2 },
+ { 0xc6, REG_MDP4_LVDS_PHY_PLL_CTRL_3 },
+ { 0x10, REG_MDP4_LVDS_PHY_PLL_CTRL_5 },
+ { 0x07, REG_MDP4_LVDS_PHY_PLL_CTRL_6 },
+ { 0x62, REG_MDP4_LVDS_PHY_PLL_CTRL_7 },
+ { 0x41, REG_MDP4_LVDS_PHY_PLL_CTRL_8 },
+ { 0x0d, REG_MDP4_LVDS_PHY_PLL_CTRL_9 },
+ { 0, 0 } }
+ },
+};
+
+static const struct pll_rate *find_rate(unsigned long rate)
+{
+ int i;
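+ /* freqtbl[] is sorted highest to lowest; pick the lowest entry that
+ * is still >= rate, or the closest available if rate is out of range: */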
+ for (i = 1; i < ARRAY_SIZE(freqtbl); i++)
+ if (rate > freqtbl[i].rate)
+ return &freqtbl[i-1];
+ return &freqtbl[i-1];
+}
+
+static int mpd4_lvds_pll_enable(struct clk_hw *hw)
+{
+ struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
+ struct mdp4_kms *mdp4_kms = get_kms(lvds_pll);
+ const struct pll_rate *pll_rate = find_rate(lvds_pll->pixclk);
+ int i;
+
+ if (WARN_ON(!pll_rate))
+ return -EINVAL;
+
+ DBG("pixclk=%lu (%lu)", lvds_pll->pixclk, pll_rate->rate);
+
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_PHY_RESET, 0x33);
+
+ for (i = 0; pll_rate->conf[i].reg; i++)
+ mdp4_write(mdp4_kms, pll_rate->conf[i].reg, pll_rate->conf[i].val);
+
+ mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_PLL_CTRL_0, 0x01);
+
+ /* Wait until LVDS PLL is locked and ready */
+ while (!mdp4_read(mdp4_kms, REG_MDP4_LVDS_PHY_PLL_LOCKED))
+ cpu_relax();
+
+ return 0;
+}
+
+static void mpd4_lvds_pll_disable(struct clk_hw *hw)
+{
+ struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
+ struct mdp4_kms *mdp4_kms = get_kms(lvds_pll);
+
+ DBG("");
+
+ mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG0, 0x0);
+ mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_PLL_CTRL_0, 0x0);
+}
+
+static unsigned long mpd4_lvds_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
+ return lvds_pll->pixclk;
+}
+
+static long mpd4_lvds_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ const struct pll_rate *pll_rate = find_rate(rate);
+ return pll_rate->rate;
+}
+
+static int mpd4_lvds_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
+ lvds_pll->pixclk = rate;
+ return 0;
+}
+
+
+static const struct clk_ops mpd4_lvds_pll_ops = {
+ .enable = mpd4_lvds_pll_enable,
+ .disable = mpd4_lvds_pll_disable,
+ .recalc_rate = mpd4_lvds_pll_recalc_rate,
+ .round_rate = mpd4_lvds_pll_round_rate,
+ .set_rate = mpd4_lvds_pll_set_rate,
+};
+
+static const char *mpd4_lvds_pll_parents[] = {
+ "pxo",
+};
+
+static struct clk_init_data pll_init = {
+ .name = "mpd4_lvds_pll",
+ .ops = &mpd4_lvds_pll_ops,
+ .parent_names = mpd4_lvds_pll_parents,
+ .num_parents = ARRAY_SIZE(mpd4_lvds_pll_parents),
+};
+
+struct clk *mpd4_lvds_pll_init(struct drm_device *dev)
+{
+ struct mdp4_lvds_pll *lvds_pll;
+ struct clk *clk;
+ int ret;
+
+ lvds_pll = devm_kzalloc(dev->dev, sizeof(*lvds_pll), GFP_KERNEL);
+ if (!lvds_pll) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ lvds_pll->dev = dev;
+
+ lvds_pll->pll_hw.init = &pll_init;
+ clk = devm_clk_register(dev->dev, &lvds_pll->pll_hw);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto fail;
+ }
+
+ return clk;
+
+fail:
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
new file mode 100644
index 0000000000..b689b618da
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+
+#include "mdp4_kms.h"
+
+#define DOWN_SCALE_MAX 8
+#define UP_SCALE_MAX 8
+
+struct mdp4_plane {
+ struct drm_plane base;
+ const char *name;
+
+ enum mdp4_pipe pipe;
+
+ uint32_t caps;
+ uint32_t nformats;
+ uint32_t formats[32];
+
+ bool enabled;
+};
+#define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base)
+
+/* MDP format helper functions */
+static inline
+enum mdp4_frame_format mdp4_get_frame_format(struct drm_framebuffer *fb)
+{
+ bool is_tile = false;
+
+ if (fb->modifier == DRM_FORMAT_MOD_SAMSUNG_64_32_TILE)
+ is_tile = true;
+
+ if (fb->format->format == DRM_FORMAT_NV12 && is_tile)
+ return FRAME_TILE_YCBCR_420;
+
+ return FRAME_LINEAR;
+}
+
+static void mdp4_plane_set_scanout(struct drm_plane *plane,
+ struct drm_framebuffer *fb);
+static int mdp4_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+
+static struct mdp4_kms *get_kms(struct drm_plane *plane)
+{
+ struct msm_drm_private *priv = plane->dev->dev_private;
+ return to_mdp4_kms(to_mdp_kms(priv->kms));
+}
+
+static void mdp4_plane_destroy(struct drm_plane *plane)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+
+ drm_plane_cleanup(plane);
+
+ kfree(mdp4_plane);
+}
+
+/* helper to install properties which are common to planes and crtcs */
+static void mdp4_plane_install_properties(struct drm_plane *plane,
+ struct drm_mode_object *obj)
+{
+ // XXX
+}
+
+static int mdp4_plane_set_property(struct drm_plane *plane,
+ struct drm_property *property, uint64_t val)
+{
+ // XXX
+ return -EINVAL;
+}
+
+static const struct drm_plane_funcs mdp4_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = mdp4_plane_destroy,
+ .set_property = mdp4_plane_set_property,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static int mdp4_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct msm_drm_private *priv = plane->dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ if (!new_state->fb)
+ return 0;
+
+ drm_gem_plane_helper_prepare_fb(plane, new_state);
+
+ return msm_framebuffer_prepare(new_state->fb, kms->aspace, false);
+}
+
+static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+ struct mdp4_kms *mdp4_kms = get_kms(plane);
+ struct msm_kms *kms = &mdp4_kms->base.base;
+ struct drm_framebuffer *fb = old_state->fb;
+
+ if (!fb)
+ return;
+
+ DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
+ msm_framebuffer_cleanup(fb, kms->aspace, false);
+}
+
+
+static int mdp4_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ return 0;
+}
+
+static void mdp4_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ int ret;
+
+ ret = mdp4_plane_mode_set(plane,
+ new_state->crtc, new_state->fb,
+ new_state->crtc_x, new_state->crtc_y,
+ new_state->crtc_w, new_state->crtc_h,
+ new_state->src_x, new_state->src_y,
+ new_state->src_w, new_state->src_h);
+ /* atomic_check should have ensured that this doesn't fail */
+ WARN_ON(ret < 0);
+}
+
+static const struct drm_plane_helper_funcs mdp4_plane_helper_funcs = {
+ .prepare_fb = mdp4_plane_prepare_fb,
+ .cleanup_fb = mdp4_plane_cleanup_fb,
+ .atomic_check = mdp4_plane_atomic_check,
+ .atomic_update = mdp4_plane_atomic_update,
+};
+
+static void mdp4_plane_set_scanout(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+ struct mdp4_kms *mdp4_kms = get_kms(plane);
+ struct msm_kms *kms = &mdp4_kms->base.base;
+ enum mdp4_pipe pipe = mdp4_plane->pipe;
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
+ MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
+ MDP4_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_B(pipe),
+ MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
+ MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe),
+ msm_framebuffer_iova(fb, kms->aspace, 0));
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe),
+ msm_framebuffer_iova(fb, kms->aspace, 1));
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe),
+ msm_framebuffer_iova(fb, kms->aspace, 2));
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
+ msm_framebuffer_iova(fb, kms->aspace, 3));
+}
+
+static void mdp4_write_csc_config(struct mdp4_kms *mdp4_kms,
+ enum mdp4_pipe pipe, struct csc_cfg *csc)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(csc->matrix); i++) {
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_MV(pipe, i),
+ csc->matrix[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(csc->post_bias) ; i++) {
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_PRE_BV(pipe, i),
+ csc->pre_bias[i]);
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_POST_BV(pipe, i),
+ csc->post_bias[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(csc->post_clamp) ; i++) {
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_PRE_LV(pipe, i),
+ csc->pre_clamp[i]);
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_POST_LV(pipe, i),
+ csc->post_clamp[i]);
+ }
+}
+
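+/*
+ * The default phase step corresponds to a 1:1 src:dst ratio; the scaled
+ * cases in mdp4_plane_mode_set() derive step = default * src / dst via
+ * mult_frac().
+ */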
+#define MDP4_VG_PHASE_STEP_DEFAULT 0x20000000
+
+static int mdp4_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = plane->dev;
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+ struct mdp4_kms *mdp4_kms = get_kms(plane);
+ enum mdp4_pipe pipe = mdp4_plane->pipe;
+ const struct mdp_format *format;
+ uint32_t op_mode = 0;
+ uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
+ uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
+ enum mdp4_frame_format frame_type;
+
+ if (!(crtc && fb)) {
+ DBG("%s: disabled!", mdp4_plane->name);
+ return 0;
+ }
+
+ frame_type = mdp4_get_frame_format(fb);
+
+ /* src values are in Q16 fixed point, convert to integer: */
+ src_x = src_x >> 16;
+ src_y = src_y >> 16;
+ src_w = src_w >> 16;
+ src_h = src_h >> 16;
+
+ DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp4_plane->name,
+ fb->base.id, src_x, src_y, src_w, src_h,
+ crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
+
+ format = to_mdp_format(msm_framebuffer_format(fb));
+
+ if (src_w > (crtc_w * DOWN_SCALE_MAX)) {
+ DRM_DEV_ERROR(dev->dev, "Width down scaling exceeds limits!\n");
+ return -ERANGE;
+ }
+
+ if (src_h > (crtc_h * DOWN_SCALE_MAX)) {
+ DRM_DEV_ERROR(dev->dev, "Height down scaling exceeds limits!\n");
+ return -ERANGE;
+ }
+
+ if (crtc_w > (src_w * UP_SCALE_MAX)) {
+ DRM_DEV_ERROR(dev->dev, "Width up scaling exceeds limits!\n");
+ return -ERANGE;
+ }
+
+ if (crtc_h > (src_h * UP_SCALE_MAX)) {
+ DRM_DEV_ERROR(dev->dev, "Height up scaling exceeds limits!\n");
+ return -ERANGE;
+ }
+
+ if (src_w != crtc_w) {
+ uint32_t sel_unit = SCALE_FIR;
+ op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN;
+
+ if (MDP_FORMAT_IS_YUV(format)) {
+ if (crtc_w > src_w)
+ sel_unit = SCALE_PIXEL_RPT;
+ else if (crtc_w <= (src_w / 4))
+ sel_unit = SCALE_MN_PHASE;
+
+ op_mode |= MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL(sel_unit);
+ phasex_step = mult_frac(MDP4_VG_PHASE_STEP_DEFAULT,
+ src_w, crtc_w);
+ }
+ }
+
+ if (src_h != crtc_h) {
+ uint32_t sel_unit = SCALE_FIR;
+ op_mode |= MDP4_PIPE_OP_MODE_SCALEY_EN;
+
+ if (MDP_FORMAT_IS_YUV(format)) {
+
+ if (crtc_h > src_h)
+ sel_unit = SCALE_PIXEL_RPT;
+ else if (crtc_h <= (src_h / 4))
+ sel_unit = SCALE_MN_PHASE;
+
+ op_mode |= MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL(sel_unit);
+ phasey_step = mult_frac(MDP4_VG_PHASE_STEP_DEFAULT,
+ src_h, crtc_h);
+ }
+ }
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_SIZE(pipe),
+ MDP4_PIPE_SRC_SIZE_WIDTH(src_w) |
+ MDP4_PIPE_SRC_SIZE_HEIGHT(src_h));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_XY(pipe),
+ MDP4_PIPE_SRC_XY_X(src_x) |
+ MDP4_PIPE_SRC_XY_Y(src_y));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_SIZE(pipe),
+ MDP4_PIPE_DST_SIZE_WIDTH(crtc_w) |
+ MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe),
+ MDP4_PIPE_DST_XY_X(crtc_x) |
+ MDP4_PIPE_DST_XY_Y(crtc_y));
+
+ mdp4_plane_set_scanout(plane, fb);
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe),
+ MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
+ MDP4_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
+ MDP4_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
+ MDP4_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
+ COND(format->alpha_enable, MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
+ MDP4_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
+ MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
+ MDP4_PIPE_SRC_FORMAT_FETCH_PLANES(format->fetch_type) |
+ MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample) |
+ MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT(frame_type) |
+ COND(format->unpack_tight, MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_UNPACK(pipe),
+ MDP4_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
+ MDP4_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
+ MDP4_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
+ MDP4_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
+
+ if (MDP_FORMAT_IS_YUV(format)) {
+ struct csc_cfg *csc = mdp_get_default_csc_cfg(CSC_YUV2RGB);
+
+ op_mode |= MDP4_PIPE_OP_MODE_SRC_YCBCR;
+ op_mode |= MDP4_PIPE_OP_MODE_CSC_EN;
+ mdp4_write_csc_config(mdp4_kms, pipe, csc);
+ }
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(pipe), op_mode);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step);
+
+ if (frame_type != FRAME_LINEAR)
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SSTILE_FRAME_SIZE(pipe),
+ MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH(src_w) |
+ MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT(src_h));
+
+ return 0;
+}
+
+static const char *pipe_names[] = {
+ "VG1", "VG2",
+ "RGB1", "RGB2", "RGB3",
+ "VG3", "VG4",
+};
+
+enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+ return mdp4_plane->pipe;
+}
+
+static const uint64_t supported_format_modifiers[] = {
+ DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+/* initialize plane */
+struct drm_plane *mdp4_plane_init(struct drm_device *dev,
+ enum mdp4_pipe pipe_id, bool private_plane)
+{
+ struct drm_plane *plane = NULL;
+ struct mdp4_plane *mdp4_plane;
+ int ret;
+ enum drm_plane_type type;
+
+ mdp4_plane = kzalloc(sizeof(*mdp4_plane), GFP_KERNEL);
+ if (!mdp4_plane) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ plane = &mdp4_plane->base;
+
+ mdp4_plane->pipe = pipe_id;
+ mdp4_plane->name = pipe_names[pipe_id];
+ mdp4_plane->caps = mdp4_pipe_caps(pipe_id);
+
+ mdp4_plane->nformats = mdp_get_formats(mdp4_plane->formats,
+ ARRAY_SIZE(mdp4_plane->formats),
+ !pipe_supports_yuv(mdp4_plane->caps));
+
+ type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
+ ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
+ mdp4_plane->formats, mdp4_plane->nformats,
+ supported_format_modifiers, type, NULL);
+ if (ret)
+ goto fail;
+
+ drm_plane_helper_add(plane, &mdp4_plane_helper_funcs);
+
+ mdp4_plane_install_properties(plane, &plane->base);
+
+ drm_plane_enable_fb_damage_clips(plane);
+
+ return plane;
+
+fail:
+ if (plane)
+ mdp4_plane_destroy(plane);
+
+ return ERR_PTR(ret);
+}
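
A hedged sketch of the caller side: the mdp4 KMS init path (outside this hunk) is expected to create each plane and check the ERR_PTR-style return, roughly as below. Only mdp4_plane_init() and the generic DRM error helpers are taken from above; the surrounding loop and unwinding are elided.

	/* VG1 is one of the enum mdp4_pipe values listed in pipe_names[]. */
	struct drm_plane *plane = mdp4_plane_init(dev, VG1, false);
	if (IS_ERR(plane))
		return PTR_ERR(plane);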
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
new file mode 100644
index 0000000000..270e11c904
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
@@ -0,0 +1,1979 @@
+#ifndef MDP5_XML
+#define MDP5_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42)
+
+Copyright (C) 2013-2022 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum mdp5_intf_type {
+ INTF_DISABLED = 0,
+ INTF_DSI = 1,
+ INTF_HDMI = 3,
+ INTF_LCDC = 5,
+ INTF_eDP = 9,
+ INTF_VIRTUAL = 100,
+ INTF_WB = 101,
+};
+
+enum mdp5_intfnum {
+ NO_INTF = 0,
+ INTF0 = 1,
+ INTF1 = 2,
+ INTF2 = 3,
+ INTF3 = 4,
+};
+
+enum mdp5_pipe {
+ SSPP_NONE = 0,
+ SSPP_VIG0 = 1,
+ SSPP_VIG1 = 2,
+ SSPP_VIG2 = 3,
+ SSPP_RGB0 = 4,
+ SSPP_RGB1 = 5,
+ SSPP_RGB2 = 6,
+ SSPP_DMA0 = 7,
+ SSPP_DMA1 = 8,
+ SSPP_VIG3 = 9,
+ SSPP_RGB3 = 10,
+ SSPP_CURSOR0 = 11,
+ SSPP_CURSOR1 = 12,
+};
+
+enum mdp5_format {
+ DUMMY = 0,
+};
+
+enum mdp5_ctl_mode {
+ MODE_NONE = 0,
+ MODE_WB_0_BLOCK = 1,
+ MODE_WB_1_BLOCK = 2,
+ MODE_WB_0_LINE = 3,
+ MODE_WB_1_LINE = 4,
+ MODE_WB_2_LINE = 5,
+};
+
+enum mdp5_pack_3d {
+ PACK_3D_FRAME_INT = 0,
+ PACK_3D_H_ROW_INT = 1,
+ PACK_3D_V_ROW_INT = 2,
+ PACK_3D_COL_INT = 3,
+};
+
+enum mdp5_scale_filter {
+ SCALE_FILTER_NEAREST = 0,
+ SCALE_FILTER_BIL = 1,
+ SCALE_FILTER_PCMN = 2,
+ SCALE_FILTER_CA = 3,
+};
+
+enum mdp5_pipe_bwc {
+ BWC_LOSSLESS = 0,
+ BWC_Q_HIGH = 1,
+ BWC_Q_MED = 2,
+};
+
+enum mdp5_cursor_format {
+ CURSOR_FMT_ARGB8888 = 0,
+ CURSOR_FMT_ARGB1555 = 2,
+ CURSOR_FMT_ARGB4444 = 4,
+};
+
+enum mdp5_cursor_alpha {
+ CURSOR_ALPHA_CONST = 0,
+ CURSOR_ALPHA_PER_PIXEL = 2,
+};
+
+enum mdp5_igc_type {
+ IGC_VIG = 0,
+ IGC_RGB = 1,
+ IGC_DMA = 2,
+ IGC_DSPP = 3,
+};
+
+enum mdp5_data_format {
+ DATA_FORMAT_RGB = 0,
+ DATA_FORMAT_YUV = 1,
+};
+
+enum mdp5_block_size {
+ BLOCK_SIZE_64 = 0,
+ BLOCK_SIZE_128 = 1,
+};
+
+enum mdp5_rotate_mode {
+ ROTATE_0 = 0,
+ ROTATE_90 = 1,
+};
+
+enum mdp5_chroma_downsample_method {
+ DS_MTHD_NO_PIXEL_DROP = 0,
+ DS_MTHD_PIXEL_DROP = 1,
+};
+
+#define MDP5_IRQ_WB_0_DONE 0x00000001
+#define MDP5_IRQ_WB_1_DONE 0x00000002
+#define MDP5_IRQ_WB_2_DONE 0x00000010
+#define MDP5_IRQ_PING_PONG_0_DONE 0x00000100
+#define MDP5_IRQ_PING_PONG_1_DONE 0x00000200
+#define MDP5_IRQ_PING_PONG_2_DONE 0x00000400
+#define MDP5_IRQ_PING_PONG_3_DONE 0x00000800
+#define MDP5_IRQ_PING_PONG_0_RD_PTR 0x00001000
+#define MDP5_IRQ_PING_PONG_1_RD_PTR 0x00002000
+#define MDP5_IRQ_PING_PONG_2_RD_PTR 0x00004000
+#define MDP5_IRQ_PING_PONG_3_RD_PTR 0x00008000
+#define MDP5_IRQ_PING_PONG_0_WR_PTR 0x00010000
+#define MDP5_IRQ_PING_PONG_1_WR_PTR 0x00020000
+#define MDP5_IRQ_PING_PONG_2_WR_PTR 0x00040000
+#define MDP5_IRQ_PING_PONG_3_WR_PTR 0x00080000
+#define MDP5_IRQ_PING_PONG_0_AUTO_REF 0x00100000
+#define MDP5_IRQ_PING_PONG_1_AUTO_REF 0x00200000
+#define MDP5_IRQ_PING_PONG_2_AUTO_REF 0x00400000
+#define MDP5_IRQ_PING_PONG_3_AUTO_REF 0x00800000
+#define MDP5_IRQ_INTF0_UNDER_RUN 0x01000000
+#define MDP5_IRQ_INTF0_VSYNC 0x02000000
+#define MDP5_IRQ_INTF1_UNDER_RUN 0x04000000
+#define MDP5_IRQ_INTF1_VSYNC 0x08000000
+#define MDP5_IRQ_INTF2_UNDER_RUN 0x10000000
+#define MDP5_IRQ_INTF2_VSYNC 0x20000000
+#define MDP5_IRQ_INTF3_UNDER_RUN 0x40000000
+#define MDP5_IRQ_INTF3_VSYNC 0x80000000
+#define REG_MDSS_HW_VERSION 0x00000000
+#define MDSS_HW_VERSION_STEP__MASK 0x0000ffff
+#define MDSS_HW_VERSION_STEP__SHIFT 0
+static inline uint32_t MDSS_HW_VERSION_STEP(uint32_t val)
+{
+ return ((val) << MDSS_HW_VERSION_STEP__SHIFT) & MDSS_HW_VERSION_STEP__MASK;
+}
+#define MDSS_HW_VERSION_MINOR__MASK 0x0fff0000
+#define MDSS_HW_VERSION_MINOR__SHIFT 16
+static inline uint32_t MDSS_HW_VERSION_MINOR(uint32_t val)
+{
+ return ((val) << MDSS_HW_VERSION_MINOR__SHIFT) & MDSS_HW_VERSION_MINOR__MASK;
+}
+#define MDSS_HW_VERSION_MAJOR__MASK 0xf0000000
+#define MDSS_HW_VERSION_MAJOR__SHIFT 28
+static inline uint32_t MDSS_HW_VERSION_MAJOR(uint32_t val)
+{
+ return ((val) << MDSS_HW_VERSION_MAJOR__SHIFT) & MDSS_HW_VERSION_MAJOR__MASK;
+}
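
These generated helpers only pack a field; decoding reverses each one with the same mask/shift pair. A small illustration, assuming the MDSS_HW_VERSION_* definitions above are in scope:

	/* Pack a version word field by field... */
	uint32_t v = MDSS_HW_VERSION_MAJOR(1) |
		     MDSS_HW_VERSION_MINOR(7) |
		     MDSS_HW_VERSION_STEP(0);

	/* ...and recover one field by undoing its shift under the mask. */
	uint32_t major = (v & MDSS_HW_VERSION_MAJOR__MASK) >>
			 MDSS_HW_VERSION_MAJOR__SHIFT;	/* == 1 */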
+
+#define REG_MDSS_HW_INTR_STATUS 0x00000010
+#define MDSS_HW_INTR_STATUS_INTR_MDP 0x00000001
+#define MDSS_HW_INTR_STATUS_INTR_DSI0 0x00000010
+#define MDSS_HW_INTR_STATUS_INTR_DSI1 0x00000020
+#define MDSS_HW_INTR_STATUS_INTR_HDMI 0x00000100
+#define MDSS_HW_INTR_STATUS_INTR_EDP 0x00001000
+
+#define REG_MDP5_HW_VERSION 0x00000000
+#define MDP5_HW_VERSION_STEP__MASK 0x0000ffff
+#define MDP5_HW_VERSION_STEP__SHIFT 0
+static inline uint32_t MDP5_HW_VERSION_STEP(uint32_t val)
+{
+ return ((val) << MDP5_HW_VERSION_STEP__SHIFT) & MDP5_HW_VERSION_STEP__MASK;
+}
+#define MDP5_HW_VERSION_MINOR__MASK 0x0fff0000
+#define MDP5_HW_VERSION_MINOR__SHIFT 16
+static inline uint32_t MDP5_HW_VERSION_MINOR(uint32_t val)
+{
+ return ((val) << MDP5_HW_VERSION_MINOR__SHIFT) & MDP5_HW_VERSION_MINOR__MASK;
+}
+#define MDP5_HW_VERSION_MAJOR__MASK 0xf0000000
+#define MDP5_HW_VERSION_MAJOR__SHIFT 28
+static inline uint32_t MDP5_HW_VERSION_MAJOR(uint32_t val)
+{
+ return ((val) << MDP5_HW_VERSION_MAJOR__SHIFT) & MDP5_HW_VERSION_MAJOR__MASK;
+}
+
+#define REG_MDP5_DISP_INTF_SEL 0x00000004
+#define MDP5_DISP_INTF_SEL_INTF0__MASK 0x000000ff
+#define MDP5_DISP_INTF_SEL_INTF0__SHIFT 0
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf_type val)
+{
+ return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK;
+}
+#define MDP5_DISP_INTF_SEL_INTF1__MASK 0x0000ff00
+#define MDP5_DISP_INTF_SEL_INTF1__SHIFT 8
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf_type val)
+{
+ return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK;
+}
+#define MDP5_DISP_INTF_SEL_INTF2__MASK 0x00ff0000
+#define MDP5_DISP_INTF_SEL_INTF2__SHIFT 16
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf_type val)
+{
+ return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_DISP_INTF_SEL_INTF2__MASK;
+}
+#define MDP5_DISP_INTF_SEL_INTF3__MASK 0xff000000
+#define MDP5_DISP_INTF_SEL_INTF3__SHIFT 24
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf_type val)
+{
+ return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK;
+}
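
The four byte-wide fields let a single register route all interfaces at once; e.g. a hedged fragment selecting DSI on INTF1 and HDMI on INTF3 (INTF_DISABLED is 0, so fields left unset stay disabled):

	uint32_t sel = MDP5_DISP_INTF_SEL_INTF1(INTF_DSI) |
		       MDP5_DISP_INTF_SEL_INTF3(INTF_HDMI);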
+
+#define REG_MDP5_INTR_EN 0x00000010
+
+#define REG_MDP5_INTR_STATUS 0x00000014
+
+#define REG_MDP5_INTR_CLEAR 0x00000018
+
+#define REG_MDP5_HIST_INTR_EN 0x0000001c
+
+#define REG_MDP5_HIST_INTR_STATUS 0x00000020
+
+#define REG_MDP5_HIST_INTR_CLEAR 0x00000024
+
+#define REG_MDP5_SPARE_0 0x00000028
+#define MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN 0x00000001
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000080 + 0x4*i0; }
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000080 + 0x4*i0; }
+#define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff
+#define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(uint32_t val)
+{
+ return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
+}
+#define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00
+#define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(uint32_t val)
+{
+ return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
+}
+#define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000
+#define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(uint32_t val)
+{
+ return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
+}
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000130 + 0x4*i0; }
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000130 + 0x4*i0; }
+#define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff
+#define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(uint32_t val)
+{
+ return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK;
+}
+#define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00
+#define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(uint32_t val)
+{
+ return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK;
+}
+#define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000
+#define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(uint32_t val)
+{
+ return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK;
+}
+
+static inline uint32_t __offset_IGC(enum mdp5_igc_type idx)
+{
+ switch (idx) {
+ case IGC_VIG: return 0x00000200;
+ case IGC_RGB: return 0x00000210;
+ case IGC_DMA: return 0x00000220;
+ case IGC_DSPP: return 0x00000300;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); }
+
+static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
+#define MDP5_IGC_LUT_REG_VAL__MASK 0x00000fff
+#define MDP5_IGC_LUT_REG_VAL__SHIFT 0
+static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val)
+{
+ return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK;
+}
+#define MDP5_IGC_LUT_REG_INDEX_UPDATE 0x02000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000
+
+#define REG_MDP5_SPLIT_DPL_EN 0x000002f4
+
+#define REG_MDP5_SPLIT_DPL_UPPER 0x000002f8
+#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002
+#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004
+#define MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010
+#define MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100
+
+#define REG_MDP5_SPLIT_DPL_LOWER 0x000003f0
+#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002
+#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004
+#define MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010
+#define MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100
+
+static inline uint32_t __offset_CTL(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->ctl.base[0]);
+ case 1: return (mdp5_cfg->ctl.base[1]);
+ case 2: return (mdp5_cfg->ctl.base[2]);
+ case 3: return (mdp5_cfg->ctl.base[3]);
+ case 4: return (mdp5_cfg->ctl.base[4]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_CTL(uint32_t i0) { return 0x00000000 + __offset_CTL(i0); }
+
+static inline uint32_t __offset_LAYER(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00000000;
+ case 1: return 0x00000004;
+ case 2: return 0x00000008;
+ case 3: return 0x0000000c;
+ case 4: return 0x00000010;
+ case 5: return 0x00000024;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); }
+
+static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); }
+#define MDP5_CTL_LAYER_REG_VIG0__MASK 0x00000007
+#define MDP5_CTL_LAYER_REG_VIG0__SHIFT 0
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(uint32_t val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_VIG0__SHIFT) & MDP5_CTL_LAYER_REG_VIG0__MASK;
+}
+#define MDP5_CTL_LAYER_REG_VIG1__MASK 0x00000038
+#define MDP5_CTL_LAYER_REG_VIG1__SHIFT 3
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(uint32_t val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_VIG1__SHIFT) & MDP5_CTL_LAYER_REG_VIG1__MASK;
+}
+#define MDP5_CTL_LAYER_REG_VIG2__MASK 0x000001c0
+#define MDP5_CTL_LAYER_REG_VIG2__SHIFT 6
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(uint32_t val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_VIG2__SHIFT) & MDP5_CTL_LAYER_REG_VIG2__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB0__MASK 0x00000e00
+#define MDP5_CTL_LAYER_REG_RGB0__SHIFT 9
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(uint32_t val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_RGB0__SHIFT) & MDP5_CTL_LAYER_REG_RGB0__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB1__MASK 0x00007000
+#define MDP5_CTL_LAYER_REG_RGB1__SHIFT 12
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(uint32_t val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_RGB1__SHIFT) & MDP5_CTL_LAYER_REG_RGB1__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB2__MASK 0x00038000
+#define MDP5_CTL_LAYER_REG_RGB2__SHIFT 15
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(uint32_t val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_RGB2__SHIFT) & MDP5_CTL_LAYER_REG_RGB2__MASK;
+}
+#define MDP5_CTL_LAYER_REG_DMA0__MASK 0x001c0000
+#define MDP5_CTL_LAYER_REG_DMA0__SHIFT 18
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(uint32_t val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_DMA0__SHIFT) & MDP5_CTL_LAYER_REG_DMA0__MASK;
+}
+#define MDP5_CTL_LAYER_REG_DMA1__MASK 0x00e00000
+#define MDP5_CTL_LAYER_REG_DMA1__SHIFT 21
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(uint32_t val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_DMA1__SHIFT) & MDP5_CTL_LAYER_REG_DMA1__MASK;
+}
+#define MDP5_CTL_LAYER_REG_BORDER_COLOR 0x01000000
+#define MDP5_CTL_LAYER_REG_CURSOR_OUT 0x02000000
+#define MDP5_CTL_LAYER_REG_VIG3__MASK 0x1c000000
+#define MDP5_CTL_LAYER_REG_VIG3__SHIFT 26
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(uint32_t val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_VIG3__SHIFT) & MDP5_CTL_LAYER_REG_VIG3__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB3__MASK 0xe0000000
+#define MDP5_CTL_LAYER_REG_RGB3__SHIFT 29
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(uint32_t val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_RGB3__SHIFT) & MDP5_CTL_LAYER_REG_RGB3__MASK;
+}
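
A hedged fragment composing one LAYER word: each 3-bit field holds the mixer stage assigned to its pipe, with BORDER_COLOR supplying the base layer. The numeric stage encodings come from enum mdp_mixer_stage_id in mdp_common.xml and are assumed here, not shown in this hunk:

	uint32_t layer = MDP5_CTL_LAYER_REG_VIG0(2) |	/* assumed STAGE0 */
			 MDP5_CTL_LAYER_REG_RGB0(3) |	/* assumed STAGE1 */
			 MDP5_CTL_LAYER_REG_BORDER_COLOR;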
+
+static inline uint32_t REG_MDP5_CTL_OP(uint32_t i0) { return 0x00000014 + __offset_CTL(i0); }
+#define MDP5_CTL_OP_MODE__MASK 0x0000000f
+#define MDP5_CTL_OP_MODE__SHIFT 0
+static inline uint32_t MDP5_CTL_OP_MODE(enum mdp5_ctl_mode val)
+{
+ return ((val) << MDP5_CTL_OP_MODE__SHIFT) & MDP5_CTL_OP_MODE__MASK;
+}
+#define MDP5_CTL_OP_INTF_NUM__MASK 0x00000070
+#define MDP5_CTL_OP_INTF_NUM__SHIFT 4
+static inline uint32_t MDP5_CTL_OP_INTF_NUM(enum mdp5_intfnum val)
+{
+ return ((val) << MDP5_CTL_OP_INTF_NUM__SHIFT) & MDP5_CTL_OP_INTF_NUM__MASK;
+}
+#define MDP5_CTL_OP_CMD_MODE 0x00020000
+#define MDP5_CTL_OP_PACK_3D_ENABLE 0x00080000
+#define MDP5_CTL_OP_PACK_3D__MASK 0x00300000
+#define MDP5_CTL_OP_PACK_3D__SHIFT 20
+static inline uint32_t MDP5_CTL_OP_PACK_3D(enum mdp5_pack_3d val)
+{
+ return ((val) << MDP5_CTL_OP_PACK_3D__SHIFT) & MDP5_CTL_OP_PACK_3D__MASK;
+}
+
+static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000018 + __offset_CTL(i0); }
+#define MDP5_CTL_FLUSH_VIG0 0x00000001
+#define MDP5_CTL_FLUSH_VIG1 0x00000002
+#define MDP5_CTL_FLUSH_VIG2 0x00000004
+#define MDP5_CTL_FLUSH_RGB0 0x00000008
+#define MDP5_CTL_FLUSH_RGB1 0x00000010
+#define MDP5_CTL_FLUSH_RGB2 0x00000020
+#define MDP5_CTL_FLUSH_LM0 0x00000040
+#define MDP5_CTL_FLUSH_LM1 0x00000080
+#define MDP5_CTL_FLUSH_LM2 0x00000100
+#define MDP5_CTL_FLUSH_LM3 0x00000200
+#define MDP5_CTL_FLUSH_LM4 0x00000400
+#define MDP5_CTL_FLUSH_DMA0 0x00000800
+#define MDP5_CTL_FLUSH_DMA1 0x00001000
+#define MDP5_CTL_FLUSH_DSPP0 0x00002000
+#define MDP5_CTL_FLUSH_DSPP1 0x00004000
+#define MDP5_CTL_FLUSH_DSPP2 0x00008000
+#define MDP5_CTL_FLUSH_WB 0x00010000
+#define MDP5_CTL_FLUSH_CTL 0x00020000
+#define MDP5_CTL_FLUSH_VIG3 0x00040000
+#define MDP5_CTL_FLUSH_RGB3 0x00080000
+#define MDP5_CTL_FLUSH_LM5 0x00100000
+#define MDP5_CTL_FLUSH_DSPP3 0x00200000
+#define MDP5_CTL_FLUSH_CURSOR_0 0x00400000
+#define MDP5_CTL_FLUSH_CURSOR_1 0x00800000
+#define MDP5_CTL_FLUSH_CHROMADOWN_0 0x04000000
+#define MDP5_CTL_FLUSH_TIMING_3 0x10000000
+#define MDP5_CTL_FLUSH_TIMING_2 0x20000000
+#define MDP5_CTL_FLUSH_TIMING_1 0x40000000
+#define MDP5_CTL_FLUSH_TIMING_0 0x80000000
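
Flush bits are meant to be OR'ed into a single CTL_FLUSH write, so one update of a pipe and its mixer lands together. A hedged fragment (ctl_write() stands in for the driver's CTL register accessor, which lives elsewhere in this series):

	uint32_t flush = MDP5_CTL_FLUSH_VIG0 | MDP5_CTL_FLUSH_LM0;
	ctl_write(ctl, REG_MDP5_CTL_FLUSH(0), flush);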
+
+static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000001c + __offset_CTL(i0); }
+
+static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000020 + __offset_CTL(i0); }
+
+static inline uint32_t __offset_LAYER_EXT(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00000040;
+ case 1: return 0x00000044;
+ case 2: return 0x00000048;
+ case 3: return 0x0000004c;
+ case 4: return 0x00000050;
+ case 5: return 0x00000054;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_CTL_LAYER_EXT(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); }
+
+static inline uint32_t REG_MDP5_CTL_LAYER_EXT_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); }
+#define MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3 0x00000001
+#define MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3 0x00000004
+#define MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3 0x00000010
+#define MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3 0x00000040
+#define MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3 0x00000100
+#define MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3 0x00000400
+#define MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3 0x00001000
+#define MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3 0x00004000
+#define MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3 0x00010000
+#define MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3 0x00040000
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK 0x00f00000
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT 20
+static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR0(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK;
+}
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK 0x3c000000
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT 26
+static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR1(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK;
+}
+
+static inline uint32_t __offset_PIPE(enum mdp5_pipe idx)
+{
+ switch (idx) {
+ case SSPP_NONE: return (INVALID_IDX(idx));
+ case SSPP_VIG0: return (mdp5_cfg->pipe_vig.base[0]);
+ case SSPP_VIG1: return (mdp5_cfg->pipe_vig.base[1]);
+ case SSPP_VIG2: return (mdp5_cfg->pipe_vig.base[2]);
+ case SSPP_RGB0: return (mdp5_cfg->pipe_rgb.base[0]);
+ case SSPP_RGB1: return (mdp5_cfg->pipe_rgb.base[1]);
+ case SSPP_RGB2: return (mdp5_cfg->pipe_rgb.base[2]);
+ case SSPP_DMA0: return (mdp5_cfg->pipe_dma.base[0]);
+ case SSPP_DMA1: return (mdp5_cfg->pipe_dma.base[1]);
+ case SSPP_VIG3: return (mdp5_cfg->pipe_vig.base[3]);
+ case SSPP_RGB3: return (mdp5_cfg->pipe_rgb.base[3]);
+ case SSPP_CURSOR0: return (mdp5_cfg->pipe_cursor.base[0]);
+ case SSPP_CURSOR1: return (mdp5_cfg->pipe_cursor.base[1]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_OP_MODE(enum mdp5_pipe i0) { return 0x00000200 + __offset_PIPE(i0); }
+#define MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__MASK 0x00080000
+#define MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT 19
+static inline uint32_t MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT(enum mdp5_data_format val)
+{
+ return ((val) << MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT) & MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__MASK;
+}
+#define MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__MASK 0x00040000
+#define MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT 18
+static inline uint32_t MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT(enum mdp5_data_format val)
+{
+ return ((val) << MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT) & MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__MASK;
+}
+#define MDP5_PIPE_OP_MODE_CSC_1_EN 0x00020000
+
+static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000002c4 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000002f0 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00000300 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_0(enum mdp5_pipe i0) { return 0x00000320 + __offset_PIPE(i0); }
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__MASK 0x00001fff
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__MASK;
+}
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__MASK 0x1fff0000
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__SHIFT 16
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_1(enum mdp5_pipe i0) { return 0x00000324 + __offset_PIPE(i0); }
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__MASK 0x00001fff
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__MASK;
+}
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__MASK 0x1fff0000
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__SHIFT 16
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_2(enum mdp5_pipe i0) { return 0x00000328 + __offset_PIPE(i0); }
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__MASK 0x00001fff
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__MASK;
+}
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__MASK 0x1fff0000
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__SHIFT 16
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_3(enum mdp5_pipe i0) { return 0x0000032c + __offset_PIPE(i0); }
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__MASK 0x00001fff
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__MASK;
+}
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__MASK 0x1fff0000
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__SHIFT 16
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_4(enum mdp5_pipe i0) { return 0x00000330 + __offset_PIPE(i0); }
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__MASK 0x00001fff
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_CLAMP(enum mdp5_pipe i0, uint32_t i1) { return 0x00000334 + __offset_PIPE(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_CLAMP_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000334 + __offset_PIPE(i0) + 0x4*i1; }
+#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__MASK 0x000000ff
+#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__SHIFT) & MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__MASK;
+}
+#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__MASK 0x0000ff00
+#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__SHIFT 8
+static inline uint32_t MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__SHIFT) & MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_CLAMP(enum mdp5_pipe i0, uint32_t i1) { return 0x00000340 + __offset_PIPE(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_CLAMP_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000340 + __offset_PIPE(i0) + 0x4*i1; }
+#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__MASK 0x000000ff
+#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__SHIFT) & MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__MASK;
+}
+#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__MASK 0x0000ff00
+#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__SHIFT 8
+static inline uint32_t MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__SHIFT) & MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_BIAS(enum mdp5_pipe i0, uint32_t i1) { return 0x0000034c + __offset_PIPE(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_BIAS_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x0000034c + __offset_PIPE(i0) + 0x4*i1; }
+#define MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__MASK 0x000001ff
+#define MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__SHIFT) & MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_BIAS(enum mdp5_pipe i0, uint32_t i1) { return 0x00000358 + __offset_PIPE(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_BIAS_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000358 + __offset_PIPE(i0) + 0x4*i1; }
+#define MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__MASK 0x000001ff
+#define MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__SHIFT) & MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); }
+#define MDP5_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_SIZE_HEIGHT__MASK;
+}
+#define MDP5_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_SIZE_WIDTH__MASK;
+}
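
Putting the per-pipe base lookup and the field builders together, programming SRC_SIZE mirrors the mdp4 code earlier in this series; a sketch, assuming mdp5_write() is the driver's MMIO write helper:

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(SSPP_VIG0),
		   MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
		   MDP5_PIPE_SRC_SIZE_HEIGHT(src_h));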
+
+static inline uint32_t REG_MDP5_PIPE_SRC_IMG_SIZE(enum mdp5_pipe i0) { return 0x00000004 + __offset_PIPE(i0); }
+#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK;
+}
+#define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_XY(enum mdp5_pipe i0) { return 0x00000008 + __offset_PIPE(i0); }
+#define MDP5_PIPE_SRC_XY_Y__MASK 0xffff0000
+#define MDP5_PIPE_SRC_XY_Y__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_XY_Y(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_XY_Y__SHIFT) & MDP5_PIPE_SRC_XY_Y__MASK;
+}
+#define MDP5_PIPE_SRC_XY_X__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_XY_X__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_XY_X(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_XY_X__SHIFT) & MDP5_PIPE_SRC_XY_X__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_OUT_SIZE(enum mdp5_pipe i0) { return 0x0000000c + __offset_PIPE(i0); }
+#define MDP5_PIPE_OUT_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP5_PIPE_OUT_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_OUT_SIZE_HEIGHT__MASK;
+}
+#define MDP5_PIPE_OUT_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP5_PIPE_OUT_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT) & MDP5_PIPE_OUT_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_OUT_XY(enum mdp5_pipe i0) { return 0x00000010 + __offset_PIPE(i0); }
+#define MDP5_PIPE_OUT_XY_Y__MASK 0xffff0000
+#define MDP5_PIPE_OUT_XY_Y__SHIFT 16
+static inline uint32_t MDP5_PIPE_OUT_XY_Y(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_OUT_XY_Y__SHIFT) & MDP5_PIPE_OUT_XY_Y__MASK;
+}
+#define MDP5_PIPE_OUT_XY_X__MASK 0x0000ffff
+#define MDP5_PIPE_OUT_XY_X__SHIFT 0
+static inline uint32_t MDP5_PIPE_OUT_XY_X(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_OUT_XY_X__SHIFT) & MDP5_PIPE_OUT_XY_X__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC0_ADDR(enum mdp5_pipe i0) { return 0x00000014 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SRC1_ADDR(enum mdp5_pipe i0) { return 0x00000018 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SRC2_ADDR(enum mdp5_pipe i0) { return 0x0000001c + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SRC3_ADDR(enum mdp5_pipe i0) { return 0x00000020 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_A(enum mdp5_pipe i0) { return 0x00000024 + __offset_PIPE(i0); }
+#define MDP5_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P0(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P0__MASK;
+}
+#define MDP5_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000
+#define MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P1(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P1__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_B(enum mdp5_pipe i0) { return 0x00000028 + __offset_PIPE(i0); }
+#define MDP5_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P2(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P2__MASK;
+}
+#define MDP5_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000
+#define MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P3(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P3__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_STILE_FRAME_SIZE(enum mdp5_pipe i0) { return 0x0000002c + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SRC_FORMAT(enum mdp5_pipe i0) { return 0x00000030 + __offset_PIPE(i0); }
+#define MDP5_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
+#define MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_G_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c
+#define MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT 2
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_B_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030
+#define MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT 4
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_R_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0
+#define MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT 6
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_A_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100
+#define MDP5_PIPE_SRC_FORMAT_CPP__MASK 0x00000600
+#define MDP5_PIPE_SRC_FORMAT_CPP__SHIFT 9
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_CPP(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CPP__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_ROT90 0x00000800
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00003000
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 12
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
+#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK 0x00180000
+#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT 19
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(enum mdp_fetch_type val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT) & MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x01800000
+#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 23
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp_chroma_samp_type val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_UNPACK(enum mdp5_pipe i0) { return 0x00000034 + __offset_PIPE(i0); }
+#define MDP5_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff
+#define MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM0__MASK;
+}
+#define MDP5_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00
+#define MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT 8
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM1(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM1__MASK;
+}
+#define MDP5_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000
+#define MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM2(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM2__MASK;
+}
+#define MDP5_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000
+#define MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT 24
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM3__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_OP_MODE(enum mdp5_pipe i0) { return 0x00000038 + __offset_PIPE(i0); }
+#define MDP5_PIPE_SRC_OP_MODE_BWC_EN 0x00000001
+#define MDP5_PIPE_SRC_OP_MODE_BWC__MASK 0x00000006
+#define MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT 1
+static inline uint32_t MDP5_PIPE_SRC_OP_MODE_BWC(enum mdp5_pipe_bwc val)
+{
+ return ((val) << MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT) & MDP5_PIPE_SRC_OP_MODE_BWC__MASK;
+}
+#define MDP5_PIPE_SRC_OP_MODE_FLIP_LR 0x00002000
+#define MDP5_PIPE_SRC_OP_MODE_FLIP_UD 0x00004000
+#define MDP5_PIPE_SRC_OP_MODE_IGC_EN 0x00010000
+#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_0 0x00020000
+#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_1 0x00040000
+#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE 0x00400000
+#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE_ODD 0x00800000
+#define MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE 0x80000000
+
+static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000003c + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_FETCH_CONFIG(enum mdp5_pipe i0) { return 0x00000048 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_VC1_RANGE(enum mdp5_pipe i0) { return 0x0000004c + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(enum mdp5_pipe i0) { return 0x00000050 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(enum mdp5_pipe i0) { return 0x00000054 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(enum mdp5_pipe i0) { return 0x00000058 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(enum mdp5_pipe i0) { return 0x00000070 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC0_ADDR(enum mdp5_pipe i0) { return 0x000000a4 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC1_ADDR(enum mdp5_pipe i0) { return 0x000000a8 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC2_ADDR(enum mdp5_pipe i0) { return 0x000000ac + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC3_ADDR(enum mdp5_pipe i0) { return 0x000000b0 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_DECIMATION(enum mdp5_pipe i0) { return 0x000000b4 + __offset_PIPE(i0); }
+#define MDP5_PIPE_DECIMATION_VERT__MASK 0x000000ff
+#define MDP5_PIPE_DECIMATION_VERT__SHIFT 0
+static inline uint32_t MDP5_PIPE_DECIMATION_VERT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_DECIMATION_VERT__SHIFT) & MDP5_PIPE_DECIMATION_VERT__MASK;
+}
+#define MDP5_PIPE_DECIMATION_HORZ__MASK 0x0000ff00
+#define MDP5_PIPE_DECIMATION_HORZ__SHIFT 8
+static inline uint32_t MDP5_PIPE_DECIMATION_HORZ(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_DECIMATION_HORZ__SHIFT) & MDP5_PIPE_DECIMATION_HORZ__MASK;
+}
+
+static inline uint32_t __offset_SW_PIX_EXT(enum mdp_component_type idx)
+{
+ switch (idx) {
+ case COMP_0: return 0x00000100;
+ case COMP_1_2: return 0x00000110;
+ case COMP_3: return 0x00000120;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000000 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); }
+
+static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT_LR(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000000 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); }
+#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__MASK 0x000000ff
+#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__SHIFT 0
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__MASK;
+}
+#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__MASK 0x0000ff00
+#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__SHIFT 8
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF(int32_t val)
+{
+ return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__MASK;
+}
+#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__MASK 0x00ff0000
+#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__SHIFT 16
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__MASK;
+}
+#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__MASK 0xff000000
+#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__SHIFT 24
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF(int32_t val)
+{
+ return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT_TB(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000004 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); }
+#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__MASK 0x000000ff
+#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__SHIFT 0
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__MASK;
+}
+#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__MASK 0x0000ff00
+#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__SHIFT 8
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF(int32_t val)
+{
+ return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__MASK;
+}
+#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__MASK 0x00ff0000
+#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__SHIFT 16
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__MASK;
+}
+#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__MASK 0xff000000
+#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__SHIFT 24
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF(int32_t val)
+{
+ return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000008 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); }
+#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__MASK 0x0000ffff
+#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__SHIFT 0
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__MASK;
+}
+#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__MASK 0xffff0000
+#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__SHIFT 16
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__SHIFT) & MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00000204 + __offset_PIPE(i0); }
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN 0x00000001
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN 0x00000002
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK 0x00000300
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT 8
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK 0x00000c00
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT 10
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK 0x00003000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT 12
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK 0x0000c000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT 14
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK 0x00030000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT 16
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK 0x000c0000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT 18
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000210 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00000214 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000218 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x0000021c + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00000220 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00000224 + __offset_PIPE(i0); }
+
+static inline uint32_t __offset_LM(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->lm.base[0]);
+ case 1: return (mdp5_cfg->lm.base[1]);
+ case 2: return (mdp5_cfg->lm.base[2]);
+ case 3: return (mdp5_cfg->lm.base[3]);
+ case 4: return (mdp5_cfg->lm.base[4]);
+ case 5: return (mdp5_cfg->lm.base[5]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_LM(uint32_t i0) { return 0x00000000 + __offset_LM(i0); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00000000 + __offset_LM(i0); }
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA 0x00000002
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA 0x00000004
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA 0x00000008
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA 0x00000010
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA 0x00000020
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA 0x00000040
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA 0x00000080
+#define MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT 0x80000000
+
+static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00000004 + __offset_LM(i0); }
+#define MDP5_LM_OUT_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP5_LM_OUT_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP5_LM_OUT_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP5_LM_OUT_SIZE_HEIGHT__SHIFT) & MDP5_LM_OUT_SIZE_HEIGHT__MASK;
+}
+#define MDP5_LM_OUT_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP5_LM_OUT_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP5_LM_OUT_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP5_LM_OUT_SIZE_WIDTH__SHIFT) & MDP5_LM_OUT_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x00000008 + __offset_LM(i0); }
+
+static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00000010 + __offset_LM(i0); }
+
+static inline uint32_t __offset_BLEND(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00000020;
+ case 1: return 0x00000050;
+ case 2: return 0x00000080;
+ case 3: return 0x000000b0;
+ case 4: return 0x00000230;
+ case 5: return 0x00000260;
+ case 6: return 0x00000290;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); }
+#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK 0x00000003
+#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT 0
+static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val)
+{
+ return ((val) << MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK;
+}
+#define MDP5_LM_BLEND_OP_MODE_FG_INV_ALPHA 0x00000004
+#define MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA 0x00000008
+#define MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA 0x00000010
+#define MDP5_LM_BLEND_OP_MODE_FG_TRANSP_EN 0x00000020
+#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK 0x00000300
+#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT 8
+static inline uint32_t MDP5_LM_BLEND_OP_MODE_BG_ALPHA(enum mdp_alpha_type val)
+{
+ return ((val) << MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK;
+}
+#define MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA 0x00000400
+#define MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA 0x00000800
+#define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA 0x00001000
+#define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN 0x00002000
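
One plausible blend configuration: coverage-style per-pixel alpha, with the foreground weighted by its own alpha and the background by the inverse. FG_PIXEL is assumed from enum mdp_alpha_type in mdp_common.xml (not part of this hunk):

	uint32_t op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
		      MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL) |
		      MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;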
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000001c + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000000e0 + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK 0x0000ffff
+#define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__SHIFT 0
+static inline uint32_t MDP5_LM_CURSOR_IMG_SIZE_SRC_W(uint32_t val)
+{
+ return ((val) << MDP5_LM_CURSOR_IMG_SIZE_SRC_W__SHIFT) & MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK;
+}
+#define MDP5_LM_CURSOR_IMG_SIZE_SRC_H__MASK 0xffff0000
+#define MDP5_LM_CURSOR_IMG_SIZE_SRC_H__SHIFT 16
+static inline uint32_t MDP5_LM_CURSOR_IMG_SIZE_SRC_H(uint32_t val)
+{
+ return ((val) << MDP5_LM_CURSOR_IMG_SIZE_SRC_H__SHIFT) & MDP5_LM_CURSOR_IMG_SIZE_SRC_H__MASK;
+}
+
+static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000000e4 + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_SIZE_ROI_W__MASK 0x0000ffff
+#define MDP5_LM_CURSOR_SIZE_ROI_W__SHIFT 0
+static inline uint32_t MDP5_LM_CURSOR_SIZE_ROI_W(uint32_t val)
+{
+ return ((val) << MDP5_LM_CURSOR_SIZE_ROI_W__SHIFT) & MDP5_LM_CURSOR_SIZE_ROI_W__MASK;
+}
+#define MDP5_LM_CURSOR_SIZE_ROI_H__MASK 0xffff0000
+#define MDP5_LM_CURSOR_SIZE_ROI_H__SHIFT 16
+static inline uint32_t MDP5_LM_CURSOR_SIZE_ROI_H(uint32_t val)
+{
+ return ((val) << MDP5_LM_CURSOR_SIZE_ROI_H__SHIFT) & MDP5_LM_CURSOR_SIZE_ROI_H__MASK;
+}
+
+static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000000e8 + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_XY_SRC_X__MASK 0x0000ffff
+#define MDP5_LM_CURSOR_XY_SRC_X__SHIFT 0
+static inline uint32_t MDP5_LM_CURSOR_XY_SRC_X(uint32_t val)
+{
+ return ((val) << MDP5_LM_CURSOR_XY_SRC_X__SHIFT) & MDP5_LM_CURSOR_XY_SRC_X__MASK;
+}
+#define MDP5_LM_CURSOR_XY_SRC_Y__MASK 0xffff0000
+#define MDP5_LM_CURSOR_XY_SRC_Y__SHIFT 16
+static inline uint32_t MDP5_LM_CURSOR_XY_SRC_Y(uint32_t val)
+{
+ return ((val) << MDP5_LM_CURSOR_XY_SRC_Y__SHIFT) & MDP5_LM_CURSOR_XY_SRC_Y__MASK;
+}
+
+static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000000dc + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_STRIDE_STRIDE__MASK 0x0000ffff
+#define MDP5_LM_CURSOR_STRIDE_STRIDE__SHIFT 0
+static inline uint32_t MDP5_LM_CURSOR_STRIDE_STRIDE(uint32_t val)
+{
+ return ((val) << MDP5_LM_CURSOR_STRIDE_STRIDE__SHIFT) & MDP5_LM_CURSOR_STRIDE_STRIDE__MASK;
+}
+
+static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000000ec + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_FORMAT_FORMAT__MASK 0x00000007
+#define MDP5_LM_CURSOR_FORMAT_FORMAT__SHIFT 0
+static inline uint32_t MDP5_LM_CURSOR_FORMAT_FORMAT(enum mdp5_cursor_format val)
+{
+ return ((val) << MDP5_LM_CURSOR_FORMAT_FORMAT__SHIFT) & MDP5_LM_CURSOR_FORMAT_FORMAT__MASK;
+}
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000000f0 + __offset_LM(i0); }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000000f4 + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_START_XY_X_START__MASK 0x0000ffff
+#define MDP5_LM_CURSOR_START_XY_X_START__SHIFT 0
+static inline uint32_t MDP5_LM_CURSOR_START_XY_X_START(uint32_t val)
+{
+ return ((val) << MDP5_LM_CURSOR_START_XY_X_START__SHIFT) & MDP5_LM_CURSOR_START_XY_X_START__MASK;
+}
+#define MDP5_LM_CURSOR_START_XY_Y_START__MASK 0xffff0000
+#define MDP5_LM_CURSOR_START_XY_Y_START__SHIFT 16
+static inline uint32_t MDP5_LM_CURSOR_START_XY_Y_START(uint32_t val)
+{
+ return ((val) << MDP5_LM_CURSOR_START_XY_Y_START__SHIFT) & MDP5_LM_CURSOR_START_XY_Y_START__MASK;
+}
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000000f8 + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN 0x00000001
+#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__MASK 0x00000006
+#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__SHIFT 1
+static inline uint32_t MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(enum mdp5_cursor_alpha val)
+{
+ return ((val) << MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__SHIFT) & MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__MASK;
+}
+#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN 0x00000008
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000000fc + __offset_LM(i0); }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW0(uint32_t i0) { return 0x00000100 + __offset_LM(i0); }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW1(uint32_t i0) { return 0x00000104 + __offset_LM(i0); }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH0(uint32_t i0) { return 0x00000108 + __offset_LM(i0); }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH1(uint32_t i0) { return 0x0000010c + __offset_LM(i0); }
+
+static inline uint32_t REG_MDP5_LM_GC_LUT_BASE(uint32_t i0) { return 0x00000110 + __offset_LM(i0); }
+
+static inline uint32_t __offset_DSPP(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->dspp.base[0]);
+ case 1: return (mdp5_cfg->dspp.base[1]);
+ case 2: return (mdp5_cfg->dspp.base[2]);
+ case 3: return (mdp5_cfg->dspp.base[3]);
+ default: return INVALID_IDX(idx);
+ }
+}
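+/* Note: the __offset_*() helpers in this header all follow the same
+ * pattern -- per-instance base addresses are looked up in the runtime
+ * mdp5_cfg tables (see mdp5_cfg.c) rather than compiled in, since the
+ * block layout moved between MDP5 hardware revisions.
+ */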
+static inline uint32_t REG_MDP5_DSPP(uint32_t i0) { return 0x00000000 + __offset_DSPP(i0); }
+
+static inline uint32_t REG_MDP5_DSPP_OP_MODE(uint32_t i0) { return 0x00000000 + __offset_DSPP(i0); }
+#define MDP5_DSPP_OP_MODE_IGC_LUT_EN 0x00000001
+#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK 0x0000000e
+#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT 1
+static inline uint32_t MDP5_DSPP_OP_MODE_IGC_TBL_IDX(uint32_t val)
+{
+ return ((val) << MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT) & MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK;
+}
+#define MDP5_DSPP_OP_MODE_PCC_EN 0x00000010
+#define MDP5_DSPP_OP_MODE_DITHER_EN 0x00000100
+#define MDP5_DSPP_OP_MODE_HIST_EN 0x00010000
+#define MDP5_DSPP_OP_MODE_AUTO_CLEAR 0x00020000
+#define MDP5_DSPP_OP_MODE_HIST_LUT_EN 0x00080000
+#define MDP5_DSPP_OP_MODE_PA_EN 0x00100000
+#define MDP5_DSPP_OP_MODE_GAMUT_EN 0x00800000
+#define MDP5_DSPP_OP_MODE_GAMUT_ORDER 0x01000000
+
+static inline uint32_t REG_MDP5_DSPP_PCC_BASE(uint32_t i0) { return 0x00000030 + __offset_DSPP(i0); }
+
+static inline uint32_t REG_MDP5_DSPP_DITHER_DEPTH(uint32_t i0) { return 0x00000150 + __offset_DSPP(i0); }
+
+static inline uint32_t REG_MDP5_DSPP_HIST_CTL_BASE(uint32_t i0) { return 0x00000210 + __offset_DSPP(i0); }
+
+static inline uint32_t REG_MDP5_DSPP_HIST_LUT_BASE(uint32_t i0) { return 0x00000230 + __offset_DSPP(i0); }
+
+static inline uint32_t REG_MDP5_DSPP_HIST_LUT_SWAP(uint32_t i0) { return 0x00000234 + __offset_DSPP(i0); }
+
+static inline uint32_t REG_MDP5_DSPP_PA_BASE(uint32_t i0) { return 0x00000238 + __offset_DSPP(i0); }
+
+static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000002dc + __offset_DSPP(i0); }
+
+static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000002b0 + __offset_DSPP(i0); }
+
+static inline uint32_t __offset_PP(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->pp.base[0]);
+ case 1: return (mdp5_cfg->pp.base[1]);
+ case 2: return (mdp5_cfg->pp.base[2]);
+ case 3: return (mdp5_cfg->pp.base[3]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_PP(uint32_t i0) { return 0x00000000 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_TEAR_CHECK_EN(uint32_t i0) { return 0x00000000 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_SYNC_CONFIG_VSYNC(uint32_t i0) { return 0x00000004 + __offset_PP(i0); }
+#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__MASK 0x0007ffff
+#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__SHIFT 0
+static inline uint32_t MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__SHIFT) & MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__MASK;
+}
+#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN 0x00080000
+#define MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN 0x00100000
+
+static inline uint32_t REG_MDP5_PP_SYNC_CONFIG_HEIGHT(uint32_t i0) { return 0x00000008 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_SYNC_WRCOUNT(uint32_t i0) { return 0x0000000c + __offset_PP(i0); }
+#define MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__MASK 0x0000ffff
+#define MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__SHIFT 0
+static inline uint32_t MDP5_PP_SYNC_WRCOUNT_LINE_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__SHIFT) & MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__MASK;
+}
+#define MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__MASK 0xffff0000
+#define MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__SHIFT 16
+static inline uint32_t MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__SHIFT) & MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__MASK;
+}
+
+static inline uint32_t REG_MDP5_PP_VSYNC_INIT_VAL(uint32_t i0) { return 0x00000010 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_INT_COUNT_VAL(uint32_t i0) { return 0x00000014 + __offset_PP(i0); }
+#define MDP5_PP_INT_COUNT_VAL_LINE_COUNT__MASK 0x0000ffff
+#define MDP5_PP_INT_COUNT_VAL_LINE_COUNT__SHIFT 0
+static inline uint32_t MDP5_PP_INT_COUNT_VAL_LINE_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PP_INT_COUNT_VAL_LINE_COUNT__SHIFT) & MDP5_PP_INT_COUNT_VAL_LINE_COUNT__MASK;
+}
+#define MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__MASK 0xffff0000
+#define MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__SHIFT 16
+static inline uint32_t MDP5_PP_INT_COUNT_VAL_FRAME_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__SHIFT) & MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__MASK;
+}
+
+static inline uint32_t REG_MDP5_PP_SYNC_THRESH(uint32_t i0) { return 0x00000018 + __offset_PP(i0); }
+#define MDP5_PP_SYNC_THRESH_START__MASK 0x0000ffff
+#define MDP5_PP_SYNC_THRESH_START__SHIFT 0
+static inline uint32_t MDP5_PP_SYNC_THRESH_START(uint32_t val)
+{
+ return ((val) << MDP5_PP_SYNC_THRESH_START__SHIFT) & MDP5_PP_SYNC_THRESH_START__MASK;
+}
+#define MDP5_PP_SYNC_THRESH_CONTINUE__MASK 0xffff0000
+#define MDP5_PP_SYNC_THRESH_CONTINUE__SHIFT 16
+static inline uint32_t MDP5_PP_SYNC_THRESH_CONTINUE(uint32_t val)
+{
+ return ((val) << MDP5_PP_SYNC_THRESH_CONTINUE__SHIFT) & MDP5_PP_SYNC_THRESH_CONTINUE__MASK;
+}
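+
+/* Illustrative tear-check programming (a sketch under assumed locals,
+ * not code from this patch): for command-mode panels the vsync counter
+ * is typically configured as
+ *
+ *   mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp),
+ *              MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line) |
+ *              MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN |
+ *              MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN);
+ *   mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_THRESH(pp),
+ *              MDP5_PP_SYNC_THRESH_START(4) |
+ *              MDP5_PP_SYNC_THRESH_CONTINUE(4));
+ *
+ * vclks_line (vsync clocks per display line) is an assumed local here,
+ * derived from the panel's vtotal and refresh rate.
+ */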
+
+static inline uint32_t REG_MDP5_PP_START_POS(uint32_t i0) { return 0x0000001c + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_RD_PTR_IRQ(uint32_t i0) { return 0x00000020 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_WR_PTR_IRQ(uint32_t i0) { return 0x00000024 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_OUT_LINE_COUNT(uint32_t i0) { return 0x00000028 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_PP_LINE_COUNT(uint32_t i0) { return 0x0000002c + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_AUTOREFRESH_CONFIG(uint32_t i0) { return 0x00000030 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_FBC_MODE(uint32_t i0) { return 0x00000034 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_FBC_BUDGET_CTL(uint32_t i0) { return 0x00000038 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_FBC_LOSSY_MODE(uint32_t i0) { return 0x0000003c + __offset_PP(i0); }
+
+static inline uint32_t __offset_WB(uint32_t idx)
+{
+ switch (idx) {
+#if 0 /* TEMPORARY until patch that adds wb.base[] is merged */
+ case 0: return (mdp5_cfg->wb.base[0]);
+ case 1: return (mdp5_cfg->wb.base[1]);
+ case 2: return (mdp5_cfg->wb.base[2]);
+ case 3: return (mdp5_cfg->wb.base[3]);
+ case 4: return (mdp5_cfg->wb.base[4]);
+#endif
+ default: return INVALID_IDX(idx);
+ }
+}
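+/* Until the patch adding wb.base[] to the config tables is merged (see
+ * the #if 0 above), every WB index resolves to INVALID_IDX(), so the WB
+ * register helpers below are effectively unreachable.
+ */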
+static inline uint32_t REG_MDP5_WB(uint32_t i0) { return 0x00000000 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DST_FORMAT(uint32_t i0) { return 0x00000000 + __offset_WB(i0); }
+#define MDP5_WB_DST_FORMAT_DSTC0_OUT__MASK 0x00000003
+#define MDP5_WB_DST_FORMAT_DSTC0_OUT__SHIFT 0
+static inline uint32_t MDP5_WB_DST_FORMAT_DSTC0_OUT(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_FORMAT_DSTC0_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC0_OUT__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DSTC1_OUT__MASK 0x0000000c
+#define MDP5_WB_DST_FORMAT_DSTC1_OUT__SHIFT 2
+static inline uint32_t MDP5_WB_DST_FORMAT_DSTC1_OUT(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_FORMAT_DSTC1_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC1_OUT__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DSTC2_OUT__MASK 0x00000030
+#define MDP5_WB_DST_FORMAT_DSTC2_OUT__SHIFT 4
+static inline uint32_t MDP5_WB_DST_FORMAT_DSTC2_OUT(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_FORMAT_DSTC2_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC2_OUT__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DSTC3_OUT__MASK 0x000000c0
+#define MDP5_WB_DST_FORMAT_DSTC3_OUT__SHIFT 6
+static inline uint32_t MDP5_WB_DST_FORMAT_DSTC3_OUT(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_FORMAT_DSTC3_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC3_OUT__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DSTC3_EN 0x00000100
+#define MDP5_WB_DST_FORMAT_DST_BPP__MASK 0x00000600
+#define MDP5_WB_DST_FORMAT_DST_BPP__SHIFT 9
+static inline uint32_t MDP5_WB_DST_FORMAT_DST_BPP(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_FORMAT_DST_BPP__SHIFT) & MDP5_WB_DST_FORMAT_DST_BPP__MASK;
+}
+#define MDP5_WB_DST_FORMAT_PACK_COUNT__MASK 0x00003000
+#define MDP5_WB_DST_FORMAT_PACK_COUNT__SHIFT 12
+static inline uint32_t MDP5_WB_DST_FORMAT_PACK_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_FORMAT_PACK_COUNT__SHIFT) & MDP5_WB_DST_FORMAT_PACK_COUNT__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DST_ALPHA_X 0x00004000
+#define MDP5_WB_DST_FORMAT_PACK_TIGHT 0x00020000
+#define MDP5_WB_DST_FORMAT_PACK_ALIGN_MSB 0x00040000
+#define MDP5_WB_DST_FORMAT_WRITE_PLANES__MASK 0x00180000
+#define MDP5_WB_DST_FORMAT_WRITE_PLANES__SHIFT 19
+static inline uint32_t MDP5_WB_DST_FORMAT_WRITE_PLANES(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_FORMAT_WRITE_PLANES__SHIFT) & MDP5_WB_DST_FORMAT_WRITE_PLANES__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DST_DITHER_EN 0x00400000
+#define MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__MASK 0x03800000
+#define MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__SHIFT 23
+static inline uint32_t MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__SHIFT) & MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__MASK 0x3c000000
+#define MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__SHIFT 26
+static inline uint32_t MDP5_WB_DST_FORMAT_DST_CHROMA_SITE(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__SHIFT) & MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__MASK;
+}
+#define MDP5_WB_DST_FORMAT_FRAME_FORMAT__MASK 0xc0000000
+#define MDP5_WB_DST_FORMAT_FRAME_FORMAT__SHIFT 30
+static inline uint32_t MDP5_WB_DST_FORMAT_FRAME_FORMAT(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_FORMAT_FRAME_FORMAT__SHIFT) & MDP5_WB_DST_FORMAT_FRAME_FORMAT__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_DST_OP_MODE(uint32_t i0) { return 0x00000004 + __offset_WB(i0); }
+#define MDP5_WB_DST_OP_MODE_BWC_ENC_EN 0x00000001
+#define MDP5_WB_DST_OP_MODE_BWC_ENC_OP__MASK 0x00000006
+#define MDP5_WB_DST_OP_MODE_BWC_ENC_OP__SHIFT 1
+static inline uint32_t MDP5_WB_DST_OP_MODE_BWC_ENC_OP(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_OP_MODE_BWC_ENC_OP__SHIFT) & MDP5_WB_DST_OP_MODE_BWC_ENC_OP__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_BLOCK_SIZE__MASK 0x00000010
+#define MDP5_WB_DST_OP_MODE_BLOCK_SIZE__SHIFT 4
+static inline uint32_t MDP5_WB_DST_OP_MODE_BLOCK_SIZE(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_OP_MODE_BLOCK_SIZE__SHIFT) & MDP5_WB_DST_OP_MODE_BLOCK_SIZE__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_ROT_MODE__MASK 0x00000020
+#define MDP5_WB_DST_OP_MODE_ROT_MODE__SHIFT 5
+static inline uint32_t MDP5_WB_DST_OP_MODE_ROT_MODE(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_OP_MODE_ROT_MODE__SHIFT) & MDP5_WB_DST_OP_MODE_ROT_MODE__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_ROT_EN 0x00000040
+#define MDP5_WB_DST_OP_MODE_CSC_EN 0x00000100
+#define MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__MASK 0x00000200
+#define MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT 9
+static inline uint32_t MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT) & MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__MASK 0x00000400
+#define MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT 10
+static inline uint32_t MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT) & MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_EN 0x00000800
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__MASK 0x00001000
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__SHIFT 12
+static inline uint32_t MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__SHIFT) & MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__MASK 0x00002000
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__SHIFT 13
+static inline uint32_t MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__SHIFT) & MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__MASK 0x00004000
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__SHIFT 14
+static inline uint32_t MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__SHIFT) & MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_DST_PACK_PATTERN(uint32_t i0) { return 0x00000008 + __offset_WB(i0); }
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT0__MASK 0x00000003
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT0__SHIFT 0
+static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT0(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT0__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT0__MASK;
+}
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT1__MASK 0x00000300
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT1__SHIFT 8
+static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT1(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT1__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT1__MASK;
+}
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT2__MASK 0x00030000
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT2__SHIFT 16
+static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT2(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT2__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT2__MASK;
+}
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT3__MASK 0x03000000
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT3__SHIFT 24
+static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT3(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT3__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT3__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_DST0_ADDR(uint32_t i0) { return 0x0000000c + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DST1_ADDR(uint32_t i0) { return 0x00000010 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DST2_ADDR(uint32_t i0) { return 0x00000014 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DST3_ADDR(uint32_t i0) { return 0x00000018 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DST_YSTRIDE0(uint32_t i0) { return 0x0000001c + __offset_WB(i0); }
+#define MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__MASK 0x0000ffff
+#define MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__SHIFT 0
+static inline uint32_t MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__MASK;
+}
+#define MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__MASK 0xffff0000
+#define MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__SHIFT 16
+static inline uint32_t MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_DST_YSTRIDE1(uint32_t i0) { return 0x00000020 + __offset_WB(i0); }
+#define MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__MASK 0x0000ffff
+#define MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__SHIFT 0
+static inline uint32_t MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__MASK;
+}
+#define MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__MASK 0xffff0000
+#define MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__SHIFT 16
+static inline uint32_t MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_DST_DITHER_BITDEPTH(uint32_t i0) { return 0x00000024 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW0(uint32_t i0) { return 0x00000030 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW1(uint32_t i0) { return 0x00000034 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW2(uint32_t i0) { return 0x00000038 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW3(uint32_t i0) { return 0x0000003c + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DST_WRITE_CONFIG(uint32_t i0) { return 0x00000048 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_ROTATION_DNSCALER(uint32_t i0) { return 0x00000050 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_X_0_3(uint32_t i0) { return 0x00000060 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_X_1_2(uint32_t i0) { return 0x00000064 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_Y_0_3(uint32_t i0) { return 0x00000068 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_Y_1_2(uint32_t i0) { return 0x0000006c + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_OUT_SIZE(uint32_t i0) { return 0x00000074 + __offset_WB(i0); }
+#define MDP5_WB_OUT_SIZE_DST_W__MASK 0x0000ffff
+#define MDP5_WB_OUT_SIZE_DST_W__SHIFT 0
+static inline uint32_t MDP5_WB_OUT_SIZE_DST_W(uint32_t val)
+{
+ return ((val) << MDP5_WB_OUT_SIZE_DST_W__SHIFT) & MDP5_WB_OUT_SIZE_DST_W__MASK;
+}
+#define MDP5_WB_OUT_SIZE_DST_H__MASK 0xffff0000
+#define MDP5_WB_OUT_SIZE_DST_H__SHIFT 16
+static inline uint32_t MDP5_WB_OUT_SIZE_DST_H(uint32_t val)
+{
+ return ((val) << MDP5_WB_OUT_SIZE_DST_H__SHIFT) & MDP5_WB_OUT_SIZE_DST_H__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_ALPHA_X_VALUE(uint32_t i0) { return 0x00000078 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_0(uint32_t i0) { return 0x00000260 + __offset_WB(i0); }
+#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__MASK 0x00001fff
+#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__MASK;
+}
+#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__MASK 0x1fff0000
+#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__SHIFT 16
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_1(uint32_t i0) { return 0x00000264 + __offset_WB(i0); }
+#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__MASK 0x00001fff
+#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__MASK;
+}
+#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__MASK 0x1fff0000
+#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__SHIFT 16
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_2(uint32_t i0) { return 0x00000268 + __offset_WB(i0); }
+#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__MASK 0x00001fff
+#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__MASK;
+}
+#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__MASK 0x1fff0000
+#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__SHIFT 16
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_3(uint32_t i0) { return 0x0000026c + __offset_WB(i0); }
+#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__MASK 0x00001fff
+#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__MASK;
+}
+#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__MASK 0x1fff0000
+#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__SHIFT 16
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_4(uint32_t i0) { return 0x00000270 + __offset_WB(i0); }
+#define MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__MASK 0x00001fff
+#define MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_CSC_COMP_PRECLAMP(uint32_t i0, uint32_t i1) { return 0x00000274 + __offset_WB(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_WB_CSC_COMP_PRECLAMP_REG(uint32_t i0, uint32_t i1) { return 0x00000274 + __offset_WB(i0) + 0x4*i1; }
+#define MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__MASK 0x000000ff
+#define MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__SHIFT) & MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__MASK;
+}
+#define MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__MASK 0x0000ff00
+#define MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__SHIFT 8
+static inline uint32_t MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__SHIFT) & MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTCLAMP(uint32_t i0, uint32_t i1) { return 0x00000280 + __offset_WB(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTCLAMP_REG(uint32_t i0, uint32_t i1) { return 0x00000280 + __offset_WB(i0) + 0x4*i1; }
+#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__MASK 0x000000ff
+#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__SHIFT) & MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__MASK;
+}
+#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__MASK 0x0000ff00
+#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__SHIFT 8
+static inline uint32_t MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__SHIFT) & MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_CSC_COMP_PREBIAS(uint32_t i0, uint32_t i1) { return 0x0000028c + __offset_WB(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_WB_CSC_COMP_PREBIAS_REG(uint32_t i0, uint32_t i1) { return 0x0000028c + __offset_WB(i0) + 0x4*i1; }
+#define MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__MASK 0x000001ff
+#define MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__SHIFT) & MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTBIAS(uint32_t i0, uint32_t i1) { return 0x00000298 + __offset_WB(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTBIAS_REG(uint32_t i0, uint32_t i1) { return 0x00000298 + __offset_WB(i0) + 0x4*i1; }
+#define MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__MASK 0x000001ff
+#define MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__SHIFT) & MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__MASK;
+}
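+
+/* The CSC clamp/bias registers above come in per-component arrays: i1
+ * selects the color component, stepping 0x4 per register from the base
+ * offset.
+ */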
+
+static inline uint32_t __offset_INTF(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->intf.base[0]);
+ case 1: return (mdp5_cfg->intf.base[1]);
+ case 2: return (mdp5_cfg->intf.base[2]);
+ case 3: return (mdp5_cfg->intf.base[3]);
+ case 4: return (mdp5_cfg->intf.base[4]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_INTF(uint32_t i0) { return 0x00000000 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_TIMING_ENGINE_EN(uint32_t i0) { return 0x00000000 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_CONFIG(uint32_t i0) { return 0x00000004 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_HSYNC_CTL(uint32_t i0) { return 0x00000008 + __offset_INTF(i0); }
+#define MDP5_INTF_HSYNC_CTL_PULSEW__MASK 0x0000ffff
+#define MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT 0
+static inline uint32_t MDP5_INTF_HSYNC_CTL_PULSEW(uint32_t val)
+{
+ return ((val) << MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT) & MDP5_INTF_HSYNC_CTL_PULSEW__MASK;
+}
+#define MDP5_INTF_HSYNC_CTL_PERIOD__MASK 0xffff0000
+#define MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT 16
+static inline uint32_t MDP5_INTF_HSYNC_CTL_PERIOD(uint32_t val)
+{
+ return ((val) << MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT) & MDP5_INTF_HSYNC_CTL_PERIOD__MASK;
+}
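+
+/* Illustrative video-mode timing setup (a sketch; hsync_period and
+ * hsync_pulse_w are assumed locals derived from the drm_display_mode):
+ *
+ *   mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf),
+ *              MDP5_INTF_HSYNC_CTL_PULSEW(hsync_pulse_w) |
+ *              MDP5_INTF_HSYNC_CTL_PERIOD(hsync_period));
+ */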
+
+static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F0(uint32_t i0) { return 0x0000000c + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F1(uint32_t i0) { return 0x00000010 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F0(uint32_t i0) { return 0x00000014 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F1(uint32_t i0) { return 0x00000018 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F0(uint32_t i0) { return 0x0000001c + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F1(uint32_t i0) { return 0x00000020 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F0(uint32_t i0) { return 0x00000024 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F1(uint32_t i0) { return 0x00000028 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F0(uint32_t i0) { return 0x0000002c + __offset_INTF(i0); }
+#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK 0x7fffffff
+#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT 0
+static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F0_VAL(uint32_t val)
+{
+ return ((val) << MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK;
+}
+#define MDP5_INTF_ACTIVE_VSTART_F0_ACTIVE_V_ENABLE 0x80000000
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F1(uint32_t i0) { return 0x00000030 + __offset_INTF(i0); }
+#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK 0x7fffffff
+#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT 0
+static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F1_VAL(uint32_t val)
+{
+ return ((val) << MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK;
+}
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F0(uint32_t i0) { return 0x00000034 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F1(uint32_t i0) { return 0x00000038 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_HCTL(uint32_t i0) { return 0x0000003c + __offset_INTF(i0); }
+#define MDP5_INTF_DISPLAY_HCTL_START__MASK 0x0000ffff
+#define MDP5_INTF_DISPLAY_HCTL_START__SHIFT 0
+static inline uint32_t MDP5_INTF_DISPLAY_HCTL_START(uint32_t val)
+{
+ return ((val) << MDP5_INTF_DISPLAY_HCTL_START__SHIFT) & MDP5_INTF_DISPLAY_HCTL_START__MASK;
+}
+#define MDP5_INTF_DISPLAY_HCTL_END__MASK 0xffff0000
+#define MDP5_INTF_DISPLAY_HCTL_END__SHIFT 16
+static inline uint32_t MDP5_INTF_DISPLAY_HCTL_END(uint32_t val)
+{
+ return ((val) << MDP5_INTF_DISPLAY_HCTL_END__SHIFT) & MDP5_INTF_DISPLAY_HCTL_END__MASK;
+}
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_HCTL(uint32_t i0) { return 0x00000040 + __offset_INTF(i0); }
+#define MDP5_INTF_ACTIVE_HCTL_START__MASK 0x00007fff
+#define MDP5_INTF_ACTIVE_HCTL_START__SHIFT 0
+static inline uint32_t MDP5_INTF_ACTIVE_HCTL_START(uint32_t val)
+{
+ return ((val) << MDP5_INTF_ACTIVE_HCTL_START__SHIFT) & MDP5_INTF_ACTIVE_HCTL_START__MASK;
+}
+#define MDP5_INTF_ACTIVE_HCTL_END__MASK 0x7fff0000
+#define MDP5_INTF_ACTIVE_HCTL_END__SHIFT 16
+static inline uint32_t MDP5_INTF_ACTIVE_HCTL_END(uint32_t val)
+{
+ return ((val) << MDP5_INTF_ACTIVE_HCTL_END__SHIFT) & MDP5_INTF_ACTIVE_HCTL_END__MASK;
+}
+#define MDP5_INTF_ACTIVE_HCTL_ACTIVE_H_ENABLE 0x80000000
+
+static inline uint32_t REG_MDP5_INTF_BORDER_COLOR(uint32_t i0) { return 0x00000044 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_UNDERFLOW_COLOR(uint32_t i0) { return 0x00000048 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_HSYNC_SKEW(uint32_t i0) { return 0x0000004c + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_POLARITY_CTL(uint32_t i0) { return 0x00000050 + __offset_INTF(i0); }
+#define MDP5_INTF_POLARITY_CTL_HSYNC_LOW 0x00000001
+#define MDP5_INTF_POLARITY_CTL_VSYNC_LOW 0x00000002
+#define MDP5_INTF_POLARITY_CTL_DATA_EN_LOW 0x00000004
+
+static inline uint32_t REG_MDP5_INTF_TEST_CTL(uint32_t i0) { return 0x00000054 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_TP_COLOR0(uint32_t i0) { return 0x00000058 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_TP_COLOR1(uint32_t i0) { return 0x0000005c + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_DSI_CMD_MODE_TRIGGER_EN(uint32_t i0) { return 0x00000084 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_PANEL_FORMAT(uint32_t i0) { return 0x00000090 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_FRAME_LINE_COUNT_EN(uint32_t i0) { return 0x000000a8 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_FRAME_COUNT(uint32_t i0) { return 0x000000ac + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_LINE_COUNT(uint32_t i0) { return 0x000000b0 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_CONFIG(uint32_t i0) { return 0x000000f0 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_STRNG_COEFF(uint32_t i0) { return 0x000000f4 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_WEAK_COEFF(uint32_t i0) { return 0x000000f8 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_TPG_ENABLE(uint32_t i0) { return 0x00000100 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_TPG_MAIN_CONTROL(uint32_t i0) { return 0x00000104 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_TPG_VIDEO_CONFIG(uint32_t i0) { return 0x00000108 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_TPG_COMPONENT_LIMITS(uint32_t i0) { return 0x0000010c + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_TPG_RECTANGLE(uint32_t i0) { return 0x00000110 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_TPG_INITIAL_VALUE(uint32_t i0) { return 0x00000114 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_TPG_BLK_WHITE_PATTERN_FRAME(uint32_t i0) { return 0x00000118 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_TPG_RGB_MAPPING(uint32_t i0) { return 0x0000011c + __offset_INTF(i0); }
+
+static inline uint32_t __offset_AD(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->ad.base[0]);
+ case 1: return (mdp5_cfg->ad.base[1]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_AD(uint32_t i0) { return 0x00000000 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_BYPASS(uint32_t i0) { return 0x00000000 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_CTRL_0(uint32_t i0) { return 0x00000004 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_CTRL_1(uint32_t i0) { return 0x00000008 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_FRAME_SIZE(uint32_t i0) { return 0x0000000c + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_CON_CTRL_0(uint32_t i0) { return 0x00000010 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_CON_CTRL_1(uint32_t i0) { return 0x00000014 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_STR_MAN(uint32_t i0) { return 0x00000018 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_VAR(uint32_t i0) { return 0x0000001c + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_DITH(uint32_t i0) { return 0x00000020 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_DITH_CTRL(uint32_t i0) { return 0x00000024 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_AMP_LIM(uint32_t i0) { return 0x00000028 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_SLOPE(uint32_t i0) { return 0x0000002c + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_BW_LVL(uint32_t i0) { return 0x00000030 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_LOGO_POS(uint32_t i0) { return 0x00000034 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_LUT_FI(uint32_t i0) { return 0x00000038 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_LUT_CC(uint32_t i0) { return 0x0000007c + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_STR_LIM(uint32_t i0) { return 0x000000c8 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_CALIB_AB(uint32_t i0) { return 0x000000cc + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_CALIB_CD(uint32_t i0) { return 0x000000d0 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_MODE_SEL(uint32_t i0) { return 0x000000d4 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_TFILT_CTRL(uint32_t i0) { return 0x000000d8 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_BL_MINMAX(uint32_t i0) { return 0x000000dc + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_BL(uint32_t i0) { return 0x000000e0 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_BL_MAX(uint32_t i0) { return 0x000000e8 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_AL(uint32_t i0) { return 0x000000ec + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_AL_MIN(uint32_t i0) { return 0x000000f0 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_AL_FILT(uint32_t i0) { return 0x000000f4 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_CFG_BUF(uint32_t i0) { return 0x000000f8 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_LUT_AL(uint32_t i0) { return 0x00000100 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_TARG_STR(uint32_t i0) { return 0x00000144 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_START_CALC(uint32_t i0) { return 0x00000148 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_STR_OUT(uint32_t i0) { return 0x0000014c + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_BL_OUT(uint32_t i0) { return 0x00000154 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_CALC_DONE(uint32_t i0) { return 0x00000158 + __offset_AD(i0); }
+
+#endif /* MDP5_XML */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
new file mode 100644
index 0000000000..694d543413
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
@@ -0,0 +1,1415 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ */
+
+#include "mdp5_kms.h"
+#include "mdp5_cfg.h"
+
+struct mdp5_cfg_handler {
+ int revision;
+ struct mdp5_cfg config;
+};
+
+/* mdp5_cfg must have external linkage: the __offset_*() helpers in mdp5.xml.h dereference it */
+const struct mdp5_cfg_hw *mdp5_cfg = NULL;
+
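+/* Each mdp5_cfg_hw table below describes one SoC's MDP5 block layout:
+ * instance counts, per-instance register bases (consumed by the
+ * __offset_*() helpers in mdp5.xml.h), pipe/mixer capabilities, and
+ * clock limits. The matching table is selected at probe time from the
+ * hardware revision register.
+ */
+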
+static const struct mdp5_cfg_hw msm8x74v1_config = {
+ .name = "msm8x74v1",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_SMP |
+ 0,
+ },
+ .smp = {
+ .mmb_count = 22,
+ .mmb_size = 4096,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, [SSPP_VIG2] = 7,
+ [SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
+ [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, [SSPP_RGB2] = 18,
+ },
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 },
+ .flush_hw_mask = 0x0003ffff,
+ },
+ .pipe_vig = {
+ .count = 3,
+ .base = { 0x01100, 0x01500, 0x01900 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 3,
+ .base = { 0x01d00, 0x02100, 0x02500 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 2,
+ .base = { 0x02900, 0x02d00 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ 0,
+ },
+ .lm = {
+ .count = 5,
+ .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 1, .pp = 1, .dspp = 1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 2, .pp = 2, .dspp = 2,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB },
+ { .id = 4, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB },
+ },
+ .nb_stages = 5,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 3,
+ .base = { 0x04500, 0x04900, 0x04d00 },
+ },
+ .pp = {
+ .count = 3,
+ .base = { 0x21a00, 0x21b00, 0x21c00 },
+ },
+ .intf = {
+ .base = { 0x21000, 0x21200, 0x21400, 0x21600 },
+ .connect = {
+ [0] = INTF_eDP,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ [3] = INTF_HDMI,
+ },
+ },
+ .perf = {
+ .ab_inefficiency = 200,
+ .ib_inefficiency = 120,
+ .clk_inefficiency = 125
+ },
+ .max_clk = 200000000,
+};
+
+static const struct mdp5_cfg_hw msm8x26_config = {
+ .name = "msm8x26",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_SMP |
+ 0,
+ },
+ .smp = {
+ .mmb_count = 7,
+ .mmb_size = 4096,
+ .clients = {
+ [SSPP_VIG0] = 1,
+ [SSPP_DMA0] = 4,
+ [SSPP_RGB0] = 7,
+ },
+ },
+ .ctl = {
+ .count = 2,
+ .base = { 0x00500, 0x00600 },
+ .flush_hw_mask = 0x0003ffff,
+ },
+ .pipe_vig = {
+ .count = 1,
+ .base = { 0x01100 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 1,
+ .base = { 0x01d00 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 1,
+ .base = { 0x02900 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ 0,
+ },
+ .lm = {
+ .count = 2,
+ .base = { 0x03100, 0x03d00 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 1, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB },
+ },
+ .nb_stages = 2,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x04500 },
+ },
+ .pp = {
+ .count = 1,
+ .base = { 0x21a00 },
+ },
+ .intf = {
+ .base = { 0x00000, 0x21200 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ },
+ },
+ .perf = {
+ .ab_inefficiency = 100,
+ .ib_inefficiency = 200,
+ .clk_inefficiency = 125
+ },
+ .max_clk = 200000000,
+};
+
+static const struct mdp5_cfg_hw msm8x74v2_config = {
+ .name = "msm8x74",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_SMP |
+ 0,
+ },
+ .smp = {
+ .mmb_count = 22,
+ .mmb_size = 4096,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, [SSPP_VIG2] = 7,
+ [SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
+ [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, [SSPP_RGB2] = 18,
+ },
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 },
+ .flush_hw_mask = 0x0003ffff,
+ },
+ .pipe_vig = {
+ .count = 3,
+ .base = { 0x01100, 0x01500, 0x01900 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_rgb = {
+ .count = 3,
+ .base = { 0x01d00, 0x02100, 0x02500 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_dma = {
+ .count = 2,
+ .base = { 0x02900, 0x02d00 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
+ },
+ .lm = {
+ .count = 5,
+ .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 1, .pp = 1, .dspp = 1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 2, .pp = 2, .dspp = 2,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 4, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ },
+ .nb_stages = 5,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 3,
+ .base = { 0x04500, 0x04900, 0x04d00 },
+ },
+ .ad = {
+ .count = 2,
+ .base = { 0x13000, 0x13200 },
+ },
+ .pp = {
+ .count = 3,
+ .base = { 0x12c00, 0x12d00, 0x12e00 },
+ },
+ .intf = {
+ .base = { 0x12400, 0x12600, 0x12800, 0x12a00 },
+ .connect = {
+ [0] = INTF_eDP,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ [3] = INTF_HDMI,
+ },
+ },
+ .perf = {
+ .ab_inefficiency = 200,
+ .ib_inefficiency = 120,
+ .clk_inefficiency = 125
+ },
+ .max_clk = 320000000,
+};
+
+static const struct mdp5_cfg_hw apq8084_config = {
+ .name = "apq8084",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_SMP |
+ MDP_CAP_SRC_SPLIT |
+ 0,
+ },
+ .smp = {
+ .mmb_count = 44,
+ .mmb_size = 8192,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_VIG1] = 4,
+ [SSPP_VIG2] = 7, [SSPP_VIG3] = 19,
+ [SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
+ [SSPP_RGB0] = 16, [SSPP_RGB1] = 17,
+ [SSPP_RGB2] = 18, [SSPP_RGB3] = 22,
+ },
+ .reserved_state[0] = GENMASK(7, 0), /* first 8 MMBs */
+ .reserved = {
+ /* Two SMP blocks are statically tied to RGB pipes: */
+ [16] = 2, [17] = 2, [18] = 2, [22] = 2,
+ },
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 },
+ .flush_hw_mask = 0x003fffff,
+ },
+ .pipe_vig = {
+ .count = 4,
+ .base = { 0x01100, 0x01500, 0x01900, 0x01d00 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_rgb = {
+ .count = 4,
+ .base = { 0x02100, 0x02500, 0x02900, 0x02d00 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_dma = {
+ .count = 2,
+ .base = { 0x03100, 0x03500 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
+ },
+ .lm = {
+ .count = 6,
+ .base = { 0x03900, 0x03d00, 0x04100, 0x04500, 0x04900, 0x04d00 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 1, .pp = 1, .dspp = 1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 2, .pp = 2, .dspp = 2,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 4, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 5, .pp = 3, .dspp = 3,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ },
+ .nb_stages = 5,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 4,
+ .base = { 0x05100, 0x05500, 0x05900, 0x05d00 },
+ },
+ .ad = {
+ .count = 3,
+ .base = { 0x13400, 0x13600, 0x13800 },
+ },
+ .pp = {
+ .count = 4,
+ .base = { 0x12e00, 0x12f00, 0x13000, 0x13100 },
+ },
+ .intf = {
+ .base = { 0x12400, 0x12600, 0x12800, 0x12a00, 0x12c00 },
+ .connect = {
+ [0] = INTF_eDP,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ [3] = INTF_HDMI,
+ },
+ },
+ .perf = {
+ .ab_inefficiency = 200,
+ .ib_inefficiency = 120,
+ .clk_inefficiency = 105
+ },
+ .max_clk = 320000000,
+};
+
+static const struct mdp5_cfg_hw msm8x16_config = {
+ .name = "msm8x16",
+ .mdp = {
+ .count = 1,
+ .base = { 0x0 },
+ .caps = MDP_CAP_SMP |
+ 0,
+ },
+ .smp = {
+ .mmb_count = 8,
+ .mmb_size = 8192,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_DMA0] = 4,
+ [SSPP_RGB0] = 7, [SSPP_RGB1] = 8,
+ },
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
+ .flush_hw_mask = 0x4003ffff,
+ },
+ .pipe_vig = {
+ .count = 1,
+ .base = { 0x04000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_rgb = {
+ .count = 2,
+ .base = { 0x14000, 0x16000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_dma = {
+ .count = 1,
+ .base = { 0x24000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
+ },
+ .lm = {
+ .count = 2, /* LM0 and LM3 */
+ .base = { 0x44000, 0x47000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB },
+ },
+ .nb_stages = 8,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x54000 },
+ },
+ .intf = {
+ .base = { 0x00000, 0x6a800 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ },
+ },
+ .perf = {
+ .ab_inefficiency = 100,
+ .ib_inefficiency = 200,
+ .clk_inefficiency = 105
+ },
+ .max_clk = 320000000,
+};
+
+static const struct mdp5_cfg_hw msm8x36_config = {
+ .name = "msm8x36",
+ .mdp = {
+ .count = 1,
+ .base = { 0x0 },
+ .caps = MDP_CAP_SMP |
+ 0,
+ },
+ .smp = {
+ .mmb_count = 8,
+ .mmb_size = 10240,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_DMA0] = 4,
+ [SSPP_RGB0] = 7, [SSPP_RGB1] = 8,
+ },
+ },
+ .ctl = {
+ .count = 3,
+ .base = { 0x01000, 0x01200, 0x01400 },
+ .flush_hw_mask = 0x4003ffff,
+ },
+ .pipe_vig = {
+ .count = 1,
+ .base = { 0x04000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_rgb = {
+ .count = 2,
+ .base = { 0x14000, 0x16000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_dma = {
+ .count = 1,
+ .base = { 0x24000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
+ },
+ .lm = {
+ .count = 2,
+ .base = { 0x44000, 0x47000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 1, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ },
+ .nb_stages = 8,
+ .max_width = 2560,
+ .max_height = 0xFFFF,
+ },
+ .pp = {
+ .count = 1,
+ .base = { 0x70000 },
+ },
+ .ad = {
+ .count = 1,
+ .base = { 0x78000 },
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x54000 },
+ },
+ .intf = {
+ .base = { 0x00000, 0x6a800, 0x6b000 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ },
+ },
+ .perf = {
+ .ab_inefficiency = 100,
+ .ib_inefficiency = 200,
+ .clk_inefficiency = 105
+ },
+ .max_clk = 366670000,
+};
+
+static const struct mdp5_cfg_hw msm8x94_config = {
+ .name = "msm8x94",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_SMP |
+ MDP_CAP_SRC_SPLIT |
+ 0,
+ },
+ .smp = {
+ .mmb_count = 44,
+ .mmb_size = 8192,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_VIG1] = 4,
+ [SSPP_VIG2] = 7, [SSPP_VIG3] = 19,
+ [SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
+ [SSPP_RGB0] = 16, [SSPP_RGB1] = 17,
+ [SSPP_RGB2] = 18, [SSPP_RGB3] = 22,
+ },
+ .reserved_state[0] = GENMASK(23, 0), /* first 24 MMBs */
+ .reserved = {
+ [1] = 1, [4] = 1, [7] = 1, [19] = 1,
+ [16] = 5, [17] = 5, [18] = 5, [22] = 5,
+ },
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
+ .flush_hw_mask = 0xf0ffffff,
+ },
+ .pipe_vig = {
+ .count = 4,
+ .base = { 0x04000, 0x06000, 0x08000, 0x0a000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_rgb = {
+ .count = 4,
+ .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_dma = {
+ .count = 2,
+ .base = { 0x24000, 0x26000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
+ },
+ .lm = {
+ .count = 6,
+ .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 1, .pp = 1, .dspp = 1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 2, .pp = 2, .dspp = 2,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 4, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 5, .pp = 3, .dspp = 3,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ },
+ .nb_stages = 8,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 4,
+ .base = { 0x54000, 0x56000, 0x58000, 0x5a000 },
+ },
+ .ad = {
+ .count = 3,
+ .base = { 0x78000, 0x78800, 0x79000 },
+ },
+ .pp = {
+ .count = 4,
+ .base = { 0x70000, 0x70800, 0x71000, 0x71800 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ [3] = INTF_HDMI,
+ },
+ },
+ .perf = {
+ .ab_inefficiency = 100,
+ .ib_inefficiency = 100,
+ .clk_inefficiency = 105
+ },
+ .max_clk = 400000000,
+};
+
+static const struct mdp5_cfg_hw msm8x96_config = {
+ .name = "msm8x96",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_DSC |
+ MDP_CAP_CDM |
+ MDP_CAP_SRC_SPLIT |
+ 0,
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
+ .flush_hw_mask = 0xf4ffffff,
+ },
+ .pipe_vig = {
+ .count = 4,
+ .base = { 0x04000, 0x06000, 0x08000, 0x0a000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 4,
+ .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 2,
+ .base = { 0x24000, 0x26000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_cursor = {
+ .count = 2,
+ .base = { 0x34000, 0x36000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ MDP_PIPE_CAP_CURSOR |
+ 0,
+ },
+
+ .lm = {
+ .count = 6,
+ .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 1, .pp = 1, .dspp = 1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 2, .pp = 2, .dspp = -1,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 4, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 5, .pp = 3, .dspp = -1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ },
+ .nb_stages = 8,
+ .max_width = 2560,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 2,
+ .base = { 0x54000, 0x56000 },
+ },
+ .ad = {
+ .count = 3,
+ .base = { 0x78000, 0x78800, 0x79000 },
+ },
+ .pp = {
+ .count = 4,
+ .base = { 0x70000, 0x70800, 0x71000, 0x71800 },
+ },
+ .cdm = {
+ .count = 1,
+ .base = { 0x79200 },
+ },
+ .dsc = {
+ .count = 2,
+ .base = { 0x80000, 0x80400 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ [3] = INTF_HDMI,
+ },
+ },
+ .perf = {
+ .ab_inefficiency = 100,
+ .ib_inefficiency = 200,
+ .clk_inefficiency = 105
+ },
+ .max_clk = 412500000,
+};
+
+static const struct mdp5_cfg_hw msm8x76_config = {
+ .name = "msm8x76",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_SMP |
+ MDP_CAP_DSC |
+ MDP_CAP_SRC_SPLIT |
+ 0,
+ },
+ .ctl = {
+ .count = 3,
+ .base = { 0x01000, 0x01200, 0x01400 },
+ .flush_hw_mask = 0xffffffff,
+ },
+ .smp = {
+ .mmb_count = 10,
+ .mmb_size = 10240,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_VIG1] = 9,
+ [SSPP_DMA0] = 4,
+ [SSPP_RGB0] = 7, [SSPP_RGB1] = 8,
+ },
+ },
+ .pipe_vig = {
+ .count = 2,
+ .base = { 0x04000, 0x06000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 2,
+ .base = { 0x14000, 0x16000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 1,
+ .base = { 0x24000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_cursor = {
+ .count = 1,
+ .base = { 0x440DC },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ MDP_PIPE_CAP_CURSOR |
+ 0,
+ },
+ .lm = {
+ .count = 2,
+ .base = { 0x44000, 0x45000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 1, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB },
+ },
+ .nb_stages = 8,
+ .max_width = 2560,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x54000 },
+ },
+ .pp = {
+ .count = 3,
+ .base = { 0x70000, 0x70800, 0x72000 },
+ },
+ .dsc = {
+ .count = 2,
+ .base = { 0x80000, 0x80400 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800, 0x6b000 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ },
+ },
+ .max_clk = 360000000,
+};
+
+static const struct mdp5_cfg_hw msm8x53_config = {
+ .name = "msm8x53",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_CDM |
+ MDP_CAP_SRC_SPLIT,
+ },
+ .ctl = {
+ .count = 3,
+ .base = { 0x01000, 0x01200, 0x01400 },
+ .flush_hw_mask = 0xffffffff,
+ },
+ .pipe_vig = {
+ .count = 1,
+ .base = { 0x04000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 2,
+ .base = { 0x14000, 0x16000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 1,
+ .base = { 0x24000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_cursor = {
+ .count = 1,
+ .base = { 0x34000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ MDP_PIPE_CAP_CURSOR |
+ 0,
+ },
+
+ .lm = {
+ .count = 3,
+ .base = { 0x44000, 0x45000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR },
+ { .id = 1, .pp = 1, .dspp = -1,
+ .caps = MDP_LM_CAP_DISPLAY },
+ },
+ .nb_stages = 5,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x54000 },
+ },
+ .pp = {
+ .count = 2,
+ .base = { 0x70000, 0x70800 },
+ },
+ .cdm = {
+ .count = 1,
+ .base = { 0x79200 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800, 0x6b000 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ },
+ },
+ .perf = {
+ .ab_inefficiency = 100,
+ .ib_inefficiency = 200,
+ .clk_inefficiency = 105
+ },
+ .max_clk = 400000000,
+};
+
+static const struct mdp5_cfg_hw msm8917_config = {
+ .name = "msm8917",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_CDM,
+ },
+ .ctl = {
+ .count = 3,
+ .base = { 0x01000, 0x01200, 0x01400 },
+ .flush_hw_mask = 0xffffffff,
+ },
+ .pipe_vig = {
+ .count = 1,
+ .base = { 0x04000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 2,
+ .base = { 0x14000, 0x16000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 1,
+ .base = { 0x24000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_cursor = {
+ .count = 1,
+ .base = { 0x34000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ MDP_PIPE_CAP_CURSOR |
+ 0,
+ },
+
+ .lm = {
+ .count = 2,
+ .base = { 0x44000, 0x45000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 1, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB },
+ },
+ .nb_stages = 8,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x54000 },
+ },
+ .pp = {
+ .count = 1,
+ .base = { 0x70000 },
+ },
+ .cdm = {
+ .count = 1,
+ .base = { 0x79200 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ },
+ },
+ .max_clk = 320000000,
+};
+
+static const struct mdp5_cfg_hw msm8998_config = {
+ .name = "msm8998",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_DSC |
+ MDP_CAP_CDM |
+ MDP_CAP_SRC_SPLIT |
+ 0,
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
+ .flush_hw_mask = 0xf7ffffff,
+ },
+ .pipe_vig = {
+ .count = 4,
+ .base = { 0x04000, 0x06000, 0x08000, 0x0a000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 4,
+ .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 2, /* driver supports max of 2 currently */
+ .base = { 0x24000, 0x26000, 0x28000, 0x2a000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_cursor = {
+ .count = 2,
+ .base = { 0x34000, 0x36000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ MDP_PIPE_CAP_CURSOR |
+ 0,
+ },
+
+ .lm = {
+ .count = 6,
+ .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 1, .pp = 1, .dspp = 1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 2, .pp = 2, .dspp = -1,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 4, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 5, .pp = 3, .dspp = -1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ },
+ .nb_stages = 8,
+ .max_width = 2560,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 2,
+ .base = { 0x54000, 0x56000 },
+ },
+ .ad = {
+ .count = 3,
+ .base = { 0x78000, 0x78800, 0x79000 },
+ },
+ .pp = {
+ .count = 4,
+ .base = { 0x70000, 0x70800, 0x71000, 0x71800 },
+ },
+ .cdm = {
+ .count = 1,
+ .base = { 0x79200 },
+ },
+ .dsc = {
+ .count = 2,
+ .base = { 0x80000, 0x80400 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 },
+ .connect = {
+ [0] = INTF_eDP,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ [3] = INTF_HDMI,
+ },
+ },
+ .max_clk = 412500000,
+};
+
+static const struct mdp5_cfg_hw sdm630_config = {
+ .name = "sdm630",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_CDM |
+ MDP_CAP_SRC_SPLIT |
+ 0,
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
+ .flush_hw_mask = 0xf4ffffff,
+ },
+ .pipe_vig = {
+ .count = 1,
+ .base = { 0x04000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 4,
+ .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 2, /* driver supports max of 2 currently */
+ .base = { 0x24000, 0x26000, 0x28000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_cursor = {
+ .count = 1,
+ .base = { 0x34000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ MDP_PIPE_CAP_CURSOR |
+ 0,
+ },
+
+ .lm = {
+ .count = 2,
+ .base = { 0x44000, 0x46000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 1, .pp = 1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ },
+ .nb_stages = 8,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x54000 },
+ },
+ .ad = {
+ .count = 2,
+ .base = { 0x78000, 0x78800 },
+ },
+ .pp = {
+ .count = 3,
+ .base = { 0x70000, 0x71000, 0x72000 },
+ },
+ .cdm = {
+ .count = 1,
+ .base = { 0x79200 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ },
+ },
+ .max_clk = 412500000,
+};
+
+static const struct mdp5_cfg_hw sdm660_config = {
+ .name = "sdm660",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_DSC |
+ MDP_CAP_CDM |
+ MDP_CAP_SRC_SPLIT |
+ 0,
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
+ .flush_hw_mask = 0xf4ffffff,
+ },
+ .pipe_vig = {
+ .count = 2,
+		.base = { 0x04000, 0x06000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 4,
+ .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 2, /* driver supports max of 2 currently */
+ .base = { 0x24000, 0x26000, 0x28000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_cursor = {
+ .count = 1,
+ .base = { 0x34000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ MDP_PIPE_CAP_CURSOR |
+ 0,
+ },
+
+ .lm = {
+ .count = 4,
+ .base = { 0x44000, 0x45000, 0x46000, 0x49000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 1, .pp = 1, .dspp = 1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 2, .pp = 2, .dspp = -1,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 3, .pp = 3, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ },
+ .nb_stages = 8,
+ .max_width = 2560,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 2,
+ .base = { 0x54000, 0x56000 },
+ },
+ .ad = {
+ .count = 2,
+ .base = { 0x78000, 0x78800 },
+ },
+ .pp = {
+ .count = 5,
+ .base = { 0x70000, 0x70800, 0x71000, 0x71800, 0x72000 },
+ },
+ .cdm = {
+ .count = 1,
+ .base = { 0x79200 },
+ },
+ .dsc = {
+ .count = 2,
+ .base = { 0x80000, 0x80400 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ [3] = INTF_HDMI,
+ },
+ },
+ .max_clk = 412500000,
+};
+
+static const struct mdp5_cfg_handler cfg_handlers_v1[] = {
+ { .revision = 0, .config = { .hw = &msm8x74v1_config } },
+ { .revision = 1, .config = { .hw = &msm8x26_config } },
+ { .revision = 2, .config = { .hw = &msm8x74v2_config } },
+ { .revision = 3, .config = { .hw = &apq8084_config } },
+ { .revision = 6, .config = { .hw = &msm8x16_config } },
+ { .revision = 8, .config = { .hw = &msm8x36_config } },
+ { .revision = 9, .config = { .hw = &msm8x94_config } },
+ { .revision = 7, .config = { .hw = &msm8x96_config } },
+ { .revision = 11, .config = { .hw = &msm8x76_config } },
+ { .revision = 15, .config = { .hw = &msm8917_config } },
+ { .revision = 16, .config = { .hw = &msm8x53_config } },
+};
+
+static const struct mdp5_cfg_handler cfg_handlers_v3[] = {
+ { .revision = 0, .config = { .hw = &msm8998_config } },
+ { .revision = 2, .config = { .hw = &sdm660_config } },
+ { .revision = 3, .config = { .hw = &sdm630_config } },
+};
+
+const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_handler)
+{
+ return cfg_handler->config.hw;
+}
+
+struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_handler)
+{
+ return &cfg_handler->config;
+}
+
+int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_handler)
+{
+ return cfg_handler->revision;
+}
+
+void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_handler)
+{
+ kfree(cfg_handler);
+}
+
+struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
+ uint32_t major, uint32_t minor)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ struct mdp5_cfg_handler *cfg_handler;
+ const struct mdp5_cfg_handler *cfg_handlers;
+ int i, ret = 0, num_handlers;
+
+ cfg_handler = kzalloc(sizeof(*cfg_handler), GFP_KERNEL);
+ if (unlikely(!cfg_handler)) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ switch (major) {
+ case 1:
+ cfg_handlers = cfg_handlers_v1;
+ num_handlers = ARRAY_SIZE(cfg_handlers_v1);
+ break;
+ case 3:
+ cfg_handlers = cfg_handlers_v3;
+ num_handlers = ARRAY_SIZE(cfg_handlers_v3);
+ break;
+ default:
+ DRM_DEV_ERROR(dev->dev, "unexpected MDP major version: v%d.%d\n",
+ major, minor);
+ ret = -ENXIO;
+ goto fail;
+ }
+
+	/* the hw must not be accessed before the global mdp5_cfg pointer is initialized */
+ for (i = 0; i < num_handlers; i++) {
+ if (cfg_handlers[i].revision != minor)
+ continue;
+ mdp5_cfg = cfg_handlers[i].config.hw;
+
+ break;
+ }
+ if (unlikely(!mdp5_cfg)) {
+ DRM_DEV_ERROR(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
+ major, minor);
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ cfg_handler->revision = minor;
+ cfg_handler->config.hw = mdp5_cfg;
+
+ DBG("MDP5: %s hw config selected", mdp5_cfg->name);
+
+ return cfg_handler;
+
+fail:
+ if (cfg_handler)
+ mdp5_cfg_destroy(cfg_handler);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
new file mode 100644
index 0000000000..c2502cc338
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MDP5_CFG_H__
+#define __MDP5_CFG_H__
+
+#include "msm_drv.h"
+
+/*
+ * mdp5_cfg
+ *
+ * This module configures the dynamic offsets used by mdp5.xml.h
+ * (initialized in mdp5_cfg.c)
+ */
+extern const struct mdp5_cfg_hw *mdp5_cfg;
+
+#define MAX_CTL 8
+#define MAX_BASES 8
+#define MAX_SMP_BLOCKS 44
+#define MAX_CLIENTS 32
+
+typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
+
+#define MDP5_SUB_BLOCK_DEFINITION \
+ unsigned int count; \
+ uint32_t base[MAX_BASES]
+
+struct mdp5_sub_block {
+ MDP5_SUB_BLOCK_DEFINITION;
+};
+
+struct mdp5_lm_instance {
+ int id;
+ int pp;
+ int dspp;
+ uint32_t caps;
+};
+
+struct mdp5_lm_block {
+ MDP5_SUB_BLOCK_DEFINITION;
+ struct mdp5_lm_instance instances[MAX_BASES];
+ uint32_t nb_stages; /* number of stages per blender */
+ uint32_t max_width; /* Maximum output resolution */
+ uint32_t max_height;
+};
+
+struct mdp5_pipe_block {
+ MDP5_SUB_BLOCK_DEFINITION;
+ uint32_t caps; /* pipe capabilities */
+};
+
+struct mdp5_ctl_block {
+ MDP5_SUB_BLOCK_DEFINITION;
+ uint32_t flush_hw_mask; /* FLUSH register's hardware mask */
+};
+
+struct mdp5_smp_block {
+ int mmb_count; /* number of SMP MMBs */
+ int mmb_size; /* MMB: size in bytes */
+	uint32_t clients[MAX_CLIENTS];	/* SMP port allocation per pipe */
+ mdp5_smp_state_t reserved_state;/* SMP MMBs statically allocated */
+ uint8_t reserved[MAX_CLIENTS]; /* # of MMBs allocated per client */
+};
+
+struct mdp5_mdp_block {
+ MDP5_SUB_BLOCK_DEFINITION;
+ uint32_t caps; /* MDP capabilities: MDP_CAP_xxx bits */
+};
+
+#define MDP5_INTF_NUM_MAX 5
+
+struct mdp5_intf_block {
+ uint32_t base[MAX_BASES];
+ u32 connect[MDP5_INTF_NUM_MAX]; /* array of enum mdp5_intf_type */
+};
+
+struct mdp5_perf_block {
+ u32 ab_inefficiency;
+ u32 ib_inefficiency;
+ u32 clk_inefficiency;
+};
+
+struct mdp5_cfg_hw {
+ char *name;
+
+ struct mdp5_mdp_block mdp;
+ struct mdp5_smp_block smp;
+ struct mdp5_ctl_block ctl;
+ struct mdp5_pipe_block pipe_vig;
+ struct mdp5_pipe_block pipe_rgb;
+ struct mdp5_pipe_block pipe_dma;
+ struct mdp5_pipe_block pipe_cursor;
+ struct mdp5_lm_block lm;
+ struct mdp5_sub_block dspp;
+ struct mdp5_sub_block ad;
+ struct mdp5_sub_block pp;
+ struct mdp5_sub_block dsc;
+ struct mdp5_sub_block cdm;
+ struct mdp5_intf_block intf;
+ struct mdp5_perf_block perf;
+
+ uint32_t max_clk;
+};
+
+struct mdp5_cfg {
+ const struct mdp5_cfg_hw *hw;
+};
+
+struct mdp5_kms;
+struct mdp5_cfg_handler;
+
+const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_hnd);
+struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_hnd);
+int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_hnd);
+
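+/* virtual interfaces (e.g. writeback) have no physical INTF hw block */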
+#define mdp5_cfg_intf_is_virtual(intf_type) ({ \
+ typeof(intf_type) __val = (intf_type); \
+ (__val) >= INTF_VIRTUAL ? true : false; })
+
+struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
+ uint32_t major, uint32_t minor);
+void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd);
+
+#endif /* __MDP5_CFG_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
new file mode 100644
index 0000000000..a640af22ea
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_probe_helper.h>
+
+#include "mdp5_kms.h"
+
+#ifdef CONFIG_DRM_MSM_DSI
+
+static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv = encoder->dev->dev_private;
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+#define VSYNC_CLK_RATE 19200000
+static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ struct device *dev = encoder->dev->dev;
+ u32 total_lines, vclks_line, cfg;
+ long vsync_clk_speed;
+ struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+ int pp_id = mixer->pp;
+
+ if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) {
+ DRM_DEV_ERROR(dev, "vsync_clk is not initialized\n");
+ return -EINVAL;
+ }
+
+ total_lines = mode->vtotal * drm_mode_vrefresh(mode);
+ if (!total_lines) {
+ DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
+ __func__, mode->vtotal, drm_mode_vrefresh(mode));
+ return -EINVAL;
+ }
+
+ vsync_clk_speed = clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE);
+ if (vsync_clk_speed <= 0) {
+ DRM_DEV_ERROR(dev, "vsync_clk round rate failed %ld\n",
+ vsync_clk_speed);
+ return -EINVAL;
+ }
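+	/* number of vsync_clk ticks that elapse during one panel line time */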
+ vclks_line = vsync_clk_speed / total_lines;
+
+ cfg = MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN
+ | MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN;
+ cfg |= MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line);
+
+ /*
+ * Tearcheck emits a blanking signal every vclks_line * vtotal * 2 ticks on
+ * the vsync_clk equating to roughly half the desired panel refresh rate.
+ * This is only necessary as stability fallback if interrupts from the
+ * panel arrive too late or not at all, but is currently used by default
+ * because these panel interrupts are not wired up yet.
+ */
+ mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id), cfg);
+ mdp5_write(mdp5_kms,
+ REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), (2 * mode->vtotal));
+
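+	/* initialize the line counters at the end of the active region; the
+	 * read pointer irq is requested one line later
+	 */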
+ mdp5_write(mdp5_kms,
+ REG_MDP5_PP_VSYNC_INIT_VAL(pp_id), mode->vdisplay);
+ mdp5_write(mdp5_kms, REG_MDP5_PP_RD_PTR_IRQ(pp_id), mode->vdisplay + 1);
+ mdp5_write(mdp5_kms, REG_MDP5_PP_START_POS(pp_id), mode->vdisplay);
+ mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_THRESH(pp_id),
+ MDP5_PP_SYNC_THRESH_START(4) |
+ MDP5_PP_SYNC_THRESH_CONTINUE(4));
+ mdp5_write(mdp5_kms, REG_MDP5_PP_AUTOREFRESH_CONFIG(pp_id), 0x0);
+
+ return 0;
+}
+
+static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+ int pp_id = mixer->pp;
+ int ret;
+
+ ret = clk_set_rate(mdp5_kms->vsync_clk,
+ clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE));
+ if (ret) {
+ DRM_DEV_ERROR(encoder->dev->dev,
+ "vsync_clk clk_set_rate failed, %d\n", ret);
+ return ret;
+ }
+ ret = clk_prepare_enable(mdp5_kms->vsync_clk);
+ if (ret) {
+ DRM_DEV_ERROR(encoder->dev->dev,
+ "vsync_clk clk_prepare_enable failed, %d\n", ret);
+ return ret;
+ }
+
+ mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 1);
+
+ return 0;
+}
+
+static void pingpong_tearcheck_disable(struct drm_encoder *encoder)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+ int pp_id = mixer->pp;
+
+ mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 0);
+ clk_disable_unprepare(mdp5_kms->vsync_clk);
+}
+
+void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ mode = adjusted_mode;
+
+ DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
+ pingpong_tearcheck_setup(encoder, mode);
+ mdp5_crtc_set_pipeline(encoder->crtc);
+}
+
+void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
+ struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
+ struct mdp5_interface *intf = mdp5_cmd_enc->intf;
+ struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
+
+ if (WARN_ON(!mdp5_cmd_enc->enabled))
+ return;
+
+ pingpong_tearcheck_disable(encoder);
+
+ mdp5_ctl_set_encoder_state(ctl, pipeline, false);
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
+
+ mdp5_cmd_enc->enabled = false;
+}
+
+void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
+ struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
+ struct mdp5_interface *intf = mdp5_cmd_enc->intf;
+ struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
+
+ if (WARN_ON(mdp5_cmd_enc->enabled))
+ return;
+
+ if (pingpong_tearcheck_enable(encoder))
+ return;
+
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
+
+ mdp5_ctl_set_encoder_state(ctl, pipeline, true);
+
+ mdp5_cmd_enc->enabled = true;
+}
+
+int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder)
+{
+ struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
+ struct mdp5_kms *mdp5_kms;
+ struct device *dev;
+ int intf_num;
+ u32 data = 0;
+
+ if (!encoder || !slave_encoder)
+ return -EINVAL;
+
+ mdp5_kms = get_kms(encoder);
+ intf_num = mdp5_cmd_enc->intf->num;
+
+ /* Switch slave encoder's trigger MUX, to use the master's
+ * start signal for the slave encoder
+ */
+ if (intf_num == 1)
+ data |= MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX;
+ else if (intf_num == 2)
+ data |= MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX;
+ else
+ return -EINVAL;
+
+ /* Smart Panel, Sync mode */
+ data |= MDP5_SPLIT_DPL_UPPER_SMART_PANEL;
+
+ dev = &mdp5_kms->pdev->dev;
+
+	/* Make sure clocks are on when connectors call this function. */
+ pm_runtime_get_sync(dev);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data);
+
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER,
+ MDP5_SPLIT_DPL_LOWER_SMART_PANEL);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+#endif /* CONFIG_DRM_MSM_DSI */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
new file mode 100644
index 0000000000..86036dd4e1
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -0,0 +1,1360 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/sort.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_flip_work.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "mdp5_kms.h"
+#include "msm_gem.h"
+
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+
+struct mdp5_crtc {
+ struct drm_crtc base;
+ int id;
+ bool enabled;
+
+ spinlock_t lm_lock; /* protect REG_MDP5_LM_* registers */
+
+ /* if there is a pending flip, these will be non-null: */
+ struct drm_pending_vblank_event *event;
+
+	/* Bits that were flushed at the last commit;
+	 * used to decide if a vsync has happened since the last commit.
+	 */
+ u32 flushed_mask;
+
+#define PENDING_CURSOR 0x1
+#define PENDING_FLIP 0x2
+ atomic_t pending;
+
+ /* for unref'ing cursor bo's after scanout completes: */
+ struct drm_flip_work unref_cursor_work;
+
+ struct mdp_irq vblank;
+ struct mdp_irq err;
+ struct mdp_irq pp_done;
+
+ struct completion pp_completion;
+
+ bool lm_cursor_enabled;
+
+ struct {
+		/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo */
+ spinlock_t lock;
+
+ /* current cursor being scanned out: */
+ struct drm_gem_object *scanout_bo;
+ uint64_t iova;
+ uint32_t width, height;
+ int x, y;
+ } cursor;
+};
+#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
+
+static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc);
+
+static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
+{
+ struct msm_drm_private *priv = crtc->dev->dev_private;
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static void request_pending(struct drm_crtc *crtc, uint32_t pending)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+ atomic_or(pending, &mdp5_crtc->pending);
+ mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
+}
+
+static void request_pp_done_pending(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ reinit_completion(&mdp5_crtc->pp_completion);
+}
+
+static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_ctl *ctl = mdp5_cstate->ctl;
+ struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
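+	/* a deferred start latches the flush bits without issuing the CTL
+	 * START trigger
+	 */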
+ bool start = !mdp5_cstate->defer_start;
+
+ mdp5_cstate->defer_start = false;
+
+ DBG("%s: flush=%08x", crtc->name, flush_mask);
+
+ return mdp5_ctl_commit(ctl, pipeline, flush_mask, start);
+}
+
+/*
+ * flush updates, to make sure hw is updated to new scanout fb,
+ * so that we can safely queue unref to current fb (i.e. next
+ * vblank we know hw is done w/ previous scanout_fb).
+ */
+static u32 crtc_flush_all(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_hw_mixer *mixer, *r_mixer;
+ struct drm_plane *plane;
+ uint32_t flush_mask = 0;
+
+ /* this should not happen: */
+ if (WARN_ON(!mdp5_cstate->ctl))
+ return 0;
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ if (!plane->state->visible)
+ continue;
+ flush_mask |= mdp5_plane_get_flush(plane);
+ }
+
+ mixer = mdp5_cstate->pipeline.mixer;
+ flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm);
+
+ r_mixer = mdp5_cstate->pipeline.r_mixer;
+ if (r_mixer)
+ flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
+
+ return crtc_flush(crtc, flush_mask);
+}
+
+/* if file!=NULL, this is preclose potential cancel-flip path */
+static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_ctl *ctl = mdp5_cstate->ctl;
+ struct drm_device *dev = crtc->dev;
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = mdp5_crtc->event;
+ if (event) {
+ mdp5_crtc->event = NULL;
+ DBG("%s: send event: %p", crtc->name, event);
+ drm_crtc_send_vblank_event(crtc, event);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (ctl && !crtc->state->enable) {
+ /* set STAGE_UNUSED for all layers */
+ mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0);
+ /* XXX: What to do here? */
+ /* mdp5_crtc->ctl = NULL; */
+ }
+}
+
+static void unref_cursor_worker(struct drm_flip_work *work, void *val)
+{
+ struct mdp5_crtc *mdp5_crtc =
+ container_of(work, struct mdp5_crtc, unref_cursor_work);
+ struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
+ struct msm_kms *kms = &mdp5_kms->base.base;
+
+ msm_gem_unpin_iova(val, kms->aspace);
+ drm_gem_object_put(val);
+}
+
+static void mdp5_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);
+
+ kfree(mdp5_crtc);
+}
+
+static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
+{
+ switch (stage) {
+ case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA;
+ case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA;
+ case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA;
+ case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA;
+ case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA;
+ case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA;
+ case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * left/right pipe offsets for the stage array used in blend_setup()
+ */
+#define PIPE_LEFT 0
+#define PIPE_RIGHT 1
+
+/*
+ * blend_setup() - blend all the planes of a CRTC
+ *
+ * If no base layer is available, border will be enabled as the base layer.
+ * Otherwise all layers will be blended based on their stage calculated
+ * in mdp5_crtc_atomic_check.
+ */
+static void blend_setup(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct drm_plane *plane;
+ struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
+ const struct mdp_format *format;
+ struct mdp5_hw_mixer *mixer = pipeline->mixer;
+ uint32_t lm = mixer->lm;
+ struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
+ uint32_t r_lm = r_mixer ? r_mixer->lm : 0;
+ struct mdp5_ctl *ctl = mdp5_cstate->ctl;
+ uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
+ unsigned long flags;
+ enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
+ enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
+ int i, plane_cnt = 0;
+ bool bg_alpha_enabled = false;
+ u32 mixer_op_mode = 0;
+ u32 val;
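+/* translate a stage id (STAGE0, STAGE1, ...) to a 0-based hw blender index */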
+#define blender(stage) ((stage) - STAGE0)
+
+ spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
+
+ /* ctl could be released already when we are shutting down: */
+ /* XXX: Can this happen now? */
+ if (!ctl)
+ goto out;
+
+ /* Collect all plane information */
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ enum mdp5_pipe right_pipe;
+
+ if (!plane->state->visible)
+ continue;
+
+ pstate = to_mdp5_plane_state(plane->state);
+ pstates[pstate->stage] = pstate;
+ stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
+ /*
+ * if we have a right mixer, stage the same pipe as we
+ * have on the left mixer
+ */
+ if (r_mixer)
+ r_stage[pstate->stage][PIPE_LEFT] =
+ mdp5_plane_pipe(plane);
+ /*
+		 * if we have a right pipe (i.e. the plane comprises 2
+		 * hwpipes), then stage the right pipe on the right side of
+		 * both the layer mixers
+ */
+ right_pipe = mdp5_plane_right_pipe(plane);
+ if (right_pipe) {
+ stage[pstate->stage][PIPE_RIGHT] = right_pipe;
+ r_stage[pstate->stage][PIPE_RIGHT] = right_pipe;
+ }
+
+ plane_cnt++;
+ }
+
+ if (!pstates[STAGE_BASE]) {
+ ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
+ DBG("Border Color is enabled");
+ } else if (plane_cnt) {
+ format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb));
+
+ if (format->alpha_enable)
+ bg_alpha_enabled = true;
+ }
+
+	/* Set up blending for each stage, starting from constant-alpha defaults */
+ for (i = STAGE0; i <= STAGE_MAX; i++) {
+ if (!pstates[i])
+ continue;
+
+ format = to_mdp_format(
+ msm_framebuffer_format(pstates[i]->base.fb));
+ plane = pstates[i]->base.plane;
+ blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+ MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
+ fg_alpha = pstates[i]->base.alpha >> 8;
+ bg_alpha = 0xFF - fg_alpha;
+
+ if (!format->alpha_enable && bg_alpha_enabled)
+ mixer_op_mode = 0;
+ else
+ mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i);
+
+ DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);
+
+ if (format->alpha_enable &&
+ pstates[i]->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
+ blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+ MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
+ if (fg_alpha != 0xff) {
+ bg_alpha = fg_alpha;
+ blend_op |=
+ MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
+ MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
+ } else {
+ blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
+ }
+ } else if (format->alpha_enable &&
+ pstates[i]->base.pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
+ blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
+ MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
+ if (fg_alpha != 0xff) {
+ bg_alpha = fg_alpha;
+ blend_op |=
+ MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
+ MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
+ MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
+ MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
+ } else {
+ blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
+ }
+ }
+
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
+ blender(i)), blend_op);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
+ blender(i)), fg_alpha);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
+ blender(i)), bg_alpha);
+ if (r_mixer) {
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm,
+ blender(i)), blend_op);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm,
+ blender(i)), fg_alpha);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm,
+ blender(i)), bg_alpha);
+ }
+ }
+
+ val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm),
+ val | mixer_op_mode);
+ if (r_mixer) {
+ val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm),
+ val | mixer_op_mode);
+ }
+
+ mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt,
+ ctl_blend_flags);
+out:
+ spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
+}
+
+static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer;
+ struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer;
+ uint32_t lm = mixer->lm;
+ u32 mixer_width, val;
+ unsigned long flags;
+ struct drm_display_mode *mode;
+
+ if (WARN_ON(!crtc->state))
+ return;
+
+ mode = &crtc->state->adjusted_mode;
+
+ DBG("%s: set mode: " DRM_MODE_FMT, crtc->name, DRM_MODE_ARG(mode));
+
+ mixer_width = mode->hdisplay;
+ if (r_mixer)
+ mixer_width /= 2;
+
+ spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
+ MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
+ MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
+
+ /* Assign mixer to LEFT side in source split mode */
+ val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
+ val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val);
+
+ if (r_mixer) {
+ u32 r_lm = r_mixer->lm;
+
+ mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm),
+ MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
+ MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
+
+ /* Assign mixer to RIGHT side in source split mode */
+ val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
+ val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val);
+ }
+
+ spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
+}
+
+static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_encoder *encoder;
+
+ drm_for_each_encoder(encoder, dev)
+ if (encoder->crtc == crtc)
+ return encoder;
+
+ return NULL;
+}
+
+static bool mdp5_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ unsigned int pipe = crtc->index;
+ struct drm_encoder *encoder;
+ int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
+
+
+ encoder = get_encoder_from_crtc(crtc);
+ if (!encoder) {
+ DRM_ERROR("no encoder found for crtc %d\n", pipe);
+ return false;
+ }
+
+ vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
+
+ /*
+ * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
+ * the end of VFP. Translate the porch values relative to the line
+ * counter positions.
+ */
+
+ vactive_start = vsw + vbp + 1;
+
+ vactive_end = vactive_start + mode->crtc_vdisplay;
+
+ /* last scan line before VSYNC */
+ vfp_end = mode->crtc_vtotal;
+
+ if (stime)
+ *stime = ktime_get();
+
+ line = mdp5_encoder_get_linecount(encoder);
+
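+	/* translate the line counter into a position relative to the start
+	 * of the active region: negative while in the blanking periods,
+	 * 0..vdisplay-1 while scanning out
+	 */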
+ if (line < vactive_start)
+ line -= vactive_start;
+ else if (line > vactive_end)
+ line = line - vfp_end - vactive_start;
+ else
+ line -= vactive_start;
+
+ *vpos = line;
+ *hpos = 0;
+
+ if (etime)
+ *etime = ktime_get();
+
+ return true;
+}
+
+static u32 mdp5_crtc_get_vblank_counter(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+
+ encoder = get_encoder_from_crtc(crtc);
+ if (!encoder)
+ return 0;
+
+ return mdp5_encoder_get_framecount(encoder);
+}
+
+static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct device *dev = &mdp5_kms->pdev->dev;
+ unsigned long flags;
+
+ DBG("%s", crtc->name);
+
+ if (WARN_ON(!mdp5_crtc->enabled))
+ return;
+
+ /* Disable/save vblank irq handling before power is disabled */
+ drm_crtc_vblank_off(crtc);
+
+ if (mdp5_cstate->cmd_mode)
+ mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);
+
+ mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
+ pm_runtime_put_sync(dev);
+
+ if (crtc->state->event && !crtc->state->active) {
+ WARN_ON(mdp5_crtc->event);
+ spin_lock_irqsave(&mdp5_kms->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&mdp5_kms->dev->event_lock, flags);
+ }
+
+ mdp5_crtc->enabled = false;
+}
+
+static void mdp5_crtc_vblank_on(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_interface *intf = mdp5_cstate->pipeline.intf;
+ u32 count;
+
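+	/* in command mode there is no free-running frame counter to read,
+	 * so pass a max count of zero to fall back to software vblank
+	 * counting
+	 */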
+ count = intf->mode == MDP5_INTF_DSI_MODE_COMMAND ? 0 : 0xffffffff;
+ drm_crtc_set_max_vblank_count(crtc, count);
+
+ drm_crtc_vblank_on(crtc);
+}
+
+static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct device *dev = &mdp5_kms->pdev->dev;
+
+ DBG("%s", crtc->name);
+
+ if (WARN_ON(mdp5_crtc->enabled))
+ return;
+
+ pm_runtime_get_sync(dev);
+
+ if (mdp5_crtc->lm_cursor_enabled) {
+ /*
+ * Restore LM cursor state, as it might have been lost
+ * with suspend:
+ */
+ if (mdp5_crtc->cursor.iova) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
+ mdp5_crtc_restore_cursor(crtc);
+ spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
+
+ mdp5_ctl_set_cursor(mdp5_cstate->ctl,
+ &mdp5_cstate->pipeline, 0, true);
+ } else {
+ mdp5_ctl_set_cursor(mdp5_cstate->ctl,
+ &mdp5_cstate->pipeline, 0, false);
+ }
+ }
+
+ /* Restore vblank irq handling after power is enabled */
+ mdp5_crtc_vblank_on(crtc);
+
+ mdp5_crtc_mode_set_nofb(crtc);
+
+ mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
+
+ if (mdp5_cstate->cmd_mode)
+ mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);
+
+ mdp5_crtc->enabled = true;
+}
+
+static int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
+ struct drm_crtc_state *new_crtc_state,
+ bool need_right_mixer)
+{
+ struct mdp5_crtc_state *mdp5_cstate =
+ to_mdp5_crtc_state(new_crtc_state);
+ struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
+ struct mdp5_interface *intf;
+ bool new_mixer = false;
+
+ new_mixer = !pipeline->mixer;
+
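+	/* we also need new mixers if the need for a right mixer has changed */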
+ if ((need_right_mixer && !pipeline->r_mixer) ||
+ (!need_right_mixer && pipeline->r_mixer))
+ new_mixer = true;
+
+ if (new_mixer) {
+ struct mdp5_hw_mixer *old_mixer = pipeline->mixer;
+ struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer;
+ u32 caps;
+ int ret;
+
+ caps = MDP_LM_CAP_DISPLAY;
+ if (need_right_mixer)
+ caps |= MDP_LM_CAP_PAIR;
+
+ ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps,
+ &pipeline->mixer, need_right_mixer ?
+ &pipeline->r_mixer : NULL);
+ if (ret)
+ return ret;
+
+ ret = mdp5_mixer_release(new_crtc_state->state, old_mixer);
+ if (ret)
+ return ret;
+
+ if (old_r_mixer) {
+ ret = mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
+ if (ret)
+ return ret;
+
+ if (!need_right_mixer)
+ pipeline->r_mixer = NULL;
+ }
+ }
+
+ /*
+ * these should have been already set up in the encoder's atomic
+ * check (called by drm_atomic_helper_check_modeset)
+ */
+ intf = pipeline->intf;
+
+ mdp5_cstate->err_irqmask = intf2err(intf->num);
+ mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf);
+
+ if ((intf->type == INTF_DSI) &&
+ (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
+ mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer);
+ mdp5_cstate->cmd_mode = true;
+ } else {
+ mdp5_cstate->pp_done_irqmask = 0;
+ mdp5_cstate->cmd_mode = false;
+ }
+
+ return 0;
+}
+
+struct plane_state {
+ struct drm_plane *plane;
+ struct mdp5_plane_state *state;
+};
+
+static int pstate_cmp(const void *a, const void *b)
+{
+ struct plane_state *pa = (struct plane_state *)a;
+ struct plane_state *pb = (struct plane_state *)b;
+ return pa->state->base.normalized_zpos - pb->state->base.normalized_zpos;
+}
+
+/* is there a helper for this? */
+static bool is_fullscreen(struct drm_crtc_state *cstate,
+ struct drm_plane_state *pstate)
+{
+ return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
+ ((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
+ ((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
+}
+
+static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
+ struct drm_crtc_state *new_crtc_state,
+ struct drm_plane_state *bpstate)
+{
+ struct mdp5_crtc_state *mdp5_cstate =
+ to_mdp5_crtc_state(new_crtc_state);
+
+ /*
+ * if we're in source split mode, it's mandatory to have
+ * border out on the base stage
+ */
+ if (mdp5_cstate->pipeline.r_mixer)
+ return STAGE0;
+
+	/* if the bottom-most layer is not fullscreen, we need the base
+	 * stage for the solid border color:
+	 */
+ if (!is_fullscreen(new_crtc_state, bpstate))
+ return STAGE0;
+
+ return STAGE_BASE;
+}
+
+static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc_state);
+ struct mdp5_interface *intf = mdp5_cstate->pipeline.intf;
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct drm_plane *plane;
+ struct drm_device *dev = crtc->dev;
+ struct plane_state pstates[STAGE_MAX + 1];
+ const struct mdp5_cfg_hw *hw_cfg;
+ const struct drm_plane_state *pstate;
+ const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ bool cursor_plane = false;
+ bool need_right_mixer = false;
+ int cnt = 0, i;
+ int ret;
+ enum mdp_mixer_stage_id start;
+
+ DBG("%s: check", crtc->name);
+
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
+ struct mdp5_plane_state *mdp5_pstate =
+ to_mdp5_plane_state(pstate);
+
+ if (!pstate->visible)
+ continue;
+
+ pstates[cnt].plane = plane;
+ pstates[cnt].state = to_mdp5_plane_state(pstate);
+
+ mdp5_pstate->needs_dirtyfb =
+ intf->mode == MDP5_INTF_DSI_MODE_COMMAND;
+
+ /*
+ * if any plane on this crtc uses 2 hwpipes, then we need
+ * the crtc to have a right hwmixer.
+ */
+ if (pstates[cnt].state->r_hwpipe)
+ need_right_mixer = true;
+ cnt++;
+
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ cursor_plane = true;
+ }
+
+ /* bail out early if there aren't any planes */
+ if (!cnt)
+ return 0;
+
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+
+ /*
+ * we need a right hwmixer if the mode's width is greater than a single
+ * LM's max width
+ */
+ if (mode->hdisplay > hw_cfg->lm.max_width)
+ need_right_mixer = true;
+
+ ret = mdp5_crtc_setup_pipeline(crtc, crtc_state, need_right_mixer);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "couldn't assign mixers %d\n", ret);
+ return ret;
+ }
+
+ /* assign a stage based on sorted zpos property */
+ sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
+
+ /* trigger a warning if cursor isn't the highest zorder */
+ WARN_ON(cursor_plane &&
+ (pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));
+
+ start = get_start_stage(crtc, crtc_state, &pstates[0].state->base);
+
+ /* verify that there are not too many planes attached to crtc
+ * and that we don't have conflicting mixer stages:
+ */
+ if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
+ DRM_DEV_ERROR(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
+ cnt, start);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ if (cursor_plane && (i == (cnt - 1)))
+ pstates[i].state->stage = hw_cfg->lm.nb_stages;
+ else
+ pstates[i].state->stage = start + i;
+ DBG("%s: assign pipe %s on stage=%d", crtc->name,
+ pstates[i].plane->name,
+ pstates[i].state->stage);
+ }
+
+ return 0;
+}
+
+static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ DBG("%s: begin", crtc->name);
+}
+
+static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ DBG("%s: event: %p", crtc->name, crtc->state->event);
+
+ WARN_ON(mdp5_crtc->event);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ mdp5_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ /*
+ * If no CTL has been allocated in mdp5_crtc_atomic_check(),
+ * it means we are trying to flush a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ /* XXX: Can this happen now ? */
+ if (unlikely(!mdp5_cstate->ctl))
+ return;
+
+ blend_setup(crtc);
+
+	/* The PP_DONE irq is only used by command mode for now.
+	 * Request it as pending before the FLUSH and START triggers to
+	 * make sure no pp_done irq is missed.
+	 * This is safe because in command mode no pp_done will happen
+	 * before the SW trigger.
+	 */
+ if (mdp5_cstate->cmd_mode)
+ request_pp_done_pending(crtc);
+
+ mdp5_crtc->flushed_mask = crtc_flush_all(crtc);
+
+ /* XXX are we leaking out state here? */
+ mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask;
+ mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask;
+ mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask;
+
+ request_pending(crtc, PENDING_FLIP);
+}
+
+static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ uint32_t xres = crtc->mode.hdisplay;
+ uint32_t yres = crtc->mode.vdisplay;
+
+ /*
+	 * The Cursor Region Of Interest (ROI) is the part of the cursor
+	 * buffer that is read out and rendered. It is determined by the
+	 * visibility of the cursor point. In the default cursor image the
+	 * cursor point is at the top left of the cursor image.
+ *
+ * Without rotation:
+ * If the cursor point reaches the right (xres - x < cursor.width) or
+ * bottom (yres - y < cursor.height) boundary of the screen, then ROI
+ * width and ROI height need to be evaluated to crop the cursor image
+ * accordingly.
+ * (xres-x) will be new cursor width when x > (xres - cursor.width)
+ * (yres-y) will be new cursor height when y > (yres - cursor.height)
+ *
+ * With rotation:
+ * We get negative x and/or y coordinates.
+ * (cursor.width - abs(x)) will be new cursor width when x < 0
+	 * (cursor.height - abs(y)) will be new cursor height when y < 0
+ */
+ if (mdp5_crtc->cursor.x >= 0)
+ *roi_w = min(mdp5_crtc->cursor.width, xres -
+ mdp5_crtc->cursor.x);
+ else
+ *roi_w = mdp5_crtc->cursor.width - abs(mdp5_crtc->cursor.x);
+ if (mdp5_crtc->cursor.y >= 0)
+ *roi_h = min(mdp5_crtc->cursor.height, yres -
+ mdp5_crtc->cursor.y);
+ else
+ *roi_h = mdp5_crtc->cursor.height - abs(mdp5_crtc->cursor.y);
+}
+
+static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_ARGB8888);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
+ uint32_t blendcfg, stride;
+ uint32_t x, y, src_x, src_y, width, height;
+ uint32_t roi_w, roi_h;
+ int lm;
+
+ assert_spin_locked(&mdp5_crtc->cursor.lock);
+
+ lm = mdp5_cstate->pipeline.mixer->lm;
+
+ x = mdp5_crtc->cursor.x;
+ y = mdp5_crtc->cursor.y;
+ width = mdp5_crtc->cursor.width;
+ height = mdp5_crtc->cursor.height;
+
+ stride = width * info->cpp[0];
+
+ get_roi(crtc, &roi_w, &roi_h);
+
+	/* If the cursor buffer overlaps the upper or left screen
+	 * border due to rotation, the ROI's pixel offset inside the
+	 * cursor buffer is the positive overlap distance.
+	 */
+ if (mdp5_crtc->cursor.x < 0) {
+ src_x = abs(mdp5_crtc->cursor.x);
+ x = 0;
+ } else {
+ src_x = 0;
+ }
+ if (mdp5_crtc->cursor.y < 0) {
+ src_y = abs(mdp5_crtc->cursor.y);
+ y = 0;
+ } else {
+ src_y = 0;
+ }
+ DBG("%s: x=%d, y=%d roi_w=%d roi_h=%d src_x=%d src_y=%d",
+ crtc->name, x, y, roi_w, roi_h, src_x, src_y);
+
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
+ MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
+ MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
+ MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
+ MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
+ MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
+ MDP5_LM_CURSOR_START_XY_Y_START(y) |
+ MDP5_LM_CURSOR_START_XY_X_START(x));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_XY(lm),
+ MDP5_LM_CURSOR_XY_SRC_Y(src_y) |
+ MDP5_LM_CURSOR_XY_SRC_X(src_x));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm),
+ mdp5_crtc->cursor.iova);
+
+ blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
+ blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
+}
+
+static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file, uint32_t handle,
+ uint32_t width, uint32_t height)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
+ struct drm_device *dev = crtc->dev;
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct platform_device *pdev = mdp5_kms->pdev;
+ struct msm_kms *kms = &mdp5_kms->base.base;
+ struct drm_gem_object *cursor_bo, *old_bo = NULL;
+ struct mdp5_ctl *ctl;
+ int ret;
+ uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
+ bool cursor_enable = true;
+ unsigned long flags;
+
+ if (!mdp5_crtc->lm_cursor_enabled) {
+ dev_warn(dev->dev,
+ "cursor_set is deprecated with cursor planes\n");
+ return -EINVAL;
+ }
+
+ if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
+ DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
+ return -EINVAL;
+ }
+
+ ctl = mdp5_cstate->ctl;
+ if (!ctl)
+ return -EINVAL;
+
+ /* don't support LM cursors when we have source split enabled */
+ if (mdp5_cstate->pipeline.r_mixer)
+ return -EINVAL;
+
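+	/* a zero handle disables the cursor */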
+ if (!handle) {
+ DBG("Cursor off");
+ cursor_enable = false;
+ mdp5_crtc->cursor.iova = 0;
+ pm_runtime_get_sync(&pdev->dev);
+ goto set_cursor;
+ }
+
+ cursor_bo = drm_gem_object_lookup(file, handle);
+ if (!cursor_bo)
+ return -ENOENT;
+
+ ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace,
+ &mdp5_crtc->cursor.iova);
+ if (ret) {
+ drm_gem_object_put(cursor_bo);
+ return -EINVAL;
+ }
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
+ old_bo = mdp5_crtc->cursor.scanout_bo;
+
+ mdp5_crtc->cursor.scanout_bo = cursor_bo;
+ mdp5_crtc->cursor.width = width;
+ mdp5_crtc->cursor.height = height;
+
+ mdp5_crtc_restore_cursor(crtc);
+
+ spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
+
+set_cursor:
+ ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to %sable cursor: %d\n",
+ cursor_enable ? "en" : "dis", ret);
+ goto end;
+ }
+
+ crtc_flush(crtc, flush_mask);
+
+end:
+ pm_runtime_put_sync(&pdev->dev);
+ if (old_bo) {
+ drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
+ /* enable vblank to complete cursor work: */
+ request_pending(crtc, PENDING_CURSOR);
+ }
+ return ret;
+}
+
+static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
+ struct drm_device *dev = crtc->dev;
+ uint32_t roi_w;
+ uint32_t roi_h;
+ unsigned long flags;
+
+ if (!mdp5_crtc->lm_cursor_enabled) {
+ dev_warn(dev->dev,
+ "cursor_move is deprecated with cursor planes\n");
+ return -EINVAL;
+ }
+
+ /* don't support LM cursors when we have source split enabled */
+ if (mdp5_cstate->pipeline.r_mixer)
+ return -EINVAL;
+
+ /* In case the CRTC is disabled, just drop the cursor update */
+ if (unlikely(!crtc->state->enable))
+ return 0;
+
+ /* accept negative x/y coordinates up to maximum cursor overlap */
+ mdp5_crtc->cursor.x = x = max(x, -(int)mdp5_crtc->cursor.width);
+ mdp5_crtc->cursor.y = y = max(y, -(int)mdp5_crtc->cursor.height);
+
+ get_roi(crtc, &roi_w, &roi_h);
+
+ pm_runtime_get_sync(&mdp5_kms->pdev->dev);
+
+ spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
+ mdp5_crtc_restore_cursor(crtc);
+ spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
+
+ crtc_flush(crtc, flush_mask);
+
+ pm_runtime_put_sync(&mdp5_kms->pdev->dev);
+
+ return 0;
+}
+
+static void
+mdp5_crtc_atomic_print_state(struct drm_printer *p,
+ const struct drm_crtc_state *state)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
+ struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
+ struct mdp5_kms *mdp5_kms = get_kms(state->crtc);
+
+ if (WARN_ON(!pipeline))
+ return;
+
+ if (mdp5_cstate->ctl)
+ drm_printf(p, "\tctl=%d\n", mdp5_ctl_get_ctl_id(mdp5_cstate->ctl));
+
+ drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
+ pipeline->mixer->name : "(null)");
+
+ if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
+ drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
+ pipeline->r_mixer->name : "(null)");
+
+ drm_printf(p, "\tcmd_mode=%d\n", mdp5_cstate->cmd_mode);
+}
+
+static struct drm_crtc_state *
+mdp5_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate;
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state),
+ sizeof(*mdp5_cstate), GFP_KERNEL);
+ if (!mdp5_cstate)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base);
+
+ return &mdp5_cstate->base;
+}
+
+static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
+
+ __drm_atomic_helper_crtc_destroy_state(state);
+
+ kfree(mdp5_cstate);
+}
+
+static void mdp5_crtc_reset(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate =
+ kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);
+
+ if (crtc->state)
+ mdp5_crtc_destroy_state(crtc, crtc->state);
+
+ if (mdp5_cstate)
+ __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
+}
+
+static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = mdp5_crtc_destroy,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = mdp5_crtc_reset,
+ .atomic_duplicate_state = mdp5_crtc_duplicate_state,
+ .atomic_destroy_state = mdp5_crtc_destroy_state,
+ .atomic_print_state = mdp5_crtc_atomic_print_state,
+ .get_vblank_counter = mdp5_crtc_get_vblank_counter,
+ .enable_vblank = msm_crtc_enable_vblank,
+ .disable_vblank = msm_crtc_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs mdp5_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = mdp5_crtc_destroy,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = mdp5_crtc_reset,
+ .atomic_duplicate_state = mdp5_crtc_duplicate_state,
+ .atomic_destroy_state = mdp5_crtc_destroy_state,
+ .cursor_set = mdp5_crtc_cursor_set,
+ .cursor_move = mdp5_crtc_cursor_move,
+ .atomic_print_state = mdp5_crtc_atomic_print_state,
+ .get_vblank_counter = mdp5_crtc_get_vblank_counter,
+ .enable_vblank = msm_crtc_enable_vblank,
+ .disable_vblank = msm_crtc_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
+ .mode_set_nofb = mdp5_crtc_mode_set_nofb,
+ .atomic_check = mdp5_crtc_atomic_check,
+ .atomic_begin = mdp5_crtc_atomic_begin,
+ .atomic_flush = mdp5_crtc_atomic_flush,
+ .atomic_enable = mdp5_crtc_atomic_enable,
+ .atomic_disable = mdp5_crtc_atomic_disable,
+ .get_scanout_position = mdp5_crtc_get_scanout_position,
+};
+
+static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
+ struct drm_crtc *crtc = &mdp5_crtc->base;
+ struct msm_drm_private *priv = crtc->dev->dev_private;
+ unsigned pending;
+
+ mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
+
+ pending = atomic_xchg(&mdp5_crtc->pending, 0);
+
+ if (pending & PENDING_FLIP) {
+ complete_flip(crtc, NULL);
+ }
+
+ if (pending & PENDING_CURSOR)
+ drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
+}
+
+static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
+
+ DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus);
+}
+
+static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
+ pp_done);
+
+ complete_all(&mdp5_crtc->pp_completion);
+}
+
+static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ int ret;
+
+ ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
+ msecs_to_jiffies(50));
+ if (ret == 0)
+		dev_warn_ratelimited(dev->dev, "pp done timeout, lm=%d\n",
+ mdp5_cstate->pipeline.mixer->lm);
+}
+
+static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_ctl *ctl = mdp5_cstate->ctl;
+ int ret;
+
+ /* Should not call this function if crtc is disabled. */
+ if (!ctl)
+ return;
+
+ ret = drm_crtc_vblank_get(crtc);
+ if (ret)
+ return;
+
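+	/* wait until the hw has cleared all the flush bits from the last
+	 * commit
+	 */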
+ ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
+ ((mdp5_ctl_get_commit_status(ctl) &
+ mdp5_crtc->flushed_mask) == 0),
+ msecs_to_jiffies(50));
+ if (ret <= 0)
+		dev_warn(dev->dev, "vblank timeout, crtc=%d\n", mdp5_crtc->id);
+
+ mdp5_crtc->flushed_mask = 0;
+
+ drm_crtc_vblank_put(crtc);
+}
+
+uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ return mdp5_crtc->vblank.irqmask;
+}
+
+void mdp5_crtc_set_pipeline(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+
+	/* should this be done elsewhere? */
+ mdp_irq_update(&mdp5_kms->base);
+
+ mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline);
+}
+
+struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+
+ return mdp5_cstate->ctl;
+}
+
+struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate;
+
+ if (WARN_ON(!crtc))
+ return ERR_PTR(-EINVAL);
+
+ mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+
+ return WARN_ON(!mdp5_cstate->pipeline.mixer) ?
+ ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer;
+}
+
+struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate;
+
+ if (WARN_ON(!crtc))
+ return ERR_PTR(-EINVAL);
+
+ mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+
+ return &mdp5_cstate->pipeline;
+}
+
+void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+
+ if (mdp5_cstate->cmd_mode)
+ mdp5_crtc_wait_for_pp_done(crtc);
+ else
+ mdp5_crtc_wait_for_flush_done(crtc);
+}
+
+/* initialize crtc */
+struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
+ struct drm_plane *plane,
+ struct drm_plane *cursor_plane, int id)
+{
+ struct drm_crtc *crtc = NULL;
+ struct mdp5_crtc *mdp5_crtc;
+
+ mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
+ if (!mdp5_crtc)
+ return ERR_PTR(-ENOMEM);
+
+ crtc = &mdp5_crtc->base;
+
+ mdp5_crtc->id = id;
+
+ spin_lock_init(&mdp5_crtc->lm_lock);
+ spin_lock_init(&mdp5_crtc->cursor.lock);
+ init_completion(&mdp5_crtc->pp_completion);
+
+ mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
+ mdp5_crtc->err.irq = mdp5_crtc_err_irq;
+ mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
+
+	mdp5_crtc->lm_cursor_enabled = !cursor_plane;
+
+ drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
+ cursor_plane ?
+ &mdp5_crtc_no_lm_cursor_funcs :
+ &mdp5_crtc_funcs, NULL);
+
+ drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
+ "unref cursor", unref_cursor_worker);
+
+ drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
+
+ return crtc;
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
new file mode 100644
index 0000000000..1220f2b20e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
@@ -0,0 +1,764 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ */
+
+#include "mdp5_kms.h"
+#include "mdp5_ctl.h"
+
+/*
+ * CTL - MDP Control Pool Manager
+ *
+ * Controls are shared between all display interfaces.
+ *
+ * They are intended to be used for data path configuration.
+ * The top level register programming describes the complete data path for
+ * a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
+ *
+ * Hardware capabilities determine the number of concurrent data paths.
+ *
+ * In certain use cases (high-resolution dual pipe), one single CTL can be
+ * shared across multiple CRTCs.
+ */
+
+#define CTL_STAT_BUSY 0x1
+#define CTL_STAT_BOOKED 0x2
+
+struct mdp5_ctl {
+ struct mdp5_ctl_manager *ctlm;
+
+ u32 id;
+
+ /* CTL status bitmask */
+ u32 status;
+
+ bool encoder_enabled;
+
+ /* pending flush_mask bits */
+ u32 flush_mask;
+
+ /* REG_MDP5_CTL_*(<id>) registers access info + lock: */
+ spinlock_t hw_lock;
+ u32 reg_offset;
+
+ /* when do CTL registers need to be flushed? (mask of trigger bits) */
+ u32 pending_ctl_trigger;
+
+ bool cursor_on;
+
+ /* True if the current CTL has FLUSH bits pending for single FLUSH. */
+ bool flush_pending;
+
+ struct mdp5_ctl *pair; /* Paired CTL to be flushed together */
+};
+
+struct mdp5_ctl_manager {
+ struct drm_device *dev;
+
+ /* number of CTL / Layer Mixers in this hw config: */
+ u32 nlm;
+ u32 nctl;
+
+ /* to filter out non-present bits in the current hardware config */
+ u32 flush_hw_mask;
+
+ /* status for single FLUSH */
+ bool single_flush_supported;
+ u32 single_flush_pending_mask;
+
+ /* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
+ spinlock_t pool_lock;
+ struct mdp5_ctl ctls[MAX_CTL];
+};
+
+static inline
+struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
+{
+ struct msm_drm_private *priv = ctl_mgr->dev->dev_private;
+
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static inline
+void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
+
+ (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
+ mdp5_write(mdp5_kms, reg, data);
+}
+
+static inline
+u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
+
+	(void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
+ return mdp5_read(mdp5_kms, reg);
+}
+
+static void set_display_intf(struct mdp5_kms *mdp5_kms,
+ struct mdp5_interface *intf)
+{
+ unsigned long flags;
+ u32 intf_sel;
+
+ spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
+ intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
+
+ switch (intf->num) {
+ case 0:
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf->type);
+ break;
+ case 1:
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf->type);
+ break;
+ case 2:
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf->type);
+ break;
+ case 3:
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf->type);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
+ spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
+}
+
+static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
+{
+ unsigned long flags;
+ struct mdp5_interface *intf = pipeline->intf;
+ u32 ctl_op = 0;
+
+ if (!mdp5_cfg_intf_is_virtual(intf->type))
+ ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num);
+
+ switch (intf->type) {
+ case INTF_DSI:
+ if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
+ ctl_op |= MDP5_CTL_OP_CMD_MODE;
+ break;
+
+ case INTF_WB:
+ if (intf->mode == MDP5_INTF_WB_MODE_LINE)
+ ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE);
+ break;
+
+ default:
+ break;
+ }
+
+ if (pipeline->r_mixer)
+ ctl_op |= MDP5_CTL_OP_PACK_3D_ENABLE |
+ MDP5_CTL_OP_PACK_3D(1);
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+ ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+}
+
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
+ struct mdp5_interface *intf = pipeline->intf;
+
+	/* Virtual interfaces need not set a display intf (e.g. writeback) */
+ if (!mdp5_cfg_intf_is_virtual(intf->type))
+ set_display_intf(mdp5_kms, intf);
+
+ set_ctl_op(ctl, pipeline);
+
+ return 0;
+}
+
+static bool start_signal_needed(struct mdp5_ctl *ctl,
+ struct mdp5_pipeline *pipeline)
+{
+ struct mdp5_interface *intf = pipeline->intf;
+
+ if (!ctl->encoder_enabled)
+ return false;
+
+ switch (intf->type) {
+ case INTF_WB:
+ return true;
+ case INTF_DSI:
+ return intf->mode == MDP5_INTF_DSI_MODE_COMMAND;
+ default:
+ return false;
+ }
+}
+
+/*
+ * send_start_signal() - Overlay Processor Start Signal
+ *
+ * For a given control operation (display pipeline), a START signal needs to
+ * be issued in order to kick off operation and activate all layers,
+ * e.g. for DSI command mode and writeback.
+ */
+static void send_start_signal(struct mdp5_ctl *ctl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+ ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+}
+
+/**
+ * mdp5_ctl_set_encoder_state() - set the encoder state
+ *
+ * @ctl: the CTL instance
+ * @pipeline: the encoder's INTF + MIXER configuration
+ * @enabled: true, when encoder is ready for data streaming; false, otherwise.
+ *
+ * Note:
+ * This encoder state is needed to trigger START signal (data path kickoff).
+ */
+int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
+ struct mdp5_pipeline *pipeline,
+ bool enabled)
+{
+ struct mdp5_interface *intf = pipeline->intf;
+
+ if (WARN_ON(!ctl))
+ return -EINVAL;
+
+ ctl->encoder_enabled = enabled;
+ DBG("intf_%d: %s", intf->num, enabled ? "on" : "off");
+
+	if (start_signal_needed(ctl, pipeline))
+		send_start_signal(ctl);
+
+ return 0;
+}
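+
+/*
+ * Illustrative call sequence (a sketch; mdp5_vid_encoder_enable() later in
+ * this patch does the equivalent): flush the INTF, then mark the encoder
+ * enabled so that a START signal can be sent where the interface needs one:
+ *
+ *	mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
+ *	mdp5_ctl_set_encoder_state(ctl, pipeline, true);
+ */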
+
+/*
+ * Note:
+ * CTL registers need to be flushed after calling this function
+ * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
+ */
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+ int cursor_id, bool enable)
+{
+ struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+ unsigned long flags;
+ u32 blend_cfg;
+ struct mdp5_hw_mixer *mixer = pipeline->mixer;
+
+ if (WARN_ON(!mixer)) {
+ DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTL %d cannot find LM",
+ ctl->id);
+ return -EINVAL;
+ }
+
+ if (pipeline->r_mixer) {
+ DRM_DEV_ERROR(ctl_mgr->dev->dev, "unsupported configuration");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+
+ blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm));
+
+ if (enable)
+ blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
+ else
+ blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
+
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
+ ctl->cursor_on = enable;
+
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+
+ ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);
+
+ return 0;
+}
+
+static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
+ enum mdp_mixer_stage_id stage)
+{
+ switch (pipe) {
+ case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
+ case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
+ case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
+ case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
+ case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
+ case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
+ case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
+ case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
+ case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
+ case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
+ case SSPP_CURSOR0:
+ case SSPP_CURSOR1:
+ default: return 0;
+ }
+}
+
+static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
+ enum mdp_mixer_stage_id stage)
+{
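+	/*
+	 * The LAYER_EXT register carries bit 3 of the blend stage for the
+	 * regular pipes (so it is only relevant from STAGE6 upwards), and
+	 * the full stage value for the cursor pipes:
+	 */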
+ if (stage < STAGE6 && (pipe != SSPP_CURSOR0 && pipe != SSPP_CURSOR1))
+ return 0;
+
+ switch (pipe) {
+ case SSPP_VIG0: return MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3;
+ case SSPP_VIG1: return MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3;
+ case SSPP_VIG2: return MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3;
+ case SSPP_RGB0: return MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3;
+ case SSPP_RGB1: return MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3;
+ case SSPP_RGB2: return MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3;
+ case SSPP_DMA0: return MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3;
+ case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3;
+ case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3;
+ case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3;
+ case SSPP_CURSOR0: return MDP5_CTL_LAYER_EXT_REG_CURSOR0(stage);
+ case SSPP_CURSOR1: return MDP5_CTL_LAYER_EXT_REG_CURSOR1(stage);
+ default: return 0;
+ }
+}
+
+static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl)
+{
+ unsigned long flags;
+ struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+ int i;
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+
+ for (i = 0; i < ctl_mgr->nlm; i++) {
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0);
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0);
+ }
+
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+}
+
+#define PIPE_LEFT 0
+#define PIPE_RIGHT 1
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+ enum mdp5_pipe stage[][MAX_PIPE_STAGE],
+ enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
+ u32 stage_cnt, u32 ctl_blend_op_flags)
+{
+ struct mdp5_hw_mixer *mixer = pipeline->mixer;
+ struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
+ unsigned long flags;
+ u32 blend_cfg = 0, blend_ext_cfg = 0;
+ u32 r_blend_cfg = 0, r_blend_ext_cfg = 0;
+ int i, start_stage;
+
+ mdp5_ctl_reset_blend_regs(ctl);
+
+ if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
+ start_stage = STAGE0;
+ blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
+ if (r_mixer)
+ r_blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
+ } else {
+ start_stage = STAGE_BASE;
+ }
+
+ for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) {
+ blend_cfg |=
+ mdp_ctl_blend_mask(stage[i][PIPE_LEFT], i) |
+ mdp_ctl_blend_mask(stage[i][PIPE_RIGHT], i);
+ blend_ext_cfg |=
+ mdp_ctl_blend_ext_mask(stage[i][PIPE_LEFT], i) |
+ mdp_ctl_blend_ext_mask(stage[i][PIPE_RIGHT], i);
+ if (r_mixer) {
+ r_blend_cfg |=
+ mdp_ctl_blend_mask(r_stage[i][PIPE_LEFT], i) |
+ mdp_ctl_blend_mask(r_stage[i][PIPE_RIGHT], i);
+ r_blend_ext_cfg |=
+ mdp_ctl_blend_ext_mask(r_stage[i][PIPE_LEFT], i) |
+ mdp_ctl_blend_ext_mask(r_stage[i][PIPE_RIGHT], i);
+ }
+ }
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+ if (ctl->cursor_on)
+ blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
+
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm),
+ blend_ext_cfg);
+ if (r_mixer) {
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm),
+ r_blend_cfg);
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm),
+ r_blend_ext_cfg);
+ }
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+
+ ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm);
+ if (r_mixer)
+ ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm);
+
+ DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", mixer->lm,
+ blend_cfg, blend_ext_cfg);
+ if (r_mixer)
+ DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x",
+ r_mixer->lm, r_blend_cfg, r_blend_ext_cfg);
+
+ return 0;
+}
+
+u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf)
+{
+ if (intf->type == INTF_WB)
+ return MDP5_CTL_FLUSH_WB;
+
+ switch (intf->num) {
+ case 0: return MDP5_CTL_FLUSH_TIMING_0;
+ case 1: return MDP5_CTL_FLUSH_TIMING_1;
+ case 2: return MDP5_CTL_FLUSH_TIMING_2;
+ case 3: return MDP5_CTL_FLUSH_TIMING_3;
+ default: return 0;
+ }
+}
+
+u32 mdp_ctl_flush_mask_cursor(int cursor_id)
+{
+ switch (cursor_id) {
+ case 0: return MDP5_CTL_FLUSH_CURSOR_0;
+ case 1: return MDP5_CTL_FLUSH_CURSOR_1;
+ default: return 0;
+ }
+}
+
+u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
+{
+ switch (pipe) {
+ case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
+ case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
+ case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
+ case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
+ case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
+ case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
+ case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
+ case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
+ case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
+ case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
+ case SSPP_CURSOR0: return MDP5_CTL_FLUSH_CURSOR_0;
+ case SSPP_CURSOR1: return MDP5_CTL_FLUSH_CURSOR_1;
+ default: return 0;
+ }
+}
+
+u32 mdp_ctl_flush_mask_lm(int lm)
+{
+ switch (lm) {
+ case 0: return MDP5_CTL_FLUSH_LM0;
+ case 1: return MDP5_CTL_FLUSH_LM1;
+ case 2: return MDP5_CTL_FLUSH_LM2;
+ case 3: return MDP5_CTL_FLUSH_LM3;
+ case 4: return MDP5_CTL_FLUSH_LM4;
+ case 5: return MDP5_CTL_FLUSH_LM5;
+ default: return 0;
+ }
+}
+
+static u32 fix_sw_flush(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+ u32 flush_mask)
+{
+ struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+ u32 sw_mask = 0;
+#define BIT_NEEDS_SW_FIX(bit) \
+ (!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit))
+
+ /* for some targets, cursor bit is the same as LM bit */
+ if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
+ sw_mask |= mdp_ctl_flush_mask_lm(pipeline->mixer->lm);
+
+ return sw_mask;
+}
+
+static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
+ u32 *flush_id)
+{
+ struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+
+ if (ctl->pair) {
+ DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask);
+ ctl->flush_pending = true;
+ ctl_mgr->single_flush_pending_mask |= (*flush_mask);
+ *flush_mask = 0;
+
+ if (ctl->pair->flush_pending) {
+ *flush_id = min_t(u32, ctl->id, ctl->pair->id);
+ *flush_mask = ctl_mgr->single_flush_pending_mask;
+
+ ctl->flush_pending = false;
+ ctl->pair->flush_pending = false;
+ ctl_mgr->single_flush_pending_mask = 0;
+
+ DBG("Single FLUSH mask %x,ID %d", *flush_mask,
+ *flush_id);
+ }
+ }
+}
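+
+/*
+ * Worked example of the handshake above: with CTL0 paired to CTL1, a commit
+ * on CTL0 only accumulates its mask into single_flush_pending_mask and
+ * writes nothing; the following commit on CTL1 then flushes the combined
+ * mask of both CTLs through CTL0's FLUSH register (the smaller of the two
+ * IDs), keeping both data paths in sync.
+ */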
+
+/**
+ * mdp5_ctl_commit() - Register Flush
+ *
+ * @ctl: the CTL instance
+ * @pipeline: the encoder's INTF + MIXER configuration
+ * @flush_mask: bitmask of display controller hw blocks to flush
+ * @start: if true, immediately update flush registers and set START
+ * bit, otherwise accumulate flush_mask bits until we are
+ * ready to START
+ *
+ * The flush register is used to indicate several registers are all
+ * programmed, and are safe to update to the back copy of the double
+ * buffered registers.
+ *
+ * Some registers' FLUSH bits are shared when the hardware does not have
+ * dedicated bits for them; handling these is the job of fix_sw_flush().
+ *
+ * CTL registers need to be flushed in some circumstances; if that is the
+ * case, some trigger bits will be present in both flush mask and
+ * ctl->pending_ctl_trigger.
+ *
+ * Return H/W flushed bit mask.
+ */
+u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
+ struct mdp5_pipeline *pipeline,
+ u32 flush_mask, bool start)
+{
+ struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+ unsigned long flags;
+ u32 flush_id = ctl->id;
+ u32 curr_ctl_flush_mask;
+
+ VERB("flush_mask=%x, trigger=%x", flush_mask, ctl->pending_ctl_trigger);
+
+ if (ctl->pending_ctl_trigger & flush_mask) {
+ flush_mask |= MDP5_CTL_FLUSH_CTL;
+ ctl->pending_ctl_trigger = 0;
+ }
+
+ flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask);
+
+ flush_mask &= ctl_mgr->flush_hw_mask;
+
+ curr_ctl_flush_mask = flush_mask;
+
+ fix_for_single_flush(ctl, &flush_mask, &flush_id);
+
+ if (!start) {
+ ctl->flush_mask |= flush_mask;
+ return curr_ctl_flush_mask;
+ } else {
+ flush_mask |= ctl->flush_mask;
+ ctl->flush_mask = 0;
+ }
+
+ if (flush_mask) {
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+ ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+ }
+
+	if (start_signal_needed(ctl, pipeline))
+		send_start_signal(ctl);
+
+ return curr_ctl_flush_mask;
+}
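+
+/*
+ * Illustrative usage (a sketch, not verbatim from this patch): accumulate
+ * flush bits for each updated block, then hit START on the final call:
+ *
+ *	mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_pipe(SSPP_VIG0), false);
+ *	mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_lm(mixer->lm), false);
+ *	mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
+ */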
+
+u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
+{
+ return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id));
+}
+
+int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
+{
+ return WARN_ON(!ctl) ? -EINVAL : ctl->id;
+}
+
+/*
+ * mdp5_ctl_pair() - Associate 2 booked CTLs for single FLUSH
+ */
+int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
+{
+ struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm;
+ struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
+
+	/* silently do nothing if the hw doesn't support single FLUSH */
+ if (!ctl_mgr->single_flush_supported)
+ return 0;
+
+ if (!enable) {
+ ctlx->pair = NULL;
+ ctly->pair = NULL;
+ mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
+ return 0;
+ } else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
+ DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTLs already paired\n");
+ return -EINVAL;
+ } else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
+ DRM_DEV_ERROR(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
+ return -EINVAL;
+ }
+
+ ctlx->pair = ctly;
+ ctly->pair = ctlx;
+
+ mdp5_write(mdp5_kms, REG_MDP5_SPARE_0,
+ MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
+
+ return 0;
+}
+
+/*
+ * mdp5_ctlm_request() - CTL allocation
+ *
+ * Try to return a booked CTL if @intf_num is 1 or 2, an unbooked one for
+ * other INTFs. If no CTL is available in the preferred category, allocate
+ * from the other one.
+ *
+ * Returns NULL if no CTL is available at all.
+ */
+struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
+ int intf_num)
+{
+ struct mdp5_ctl *ctl = NULL;
+ const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
+ u32 match = ((intf_num == 1) || (intf_num == 2)) ? CTL_STAT_BOOKED : 0;
+ unsigned long flags;
+ int c;
+
+ spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
+
+ /* search the preferred */
+ for (c = 0; c < ctl_mgr->nctl; c++)
+ if ((ctl_mgr->ctls[c].status & checkm) == match)
+ goto found;
+
+ dev_warn(ctl_mgr->dev->dev,
+ "fall back to the other CTL category for INTF %d!\n", intf_num);
+
+ match ^= CTL_STAT_BOOKED;
+ for (c = 0; c < ctl_mgr->nctl; c++)
+ if ((ctl_mgr->ctls[c].status & checkm) == match)
+ goto found;
+
+ DRM_DEV_ERROR(ctl_mgr->dev->dev, "No more CTL available!");
+ goto unlock;
+
+found:
+ ctl = &ctl_mgr->ctls[c];
+ ctl->status |= CTL_STAT_BUSY;
+ ctl->pending_ctl_trigger = 0;
+ DBG("CTL %d allocated", ctl->id);
+
+unlock:
+ spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
+ return ctl;
+}
+
+void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
+{
+ unsigned long flags;
+ int c;
+
+ for (c = 0; c < ctl_mgr->nctl; c++) {
+ struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+ ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+ }
+}
+
+void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
+{
+ kfree(ctl_mgr);
+}
+
+struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
+ void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd)
+{
+ struct mdp5_ctl_manager *ctl_mgr;
+ const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd);
+ int rev = mdp5_cfg_get_hw_rev(cfg_hnd);
+ unsigned dsi_cnt = 0;
+ const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
+ unsigned long flags;
+ int c, ret;
+
+ ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
+ if (!ctl_mgr) {
+ DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (WARN_ON(ctl_cfg->count > MAX_CTL)) {
+ DRM_DEV_ERROR(dev->dev, "Increase static pool size to at least %d\n",
+ ctl_cfg->count);
+ ret = -ENOSPC;
+ goto fail;
+ }
+
+ /* initialize the CTL manager: */
+ ctl_mgr->dev = dev;
+ ctl_mgr->nlm = hw_cfg->lm.count;
+ ctl_mgr->nctl = ctl_cfg->count;
+ ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask;
+ spin_lock_init(&ctl_mgr->pool_lock);
+
+ /* initialize each CTL of the pool: */
+ spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
+ for (c = 0; c < ctl_mgr->nctl; c++) {
+ struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
+
+ if (WARN_ON(!ctl_cfg->base[c])) {
+ DRM_DEV_ERROR(dev->dev, "CTL_%d: base is null!\n", c);
+ ret = -EINVAL;
+ spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
+ goto fail;
+ }
+ ctl->ctlm = ctl_mgr;
+ ctl->id = c;
+ ctl->reg_offset = ctl_cfg->base[c];
+ ctl->status = 0;
+ spin_lock_init(&ctl->hw_lock);
+ }
+
+ /*
+ * In bonded DSI case, CTL0 and CTL1 are always assigned to two DSI
+ * interfaces to support the single FLUSH feature (flush CTL0 and CTL1 by
+ * writing only into CTL0's FLUSH register) to keep the two DSI pipes in sync.
+ * Single FLUSH is supported from hw rev v3.0.
+ */
+ for (c = 0; c < ARRAY_SIZE(hw_cfg->intf.connect); c++)
+ if (hw_cfg->intf.connect[c] == INTF_DSI)
+ dsi_cnt++;
+ if ((rev >= 3) && (dsi_cnt > 1)) {
+ ctl_mgr->single_flush_supported = true;
+ /* Reserve CTL0/1 for INTF1/2 */
+ ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
+ ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED;
+ }
+ spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
+ DBG("Pool of %d CTLs created.", ctl_mgr->nctl);
+
+ return ctl_mgr;
+
+fail:
+ if (ctl_mgr)
+ mdp5_ctlm_destroy(ctl_mgr);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h
new file mode 100644
index 0000000000..c2af68aa77
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MDP5_CTL_H__
+#define __MDP5_CTL_H__
+
+#include "msm_drv.h"
+
+/*
+ * CTL Manager prototypes:
+ * mdp5_ctlm_init() returns a ctlm (CTL Manager) handler,
+ * which is then used to call the other mdp5_ctlm_*(ctlm, ...) functions.
+ */
+struct mdp5_ctl_manager;
+struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
+ void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd);
+void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm);
+void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
+
+/*
+ * CTL prototypes:
+ * mdp5_ctl_request(ctlm, ...) returns a ctl (CTL resource) handler,
+ * which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
+ */
+struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, int intf_num);
+
+int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl);
+
+struct mdp5_interface;
+struct mdp5_pipeline;
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *p);
+int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, struct mdp5_pipeline *p,
+ bool enabled);
+
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+ int cursor_id, bool enable);
+int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable);
+
+#define MAX_PIPE_STAGE 2
+
+/*
+ * mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM)
+ *
+ * @stage: array to contain the pipe num for each stage
+ * @stage_cnt: valid stage number in stage array
+ * @ctl_blend_op_flags: blender operation mode flags
+ *
+ * Note:
+ * CTL registers need to be flushed after calling this function
+ * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
+ */
+#define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT BIT(0)
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+ enum mdp5_pipe stage[][MAX_PIPE_STAGE],
+ enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
+ u32 stage_cnt, u32 ctl_blend_op_flags);
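+
+/*
+ * Illustrative call (a sketch, assuming a single left pipe and no right
+ * mixer): blend SSPP_VIG0 at the first stage over border color:
+ *
+ *	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_VIG0 } };
+ *	mdp5_ctl_blend(ctl, pipeline, stage, NULL, 1,
+ *		       MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT);
+ */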
+
+/**
+ * mdp_ctl_flush_mask...() - Register FLUSH masks
+ *
+ * These masks are used to specify which block(s) need to be flushed
+ * through @flush_mask parameter in mdp5_ctl_commit(.., flush_mask).
+ */
+u32 mdp_ctl_flush_mask_lm(int lm);
+u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe);
+u32 mdp_ctl_flush_mask_cursor(int cursor_id);
+u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf);
+
+/* @flush_mask: see the CTL flush mask definitions above */
+u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+ u32 flush_mask, bool start);
+u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl);
+
+#endif /* __MDP5_CTL_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
new file mode 100644
index 0000000000..79d67c4957
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_probe_helper.h>
+
+#include "mdp5_kms.h"
+
+static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv = encoder->dev->dev_private;
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static void mdp5_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(mdp5_encoder);
+}
+
+static const struct drm_encoder_funcs mdp5_encoder_funcs = {
+ .destroy = mdp5_encoder_destroy,
+};
+
+static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct drm_connector *connector;
+ int intf = mdp5_encoder->intf->num;
+ uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
+ uint32_t display_v_start, display_v_end;
+ uint32_t hsync_start_x, hsync_end_x;
+ uint32_t format = 0x2100;
+ unsigned long flags;
+
+ mode = adjusted_mode;
+
+ DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
+
+ ctrl_pol = 0;
+
+ /* DSI controller cannot handle active-low sync signals. */
+ if (mdp5_encoder->intf->type != INTF_DSI) {
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ctrl_pol |= MDP5_INTF_POLARITY_CTL_HSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ctrl_pol |= MDP5_INTF_POLARITY_CTL_VSYNC_LOW;
+ }
+ /* probably need to get DATA_EN polarity from panel.. */
+
+ dtv_hsync_skew = 0; /* get this from panel? */
+
+ /* Get color format from panel, default is 8bpc */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ switch (connector->display_info.bpc) {
+ case 4:
+ format |= 0;
+ break;
+ case 5:
+ format |= 0x15;
+ break;
+ case 6:
+ format |= 0x2A;
+ break;
+ case 8:
+ default:
+ format |= 0x3F;
+ break;
+ }
+ break;
+ }
+ }
+
+ hsync_start_x = (mode->htotal - mode->hsync_start);
+ hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
+
+ vsync_period = mode->vtotal * mode->htotal;
+ vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
+ display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
+ display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
+
+ /*
+ * For edp only:
+ * DISPLAY_V_START = (VBP * HCYCLE) + HBP
+ * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
+ */
+ if (mdp5_encoder->intf->type == INTF_eDP) {
+ display_v_start += mode->htotal - mode->hsync_start;
+ display_v_end -= mode->hsync_start - mode->hdisplay;
+ }
+
+ spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
+
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf),
+ MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) |
+ MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal));
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_PERIOD_F0(intf), vsync_period);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_LEN_F0(intf), vsync_len);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_HCTL(intf),
+ MDP5_INTF_DISPLAY_HCTL_START(hsync_start_x) |
+ MDP5_INTF_DISPLAY_HCTL_END(hsync_end_x));
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VSTART_F0(intf), display_v_start);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VEND_F0(intf), display_v_end);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_BORDER_COLOR(intf), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_UNDERFLOW_COLOR(intf), 0xff);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_SKEW(intf), dtv_hsync_skew);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_POLARITY_CTL(intf), ctrl_pol);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_HCTL(intf),
+ MDP5_INTF_ACTIVE_HCTL_START(0) |
+ MDP5_INTF_ACTIVE_HCTL_END(0));
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VSTART_F0(intf), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */
+
+ spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
+
+ mdp5_crtc_set_pipeline(encoder->crtc);
+}
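+
+/*
+ * Worked example of the timing math above, for a standard 1080p60 mode
+ * (hdisplay 1920, hsync 2008-2052, htotal 2200; vdisplay 1080,
+ * vsync 1084-1089, vtotal 1125):
+ *	hsync_start_x   = 2200 - 2008 = 192
+ *	hsync_end_x     = 2200 - (2008 - 1920) - 1 = 2111
+ *	vsync_period    = 1125 * 2200 = 2475000 pixel clocks
+ *	vsync_len       = 5 * 2200 = 11000
+ *	display_v_start = (1125 - 1084) * 2200 = 90200
+ *	display_v_end   = 2475000 - (4 * 2200) - 1 = 2466199
+ */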
+
+static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ struct mdp5_ctl *ctl = mdp5_encoder->ctl;
+ struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
+ struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+ struct mdp5_interface *intf = mdp5_encoder->intf;
+ int intfn = mdp5_encoder->intf->num;
+ unsigned long flags;
+
+ if (WARN_ON(!mdp5_encoder->enabled))
+ return;
+
+ mdp5_ctl_set_encoder_state(ctl, pipeline, false);
+
+ spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0);
+ spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
+
+ /*
+ * Wait for a vsync so we know the ENABLE=0 latched before
+ * the (connector) source of the vsync's gets disabled,
+ * otherwise we end up in a funny state if we re-enable
+	 * before the disable latches, with the result that some of
+	 * the settings for the new modeset (like the new scanout
+	 * buffer) don't latch properly..
+ */
+ mdp_irq_wait(&mdp5_kms->base, intf2vblank(mixer, intf));
+
+ mdp5_encoder->enabled = false;
+}
+
+static void mdp5_vid_encoder_enable(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ struct mdp5_ctl *ctl = mdp5_encoder->ctl;
+ struct mdp5_interface *intf = mdp5_encoder->intf;
+ struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
+ int intfn = intf->num;
+ unsigned long flags;
+
+ if (WARN_ON(mdp5_encoder->enabled))
+ return;
+
+ spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1);
+ spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
+
+ mdp5_ctl_set_encoder_state(ctl, pipeline, true);
+
+ mdp5_encoder->enabled = true;
+}
+
+static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_interface *intf = mdp5_encoder->intf;
+
+ if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
+ mdp5_cmd_encoder_mode_set(encoder, mode, adjusted_mode);
+ else
+ mdp5_vid_encoder_mode_set(encoder, mode, adjusted_mode);
+}
+
+static void mdp5_encoder_disable(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_interface *intf = mdp5_encoder->intf;
+
+ if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
+ mdp5_cmd_encoder_disable(encoder);
+ else
+ mdp5_vid_encoder_disable(encoder);
+}
+
+static void mdp5_encoder_enable(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_interface *intf = mdp5_encoder->intf;
+ /* this isn't right I think */
+ struct drm_crtc_state *cstate = encoder->crtc->state;
+
+ mdp5_encoder_mode_set(encoder, &cstate->mode, &cstate->adjusted_mode);
+
+ if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
+ mdp5_cmd_encoder_enable(encoder);
+ else
+ mdp5_vid_encoder_enable(encoder);
+}
+
+static int mdp5_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc_state);
+ struct mdp5_interface *intf = mdp5_encoder->intf;
+ struct mdp5_ctl *ctl = mdp5_encoder->ctl;
+
+ mdp5_cstate->ctl = ctl;
+ mdp5_cstate->pipeline.intf = intf;
+
+ /*
+ * This is a bit awkward, but we want to flush the CTL and hit the
+ * START bit at most once for an atomic update. In the non-full-
+ * modeset case, this is done from crtc->atomic_flush(), but that
+ * is too early in the case of full modeset, in which case we
+ * defer to encoder->enable(). But we need to *know* whether
+ * encoder->enable() will be called to do this:
+ */
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
+ mdp5_cstate->defer_start = true;
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
+ .disable = mdp5_encoder_disable,
+ .enable = mdp5_encoder_enable,
+ .atomic_check = mdp5_encoder_atomic_check,
+};
+
+int mdp5_encoder_get_linecount(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ int intf = mdp5_encoder->intf->num;
+
+ return mdp5_read(mdp5_kms, REG_MDP5_INTF_LINE_COUNT(intf));
+}
+
+u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ int intf = mdp5_encoder->intf->num;
+
+ return mdp5_read(mdp5_kms, REG_MDP5_INTF_FRAME_COUNT(intf));
+}
+
+int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder);
+ struct mdp5_kms *mdp5_kms;
+ struct device *dev;
+ int intf_num;
+ u32 data = 0;
+
+ if (!encoder || !slave_encoder)
+ return -EINVAL;
+
+ mdp5_kms = get_kms(encoder);
+ intf_num = mdp5_encoder->intf->num;
+
+	/*
+	 * Switch the slave encoder's TimingGen sync mode to use the
+	 * master's enable signal for the slave encoder.
+	 */
+ if (intf_num == 1)
+ data |= MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC;
+ else if (intf_num == 2)
+ data |= MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC;
+ else
+ return -EINVAL;
+
+ dev = &mdp5_kms->pdev->dev;
+	/* Make sure clocks are on when connectors call this function. */
+ pm_runtime_get_sync(dev);
+
+ /* Dumb Panel, Sync mode */
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, 0);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, data);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
+
+ mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true);
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_interface *intf = mdp5_encoder->intf;
+
+ /* TODO: Expand this to set writeback modes too */
+ if (cmd_mode) {
+ WARN_ON(intf->type != INTF_DSI);
+ intf->mode = MDP5_INTF_DSI_MODE_COMMAND;
+ } else {
+ if (intf->type == INTF_DSI)
+ intf->mode = MDP5_INTF_DSI_MODE_VIDEO;
+ else
+ intf->mode = MDP5_INTF_MODE_NONE;
+ }
+}
+
+/* initialize encoder */
+struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
+ struct mdp5_interface *intf,
+ struct mdp5_ctl *ctl)
+{
+ struct drm_encoder *encoder = NULL;
+ struct mdp5_encoder *mdp5_encoder;
+ int enc_type = (intf->type == INTF_DSI) ?
+ DRM_MODE_ENCODER_DSI : DRM_MODE_ENCODER_TMDS;
+ int ret;
+
+ mdp5_encoder = kzalloc(sizeof(*mdp5_encoder), GFP_KERNEL);
+ if (!mdp5_encoder) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ encoder = &mdp5_encoder->base;
+ mdp5_encoder->ctl = ctl;
+ mdp5_encoder->intf = intf;
+
+ spin_lock_init(&mdp5_encoder->intf_lock);
+
+ drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, enc_type, NULL);
+
+ drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
+
+ return encoder;
+
+fail:
+ if (encoder)
+ mdp5_encoder_destroy(encoder);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c
new file mode 100644
index 0000000000..43443a435d
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/irq.h>
+
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
+
+#include "msm_drv.h"
+#include "mdp5_kms.h"
+
+void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+ uint32_t old_irqmask)
+{
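+	/*
+	 * Clear the stale status of interrupts that are being newly enabled:
+	 * irqmask ^ (irqmask & old_irqmask) equals irqmask & ~old_irqmask,
+	 * e.g. irqmask=0b110, old_irqmask=0b100 clears only bit 0b010.
+	 */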
+ mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_CLEAR,
+ irqmask ^ (irqmask & old_irqmask));
+ mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask);
+}
+
+static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp5_kms *mdp5_kms = container_of(irq, struct mdp5_kms, error_handler);
+ static DEFINE_RATELIMIT_STATE(rs, 5*HZ, 1);
+ extern bool dumpstate;
+
+ DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus);
+
+ if (dumpstate && __ratelimit(&rs)) {
+ struct drm_printer p = drm_info_printer(mdp5_kms->dev->dev);
+ drm_state_dump(mdp5_kms->dev, &p);
+ if (mdp5_kms->smp)
+ mdp5_smp_dump(mdp5_kms->smp, &p);
+ }
+}
+
+void mdp5_irq_preinstall(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct device *dev = &mdp5_kms->pdev->dev;
+
+ pm_runtime_get_sync(dev);
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
+ pm_runtime_put_sync(dev);
+}
+
+int mdp5_irq_postinstall(struct msm_kms *kms)
+{
+ struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
+ struct device *dev = &mdp5_kms->pdev->dev;
+ struct mdp_irq *error_handler = &mdp5_kms->error_handler;
+
+ error_handler->irq = mdp5_irq_error_handler;
+ error_handler->irqmask = MDP5_IRQ_INTF0_UNDER_RUN |
+ MDP5_IRQ_INTF1_UNDER_RUN |
+ MDP5_IRQ_INTF2_UNDER_RUN |
+ MDP5_IRQ_INTF3_UNDER_RUN;
+
+ pm_runtime_get_sync(dev);
+ mdp_irq_register(mdp_kms, error_handler);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+void mdp5_irq_uninstall(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct device *dev = &mdp5_kms->pdev->dev;
+
+ pm_runtime_get_sync(dev);
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
+ pm_runtime_put_sync(dev);
+}
+
+irqreturn_t mdp5_irq(struct msm_kms *kms)
+{
+ struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
+ struct drm_device *dev = mdp5_kms->dev;
+ struct drm_crtc *crtc;
+ uint32_t status, enable;
+
+ enable = mdp5_read(mdp5_kms, REG_MDP5_INTR_EN);
+ status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS) & enable;
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status);
+
+ VERB("status=%08x", status);
+
+ mdp_dispatch_irqs(mdp_kms, status);
+
+ drm_for_each_crtc(crtc, dev)
+ if (status & mdp5_crtc_vblank(crtc))
+ drm_crtc_handle_vblank(crtc);
+
+ return IRQ_HANDLED;
+}
+
+int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct device *dev = &mdp5_kms->pdev->dev;
+
+ pm_runtime_get_sync(dev);
+ mdp_update_vblank_mask(to_mdp_kms(kms),
+ mdp5_crtc_vblank(crtc), true);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct device *dev = &mdp5_kms->pdev->dev;
+
+ pm_runtime_get_sync(dev);
+ mdp_update_vblank_mask(to_mdp_kms(kms),
+ mdp5_crtc_vblank(crtc), false);
+ pm_runtime_put_sync(dev);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
new file mode 100644
index 0000000000..92bf9d949d
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -0,0 +1,1009 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/interconnect.h>
+#include <linux/of_irq.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_vblank.h>
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+#include "mdp5_kms.h"
+
+static int mdp5_hw_init(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct device *dev = &mdp5_kms->pdev->dev;
+ unsigned long flags;
+
+ pm_runtime_get_sync(dev);
+
+ /* Magic unknown register writes:
+ *
+ * W VBIF:0x004 00000001 (mdss_mdp.c:839)
+ * W MDP5:0x2e0 0xe9 (mdss_mdp.c:839)
+ * W MDP5:0x2e4 0x55 (mdss_mdp.c:839)
+ * W MDP5:0x3ac 0xc0000ccc (mdss_mdp.c:839)
+ * W MDP5:0x3b4 0xc0000ccc (mdss_mdp.c:839)
+ * W MDP5:0x3bc 0xcccccc (mdss_mdp.c:839)
+ * W MDP5:0x4a8 0xcccc0c0 (mdss_mdp.c:839)
+ * W MDP5:0x4b0 0xccccc0c0 (mdss_mdp.c:839)
+ * W MDP5:0x4b8 0xccccc000 (mdss_mdp.c:839)
+ *
+	 * The downstream fbdev driver gets these register offsets/values
+	 * from DT.. not really sure what these registers are, or whether
+	 * the values differ across boards/SoCs; presumably they are the
+	 * "golden" register settings.
+ *
+ * Not setting these does not seem to cause any problem. But
+ * we may be getting lucky with the bootloader initializing
+ * them for us. OTOH, if we can always count on the bootloader
+ * setting the golden registers, then perhaps we don't need to
+ * care.
+ */
+
+ spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
+ mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
+ spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
+
+ mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+/* Global/shared object state funcs */
+
+/*
+ * This is a helper that returns the private state currently in operation.
+ * Note that this would return the "old_state" if called in the atomic check
+ * path, and the "new_state" after the atomic swap has been done.
+ */
+struct mdp5_global_state *
+mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms)
+{
+ return to_mdp5_global_state(mdp5_kms->glob_state.state);
+}
+
+/*
+ * This acquires the modeset lock set aside for global state and returns
+ * a newly duplicated private object state.
+ */
+struct mdp5_global_state *mdp5_get_global_state(struct drm_atomic_state *s)
+{
+ struct msm_drm_private *priv = s->dev->dev_private;
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+ struct drm_private_state *priv_state;
+ int ret;
+
+ ret = drm_modeset_lock(&mdp5_kms->glob_state_lock, s->acquire_ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
+ priv_state = drm_atomic_get_private_obj_state(s, &mdp5_kms->glob_state);
+ if (IS_ERR(priv_state))
+ return ERR_CAST(priv_state);
+
+ return to_mdp5_global_state(priv_state);
+}
+
+static struct drm_private_state *
+mdp5_global_duplicate_state(struct drm_private_obj *obj)
+{
+ struct mdp5_global_state *state;
+
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+ return &state->base;
+}
+
+static void mdp5_global_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct mdp5_global_state *mdp5_state = to_mdp5_global_state(state);
+
+ kfree(mdp5_state);
+}
+
+static const struct drm_private_state_funcs mdp5_global_state_funcs = {
+ .atomic_duplicate_state = mdp5_global_duplicate_state,
+ .atomic_destroy_state = mdp5_global_destroy_state,
+};
+
+static int mdp5_global_obj_init(struct mdp5_kms *mdp5_kms)
+{
+ struct mdp5_global_state *state;
+
+ drm_modeset_lock_init(&mdp5_kms->glob_state_lock);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->mdp5_kms = mdp5_kms;
+
+ drm_atomic_private_obj_init(mdp5_kms->dev, &mdp5_kms->glob_state,
+ &state->base,
+ &mdp5_global_state_funcs);
+ return 0;
+}
+
+static void mdp5_enable_commit(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ pm_runtime_get_sync(&mdp5_kms->pdev->dev);
+}
+
+static void mdp5_disable_commit(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ pm_runtime_put_sync(&mdp5_kms->pdev->dev);
+}
+
+static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct mdp5_global_state *global_state;
+
+ global_state = mdp5_get_existing_global_state(mdp5_kms);
+
+ if (mdp5_kms->smp)
+ mdp5_smp_prepare_commit(mdp5_kms->smp, &global_state->smp);
+}
+
+static void mdp5_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
+{
+ /* TODO */
+}
+
+static void mdp5_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct drm_crtc *crtc;
+
+ for_each_crtc_mask(mdp5_kms->dev, crtc, crtc_mask)
+ mdp5_crtc_wait_for_commit_done(crtc);
+}
+
+static void mdp5_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct mdp5_global_state *global_state;
+
+ global_state = mdp5_get_existing_global_state(mdp5_kms);
+
+ if (mdp5_kms->smp)
+ mdp5_smp_complete_commit(mdp5_kms->smp, &global_state->smp);
+}
+
+static int mdp5_set_split_display(struct msm_kms *kms,
+ struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder,
+ bool is_cmd_mode)
+{
+ if (is_cmd_mode)
+ return mdp5_cmd_encoder_set_split_display(encoder,
+ slave_encoder);
+ else
+ return mdp5_vid_encoder_set_split_display(encoder,
+ slave_encoder);
+}
+
+static void mdp5_destroy(struct mdp5_kms *mdp5_kms);
+
+static void mdp5_kms_destroy(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct msm_gem_address_space *aspace = kms->aspace;
+ int i;
+
+ for (i = 0; i < mdp5_kms->num_hwmixers; i++)
+ mdp5_mixer_destroy(mdp5_kms->hwmixers[i]);
+
+ for (i = 0; i < mdp5_kms->num_hwpipes; i++)
+ mdp5_pipe_destroy(mdp5_kms->hwpipes[i]);
+
+ if (aspace) {
+ aspace->mmu->funcs->detach(aspace->mmu);
+ msm_gem_address_space_put(aspace);
+ }
+
+ mdp_kms_destroy(&mdp5_kms->base);
+ mdp5_destroy(mdp5_kms);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int smp_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ if (!mdp5_kms->smp) {
+ drm_printf(&p, "no SMP pool\n");
+ return 0;
+ }
+
+ mdp5_smp_dump(mdp5_kms->smp, &p);
+
+ return 0;
+}
+
+static struct drm_info_list mdp5_debugfs_list[] = {
+ {"smp", smp_show },
+};
+
+static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
+{
+ drm_debugfs_create_files(mdp5_debugfs_list,
+ ARRAY_SIZE(mdp5_debugfs_list),
+ minor->debugfs_root, minor);
+
+ return 0;
+}
+#endif
+
+static const struct mdp_kms_funcs kms_funcs = {
+ .base = {
+ .hw_init = mdp5_hw_init,
+ .irq_preinstall = mdp5_irq_preinstall,
+ .irq_postinstall = mdp5_irq_postinstall,
+ .irq_uninstall = mdp5_irq_uninstall,
+ .irq = mdp5_irq,
+ .enable_vblank = mdp5_enable_vblank,
+ .disable_vblank = mdp5_disable_vblank,
+ .flush_commit = mdp5_flush_commit,
+ .enable_commit = mdp5_enable_commit,
+ .disable_commit = mdp5_disable_commit,
+ .prepare_commit = mdp5_prepare_commit,
+ .wait_flush = mdp5_wait_flush,
+ .complete_commit = mdp5_complete_commit,
+ .get_format = mdp_get_format,
+ .set_split_display = mdp5_set_split_display,
+ .destroy = mdp5_kms_destroy,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = mdp5_kms_debugfs_init,
+#endif
+ },
+ .set_irqmask = mdp5_set_irqmask,
+};
+
+static int mdp5_disable(struct mdp5_kms *mdp5_kms)
+{
+ DBG("");
+
+ mdp5_kms->enable_count--;
+ WARN_ON(mdp5_kms->enable_count < 0);
+
+ clk_disable_unprepare(mdp5_kms->tbu_rt_clk);
+ clk_disable_unprepare(mdp5_kms->tbu_clk);
+ clk_disable_unprepare(mdp5_kms->ahb_clk);
+ clk_disable_unprepare(mdp5_kms->axi_clk);
+ clk_disable_unprepare(mdp5_kms->core_clk);
+ clk_disable_unprepare(mdp5_kms->lut_clk);
+
+ return 0;
+}
+
+static int mdp5_enable(struct mdp5_kms *mdp5_kms)
+{
+ DBG("");
+
+ mdp5_kms->enable_count++;
+
+ clk_prepare_enable(mdp5_kms->ahb_clk);
+ clk_prepare_enable(mdp5_kms->axi_clk);
+ clk_prepare_enable(mdp5_kms->core_clk);
+ clk_prepare_enable(mdp5_kms->lut_clk);
+ clk_prepare_enable(mdp5_kms->tbu_clk);
+ clk_prepare_enable(mdp5_kms->tbu_rt_clk);
+
+ return 0;
+}
+
+static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
+ struct mdp5_interface *intf,
+ struct mdp5_ctl *ctl)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ struct drm_encoder *encoder;
+
+ encoder = mdp5_encoder_init(dev, intf, ctl);
+ if (IS_ERR(encoder)) {
+ DRM_DEV_ERROR(dev->dev, "failed to construct encoder\n");
+ return encoder;
+ }
+
+ return encoder;
+}
+
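+/*
+ * E.g. with an intf.connect table of { INTF_eDP, INTF_DSI, INTF_DSI, ... }
+ * (illustrative; the real tables come from mdp5_cfg), intf_num 1 maps to
+ * DSI id 0 and intf_num 2 to DSI id 1:
+ */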
+static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num)
+{
+ const enum mdp5_intf_type *intfs = hw_cfg->intf.connect;
+ const int intf_cnt = ARRAY_SIZE(hw_cfg->intf.connect);
+ int id = 0, i;
+
+ for (i = 0; i < intf_cnt; i++) {
+ if (intfs[i] == INTF_DSI) {
+ if (intf_num == i)
+ return id;
+
+ id++;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
+ struct mdp5_interface *intf)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm;
+ struct mdp5_ctl *ctl;
+ struct drm_encoder *encoder;
+ int ret = 0;
+
+ switch (intf->type) {
+ case INTF_eDP:
+ DRM_DEV_INFO(dev->dev, "Skipping eDP interface %d\n", intf->num);
+ break;
+ case INTF_HDMI:
+ if (!priv->hdmi)
+ break;
+
+ ctl = mdp5_ctlm_request(ctlm, intf->num);
+ if (!ctl) {
+ ret = -EINVAL;
+ break;
+ }
+
+ encoder = construct_encoder(mdp5_kms, intf, ctl);
+ if (IS_ERR(encoder)) {
+ ret = PTR_ERR(encoder);
+ break;
+ }
+
+ ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
+ break;
+ case INTF_DSI:
+ {
+ const struct mdp5_cfg_hw *hw_cfg =
+ mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+ int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);
+
+ if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
+ DRM_DEV_ERROR(dev->dev, "failed to find dsi from intf %d\n",
+ intf->num);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!priv->dsi[dsi_id])
+ break;
+
+ ctl = mdp5_ctlm_request(ctlm, intf->num);
+ if (!ctl) {
+ ret = -EINVAL;
+ break;
+ }
+
+ encoder = construct_encoder(mdp5_kms, intf, ctl);
+ if (IS_ERR(encoder)) {
+ ret = PTR_ERR(encoder);
+ break;
+ }
+
+ ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
+ if (!ret)
+ mdp5_encoder_set_intf_mode(encoder, msm_dsi_is_cmd_mode(priv->dsi[dsi_id]));
+
+ break;
+ }
+ default:
+ DRM_DEV_ERROR(dev->dev, "unknown intf: %d\n", intf->type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int modeset_init(struct mdp5_kms *mdp5_kms)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ unsigned int num_crtcs;
+ int i, ret, pi = 0, ci = 0;
+ struct drm_plane *primary[MAX_BASES] = { NULL };
+ struct drm_plane *cursor[MAX_BASES] = { NULL };
+ struct drm_encoder *encoder;
+ unsigned int num_encoders;
+
+ /*
+	 * Construct encoders and perform modeset initialization of the
+	 * connector devices for each external display interface.
+ */
+ for (i = 0; i < mdp5_kms->num_intfs; i++) {
+ ret = modeset_init_intf(mdp5_kms, mdp5_kms->intfs[i]);
+ if (ret)
+ goto fail;
+ }
+
+ num_encoders = 0;
+ drm_for_each_encoder(encoder, dev)
+ num_encoders++;
+
+ /*
+	 * We should ideally have fewer encoders (set up by parsing the MDP5
+	 * interfaces) than layer mixers present in HW, but let's be safe
+	 * here anyway
+ */
+ num_crtcs = min(num_encoders, mdp5_kms->num_hwmixers);
+
+ /*
+ * Construct planes equaling the number of hw pipes, and CRTCs for the
+ * N encoders set up by the driver. The first N planes become primary
+ * planes for the CRTCs, with the remainder as overlay planes:
+ */
+ for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
+ struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
+ struct drm_plane *plane;
+ enum drm_plane_type type;
+
+ if (i < num_crtcs)
+ type = DRM_PLANE_TYPE_PRIMARY;
+ else if (hwpipe->caps & MDP_PIPE_CAP_CURSOR)
+ type = DRM_PLANE_TYPE_CURSOR;
+ else
+ type = DRM_PLANE_TYPE_OVERLAY;
+
+ plane = mdp5_plane_init(dev, type);
+ if (IS_ERR(plane)) {
+ ret = PTR_ERR(plane);
+ DRM_DEV_ERROR(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
+ goto fail;
+ }
+
+ if (type == DRM_PLANE_TYPE_PRIMARY)
+ primary[pi++] = plane;
+ if (type == DRM_PLANE_TYPE_CURSOR)
+ cursor[ci++] = plane;
+ }
+
+ for (i = 0; i < num_crtcs; i++) {
+ struct drm_crtc *crtc;
+
+ crtc = mdp5_crtc_init(dev, primary[i], cursor[i], i);
+ if (IS_ERR(crtc)) {
+ ret = PTR_ERR(crtc);
+ DRM_DEV_ERROR(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
+ goto fail;
+ }
+ priv->num_crtcs++;
+ }
+
+ /*
+ * Now that we know the number of crtcs we've created, set the possible
+ * crtcs for the encoders
+ */
+ drm_for_each_encoder(encoder, dev)
+ encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
+
+ return 0;
+
+fail:
+ return ret;
+}
+
+static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
+ u32 *major, u32 *minor)
+{
+ struct device *dev = &mdp5_kms->pdev->dev;
+ u32 version;
+
+ pm_runtime_get_sync(dev);
+ version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);
+ pm_runtime_put_sync(dev);
+
+ *major = FIELD(version, MDP5_HW_VERSION_MAJOR);
+ *minor = FIELD(version, MDP5_HW_VERSION_MINOR);
+
+ DRM_DEV_INFO(dev, "MDP5 version v%d.%d", *major, *minor);
+}
+
+static int get_clk(struct platform_device *pdev, struct clk **clkp,
+ const char *name, bool mandatory)
+{
+ struct device *dev = &pdev->dev;
+ struct clk *clk = msm_clk_get(pdev, name);
+ if (IS_ERR(clk) && mandatory) {
+ DRM_DEV_ERROR(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
+ return PTR_ERR(clk);
+ }
+ if (IS_ERR(clk))
+ DBG("skipping %s", name);
+ else
+ *clkp = clk;
+
+ return 0;
+}
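+
+/*
+ * Typical use (a sketch; the actual clock names are whatever mdp5_init()
+ * passes in):
+ *
+ *	ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
+ *	if (ret)
+ *		return ret;
+ */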
+
+static int mdp5_init(struct platform_device *pdev, struct drm_device *dev);
+
+static int mdp5_kms_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev;
+ struct mdp5_kms *mdp5_kms;
+ struct mdp5_cfg *config;
+ struct msm_kms *kms;
+ struct msm_gem_address_space *aspace;
+ int irq, i, ret;
+
+ ret = mdp5_init(to_platform_device(dev->dev), dev);
+ if (ret)
+ return ret;
+
+ /* priv->kms would have been populated by the MDP5 driver */
+ kms = priv->kms;
+ if (!kms)
+ return -ENOMEM;
+
+ mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ pdev = mdp5_kms->pdev;
+
+ ret = mdp_kms_init(&mdp5_kms->base, &kms_funcs);
+ if (ret) {
+ DRM_DEV_ERROR(&pdev->dev, "failed to init kms\n");
+ goto fail;
+ }
+
+ irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (!irq) {
+ ret = -EINVAL;
+ DRM_DEV_ERROR(&pdev->dev, "failed to get irq\n");
+ goto fail;
+ }
+
+ kms->irq = irq;
+
+ config = mdp5_cfg_get_config(mdp5_kms->cfg);
+
+ /* make sure things are off before attaching iommu (bootloader could
+ * have left things on, in which case we'll start getting faults if
+ * we don't disable):
+ */
+ pm_runtime_get_sync(&pdev->dev);
+ for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
+ if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
+ !config->hw->intf.base[i])
+ continue;
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
+
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
+ }
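+	/* wait roughly one 60 fps frame for the timing engines to stop */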
+ mdelay(16);
+
+ aspace = msm_kms_init_aspace(mdp5_kms->dev);
+ if (IS_ERR(aspace)) {
+ ret = PTR_ERR(aspace);
+ goto fail;
+ }
+
+ kms->aspace = aspace;
+
+ pm_runtime_put_sync(&pdev->dev);
+
+ ret = modeset_init(mdp5_kms);
+ if (ret) {
+ DRM_DEV_ERROR(&pdev->dev, "modeset_init failed: %d\n", ret);
+ goto fail;
+ }
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = 0xffff;
+ dev->mode_config.max_height = 0xffff;
+
+ dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */
+ dev->vblank_disable_immediate = true;
+
+ return 0;
+fail:
+ if (kms)
+ mdp5_kms_destroy(kms);
+
+ return ret;
+}
+
+static void mdp5_destroy(struct mdp5_kms *mdp5_kms)
+{
+ int i;
+
+ if (mdp5_kms->ctlm)
+ mdp5_ctlm_destroy(mdp5_kms->ctlm);
+ if (mdp5_kms->smp)
+ mdp5_smp_destroy(mdp5_kms->smp);
+ if (mdp5_kms->cfg)
+ mdp5_cfg_destroy(mdp5_kms->cfg);
+
+ for (i = 0; i < mdp5_kms->num_intfs; i++)
+ kfree(mdp5_kms->intfs[i]);
+
+ if (mdp5_kms->rpm_enabled)
+ pm_runtime_disable(&mdp5_kms->pdev->dev);
+
+ drm_atomic_private_obj_fini(&mdp5_kms->glob_state);
+ drm_modeset_lock_fini(&mdp5_kms->glob_state_lock);
+}
+
+static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
+ const enum mdp5_pipe *pipes, const uint32_t *offsets,
+ uint32_t caps)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ int i, ret;
+
+ for (i = 0; i < cnt; i++) {
+ struct mdp5_hw_pipe *hwpipe;
+
+ hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps);
+ if (IS_ERR(hwpipe)) {
+ ret = PTR_ERR(hwpipe);
+ DRM_DEV_ERROR(dev->dev, "failed to construct pipe for %s (%d)\n",
+ pipe2name(pipes[i]), ret);
+ return ret;
+ }
+ hwpipe->idx = mdp5_kms->num_hwpipes;
+ mdp5_kms->hwpipes[mdp5_kms->num_hwpipes++] = hwpipe;
+ }
+
+ return 0;
+}
+
+static int hwpipe_init(struct mdp5_kms *mdp5_kms)
+{
+ static const enum mdp5_pipe rgb_planes[] = {
+ SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
+ };
+ static const enum mdp5_pipe vig_planes[] = {
+ SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
+ };
+ static const enum mdp5_pipe dma_planes[] = {
+ SSPP_DMA0, SSPP_DMA1,
+ };
+ static const enum mdp5_pipe cursor_planes[] = {
+ SSPP_CURSOR0, SSPP_CURSOR1,
+ };
+ const struct mdp5_cfg_hw *hw_cfg;
+ int ret;
+
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+
+ /* Construct RGB pipes: */
+ ret = construct_pipes(mdp5_kms, hw_cfg->pipe_rgb.count, rgb_planes,
+ hw_cfg->pipe_rgb.base, hw_cfg->pipe_rgb.caps);
+ if (ret)
+ return ret;
+
+ /* Construct video (VIG) pipes: */
+ ret = construct_pipes(mdp5_kms, hw_cfg->pipe_vig.count, vig_planes,
+ hw_cfg->pipe_vig.base, hw_cfg->pipe_vig.caps);
+ if (ret)
+ return ret;
+
+ /* Construct DMA pipes: */
+ ret = construct_pipes(mdp5_kms, hw_cfg->pipe_dma.count, dma_planes,
+ hw_cfg->pipe_dma.base, hw_cfg->pipe_dma.caps);
+ if (ret)
+ return ret;
+
+ /* Construct cursor pipes: */
+ ret = construct_pipes(mdp5_kms, hw_cfg->pipe_cursor.count,
+ cursor_planes, hw_cfg->pipe_cursor.base,
+ hw_cfg->pipe_cursor.caps);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int hwmixer_init(struct mdp5_kms *mdp5_kms)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ const struct mdp5_cfg_hw *hw_cfg;
+ int i, ret;
+
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+
+ for (i = 0; i < hw_cfg->lm.count; i++) {
+ struct mdp5_hw_mixer *mixer;
+
+ mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]);
+ if (IS_ERR(mixer)) {
+ ret = PTR_ERR(mixer);
+ DRM_DEV_ERROR(dev->dev, "failed to construct LM%d (%d)\n",
+ i, ret);
+ return ret;
+ }
+
+ mixer->idx = mdp5_kms->num_hwmixers;
+ mdp5_kms->hwmixers[mdp5_kms->num_hwmixers++] = mixer;
+ }
+
+ return 0;
+}
+
+static int interface_init(struct mdp5_kms *mdp5_kms)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ const struct mdp5_cfg_hw *hw_cfg;
+ const enum mdp5_intf_type *intf_types;
+ int i;
+
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+ intf_types = hw_cfg->intf.connect;
+
+ for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
+ struct mdp5_interface *intf;
+
+ if (intf_types[i] == INTF_DISABLED)
+ continue;
+
+ intf = kzalloc(sizeof(*intf), GFP_KERNEL);
+ if (!intf) {
+ DRM_DEV_ERROR(dev->dev, "failed to construct INTF%d\n", i);
+ return -ENOMEM;
+ }
+
+ intf->num = i;
+ intf->type = intf_types[i];
+ intf->mode = MDP5_INTF_MODE_NONE;
+ intf->idx = mdp5_kms->num_intfs;
+ mdp5_kms->intfs[mdp5_kms->num_intfs++] = intf;
+ }
+
+ return 0;
+}
+
+static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct mdp5_kms *mdp5_kms;
+ struct mdp5_cfg *config;
+ u32 major, minor;
+ int ret;
+
+ mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL);
+ if (!mdp5_kms) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ spin_lock_init(&mdp5_kms->resource_lock);
+
+ mdp5_kms->dev = dev;
+ mdp5_kms->pdev = pdev;
+
+ ret = mdp5_global_obj_init(mdp5_kms);
+ if (ret)
+ goto fail;
+
+ mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys");
+ if (IS_ERR(mdp5_kms->mmio)) {
+ ret = PTR_ERR(mdp5_kms->mmio);
+ goto fail;
+ }
+
+ /* mandatory clocks: */
+ ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
+ if (ret)
+ goto fail;
+ ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
+ if (ret)
+ goto fail;
+ ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
+ if (ret)
+ goto fail;
+ ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
+ if (ret)
+ goto fail;
+
+ /* optional clocks: */
+ get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
+ get_clk(pdev, &mdp5_kms->tbu_clk, "tbu", false);
+ get_clk(pdev, &mdp5_kms->tbu_rt_clk, "tbu_rt", false);
+
+ /* we need to set a default rate before enabling. Set a safe
+ * rate first, then figure out hw revision, and then set a
+ * more optimal rate:
+ */
+ clk_set_rate(mdp5_kms->core_clk, 200000000);
+
+	/* publish the not-yet-fully-initialized kms pointer */
+ priv->kms = &mdp5_kms->base.base;
+
+ pm_runtime_enable(&pdev->dev);
+ mdp5_kms->rpm_enabled = true;
+
+ read_mdp_hw_revision(mdp5_kms, &major, &minor);
+
+ mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
+ if (IS_ERR(mdp5_kms->cfg)) {
+ ret = PTR_ERR(mdp5_kms->cfg);
+ mdp5_kms->cfg = NULL;
+ goto fail;
+ }
+
+ config = mdp5_cfg_get_config(mdp5_kms->cfg);
+ mdp5_kms->caps = config->hw->mdp.caps;
+
+ /* TODO: compute core clock rate at runtime */
+ clk_set_rate(mdp5_kms->core_clk, config->hw->max_clk);
+
+ /*
+ * Some chipsets have a Shared Memory Pool (SMP), while others
+ * have dedicated latency buffering per source pipe instead;
+ * this section initializes the SMP:
+ */
+ if (mdp5_kms->caps & MDP_CAP_SMP) {
+ mdp5_kms->smp = mdp5_smp_init(mdp5_kms, &config->hw->smp);
+ if (IS_ERR(mdp5_kms->smp)) {
+ ret = PTR_ERR(mdp5_kms->smp);
+ mdp5_kms->smp = NULL;
+ goto fail;
+ }
+ }
+
+ mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg);
+ if (IS_ERR(mdp5_kms->ctlm)) {
+ ret = PTR_ERR(mdp5_kms->ctlm);
+ mdp5_kms->ctlm = NULL;
+ goto fail;
+ }
+
+ ret = hwpipe_init(mdp5_kms);
+ if (ret)
+ goto fail;
+
+ ret = hwmixer_init(mdp5_kms);
+ if (ret)
+ goto fail;
+
+ ret = interface_init(mdp5_kms);
+ if (ret)
+ goto fail;
+
+ return 0;
+fail:
+ if (mdp5_kms)
+ mdp5_destroy(mdp5_kms);
+ return ret;
+}
+
+static int mdp5_setup_interconnect(struct platform_device *pdev)
+{
+ struct icc_path *path0 = msm_icc_get(&pdev->dev, "mdp0-mem");
+ struct icc_path *path1 = msm_icc_get(&pdev->dev, "mdp1-mem");
+ struct icc_path *path_rot = msm_icc_get(&pdev->dev, "rotator-mem");
+
+ if (IS_ERR(path0))
+ return PTR_ERR(path0);
+
+ if (!path0) {
+		/* Missing interconnect support is not necessarily a fatal
+		 * condition; the platform may simply not have an
+		 * interconnect driver yet. But warn about it in case the
+		 * bootloader didn't set up bus clocks high enough for
+		 * scanout.
+		 */
+ dev_warn(&pdev->dev, "No interconnect support may cause display underflows!\n");
+ return 0;
+ }
+
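+	/*
+	 * Static, deliberately high bandwidth vote: per-mode bandwidth
+	 * scaling is not implemented here, so request enough for any
+	 * configuration up front.
+	 */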
+ icc_set_bw(path0, 0, MBps_to_icc(6400));
+
+ if (!IS_ERR_OR_NULL(path1))
+ icc_set_bw(path1, 0, MBps_to_icc(6400));
+ if (!IS_ERR_OR_NULL(path_rot))
+ icc_set_bw(path_rot, 0, MBps_to_icc(6400));
+
+ return 0;
+}
+
+static int mdp5_dev_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ DBG("");
+
+ ret = mdp5_setup_interconnect(pdev);
+ if (ret)
+ return ret;
+
+ return msm_drv_probe(&pdev->dev, mdp5_kms_init);
+}
+
+static int mdp5_dev_remove(struct platform_device *pdev)
+{
+ DBG("");
+ component_master_del(&pdev->dev, &msm_drm_ops);
+ return 0;
+}
+
+static __maybe_unused int mdp5_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+
+ DBG("");
+
+ return mdp5_disable(mdp5_kms);
+}
+
+static __maybe_unused int mdp5_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+
+ DBG("");
+
+ return mdp5_enable(mdp5_kms);
+}
+
+static const struct dev_pm_ops mdp5_pm_ops = {
+ SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL)
+ .prepare = msm_pm_prepare,
+ .complete = msm_pm_complete,
+};
+
+static const struct of_device_id mdp5_dt_match[] = {
+ { .compatible = "qcom,mdp5", },
+ /* to support downstream DT files */
+ { .compatible = "qcom,mdss_mdp", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mdp5_dt_match);
+
+static struct platform_driver mdp5_driver = {
+ .probe = mdp5_dev_probe,
+ .remove = mdp5_dev_remove,
+ .shutdown = msm_drv_shutdown,
+ .driver = {
+ .name = "msm_mdp",
+ .of_match_table = mdp5_dt_match,
+ .pm = &mdp5_pm_ops,
+ },
+};
+
+void __init msm_mdp_register(void)
+{
+ DBG("");
+ platform_driver_register(&mdp5_driver);
+}
+
+void __exit msm_mdp_unregister(void)
+{
+ DBG("");
+ platform_driver_unregister(&mdp5_driver);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
new file mode 100644
index 0000000000..29bf11f086
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
@@ -0,0 +1,327 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __MDP5_KMS_H__
+#define __MDP5_KMS_H__
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "disp/mdp_kms.h"
+#include "mdp5_cfg.h" /* must be included before mdp5.xml.h */
+#include "mdp5.xml.h"
+#include "mdp5_pipe.h"
+#include "mdp5_mixer.h"
+#include "mdp5_ctl.h"
+#include "mdp5_smp.h"
+
+struct mdp5_kms {
+ struct mdp_kms base;
+
+ struct drm_device *dev;
+
+ struct platform_device *pdev;
+
+ unsigned num_hwpipes;
+ struct mdp5_hw_pipe *hwpipes[SSPP_MAX];
+
+ unsigned num_hwmixers;
+ struct mdp5_hw_mixer *hwmixers[8];
+
+ unsigned num_intfs;
+ struct mdp5_interface *intfs[5];
+
+ struct mdp5_cfg_handler *cfg;
+ uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */
+
+	/*
+	 * Global private object state. Do not access directly; use
+	 * mdp5_get_global_state().
+	 */
+ struct drm_modeset_lock glob_state_lock;
+ struct drm_private_obj glob_state;
+
+ struct mdp5_smp *smp;
+ struct mdp5_ctl_manager *ctlm;
+
+ /* io/register spaces: */
+ void __iomem *mmio;
+
+ struct clk *axi_clk;
+ struct clk *ahb_clk;
+ struct clk *core_clk;
+ struct clk *lut_clk;
+ struct clk *tbu_clk;
+ struct clk *tbu_rt_clk;
+ struct clk *vsync_clk;
+
+	/*
+	 * lock to protect access to global resources, i.e. the following
+	 * register:
+	 * - REG_MDP5_DISP_INTF_SEL
+	 */
+ spinlock_t resource_lock;
+
+ bool rpm_enabled;
+
+ struct mdp_irq error_handler;
+
+ int enable_count;
+};
+#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
+
+/* Global private object state for tracking resources that are shared across
+ * multiple kms objects (planes/crtcs/etc).
+ */
+#define to_mdp5_global_state(x) container_of(x, struct mdp5_global_state, base)
+struct mdp5_global_state {
+ struct drm_private_state base;
+
+ struct drm_atomic_state *state;
+ struct mdp5_kms *mdp5_kms;
+
+ struct mdp5_hw_pipe_state hwpipe;
+ struct mdp5_hw_mixer_state hwmixer;
+ struct mdp5_smp_state smp;
+};
+
+struct mdp5_global_state *mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms);
+struct mdp5_global_state *__must_check mdp5_get_global_state(struct drm_atomic_state *s);
+
+/* Atomic plane state. Subclasses the base drm_plane_state in order to
+ * track assigned hwpipe and hw specific state.
+ */
+struct mdp5_plane_state {
+ struct drm_plane_state base;
+
+ struct mdp5_hw_pipe *hwpipe;
+ struct mdp5_hw_pipe *r_hwpipe; /* right hwpipe */
+
+ /* assigned by crtc blender */
+ enum mdp_mixer_stage_id stage;
+
+	/* whether the attached CRTC needs pixel data explicitly flushed
+	 * to the display (e.g. a DSI command-mode display)
+	 */
+ bool needs_dirtyfb;
+};
+#define to_mdp5_plane_state(x) \
+ container_of(x, struct mdp5_plane_state, base)
+
+struct mdp5_pipeline {
+ struct mdp5_interface *intf;
+ struct mdp5_hw_mixer *mixer;
+ struct mdp5_hw_mixer *r_mixer; /* right mixer */
+};
+
+struct mdp5_crtc_state {
+ struct drm_crtc_state base;
+
+ struct mdp5_ctl *ctl;
+ struct mdp5_pipeline pipeline;
+
+	/* these are derived from the intf/mixer state in mdp5_pipeline */
+ u32 vblank_irqmask;
+ u32 err_irqmask;
+ u32 pp_done_irqmask;
+
+ bool cmd_mode;
+
+	/* should we not write the CTL[n].START register on flush? If the
+	 * encoder has changed, this is set to true, since encoder->enable()
+	 * is called after the crtc state is committed, but we only want to
+	 * write the CTL[n].START register once. This lets us defer
+	 * writing CTL[n].START until encoder->enable().
+	 */
+ bool defer_start;
+};
+#define to_mdp5_crtc_state(x) \
+ container_of(x, struct mdp5_crtc_state, base)
+
+enum mdp5_intf_mode {
+ MDP5_INTF_MODE_NONE = 0,
+
+ /* Modes used for DSI interface (INTF_DSI type): */
+ MDP5_INTF_DSI_MODE_VIDEO,
+ MDP5_INTF_DSI_MODE_COMMAND,
+
+ /* Modes used for WB interface (INTF_WB type): */
+ MDP5_INTF_WB_MODE_BLOCK,
+ MDP5_INTF_WB_MODE_LINE,
+};
+
+struct mdp5_interface {
+ int idx;
+ int num; /* display interface number */
+ enum mdp5_intf_type type;
+ enum mdp5_intf_mode mode;
+};
+
+struct mdp5_encoder {
+ struct drm_encoder base;
+ spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */
+ bool enabled;
+ uint32_t bsc;
+
+ struct mdp5_interface *intf;
+ struct mdp5_ctl *ctl;
+};
+#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
+
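+/*
+ * Register accessors: only valid while the MDP block is powered and
+ * clocked, hence the WARN_ON() against enable_count.
+ */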
+static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
+{
+ WARN_ON(mdp5_kms->enable_count <= 0);
+ msm_writel(data, mdp5_kms->mmio + reg);
+}
+
+static inline u32 mdp5_read(struct mdp5_kms *mdp5_kms, u32 reg)
+{
+ WARN_ON(mdp5_kms->enable_count <= 0);
+ return msm_readl(mdp5_kms->mmio + reg);
+}
+
+static inline const char *stage2name(enum mdp_mixer_stage_id stage)
+{
+ static const char *names[] = {
+#define NAME(n) [n] = #n
+ NAME(STAGE_UNUSED), NAME(STAGE_BASE),
+ NAME(STAGE0), NAME(STAGE1), NAME(STAGE2),
+ NAME(STAGE3), NAME(STAGE4), NAME(STAGE6),
+#undef NAME
+ };
+ return names[stage];
+}
+
+static inline const char *pipe2name(enum mdp5_pipe pipe)
+{
+ static const char *names[] = {
+#define NAME(n) [SSPP_ ## n] = #n
+ NAME(VIG0), NAME(VIG1), NAME(VIG2),
+ NAME(RGB0), NAME(RGB1), NAME(RGB2),
+ NAME(DMA0), NAME(DMA1),
+ NAME(VIG3), NAME(RGB3),
+ NAME(CURSOR0), NAME(CURSOR1),
+#undef NAME
+ };
+ return names[pipe];
+}
+
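+/*
+ * Number of SMP client IDs a pipe consumes: RGB pipes fetch a single
+ * plane, while the remaining pipes can fetch up to three (e.g. Y/U/V
+ * for planar YUV formats), one client per plane.
+ */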
+static inline int pipe2nclients(enum mdp5_pipe pipe)
+{
+ switch (pipe) {
+ case SSPP_RGB0:
+ case SSPP_RGB1:
+ case SSPP_RGB2:
+ case SSPP_RGB3:
+ return 1;
+ default:
+ return 3;
+ }
+}
+
+static inline uint32_t intf2err(int intf_num)
+{
+ switch (intf_num) {
+ case 0: return MDP5_IRQ_INTF0_UNDER_RUN;
+ case 1: return MDP5_IRQ_INTF1_UNDER_RUN;
+ case 2: return MDP5_IRQ_INTF2_UNDER_RUN;
+ case 3: return MDP5_IRQ_INTF3_UNDER_RUN;
+ default: return 0;
+ }
+}
+
+static inline uint32_t intf2vblank(struct mdp5_hw_mixer *mixer,
+ struct mdp5_interface *intf)
+{
+	/*
+	 * In DSI command mode, the ping-pong block's read-pointer IRQ
+	 * acts as the vblank signal. The ping-pong buffer used is bound
+	 * to the layer mixer.
+	 */
+
+ if ((intf->type == INTF_DSI) &&
+ (intf->mode == MDP5_INTF_DSI_MODE_COMMAND))
+ return MDP5_IRQ_PING_PONG_0_RD_PTR << mixer->pp;
+
+ if (intf->type == INTF_WB)
+ return MDP5_IRQ_WB_2_DONE;
+
+ switch (intf->num) {
+ case 0: return MDP5_IRQ_INTF0_VSYNC;
+ case 1: return MDP5_IRQ_INTF1_VSYNC;
+ case 2: return MDP5_IRQ_INTF2_VSYNC;
+ case 3: return MDP5_IRQ_INTF3_VSYNC;
+ default: return 0;
+ }
+}
+
+static inline uint32_t lm2ppdone(struct mdp5_hw_mixer *mixer)
+{
+ return MDP5_IRQ_PING_PONG_0_DONE << mixer->pp;
+}
+
+void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+ uint32_t old_irqmask);
+void mdp5_irq_preinstall(struct msm_kms *kms);
+int mdp5_irq_postinstall(struct msm_kms *kms);
+void mdp5_irq_uninstall(struct msm_kms *kms);
+irqreturn_t mdp5_irq(struct msm_kms *kms);
+int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
+void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
+
+uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
+enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
+enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane);
+struct drm_plane *mdp5_plane_init(struct drm_device *dev,
+ enum drm_plane_type type);
+
+struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc);
+uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
+
+struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc);
+struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc);
+void mdp5_crtc_set_pipeline(struct drm_crtc *crtc);
+void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc);
+struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
+ struct drm_plane *plane,
+ struct drm_plane *cursor_plane, int id);
+
+struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
+ struct mdp5_interface *intf, struct mdp5_ctl *ctl);
+int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder);
+void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode);
+int mdp5_encoder_get_linecount(struct drm_encoder *encoder);
+u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder);
+
+#ifdef CONFIG_DRM_MSM_DSI
+void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+void mdp5_cmd_encoder_disable(struct drm_encoder *encoder);
+void mdp5_cmd_encoder_enable(struct drm_encoder *encoder);
+int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder);
+#else
+static inline void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+static inline void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
+{
+}
+static inline void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
+{
+}
+static inline int mdp5_cmd_encoder_set_split_display(
+ struct drm_encoder *encoder, struct drm_encoder *slave_encoder)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif /* __MDP5_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
new file mode 100644
index 0000000000..2536def2a0
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017 The Linux Foundation. All rights reserved.
+ */
+
+#include "mdp5_kms.h"
+
+/*
+ * As of now, there are only 2 combinations possible for source split:
+ *
+ * Left | Right
+ * -----|------
+ * LM0 | LM1
+ * LM2 | LM5
+ *
+ */
+static int lm_right_pair[] = { 1, -1, 5, -1, -1, -1 };
+
+static int get_right_pair_idx(struct mdp5_kms *mdp5_kms, int lm)
+{
+ int i;
+ int pair_lm;
+
+ pair_lm = lm_right_pair[lm];
+ if (pair_lm < 0)
+ return -EINVAL;
+
+ for (i = 0; i < mdp5_kms->num_hwmixers; i++) {
+ struct mdp5_hw_mixer *mixer = mdp5_kms->hwmixers[i];
+
+ if (mixer->lm == pair_lm)
+ return mixer->idx;
+ }
+
+ return -1;
+}
+
+int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
+ uint32_t caps, struct mdp5_hw_mixer **mixer,
+ struct mdp5_hw_mixer **r_mixer)
+{
+ struct msm_drm_private *priv = s->dev->dev_private;
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+ struct mdp5_global_state *global_state = mdp5_get_global_state(s);
+ struct mdp5_hw_mixer_state *new_state;
+ int i;
+
+ if (IS_ERR(global_state))
+ return PTR_ERR(global_state);
+
+ new_state = &global_state->hwmixer;
+
+ for (i = 0; i < mdp5_kms->num_hwmixers; i++) {
+ struct mdp5_hw_mixer *cur = mdp5_kms->hwmixers[i];
+
+		/*
+		 * skip if already in use by a different CRTC. If there is
+		 * a mixer already assigned to this CRTC, it means this call
+		 * is a request for an additional right mixer. Assume that
+		 * the existing mixer is the 'left' one, and try to see if
+		 * we can get its corresponding 'right' pair.
+		 */
+ if (new_state->hwmixer_to_crtc[cur->idx] &&
+ new_state->hwmixer_to_crtc[cur->idx] != crtc)
+ continue;
+
+		/* skip if it doesn't support some required caps: */
+ if (caps & ~cur->caps)
+ continue;
+
+ if (r_mixer) {
+ int pair_idx;
+
+ pair_idx = get_right_pair_idx(mdp5_kms, cur->lm);
+ if (pair_idx < 0)
+ return -EINVAL;
+
+ if (new_state->hwmixer_to_crtc[pair_idx])
+ continue;
+
+ *r_mixer = mdp5_kms->hwmixers[pair_idx];
+ }
+
+		/*
+		 * Prefer a pairable LM over an unpairable one: we can
+		 * switch the CRTC from Normal mode to Source Split mode
+		 * without requiring a full modeset if we have already
+		 * assigned this CRTC a pairable LM.
+		 *
+		 * TODO: some assignment sequences would still leave the
+		 * CRTC requiring a full modeset, even if we have the LM
+		 * resources to prevent it. On a platform with only a few
+		 * displays we don't run out of pairable LMs so easily,
+		 * so for now ignore the possibility of requiring a full
+		 * modeset.
+		 */
+ if (!(*mixer) || cur->caps & MDP_LM_CAP_PAIR)
+ *mixer = cur;
+ }
+
+ if (!(*mixer))
+ return -ENOMEM;
+
+ if (r_mixer && !(*r_mixer))
+ return -ENOMEM;
+
+ DBG("assigning Layer Mixer %d to crtc %s", (*mixer)->lm, crtc->name);
+
+ new_state->hwmixer_to_crtc[(*mixer)->idx] = crtc;
+ if (r_mixer) {
+ DBG("assigning Right Layer Mixer %d to crtc %s", (*r_mixer)->lm,
+ crtc->name);
+ new_state->hwmixer_to_crtc[(*r_mixer)->idx] = crtc;
+ }
+
+ return 0;
+}
+
+int mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
+{
+ struct mdp5_global_state *global_state = mdp5_get_global_state(s);
+ struct mdp5_hw_mixer_state *new_state;
+
+ if (!mixer)
+ return 0;
+
+ if (IS_ERR(global_state))
+ return PTR_ERR(global_state);
+
+ new_state = &global_state->hwmixer;
+
+ if (WARN_ON(!new_state->hwmixer_to_crtc[mixer->idx]))
+ return -EINVAL;
+
+ DBG("%s: release from crtc %s", mixer->name,
+ new_state->hwmixer_to_crtc[mixer->idx]->name);
+
+ new_state->hwmixer_to_crtc[mixer->idx] = NULL;
+
+ return 0;
+}
+
+void mdp5_mixer_destroy(struct mdp5_hw_mixer *mixer)
+{
+ kfree(mixer);
+}
+
+static const char * const mixer_names[] = {
+ "LM0", "LM1", "LM2", "LM3", "LM4", "LM5",
+};
+
+struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm)
+{
+ struct mdp5_hw_mixer *mixer;
+
+ mixer = kzalloc(sizeof(*mixer), GFP_KERNEL);
+ if (!mixer)
+ return ERR_PTR(-ENOMEM);
+
+ mixer->name = mixer_names[lm->id];
+ mixer->lm = lm->id;
+ mixer->caps = lm->caps;
+ mixer->pp = lm->pp;
+ mixer->dspp = lm->dspp;
+ mixer->flush_mask = mdp_ctl_flush_mask_lm(lm->id);
+
+ return mixer;
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
new file mode 100644
index 0000000000..545ee223b9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MDP5_LM_H__
+#define __MDP5_LM_H__
+
+/* represents a hw Layer Mixer; one (or more) is dynamically assigned to a crtc */
+struct mdp5_hw_mixer {
+ int idx;
+
+ const char *name;
+
+ int lm; /* the LM instance # */
+ uint32_t caps;
+ int pp;
+ int dspp;
+
+ uint32_t flush_mask; /* used to commit LM registers */
+};
+
+/* global atomic state of assignment between CRTCs and Layer Mixers: */
+struct mdp5_hw_mixer_state {
+ struct drm_crtc *hwmixer_to_crtc[8];
+};
+
+struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm);
+void mdp5_mixer_destroy(struct mdp5_hw_mixer *lm);
+int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
+ uint32_t caps, struct mdp5_hw_mixer **mixer,
+ struct mdp5_hw_mixer **r_mixer);
+int mdp5_mixer_release(struct drm_atomic_state *s,
+ struct mdp5_hw_mixer *mixer);
+
+#endif /* __MDP5_LM_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
new file mode 100644
index 0000000000..e4b8a78983
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include "mdp5_kms.h"
+
+int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
+ uint32_t caps, uint32_t blkcfg,
+ struct mdp5_hw_pipe **hwpipe,
+ struct mdp5_hw_pipe **r_hwpipe)
+{
+ struct msm_drm_private *priv = s->dev->dev_private;
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+ struct mdp5_global_state *new_global_state, *old_global_state;
+ struct mdp5_hw_pipe_state *old_state, *new_state;
+ int i, j;
+
+ new_global_state = mdp5_get_global_state(s);
+ if (IS_ERR(new_global_state))
+ return PTR_ERR(new_global_state);
+
+ /* grab old_state after mdp5_get_global_state(), since now we hold lock: */
+ old_global_state = mdp5_get_existing_global_state(mdp5_kms);
+
+ old_state = &old_global_state->hwpipe;
+ new_state = &new_global_state->hwpipe;
+
+ for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
+ struct mdp5_hw_pipe *cur = mdp5_kms->hwpipes[i];
+
+		/* skip if already in use; check both new and old state,
+		 * since we cannot immediately re-use a pipe that is
+		 * released in the current update in some cases:
+		 * (1) mdp5 can have SMP (non-double-buffered)
+		 * (2) hw pipe previously assigned to a different CRTC
+		 *     (vblanks might not be aligned)
+		 */
+ if (new_state->hwpipe_to_plane[cur->idx] ||
+ old_state->hwpipe_to_plane[cur->idx])
+ continue;
+
+		/* skip if it doesn't support some required caps: */
+ if (caps & ~cur->caps)
+ continue;
+
+ /*
+ * don't assign a cursor pipe to a plane that isn't going to
+ * be used as a cursor
+ */
+ if (cur->caps & MDP_PIPE_CAP_CURSOR &&
+ plane->type != DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ /* possible candidate, take the one with the
+ * fewest unneeded caps bits set:
+ */
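+		/* e.g. with caps = MDP_PIPE_CAP_SCALE, a pipe offering
+		 * (SCALE | HFLIP | VFLIP) beats one offering
+		 * (SCALE | CSC | HFLIP | VFLIP): one fewer unneeded cap
+		 * bit set (illustrative cap sets).
+		 */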
+ if (!(*hwpipe) || (hweight_long(cur->caps & ~caps) <
+ hweight_long((*hwpipe)->caps & ~caps))) {
+ bool r_found = false;
+
+ if (r_hwpipe) {
+ for (j = i + 1; j < mdp5_kms->num_hwpipes;
+ j++) {
+ struct mdp5_hw_pipe *r_cur =
+ mdp5_kms->hwpipes[j];
+
+ /* reject different types of hwpipes */
+ if (r_cur->caps != cur->caps)
+ continue;
+
+					/* respect priority, e.g. VIG0 > VIG1 */
+ if (cur->pipe > r_cur->pipe)
+ continue;
+
+ *r_hwpipe = r_cur;
+ r_found = true;
+ break;
+ }
+ }
+
+ if (!r_hwpipe || r_found)
+ *hwpipe = cur;
+ }
+ }
+
+ if (!(*hwpipe))
+ return -ENOMEM;
+
+ if (r_hwpipe && !(*r_hwpipe))
+ return -ENOMEM;
+
+ if (mdp5_kms->smp) {
+ int ret;
+
+ /* We don't support SMP and 2 hwpipes/plane together */
+ WARN_ON(r_hwpipe);
+
+ DBG("%s: alloc SMP blocks", (*hwpipe)->name);
+ ret = mdp5_smp_assign(mdp5_kms->smp, &new_global_state->smp,
+ (*hwpipe)->pipe, blkcfg);
+ if (ret)
+ return -ENOMEM;
+
+ (*hwpipe)->blkcfg = blkcfg;
+ }
+
+ DBG("%s: assign to plane %s for caps %x",
+ (*hwpipe)->name, plane->name, caps);
+ new_state->hwpipe_to_plane[(*hwpipe)->idx] = plane;
+
+ if (r_hwpipe) {
+ DBG("%s: assign to right of plane %s for caps %x",
+ (*r_hwpipe)->name, plane->name, caps);
+ new_state->hwpipe_to_plane[(*r_hwpipe)->idx] = plane;
+ }
+
+ return 0;
+}
+
+int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
+{
+ struct msm_drm_private *priv = s->dev->dev_private;
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+ struct mdp5_global_state *state;
+ struct mdp5_hw_pipe_state *new_state;
+
+ if (!hwpipe)
+ return 0;
+
+ state = mdp5_get_global_state(s);
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ new_state = &state->hwpipe;
+
+ if (WARN_ON(!new_state->hwpipe_to_plane[hwpipe->idx]))
+ return -EINVAL;
+
+ DBG("%s: release from plane %s", hwpipe->name,
+ new_state->hwpipe_to_plane[hwpipe->idx]->name);
+
+ if (mdp5_kms->smp) {
+ DBG("%s: free SMP blocks", hwpipe->name);
+ mdp5_smp_release(mdp5_kms->smp, &state->smp, hwpipe->pipe);
+ }
+
+ new_state->hwpipe_to_plane[hwpipe->idx] = NULL;
+
+ return 0;
+}
+
+void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe)
+{
+ kfree(hwpipe);
+}
+
+struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
+ uint32_t reg_offset, uint32_t caps)
+{
+ struct mdp5_hw_pipe *hwpipe;
+
+ hwpipe = kzalloc(sizeof(*hwpipe), GFP_KERNEL);
+ if (!hwpipe)
+ return ERR_PTR(-ENOMEM);
+
+ hwpipe->name = pipe2name(pipe);
+ hwpipe->pipe = pipe;
+ hwpipe->reg_offset = reg_offset;
+ hwpipe->caps = caps;
+ hwpipe->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
+
+ return hwpipe;
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
new file mode 100644
index 0000000000..cca67938ca
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __MDP5_PIPE_H__
+#define __MDP5_PIPE_H__
+
+/* TODO: Add SSPP_MAX in mdp5.xml.h */
+#define SSPP_MAX (SSPP_CURSOR1 + 1)
+
+/* represents a hw pipe, which is dynamically assigned to a plane */
+struct mdp5_hw_pipe {
+ int idx;
+
+ const char *name;
+ enum mdp5_pipe pipe;
+
+ uint32_t reg_offset;
+ uint32_t caps;
+
+ uint32_t flush_mask; /* used to commit pipe registers */
+
+	/* number of SMP blocks per plane, i.e.:
+	 * nblks_y | (nblks_u << 8) | (nblks_v << 16)
+	 */
+ uint32_t blkcfg;
+};
+
+/* global atomic state of assignment between pipes and planes: */
+struct mdp5_hw_pipe_state {
+ struct drm_plane *hwpipe_to_plane[SSPP_MAX];
+};
+
+int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
+ uint32_t caps, uint32_t blkcfg,
+ struct mdp5_hw_pipe **hwpipe,
+ struct mdp5_hw_pipe **r_hwpipe);
+int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe);
+
+struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
+ uint32_t reg_offset, uint32_t caps);
+void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe);
+
+#endif /* __MDP5_PIPE_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
new file mode 100644
index 0000000000..0d5ff03cb0
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -0,0 +1,1048 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_print.h>
+
+#include "mdp5_kms.h"
+
+struct mdp5_plane {
+ struct drm_plane base;
+
+ uint32_t nformats;
+ uint32_t formats[32];
+};
+#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
+
+static int mdp5_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct drm_rect *src, struct drm_rect *dest);
+
+static struct mdp5_kms *get_kms(struct drm_plane *plane)
+{
+ struct msm_drm_private *priv = plane->dev->dev_private;
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static bool plane_enabled(struct drm_plane_state *state)
+{
+ return state->visible;
+}
+
+static void mdp5_plane_destroy(struct drm_plane *plane)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+
+ drm_plane_cleanup(plane);
+
+ kfree(mdp5_plane);
+}
+
+/* helper to install properties which are common to planes and crtcs */
+static void mdp5_plane_install_properties(struct drm_plane *plane,
+ struct drm_mode_object *obj)
+{
+ unsigned int zpos;
+
+ drm_plane_create_rotation_property(plane,
+ DRM_MODE_ROTATE_0,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_ROTATE_180 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+ drm_plane_create_alpha_property(plane);
+ drm_plane_create_blend_mode_property(plane,
+ BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
+
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ zpos = STAGE_BASE;
+ else
+ zpos = STAGE0 + drm_plane_index(plane);
+ drm_plane_create_zpos_property(plane, zpos, 1, 255);
+}
+
+static void
+mdp5_plane_atomic_print_state(struct drm_printer *p,
+ const struct drm_plane_state *state)
+{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
+ struct mdp5_kms *mdp5_kms = get_kms(state->plane);
+
+ drm_printf(p, "\thwpipe=%s\n", pstate->hwpipe ?
+ pstate->hwpipe->name : "(null)");
+ if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
+ drm_printf(p, "\tright-hwpipe=%s\n",
+ pstate->r_hwpipe ? pstate->r_hwpipe->name :
+ "(null)");
+ drm_printf(p, "\tblend_mode=%u\n", pstate->base.pixel_blend_mode);
+ drm_printf(p, "\tzpos=%u\n", pstate->base.zpos);
+ drm_printf(p, "\tnormalized_zpos=%u\n", pstate->base.normalized_zpos);
+ drm_printf(p, "\talpha=%u\n", pstate->base.alpha);
+ drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
+}
+
+static void mdp5_plane_reset(struct drm_plane *plane)
+{
+ struct mdp5_plane_state *mdp5_state;
+
+ if (plane->state)
+ __drm_atomic_helper_plane_destroy_state(plane->state);
+
+ kfree(to_mdp5_plane_state(plane->state));
+ plane->state = NULL;
+ mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
+ if (!mdp5_state)
+ return;
+ __drm_atomic_helper_plane_reset(plane, &mdp5_state->base);
+}
+
+static struct drm_plane_state *
+mdp5_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct mdp5_plane_state *mdp5_state;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
+ sizeof(*mdp5_state), GFP_KERNEL);
+ if (!mdp5_state)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base);
+
+ return &mdp5_state->base;
+}
+
+static void mdp5_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
+
+ __drm_atomic_helper_plane_destroy_state(state);
+
+ kfree(pstate);
+}
+
+static const struct drm_plane_funcs mdp5_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = mdp5_plane_destroy,
+ .reset = mdp5_plane_reset,
+ .atomic_duplicate_state = mdp5_plane_duplicate_state,
+ .atomic_destroy_state = mdp5_plane_destroy_state,
+ .atomic_print_state = mdp5_plane_atomic_print_state,
+};
+
+static int mdp5_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct msm_drm_private *priv = plane->dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ bool needs_dirtyfb = to_mdp5_plane_state(new_state)->needs_dirtyfb;
+
+ if (!new_state->fb)
+ return 0;
+
+ drm_gem_plane_helper_prepare_fb(plane, new_state);
+
+ return msm_framebuffer_prepare(new_state->fb, kms->aspace, needs_dirtyfb);
+}
+
+static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ struct msm_kms *kms = &mdp5_kms->base.base;
+ struct drm_framebuffer *fb = old_state->fb;
+ bool needed_dirtyfb = to_mdp5_plane_state(old_state)->needs_dirtyfb;
+
+ if (!fb)
+ return;
+
+ DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id);
+ msm_framebuffer_cleanup(fb, kms->aspace, needed_dirtyfb);
+}
+
+static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *state)
+{
+ struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
+ struct drm_plane *plane = state->plane;
+ struct drm_plane_state *old_state = plane->state;
+ struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg);
+ bool new_hwpipe = false;
+ bool need_right_hwpipe = false;
+ uint32_t max_width, max_height;
+ bool out_of_bounds = false;
+ uint32_t caps = 0;
+ int min_scale, max_scale;
+ int ret;
+
+ DBG("%s: check (%d -> %d)", plane->name,
+ plane_enabled(old_state), plane_enabled(state));
+
+ max_width = config->hw->lm.max_width << 16;
+ max_height = config->hw->lm.max_height << 16;
+
+ /* Make sure source dimensions are within bounds. */
+ if (state->src_h > max_height)
+ out_of_bounds = true;
+
+ if (state->src_w > max_width) {
+ /* If source split is supported, we can go up to 2x
+ * the max LM width, but we'd need to stage another
+ * hwpipe to the right LM. So, the drm_plane would
+ * consist of 2 hwpipes.
+ */
+ if (config->hw->mdp.caps & MDP_CAP_SRC_SPLIT &&
+ (state->src_w <= 2 * max_width))
+ need_right_hwpipe = true;
+ else
+ out_of_bounds = true;
+ }
+
+ if (out_of_bounds) {
+ struct drm_rect src = drm_plane_state_src(state);
+ DBG("Invalid source size "DRM_RECT_FP_FMT,
+ DRM_RECT_FP_ARG(&src));
+ return -ERANGE;
+ }
+
+ min_scale = FRAC_16_16(1, 8);
+ max_scale = FRAC_16_16(8, 1);
+
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ min_scale, max_scale,
+ true, true);
+ if (ret)
+ return ret;
+
+ if (plane_enabled(state)) {
+ unsigned int rotation;
+ const struct mdp_format *format;
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ uint32_t blkcfg = 0;
+
+ format = to_mdp_format(msm_framebuffer_format(state->fb));
+ if (MDP_FORMAT_IS_YUV(format))
+ caps |= MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC;
+
+ if (((state->src_w >> 16) != state->crtc_w) ||
+ ((state->src_h >> 16) != state->crtc_h))
+ caps |= MDP_PIPE_CAP_SCALE;
+
+ rotation = drm_rotation_simplify(state->rotation,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+
+ if (rotation & DRM_MODE_REFLECT_X)
+ caps |= MDP_PIPE_CAP_HFLIP;
+
+ if (rotation & DRM_MODE_REFLECT_Y)
+ caps |= MDP_PIPE_CAP_VFLIP;
+
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ caps |= MDP_PIPE_CAP_CURSOR;
+
+ /* (re)allocate hw pipe if we don't have one or caps-mismatch: */
+ if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps))
+ new_hwpipe = true;
+
+		/*
+		 * (re)allocate the hw pipe if we're either requesting 2 hw
+		 * pipes, or switching from 2 hw pipes to 1 because the new
+		 * src_w can be supported by a single hw pipe.
+		 */
+ if ((need_right_hwpipe && !mdp5_state->r_hwpipe) ||
+ (!need_right_hwpipe && mdp5_state->r_hwpipe))
+ new_hwpipe = true;
+
+ if (mdp5_kms->smp) {
+ const struct mdp_format *format =
+ to_mdp_format(msm_framebuffer_format(state->fb));
+
+ blkcfg = mdp5_smp_calculate(mdp5_kms->smp, format,
+ state->src_w >> 16, false);
+
+ if (mdp5_state->hwpipe && (mdp5_state->hwpipe->blkcfg != blkcfg))
+ new_hwpipe = true;
+ }
+
+ /* (re)assign hwpipe if needed, otherwise keep old one: */
+ if (new_hwpipe) {
+			/* TODO maybe we want to re-assign the hwpipe
+			 * sometimes, in cases when we no longer need some
+			 * caps, to make it available for other planes?
+			 */
+ struct mdp5_hw_pipe *old_hwpipe = mdp5_state->hwpipe;
+ struct mdp5_hw_pipe *old_right_hwpipe =
+ mdp5_state->r_hwpipe;
+ struct mdp5_hw_pipe *new_hwpipe = NULL;
+ struct mdp5_hw_pipe *new_right_hwpipe = NULL;
+
+ ret = mdp5_pipe_assign(state->state, plane, caps,
+ blkcfg, &new_hwpipe,
+ need_right_hwpipe ?
+ &new_right_hwpipe : NULL);
+ if (ret) {
+ DBG("%s: failed to assign hwpipe(s)!",
+ plane->name);
+ return ret;
+ }
+
+ mdp5_state->hwpipe = new_hwpipe;
+ if (need_right_hwpipe)
+ mdp5_state->r_hwpipe = new_right_hwpipe;
+ else
+ /*
+ * set it to NULL so that the driver knows we
+ * don't have a right hwpipe when committing a
+ * new state
+ */
+ mdp5_state->r_hwpipe = NULL;
+
+ ret = mdp5_pipe_release(state->state, old_hwpipe);
+ if (ret)
+ return ret;
+
+ ret = mdp5_pipe_release(state->state, old_right_hwpipe);
+ if (ret)
+ return ret;
+
+ }
+ } else {
+ ret = mdp5_pipe_release(state->state, mdp5_state->hwpipe);
+ if (ret)
+ return ret;
+
+ ret = mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
+ if (ret)
+ return ret;
+
+ mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL;
+ }
+
+ return 0;
+}
+
+static int mdp5_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+
+ crtc = new_plane_state->crtc ? new_plane_state->crtc : old_plane_state->crtc;
+ if (!crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ return mdp5_plane_atomic_check_with_state(crtc_state, new_plane_state);
+}
+
+static void mdp5_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+
+ DBG("%s: update", plane->name);
+
+ if (plane_enabled(new_state)) {
+ int ret;
+
+ ret = mdp5_plane_mode_set(plane,
+ new_state->crtc, new_state->fb,
+ &new_state->src, &new_state->dst);
+ /* atomic_check should have ensured that this doesn't fail */
+ WARN_ON(ret < 0);
+ }
+}
+
+static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(new_plane_state);
+ struct drm_crtc_state *crtc_state;
+ int min_scale, max_scale;
+ int ret;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ new_plane_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ if (!crtc_state->active)
+ return -EINVAL;
+
+ /* don't use fast path if we don't have a hwpipe allocated yet */
+ if (!mdp5_state->hwpipe)
+ return -EINVAL;
+
+	/* only allow changing of position (crtc x/y or src x/y) in the fast path */
+ if (plane->state->crtc != new_plane_state->crtc ||
+ plane->state->src_w != new_plane_state->src_w ||
+ plane->state->src_h != new_plane_state->src_h ||
+ plane->state->crtc_w != new_plane_state->crtc_w ||
+ plane->state->crtc_h != new_plane_state->crtc_h ||
+ !plane->state->fb ||
+ plane->state->fb != new_plane_state->fb)
+ return -EINVAL;
+
+ min_scale = FRAC_16_16(1, 8);
+ max_scale = FRAC_16_16(8, 1);
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
+ min_scale, max_scale,
+ true, true);
+ if (ret)
+ return ret;
+
+	/*
+	 * If the visibility of the plane changes (i.e. if the cursor is
+	 * clipped out completely), we can't take the async path, because
+	 * we need to stage/unstage the plane from the Layer Mixer(s). We
+	 * also assign/unassign the hwpipe(s) tied to the plane. We avoid
+	 * taking the fast path for both these reasons.
+	 */
+ if (new_plane_state->visible != plane->state->visible)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_framebuffer *old_fb = plane->state->fb;
+
+ plane->state->src_x = new_state->src_x;
+ plane->state->src_y = new_state->src_y;
+ plane->state->crtc_x = new_state->crtc_x;
+ plane->state->crtc_y = new_state->crtc_y;
+
+ if (plane_enabled(new_state)) {
+ struct mdp5_ctl *ctl;
+ struct mdp5_pipeline *pipeline =
+ mdp5_crtc_get_pipeline(new_state->crtc);
+ int ret;
+
+ ret = mdp5_plane_mode_set(plane, new_state->crtc, new_state->fb,
+ &new_state->src, &new_state->dst);
+ WARN_ON(ret < 0);
+
+ ctl = mdp5_crtc_get_ctl(new_state->crtc);
+
+ mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane), true);
+ }
+
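+	/* After the copy below, plane->state points at the new fb; handing
+	 * the old fb back to new_state keeps framebuffer refcounts balanced
+	 * when the async state is cleaned up.
+	 */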
+ *to_mdp5_plane_state(plane->state) =
+ *to_mdp5_plane_state(new_state);
+
+ new_state->fb = old_fb;
+}
+
+static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
+ .prepare_fb = mdp5_plane_prepare_fb,
+ .cleanup_fb = mdp5_plane_cleanup_fb,
+ .atomic_check = mdp5_plane_atomic_check,
+ .atomic_update = mdp5_plane_atomic_update,
+ .atomic_async_check = mdp5_plane_atomic_async_check,
+ .atomic_async_update = mdp5_plane_atomic_async_update,
+};
+
+static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
+ enum mdp5_pipe pipe,
+ struct drm_framebuffer *fb)
+{
+ struct msm_kms *kms = &mdp5_kms->base.base;
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
+ MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
+ MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
+ MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
+ MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
+ msm_framebuffer_iova(fb, kms->aspace, 0));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
+ msm_framebuffer_iova(fb, kms->aspace, 1));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
+ msm_framebuffer_iova(fb, kms->aspace, 2));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
+ msm_framebuffer_iova(fb, kms->aspace, 3));
+}
+
+/* Note: mdp5_plane->pipe_lock must be locked */
+static void csc_disable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe)
+{
+ uint32_t value = mdp5_read(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe)) &
+ ~MDP5_PIPE_OP_MODE_CSC_1_EN;
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), value);
+}
+
+/* Note: mdp5_plane->pipe_lock must be locked */
+static void csc_enable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
+ struct csc_cfg *csc)
+{
+ uint32_t i, mode = 0; /* RGB, no CSC */
+ uint32_t *matrix;
+
+ if (unlikely(!csc))
+ return;
+
+	if ((csc->type == CSC_YUV2RGB) || (csc->type == CSC_YUV2YUV))
+		mode |= MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT(DATA_FORMAT_YUV);
+	if ((csc->type == CSC_RGB2YUV) || (csc->type == CSC_YUV2YUV))
+		mode |= MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT(DATA_FORMAT_YUV);
+ mode |= MDP5_PIPE_OP_MODE_CSC_1_EN;
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), mode);
+
+ matrix = csc->matrix;
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_0(pipe),
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11(matrix[0]) |
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12(matrix[1]));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_1(pipe),
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13(matrix[2]) |
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21(matrix[3]));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_2(pipe),
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22(matrix[4]) |
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23(matrix[5]));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_3(pipe),
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31(matrix[6]) |
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32(matrix[7]));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_4(pipe),
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33(matrix[8]));
+
+ for (i = 0; i < ARRAY_SIZE(csc->pre_bias); i++) {
+ uint32_t *pre_clamp = csc->pre_clamp;
+ uint32_t *post_clamp = csc->post_clamp;
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_CLAMP(pipe, i),
+ MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH(pre_clamp[2*i+1]) |
+ MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW(pre_clamp[2*i]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_CLAMP(pipe, i),
+ MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH(post_clamp[2*i+1]) |
+ MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW(post_clamp[2*i]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_BIAS(pipe, i),
+ MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE(csc->pre_bias[i]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_BIAS(pipe, i),
+ MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE(csc->post_bias[i]));
+ }
+}
+
+#define PHASE_STEP_SHIFT 21
+#define DOWN_SCALE_RATIO_MAX 32 /* 2^(26-21) */
+
+static int calc_phase_step(uint32_t src, uint32_t dst, uint32_t *out_phase)
+{
+ uint32_t unit;
+
+ if (src == 0 || dst == 0)
+ return -EINVAL;
+
+ /*
+ * PHASE_STEP_X/Y is coded on 26 bits (25:0),
+ * where 2^21 represents the unity "1" in fixed-point hardware design.
+ * This leaves 5 bits for the integer part (downscale case):
+ * -> maximum downscale ratio = 0b1_1111 = 31
+ */
+ if (src > (dst * DOWN_SCALE_RATIO_MAX))
+ return -EOVERFLOW;
+
+ unit = 1 << PHASE_STEP_SHIFT;
+ *out_phase = mult_frac(unit, src, dst);
+
+ return 0;
+}
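+/*
+ * Worked example: downscaling src = 3840 to dst = 1920 yields
+ * *out_phase = mult_frac(1 << 21, 3840, 1920) = 2 << 21, i.e. the
+ * source pointer advances two pixels per destination pixel; upscaling
+ * 1920 -> 3840 yields 1 << 20, i.e. half a pixel per destination pixel.
+ */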
+
+static int calc_scalex_steps(struct drm_plane *plane,
+ uint32_t pixel_format, uint32_t src, uint32_t dest,
+ uint32_t phasex_steps[COMP_MAX])
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ struct device *dev = mdp5_kms->dev->dev;
+ uint32_t phasex_step;
+ int ret;
+
+ ret = calc_phase_step(src, dest, &phasex_step);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
+ return ret;
+ }
+
+ phasex_steps[COMP_0] = phasex_step;
+ phasex_steps[COMP_3] = phasex_step;
+ phasex_steps[COMP_1_2] = phasex_step / info->hsub;
+
+ return 0;
+}
+
+static int calc_scaley_steps(struct drm_plane *plane,
+ uint32_t pixel_format, uint32_t src, uint32_t dest,
+ uint32_t phasey_steps[COMP_MAX])
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ struct device *dev = mdp5_kms->dev->dev;
+ uint32_t phasey_step;
+ int ret;
+
+ ret = calc_phase_step(src, dest, &phasey_step);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
+ return ret;
+ }
+
+ phasey_steps[COMP_0] = phasey_step;
+ phasey_steps[COMP_3] = phasey_step;
+ phasey_steps[COMP_1_2] = phasey_step / info->vsub;
+
+ return 0;
+}
+
+static uint32_t get_scale_config(const struct mdp_format *format,
+ uint32_t src, uint32_t dst, bool horz)
+{
+ const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
+ bool scaling = format->is_yuv ? true : (src != dst);
+ uint32_t sub;
+ uint32_t ya_filter, uv_filter;
+ bool yuv = format->is_yuv;
+
+ if (!scaling)
+ return 0;
+
+ if (yuv) {
+ sub = horz ? info->hsub : info->vsub;
+ uv_filter = ((src / sub) <= dst) ?
+ SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
+ }
+ ya_filter = (src <= dst) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
+
+ if (horz)
+ return MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(ya_filter) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(ya_filter) |
+ COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(uv_filter));
+ else
+ return MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(ya_filter) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(ya_filter) |
+ COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(uv_filter));
+}
+
+static void calc_pixel_ext(const struct mdp_format *format,
+ uint32_t src, uint32_t dst, uint32_t phase_step[2],
+ int pix_ext_edge1[COMP_MAX], int pix_ext_edge2[COMP_MAX],
+ bool horz)
+{
+ bool scaling = format->is_yuv ? true : (src != dst);
+ int i;
+
+ /*
+ * Note:
+ * We assume here that:
+ * 1. PCMN filter is used for downscale
+ * 2. bilinear filter is used for upscale
+ * 3. we are in a single pipe configuration
+ */
+
+ for (i = 0; i < COMP_MAX; i++) {
+ pix_ext_edge1[i] = 0;
+ pix_ext_edge2[i] = scaling ? 1 : 0;
+ }
+}
+
+static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
+ const struct mdp_format *format,
+ uint32_t src_w, int pe_left[COMP_MAX], int pe_right[COMP_MAX],
+ uint32_t src_h, int pe_top[COMP_MAX], int pe_bottom[COMP_MAX])
+{
+ const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
+ uint32_t lr, tb, req;
+ int i;
+
+ for (i = 0; i < COMP_MAX; i++) {
+ uint32_t roi_w = src_w;
+ uint32_t roi_h = src_h;
+
+ if (format->is_yuv && i == COMP_1_2) {
+ roi_w /= info->hsub;
+ roi_h /= info->vsub;
+ }
+
+ lr = (pe_left[i] >= 0) ?
+ MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT(pe_left[i]) :
+ MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF(pe_left[i]);
+
+ lr |= (pe_right[i] >= 0) ?
+ MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT(pe_right[i]) :
+ MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF(pe_right[i]);
+
+ tb = (pe_top[i] >= 0) ?
+ MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT(pe_top[i]) :
+ MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF(pe_top[i]);
+
+ tb |= (pe_bottom[i] >= 0) ?
+ MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT(pe_bottom[i]) :
+ MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF(pe_bottom[i]);
+
+ req = MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT(roi_w +
+ pe_left[i] + pe_right[i]);
+
+ req |= MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM(roi_h +
+ pe_top[i] + pe_bottom[i]);
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_LR(pipe, i), lr);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_TB(pipe, i), tb);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS(pipe, i), req);
+
+ DBG("comp-%d (L/R): rpt=%d/%d, ovf=%d/%d, req=%d", i,
+ FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT),
+ FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT),
+ FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF),
+ FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF),
+ FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT));
+
+ DBG("comp-%d (T/B): rpt=%d/%d, ovf=%d/%d, req=%d", i,
+ FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT),
+ FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT),
+ FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF),
+ FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF),
+ FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM));
+ }
+}
+
+struct pixel_ext {
+ int left[COMP_MAX];
+ int right[COMP_MAX];
+ int top[COMP_MAX];
+ int bottom[COMP_MAX];
+};
+
+struct phase_step {
+ u32 x[COMP_MAX];
+ u32 y[COMP_MAX];
+};
+
+static void mdp5_hwpipe_mode_set(struct mdp5_kms *mdp5_kms,
+ struct mdp5_hw_pipe *hwpipe,
+ struct drm_framebuffer *fb,
+ struct phase_step *step,
+ struct pixel_ext *pe,
+ u32 scale_config, u32 hdecm, u32 vdecm,
+ bool hflip, bool vflip,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ u32 src_img_w, u32 src_img_h,
+ u32 src_x, u32 src_y,
+ u32 src_w, u32 src_h)
+{
+ enum mdp5_pipe pipe = hwpipe->pipe;
+ bool has_pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT;
+ const struct mdp_format *format =
+ to_mdp_format(msm_framebuffer_format(fb));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
+ MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_img_w) |
+ MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_img_h));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
+ MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
+ MDP5_PIPE_SRC_SIZE_HEIGHT(src_h));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe),
+ MDP5_PIPE_SRC_XY_X(src_x) |
+ MDP5_PIPE_SRC_XY_Y(src_y));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe),
+ MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) |
+ MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe),
+ MDP5_PIPE_OUT_XY_X(crtc_x) |
+ MDP5_PIPE_OUT_XY_Y(crtc_y));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
+ MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
+ MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
+ MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
+ MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
+ COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
+ MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
+ MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
+ COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
+ MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) |
+ MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
+ MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
+ MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
+ MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
+ MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
+ (hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) |
+ (vflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) |
+ COND(has_pe, MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE) |
+ MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));
+
+ /* not using secure mode: */
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
+
+ if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT)
+ mdp5_write_pixel_ext(mdp5_kms, pipe, format,
+ src_w, pe->left, pe->right,
+ src_h, pe->top, pe->bottom);
+
+ if (hwpipe->caps & MDP_PIPE_CAP_SCALE) {
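+		/* per-component phase steps: COMP_0 (luma) and COMP_1_2 (the
+		 * packed chroma pair); see enum mdp_component_type
+		 */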
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
+ step->x[COMP_0]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
+ step->y[COMP_0]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
+ step->x[COMP_1_2]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
+ step->y[COMP_1_2]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
+ MDP5_PIPE_DECIMATION_VERT(vdecm) |
+ MDP5_PIPE_DECIMATION_HORZ(hdecm));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe),
+ scale_config);
+ }
+
+ if (hwpipe->caps & MDP_PIPE_CAP_CSC) {
+ if (MDP_FORMAT_IS_YUV(format))
+ csc_enable(mdp5_kms, pipe,
+ mdp_get_default_csc_cfg(CSC_YUV2RGB));
+ else
+ csc_disable(mdp5_kms, pipe);
+ }
+
+ set_scanout_locked(mdp5_kms, pipe, fb);
+}
+
+static int mdp5_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct drm_rect *src, struct drm_rect *dest)
+{
+ struct drm_plane_state *pstate = plane->state;
+ struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe;
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ enum mdp5_pipe pipe = hwpipe->pipe;
+ struct mdp5_hw_pipe *right_hwpipe;
+ const struct mdp_format *format;
+ uint32_t nplanes, config = 0;
+ struct phase_step step = { { 0 } };
+ struct pixel_ext pe = { { 0 } };
+ uint32_t hdecm = 0, vdecm = 0;
+ uint32_t pix_format;
+ unsigned int rotation;
+ bool vflip, hflip;
+ int crtc_x, crtc_y;
+ unsigned int crtc_w, crtc_h;
+ uint32_t src_x, src_y;
+ uint32_t src_w, src_h;
+ uint32_t src_img_w, src_img_h;
+ int ret;
+
+ nplanes = fb->format->num_planes;
+
+ /* bad formats should already be rejected: */
+ if (WARN_ON(nplanes > pipe2nclients(pipe)))
+ return -EINVAL;
+
+ format = to_mdp_format(msm_framebuffer_format(fb));
+ pix_format = format->base.pixel_format;
+
+ src_x = src->x1;
+ src_y = src->y1;
+ src_w = drm_rect_width(src);
+ src_h = drm_rect_height(src);
+
+ crtc_x = dest->x1;
+ crtc_y = dest->y1;
+ crtc_w = drm_rect_width(dest);
+ crtc_h = drm_rect_height(dest);
+
+ /* src values are in Q16 fixed point, convert to integer: */
+ src_x = src_x >> 16;
+ src_y = src_y >> 16;
+ src_w = src_w >> 16;
+ src_h = src_h >> 16;
+
+ src_img_w = min(fb->width, src_w);
+ src_img_h = min(fb->height, src_h);
+
+ DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", plane->name,
+ fb->base.id, src_x, src_y, src_w, src_h,
+ crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
+
+ right_hwpipe = to_mdp5_plane_state(pstate)->r_hwpipe;
+ if (right_hwpipe) {
+		/*
+		 * if the plane consists of 2 hw pipes, assume that the width
+		 * is split equally across them. The only parameters that vary
+		 * between the 2 pipes are src_x and crtc_x
+		 */
+ crtc_w /= 2;
+ src_w /= 2;
+ src_img_w /= 2;
+ }
+
+ ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, step.x);
+ if (ret)
+ return ret;
+
+ ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, step.y);
+ if (ret)
+ return ret;
+
+ if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) {
+ calc_pixel_ext(format, src_w, crtc_w, step.x,
+ pe.left, pe.right, true);
+ calc_pixel_ext(format, src_h, crtc_h, step.y,
+ pe.top, pe.bottom, false);
+ }
+
+ /* TODO calc hdecm, vdecm */
+
+ /* SCALE is used to both scale and up-sample chroma components */
+ config |= get_scale_config(format, src_w, crtc_w, true);
+ config |= get_scale_config(format, src_h, crtc_h, false);
+ DBG("scale config = %x", config);
+
+ rotation = drm_rotation_simplify(pstate->rotation,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+ hflip = !!(rotation & DRM_MODE_REFLECT_X);
+ vflip = !!(rotation & DRM_MODE_REFLECT_Y);
+
+ mdp5_hwpipe_mode_set(mdp5_kms, hwpipe, fb, &step, &pe,
+ config, hdecm, vdecm, hflip, vflip,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_img_w, src_img_h,
+ src_x, src_y, src_w, src_h);
+ if (right_hwpipe)
+ mdp5_hwpipe_mode_set(mdp5_kms, right_hwpipe, fb, &step, &pe,
+ config, hdecm, vdecm, hflip, vflip,
+ crtc_x + crtc_w, crtc_y, crtc_w, crtc_h,
+ src_img_w, src_img_h,
+ src_x + src_w, src_y, src_w, src_h);
+
+ return ret;
+}
+
+/*
+ * Use this func and the one below only after the atomic state has been
+ * successfully swapped
+ */
+enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
+{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
+
+ if (WARN_ON(!pstate->hwpipe))
+ return SSPP_NONE;
+
+ return pstate->hwpipe->pipe;
+}
+
+enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane)
+{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
+
+ if (!pstate->r_hwpipe)
+ return SSPP_NONE;
+
+ return pstate->r_hwpipe->pipe;
+}
+
+uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
+{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
+ u32 mask;
+
+ if (WARN_ON(!pstate->hwpipe))
+ return 0;
+
+ mask = pstate->hwpipe->flush_mask;
+
+ if (pstate->r_hwpipe)
+ mask |= pstate->r_hwpipe->flush_mask;
+
+ return mask;
+}
+
+/* initialize plane */
+struct drm_plane *mdp5_plane_init(struct drm_device *dev,
+ enum drm_plane_type type)
+{
+ struct drm_plane *plane = NULL;
+ struct mdp5_plane *mdp5_plane;
+ int ret;
+
+ mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL);
+ if (!mdp5_plane) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ plane = &mdp5_plane->base;
+
+ mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
+ ARRAY_SIZE(mdp5_plane->formats), false);
+
+ ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
+ mdp5_plane->formats, mdp5_plane->nformats,
+ NULL, type, NULL);
+ if (ret)
+ goto fail;
+
+ drm_plane_helper_add(plane, &mdp5_plane_helper_funcs);
+
+ mdp5_plane_install_properties(plane, &plane->base);
+
+ drm_plane_enable_fb_damage_clips(plane);
+
+ return plane;
+
+fail:
+ if (plane)
+ mdp5_plane_destroy(plane);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
new file mode 100644
index 0000000000..56a3063545
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_util.h>
+
+#include "mdp5_kms.h"
+#include "mdp5_smp.h"
+
+
+struct mdp5_smp {
+ struct drm_device *dev;
+
+ uint8_t reserved[MAX_CLIENTS]; /* fixed MMBs allocation per client */
+
+ int blk_cnt;
+ int blk_size;
+
+ /* register cache */
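+	/* three client-ID fields per ALLOC register; 22 entries presumably
+	 * covers the largest MMB count among supported chips
+	 */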
+ u32 alloc_w[22];
+ u32 alloc_r[22];
+ u32 pipe_reqprio_fifo_wm0[SSPP_MAX];
+ u32 pipe_reqprio_fifo_wm1[SSPP_MAX];
+ u32 pipe_reqprio_fifo_wm2[SSPP_MAX];
+};
+
+static inline
+struct mdp5_kms *get_kms(struct mdp5_smp *smp)
+{
+ struct msm_drm_private *priv = smp->dev->dev_private;
+
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static inline u32 pipe2client(enum mdp5_pipe pipe, int plane)
+{
+#define CID_UNUSED 0
+
+ if (WARN_ON(plane >= pipe2nclients(pipe)))
+ return CID_UNUSED;
+
+ /*
+ * Note on SMP clients:
+	 * For ViG pipes, the Y/Cr/Cb component fetch clients are always
+	 * consecutive, and in that order.
+ *
+ * e.g.:
+ * if mdp5_cfg->smp.clients[SSPP_VIG0] = N,
+ * Y plane's client ID is N
+ * Cr plane's client ID is N + 1
+ * Cb plane's client ID is N + 2
+ */
+
+ return mdp5_cfg->smp.clients[pipe] + plane;
+}
+
+/* allocate blocks for the specified request: */
+static int smp_request_block(struct mdp5_smp *smp,
+ struct mdp5_smp_state *state,
+ u32 cid, int nblks)
+{
+ void *cs = state->client_state[cid];
+ int i, avail, cnt = smp->blk_cnt;
+ uint8_t reserved;
+
+ /* we shouldn't be requesting blocks for an in-use client: */
+ WARN_ON(!bitmap_empty(cs, cnt));
+
+ reserved = smp->reserved[cid];
+
+ if (reserved) {
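+		/* statically reserved MMBs already cover part of the request,
+		 * so only allocate the remainder from the shared pool
+		 */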
+ nblks = max(0, nblks - reserved);
+ DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
+ }
+
+ avail = cnt - bitmap_weight(state->state, cnt);
+ if (nblks > avail) {
+ DRM_DEV_ERROR(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
+ nblks, avail);
+ return -ENOSPC;
+ }
+
+ for (i = 0; i < nblks; i++) {
+ int blk = find_first_zero_bit(state->state, cnt);
+ set_bit(blk, cs);
+ set_bit(blk, state->state);
+ }
+
+ return 0;
+}
+
+static void set_fifo_thresholds(struct mdp5_smp *smp,
+ enum mdp5_pipe pipe, int nblks)
+{
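+	/* each SMP entry is 128 bits wide, so a block holds
+	 * blk_size / 16 entries
+	 */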
+ u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
+ u32 val;
+
+ /* 1/4 of SMP pool that is being fetched */
+ val = (nblks * smp_entries_per_blk) / 4;
+
+ smp->pipe_reqprio_fifo_wm0[pipe] = val * 1;
+ smp->pipe_reqprio_fifo_wm1[pipe] = val * 2;
+ smp->pipe_reqprio_fifo_wm2[pipe] = val * 3;
+}
+
+/*
+ * NOTE: it looks like if horizontal decimation were used (we don't support
+ * it yet), the width used to calculate SMP block requirements would be the
+ * post-decimated width, i.e. SMP buffering sits downstream of decimation
+ * (which presumably happens during the dma from the scanout buffer).
+ */
+uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
+ const struct mdp_format *format,
+ u32 width, bool hdecim)
+{
+ const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
+ int i, hsub, nplanes, nlines;
+ uint32_t blkcfg = 0;
+
+ nplanes = info->num_planes;
+ hsub = info->hsub;
+
+ /* different if BWC (compressed framebuffer?) enabled: */
+ nlines = 2;
+
+ /* Newer MDPs have split/packing logic, which fetches sub-sampled
+ * U and V components (splits them from Y if necessary) and packs
+	 * them together, writing to SMP using a single client.
+ */
+ if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) {
+ nplanes = 2;
+
+ /* if decimation is enabled, HW decimates less on the
+		 * sub-sampled chroma components
+ */
+ if (hdecim && (hsub > 1))
+ hsub = 1;
+ }
+
+ for (i = 0; i < nplanes; i++) {
+ int n, fetch_stride, cpp;
+
+ cpp = info->cpp[i];
+ fetch_stride = width * cpp / (i ? hsub : 1);
+
+ n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);
+
+ /* for hw rev v1.00 */
+ if (rev == 0)
+ n = roundup_pow_of_two(n);
+
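+		/* plane i's block count occupies byte i of blkcfg;
+		 * mdp5_smp_assign() consumes one byte per client
+		 */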
+ blkcfg |= (n << (8 * i));
+ }
+
+ return blkcfg;
+}
+
+int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state,
+ enum mdp5_pipe pipe, uint32_t blkcfg)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ struct drm_device *dev = mdp5_kms->dev;
+ int i, ret;
+
+ for (i = 0; i < pipe2nclients(pipe); i++) {
+ u32 cid = pipe2client(pipe, i);
+ int n = blkcfg & 0xff;
+
+ if (!n)
+ continue;
+
+ DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
+ ret = smp_request_block(smp, state, cid, n);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
+ n, ret);
+ return ret;
+ }
+
+ blkcfg >>= 8;
+ }
+
+ state->assigned |= (1 << pipe);
+
+ return 0;
+}
+
+/* Release SMP blocks for all clients of the pipe */
+void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state,
+ enum mdp5_pipe pipe)
+{
+ int i;
+ int cnt = smp->blk_cnt;
+
+ for (i = 0; i < pipe2nclients(pipe); i++) {
+ u32 cid = pipe2client(pipe, i);
+ void *cs = state->client_state[cid];
+
+ /* update global state: */
+ bitmap_andnot(state->state, state->state, cs, cnt);
+
+ /* clear client's state */
+ bitmap_zero(cs, cnt);
+ }
+
+ state->released |= (1 << pipe);
+}
+
+/* NOTE: SMP_ALLOC_* regs are *not* double buffered, so release has to
+ * happen after scanout completes.
+ */
+static unsigned update_smp_state(struct mdp5_smp *smp,
+ u32 cid, mdp5_smp_state_t *assigned)
+{
+ int cnt = smp->blk_cnt;
+ unsigned nblks = 0;
+ u32 blk, val;
+
+ for_each_set_bit(blk, *assigned, cnt) {
+ int idx = blk / 3;
+ int fld = blk % 3;
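+		/* ALLOC registers pack three client-ID fields each: idx picks
+		 * the register, fld the field within it
+		 */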
+
+ val = smp->alloc_w[idx];
+
+ switch (fld) {
+ case 0:
+ val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
+ val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
+ break;
+ case 1:
+ val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
+ val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
+ break;
+ case 2:
+ val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
+ val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
+ break;
+ }
+
+ smp->alloc_w[idx] = val;
+ smp->alloc_r[idx] = val;
+
+ nblks++;
+ }
+
+ return nblks;
+}
+
+static void write_smp_alloc_regs(struct mdp5_smp *smp)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ int i, num_regs;
+
+ num_regs = smp->blk_cnt / 3 + 1;
+
+ for (i = 0; i < num_regs; i++) {
+ mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(i),
+ smp->alloc_w[i]);
+ mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(i),
+ smp->alloc_r[i]);
+ }
+}
+
+static void write_smp_fifo_regs(struct mdp5_smp *smp)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ int i;
+
+ for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
+ struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
+ enum mdp5_pipe pipe = hwpipe->pipe;
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe),
+ smp->pipe_reqprio_fifo_wm0[pipe]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe),
+ smp->pipe_reqprio_fifo_wm1[pipe]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe),
+ smp->pipe_reqprio_fifo_wm2[pipe]);
+ }
+}
+
+void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
+{
+ enum mdp5_pipe pipe;
+
+ for_each_set_bit(pipe, &state->assigned, sizeof(state->assigned) * 8) {
+ unsigned i, nblks = 0;
+
+ for (i = 0; i < pipe2nclients(pipe); i++) {
+ u32 cid = pipe2client(pipe, i);
+ void *cs = state->client_state[cid];
+
+ nblks += update_smp_state(smp, cid, cs);
+
+ DBG("assign %s:%u, %u blks",
+ pipe2name(pipe), i, nblks);
+ }
+
+ set_fifo_thresholds(smp, pipe, nblks);
+ }
+
+ write_smp_alloc_regs(smp);
+ write_smp_fifo_regs(smp);
+
+ state->assigned = 0;
+}
+
+void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
+{
+ enum mdp5_pipe pipe;
+
+ for_each_set_bit(pipe, &state->released, sizeof(state->released) * 8) {
+ DBG("release %s", pipe2name(pipe));
+ set_fifo_thresholds(smp, pipe, 0);
+ }
+
+ write_smp_fifo_regs(smp);
+
+ state->released = 0;
+}
+
+void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ struct mdp5_hw_pipe_state *hwpstate;
+ struct mdp5_smp_state *state;
+ struct mdp5_global_state *global_state;
+ int total = 0, i, j;
+
+ drm_printf(p, "name\tinuse\tplane\n");
+ drm_printf(p, "----\t-----\t-----\n");
+
+ if (drm_can_sleep())
+ drm_modeset_lock(&mdp5_kms->glob_state_lock, NULL);
+
+ global_state = mdp5_get_existing_global_state(mdp5_kms);
+
+ /* grab these *after* we hold the state_lock */
+ hwpstate = &global_state->hwpipe;
+ state = &global_state->smp;
+
+ for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
+ struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
+ struct drm_plane *plane = hwpstate->hwpipe_to_plane[hwpipe->idx];
+ enum mdp5_pipe pipe = hwpipe->pipe;
+ for (j = 0; j < pipe2nclients(pipe); j++) {
+ u32 cid = pipe2client(pipe, j);
+ void *cs = state->client_state[cid];
+ int inuse = bitmap_weight(cs, smp->blk_cnt);
+
+ drm_printf(p, "%s:%d\t%d\t%s\n",
+ pipe2name(pipe), j, inuse,
+ plane ? plane->name : NULL);
+
+ total += inuse;
+ }
+ }
+
+ drm_printf(p, "TOTAL:\t%d\t(of %d)\n", total, smp->blk_cnt);
+ drm_printf(p, "AVAIL:\t%d\n", smp->blk_cnt -
+ bitmap_weight(state->state, smp->blk_cnt));
+
+ if (drm_can_sleep())
+ drm_modeset_unlock(&mdp5_kms->glob_state_lock);
+}
+
+void mdp5_smp_destroy(struct mdp5_smp *smp)
+{
+ kfree(smp);
+}
+
+struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg)
+{
+ struct mdp5_smp_state *state;
+ struct mdp5_global_state *global_state;
+ struct mdp5_smp *smp = NULL;
+ int ret;
+
+ smp = kzalloc(sizeof(*smp), GFP_KERNEL);
+ if (unlikely(!smp)) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ smp->dev = mdp5_kms->dev;
+ smp->blk_cnt = cfg->mmb_count;
+ smp->blk_size = cfg->mmb_size;
+
+ global_state = mdp5_get_existing_global_state(mdp5_kms);
+ state = &global_state->smp;
+
+ /* statically tied MMBs cannot be re-allocated: */
+ bitmap_copy(state->state, cfg->reserved_state, smp->blk_cnt);
+ memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved));
+
+ return smp;
+fail:
+ if (smp)
+ mdp5_smp_destroy(smp);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
new file mode 100644
index 0000000000..ba5618e136
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __MDP5_SMP_H__
+#define __MDP5_SMP_H__
+
+#include <drm/drm_print.h>
+
+#include "msm_drv.h"
+
+/*
+ * SMP - Shared Memory Pool:
+ *
+ * SMP blocks are shared between all the clients, where each plane in
+ * a scanout buffer is an SMP client. E.g. scanout of a 3-plane I420
+ * buffer on pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR.
+ *
+ * Based on the size of the attached scanout buffer, a certain # of
+ * blocks must be allocated to that client out of the shared pool.
+ *
+ * In some hw, some blocks are statically allocated for certain pipes
+ * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
+ *
+ *
+ * Atomic SMP State:
+ *
+ * On atomic updates that modify SMP configuration, the state is cloned
+ * (copied) and modified. For test-only, or in cases where atomic
+ * update fails (or if we hit ww_mutex deadlock/backoff condition) the
+ * new state is simply thrown away.
+ *
+ * Because the SMP registers are not double buffered, updates are a
+ * two step process:
+ *
+ * 1) in _prepare_commit() we configure things (via read-modify-write)
+ * for the newly assigned pipes, so we don't take away blocks
+ * assigned to pipes that are still scanning out
+ * 2) in _complete_commit(), after vblank/etc, we clear things for the
+ * released clients, since at that point old pipes are no longer
+ * scanning out.
+ */
+struct mdp5_smp_state {
+ /* global state of what blocks are in use: */
+ mdp5_smp_state_t state;
+
+ /* per client state of what blocks they are using: */
+ mdp5_smp_state_t client_state[MAX_CLIENTS];
+
+ /* assigned pipes (hw updated at _prepare_commit()): */
+ unsigned long assigned;
+
+ /* released pipes (hw updated at _complete_commit()): */
+ unsigned long released;
+};
+
+struct mdp5_kms;
+struct mdp5_smp;
+
+/*
+ * SMP module prototypes:
+ * mdp5_smp_init() returns an SMP @handler,
+ * which is then used to call the other mdp5_smp_*(handler, ...) functions.
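+ *
+ * A rough usage sketch, tying the calls to the two-step commit
+ * described in mdp5_smp_state above:
+ *
+ *   blkcfg = mdp5_smp_calculate(smp, format, width, hdecim);
+ *   mdp5_smp_assign(smp, state, pipe, blkcfg);
+ *   mdp5_smp_prepare_commit(smp, state);   (before scanout starts)
+ *   ...
+ *   mdp5_smp_release(smp, state, pipe);
+ *   mdp5_smp_complete_commit(smp, state);  (after vblank)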
+ */
+
+struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms,
+ const struct mdp5_smp_block *cfg);
+void mdp5_smp_destroy(struct mdp5_smp *smp);
+
+void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p);
+
+uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
+ const struct mdp_format *format,
+ u32 width, bool hdecim);
+
+int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state,
+ enum mdp5_pipe pipe, uint32_t blkcfg);
+void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state,
+ enum mdp5_pipe pipe);
+
+void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state);
+void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state);
+
+#endif /* __MDP5_SMP_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp_common.xml.h b/drivers/gpu/drm/msm/disp/mdp_common.xml.h
new file mode 100644
index 0000000000..4dd8d7db28
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp_common.xml.h
@@ -0,0 +1,111 @@
+#ifndef MDP_COMMON_XML
+#define MDP_COMMON_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42)
+
+Copyright (C) 2013-2022 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum mdp_chroma_samp_type {
+ CHROMA_FULL = 0,
+ CHROMA_H2V1 = 1,
+ CHROMA_H1V2 = 2,
+ CHROMA_420 = 3,
+};
+
+enum mdp_fetch_type {
+ MDP_PLANE_INTERLEAVED = 0,
+ MDP_PLANE_PLANAR = 1,
+ MDP_PLANE_PSEUDO_PLANAR = 2,
+};
+
+enum mdp_mixer_stage_id {
+ STAGE_UNUSED = 0,
+ STAGE_BASE = 1,
+ STAGE0 = 2,
+ STAGE1 = 3,
+ STAGE2 = 4,
+ STAGE3 = 5,
+ STAGE4 = 6,
+ STAGE5 = 7,
+ STAGE6 = 8,
+ STAGE_MAX = 8,
+};
+
+enum mdp_alpha_type {
+ FG_CONST = 0,
+ BG_CONST = 1,
+ FG_PIXEL = 2,
+ BG_PIXEL = 3,
+};
+
+enum mdp_component_type {
+ COMP_0 = 0,
+ COMP_1_2 = 1,
+ COMP_3 = 2,
+ COMP_MAX = 3,
+};
+
+enum mdp_bpc {
+ BPC1 = 0,
+ BPC5 = 1,
+ BPC6 = 2,
+ BPC8 = 3,
+};
+
+enum mdp_bpc_alpha {
+ BPC1A = 0,
+ BPC4A = 1,
+ BPC6A = 2,
+ BPC8A = 3,
+};
+
+
+#endif /* MDP_COMMON_XML */
diff --git a/drivers/gpu/drm/msm/disp/mdp_format.c b/drivers/gpu/drm/msm/disp/mdp_format.c
new file mode 100644
index 0000000000..025595336f
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp_format.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+
+#include "msm_drv.h"
+#include "mdp_kms.h"
+
+static struct csc_cfg csc_convert[CSC_MAX] = {
+ [CSC_RGB2RGB] = {
+ .type = CSC_RGB2RGB,
+ .matrix = {
+ 0x0200, 0x0000, 0x0000,
+ 0x0000, 0x0200, 0x0000,
+ 0x0000, 0x0000, 0x0200
+ },
+ .pre_bias = { 0x0, 0x0, 0x0 },
+ .post_bias = { 0x0, 0x0, 0x0 },
+ .pre_clamp = { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff },
+ .post_clamp = { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff },
+ },
+ [CSC_YUV2RGB] = {
+ .type = CSC_YUV2RGB,
+ .matrix = {
+ 0x0254, 0x0000, 0x0331,
+ 0x0254, 0xff37, 0xfe60,
+ 0x0254, 0x0409, 0x0000
+ },
+ .pre_bias = { 0xfff0, 0xff80, 0xff80 },
+ .post_bias = { 0x00, 0x00, 0x00 },
+ .pre_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff },
+ .post_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff },
+ },
+ [CSC_RGB2YUV] = {
+ .type = CSC_RGB2YUV,
+ .matrix = {
+ 0x0083, 0x0102, 0x0032,
+ 0x1fb5, 0x1f6c, 0x00e1,
+ 0x00e1, 0x1f45, 0x1fdc
+ },
+ .pre_bias = { 0x00, 0x00, 0x00 },
+ .post_bias = { 0x10, 0x80, 0x80 },
+ .pre_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff },
+ .post_clamp = { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0 },
+ },
+ [CSC_YUV2YUV] = {
+ .type = CSC_YUV2YUV,
+ .matrix = {
+ 0x0200, 0x0000, 0x0000,
+ 0x0000, 0x0200, 0x0000,
+ 0x0000, 0x0000, 0x0200
+ },
+ .pre_bias = { 0x00, 0x00, 0x00 },
+ .post_bias = { 0x00, 0x00, 0x00 },
+ .pre_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff },
+ .post_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff },
+ },
+};
+
+#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs, yuv) { \
+ .base = { .pixel_format = DRM_FORMAT_ ## name }, \
+ .bpc_a = BPC ## a ## A, \
+ .bpc_r = BPC ## r, \
+ .bpc_g = BPC ## g, \
+ .bpc_b = BPC ## b, \
+ .unpack = { e0, e1, e2, e3 }, \
+ .alpha_enable = alpha, \
+ .unpack_tight = tight, \
+ .cpp = c, \
+ .unpack_count = cnt, \
+ .fetch_type = fp, \
+ .chroma_sample = cs, \
+ .is_yuv = yuv, \
+}
+
+#define BPC0A 0
+
+/*
+ * Note: keep RGB formats first, followed by YUV formats, to avoid
+ * breaking the rgb_only scan in mdp_get_formats().
+ */
+static const struct mdp_format formats[] = {
+ /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt ... */
+ FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+ FMT(ABGR8888, 8, 8, 8, 8, 2, 0, 1, 3, true, true, 4, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+ FMT(RGBA8888, 8, 8, 8, 8, 3, 1, 0, 2, true, true, 4, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+ FMT(BGRA8888, 8, 8, 8, 8, 3, 2, 0, 1, true, true, 4, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+ FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+ FMT(XBGR8888, 8, 8, 8, 8, 2, 0, 1, 3, false, true, 4, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+ FMT(RGBX8888, 8, 8, 8, 8, 3, 1, 0, 2, false, true, 4, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+ FMT(BGRX8888, 8, 8, 8, 8, 3, 2, 0, 1, false, true, 4, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+ FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3,
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+ FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3,
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+ FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3,
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+ FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3,
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+
+ /* --- RGB formats above / YUV formats below this line --- */
+
+ /* 2 plane YUV */
+ FMT(NV12, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2,
+ MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true),
+ FMT(NV21, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2,
+ MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true),
+ FMT(NV16, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2,
+ MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true),
+ FMT(NV61, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2,
+ MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true),
+ /* 1 plane YUV */
+ FMT(VYUY, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 2, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+ FMT(UYVY, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 2, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+ FMT(YUYV, 0, 8, 8, 8, 0, 1, 0, 2, false, true, 2, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+ FMT(YVYU, 0, 8, 8, 8, 0, 2, 0, 1, false, true, 2, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+ /* 3 plane YUV */
+ FMT(YUV420, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 1, 1,
+ MDP_PLANE_PLANAR, CHROMA_420, true),
+ FMT(YVU420, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 1, 1,
+ MDP_PLANE_PLANAR, CHROMA_420, true),
+};
+
+/*
+ * Note:
+ * @rgb_only must be set to true, when requesting
+ * supported formats for RGB pipes.
+ */
+uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats,
+ bool rgb_only)
+{
+ uint32_t i;
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ const struct mdp_format *f = &formats[i];
+
+ if (i == max_formats)
+ break;
+
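+		/* formats[] lists RGB entries first, so the first YUV format
+		 * ends an rgb_only scan
+		 */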
+ if (rgb_only && MDP_FORMAT_IS_YUV(f))
+ break;
+
+ pixel_formats[i] = f->base.pixel_format;
+ }
+
+ return i;
+}
+
+const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format,
+ uint64_t modifier)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ const struct mdp_format *f = &formats[i];
+ if (f->base.pixel_format == format)
+ return &f->base;
+ }
+ return NULL;
+}
+
+struct csc_cfg *mdp_get_default_csc_cfg(enum csc_type type)
+{
+ if (WARN_ON(type >= CSC_MAX))
+ return NULL;
+
+ return &csc_convert[type];
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp_kms.c b/drivers/gpu/drm/msm/disp/mdp_kms.c
new file mode 100644
index 0000000000..3c35ccfc73
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp_kms.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+
+#include "msm_drv.h"
+#include "mdp_kms.h"
+
+
+struct mdp_irq_wait {
+ struct mdp_irq irq;
+ int count;
+};
+
+static DECLARE_WAIT_QUEUE_HEAD(wait_event);
+
+static DEFINE_SPINLOCK(list_lock);
+
+static void update_irq(struct mdp_kms *mdp_kms)
+{
+ struct mdp_irq *irq;
+ uint32_t irqmask = mdp_kms->vblank_mask;
+
+ assert_spin_locked(&list_lock);
+
+ list_for_each_entry(irq, &mdp_kms->irq_list, node)
+ irqmask |= irq->irqmask;
+
+ mdp_kms->funcs->set_irqmask(mdp_kms, irqmask, mdp_kms->cur_irq_mask);
+ mdp_kms->cur_irq_mask = irqmask;
+}
+
+/* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder
+ * link changes, this must be called to figure out the new global irqmask
+ */
+void mdp_irq_update(struct mdp_kms *mdp_kms)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&list_lock, flags);
+ update_irq(mdp_kms);
+ spin_unlock_irqrestore(&list_lock, flags);
+}
+
+void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status)
+{
+ struct mdp_irq *handler, *n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&list_lock, flags);
+ mdp_kms->in_irq = true;
+ list_for_each_entry_safe(handler, n, &mdp_kms->irq_list, node) {
+ if (handler->irqmask & status) {
+ spin_unlock_irqrestore(&list_lock, flags);
+ handler->irq(handler, handler->irqmask & status);
+ spin_lock_irqsave(&list_lock, flags);
+ }
+ }
+ mdp_kms->in_irq = false;
+ update_irq(mdp_kms);
+ spin_unlock_irqrestore(&list_lock, flags);
+}
+
+void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&list_lock, flags);
+ if (enable)
+ mdp_kms->vblank_mask |= mask;
+ else
+ mdp_kms->vblank_mask &= ~mask;
+ update_irq(mdp_kms);
+ spin_unlock_irqrestore(&list_lock, flags);
+}
+
+static void wait_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp_irq_wait *wait =
+ container_of(irq, struct mdp_irq_wait, irq);
+ wait->count--;
+ wake_up_all(&wait_event);
+}
+
+void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask)
+{
+ struct mdp_irq_wait wait = {
+ .irq = {
+ .irq = wait_irq,
+ .irqmask = irqmask,
+ },
+ .count = 1,
+ };
+ mdp_irq_register(mdp_kms, &wait.irq);
+ wait_event_timeout(wait_event, (wait.count <= 0),
+ msecs_to_jiffies(100));
+ mdp_irq_unregister(mdp_kms, &wait.irq);
+}
+
+void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
+{
+ unsigned long flags;
+ bool needs_update = false;
+
+ spin_lock_irqsave(&list_lock, flags);
+
+ if (!irq->registered) {
+ irq->registered = true;
+ list_add(&irq->node, &mdp_kms->irq_list);
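+		/* when called from irq dispatch, mdp_dispatch_irqs() refreshes
+		 * the irqmask itself on exit
+		 */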
+ needs_update = !mdp_kms->in_irq;
+ }
+
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ if (needs_update)
+ mdp_irq_update(mdp_kms);
+}
+
+void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
+{
+ unsigned long flags;
+ bool needs_update = false;
+
+ spin_lock_irqsave(&list_lock, flags);
+
+ if (irq->registered) {
+ irq->registered = false;
+ list_del(&irq->node);
+ needs_update = !mdp_kms->in_irq;
+ }
+
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ if (needs_update)
+ mdp_irq_update(mdp_kms);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp_kms.h b/drivers/gpu/drm/msm/disp/mdp_kms.h
new file mode 100644
index 0000000000..b0286d5d51
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp_kms.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __MDP_KMS_H__
+#define __MDP_KMS_H__
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "mdp_common.xml.h"
+
+struct mdp_kms;
+
+struct mdp_kms_funcs {
+ struct msm_kms_funcs base;
+ void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask,
+ uint32_t old_irqmask);
+};
+
+struct mdp_kms {
+ struct msm_kms base;
+
+ const struct mdp_kms_funcs *funcs;
+
+ /* irq handling: */
+ bool in_irq;
+	struct list_head irq_list;    /* list of mdp_irq */
+ uint32_t vblank_mask; /* irq bits set for userspace vblank */
+ uint32_t cur_irq_mask; /* current irq mask */
+};
+#define to_mdp_kms(x) container_of(x, struct mdp_kms, base)
+
+static inline int mdp_kms_init(struct mdp_kms *mdp_kms,
+ const struct mdp_kms_funcs *funcs)
+{
+ mdp_kms->funcs = funcs;
+ INIT_LIST_HEAD(&mdp_kms->irq_list);
+ return msm_kms_init(&mdp_kms->base, &funcs->base);
+}
+
+static inline void mdp_kms_destroy(struct mdp_kms *mdp_kms)
+{
+ msm_kms_destroy(&mdp_kms->base);
+}
+
+/*
+ * irq helpers:
+ */
+
+/* For transiently registering for different MDP irqs that various parts
+ * of the KMS code need during setup/configuration. These are not
+ * necessarily the same as what drm_vblank_get/put() are requesting, and
+ * the hysteresis in drm_vblank_put() is not necessarily desirable for
+ * internal housekeeping related irq usage.
+ */
+struct mdp_irq {
+ struct list_head node;
+ uint32_t irqmask;
+ bool registered;
+ void (*irq)(struct mdp_irq *irq, uint32_t irqstatus);
+};
+
+void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status);
+void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable);
+void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask);
+void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
+void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
+void mdp_irq_update(struct mdp_kms *mdp_kms);
+
+/*
+ * pixel format helpers:
+ */
+
+struct mdp_format {
+ struct msm_format base;
+ enum mdp_bpc bpc_r, bpc_g, bpc_b;
+ enum mdp_bpc_alpha bpc_a;
+ uint8_t unpack[4];
+ bool alpha_enable, unpack_tight;
+ uint8_t cpp, unpack_count;
+ enum mdp_fetch_type fetch_type;
+ enum mdp_chroma_samp_type chroma_sample;
+ bool is_yuv;
+};
+#define to_mdp_format(x) container_of(x, struct mdp_format, base)
+#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv)
+
+uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only);
+const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier);
+
+/* MDP capabilities */
+#define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */
+#define MDP_CAP_DSC BIT(1) /* VESA Display Stream Compression */
+#define MDP_CAP_CDM BIT(2) /* Chroma Down Module (HDMI 2.0 YUV) */
+#define MDP_CAP_SRC_SPLIT BIT(3) /* Source Split of SSPPs */
+
+/* MDP pipe capabilities */
+#define MDP_PIPE_CAP_HFLIP BIT(0)
+#define MDP_PIPE_CAP_VFLIP BIT(1)
+#define MDP_PIPE_CAP_SCALE BIT(2)
+#define MDP_PIPE_CAP_CSC BIT(3)
+#define MDP_PIPE_CAP_DECIMATION BIT(4)
+#define MDP_PIPE_CAP_SW_PIX_EXT BIT(5)
+#define MDP_PIPE_CAP_CURSOR BIT(6)
+
+/* MDP layer mixer caps */
+#define MDP_LM_CAP_DISPLAY BIT(0)
+#define MDP_LM_CAP_WB BIT(1)
+#define MDP_LM_CAP_PAIR BIT(2)
+
+static inline bool pipe_supports_yuv(uint32_t pipe_caps)
+{
+ return (pipe_caps & MDP_PIPE_CAP_SCALE) &&
+ (pipe_caps & MDP_PIPE_CAP_CSC);
+}
+
+enum csc_type {
+ CSC_RGB2RGB = 0,
+ CSC_YUV2RGB,
+ CSC_RGB2YUV,
+ CSC_YUV2YUV,
+ CSC_MAX
+};
+
+struct csc_cfg {
+ enum csc_type type;
+ uint32_t matrix[9];
+ uint32_t pre_bias[3];
+ uint32_t post_bias[3];
+ uint32_t pre_clamp[6];
+ uint32_t post_clamp[6];
+};
+
+struct csc_cfg *mdp_get_default_csc_cfg(enum csc_type);
+
+#endif /* __MDP_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
new file mode 100644
index 0000000000..e75b97127c
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include "msm_disp_snapshot.h"
+
+static ssize_t __maybe_unused disp_devcoredump_read(char *buffer, loff_t offset,
+ size_t count, void *data, size_t datalen)
+{
+ struct drm_print_iterator iter;
+ struct drm_printer p;
+ struct msm_disp_state *disp_state;
+
+ disp_state = data;
+
+ iter.data = buffer;
+ iter.offset = 0;
+ iter.start = offset;
+ iter.remain = count;
+
+ p = drm_coredump_printer(&iter);
+
+ msm_disp_state_print(disp_state, &p);
+
+ return count - iter.remain;
+}
+
+struct msm_disp_state *
+msm_disp_snapshot_state_sync(struct msm_kms *kms)
+{
+ struct drm_device *drm_dev = kms->dev;
+ struct msm_disp_state *disp_state;
+
+ WARN_ON(!mutex_is_locked(&kms->dump_mutex));
+
+ disp_state = kzalloc(sizeof(struct msm_disp_state), GFP_KERNEL);
+ if (!disp_state)
+ return ERR_PTR(-ENOMEM);
+
+ disp_state->dev = drm_dev->dev;
+ disp_state->drm_dev = drm_dev;
+
+ INIT_LIST_HEAD(&disp_state->blocks);
+
+ msm_disp_snapshot_capture_state(disp_state);
+
+ return disp_state;
+}
+
+static void _msm_disp_snapshot_work(struct kthread_work *work)
+{
+ struct msm_kms *kms = container_of(work, struct msm_kms, dump_work);
+ struct msm_disp_state *disp_state;
+ struct drm_printer p;
+
+ /* Serialize dumping here */
+ mutex_lock(&kms->dump_mutex);
+ disp_state = msm_disp_snapshot_state_sync(kms);
+ mutex_unlock(&kms->dump_mutex);
+
+ if (IS_ERR(disp_state))
+ return;
+
+ if (MSM_DISP_SNAPSHOT_DUMP_IN_CONSOLE) {
+ p = drm_info_printer(disp_state->drm_dev->dev);
+ msm_disp_state_print(disp_state, &p);
+ }
+
+ /*
+ * If COREDUMP is disabled, the stub will call the free function.
+	 * If there is a coredump pending for the device, dev_coredumpm()
+ * will also free new coredump state.
+ */
+ dev_coredumpm(disp_state->dev, THIS_MODULE, disp_state, 0, GFP_KERNEL,
+ disp_devcoredump_read, msm_disp_state_free);
+}
+
+void msm_disp_snapshot_state(struct drm_device *drm_dev)
+{
+ struct msm_drm_private *priv;
+ struct msm_kms *kms;
+
+ if (!drm_dev) {
+ DRM_ERROR("invalid params\n");
+ return;
+ }
+
+ priv = drm_dev->dev_private;
+ kms = priv->kms;
+
+ kthread_queue_work(kms->dump_worker, &kms->dump_work);
+}
+
+int msm_disp_snapshot_init(struct drm_device *drm_dev)
+{
+ struct msm_drm_private *priv;
+ struct msm_kms *kms;
+
+ if (!drm_dev) {
+ DRM_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ priv = drm_dev->dev_private;
+ kms = priv->kms;
+
+ mutex_init(&kms->dump_mutex);
+
+ kms->dump_worker = kthread_create_worker(0, "%s", "disp_snapshot");
+ if (IS_ERR(kms->dump_worker))
+ DRM_ERROR("failed to create disp state task\n");
+
+ kthread_init_work(&kms->dump_work, _msm_disp_snapshot_work);
+
+ return 0;
+}
+
+void msm_disp_snapshot_destroy(struct drm_device *drm_dev)
+{
+ struct msm_kms *kms;
+ struct msm_drm_private *priv;
+
+ if (!drm_dev) {
+ DRM_ERROR("invalid params\n");
+ return;
+ }
+
+ priv = drm_dev->dev_private;
+ kms = priv->kms;
+
+ if (kms->dump_worker)
+ kthread_destroy_worker(kms->dump_worker);
+
+ mutex_destroy(&kms->dump_mutex);
+}
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
new file mode 100644
index 0000000000..b5f452bd7a
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef MSM_DISP_SNAPSHOT_H_
+#define MSM_DISP_SNAPSHOT_H_
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_device.h>
+#include "../../../drm_crtc_internal.h"
+#include <drm/drm_print.h>
+#include <drm/drm_atomic.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+#include <linux/uaccess.h>
+#include <linux/dma-buf.h>
+#include <linux/slab.h>
+#include <linux/list_sort.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/kthread.h>
+#include <linux/devcoredump.h>
+#include "msm_kms.h"
+
+#define MSM_DISP_SNAPSHOT_MAX_BLKS 10
+
+/* debug option to print the registers in logs */
+#define MSM_DISP_SNAPSHOT_DUMP_IN_CONSOLE 0
+
+/* print debug ranges in groups of 4 u32s */
+#define REG_DUMP_ALIGN 16
+
+/**
+ * struct msm_disp_state - structure to store current dpu state
+ * @dev: device pointer
+ * @drm_dev: drm device pointer
+ * @blocks: list of hardware block register dump entries
+ * @atomic_state: atomic state duplicated at the time of the error
+ * @time: timestamp at which the coredump was captured
+ */
+struct msm_disp_state {
+ struct device *dev;
+ struct drm_device *drm_dev;
+
+ struct list_head blocks;
+
+ struct drm_atomic_state *atomic_state;
+
+ struct timespec64 time;
+};
+
+/**
+ * struct msm_disp_state_block - structure to store each hardware block state
+ * @name: name of the block
+ * @node: handle to the linked list head
+ * @size: size of the register space of this hardware block
+ * @state: array holding the register dump of this hardware block
+ * @base_addr: starting address of this hardware block's register space
+ */
+struct msm_disp_state_block {
+ char name[SZ_128];
+ struct list_head node;
+ unsigned int size;
+ u32 *state;
+ void __iomem *base_addr;
+};
+
+/**
+ * msm_disp_snapshot_init - initialize display snapshot
+ * @drm_dev: drm device handle
+ *
+ * Returns: 0 or -ERROR
+ */
+int msm_disp_snapshot_init(struct drm_device *drm_dev);
+
+/**
+ * msm_disp_snapshot_destroy - destroy the display snapshot
+ * @drm_dev: drm device handle
+ *
+ * Returns: none
+ */
+void msm_disp_snapshot_destroy(struct drm_device *drm_dev);
+
+/**
+ * msm_disp_snapshot_state_sync - synchronously snapshot display state
+ * @kms: the kms object
+ *
+ * Returns state or error
+ *
+ * Must be called with &kms->dump_mutex held
+ */
+struct msm_disp_state *msm_disp_snapshot_state_sync(struct msm_kms *kms);
+
+/**
+ * msm_disp_snapshot_state - trigger to dump the display snapshot
+ * @drm_dev: handle to drm device
+ *
+ * Returns: none
+ */
+void msm_disp_snapshot_state(struct drm_device *drm_dev);
+
+/**
+ * msm_disp_state_print - print out the current dpu state
+ * @disp_state: handle to struct msm_disp_state
+ * @p: handle to drm printer
+ *
+ * Returns: none
+ */
+void msm_disp_state_print(struct msm_disp_state *disp_state, struct drm_printer *p);
+
+/**
+ * msm_disp_snapshot_capture_state - utility to capture atomic state and hw registers
+ * @disp_state: handle to msm_disp_state struct
+ *
+ * Returns: none
+ */
+void msm_disp_snapshot_capture_state(struct msm_disp_state *disp_state);
+
+/**
+ * msm_disp_state_free - free the memory after the coredump has been read
+ * @data: handle to struct msm_disp_state
+ *
+ * Returns: none
+ */
+void msm_disp_state_free(void *data);
+
+/**
+ * msm_disp_snapshot_add_block - add a hardware block with its register dump
+ * @disp_state: handle to struct msm_disp_state
+ * @len: size of the register space of the hardware block
+ * @base_addr: starting address of the register space of the hardware block
+ * @fmt: format string (plus varargs) used to construct the block name
+ *
+ * Returns: none
+ */
+__printf(4, 5)
+void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len,
+ void __iomem *base_addr, const char *fmt, ...);
+
+#endif /* MSM_DISP_SNAPSHOT_H_ */
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
new file mode 100644
index 0000000000..add72bbc28
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <generated/utsrelease.h>
+
+#include "msm_disp_snapshot.h"
+
+static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *base_addr)
+{
+ u32 len_padded;
+ u32 num_rows;
+ u32 x0, x4, x8, xc;
+ void __iomem *addr;
+ u32 *dump_addr = NULL;
+ void __iomem *end_addr;
+ int i;
+
+ len_padded = aligned_len * REG_DUMP_ALIGN;
+ num_rows = aligned_len / REG_DUMP_ALIGN;
+
+ addr = base_addr;
+ end_addr = base_addr + aligned_len;
+
+ if (!(*reg))
+ *reg = kzalloc(len_padded, GFP_KERNEL);
+
+ if (*reg)
+ dump_addr = *reg;
+
+ for (i = 0; i < num_rows; i++) {
+ x0 = (addr < end_addr) ? readl_relaxed(addr + 0x0) : 0;
+ x4 = (addr + 0x4 < end_addr) ? readl_relaxed(addr + 0x4) : 0;
+ x8 = (addr + 0x8 < end_addr) ? readl_relaxed(addr + 0x8) : 0;
+ xc = (addr + 0xc < end_addr) ? readl_relaxed(addr + 0xc) : 0;
+
+ if (dump_addr) {
+ dump_addr[i * 4] = x0;
+ dump_addr[i * 4 + 1] = x4;
+ dump_addr[i * 4 + 2] = x8;
+ dump_addr[i * 4 + 3] = xc;
+ }
+
+ addr += REG_DUMP_ALIGN;
+ }
+}
+
+static void msm_disp_state_print_regs(u32 **reg, u32 len, void __iomem *base_addr,
+ struct drm_printer *p)
+{
+ int i;
+ u32 *dump_addr = NULL;
+ void __iomem *addr;
+ u32 num_rows;
+
+ addr = base_addr;
+ num_rows = len / REG_DUMP_ALIGN;
+
+	if (*reg)
+		dump_addr = *reg;
+
+	/* bail out if the dump buffer was never allocated */
+	if (!dump_addr) {
+		drm_printf(p, "Registers not stored\n");
+		return;
+	}
+
+ for (i = 0; i < num_rows; i++) {
+ drm_printf(p, "0x%lx : %08x %08x %08x %08x\n",
+ (unsigned long)(addr - base_addr),
+ dump_addr[i * 4], dump_addr[i * 4 + 1],
+ dump_addr[i * 4 + 2], dump_addr[i * 4 + 3]);
+ addr += REG_DUMP_ALIGN;
+ }
+}
+
+void msm_disp_state_print(struct msm_disp_state *state, struct drm_printer *p)
+{
+ struct msm_disp_state_block *block, *tmp;
+
+ if (!p) {
+ DRM_ERROR("invalid drm printer\n");
+ return;
+ }
+
+ drm_printf(p, "---\n");
+ drm_printf(p, "kernel: " UTS_RELEASE "\n");
+ drm_printf(p, "module: " KBUILD_MODNAME "\n");
+ drm_printf(p, "dpu devcoredump\n");
+ drm_printf(p, "time: %lld.%09ld\n",
+ state->time.tv_sec, state->time.tv_nsec);
+
+ list_for_each_entry_safe(block, tmp, &state->blocks, node) {
+ drm_printf(p, "====================%s================\n", block->name);
+ msm_disp_state_print_regs(&block->state, block->size, block->base_addr, p);
+ }
+
+ drm_printf(p, "===================dpu drm state================\n");
+
+ if (state->atomic_state)
+ drm_atomic_print_new_state(state->atomic_state, p);
+}
+
+static void msm_disp_capture_atomic_state(struct msm_disp_state *disp_state)
+{
+ struct drm_device *ddev;
+ struct drm_modeset_acquire_ctx ctx;
+
+ ktime_get_real_ts64(&disp_state->time);
+
+ ddev = disp_state->drm_dev;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ while (drm_modeset_lock_all_ctx(ddev, &ctx) != 0)
+ drm_modeset_backoff(&ctx);
+
+ disp_state->atomic_state = drm_atomic_helper_duplicate_state(ddev,
+ &ctx);
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+void msm_disp_snapshot_capture_state(struct msm_disp_state *disp_state)
+{
+ struct msm_drm_private *priv;
+ struct drm_device *drm_dev;
+ struct msm_kms *kms;
+ int i;
+
+ drm_dev = disp_state->drm_dev;
+ priv = drm_dev->dev_private;
+ kms = priv->kms;
+
+ for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
+ if (!priv->dp[i])
+ continue;
+
+ msm_dp_snapshot(disp_state, priv->dp[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+ if (!priv->dsi[i])
+ continue;
+
+ msm_dsi_snapshot(disp_state, priv->dsi[i]);
+ }
+
+ if (kms->funcs->snapshot)
+ kms->funcs->snapshot(disp_state, kms);
+
+ msm_disp_capture_atomic_state(disp_state);
+}
+
+void msm_disp_state_free(void *data)
+{
+ struct msm_disp_state *disp_state = data;
+ struct msm_disp_state_block *block, *tmp;
+
+ if (disp_state->atomic_state) {
+ drm_atomic_state_put(disp_state->atomic_state);
+ disp_state->atomic_state = NULL;
+ }
+
+ list_for_each_entry_safe(block, tmp, &disp_state->blocks, node) {
+ list_del(&block->node);
+ kfree(block->state);
+ kfree(block);
+ }
+
+ kfree(disp_state);
+}
+
+void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len,
+ void __iomem *base_addr, const char *fmt, ...)
+{
+ struct msm_disp_state_block *new_blk;
+ struct va_format vaf;
+ va_list va;
+
+ new_blk = kzalloc(sizeof(struct msm_disp_state_block), GFP_KERNEL);
+ if (!new_blk)
+ return;
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
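+	/* %pV expands the caller's format string and args into the name */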
+ snprintf(new_blk->name, sizeof(new_blk->name), "%pV", &vaf);
+
+ va_end(va);
+
+ INIT_LIST_HEAD(&new_blk->node);
+ new_blk->size = ALIGN(len, REG_DUMP_ALIGN);
+ new_blk->base_addr = base_addr;
+
+ msm_disp_state_dump_regs(&new_blk->state, new_blk->size, base_addr);
+ list_add_tail(&new_blk->node, &disp_state->blocks);
+}